tatami_chunked
Helpers to create custom chunked tatami matrices
CustomSparseChunkedMatrix.hpp
#ifndef TATAMI_CHUNKED_CUSTOM_SPARSE_CHUNKED_MATRIX_HPP
#define TATAMI_CHUNKED_CUSTOM_SPARSE_CHUNKED_MATRIX_HPP

#include "custom_internals.hpp"
#include "SparseSlabFactory.hpp"
#include "SlabCacheStats.hpp"
#include "LruSlabCache.hpp"
#include "OracularSlabCache.hpp"
#include "OracularSubsettedSlabCache.hpp"
#include "utils.hpp"

#include <vector>
#include <cstddef>

#include "tatami/tatami.hpp"
#include "sanisizer/sanisizer.hpp"

namespace tatami_chunked {

/**
 * @brief Options for data extraction from a CustomSparseChunkedMatrix.
 */
struct CustomSparseChunkedMatrixOptions {
    /**
     * Maximum size of the slab cache, in bytes.
     */
    std::size_t maximum_cache_size = sanisizer::cap<std::size_t>(100000000);

    /**
     * Whether to automatically enlarge the cache so that it can hold at least
     * one slab, even if this exceeds maximum_cache_size. If false and the
     * cache is too small for a single slab, extraction falls back to a slower
     * chunk-by-chunk mode.
     */
    bool require_minimum_cache = true;

    /**
     * Whether to cache only the requested subset of each slab when an oracle
     * is available. This reduces memory usage for sparse access patterns at
     * the cost of some extra bookkeeping.
     */
    bool cache_subset = false;
};
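// Example: sizing the cache. For row-wise extraction across all columns, one
// slab spans (chunk_nrow x ncol) elements, and each element costs up to
// sizeof(ChunkValue_) + sizeof(Index_) bytes when both values and indices are
// extracted. A sketch for double values, int indices, 100-row chunks and
// 200,000 columns:
//
//     tatami_chunked::CustomSparseChunkedMatrixOptions opt;
//     opt.maximum_cache_size = 100 * 200000 * 12 * 2; // room for ~2 slabs
//     opt.require_minimum_cache = true; // keep one slab even if it exceeds the limit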

/**
 * @brief Workspace for extracting data from a CustomSparseChunkedMatrixManager.
 *
 * Implementations may hold temporary data structures that persist across
 * extract() calls, avoiding reallocation for every chunk.
 */
template<typename ChunkValue_, typename Index_>
class CustomSparseChunkedMatrixWorkspace {
public:
    virtual ~CustomSparseChunkedMatrixWorkspace() = default;

    /**
     * Extract a contiguous block of elements along the target dimension of a
     * chunk, restricted to a contiguous block along the non-target dimension.
     * Results are appended to the per-element output buffers: for each target
     * element, values and (shifted) indices are written at the offset recorded
     * in output_number, which is then incremented by the number of extracted
     * elements. shift is added to each non-target index before storage, to
     * convert chunk-local indices into output coordinates.
     */
    virtual void extract(
        Index_ chunk_row_id,
        Index_ chunk_column_id,
        bool row,
        Index_ target_start,
        Index_ target_length,
        Index_ non_target_start,
        Index_ non_target_length,
        const std::vector<ChunkValue_*>& output_values,
        const std::vector<Index_*>& output_indices,
        Index_* output_number,
        Index_ shift
    ) = 0;

    /**
     * As above, but restricted to a sorted and unique subset of indices
     * (non_target_indices) along the non-target dimension.
     */
    virtual void extract(
        Index_ chunk_row_id,
        Index_ chunk_column_id,
        bool row,
        Index_ target_start,
        Index_ target_length,
        const std::vector<Index_>& non_target_indices,
        const std::vector<ChunkValue_*>& output_values,
        const std::vector<Index_*>& output_indices,
        Index_* output_number,
        Index_ shift
    ) = 0;

    /**
     * As above, but extracting a sorted and unique subset of indices
     * (target_indices) along the target dimension, restricted to a contiguous
     * block along the non-target dimension.
     */
    virtual void extract(
        Index_ chunk_row_id,
        Index_ chunk_column_id,
        bool row,
        const std::vector<Index_>& target_indices,
        Index_ non_target_start,
        Index_ non_target_length,
        const std::vector<ChunkValue_*>& output_values,
        const std::vector<Index_*>& output_indices,
        Index_* output_number,
        Index_ shift
    ) = 0;

    /**
     * As above, but with sorted and unique index subsets along both the
     * target and non-target dimensions.
     */
    virtual void extract(
        Index_ chunk_row_id,
        Index_ chunk_column_id,
        bool row,
        const std::vector<Index_>& target_indices,
        const std::vector<Index_>& non_target_indices,
        const std::vector<ChunkValue_*>& output_values,
        const std::vector<Index_*>& output_indices,
        Index_* output_number,
        Index_ shift
    ) = 0;
};
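// A minimal sketch of a concrete Workspace for a hypothetical chunk format in
// which each chunk stores sorted (index, value) runs per row/column. 'MyChunk'
// and 'fetch_my_chunk()' are assumptions for illustration, not part of this
// library, and the output buffers are assumed to be indexed by chunk-local
// position along the target dimension. Only the first extract() overload is
// shown; the other three follow the same pattern.
//
//     class MyWorkspace final : public CustomSparseChunkedMatrixWorkspace<double, int> {
//     public:
//         void extract(
//             int chunk_row_id, int chunk_column_id, bool row,
//             int target_start, int target_length,
//             int non_target_start, int non_target_length,
//             const std::vector<double*>& output_values,
//             const std::vector<int*>& output_indices,
//             int* output_number, int shift
//         ) {
//             const MyChunk& chunk = fetch_my_chunk(chunk_row_id, chunk_column_id);
//             for (int p = 0; p < target_length; ++p) {
//                 int t = target_start + p; // chunk-local position on the target dimension
//                 for (const auto& run : chunk.runs(row, t)) { // sorted (index, value) pairs
//                     if (run.index >= non_target_start && run.index < non_target_start + non_target_length) {
//                         auto& n = output_number[t];
//                         output_values[t][n] = run.value;
//                         output_indices[t][n] = run.index + shift; // shift into output coordinates
//                         ++n;
//                     }
//                 }
//             }
//         }
//
//         // ... the three indexed overloads, omitted for brevity.
//     };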

/**
 * @brief Manager of chunks for a CustomSparseChunkedMatrix.
 */
template<typename ChunkValue_, typename Index_>
class CustomSparseChunkedMatrixManager {
public:
    virtual ~CustomSparseChunkedMatrixManager() = default;

    /**
     * Create a new workspace for chunk extraction.
     */
    virtual std::unique_ptr<CustomSparseChunkedMatrixWorkspace<ChunkValue_, Index_> > new_workspace() const = 0;

    /**
     * As new_workspace(), but subclasses may shadow this method to return the
     * concrete workspace type. The matrix templates itself on the returned
     * pointer type, so a shadowing method avoids virtual dispatch in extract().
     */
    std::unique_ptr<CustomSparseChunkedMatrixWorkspace<ChunkValue_, Index_> > new_workspace_exact() const {
        return new_workspace();
    }

    /**
     * Whether extraction along rows is the more efficient access pattern.
     */
    virtual bool prefer_rows() const = 0;

    /**
     * Statistics for the chunks along the rows.
     */
    virtual const ChunkDimensionStats<Index_>& row_stats() const = 0;

    /**
     * Statistics for the chunks along the columns.
     */
    virtual const ChunkDimensionStats<Index_>& column_stats() const = 0;
};
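// A matching Manager sketch, wiring the hypothetical 'MyWorkspace' above to
// the chunk grid. This assumes the ChunkDimensionStats constructor taking
// (dimension extent, chunk length) for a regular grid of chunks.
//
//     class MyManager final : public CustomSparseChunkedMatrixManager<double, int> {
//     public:
//         MyManager(int nrow, int ncol, int chunk_nrow, int chunk_ncol) :
//             my_row_stats(nrow, chunk_nrow), my_col_stats(ncol, chunk_ncol) {}
//
//         std::unique_ptr<CustomSparseChunkedMatrixWorkspace<double, int> > new_workspace() const {
//             return std::make_unique<MyWorkspace>();
//         }
//
//         bool prefer_rows() const {
//             return false; // chunks here are column-major, so favor column access
//         }
//
//         const ChunkDimensionStats<int>& row_stats() const { return my_row_stats; }
//         const ChunkDimensionStats<int>& column_stats() const { return my_col_stats; }
//
//     private:
//         ChunkDimensionStats<int> my_row_stats, my_col_stats;
//     };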

namespace CustomChunkedMatrix_internal {

/*********************
 **** Base classes ***
 *********************/

template<bool oracle_, typename Value_, typename Index_, typename ChunkValue_, class WorkspacePtr_>
class SoloSparseCore {
    WorkspacePtr_ my_chunk_workspace;
    const ChunkCoordinator<true, ChunkValue_, Index_>& my_coordinator;

    tatami::MaybeOracle<oracle_, Index_> my_oracle;
    typename std::conditional<oracle_, tatami::PredictionIndex, bool>::type my_counter = 0;

    SparseSlabFactory<ChunkValue_, Index_, Index_> my_factory;
    typedef typename I<decltype(my_factory)>::Slab Slab;

    // These two instances are not fully allocated Slabs; rather, tmp_solo just
    // holds the content for a single chunk, while final_solo holds the content
    // across chunks but only for the requested dimension element. Both cases
    // are likely to be much smaller than a full Slab, so we're already more
    // memory-efficient than 'require_minimum_cache = true'.
    SparseSingleWorkspace<ChunkValue_, Index_> my_tmp_solo;
    Slab my_final_solo;

public:
    SoloSparseCore(
        WorkspacePtr_ chunk_workspace,
        const ChunkCoordinator<true, ChunkValue_, Index_>& coordinator,
        [[maybe_unused]] const SlabCacheStats<Index_>& slab_stats, // for consistency with the other base classes.
        bool row,
        tatami::MaybeOracle<oracle_, Index_> oracle,
        Index_ non_target_length,
        bool needs_value,
        bool needs_index
    ) :
        my_chunk_workspace(std::move(chunk_workspace)),
        my_coordinator(coordinator),
        my_oracle(std::move(oracle)),
        my_factory(1, non_target_length, 1, needs_value, needs_index),
        my_tmp_solo(
            my_coordinator.get_target_chunkdim(row),
            my_coordinator.get_non_target_chunkdim(row),
            needs_value,
            needs_index
        ),
        my_final_solo(my_factory.create())
    {}

    template<typename ... Args_>
    std::pair<const Slab*, Index_> fetch_raw(Index_ i, bool row, Args_&& ... args) {
        if constexpr(oracle_) {
            i = my_oracle->get(my_counter++);
        }
        return my_coordinator.fetch_single(row, i, std::forward<Args_>(args)..., *my_chunk_workspace, my_tmp_solo, my_final_solo);
    }
};

template<typename Value_, typename Index_, typename ChunkValue_, class WorkspacePtr_>
class MyopicSparseCore {
    WorkspacePtr_ my_chunk_workspace;
    const ChunkCoordinator<true, ChunkValue_, Index_>& my_coordinator;

    SparseSlabFactory<ChunkValue_, Index_, Index_> my_factory;
    typedef typename I<decltype(my_factory)>::Slab Slab;

    LruSlabCache<Index_, Slab> my_cache;

public:
    MyopicSparseCore(
        WorkspacePtr_ chunk_workspace,
        const ChunkCoordinator<true, ChunkValue_, Index_>& coordinator,
        const SlabCacheStats<Index_>& slab_stats,
        bool row,
        [[maybe_unused]] tatami::MaybeOracle<false, Index_> oracle, // for consistency with the other base classes
        Index_ non_target_length,
        bool needs_value,
        bool needs_index
    ) :
        my_chunk_workspace(std::move(chunk_workspace)),
        my_coordinator(coordinator),
        my_factory(coordinator.get_target_chunkdim(row), non_target_length, slab_stats, needs_value, needs_index),
        my_cache(slab_stats.max_slabs_in_cache)
    {}

    template<typename ... Args_>
    std::pair<const Slab*, Index_> fetch_raw(Index_ i, bool row, Args_&& ... args) {
        return my_coordinator.fetch_myopic(row, i, std::forward<Args_>(args)..., *my_chunk_workspace, my_cache, my_factory);
    }
};

template<bool use_subset_, typename Value_, typename Index_, typename ChunkValue_, class WorkspacePtr_>
class OracularSparseCore {
protected:
    WorkspacePtr_ my_chunk_workspace;
    const ChunkCoordinator<true, ChunkValue_, Index_>& my_coordinator;

    SparseSlabFactory<ChunkValue_, Index_, Index_> my_factory;
    typedef typename I<decltype(my_factory)>::Slab Slab;

    typename std::conditional<use_subset_, OracularSubsettedSlabCache<Index_, Index_, Slab>, OracularSlabCache<Index_, Index_, Slab> >::type my_cache;

public:
    OracularSparseCore(
        WorkspacePtr_ chunk_workspace,
        const ChunkCoordinator<true, ChunkValue_, Index_>& coordinator,
        const SlabCacheStats<Index_>& slab_stats,
        bool row,
        tatami::MaybeOracle<true, Index_> oracle,
        Index_ non_target_length,
        bool needs_value,
        bool needs_index
    ) :
        my_chunk_workspace(std::move(chunk_workspace)),
        my_coordinator(coordinator),
        my_factory(coordinator.get_target_chunkdim(row), non_target_length, slab_stats, needs_value, needs_index),
        my_cache(std::move(oracle), slab_stats.max_slabs_in_cache)
    {}

    template<typename ... Args_>
    std::pair<const Slab*, Index_> fetch_raw([[maybe_unused]] Index_ i, bool row, Args_&& ... args) {
        if constexpr(use_subset_) {
            return my_coordinator.fetch_oracular_subsetted(row, std::forward<Args_>(args)..., *my_chunk_workspace, my_cache, my_factory);
        } else {
            return my_coordinator.fetch_oracular(row, std::forward<Args_>(args)..., *my_chunk_workspace, my_cache, my_factory);
        }
    }
};

template<bool solo_, bool oracle_, bool use_subset_, typename Value_, typename Index_, typename ChunkValue_, class WorkspacePtr_>
using SparseCore = typename std::conditional<solo_,
    SoloSparseCore<oracle_, Value_, Index_, ChunkValue_, WorkspacePtr_>,
    typename std::conditional<oracle_,
        OracularSparseCore<use_subset_, Value_, Index_, ChunkValue_, WorkspacePtr_>,
        MyopicSparseCore<Value_, Index_, ChunkValue_, WorkspacePtr_>
    >::type
>::type;

/***********************
 **** Sparse classes ***
 ***********************/

template<class Slab_, typename Index_, typename Value_>
tatami::SparseRange<Value_, Index_> process_sparse_slab(const std::pair<const Slab_*, Index_>& fetched, Value_* value_buffer, Index_* index_buffer, bool needs_value, bool needs_index) {
    auto num = fetched.first->number[fetched.second];

    if (needs_value) {
        auto vptr = fetched.first->values[fetched.second];
        std::copy_n(vptr, num, value_buffer);
    } else {
        value_buffer = NULL;
    }

    if (needs_index) {
        auto iptr = fetched.first->indices[fetched.second];
        std::copy_n(iptr, num, index_buffer);
    } else {
        index_buffer = NULL;
    }

    return tatami::SparseRange<Value_, Index_>(num, value_buffer, index_buffer);
}

template<bool solo_, bool oracle_, bool use_subset_, typename Value_, typename Index_, typename ChunkValue_, class WorkspacePtr_>
class SparseFull : public tatami::SparseExtractor<oracle_, Value_, Index_> {
public:
    SparseFull(
        WorkspacePtr_ chunk_workspace,
        const ChunkCoordinator<true, ChunkValue_, Index_>& coordinator,
        const SlabCacheStats<Index_>& slab_stats,
        bool row,
        tatami::MaybeOracle<oracle_, Index_> oracle,
        const tatami::Options& opt
    ) :
        my_row(row),
        my_non_target_dim(coordinator.get_non_target_dim(row)),
        my_needs_value(opt.sparse_extract_value),
        my_needs_index(opt.sparse_extract_index),
        my_core(
            std::move(chunk_workspace),
            coordinator,
            slab_stats,
            row,
            std::move(oracle),
            my_non_target_dim,
            opt.sparse_extract_value,
            opt.sparse_extract_index
        )
    {}

    tatami::SparseRange<Value_, Index_> fetch(Index_ i, Value_* value_buffer, Index_* index_buffer) {
        auto fetched = my_core.fetch_raw(i, my_row, 0, my_non_target_dim);
        return process_sparse_slab(fetched, value_buffer, index_buffer, my_needs_value, my_needs_index);
    }

private:
    bool my_row;
    Index_ my_non_target_dim;
    bool my_needs_value, my_needs_index;
    SparseCore<solo_, oracle_, use_subset_, Value_, Index_, ChunkValue_, WorkspacePtr_> my_core;
};

template<bool solo_, bool oracle_, bool use_subset_, typename Value_, typename Index_, typename ChunkValue_, class WorkspacePtr_>
class SparseBlock : public tatami::SparseExtractor<oracle_, Value_, Index_> {
public:
    SparseBlock(
        WorkspacePtr_ chunk_workspace,
        const ChunkCoordinator<true, ChunkValue_, Index_>& coordinator,
        const SlabCacheStats<Index_>& slab_stats,
        bool row,
        tatami::MaybeOracle<oracle_, Index_> oracle,
        Index_ block_start,
        Index_ block_length,
        const tatami::Options& opt
    ) :
        my_row(row),
        my_block_start(block_start),
        my_block_length(block_length),
        my_needs_value(opt.sparse_extract_value),
        my_needs_index(opt.sparse_extract_index),
        my_core(
            std::move(chunk_workspace),
            coordinator,
            slab_stats,
            row,
            std::move(oracle),
            block_length,
            my_needs_value,
            my_needs_index
        )
    {}

    tatami::SparseRange<Value_, Index_> fetch(Index_ i, Value_* value_buffer, Index_* index_buffer) {
        auto fetched = my_core.fetch_raw(i, my_row, my_block_start, my_block_length);
        return process_sparse_slab(fetched, value_buffer, index_buffer, my_needs_value, my_needs_index);
    }

private:
    bool my_row;
    Index_ my_block_start, my_block_length;
    bool my_needs_value, my_needs_index;
    SparseCore<solo_, oracle_, use_subset_, Value_, Index_, ChunkValue_, WorkspacePtr_> my_core;
};

template<bool solo_, bool oracle_, bool use_subset_, typename Value_, typename Index_, typename ChunkValue_, class WorkspacePtr_>
class SparseIndex : public tatami::SparseExtractor<oracle_, Value_, Index_> {
public:
    SparseIndex(
        WorkspacePtr_ chunk_workspace,
        const ChunkCoordinator<true, ChunkValue_, Index_>& coordinator,
        const SlabCacheStats<Index_>& slab_stats,
        bool row,
        tatami::MaybeOracle<oracle_, Index_> oracle,
        tatami::VectorPtr<Index_> indices_ptr,
        const tatami::Options& opt
    ) :
        my_row(row),
        my_indices_ptr(std::move(indices_ptr)),
        my_needs_value(opt.sparse_extract_value),
        my_needs_index(opt.sparse_extract_index),
        my_core(
            std::move(chunk_workspace),
            coordinator,
            slab_stats,
            row,
            std::move(oracle),
            my_indices_ptr->size(),
            my_needs_value,
            my_needs_index
        )
    {}

    tatami::SparseRange<Value_, Index_> fetch(Index_ i, Value_* value_buffer, Index_* index_buffer) {
        auto fetched = my_core.fetch_raw(i, my_row, *my_indices_ptr, my_tmp_indices);
        return process_sparse_slab(fetched, value_buffer, index_buffer, my_needs_value, my_needs_index);
    }

private:
    bool my_row;
    tatami::VectorPtr<Index_> my_indices_ptr;
    std::vector<Index_> my_tmp_indices;
    bool my_needs_value, my_needs_index;
    SparseCore<solo_, oracle_, use_subset_, Value_, Index_, ChunkValue_, WorkspacePtr_> my_core;
};

/**************************
 **** Densified classes ***
 **************************/

template<bool solo_, bool oracle_, bool use_subset_, typename Value_, typename Index_, typename ChunkValue_, class WorkspacePtr_>
class DensifiedFull : public tatami::DenseExtractor<oracle_, Value_, Index_> {
public:
    DensifiedFull(
        WorkspacePtr_ chunk_workspace,
        const ChunkCoordinator<true, ChunkValue_, Index_>& coordinator,
        const SlabCacheStats<Index_>& slab_stats,
        bool row,
        tatami::MaybeOracle<oracle_, Index_> oracle,
        const tatami::Options&
    ) :
        my_row(row),
        my_non_target_dim(coordinator.get_non_target_dim(row)),
        my_core(
            std::move(chunk_workspace),
            coordinator,
            slab_stats,
            row,
            std::move(oracle),
            my_non_target_dim,
            true,
            true
        )
    {}

    const Value_* fetch(Index_ i, Value_* buffer) {
        auto contents = my_core.fetch_raw(i, my_row, 0, my_non_target_dim);

        Index_ num = contents.first->number[contents.second];
        auto vptr = contents.first->values[contents.second];
        auto iptr = contents.first->indices[contents.second];

        std::fill_n(buffer, my_non_target_dim, 0);
        for (Index_ x = 0; x < num; ++x, ++iptr, ++vptr) {
            buffer[*iptr] = *vptr;
        }
        return buffer;
    }

private:
    bool my_row;
    Index_ my_non_target_dim;
    SparseCore<solo_, oracle_, use_subset_, Value_, Index_, ChunkValue_, WorkspacePtr_> my_core;
};

template<bool solo_, bool oracle_, bool use_subset_, typename Value_, typename Index_, typename ChunkValue_, class WorkspacePtr_>
class DensifiedBlock : public tatami::DenseExtractor<oracle_, Value_, Index_> {
public:
    DensifiedBlock(
        WorkspacePtr_ chunk_workspace,
        const ChunkCoordinator<true, ChunkValue_, Index_>& coordinator,
        const SlabCacheStats<Index_>& slab_stats,
        bool row,
        tatami::MaybeOracle<oracle_, Index_> oracle,
        Index_ block_start,
        Index_ block_length,
        const tatami::Options&
    ) :
        my_row(row),
        my_block_start(block_start),
        my_block_length(block_length),
        my_core(
            std::move(chunk_workspace),
            coordinator,
            slab_stats,
            row,
            std::move(oracle),
            block_length,
            true,
            true
        )
    {}

    const Value_* fetch(Index_ i, Value_* buffer) {
        auto contents = my_core.fetch_raw(i, my_row, my_block_start, my_block_length);

        auto vptr = contents.first->values[contents.second];
        auto iptr = contents.first->indices[contents.second];
        auto num = contents.first->number[contents.second];

        std::fill_n(buffer, my_block_length, 0);
        for (Index_ x = 0; x < num; ++x, ++iptr, ++vptr) {
            buffer[*iptr - my_block_start] = *vptr;
        }
        return buffer;
    }

private:
    bool my_row;
    Index_ my_block_start, my_block_length;
    SparseCore<solo_, oracle_, use_subset_, Value_, Index_, ChunkValue_, WorkspacePtr_> my_core;
};

template<bool solo_, bool oracle_, bool use_subset_, typename Value_, typename Index_, typename ChunkValue_, class WorkspacePtr_>
class DensifiedIndex : public tatami::DenseExtractor<oracle_, Value_, Index_> {
public:
    DensifiedIndex(
        WorkspacePtr_ chunk_workspace,
        const ChunkCoordinator<true, ChunkValue_, Index_>& coordinator,
        const SlabCacheStats<Index_>& slab_stats,
        bool row,
        tatami::MaybeOracle<oracle_, Index_> oracle,
        tatami::VectorPtr<Index_> indices_ptr,
        const tatami::Options&
    ) :
        my_row(row),
        my_indices_ptr(std::move(indices_ptr)),
        my_core(
            std::move(chunk_workspace),
            coordinator,
            slab_stats,
            row,
            std::move(oracle),
            my_indices_ptr->size(),
            true,
            true
        )
    {
        const auto& indices = *my_indices_ptr;
        if (!indices.empty()) {
            my_remap_offset = indices.front();
            Index_ alloc = indices.back() - my_remap_offset + 1; // alloc must be <= dim extent, which should fit in an Index_.
            tatami::resize_container_to_Index_size(my_remap, alloc);
            Index_ counter = 0;
            for (auto i : indices) {
                my_remap[i - my_remap_offset] = counter;
                ++counter;
            }
        }
    }

    const Value_* fetch(Index_ i, Value_* buffer) {
        auto contents = my_core.fetch_raw(i, my_row, *my_indices_ptr, my_tmp_indices);

        auto vptr = contents.first->values[contents.second];
        auto iptr = contents.first->indices[contents.second];
        auto num = contents.first->number[contents.second];

        auto nidx = my_indices_ptr->size();
        std::fill_n(buffer, nidx, 0);
        for (Index_ x = 0; x < num; ++x, ++iptr, ++vptr) {
            buffer[my_remap[*iptr - my_remap_offset]] = *vptr;
        }
        return buffer;
    }

private:
    bool my_row;
    tatami::VectorPtr<Index_> my_indices_ptr;
    Index_ my_remap_offset = 0;
    std::vector<Index_> my_remap;
    std::vector<Index_> my_tmp_indices;
    SparseCore<solo_, oracle_, use_subset_, Value_, Index_, ChunkValue_, WorkspacePtr_> my_core;
};
} // namespace CustomChunkedMatrix_internal

/**
 * @brief Matrix of custom sparse chunks.
 *
 * Implements a tatami::Matrix over a grid of sparse chunks supplied by a
 * CustomSparseChunkedMatrixManager, caching extracted slabs to reduce the
 * number of calls into the chunking backend.
 */
template<typename Value_, typename Index_, typename ChunkValue_, class Manager_ = CustomSparseChunkedMatrixManager<ChunkValue_, Index_> >
class CustomSparseChunkedMatrix : public tatami::Matrix<Value_, Index_> {
public:
    /**
     * @param manager Manager of the chunks.
     * @param opt Options for extraction and caching.
     */
    CustomSparseChunkedMatrix(std::shared_ptr<Manager_> manager, const CustomSparseChunkedMatrixOptions& opt) :
        my_manager(std::move(manager)),
        my_coordinator(my_manager->row_stats(), my_manager->column_stats()),
        my_cache_size_in_bytes(opt.maximum_cache_size),
        my_require_minimum_cache(opt.require_minimum_cache),
        my_cache_subset(opt.cache_subset)
    {}

private:
    std::shared_ptr<Manager_> my_manager;
    CustomChunkedMatrix_internal::ChunkCoordinator<true, ChunkValue_, Index_> my_coordinator;
    std::size_t my_cache_size_in_bytes;
    bool my_require_minimum_cache;
    bool my_cache_subset;

public:
    Index_ nrow() const {
        return my_coordinator.get_nrow();
    }

    Index_ ncol() const {
        return my_coordinator.get_ncol();
    }

    bool prefer_rows() const {
        return my_manager->prefer_rows();
    }

    bool uses_oracle(bool) const {
        return true;
    }

    double prefer_rows_proportion() const {
        return static_cast<double>(my_manager->prefer_rows());
    }

    bool is_sparse() const {
        return true;
    }

    double is_sparse_proportion() const {
        return 1;
    }

    using tatami::Matrix<Value_, Index_>::dense;

    using tatami::Matrix<Value_, Index_>::sparse;

    /********************
     *** Myopic dense ***
     ********************/
private:
    template<
        template<bool, typename, typename> class Interface_,
        bool oracle_,
        template<bool, bool, bool, typename, typename, typename, class> class Extractor_,
        typename ... Args_
    >
    std::unique_ptr<Interface_<oracle_, Value_, Index_> > raw_internal(bool row, Index_ non_target_length, const tatami::Options& opt, Args_&& ... args) const {
        std::size_t element_size = (opt.sparse_extract_value ? sizeof(ChunkValue_) : 0) + (opt.sparse_extract_index ? sizeof(Index_) : 0);
        auto stats = [&]{
            if (row) {
                // Remember, the num_chunks_per_column is the number of slabs needed to divide up all the *rows* of the matrix.
                return SlabCacheStats<Index_>(
                    my_coordinator.get_chunk_nrow(),
                    non_target_length,
                    my_coordinator.get_num_chunks_per_column(), // already Index_, no need to do a protected cast.
                    my_cache_size_in_bytes,
                    element_size,
                    my_require_minimum_cache
                );
            } else {
                // Same as above, but this time, the num_chunks_per_row is the number of slabs needed to divide up all the *columns* of the matrix.
                return SlabCacheStats<Index_>(
                    my_coordinator.get_chunk_ncol(),
                    non_target_length,
                    my_coordinator.get_num_chunks_per_row(),
                    my_cache_size_in_bytes,
                    element_size,
                    my_require_minimum_cache
                );
            }
        }();

        auto wrk = my_manager->new_workspace_exact();
        if (stats.max_slabs_in_cache == 0) {
            return std::make_unique<Extractor_<true, oracle_, false, Value_, Index_, ChunkValue_, I<decltype(wrk)> > >(std::move(wrk), my_coordinator, stats, row, std::forward<Args_>(args)...);
        } else if constexpr(oracle_) {
            if (my_cache_subset) {
                return std::make_unique<Extractor_<false, true, true, Value_, Index_, ChunkValue_, I<decltype(wrk)> > >(std::move(wrk), my_coordinator, stats, row, std::forward<Args_>(args)...);
            } else {
                return std::make_unique<Extractor_<false, true, false, Value_, Index_, ChunkValue_, I<decltype(wrk)> > >(std::move(wrk), my_coordinator, stats, row, std::forward<Args_>(args)...);
            }
        } else {
            return std::make_unique<Extractor_<false, false, false, Value_, Index_, ChunkValue_, I<decltype(wrk)> > >(std::move(wrk), my_coordinator, stats, row, std::forward<Args_>(args)...);
        }
    }

public:
    std::unique_ptr<tatami::MyopicDenseExtractor<Value_, Index_> > dense(bool row, const tatami::Options& opt) const {
        return raw_internal<tatami::DenseExtractor, false, CustomChunkedMatrix_internal::DensifiedFull>(row, my_coordinator.get_non_target_dim(row), opt, false, opt);
    }

    std::unique_ptr<tatami::MyopicDenseExtractor<Value_, Index_> > dense(bool row, Index_ block_start, Index_ block_length, const tatami::Options& opt) const {
        return raw_internal<tatami::DenseExtractor, false, CustomChunkedMatrix_internal::DensifiedBlock>(row, block_length, opt, false, block_start, block_length, opt);
    }

    std::unique_ptr<tatami::MyopicDenseExtractor<Value_, Index_> > dense(bool row, tatami::VectorPtr<Index_> indices_ptr, const tatami::Options& opt) const {
        auto num_indices = indices_ptr->size();
        return raw_internal<tatami::DenseExtractor, false, CustomChunkedMatrix_internal::DensifiedIndex>(row, num_indices, opt, false, std::move(indices_ptr), opt);
    }

    /***********************
     *** Oracular dense ***
     ***********************/
public:
    std::unique_ptr<tatami::OracularDenseExtractor<Value_, Index_> > dense(
        bool row,
        std::shared_ptr<const tatami::Oracle<Index_> > oracle,
        const tatami::Options& opt)
    const {
        return raw_internal<tatami::DenseExtractor, true, CustomChunkedMatrix_internal::DensifiedFull>(row, my_coordinator.get_non_target_dim(row), opt, std::move(oracle), opt);
    }

    std::unique_ptr<tatami::OracularDenseExtractor<Value_, Index_> > dense(
        bool row,
        std::shared_ptr<const tatami::Oracle<Index_> > oracle,
        Index_ block_start,
        Index_ block_length,
        const tatami::Options& opt)
    const {
        return raw_internal<tatami::DenseExtractor, true, CustomChunkedMatrix_internal::DensifiedBlock>(row, block_length, opt, std::move(oracle), block_start, block_length, opt);
    }

    std::unique_ptr<tatami::OracularDenseExtractor<Value_, Index_> > dense(
        bool row,
        std::shared_ptr<const tatami::Oracle<Index_> > oracle,
        tatami::VectorPtr<Index_> indices_ptr,
        const tatami::Options& opt)
    const {
        auto num_indices = indices_ptr->size();
        return raw_internal<tatami::DenseExtractor, true, CustomChunkedMatrix_internal::DensifiedIndex>(row, num_indices, opt, std::move(oracle), std::move(indices_ptr), opt);
    }

    /*********************
     *** Myopic sparse ***
     *********************/
public:
    std::unique_ptr<tatami::MyopicSparseExtractor<Value_, Index_> > sparse(bool row, const tatami::Options& opt) const {
        return raw_internal<tatami::SparseExtractor, false, CustomChunkedMatrix_internal::SparseFull>(row, my_coordinator.get_non_target_dim(row), opt, false, opt);
    }

    std::unique_ptr<tatami::MyopicSparseExtractor<Value_, Index_> > sparse(bool row, Index_ block_start, Index_ block_length, const tatami::Options& opt) const {
        return raw_internal<tatami::SparseExtractor, false, CustomChunkedMatrix_internal::SparseBlock>(row, block_length, opt, false, block_start, block_length, opt);
    }

    std::unique_ptr<tatami::MyopicSparseExtractor<Value_, Index_> > sparse(bool row, tatami::VectorPtr<Index_> indices_ptr, const tatami::Options& opt) const {
        auto num_indices = indices_ptr->size();
        return raw_internal<tatami::SparseExtractor, false, CustomChunkedMatrix_internal::SparseIndex>(row, num_indices, opt, false, std::move(indices_ptr), opt);
    }

    /***********************
     *** Oracular sparse ***
     ***********************/
public:
    std::unique_ptr<tatami::OracularSparseExtractor<Value_, Index_> > sparse(
        bool row,
        std::shared_ptr<const tatami::Oracle<Index_> > oracle,
        const tatami::Options& opt)
    const {
        return raw_internal<tatami::SparseExtractor, true, CustomChunkedMatrix_internal::SparseFull>(row, my_coordinator.get_non_target_dim(row), opt, std::move(oracle), opt);
    }

    std::unique_ptr<tatami::OracularSparseExtractor<Value_, Index_> > sparse(
        bool row,
        std::shared_ptr<const tatami::Oracle<Index_> > oracle,
        Index_ block_start,
        Index_ block_length,
        const tatami::Options& opt)
    const {
        return raw_internal<tatami::SparseExtractor, true, CustomChunkedMatrix_internal::SparseBlock>(row, block_length, opt, std::move(oracle), block_start, block_length, opt);
    }

    std::unique_ptr<tatami::OracularSparseExtractor<Value_, Index_> > sparse(
        bool row,
        std::shared_ptr<const tatami::Oracle<Index_> > oracle,
        tatami::VectorPtr<Index_> indices_ptr,
        const tatami::Options& opt)
    const {
        auto num_indices = indices_ptr->size();
        return raw_internal<tatami::SparseExtractor, true, CustomChunkedMatrix_internal::SparseIndex>(row, num_indices, opt, std::move(oracle), std::move(indices_ptr), opt);
    }
};

} // namespace tatami_chunked

#endif
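// End-to-end usage sketch, assuming the hypothetical 'MyManager' from the
// sketches above; dimensions and chunk sizes are arbitrary.
//
//     #include "tatami_chunked/CustomSparseChunkedMatrix.hpp"
//     #include <memory>
//     #include <vector>
//
//     void example() {
//         auto manager = std::make_shared<MyManager>(1000, 200, 100, 50);
//
//         tatami_chunked::CustomSparseChunkedMatrixOptions opt;
//         opt.maximum_cache_size = 100000000;
//
//         tatami_chunked::CustomSparseChunkedMatrix<double, int, double> mat(std::move(manager), opt);
//
//         // Extraction goes through the usual tatami interfaces.
//         auto ext = mat.sparse(/* row = */ true, tatami::Options());
//         std::vector<double> vbuffer(mat.ncol());
//         std::vector<int> ibuffer(mat.ncol());
//         auto range = ext->fetch(0, vbuffer.data(), ibuffer.data());
//     }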