tatami_chunked
Helpers to create custom chunked tatami matrices
CustomSparseChunkedMatrix.hpp
#ifndef TATAMI_CHUNKED_CUSTOM_SPARSE_CHUNKED_MATRIX_HPP
#define TATAMI_CHUNKED_CUSTOM_SPARSE_CHUNKED_MATRIX_HPP

#include "tatami/tatami.hpp"
#include "custom_internals.hpp"
#include "SparseSlabFactory.hpp"
#include "SlabCacheStats.hpp"
#include "LruSlabCache.hpp"
#include "OracularSlabCache.hpp"
#include "OracularSubsettedSlabCache.hpp"

#include <vector>
#include <cstddef>

#include "sanisizer/sanisizer.hpp"

namespace tatami_chunked {

// Options for data extraction from a CustomSparseChunkedMatrix.
struct CustomSparseChunkedMatrixOptions {
    // Maximum size of the cache in bytes (defaults to 100 MB).
    std::size_t maximum_cache_size = sanisizer::cap<std::size_t>(100000000);

    // Whether to enforce a minimum cache size that can hold at least one slab,
    // regardless of maximum_cache_size.
    bool require_minimum_cache = true;

    // Whether to cache only the requested subset of each slab during oracular extraction.
    bool cache_subset = false;
};

// Workspace for extracting data from a CustomSparseChunkedMatrixManager.
template<typename ChunkValue_, typename Index_>
class CustomSparseChunkedMatrixWorkspace {
public:
    virtual ~CustomSparseChunkedMatrixWorkspace() = default;

    // Extract a contiguous block of the target dimension from the specified
    // chunk, for a contiguous block of the non-target dimension. Non-zero
    // values for each target element are appended to the corresponding arrays
    // in 'output_values' and 'output_indices', resuming at the running counts
    // in 'output_number'; 'shift' is added to each index before storage.
    virtual void extract(
        Index_ chunk_row_id,
        Index_ chunk_column_id,
        bool row,
        Index_ target_start,
        Index_ target_length,
        Index_ non_target_start,
        Index_ non_target_length,
        const std::vector<ChunkValue_*>& output_values,
        const std::vector<Index_*>& output_indices,
        Index_* output_number,
        Index_ shift
    ) = 0;

    // As above, but extracting an indexed subset of the non-target dimension.
    virtual void extract(
        Index_ chunk_row_id,
        Index_ chunk_column_id,
        bool row,
        Index_ target_start,
        Index_ target_length,
        const std::vector<Index_>& non_target_indices,
        const std::vector<ChunkValue_*>& output_values,
        const std::vector<Index_*>& output_indices,
        Index_* output_number,
        Index_ shift
    ) = 0;

    // As above, but extracting an indexed subset of the target dimension
    // for a contiguous block of the non-target dimension.
    virtual void extract(
        Index_ chunk_row_id,
        Index_ chunk_column_id,
        bool row,
        const std::vector<Index_>& target_indices,
        Index_ non_target_start,
        Index_ non_target_length,
        const std::vector<ChunkValue_*>& output_values,
        const std::vector<Index_*>& output_indices,
        Index_* output_number,
        Index_ shift
    ) = 0;

    // As above, but extracting indexed subsets of both dimensions.
    virtual void extract(
        Index_ chunk_row_id,
        Index_ chunk_column_id,
        bool row,
        const std::vector<Index_>& target_indices,
        const std::vector<Index_>& non_target_indices,
        const std::vector<ChunkValue_*>& output_values,
        const std::vector<Index_*>& output_indices,
        Index_* output_number,
        Index_ shift
    ) = 0;
};
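
// What follows is a minimal sketch of a concrete workspace, assuming that
// each chunk is held in memory as a dense row-major array. 'MockChunk' and
// 'MockWorkspace' are hypothetical names for illustration only, not part of
// tatami_chunked; a real implementation would typically decompress or fetch
// the chunk's contents inside extract() instead.
struct MockChunk {
    std::vector<double> data; // row-major contents of length nrow * ncol.
    int nrow = 0, ncol = 0;
};

class MockWorkspace final : public CustomSparseChunkedMatrixWorkspace<double, int> {
public:
    MockWorkspace(const std::vector<std::vector<MockChunk> >& chunks) : my_chunks(chunks) {}

private:
    const std::vector<std::vector<MockChunk> >& my_chunks; // indexed by [chunk row][chunk column].

    static std::vector<int> to_sequence(int start, int length) {
        std::vector<int> out;
        out.reserve(length);
        for (int i = 0; i < length; ++i) {
            out.push_back(start + i);
        }
        return out;
    }

    void extract_impl(
        const MockChunk& chunk,
        bool row,
        const std::vector<int>& targets,
        const std::vector<int>& non_targets,
        const std::vector<double*>& output_values,
        const std::vector<int*>& output_indices,
        int* output_number,
        int shift
    ) {
        for (auto t : targets) {
            for (auto n : non_targets) {
                // For row extraction, (t, n) is (row, column) of the chunk; otherwise transposed.
                double val = row ?
                    chunk.data[static_cast<std::size_t>(t) * chunk.ncol + n] :
                    chunk.data[static_cast<std::size_t>(n) * chunk.ncol + t];
                if (val != 0) {
                    // Append to the buffers for target element 't', resuming at the
                    // count accumulated from any previously processed chunks.
                    auto& num = output_number[t];
                    output_values[t][num] = val;
                    output_indices[t][num] = n + shift; // offset into the full non-target dimension.
                    ++num;
                }
            }
        }
    }

public:
    void extract(int cr, int cc, bool row, int ts, int tl, int nts, int ntl, const std::vector<double*>& ov, const std::vector<int*>& oi, int* on, int shift) override {
        extract_impl(my_chunks[cr][cc], row, to_sequence(ts, tl), to_sequence(nts, ntl), ov, oi, on, shift);
    }

    void extract(int cr, int cc, bool row, int ts, int tl, const std::vector<int>& nti, const std::vector<double*>& ov, const std::vector<int*>& oi, int* on, int shift) override {
        extract_impl(my_chunks[cr][cc], row, to_sequence(ts, tl), nti, ov, oi, on, shift);
    }

    void extract(int cr, int cc, bool row, const std::vector<int>& ti, int nts, int ntl, const std::vector<double*>& ov, const std::vector<int*>& oi, int* on, int shift) override {
        extract_impl(my_chunks[cr][cc], row, ti, to_sequence(nts, ntl), ov, oi, on, shift);
    }

    void extract(int cr, int cc, bool row, const std::vector<int>& ti, const std::vector<int>& nti, const std::vector<double*>& ov, const std::vector<int*>& oi, int* on, int shift) override {
        extract_impl(my_chunks[cr][cc], row, ti, nti, ov, oi, on, shift);
    }
};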

// Manager of chunks for a CustomSparseChunkedMatrix.
template<typename ChunkValue_, typename Index_>
class CustomSparseChunkedMatrixManager {
public:
    virtual ~CustomSparseChunkedMatrixManager() = default;

    // Create a new workspace for chunk extraction.
    virtual std::unique_ptr<CustomSparseChunkedMatrixWorkspace<ChunkValue_, Index_> > new_workspace() const = 0;

    // Non-virtual counterpart to new_workspace() that subclasses may shadow to
    // return a concrete workspace type; by default, it forwards to new_workspace().
    std::unique_ptr<CustomSparseChunkedMatrixWorkspace<ChunkValue_, Index_> > new_workspace_exact() const {
        return new_workspace();
    }

    // Whether extraction along the rows is preferred.
    virtual bool prefer_rows() const = 0;

    // Statistics for the chunks along the rows.
    virtual const ChunkDimensionStats<Index_>& row_stats() const = 0;

    // Statistics for the chunks along the columns.
    virtual const ChunkDimensionStats<Index_>& column_stats() const = 0;
};
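
// A matching sketch of a manager (again hypothetical, building on the
// MockChunk/MockWorkspace sketch above). It owns the grid of chunks and
// reports the chunking geometry; the ChunkDimensionStats constructor is
// assumed here to take the dimension extent and the common chunk length.
class MockManager final : public CustomSparseChunkedMatrixManager<double, int> {
public:
    MockManager(std::vector<std::vector<MockChunk> > chunks, int nrow, int ncol, int chunk_nrow, int chunk_ncol) :
        my_chunks(std::move(chunks)),
        my_row_stats(nrow, chunk_nrow),
        my_col_stats(ncol, chunk_ncol)
    {}

    std::unique_ptr<CustomSparseChunkedMatrixWorkspace<double, int> > new_workspace() const override {
        return std::make_unique<MockWorkspace>(my_chunks);
    }

    bool prefer_rows() const override {
        return true; // the mock chunks are row-major, so row access is cheapest.
    }

    const ChunkDimensionStats<int>& row_stats() const override {
        return my_row_stats;
    }

    const ChunkDimensionStats<int>& column_stats() const override {
        return my_col_stats;
    }

private:
    std::vector<std::vector<MockChunk> > my_chunks;
    ChunkDimensionStats<int> my_row_stats, my_col_stats;
};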

// Implementation details, not part of the public interface.
namespace CustomChunkedMatrix_internal {

/*********************
 **** Base classes ***
 *********************/

template<bool oracle_, typename Value_, typename Index_, typename ChunkValue_, class WorkspacePtr_>
class SoloSparseCore {
    WorkspacePtr_ my_chunk_workspace;
    const ChunkCoordinator<true, ChunkValue_, Index_>& my_coordinator;

    tatami::MaybeOracle<oracle_, Index_> my_oracle;
    typename std::conditional<oracle_, tatami::PredictionIndex, bool>::type my_counter = 0;

    SparseSlabFactory<ChunkValue_, Index_, Index_> my_factory;
    typedef typename decltype(my_factory)::Slab Slab;

    // These two instances are not fully allocated Slabs; rather, tmp_solo just
    // holds the content for a single chunk, while final_solo holds the content
    // across chunks but only for the requested dimension element. Both cases
    // are likely to be much smaller than a full Slab, so we're already more
    // memory-efficient than 'require_minimum_cache = true'.
    SparseSingleWorkspace<ChunkValue_, Index_> my_tmp_solo;
    Slab my_final_solo;

public:
    SoloSparseCore(
        WorkspacePtr_ chunk_workspace,
        const ChunkCoordinator<true, ChunkValue_, Index_>& coordinator,
        [[maybe_unused]] const SlabCacheStats<Index_>& slab_stats, // for consistency with the other base classes.
        bool row,
        tatami::MaybeOracle<oracle_, Index_> oracle,
        Index_ non_target_length,
        bool needs_value,
        bool needs_index
    ) :
        my_chunk_workspace(std::move(chunk_workspace)),
        my_coordinator(coordinator),
        my_oracle(std::move(oracle)),
        my_factory(1, non_target_length, 1, needs_value, needs_index),
        my_tmp_solo(
            my_coordinator.get_target_chunkdim(row),
            my_coordinator.get_non_target_chunkdim(row),
            needs_value,
            needs_index
        ),
        my_final_solo(my_factory.create())
    {}

    template<typename ... Args_>
    std::pair<const Slab*, Index_> fetch_raw(Index_ i, bool row, Args_&& ... args) {
        if constexpr(oracle_) {
            i = my_oracle->get(my_counter++);
        }
        return my_coordinator.fetch_single(row, i, std::forward<Args_>(args)..., *my_chunk_workspace, my_tmp_solo, my_final_solo);
    }
};

template<typename Value_, typename Index_, typename ChunkValue_, class WorkspacePtr_>
class MyopicSparseCore {
    WorkspacePtr_ my_chunk_workspace;
    const ChunkCoordinator<true, ChunkValue_, Index_>& my_coordinator;

    SparseSlabFactory<ChunkValue_, Index_, Index_> my_factory;
    typedef typename decltype(my_factory)::Slab Slab;

    LruSlabCache<Index_, Slab> my_cache;

public:
    MyopicSparseCore(
        WorkspacePtr_ chunk_workspace,
        const ChunkCoordinator<true, ChunkValue_, Index_>& coordinator,
        const SlabCacheStats<Index_>& slab_stats,
        bool row,
        [[maybe_unused]] tatami::MaybeOracle<false, Index_> oracle, // for consistency with the other base classes
        Index_ non_target_length,
        bool needs_value,
        bool needs_index
    ) :
        my_chunk_workspace(std::move(chunk_workspace)),
        my_coordinator(coordinator),
        my_factory(coordinator.get_target_chunkdim(row), non_target_length, slab_stats, needs_value, needs_index),
        my_cache(slab_stats.max_slabs_in_cache)
    {}

    template<typename ... Args_>
    std::pair<const Slab*, Index_> fetch_raw(Index_ i, bool row, Args_&& ... args) {
        return my_coordinator.fetch_myopic(row, i, std::forward<Args_>(args)..., *my_chunk_workspace, my_cache, my_factory);
    }
};

template<bool use_subset_, typename Value_, typename Index_, typename ChunkValue_, class WorkspacePtr_>
class OracularSparseCore {
protected:
    WorkspacePtr_ my_chunk_workspace;
    const ChunkCoordinator<true, ChunkValue_, Index_>& my_coordinator;

    SparseSlabFactory<ChunkValue_, Index_, Index_> my_factory;
    typedef typename decltype(my_factory)::Slab Slab;

    typename std::conditional<use_subset_, OracularSubsettedSlabCache<Index_, Index_, Slab>, OracularSlabCache<Index_, Index_, Slab> >::type my_cache;

public:
    OracularSparseCore(
        WorkspacePtr_ chunk_workspace,
        const ChunkCoordinator<true, ChunkValue_, Index_>& coordinator,
        const SlabCacheStats<Index_>& slab_stats,
        bool row,
        tatami::MaybeOracle<true, Index_> oracle,
        Index_ non_target_length,
        bool needs_value,
        bool needs_index
    ) :
        my_chunk_workspace(std::move(chunk_workspace)),
        my_coordinator(coordinator),
        my_factory(coordinator.get_target_chunkdim(row), non_target_length, slab_stats, needs_value, needs_index),
        my_cache(std::move(oracle), slab_stats.max_slabs_in_cache)
    {}

    template<typename ... Args_>
    std::pair<const Slab*, Index_> fetch_raw([[maybe_unused]] Index_ i, bool row, Args_&& ... args) {
        if constexpr(use_subset_) {
            return my_coordinator.fetch_oracular_subsetted(row, std::forward<Args_>(args)..., *my_chunk_workspace, my_cache, my_factory);
        } else {
            return my_coordinator.fetch_oracular(row, std::forward<Args_>(args)..., *my_chunk_workspace, my_cache, my_factory);
        }
    }
};

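// Choose the extraction core at compile time: the solo core fetches one
// element at a time without caching whole slabs (used when the cache cannot
// hold even a single slab), the oracular cores use access predictions to
// decide which slabs to retain, and the myopic core falls back to an LRU
// policy when no oracle is available.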
template<bool solo_, bool oracle_, bool use_subset_, typename Value_, typename Index_, typename ChunkValue_, class WorkspacePtr_>
using SparseCore = typename std::conditional<solo_,
    SoloSparseCore<oracle_, Value_, Index_, ChunkValue_, WorkspacePtr_>,
    typename std::conditional<oracle_,
        OracularSparseCore<use_subset_, Value_, Index_, ChunkValue_, WorkspacePtr_>,
        MyopicSparseCore<Value_, Index_, ChunkValue_, WorkspacePtr_>
    >::type
>::type;

/***********************
 **** Sparse classes ***
 ***********************/

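// Copy the cached slab's contents for the requested dimension element into
// the user-supplied buffers. Pointers for unwanted values or indices are
// returned as NULL, as expected by the tatami extraction contract.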
template<class Slab_, typename Index_, typename Value_>
tatami::SparseRange<Value_, Index_> process_sparse_slab(const std::pair<const Slab_*, Index_>& fetched, Value_* value_buffer, Index_* index_buffer, bool needs_value, bool needs_index) {
    auto num = fetched.first->number[fetched.second];

    if (needs_value) {
        auto vptr = fetched.first->values[fetched.second];
        std::copy_n(vptr, num, value_buffer);
    } else {
        value_buffer = NULL;
    }

    if (needs_index) {
        auto iptr = fetched.first->indices[fetched.second];
        std::copy_n(iptr, num, index_buffer);
    } else {
        index_buffer = NULL;
    }

    return tatami::SparseRange<Value_, Index_>(num, value_buffer, index_buffer);
}

496
497template<bool solo_, bool oracle_, bool use_subset_, typename Value_, typename Index_, typename ChunkValue_, class WorkspacePtr_>
498class SparseFull : public tatami::SparseExtractor<oracle_, Value_, Index_> {
499public:
500 SparseFull(
501 WorkspacePtr_ chunk_workspace,
502 const ChunkCoordinator<true, ChunkValue_, Index_>& coordinator,
503 const SlabCacheStats<Index_>& slab_stats,
504 bool row,
506 const tatami::Options& opt
507 ) :
508 my_row(row),
509 my_non_target_dim(coordinator.get_non_target_dim(row)),
510 my_needs_value(opt.sparse_extract_value),
511 my_needs_index(opt.sparse_extract_index),
512 my_core(
513 std::move(chunk_workspace),
514 coordinator,
515 slab_stats,
516 row,
517 std::move(oracle),
518 my_non_target_dim,
519 opt.sparse_extract_value,
520 opt.sparse_extract_index
521 )
522 {}
523
524 tatami::SparseRange<Value_, Index_> fetch(Index_ i, Value_* value_buffer, Index_* index_buffer) {
525 auto fetched = my_core.fetch_raw(i, my_row, 0, my_non_target_dim);
526 return process_sparse_slab(fetched, value_buffer, index_buffer, my_needs_value, my_needs_index);
527 }
528
529private:
530 bool my_row;
531 Index_ my_non_target_dim;
532 bool my_needs_value, my_needs_index;
533 SparseCore<solo_, oracle_, use_subset_, Value_, Index_, ChunkValue_, WorkspacePtr_> my_core;
534};
535
536template<bool solo_, bool oracle_, bool use_subset_, typename Value_, typename Index_, typename ChunkValue_, class WorkspacePtr_>
537class SparseBlock : public tatami::SparseExtractor<oracle_, Value_, Index_> {
538public:
539 SparseBlock(
540 WorkspacePtr_ chunk_workspace,
541 const ChunkCoordinator<true, ChunkValue_, Index_>& coordinator,
542 const SlabCacheStats<Index_>& slab_stats,
543 bool row,
545 Index_ block_start,
546 Index_ block_length,
547 const tatami::Options& opt
548 ) :
549 my_row(row),
550 my_block_start(block_start),
551 my_block_length(block_length),
552 my_needs_value(opt.sparse_extract_value),
553 my_needs_index(opt.sparse_extract_index),
554 my_core(
555 std::move(chunk_workspace),
556 coordinator,
557 slab_stats,
558 row,
559 std::move(oracle),
560 block_length,
561 my_needs_value,
562 my_needs_index
563 )
564 {}
565
566 tatami::SparseRange<Value_, Index_> fetch(Index_ i, Value_* value_buffer, Index_* index_buffer) {
567 auto fetched = my_core.fetch_raw(i, my_row, my_block_start, my_block_length);
568 return process_sparse_slab(fetched, value_buffer, index_buffer, my_needs_value, my_needs_index);
569 }
570
571private:
572 bool my_row;
573 Index_ my_block_start, my_block_length;
574 bool my_needs_value, my_needs_index;
575 SparseCore<solo_, oracle_, use_subset_, Value_, Index_, ChunkValue_, WorkspacePtr_> my_core;
576};
577
578template<bool solo_, bool oracle_, bool use_subset_, typename Value_, typename Index_, typename ChunkValue_, class WorkspacePtr_>
579class SparseIndex : public tatami::SparseExtractor<oracle_, Value_, Index_> {
580public:
581 SparseIndex(
582 WorkspacePtr_ chunk_workspace,
583 const ChunkCoordinator<true, ChunkValue_, Index_>& coordinator,
584 const SlabCacheStats<Index_>& slab_stats,
585 bool row,
587 tatami::VectorPtr<Index_> indices_ptr,
588 const tatami::Options& opt
589 ) :
590 my_row(row),
591 my_indices_ptr(std::move(indices_ptr)),
592 my_needs_value(opt.sparse_extract_value),
593 my_needs_index(opt.sparse_extract_index),
594 my_core(
595 std::move(chunk_workspace),
596 coordinator,
597 slab_stats,
598 row,
599 std::move(oracle),
600 my_indices_ptr->size(),
601 my_needs_value,
602 my_needs_index
603 )
604 {}
605
606 tatami::SparseRange<Value_, Index_> fetch(Index_ i, Value_* value_buffer, Index_* index_buffer) {
607 auto fetched = my_core.fetch_raw(i, my_row, *my_indices_ptr, my_tmp_indices);
608 return process_sparse_slab(fetched, value_buffer, index_buffer, my_needs_value, my_needs_index);
609 }
610
611private:
612 bool my_row;
613 tatami::VectorPtr<Index_> my_indices_ptr;
614 std::vector<Index_> my_tmp_indices;
615 bool my_needs_value, my_needs_index;
616 SparseCore<solo_, oracle_, use_subset_, Value_, Index_, ChunkValue_, WorkspacePtr_> my_core;
617};

/**************************
 **** Densified classes ***
 **************************/

template<bool solo_, bool oracle_, bool use_subset_, typename Value_, typename Index_, typename ChunkValue_, class WorkspacePtr_>
class DensifiedFull : public tatami::DenseExtractor<oracle_, Value_, Index_> {
public:
    DensifiedFull(
        WorkspacePtr_ chunk_workspace,
        const ChunkCoordinator<true, ChunkValue_, Index_>& coordinator,
        const SlabCacheStats<Index_>& slab_stats,
        bool row,
        tatami::MaybeOracle<oracle_, Index_> oracle,
        const tatami::Options&
    ) :
        my_row(row),
        my_non_target_dim(coordinator.get_non_target_dim(row)),
        my_core(
            std::move(chunk_workspace),
            coordinator,
            slab_stats,
            row,
            std::move(oracle),
            my_non_target_dim,
            true,
            true
        )
    {}

    const Value_* fetch(Index_ i, Value_* buffer) {
        auto contents = my_core.fetch_raw(i, my_row, 0, my_non_target_dim);

        Index_ num = contents.first->number[contents.second];
        auto vptr = contents.first->values[contents.second];
        auto iptr = contents.first->indices[contents.second];

        std::fill_n(buffer, my_non_target_dim, 0);
        for (Index_ x = 0; x < num; ++x, ++iptr, ++vptr) {
            buffer[*iptr] = *vptr;
        }
        return buffer;
    }

private:
    bool my_row;
    Index_ my_non_target_dim;
    SparseCore<solo_, oracle_, use_subset_, Value_, Index_, ChunkValue_, WorkspacePtr_> my_core;
};

template<bool solo_, bool oracle_, bool use_subset_, typename Value_, typename Index_, typename ChunkValue_, class WorkspacePtr_>
class DensifiedBlock : public tatami::DenseExtractor<oracle_, Value_, Index_> {
public:
    DensifiedBlock(
        WorkspacePtr_ chunk_workspace,
        const ChunkCoordinator<true, ChunkValue_, Index_>& coordinator,
        const SlabCacheStats<Index_>& slab_stats,
        bool row,
        tatami::MaybeOracle<oracle_, Index_> oracle,
        Index_ block_start,
        Index_ block_length,
        const tatami::Options&
    ) :
        my_row(row),
        my_block_start(block_start),
        my_block_length(block_length),
        my_core(
            std::move(chunk_workspace),
            coordinator,
            slab_stats,
            row,
            std::move(oracle),
            block_length,
            true,
            true
        )
    {}

    const Value_* fetch(Index_ i, Value_* buffer) {
        auto contents = my_core.fetch_raw(i, my_row, my_block_start, my_block_length);

        auto vptr = contents.first->values[contents.second];
        auto iptr = contents.first->indices[contents.second];
        auto num = contents.first->number[contents.second];

        std::fill_n(buffer, my_block_length, 0);
        for (Index_ x = 0; x < num; ++x, ++iptr, ++vptr) {
            buffer[*iptr - my_block_start] = *vptr;
        }
        return buffer;
    }

private:
    bool my_row;
    Index_ my_block_start, my_block_length;
    SparseCore<solo_, oracle_, use_subset_, Value_, Index_, ChunkValue_, WorkspacePtr_> my_core;
};

template<bool solo_, bool oracle_, bool use_subset_, typename Value_, typename Index_, typename ChunkValue_, class WorkspacePtr_>
class DensifiedIndex : public tatami::DenseExtractor<oracle_, Value_, Index_> {
public:
    DensifiedIndex(
        WorkspacePtr_ chunk_workspace,
        const ChunkCoordinator<true, ChunkValue_, Index_>& coordinator,
        const SlabCacheStats<Index_>& slab_stats,
        bool row,
        tatami::MaybeOracle<oracle_, Index_> oracle,
        tatami::VectorPtr<Index_> indices_ptr,
        const tatami::Options&
    ) :
        my_row(row),
        my_indices_ptr(std::move(indices_ptr)),
        my_core(
            std::move(chunk_workspace),
            coordinator,
            slab_stats,
            row,
            std::move(oracle),
            my_indices_ptr->size(),
            true,
            true
        )
    {
        const auto& indices = *my_indices_ptr;
        if (!indices.empty()) {
            // Build a lookup table mapping each requested index (less the offset
            // of the first index) to its position in the output buffer.
            my_remap_offset = indices.front();
            Index_ alloc = indices.back() - my_remap_offset + 1; // alloc must be <= dim extent, which should fit in an Index_.
            tatami::resize_container_to_Index_size(my_remap, alloc);
            Index_ counter = 0;
            for (auto i : indices) {
                my_remap[i - my_remap_offset] = counter;
                ++counter;
            }
        }
    }

    const Value_* fetch(Index_ i, Value_* buffer) {
        auto contents = my_core.fetch_raw(i, my_row, *my_indices_ptr, my_tmp_indices);

        auto vptr = contents.first->values[contents.second];
        auto iptr = contents.first->indices[contents.second];
        auto num = contents.first->number[contents.second];

        auto nidx = my_indices_ptr->size();
        std::fill_n(buffer, nidx, 0);
        for (Index_ x = 0; x < num; ++x, ++iptr, ++vptr) {
            buffer[my_remap[*iptr - my_remap_offset]] = *vptr;
        }
        return buffer;
    }

private:
    bool my_row;
    tatami::VectorPtr<Index_> my_indices_ptr;
    Index_ my_remap_offset = 0;
    std::vector<Index_> my_remap;
    std::vector<Index_> my_tmp_indices;
    SparseCore<solo_, oracle_, use_subset_, Value_, Index_, ChunkValue_, WorkspacePtr_> my_core;
};

} // end of CustomChunkedMatrix_internal namespace.

// Matrix of custom sparse chunks. Each chunk is accessed through a
// CustomSparseChunkedMatrixManager, with slabs of extracted data cached
// according to the supplied options.
template<typename Value_, typename Index_, typename ChunkValue_, class Manager_ = CustomSparseChunkedMatrixManager<ChunkValue_, Index_> >
class CustomSparseChunkedMatrix : public tatami::Matrix<Value_, Index_> {
public:
    CustomSparseChunkedMatrix(std::shared_ptr<Manager_> manager, const CustomSparseChunkedMatrixOptions& opt) :
        my_manager(std::move(manager)),
        my_coordinator(my_manager->row_stats(), my_manager->column_stats()),
        my_cache_size_in_bytes(opt.maximum_cache_size),
        my_require_minimum_cache(opt.require_minimum_cache),
        my_cache_subset(opt.cache_subset)
    {}

private:
    std::shared_ptr<Manager_> my_manager;
    CustomChunkedMatrix_internal::ChunkCoordinator<true, ChunkValue_, Index_> my_coordinator;
    std::size_t my_cache_size_in_bytes;
    bool my_require_minimum_cache;
    bool my_cache_subset;

public:
    Index_ nrow() const {
        return my_coordinator.get_nrow();
    }

    Index_ ncol() const {
        return my_coordinator.get_ncol();
    }

    bool prefer_rows() const {
        return my_manager->prefer_rows();
    }

    bool uses_oracle(bool) const {
        return true;
    }

    double prefer_rows_proportion() const {
        return static_cast<double>(my_manager->prefer_rows());
    }

    bool is_sparse() const {
        return true;
    }

    double is_sparse_proportion() const {
        return 1;
    }

    using tatami::Matrix<Value_, Index_>::dense;

    using tatami::Matrix<Value_, Index_>::sparse;

    /********************
     *** Myopic dense ***
     ********************/
private:
    template<
        template<bool, typename, typename> class Interface_,
        bool oracle_,
        template<bool, bool, bool, typename, typename, typename, class> class Extractor_,
        typename ... Args_
    >
    std::unique_ptr<Interface_<oracle_, Value_, Index_> > raw_internal(bool row, Index_ non_target_length, const tatami::Options& opt, Args_&& ... args) const {
        std::size_t element_size = (opt.sparse_extract_value ? sizeof(ChunkValue_) : 0) + (opt.sparse_extract_index ? sizeof(Index_) : 0);
        auto stats = [&]{
            if (row) {
                // Remember, the num_chunks_per_column is the number of slabs needed to divide up all the *rows* of the matrix.
                return SlabCacheStats<Index_>(
                    my_coordinator.get_chunk_nrow(),
                    non_target_length,
                    my_coordinator.get_num_chunks_per_column(),
                    my_cache_size_in_bytes,
                    element_size,
                    my_require_minimum_cache
                );
            } else {
                // Remember, the num_chunks_per_row is the number of slabs needed to divide up all the *columns* of the matrix.
                return SlabCacheStats<Index_>(
                    my_coordinator.get_chunk_ncol(),
                    non_target_length,
                    my_coordinator.get_num_chunks_per_row(),
                    my_cache_size_in_bytes,
                    element_size,
                    my_require_minimum_cache
                );
            }
        }();

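        // If the cache cannot hold even one slab, fall back to solo extraction;
        // otherwise use an oracle-aware cache (optionally tracking only the
        // requested subset of each slab) or an LRU cache for myopic access.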
        auto wrk = my_manager->new_workspace_exact();
        if (stats.max_slabs_in_cache == 0) {
            return std::make_unique<Extractor_<true, oracle_, false, Value_, Index_, ChunkValue_, decltype(wrk)> >(std::move(wrk), my_coordinator, stats, row, std::forward<Args_>(args)...);
        } else if constexpr(oracle_) {
            if (my_cache_subset) {
                return std::make_unique<Extractor_<false, true, true, Value_, Index_, ChunkValue_, decltype(wrk)> >(std::move(wrk), my_coordinator, stats, row, std::forward<Args_>(args)...);
            } else {
                return std::make_unique<Extractor_<false, true, false, Value_, Index_, ChunkValue_, decltype(wrk)> >(std::move(wrk), my_coordinator, stats, row, std::forward<Args_>(args)...);
            }
        } else {
            return std::make_unique<Extractor_<false, false, false, Value_, Index_, ChunkValue_, decltype(wrk)> >(std::move(wrk), my_coordinator, stats, row, std::forward<Args_>(args)...);
        }
    }

902
903public:
904 std::unique_ptr<tatami::MyopicDenseExtractor<Value_, Index_> > dense(bool row, const tatami::Options& opt) const {
905 return raw_internal<tatami::DenseExtractor, false, CustomChunkedMatrix_internal::DensifiedFull>(row, my_coordinator.get_non_target_dim(row), opt, false, opt);
906 }
907
908 std::unique_ptr<tatami::MyopicDenseExtractor<Value_, Index_> > dense(bool row, Index_ block_start, Index_ block_length, const tatami::Options& opt) const {
909 return raw_internal<tatami::DenseExtractor, false, CustomChunkedMatrix_internal::DensifiedBlock>(row, block_length, opt, false, block_start, block_length, opt);
910 }
911
912 std::unique_ptr<tatami::MyopicDenseExtractor<Value_, Index_> > dense(bool row, tatami::VectorPtr<Index_> indices_ptr, const tatami::Options& opt) const {
913 auto num_indices = indices_ptr->size();
914 return raw_internal<tatami::DenseExtractor, false, CustomChunkedMatrix_internal::DensifiedIndex>(row, num_indices, opt, false, std::move(indices_ptr), opt);
915 }
916
917 /***********************
918 *** Oracular dense ***
919 ***********************/
920public:
921 std::unique_ptr<tatami::OracularDenseExtractor<Value_, Index_> > dense(
922 bool row,
923 std::shared_ptr<const tatami::Oracle<Index_> > oracle,
924 const tatami::Options& opt)
925 const {
926 return raw_internal<tatami::DenseExtractor, true, CustomChunkedMatrix_internal::DensifiedFull>(row, my_coordinator.get_non_target_dim(row), opt, std::move(oracle), opt);
927 }
928
929 std::unique_ptr<tatami::OracularDenseExtractor<Value_, Index_> > dense(
930 bool row,
931 std::shared_ptr<const tatami::Oracle<Index_> > oracle,
932 Index_ block_start,
933 Index_ block_length,
934 const tatami::Options& opt)
935 const {
936 return raw_internal<tatami::DenseExtractor, true, CustomChunkedMatrix_internal::DensifiedBlock>(row, block_length, opt, std::move(oracle), block_start, block_length, opt);
937 }
938
939 std::unique_ptr<tatami::OracularDenseExtractor<Value_, Index_> > dense(
940 bool row,
941 std::shared_ptr<const tatami::Oracle<Index_> > oracle,
942 tatami::VectorPtr<Index_> indices_ptr,
943 const tatami::Options& opt)
944 const {
945 auto num_indices = indices_ptr->size();
946 return raw_internal<tatami::DenseExtractor, true, CustomChunkedMatrix_internal::DensifiedIndex>(row, num_indices, opt, std::move(oracle), std::move(indices_ptr), opt);
947 }
948
949 /*********************
950 *** Myopic sparse ***
951 *********************/
952public:
953 std::unique_ptr<tatami::MyopicSparseExtractor<Value_, Index_> > sparse(bool row, const tatami::Options& opt) const {
954 return raw_internal<tatami::SparseExtractor, false, CustomChunkedMatrix_internal::SparseFull>(row, my_coordinator.get_non_target_dim(row), opt, false, opt);
955 }
956
957 std::unique_ptr<tatami::MyopicSparseExtractor<Value_, Index_> > sparse(bool row, Index_ block_start, Index_ block_length, const tatami::Options& opt) const {
958 return raw_internal<tatami::SparseExtractor, false, CustomChunkedMatrix_internal::SparseBlock>(row, block_length, opt, false, block_start, block_length, opt);
959 }
960
961 std::unique_ptr<tatami::MyopicSparseExtractor<Value_, Index_> > sparse(bool row, tatami::VectorPtr<Index_> indices_ptr, const tatami::Options& opt) const {
962 auto num_indices = indices_ptr->size();
963 return raw_internal<tatami::SparseExtractor, false, CustomChunkedMatrix_internal::SparseIndex>(row, num_indices, opt, false, std::move(indices_ptr), opt);
964 }
965
966 /***********************
967 *** Oracular sparse ***
968 ***********************/
969public:
970 std::unique_ptr<tatami::OracularSparseExtractor<Value_, Index_> > sparse(
971 bool row,
972 std::shared_ptr<const tatami::Oracle<Index_> > oracle,
973 const tatami::Options& opt)
974 const {
975 return raw_internal<tatami::SparseExtractor, true, CustomChunkedMatrix_internal::SparseFull>(row, my_coordinator.get_non_target_dim(row), opt, std::move(oracle), opt);
976 }
977
978 std::unique_ptr<tatami::OracularSparseExtractor<Value_, Index_> > sparse(
979 bool row,
980 std::shared_ptr<const tatami::Oracle<Index_> > oracle,
981 Index_ block_start,
982 Index_ block_length,
983 const tatami::Options& opt)
984 const {
985 return raw_internal<tatami::SparseExtractor, true, CustomChunkedMatrix_internal::SparseBlock>(row, block_length, opt, std::move(oracle), block_start, block_length, opt);
986 }
987
988 std::unique_ptr<tatami::OracularSparseExtractor<Value_, Index_> > sparse(
989 bool row,
990 std::shared_ptr<const tatami::Oracle<Index_> > oracle,
991 tatami::VectorPtr<Index_> indices_ptr,
992 const tatami::Options& opt)
993 const {
994 auto num_indices = indices_ptr->size();
995 return raw_internal<tatami::SparseExtractor, true, CustomChunkedMatrix_internal::SparseIndex>(row, num_indices, opt, std::move(oracle), std::move(indices_ptr), opt);
996 }
997};
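
// Putting it together (an illustrative sketch only): construct a matrix over
// the hypothetical MockManager defined earlier and densely read out one row.
// A 4x4 matrix is assembled here from four 2x2 dense chunks.
inline void mock_usage() {
    std::vector<std::vector<MockChunk> > grid(2, std::vector<MockChunk>(2));
    for (auto& chunk_row : grid) {
        for (auto& chunk : chunk_row) {
            chunk.nrow = 2;
            chunk.ncol = 2;
            chunk.data = { 0.0, 1.0, 2.0, 0.0 }; // mostly-zero contents.
        }
    }

    CustomSparseChunkedMatrixOptions opt;
    opt.maximum_cache_size = 1048576; // 1 MiB is plenty for this toy example.

    CustomSparseChunkedMatrix<double, int, double> mat(
        std::make_shared<MockManager>(std::move(grid), 4, 4, 2, 2),
        opt
    );

    // Extract the first row through the myopic dense interface.
    auto ext = mat.dense(true, tatami::Options());
    std::vector<double> buffer(mat.ncol());
    const double* row0 = ext->fetch(0, buffer.data());
    (void)row0; // points to the densified contents of row 0.
}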

} // end of tatami_chunked namespace.

#endif