tatami_chunked
Helpers to create custom chunked tatami matrices
CustomSparseChunkedMatrix.hpp
#ifndef TATAMI_CHUNKED_CUSTOM_SPARSE_CHUNKED_MATRIX_HPP
#define TATAMI_CHUNKED_CUSTOM_SPARSE_CHUNKED_MATRIX_HPP

#include "tatami/tatami.hpp"
#include "custom_internals.hpp"
#include "SparseSlabFactory.hpp"
#include "SlabCacheStats.hpp"
#include "LruSlabCache.hpp"
#include "OracularSlabCache.hpp"
#include "OracularSubsettedSlabCache.hpp"

#include <vector>

/**
 * @file CustomSparseChunkedMatrix.hpp
 * @brief Custom sparse chunked matrix.
 */
namespace tatami_chunked {

/**
 * @brief Options for data extraction from a `CustomSparseChunkedMatrix`.
 */
struct CustomSparseChunkedMatrixOptions {
    /**
     * Size of the in-memory cache in bytes.
     * Larger caches improve access speed at the cost of more memory usage.
     */
    size_t maximum_cache_size = 100000000;

    /**
     * Whether to automatically enforce a minimum size for the cache, regardless
     * of `maximum_cache_size`, such that at least one slab can be retained.
     */
    bool require_minimum_cache = true;
};

/**
 * @cond
 */
namespace CustomChunkedMatrix_internal {

/*********************
 **** Base classes ***
 *********************/

template<bool oracle_, typename Value_, typename Index_, typename Chunk_>
class SoloSparseCore {
    const ChunkCoordinator<Index_, true, Chunk_>& my_coordinator;
    typename Chunk_::Workspace my_chunk_workspace;

    tatami::MaybeOracle<oracle_, Index_> my_oracle;
    typename std::conditional<oracle_, size_t, bool>::type my_counter = 0;

    SparseSlabFactory<typename Chunk_::value_type, Index_, Index_> my_factory;
    typedef typename decltype(my_factory)::Slab Slab;

    // These two instances are not fully allocated Slabs; rather, tmp_solo just
    // holds the content for a single chunk, while final_solo holds the content
    // across chunks but only for the requested dimension element. Both cases
    // are likely to be much smaller than a full Slab, so we're already more
    // memory-efficient than 'require_minimum_cache = true'.
    SparseSingleWorkspace<typename Chunk_::value_type, Index_> my_tmp_solo;
    Slab my_final_solo;

public:
    SoloSparseCore(
        const ChunkCoordinator<Index_, true, Chunk_>& coordinator,
        [[maybe_unused]] const SlabCacheStats& slab_stats, // for consistency with the other base classes.
        bool row,
        tatami::MaybeOracle<oracle_, Index_> oracle,
        Index_ secondary_length,
        bool needs_value,
        bool needs_index) :
        my_coordinator(coordinator),
        my_oracle(std::move(oracle)),
        my_factory(1, secondary_length, 1, needs_value, needs_index),
        my_tmp_solo(
            coordinator.get_primary_chunkdim(row),
            coordinator.get_secondary_chunkdim(row),
            needs_value,
            needs_index
        ),
        my_final_solo(my_factory.create())
    {}

    template<typename ... Args_>
    std::pair<const Slab*, Index_> fetch_raw(Index_ i, bool row, Args_&& ... args) {
        if constexpr(oracle_) {
            i = my_oracle->get(my_counter++);
        }
        return my_coordinator.fetch_single(row, i, std::forward<Args_>(args)..., my_chunk_workspace, my_tmp_solo, my_final_solo);
    }
};

template<typename Value_, typename Index_, typename Chunk_>
class MyopicSparseCore {
    const ChunkCoordinator<Index_, true, Chunk_>& my_coordinator;
    typename Chunk_::Workspace my_chunk_workspace;

    SparseSlabFactory<typename Chunk_::value_type, Index_, Index_> my_factory;
    typedef typename decltype(my_factory)::Slab Slab;

    LruSlabCache<Index_, Slab> my_cache;

public:
    MyopicSparseCore(
        const ChunkCoordinator<Index_, true, Chunk_>& coordinator,
        const SlabCacheStats& slab_stats,
        bool row,
        [[maybe_unused]] tatami::MaybeOracle<false, Index_> oracle, // for consistency with the other base classes.
        Index_ secondary_length,
        bool needs_value,
        bool needs_index) :
        my_coordinator(coordinator),
        my_factory(coordinator.get_primary_chunkdim(row), secondary_length, slab_stats, needs_value, needs_index),
        my_cache(slab_stats.max_slabs_in_cache)
    {}

    template<typename ... Args_>
    std::pair<const Slab*, Index_> fetch_raw(Index_ i, bool row, Args_&& ... args) {
        return my_coordinator.fetch_myopic(row, i, std::forward<Args_>(args)..., my_chunk_workspace, my_cache, my_factory);
    }
};

template<typename Value_, typename Index_, typename Chunk_>
class OracularSparseCore {
protected:
    const ChunkCoordinator<Index_, true, Chunk_>& my_coordinator;
    typename Chunk_::Workspace my_chunk_workspace;

    SparseSlabFactory<typename Chunk_::value_type, Index_, Index_> my_factory;
    typedef typename decltype(my_factory)::Slab Slab;

    typename std::conditional<Chunk_::use_subset, OracularSubsettedSlabCache<Index_, Index_, Slab>, OracularSlabCache<Index_, Index_, Slab> >::type my_cache;

public:
    OracularSparseCore(
        const ChunkCoordinator<Index_, true, Chunk_>& coordinator,
        const SlabCacheStats& slab_stats,
        bool row,
        tatami::MaybeOracle<true, Index_> oracle,
        Index_ secondary_length,
        bool needs_value,
        bool needs_index) :
        my_coordinator(coordinator),
        my_factory(coordinator.get_primary_chunkdim(row), secondary_length, slab_stats, needs_value, needs_index),
        my_cache(std::move(oracle), slab_stats.max_slabs_in_cache)
    {}

    template<typename ... Args_>
    std::pair<const Slab*, Index_> fetch_raw([[maybe_unused]] Index_ i, bool row, Args_&& ... args) {
        return my_coordinator.fetch_oracular(row, std::forward<Args_>(args)..., my_chunk_workspace, my_cache, my_factory);
    }
};

template<bool solo_, bool oracle_, typename Value_, typename Index_, typename Chunk_>
using SparseCore = typename std::conditional<solo_,
    SoloSparseCore<oracle_, Value_, Index_, Chunk_>,
    typename std::conditional<oracle_,
        OracularSparseCore<Value_, Index_, Chunk_>,
        MyopicSparseCore<Value_, Index_, Chunk_>
    >::type
>::type;

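// SparseCore selects the fetching strategy: SoloSparseCore reads one dimension
// element at a time without any slab cache (for when the cache cannot hold even
// a single slab), OracularSparseCore schedules slab loads from the oracle's
// predictions, and MyopicSparseCore falls back to LRU caching of recent slabs.
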
/***********************
 **** Sparse classes ***
 ***********************/

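// Copies the (optional) values and indices for a single element of the primary
// dimension out of a fetched slab, returning a tatami::SparseRange that points
// to the user-supplied buffers (or NULL for any component that was not requested).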
template<class Slab_, typename Index_, typename Value_>
tatami::SparseRange<Value_, Index_> process_sparse_slab(const std::pair<const Slab_*, Index_>& fetched, Value_* value_buffer, Index_* index_buffer, bool needs_value, bool needs_index) {
    auto num = fetched.first->number[fetched.second];

    if (needs_value) {
        auto vptr = fetched.first->values[fetched.second];
        std::copy_n(vptr, num, value_buffer);
    } else {
        value_buffer = NULL;
    }

    if (needs_index) {
        auto iptr = fetched.first->indices[fetched.second];
        std::copy_n(iptr, num, index_buffer);
    } else {
        index_buffer = NULL;
    }

    return tatami::SparseRange<Value_, Index_>(num, value_buffer, index_buffer);
}

template<bool solo_, bool oracle_, typename Value_, typename Index_, typename Chunk_>
class SparseFull : public tatami::SparseExtractor<oracle_, Value_, Index_> {
public:
    SparseFull(
        const ChunkCoordinator<Index_, true, Chunk_>& coordinator,
        const SlabCacheStats& slab_stats,
        bool row,
        tatami::MaybeOracle<oracle_, Index_> oracle,
        const tatami::Options& opt) :
        my_row(row),
        my_secondary_dim(coordinator.get_secondary_dim(row)),
        my_needs_value(opt.sparse_extract_value),
        my_needs_index(opt.sparse_extract_index),
        my_core(
            coordinator,
            slab_stats,
            row,
            std::move(oracle),
            my_secondary_dim,
            opt.sparse_extract_value,
            opt.sparse_extract_index
        )
    {}

    tatami::SparseRange<Value_, Index_> fetch(Index_ i, Value_* value_buffer, Index_* index_buffer) {
        auto fetched = my_core.fetch_raw(i, my_row, 0, my_secondary_dim);
        return process_sparse_slab(fetched, value_buffer, index_buffer, my_needs_value, my_needs_index);
    }

private:
    bool my_row;
    Index_ my_secondary_dim;
    bool my_needs_value, my_needs_index;
    SparseCore<solo_, oracle_, Value_, Index_, Chunk_> my_core;
};

template<bool solo_, bool oracle_, typename Value_, typename Index_, typename Chunk_>
class SparseBlock : public tatami::SparseExtractor<oracle_, Value_, Index_> {
public:
    SparseBlock(
        const ChunkCoordinator<Index_, true, Chunk_>& coordinator,
        const SlabCacheStats& slab_stats,
        bool row,
        tatami::MaybeOracle<oracle_, Index_> oracle,
        Index_ block_start,
        Index_ block_length,
        const tatami::Options& opt) :
        my_row(row),
        my_block_start(block_start),
        my_block_length(block_length),
        my_needs_value(opt.sparse_extract_value),
        my_needs_index(opt.sparse_extract_index),
        my_core(
            coordinator,
            slab_stats,
            row,
            std::move(oracle),
            block_length,
            my_needs_value,
            my_needs_index
        )
    {}

    tatami::SparseRange<Value_, Index_> fetch(Index_ i, Value_* value_buffer, Index_* index_buffer) {
        auto fetched = my_core.fetch_raw(i, my_row, my_block_start, my_block_length);
        return process_sparse_slab(fetched, value_buffer, index_buffer, my_needs_value, my_needs_index);
    }

private:
    bool my_row;
    Index_ my_block_start, my_block_length;
    bool my_needs_value, my_needs_index;
    SparseCore<solo_, oracle_, Value_, Index_, Chunk_> my_core;
};

template<bool solo_, bool oracle_, typename Value_, typename Index_, typename Chunk_>
class SparseIndex : public tatami::SparseExtractor<oracle_, Value_, Index_> {
public:
    SparseIndex(
        const ChunkCoordinator<Index_, true, Chunk_>& coordinator,
        const SlabCacheStats& slab_stats,
        bool row,
        tatami::MaybeOracle<oracle_, Index_> oracle,
        tatami::VectorPtr<Index_> indices_ptr,
        const tatami::Options& opt) :
        my_row(row),
        my_indices_ptr(std::move(indices_ptr)),
        my_needs_value(opt.sparse_extract_value),
        my_needs_index(opt.sparse_extract_index),
        my_core(
            coordinator,
            slab_stats,
            row,
            std::move(oracle),
            my_indices_ptr->size(),
            my_needs_value,
            my_needs_index
        )
    {}

    tatami::SparseRange<Value_, Index_> fetch(Index_ i, Value_* value_buffer, Index_* index_buffer) {
        auto fetched = my_core.fetch_raw(i, my_row, *my_indices_ptr, my_tmp_indices);
        return process_sparse_slab(fetched, value_buffer, index_buffer, my_needs_value, my_needs_index);
    }

private:
    bool my_row;
    tatami::VectorPtr<Index_> my_indices_ptr;
    std::vector<Index_> my_tmp_indices;
    bool my_needs_value, my_needs_index;
    SparseCore<solo_, oracle_, Value_, Index_, Chunk_> my_core;
};

/**************************
 **** Densified classes ***
 **************************/

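// The densified classes service dense extraction requests by reusing the sparse
// cores: each slab's (index, value) pairs are scattered into a zero-initialized
// dense buffer of the requested length.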
template<bool solo_, bool oracle_, typename Value_, typename Index_, typename Chunk_>
class DensifiedFull : public tatami::DenseExtractor<oracle_, Value_, Index_> {
public:
    DensifiedFull(
        const ChunkCoordinator<Index_, true, Chunk_>& coordinator,
        const SlabCacheStats& slab_stats,
        bool row,
        tatami::MaybeOracle<oracle_, Index_> oracle,
        const tatami::Options&) :
        my_row(row),
        my_secondary_dim(coordinator.get_secondary_dim(row)),
        my_core(
            coordinator,
            slab_stats,
            row,
            std::move(oracle),
            my_secondary_dim,
            true,
            true
        )
    {}

    const Value_* fetch(Index_ i, Value_* buffer) {
        auto contents = my_core.fetch_raw(i, my_row, 0, my_secondary_dim);

        Index_ num = contents.first->number[contents.second];
        auto vptr = contents.first->values[contents.second];
        auto iptr = contents.first->indices[contents.second];

        std::fill_n(buffer, my_secondary_dim, 0);
        for (Index_ x = 0; x < num; ++x, ++iptr, ++vptr) {
            buffer[*iptr] = *vptr;
        }
        return buffer;
    }

private:
    bool my_row;
    Index_ my_secondary_dim;
    SparseCore<solo_, oracle_, Value_, Index_, Chunk_> my_core;
};

template<bool solo_, bool oracle_, typename Value_, typename Index_, typename Chunk_>
class DensifiedBlock : public tatami::DenseExtractor<oracle_, Value_, Index_> {
public:
    DensifiedBlock(
        const ChunkCoordinator<Index_, true, Chunk_>& coordinator,
        const SlabCacheStats& slab_stats,
        bool row,
        tatami::MaybeOracle<oracle_, Index_> oracle,
        Index_ block_start,
        Index_ block_length,
        const tatami::Options&) :
        my_row(row),
        my_block_start(block_start),
        my_block_length(block_length),
        my_core(
            coordinator,
            slab_stats,
            row,
            std::move(oracle),
            block_length,
            true,
            true
        )
    {}

    const Value_* fetch(Index_ i, Value_* buffer) {
        auto contents = my_core.fetch_raw(i, my_row, my_block_start, my_block_length);

        auto vptr = contents.first->values[contents.second];
        auto iptr = contents.first->indices[contents.second];
        auto num = contents.first->number[contents.second];

        std::fill_n(buffer, my_block_length, 0);
        for (Index_ x = 0; x < num; ++x, ++iptr, ++vptr) {
            buffer[*iptr - my_block_start] = *vptr;
        }
        return buffer;
    }

private:
    bool my_row;
    Index_ my_block_start, my_block_length;
    SparseCore<solo_, oracle_, Value_, Index_, Chunk_> my_core;
};

template<bool solo_, bool oracle_, typename Value_, typename Index_, typename Chunk_>
class DensifiedIndex : public tatami::DenseExtractor<oracle_, Value_, Index_> {
public:
    DensifiedIndex(
        const ChunkCoordinator<Index_, true, Chunk_>& coordinator,
        const SlabCacheStats& slab_stats,
        bool row,
        tatami::MaybeOracle<oracle_, Index_> oracle,
        tatami::VectorPtr<Index_> indices_ptr,
        const tatami::Options&) :
        my_row(row),
        my_indices_ptr(std::move(indices_ptr)),
        my_core(
            coordinator,
            slab_stats,
            row,
            std::move(oracle),
            my_indices_ptr->size(),
            true,
            true
        )
    {
        // Building a lookup table from each requested index to its position in
        // the output buffer; offsetting by the first requested index avoids
        // allocating space for the entire secondary dimension.
        const auto& indices = *my_indices_ptr;
        if (!indices.empty()) {
            my_remap_offset = indices.front();
            size_t alloc = indices.back() - my_remap_offset + 1;
            my_remap.resize(alloc);
            Index_ counter = 0;
            for (auto i : indices) {
                my_remap[i - my_remap_offset] = counter;
                ++counter;
            }
        }
    }

    const Value_* fetch(Index_ i, Value_* buffer) {
        auto contents = my_core.fetch_raw(i, my_row, *my_indices_ptr, my_tmp_indices);

        auto vptr = contents.first->values[contents.second];
        auto iptr = contents.first->indices[contents.second];
        auto num = contents.first->number[contents.second];

        auto nidx = my_indices_ptr->size();
        std::fill_n(buffer, nidx, 0);
        for (Index_ x = 0; x < num; ++x, ++iptr, ++vptr) {
            buffer[my_remap[*iptr - my_remap_offset]] = *vptr;
        }
        return buffer;
    }

private:
    bool my_row;
    tatami::VectorPtr<Index_> my_indices_ptr;
    Index_ my_remap_offset = 0;
    std::vector<Index_> my_remap;
    std::vector<Index_> my_tmp_indices;
    SparseCore<solo_, oracle_, Value_, Index_, Chunk_> my_core;
};

}
/**
 * @endcond
 */

/**
 * @brief Matrix of custom sparse chunks.
 */
template<typename Value_, typename Index_, typename Chunk_>
class CustomSparseChunkedMatrix : public tatami::Matrix<Value_, Index_> {
public:
    /**
     * @param mat_nrow Number of rows in the matrix.
     * @param mat_ncol Number of columns in the matrix.
     * @param chunk_nrow Number of rows in each chunk.
     * @param chunk_ncol Number of columns in each chunk.
     * @param chunks Vector of chunks covering the matrix.
     * @param row_major Whether `chunks` is stored in row-major order over the chunk grid.
     * @param opt Further options for data extraction.
     */
    CustomSparseChunkedMatrix(Index_ mat_nrow, Index_ mat_ncol, Index_ chunk_nrow, Index_ chunk_ncol, std::vector<Chunk_> chunks, bool row_major, const CustomSparseChunkedMatrixOptions& opt) :
        coordinator(ChunkDimensionStats<Index_>(mat_nrow, chunk_nrow), ChunkDimensionStats<Index_>(mat_ncol, chunk_ncol), std::move(chunks), row_major),
        cache_size_in_bytes(opt.maximum_cache_size),
        require_minimum_cache(opt.require_minimum_cache)
    {}

private:
    CustomChunkedMatrix_internal::ChunkCoordinator<Index_, true, Chunk_> coordinator;
    size_t cache_size_in_bytes;
    bool require_minimum_cache;

public:
    Index_ nrow() const {
        return coordinator.get_nrow();
    }

    Index_ ncol() const {
        return coordinator.get_ncol();
    }

    bool prefer_rows() const {
        return coordinator.prefer_rows_internal();
    }

    bool uses_oracle(bool) const {
        return true;
    }

    double prefer_rows_proportion() const {
        return static_cast<double>(coordinator.prefer_rows_internal());
    }

    bool is_sparse() const {
        return true;
    }

    double is_sparse_proportion() const {
        return 1;
    }

    using tatami::Matrix<Value_, Index_>::dense;

    using tatami::Matrix<Value_, Index_>::sparse;

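    // All of the dense/sparse extractor factories below funnel through
    // raw_internal(), which computes the slab cache statistics for the requested
    // direction and picks the cached (myopic/oracular) or solo implementation.
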
    /********************
     *** Myopic dense ***
     ********************/
private:
    template<
        template<bool, typename, typename> class Interface_,
        bool oracle_,
        template<bool, bool, typename, typename, class> class Extractor_,
        typename ... Args_
    >
    std::unique_ptr<Interface_<oracle_, Value_, Index_> > raw_internal(bool row, Index_ secondary_length, const tatami::Options& opt, Args_&& ... args) const {
        size_t element_size = (opt.sparse_extract_value ? sizeof(typename Chunk_::value_type) : 0) + (opt.sparse_extract_index ? sizeof(Index_) : 0);

        if (row) {
            // Remember, the num_chunks_per_column is the number of slabs needed to divide up all the *rows* of the matrix.
            SlabCacheStats stats(coordinator.get_chunk_nrow(), secondary_length, coordinator.get_num_chunks_per_column(), cache_size_in_bytes, element_size, require_minimum_cache);
            if (stats.max_slabs_in_cache > 0) {
                return std::make_unique<Extractor_<false, oracle_, Value_, Index_, Chunk_> >(coordinator, stats, row, std::forward<Args_>(args)...);
            } else {
                // Cache cannot hold even one slab, so we fall back to the solo extractor.
                return std::make_unique<Extractor_<true, oracle_, Value_, Index_, Chunk_> >(coordinator, stats, row, std::forward<Args_>(args)...);
            }
        } else {
            // Remember, the num_chunks_per_row is the number of slabs needed to divide up all the *columns* of the matrix.
            SlabCacheStats stats(coordinator.get_chunk_ncol(), secondary_length, coordinator.get_num_chunks_per_row(), cache_size_in_bytes, element_size, require_minimum_cache);
            if (stats.max_slabs_in_cache > 0) {
                return std::make_unique<Extractor_<false, oracle_, Value_, Index_, Chunk_> >(coordinator, stats, row, std::forward<Args_>(args)...);
            } else {
                return std::make_unique<Extractor_<true, oracle_, Value_, Index_, Chunk_> >(coordinator, stats, row, std::forward<Args_>(args)...);
            }
        }
    }

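    // The three dense_internal() overloads mirror tatami's full/block/indexed
    // access patterns; each computes the secondary extraction length and passes
    // the oracle (if any) through to the chosen extractor.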
    template<bool oracle_>
    std::unique_ptr<tatami::DenseExtractor<oracle_, Value_, Index_> > dense_internal(bool row, tatami::MaybeOracle<oracle_, Index_> oracle, const tatami::Options& opt) const {
        return raw_internal<tatami::DenseExtractor, oracle_, CustomChunkedMatrix_internal::DensifiedFull>(row, coordinator.get_secondary_dim(row), opt, std::move(oracle), opt);
    }

    template<bool oracle_>
    std::unique_ptr<tatami::DenseExtractor<oracle_, Value_, Index_> > dense_internal(
        bool row,
        tatami::MaybeOracle<oracle_, Index_> oracle,
        Index_ block_start,
        Index_ block_length,
        const tatami::Options& opt)
    const {
        return raw_internal<tatami::DenseExtractor, oracle_, CustomChunkedMatrix_internal::DensifiedBlock>(row, block_length, opt, std::move(oracle), block_start, block_length, opt);
    }

    template<bool oracle_>
    std::unique_ptr<tatami::DenseExtractor<oracle_, Value_, Index_> > dense_internal(
        bool row,
        tatami::MaybeOracle<oracle_, Index_> oracle,
        tatami::VectorPtr<Index_> indices_ptr,
        const tatami::Options& opt)
    const {
        auto num_indices = indices_ptr->size();
        return raw_internal<tatami::DenseExtractor, oracle_, CustomChunkedMatrix_internal::DensifiedIndex>(row, num_indices, opt, std::move(oracle), std::move(indices_ptr), opt);
    }

public:
    std::unique_ptr<tatami::MyopicDenseExtractor<Value_, Index_> > dense(bool row, const tatami::Options& opt) const {
        return dense_internal<false>(row, false, opt);
    }

    std::unique_ptr<tatami::MyopicDenseExtractor<Value_, Index_> > dense(bool row, Index_ block_start, Index_ block_length, const tatami::Options& opt) const {
        return dense_internal<false>(row, false, block_start, block_length, opt);
    }

    std::unique_ptr<tatami::MyopicDenseExtractor<Value_, Index_> > dense(bool row, tatami::VectorPtr<Index_> indices_ptr, const tatami::Options& opt) const {
        return dense_internal<false>(row, false, std::move(indices_ptr), opt);
    }

    /***********************
     *** Oracular dense ***
     ***********************/
public:
    std::unique_ptr<tatami::OracularDenseExtractor<Value_, Index_> > dense(
        bool row,
        std::shared_ptr<const tatami::Oracle<Index_> > oracle,
        const tatami::Options& opt)
    const {
        return dense_internal<true>(row, std::move(oracle), opt);
    }

    std::unique_ptr<tatami::OracularDenseExtractor<Value_, Index_> > dense(
        bool row,
        std::shared_ptr<const tatami::Oracle<Index_> > oracle,
        Index_ block_start,
        Index_ block_length,
        const tatami::Options& opt)
    const {
        return dense_internal<true>(row, std::move(oracle), block_start, block_length, opt);
    }

    std::unique_ptr<tatami::OracularDenseExtractor<Value_, Index_> > dense(
        bool row,
        std::shared_ptr<const tatami::Oracle<Index_> > oracle,
        tatami::VectorPtr<Index_> indices_ptr,
        const tatami::Options& opt)
    const {
        return dense_internal<true>(row, std::move(oracle), std::move(indices_ptr), opt);
    }

    /*********************
     *** Myopic sparse ***
     *********************/
private:
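    // The sparse_internal() overloads parallel the dense_internal() ones above,
    // differing only in the extractor classes that they instantiate.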
    template<bool oracle_>
    std::unique_ptr<tatami::SparseExtractor<oracle_, Value_, Index_> > sparse_internal(bool row, tatami::MaybeOracle<oracle_, Index_> oracle, const tatami::Options& opt) const {
        return raw_internal<tatami::SparseExtractor, oracle_, CustomChunkedMatrix_internal::SparseFull>(row, coordinator.get_secondary_dim(row), opt, std::move(oracle), opt);
    }

    template<bool oracle_>
    std::unique_ptr<tatami::SparseExtractor<oracle_, Value_, Index_> > sparse_internal(
        bool row,
        tatami::MaybeOracle<oracle_, Index_> oracle,
        Index_ block_start,
        Index_ block_length,
        const tatami::Options& opt)
    const {
        return raw_internal<tatami::SparseExtractor, oracle_, CustomChunkedMatrix_internal::SparseBlock>(row, block_length, opt, std::move(oracle), block_start, block_length, opt);
    }

    template<bool oracle_>
    std::unique_ptr<tatami::SparseExtractor<oracle_, Value_, Index_> > sparse_internal(
        bool row,
        tatami::MaybeOracle<oracle_, Index_> oracle,
        tatami::VectorPtr<Index_> indices_ptr,
        const tatami::Options& opt)
    const {
        auto num_indices = indices_ptr->size();
        return raw_internal<tatami::SparseExtractor, oracle_, CustomChunkedMatrix_internal::SparseIndex>(row, num_indices, opt, std::move(oracle), std::move(indices_ptr), opt);
    }

public:
    std::unique_ptr<tatami::MyopicSparseExtractor<Value_, Index_> > sparse(bool row, const tatami::Options& opt) const {
        return sparse_internal<false>(row, false, opt);
    }

    std::unique_ptr<tatami::MyopicSparseExtractor<Value_, Index_> > sparse(bool row, Index_ block_start, Index_ block_length, const tatami::Options& opt) const {
        return sparse_internal<false>(row, false, block_start, block_length, opt);
    }

    std::unique_ptr<tatami::MyopicSparseExtractor<Value_, Index_> > sparse(bool row, tatami::VectorPtr<Index_> indices_ptr, const tatami::Options& opt) const {
        return sparse_internal<false>(row, false, std::move(indices_ptr), opt);
    }

    /***********************
     *** Oracular sparse ***
     ***********************/
public:
    std::unique_ptr<tatami::OracularSparseExtractor<Value_, Index_> > sparse(
        bool row,
        std::shared_ptr<const tatami::Oracle<Index_> > oracle,
        const tatami::Options& opt)
    const {
        return sparse_internal<true>(row, std::move(oracle), opt);
    }

    std::unique_ptr<tatami::OracularSparseExtractor<Value_, Index_> > sparse(
        bool row,
        std::shared_ptr<const tatami::Oracle<Index_> > oracle,
        Index_ block_start,
        Index_ block_length,
        const tatami::Options& opt)
    const {
        return sparse_internal<true>(row, std::move(oracle), block_start, block_length, opt);
    }

    std::unique_ptr<tatami::OracularSparseExtractor<Value_, Index_> > sparse(
        bool row,
        std::shared_ptr<const tatami::Oracle<Index_> > oracle,
        tatami::VectorPtr<Index_> indices_ptr,
        const tatami::Options& opt)
    const {
        return sparse_internal<true>(row, std::move(oracle), std::move(indices_ptr), opt);
    }
};

}

#endif
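For orientation, a minimal usage sketch follows. It assumes the umbrella header `tatami_chunked/tatami_chunked.hpp` and the `MockSimpleSparseChunk` helper from this library's mock chunk classes; in practice, substitute your own `Chunk_` implementation and populate each chunk with actual data.

#include "tatami_chunked/tatami_chunked.hpp"
#include <vector>

void example() {
    tatami_chunked::CustomSparseChunkedMatrixOptions opt;
    opt.maximum_cache_size = 10000000; // allow up to ~10 MB of cached slabs.

    // A 100x200 matrix split into 10x20 chunks, i.e., a 10x10 grid of chunks
    // supplied in row-major order over that grid. Each chunk would normally be
    // constructed with its own 10x20 sparse contents.
    std::vector<tatami_chunked::MockSimpleSparseChunk> chunks(100);
    tatami_chunked::CustomSparseChunkedMatrix<double, int, tatami_chunked::MockSimpleSparseChunk> mat(
        100, 200, // matrix dimensions
        10, 20,   // chunk dimensions
        std::move(chunks),
        true,     // chunks are in row-major order
        opt
    );

    // Extraction goes through the usual tatami interface.
    auto ext = mat.dense(/* row = */ true, tatami::Options());
    std::vector<double> buffer(mat.ncol());
    const double* row0 = ext->fetch(0, buffer.data());
    (void) row0;
}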