tatami_r
R bindings to tatami matrices
Loading...
Searching...
No Matches
UnknownMatrix.hpp
Go to the documentation of this file.
1#ifndef TATAMI_R_UNKNOWNMATRIX_HPP
2#define TATAMI_R_UNKNOWNMATRIX_HPP
3
4#include "Rcpp.h"
5#include "tatami/tatami.hpp"
6
7#include "parallelize.hpp"
8#include "dense_extractor.hpp"
9#include "sparse_extractor.hpp"
10
11#include <algorithm>
12#include <cstddef>
13#include <limits>
14#include <memory>
15#include <numeric>
16#include <optional>
17#include <stdexcept>
18#include <string>
19#include <vector>
17
23namespace tatami_r {
24
33 std::optional<std::size_t> maximum_cache_size;
34
41};
42
55template<typename Value_, typename Index_, typename CachedValue_ = Value_, typename CachedIndex_ = Index_>
56class UnknownMatrix : public tatami::Matrix<Value_, Index_> {
57public:
64 UnknownMatrix(Rcpp::RObject seed, const UnknownMatrixOptions& opt) :
65 my_original_seed(seed),
66 my_delayed_env(Rcpp::Environment::namespace_env("DelayedArray")),
67 my_sparse_env(Rcpp::Environment::namespace_env("SparseArray")),
68 my_dense_extractor(my_delayed_env["extract_array"]),
69 my_sparse_extractor(my_sparse_env["extract_sparse_array"])
70 {
71 // We assume the constructor only occurs on the main thread, so we
72 // won't bother locking things up. I'm also not sure that the
73 // operations in the initialization list are thread-safe.
74
75 {
76 const auto base = Rcpp::Environment::base_env();
77 const Rcpp::Function fun = base["dim"];
78 const Rcpp::RObject output = fun(seed);
79 if (output.sexp_type() != INTSXP) {
80 auto ctype = get_class_name(my_original_seed);
81 throw std::runtime_error("'dim(<" + ctype + ">)' should return an integer vector");
82 }
83
84 const Rcpp::IntegerVector dims(output);
85 if (dims.size() != 2 || dims[0] < 0 || dims[1] < 0) {
86 auto ctype = get_class_name(my_original_seed);
87 throw std::runtime_error("'dim(<" + ctype + ">)' should contain two non-negative integers");
88 }
89
90 // If this cast is okay, all subsequent casts from 'int' to 'Index_' will be okay.
91 // This is because all subsequent casts will involve values that are smaller than 'dims', e.g., chunk extents.
92 // For example, an ArbitraryArrayGrid is restricted by the ticks, while a RegularArrayGrid must have chunkdim <= refdim.
93 my_nrow = sanisizer::cast<Index_>(dims[0]);
94 my_ncol = sanisizer::cast<Index_>(dims[1]);
95
96 // Checking that we can safely create an Rcpp::IntegerVector without overfllow.
97 // We do it here once, so that we don't need to check in each call to consecutive_indices() or increment_indices() or whatever.
99 }
100
101 {
102 const Rcpp::Function fun = my_delayed_env["is_sparse"];
103 const Rcpp::LogicalVector is_sparse = fun(seed);
104 if (is_sparse.size() != 1) {
105 auto ctype = get_class_name(my_original_seed);
106 throw std::runtime_error("'is_sparse(<" + ctype + ">)' should return a logical vector of length 1");
107 }
108 my_sparse = (is_sparse[0] != 0);
109 }
110
111 {
112 tatami::resize_container_to_Index_size(my_row_chunk_map, my_nrow);
113 tatami::resize_container_to_Index_size(my_col_chunk_map, my_ncol);
114
115 const Rcpp::Function fun = my_delayed_env["chunkGrid"];
116 const Rcpp::RObject grid = fun(seed);
117
118 if (grid == R_NilValue) {
119 my_row_max_chunk_size = 1;
120 my_col_max_chunk_size = 1;
121 std::iota(my_row_chunk_map.begin(), my_row_chunk_map.end(), static_cast<Index_>(0));
122 std::iota(my_col_chunk_map.begin(), my_col_chunk_map.end(), static_cast<Index_>(0));
123 my_row_chunk_ticks.resize(sanisizer::sum<decltype(my_row_chunk_ticks.size())>(my_nrow, 1));
124 std::iota(my_row_chunk_ticks.begin(), my_row_chunk_ticks.end(), static_cast<Index_>(0));
125 my_col_chunk_ticks.resize(sanisizer::sum<decltype(my_col_chunk_ticks.size())>(my_ncol, 1));
126 std::iota(my_col_chunk_ticks.begin(), my_col_chunk_ticks.end(), static_cast<Index_>(0));
127
128 // Both dense and sparse inputs are implicitly column-major, so
129 // if there isn't chunking information to the contrary, we'll
130 // favor extraction of the columns.
131 my_prefer_rows = false;
132
133 } else {
134 auto grid_cls = get_class_name(grid);
135
136 if (grid_cls == "RegularArrayGrid") {
137 const Rcpp::IntegerVector spacings(Rcpp::RObject(grid.slot("spacings")));
138 if (spacings.size() != 2) {
139 auto ctype = get_class_name(seed);
140 throw std::runtime_error("'chunkGrid(<" + ctype + ">)@spacings' should be an integer vector of length 2 with non-negative values");
141 }
142
143 const auto populate = [](
144 const Index_ extent,
145 const Index_ spacing,
146 std::vector<Index_>& map,
147 std::vector<Index_>& ticks
148 ) -> void {
149 if (spacing == 0) {
150 ticks.push_back(0);
151 } else {
152 ticks.reserve((extent / spacing) + (extent % spacing > 0) + 1);
153 Index_ start = 0;
154 ticks.push_back(start);
155 while (start != extent) {
156 auto to_fill = std::min(spacing, extent - start);
157 std::fill_n(map.begin() + start, to_fill, ticks.size() - 1);
158 start += to_fill;
159 ticks.push_back(start);
160 }
161 }
162 };
163
164 my_row_max_chunk_size = spacings[0];
165 populate(my_nrow, my_row_max_chunk_size, my_row_chunk_map, my_row_chunk_ticks);
166 my_col_max_chunk_size = spacings[1];
167 populate(my_ncol, my_col_max_chunk_size, my_col_chunk_map, my_col_chunk_ticks);
168
169 } else if (grid_cls == "ArbitraryArrayGrid") {
170 const Rcpp::List ticks(Rcpp::RObject(grid.slot("tickmarks")));
171 if (ticks.size() != 2) {
172 auto ctype = get_class_name(seed);
173 throw std::runtime_error("'chunkGrid(<" + ctype + ">)@tickmarks' should return a list of length 2");
174 }
175
176 const auto populate = [](
177 const Index_ extent,
178 const Rcpp::IntegerVector& ticks,
179 std::vector<Index_>& map,
180 std::vector<Index_>& new_ticks,
181 Index_& max_chunk_size
182 ) -> void {
183 if (ticks.size() != 0 && ticks[ticks.size() - 1] != static_cast<int>(extent)) {
184 throw std::runtime_error("invalid ticks returned by 'chunkGrid'");
185 }
186 new_ticks.resize(sanisizer::sum<decltype(new_ticks.size())>(ticks.size(), 1));
187 std::copy(ticks.begin(), ticks.end(), new_ticks.begin() + 1);
188
189 max_chunk_size = 0;
190 int start = 0;
192 Index_ counter = 0;
193
194 for (auto t : ticks) {
195 if (t < start) {
196 throw std::runtime_error("invalid ticks returned by 'chunkGrid'");
197 }
198 Index_ to_fill = t - start;
199 if (to_fill > max_chunk_size) {
200 max_chunk_size = to_fill;
201 }
202 std::fill_n(map.begin() + start, to_fill, counter);
203 ++counter;
204 start = t;
205 }
206 };
207
208 Rcpp::IntegerVector first(ticks[0]);
209 populate(my_nrow, first, my_row_chunk_map, my_row_chunk_ticks, my_row_max_chunk_size);
210 Rcpp::IntegerVector second(ticks[1]);
211 populate(my_ncol, second, my_col_chunk_map, my_col_chunk_ticks, my_col_max_chunk_size);
212
213 } else {
214 auto ctype = get_class_name(seed);
215 throw std::runtime_error("instance of unknown class '" + grid_cls + "' returned by 'chunkGrid(<" + ctype + ">)");
216 }
217
218 // Choose the dimension that requires pulling out fewer chunks.
219 const auto chunks_per_row = my_col_chunk_ticks.size() - 1;
220 const auto chunks_per_col = my_row_chunk_ticks.size() - 1;
221 my_prefer_rows = chunks_per_row <= chunks_per_col;
222 }
223 }
224
225 my_require_minimum_cache = opt.require_minimum_cache;
226 if (opt.maximum_cache_size.has_value()) {
227 my_cache_size_in_bytes = *(opt.maximum_cache_size);
228 } else {
229 Rcpp::Function fun = my_delayed_env["getAutoBlockSize"];
230 Rcpp::NumericVector bsize = fun();
231 if (bsize.size() != 1 || bsize[0] < 0) {
232 throw std::runtime_error("'getAutoBlockSize()' should return a non-negative number of bytes");
233 } else if (bsize[0] > std::numeric_limits<std::size_t>::max()) {
234 throw std::runtime_error("integer overflow from the current value of 'getAutoBlockSize()'");
235 }
236 my_cache_size_in_bytes = bsize[0];
237 }
238 }
239
246 UnknownMatrix(Rcpp::RObject seed) : UnknownMatrix(std::move(seed), UnknownMatrixOptions()) {}
247
248private:
249 Index_ my_nrow, my_ncol;
250 bool my_sparse, my_prefer_rows;
251
252 std::vector<Index_> my_row_chunk_map, my_col_chunk_map;
253 std::vector<Index_> my_row_chunk_ticks, my_col_chunk_ticks;
254
255 // To decide how many chunks to store in the cache, we pretend the largest
256 // chunk is a good representative. This is a bit suboptimal for irregular
257 // chunks but the LruSlabCache class doesn't have a good way of dealing
258 // with this right now. The fundamental problem is that variable slabs will
259 // either (i) all reach the maximum allocation eventually, if slabs are
260 // reused, or (ii) require lots of allocations, if slabs are not reused, or
261 // (iii) require manual defragmentation, if slabs are reused in a manner
262 // that avoids inflation to the maximum allocation.
263 Index_ my_row_max_chunk_size, my_col_max_chunk_size;
264
265 std::size_t my_cache_size_in_bytes;
266 bool my_require_minimum_cache;
267
268 Rcpp::RObject my_original_seed;
269 Rcpp::Environment my_delayed_env, my_sparse_env;
270 Rcpp::Function my_dense_extractor, my_sparse_extractor;
271
272public:
273 Index_ nrow() const {
274 return my_nrow;
275 }
276
277 Index_ ncol() const {
278 return my_ncol;
279 }
280
281 bool is_sparse() const {
282 return my_sparse;
283 }
284
285 double is_sparse_proportion() const {
286 return static_cast<double>(my_sparse);
287 }
288
289 bool prefer_rows() const {
290 return my_prefer_rows;
291 }
292
293 double prefer_rows_proportion() const {
294 return static_cast<double>(my_prefer_rows);
295 }
296
297 bool uses_oracle(bool) const {
298 return true;
299 }
300
301private:
302 Index_ max_primary_chunk_length(const bool row) const {
303 return (row ? my_row_max_chunk_size : my_col_max_chunk_size);
304 }
305
306 Index_ primary_num_chunks(const bool row, const Index_ primary_chunk_length) const {
307 auto primary_dim = (row ? my_nrow : my_ncol);
308 if (primary_chunk_length == 0) {
309 return primary_dim;
310 } else {
311 return primary_dim / primary_chunk_length;
312 }
313 }
314
315 Index_ secondary_dim(const bool row) const {
316 return (row ? my_ncol : my_nrow);
317 }
318
319 const std::vector<Index_>& chunk_ticks(const bool row) const {
320 if (row) {
321 return my_row_chunk_ticks;
322 } else {
323 return my_col_chunk_ticks;
324 }
325 }
326
327 const std::vector<Index_>& chunk_map(const bool row) const {
328 if (row) {
329 return my_row_chunk_map;
330 } else {
331 return my_col_chunk_map;
332 }
333 }
334
335 /********************
336 *** Myopic dense ***
337 ********************/
338private:
339 template<
340 bool oracle_,
341 template <bool, bool, typename, typename, typename> class FromDense_,
342 template <bool, bool, typename, typename, typename, typename> class FromSparse_,
343 typename ... Args_
344 >
345 std::unique_ptr<tatami::DenseExtractor<oracle_, Value_, Index_> > populate_dense_internal(
346 const bool row,
347 const Index_ non_target_length,
349 Args_&& ... args
350 ) const {
351 std::unique_ptr<tatami::DenseExtractor<oracle_, Value_, Index_> > output;
352
353 const Index_ max_target_chunk_length = max_primary_chunk_length(row);
354 tatami_chunked::SlabCacheStats<Index_> stats(
355 /* target length = */ max_target_chunk_length,
356 /* non_target_length = */ non_target_length,
357 /* target_num_slabs = */ primary_num_chunks(row, max_target_chunk_length),
358 /* cache_size_in_bytes = */ my_cache_size_in_bytes,
359 /* element_size = */ sizeof(CachedValue_),
360 /* require_minimum_cache = */ my_require_minimum_cache
361 );
362
363 const auto& map = chunk_map(row);
364 const auto& ticks = chunk_ticks(row);
365 const bool solo = (stats.max_slabs_in_cache == 0);
366
367#ifdef TATAMI_R_PARALLELIZE_UNKNOWN
368 // This involves some Rcpp initializations, so we lock it just in case.
369 auto& mexec = executor();
370 mexec.run([&]() -> void {
371#endif
372
373 if (!my_sparse) {
374 if (solo) {
375 output.reset(
376 new FromDense_<true, oracle_, Value_, Index_, CachedValue_>(
377 my_original_seed,
378 my_dense_extractor,
379 row,
380 std::move(oracle),
381 std::forward<Args_>(args)...,
382 ticks,
383 map,
384 stats
385 )
386 );
387
388 } else {
389 output.reset(
390 new FromDense_<false, oracle_, Value_, Index_, CachedValue_>(
391 my_original_seed,
392 my_dense_extractor,
393 row,
394 std::move(oracle),
395 std::forward<Args_>(args)...,
396 ticks,
397 map,
398 stats
399 )
400 );
401 }
402
403 } else {
404 if (solo) {
405 output.reset(
406 new FromSparse_<true, oracle_, Value_, Index_, CachedValue_, CachedIndex_>(
407 my_original_seed,
408 my_sparse_extractor,
409 row,
410 std::move(oracle),
411 std::forward<Args_>(args)...,
412 max_target_chunk_length,
413 ticks,
414 map,
415 stats
416 )
417 );
418
419 } else {
420 output.reset(
421 new FromSparse_<false, oracle_, Value_, Index_, CachedValue_, CachedIndex_>(
422 my_original_seed,
423 my_sparse_extractor,
424 row,
425 std::move(oracle),
426 std::forward<Args_>(args)...,
427 max_target_chunk_length,
428 ticks,
429 map,
430 stats
431 )
432 );
433 }
434 }
435
436#ifdef TATAMI_R_PARALLELIZE_UNKNOWN
437 });
438#endif
439
440 return output;
441 }
442
443 template<bool oracle_>
444 std::unique_ptr<tatami::DenseExtractor<oracle_, Value_, Index_> > populate_dense(
445 const bool row,
447 const tatami::Options&
448 ) const {
449 const Index_ non_target_dim = secondary_dim(row);
450 return populate_dense_internal<oracle_, DenseFull, DensifiedSparseFull>(
451 row,
452 non_target_dim,
453 std::move(ora),
454 non_target_dim
455 );
456 }
457
458 template<bool oracle_>
459 std::unique_ptr<tatami::DenseExtractor<oracle_, Value_, Index_> > populate_dense(
460 const bool row,
462 const Index_ block_start,
463 const Index_ block_length,
464 const tatami::Options&
465 ) const {
466 return populate_dense_internal<oracle_, DenseBlock, DensifiedSparseBlock>(
467 row,
468 block_length,
469 std::move(ora),
470 block_start,
471 block_length
472 );
473 }
474
475 template<bool oracle_>
476 std::unique_ptr<tatami::DenseExtractor<oracle_, Value_, Index_> > populate_dense(
477 const bool row,
479 tatami::VectorPtr<Index_> indices_ptr,
480 const tatami::Options&
481 ) const {
482 const Index_ nidx = indices_ptr->size();
483 return populate_dense_internal<oracle_, DenseIndexed, DensifiedSparseIndexed>(
484 row,
485 nidx,
486 std::move(ora),
487 std::move(indices_ptr)
488 );
489 }
490
491public:
492 std::unique_ptr<tatami::MyopicDenseExtractor<Value_, Index_> > dense(
493 const bool row,
494 const tatami::Options& opt
495 ) const {
496 return populate_dense<false>(row, false, opt);
497 }
498
499 std::unique_ptr<tatami::MyopicDenseExtractor<Value_, Index_> > dense(
500 const bool row,
501 const Index_ block_start,
502 const Index_ block_length,
503 const tatami::Options& opt
504 ) const {
505 return populate_dense<false>(row, false, block_start, block_length, opt);
506 }
507
508 std::unique_ptr<tatami::MyopicDenseExtractor<Value_, Index_> > dense(
509 const bool row,
510 tatami::VectorPtr<Index_> indices_ptr,
511 const tatami::Options& opt
512 ) const {
513 return populate_dense<false>(row, false, std::move(indices_ptr), opt);
514 }
515
516 /**********************
517 *** Oracular dense ***
518 **********************/
519public:
520 std::unique_ptr<tatami::OracularDenseExtractor<Value_, Index_> > dense(
521 const bool row,
522 std::shared_ptr<const tatami::Oracle<Index_> > ora,
523 const tatami::Options& opt
524 ) const {
525 return populate_dense<true>(row, std::move(ora), opt);
526 }
527
528 std::unique_ptr<tatami::OracularDenseExtractor<Value_, Index_> > dense(
529 const bool row,
530 std::shared_ptr<const tatami::Oracle<Index_> > ora,
531 const Index_ block_start,
532 const Index_ block_length,
533 const tatami::Options& opt
534 ) const {
535 return populate_dense<true>(row, std::move(ora), block_start, block_length, opt);
536 }
537
538 std::unique_ptr<tatami::OracularDenseExtractor<Value_, Index_> > dense(
539 const bool row,
540 std::shared_ptr<const tatami::Oracle<Index_> > ora,
541 tatami::VectorPtr<Index_> indices_ptr,
542 const tatami::Options& opt
543 ) const {
544 return populate_dense<true>(row, std::move(ora), std::move(indices_ptr), opt);
545 }
546
547 /*********************
548 *** Myopic sparse ***
549 *********************/
550public:
551 template<
552 bool oracle_,
553 template<bool, bool, typename, typename, typename, typename> class FromSparse_,
554 typename ... Args_
555 >
556 std::unique_ptr<tatami::SparseExtractor<oracle_, Value_, Index_> > populate_sparse_internal(
557 const bool row,
558 const Index_ non_target_length,
560 const tatami::Options& opt,
561 Args_&& ... args
562 ) const {
563 const Index_ max_target_chunk_length = max_primary_chunk_length(row);
564 tatami_chunked::SlabCacheStats<Index_> stats(
565 /* target_length = */ max_target_chunk_length,
566 /* non_target_length = */ non_target_length,
567 /* target_num_slabs = */ primary_num_chunks(row, max_target_chunk_length),
568 /* cache_size_in_bytes = */ my_cache_size_in_bytes,
569 /* element_size = */ (opt.sparse_extract_index ? sizeof(CachedIndex_) : 0) + (opt.sparse_extract_value ? sizeof(CachedValue_) : 0),
570 /* require_minimum_cache = */ my_require_minimum_cache
571 );
572
573 const auto& map = chunk_map(row);
574 const auto& ticks = chunk_ticks(row);
575 const bool needs_value = opt.sparse_extract_value;
576 const bool needs_index = opt.sparse_extract_index;
577 const bool solo = stats.max_slabs_in_cache == 0;
578
579 std::unique_ptr<tatami::SparseExtractor<oracle_, Value_, Index_> > output;
580
581#ifdef TATAMI_R_PARALLELIZE_UNKNOWN
582 // This involves some Rcpp initializations, so we lock it just in case.
583 auto& mexec = executor();
584 mexec.run([&]() -> void {
585#endif
586
587 if (solo) {
588 output.reset(
589 new FromSparse_<true, oracle_, Value_, Index_, CachedValue_, CachedIndex_>(
590 my_original_seed,
591 my_sparse_extractor,
592 row,
593 std::move(oracle),
594 std::forward<Args_>(args)...,
595 max_target_chunk_length,
596 ticks,
597 map,
598 stats,
599 needs_value,
600 needs_index
601 )
602 );
603
604 } else {
605 output.reset(
606 new FromSparse_<false, oracle_, Value_, Index_, CachedValue_, CachedIndex_>(
607 my_original_seed,
608 my_sparse_extractor,
609 row,
610 std::move(oracle),
611 std::forward<Args_>(args)...,
612 max_target_chunk_length,
613 ticks,
614 map,
615 stats,
616 needs_value,
617 needs_index
618 )
619 );
620 }
621
622#ifdef TATAMI_R_PARALLELIZE_UNKNOWN
623 });
624#endif
625
626 return output;
627 }
628
629 template<bool oracle_>
630 std::unique_ptr<tatami::SparseExtractor<oracle_, Value_, Index_> > populate_sparse(
631 const bool row,
633 const tatami::Options& opt
634 ) const {
635 const Index_ non_target_dim = secondary_dim(row);
636 return populate_sparse_internal<oracle_, SparseFull>(
637 row,
638 non_target_dim,
639 std::move(ora),
640 opt,
641 non_target_dim
642 );
643 }
644
645 template<bool oracle_>
646 std::unique_ptr<tatami::SparseExtractor<oracle_, Value_, Index_> > populate_sparse(
647 const bool row,
649 const Index_ block_start,
650 const Index_ block_length,
651 const tatami::Options& opt
652 ) const {
653 return populate_sparse_internal<oracle_, SparseBlock>(
654 row,
655 block_length,
656 std::move(ora),
657 opt,
658 block_start,
659 block_length
660 );
661 }
662
663 template<bool oracle_>
664 std::unique_ptr<tatami::SparseExtractor<oracle_, Value_, Index_> > populate_sparse(
665 const bool row,
667 tatami::VectorPtr<Index_> indices_ptr,
668 const tatami::Options& opt
669 ) const {
670 return populate_sparse_internal<oracle_, SparseIndexed>(
671 row,
672 indices_ptr->size(),
673 std::move(ora),
674 opt,
675 std::move(indices_ptr)
676 );
677 }
678
679public:
680 std::unique_ptr<tatami::MyopicSparseExtractor<Value_, Index_> > sparse(
681 const bool row,
682 const tatami::Options& opt
683 ) const {
684 if (!my_sparse) {
685 return std::make_unique<tatami::FullSparsifiedWrapper<false, Value_, Index_> >(
686 dense(row, opt),
687 secondary_dim(row),
688 opt
689 );
690 } else {
691 return populate_sparse<false>(row, false, opt);
692 }
693 }
694
695 std::unique_ptr<tatami::MyopicSparseExtractor<Value_, Index_> > sparse(
696 const bool row,
697 const Index_ block_start,
698 const Index_ block_length,
699 const tatami::Options& opt
700 ) const {
701 if (!my_sparse) {
702 return std::make_unique<tatami::BlockSparsifiedWrapper<false, Value_, Index_> >(
703 dense(row, block_start, block_length, opt),
704 block_start,
705 block_length,
706 opt
707 );
708 } else {
709 return populate_sparse<false>(row, false, block_start, block_length, opt);
710 }
711 }
712
713 std::unique_ptr<tatami::MyopicSparseExtractor<Value_, Index_> > sparse(
714 const bool row,
715 tatami::VectorPtr<Index_> indices_ptr,
716 const tatami::Options& opt
717 ) const {
718 if (!my_sparse) {
719 auto index_copy = indices_ptr;
720 return std::make_unique<tatami::IndexSparsifiedWrapper<false, Value_, Index_> >(
721 dense(row, std::move(indices_ptr), opt),
722 std::move(index_copy),
723 opt
724 );
725 } else {
726 return populate_sparse<false>(row, false, std::move(indices_ptr), opt);
727 }
728 }
729
730 /**********************
731 *** Oracular sparse ***
732 **********************/
733public:
734 std::unique_ptr<tatami::OracularSparseExtractor<Value_, Index_> > sparse(
735 const bool row,
736 std::shared_ptr<const tatami::Oracle<Index_> > ora,
737 const tatami::Options& opt
738 ) const {
739 if (!my_sparse) {
740 return std::make_unique<tatami::FullSparsifiedWrapper<true, Value_, Index_> >(
741 dense(row, std::move(ora), opt),
742 secondary_dim(row),
743 opt
744 );
745 } else {
746 return populate_sparse<true>(row, std::move(ora), opt);
747 }
748 }
749
750 std::unique_ptr<tatami::OracularSparseExtractor<Value_, Index_> > sparse(
751 const bool row,
752 std::shared_ptr<const tatami::Oracle<Index_> > ora,
753 const Index_ block_start,
754 const Index_ block_length,
755 const tatami::Options& opt
756 ) const {
757 if (!my_sparse) {
758 return std::make_unique<tatami::BlockSparsifiedWrapper<true, Value_, Index_> >(
759 dense(row, std::move(ora), block_start, block_length, opt),
760 block_start,
761 block_length,
762 opt
763 );
764 } else {
765 return populate_sparse<true>(row, std::move(ora), block_start, block_length, opt);
766 }
767 }
768
769 std::unique_ptr<tatami::OracularSparseExtractor<Value_, Index_> > sparse(
770 const bool row,
771 std::shared_ptr<const tatami::Oracle<Index_> > ora,
772 tatami::VectorPtr<Index_> indices_ptr,
773 const tatami::Options& opt
774 ) const {
775 if (!my_sparse) {
776 auto index_copy = indices_ptr;
777 return std::make_unique<tatami::IndexSparsifiedWrapper<true, Value_, Index_> >(
778 dense(row, std::move(ora), std::move(indices_ptr), opt),
779 std::move(index_copy),
780 opt
781 );
782 } else {
783 return populate_sparse<true>(row, std::move(ora), std::move(indices_ptr), opt);
784 }
785 }
786};
787
788}
789
790#endif
Unknown matrix-like object in R.
Definition UnknownMatrix.hpp:56
UnknownMatrix(Rcpp::RObject seed, const UnknownMatrixOptions &opt)
Definition UnknownMatrix.hpp:64
UnknownMatrix(Rcpp::RObject seed)
Definition UnknownMatrix.hpp:246
tatami bindings for arbitrary R matrices.
manticore::Executor & executor()
Definition parallelize.hpp:46
std::shared_ptr< const std::vector< Index_ > > VectorPtr
Index_ can_cast_Index_to_container_size(const Index_ x)
void resize_container_to_Index_size(Container_ &container, const Index_ x, Args_ &&... args)
typename std::conditional< oracle_, std::shared_ptr< const Oracle< Index_ > >, bool >::type MaybeOracle
Safely parallelize for unknown matrices.
bool sparse_extract_index
bool sparse_extract_value
Options for data extraction from an UnknownMatrix.
Definition UnknownMatrix.hpp:28
bool require_minimum_cache
Definition UnknownMatrix.hpp:40
std::optional< std::size_t > maximum_cache_size
Definition UnknownMatrix.hpp:33