tatami_r
R bindings to tatami matrices
UnknownMatrix.hpp
1#ifndef TATAMI_R_UNKNOWNMATRIX_HPP
2#define TATAMI_R_UNKNOWNMATRIX_HPP
3
4#include "Rcpp.h"
5#include "tatami/tatami.hpp"
6
7#include "parallelize.hpp"
8#include "dense_extractor.hpp"
9#include "sparse_extractor.hpp"
10
11#include <vector>
12#include <memory>
13#include <string>
14#include <stdexcept>
15#include <optional>
16#include <cstddef>
17
18namespace tatami_r {
19
20/**
21 * @brief Options for data extraction from an UnknownMatrix.
22 */
23struct UnknownMatrixOptions {
28 std::optional<std::size_t> maximum_cache_size;
29
35 bool require_minimum_cache = true;
36};
37
50template<typename Value_, typename Index_, typename CachedValue_ = Value_, typename CachedIndex_ = Index_>
51class UnknownMatrix : public tatami::Matrix<Value_, Index_> {
52public:
59 UnknownMatrix(Rcpp::RObject seed, const UnknownMatrixOptions& opt) :
60 my_original_seed(seed),
61 my_delayed_env(Rcpp::Environment::namespace_env("DelayedArray")),
62 my_sparse_env(Rcpp::Environment::namespace_env("SparseArray")),
63 my_dense_extractor(my_delayed_env["extract_array"]),
64 my_sparse_extractor(my_sparse_env["extract_sparse_array"])
65 {
66 // We assume that construction only occurs on the main thread, so we
67 // won't bother with any locking. It's also not clear that the
68 // operations in the initialization list are thread-safe.
69
70 {
71 auto base = Rcpp::Environment::base_env();
72 Rcpp::Function fun = base["dim"];
73 Rcpp::RObject output = fun(seed);
74 if (output.sexp_type() != INTSXP) {
75 auto ctype = get_class_name(my_original_seed);
76 throw std::runtime_error("'dim(<" + ctype + ">)' should return an integer vector");
77 }
78 Rcpp::IntegerVector dims(output);
79 if (dims.size() != 2 || dims[0] < 0 || dims[1] < 0) {
80 auto ctype = get_class_name(my_original_seed);
81 throw std::runtime_error("'dim(<" + ctype + ">)' should contain two non-negative integers");
82 }
83
84 // If this cast is okay, all subsequent casts from 'int' to 'Index_' will be okay.
85 // This is because all subsequent casts will involve values that are no larger than 'dims', e.g., chunk extents.
86 // For example, an ArbitraryArrayGrid's ticks cannot exceed the dimension extents, while a RegularArrayGrid must have chunkdim <= refdim.
87 my_nrow = sanisizer::cast<Index_>(dims[0]);
88 my_ncol = sanisizer::cast<Index_>(dims[1]);
89 }
90
91 {
92 Rcpp::Function fun = my_delayed_env["is_sparse"];
93 Rcpp::LogicalVector is_sparse = fun(seed);
94 if (is_sparse.size() != 1) {
95 auto ctype = get_class_name(my_original_seed);
96 throw std::runtime_error("'is_sparse(<" + ctype + ">)' should return a logical vector of length 1");
97 }
98 my_sparse = (is_sparse[0] != 0);
99 }
100
101 {
102 tatami::resize_container_to_Index_size(my_row_chunk_map, my_nrow);
103 tatami::resize_container_to_Index_size(my_col_chunk_map, my_ncol);
104
105 Rcpp::Function fun = my_delayed_env["chunkGrid"];
106 Rcpp::RObject grid = fun(seed);
107
108 if (grid == R_NilValue) {
109 my_row_max_chunk_size = 1;
110 my_col_max_chunk_size = 1;
111 std::iota(my_row_chunk_map.begin(), my_row_chunk_map.end(), static_cast<Index_>(0));
112 std::iota(my_col_chunk_map.begin(), my_col_chunk_map.end(), static_cast<Index_>(0));
113 my_row_chunk_ticks.resize(sanisizer::sum<decltype(my_row_chunk_ticks.size())>(my_nrow, 1));
114 std::iota(my_row_chunk_ticks.begin(), my_row_chunk_ticks.end(), static_cast<Index_>(0));
115 my_col_chunk_ticks.resize(sanisizer::sum<decltype(my_col_chunk_ticks.size())>(my_ncol, 1));
116 std::iota(my_col_chunk_ticks.begin(), my_col_chunk_ticks.end(), static_cast<Index_>(0));
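// In other words, without any chunking information, each row and column is
// treated as its own chunk of extent 1, so the chunk map is just the identity.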
117
118 // Both dense and sparse inputs are implicitly column-major, so
119 // if there isn't chunking information to the contrary, we'll
120 // favor extraction of the columns.
121 my_prefer_rows = false;
122
123 } else {
124 auto grid_cls = get_class_name(grid);
125
126 if (grid_cls == "RegularArrayGrid") {
127 Rcpp::IntegerVector spacings(Rcpp::RObject(grid.slot("spacings")));
128 if (spacings.size() != 2) {
129 auto ctype = get_class_name(seed);
130 throw std::runtime_error("'chunkGrid(<" + ctype + ">)@spacings' should be an integer vector of length 2 with non-negative values");
131 }
132
133 auto populate = [](Index_ extent, Index_ spacing, std::vector<Index_>& map, std::vector<Index_>& ticks) {
134 if (spacing == 0) {
135 ticks.push_back(0);
136 } else {
137 ticks.reserve((extent / spacing) + (extent % spacing > 0) + 1);
138 Index_ start = 0;
139 ticks.push_back(start);
140 while (start != extent) {
141 auto to_fill = std::min(spacing, extent - start);
142 std::fill_n(map.begin() + start, to_fill, ticks.size() - 1);
143 start += to_fill;
144 ticks.push_back(start);
145 }
146 }
147 };
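// Worked example with hypothetical numbers: for extent = 10 and spacing = 4,
// 'ticks' ends up as {0, 4, 8, 10} and 'map' as {0, 0, 0, 0, 1, 1, 1, 1, 2, 2},
// i.e., each position along the dimension maps to the chunk that contains it.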
148
149 my_row_max_chunk_size = spacings[0];
150 populate(my_nrow, my_row_max_chunk_size, my_row_chunk_map, my_row_chunk_ticks);
151 my_col_max_chunk_size = spacings[1];
152 populate(my_ncol, my_col_max_chunk_size, my_col_chunk_map, my_col_chunk_ticks);
153
154 } else if (grid_cls == "ArbitraryArrayGrid") {
155 Rcpp::List ticks(Rcpp::RObject(grid.slot("tickmarks")));
156 if (ticks.size() != 2) {
157 auto ctype = get_class_name(seed);
158 throw std::runtime_error("'chunkGrid(<" + ctype + ">)@tickmarks' should return a list of length 2");
159 }
160
161 auto populate = [](Index_ extent, const Rcpp::IntegerVector& ticks, std::vector<Index_>& map, std::vector<Index_>& new_ticks, Index_& max_chunk_size) {
162 if (ticks.size() == 0 || ticks[ticks.size() - 1] != static_cast<int>(extent)) {
163 throw std::runtime_error("invalid ticks returned by 'chunkGrid'");
164 }
165 new_ticks.resize(sanisizer::sum<decltype(new_ticks.size())>(ticks.size(), 1));
166 std::copy(ticks.begin(), ticks.end(), new_ticks.begin() + 1);
167
168 max_chunk_size = 0;
169 int start = 0;
171 Index_ counter = 0;
172
173 for (auto t : ticks) {
174 if (t < start) {
175 throw std::runtime_error("invalid ticks returned by 'chunkGrid'");
176 }
177 Index_ to_fill = t - start;
178 if (to_fill > max_chunk_size) {
179 max_chunk_size = to_fill;
180 }
181 std::fill_n(map.begin() + start, to_fill, counter);
182 ++counter;
183 start = t;
184 }
185 };
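// Worked example with hypothetical numbers: for extent = 10 and tickmarks = {3, 7, 10},
// 'new_ticks' becomes {0, 3, 7, 10}, 'map' becomes {0, 0, 0, 1, 1, 1, 1, 2, 2, 2},
// and 'max_chunk_size' is 4 (the width of the largest chunk).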
186
187 Rcpp::IntegerVector first(ticks[0]);
188 populate(my_nrow, first, my_row_chunk_map, my_row_chunk_ticks, my_row_max_chunk_size);
189 Rcpp::IntegerVector second(ticks[1]);
190 populate(my_ncol, second, my_col_chunk_map, my_col_chunk_ticks, my_col_max_chunk_size);
191
192 } else {
193 auto ctype = get_class_name(seed);
194 throw std::runtime_error("instance of unknown class '" + grid_cls + "' returned by 'chunkGrid(<" + ctype + ">)'");
195 }
196
197 // Choose the dimension that requires pulling out fewer chunks.
198 auto chunks_per_row = my_col_chunk_ticks.size() - 1;
199 auto chunks_per_col = my_row_chunk_ticks.size() - 1;
200 my_prefer_rows = chunks_per_row <= chunks_per_col;
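// For example, if each row spans 4 chunks but each column spans 10, a full row
// can be extracted by visiting fewer chunks, so row-based access is preferred.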
201 }
202 }
203
204 my_require_minimum_cache = opt.require_minimum_cache;
205 if (opt.maximum_cache_size.has_value()) {
206 my_cache_size_in_bytes = *(opt.maximum_cache_size);
207 } else {
208 Rcpp::Function fun = my_delayed_env["getAutoBlockSize"];
209 Rcpp::NumericVector bsize = fun();
210 if (bsize.size() != 1 || bsize[0] < 0) {
211 throw std::runtime_error("'getAutoBlockSize()' should return a non-negative number of bytes");
212 } else if (bsize[0] > std::numeric_limits<std::size_t>::max()) {
213 throw std::runtime_error("integer overflow from the current value of 'getAutoBlockSize()'");
214 }
215 my_cache_size_in_bytes = bsize[0];
216 }
217 }
218
225 UnknownMatrix(Rcpp::RObject seed) : UnknownMatrix(std::move(seed), UnknownMatrixOptions()) {}
226
227private:
228 Index_ my_nrow, my_ncol;
229 bool my_sparse, my_prefer_rows;
230
231 std::vector<Index_> my_row_chunk_map, my_col_chunk_map;
232 std::vector<Index_> my_row_chunk_ticks, my_col_chunk_ticks;
233
234 // To decide how many chunks to store in the cache, we pretend the largest
235 // chunk is a good representative. This is a bit suboptimal for irregular
236 // chunks but the LruSlabCache class doesn't have a good way of dealing
237 // with this right now. The fundamental problem is that variable slabs will
238 // either (i) all reach the maximum allocation eventually, if slabs are
239 // reused, or (ii) require lots of allocations, if slabs are not reused, or
240 // (iii) require manual defragmentation, if slabs are reused in a manner
241 // that avoids inflation to the maximum allocation.
242 Index_ my_row_max_chunk_size, my_col_max_chunk_size;
243
244 std::size_t my_cache_size_in_bytes;
245 bool my_require_minimum_cache;
246
247 Rcpp::RObject my_original_seed;
248 Rcpp::Environment my_delayed_env, my_sparse_env;
249 Rcpp::Function my_dense_extractor, my_sparse_extractor;
250
251public:
252 Index_ nrow() const {
253 return my_nrow;
254 }
255
256 Index_ ncol() const {
257 return my_ncol;
258 }
259
260 bool is_sparse() const {
261 return my_sparse;
262 }
263
264 double is_sparse_proportion() const {
265 return static_cast<double>(my_sparse);
266 }
267
268 bool prefer_rows() const {
269 return my_prefer_rows;
270 }
271
272 double prefer_rows_proportion() const {
273 return static_cast<double>(my_prefer_rows);
274 }
275
276 bool uses_oracle(bool) const {
277 return true;
278 }
279
280private:
281 Index_ max_primary_chunk_length(bool row) const {
282 return (row ? my_row_max_chunk_size : my_col_max_chunk_size);
283 }
284
285 Index_ primary_num_chunks(bool row, Index_ primary_chunk_length) const {
286 auto primary_dim = (row ? my_nrow : my_ncol);
287 if (primary_chunk_length == 0) {
288 return primary_dim;
289 } else {
290 return primary_dim / primary_chunk_length;
291 }
292 }
293
294 Index_ secondary_dim(bool row) const {
295 return (row ? my_ncol : my_nrow);
296 }
297
298 const std::vector<Index_>& chunk_ticks(bool row) const {
299 if (row) {
300 return my_row_chunk_ticks;
301 } else {
302 return my_col_chunk_ticks;
303 }
304 }
305
306 const std::vector<Index_>& chunk_map(bool row) const {
307 if (row) {
308 return my_row_chunk_map;
309 } else {
310 return my_col_chunk_map;
311 }
312 }
313
314 /********************
315 *** Myopic dense ***
316 ********************/
317private:
318 template<
319 bool oracle_,
320 template <bool, bool, typename, typename, typename> class FromDense_,
321 template <bool, bool, typename, typename, typename, typename> class FromSparse_,
322 typename ... Args_
323 >
324 std::unique_ptr<tatami::DenseExtractor<oracle_, Value_, Index_> > populate_dense_internal(bool row, Index_ non_target_length, tatami::MaybeOracle<oracle_, Index_> oracle, Args_&& ... args) const {
325 std::unique_ptr<tatami::DenseExtractor<oracle_, Value_, Index_> > output;
326
327 Index_ max_target_chunk_length = max_primary_chunk_length(row);
328 tatami_chunked::SlabCacheStats<Index_> stats(
329 /* target_length = */ max_target_chunk_length,
330 /* non_target_length = */ non_target_length,
331 /* target_num_slabs = */ primary_num_chunks(row, max_target_chunk_length),
332 /* cache_size_in_bytes = */ my_cache_size_in_bytes,
333 /* element_size = */ sizeof(CachedValue_),
334 /* require_minimum_cache = */ my_require_minimum_cache
335 );
336
337 const auto& map = chunk_map(row);
338 const auto& ticks = chunk_ticks(row);
339 bool solo = (stats.max_slabs_in_cache == 0);
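// If the cache cannot hold even a single slab, fall back to a "solo" extractor
// below that fetches one row/column per request from R without any caching.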
340
341#ifdef TATAMI_R_PARALLELIZE_UNKNOWN
342 // This involves some Rcpp initializations, so we lock it just in case.
343 auto& mexec = executor();
344 mexec.run([&]() -> void {
345#endif
346
347 if (!my_sparse) {
348 if (solo) {
349 typedef FromDense_<true, oracle_, Value_, Index_, CachedValue_> ShortDense;
350 output.reset(new ShortDense(my_original_seed, my_dense_extractor, row, std::move(oracle), std::forward<Args_>(args)..., ticks, map, stats));
351 } else {
352 typedef FromDense_<false, oracle_, Value_, Index_, CachedValue_> ShortDense;
353 output.reset(new ShortDense(my_original_seed, my_dense_extractor, row, std::move(oracle), std::forward<Args_>(args)..., ticks, map, stats));
354 }
355 } else {
356 if (solo) {
357 typedef FromSparse_<true, oracle_, Value_, Index_, CachedValue_, CachedIndex_> ShortSparse;
358 output.reset(new ShortSparse(my_original_seed, my_sparse_extractor, row, std::move(oracle), std::forward<Args_>(args)..., max_target_chunk_length, ticks, map, stats));
359 } else {
360 typedef FromSparse_<false, oracle_, Value_, Index_, CachedValue_, CachedIndex_> ShortSparse;
361 output.reset(new ShortSparse(my_original_seed, my_sparse_extractor, row, std::move(oracle), std::forward<Args_>(args)..., max_target_chunk_length, ticks, map, stats));
362 }
363 }
364
365#ifdef TATAMI_R_PARALLELIZE_UNKNOWN
366 });
367#endif
368
369 return output;
370 }
371
372 template<bool oracle_>
373 std::unique_ptr<tatami::DenseExtractor<oracle_, Value_, Index_> > populate_dense(bool row, tatami::MaybeOracle<oracle_, Index_> ora, const tatami::Options&) const {
374 Index_ non_target_dim = secondary_dim(row);
375 return populate_dense_internal<oracle_, UnknownMatrix_internal::DenseFull, UnknownMatrix_internal::DensifiedSparseFull>(row, non_target_dim, std::move(ora), non_target_dim);
376 }
377
378 template<bool oracle_>
379 std::unique_ptr<tatami::DenseExtractor<oracle_, Value_, Index_> > populate_dense(bool row, tatami::MaybeOracle<oracle_, Index_> ora, Index_ block_start, Index_ block_length, const tatami::Options&) const {
380 return populate_dense_internal<oracle_, UnknownMatrix_internal::DenseBlock, UnknownMatrix_internal::DensifiedSparseBlock>(row, block_length, std::move(ora), block_start, block_length);
381 }
382
383 template<bool oracle_>
384 std::unique_ptr<tatami::DenseExtractor<oracle_, Value_, Index_> > populate_dense(bool row, tatami::MaybeOracle<oracle_, Index_> ora, tatami::VectorPtr<Index_> indices_ptr, const tatami::Options&) const {
385 Index_ nidx = indices_ptr->size();
386 return populate_dense_internal<oracle_, UnknownMatrix_internal::DenseIndexed, UnknownMatrix_internal::DensifiedSparseIndexed>(row, nidx, std::move(ora), std::move(indices_ptr));
387 }
388
389public:
390 std::unique_ptr<tatami::MyopicDenseExtractor<Value_, Index_> > dense(bool row, const tatami::Options& opt) const {
391 return populate_dense<false>(row, false, opt);
392 }
393
394 std::unique_ptr<tatami::MyopicDenseExtractor<Value_, Index_> > dense(bool row, Index_ block_start, Index_ block_length, const tatami::Options& opt) const {
395 return populate_dense<false>(row, false, block_start, block_length, opt);
396 }
397
398 std::unique_ptr<tatami::MyopicDenseExtractor<Value_, Index_> > dense(bool row, tatami::VectorPtr<Index_> indices_ptr, const tatami::Options& opt) const {
399 return populate_dense<false>(row, false, std::move(indices_ptr), opt);
400 }
401
402 /**********************
403 *** Oracular dense ***
404 **********************/
405public:
406 std::unique_ptr<tatami::OracularDenseExtractor<Value_, Index_> > dense(bool row, std::shared_ptr<const tatami::Oracle<Index_> > ora, const tatami::Options& opt) const {
407 return populate_dense<true>(row, std::move(ora), opt);
408 }
409
410 std::unique_ptr<tatami::OracularDenseExtractor<Value_, Index_> > dense(bool row, std::shared_ptr<const tatami::Oracle<Index_> > ora, Index_ block_start, Index_ block_length, const tatami::Options& opt) const {
411 return populate_dense<true>(row, std::move(ora), block_start, block_length, opt);
412 }
413
414 std::unique_ptr<tatami::OracularDenseExtractor<Value_, Index_> > dense(bool row, std::shared_ptr<const tatami::Oracle<Index_> > ora, tatami::VectorPtr<Index_> indices_ptr, const tatami::Options& opt) const {
415 return populate_dense<true>(row, std::move(ora), std::move(indices_ptr), opt);
416 }
417
418 /*********************
419 *** Myopic sparse ***
420 *********************/
421public:
422 template<
423 bool oracle_,
424 template<bool, bool, typename, typename, typename, typename> class FromSparse_,
425 typename ... Args_
426 >
427 std::unique_ptr<tatami::SparseExtractor<oracle_, Value_, Index_> > populate_sparse_internal(
428 bool row,
429 Index_ non_target_length,
430 tatami::MaybeOracle<oracle_, Index_> oracle,
431 const tatami::Options& opt,
432 Args_&& ... args)
433 const {
434 std::unique_ptr<tatami::SparseExtractor<oracle_, Value_, Index_> > output;
435
436 Index_ max_target_chunk_length = max_primary_chunk_length(row);
437 tatami_chunked::SlabCacheStats<Index_> stats(
438 /* target_length = */ max_target_chunk_length,
439 /* non_target_length = */ non_target_length,
440 /* target_num_slabs = */ primary_num_chunks(row, max_target_chunk_length),
441 /* cache_size_in_bytes = */ my_cache_size_in_bytes,
442 /* element_size = */ (opt.sparse_extract_index ? sizeof(CachedIndex_) : 0) + (opt.sparse_extract_value ? sizeof(CachedValue_) : 0),
443 /* require_minimum_cache = */ my_require_minimum_cache
444 );
445
446 const auto& map = chunk_map(row);
447 const auto& ticks = chunk_ticks(row);
448 bool needs_value = opt.sparse_extract_value;
449 bool needs_index = opt.sparse_extract_index;
450 bool solo = stats.max_slabs_in_cache == 0;
451
452#ifdef TATAMI_R_PARALLELIZE_UNKNOWN
453 // This involves some Rcpp initializations, so we lock it just in case.
454 auto& mexec = executor();
455 mexec.run([&]() -> void {
456#endif
457
458 if (solo) {
459 typedef FromSparse_<true, oracle_, Value_, Index_, CachedValue_, CachedIndex_> ShortSparse;
460 output.reset(new ShortSparse(my_original_seed, my_sparse_extractor, row, std::move(oracle), std::forward<Args_>(args)..., max_target_chunk_length, ticks, map, stats, needs_value, needs_index));
461 } else {
462 typedef FromSparse_<false, oracle_, Value_, Index_, CachedValue_, CachedIndex_> ShortSparse;
463 output.reset(new ShortSparse(my_original_seed, my_sparse_extractor, row, std::move(oracle), std::forward<Args_>(args)..., max_target_chunk_length, ticks, map, stats, needs_value, needs_index));
464 }
465
466#ifdef TATAMI_R_PARALLELIZE_UNKNOWN
467 });
468#endif
469
470 return output;
471 }
472
473 template<bool oracle_>
474 std::unique_ptr<tatami::SparseExtractor<oracle_, Value_, Index_> > populate_sparse(bool row, tatami::MaybeOracle<oracle_, Index_> ora, const tatami::Options& opt) const {
475 Index_ non_target_dim = secondary_dim(row);
476 return populate_sparse_internal<oracle_, UnknownMatrix_internal::SparseFull>(row, non_target_dim, std::move(ora), opt, non_target_dim);
477 }
478
479 template<bool oracle_>
480 std::unique_ptr<tatami::SparseExtractor<oracle_, Value_, Index_> > populate_sparse(bool row, tatami::MaybeOracle<oracle_, Index_> ora, Index_ block_start, Index_ block_length, const tatami::Options& opt) const {
481 return populate_sparse_internal<oracle_, UnknownMatrix_internal::SparseBlock>(row, block_length, std::move(ora), opt, block_start, block_length);
482 }
483
484 template<bool oracle_>
485 std::unique_ptr<tatami::SparseExtractor<oracle_, Value_, Index_> > populate_sparse(bool row, tatami::MaybeOracle<oracle_, Index_> ora, tatami::VectorPtr<Index_> indices_ptr, const tatami::Options& opt) const {
486 Index_ nidx = indices_ptr->size();
487 return populate_sparse_internal<oracle_, UnknownMatrix_internal::SparseIndexed>(row, nidx, std::move(ora), opt, std::move(indices_ptr));
488 }
489
490public:
491 std::unique_ptr<tatami::MyopicSparseExtractor<Value_, Index_> > sparse(bool row, const tatami::Options& opt) const {
492 if (!my_sparse) {
493 return std::make_unique<tatami::FullSparsifiedWrapper<false, Value_, Index_> >(dense(row, opt), secondary_dim(row), opt);
494 } else {
495 return populate_sparse<false>(row, false, opt);
496 }
497 }
498
499 std::unique_ptr<tatami::MyopicSparseExtractor<Value_, Index_> > sparse(bool row, Index_ block_start, Index_ block_length, const tatami::Options& opt) const {
500 if (!my_sparse) {
501 return std::make_unique<tatami::BlockSparsifiedWrapper<false, Value_, Index_> >(dense(row, block_start, block_length, opt), block_start, block_length, opt);
502 } else {
503 return populate_sparse<false>(row, false, block_start, block_length, opt);
504 }
505 }
506
507 std::unique_ptr<tatami::MyopicSparseExtractor<Value_, Index_> > sparse(bool row, tatami::VectorPtr<Index_> indices_ptr, const tatami::Options& opt) const {
508 if (!my_sparse) {
509 auto index_copy = indices_ptr;
510 return std::make_unique<tatami::IndexSparsifiedWrapper<false, Value_, Index_> >(dense(row, std::move(indices_ptr), opt), std::move(index_copy), opt);
511 } else {
512 return populate_sparse<false>(row, false, std::move(indices_ptr), opt);
513 }
514 }
515
516 /**********************
517 *** Oracular sparse ***
518 **********************/
519public:
520 std::unique_ptr<tatami::OracularSparseExtractor<Value_, Index_> > sparse(bool row, std::shared_ptr<const tatami::Oracle<Index_> > ora, const tatami::Options& opt) const {
521 if (!my_sparse) {
522 return std::make_unique<tatami::FullSparsifiedWrapper<true, Value_, Index_> >(dense(row, std::move(ora), opt), secondary_dim(row), opt);
523 } else {
524 return populate_sparse<true>(row, std::move(ora), opt);
525 }
526 }
527
528 std::unique_ptr<tatami::OracularSparseExtractor<Value_, Index_> > sparse(bool row, std::shared_ptr<const tatami::Oracle<Index_> > ora, Index_ block_start, Index_ block_length, const tatami::Options& opt) const {
529 if (!my_sparse) {
530 return std::make_unique<tatami::BlockSparsifiedWrapper<true, Value_, Index_> >(dense(row, std::move(ora), block_start, block_length, opt), block_start, block_length, opt);
531 } else {
532 return populate_sparse<true>(row, std::move(ora), block_start, block_length, opt);
533 }
534 }
535
536 std::unique_ptr<tatami::OracularSparseExtractor<Value_, Index_> > sparse(bool row, std::shared_ptr<const tatami::Oracle<Index_> > ora, tatami::VectorPtr<Index_> indices_ptr, const tatami::Options& opt) const {
537 if (!my_sparse) {
538 auto index_copy = indices_ptr;
539 return std::make_unique<tatami::IndexSparsifiedWrapper<true, Value_, Index_> >(dense(row, std::move(ora), std::move(indices_ptr), opt), std::move(index_copy), opt);
540 } else {
541 return populate_sparse<true>(row, std::move(ora), std::move(indices_ptr), opt);
542 }
543 }
544};
545
546}
547
548#endif
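For context, here is a minimal usage sketch rather than anything defined in this header: an UnknownMatrix behaves like any other tatami::Matrix, so Rcpp-level code can wrap a DelayedArray-style seed and pull out rows through the usual extractor interface. The include path, the [[Rcpp::export]] wrapper and the fetch() call below follow common tatami/Rcpp conventions and are assumptions, not part of this file; calls must also occur where the R API is safe to use, i.e., on the main thread or inside the executor described in parallelize.hpp.

#include "Rcpp.h"
#include "tatami_r/UnknownMatrix.hpp"

#include <numeric>
#include <vector>

// [[Rcpp::export]]
double sum_of_first_row(Rcpp::RObject seed) {
    // Cap the cache at ~100 MB instead of deferring to getAutoBlockSize().
    tatami_r::UnknownMatrixOptions opt;
    opt.maximum_cache_size = 100000000;

    // The wrapper can be used like any other tatami::Matrix<double, int>.
    tatami_r::UnknownMatrix<double, int> mat(seed, opt);

    // Extract the first row densely; this calls back into R via
    // extract_array() / extract_sparse_array() under the hood.
    auto ext = mat.dense(/* row = */ true, tatami::Options());
    std::vector<double> buffer(mat.ncol());
    auto ptr = ext->fetch(0, buffer.data());
    return std::accumulate(ptr, ptr + mat.ncol(), 0.0);
}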