tatami
C++ API for different matrix representations
DelayedSubset.hpp
1#ifndef TATAMI_DELAYED_SUBSET_HPP
2#define TATAMI_DELAYED_SUBSET_HPP
3
4#include "utils.hpp"
5#include "../utils/Index_to_container.hpp"
6
7#include <algorithm>
8#include <memory>
9
10#include "sanisizer/sanisizer.hpp"
11
20namespace tatami {
21
25namespace DelayedSubset_internal {
26
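// 'collapsed' holds the sorted, deduplicated subset elements to request from the underlying matrix,
// while 'reindex[j]' is the position in 'collapsed' of the subset element for output position 'j'.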
27template<typename Index_>
28struct DenseParallelResults {
29 std::vector<Index_> collapsed;
30 std::vector<Index_> reindex;
31};
32
33template<typename Index_, class SubsetStorage_, class ToIndex_>
34DenseParallelResults<Index_> format_dense_parallel_base(const SubsetStorage_& subset, Index_ len, ToIndex_ to_index) {
35 std::vector<std::pair<Index_, Index_> > collected;
36 collected.reserve(len);
37 for (Index_ i = 0; i < len; ++i) {
38 collected.emplace_back(subset[to_index(i)], i);
39 }
40 std::sort(collected.begin(), collected.end());
41
42 DenseParallelResults<Index_> output;
43 if (collected.size()) {
44 output.collapsed.reserve(len);
45 resize_container_to_Index_size(output.reindex, len);
46
47 Index_ last = collected.front().first;
48 output.collapsed.push_back(last);
49 output.reindex[collected.front().second] = 0;
50
51 Index_ counter = 0;
52 for (Index_ i = 1; i < len; ++i) {
53 const auto& pp = collected[i];
54 if (pp.first != last) {
55 last = pp.first;
56 output.collapsed.push_back(last);
57 ++counter;
58 }
59 output.reindex[pp.second] = counter;
60 }
61 }
62
63 return output;
64}
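// For example (purely illustrative): a subset of {5, 2, 5, 1} yields 'collapsed = {1, 2, 5}' and 'reindex = {2, 1, 2, 0}',
// i.e., output position 'j' should be filled with the value extracted for 'collapsed[reindex[j]]'.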
65
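// Dense extraction along the non-subsetted dimension: each unique subset element is fetched once
// (via 'collapsed') and the result is expanded/permuted into the requested order (via 'reindex').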
66template<bool oracle_, typename Value_, typename Index_>
67class ParallelDense final : public DenseExtractor<oracle_, Value_, Index_> {
68public:
69 template<class SubsetStorage_>
70 ParallelDense(const Matrix<Value_, Index_>* matrix, const SubsetStorage_& subset, bool row, MaybeOracle<oracle_, Index_> oracle, const Options& opt) {
71 auto processed = format_dense_parallel_base<Index_>(subset, subset.size(), [&](Index_ i) -> Index_ { return i; });
72 initialize(matrix, std::move(processed), row, std::move(oracle), opt);
73 }
74
75 template<class SubsetStorage_>
76 ParallelDense(const Matrix<Value_, Index_>* matrix, const SubsetStorage_& subset, bool row, MaybeOracle<oracle_, Index_> oracle, Index_ block_start, Index_ block_length, const Options& opt) {
77 auto processed = format_dense_parallel_base<Index_>(subset, block_length, [&](Index_ i) -> Index_ { return i + block_start; });
78 initialize(matrix, std::move(processed), row, std::move(oracle), opt);
79 }
80
81 template<class SubsetStorage_>
82 ParallelDense(const Matrix<Value_, Index_>* matrix, const SubsetStorage_& subset, bool row, MaybeOracle<oracle_, Index_> oracle, VectorPtr<Index_> indices_ptr, const Options& opt) {
83 const auto& indices = *indices_ptr;
84 auto processed = format_dense_parallel_base<Index_>(subset, indices.size(), [&](Index_ i) -> Index_ { return indices[i]; });
85 initialize(matrix, std::move(processed), row, std::move(oracle), opt);
86 }
87
88private:
89 void initialize(const Matrix<Value_, Index_>* matrix, DenseParallelResults<Index_> processed, bool row, MaybeOracle<oracle_, Index_> oracle, const Options& opt) {
90 resize_container_to_Index_size(my_holding_vbuffer, processed.collapsed.size()); // processed.collapsed.size() should fit in an Index_, so this cast is safe.
91 my_ext = new_extractor<false, oracle_>(matrix, row, std::move(oracle), std::move(processed.collapsed), opt);
92 my_reindex.swap(processed.reindex);
93 }
94
95public:
96 const Value_* fetch(Index_ i, Value_* buffer) {
97 auto src = my_ext->fetch(i, my_holding_vbuffer.data());
98
99 // 'src' and 'buffer' should not point to the same array.
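        // The loop below performs 'buffer[j] = src[my_reindex[j]]' for each output position 'j',
        // expanding any duplicated subset entries from the collapsed extraction.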
100 auto copy = buffer;
101 for (auto p : my_reindex) {
102 *copy = src[p];
103 ++copy;
104 }
105
106 return buffer;
107 }
108
109private:
110 std::unique_ptr<DenseExtractor<oracle_, Value_, Index_> > my_ext;
111 std::vector<Value_> my_holding_vbuffer;
112 std::vector<Index_> my_reindex;
113};
114
115template<typename Index_>
116struct SparseParallelReindex {
117 // This is a bit complicated to explain.
118 // Let 'x = pool_ptrs[i - offset]'.
119 // Let 'y = pool_ptrs[i - offset + 1]'.
120 // Let 'z' denote any integer in '[x, y)'.
121 // Then 'indices[pool_indices[z]]' is equal to 'i', where 'indices' is the subsetting vector.
122 // The general idea is that 'pool_indices[z]' can be used to fill the 'SparseRange::index' on output.
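    // For example (purely illustrative): a subset of {5, 2, 5, 1} gives 'offset = 1', 'pool_indices = {3, 1, 0, 2}' and 'pool_ptrs = {0, 1, 2, 0, 2, 4}'.
    // A look-up for i = 5 then uses 'pool_ptrs[4] = 2' and 'pool_ptrs[5] = 4', i.e., 'pool_indices[2] = 0' and 'pool_indices[3] = 2' are the output positions holding 5.
    // Entries of 'pool_ptrs' for values absent from the subset (here, i = 4) are never consulted.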
123 std::vector<Index_> pool_ptrs; // this can be Index_ as the length of 'pool_indices' is no greater than the output dimension extent.
124 std::vector<Index_> pool_indices;
125 Index_ offset;
126};
127
128template<typename Index_>
129struct SparseParallelResults {
130 std::vector<Index_> collapsed;
131 SparseParallelReindex<Index_> reindex;
132};
133
134template<typename Index_, class SubsetStorage_, class ToIndex_>
135SparseParallelResults<Index_> format_sparse_parallel_base(const SubsetStorage_& indices, Index_ len, ToIndex_ to_index) {
136 std::vector<std::pair<Index_, Index_> > collected;
137 collected.reserve(len);
138 for (Index_ i = 0; i < len; ++i) {
139 auto curdex = to_index(i);
140 collected.emplace_back(indices[curdex], curdex);
141 }
142 std::sort(collected.begin(), collected.end());
143
144 SparseParallelResults<Index_> output;
145
146 if (collected.size()) {
147 output.collapsed.reserve(len);
148 output.reindex.pool_indices.reserve(len);
149 Index_ first = collected.front().first;
150
151 // 'pool_ptrs' is a vector that enables look-up according to the indices of the underlying array.
152 // To avoid the need to allocate a vector of length equal to the underlying array's dimension, we only consider the extremes of 'indices'.
153 // We allocate 'pool_ptrs' to have length equal to the range of 'indices'... plus 1, as we're storing cumulative pointers.
154 // 'offset' defines the lower bound that must be subtracted from the array indices to get an index into 'pool_ptrs'.
155 output.reindex.offset = first;
156 Index_ allocation = collected.back().first - output.reindex.offset + 1;
157 output.reindex.pool_ptrs.resize(sanisizer::sum<decltype(output.reindex.pool_ptrs.size())>(allocation, 1));
158
159 Index_ counter = 0; // this is later set to positions within 'pool_ptrs', which are bounded by the range of the subset values and so fit in an Index_ without overflow.
160 output.reindex.pool_ptrs[counter] = 0;
161 ++counter;
162 output.reindex.pool_indices.push_back(collected.front().second);
163 output.reindex.pool_ptrs[counter] = 1;
164 output.collapsed.push_back(first);
165 auto last = first;
166
167 for (Index_ i = 1; i < len; ++i) {
168 const auto& pp = collected[i];
169 auto current = pp.first;
170 if (current == last) {
171 output.reindex.pool_indices.push_back(pp.second);
172 ++(output.reindex.pool_ptrs[counter]); // contents of pool_ptrs will never be greater than len, so this won't overflow.
173 continue;
174 }
175
176 Index_ pool_size = output.reindex.pool_indices.size();
177 counter = current - output.reindex.offset;
178 output.reindex.pool_ptrs[counter] = pool_size; // any overwrite is safe as the value is unchanged.
179 ++counter;
180 output.reindex.pool_indices.push_back(pp.second);
181 output.reindex.pool_ptrs[counter] = pool_size + 1;
182 output.collapsed.push_back(current);
183 last = current;
184 }
185 }
186
187 return output;
188}
189
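// Sparse extraction along the non-subsetted dimension: the unique subset elements are fetched once
// (via 'collapsed') and each reported structural non-zero is expanded into one output entry per
// duplicate, using the 'SparseParallelReindex' pools to recover the output indices.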
190template<bool oracle_, typename Value_, typename Index_>
191class ParallelSparse final : public SparseExtractor<oracle_, Value_, Index_> {
192public:
193 template<class SubsetStorage_>
194 ParallelSparse(const Matrix<Value_, Index_>* mat, const SubsetStorage_& subset, bool row, MaybeOracle<oracle_, Index_> oracle, const Options& opt) {
195 auto processed = format_sparse_parallel_base<Index_>(subset, subset.size(), [](Index_ i) -> Index_ { return i; });
196 initialize(mat, std::move(processed), subset.size(), row, std::move(oracle), opt);
197 }
198
199 template<class SubsetStorage_>
200 ParallelSparse(const Matrix<Value_, Index_>* mat, const SubsetStorage_& subset, bool row, MaybeOracle<oracle_, Index_> oracle, Index_ block_start, Index_ block_length, const Options& opt) {
201 auto processed = format_sparse_parallel_base<Index_>(subset, block_length, [&](Index_ i) -> Index_ { return i + block_start; });
202 initialize(mat, std::move(processed), block_length, row, std::move(oracle), opt);
203 }
204
205 template<class SubsetStorage_>
206 ParallelSparse(const Matrix<Value_, Index_>* mat, const SubsetStorage_& subset, bool row, MaybeOracle<oracle_, Index_> oracle, VectorPtr<Index_> indices_ptr, const Options& opt) {
207 const auto& indices = *indices_ptr;
208 auto processed = format_sparse_parallel_base<Index_>(subset, indices.size(), [&](Index_ i) -> Index_ { return indices[i]; });
209 initialize(mat, std::move(processed), indices.size(), row, std::move(oracle), opt);
210 }
211
212private:
213 void initialize(const Matrix<Value_, Index_>* mat, SparseParallelResults<Index_> processed, Index_ extent, bool row, MaybeOracle<oracle_, Index_> oracle, Options opt) {
214 Index_ num_collapsed = processed.collapsed.size(); // number of unique subset indices should be no greater than the extent.
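        // 'my_shift' is the number of duplicated subset entries; fetch() asks the underlying
        // extractor to write at 'buffer + my_shift' so that in-place expansion of duplicates
        // towards the front of 'buffer' never overtakes its source.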
215 my_shift = extent - num_collapsed;
216
217 my_needs_value = opt.sparse_extract_value;
218 my_needs_index = opt.sparse_extract_index;
219 my_needs_sort = opt.sparse_ordered_index;
220
221 if (my_needs_sort && my_needs_value) {
222 my_sortspace.reserve(extent);
223 }
224
225 // We need to extract indices for sorting and expansion purposes, even if they weren't actually requested.
226 opt.sparse_extract_index = true;
227 if (!my_needs_index) {
228 resize_container_to_Index_size(my_holding_ibuffer, num_collapsed);
229 }
230
231 my_ext = new_extractor<true, oracle_>(mat, row, std::move(oracle), std::move(processed.collapsed), opt);
232 my_reindex = std::move(processed.reindex);
233 }
234
235public:
236 SparseRange<Value_, Index_> fetch(Index_ i, Value_* vbuffer, Index_* ibuffer) {
237 auto vinit = (my_needs_value ? vbuffer + my_shift : NULL);
238 auto iinit = (my_needs_index ? ibuffer + my_shift : my_holding_ibuffer.data());
239 auto input = my_ext->fetch(i, vinit, iinit);
240
241 if (!my_needs_sort) {
242 // Pointers in 'input' and the two 'buffer' pointers may optionally point
243 // to overlapping arrays as long as each 'buffer' pointer precedes its
244 // corresponding pointer in 'input'. The idea is that the expansion of
245 // values into, e.g., 'vbuffer' will cause it to catch up to 'input.value'
246 // without clobbering any values in the latter. This assumes that
247 // 'input.value' has been shifted enough to make space for expansion; the
248 // required shift depends on the number of duplicates.
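            // Continuing the illustrative example above ('offset = 1', 'pool_ptrs = {0, 1, 2, 0, 2, 4}', 'pool_indices = {3, 1, 0, 2}'):
            // an 'input' with indices {1, 2, 5} expands to output indices {3, 1, 0, 2}, with the value for 5 repeated once per duplicate.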
249 Index_ count = 0;
250 auto vcopy = vbuffer;
251 auto icopy = ibuffer;
252
253 auto vsrc = input.value;
254 bool replace_value = my_needs_value && vsrc != vcopy;
255
256 for (Index_ i = 0; i < input.number; ++i) {
257 auto lookup = input.index[i] - my_reindex.offset;
258 auto start = my_reindex.pool_ptrs[lookup];
259 auto num = my_reindex.pool_ptrs[lookup + 1] - start;
260 count += num;
261
262 if (replace_value) {
263 auto val = *vsrc; // make a copy just in case 'vcopy' and 'input.value' overlap.
264 std::fill_n(vcopy, num, val);
265 vcopy += num;
266 ++vsrc;
267 replace_value = (vcopy != vsrc); // if we've caught up, there no need to do this replacement.
268 }
269
270 if (my_needs_index) {
271 // Again, 'icopy' will eventually catch up to 'input.index' if
272 // they point to overlapping arrays. But we still need to
273 // overwrite the indices (with 'pool_indices') even after catching up, so we can't
274 // short-circuit like we did with 'replace_value'.
275 std::copy_n(my_reindex.pool_indices.begin() + start, num, icopy);
276 icopy += num;
277 }
278 }
279
280 input.number = count;
281 if (my_needs_value) {
282 input.value = vbuffer;
283 }
284 if (my_needs_index) {
285 input.index = ibuffer;
286 } else {
287 input.index = NULL;
288 }
289
290 } else if (my_needs_value) {
291 // This does not require any careful consideration of the overlaps
292 // between 'input' and 'buffers', as we're copying things into
293 // 'my_sortspace' anyway before copying them back into 'buffer'.
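            // E.g., for the illustrative example above, 'my_sortspace' holds {(3, v1), (1, v2), (0, v5), (2, v5)}
            // (where 'vX' is the extracted value for parent index X), which sorts to {(0, v5), (1, v2), (2, v5), (3, v1)}.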
294 my_sortspace.clear();
295 for (Index_ i = 0; i < input.number; ++i) {
296 auto val = input.value[i];
297 auto lookup = input.index[i] - my_reindex.offset;
298 auto start = my_reindex.pool_ptrs[lookup];
299 auto end = my_reindex.pool_ptrs[lookup + 1];
300 for (Index_ j = start; j < end; ++j) {
301 my_sortspace.emplace_back(my_reindex.pool_indices[j], val);
302 }
303 }
304 std::sort(my_sortspace.begin(), my_sortspace.end());
305 input.number = my_sortspace.size();
306
307 auto vcopy = vbuffer;
308 for (const auto& ss : my_sortspace) {
309 *vcopy = ss.second;
310 ++vcopy;
311 }
312 input.value = vbuffer;
313
314 if (my_needs_index) {
315 auto icopy = ibuffer;
316 for (const auto& ss : my_sortspace) {
317 *icopy = ss.first;
318 ++icopy;
319 }
320 input.index = ibuffer;
321 } else {
322 input.index = NULL;
323 }
324
325 } else {
326 // Again, 'input.index' and 'ibuffer' may point to overlapping arrays,
327 // as long as the latter precedes the former; expansion into the latter
328 // will allow it to catch up to the former without clobbering, assuming
329 // that the latter was shifted back to provide enough space.
330 Index_ count = 0;
331 auto icopy = ibuffer;
332
333 for (Index_ i = 0; i < input.number; ++i) {
334 auto lookup = input.index[i] - my_reindex.offset;
335 auto start = my_reindex.pool_ptrs[lookup];
336 auto num = my_reindex.pool_ptrs[lookup + 1] - start;
337 count += num;
338
339 if (my_needs_index) {
340 std::copy_n(my_reindex.pool_indices.begin() + start, num, icopy);
341 icopy += num;
342 }
343 }
344
345 input.number = count;
346 if (my_needs_index) {
347 std::sort(ibuffer, ibuffer + count);
348 input.index = ibuffer;
349 } else {
350 input.index = NULL;
351 }
352 }
353
354 return input;
355 }
356
357private:
358 std::unique_ptr<SparseExtractor<oracle_, Value_, Index_> > my_ext;
359 bool my_needs_value, my_needs_index, my_needs_sort;
360 SparseParallelReindex<Index_> my_reindex;
361 std::vector<std::pair<Index_, Value_> > my_sortspace;
362 std::vector<Index_> my_holding_ibuffer;
363 Index_ my_shift;
364};
365
366}
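/**
 * @brief Delayed subsetting of a matrix with general indices.
 */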
382template<typename Value_, typename Index_, class SubsetStorage_>
383class DelayedSubset final : public Matrix<Value_, Index_> {
384public:
392 DelayedSubset(std::shared_ptr<const Matrix<Value_, Index_> > matrix, SubsetStorage_ subset, bool by_row) :
393 my_matrix(std::move(matrix)), my_subset(std::move(subset)), my_by_row(by_row)
394 {
395 sanisizer::can_cast<Index_>(my_subset.size());
396 }
397
398private:
399 std::shared_ptr<const Matrix<Value_, Index_> > my_matrix;
400 SubsetStorage_ my_subset;
401 bool my_by_row;
402
403public:
404 Index_ nrow() const {
405 if (my_by_row) {
406 return my_subset.size();
407 } else {
408 return my_matrix->nrow();
409 }
410 }
411
412 Index_ ncol() const {
413 if (my_by_row) {
414 return my_matrix->ncol();
415 } else {
416 return my_subset.size();
417 }
418 }
419
420 bool is_sparse() const {
421 return my_matrix->is_sparse();
422 }
423
424 double is_sparse_proportion() const {
425 return my_matrix->is_sparse_proportion();
426 }
427
428 bool prefer_rows() const {
429 return my_matrix->prefer_rows();
430 }
431
432 double prefer_rows_proportion() const {
433 return my_matrix->prefer_rows_proportion();
434 }
435
436 bool uses_oracle(bool row) const {
437 return my_matrix->uses_oracle(row);
438 }
439
440 using Matrix<Value_, Index_>::dense;
441
442 using Matrix<Value_, Index_>::sparse;
443
444 /********************
445 *** Myopic dense ***
446 ********************/
447private:
448 template<typename ... Args_>
449 std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > populate_myopic_dense(bool row, Args_&& ... args) const {
450 if (row == my_by_row) {
451 return std::make_unique<subset_utils::MyopicPerpendicularDense<Value_, Index_, SubsetStorage_> >(my_matrix.get(), my_subset, row, std::forward<Args_>(args)...);
452 } else {
453 return std::make_unique<DelayedSubset_internal::ParallelDense<false, Value_, Index_> >(my_matrix.get(), my_subset, row, false, std::forward<Args_>(args)...);
454 }
455 }
456
457public:
458 std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > dense(bool row, const Options& opt) const {
459 return populate_myopic_dense(row, opt);
460 }
461
462 std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > dense(bool row, Index_ block_start, Index_ block_length, const Options& opt) const {
463 return populate_myopic_dense(row, block_start, block_length, opt);
464 }
465
466 std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > dense(bool row, VectorPtr<Index_> my_subset_ptr, const Options& opt) const {
467 return populate_myopic_dense(row, std::move(my_subset_ptr), opt);
468 }
469
470 /*********************
471 *** Myopic sparse ***
472 *********************/
473private:
474 template<typename ... Args_>
475 std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > populate_myopic_sparse(bool row, Args_&& ... args) const {
476 if (row == my_by_row) {
477 return std::make_unique<subset_utils::MyopicPerpendicularSparse<Value_, Index_, SubsetStorage_> >(my_matrix.get(), my_subset, row, std::forward<Args_>(args)...);
478 } else {
479 return std::make_unique<DelayedSubset_internal::ParallelSparse<false, Value_, Index_> >(my_matrix.get(), my_subset, row, false, std::forward<Args_>(args)...);
480 }
481 }
482
483public:
484 std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(bool row, const Options& opt) const {
485 return populate_myopic_sparse(row, opt);
486 }
487
488 std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(bool row, Index_ block_start, Index_ block_length, const Options& opt) const {
489 return populate_myopic_sparse(row, block_start, block_length, opt);
490 }
491
492 std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(bool row, VectorPtr<Index_> my_subset_ptr, const Options& opt) const {
493 return populate_myopic_sparse(row, std::move(my_subset_ptr), opt);
494 }
495
496 /**********************
497 *** Oracular dense ***
498 **********************/
499private:
500 template<typename ... Args_>
501 std::unique_ptr<OracularDenseExtractor<Value_, Index_> > populate_oracular_dense(bool row, std::shared_ptr<const Oracle<Index_> > oracle, Args_&& ... args) const {
502 if (row == my_by_row) {
503 return std::make_unique<subset_utils::OracularPerpendicularDense<Value_, Index_> >(my_matrix.get(), my_subset, row, std::move(oracle), std::forward<Args_>(args)...);
504 } else {
505 return std::make_unique<DelayedSubset_internal::ParallelDense<true, Value_, Index_> >(my_matrix.get(), my_subset, row, std::move(oracle), std::forward<Args_>(args)...);
506 }
507 }
508
509public:
510 std::unique_ptr<OracularDenseExtractor<Value_, Index_> > dense(bool row, std::shared_ptr<const Oracle<Index_> > oracle, const Options& opt) const {
511 return populate_oracular_dense(row, std::move(oracle), opt);
512 }
513
514 std::unique_ptr<OracularDenseExtractor<Value_, Index_> > dense(bool row, std::shared_ptr<const Oracle<Index_> > oracle, Index_ block_start, Index_ block_length, const Options& opt) const {
515 return populate_oracular_dense(row, std::move(oracle), block_start, block_length, opt);
516 }
517
518 std::unique_ptr<OracularDenseExtractor<Value_, Index_> > dense(bool row, std::shared_ptr<const Oracle<Index_> > oracle, VectorPtr<Index_> my_subset_ptr, const Options& opt) const {
519 return populate_oracular_dense(row, std::move(oracle), std::move(my_subset_ptr), opt);
520 }
521
522 /***********************
523 *** Oracular sparse ***
524 ***********************/
525private:
526 template<typename ... Args_>
527 std::unique_ptr<OracularSparseExtractor<Value_, Index_> > populate_oracular_sparse(bool row, std::shared_ptr<const Oracle<Index_> > oracle, Args_&& ... args) const {
528 if (row == my_by_row) {
529 return std::make_unique<subset_utils::OracularPerpendicularSparse<Value_, Index_> >(my_matrix.get(), my_subset, row, std::move(oracle), std::forward<Args_>(args)...);
530 } else {
531 return std::make_unique<DelayedSubset_internal::ParallelSparse<true, Value_, Index_> >(my_matrix.get(), my_subset, row, std::move(oracle), std::forward<Args_>(args)...);
532 }
533 }
534
535public:
536 std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(bool row, std::shared_ptr<const Oracle<Index_> > oracle, const Options& opt) const {
537 return populate_oracular_sparse(row, std::move(oracle), opt);
538 }
539
540 std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(bool row, std::shared_ptr<const Oracle<Index_> > oracle, Index_ block_start, Index_ block_length, const Options& opt) const {
541 return populate_oracular_sparse(row, std::move(oracle), block_start, block_length, opt);
542 }
543
544 std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(bool row, std::shared_ptr<const Oracle<Index_> > oracle, VectorPtr<Index_> my_subset_ptr, const Options& opt) const {
545 return populate_oracular_sparse(row, std::move(oracle), std::move(my_subset_ptr), opt);
546 }
547};
548
549}
550
551#endif
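As a minimal usage sketch (not part of the header itself): the snippet below constructs a DelayedSubset over a small dense matrix and extracts one column through the ParallelDense path. It assumes the usual tatami/tatami.hpp umbrella header and the DenseRowMatrix class from the main library; otherwise only the constructor and dense() overloads shown in this file are used.

#include "tatami/tatami.hpp"

#include <memory>
#include <vector>

int main() {
    // A 4 x 5 row-major dense matrix; DenseRowMatrix is assumed to be available from the main tatami library.
    std::vector<double> values(20);
    auto mat = std::make_shared<tatami::DenseRowMatrix<double, int> >(4, 5, std::move(values));

    // Take rows {3, 1, 3, 0}; duplicates and arbitrary ordering are allowed.
    tatami::DelayedSubset<double, int, std::vector<int> > sub(mat, std::vector<int>{ 3, 1, 3, 0 }, /* by_row = */ true);

    // Extract the first column of the subsetted matrix; as the subsetted dimension (rows)
    // runs along each extracted column, this exercises the ParallelDense machinery above.
    auto ext = sub.dense(/* row = */ false, tatami::Options());
    std::vector<double> buffer(sub.nrow());
    const double* ptr = ext->fetch(0, buffer.data());
    // 'ptr' now refers to column 0 of rows {3, 1, 3, 0} of the original matrix.
    (void) ptr;
    return 0;
}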