tatami
C++ API for different matrix representations
Loading...
Searching...
No Matches
DelayedSubset.hpp
Go to the documentation of this file.
1#ifndef TATAMI_DELAYED_SUBSET_HPP
2#define TATAMI_DELAYED_SUBSET_HPP
3
#include "utils.hpp"
#include "../base/Matrix.hpp" // restored: Matrix, Options, Oracle and VectorPtr are used directly below.

#include <algorithm>
#include <memory>
#include <utility>
#include <vector>

#include "sanisizer/sanisizer.hpp"
20namespace tatami {
21
25namespace DelayedSubset_internal {
26
// Output of format_dense_parallel_base(): the deduplicated subset plus the
// mapping needed to expand extracted values back into the requested order.
template<typename Index_>
struct DenseParallelResults {
    // Sorted, deduplicated subset indices to request from the underlying matrix.
    std::vector<Index_> collapsed;
    // For each requested element 'i', 'reindex[i]' is the position in 'collapsed'
    // holding its subset index, i.e., 'collapsed[reindex[i]] == subset[to_index(i)]'.
    std::vector<Index_> reindex;
};
32
33template<typename Index_, class SubsetStorage_, class ToIndex_>
34DenseParallelResults<Index_> format_dense_parallel_base(const SubsetStorage_& subset, const Index_ len, const ToIndex_ to_index) {
35 std::vector<std::pair<Index_, Index_> > collected;
36 collected.reserve(len);
37 for (Index_ i = 0; i < len; ++i) {
38 collected.emplace_back(subset[to_index(i)], i);
39 }
40 std::sort(collected.begin(), collected.end());
41
42 DenseParallelResults<Index_> output;
43 if (collected.size()) {
44 output.collapsed.reserve(len);
45 resize_container_to_Index_size(output.reindex, len);
46
47 Index_ last = collected.front().first;
48 output.collapsed.push_back(last);
49 output.reindex[collected.front().second] = 0;
50
51 Index_ counter = 0;
52 for (Index_ i = 1; i < len; ++i) {
53 const auto& pp = collected[i];
54 if (pp.first != last) {
55 last = pp.first;
56 output.collapsed.push_back(last);
57 ++counter;
58 }
59 output.reindex[pp.second] = counter;
60 }
61 }
62
63 return output;
64}
65
66template<bool oracle_, typename Value_, typename Index_>
67class ParallelDense final : public DenseExtractor<oracle_, Value_, Index_> {
68public:
69 template<class SubsetStorage_>
70 ParallelDense(
71 const Matrix<Value_, Index_>& matrix,
72 const SubsetStorage_& subset,
73 const bool row,
74 MaybeOracle<oracle_, Index_> oracle,
75 const Options& opt
76 ) {
77 auto processed = format_dense_parallel_base<Index_>(subset, subset.size(), [&](const Index_ i) -> Index_ { return i; });
78 initialize(matrix, std::move(processed), row, std::move(oracle), opt);
79 }
80
81 template<class SubsetStorage_>
82 ParallelDense(
83 const Matrix<Value_, Index_>& matrix,
84 const SubsetStorage_& subset,
85 const bool row,
86 MaybeOracle<oracle_, Index_> oracle,
87 const Index_ block_start,
88 const Index_ block_length,
89 const Options& opt
90 ) {
91 auto processed = format_dense_parallel_base<Index_>(subset, block_length, [&](const Index_ i) -> Index_ { return i + block_start; });
92 initialize(matrix, std::move(processed), row, std::move(oracle), opt);
93 }
94
95 template<class SubsetStorage_>
96 ParallelDense(
97 const Matrix<Value_, Index_>& matrix,
98 const SubsetStorage_& subset,
99 const bool row,
100 MaybeOracle<oracle_, Index_> oracle,
101 VectorPtr<Index_> indices_ptr,
102 const Options& opt
103 ) {
104 const auto& indices = *indices_ptr;
105 auto processed = format_dense_parallel_base<Index_>(subset, indices.size(), [&](const Index_ i) -> Index_ { return indices[i]; });
106 initialize(matrix, std::move(processed), row, std::move(oracle), opt);
107 }
108
109private:
110 void initialize(
111 const Matrix<Value_, Index_>& matrix,
112 DenseParallelResults<Index_> processed,
113 const bool row,
114 MaybeOracle<oracle_, Index_> oracle,
115 const Options& opt
116 ) {
117 resize_container_to_Index_size(my_holding_vbuffer, processed.collapsed.size()); // processed.collapsed.size() should fit in an Index_, so this cast is safe.
118 my_ext = new_extractor<false, oracle_>(matrix, row, std::move(oracle), std::move(processed.collapsed), opt);
119 my_reindex.swap(processed.reindex);
120 }
121
122public:
123 const Value_* fetch(const Index_ i, Value_* const buffer) {
124 const auto src = my_ext->fetch(i, my_holding_vbuffer.data());
125
126 // 'src' and 'buffer' should not point to the same array.
127 auto copy = buffer;
128 for (auto p : my_reindex) {
129 *copy= src[p];
130 ++copy;
131 }
132
133 return buffer;
134 }
135
136private:
137 std::unique_ptr<DenseExtractor<oracle_, Value_, Index_> > my_ext;
138 std::vector<Value_> my_holding_vbuffer;
139 std::vector<Index_> my_reindex;
140};
141
// Lookup structure that maps an index of the underlying (pre-subset) dimension
// back to the positions of the subset that requested it.
template<typename Index_>
struct SparseParallelReindex {
    // This is a bit complicated to explain.
    // Let 'x = pool_ptrs[i - offset]'.
    // Let 'y = pool_ptrs[i - offset + 1]'.
    // Let 'z' denote any integer in '[x, y)'.
    // In which case, 'indices[pool_indices[z]]' is equal to 'i'.
    // The general idea is that 'pool_indices[z]' can be used to fill the 'SparseRange::index' on output,
    // with one entry per (possibly duplicated) occurrence of 'i' in the subset.
    std::vector<Index_> pool_ptrs; // this can be Index_ as the length of 'pool_indices' is no greater than the output dimension extent.
    std::vector<Index_> pool_indices;
    Index_ offset; // lower bound to subtract from an underlying index before using it to index 'pool_ptrs'.
};
154
// Output of format_sparse_parallel_base(): the deduplicated subset plus the
// reindexing structure used to expand extracted sparse elements on output.
template<typename Index_>
struct SparseParallelResults {
    // Sorted, deduplicated subset indices to request from the underlying matrix.
    std::vector<Index_> collapsed;
    // Expansion information mapping extracted elements back to subset positions.
    SparseParallelReindex<Index_> reindex;
};
160
// Collapse the requested subset elements into sorted, unique indices for
// extraction from the underlying matrix, along with a 'SparseParallelReindex'
// that expands each extracted element back into its (possibly duplicated)
// subset positions. 'len' is the number of requested elements and
// 'to_index(i)' maps the i-th request to a position in 'indices'.
template<typename Index_, class SubsetStorage_, class ToIndex_>
SparseParallelResults<Index_> format_sparse_parallel_base(const SubsetStorage_& indices, const Index_ len, const ToIndex_ to_index) {
    // Pair each subset value with its subset position, then sort by value so
    // that duplicate values become adjacent.
    std::vector<std::pair<Index_, Index_> > collected;
    collected.reserve(len);
    for (Index_ i = 0; i < len; ++i) {
        const auto curdex = to_index(i);
        collected.emplace_back(indices[curdex], curdex);
    }
    std::sort(collected.begin(), collected.end());

    SparseParallelResults<Index_> output;

    if (collected.size()) {
        output.collapsed.reserve(len);
        output.reindex.pool_indices.reserve(len);
        const Index_ first = collected.front().first;

        // 'pool_ptrs' is a vector that enables look-up according to the indices of the underlying array.
        // To avoid the need to allocate a vector of length equal to the underlying array's dimension, we only consider the extremes of 'indices'.
        // We allocate 'pool_ptrs' to have length equal to the range of 'indices'... plus 1, as we're storing cumulative pointers.
        // 'offset' defines the lower bound that must be subtracted from the array indices to get an index into 'pool_ptrs'.
        output.reindex.offset = first;
        const Index_ allocation = collected.back().first - output.reindex.offset + 1;
        output.reindex.pool_ptrs.resize(sanisizer::sum<I<decltype(output.reindex.pool_ptrs.size())> >(attest_for_Index(allocation), 1));

        // Seed the structures with the first (smallest) collected pair before
        // scanning the rest; this avoids an emptiness check inside the loop.
        Index_ counter = 0; // this can never be larger than 'len', so using Index_ will not lead to overflows.
        output.reindex.pool_ptrs[counter] = 0;
        ++counter;
        output.reindex.pool_indices.push_back(collected.front().second);
        output.reindex.pool_ptrs[counter] = 1;
        output.collapsed.push_back(first);
        auto last = first;

        for (Index_ i = 1; i < len; ++i) {
            const auto& pp = collected[i];
            const auto current = pp.first;
            if (current == last) {
                // Duplicate of the previous value: extend the current pool run.
                output.reindex.pool_indices.push_back(pp.second);
                ++(output.reindex.pool_ptrs[counter]); // contents of pool_ptrs will never be greater than len, so this won't overflow.
                continue;
            }

            // New unique value: jump 'counter' to its slot in 'pool_ptrs' and
            // record the start/end of its run in 'pool_indices'. Entries of
            // 'pool_ptrs' between runs are never read, as look-ups only occur
            // at indices that are actually present in 'collapsed'.
            const Index_ pool_size = output.reindex.pool_indices.size();
            counter = current - output.reindex.offset;
            output.reindex.pool_ptrs[counter] = pool_size; // any overwrite is safe as the value is unchanged.
            ++counter;
            output.reindex.pool_indices.push_back(pp.second);
            output.reindex.pool_ptrs[counter] = pool_size + 1;
            output.collapsed.push_back(current);
            last = current;
        }
    }

    return output;
}
216
217template<bool oracle_, typename Value_, typename Index_>
218class ParallelSparse final : public SparseExtractor<oracle_, Value_, Index_> {
219public:
220 template<class SubsetStorage_>
221 ParallelSparse(
222 const Matrix<Value_, Index_>& mat,
223 const SubsetStorage_& subset,
224 const bool row,
225 MaybeOracle<oracle_, Index_> oracle,
226 const Options& opt
227 ) {
228 auto processed = format_sparse_parallel_base<Index_>(subset, subset.size(), [](const Index_ i) -> Index_ { return i; });
229 initialize(mat, std::move(processed), subset.size(), row, std::move(oracle), opt);
230 }
231
232 template<class SubsetStorage_>
233 ParallelSparse(
234 const Matrix<Value_, Index_>& mat,
235 const SubsetStorage_& subset,
236 const bool row,
237 MaybeOracle<oracle_, Index_> oracle,
238 const Index_ block_start,
239 const Index_ block_length,
240 const Options& opt
241 ) {
242 auto processed = format_sparse_parallel_base<Index_>(subset, block_length, [&](const Index_ i) -> Index_ { return i + block_start; });
243 initialize(mat, std::move(processed), block_length, row, std::move(oracle), opt);
244 }
245
246 template<class SubsetStorage_>
247 ParallelSparse(
248 const Matrix<Value_, Index_>& mat,
249 const SubsetStorage_& subset,
250 const bool row,
251 MaybeOracle<oracle_, Index_> oracle,
252 VectorPtr<Index_> indices_ptr,
253 const Options& opt
254 ) {
255 const auto& indices = *indices_ptr;
256 auto processed = format_sparse_parallel_base<Index_>(subset, indices.size(), [&](const Index_ i) -> Index_ { return indices[i]; });
257 initialize(mat, std::move(processed), indices.size(), row, std::move(oracle), opt);
258 }
259
260private:
261 void initialize(
262 const Matrix<Value_, Index_>& mat,
263 SparseParallelResults<Index_> processed,
264 const Index_ extent,
265 const bool row,
266 MaybeOracle<oracle_, Index_> oracle,
267 Options opt
268 ) {
269 const Index_ num_collapsed = processed.collapsed.size(); // number of unique subset indices should be no greater than the extent.
270 my_shift = extent - num_collapsed;
271
272 my_needs_value = opt.sparse_extract_value;
273 my_needs_index = opt.sparse_extract_index;
274 my_needs_sort = opt.sparse_ordered_index;
275
276 if (my_needs_sort && my_needs_value) {
277 my_sortspace.reserve(extent);
278 }
279
280 // We need to extract indices for sorting and expansion purposes, even if they weren't actually requested.
281 opt.sparse_extract_index = true;
282 if (!my_needs_index) {
283 resize_container_to_Index_size(my_holding_ibuffer, num_collapsed);
284 }
285
286 my_ext = new_extractor<true, oracle_>(mat, row, std::move(oracle), std::move(processed.collapsed), opt);
287 my_reindex = std::move(processed.reindex);
288 }
289
290public:
291 SparseRange<Value_, Index_> fetch(const Index_ i, Value_* const vbuffer, Index_* const ibuffer) {
292 const auto vinit = (my_needs_value ? vbuffer + my_shift : NULL);
293 const auto iinit = (my_needs_index ? ibuffer + my_shift : my_holding_ibuffer.data());
294 auto input = my_ext->fetch(i, vinit, iinit);
295
296 if (!my_needs_sort) {
297 // Pointers in 'input' and the two 'buffer' pointers may optionally point
298 // to overlapping arrays as long as each 'buffer' pointer precedes its
299 // corresponding pointer in 'input'. The idea is that the expansion of
300 // values into, e.g., 'vbuffer' will cause it to catch up to 'input.value'
301 // without clobbering any values in the latter. This assumes that
302 // 'input.value' has been shifted enough to make space for expansion; the
303 // required shift depends on the number of duplicates.
304 Index_ count = 0;
305 auto vcopy = vbuffer;
306 auto icopy = ibuffer;
307
308 auto vsrc = input.value;
309 bool replace_value = my_needs_value && vsrc != vcopy;
310
311 for (Index_ i = 0; i < input.number; ++i) {
312 const auto lookup = input.index[i] - my_reindex.offset;
313 const auto start = my_reindex.pool_ptrs[lookup];
314 const auto num = my_reindex.pool_ptrs[lookup + 1] - start;
315 count += num;
316
317 if (replace_value) {
318 auto val = *vsrc; // make a copy just in case 'vcopy' and 'input.value' overlap.
319 std::fill_n(vcopy, num, val);
320 vcopy += num;
321 ++vsrc;
322 replace_value = (vcopy != vsrc); // if we've caught up, there no need to do this replacement.
323 }
324
325 if (my_needs_index) {
326 // Again, 'icopy' will eventually catch up to 'input.index' if
327 // they point to overlapping arrays. But we still need to
328 // replace values once we've managed to catch up, so we can't
329 // short-circuit like we did with 'replace_value'.
330 std::copy_n(my_reindex.pool_indices.begin() + start, num, icopy);
331 icopy += num;
332 }
333 }
334
335 input.number = count;
336 if (my_needs_value) {
337 input.value = vbuffer;
338 }
339 if (my_needs_index) {
340 input.index = ibuffer;
341 } else {
342 input.index = NULL;
343 }
344
345 } else if (my_needs_value) {
346 // This does not require any careful consideration of the overlaps
347 // between 'input' and 'buffers', as we're copying things into
348 // 'my_sortspace' anyway before copying them back into 'buffer'.
349 my_sortspace.clear();
350 for (Index_ i = 0; i < input.number; ++i) {
351 const auto val = input.value[i];
352 const auto lookup = input.index[i] - my_reindex.offset;
353 const auto start = my_reindex.pool_ptrs[lookup];
354 const auto end = my_reindex.pool_ptrs[lookup + 1];
355 for (Index_ j = start; j < end; ++j) {
356 my_sortspace.emplace_back(my_reindex.pool_indices[j], val);
357 }
358 }
359 std::sort(my_sortspace.begin(), my_sortspace.end());
360 input.number = my_sortspace.size();
361
362 auto vcopy = vbuffer;
363 for (const auto& ss : my_sortspace) {
364 *vcopy = ss.second;
365 ++vcopy;
366 }
367 input.value = vbuffer;
368
369 if (my_needs_index) {
370 auto icopy = ibuffer;
371 for (const auto& ss : my_sortspace) {
372 *icopy = ss.first;
373 ++icopy;
374 }
375 input.index = ibuffer;
376 } else {
377 input.index = NULL;
378 }
379
380 } else {
381 // Again, 'input.index' and 'ibuffer' may point to overlapping arrays,
382 // as long as the latter precedes the former; expansion into the latter
383 // will allow it to catch up to the former without clobbering, assuming
384 // that the latter was shifted back to provide enough space.
385 Index_ count = 0;
386 auto icopy = ibuffer;
387
388 for (Index_ i = 0; i < input.number; ++i) {
389 const auto lookup = input.index[i] - my_reindex.offset;
390 const auto start = my_reindex.pool_ptrs[lookup];
391 const auto num = my_reindex.pool_ptrs[lookup + 1] - start;
392 count += num;
393
394 if (my_needs_index) {
395 std::copy_n(my_reindex.pool_indices.begin() + start, num, icopy);
396 icopy += num;
397 }
398 }
399
400 input.number = count;
401 if (my_needs_index) {
402 std::sort(ibuffer, ibuffer + count);
403 input.index = ibuffer;
404 } else {
405 input.index = NULL;
406 }
407 }
408
409 return input;
410 }
411
412private:
413 std::unique_ptr<SparseExtractor<oracle_, Value_, Index_> > my_ext;
414 bool my_needs_value, my_needs_index, my_needs_sort;
415 SparseParallelReindex<Index_> my_reindex;
416 std::vector<std::pair<Index_, Value_> > my_sortspace;
417 std::vector<Index_> my_holding_ibuffer;
418 Index_ my_shift;
419};
420
421}
437template<typename Value_, typename Index_, class SubsetStorage_>
438class DelayedSubset final : public Matrix<Value_, Index_> {
439public:
448 std::shared_ptr<const Matrix<Value_, Index_> > matrix,
449 SubsetStorage_ subset,
450 const bool by_row
451 ) :
452 my_matrix(std::move(matrix)),
453 my_subset(std::move(subset)),
454 my_by_row(by_row)
455 {
456 // Check that we can still report the dimension extents of the subsetted matrix.
457 sanisizer::can_cast<Index_>(my_subset.size());
458 }
459
460private:
461 std::shared_ptr<const Matrix<Value_, Index_> > my_matrix;
462 SubsetStorage_ my_subset;
463 bool my_by_row;
464
465public:
466 Index_ nrow() const {
467 if (my_by_row) {
468 return my_subset.size();
469 } else {
470 return my_matrix->nrow();
471 }
472 }
473
474 Index_ ncol() const {
475 if (my_by_row) {
476 return my_matrix->ncol();
477 } else {
478 return my_subset.size();
479 }
480 }
481
482 bool is_sparse() const {
483 return my_matrix->is_sparse();
484 }
485
486 double is_sparse_proportion() const {
487 return my_matrix->is_sparse_proportion();
488 }
489
490 bool prefer_rows() const {
491 return my_matrix->prefer_rows();
492 }
493
494 double prefer_rows_proportion() const {
495 return my_matrix->prefer_rows_proportion();
496 }
497
498 bool uses_oracle(const bool row) const {
499 return my_matrix->uses_oracle(row);
500 }
501
502 using Matrix<Value_, Index_>::dense;
503
504 using Matrix<Value_, Index_>::sparse;
505
506 /********************
507 *** Myopic dense ***
508 ********************/
509private:
510 template<typename ... Args_>
511 std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > populate_myopic_dense(
512 const bool row,
513 Args_&& ... args
514 ) const {
515 if (row == my_by_row) {
516 return std::make_unique<subset_utils::MyopicPerpendicularDense<Value_, Index_, SubsetStorage_> >(
517 *my_matrix,
518 my_subset,
519 row,
520 std::forward<Args_>(args)...
521 );
522 } else {
523 return std::make_unique<DelayedSubset_internal::ParallelDense<false, Value_, Index_> >(
524 *my_matrix,
525 my_subset,
526 row,
527 false,
528 std::forward<Args_>(args)...
529 );
530 }
531 }
532
533public:
534 std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > dense(
535 const bool row,
536 const Options& opt
537 ) const {
538 return populate_myopic_dense(row, opt);
539 }
540
541 std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > dense(
542 const bool row,
543 const Index_ block_start,
544 const Index_ block_length,
545 const Options& opt
546 ) const {
547 return populate_myopic_dense(row, block_start, block_length, opt);
548 }
549
550 std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > dense(
551 const bool row,
552 VectorPtr<Index_> my_subset_ptr,
553 const Options& opt
554 ) const {
555 return populate_myopic_dense(row, std::move(my_subset_ptr), opt);
556 }
557
558 /*********************
559 *** Myopic sparse ***
560 *********************/
561private:
562 template<typename ... Args_>
563 std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > populate_myopic_sparse(
564 const bool row,
565 Args_&& ... args
566 ) const {
567 if (row == my_by_row) {
568 return std::make_unique<subset_utils::MyopicPerpendicularSparse<Value_, Index_, SubsetStorage_> >(
569 *my_matrix,
570 my_subset,
571 row,
572 std::forward<Args_>(args)...
573 );
574 } else {
575 return std::make_unique<DelayedSubset_internal::ParallelSparse<false, Value_, Index_> >(
576 *my_matrix,
577 my_subset,
578 row,
579 false,
580 std::forward<Args_>(args)...
581 );
582 }
583 }
584
585public:
586 std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(
587 const bool row,
588 const Options& opt
589 ) const {
590 return populate_myopic_sparse(row, opt);
591 }
592
593 std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(
594 const bool row,
595 const Index_ block_start,
596 const Index_ block_length,
597 const Options& opt
598 ) const {
599 return populate_myopic_sparse(row, block_start, block_length, opt);
600 }
601
602 std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(
603 const bool row,
604 VectorPtr<Index_> my_subset_ptr,
605 const Options& opt
606 ) const {
607 return populate_myopic_sparse(row, std::move(my_subset_ptr), opt);
608 }
609
610 /**********************
611 *** Oracular dense ***
612 **********************/
613private:
614 template<typename ... Args_>
615 std::unique_ptr<OracularDenseExtractor<Value_, Index_> > populate_oracular_dense(
616 const bool row,
617 std::shared_ptr<const Oracle<Index_> > oracle,
618 Args_&& ... args
619 ) const {
620 if (row == my_by_row) {
621 return std::make_unique<subset_utils::OracularPerpendicularDense<Value_, Index_> >(
622 *my_matrix,
623 my_subset,
624 row,
625 std::move(oracle),
626 std::forward<Args_>(args)...
627 );
628 } else {
629 return std::make_unique<DelayedSubset_internal::ParallelDense<true, Value_, Index_> >(
630 *my_matrix,
631 my_subset,
632 row,
633 std::move(oracle),
634 std::forward<Args_>(args)...
635 );
636 }
637 }
638
639public:
640 std::unique_ptr<OracularDenseExtractor<Value_, Index_> > dense(
641 const bool row,
642 std::shared_ptr<const Oracle<Index_> > oracle,
643 const Options& opt
644 ) const {
645 return populate_oracular_dense(row, std::move(oracle), opt);
646 }
647
648 std::unique_ptr<OracularDenseExtractor<Value_, Index_> > dense(
649 const bool row,
650 std::shared_ptr<const Oracle<Index_> > oracle,
651 const Index_ block_start,
652 const Index_ block_length,
653 const Options& opt
654 ) const {
655 return populate_oracular_dense(row, std::move(oracle), block_start, block_length, opt);
656 }
657
658 std::unique_ptr<OracularDenseExtractor<Value_, Index_> > dense(
659 const bool row,
660 std::shared_ptr<const Oracle<Index_> > oracle,
661 VectorPtr<Index_> my_subset_ptr,
662 const Options& opt
663 ) const {
664 return populate_oracular_dense(row, std::move(oracle), std::move(my_subset_ptr), opt);
665 }
666
667 /***********************
668 *** Oracular sparse ***
669 ***********************/
670private:
671 template<typename ... Args_>
672 std::unique_ptr<OracularSparseExtractor<Value_, Index_> > populate_oracular_sparse(
673 const bool row,
674 std::shared_ptr<const Oracle<Index_> > oracle,
675 Args_&& ... args
676 ) const {
677 if (row == my_by_row) {
678 return std::make_unique<subset_utils::OracularPerpendicularSparse<Value_, Index_> >(
679 *my_matrix,
680 my_subset,
681 row,
682 std::move(oracle),
683 std::forward<Args_>(args)...
684 );
685 } else {
686 return std::make_unique<DelayedSubset_internal::ParallelSparse<true, Value_, Index_> >(
687 *my_matrix,
688 my_subset,
689 row,
690 std::move(oracle),
691 std::forward<Args_>(args)...
692 );
693 }
694 }
695
696public:
697 std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(
698 const bool row,
699 std::shared_ptr<const Oracle<Index_> > oracle,
700 const Options& opt
701 ) const {
702 return populate_oracular_sparse(row, std::move(oracle), opt);
703 }
704
705 std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(
706 const bool row,
707 std::shared_ptr<const Oracle<Index_> > oracle,
708 const Index_ block_start,
709 const Index_ block_length,
710 const Options& opt
711 ) const {
712 return populate_oracular_sparse(row, std::move(oracle), block_start, block_length, opt);
713 }
714
715 std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(
716 const bool row,
717 std::shared_ptr<const Oracle<Index_> > oracle,
718 VectorPtr<Index_> my_subset_ptr,
719 const Options& opt
720 ) const {
721 return populate_oracular_sparse(row, std::move(oracle), std::move(my_subset_ptr), opt);
722 }
723};
724
725}
726
727#endif
Convert index type to container size.
Delayed subsetting of a matrix with general indices.
Definition DelayedSubset.hpp:438
bool uses_oracle(const bool row) const
Definition DelayedSubset.hpp:498
std::unique_ptr< MyopicSparseExtractor< Value_, Index_ > > sparse(const bool row, VectorPtr< Index_ > my_subset_ptr, const Options &opt) const
Definition DelayedSubset.hpp:602
std::unique_ptr< OracularDenseExtractor< Value_, Index_ > > dense(const bool row, std::shared_ptr< const Oracle< Index_ > > oracle, const Options &opt) const
Definition DelayedSubset.hpp:640
Index_ ncol() const
Definition DelayedSubset.hpp:474
bool prefer_rows() const
Definition DelayedSubset.hpp:490
DelayedSubset(std::shared_ptr< const Matrix< Value_, Index_ > > matrix, SubsetStorage_ subset, const bool by_row)
Definition DelayedSubset.hpp:447
std::unique_ptr< MyopicDenseExtractor< Value_, Index_ > > dense(const bool row, const Options &opt) const
Definition DelayedSubset.hpp:534
std::unique_ptr< MyopicSparseExtractor< Value_, Index_ > > sparse(const bool row, const Index_ block_start, const Index_ block_length, const Options &opt) const
Definition DelayedSubset.hpp:593
std::unique_ptr< OracularSparseExtractor< Value_, Index_ > > sparse(const bool row, std::shared_ptr< const Oracle< Index_ > > oracle, const Options &opt) const
Definition DelayedSubset.hpp:697
std::unique_ptr< OracularDenseExtractor< Value_, Index_ > > dense(const bool row, std::shared_ptr< const Oracle< Index_ > > oracle, VectorPtr< Index_ > my_subset_ptr, const Options &opt) const
Definition DelayedSubset.hpp:658
std::unique_ptr< OracularSparseExtractor< Value_, Index_ > > sparse(const bool row, std::shared_ptr< const Oracle< Index_ > > oracle, const Index_ block_start, const Index_ block_length, const Options &opt) const
Definition DelayedSubset.hpp:705
double prefer_rows_proportion() const
Definition DelayedSubset.hpp:494
std::unique_ptr< OracularDenseExtractor< Value_, Index_ > > dense(const bool row, std::shared_ptr< const Oracle< Index_ > > oracle, const Index_ block_start, const Index_ block_length, const Options &opt) const
Definition DelayedSubset.hpp:648
std::unique_ptr< MyopicDenseExtractor< Value_, Index_ > > dense(const bool row, VectorPtr< Index_ > my_subset_ptr, const Options &opt) const
Definition DelayedSubset.hpp:550
std::unique_ptr< MyopicSparseExtractor< Value_, Index_ > > sparse(const bool row, const Options &opt) const
Definition DelayedSubset.hpp:586
std::unique_ptr< OracularSparseExtractor< Value_, Index_ > > sparse(const bool row, std::shared_ptr< const Oracle< Index_ > > oracle, VectorPtr< Index_ > my_subset_ptr, const Options &opt) const
Definition DelayedSubset.hpp:715
Index_ nrow() const
Definition DelayedSubset.hpp:466
bool is_sparse() const
Definition DelayedSubset.hpp:482
std::unique_ptr< MyopicDenseExtractor< Value_, Index_ > > dense(const bool row, const Index_ block_start, const Index_ block_length, const Options &opt) const
Definition DelayedSubset.hpp:541
double is_sparse_proportion() const
Definition DelayedSubset.hpp:486
Virtual class for a matrix.
Definition Matrix.hpp:59
Predict future access requests on the target dimension.
Definition Oracle.hpp:29
Flexible representations for matrix data.
Definition Extractor.hpp:15
std::shared_ptr< const std::vector< Index_ > > VectorPtr
Definition Matrix.hpp:26
void resize_container_to_Index_size(Container_ &container, const Index_ x, Args_ &&... args)
Definition Index_to_container.hpp:99
typename std::conditional< oracle_, OracularDenseExtractor< Value_, Index_ >, MyopicDenseExtractor< Value_, Index_ > >::type DenseExtractor
Definition Extractor.hpp:273
Options for accessing data from a Matrix instance.
Definition Options.hpp:30