tatami
C++ API for different matrix representations
DelayedSubset.hpp
1#ifndef TATAMI_DELAYED_SUBSET_HPP
2#define TATAMI_DELAYED_SUBSET_HPP
3
4#include "utils.hpp"
5#include "../utils/Index_to_container.hpp"
6
7#include <algorithm>
8#include <memory>
9
10#include "sanisizer/sanisizer.hpp"
11
20namespace tatami {
21
25namespace DelayedSubset_internal {
26
27template<typename Index_>
28struct DenseParallelResults {
29 std::vector<Index_> collapsed;
30 std::vector<Index_> reindex;
31};
32
33template<typename Index_, class SubsetStorage_, class ToIndex_>
34DenseParallelResults<Index_> format_dense_parallel_base(const SubsetStorage_& subset, const Index_ len, const ToIndex_ to_index) {
35 std::vector<std::pair<Index_, Index_> > collected;
36 collected.reserve(len);
37 for (Index_ i = 0; i < len; ++i) {
38 collected.emplace_back(subset[to_index(i)], i);
39 }
40 std::sort(collected.begin(), collected.end());
41
42 DenseParallelResults<Index_> output;
43 if (collected.size()) {
44 output.collapsed.reserve(len);
45 resize_container_to_Index_size(output.reindex, len);
46
47 Index_ last = collected.front().first;
48 output.collapsed.push_back(last);
49 output.reindex[collected.front().second] = 0;
50
51 Index_ counter = 0;
52 for (Index_ i = 1; i < len; ++i) {
53 const auto& pp = collected[i];
54 if (pp.first != last) {
55 last = pp.first;
56 output.collapsed.push_back(last);
57 ++counter;
58 }
59 output.reindex[pp.second] = counter;
60 }
61 }
62
63 return output;
64}
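// Hypothetical illustration of the collapse above (values invented for exposition):
// for subset = {5, 3, 5, 7} and an identity 'to_index', the sorted (value, position)
// pairs are {(3,1), (5,0), (5,2), (7,3)}, giving
//   collapsed = {3, 5, 7}    (unique sorted indices passed to the underlying matrix)
//   reindex   = {1, 0, 1, 2} (reindex[i] locates subset[i] within 'collapsed')
// ParallelDense::fetch() then expands a collapsed extraction {x3, x5, x7} into
// {x5, x3, x5, x7} in the caller's buffer.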
65
66template<bool oracle_, typename Value_, typename Index_>
67class ParallelDense final : public DenseExtractor<oracle_, Value_, Index_> {
68public:
69 template<class SubsetStorage_>
70 ParallelDense(
71 const Matrix<Value_, Index_>& matrix,
72 const SubsetStorage_& subset,
73 const bool row,
74 MaybeOracle<oracle_, Index_> oracle,
75 const Options& opt
76 ) {
77 auto processed = format_dense_parallel_base<Index_>(subset, subset.size(), [&](const Index_ i) -> Index_ { return i; });
78 initialize(matrix, std::move(processed), row, std::move(oracle), opt);
79 }
80
81 template<class SubsetStorage_>
82 ParallelDense(
83 const Matrix<Value_, Index_>& matrix,
84 const SubsetStorage_& subset,
85 const bool row,
86 MaybeOracle<oracle_, Index_> oracle,
87 const Index_ block_start,
88 const Index_ block_length,
89 const Options& opt
90 ) {
91 auto processed = format_dense_parallel_base<Index_>(subset, block_length, [&](const Index_ i) -> Index_ { return i + block_start; });
92 initialize(matrix, std::move(processed), row, std::move(oracle), opt);
93 }
94
95 template<class SubsetStorage_>
96 ParallelDense(
97 const Matrix<Value_, Index_>& matrix,
98 const SubsetStorage_& subset,
99 const bool row,
100 MaybeOracle<oracle_, Index_> oracle,
101 VectorPtr<Index_> indices_ptr,
102 const Options& opt
103 ) {
104 const auto& indices = *indices_ptr;
105 auto processed = format_dense_parallel_base<Index_>(subset, indices.size(), [&](const Index_ i) -> Index_ { return indices[i]; });
106 initialize(matrix, std::move(processed), row, std::move(oracle), opt);
107 }
108
109private:
110 void initialize(
111 const Matrix<Value_, Index_>& matrix,
112 DenseParallelResults<Index_> processed,
113 const bool row,
114 MaybeOracle<oracle_, Index_> oracle,
115 const Options& opt
116 ) {
117 resize_container_to_Index_size(my_holding_vbuffer, processed.collapsed.size()); // processed.collapsed.size() should fit in an Index_, so this cast is safe.
118 my_ext = new_extractor<false, oracle_>(matrix, row, std::move(oracle), std::move(processed.collapsed), opt);
119 my_reindex.swap(processed.reindex);
120 }
121
122public:
123 const Value_* fetch(const Index_ i, Value_* const buffer) {
124 const auto src = my_ext->fetch(i, my_holding_vbuffer.data());
125
126 // 'src' and 'buffer' should not point to the same array.
127 auto copy = buffer;
128 for (auto p : my_reindex) {
129 *copy = src[p];
130 ++copy;
131 }
132
133 return buffer;
134 }
135
136private:
137 std::unique_ptr<DenseExtractor<oracle_, Value_, Index_> > my_ext;
138 std::vector<Value_> my_holding_vbuffer;
139 std::vector<Index_> my_reindex;
140};
141
142template<typename Index_>
143struct SparseParallelReindex {
144 // This is a bit complicated to explain.
145 // Let 'x = pool_ptrs[i - offset]'.
146 // Let 'y = pool_ptrs[i - offset + 1]'.
147 // Let 'z' denote any integer in '[x, y)'.
148 // In which case, 'indices[pool_indices[z]]' is equal to 'i'.
149 // The general idea is that 'pool_indices[z]' can be used to fill the 'SparseRange::index' on output.
150 std::vector<Index_> pool_ptrs; // this can be Index_ as the length of 'pool_indices' is no greater than the output dimension extent.
151 std::vector<Index_> pool_indices;
152 Index_ offset;
153};
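// Hypothetical illustration (values invented for exposition): for subset = {5, 3, 5, 7},
// format_sparse_parallel_base() below produces
//   offset       = 3
//   pool_indices = {1, 0, 2, 3}         (output positions, grouped by unique subset value)
//   pool_ptrs    = {0, 1, 1, 3, 3, 4}   (cumulative group boundaries over the range [3, 7])
// so for an extracted index i = 5, pool_indices[pool_ptrs[5 - 3] .. pool_ptrs[5 - 3 + 1])
// = pool_indices[1 .. 3) = {0, 2} are the output positions whose subset value is 5,
// while i = 4 and i = 6 yield empty slices.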
154
155template<typename Index_>
156struct SparseParallelResults {
157 std::vector<Index_> collapsed;
158 SparseParallelReindex<Index_> reindex;
159};
160
161template<typename Index_, class SubsetStorage_, class ToIndex_>
162SparseParallelResults<Index_> format_sparse_parallel_base(const SubsetStorage_& indices, const Index_ len, const ToIndex_ to_index) {
163 std::vector<std::pair<Index_, Index_> > collected;
164 collected.reserve(len);
165 for (Index_ i = 0; i < len; ++i) {
166 const auto curdex = to_index(i);
167 collected.emplace_back(indices[curdex], curdex);
168 }
169 std::sort(collected.begin(), collected.end());
170
171 SparseParallelResults<Index_> output;
172
173 if (collected.size()) {
174 output.collapsed.reserve(len);
175 output.reindex.pool_indices.reserve(len);
176 const Index_ first = collected.front().first;
177
178 // 'pool_ptrs' is a vector that enables look-up according to the indices of the underlying array.
179 // To avoid the need to allocate a vector of length equal to the underlying array's dimension, we only consider the extremes of 'indices'.
180 // We allocate 'pool_ptrs' to have length equal to the range of 'indices'... plus 1, as we're storing cumulative pointers.
181 // 'offset' defines the lower bound that must be subtracted from the array indices to get an index into 'pool_ptrs'.
182 output.reindex.offset = first;
183 const Index_ allocation = collected.back().first - output.reindex.offset + 1;
184 output.reindex.pool_ptrs.resize(sanisizer::sum<I<decltype(output.reindex.pool_ptrs.size())> >(allocation, 1));
185
186 Index_ counter = 0; // this can never be larger than 'len', so using Index_ will not lead to overflows.
187 output.reindex.pool_ptrs[counter] = 0;
188 ++counter;
189 output.reindex.pool_indices.push_back(collected.front().second);
190 output.reindex.pool_ptrs[counter] = 1;
191 output.collapsed.push_back(first);
192 auto last = first;
193
194 for (Index_ i = 1; i < len; ++i) {
195 const auto& pp = collected[i];
196 const auto current = pp.first;
197 if (current == last) {
198 output.reindex.pool_indices.push_back(pp.second);
199 ++(output.reindex.pool_ptrs[counter]); // contents of pool_ptrs will never be greater than len, so this won't overflow.
200 continue;
201 }
202
203 const Index_ pool_size = output.reindex.pool_indices.size();
204 counter = current - output.reindex.offset;
205 output.reindex.pool_ptrs[counter] = pool_size; // any overwrite is safe as the value is unchanged.
206 ++counter;
207 output.reindex.pool_indices.push_back(pp.second);
208 output.reindex.pool_ptrs[counter] = pool_size + 1;
209 output.collapsed.push_back(current);
210 last = current;
211 }
212 }
213
214 return output;
215}
216
217template<bool oracle_, typename Value_, typename Index_>
218class ParallelSparse final : public SparseExtractor<oracle_, Value_, Index_> {
219public:
220 template<class SubsetStorage_>
221 ParallelSparse(
222 const Matrix<Value_, Index_>& mat,
223 const SubsetStorage_& subset,
224 const bool row,
225 MaybeOracle<oracle_, Index_> oracle,
226 const Options& opt
227 ) {
228 auto processed = format_sparse_parallel_base<Index_>(subset, subset.size(), [](const Index_ i) -> Index_ { return i; });
229 initialize(mat, std::move(processed), subset.size(), row, std::move(oracle), opt);
230 }
231
232 template<class SubsetStorage_>
233 ParallelSparse(
234 const Matrix<Value_, Index_>& mat,
235 const SubsetStorage_& subset,
236 const bool row,
237 MaybeOracle<oracle_, Index_> oracle,
238 const Index_ block_start,
239 const Index_ block_length,
240 const Options& opt
241 ) {
242 auto processed = format_sparse_parallel_base<Index_>(subset, block_length, [&](const Index_ i) -> Index_ { return i + block_start; });
243 initialize(mat, std::move(processed), block_length, row, std::move(oracle), opt);
244 }
245
246 template<class SubsetStorage_>
247 ParallelSparse(
248 const Matrix<Value_, Index_>& mat,
249 const SubsetStorage_& subset,
250 const bool row,
251 MaybeOracle<oracle_, Index_> oracle,
252 VectorPtr<Index_> indices_ptr,
253 const Options& opt
254 ) {
255 const auto& indices = *indices_ptr;
256 auto processed = format_sparse_parallel_base<Index_>(subset, indices.size(), [&](const Index_ i) -> Index_ { return indices[i]; });
257 initialize(mat, std::move(processed), indices.size(), row, std::move(oracle), opt);
258 }
259
260private:
261 void initialize(
262 const Matrix<Value_, Index_>& mat,
263 SparseParallelResults<Index_> processed,
264 const Index_ extent,
265 const bool row,
266 MaybeOracle<oracle_, Index_> oracle,
267 Options opt
268 ) {
269 const Index_ num_collapsed = processed.collapsed.size(); // number of unique subset indices should be no greater than the extent.
270 my_shift = extent - num_collapsed;
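// 'my_shift' is the offset at which the inner extractor writes into the output buffers,
// leaving room at the front so that the duplicate expansion in fetch() can proceed
// in-place without overwriting elements that have not yet been read.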
271
272 my_needs_value = opt.sparse_extract_value;
273 my_needs_index = opt.sparse_extract_index;
274 my_needs_sort = opt.sparse_ordered_index;
275
276 if (my_needs_sort && my_needs_value) {
277 my_sortspace.reserve(extent);
278 }
279
280 // We need to extract indices for sorting and expansion purposes, even if they weren't actually requested.
281 opt.sparse_extract_index = true;
282 if (!my_needs_index) {
283 resize_container_to_Index_size(my_holding_ibuffer, num_collapsed);
284 }
285
286 my_ext = new_extractor<true, oracle_>(mat, row, std::move(oracle), std::move(processed.collapsed), opt);
287 my_reindex = std::move(processed.reindex);
288 }
289
290public:
291 SparseRange<Value_, Index_> fetch(const Index_ i, Value_* const vbuffer, Index_* const ibuffer) {
292 const auto vinit = (my_needs_value ? vbuffer + my_shift : NULL);
293 const auto iinit = (my_needs_index ? ibuffer + my_shift : my_holding_ibuffer.data());
294 auto input = my_ext->fetch(i, vinit, iinit);
295
296 if (!my_needs_sort) {
297 // Pointers in 'input' and the two 'buffer' pointers may optionally point
298 // to overlapping arrays as long as each 'buffer' pointer precedes its
299 // corresponding pointer in 'input'. The idea is that the expansion of
300 // values into, e.g., 'vbuffer' will cause it to catch up to 'input.value'
301 // without clobbering any values in the latter. This assumes that
302 // 'input.value' has been shifted enough to make space for expansion; the
303 // required shift depends on the number of duplicates.
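// Hypothetical illustration: with extent = 3 but only 2 unique subset indices,
// my_shift = 1 and the inner extractor wrote {v3, v5} starting at 'vbuffer + 1'.
// If index 3 maps to two output positions and index 5 to one, the loop below
// turns {., v3, v5} into {v3, v3, v5}, never overwriting an unread element.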
304 Index_ count = 0;
305 auto vcopy = vbuffer;
306 auto icopy = ibuffer;
307
308 auto vsrc = input.value;
309 bool replace_value = my_needs_value && vsrc != vcopy;
310
311 for (Index_ i = 0; i < input.number; ++i) {
312 const auto lookup = input.index[i] - my_reindex.offset;
313 const auto start = my_reindex.pool_ptrs[lookup];
314 const auto num = my_reindex.pool_ptrs[lookup + 1] - start;
315 count += num;
316
317 if (replace_value) {
318 auto val = *vsrc; // make a copy just in case 'vcopy' and 'input.value' overlap.
319 std::fill_n(vcopy, num, val);
320 vcopy += num;
321 ++vsrc;
322 replace_value = (vcopy != vsrc); // if we've caught up, there's no need to do this replacement.
323 }
324
325 if (my_needs_index) {
326 // Again, 'icopy' will eventually catch up to 'input.index' if
327 // they point to overlapping arrays. However, the indices still need
328 // to be overwritten with 'pool_indices' even after catching up, so we
329 // can't short-circuit like we did with 'replace_value'.
330 std::copy_n(my_reindex.pool_indices.begin() + start, num, icopy);
331 icopy += num;
332 }
333 }
334
335 input.number = count;
336 if (my_needs_value) {
337 input.value = vbuffer;
338 }
339 if (my_needs_index) {
340 input.index = ibuffer;
341 } else {
342 input.index = NULL;
343 }
344
345 } else if (my_needs_value) {
346 // This does not require any careful consideration of the overlaps
347 // between 'input' and 'buffers', as we're copying things into
348 // 'my_sortspace' anyway before copying them back into 'buffer'.
349 my_sortspace.clear();
350 for (Index_ i = 0; i < input.number; ++i) {
351 const auto val = input.value[i];
352 const auto lookup = input.index[i] - my_reindex.offset;
353 const auto start = my_reindex.pool_ptrs[lookup];
354 const auto end = my_reindex.pool_ptrs[lookup + 1];
355 for (Index_ j = start; j < end; ++j) {
356 my_sortspace.emplace_back(my_reindex.pool_indices[j], val);
357 }
358 }
359 std::sort(my_sortspace.begin(), my_sortspace.end());
360 input.number = my_sortspace.size();
361
362 auto vcopy = vbuffer;
363 for (const auto& ss : my_sortspace) {
364 *vcopy = ss.second;
365 ++vcopy;
366 }
367 input.value = vbuffer;
368
369 if (my_needs_index) {
370 auto icopy = ibuffer;
371 for (const auto& ss : my_sortspace) {
372 *icopy = ss.first;
373 ++icopy;
374 }
375 input.index = ibuffer;
376 } else {
377 input.index = NULL;
378 }
379
380 } else {
381 // Again, 'input.index' and 'ibuffer' may point to overlapping arrays,
382 // as long as the latter precedes the former; expansion into the latter
383 // will allow it to catch up to the former without clobbering, assuming
384 // that the latter was shifted back to provide enough space.
385 Index_ count = 0;
386 auto icopy = ibuffer;
387
388 for (Index_ i = 0; i < input.number; ++i) {
389 const auto lookup = input.index[i] - my_reindex.offset;
390 const auto start = my_reindex.pool_ptrs[lookup];
391 const auto num = my_reindex.pool_ptrs[lookup + 1] - start;
392 count += num;
393
394 if (my_needs_index) {
395 std::copy_n(my_reindex.pool_indices.begin() + start, num, icopy);
396 icopy += num;
397 }
398 }
399
400 input.number = count;
401 if (my_needs_index) {
402 std::sort(ibuffer, ibuffer + count);
403 input.index = ibuffer;
404 } else {
405 input.index = NULL;
406 }
407 }
408
409 return input;
410 }
411
412private:
413 std::unique_ptr<SparseExtractor<oracle_, Value_, Index_> > my_ext;
414 bool my_needs_value, my_needs_index, my_needs_sort;
415 SparseParallelReindex<Index_> my_reindex;
416 std::vector<std::pair<Index_, Value_> > my_sortspace;
417 std::vector<Index_> my_holding_ibuffer;
418 Index_ my_shift;
419};
420
421}
437template<typename Value_, typename Index_, class SubsetStorage_>
438class DelayedSubset final : public Matrix<Value_, Index_> {
439public:
447 DelayedSubset(
448 std::shared_ptr<const Matrix<Value_, Index_> > matrix,
449 SubsetStorage_ subset,
450 const bool by_row
451 ) :
452 my_matrix(std::move(matrix)),
453 my_subset(std::move(subset)),
454 my_by_row(by_row)
455 {
456 sanisizer::can_cast<Index_>(my_subset.size());
457 }
458
459private:
460 std::shared_ptr<const Matrix<Value_, Index_> > my_matrix;
461 SubsetStorage_ my_subset;
462 bool my_by_row;
463
464public:
465 Index_ nrow() const {
466 if (my_by_row) {
467 return my_subset.size();
468 } else {
469 return my_matrix->nrow();
470 }
471 }
472
473 Index_ ncol() const {
474 if (my_by_row) {
475 return my_matrix->ncol();
476 } else {
477 return my_subset.size();
478 }
479 }
480
481 bool is_sparse() const {
482 return my_matrix->is_sparse();
483 }
484
485 double is_sparse_proportion() const {
486 return my_matrix->is_sparse_proportion();
487 }
488
489 bool prefer_rows() const {
490 return my_matrix->prefer_rows();
491 }
492
493 double prefer_rows_proportion() const {
494 return my_matrix->prefer_rows_proportion();
495 }
496
497 bool uses_oracle(const bool row) const {
498 return my_matrix->uses_oracle(row);
499 }
500
501 using Matrix<Value_, Index_>::dense;
502
503 using Matrix<Value_, Index_>::sparse;
504
505 /********************
506 *** Myopic dense ***
507 ********************/
508private:
509 template<typename ... Args_>
510 std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > populate_myopic_dense(
511 const bool row,
512 Args_&& ... args
513 ) const {
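// When the extraction dimension matches the subsetted dimension, each requested
// row/column maps directly to a single row/column of the underlying matrix (the
// "perpendicular" case); otherwise the subset applies along the extracted vector
// itself, and the Parallel* extractors handle duplicate or unsorted indices.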
514 if (row == my_by_row) {
515 return std::make_unique<subset_utils::MyopicPerpendicularDense<Value_, Index_, SubsetStorage_> >(
516 *my_matrix,
517 my_subset,
518 row,
519 std::forward<Args_>(args)...
520 );
521 } else {
522 return std::make_unique<DelayedSubset_internal::ParallelDense<false, Value_, Index_> >(
523 *my_matrix,
524 my_subset,
525 row,
526 false,
527 std::forward<Args_>(args)...
528 );
529 }
530 }
531
532public:
533 std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > dense(
534 const bool row,
535 const Options& opt
536 ) const {
537 return populate_myopic_dense(row, opt);
538 }
539
540 std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > dense(
541 const bool row,
542 const Index_ block_start,
543 const Index_ block_length,
544 const Options& opt
545 ) const {
546 return populate_myopic_dense(row, block_start, block_length, opt);
547 }
548
549 std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > dense(
550 const bool row,
551 VectorPtr<Index_> my_subset_ptr,
552 const Options& opt
553 ) const {
554 return populate_myopic_dense(row, std::move(my_subset_ptr), opt);
555 }
556
557 /*********************
558 *** Myopic sparse ***
559 *********************/
560private:
561 template<typename ... Args_>
562 std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > populate_myopic_sparse(
563 const bool row,
564 Args_&& ... args
565 ) const {
566 if (row == my_by_row) {
567 return std::make_unique<subset_utils::MyopicPerpendicularSparse<Value_, Index_, SubsetStorage_> >(
568 *my_matrix,
569 my_subset,
570 row,
571 std::forward<Args_>(args)...
572 );
573 } else {
574 return std::make_unique<DelayedSubset_internal::ParallelSparse<false, Value_, Index_> >(
575 *my_matrix,
576 my_subset,
577 row,
578 false,
579 std::forward<Args_>(args)...
580 );
581 }
582 }
583
584public:
585 std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(
586 const bool row,
587 const Options& opt
588 ) const {
589 return populate_myopic_sparse(row, opt);
590 }
591
592 std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(
593 const bool row,
594 const Index_ block_start,
595 const Index_ block_length,
596 const Options& opt
597 ) const {
598 return populate_myopic_sparse(row, block_start, block_length, opt);
599 }
600
601 std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(
602 const bool row,
603 VectorPtr<Index_> my_subset_ptr,
604 const Options& opt
605 ) const {
606 return populate_myopic_sparse(row, std::move(my_subset_ptr), opt);
607 }
608
609 /**********************
610 *** Oracular dense ***
611 **********************/
612private:
613 template<typename ... Args_>
614 std::unique_ptr<OracularDenseExtractor<Value_, Index_> > populate_oracular_dense(
615 const bool row,
616 std::shared_ptr<const Oracle<Index_> > oracle,
617 Args_&& ... args
618 ) const {
619 if (row == my_by_row) {
620 return std::make_unique<subset_utils::OracularPerpendicularDense<Value_, Index_> >(
621 *my_matrix,
622 my_subset,
623 row,
624 std::move(oracle),
625 std::forward<Args_>(args)...
626 );
627 } else {
628 return std::make_unique<DelayedSubset_internal::ParallelDense<true, Value_, Index_> >(
629 *my_matrix,
630 my_subset,
631 row,
632 std::move(oracle),
633 std::forward<Args_>(args)...
634 );
635 }
636 }
637
638public:
639 std::unique_ptr<OracularDenseExtractor<Value_, Index_> > dense(
640 const bool row,
641 std::shared_ptr<const Oracle<Index_> > oracle,
642 const Options& opt
643 ) const {
644 return populate_oracular_dense(row, std::move(oracle), opt);
645 }
646
647 std::unique_ptr<OracularDenseExtractor<Value_, Index_> > dense(
648 const bool row,
649 std::shared_ptr<const Oracle<Index_> > oracle,
650 const Index_ block_start,
651 const Index_ block_length,
652 const Options& opt
653 ) const {
654 return populate_oracular_dense(row, std::move(oracle), block_start, block_length, opt);
655 }
656
657 std::unique_ptr<OracularDenseExtractor<Value_, Index_> > dense(
658 const bool row,
659 std::shared_ptr<const Oracle<Index_> > oracle,
660 VectorPtr<Index_> my_subset_ptr,
661 const Options& opt
662 ) const {
663 return populate_oracular_dense(row, std::move(oracle), std::move(my_subset_ptr), opt);
664 }
665
666 /***********************
667 *** Oracular sparse ***
668 ***********************/
669private:
670 template<typename ... Args_>
671 std::unique_ptr<OracularSparseExtractor<Value_, Index_> > populate_oracular_sparse(
672 const bool row,
673 std::shared_ptr<const Oracle<Index_> > oracle,
674 Args_&& ... args
675 ) const {
676 if (row == my_by_row) {
677 return std::make_unique<subset_utils::OracularPerpendicularSparse<Value_, Index_> >(
678 *my_matrix,
679 my_subset,
680 row,
681 std::move(oracle),
682 std::forward<Args_>(args)...
683 );
684 } else {
685 return std::make_unique<DelayedSubset_internal::ParallelSparse<true, Value_, Index_> >(
686 *my_matrix,
687 my_subset,
688 row,
689 std::move(oracle),
690 std::forward<Args_>(args)...
691 );
692 }
693 }
694
695public:
696 std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(
697 const bool row,
698 std::shared_ptr<const Oracle<Index_> > oracle,
699 const Options& opt
700 ) const {
701 return populate_oracular_sparse(row, std::move(oracle), opt);
702 }
703
704 std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(
705 const bool row,
706 std::shared_ptr<const Oracle<Index_> > oracle,
707 const Index_ block_start,
708 const Index_ block_length,
709 const Options& opt
710 ) const {
711 return populate_oracular_sparse(row, std::move(oracle), block_start, block_length, opt);
712 }
713
714 std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(
715 const bool row,
716 std::shared_ptr<const Oracle<Index_> > oracle,
717 VectorPtr<Index_> my_subset_ptr,
718 const Options& opt
719 ) const {
720 return populate_oracular_sparse(row, std::move(oracle), std::move(my_subset_ptr), opt);
721 }
722};
723
724}
725
726#endif
Delayed subsetting of a matrix with general indices.
Definition DelayedSubset.hpp:438
bool uses_oracle(const bool row) const
Definition DelayedSubset.hpp:497
std::unique_ptr< MyopicSparseExtractor< Value_, Index_ > > sparse(const bool row, VectorPtr< Index_ > my_subset_ptr, const Options &opt) const
Definition DelayedSubset.hpp:601
std::unique_ptr< OracularDenseExtractor< Value_, Index_ > > dense(const bool row, std::shared_ptr< const Oracle< Index_ > > oracle, const Options &opt) const
Definition DelayedSubset.hpp:639
Index_ ncol() const
Definition DelayedSubset.hpp:473
bool prefer_rows() const
Definition DelayedSubset.hpp:489
DelayedSubset(std::shared_ptr< const Matrix< Value_, Index_ > > matrix, SubsetStorage_ subset, const bool by_row)
Definition DelayedSubset.hpp:447
std::unique_ptr< MyopicDenseExtractor< Value_, Index_ > > dense(const bool row, const Options &opt) const
Definition DelayedSubset.hpp:533
std::unique_ptr< MyopicSparseExtractor< Value_, Index_ > > sparse(const bool row, const Index_ block_start, const Index_ block_length, const Options &opt) const
Definition DelayedSubset.hpp:592
std::unique_ptr< OracularSparseExtractor< Value_, Index_ > > sparse(const bool row, std::shared_ptr< const Oracle< Index_ > > oracle, const Options &opt) const
Definition DelayedSubset.hpp:696
std::unique_ptr< OracularDenseExtractor< Value_, Index_ > > dense(const bool row, std::shared_ptr< const Oracle< Index_ > > oracle, VectorPtr< Index_ > my_subset_ptr, const Options &opt) const
Definition DelayedSubset.hpp:657
std::unique_ptr< OracularSparseExtractor< Value_, Index_ > > sparse(const bool row, std::shared_ptr< const Oracle< Index_ > > oracle, const Index_ block_start, const Index_ block_length, const Options &opt) const
Definition DelayedSubset.hpp:704
double prefer_rows_proportion() const
Definition DelayedSubset.hpp:493
std::unique_ptr< OracularDenseExtractor< Value_, Index_ > > dense(const bool row, std::shared_ptr< const Oracle< Index_ > > oracle, const Index_ block_start, const Index_ block_length, const Options &opt) const
Definition DelayedSubset.hpp:647
std::unique_ptr< MyopicDenseExtractor< Value_, Index_ > > dense(const bool row, VectorPtr< Index_ > my_subset_ptr, const Options &opt) const
Definition DelayedSubset.hpp:549
std::unique_ptr< MyopicSparseExtractor< Value_, Index_ > > sparse(const bool row, const Options &opt) const
Definition DelayedSubset.hpp:585
std::unique_ptr< OracularSparseExtractor< Value_, Index_ > > sparse(const bool row, std::shared_ptr< const Oracle< Index_ > > oracle, VectorPtr< Index_ > my_subset_ptr, const Options &opt) const
Definition DelayedSubset.hpp:714
Index_ nrow() const
Definition DelayedSubset.hpp:465
bool is_sparse() const
Definition DelayedSubset.hpp:481
std::unique_ptr< MyopicDenseExtractor< Value_, Index_ > > dense(const bool row, const Index_ block_start, const Index_ block_length, const Options &opt) const
Definition DelayedSubset.hpp:540
double is_sparse_proportion() const
Definition DelayedSubset.hpp:485
Virtual class for a matrix.
Definition Matrix.hpp:59
Predict future access requests on the target dimension.
Definition Oracle.hpp:29
Flexible representations for matrix data.
Definition Extractor.hpp:15
std::shared_ptr< const std::vector< Index_ > > VectorPtr
Definition Matrix.hpp:26
void resize_container_to_Index_size(Container_ &container, const Index_ x, Args_ &&... args)
Definition Index_to_container.hpp:92
typename std::conditional< oracle_, OracularDenseExtractor< Value_, Index_ >, MyopicDenseExtractor< Value_, Index_ > >::type DenseExtractor
Definition Extractor.hpp:273
Options for accessing data from a Matrix instance.
Definition Options.hpp:30
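For context, here is a minimal usage sketch (not part of the header above). It assumes the tatami/tatami.hpp umbrella header and the DenseRowMatrix convenience class from the same library; everything else (Matrix, Options, DelayedSubset and its extractors) appears in the listing.

#include "tatami/tatami.hpp" // assumed umbrella header for the library
#include <iostream>
#include <memory>
#include <vector>

int main() {
    // A small 4x3 dense matrix; DenseRowMatrix is assumed to be tatami's row-major
    // convenience class holding its values in a std::vector.
    std::vector<double> values(12);
    for (int i = 0; i < 12; ++i) {
        values[i] = i;
    }
    std::shared_ptr<const tatami::Matrix<double, int> > mat(
        new tatami::DenseRowMatrix<double, int>(4, 3, std::move(values))
    );

    // Take rows {2, 0, 2}; duplicates and arbitrary order are allowed.
    std::vector<int> subset{2, 0, 2};
    tatami::DelayedSubset<double, int, std::vector<int> > sub(mat, subset, /* by_row = */ true);

    // Extract the first row of the subsetted matrix, i.e. row 2 of the original.
    auto ext = sub.dense(/* row = */ true, tatami::Options());
    std::vector<double> buffer(sub.ncol());
    const double* ptr = ext->fetch(0, buffer.data());
    for (int c = 0; c < sub.ncol(); ++c) {
        std::cout << ptr[c] << " ";
    }
    std::cout << "\n";
    return 0;
}

The sparse() and oracle-aware overloads follow the same pattern, returning extractors whose fetch() fills caller-supplied buffers.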