tatami
C++ API for different matrix representations
Loading...
Searching...
No Matches
DelayedBind.hpp
Go to the documentation of this file.
1#ifndef TATAMI_DELAYED_BIND_HPP
2#define TATAMI_DELAYED_BIND_HPP
3
4#include "../base/Matrix.hpp"
5#include "../utils/new_extractor.hpp"
6#include "../utils/ConsecutiveOracle.hpp"
7#include "../utils/FixedOracle.hpp"
8#include "../utils/PseudoOracularExtractor.hpp"
9#include "../utils/copy.hpp"
10
#include <algorithm>
#include <array>
#include <memory>
#include <numeric>
#include <stdexcept>
#include <type_traits>
#include <vector>
16
25namespace tatami {
26
30namespace DelayedBind_internal {
31
32/**********************
33 *** Dense parallel ***
34 **********************/
35
// Dispatches a contiguous block [block_start, block_start + block_length) of the
// combined dimension across the bound matrices. For each matrix overlapping the
// block, 'init' is invoked (see callers, e.g. ParallelDense/ParallelBlockSparse)
// with the matrix index plus the sub-block start/length within that matrix.
// Returns the index of the first overlapping matrix ('start_index').
// NOTE(review): this Doxygen listing elides lines 37, 40-42, 48-50 and 54-56
// (function name, remaining parameters, and parts of the loop body); the
// comments below describe only what the visible lines establish.
36template<typename Index_, class Initialize_>
38 const std::vector<Index_>& cumulative,
39 const std::vector<Index_>& mapping,
43{
// An empty mapping means the combined dimension has zero extent; nothing to do.
 44 if (mapping.empty()) {
 45 return 0;
 46 }
 47
 51
// Walk the matrices from 'start_index', slicing off one sub-block per matrix
// until the requested block is exhausted. 'cumulative[index + 1]' is the
// combined-dimension position one-past-the-end of matrix 'index'.
 52 for (Index_ index = start_index, nmats = cumulative.size() - 1; index < nmats; ++index) {
 53 Index_ submat_end = cumulative[index + 1];
 57 if (!not_final) {
 58 break;
 59 }
// Every matrix after the first overlapping one is entered at its beginning.
 60 actual_start = 0;
 61 }
 62
 63 return start_index;
 64}
65
// Dispatches a sorted set of combined-dimension 'indices' across the bound
// matrices. Consecutive indices falling into the same matrix are grouped into
// one sub-vector of matrix-local indices (i.e. shifted by that matrix's lower
// bound), and 'init' is called once per contributing matrix with the matrix
// index and a shared pointer to its sub-vector (see ParallelIndexSparse).
// NOTE(review): lines 67, 71 and 75-78 are elided in this listing (function
// name, 'init' parameter, and the computation of bind_index/lower/upper/
// first_index); assumptions about them are inferred from the visible usage.
66template<typename Index_, class Initialize_>
68 const std::vector<Index_>& cumulative,
69 const std::vector<Index_>& mapping,
70 const std::vector<Index_>& indices,
72{
73 Index_ counter = 0, il = indices.size();
 74 while (counter < il) {
 79
 80 // Creating the slice with one element already.
 81 auto slice_ptr = std::make_shared<std::vector<Index_> >(1, first_index - lower);
 82 ++counter;
 83
// Absorb all subsequent indices that still fall inside the current matrix
// (i.e. strictly below its 'upper' bound), converting to local coordinates.
 84 while (counter < il && indices[counter] < upper) {
 85 slice_ptr->push_back(indices[counter] - lower);
 86 ++counter;
 87 }
 88
 89 init(bind_index, std::move(slice_ptr));
 90 }
91}
92
93template<bool oracle_, typename Value_, typename Index_>
94class ParallelDense : public DenseExtractor<oracle_, Value_, Index_> {
95public:
96 ParallelDense(
97 const std::vector<Index_>&, // Not used, just provided for consistency with other constructors.
98 const std::vector<Index_>&,
99 const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
100 bool row,
101 MaybeOracle<oracle_, Index_> oracle,
102 const Options& opt)
103 {
104 my_exts.reserve(matrices.size());
105 my_count.reserve(matrices.size());
106 for (const auto& m : matrices) {
107 my_count.emplace_back(row ? m->ncol() : m->nrow());
108 my_exts.emplace_back(new_extractor<false, oracle_>(m.get(), row, oracle, opt));
109 }
110 }
111
112 ParallelDense(
113 const std::vector<Index_>& cumulative,
114 const std::vector<Index_>& mapping,
115 const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
116 bool row,
117 MaybeOracle<oracle_, Index_> oracle,
118 Index_ block_start,
119 Index_ block_length,
120 const Options& opt)
121 {
122 my_exts.reserve(matrices.size());
123 my_count.reserve(matrices.size());
124 initialize_parallel_block(
125 cumulative,
126 mapping,
127 block_start,
128 block_length,
129 [&](Index_ i, Index_ sub_block_start, Index_ sub_block_length) {
130 my_count.emplace_back(sub_block_length);
131 my_exts.emplace_back(new_extractor<false, oracle_>(matrices[i].get(), row, oracle, sub_block_start, sub_block_length, opt));
132 }
133 );
134 }
135
136 ParallelDense(
137 const std::vector<Index_>& cumulative,
138 const std::vector<Index_>& mapping,
139 const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
140 bool row,
141 MaybeOracle<oracle_, Index_> oracle,
142 VectorPtr<Index_> indices_ptr,
143 const Options& opt)
144 {
145 my_exts.reserve(matrices.size());
146 my_count.reserve(matrices.size());
147 initialize_parallel_index(
148 cumulative,
149 mapping,
150 *indices_ptr,
151 [&](Index_ i, VectorPtr<Index_> sub_indices_ptr) {
152 my_count.emplace_back(sub_indices_ptr->size());
153 my_exts.emplace_back(new_extractor<false, oracle_>(matrices[i].get(), row, oracle, std::move(sub_indices_ptr), opt));
154 }
155 );
156 }
157
158public:
159 const Value_* fetch(Index_ i, Value_* buffer) {
160 auto copy = buffer;
161 for (Index_ x = 0, end = my_count.size(); x < end; ++x) {
162 auto ptr = my_exts[x]->fetch(i, copy);
163 auto num = my_count[x];
164 copy_n(ptr, num, copy);
165 copy += num;
166 }
167 return buffer;
168 }
169
170private:
171 std::vector<std::unique_ptr<DenseExtractor<oracle_, Value_, Index_> > > my_exts;
172 std::vector<Index_> my_count;
173};
174
175/***********************
176 *** Sparse parallel ***
177 ***********************/
178
179template<bool oracle_, typename Value_, typename Index_>
180class ParallelFullSparse : public SparseExtractor<oracle_, Value_, Index_> {
181public:
182 ParallelFullSparse(
183 const std::vector<Index_>& cumulative,
184 const std::vector<Index_>&, // not actually used, just provided for consistency with the other constructors.
185 const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
186 bool row,
187 MaybeOracle<oracle_, Index_> oracle,
188 const Options& opt) :
189 my_cumulative(cumulative),
190 my_needs_value(opt.sparse_extract_value),
191 my_needs_index(opt.sparse_extract_index)
192 {
193 my_exts.reserve(matrices.size());
194 for (const auto& m : matrices) {
195 my_exts.emplace_back(new_extractor<true, oracle_>(m.get(), row, oracle, opt));
196 }
197 }
198
199 SparseRange<Value_, Index_> fetch(Index_ i, Value_* value_buffer, Index_* index_buffer) {
200 auto vcopy = value_buffer;
201 auto icopy = index_buffer;
202 Index_ accumulated = 0;
203
204 for (Index_ x = 0, end = my_cumulative.size() - 1; x < end; ++x) {
205 auto range = my_exts[x]->fetch(i, vcopy, icopy);
206 accumulated += range.number;
207 if (my_needs_value) {
208 copy_n(range.value, range.number, vcopy);
209 vcopy += range.number;
210 }
211 if (my_needs_index) {
212 auto offset = my_cumulative[x];
213 for (Index_ y = 0; y < range.number; ++y) {
214 icopy[y] = range.index[y] + offset;
215 }
216 icopy += range.number;
217 }
218 }
219
220 return SparseRange<Value_, Index_>(accumulated, (my_needs_value ? value_buffer : NULL), (my_needs_index ? index_buffer : NULL));
221 }
222
223private:
224 const std::vector<Index_>& my_cumulative;
225 bool my_needs_value, my_needs_index;
226 std::vector<std::unique_ptr<SparseExtractor<oracle_, Value_, Index_> > > my_exts;
227};
228
229template<bool oracle_, typename Value_, typename Index_>
230class ParallelBlockSparse : public SparseExtractor<oracle_, Value_, Index_> {
231public:
232 ParallelBlockSparse(
233 const std::vector<Index_>& cumulative,
234 const std::vector<Index_>& mapping,
235 const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
236 bool row,
237 MaybeOracle<oracle_, Index_> oracle,
238 Index_ block_start,
239 Index_ block_length,
240 const Options& opt) :
241 my_cumulative(cumulative),
242 my_needs_value(opt.sparse_extract_value),
243 my_needs_index(opt.sparse_extract_index)
244 {
245 my_exts.reserve(matrices.size());
246 my_start_matrix = initialize_parallel_block(
247 my_cumulative,
248 mapping,
249 block_start,
250 block_length,
251 [&](Index_ i, Index_ sub_block_start, Index_ sub_block_length) {
252 my_exts.emplace_back(new_extractor<true, oracle_>(matrices[i].get(), row, oracle, sub_block_start, sub_block_length, opt));
253 }
254 );
255 }
256
257 SparseRange<Value_, Index_> fetch(Index_ i, Value_* value_buffer, Index_* index_buffer) {
258 auto vcopy = value_buffer;
259 auto icopy = index_buffer;
260 Index_ count = 0;
261
262 for (Index_ x = 0, end = my_exts.size(); x < end; ++x) {
263 auto range = my_exts[x]->fetch(i, vcopy, icopy);
264 count += range.number;
265 if (my_needs_value) {
266 copy_n(range.value, range.number, vcopy);
267 vcopy += range.number;
268 }
269 if (my_needs_index) {
270 Index_ offset = my_cumulative[x + my_start_matrix];
271 for (Index_ y = 0; y < range.number; ++y) {
272 icopy[y] = range.index[y] + offset;
273 }
274 icopy += range.number;
275 }
276 }
277
278 return SparseRange<Value_, Index_>(count, (my_needs_value ? value_buffer : NULL), (my_needs_index ? index_buffer : NULL));
279 }
280
281private:
282 const std::vector<Index_>& my_cumulative;
283 bool my_needs_value, my_needs_index;
284 std::vector<std::unique_ptr<SparseExtractor<oracle_, Value_, Index_> > > my_exts;
285 Index_ my_start_matrix;
286};
287
288template<bool oracle_, typename Value_, typename Index_>
289class ParallelIndexSparse : public SparseExtractor<oracle_, Value_, Index_> {
290public:
291 ParallelIndexSparse(
292 const std::vector<Index_>& cumulative,
293 const std::vector<Index_>& mapping,
294 const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
295 bool row,
296 MaybeOracle<oracle_, Index_> oracle,
297 VectorPtr<Index_> indices_ptr,
298 const Options& opt) :
299 my_cumulative(cumulative),
300 my_needs_value(opt.sparse_extract_value),
301 my_needs_index(opt.sparse_extract_index)
302 {
303 my_exts.reserve(matrices.size());
304 my_which_matrix.reserve(matrices.size());
305 initialize_parallel_index(
306 my_cumulative,
307 mapping,
308 *indices_ptr,
309 [&](Index_ i, VectorPtr<Index_> sub_indices_ptr) {
310 my_which_matrix.emplace_back(i);
311 my_exts.emplace_back(new_extractor<true, oracle_>(matrices[i].get(), row, oracle, std::move(sub_indices_ptr), opt));
312 }
313 );
314 }
315
316 SparseRange<Value_, Index_> fetch(Index_ i, Value_* value_buffer, Index_* index_buffer) {
317 auto vcopy = value_buffer;
318 auto icopy = index_buffer;
319 Index_ count = 0;
320
321 for (Index_ x = 0, end = my_which_matrix.size(); x < end; ++x) {
322 auto range = my_exts[x]->fetch(i, vcopy, icopy);
323 count += range.number;
324 if (my_needs_value) {
325 copy_n(range.value, range.number, vcopy);
326 vcopy += range.number;
327 }
328
329 if (my_needs_index) {
330 Index_ offset = my_cumulative[my_which_matrix[x]];
331 for (Index_ y = 0; y < range.number; ++y) {
332 icopy[y] = range.index[y] + offset;
333 }
334 icopy += range.number;
335 }
336 }
337
338 return SparseRange<Value_, Index_>(count, (my_needs_value ? value_buffer : NULL), (my_needs_index ? index_buffer : NULL));
339 }
340
341private:
342 const std::vector<Index_>& my_cumulative;
343 bool my_needs_value, my_needs_index;
344 std::vector<std::unique_ptr<SparseExtractor<oracle_, Value_, Index_> > > my_exts;
345 std::vector<Index_> my_which_matrix;
346};
347
348/*********************
349 *** Perpendicular ***
350 *********************/
351
352template<typename Value_, typename Index_>
353class MyopicPerpendicularDense : public MyopicDenseExtractor<Value_, Index_> {
354public:
355 template<typename ... Args_>
356 MyopicPerpendicularDense(
357 const std::vector<Index_>& cumulative,
358 const std::vector<Index_>& mapping,
359 const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
360 bool row,
361 const Args_& ... args) :
362 my_cumulative(cumulative),
363 my_mapping(mapping)
364 {
365 my_exts.reserve(matrices.size());
366 for (const auto& m : matrices) {
367 my_exts.emplace_back(m->dense(row, args...));
368 }
369 }
370
371 const Value_* fetch(Index_ i, Value_* buffer) {
372 Index_ chosen = my_mapping[i];
373 return my_exts[chosen]->fetch(i - my_cumulative[chosen], buffer);
374 }
375
376private:
377 const std::vector<Index_>& my_cumulative;
378 const std::vector<Index_>& my_mapping;
379 std::vector<std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > > my_exts;
380};
381
382template<typename Value_, typename Index_>
383class MyopicPerpendicularSparse : public MyopicSparseExtractor<Value_, Index_> {
384public:
385 template<typename ... Args_>
386 MyopicPerpendicularSparse(
387 const std::vector<Index_>& cumulative,
388 const std::vector<Index_>& mapping,
389 const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
390 bool row,
391 const Args_& ... args) :
392 my_cumulative(cumulative),
393 my_mapping(mapping)
394 {
395 my_exts.reserve(matrices.size());
396 for (const auto& m : matrices) {
397 my_exts.emplace_back(m->sparse(row, args...));
398 }
399 }
400
401 SparseRange<Value_, Index_> fetch(Index_ i, Value_* vbuffer, Index_* ibuffer) {
402 Index_ chosen = my_mapping[i];
403 return my_exts[chosen]->fetch(i - my_cumulative[chosen], vbuffer, ibuffer);
404 }
405
406private:
407 const std::vector<Index_>& my_cumulative;
408 const std::vector<Index_>& my_mapping;
409 std::vector<std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > > my_exts;
410};
411
// Splits a stream of oracle predictions on the combined dimension into one
// per-matrix oracle. 'chosen' records, per prediction, which matrix it maps to
// (consumed in order by the perpendicular fetch() methods). For each matrix,
// 'init' receives either a ConsecutiveOracle (if its predictions form a single
// run) or a FixedVectorOracle of matrix-local predictions.
// NOTE(review): this listing elides lines 413, 418 and 453/455 (function name,
// the 'Init_ init' parameter, and the mapping of each prediction to 'choice'
// plus the per-matrix Predictions update); comments reflect visible lines only.
412template<typename Index_, class Initialize_>
414 const std::vector<Index_>& cumulative,
415 const std::vector<Index_>& mapping,
416 const Oracle<Index_>* oracle,
417 std::vector<Index_>& chosen,
419{
420 size_t ntotal = oracle->total();
421 chosen.reserve(ntotal);
422
// Accumulates one matrix's predictions, staying in the cheap "consecutive run"
// representation until a non-consecutive prediction forces materialization of
// an explicit vector (filled retroactively via std::iota).
423 struct Predictions {
424 bool consecutive = true;
425 Index_ start = 0;
426 Index_ number = 0;
427 std::vector<Index_> predictions;
428
429 void add(Index_ p) {
430 if (consecutive) {
431 if (number == 0) {
432 start = p;
433 number = 1;
434 return;
435 }
436 if (number + start == p) {
437 ++number;
438 return;
439 }
// Run broken: switch to the explicit representation.
440 consecutive = false;
441 predictions.resize(number);
442 std::iota(predictions.begin(), predictions.end(), start);
443 }
444
445 predictions.push_back(p);
446 }
447 };
448
449 Index_ nmats = cumulative.size() - 1;
450 std::vector<Predictions> predictions(nmats);
// Route each global prediction to its owning matrix, remembering the choice.
451 for (size_t i = 0; i < ntotal; ++i) {
452 auto prediction = oracle->get(i);
454 chosen.push_back(choice);
456 }
457
// Hand each matrix the cheapest oracle that reproduces its prediction stream;
// matrices with no predictions are skipped entirely.
458 for (Index_ x = 0; x < nmats; ++x) {
459 auto& current = predictions[x];
460 if (current.consecutive) {
461 if (current.number) {
462 init(x, std::make_shared<ConsecutiveOracle<Index_> >(current.start, current.number));
463 }
464 } else {
465 if (!current.predictions.empty()) {
466 init(x, std::make_shared<FixedVectorOracle<Index_> >(std::move(current.predictions)));
467 }
468 }
469 }
470}
471
472template<typename Value_, typename Index_>
473class OracularPerpendicularDense : public OracularDenseExtractor<Value_, Index_> {
474public:
475 template<typename ... Args_>
476 OracularPerpendicularDense(
477 const std::vector<Index_>& cumulative,
478 const std::vector<Index_>& mapping,
479 const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
480 bool row,
481 std::shared_ptr<const Oracle<Index_> > ora,
482 const Args_& ... args)
483 {
484 my_exts.resize(matrices.size());
485 initialize_perp_oracular(
486 cumulative,
487 mapping,
488 ora.get(),
489 segments,
490 [&](Index_ x, std::shared_ptr<const Oracle<Index_> > subora) {
491 my_exts[x] = matrices[x]->dense(row, std::move(subora), args...);
492 }
493 );
494 }
495
496 const Value_* fetch(Index_ i, Value_* buffer) {
497 auto chosen = segments[used];
498 auto output = my_exts[chosen]->fetch(i, buffer);
499 ++used;
500 return output;
501 }
502
503private:
504 std::vector<Index_> segments;
505 std::vector<std::unique_ptr<OracularDenseExtractor<Value_, Index_> > > my_exts;
506 size_t used = 0;
507};
508
509template<typename Value_, typename Index_>
510class OracularPerpendicularSparse : public OracularSparseExtractor<Value_, Index_> {
511public:
512 template<typename ... Args_>
513 OracularPerpendicularSparse(
514 const std::vector<Index_>& cumulative,
515 const std::vector<Index_>& mapping,
516 const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
517 bool row,
518 std::shared_ptr<const Oracle<Index_> > ora,
519 const Args_& ... args)
520 {
521 my_exts.resize(matrices.size());
522 initialize_perp_oracular(
523 cumulative,
524 mapping,
525 ora.get(),
526 segments,
527 [&](Index_ x, std::shared_ptr<const Oracle<Index_> > subora) {
528 my_exts[x] = matrices[x]->sparse(row, std::move(subora), args...);
529 }
530 );
531 }
532
533 SparseRange<Value_, Index_> fetch(Index_ i, Value_* vbuffer, Index_* ibuffer) {
534 auto chosen = segments[used];
535 auto output = my_exts[chosen]->fetch(i, vbuffer, ibuffer);
536 ++used;
537 return output;
538 }
539
540private:
541 std::vector<Index_> segments;
542 std::vector<std::unique_ptr<OracularSparseExtractor<Value_, Index_> > > my_exts;
543 size_t used = 0;
544};
545
546}
560template<typename Value_, typename Index_>
561class DelayedBind : public Matrix<Value_, Index_> {
562public:
// Constructs a delayed row-wise (by_row = true) or column-wise combination of
// 'matrices'. Validates that all matrices agree on the non-binding dimension,
// drops zero-extent matrices, and precomputes the cumulative boundaries,
// index-to-matrix mapping, weighted sparsity/row-preference proportions, and
// oracle usage flags. Throws std::runtime_error on a dimension mismatch.
569 DelayedBind(std::vector<std::shared_ptr<const Matrix<Value_, Index_> > > matrices, bool by_row) :
570 my_matrices(std::move(matrices)), my_by_row(by_row), my_cumulative(my_matrices.size()+1)
571 {
// 'sofar' compacts the retained (non-empty) matrices to the front in place.
572 size_t sofar = 0;
573 for (size_t i = 0, nmats = my_matrices.size(); i < nmats; ++i) {
574 auto& current = my_matrices[i];
// 'primary' is this matrix's extent along the binding dimension;
// 'secondary' is its extent along the shared (non-binding) dimension.
575 Index_ primary, secondary;
576 if (my_by_row) {
577 primary = current->nrow();
578 secondary = current->ncol();
579 } else {
580 primary = current->ncol();
581 secondary = current->nrow();
582 }
583
// All matrices must agree on the non-binding dimension.
584 if (i == 0) {
585 my_otherdim = secondary;
586 } else if (my_otherdim != secondary) {
587 throw std::runtime_error("all 'my_matrices' should have the same number of " + (my_by_row ? std::string("columns") : std::string("rows")));
588 }
589
590 // Removing the matrices that don't contribute anything,
591 // so we don't have to deal with their overhead.
592 if (primary > 0) {
593 if (sofar != i) {
594 my_matrices[sofar] = std::move(current);
595 }
596 my_cumulative[sofar + 1] = my_cumulative[sofar] + primary;
597 ++sofar;
598 }
599 }
600
601 my_cumulative.resize(sofar + 1);
602 my_matrices.resize(sofar);
603
// my_mapping[i] is the (compacted) matrix index owning combined position i.
608 my_mapping.reserve(my_cumulative.back());
609 for (Index_ i = 0, nmats = my_matrices.size(); i < nmats; ++i) {
610 my_mapping.insert(my_mapping.end(), (my_by_row ? my_matrices[i]->nrow() : my_matrices[i]->ncol()), i);
611 }
612
// Size-weighted averages of each submatrix's sparsity and row preference.
613 double denom = 0;
614 for (const auto& x : my_matrices) {
615 double total = static_cast<double>(x->nrow()) * static_cast<double>(x->ncol());
616 denom += total;
617 my_sparse_prop += total * x->is_sparse_proportion();
618 my_by_row_prop += total * x->prefer_rows_proportion();
619 }
// Guard against division by zero when all matrices are empty.
620 if (denom) {
621 my_sparse_prop /= denom;
622 my_by_row_prop /= denom;
623 }
624
// An oracle is worth using along a dimension if any submatrix benefits.
625 for (int d = 0; d < 2; ++d) {
626 my_uses_oracle[d] = false;
627 for (const auto& x : my_matrices) {
628 if (x->uses_oracle(d)) {
629 my_uses_oracle[d] = true;
630 break;
631 }
632 }
633 }
634 }
635
642 DelayedBind(const std::vector<std::shared_ptr<Matrix<Value_, Index_> > >& matrices, bool by_row) :
644
645private:
// The bound (non-empty) matrices, in binding order.
646 std::vector<std::shared_ptr<const Matrix<Value_, Index_> > > my_matrices;
// Whether matrices are combined along the row dimension.
647 bool my_by_row;
648
// Shared extent of the non-binding dimension.
649 Index_ my_otherdim = 0;
// my_cumulative[i] is the combined-dimension start of matrix i; back() is the total extent.
650 std::vector<Index_> my_cumulative;
// Combined index -> owning matrix index.
651 std::vector<Index_> my_mapping;
652
// Size-weighted sparsity and row-preference proportions (see constructor).
653 double my_sparse_prop = 0, my_by_row_prop = 0;
// Per-dimension flag: does any submatrix benefit from an oracle?
654 std::array<bool, 2> my_uses_oracle;
655
656public:
657 Index_ nrow() const {
658 if (my_by_row) {
659 return my_cumulative.back();
660 } else {
661 return my_otherdim;
662 }
663 }
664
665 Index_ ncol() const {
666 if (my_by_row) {
667 return my_otherdim;
668 } else {
669 return my_cumulative.back();
670 }
671 }
672
// Sparse if the size-weighted majority of the bound matrices is sparse.
673 bool is_sparse() const {
674 return my_sparse_prop > 0.5;
675 }
676
// Size-weighted proportion of sparse content, as computed in the constructor.
677 double is_sparse_proportion() const {
678 return my_sparse_prop;
679 }
680
// Row-major access preferred if the size-weighted majority of matrices prefers it.
681 bool prefer_rows() const {
682 return my_by_row_prop > 0.5;
683 }
684
// Size-weighted proportion of row-preferring content, from the constructor.
685 double prefer_rows_proportion() const {
686 return my_by_row_prop;
687 }
688
// True if any submatrix benefits from an oracle along this dimension.
689 bool uses_oracle(bool row) const {
690 return my_uses_oracle[row];
691 }
692
694
695 using Matrix<Value_, Index_>::sparse;
696
697 /**********************************
698 ********** Myopic dense **********
699 **********************************/
700public:
701 std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > dense(bool row, const Options& opt) const {
702 if (my_matrices.size() == 1) {
703 return my_matrices[0]->dense(row, opt);
704 } else if (row == my_by_row) {
705 return std::make_unique<DelayedBind_internal::MyopicPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, opt);
706 } else {
707 return std::make_unique<DelayedBind_internal::ParallelDense<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, false, opt);
708 }
709 }
710
711 std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > dense(bool row, Index_ block_start, Index_ block_length, const Options& opt) const {
712 if (my_matrices.size() == 1) {
713 return my_matrices[0]->dense(row, block_start, block_length, opt);
714 } else if (row == my_by_row) {
715 return std::make_unique<DelayedBind_internal::MyopicPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, block_start, block_length, opt);
716 } else {
717 return std::make_unique<DelayedBind_internal::ParallelDense<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, false, block_start, block_length, opt);
718 }
719 }
720
721 std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > dense(bool row, VectorPtr<Index_> indices_ptr, const Options& opt) const {
722 if (my_matrices.size() == 1) {
723 return my_matrices[0]->dense(row, std::move(indices_ptr), opt);
724 } else if (row == my_by_row) {
725 return std::make_unique<DelayedBind_internal::MyopicPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(indices_ptr), opt);
726 } else {
727 return std::make_unique<DelayedBind_internal::ParallelDense<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, false, std::move(indices_ptr), opt);
728 }
729 }
730
731 /***********************************
732 ********** Myopic sparse **********
733 ***********************************/
734private:
735 std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(bool row, const Options& opt) const {
736 if (my_matrices.size() == 1) {
737 return my_matrices[0]->sparse(row, opt);
738 } else if (row == my_by_row) {
739 return std::make_unique<DelayedBind_internal::MyopicPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, opt);
740 } else {
741 return std::make_unique<DelayedBind_internal::ParallelFullSparse<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, false, opt);
742 }
743 }
744
745 std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(bool row, Index_ block_start, Index_ block_length, const Options& opt) const {
746 if (my_matrices.size() == 1) {
747 return my_matrices[0]->sparse(row, block_start, block_length, opt);
748 } else if (row == my_by_row) {
749 return std::make_unique<DelayedBind_internal::MyopicPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, block_start, block_length, opt);
750 } else {
751 return std::make_unique<DelayedBind_internal::ParallelBlockSparse<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, false, block_start, block_length, opt);
752 }
753 }
754
755 std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(bool row, VectorPtr<Index_> indices_ptr, const Options& opt) const {
756 if (my_matrices.size() == 1) {
757 return my_matrices[0]->sparse(row, std::move(indices_ptr), opt);
758 } else if (row == my_by_row) {
759 return std::make_unique<DelayedBind_internal::MyopicPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(indices_ptr), opt);
760 } else {
761 return std::make_unique<DelayedBind_internal::ParallelIndexSparse<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, false, std::move(indices_ptr), opt);
762 }
763 }
764
765 /************************************
766 ********** Oracular dense **********
767 ************************************/
768public:
769 std::unique_ptr<OracularDenseExtractor<Value_, Index_> > dense(bool row, std::shared_ptr<const Oracle<Index_> > oracle, const Options& opt) const {
770 if (my_matrices.size() == 1) {
771 return my_matrices[0]->dense(row, std::move(oracle), opt);
772 } else if (!my_uses_oracle[row]) {
773 return std::make_unique<PseudoOracularDenseExtractor<Value_, Index_> >(std::move(oracle), dense(row, opt));
774 } else if (row == my_by_row) {
775 return std::make_unique<DelayedBind_internal::OracularPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), opt);
776 } else {
777 return std::make_unique<DelayedBind_internal::ParallelDense<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), opt);
778 }
779 }
780
781 std::unique_ptr<OracularDenseExtractor<Value_, Index_> > dense(bool row, std::shared_ptr<const Oracle<Index_> > oracle, Index_ block_start, Index_ block_length, const Options& opt) const {
782 if (my_matrices.size() == 1) {
783 return my_matrices[0]->dense(row, std::move(oracle), block_start, block_length, opt);
784 } else if (!my_uses_oracle[row]) {
785 return std::make_unique<PseudoOracularDenseExtractor<Value_, Index_> >(std::move(oracle), dense(row, block_start, block_length, opt));
786 } else if (row == my_by_row) {
787 return std::make_unique<DelayedBind_internal::OracularPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), block_start, block_length, opt);
788 } else {
789 return std::make_unique<DelayedBind_internal::ParallelDense<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), block_start, block_length, opt);
790 }
791 }
792
793 std::unique_ptr<OracularDenseExtractor<Value_, Index_> > dense(bool row, std::shared_ptr<const Oracle<Index_> > oracle, VectorPtr<Index_> indices_ptr, const Options& opt) const {
794 if (my_matrices.size() == 1) {
795 return my_matrices[0]->dense(row, std::move(oracle), std::move(indices_ptr), opt);
796 } else if (!my_uses_oracle[row]) {
797 return std::make_unique<PseudoOracularDenseExtractor<Value_, Index_> >(std::move(oracle), dense(row, std::move(indices_ptr), opt));
798 } else if (row == my_by_row) {
799 return std::make_unique<DelayedBind_internal::OracularPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), std::move(indices_ptr), opt);
800 } else {
801 return std::make_unique<DelayedBind_internal::ParallelDense<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), std::move(indices_ptr), opt);
802 }
803 }
804
805 /*************************************
806 ********** Oracular sparse **********
807 *************************************/
808private:
809 std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(bool row, std::shared_ptr<const Oracle<Index_> > oracle, const Options& opt) const {
810 if (my_matrices.size() == 1) {
811 return my_matrices[0]->sparse(row, std::move(oracle), opt);
812 } else if (!my_uses_oracle[row]) {
813 return std::make_unique<PseudoOracularSparseExtractor<Value_, Index_> >(std::move(oracle), sparse(row, opt));
814 } else if (row == my_by_row) {
815 return std::make_unique<DelayedBind_internal::OracularPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), opt);
816 } else {
817 return std::make_unique<DelayedBind_internal::ParallelFullSparse<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), opt);
818 }
819 }
820
821 std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(bool row, std::shared_ptr<const Oracle<Index_> > oracle, Index_ block_start, Index_ block_length, const Options& opt) const {
822 if (my_matrices.size() == 1) {
823 return my_matrices[0]->sparse(row, std::move(oracle), block_start, block_length, opt);
824 } else if (!my_uses_oracle[row]) {
825 return std::make_unique<PseudoOracularSparseExtractor<Value_, Index_> >(std::move(oracle), sparse(row, block_start, block_length, opt));
826 } else if (row == my_by_row) {
827 return std::make_unique<DelayedBind_internal::OracularPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), block_start, block_length, opt);
828 } else {
829 return std::make_unique<DelayedBind_internal::ParallelBlockSparse<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), block_start, block_length, opt);
830 }
831 }
832
833 std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(bool row, std::shared_ptr<const Oracle<Index_> > oracle, VectorPtr<Index_> indices_ptr, const Options& opt) const {
834 if (my_matrices.size() == 1) {
835 return my_matrices[0]->sparse(row, std::move(oracle), std::move(indices_ptr), opt);
836 } else if (!my_uses_oracle[row]) {
837 return std::make_unique<PseudoOracularSparseExtractor<Value_, Index_> >(std::move(oracle), sparse(row, std::move(indices_ptr), opt));
838 } else if (row == my_by_row) {
839 return std::make_unique<DelayedBind_internal::OracularPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), std::move(indices_ptr), opt);
840 } else {
841 return std::make_unique<DelayedBind_internal::ParallelIndexSparse<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), std::move(indices_ptr), opt);
842 }
843 }
844};
845
858template<typename Value_, typename Index_>
859std::shared_ptr<Matrix<Value_, Index_> > make_DelayedBind(std::vector<std::shared_ptr<const Matrix<Value_, Index_> > > matrices, bool row) {
860 return std::shared_ptr<Matrix<Value_, Index_> >(new DelayedBind<Value_, Index_>(std::move(matrices), row));
861}
862
866template<typename Value_, typename Index_>
867std::shared_ptr<Matrix<Value_, Index_> > make_DelayedBind(std::vector<std::shared_ptr<Matrix<Value_, Index_> > > matrices, bool row) {
868 return std::shared_ptr<Matrix<Value_, Index_> >(new DelayedBind<Value_, Index_>(std::move(matrices), row));
869}
877// Back-compatibility.
878template<int margin_, typename Value_, typename Index_>
879std::shared_ptr<Matrix<Value_, Index_> > make_DelayedBind(std::vector<std::shared_ptr<const Matrix<Value_, Index_> > > matrices) {
880 return make_DelayedBind(std::move(matrices), margin_ == 0);
881}
882
883template<int margin_, typename Value_, typename Index_>
884std::shared_ptr<Matrix<Value_, Index_> > make_DelayedBind(std::vector<std::shared_ptr<Matrix<Value_, Index_> > > matrices) {
885 return make_DelayedBind(std::move(matrices), margin_ == 0);
886}
891}
892
893#endif
Delayed combining of multiple matrices along one dimension.
Definition DelayedBind.hpp:561
DelayedBind(const std::vector< std::shared_ptr< Matrix< Value_, Index_ > > > &matrices, bool by_row)
Definition DelayedBind.hpp:642
std::unique_ptr< OracularDenseExtractor< Value_, Index_ > > dense(bool row, std::shared_ptr< const Oracle< Index_ > > oracle, VectorPtr< Index_ > indices_ptr, const Options &opt) const
Definition DelayedBind.hpp:793
std::unique_ptr< MyopicDenseExtractor< Value_, Index_ > > dense(bool row, const Options &opt) const
Definition DelayedBind.hpp:701
double is_sparse_proportion() const
Definition DelayedBind.hpp:677
std::unique_ptr< MyopicDenseExtractor< Value_, Index_ > > dense(bool row, VectorPtr< Index_ > indices_ptr, const Options &opt) const
Definition DelayedBind.hpp:721
std::unique_ptr< OracularDenseExtractor< Value_, Index_ > > dense(bool row, std::shared_ptr< const Oracle< Index_ > > oracle, const Options &opt) const
Definition DelayedBind.hpp:769
Index_ nrow() const
Definition DelayedBind.hpp:657
bool is_sparse() const
Definition DelayedBind.hpp:673
std::unique_ptr< OracularDenseExtractor< Value_, Index_ > > dense(bool row, std::shared_ptr< const Oracle< Index_ > > oracle, Index_ block_start, Index_ block_length, const Options &opt) const
Definition DelayedBind.hpp:781
Index_ ncol() const
Definition DelayedBind.hpp:665
DelayedBind(std::vector< std::shared_ptr< const Matrix< Value_, Index_ > > > matrices, bool by_row)
Definition DelayedBind.hpp:569
bool prefer_rows() const
Definition DelayedBind.hpp:681
std::unique_ptr< MyopicDenseExtractor< Value_, Index_ > > dense(bool row, Index_ block_start, Index_ block_length, const Options &opt) const
Definition DelayedBind.hpp:711
double prefer_rows_proportion() const
Definition DelayedBind.hpp:685
bool uses_oracle(bool row) const
Definition DelayedBind.hpp:689
Virtual class for a matrix.
Definition Matrix.hpp:59
Predict future access requests on the target dimension.
Definition Oracle.hpp:21
Flexible representations for matrix data.
Definition Extractor.hpp:15
typename std::conditional< oracle_, OracularDenseExtractor< Value_, Index_ >, MyopicDenseExtractor< Value_, Index_ > >::type DenseExtractor
Definition Extractor.hpp:273
typename std::conditional< oracle_, OracularSparseExtractor< Value_, Index_ >, MyopicSparseExtractor< Value_, Index_ > >::type SparseExtractor
Definition Extractor.hpp:284
Value_ * copy_n(const Value_ *input, Size_ n, Value_ *output)
Definition copy.hpp:25
std::shared_ptr< Matrix< Value_, Index_ > > make_DelayedBind(std::vector< std::shared_ptr< const Matrix< Value_, Index_ > > > matrices, bool row)
Definition DelayedBind.hpp:859
std::shared_ptr< const std::vector< Index_ > > VectorPtr
Definition Matrix.hpp:26
auto consecutive_extractor(const Matrix< Value_, Index_ > *mat, bool row, Index_ iter_start, Index_ iter_length, Args_ &&... args)
Definition consecutive_extractor.hpp:35
Options for accessing data from a Matrix instance.
Definition Options.hpp:30