tatami
C++ API for different matrix representations
Loading...
Searching...
No Matches
DelayedBind.hpp
Go to the documentation of this file.
1#ifndef TATAMI_DELAYED_BIND_HPP
2#define TATAMI_DELAYED_BIND_HPP
3
4#include "../base/Matrix.hpp"
9#include "../utils/copy.hpp"
10
11#include <numeric>
12#include <algorithm>
13#include <memory>
14#include <array>
15#include <type_traits>
16#include <cstddef>
17
26namespace tatami {
27
31namespace DelayedBind_internal {
32
33/**********************
34 *** Dense parallel ***
35 **********************/
36
// Given a contiguous block [block_start, block_start + block_length) on the
// combined dimension, invoke 'init(matrix, local_start, local_length)' once
// for each submatrix that the block overlaps, using submatrix-local
// coordinates. Returns the index of the first overlapped submatrix.
template<typename Index_, class Initialize_>
Index_ initialize_parallel_block(
    const std::vector<Index_>& cumulative,
    const std::vector<Index_>& mapping,
    Index_ block_start,
    Index_ block_length,
    Initialize_ init)
{
    // An empty mapping means there are no elements at all, so nothing overlaps.
    if (mapping.empty()) {
        return 0;
    }

    const Index_ first_matrix = mapping[block_start];
    const Index_ limit = block_start + block_length;
    Index_ local_start = block_start - cumulative[first_matrix];

    // Guaranteed to fit into Index_; see the reasoning in the DelayedBind constructor.
    const Index_ num_matrices = cumulative.size() - 1;

    for (Index_ m = first_matrix; m < num_matrices; ++m) {
        const Index_ matrix_limit = cumulative[m + 1];
        const bool spills_over = (limit > matrix_limit);
        const Index_ local_end = (spills_over ? matrix_limit : limit) - cumulative[m];
        init(m, local_start, local_end - local_start);
        if (!spills_over) {
            break;
        }
        // Subsequent submatrices are entered at their first element.
        local_start = 0;
    }

    return first_matrix;
}
67
// Given a sorted set of indices on the combined dimension, partition them by
// owning submatrix and invoke 'init(matrix, local_indices_ptr)' once per
// submatrix that contains at least one requested index. Each slice holds the
// requested indices converted to submatrix-local coordinates.
template<typename Index_, class Initialize_>
void initialize_parallel_index(
    const std::vector<Index_>& cumulative,
    const std::vector<Index_>& mapping,
    const std::vector<Index_>& indices,
    Initialize_ init)
{
    Index_ num_indices = indices.size();
    Index_ pos = 0;
    while (pos < num_indices) {
        Index_ leading = indices[pos];
        Index_ which = mapping[leading];
        Index_ lower = cumulative[which];
        Index_ upper = cumulative[which + 1];

        // Start the slice with the element that identified this submatrix.
        auto slice = std::make_shared<std::vector<Index_> >();
        slice->push_back(leading - lower);
        ++pos;

        // Absorb all subsequent indices that fall in the same submatrix.
        for (; pos < num_indices && indices[pos] < upper; ++pos) {
            slice->push_back(indices[pos] - lower);
        }

        init(which, std::move(slice));
    }
}
94
// Dense extraction along the combined dimension: each fetch() concatenates
// the dense outputs of all relevant submatrices into a single buffer.
template<bool oracle_, typename Value_, typename Index_>
class ParallelDense final : public DenseExtractor<oracle_, Value_, Index_> {
public:
    // Full-extent constructor: every submatrix contributes its entire
    // non-target dimension.
    ParallelDense(
        const std::vector<Index_>&, // Not used, just provided for consistency with other constructors.
        const std::vector<Index_>&,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        bool row,
        MaybeOracle<oracle_, Index_> oracle,
        const Options& opt)
    {
        my_exts.reserve(matrices.size());
        my_count.reserve(matrices.size());
        for (const auto& m : matrices) {
            // Number of elements this submatrix contributes to each fetch.
            my_count.emplace_back(row ? m->ncol() : m->nrow());
            my_exts.emplace_back(new_extractor<false, oracle_>(m.get(), row, oracle, opt));
        }
    }

    // Contiguous-block constructor: only the submatrices overlapping
    // [block_start, block_start + block_length) get an extractor.
    ParallelDense(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>& mapping,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        bool row,
        MaybeOracle<oracle_, Index_> oracle,
        Index_ block_start,
        Index_ block_length,
        const Options& opt)
    {
        my_exts.reserve(matrices.size());
        my_count.reserve(matrices.size());
        initialize_parallel_block(
            cumulative,
            mapping,
            block_start,
            block_length,
            [&](Index_ i, Index_ sub_block_start, Index_ sub_block_length) -> void {
                my_count.emplace_back(sub_block_length);
                my_exts.emplace_back(new_extractor<false, oracle_>(matrices[i].get(), row, oracle, sub_block_start, sub_block_length, opt));
            }
        );
    }

    // Indexed constructor: the requested indices are partitioned by submatrix,
    // and only submatrices with at least one requested index get an extractor.
    ParallelDense(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>& mapping,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        bool row,
        MaybeOracle<oracle_, Index_> oracle,
        VectorPtr<Index_> indices_ptr,
        const Options& opt)
    {
        my_exts.reserve(matrices.size());
        my_count.reserve(matrices.size());
        initialize_parallel_index(
            cumulative,
            mapping,
            *indices_ptr,
            [&](Index_ i, VectorPtr<Index_> sub_indices_ptr) -> void {
                my_count.emplace_back(sub_indices_ptr->size());
                my_exts.emplace_back(new_extractor<false, oracle_>(matrices[i].get(), row, oracle, std::move(sub_indices_ptr), opt));
            }
        );
    }

public:
    const Value_* fetch(Index_ i, Value_* buffer) {
        auto copy = buffer;
        // Concatenate each submatrix's output, advancing through 'buffer'.
        for (Index_ x = 0, end = my_count.size(); x < end; ++x) {
            auto ptr = my_exts[x]->fetch(i, copy);
            auto num = my_count[x];
            // NOTE(review): assumes copy_n tolerates ptr == copy, i.e. the
            // extractor already filled 'copy' directly — confirm in copy.hpp.
            copy_n(ptr, num, copy);
            copy += num;
        }
        return buffer;
    }

private:
    // Parallel arrays: one extractor per contributing submatrix, plus the
    // number of elements each one emits per fetch.
    std::vector<std::unique_ptr<DenseExtractor<oracle_, Value_, Index_> > > my_exts;
    std::vector<Index_> my_count;
};
176
177/***********************
178 *** Sparse parallel ***
179 ***********************/
180
// Sparse extraction along the combined dimension, covering the full extent of
// the non-target dimension of every submatrix.
template<bool oracle_, typename Value_, typename Index_>
class ParallelFullSparse final : public SparseExtractor<oracle_, Value_, Index_> {
public:
    ParallelFullSparse(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>&, // not actually used, just provided for consistency with the other constructors.
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        bool row,
        MaybeOracle<oracle_, Index_> oracle,
        const Options& opt) :
        my_cumulative(cumulative),
        my_needs_value(opt.sparse_extract_value),
        my_needs_index(opt.sparse_extract_index)
    {
        my_exts.reserve(matrices.size());
        for (const auto& m : matrices) {
            my_exts.emplace_back(new_extractor<true, oracle_>(m.get(), row, oracle, opt));
        }
    }

    SparseRange<Value_, Index_> fetch(Index_ i, Value_* value_buffer, Index_* index_buffer) {
        auto vcopy = value_buffer;
        auto icopy = index_buffer;
        Index_ accumulated = 0; // total structural non-zeros across all submatrices.

        for (decltype(my_exts.size()) x = 0, end = my_exts.size(); x < end; ++x) {
            auto range = my_exts[x]->fetch(i, vcopy, icopy);
            accumulated += range.number;
            if (my_needs_value) {
                // NOTE(review): assumes copy_n tolerates range.value == vcopy — confirm in copy.hpp.
                copy_n(range.value, range.number, vcopy);
                vcopy += range.number;
            }
            if (my_needs_index) {
                // Submatrix indices are local; shift by the cumulative extent of
                // preceding submatrices to get combined-matrix coordinates.
                auto offset = my_cumulative[x];
                for (Index_ y = 0; y < range.number; ++y) {
                    icopy[y] = range.index[y] + offset;
                }
                icopy += range.number;
            }
        }

        return SparseRange<Value_, Index_>(accumulated, (my_needs_value ? value_buffer : NULL), (my_needs_index ? index_buffer : NULL));
    }

private:
    const std::vector<Index_>& my_cumulative; // owned by the parent DelayedBind; assumed to outlive this extractor.
    bool my_needs_value, my_needs_index;
    std::vector<std::unique_ptr<SparseExtractor<oracle_, Value_, Index_> > > my_exts;
};
230
// Sparse extraction along the combined dimension for a contiguous block of the
// non-target dimension; only submatrices overlapping the block participate.
template<bool oracle_, typename Value_, typename Index_>
class ParallelBlockSparse final : public SparseExtractor<oracle_, Value_, Index_> {
public:
    ParallelBlockSparse(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>& mapping,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        bool row,
        MaybeOracle<oracle_, Index_> oracle,
        Index_ block_start,
        Index_ block_length,
        const Options& opt) :
        my_cumulative(cumulative),
        my_needs_value(opt.sparse_extract_value),
        my_needs_index(opt.sparse_extract_index)
    {
        my_exts.reserve(matrices.size());
        // Remember the first overlapped submatrix so that extractor position x
        // corresponds to submatrix (x + my_start_matrix) in fetch().
        my_start_matrix = initialize_parallel_block(
            my_cumulative,
            mapping,
            block_start,
            block_length,
            [&](Index_ i, Index_ sub_block_start, Index_ sub_block_length) -> void {
                my_exts.emplace_back(new_extractor<true, oracle_>(matrices[i].get(), row, oracle, sub_block_start, sub_block_length, opt));
            }
        );
    }

    SparseRange<Value_, Index_> fetch(Index_ i, Value_* value_buffer, Index_* index_buffer) {
        auto vcopy = value_buffer;
        auto icopy = index_buffer;
        Index_ count = 0; // total structural non-zeros across the overlapped submatrices.

        for (Index_ x = 0, end = my_exts.size(); x < end; ++x) {
            auto range = my_exts[x]->fetch(i, vcopy, icopy);
            count += range.number;
            if (my_needs_value) {
                // NOTE(review): assumes copy_n tolerates range.value == vcopy — confirm in copy.hpp.
                copy_n(range.value, range.number, vcopy);
                vcopy += range.number;
            }
            if (my_needs_index) {
                // Convert submatrix-local indices to combined coordinates.
                Index_ offset = my_cumulative[x + my_start_matrix];
                for (Index_ y = 0; y < range.number; ++y) {
                    icopy[y] = range.index[y] + offset;
                }
                icopy += range.number;
            }
        }

        return SparseRange<Value_, Index_>(count, (my_needs_value ? value_buffer : NULL), (my_needs_index ? index_buffer : NULL));
    }

private:
    const std::vector<Index_>& my_cumulative; // owned by the parent DelayedBind; assumed to outlive this extractor.
    bool my_needs_value, my_needs_index;
    std::vector<std::unique_ptr<SparseExtractor<oracle_, Value_, Index_> > > my_exts;
    Index_ my_start_matrix; // index of the first submatrix overlapping the block.
};
289
// Sparse extraction along the combined dimension for an arbitrary (sorted)
// subset of the non-target dimension; only submatrices containing at least
// one requested index participate.
template<bool oracle_, typename Value_, typename Index_>
class ParallelIndexSparse final : public SparseExtractor<oracle_, Value_, Index_> {
public:
    ParallelIndexSparse(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>& mapping,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        bool row,
        MaybeOracle<oracle_, Index_> oracle,
        VectorPtr<Index_> indices_ptr,
        const Options& opt) :
        my_cumulative(cumulative),
        my_needs_value(opt.sparse_extract_value),
        my_needs_index(opt.sparse_extract_index)
    {
        my_exts.reserve(matrices.size());
        my_which_matrix.reserve(matrices.size());
        initialize_parallel_index(
            my_cumulative,
            mapping,
            *indices_ptr,
            [&](Index_ i, VectorPtr<Index_> sub_indices_ptr) -> void {
                // Record which submatrix each extractor belongs to, as
                // submatrices without requested indices are skipped entirely.
                my_which_matrix.emplace_back(i);
                my_exts.emplace_back(new_extractor<true, oracle_>(matrices[i].get(), row, oracle, std::move(sub_indices_ptr), opt));
            }
        );
    }

    SparseRange<Value_, Index_> fetch(Index_ i, Value_* value_buffer, Index_* index_buffer) {
        auto vcopy = value_buffer;
        auto icopy = index_buffer;
        Index_ count = 0; // total structural non-zeros across participating submatrices.

        for (Index_ x = 0, end = my_which_matrix.size(); x < end; ++x) {
            auto range = my_exts[x]->fetch(i, vcopy, icopy);
            count += range.number;
            if (my_needs_value) {
                // NOTE(review): assumes copy_n tolerates range.value == vcopy — confirm in copy.hpp.
                copy_n(range.value, range.number, vcopy);
                vcopy += range.number;
            }

            if (my_needs_index) {
                // Convert submatrix-local indices to combined coordinates.
                Index_ offset = my_cumulative[my_which_matrix[x]];
                for (Index_ y = 0; y < range.number; ++y) {
                    icopy[y] = range.index[y] + offset;
                }
                icopy += range.number;
            }
        }

        return SparseRange<Value_, Index_>(count, (my_needs_value ? value_buffer : NULL), (my_needs_index ? index_buffer : NULL));
    }

private:
    const std::vector<Index_>& my_cumulative; // owned by the parent DelayedBind; assumed to outlive this extractor.
    bool my_needs_value, my_needs_index;
    std::vector<std::unique_ptr<SparseExtractor<oracle_, Value_, Index_> > > my_exts;
    std::vector<Index_> my_which_matrix; // extractor position -> submatrix index.
};
349
350/*********************
351 *** Perpendicular ***
352 *********************/
353
// Dense extraction perpendicular to the combined dimension: each requested
// element lives entirely within one submatrix, so fetch() dispatches to that
// submatrix with a locally-adjusted index.
template<typename Value_, typename Index_>
class MyopicPerpendicularDense final : public MyopicDenseExtractor<Value_, Index_> {
public:
    // 'args' forwards the row/block/index selection to each submatrix's
    // dense() method unchanged.
    template<typename ... Args_>
    MyopicPerpendicularDense(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>& mapping,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        bool row,
        const Args_& ... args) :
        my_cumulative(cumulative),
        my_mapping(mapping)
    {
        my_exts.reserve(matrices.size());
        for (const auto& m : matrices) {
            my_exts.emplace_back(m->dense(row, args...));
        }
    }

    const Value_* fetch(Index_ i, Value_* buffer) {
        // Map the combined index to its owning submatrix and localize it.
        Index_ chosen = my_mapping[i];
        return my_exts[chosen]->fetch(i - my_cumulative[chosen], buffer);
    }

private:
    const std::vector<Index_>& my_cumulative; // owned by the parent DelayedBind; assumed to outlive this extractor.
    const std::vector<Index_>& my_mapping;
    std::vector<std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > > my_exts;
};
383
// Sparse analogue of MyopicPerpendicularDense: dispatch each fetch() to the
// single submatrix owning the requested element of the combined dimension.
template<typename Value_, typename Index_>
class MyopicPerpendicularSparse final : public MyopicSparseExtractor<Value_, Index_> {
public:
    // 'args' forwards the row/block/index selection to each submatrix's
    // sparse() method unchanged.
    template<typename ... Args_>
    MyopicPerpendicularSparse(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>& mapping,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        bool row,
        const Args_& ... args) :
        my_cumulative(cumulative),
        my_mapping(mapping)
    {
        my_exts.reserve(matrices.size());
        for (const auto& m : matrices) {
            my_exts.emplace_back(m->sparse(row, args...));
        }
    }

    SparseRange<Value_, Index_> fetch(Index_ i, Value_* vbuffer, Index_* ibuffer) {
        // Map the combined index to its owning submatrix and localize it.
        Index_ chosen = my_mapping[i];
        return my_exts[chosen]->fetch(i - my_cumulative[chosen], vbuffer, ibuffer);
    }

private:
    const std::vector<Index_>& my_cumulative; // owned by the parent DelayedBind; assumed to outlive this extractor.
    const std::vector<Index_>& my_mapping;
    std::vector<std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > > my_exts;
};
413
// Splits a global oracle's predictions into one per-submatrix oracle.
// 'chosen' is filled with the submatrix selected for each prediction, in
// prediction order, so callers can replay the dispatch sequence at fetch time.
// 'init(x, oracle)' is invoked once per submatrix that received predictions.
template<typename Index_, class Initialize_>
void initialize_perp_oracular(
    const std::vector<Index_>& cumulative,
    const std::vector<Index_>& mapping,
    const Oracle<Index_>* oracle,
    std::vector<Index_>& chosen,
    Initialize_ init)
{
    auto ntotal = oracle->total();
    chosen.reserve(ntotal);

    // Accumulates one submatrix's predictions, starting in a compact
    // consecutive-run representation (start, number) and falling back to an
    // explicit vector on the first non-consecutive prediction.
    struct Predictions {
        bool consecutive = true;
        Index_ start = 0;
        Index_ number = 0;
        std::vector<Index_> predictions;

        void add(Index_ p) {
            if (consecutive) {
                if (number == 0) {
                    start = p;
                    number = 1;
                    return;
                }
                if (number + start == p) {
                    ++number;
                    return;
                }
                // The run is broken: materialize it before appending.
                consecutive = false;
                predictions.resize(number);
                std::iota(predictions.begin(), predictions.end(), start);
            }

            predictions.push_back(p);
        }
    };

    auto nmats = cumulative.size() - 1;
    std::vector<Predictions> predictions(nmats);
    for (decltype(ntotal) i = 0; i < ntotal; ++i) {
        auto prediction = oracle->get(i);
        Index_ choice = mapping[prediction];
        chosen.push_back(choice);
        // Record the prediction in submatrix-local coordinates.
        predictions[choice].add(prediction - cumulative[choice]);
    }

    // Build the cheapest oracle representation per submatrix; submatrices
    // that received no predictions get no init() call at all.
    for (decltype(nmats) x = 0; x < nmats; ++x) {
        auto& current = predictions[x];
        if (current.consecutive) {
            if (current.number) {
                init(x, std::make_shared<ConsecutiveOracle<Index_> >(current.start, current.number));
            }
        } else {
            if (!current.predictions.empty()) {
                init(x, std::make_shared<FixedVectorOracle<Index_> >(std::move(current.predictions)));
            }
        }
    }
}
473
// Oracle-aware dense extraction perpendicular to the combined dimension.
// The global oracle is partitioned into per-submatrix sub-oracles; 'segments'
// replays the global prediction order so each fetch() is routed to the
// submatrix that the corresponding prediction belongs to.
template<typename Value_, typename Index_>
class OracularPerpendicularDense final : public OracularDenseExtractor<Value_, Index_> {
public:
    // 'args' forwards the block/index selection to each submatrix unchanged.
    template<typename ... Args_>
    OracularPerpendicularDense(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>& mapping,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        bool row,
        std::shared_ptr<const Oracle<Index_> > ora,
        const Args_& ... args)
    {
        // resize() (not reserve) so that init can assign by submatrix index;
        // slots for submatrices without predictions stay null and are never used.
        my_exts.resize(matrices.size());
        initialize_perp_oracular(
            cumulative,
            mapping,
            ora.get(),
            segments,
            [&](Index_ x, std::shared_ptr<const Oracle<Index_> > subora) -> void {
                my_exts[x] = matrices[x]->dense(row, std::move(subora), args...);
            }
        );
    }

    const Value_* fetch(Index_ i, Value_* buffer) {
        // 'used' counts fetch() calls, replaying the global prediction order;
        // the sub-extractor consumes its own sub-oracle's predictions.
        auto chosen = segments[used];
        auto output = my_exts[chosen]->fetch(i, buffer);
        ++used;
        return output;
    }

private:
    std::vector<Index_> segments; // submatrix chosen for each global prediction, in order.
    std::vector<std::unique_ptr<OracularDenseExtractor<Value_, Index_> > > my_exts;
    std::size_t used = 0; // number of fetch() calls served so far.
};
510
// Sparse analogue of OracularPerpendicularDense: per-submatrix sub-oracles
// plus a replay of the global prediction order via 'segments'.
template<typename Value_, typename Index_>
class OracularPerpendicularSparse final : public OracularSparseExtractor<Value_, Index_> {
public:
    // 'args' forwards the block/index selection to each submatrix unchanged.
    template<typename ... Args_>
    OracularPerpendicularSparse(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>& mapping,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        bool row,
        std::shared_ptr<const Oracle<Index_> > ora,
        const Args_& ... args)
    {
        // resize() (not reserve) so that init can assign by submatrix index;
        // slots for submatrices without predictions stay null and are never used.
        my_exts.resize(matrices.size());
        initialize_perp_oracular(
            cumulative,
            mapping,
            ora.get(),
            segments,
            [&](Index_ x, std::shared_ptr<const Oracle<Index_> > subora) -> void {
                my_exts[x] = matrices[x]->sparse(row, std::move(subora), args...);
            }
        );
    }

    SparseRange<Value_, Index_> fetch(Index_ i, Value_* vbuffer, Index_* ibuffer) {
        // 'used' counts fetch() calls, replaying the global prediction order;
        // the sub-extractor consumes its own sub-oracle's predictions.
        auto chosen = segments[used];
        auto output = my_exts[chosen]->fetch(i, vbuffer, ibuffer);
        ++used;
        return output;
    }

private:
    std::vector<Index_> segments; // submatrix chosen for each global prediction, in order.
    std::vector<std::unique_ptr<OracularSparseExtractor<Value_, Index_> > > my_exts;
    std::size_t used = 0; // number of fetch() calls served so far.
};
547
548}
562template<typename Value_, typename Index_>
563class DelayedBind final : public Matrix<Value_, Index_> {
564public:
571 DelayedBind(std::vector<std::shared_ptr<const Matrix<Value_, Index_> > > matrices, bool by_row) : my_matrices(std::move(matrices)), my_by_row(by_row) {
572 auto nmats = my_matrices.size();
573 my_cumulative.reserve(nmats + 1);
574 decltype(nmats) sofar = 0;
575 my_cumulative.push_back(0);
576
577 for (decltype(nmats) i = 0; i < nmats; ++i) {
578 auto& current = my_matrices[i];
579 Index_ primary, secondary;
580 if (my_by_row) {
581 primary = current->nrow();
582 secondary = current->ncol();
583 } else {
584 primary = current->ncol();
585 secondary = current->nrow();
586 }
587
588 if (i == 0) {
589 my_otherdim = secondary;
590 } else if (my_otherdim != secondary) {
591 throw std::runtime_error("all 'my_matrices' should have the same number of " + (my_by_row ? std::string("columns") : std::string("rows")));
592 }
593
594 // Removing the matrices that don't contribute anything,
595 // so we don't have to deal with their overhead.
596 if (primary > 0) {
597 if (sofar != i) {
598 my_matrices[sofar] = std::move(current);
599 }
600 my_cumulative.push_back(my_cumulative.back() + primary);
601 ++sofar;
602 }
603 }
604
605 my_matrices.resize(sofar);
606 nmats = sofar;
607
608 // At this point, the number of matrices must be no greater than the
609 // number of rows/columns of the combined matrix (as we've removed all
610 // non-contributing submatrices) and thus should fit into 'Index_';
611 // hence, using Index_ for the mapping should not overflow.
612 my_mapping.reserve(my_cumulative.back());
613 for (decltype(nmats) i = 0; i < nmats; ++i) {
614 my_mapping.insert(my_mapping.end(), (my_by_row ? my_matrices[i]->nrow() : my_matrices[i]->ncol()), i);
615 }
616
617 double denom = 0;
618 for (const auto& x : my_matrices) {
619 double total = static_cast<double>(x->nrow()) * static_cast<double>(x->ncol());
620 denom += total;
621 my_sparse_prop += total * x->is_sparse_proportion();
622 my_by_row_prop += total * x->prefer_rows_proportion();
623 }
624 if (denom) {
625 my_sparse_prop /= denom;
626 my_by_row_prop /= denom;
627 }
628
629 for (int d = 0; d < 2; ++d) {
630 my_uses_oracle[d] = false;
631 for (const auto& x : my_matrices) {
632 if (x->uses_oracle(d)) {
633 my_uses_oracle[d] = true;
634 break;
635 }
636 }
637 }
638 }
639
646 DelayedBind(const std::vector<std::shared_ptr<Matrix<Value_, Index_> > >& matrices, bool by_row) :
647 DelayedBind(std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >(matrices.begin(), matrices.end()), by_row) {}
648
649private:
650 std::vector<std::shared_ptr<const Matrix<Value_, Index_> > > my_matrices;
651 bool my_by_row;
652
653 Index_ my_otherdim = 0;
654 std::vector<Index_> my_cumulative;
655 std::vector<Index_> my_mapping;
656
657 double my_sparse_prop = 0, my_by_row_prop = 0;
658 std::array<bool, 2> my_uses_oracle;
659
660public:
661 Index_ nrow() const {
662 if (my_by_row) {
663 return my_cumulative.back();
664 } else {
665 return my_otherdim;
666 }
667 }
668
669 Index_ ncol() const {
670 if (my_by_row) {
671 return my_otherdim;
672 } else {
673 return my_cumulative.back();
674 }
675 }
676
677 bool is_sparse() const {
678 return my_sparse_prop > 0.5;
679 }
680
681 double is_sparse_proportion() const {
682 return my_sparse_prop;
683 }
684
685 bool prefer_rows() const {
686 return my_by_row_prop > 0.5;
687 }
688
689 double prefer_rows_proportion() const {
690 return my_by_row_prop;
691 }
692
693 bool uses_oracle(bool row) const {
694 return my_uses_oracle[row];
695 }
696
697 using Matrix<Value_, Index_>::dense;
698
699 using Matrix<Value_, Index_>::sparse;
700
701 /**********************************
702 ********** Myopic dense **********
703 **********************************/
704public:
705 std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > dense(bool row, const Options& opt) const {
706 if (my_matrices.size() == 1) {
707 return my_matrices[0]->dense(row, opt);
708 } else if (row == my_by_row) {
709 return std::make_unique<DelayedBind_internal::MyopicPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, opt);
710 } else {
711 return std::make_unique<DelayedBind_internal::ParallelDense<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, false, opt);
712 }
713 }
714
715 std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > dense(bool row, Index_ block_start, Index_ block_length, const Options& opt) const {
716 if (my_matrices.size() == 1) {
717 return my_matrices[0]->dense(row, block_start, block_length, opt);
718 } else if (row == my_by_row) {
719 return std::make_unique<DelayedBind_internal::MyopicPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, block_start, block_length, opt);
720 } else {
721 return std::make_unique<DelayedBind_internal::ParallelDense<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, false, block_start, block_length, opt);
722 }
723 }
724
725 std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > dense(bool row, VectorPtr<Index_> indices_ptr, const Options& opt) const {
726 if (my_matrices.size() == 1) {
727 return my_matrices[0]->dense(row, std::move(indices_ptr), opt);
728 } else if (row == my_by_row) {
729 return std::make_unique<DelayedBind_internal::MyopicPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(indices_ptr), opt);
730 } else {
731 return std::make_unique<DelayedBind_internal::ParallelDense<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, false, std::move(indices_ptr), opt);
732 }
733 }
734
735 /***********************************
736 ********** Myopic sparse **********
737 ***********************************/
738private:
739 std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(bool row, const Options& opt) const {
740 if (my_matrices.size() == 1) {
741 return my_matrices[0]->sparse(row, opt);
742 } else if (row == my_by_row) {
743 return std::make_unique<DelayedBind_internal::MyopicPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, opt);
744 } else {
745 return std::make_unique<DelayedBind_internal::ParallelFullSparse<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, false, opt);
746 }
747 }
748
749 std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(bool row, Index_ block_start, Index_ block_length, const Options& opt) const {
750 if (my_matrices.size() == 1) {
751 return my_matrices[0]->sparse(row, block_start, block_length, opt);
752 } else if (row == my_by_row) {
753 return std::make_unique<DelayedBind_internal::MyopicPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, block_start, block_length, opt);
754 } else {
755 return std::make_unique<DelayedBind_internal::ParallelBlockSparse<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, false, block_start, block_length, opt);
756 }
757 }
758
759 std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(bool row, VectorPtr<Index_> indices_ptr, const Options& opt) const {
760 if (my_matrices.size() == 1) {
761 return my_matrices[0]->sparse(row, std::move(indices_ptr), opt);
762 } else if (row == my_by_row) {
763 return std::make_unique<DelayedBind_internal::MyopicPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(indices_ptr), opt);
764 } else {
765 return std::make_unique<DelayedBind_internal::ParallelIndexSparse<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, false, std::move(indices_ptr), opt);
766 }
767 }
768
769 /************************************
770 ********** Oracular dense **********
771 ************************************/
772public:
773 std::unique_ptr<OracularDenseExtractor<Value_, Index_> > dense(bool row, std::shared_ptr<const Oracle<Index_> > oracle, const Options& opt) const {
774 if (my_matrices.size() == 1) {
775 return my_matrices[0]->dense(row, std::move(oracle), opt);
776 } else if (!my_uses_oracle[row]) {
777 return std::make_unique<PseudoOracularDenseExtractor<Value_, Index_> >(std::move(oracle), dense(row, opt));
778 } else if (row == my_by_row) {
779 return std::make_unique<DelayedBind_internal::OracularPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), opt);
780 } else {
781 return std::make_unique<DelayedBind_internal::ParallelDense<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), opt);
782 }
783 }
784
785 std::unique_ptr<OracularDenseExtractor<Value_, Index_> > dense(bool row, std::shared_ptr<const Oracle<Index_> > oracle, Index_ block_start, Index_ block_length, const Options& opt) const {
786 if (my_matrices.size() == 1) {
787 return my_matrices[0]->dense(row, std::move(oracle), block_start, block_length, opt);
788 } else if (!my_uses_oracle[row]) {
789 return std::make_unique<PseudoOracularDenseExtractor<Value_, Index_> >(std::move(oracle), dense(row, block_start, block_length, opt));
790 } else if (row == my_by_row) {
791 return std::make_unique<DelayedBind_internal::OracularPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), block_start, block_length, opt);
792 } else {
793 return std::make_unique<DelayedBind_internal::ParallelDense<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), block_start, block_length, opt);
794 }
795 }
796
797 std::unique_ptr<OracularDenseExtractor<Value_, Index_> > dense(bool row, std::shared_ptr<const Oracle<Index_> > oracle, VectorPtr<Index_> indices_ptr, const Options& opt) const {
798 if (my_matrices.size() == 1) {
799 return my_matrices[0]->dense(row, std::move(oracle), std::move(indices_ptr), opt);
800 } else if (!my_uses_oracle[row]) {
801 return std::make_unique<PseudoOracularDenseExtractor<Value_, Index_> >(std::move(oracle), dense(row, std::move(indices_ptr), opt));
802 } else if (row == my_by_row) {
803 return std::make_unique<DelayedBind_internal::OracularPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), std::move(indices_ptr), opt);
804 } else {
805 return std::make_unique<DelayedBind_internal::ParallelDense<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), std::move(indices_ptr), opt);
806 }
807 }
808
809 /*************************************
810 ********** Oracular sparse **********
811 *************************************/
812private:
813 std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(bool row, std::shared_ptr<const Oracle<Index_> > oracle, const Options& opt) const {
814 if (my_matrices.size() == 1) {
815 return my_matrices[0]->sparse(row, std::move(oracle), opt);
816 } else if (!my_uses_oracle[row]) {
817 return std::make_unique<PseudoOracularSparseExtractor<Value_, Index_> >(std::move(oracle), sparse(row, opt));
818 } else if (row == my_by_row) {
819 return std::make_unique<DelayedBind_internal::OracularPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), opt);
820 } else {
821 return std::make_unique<DelayedBind_internal::ParallelFullSparse<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), opt);
822 }
823 }
824
825 std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(bool row, std::shared_ptr<const Oracle<Index_> > oracle, Index_ block_start, Index_ block_length, const Options& opt) const {
826 if (my_matrices.size() == 1) {
827 return my_matrices[0]->sparse(row, std::move(oracle), block_start, block_length, opt);
828 } else if (!my_uses_oracle[row]) {
829 return std::make_unique<PseudoOracularSparseExtractor<Value_, Index_> >(std::move(oracle), sparse(row, block_start, block_length, opt));
830 } else if (row == my_by_row) {
831 return std::make_unique<DelayedBind_internal::OracularPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), block_start, block_length, opt);
832 } else {
833 return std::make_unique<DelayedBind_internal::ParallelBlockSparse<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), block_start, block_length, opt);
834 }
835 }
836
837 std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(bool row, std::shared_ptr<const Oracle<Index_> > oracle, VectorPtr<Index_> indices_ptr, const Options& opt) const {
838 if (my_matrices.size() == 1) {
839 return my_matrices[0]->sparse(row, std::move(oracle), std::move(indices_ptr), opt);
840 } else if (!my_uses_oracle[row]) {
841 return std::make_unique<PseudoOracularSparseExtractor<Value_, Index_> >(std::move(oracle), sparse(row, std::move(indices_ptr), opt));
842 } else if (row == my_by_row) {
843 return std::make_unique<DelayedBind_internal::OracularPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), std::move(indices_ptr), opt);
844 } else {
845 return std::make_unique<DelayedBind_internal::ParallelIndexSparse<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), std::move(indices_ptr), opt);
846 }
847 }
848};
849
853// These methods are soft-deprecated: kept around for back-compatibility only.
854template<typename Value_, typename Index_>
855std::shared_ptr<Matrix<Value_, Index_> > make_DelayedBind(std::vector<std::shared_ptr<const Matrix<Value_, Index_> > > matrices, bool row) {
856 return std::shared_ptr<Matrix<Value_, Index_> >(new DelayedBind<Value_, Index_>(std::move(matrices), row));
857}
858
859template<typename Value_, typename Index_>
860std::shared_ptr<Matrix<Value_, Index_> > make_DelayedBind(std::vector<std::shared_ptr<Matrix<Value_, Index_> > > matrices, bool row) {
861 return std::shared_ptr<Matrix<Value_, Index_> >(new DelayedBind<Value_, Index_>(std::move(matrices), row));
862}
863
864template<int margin_, typename Value_, typename Index_>
865std::shared_ptr<Matrix<Value_, Index_> > make_DelayedBind(std::vector<std::shared_ptr<const Matrix<Value_, Index_> > > matrices) {
866 return make_DelayedBind(std::move(matrices), margin_ == 0);
867}
868
869template<int margin_, typename Value_, typename Index_>
870std::shared_ptr<Matrix<Value_, Index_> > make_DelayedBind(std::vector<std::shared_ptr<Matrix<Value_, Index_> > > matrices) {
871 return make_DelayedBind(std::move(matrices), margin_ == 0);
872}
877}
878
879#endif
Iterate across consecutive elements of the target dimension.
Iterate across a fixed sequence of elements on the target dimension.
Virtual class for a matrix of some numeric type.
Mimic the oracle-aware extractor interface.
Delayed combining of multiple matrices.
Definition DelayedBind.hpp:563
DelayedBind(const std::vector< std::shared_ptr< Matrix< Value_, Index_ > > > &matrices, bool by_row)
Definition DelayedBind.hpp:646
std::unique_ptr< OracularDenseExtractor< Value_, Index_ > > dense(bool row, std::shared_ptr< const Oracle< Index_ > > oracle, VectorPtr< Index_ > indices_ptr, const Options &opt) const
Definition DelayedBind.hpp:797
std::unique_ptr< MyopicDenseExtractor< Value_, Index_ > > dense(bool row, const Options &opt) const
Definition DelayedBind.hpp:705
double is_sparse_proportion() const
Definition DelayedBind.hpp:681
std::unique_ptr< MyopicDenseExtractor< Value_, Index_ > > dense(bool row, VectorPtr< Index_ > indices_ptr, const Options &opt) const
Definition DelayedBind.hpp:725
std::unique_ptr< OracularDenseExtractor< Value_, Index_ > > dense(bool row, std::shared_ptr< const Oracle< Index_ > > oracle, const Options &opt) const
Definition DelayedBind.hpp:773
Index_ nrow() const
Definition DelayedBind.hpp:661
bool is_sparse() const
Definition DelayedBind.hpp:677
std::unique_ptr< OracularDenseExtractor< Value_, Index_ > > dense(bool row, std::shared_ptr< const Oracle< Index_ > > oracle, Index_ block_start, Index_ block_length, const Options &opt) const
Definition DelayedBind.hpp:785
Index_ ncol() const
Definition DelayedBind.hpp:669
DelayedBind(std::vector< std::shared_ptr< const Matrix< Value_, Index_ > > > matrices, bool by_row)
Definition DelayedBind.hpp:571
bool prefer_rows() const
Definition DelayedBind.hpp:685
std::unique_ptr< MyopicDenseExtractor< Value_, Index_ > > dense(bool row, Index_ block_start, Index_ block_length, const Options &opt) const
Definition DelayedBind.hpp:715
double prefer_rows_proportion() const
Definition DelayedBind.hpp:689
bool uses_oracle(bool row) const
Definition DelayedBind.hpp:693
Virtual class for a matrix.
Definition Matrix.hpp:59
Predict future access requests on the target dimension.
Definition Oracle.hpp:23
Copy data from one buffer to another.
Flexible representations for matrix data.
Definition Extractor.hpp:15
std::shared_ptr< const std::vector< Index_ > > VectorPtr
Definition Matrix.hpp:26
typename std::conditional< oracle_, OracularDenseExtractor< Value_, Index_ >, MyopicDenseExtractor< Value_, Index_ > >::type DenseExtractor
Definition Extractor.hpp:273
Value_ * copy_n(const Value_ *input, Size_ n, Value_ *output)
Definition copy.hpp:25
Templated construction of a new extractor.
Options for accessing data from a Matrix instance.
Definition Options.hpp:30