tatami
C++ API for different matrix representations
Loading...
Searching...
No Matches
DelayedBind.hpp
Go to the documentation of this file.
1#ifndef TATAMI_DELAYED_BIND_HPP
2#define TATAMI_DELAYED_BIND_HPP
3
4#include "../base/Matrix.hpp"
9#include "../utils/copy.hpp"
11
12#include <numeric>
13#include <algorithm>
14#include <memory>
15#include <array>
16#include <type_traits>
17#include <cstddef>
18
27namespace tatami {
28
32namespace DelayedBind_internal {
33
34/**********************
35 *** Dense parallel ***
36 **********************/
37
// Partitions a contiguous [block_start, block_start + block_length) interval on
// the combined dimension into per-submatrix sub-blocks. 'init' is invoked once
// per contributing submatrix with (matrix index, local start, local length).
// Returns the index of the first contributing submatrix.
template<typename Index_, class Initialize_>
Index_ initialize_parallel_block(
    const std::vector<Index_>& cumulative,
    const std::vector<Index_>& mapping,
    const Index_ block_start,
    const Index_ block_length,
    const Initialize_ init
) {
    // An empty mapping means the combined dimension has zero extent.
    if (mapping.empty()) {
        return 0;
    }

    const Index_ first_matrix = mapping[block_start];
    const Index_ limit = block_start + block_length;
    Index_ local_start = block_start - cumulative[first_matrix];

    // Number of matrices is guaranteed to fit in Index_, see reasoning in the DelayedBind constructor.
    const Index_ num_matrices = cumulative.size() - 1;
    for (Index_ mat = first_matrix; mat < num_matrices; ++mat) {
        const Index_ boundary = cumulative[mat + 1];
        const bool is_last = (limit <= boundary);
        const Index_ local_end = (is_last ? limit : boundary) - cumulative[mat];
        init(mat, local_start, local_end - local_start);
        if (is_last) {
            break;
        }
        local_start = 0; // subsequent submatrices are entered from their first element.
    }

    return first_matrix;
}
68
// Splits a sorted set of global indices on the combined dimension into
// per-submatrix subsets, converted to each submatrix's local coordinates.
// 'init' is invoked once per contributing submatrix with (matrix index,
// shared pointer to the local index subset).
template<typename Index_, class Initialize_>
void initialize_parallel_index(
    const std::vector<Index_>& cumulative,
    const std::vector<Index_>& mapping,
    const std::vector<Index_>& indices,
    const Initialize_ init
) {
    const Index_ num_indices = indices.size();
    Index_ pos = 0;
    while (pos < num_indices) {
        const Index_ leading = indices[pos];
        const Index_ which = mapping[leading];
        const Index_ base = cumulative[which];
        const Index_ top = cumulative[which + 1];

        // Seed the subset with the first element, then greedily absorb any
        // following indices that fall inside the same submatrix.
        auto subset = std::make_shared<std::vector<Index_> >(1, leading - base);
        for (++pos; pos < num_indices && indices[pos] < top; ++pos) {
            subset->push_back(indices[pos] - base);
        }

        init(which, std::move(subset));
    }
}
96
// Dense extraction along the non-combined dimension: each fetch() must pull
// the same element from every submatrix and concatenate the results.
template<bool oracle_, typename Value_, typename Index_>
class ParallelDense final : public DenseExtractor<oracle_, Value_, Index_> {
public:
    // Full-extent constructor: every submatrix contributes its entire extent
    // along the non-target dimension.
    ParallelDense(
        const std::vector<Index_>&, // Not used, just provided for consistency with other constructors.
        const std::vector<Index_>&,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        const bool row,
        MaybeOracle<oracle_, Index_> oracle,
        const Options& opt
    ) {
        my_exts.reserve(matrices.size());
        my_count.reserve(matrices.size());
        for (const auto& m : matrices) {
            // Extent of each submatrix along the non-target dimension.
            my_count.emplace_back(row ? m->ncol() : m->nrow());
            my_exts.emplace_back(new_extractor<false, oracle_>(m.get(), row, oracle, opt));
        }
    }

    // Block constructor: the requested [block_start, block_start + block_length)
    // interval is partitioned into sub-blocks on the relevant submatrices.
    ParallelDense(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>& mapping,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        const bool row,
        MaybeOracle<oracle_, Index_> oracle,
        const Index_ block_start,
        const Index_ block_length,
        const Options& opt
    ) {
        my_exts.reserve(matrices.size());
        my_count.reserve(matrices.size());
        initialize_parallel_block(
            cumulative,
            mapping,
            block_start,
            block_length,
            [&](const Index_ i, const Index_ sub_block_start, const Index_ sub_block_length) -> void {
                my_count.emplace_back(sub_block_length);
                my_exts.emplace_back(new_extractor<false, oracle_>(matrices[i].get(), row, oracle, sub_block_start, sub_block_length, opt));
            }
        );
    }

    // Index constructor: the requested indices are split into per-submatrix
    // subsets, converted to each submatrix's local coordinates.
    ParallelDense(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>& mapping,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        const bool row,
        MaybeOracle<oracle_, Index_> oracle,
        VectorPtr<Index_> indices_ptr,
        const Options& opt
    ) {
        my_exts.reserve(matrices.size());
        my_count.reserve(matrices.size());
        initialize_parallel_index(
            cumulative,
            mapping,
            *indices_ptr,
            [&](const Index_ i, VectorPtr<Index_> sub_indices_ptr) -> void {
                my_count.emplace_back(sub_indices_ptr->size());
                my_exts.emplace_back(new_extractor<false, oracle_>(matrices[i].get(), row, oracle, std::move(sub_indices_ptr), opt));
            }
        );
    }

public:
    const Value_* fetch(const Index_ i, Value_* const buffer) {
        // Concatenate the per-submatrix results into 'buffer', advancing the
        // output cursor by each submatrix's contribution.
        auto copy = buffer;
        const Index_ nmats = my_count.size();
        for (Index_ x = 0; x < nmats; ++x) {
            const auto ptr = my_exts[x]->fetch(i, copy);
            const auto num = my_count[x];
            copy_n(ptr, num, copy); // no-op if the extractor already filled 'copy' directly.
            copy += num;
        }
        return buffer;
    }

private:
    std::vector<std::unique_ptr<DenseExtractor<oracle_, Value_, Index_> > > my_exts;
    std::vector<Index_> my_count; // number of elements contributed by each submatrix.
};
179
180/***********************
181 *** Sparse parallel ***
182 ***********************/
183
// Sparse extraction of the full extent along the non-combined dimension:
// results from all submatrices are concatenated, with each submatrix's local
// indices shifted into global coordinates via the cumulative offsets.
template<bool oracle_, typename Value_, typename Index_>
class ParallelFullSparse final : public SparseExtractor<oracle_, Value_, Index_> {
public:
    ParallelFullSparse(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>&, // not actually used, just provided for consistency with the other constructors.
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        const bool row,
        MaybeOracle<oracle_, Index_> oracle,
        const Options& opt
    ) :
        my_cumulative(cumulative),
        my_needs_value(opt.sparse_extract_value),
        my_needs_index(opt.sparse_extract_index)
    {
        my_exts.reserve(matrices.size());
        for (const auto& m : matrices) {
            my_exts.emplace_back(new_extractor<true, oracle_>(m.get(), row, oracle, opt));
        }
    }

    SparseRange<Value_, Index_> fetch(const Index_ i, Value_* const value_buffer, Index_* const index_buffer) {
        auto vcopy = value_buffer;
        auto icopy = index_buffer;
        Index_ accumulated = 0; // total number of structural non-zeros across submatrices.

        const Index_ nmats = my_exts.size();
        for (Index_ x = 0; x < nmats; ++x) {
            const auto range = my_exts[x]->fetch(i, vcopy, icopy);
            accumulated += range.number;
            if (my_needs_value) {
                copy_n(range.value, range.number, vcopy); // no-op if the extractor filled 'vcopy' directly.
                vcopy += range.number;
            }
            if (my_needs_index) {
                // Shift local indices into the combined matrix's coordinates.
                const auto offset = my_cumulative[x];
                for (Index_ y = 0; y < range.number; ++y) {
                    icopy[y] = range.index[y] + offset;
                }
                icopy += range.number;
            }
        }

        return SparseRange<Value_, Index_>(accumulated, (my_needs_value ? value_buffer : NULL), (my_needs_index ? index_buffer : NULL));
    }

private:
    const std::vector<Index_>& my_cumulative;
    bool my_needs_value, my_needs_index;
    std::vector<std::unique_ptr<SparseExtractor<oracle_, Value_, Index_> > > my_exts;
};
235
// Sparse extraction of a contiguous block along the non-combined dimension.
// Only the submatrices overlapping the block get an extractor; their results
// are concatenated with indices shifted into global coordinates.
template<bool oracle_, typename Value_, typename Index_>
class ParallelBlockSparse final : public SparseExtractor<oracle_, Value_, Index_> {
public:
    ParallelBlockSparse(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>& mapping,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        const bool row,
        MaybeOracle<oracle_, Index_> oracle,
        const Index_ block_start,
        const Index_ block_length,
        const Options& opt
    ) :
        my_cumulative(cumulative),
        my_needs_value(opt.sparse_extract_value),
        my_needs_index(opt.sparse_extract_index)
    {
        my_exts.reserve(matrices.size());
        // Remember the first overlapping submatrix, as 'my_exts' only holds
        // extractors for submatrices from that point onwards.
        my_start_matrix = initialize_parallel_block(
            my_cumulative,
            mapping,
            block_start,
            block_length,
            [&](const Index_ i, const Index_ sub_block_start, const Index_ sub_block_length) -> void {
                my_exts.emplace_back(new_extractor<true, oracle_>(matrices[i].get(), row, oracle, sub_block_start, sub_block_length, opt));
            }
        );
    }

    SparseRange<Value_, Index_> fetch(const Index_ i, Value_* const value_buffer, Index_* const index_buffer) {
        auto vcopy = value_buffer;
        auto icopy = index_buffer;
        Index_ count = 0; // total number of structural non-zeros across submatrices.

        const Index_ nmats = my_exts.size();
        for (Index_ x = 0; x < nmats; ++x) {
            const auto range = my_exts[x]->fetch(i, vcopy, icopy);
            count += range.number;
            if (my_needs_value) {
                copy_n(range.value, range.number, vcopy); // no-op if the extractor filled 'vcopy' directly.
                vcopy += range.number;
            }
            if (my_needs_index) {
                // 'x' is relative to the first overlapping submatrix, so add
                // 'my_start_matrix' to find the correct cumulative offset.
                const Index_ offset = my_cumulative[x + my_start_matrix];
                for (Index_ y = 0; y < range.number; ++y) {
                    icopy[y] = range.index[y] + offset;
                }
                icopy += range.number;
            }
        }

        return SparseRange<Value_, Index_>(count, (my_needs_value ? value_buffer : NULL), (my_needs_index ? index_buffer : NULL));
    }

private:
    const std::vector<Index_>& my_cumulative;
    bool my_needs_value, my_needs_index;
    std::vector<std::unique_ptr<SparseExtractor<oracle_, Value_, Index_> > > my_exts;
    Index_ my_start_matrix; // index of the first submatrix overlapping the block.
};
296
// Sparse extraction of an indexed subset along the non-combined dimension.
// Only the submatrices containing requested indices get an extractor; their
// identities are recorded so fetched indices can be shifted back to global
// coordinates.
template<bool oracle_, typename Value_, typename Index_>
class ParallelIndexSparse final : public SparseExtractor<oracle_, Value_, Index_> {
public:
    ParallelIndexSparse(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>& mapping,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        const bool row,
        MaybeOracle<oracle_, Index_> oracle,
        VectorPtr<Index_> indices_ptr,
        const Options& opt
    ) :
        my_cumulative(cumulative),
        my_needs_value(opt.sparse_extract_value),
        my_needs_index(opt.sparse_extract_index)
    {
        my_exts.reserve(matrices.size());
        my_which_matrix.reserve(matrices.size());
        initialize_parallel_index(
            my_cumulative,
            mapping,
            *indices_ptr,
            [&](const Index_ i, VectorPtr<Index_> sub_indices_ptr) -> void {
                // Record which submatrix each extractor corresponds to, as
                // non-contributing submatrices are skipped entirely.
                my_which_matrix.emplace_back(i);
                my_exts.emplace_back(new_extractor<true, oracle_>(matrices[i].get(), row, oracle, std::move(sub_indices_ptr), opt));
            }
        );
    }

    SparseRange<Value_, Index_> fetch(const Index_ i, Value_* const value_buffer, Index_* const index_buffer) {
        auto vcopy = value_buffer;
        auto icopy = index_buffer;
        Index_ count = 0; // total number of structural non-zeros across submatrices.

        const Index_ nmats = my_which_matrix.size();
        for (Index_ x = 0; x < nmats; ++x) {
            const auto range = my_exts[x]->fetch(i, vcopy, icopy);
            count += range.number;
            if (my_needs_value) {
                copy_n(range.value, range.number, vcopy); // no-op if the extractor filled 'vcopy' directly.
                vcopy += range.number;
            }

            if (my_needs_index) {
                // Shift local indices by the owning submatrix's global offset.
                const Index_ offset = my_cumulative[my_which_matrix[x]];
                for (Index_ y = 0; y < range.number; ++y) {
                    icopy[y] = range.index[y] + offset;
                }
                icopy += range.number;
            }
        }

        return SparseRange<Value_, Index_>(count, (my_needs_value ? value_buffer : NULL), (my_needs_index ? index_buffer : NULL));
    }

private:
    const std::vector<Index_>& my_cumulative;
    bool my_needs_value, my_needs_index;
    std::vector<std::unique_ptr<SparseExtractor<oracle_, Value_, Index_> > > my_exts;
    std::vector<Index_> my_which_matrix; // submatrix corresponding to each entry of 'my_exts'.
};
358
359/*********************
360 *** Perpendicular ***
361 *********************/
362
363template<typename Value_, typename Index_>
364class MyopicPerpendicularDense final : public MyopicDenseExtractor<Value_, Index_> {
365public:
366 template<typename ... Args_>
367 MyopicPerpendicularDense(
368 const std::vector<Index_>& cumulative,
369 const std::vector<Index_>& mapping,
370 const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
371 const bool row,
372 const Args_& ... args
373 ) :
374 my_cumulative(cumulative),
375 my_mapping(mapping)
376 {
377 my_exts.reserve(matrices.size());
378 for (const auto& m : matrices) {
379 my_exts.emplace_back(m->dense(row, args...));
380 }
381 }
382
383 const Value_* fetch(const Index_ i, Value_* const buffer) {
384 const Index_ chosen = my_mapping[i];
385 return my_exts[chosen]->fetch(i - my_cumulative[chosen], buffer);
386 }
387
388private:
389 const std::vector<Index_>& my_cumulative;
390 const std::vector<Index_>& my_mapping;
391 std::vector<std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > > my_exts;
392};
393
394template<typename Value_, typename Index_>
395class MyopicPerpendicularSparse final : public MyopicSparseExtractor<Value_, Index_> {
396public:
397 template<typename ... Args_>
398 MyopicPerpendicularSparse(
399 const std::vector<Index_>& cumulative,
400 const std::vector<Index_>& mapping,
401 const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
402 const bool row,
403 const Args_& ... args
404 ) :
405 my_cumulative(cumulative),
406 my_mapping(mapping)
407 {
408 my_exts.reserve(matrices.size());
409 for (const auto& m : matrices) {
410 my_exts.emplace_back(m->sparse(row, args...));
411 }
412 }
413
414 SparseRange<Value_, Index_> fetch(const Index_ i, Value_* const vbuffer, Index_* const ibuffer) {
415 const Index_ chosen = my_mapping[i];
416 return my_exts[chosen]->fetch(i - my_cumulative[chosen], vbuffer, ibuffer);
417 }
418
419private:
420 const std::vector<Index_>& my_cumulative;
421 const std::vector<Index_>& my_mapping;
422 std::vector<std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > > my_exts;
423};
424
// Distributes the predictions of a global oracle across the submatrices.
// 'chosen' records which submatrix serves each successive prediction, so the
// caller can route each fetch() to the correct submatrix later; 'init' is
// invoked once per submatrix that received at least one prediction, with a
// suitable per-submatrix oracle in that submatrix's local coordinates.
template<typename Index_, class Initialize_>
void initialize_perp_oracular(
    const std::vector<Index_>& cumulative,
    const std::vector<Index_>& mapping,
    const Oracle<Index_>& oracle,
    std::vector<Index_>& chosen,
    Initialize_ init
) {
    const auto ntotal = oracle.total();
    chosen.reserve(ntotal);

    // Accumulates the predictions for one submatrix. Optimistically assumes
    // the predictions form one consecutive run (so the cheaper
    // ConsecutiveOracle can be used); falls back to an explicit vector of
    // predictions on the first gap.
    struct Predictions {
        bool consecutive = true;
        Index_ start = 0;
        Index_ number = 0;
        std::vector<Index_> predictions;

        void add(const Index_ p) {
            if (consecutive) {
                if (number == 0) {
                    // First prediction starts the run.
                    start = p;
                    number = 1;
                    return;
                }
                if (number + start == p) {
                    // Still consecutive; just extend the run.
                    ++number;
                    return;
                }
                // Run is broken; materialize the elements seen so far.
                consecutive = false;
                resize_container_to_Index_size(predictions, number);
                std::iota(predictions.begin(), predictions.end(), start);
            }

            predictions.push_back(p);
        }
    };

    const auto nmats = cumulative.size() - 1;
    auto predictions = create_container_of_Index_size<std::vector<Predictions> >(nmats); // nmats should fit in an Index_, so this call is legal.
    for (I<decltype(ntotal)> i = 0; i < ntotal; ++i) {
        const auto prediction = oracle.get(i);
        const Index_ choice = mapping[prediction];
        chosen.push_back(choice);
        // Store the prediction in the submatrix's local coordinates.
        predictions[choice].add(prediction - cumulative[choice]);
    }

    // Build an oracle for each submatrix that received predictions.
    for (I<decltype(nmats)> x = 0; x < nmats; ++x) {
        auto& current = predictions[x];
        if (current.consecutive) {
            if (current.number) {
                init(x, std::make_shared<ConsecutiveOracle<Index_> >(current.start, current.number));
            }
        } else {
            if (!current.predictions.empty()) {
                init(x, std::make_shared<FixedVectorOracle<Index_> >(std::move(current.predictions)));
            }
        }
    }
}
484
// Oracle-aware dense extraction along the combined dimension: predictions are
// distributed to per-submatrix extractors up-front, and each fetch() consumes
// the next prediction from the recorded routing order.
template<typename Value_, typename Index_>
class OracularPerpendicularDense final : public OracularDenseExtractor<Value_, Index_> {
public:
    template<typename ... Args_>
    OracularPerpendicularDense(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>& mapping,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        const bool row,
        std::shared_ptr<const Oracle<Index_> > ora,
        const Args_& ... args
    ) {
        resize_container_to_Index_size(my_exts, matrices.size()); // number of matrices should fit in an Index_, so this call is allowed.
        initialize_perp_oracular(
            cumulative,
            mapping,
            *ora,
            my_segments,
            [&](const Index_ x, std::shared_ptr<const Oracle<Index_> > subora) -> void {
                my_exts[x] = matrices[x]->dense(row, std::move(subora), args...);
            }
        );
    }

    const Value_* fetch(const Index_ i, Value_* const buffer) {
        // 'my_segments' was filled in prediction order, so the next prediction
        // is served by the submatrix recorded at position 'my_used'.
        const auto chosen = my_segments[my_used];
        const auto output = my_exts[chosen]->fetch(i, buffer);
        ++my_used;
        return output;
    }

private:
    std::vector<Index_> my_segments; // submatrix serving each successive prediction.
    std::vector<std::unique_ptr<OracularDenseExtractor<Value_, Index_> > > my_exts;
    PredictionIndex my_used = 0; // number of predictions consumed so far.
};
521
// Oracle-aware sparse extraction along the combined dimension: predictions are
// distributed to per-submatrix extractors up-front, and each fetch() consumes
// the next prediction from the recorded routing order.
template<typename Value_, typename Index_>
class OracularPerpendicularSparse final : public OracularSparseExtractor<Value_, Index_> {
public:
    template<typename ... Args_>
    OracularPerpendicularSparse(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>& mapping,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        const bool row,
        std::shared_ptr<const Oracle<Index_> > ora,
        const Args_& ... args
    ) {
        resize_container_to_Index_size(my_exts, matrices.size()); // number of matrices should fit in an Index_, so this call is legal.
        initialize_perp_oracular(
            cumulative,
            mapping,
            *ora,
            my_segments,
            [&](const Index_ x, std::shared_ptr<const Oracle<Index_> > subora) -> void {
                my_exts[x] = matrices[x]->sparse(row, std::move(subora), args...);
            }
        );
    }

    SparseRange<Value_, Index_> fetch(Index_ i, Value_* vbuffer, Index_* ibuffer) {
        // 'my_segments' was filled in prediction order, so the next prediction
        // is served by the submatrix recorded at position 'my_used'.
        const auto chosen = my_segments[my_used];
        const auto output = my_exts[chosen]->fetch(i, vbuffer, ibuffer);
        ++my_used;
        return output;
    }

private:
    std::vector<Index_> my_segments; // submatrix serving each successive prediction.
    std::vector<std::unique_ptr<OracularSparseExtractor<Value_, Index_> > > my_exts;
    PredictionIndex my_used = 0; // number of predictions consumed so far.
};
558
559}
573template<typename Value_, typename Index_>
574class DelayedBind final : public Matrix<Value_, Index_> {
575public:
582 DelayedBind(std::vector<std::shared_ptr<const Matrix<Value_, Index_> > > matrices, const bool by_row) :
583 my_matrices(std::move(matrices)), my_by_row(by_row)
584 {
585 auto nmats = my_matrices.size();
586 my_cumulative.reserve(sanisizer::sum<I<decltype(my_cumulative.size())> >(nmats, 1));
587 I<decltype(nmats)> sofar = 0;
588 my_cumulative.push_back(0);
589
590 for (I<decltype(nmats)> i = 0; i < nmats; ++i) {
591 auto& current = my_matrices[i];
592 Index_ primary, secondary;
593 if (my_by_row) {
594 primary = current->nrow();
595 secondary = current->ncol();
596 } else {
597 primary = current->ncol();
598 secondary = current->nrow();
599 }
600
601 if (i == 0) {
602 my_otherdim = secondary;
603 } else if (my_otherdim != secondary) {
604 throw std::runtime_error("all 'matrices' should have the same number of " + (my_by_row ? std::string("columns") : std::string("rows")));
605 }
606
607 // Removing the matrices that don't contribute anything,
608 // so we don't have to deal with their overhead.
609 if (primary > 0) {
610 if (sofar != i) {
611 my_matrices[sofar] = std::move(current);
612 }
613 my_cumulative.push_back(sanisizer::sum<Index_>(attest_for_Index(my_cumulative.back()), attest_for_Index(primary)));
614 ++sofar;
615 }
616 }
617
618 my_matrices.resize(sofar);
619 nmats = sofar;
620
621 // At this point, the number of matrices must be no greater than the
622 // number of rows/columns of the combined matrix (as we've removed all
623 // non-contributing submatrices) and thus should fit into 'Index_';
624 // hence, using Index_ for the mapping should not overflow.
625 my_mapping.reserve(my_cumulative.back());
626 for (I<decltype(nmats)> i = 0; i < nmats; ++i) {
627 my_mapping.insert(my_mapping.end(), (my_by_row ? my_matrices[i]->nrow() : my_matrices[i]->ncol()), i);
628 }
629
630 double denom = 0;
631 for (const auto& x : my_matrices) {
632 const double total = static_cast<double>(x->nrow()) * static_cast<double>(x->ncol());
633 denom += total;
634 my_sparse_prop += total * x->is_sparse_proportion();
635 my_by_row_prop += total * x->prefer_rows_proportion();
636 }
637 if (denom) {
638 my_sparse_prop /= denom;
639 my_by_row_prop /= denom;
640 }
641
642 for (int d = 0; d < 2; ++d) {
643 my_uses_oracle[d] = false;
644 for (const auto& x : my_matrices) {
645 if (x->uses_oracle(d)) {
646 my_uses_oracle[d] = true;
647 break;
648 }
649 }
650 }
651 }
652
656 // Soft-deprecated.
657 DelayedBind(const std::vector<std::shared_ptr<Matrix<Value_, Index_> > >& matrices, const bool by_row) :
658 DelayedBind(std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >(matrices.begin(), matrices.end()), by_row) {}
663private:
664 std::vector<std::shared_ptr<const Matrix<Value_, Index_> > > my_matrices;
665 bool my_by_row;
666
667 Index_ my_otherdim = 0;
668 std::vector<Index_> my_cumulative;
669 std::vector<Index_> my_mapping;
670
671 double my_sparse_prop = 0, my_by_row_prop = 0;
672 std::array<bool, 2> my_uses_oracle;
673
674public:
675 Index_ nrow() const {
676 if (my_by_row) {
677 return my_cumulative.back();
678 } else {
679 return my_otherdim;
680 }
681 }
682
683 Index_ ncol() const {
684 if (my_by_row) {
685 return my_otherdim;
686 } else {
687 return my_cumulative.back();
688 }
689 }
690
691 bool is_sparse() const {
692 return my_sparse_prop > 0.5;
693 }
694
695 double is_sparse_proportion() const {
696 return my_sparse_prop;
697 }
698
699 bool prefer_rows() const {
700 return my_by_row_prop > 0.5;
701 }
702
703 double prefer_rows_proportion() const {
704 return my_by_row_prop;
705 }
706
707 bool uses_oracle(const bool row) const {
708 return my_uses_oracle[row];
709 }
710
711 using Matrix<Value_, Index_>::dense;
712
713 using Matrix<Value_, Index_>::sparse;
714
715 /**********************************
716 ********** Myopic dense **********
717 **********************************/
718public:
719 std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > dense(
720 const bool row,
721 const Options& opt
722 ) const {
723 if (my_matrices.size() == 1) {
724 return my_matrices[0]->dense(row, opt);
725 } else if (row == my_by_row) {
726 return std::make_unique<DelayedBind_internal::MyopicPerpendicularDense<Value_, Index_> >(
727 my_cumulative,
728 my_mapping,
729 my_matrices,
730 row,
731 opt
732 );
733 } else {
734 return std::make_unique<DelayedBind_internal::ParallelDense<false, Value_, Index_> >(
735 my_cumulative,
736 my_mapping,
737 my_matrices,
738 row,
739 false,
740 opt
741 );
742 }
743 }
744
745 std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > dense(
746 const bool row,
747 const Index_ block_start,
748 const Index_ block_length,
749 const Options& opt
750 ) const {
751 if (my_matrices.size() == 1) {
752 return my_matrices[0]->dense(row, block_start, block_length, opt);
753 } else if (row == my_by_row) {
754 return std::make_unique<DelayedBind_internal::MyopicPerpendicularDense<Value_, Index_> >(
755 my_cumulative,
756 my_mapping,
757 my_matrices,
758 row,
759 block_start,
760 block_length,
761 opt
762 );
763 } else {
764 return std::make_unique<DelayedBind_internal::ParallelDense<false, Value_, Index_> >(
765 my_cumulative,
766 my_mapping,
767 my_matrices,
768 row,
769 false,
770 block_start,
771 block_length,
772 opt
773 );
774 }
775 }
776
777 std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > dense(
778 const bool row,
779 VectorPtr<Index_> indices_ptr,
780 const Options& opt
781 ) const {
782 if (my_matrices.size() == 1) {
783 return my_matrices[0]->dense(row, std::move(indices_ptr), opt);
784 } else if (row == my_by_row) {
785 return std::make_unique<DelayedBind_internal::MyopicPerpendicularDense<Value_, Index_> >(
786 my_cumulative,
787 my_mapping,
788 my_matrices,
789 row,
790 std::move(indices_ptr),
791 opt
792 );
793 } else {
794 return std::make_unique<DelayedBind_internal::ParallelDense<false, Value_, Index_> >(
795 my_cumulative,
796 my_mapping,
797 my_matrices,
798 row,
799 false,
800 std::move(indices_ptr),
801 opt
802 );
803 }
804 }
805
806 /***********************************
807 ********** Myopic sparse **********
808 ***********************************/
809private:
810 std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(
811 const bool row,
812 const Options& opt
813 ) const {
814 if (my_matrices.size() == 1) {
815 return my_matrices[0]->sparse(row, opt);
816 } else if (row == my_by_row) {
817 return std::make_unique<DelayedBind_internal::MyopicPerpendicularSparse<Value_, Index_> >(
818 my_cumulative,
819 my_mapping,
820 my_matrices,
821 row,
822 opt
823 );
824 } else {
825 return std::make_unique<DelayedBind_internal::ParallelFullSparse<false, Value_, Index_> >(
826 my_cumulative,
827 my_mapping,
828 my_matrices,
829 row,
830 false,
831 opt
832 );
833 }
834 }
835
836 std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(
837 const bool row,
838 const Index_ block_start,
839 const Index_ block_length,
840 const Options& opt
841 ) const {
842 if (my_matrices.size() == 1) {
843 return my_matrices[0]->sparse(row, block_start, block_length, opt);
844 } else if (row == my_by_row) {
845 return std::make_unique<DelayedBind_internal::MyopicPerpendicularSparse<Value_, Index_> >(
846 my_cumulative,
847 my_mapping,
848 my_matrices,
849 row,
850 block_start,
851 block_length,
852 opt
853 );
854 } else {
855 return std::make_unique<DelayedBind_internal::ParallelBlockSparse<false, Value_, Index_> >(
856 my_cumulative,
857 my_mapping,
858 my_matrices,
859 row,
860 false,
861 block_start,
862 block_length,
863 opt
864 );
865 }
866 }
867
868 std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(
869 const bool row,
870 VectorPtr<Index_> indices_ptr,
871 const Options& opt
872 ) const {
873 if (my_matrices.size() == 1) {
874 return my_matrices[0]->sparse(row, std::move(indices_ptr), opt);
875 } else if (row == my_by_row) {
876 return std::make_unique<DelayedBind_internal::MyopicPerpendicularSparse<Value_, Index_> >(
877 my_cumulative,
878 my_mapping,
879 my_matrices,
880 row,
881 std::move(indices_ptr),
882 opt
883 );
884 } else {
885 return std::make_unique<DelayedBind_internal::ParallelIndexSparse<false, Value_, Index_> >(
886 my_cumulative,
887 my_mapping,
888 my_matrices,
889 row,
890 false,
891 std::move(indices_ptr),
892 opt
893 );
894 }
895 }
896
897 /************************************
898 ********** Oracular dense **********
899 ************************************/
900public:
901 std::unique_ptr<OracularDenseExtractor<Value_, Index_> > dense(
902 const bool row,
903 std::shared_ptr<const Oracle<Index_> > oracle,
904 const Options& opt
905 ) const {
906 if (my_matrices.size() == 1) {
907 return my_matrices[0]->dense(row, std::move(oracle), opt);
908 } else if (!my_uses_oracle[row]) {
909 return std::make_unique<PseudoOracularDenseExtractor<Value_, Index_> >(
910 std::move(oracle),
911 dense(row, opt)
912 );
913 } else if (row == my_by_row) {
914 return std::make_unique<DelayedBind_internal::OracularPerpendicularDense<Value_, Index_> >(
915 my_cumulative,
916 my_mapping,
917 my_matrices,
918 row,
919 std::move(oracle),
920 opt
921 );
922 } else {
923 return std::make_unique<DelayedBind_internal::ParallelDense<true, Value_, Index_> >(
924 my_cumulative,
925 my_mapping,
926 my_matrices,
927 row,
928 std::move(oracle),
929 opt
930 );
931 }
932 }
933
934 std::unique_ptr<OracularDenseExtractor<Value_, Index_> > dense(
935 const bool row,
936 std::shared_ptr<const Oracle<Index_> > oracle,
937 const Index_ block_start,
938 const Index_ block_length,
939 const Options& opt
940 ) const {
941 if (my_matrices.size() == 1) {
942 return my_matrices[0]->dense(row, std::move(oracle), block_start, block_length, opt);
943 } else if (!my_uses_oracle[row]) {
944 return std::make_unique<PseudoOracularDenseExtractor<Value_, Index_> >(
945 std::move(oracle),
946 dense(row, block_start, block_length, opt)
947 );
948 } else if (row == my_by_row) {
949 return std::make_unique<DelayedBind_internal::OracularPerpendicularDense<Value_, Index_> >(
950 my_cumulative,
951 my_mapping,
952 my_matrices,
953 row,
954 std::move(oracle),
955 block_start,
956 block_length,
957 opt
958 );
959 } else {
960 return std::make_unique<DelayedBind_internal::ParallelDense<true, Value_, Index_> >(
961 my_cumulative,
962 my_mapping,
963 my_matrices,
964 row,
965 std::move(oracle),
966 block_start,
967 block_length,
968 opt
969 );
970 }
971 }
972
973 std::unique_ptr<OracularDenseExtractor<Value_, Index_> > dense(
974 const bool row,
975 std::shared_ptr<const Oracle<Index_> > oracle,
976 VectorPtr<Index_> indices_ptr,
977 const Options& opt
978 ) const {
979 if (my_matrices.size() == 1) {
980 return my_matrices[0]->dense(row, std::move(oracle), std::move(indices_ptr), opt);
981 } else if (!my_uses_oracle[row]) {
982 return std::make_unique<PseudoOracularDenseExtractor<Value_, Index_> >(
983 std::move(oracle),
984 dense(row, std::move(indices_ptr), opt)
985 );
986 } else if (row == my_by_row) {
987 return std::make_unique<DelayedBind_internal::OracularPerpendicularDense<Value_, Index_> >(
988 my_cumulative,
989 my_mapping,
990 my_matrices,
991 row,
992 std::move(oracle),
993 std::move(indices_ptr),
994 opt
995 );
996 } else {
997 return std::make_unique<DelayedBind_internal::ParallelDense<true, Value_, Index_> >(
998 my_cumulative,
999 my_mapping,
1000 my_matrices,
1001 row,
1002 std::move(oracle),
1003 std::move(indices_ptr),
1004 opt
1005 );
1006 }
1007 }
1008
1009 /*************************************
1010 ********** Oracular sparse **********
1011 *************************************/
1012private:
1013 std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(
1014 const bool row,
1015 std::shared_ptr<const Oracle<Index_> > oracle,
1016 const Options& opt
1017 ) const {
1018 if (my_matrices.size() == 1) {
1019 return my_matrices[0]->sparse(row, std::move(oracle), opt);
1020 } else if (!my_uses_oracle[row]) {
1021 return std::make_unique<PseudoOracularSparseExtractor<Value_, Index_> >(
1022 std::move(oracle),
1023 sparse(row, opt)
1024 );
1025 } else if (row == my_by_row) {
1026 return std::make_unique<DelayedBind_internal::OracularPerpendicularSparse<Value_, Index_> >(
1027 my_cumulative,
1028 my_mapping,
1029 my_matrices,
1030 row,
1031 std::move(oracle),
1032 opt
1033 );
1034 } else {
1035 return std::make_unique<DelayedBind_internal::ParallelFullSparse<true, Value_, Index_> >(
1036 my_cumulative,
1037 my_mapping,
1038 my_matrices,
1039 row,
1040 std::move(oracle),
1041 opt
1042 );
1043 }
1044 }
1045
1046 std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(
1047 const bool row,
1048 std::shared_ptr<const Oracle<Index_> > oracle,
1049 const Index_ block_start,
1050 const Index_ block_length,
1051 const Options& opt
1052 ) const {
1053 if (my_matrices.size() == 1) {
1054 return my_matrices[0]->sparse(row, std::move(oracle), block_start, block_length, opt);
1055 } else if (!my_uses_oracle[row]) {
1056 return std::make_unique<PseudoOracularSparseExtractor<Value_, Index_> >(
1057 std::move(oracle),
1058 sparse(row, block_start, block_length, opt)
1059 );
1060 } else if (row == my_by_row) {
1061 return std::make_unique<DelayedBind_internal::OracularPerpendicularSparse<Value_, Index_> >(
1062 my_cumulative,
1063 my_mapping,
1064 my_matrices,
1065 row,
1066 std::move(oracle),
1067 block_start,
1068 block_length,
1069 opt
1070 );
1071 } else {
1072 return std::make_unique<DelayedBind_internal::ParallelBlockSparse<true, Value_, Index_> >(
1073 my_cumulative,
1074 my_mapping,
1075 my_matrices,
1076 row,
1077 std::move(oracle),
1078 block_start,
1079 block_length,
1080 opt
1081 );
1082 }
1083 }
1084
1085 std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(
1086 const bool row,
1087 std::shared_ptr<const Oracle<Index_> > oracle,
1088 VectorPtr<Index_> indices_ptr,
1089 const Options& opt
1090 ) const {
1091 if (my_matrices.size() == 1) {
1092 return my_matrices[0]->sparse(row, std::move(oracle), std::move(indices_ptr), opt);
1093 } else if (!my_uses_oracle[row]) {
1094 return std::make_unique<PseudoOracularSparseExtractor<Value_, Index_> >(
1095 std::move(oracle),
1096 sparse(row, std::move(indices_ptr), opt)
1097 );
1098 } else if (row == my_by_row) {
1099 return std::make_unique<DelayedBind_internal::OracularPerpendicularSparse<Value_, Index_> >(
1100 my_cumulative,
1101 my_mapping,
1102 my_matrices,
1103 row,
1104 std::move(oracle),
1105 std::move(indices_ptr),
1106 opt
1107 );
1108 } else {
1109 return std::make_unique<DelayedBind_internal::ParallelIndexSparse<true, Value_, Index_> >(
1110 my_cumulative,
1111 my_mapping,
1112 my_matrices,
1113 row,
1114 std::move(oracle),
1115 std::move(indices_ptr),
1116 opt
1117 );
1118 }
1119 }
1120};
1121
1125// These methods are soft-deprecated: kept around for back-compatibility only.
1126template<typename Value_, typename Index_>
1127std::shared_ptr<Matrix<Value_, Index_> > make_DelayedBind(std::vector<std::shared_ptr<const Matrix<Value_, Index_> > > matrices, bool row) {
1128 return std::shared_ptr<Matrix<Value_, Index_> >(new DelayedBind<Value_, Index_>(std::move(matrices), row));
1129}
1130
1131template<typename Value_, typename Index_>
1132std::shared_ptr<Matrix<Value_, Index_> > make_DelayedBind(std::vector<std::shared_ptr<Matrix<Value_, Index_> > > matrices, bool row) {
1133 return std::shared_ptr<Matrix<Value_, Index_> >(new DelayedBind<Value_, Index_>(std::move(matrices), row));
1134}
1135
1136template<int margin_, typename Value_, typename Index_>
1137std::shared_ptr<Matrix<Value_, Index_> > make_DelayedBind(std::vector<std::shared_ptr<const Matrix<Value_, Index_> > > matrices) {
1138 return make_DelayedBind(std::move(matrices), margin_ == 0);
1139}
1140
1141template<int margin_, typename Value_, typename Index_>
1142std::shared_ptr<Matrix<Value_, Index_> > make_DelayedBind(std::vector<std::shared_ptr<Matrix<Value_, Index_> > > matrices) {
1143 return make_DelayedBind(std::move(matrices), margin_ == 0);
1144}
1149}
1150
1151#endif
Iterate across consecutive elements of the target dimension.
Iterate across a fixed sequence of elements on the target dimension.
Convert index type to container size.
Virtual class for a matrix of some numeric type.
Mimic the oracle-aware extractor interface.
Delayed combining of a matrix.
Definition DelayedBind.hpp:574
bool uses_oracle(const bool row) const
Definition DelayedBind.hpp:707
std::unique_ptr< OracularDenseExtractor< Value_, Index_ > > dense(const bool row, std::shared_ptr< const Oracle< Index_ > > oracle, VectorPtr< Index_ > indices_ptr, const Options &opt) const
Definition DelayedBind.hpp:973
DelayedBind(std::vector< std::shared_ptr< const Matrix< Value_, Index_ > > > matrices, const bool by_row)
Definition DelayedBind.hpp:582
std::unique_ptr< MyopicDenseExtractor< Value_, Index_ > > dense(const bool row, const Options &opt) const
Definition DelayedBind.hpp:719
double is_sparse_proportion() const
Definition DelayedBind.hpp:695
std::unique_ptr< MyopicDenseExtractor< Value_, Index_ > > dense(const bool row, const Index_ block_start, const Index_ block_length, const Options &opt) const
Definition DelayedBind.hpp:745
Index_ nrow() const
Definition DelayedBind.hpp:675
std::unique_ptr< MyopicDenseExtractor< Value_, Index_ > > dense(const bool row, VectorPtr< Index_ > indices_ptr, const Options &opt) const
Definition DelayedBind.hpp:777
bool is_sparse() const
Definition DelayedBind.hpp:691
std::unique_ptr< OracularDenseExtractor< Value_, Index_ > > dense(const bool row, std::shared_ptr< const Oracle< Index_ > > oracle, const Options &opt) const
Definition DelayedBind.hpp:901
Index_ ncol() const
Definition DelayedBind.hpp:683
bool prefer_rows() const
Definition DelayedBind.hpp:699
double prefer_rows_proportion() const
Definition DelayedBind.hpp:703
std::unique_ptr< OracularDenseExtractor< Value_, Index_ > > dense(const bool row, std::shared_ptr< const Oracle< Index_ > > oracle, const Index_ block_start, const Index_ block_length, const Options &opt) const
Definition DelayedBind.hpp:934
Virtual class for a matrix.
Definition Matrix.hpp:59
Predict future access requests on the target dimension.
Definition Oracle.hpp:29
Copy data from one buffer to another.
Flexible representations for matrix data.
Definition Extractor.hpp:15
std::shared_ptr< const std::vector< Index_ > > VectorPtr
Definition Matrix.hpp:26
void resize_container_to_Index_size(Container_ &container, const Index_ x, Args_ &&... args)
Definition Index_to_container.hpp:99
typename std::conditional< oracle_, OracularDenseExtractor< Value_, Index_ >, MyopicDenseExtractor< Value_, Index_ > >::type DenseExtractor
Definition Extractor.hpp:273
Value_ * copy_n(const Value_ *const input, const Size_ n, Value_ *const output)
Definition copy.hpp:37
std::size_t PredictionIndex
Definition Oracle.hpp:18
Templated construction of a new extractor.
Options for accessing data from a Matrix instance.
Definition Options.hpp:30