tatami
C++ API for different matrix representations
Loading...
Searching...
No Matches
DelayedBind.hpp
Go to the documentation of this file.
1#ifndef TATAMI_DELAYED_BIND_HPP
2#define TATAMI_DELAYED_BIND_HPP
3
4#include "../base/Matrix.hpp"
9#include "../utils/copy.hpp"
10#include "../utils/Index_to_container.hpp"
11
12#include <numeric>
13#include <algorithm>
14#include <memory>
15#include <array>
16#include <type_traits>
17#include <cstddef>
18
27namespace tatami {
28
32namespace DelayedBind_internal {
33
34/**********************
35 *** Dense parallel ***
36 **********************/
37
template<typename Index_, class Initialize_>
Index_ initialize_parallel_block(
    const std::vector<Index_>& cumulative,
    const std::vector<Index_>& mapping,
    const Index_ block_start,
    const Index_ block_length,
    const Initialize_ init
) {
    // Decompose a contiguous block on the combined dimension into one
    // (start, length) pair per overlapping submatrix, invoking 'init' for
    // each overlap in submatrix order. Returns the index of the first
    // overlapping submatrix.
    if (mapping.empty()) {
        // No submatrices contribute anything, nothing to do.
        return 0;
    }

    const Index_ first_matrix = mapping[block_start];
    const Index_ limit = block_start + block_length;
    const Index_ num_matrices = cumulative.size() - 1; // Number of matrices is guaranteed to fit in Index_, see reasoning in the DelayedBind constructor.

    Index_ local_start = block_start - cumulative[first_matrix];
    for (Index_ mat = first_matrix; mat < num_matrices; ++mat) {
        const Index_ boundary = cumulative[mat + 1];
        const bool spills_over = (limit > boundary);
        // Clamp the block's end to this submatrix's boundary, in local coordinates.
        const Index_ local_end = (spills_over ? boundary : limit) - cumulative[mat];
        init(mat, local_start, local_end - local_start);
        if (!spills_over) {
            break;
        }
        local_start = 0; // subsequent submatrices are entered at their first element.
    }

    return first_matrix;
}
68
template<typename Index_, class Initialize_>
void initialize_parallel_index(
    const std::vector<Index_>& cumulative,
    const std::vector<Index_>& mapping,
    const std::vector<Index_>& indices,
    const Initialize_ init
) {
    // Partition a sorted set of combined-dimension indices by submatrix,
    // invoking 'init' once per submatrix that owns at least one index,
    // with the owned indices translated into that submatrix's coordinates.
    const Index_ num_indices = indices.size();
    Index_ position = 0;

    while (position < num_indices) {
        const Index_ leading = indices[position];
        const Index_ matrix = mapping[leading];
        const Index_ offset = cumulative[matrix];
        const Index_ ceiling = cumulative[matrix + 1];

        // Collect the run of indices that fall inside this submatrix.
        auto local_ptr = std::make_shared<std::vector<Index_> >();
        local_ptr->push_back(leading - offset);
        ++position;
        for (; position < num_indices && indices[position] < ceiling; ++position) {
            local_ptr->push_back(indices[position] - offset);
        }

        init(matrix, std::move(local_ptr));
    }
}
96
// Dense extraction perpendicular to the binding dimension: a single element of
// the target dimension runs through every submatrix, so we hold one extractor
// per (contributing part of a) submatrix and concatenate their outputs.
template<bool oracle_, typename Value_, typename Index_>
class ParallelDense final : public DenseExtractor<oracle_, Value_, Index_> {
public:
    // Full-extent constructor: every submatrix contributes its entire
    // non-target dimension.
    ParallelDense(
        const std::vector<Index_>&, // Not used, just provided for consistency with other constructors.
        const std::vector<Index_>&,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        const bool row,
        MaybeOracle<oracle_, Index_> oracle,
        const Options& opt
    ) {
        my_exts.reserve(matrices.size());
        my_count.reserve(matrices.size());
        for (const auto& m : matrices) {
            // Record how many values each submatrix will deposit per fetch().
            my_count.emplace_back(row ? m->ncol() : m->nrow());
            my_exts.emplace_back(new_extractor<false, oracle_>(m.get(), row, oracle, opt));
        }
    }

    // Contiguous-block constructor: only the submatrices overlapping
    // [block_start, block_start + block_length) get an extractor, each
    // restricted to its overlap with the block.
    ParallelDense(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>& mapping,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        const bool row,
        MaybeOracle<oracle_, Index_> oracle,
        const Index_ block_start,
        const Index_ block_length,
        const Options& opt
    ) {
        my_exts.reserve(matrices.size());
        my_count.reserve(matrices.size());
        initialize_parallel_block(
            cumulative,
            mapping,
            block_start,
            block_length,
            [&](const Index_ i, const Index_ sub_block_start, const Index_ sub_block_length) -> void {
                my_count.emplace_back(sub_block_length);
                my_exts.emplace_back(new_extractor<false, oracle_>(matrices[i].get(), row, oracle, sub_block_start, sub_block_length, opt));
            }
        );
    }

    // Indexed-subset constructor: the requested indices are split by
    // submatrix; only submatrices owning at least one index get an extractor.
    ParallelDense(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>& mapping,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        const bool row,
        MaybeOracle<oracle_, Index_> oracle,
        VectorPtr<Index_> indices_ptr,
        const Options& opt
    ) {
        my_exts.reserve(matrices.size());
        my_count.reserve(matrices.size());
        initialize_parallel_index(
            cumulative,
            mapping,
            *indices_ptr,
            [&](const Index_ i, VectorPtr<Index_> sub_indices_ptr) -> void {
                my_count.emplace_back(sub_indices_ptr->size());
                my_exts.emplace_back(new_extractor<false, oracle_>(matrices[i].get(), row, oracle, std::move(sub_indices_ptr), opt));
            }
        );
    }

public:
    // Fetches element 'i' from every extractor and lays the results out
    // contiguously in 'buffer', in submatrix order.
    const Value_* fetch(const Index_ i, Value_* const buffer) {
        auto copy = buffer;
        const Index_ nmats = my_count.size();
        for (Index_ x = 0; x < nmats; ++x) {
            const auto ptr = my_exts[x]->fetch(i, copy);
            const auto num = my_count[x];
            // The extractor may have returned a pointer to its own internal
            // storage rather than filling 'copy', so copy the values across.
            copy_n(ptr, num, copy);
            copy += num;
        }
        return buffer;
    }

private:
    std::vector<std::unique_ptr<DenseExtractor<oracle_, Value_, Index_> > > my_exts;
    std::vector<Index_> my_count; // number of values contributed by each extractor.
};
179
180/***********************
181 *** Sparse parallel ***
182 ***********************/
183
184template<bool oracle_, typename Value_, typename Index_>
185class ParallelFullSparse final : public SparseExtractor<oracle_, Value_, Index_> {
186public:
187 ParallelFullSparse(
188 const std::vector<Index_>& cumulative,
189 const std::vector<Index_>&, // not actually used, just provided for consistency with the other constructors.
190 const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
191 const bool row,
192 MaybeOracle<oracle_, Index_> oracle,
193 const Options& opt
194 ) :
195 my_cumulative(cumulative),
196 my_needs_value(opt.sparse_extract_value),
197 my_needs_index(opt.sparse_extract_index)
198 {
199 my_exts.reserve(matrices.size());
200 for (const auto& m : matrices) {
201 my_exts.emplace_back(new_extractor<true, oracle_>(m.get(), row, oracle, opt));
202 }
203 }
204
205 SparseRange<Value_, Index_> fetch(const Index_ i, Value_* const value_buffer, Index_* const index_buffer) {
206 auto vcopy = value_buffer;
207 auto icopy = index_buffer;
208 Index_ accumulated = 0;
209
210 const Index_ nmats = my_exts.size();
211 for (Index_ x = 0; x < nmats; ++x) {
212 const auto range = my_exts[x]->fetch(i, vcopy, icopy);
213 accumulated += range.number;
214 if (my_needs_value) {
215 copy_n(range.value, range.number, vcopy);
216 vcopy += range.number;
217 }
218 if (my_needs_index) {
219 const auto offset = my_cumulative[x];
220 for (Index_ y = 0; y < range.number; ++y) {
221 icopy[y] = range.index[y] + offset;
222 }
223 icopy += range.number;
224 }
225 }
226
227 return SparseRange<Value_, Index_>(accumulated, (my_needs_value ? value_buffer : NULL), (my_needs_index ? index_buffer : NULL));
228 }
229
230private:
231 const std::vector<Index_>& my_cumulative;
232 bool my_needs_value, my_needs_index;
233 std::vector<std::unique_ptr<SparseExtractor<oracle_, Value_, Index_> > > my_exts;
234};
235
// Sparse extraction perpendicular to the binding dimension over a contiguous
// block: only the submatrices overlapping the block get extractors.
template<bool oracle_, typename Value_, typename Index_>
class ParallelBlockSparse final : public SparseExtractor<oracle_, Value_, Index_> {
public:
    ParallelBlockSparse(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>& mapping,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        const bool row,
        MaybeOracle<oracle_, Index_> oracle,
        const Index_ block_start,
        const Index_ block_length,
        const Options& opt
    ) :
        my_cumulative(cumulative),
        my_needs_value(opt.sparse_extract_value),
        my_needs_index(opt.sparse_extract_index)
    {
        my_exts.reserve(matrices.size());
        // Remember the first overlapping submatrix so that extractor 'x'
        // can be matched back to submatrix 'x + my_start_matrix' in fetch().
        my_start_matrix = initialize_parallel_block(
            my_cumulative,
            mapping,
            block_start,
            block_length,
            [&](const Index_ i, const Index_ sub_block_start, const Index_ sub_block_length) -> void {
                my_exts.emplace_back(new_extractor<true, oracle_>(matrices[i].get(), row, oracle, sub_block_start, sub_block_length, opt));
            }
        );
    }

    SparseRange<Value_, Index_> fetch(const Index_ i, Value_* const value_buffer, Index_* const index_buffer) {
        auto vcopy = value_buffer;
        auto icopy = index_buffer;
        Index_ count = 0;

        const Index_ nmats = my_exts.size();
        for (Index_ x = 0; x < nmats; ++x) {
            const auto range = my_exts[x]->fetch(i, vcopy, icopy);
            count += range.number;
            if (my_needs_value) {
                // Extractor output may not be in 'vcopy' already, so copy it in.
                copy_n(range.value, range.number, vcopy);
                vcopy += range.number;
            }
            if (my_needs_index) {
                // Shift submatrix-local indices onto the combined dimension;
                // extractors start at 'my_start_matrix', hence the offset lookup.
                const Index_ offset = my_cumulative[x + my_start_matrix];
                for (Index_ y = 0; y < range.number; ++y) {
                    icopy[y] = range.index[y] + offset;
                }
                icopy += range.number;
            }
        }

        return SparseRange<Value_, Index_>(count, (my_needs_value ? value_buffer : NULL), (my_needs_index ? index_buffer : NULL));
    }

private:
    const std::vector<Index_>& my_cumulative;
    bool my_needs_value, my_needs_index;
    std::vector<std::unique_ptr<SparseExtractor<oracle_, Value_, Index_> > > my_exts;
    Index_ my_start_matrix; // index of the first submatrix overlapping the block.
};
296
// Sparse extraction perpendicular to the binding dimension for an indexed
// subset: submatrices owning none of the requested indices get no extractor,
// so we also record which submatrix each extractor came from.
template<bool oracle_, typename Value_, typename Index_>
class ParallelIndexSparse final : public SparseExtractor<oracle_, Value_, Index_> {
public:
    ParallelIndexSparse(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>& mapping,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        const bool row,
        MaybeOracle<oracle_, Index_> oracle,
        VectorPtr<Index_> indices_ptr,
        const Options& opt
    ) :
        my_cumulative(cumulative),
        my_needs_value(opt.sparse_extract_value),
        my_needs_index(opt.sparse_extract_index)
    {
        my_exts.reserve(matrices.size());
        my_which_matrix.reserve(matrices.size());
        initialize_parallel_index(
            my_cumulative,
            mapping,
            *indices_ptr,
            [&](const Index_ i, VectorPtr<Index_> sub_indices_ptr) -> void {
                // Track the originating submatrix of each extractor, as
                // submatrices without requested indices are skipped entirely.
                my_which_matrix.emplace_back(i);
                my_exts.emplace_back(new_extractor<true, oracle_>(matrices[i].get(), row, oracle, std::move(sub_indices_ptr), opt));
            }
        );
    }

    SparseRange<Value_, Index_> fetch(const Index_ i, Value_* const value_buffer, Index_* const index_buffer) {
        auto vcopy = value_buffer;
        auto icopy = index_buffer;
        Index_ count = 0;

        const Index_ nmats = my_which_matrix.size();
        for (Index_ x = 0; x < nmats; ++x) {
            const auto range = my_exts[x]->fetch(i, vcopy, icopy);
            count += range.number;
            if (my_needs_value) {
                // Extractor output may not be in 'vcopy' already, so copy it in.
                copy_n(range.value, range.number, vcopy);
                vcopy += range.number;
            }

            if (my_needs_index) {
                // Shift submatrix-local indices onto the combined dimension,
                // using the recorded submatrix for this extractor.
                const Index_ offset = my_cumulative[my_which_matrix[x]];
                for (Index_ y = 0; y < range.number; ++y) {
                    icopy[y] = range.index[y] + offset;
                }
                icopy += range.number;
            }
        }

        return SparseRange<Value_, Index_>(count, (my_needs_value ? value_buffer : NULL), (my_needs_index ? index_buffer : NULL));
    }

private:
    const std::vector<Index_>& my_cumulative;
    bool my_needs_value, my_needs_index;
    std::vector<std::unique_ptr<SparseExtractor<oracle_, Value_, Index_> > > my_exts;
    std::vector<Index_> my_which_matrix; // submatrix index for each entry of my_exts.
};
358
359/*********************
360 *** Perpendicular ***
361 *********************/
362
363template<typename Value_, typename Index_>
364class MyopicPerpendicularDense final : public MyopicDenseExtractor<Value_, Index_> {
365public:
366 template<typename ... Args_>
367 MyopicPerpendicularDense(
368 const std::vector<Index_>& cumulative,
369 const std::vector<Index_>& mapping,
370 const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
371 const bool row,
372 const Args_& ... args
373 ) :
374 my_cumulative(cumulative),
375 my_mapping(mapping)
376 {
377 my_exts.reserve(matrices.size());
378 for (const auto& m : matrices) {
379 my_exts.emplace_back(m->dense(row, args...));
380 }
381 }
382
383 const Value_* fetch(const Index_ i, Value_* const buffer) {
384 const Index_ chosen = my_mapping[i];
385 return my_exts[chosen]->fetch(i - my_cumulative[chosen], buffer);
386 }
387
388private:
389 const std::vector<Index_>& my_cumulative;
390 const std::vector<Index_>& my_mapping;
391 std::vector<std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > > my_exts;
392};
393
394template<typename Value_, typename Index_>
395class MyopicPerpendicularSparse final : public MyopicSparseExtractor<Value_, Index_> {
396public:
397 template<typename ... Args_>
398 MyopicPerpendicularSparse(
399 const std::vector<Index_>& cumulative,
400 const std::vector<Index_>& mapping,
401 const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
402 const bool row,
403 const Args_& ... args
404 ) :
405 my_cumulative(cumulative),
406 my_mapping(mapping)
407 {
408 my_exts.reserve(matrices.size());
409 for (const auto& m : matrices) {
410 my_exts.emplace_back(m->sparse(row, args...));
411 }
412 }
413
414 SparseRange<Value_, Index_> fetch(const Index_ i, Value_* const vbuffer, Index_* const ibuffer) {
415 const Index_ chosen = my_mapping[i];
416 return my_exts[chosen]->fetch(i - my_cumulative[chosen], vbuffer, ibuffer);
417 }
418
419private:
420 const std::vector<Index_>& my_cumulative;
421 const std::vector<Index_>& my_mapping;
422 std::vector<std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > > my_exts;
423};
424
// Splits a global oracle's prediction stream into one sub-oracle per
// submatrix. 'chosen' records, for each successive prediction, which
// submatrix it falls into, so that fetch() can later replay the stream.
// 'init' is invoked once per submatrix that receives any predictions.
template<typename Index_, class Initialize_>
void initialize_perp_oracular(
    const std::vector<Index_>& cumulative,
    const std::vector<Index_>& mapping,
    const Oracle<Index_>& oracle,
    std::vector<Index_>& chosen,
    Initialize_ init
) {
    const auto ntotal = oracle.total();
    chosen.reserve(ntotal);

    // Per-submatrix accumulator: starts by assuming the predictions form a
    // consecutive run (cheap to represent); falls back to an explicit vector
    // on the first non-consecutive prediction.
    struct Predictions {
        bool consecutive = true;
        Index_ start = 0;
        Index_ number = 0;
        std::vector<Index_> predictions;

        void add(const Index_ p) {
            if (consecutive) {
                if (number == 0) {
                    start = p;
                    number = 1;
                    return;
                }
                if (number + start == p) {
                    ++number;
                    return;
                }
                // Run is broken; materialize what we have so far and switch
                // to explicit storage from here on.
                consecutive = false;
                resize_container_to_Index_size(predictions, number);
                std::iota(predictions.begin(), predictions.end(), start);
            }

            predictions.push_back(p);
        }
    };

    const auto nmats = cumulative.size() - 1;
    auto predictions = create_container_of_Index_size<std::vector<Predictions> >(nmats); // nmats should fit in an Index_, so this call is legal.
    for (I<decltype(ntotal)> i = 0; i < ntotal; ++i) {
        const auto prediction = oracle.get(i);
        const Index_ choice = mapping[prediction];
        chosen.push_back(choice);
        // Store the prediction in the owning submatrix's local coordinates.
        predictions[choice].add(prediction - cumulative[choice]);
    }

    for (I<decltype(nmats)> x = 0; x < nmats; ++x) {
        auto& current = predictions[x];
        if (current.consecutive) {
            if (current.number) {
                init(x, std::make_shared<ConsecutiveOracle<Index_> >(current.start, current.number));
            }
        } else {
            if (!current.predictions.empty()) {
                init(x, std::make_shared<FixedVectorOracle<Index_> >(std::move(current.predictions)));
            }
        }
    }
}
484
// Oracle-aware dense extraction along the combined dimension: the global
// prediction stream is split into per-submatrix sub-oracles up front, and
// each fetch is routed to the submatrix owning the next prediction.
template<typename Value_, typename Index_>
class OracularPerpendicularDense final : public OracularDenseExtractor<Value_, Index_> {
public:
    template<typename ... Args_>
    OracularPerpendicularDense(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>& mapping,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        const bool row,
        std::shared_ptr<const Oracle<Index_> > ora,
        const Args_& ... args
    ) {
        resize_container_to_Index_size(my_exts, matrices.size()); // number of matrices should fit in an Index_, so this call is allowed.
        initialize_perp_oracular(
            cumulative,
            mapping,
            *ora,
            my_segments,
            [&](const Index_ x, std::shared_ptr<const Oracle<Index_> > subora) -> void {
                // Only submatrices that receive predictions get an extractor;
                // the rest remain null and are never touched by fetch().
                my_exts[x] = matrices[x]->dense(row, std::move(subora), args...);
            }
        );
    }

    const Value_* fetch(const Index_ i, Value_* const buffer) {
        // Replay the prediction stream: my_segments tells us which submatrix
        // the next prediction lands on; 'i' is forwarded as-is.
        const auto chosen = my_segments[my_used];
        const auto output = my_exts[chosen]->fetch(i, buffer);
        ++my_used;
        return output;
    }

private:
    std::vector<Index_> my_segments; // owning submatrix for each successive prediction.
    std::vector<std::unique_ptr<OracularDenseExtractor<Value_, Index_> > > my_exts;
    PredictionIndex my_used = 0; // position in the prediction stream.
};
521
// Sparse analogue of OracularPerpendicularDense: split the oracle's
// predictions across submatrices, then route each fetch to the submatrix
// owning the next prediction.
template<typename Value_, typename Index_>
class OracularPerpendicularSparse final : public OracularSparseExtractor<Value_, Index_> {
public:
    template<typename ... Args_>
    OracularPerpendicularSparse(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>& mapping,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        const bool row,
        std::shared_ptr<const Oracle<Index_> > ora,
        const Args_& ... args
    ) {
        resize_container_to_Index_size(my_exts, matrices.size()); // number of matrices should fit in an Index_, so this call is legal.
        initialize_perp_oracular(
            cumulative,
            mapping,
            *ora,
            my_segments,
            [&](const Index_ x, std::shared_ptr<const Oracle<Index_> > subora) -> void {
                // Only submatrices that receive predictions get an extractor;
                // the rest remain null and are never touched by fetch().
                my_exts[x] = matrices[x]->sparse(row, std::move(subora), args...);
            }
        );
    }

    SparseRange<Value_, Index_> fetch(Index_ i, Value_* vbuffer, Index_* ibuffer) {
        // Replay the prediction stream, see OracularPerpendicularDense::fetch.
        const auto chosen = my_segments[my_used];
        const auto output = my_exts[chosen]->fetch(i, vbuffer, ibuffer);
        ++my_used;
        return output;
    }

private:
    std::vector<Index_> my_segments; // owning submatrix for each successive prediction.
    std::vector<std::unique_ptr<OracularSparseExtractor<Value_, Index_> > > my_exts;
    PredictionIndex my_used = 0; // position in the prediction stream.
};
558
559}
/**
 * @brief Delayed combining of multiple matrices along one dimension.
 *
 * Rows (or columns) of the supplied matrices are stacked without copying any
 * data. Extraction requests are dispatched in one of three ways:
 * - a single remaining submatrix is used directly;
 * - requests along the combined dimension ("perpendicular") are forwarded to
 *   the submatrix owning the requested element;
 * - requests along the other dimension ("parallel") are split across all
 *   submatrices and the results concatenated.
 *
 * @tparam Value_ Type of matrix value.
 * @tparam Index_ Type of row/column index.
 */
template<typename Value_, typename Index_>
class DelayedBind final : public Matrix<Value_, Index_> {
public:
    /**
     * @param matrices Matrices to combine; all must have the same number of
     * columns (if `by_row = true`) or rows (otherwise).
     * @param by_row Whether to combine by row, i.e., stack vertically.
     */
    DelayedBind(std::vector<std::shared_ptr<const Matrix<Value_, Index_> > > matrices, const bool by_row) :
        my_matrices(std::move(matrices)), my_by_row(by_row)
    {
        auto nmats = my_matrices.size();
        my_cumulative.reserve(sanisizer::sum<I<decltype(my_cumulative.size())> >(nmats, 1));
        I<decltype(nmats)> sofar = 0;
        my_cumulative.push_back(0);

        for (I<decltype(nmats)> i = 0; i < nmats; ++i) {
            auto& current = my_matrices[i];
            // 'primary' is the extent along the combined dimension,
            // 'secondary' is the extent that must agree across submatrices.
            Index_ primary, secondary;
            if (my_by_row) {
                primary = current->nrow();
                secondary = current->ncol();
            } else {
                primary = current->ncol();
                secondary = current->nrow();
            }

            if (i == 0) {
                my_otherdim = secondary;
            } else if (my_otherdim != secondary) {
                throw std::runtime_error("all 'my_matrices' should have the same number of " + (my_by_row ? std::string("columns") : std::string("rows")));
            }

            // Removing the matrices that don't contribute anything,
            // so we don't have to deal with their overhead.
            if (primary > 0) {
                if (sofar != i) {
                    my_matrices[sofar] = std::move(current);
                }
                my_cumulative.push_back(sanisizer::sum<Index_>(my_cumulative.back(), primary));
                ++sofar;
            }
        }

        my_matrices.resize(sofar);
        nmats = sofar;

        // At this point, the number of matrices must be no greater than the
        // number of rows/columns of the combined matrix (as we've removed all
        // non-contributing submatrices) and thus should fit into 'Index_';
        // hence, using Index_ for the mapping should not overflow.
        my_mapping.reserve(my_cumulative.back());
        for (I<decltype(nmats)> i = 0; i < nmats; ++i) {
            my_mapping.insert(my_mapping.end(), (my_by_row ? my_matrices[i]->nrow() : my_matrices[i]->ncol()), i);
        }

        // Preferences are averaged across submatrices, weighted by size.
        double denom = 0;
        for (const auto& x : my_matrices) {
            const double total = static_cast<double>(x->nrow()) * static_cast<double>(x->ncol());
            denom += total;
            my_sparse_prop += total * x->is_sparse_proportion();
            my_by_row_prop += total * x->prefer_rows_proportion();
        }
        if (denom) {
            my_sparse_prop /= denom;
            my_by_row_prop /= denom;
        }

        // Oracle usage is reported if any submatrix wants an oracle.
        for (int d = 0; d < 2; ++d) {
            my_uses_oracle[d] = false;
            for (const auto& x : my_matrices) {
                if (x->uses_oracle(d)) {
                    my_uses_oracle[d] = true;
                    break;
                }
            }
        }
    }

    /**
     * Convenience overload accepting non-const matrix pointers.
     *
     * @param matrices Matrices to combine.
     * @param by_row Whether to combine by row.
     */
    DelayedBind(const std::vector<std::shared_ptr<Matrix<Value_, Index_> > >& matrices, const bool by_row) :
        DelayedBind(std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >(matrices.begin(), matrices.end()), by_row) {}

private:
    std::vector<std::shared_ptr<const Matrix<Value_, Index_> > > my_matrices;
    bool my_by_row;

    Index_ my_otherdim = 0; // extent of the non-combined dimension.
    std::vector<Index_> my_cumulative; // cumulative extents; length = number of matrices + 1.
    std::vector<Index_> my_mapping; // combined-dimension element -> owning submatrix.

    double my_sparse_prop = 0, my_by_row_prop = 0; // size-weighted averages over submatrices.
    std::array<bool, 2> my_uses_oracle;

public:
    Index_ nrow() const {
        if (my_by_row) {
            return my_cumulative.back();
        } else {
            return my_otherdim;
        }
    }

    Index_ ncol() const {
        if (my_by_row) {
            return my_otherdim;
        } else {
            return my_cumulative.back();
        }
    }

    bool is_sparse() const {
        return my_sparse_prop > 0.5;
    }

    double is_sparse_proportion() const {
        return my_sparse_prop;
    }

    bool prefer_rows() const {
        return my_by_row_prop > 0.5;
    }

    double prefer_rows_proportion() const {
        return my_by_row_prop;
    }

    bool uses_oracle(const bool row) const {
        return my_uses_oracle[row];
    }

    using Matrix<Value_, Index_>::dense;

    using Matrix<Value_, Index_>::sparse;

    /**********************************
     ********** Myopic dense **********
     **********************************/
public:
    // Dispatch: single submatrix -> direct; perpendicular -> per-element
    // routing; parallel -> concatenation across all submatrices.
    std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > dense(
        const bool row,
        const Options& opt
    ) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->dense(row, opt);
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::MyopicPerpendicularDense<Value_, Index_> >(
                my_cumulative,
                my_mapping,
                my_matrices,
                row,
                opt
            );
        } else {
            return std::make_unique<DelayedBind_internal::ParallelDense<false, Value_, Index_> >(
                my_cumulative,
                my_mapping,
                my_matrices,
                row,
                false,
                opt
            );
        }
    }

    std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > dense(
        const bool row,
        const Index_ block_start,
        const Index_ block_length,
        const Options& opt
    ) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->dense(row, block_start, block_length, opt);
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::MyopicPerpendicularDense<Value_, Index_> >(
                my_cumulative,
                my_mapping,
                my_matrices,
                row,
                block_start,
                block_length,
                opt
            );
        } else {
            return std::make_unique<DelayedBind_internal::ParallelDense<false, Value_, Index_> >(
                my_cumulative,
                my_mapping,
                my_matrices,
                row,
                false,
                block_start,
                block_length,
                opt
            );
        }
    }

    std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > dense(
        const bool row,
        VectorPtr<Index_> indices_ptr,
        const Options& opt
    ) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->dense(row, std::move(indices_ptr), opt);
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::MyopicPerpendicularDense<Value_, Index_> >(
                my_cumulative,
                my_mapping,
                my_matrices,
                row,
                std::move(indices_ptr),
                opt
            );
        } else {
            return std::make_unique<DelayedBind_internal::ParallelDense<false, Value_, Index_> >(
                my_cumulative,
                my_mapping,
                my_matrices,
                row,
                false,
                std::move(indices_ptr),
                opt
            );
        }
    }

    /***********************************
     ********** Myopic sparse **********
     ***********************************/
private:
    // Same three-way dispatch as the dense overloads above, with the
    // parallel case further specialized by selection type (full/block/index).
    std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(
        const bool row,
        const Options& opt
    ) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->sparse(row, opt);
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::MyopicPerpendicularSparse<Value_, Index_> >(
                my_cumulative,
                my_mapping,
                my_matrices,
                row,
                opt
            );
        } else {
            return std::make_unique<DelayedBind_internal::ParallelFullSparse<false, Value_, Index_> >(
                my_cumulative,
                my_mapping,
                my_matrices,
                row,
                false,
                opt
            );
        }
    }

    std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(
        const bool row,
        const Index_ block_start,
        const Index_ block_length,
        const Options& opt
    ) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->sparse(row, block_start, block_length, opt);
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::MyopicPerpendicularSparse<Value_, Index_> >(
                my_cumulative,
                my_mapping,
                my_matrices,
                row,
                block_start,
                block_length,
                opt
            );
        } else {
            return std::make_unique<DelayedBind_internal::ParallelBlockSparse<false, Value_, Index_> >(
                my_cumulative,
                my_mapping,
                my_matrices,
                row,
                false,
                block_start,
                block_length,
                opt
            );
        }
    }

    std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(
        const bool row,
        VectorPtr<Index_> indices_ptr,
        const Options& opt
    ) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->sparse(row, std::move(indices_ptr), opt);
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::MyopicPerpendicularSparse<Value_, Index_> >(
                my_cumulative,
                my_mapping,
                my_matrices,
                row,
                std::move(indices_ptr),
                opt
            );
        } else {
            return std::make_unique<DelayedBind_internal::ParallelIndexSparse<false, Value_, Index_> >(
                my_cumulative,
                my_mapping,
                my_matrices,
                row,
                false,
                std::move(indices_ptr),
                opt
            );
        }
    }

    /************************************
     ********** Oracular dense **********
     ************************************/
public:
    // Oracle-aware dispatch: if no submatrix makes use of an oracle, wrap a
    // myopic extractor in a pseudo-oracular adaptor to avoid the overhead of
    // splitting the prediction stream.
    std::unique_ptr<OracularDenseExtractor<Value_, Index_> > dense(
        const bool row,
        std::shared_ptr<const Oracle<Index_> > oracle,
        const Options& opt
    ) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->dense(row, std::move(oracle), opt);
        } else if (!my_uses_oracle[row]) {
            return std::make_unique<PseudoOracularDenseExtractor<Value_, Index_> >(
                std::move(oracle),
                dense(row, opt)
            );
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::OracularPerpendicularDense<Value_, Index_> >(
                my_cumulative,
                my_mapping,
                my_matrices,
                row,
                std::move(oracle),
                opt
            );
        } else {
            return std::make_unique<DelayedBind_internal::ParallelDense<true, Value_, Index_> >(
                my_cumulative,
                my_mapping,
                my_matrices,
                row,
                std::move(oracle),
                opt
            );
        }
    }

    std::unique_ptr<OracularDenseExtractor<Value_, Index_> > dense(
        const bool row,
        std::shared_ptr<const Oracle<Index_> > oracle,
        const Index_ block_start,
        const Index_ block_length,
        const Options& opt
    ) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->dense(row, std::move(oracle), block_start, block_length, opt);
        } else if (!my_uses_oracle[row]) {
            return std::make_unique<PseudoOracularDenseExtractor<Value_, Index_> >(
                std::move(oracle),
                dense(row, block_start, block_length, opt)
            );
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::OracularPerpendicularDense<Value_, Index_> >(
                my_cumulative,
                my_mapping,
                my_matrices,
                row,
                std::move(oracle),
                block_start,
                block_length,
                opt
            );
        } else {
            return std::make_unique<DelayedBind_internal::ParallelDense<true, Value_, Index_> >(
                my_cumulative,
                my_mapping,
                my_matrices,
                row,
                std::move(oracle),
                block_start,
                block_length,
                opt
            );
        }
    }

    std::unique_ptr<OracularDenseExtractor<Value_, Index_> > dense(
        const bool row,
        std::shared_ptr<const Oracle<Index_> > oracle,
        VectorPtr<Index_> indices_ptr,
        const Options& opt
    ) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->dense(row, std::move(oracle), std::move(indices_ptr), opt);
        } else if (!my_uses_oracle[row]) {
            return std::make_unique<PseudoOracularDenseExtractor<Value_, Index_> >(
                std::move(oracle),
                dense(row, std::move(indices_ptr), opt)
            );
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::OracularPerpendicularDense<Value_, Index_> >(
                my_cumulative,
                my_mapping,
                my_matrices,
                row,
                std::move(oracle),
                std::move(indices_ptr),
                opt
            );
        } else {
            return std::make_unique<DelayedBind_internal::ParallelDense<true, Value_, Index_> >(
                my_cumulative,
                my_mapping,
                my_matrices,
                row,
                std::move(oracle),
                std::move(indices_ptr),
                opt
            );
        }
    }

    /*************************************
     ********** Oracular sparse **********
     *************************************/
private:
    // Sparse analogues of the oracle-aware dense dispatch above.
    std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(
        const bool row,
        std::shared_ptr<const Oracle<Index_> > oracle,
        const Options& opt
    ) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->sparse(row, std::move(oracle), opt);
        } else if (!my_uses_oracle[row]) {
            return std::make_unique<PseudoOracularSparseExtractor<Value_, Index_> >(
                std::move(oracle),
                sparse(row, opt)
            );
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::OracularPerpendicularSparse<Value_, Index_> >(
                my_cumulative,
                my_mapping,
                my_matrices,
                row,
                std::move(oracle),
                opt
            );
        } else {
            return std::make_unique<DelayedBind_internal::ParallelFullSparse<true, Value_, Index_> >(
                my_cumulative,
                my_mapping,
                my_matrices,
                row,
                std::move(oracle),
                opt
            );
        }
    }

    std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(
        const bool row,
        std::shared_ptr<const Oracle<Index_> > oracle,
        const Index_ block_start,
        const Index_ block_length,
        const Options& opt
    ) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->sparse(row, std::move(oracle), block_start, block_length, opt);
        } else if (!my_uses_oracle[row]) {
            return std::make_unique<PseudoOracularSparseExtractor<Value_, Index_> >(
                std::move(oracle),
                sparse(row, block_start, block_length, opt)
            );
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::OracularPerpendicularSparse<Value_, Index_> >(
                my_cumulative,
                my_mapping,
                my_matrices,
                row,
                std::move(oracle),
                block_start,
                block_length,
                opt
            );
        } else {
            return std::make_unique<DelayedBind_internal::ParallelBlockSparse<true, Value_, Index_> >(
                my_cumulative,
                my_mapping,
                my_matrices,
                row,
                std::move(oracle),
                block_start,
                block_length,
                opt
            );
        }
    }

    std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(
        const bool row,
        std::shared_ptr<const Oracle<Index_> > oracle,
        VectorPtr<Index_> indices_ptr,
        const Options& opt
    ) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->sparse(row, std::move(oracle), std::move(indices_ptr), opt);
        } else if (!my_uses_oracle[row]) {
            return std::make_unique<PseudoOracularSparseExtractor<Value_, Index_> >(
                std::move(oracle),
                sparse(row, std::move(indices_ptr), opt)
            );
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::OracularPerpendicularSparse<Value_, Index_> >(
                my_cumulative,
                my_mapping,
                my_matrices,
                row,
                std::move(oracle),
                std::move(indices_ptr),
                opt
            );
        } else {
            return std::make_unique<DelayedBind_internal::ParallelIndexSparse<true, Value_, Index_> >(
                my_cumulative,
                my_mapping,
                my_matrices,
                row,
                std::move(oracle),
                std::move(indices_ptr),
                opt
            );
        }
    }
};
1120
1124// These methods are soft-deprecated: kept around for back-compatibility only.
1125template<typename Value_, typename Index_>
1126std::shared_ptr<Matrix<Value_, Index_> > make_DelayedBind(std::vector<std::shared_ptr<const Matrix<Value_, Index_> > > matrices, bool row) {
1127 return std::shared_ptr<Matrix<Value_, Index_> >(new DelayedBind<Value_, Index_>(std::move(matrices), row));
1128}
1129
1130template<typename Value_, typename Index_>
1131std::shared_ptr<Matrix<Value_, Index_> > make_DelayedBind(std::vector<std::shared_ptr<Matrix<Value_, Index_> > > matrices, bool row) {
1132 return std::shared_ptr<Matrix<Value_, Index_> >(new DelayedBind<Value_, Index_>(std::move(matrices), row));
1133}
1134
1135template<int margin_, typename Value_, typename Index_>
1136std::shared_ptr<Matrix<Value_, Index_> > make_DelayedBind(std::vector<std::shared_ptr<const Matrix<Value_, Index_> > > matrices) {
1137 return make_DelayedBind(std::move(matrices), margin_ == 0);
1138}
1139
1140template<int margin_, typename Value_, typename Index_>
1141std::shared_ptr<Matrix<Value_, Index_> > make_DelayedBind(std::vector<std::shared_ptr<Matrix<Value_, Index_> > > matrices) {
1142 return make_DelayedBind(std::move(matrices), margin_ == 0);
1143}
1148}
1149
1150#endif
Iterate across consecutive elements of the target dimension.
Iterate across a fixed sequence of elements on the target dimension.
Virtual class for a matrix of some numeric type.
Mimic the oracle-aware extractor interface.
Delayed combining of a matrix.
Definition DelayedBind.hpp:574
bool uses_oracle(const bool row) const
Definition DelayedBind.hpp:706
std::unique_ptr< OracularDenseExtractor< Value_, Index_ > > dense(const bool row, std::shared_ptr< const Oracle< Index_ > > oracle, VectorPtr< Index_ > indices_ptr, const Options &opt) const
Definition DelayedBind.hpp:972
DelayedBind(std::vector< std::shared_ptr< const Matrix< Value_, Index_ > > > matrices, const bool by_row)
Definition DelayedBind.hpp:582
std::unique_ptr< MyopicDenseExtractor< Value_, Index_ > > dense(const bool row, const Options &opt) const
Definition DelayedBind.hpp:718
double is_sparse_proportion() const
Definition DelayedBind.hpp:694
std::unique_ptr< MyopicDenseExtractor< Value_, Index_ > > dense(const bool row, const Index_ block_start, const Index_ block_length, const Options &opt) const
Definition DelayedBind.hpp:744
Index_ nrow() const
Definition DelayedBind.hpp:674
std::unique_ptr< MyopicDenseExtractor< Value_, Index_ > > dense(const bool row, VectorPtr< Index_ > indices_ptr, const Options &opt) const
Definition DelayedBind.hpp:776
DelayedBind(const std::vector< std::shared_ptr< Matrix< Value_, Index_ > > > &matrices, const bool by_row)
Definition DelayedBind.hpp:659
bool is_sparse() const
Definition DelayedBind.hpp:690
std::unique_ptr< OracularDenseExtractor< Value_, Index_ > > dense(const bool row, std::shared_ptr< const Oracle< Index_ > > oracle, const Options &opt) const
Definition DelayedBind.hpp:900
Index_ ncol() const
Definition DelayedBind.hpp:682
bool prefer_rows() const
Definition DelayedBind.hpp:698
double prefer_rows_proportion() const
Definition DelayedBind.hpp:702
std::unique_ptr< OracularDenseExtractor< Value_, Index_ > > dense(const bool row, std::shared_ptr< const Oracle< Index_ > > oracle, const Index_ block_start, const Index_ block_length, const Options &opt) const
Definition DelayedBind.hpp:933
Virtual class for a matrix.
Definition Matrix.hpp:59
Predict future access requests on the target dimension.
Definition Oracle.hpp:29
Copy data from one buffer to another.
Flexible representations for matrix data.
Definition Extractor.hpp:15
std::shared_ptr< const std::vector< Index_ > > VectorPtr
Definition Matrix.hpp:26
void resize_container_to_Index_size(Container_ &container, const Index_ x, Args_ &&... args)
Definition Index_to_container.hpp:92
typename std::conditional< oracle_, OracularDenseExtractor< Value_, Index_ >, MyopicDenseExtractor< Value_, Index_ > >::type DenseExtractor
Definition Extractor.hpp:273
Value_ * copy_n(const Value_ *const input, const Size_ n, Value_ *const output)
Definition copy.hpp:37
std::size_t PredictionIndex
Definition Oracle.hpp:18
Templated construction of a new extractor.
Options for accessing data from a Matrix instance.
Definition Options.hpp:30