tatami
C++ API for different matrix representations
Loading...
Searching...
No Matches
DelayedBind.hpp
Go to the documentation of this file.
1#ifndef TATAMI_DELAYED_BIND_HPP
2#define TATAMI_DELAYED_BIND_HPP
3
4#include "../base/Matrix.hpp"
9#include "../utils/copy.hpp"
10#include "../utils/Index_to_container.hpp"
11
12#include <numeric>
13#include <algorithm>
14#include <memory>
15#include <array>
16#include <type_traits>
17#include <cstddef>
18
27namespace tatami {
28
32namespace DelayedBind_internal {
33
34/**********************
35 *** Dense parallel ***
36 **********************/
37
// Partition a contiguous block [block_start, block_start + block_length) of the
// combined dimension across the bound submatrices. For each submatrix that
// overlaps the block, 'init' is invoked with the submatrix index, the start of
// the overlap in that submatrix's local coordinates, and the overlap length.
// Returns the index of the first overlapping submatrix (0 if 'mapping' is empty).
template<typename Index_, class Initialize_>
Index_ initialize_parallel_block(
    const std::vector<Index_>& cumulative,
    const std::vector<Index_>& mapping,
    Index_ block_start,
    Index_ block_length,
    Initialize_ init)
{
    // Nothing to partition if the combined dimension is empty.
    if (mapping.empty()) {
        return 0;
    }

    const Index_ first_matrix = mapping[block_start];
    const Index_ block_end = block_start + block_length;
    Index_ local_start = block_start - cumulative[first_matrix];

    // Number of matrices is guaranteed to fit in Index_, see reasoning in the DelayedBind constructor.
    const Index_ num_matrices = cumulative.size() - 1;
    Index_ mat = first_matrix;
    while (mat < num_matrices) {
        const Index_ boundary = cumulative[mat + 1];
        const bool spills_over = (block_end > boundary);
        const Index_ local_end = (spills_over ? boundary : block_end) - cumulative[mat];
        init(mat, local_start, local_end - local_start);
        if (!spills_over) {
            break;
        }
        local_start = 0; // subsequent submatrices are covered from their first element.
        ++mat;
    }

    return first_matrix;
}
68
// Partition a sorted set of combined-dimension indices across the bound
// submatrices. For each submatrix receiving at least one index, 'init' is
// invoked with the submatrix index and a shared vector of the requested
// positions translated into that submatrix's local coordinates.
template<typename Index_, class Initialize_>
void initialize_parallel_index(
    const std::vector<Index_>& cumulative,
    const std::vector<Index_>& mapping,
    const std::vector<Index_>& indices,
    Initialize_ init)
{
    const Index_ total = indices.size();
    Index_ at = 0;
    while (at < total) {
        // Identify which submatrix holds the next requested index.
        const Index_ leading = indices[at];
        const Index_ which = mapping[leading];
        const Index_ lower = cumulative[which];
        const Index_ upper = cumulative[which + 1];

        // Seed the slice with the first element, then absorb all subsequent
        // indices that fall inside the same submatrix's range.
        auto slice_ptr = std::make_shared<std::vector<Index_> >(1, leading - lower);
        ++at;
        for (; at < total && indices[at] < upper; ++at) {
            slice_ptr->push_back(indices[at] - lower);
        }

        init(which, std::move(slice_ptr));
    }
}
95
96template<bool oracle_, typename Value_, typename Index_>
97class ParallelDense final : public DenseExtractor<oracle_, Value_, Index_> {
98public:
99 ParallelDense(
100 const std::vector<Index_>&, // Not used, just provided for consistency with other constructors.
101 const std::vector<Index_>&,
102 const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
103 bool row,
104 MaybeOracle<oracle_, Index_> oracle,
105 const Options& opt)
106 {
107 my_exts.reserve(matrices.size());
108 my_count.reserve(matrices.size());
109 for (const auto& m : matrices) {
110 my_count.emplace_back(row ? m->ncol() : m->nrow());
111 my_exts.emplace_back(new_extractor<false, oracle_>(m.get(), row, oracle, opt));
112 }
113 }
114
115 ParallelDense(
116 const std::vector<Index_>& cumulative,
117 const std::vector<Index_>& mapping,
118 const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
119 bool row,
120 MaybeOracle<oracle_, Index_> oracle,
121 Index_ block_start,
122 Index_ block_length,
123 const Options& opt)
124 {
125 my_exts.reserve(matrices.size());
126 my_count.reserve(matrices.size());
127 initialize_parallel_block(
128 cumulative,
129 mapping,
130 block_start,
131 block_length,
132 [&](Index_ i, Index_ sub_block_start, Index_ sub_block_length) -> void {
133 my_count.emplace_back(sub_block_length);
134 my_exts.emplace_back(new_extractor<false, oracle_>(matrices[i].get(), row, oracle, sub_block_start, sub_block_length, opt));
135 }
136 );
137 }
138
139 ParallelDense(
140 const std::vector<Index_>& cumulative,
141 const std::vector<Index_>& mapping,
142 const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
143 bool row,
144 MaybeOracle<oracle_, Index_> oracle,
145 VectorPtr<Index_> indices_ptr,
146 const Options& opt)
147 {
148 my_exts.reserve(matrices.size());
149 my_count.reserve(matrices.size());
150 initialize_parallel_index(
151 cumulative,
152 mapping,
153 *indices_ptr,
154 [&](Index_ i, VectorPtr<Index_> sub_indices_ptr) -> void {
155 my_count.emplace_back(sub_indices_ptr->size());
156 my_exts.emplace_back(new_extractor<false, oracle_>(matrices[i].get(), row, oracle, std::move(sub_indices_ptr), opt));
157 }
158 );
159 }
160
161public:
162 const Value_* fetch(Index_ i, Value_* buffer) {
163 auto copy = buffer;
164 for (Index_ x = 0, end = my_count.size(); x < end; ++x) {
165 auto ptr = my_exts[x]->fetch(i, copy);
166 auto num = my_count[x];
167 copy_n(ptr, num, copy);
168 copy += num;
169 }
170 return buffer;
171 }
172
173private:
174 std::vector<std::unique_ptr<DenseExtractor<oracle_, Value_, Index_> > > my_exts;
175 std::vector<Index_> my_count;
176};
177
178/***********************
179 *** Sparse parallel ***
180 ***********************/
181
182template<bool oracle_, typename Value_, typename Index_>
183class ParallelFullSparse final : public SparseExtractor<oracle_, Value_, Index_> {
184public:
185 ParallelFullSparse(
186 const std::vector<Index_>& cumulative,
187 const std::vector<Index_>&, // not actually used, just provided for consistency with the other constructors.
188 const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
189 bool row,
190 MaybeOracle<oracle_, Index_> oracle,
191 const Options& opt) :
192 my_cumulative(cumulative),
193 my_needs_value(opt.sparse_extract_value),
194 my_needs_index(opt.sparse_extract_index)
195 {
196 my_exts.reserve(matrices.size());
197 for (const auto& m : matrices) {
198 my_exts.emplace_back(new_extractor<true, oracle_>(m.get(), row, oracle, opt));
199 }
200 }
201
202 SparseRange<Value_, Index_> fetch(Index_ i, Value_* value_buffer, Index_* index_buffer) {
203 auto vcopy = value_buffer;
204 auto icopy = index_buffer;
205 Index_ accumulated = 0;
206
207 for (decltype(my_exts.size()) x = 0, end = my_exts.size(); x < end; ++x) {
208 auto range = my_exts[x]->fetch(i, vcopy, icopy);
209 accumulated += range.number;
210 if (my_needs_value) {
211 copy_n(range.value, range.number, vcopy);
212 vcopy += range.number;
213 }
214 if (my_needs_index) {
215 auto offset = my_cumulative[x];
216 for (Index_ y = 0; y < range.number; ++y) {
217 icopy[y] = range.index[y] + offset;
218 }
219 icopy += range.number;
220 }
221 }
222
223 return SparseRange<Value_, Index_>(accumulated, (my_needs_value ? value_buffer : NULL), (my_needs_index ? index_buffer : NULL));
224 }
225
226private:
227 const std::vector<Index_>& my_cumulative;
228 bool my_needs_value, my_needs_index;
229 std::vector<std::unique_ptr<SparseExtractor<oracle_, Value_, Index_> > > my_exts;
230};
231
232template<bool oracle_, typename Value_, typename Index_>
233class ParallelBlockSparse final : public SparseExtractor<oracle_, Value_, Index_> {
234public:
235 ParallelBlockSparse(
236 const std::vector<Index_>& cumulative,
237 const std::vector<Index_>& mapping,
238 const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
239 bool row,
240 MaybeOracle<oracle_, Index_> oracle,
241 Index_ block_start,
242 Index_ block_length,
243 const Options& opt) :
244 my_cumulative(cumulative),
245 my_needs_value(opt.sparse_extract_value),
246 my_needs_index(opt.sparse_extract_index)
247 {
248 my_exts.reserve(matrices.size());
249 my_start_matrix = initialize_parallel_block(
250 my_cumulative,
251 mapping,
252 block_start,
253 block_length,
254 [&](Index_ i, Index_ sub_block_start, Index_ sub_block_length) -> void {
255 my_exts.emplace_back(new_extractor<true, oracle_>(matrices[i].get(), row, oracle, sub_block_start, sub_block_length, opt));
256 }
257 );
258 }
259
260 SparseRange<Value_, Index_> fetch(Index_ i, Value_* value_buffer, Index_* index_buffer) {
261 auto vcopy = value_buffer;
262 auto icopy = index_buffer;
263 Index_ count = 0;
264
265 for (Index_ x = 0, end = my_exts.size(); x < end; ++x) {
266 auto range = my_exts[x]->fetch(i, vcopy, icopy);
267 count += range.number;
268 if (my_needs_value) {
269 copy_n(range.value, range.number, vcopy);
270 vcopy += range.number;
271 }
272 if (my_needs_index) {
273 Index_ offset = my_cumulative[x + my_start_matrix];
274 for (Index_ y = 0; y < range.number; ++y) {
275 icopy[y] = range.index[y] + offset;
276 }
277 icopy += range.number;
278 }
279 }
280
281 return SparseRange<Value_, Index_>(count, (my_needs_value ? value_buffer : NULL), (my_needs_index ? index_buffer : NULL));
282 }
283
284private:
285 const std::vector<Index_>& my_cumulative;
286 bool my_needs_value, my_needs_index;
287 std::vector<std::unique_ptr<SparseExtractor<oracle_, Value_, Index_> > > my_exts;
288 Index_ my_start_matrix;
289};
290
291template<bool oracle_, typename Value_, typename Index_>
292class ParallelIndexSparse final : public SparseExtractor<oracle_, Value_, Index_> {
293public:
294 ParallelIndexSparse(
295 const std::vector<Index_>& cumulative,
296 const std::vector<Index_>& mapping,
297 const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
298 bool row,
299 MaybeOracle<oracle_, Index_> oracle,
300 VectorPtr<Index_> indices_ptr,
301 const Options& opt) :
302 my_cumulative(cumulative),
303 my_needs_value(opt.sparse_extract_value),
304 my_needs_index(opt.sparse_extract_index)
305 {
306 my_exts.reserve(matrices.size());
307 my_which_matrix.reserve(matrices.size());
308 initialize_parallel_index(
309 my_cumulative,
310 mapping,
311 *indices_ptr,
312 [&](Index_ i, VectorPtr<Index_> sub_indices_ptr) -> void {
313 my_which_matrix.emplace_back(i);
314 my_exts.emplace_back(new_extractor<true, oracle_>(matrices[i].get(), row, oracle, std::move(sub_indices_ptr), opt));
315 }
316 );
317 }
318
319 SparseRange<Value_, Index_> fetch(Index_ i, Value_* value_buffer, Index_* index_buffer) {
320 auto vcopy = value_buffer;
321 auto icopy = index_buffer;
322 Index_ count = 0;
323
324 for (Index_ x = 0, end = my_which_matrix.size(); x < end; ++x) {
325 auto range = my_exts[x]->fetch(i, vcopy, icopy);
326 count += range.number;
327 if (my_needs_value) {
328 copy_n(range.value, range.number, vcopy);
329 vcopy += range.number;
330 }
331
332 if (my_needs_index) {
333 Index_ offset = my_cumulative[my_which_matrix[x]];
334 for (Index_ y = 0; y < range.number; ++y) {
335 icopy[y] = range.index[y] + offset;
336 }
337 icopy += range.number;
338 }
339 }
340
341 return SparseRange<Value_, Index_>(count, (my_needs_value ? value_buffer : NULL), (my_needs_index ? index_buffer : NULL));
342 }
343
344private:
345 const std::vector<Index_>& my_cumulative;
346 bool my_needs_value, my_needs_index;
347 std::vector<std::unique_ptr<SparseExtractor<oracle_, Value_, Index_> > > my_exts;
348 std::vector<Index_> my_which_matrix;
349};
350
351/*********************
352 *** Perpendicular ***
353 *********************/
354
355template<typename Value_, typename Index_>
356class MyopicPerpendicularDense final : public MyopicDenseExtractor<Value_, Index_> {
357public:
358 template<typename ... Args_>
359 MyopicPerpendicularDense(
360 const std::vector<Index_>& cumulative,
361 const std::vector<Index_>& mapping,
362 const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
363 bool row,
364 const Args_& ... args) :
365 my_cumulative(cumulative),
366 my_mapping(mapping)
367 {
368 my_exts.reserve(matrices.size());
369 for (const auto& m : matrices) {
370 my_exts.emplace_back(m->dense(row, args...));
371 }
372 }
373
374 const Value_* fetch(Index_ i, Value_* buffer) {
375 Index_ chosen = my_mapping[i];
376 return my_exts[chosen]->fetch(i - my_cumulative[chosen], buffer);
377 }
378
379private:
380 const std::vector<Index_>& my_cumulative;
381 const std::vector<Index_>& my_mapping;
382 std::vector<std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > > my_exts;
383};
384
385template<typename Value_, typename Index_>
386class MyopicPerpendicularSparse final : public MyopicSparseExtractor<Value_, Index_> {
387public:
388 template<typename ... Args_>
389 MyopicPerpendicularSparse(
390 const std::vector<Index_>& cumulative,
391 const std::vector<Index_>& mapping,
392 const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
393 bool row,
394 const Args_& ... args) :
395 my_cumulative(cumulative),
396 my_mapping(mapping)
397 {
398 my_exts.reserve(matrices.size());
399 for (const auto& m : matrices) {
400 my_exts.emplace_back(m->sparse(row, args...));
401 }
402 }
403
404 SparseRange<Value_, Index_> fetch(Index_ i, Value_* vbuffer, Index_* ibuffer) {
405 Index_ chosen = my_mapping[i];
406 return my_exts[chosen]->fetch(i - my_cumulative[chosen], vbuffer, ibuffer);
407 }
408
409private:
410 const std::vector<Index_>& my_cumulative;
411 const std::vector<Index_>& my_mapping;
412 std::vector<std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > > my_exts;
413};
414
// Split a global oracle's prediction stream into one per-submatrix oracle.
// For each prediction, records the chosen submatrix in 'chosen' (so fetch()
// can replay the routing order) and accumulates the prediction, translated to
// local coordinates, into that submatrix's list. Each submatrix with at least
// one prediction then receives its own oracle via 'init': a cheap
// ConsecutiveOracle if its predictions form a single run, otherwise a
// FixedVectorOracle holding the explicit sequence.
template<typename Index_, class Initialize_>
void initialize_perp_oracular(
    const std::vector<Index_>& cumulative,
    const std::vector<Index_>& mapping,
    const Oracle<Index_>* oracle,
    std::vector<Index_>& chosen,
    Initialize_ init)
{
    auto ntotal = oracle->total();
    chosen.reserve(ntotal);

    // Accumulator that starts in "consecutive run" mode and lazily falls back
    // to an explicit vector the first time a prediction breaks the run.
    struct Predictions {
        bool consecutive = true; // still a single run [start, start + number)?
        Index_ start = 0;
        Index_ number = 0;
        std::vector<Index_> predictions; // only populated after the run breaks.

        void add(Index_ p) {
            if (consecutive) {
                if (number == 0) {
                    // First prediction seeds the run.
                    start = p;
                    number = 1;
                    return;
                }
                if (number + start == p) {
                    // Extends the current run.
                    ++number;
                    return;
                }
                // Run broken: materialize what we have so far, then append.
                consecutive = false;
                resize_container_to_Index_size(predictions, number);
                std::iota(predictions.begin(), predictions.end(), start);
            }

            predictions.push_back(p);
        }
    };

    auto nmats = cumulative.size() - 1;
    auto predictions = create_container_of_Index_size<std::vector<Predictions> >(nmats); // nmats should fit in an Index_, so this call is legal.
    for (decltype(ntotal) i = 0; i < ntotal; ++i) {
        auto prediction = oracle->get(i);
        Index_ choice = mapping[prediction];
        chosen.push_back(choice);
        // Translate to the submatrix's local coordinate space before storing.
        predictions[choice].add(prediction - cumulative[choice]);
    }

    // Hand each non-empty submatrix its specialized oracle.
    for (decltype(nmats) x = 0; x < nmats; ++x) {
        auto& current = predictions[x];
        if (current.consecutive) {
            if (current.number) {
                init(x, std::make_shared<ConsecutiveOracle<Index_> >(current.start, current.number));
            }
        } else {
            if (!current.predictions.empty()) {
                init(x, std::make_shared<FixedVectorOracle<Index_> >(std::move(current.predictions)));
            }
        }
    }
}
474
475template<typename Value_, typename Index_>
476class OracularPerpendicularDense final : public OracularDenseExtractor<Value_, Index_> {
477public:
478 template<typename ... Args_>
479 OracularPerpendicularDense(
480 const std::vector<Index_>& cumulative,
481 const std::vector<Index_>& mapping,
482 const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
483 bool row,
484 std::shared_ptr<const Oracle<Index_> > ora,
485 const Args_& ... args)
486 {
487 resize_container_to_Index_size(my_exts, matrices.size()); // number of matrices should fit in an I ndex_, so this call is allowed.
488 initialize_perp_oracular(
489 cumulative,
490 mapping,
491 ora.get(),
492 my_segments,
493 [&](Index_ x, std::shared_ptr<const Oracle<Index_> > subora) -> void {
494 my_exts[x] = matrices[x]->dense(row, std::move(subora), args...);
495 }
496 );
497 }
498
499 const Value_* fetch(Index_ i, Value_* buffer) {
500 auto chosen = my_segments[my_used];
501 auto output = my_exts[chosen]->fetch(i, buffer);
502 ++my_used;
503 return output;
504 }
505
506private:
507 std::vector<Index_> my_segments;
508 std::vector<std::unique_ptr<OracularDenseExtractor<Value_, Index_> > > my_exts;
509 PredictionIndex my_used = 0;
510};
511
512template<typename Value_, typename Index_>
513class OracularPerpendicularSparse final : public OracularSparseExtractor<Value_, Index_> {
514public:
515 template<typename ... Args_>
516 OracularPerpendicularSparse(
517 const std::vector<Index_>& cumulative,
518 const std::vector<Index_>& mapping,
519 const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
520 bool row,
521 std::shared_ptr<const Oracle<Index_> > ora,
522 const Args_& ... args)
523 {
524 resize_container_to_Index_size(my_exts, matrices.size()); // number of matrices should fit in an Index_, so this call is legal.
525 initialize_perp_oracular(
526 cumulative,
527 mapping,
528 ora.get(),
529 my_segments,
530 [&](Index_ x, std::shared_ptr<const Oracle<Index_> > subora) -> void {
531 my_exts[x] = matrices[x]->sparse(row, std::move(subora), args...);
532 }
533 );
534 }
535
536 SparseRange<Value_, Index_> fetch(Index_ i, Value_* vbuffer, Index_* ibuffer) {
537 auto chosen = my_segments[my_used];
538 auto output = my_exts[chosen]->fetch(i, vbuffer, ibuffer);
539 ++my_used;
540 return output;
541 }
542
543private:
544 std::vector<Index_> my_segments;
545 std::vector<std::unique_ptr<OracularSparseExtractor<Value_, Index_> > > my_exts;
546 PredictionIndex my_used = 0;
547};
548
549}
/**
 * @brief Delayed combining of multiple matrices along one dimension.
 *
 * Implements delayed rbind/cbind: the bound matrices are not copied, and all
 * extraction requests are routed to (or partitioned across) the submatrices.
 *
 * @tparam Value_ Type of matrix value.
 * @tparam Index_ Type of row/column index.
 */
template<typename Value_, typename Index_>
class DelayedBind final : public Matrix<Value_, Index_> {
public:
    /**
     * @param matrices Matrices to combine. All must agree on the extent of the
     * non-combined dimension (columns if `by_row = true`, rows otherwise).
     * @param by_row Whether to combine along rows (rbind); otherwise columns (cbind).
     */
    DelayedBind(std::vector<std::shared_ptr<const Matrix<Value_, Index_> > > matrices, bool by_row) : my_matrices(std::move(matrices)), my_by_row(by_row) {
        auto nmats = my_matrices.size();
        // +1 because my_cumulative carries a leading zero before the running sums.
        my_cumulative.reserve(sanisizer::sum<decltype(my_cumulative.size())>(nmats, 1));
        decltype(nmats) sofar = 0; // write cursor for compacting non-empty matrices in place.
        my_cumulative.push_back(0);

        for (decltype(nmats) i = 0; i < nmats; ++i) {
            auto& current = my_matrices[i];
            // 'primary' is the combined dimension's extent, 'secondary' the shared one.
            Index_ primary, secondary;
            if (my_by_row) {
                primary = current->nrow();
                secondary = current->ncol();
            } else {
                primary = current->ncol();
                secondary = current->nrow();
            }

            // All inputs must agree on the non-combined dimension.
            if (i == 0) {
                my_otherdim = secondary;
            } else if (my_otherdim != secondary) {
                throw std::runtime_error("all 'my_matrices' should have the same number of " + (my_by_row ? std::string("columns") : std::string("rows")));
            }

            // Removing the matrices that don't contribute anything,
            // so we don't have to deal with their overhead.
            if (primary > 0) {
                if (sofar != i) {
                    my_matrices[sofar] = std::move(current);
                }
                // sanisizer::sum also guards against Index_ overflow of the total extent.
                my_cumulative.push_back(sanisizer::sum<Index_>(my_cumulative.back(), primary));
                ++sofar;
            }
        }

        my_matrices.resize(sofar);
        nmats = sofar;

        // At this point, the number of matrices must be no greater than the
        // number of rows/columns of the combined matrix (as we've removed all
        // non-contributing submatrices) and thus should fit into 'Index_';
        // hence, using Index_ for the mapping should not overflow.
        my_mapping.reserve(my_cumulative.back());
        for (decltype(nmats) i = 0; i < nmats; ++i) {
            my_mapping.insert(my_mapping.end(), (my_by_row ? my_matrices[i]->nrow() : my_matrices[i]->ncol()), i);
        }

        // Area-weighted averages of each submatrix's sparsity and row-preference.
        double denom = 0;
        for (const auto& x : my_matrices) {
            double total = static_cast<double>(x->nrow()) * static_cast<double>(x->ncol());
            denom += total;
            my_sparse_prop += total * x->is_sparse_proportion();
            my_by_row_prop += total * x->prefer_rows_proportion();
        }
        if (denom) {
            my_sparse_prop /= denom;
            my_by_row_prop /= denom;
        }

        // An oracle is worth using if any submatrix benefits from one.
        for (int d = 0; d < 2; ++d) {
            my_uses_oracle[d] = false;
            for (const auto& x : my_matrices) {
                if (x->uses_oracle(d)) {
                    my_uses_oracle[d] = true;
                    break;
                }
            }
        }
    }

    /**
     * Convenience overload accepting non-const matrix pointers.
     *
     * @param matrices Matrices to combine; see the primary constructor.
     * @param by_row Whether to combine along rows.
     */
    DelayedBind(const std::vector<std::shared_ptr<Matrix<Value_, Index_> > >& matrices, bool by_row) :
        DelayedBind(std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >(matrices.begin(), matrices.end()), by_row) {}

private:
    std::vector<std::shared_ptr<const Matrix<Value_, Index_> > > my_matrices;
    bool my_by_row; // true for rbind, false for cbind.

    Index_ my_otherdim = 0; // extent of the non-combined dimension.
    std::vector<Index_> my_cumulative; // cumulative extents; [i, i+1) brackets submatrix i.
    std::vector<Index_> my_mapping; // combined coordinate -> owning submatrix.

    double my_sparse_prop = 0, my_by_row_prop = 0; // area-weighted averages.
    std::array<bool, 2> my_uses_oracle; // per-dimension oracle usefulness.

public:
    Index_ nrow() const {
        if (my_by_row) {
            return my_cumulative.back();
        } else {
            return my_otherdim;
        }
    }

    Index_ ncol() const {
        if (my_by_row) {
            return my_otherdim;
        } else {
            return my_cumulative.back();
        }
    }

    bool is_sparse() const {
        return my_sparse_prop > 0.5;
    }

    double is_sparse_proportion() const {
        return my_sparse_prop;
    }

    bool prefer_rows() const {
        return my_by_row_prop > 0.5;
    }

    double prefer_rows_proportion() const {
        return my_by_row_prop;
    }

    bool uses_oracle(bool row) const {
        return my_uses_oracle[row];
    }

    using Matrix<Value_, Index_>::dense;

    using Matrix<Value_, Index_>::sparse;

    /**********************************
     ********** Myopic dense **********
     **********************************/
public:
    // Each accessor below picks one of three strategies: direct pass-through
    // for a single submatrix, perpendicular routing when iterating along the
    // combined dimension, or parallel concatenation otherwise.
    std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > dense(bool row, const Options& opt) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->dense(row, opt);
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::MyopicPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, opt);
        } else {
            return std::make_unique<DelayedBind_internal::ParallelDense<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, false, opt);
        }
    }

    std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > dense(bool row, Index_ block_start, Index_ block_length, const Options& opt) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->dense(row, block_start, block_length, opt);
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::MyopicPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, block_start, block_length, opt);
        } else {
            return std::make_unique<DelayedBind_internal::ParallelDense<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, false, block_start, block_length, opt);
        }
    }

    std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > dense(bool row, VectorPtr<Index_> indices_ptr, const Options& opt) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->dense(row, std::move(indices_ptr), opt);
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::MyopicPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(indices_ptr), opt);
        } else {
            return std::make_unique<DelayedBind_internal::ParallelDense<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, false, std::move(indices_ptr), opt);
        }
    }

    /***********************************
     ********** Myopic sparse **********
     ***********************************/
private:
    // NOTE(review): these overrides are declared private here; they remain
    // callable through the public Matrix interface (virtual dispatch does not
    // check the access of the override) — confirm this matches upstream intent.
    std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(bool row, const Options& opt) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->sparse(row, opt);
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::MyopicPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, opt);
        } else {
            return std::make_unique<DelayedBind_internal::ParallelFullSparse<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, false, opt);
        }
    }

    std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(bool row, Index_ block_start, Index_ block_length, const Options& opt) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->sparse(row, block_start, block_length, opt);
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::MyopicPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, block_start, block_length, opt);
        } else {
            return std::make_unique<DelayedBind_internal::ParallelBlockSparse<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, false, block_start, block_length, opt);
        }
    }

    std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(bool row, VectorPtr<Index_> indices_ptr, const Options& opt) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->sparse(row, std::move(indices_ptr), opt);
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::MyopicPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(indices_ptr), opt);
        } else {
            return std::make_unique<DelayedBind_internal::ParallelIndexSparse<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, false, std::move(indices_ptr), opt);
        }
    }

    /************************************
     ********** Oracular dense **********
     ************************************/
public:
    // Oracle-aware variants: if no submatrix benefits from an oracle, wrap the
    // myopic extractor in a pseudo-oracular adapter instead.
    std::unique_ptr<OracularDenseExtractor<Value_, Index_> > dense(bool row, std::shared_ptr<const Oracle<Index_> > oracle, const Options& opt) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->dense(row, std::move(oracle), opt);
        } else if (!my_uses_oracle[row]) {
            return std::make_unique<PseudoOracularDenseExtractor<Value_, Index_> >(std::move(oracle), dense(row, opt));
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::OracularPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), opt);
        } else {
            return std::make_unique<DelayedBind_internal::ParallelDense<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), opt);
        }
    }

    std::unique_ptr<OracularDenseExtractor<Value_, Index_> > dense(bool row, std::shared_ptr<const Oracle<Index_> > oracle, Index_ block_start, Index_ block_length, const Options& opt) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->dense(row, std::move(oracle), block_start, block_length, opt);
        } else if (!my_uses_oracle[row]) {
            return std::make_unique<PseudoOracularDenseExtractor<Value_, Index_> >(std::move(oracle), dense(row, block_start, block_length, opt));
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::OracularPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), block_start, block_length, opt);
        } else {
            return std::make_unique<DelayedBind_internal::ParallelDense<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), block_start, block_length, opt);
        }
    }

    std::unique_ptr<OracularDenseExtractor<Value_, Index_> > dense(bool row, std::shared_ptr<const Oracle<Index_> > oracle, VectorPtr<Index_> indices_ptr, const Options& opt) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->dense(row, std::move(oracle), std::move(indices_ptr), opt);
        } else if (!my_uses_oracle[row]) {
            return std::make_unique<PseudoOracularDenseExtractor<Value_, Index_> >(std::move(oracle), dense(row, std::move(indices_ptr), opt));
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::OracularPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), std::move(indices_ptr), opt);
        } else {
            return std::make_unique<DelayedBind_internal::ParallelDense<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), std::move(indices_ptr), opt);
        }
    }

    /*************************************
     ********** Oracular sparse **********
     *************************************/
private:
    std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(bool row, std::shared_ptr<const Oracle<Index_> > oracle, const Options& opt) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->sparse(row, std::move(oracle), opt);
        } else if (!my_uses_oracle[row]) {
            return std::make_unique<PseudoOracularSparseExtractor<Value_, Index_> >(std::move(oracle), sparse(row, opt));
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::OracularPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), opt);
        } else {
            return std::make_unique<DelayedBind_internal::ParallelFullSparse<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), opt);
        }
    }

    std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(bool row, std::shared_ptr<const Oracle<Index_> > oracle, Index_ block_start, Index_ block_length, const Options& opt) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->sparse(row, std::move(oracle), block_start, block_length, opt);
        } else if (!my_uses_oracle[row]) {
            return std::make_unique<PseudoOracularSparseExtractor<Value_, Index_> >(std::move(oracle), sparse(row, block_start, block_length, opt));
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::OracularPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), block_start, block_length, opt);
        } else {
            return std::make_unique<DelayedBind_internal::ParallelBlockSparse<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), block_start, block_length, opt);
        }
    }

    std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(bool row, std::shared_ptr<const Oracle<Index_> > oracle, VectorPtr<Index_> indices_ptr, const Options& opt) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->sparse(row, std::move(oracle), std::move(indices_ptr), opt);
        } else if (!my_uses_oracle[row]) {
            return std::make_unique<PseudoOracularSparseExtractor<Value_, Index_> >(std::move(oracle), sparse(row, std::move(indices_ptr), opt));
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::OracularPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), std::move(indices_ptr), opt);
        } else {
            return std::make_unique<DelayedBind_internal::ParallelIndexSparse<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), std::move(indices_ptr), opt);
        }
    }
};
850
854// These methods are soft-deprecated: kept around for back-compatibility only.
855template<typename Value_, typename Index_>
856std::shared_ptr<Matrix<Value_, Index_> > make_DelayedBind(std::vector<std::shared_ptr<const Matrix<Value_, Index_> > > matrices, bool row) {
857 return std::shared_ptr<Matrix<Value_, Index_> >(new DelayedBind<Value_, Index_>(std::move(matrices), row));
858}
859
860template<typename Value_, typename Index_>
861std::shared_ptr<Matrix<Value_, Index_> > make_DelayedBind(std::vector<std::shared_ptr<Matrix<Value_, Index_> > > matrices, bool row) {
862 return std::shared_ptr<Matrix<Value_, Index_> >(new DelayedBind<Value_, Index_>(std::move(matrices), row));
863}
864
865template<int margin_, typename Value_, typename Index_>
866std::shared_ptr<Matrix<Value_, Index_> > make_DelayedBind(std::vector<std::shared_ptr<const Matrix<Value_, Index_> > > matrices) {
867 return make_DelayedBind(std::move(matrices), margin_ == 0);
868}
869
870template<int margin_, typename Value_, typename Index_>
871std::shared_ptr<Matrix<Value_, Index_> > make_DelayedBind(std::vector<std::shared_ptr<Matrix<Value_, Index_> > > matrices) {
872 return make_DelayedBind(std::move(matrices), margin_ == 0);
873}
878}
879
880#endif
Iterate across consecutive elements of the target dimension.
Iterate across a fixed sequence of elements on the target dimension.
Virtual class for a matrix of some numeric type.
Mimic the oracle-aware extractor interface.
Delayed combining of a matrix.
Definition DelayedBind.hpp:564
DelayedBind(const std::vector< std::shared_ptr< Matrix< Value_, Index_ > > > &matrices, bool by_row)
Definition DelayedBind.hpp:647
std::unique_ptr< OracularDenseExtractor< Value_, Index_ > > dense(bool row, std::shared_ptr< const Oracle< Index_ > > oracle, VectorPtr< Index_ > indices_ptr, const Options &opt) const
Definition DelayedBind.hpp:798
std::unique_ptr< MyopicDenseExtractor< Value_, Index_ > > dense(bool row, const Options &opt) const
Definition DelayedBind.hpp:706
double is_sparse_proportion() const
Definition DelayedBind.hpp:682
std::unique_ptr< MyopicDenseExtractor< Value_, Index_ > > dense(bool row, VectorPtr< Index_ > indices_ptr, const Options &opt) const
Definition DelayedBind.hpp:726
std::unique_ptr< OracularDenseExtractor< Value_, Index_ > > dense(bool row, std::shared_ptr< const Oracle< Index_ > > oracle, const Options &opt) const
Definition DelayedBind.hpp:774
Index_ nrow() const
Definition DelayedBind.hpp:662
bool is_sparse() const
Definition DelayedBind.hpp:678
std::unique_ptr< OracularDenseExtractor< Value_, Index_ > > dense(bool row, std::shared_ptr< const Oracle< Index_ > > oracle, Index_ block_start, Index_ block_length, const Options &opt) const
Definition DelayedBind.hpp:786
Index_ ncol() const
Definition DelayedBind.hpp:670
DelayedBind(std::vector< std::shared_ptr< const Matrix< Value_, Index_ > > > matrices, bool by_row)
Definition DelayedBind.hpp:572
bool prefer_rows() const
Definition DelayedBind.hpp:686
std::unique_ptr< MyopicDenseExtractor< Value_, Index_ > > dense(bool row, Index_ block_start, Index_ block_length, const Options &opt) const
Definition DelayedBind.hpp:716
double prefer_rows_proportion() const
Definition DelayedBind.hpp:690
bool uses_oracle(bool row) const
Definition DelayedBind.hpp:694
Virtual class for a matrix.
Definition Matrix.hpp:59
Predict future access requests on the target dimension.
Definition Oracle.hpp:29
Copy data from one buffer to another.
Flexible representations for matrix data.
Definition Extractor.hpp:15
std::shared_ptr< const std::vector< Index_ > > VectorPtr
Definition Matrix.hpp:26
void resize_container_to_Index_size(Container_ &container, Index_ x, Args_ &&... args)
Definition Index_to_container.hpp:88
typename std::conditional< oracle_, OracularDenseExtractor< Value_, Index_ >, MyopicDenseExtractor< Value_, Index_ > >::type DenseExtractor
Definition Extractor.hpp:273
Value_ * copy_n(const Value_ *input, Size_ n, Value_ *output)
Definition copy.hpp:26
std::size_t PredictionIndex
Definition Oracle.hpp:18
Templated construction of a new extractor.
Options for accessing data from a Matrix instance.
Definition Options.hpp:30