1#ifndef TATAMI_DELAYED_BIND_HPP
2#define TATAMI_DELAYED_BIND_HPP
31namespace DelayedBind_internal {
// Dispatches a contiguous block [block_start, block_start + block_length) of the
// combined dimension across the individual bound matrices.  'cumulative' holds the
// starting offset of each matrix along the combined dimension (length = number of
// matrices + 1), while 'mapping' maps each combined-dimension position to the index
// of the matrix that owns it.  For every matrix overlapped by the block,
// 'init(matrix_index, local_start, local_length)' is invoked with matrix-local
// coordinates.  Returns an Index_ — presumably the index of the first overlapped
// matrix (start_index); the return statement lies outside this excerpt, so confirm.
// NOTE(review): excerpt is incomplete — the block_start/block_length/init parameter
// declarations and the closing braces are not visible here.
37template<
typename Index_,
class Initialize_>
38Index_ initialize_parallel_block(
39 const std::vector<Index_>& cumulative,
40 const std::vector<Index_>& mapping,
// Early exit when there are no bound matrices (body of this branch not visible).
45 if (mapping.empty()) {
// Identify the first matrix containing 'block_start' and shift into its local frame.
49 Index_ start_index = mapping[block_start];
50 Index_ actual_start = block_start - cumulative[start_index];
51 Index_ block_end = block_start + block_length;
// One bound matrix per adjacent pair of cumulative offsets.
53 Index_ nmats = cumulative.size() - 1;
54 for (Index_ index = start_index; index < nmats; ++index) {
55 Index_ submat_end = cumulative[index + 1];
// True while the requested block extends beyond the current matrix.
56 bool not_final = (block_end > submat_end);
// Clamp the block's end to this matrix and convert to matrix-local coordinates.
57 Index_ actual_end = (not_final ? submat_end : block_end) - cumulative[index];
58 init(index, actual_start, actual_end - actual_start);
// Dispatches an arbitrary subset of combined-dimension indices across the bound
// matrices.  Consecutive runs of 'indices' falling inside the same matrix's range
// [cumulative[b], cumulative[b + 1]) are collected into a single matrix-local index
// vector and handed to 'init(matrix_index, local_indices_ptr)'.
// Assumes 'indices' is sorted in increasing order — TODO confirm against callers.
// NOTE(review): excerpt is incomplete — the 'init' parameter declaration, the
// counter increments and closing braces lie outside this view.
68template<
typename Index_,
class Initialize_>
69void initialize_parallel_index(
70 const std::vector<Index_>& cumulative,
71 const std::vector<Index_>& mapping,
72 const std::vector<Index_>& indices,
75 Index_ counter = 0, il = indices.size();
76 while (counter < il) {
// Locate the matrix owning the next requested index and its offset range.
77 Index_ first_index = indices[counter];
78 Index_ bind_index = mapping[first_index];
79 Index_ lower = cumulative[bind_index];
80 Index_ upper = cumulative[bind_index + 1];
// Seed the shared local-index vector with the first element, shifted to local frame.
83 auto slice_ptr = std::make_shared<std::vector<Index_> >(1, first_index - lower);
// Absorb all subsequent indices belonging to the same matrix.  'counter' is
// presumably advanced past the seed on a line missing from this excerpt, so the
// seed is not duplicated — confirm in the full source.
86 while (counter < il && indices[counter] < upper) {
87 slice_ptr->push_back(indices[counter] - lower);
91 init(bind_index, std::move(slice_ptr));
// Dense extractor for "parallel" extraction across all bound matrices, i.e., along
// the non-combined dimension where every matrix contributes a slice of each
// requested vector.  Per-matrix extractors live in 'my_exts' and the number of
// elements each contributes in 'my_count'.
// NOTE(review): excerpt is incomplete — constructor signatures, access specifiers
// and several statements/braces are not visible here.
95template<
bool oracle_,
typename Value_,
typename Index_>
96class ParallelDense final :
public DenseExtractor<oracle_, Value_, Index_> {
// Full-extent constructor: one extractor per matrix spanning its whole extent.
// The cumulative/mapping arguments are unused here (unnamed parameters).
99 const std::vector<Index_>&,
100 const std::vector<Index_>&,
101 const std::vector<std::shared_ptr<
const Matrix<Value_, Index_> > >& matrices,
103 MaybeOracle<oracle_, Index_> oracle,
106 my_exts.reserve(matrices.size());
107 my_count.reserve(matrices.size());
108 for (
const auto& m : matrices) {
// Each matrix contributes its full extent along the non-requested dimension.
109 my_count.emplace_back(row ? m->ncol() : m->nrow());
110 my_exts.emplace_back(new_extractor<false, oracle_>(m.get(), row, oracle, opt));
// Block constructor: only matrices overlapping the requested contiguous block get
// an extractor, with coordinates translated by initialize_parallel_block().
115 const std::vector<Index_>& cumulative,
116 const std::vector<Index_>& mapping,
117 const std::vector<std::shared_ptr<
const Matrix<Value_, Index_> > >& matrices,
119 MaybeOracle<oracle_, Index_> oracle,
124 my_exts.reserve(matrices.size());
125 my_count.reserve(matrices.size());
126 initialize_parallel_block(
131 [&](Index_ i, Index_ sub_block_start, Index_ sub_block_length) ->
void {
132 my_count.emplace_back(sub_block_length);
133 my_exts.emplace_back(new_extractor<false, oracle_>(matrices[i].get(), row, oracle, sub_block_start, sub_block_length, opt));
// Index constructor: the requested indices are partitioned per matrix via
// initialize_parallel_index().
139 const std::vector<Index_>& cumulative,
140 const std::vector<Index_>& mapping,
141 const std::vector<std::shared_ptr<
const Matrix<Value_, Index_> > >& matrices,
143 MaybeOracle<oracle_, Index_> oracle,
144 VectorPtr<Index_> indices_ptr,
147 my_exts.reserve(matrices.size());
148 my_count.reserve(matrices.size());
149 initialize_parallel_index(
153 [&](Index_ i, VectorPtr<Index_> sub_indices_ptr) ->
void {
154 my_count.emplace_back(sub_indices_ptr->size());
155 my_exts.emplace_back(new_extractor<false, oracle_>(matrices[i].get(), row, oracle, std::move(sub_indices_ptr), opt));
// Fetches element 'i' by concatenating each sub-extractor's output into 'buffer'.
// 'copy' is presumably a cursor into 'buffer' advanced by my_count[x] per matrix;
// its declaration and advancement lie outside this excerpt — confirm.
161 const Value_* fetch(Index_ i, Value_* buffer) {
163 for (Index_ x = 0, end = my_count.size(); x < end; ++x) {
164 auto ptr = my_exts[x]->fetch(i, copy);
165 auto num = my_count[x];
// Per-matrix extractors and the number of values each one contributes.
173 std::vector<std::unique_ptr<DenseExtractor<oracle_, Value_, Index_> > > my_exts;
174 std::vector<Index_> my_count;
// Sparse extractor for parallel extraction over the full combined extent: each
// fetch concatenates the sparse outputs of all bound matrices, offsetting each
// matrix's structural indices by its cumulative start so they refer to positions
// on the combined dimension.
// NOTE(review): excerpt is incomplete — constructor signature, access specifiers
// and several braces are not visible here.
181template<
bool oracle_,
typename Value_,
typename Index_>
182class ParallelFullSparse final :
public SparseExtractor<oracle_, Value_, Index_> {
185 const std::vector<Index_>& cumulative,
186 const std::vector<Index_>&,
187 const std::vector<std::shared_ptr<
const Matrix<Value_, Index_> > >& matrices,
189 MaybeOracle<oracle_, Index_> oracle,
190 const Options& opt) :
// Reference member: assumes 'cumulative' (owned by the parent) outlives this extractor.
191 my_cumulative(cumulative),
// Cache which outputs the caller asked for, to skip unneeded copying in fetch().
192 my_needs_value(opt.sparse_extract_value),
193 my_needs_index(opt.sparse_extract_index)
195 my_exts.reserve(matrices.size());
196 for (
const auto& m : matrices) {
197 my_exts.emplace_back(new_extractor<true, oracle_>(m.get(), row, oracle, opt));
// Concatenate the per-matrix sparse ranges into the caller's buffers.
201 SparseRange<Value_, Index_> fetch(Index_ i, Value_* value_buffer, Index_* index_buffer) {
202 auto vcopy = value_buffer;
203 auto icopy = index_buffer;
204 Index_ accumulated = 0;
206 for (
decltype(my_exts.size()) x = 0, end = my_exts.size(); x < end; ++x) {
// Each sub-extractor may return views into its own storage; copy into our buffers.
207 auto range = my_exts[x]->fetch(i, vcopy, icopy);
208 accumulated += range.number;
209 if (my_needs_value) {
210 copy_n(range.value, range.number, vcopy);
211 vcopy += range.number;
213 if (my_needs_index) {
// Shift matrix-local indices onto the combined dimension.
214 auto offset = my_cumulative[x];
215 for (Index_ y = 0; y < range.number; ++y) {
216 icopy[y] = range.index[y] + offset;
218 icopy += range.number;
// NULL out whichever arrays were not requested, per the sparse extraction contract.
222 return SparseRange<Value_, Index_>(accumulated, (my_needs_value ? value_buffer : NULL), (my_needs_index ? index_buffer : NULL));
226 const std::vector<Index_>& my_cumulative;
227 bool my_needs_value, my_needs_index;
228 std::vector<std::unique_ptr<SparseExtractor<oracle_, Value_, Index_> > > my_exts;
// Sparse extractor for parallel extraction over a contiguous block of the combined
// dimension.  Only matrices overlapped by the block get sub-extractors;
// 'my_start_matrix' records the first overlapped matrix so that cumulative offsets
// can be recovered as my_cumulative[x + my_start_matrix].
// NOTE(review): excerpt is incomplete — constructor signature, the declaration of
// 'count' in fetch() and several braces are not visible here.
231template<
bool oracle_,
typename Value_,
typename Index_>
232class ParallelBlockSparse final :
public SparseExtractor<oracle_, Value_, Index_> {
235 const std::vector<Index_>& cumulative,
236 const std::vector<Index_>& mapping,
237 const std::vector<std::shared_ptr<
const Matrix<Value_, Index_> > >& matrices,
239 MaybeOracle<oracle_, Index_> oracle,
242 const Options& opt) :
// Reference member: assumes 'cumulative' (owned by the parent) outlives this extractor.
243 my_cumulative(cumulative),
244 my_needs_value(opt.sparse_extract_value),
245 my_needs_index(opt.sparse_extract_index)
247 my_exts.reserve(matrices.size());
// Returns the index of the first matrix touched by the block (see
// initialize_parallel_block) — presumably; the return statement is outside this view.
248 my_start_matrix = initialize_parallel_block(
253 [&](Index_ i, Index_ sub_block_start, Index_ sub_block_length) ->
void {
254 my_exts.emplace_back(new_extractor<true, oracle_>(matrices[i].get(), row, oracle, sub_block_start, sub_block_length, opt));
// Concatenate the per-matrix sparse ranges, shifting indices onto the combined frame.
259 SparseRange<Value_, Index_> fetch(Index_ i, Value_* value_buffer, Index_* index_buffer) {
260 auto vcopy = value_buffer;
261 auto icopy = index_buffer;
264 for (Index_ x = 0, end = my_exts.size(); x < end; ++x) {
265 auto range = my_exts[x]->fetch(i, vcopy, icopy);
266 count += range.number;
267 if (my_needs_value) {
268 copy_n(range.value, range.number, vcopy);
269 vcopy += range.number;
271 if (my_needs_index) {
// Sub-extractor x corresponds to matrix (x + my_start_matrix).
272 Index_ offset = my_cumulative[x + my_start_matrix];
273 for (Index_ y = 0; y < range.number; ++y) {
274 icopy[y] = range.index[y] + offset;
276 icopy += range.number;
280 return SparseRange<Value_, Index_>(count, (my_needs_value ? value_buffer : NULL), (my_needs_index ? index_buffer : NULL));
284 const std::vector<Index_>& my_cumulative;
285 bool my_needs_value, my_needs_index;
286 std::vector<std::unique_ptr<SparseExtractor<oracle_, Value_, Index_> > > my_exts;
287 Index_ my_start_matrix;
// Sparse extractor for parallel extraction over an arbitrary index subset of the
// combined dimension.  Sub-extractors are created only for matrices that own at
// least one requested index; 'my_which_matrix[x]' remembers which matrix the x-th
// sub-extractor belongs to, for cumulative-offset lookup in fetch().
// NOTE(review): excerpt is incomplete — constructor signature, the declaration of
// 'count' in fetch() and several braces are not visible here.
290template<
bool oracle_,
typename Value_,
typename Index_>
291class ParallelIndexSparse final :
public SparseExtractor<oracle_, Value_, Index_> {
294 const std::vector<Index_>& cumulative,
295 const std::vector<Index_>& mapping,
296 const std::vector<std::shared_ptr<
const Matrix<Value_, Index_> > >& matrices,
298 MaybeOracle<oracle_, Index_> oracle,
299 VectorPtr<Index_> indices_ptr,
300 const Options& opt) :
// Reference member: assumes 'cumulative' (owned by the parent) outlives this extractor.
301 my_cumulative(cumulative),
302 my_needs_value(opt.sparse_extract_value),
303 my_needs_index(opt.sparse_extract_index)
305 my_exts.reserve(matrices.size());
306 my_which_matrix.reserve(matrices.size());
307 initialize_parallel_index(
311 [&](Index_ i, VectorPtr<Index_> sub_indices_ptr) ->
void {
312 my_which_matrix.emplace_back(i);
313 my_exts.emplace_back(new_extractor<true, oracle_>(matrices[i].get(), row, oracle, std::move(sub_indices_ptr), opt));
// Concatenate the per-matrix sparse ranges, shifting indices onto the combined frame.
318 SparseRange<Value_, Index_> fetch(Index_ i, Value_* value_buffer, Index_* index_buffer) {
319 auto vcopy = value_buffer;
320 auto icopy = index_buffer;
323 for (Index_ x = 0, end = my_which_matrix.size(); x < end; ++x) {
324 auto range = my_exts[x]->fetch(i, vcopy, icopy);
325 count += range.number;
326 if (my_needs_value) {
327 copy_n(range.value, range.number, vcopy);
328 vcopy += range.number;
331 if (my_needs_index) {
// Look up the owning matrix for this sub-extractor to get its offset.
332 Index_ offset = my_cumulative[my_which_matrix[x]];
333 for (Index_ y = 0; y < range.number; ++y) {
334 icopy[y] = range.index[y] + offset;
336 icopy += range.number;
340 return SparseRange<Value_, Index_>(count, (my_needs_value ? value_buffer : NULL), (my_needs_index ? index_buffer : NULL));
344 const std::vector<Index_>& my_cumulative;
345 bool my_needs_value, my_needs_index;
346 std::vector<std::unique_ptr<SparseExtractor<oracle_, Value_, Index_> > > my_exts;
347 std::vector<Index_> my_which_matrix;
// Dense extractor for "perpendicular" extraction, i.e., along the combined
// dimension: each requested vector lives entirely inside exactly one bound matrix,
// so fetch() just routes to that matrix's extractor with a local index.
// NOTE(review): excerpt is incomplete — the 'row' parameter, the my_mapping member
// initializer and several braces are not visible here.
354template<
typename Value_,
typename Index_>
355class MyopicPerpendicularDense final :
public MyopicDenseExtractor<Value_, Index_> {
357 template<
typename ... Args_>
358 MyopicPerpendicularDense(
359 const std::vector<Index_>& cumulative,
360 const std::vector<Index_>& mapping,
361 const std::vector<std::shared_ptr<
const Matrix<Value_, Index_> > >& matrices,
// 'args...' forwards the caller's full/block/index arguments to each sub-extractor.
363 const Args_& ... args) :
// Reference member: assumes 'cumulative' (owned by the parent) outlives this extractor.
364 my_cumulative(cumulative),
367 my_exts.reserve(matrices.size());
368 for (
const auto& m : matrices) {
369 my_exts.emplace_back(m->dense(row, args...));
// Route to the matrix owning position 'i', translated into its local frame.
373 const Value_* fetch(Index_ i, Value_* buffer) {
374 Index_ chosen = my_mapping[i];
375 return my_exts[chosen]->fetch(i - my_cumulative[chosen], buffer);
379 const std::vector<Index_>& my_cumulative;
380 const std::vector<Index_>& my_mapping;
381 std::vector<std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > > my_exts;
// Sparse counterpart of MyopicPerpendicularDense: perpendicular extraction routes
// each request to the single bound matrix that owns position 'i'.
// NOTE(review): excerpt is incomplete — the 'row' parameter, the my_mapping member
// initializer and several braces are not visible here.
384template<
typename Value_,
typename Index_>
385class MyopicPerpendicularSparse final :
public MyopicSparseExtractor<Value_, Index_> {
387 template<
typename ... Args_>
388 MyopicPerpendicularSparse(
389 const std::vector<Index_>& cumulative,
390 const std::vector<Index_>& mapping,
391 const std::vector<std::shared_ptr<
const Matrix<Value_, Index_> > >& matrices,
// 'args...' forwards the caller's full/block/index arguments to each sub-extractor.
393 const Args_& ... args) :
// Reference member: assumes 'cumulative' (owned by the parent) outlives this extractor.
394 my_cumulative(cumulative),
397 my_exts.reserve(matrices.size());
398 for (
const auto& m : matrices) {
399 my_exts.emplace_back(m->sparse(row, args...));
// Route to the matrix owning position 'i', translated into its local frame.
403 SparseRange<Value_, Index_> fetch(Index_ i, Value_* vbuffer, Index_* ibuffer) {
404 Index_ chosen = my_mapping[i];
405 return my_exts[chosen]->fetch(i - my_cumulative[chosen], vbuffer, ibuffer);
409 const std::vector<Index_>& my_cumulative;
410 const std::vector<Index_>& my_mapping;
411 std::vector<std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > > my_exts;
// Splits a global oracle's prediction stream into per-matrix sub-oracles for
// perpendicular oracular extraction.  'chosen' is filled with the owning matrix of
// each successive global prediction (so fetch() can replay the routing order).
// Each matrix ends up with either a cheap ConsecutiveOracle, when its predictions
// stayed consecutive, or a FixedVectorOracle holding the explicit local predictions.
// NOTE(review): excerpt is incomplete — the 'init' parameter, the local
// 'Predictions' struct definition (only fragments of its consecutive/start/number/
// predictions members and add() are visible) and several braces are missing.
414template<
typename Index_,
class Initialize_>
415void initialize_perp_oracular(
416 const std::vector<Index_>& cumulative,
417 const std::vector<Index_>& mapping,
418 const Oracle<Index_>* oracle,
419 std::vector<Index_>& chosen,
422 auto ntotal = oracle->total();
423 chosen.reserve(ntotal);
// Fragments of the per-matrix 'Predictions' accumulator: it starts in consecutive
// mode and falls back to an explicit vector once a gap is seen.
426 bool consecutive =
true;
429 std::vector<Index_> predictions;
// Still consecutive if the new prediction extends the current run by one.
438 if (number + start == p) {
// On the first gap, materialize the run seen so far as an explicit vector.
443 predictions.resize(number);
444 std::iota(predictions.begin(), predictions.end(), start);
447 predictions.push_back(p);
// One accumulator per bound matrix.
451 auto nmats = cumulative.size() - 1;
452 std::vector<Predictions> predictions(nmats);
453 for (
decltype(ntotal) i = 0; i < ntotal; ++i) {
454 auto prediction = oracle->get(i);
// Route each global prediction to its owning matrix, in matrix-local coordinates.
455 Index_ choice = mapping[prediction];
456 chosen.push_back(choice);
457 predictions[choice].add(prediction - cumulative[choice]);
// Build the cheapest sub-oracle that reproduces each matrix's prediction stream.
460 for (
decltype(nmats) x = 0; x < nmats; ++x) {
461 auto& current = predictions[x];
462 if (current.consecutive) {
// Skip matrices that received no predictions at all.
463 if (current.number) {
464 init(x, std::make_shared<ConsecutiveOracle<Index_> >(current.start, current.number));
467 if (!current.predictions.empty()) {
468 init(x, std::make_shared<FixedVectorOracle<Index_> >(std::move(current.predictions)));
// Oracle-aware perpendicular dense extractor: initialize_perp_oracular() splits the
// global oracle into per-matrix sub-oracles, and 'segments' replays the matrix
// choice for each successive prediction so fetch() routes in the same order.
// NOTE(review): excerpt is incomplete — the 'row' parameter, the fill of
// 'segments', the increment of 'used' and the return in fetch() are not visible.
474template<
typename Value_,
typename Index_>
475class OracularPerpendicularDense final :
public OracularDenseExtractor<Value_, Index_> {
477 template<
typename ... Args_>
478 OracularPerpendicularDense(
479 const std::vector<Index_>& cumulative,
480 const std::vector<Index_>& mapping,
481 const std::vector<std::shared_ptr<
const Matrix<Value_, Index_> > >& matrices,
483 std::shared_ptr<
const Oracle<Index_> > ora,
// 'args...' forwards the caller's full/block/index arguments to each sub-extractor.
484 const Args_& ... args)
486 my_exts.resize(matrices.size());
487 initialize_perp_oracular(
492 [&](Index_ x, std::shared_ptr<
const Oracle<Index_> > subora) ->
void {
// Each matrix gets a dense extractor driven by its own sub-oracle.
493 my_exts[x] = matrices[x]->dense(row, std::move(subora), args...);
// Consume the next recorded matrix choice; 'used' is presumably advanced after
// the delegated fetch (line outside this excerpt — confirm).
498 const Value_* fetch(Index_ i, Value_* buffer) {
499 auto chosen = segments[used];
500 auto output = my_exts[chosen]->fetch(i, buffer);
// Per-prediction matrix choices, filled by initialize_perp_oracular().
506 std::vector<Index_> segments;
507 std::vector<std::unique_ptr<OracularDenseExtractor<Value_, Index_> > > my_exts;
// Number of predictions consumed so far.
508 std::size_t used = 0;
// Sparse counterpart of OracularPerpendicularDense: per-matrix sub-oracles plus a
// 'segments' vector replaying the matrix choice for each successive prediction.
// NOTE(review): excerpt is incomplete — the 'row' parameter, the fill of
// 'segments', the increment of 'used' and the return in fetch() are not visible.
511template<
typename Value_,
typename Index_>
512class OracularPerpendicularSparse final :
public OracularSparseExtractor<Value_, Index_> {
514 template<
typename ... Args_>
515 OracularPerpendicularSparse(
516 const std::vector<Index_>& cumulative,
517 const std::vector<Index_>& mapping,
518 const std::vector<std::shared_ptr<
const Matrix<Value_, Index_> > >& matrices,
520 std::shared_ptr<
const Oracle<Index_> > ora,
// 'args...' forwards the caller's full/block/index arguments to each sub-extractor.
521 const Args_& ... args)
523 my_exts.resize(matrices.size());
524 initialize_perp_oracular(
529 [&](Index_ x, std::shared_ptr<
const Oracle<Index_> > subora) ->
void {
// Each matrix gets a sparse extractor driven by its own sub-oracle.
530 my_exts[x] = matrices[x]->sparse(row, std::move(subora), args...);
// Consume the next recorded matrix choice; 'used' is presumably advanced after
// the delegated fetch (line outside this excerpt — confirm).
535 SparseRange<Value_, Index_> fetch(Index_ i, Value_* vbuffer, Index_* ibuffer) {
536 auto chosen = segments[used];
537 auto output = my_exts[chosen]->fetch(i, vbuffer, ibuffer);
// Per-prediction matrix choices, filled by initialize_perp_oracular().
543 std::vector<Index_> segments;
544 std::vector<std::unique_ptr<OracularSparseExtractor<Value_, Index_> > > my_exts;
// Number of predictions consumed so far.
545 std::size_t used = 0;
// DelayedBind: a Matrix that presents several matrices combined along one dimension
// (rows when my_by_row is true, columns otherwise) as a single virtual matrix,
// without copying any data.
// NOTE(review): this excerpt is heavily elided — the class declaration line, the
// constructor/nrow/ncol signatures and many braces lie outside this view.
562template<
typename Value_,
typename Index_>
// --- constructor body: validates inputs and precomputes lookup structures ---
572 auto nmats = my_matrices.size();
573 my_cumulative.reserve(nmats + 1);
574 decltype(nmats) sofar = 0;
575 my_cumulative.push_back(0);
577 for (
decltype(nmats) i = 0; i < nmats; ++i) {
578 auto& current = my_matrices[i];
// 'primary' is the extent along the combined dimension; 'secondary' the other one.
579 Index_ primary, secondary;
581 primary = current->nrow();
582 secondary = current->ncol();
584 primary = current->ncol();
585 secondary = current->nrow();
// Remember the non-combined extent from the first matrix, then insist every
// subsequent matrix agrees with it.
589 my_otherdim = secondary;
590 }
else if (my_otherdim != secondary) {
591 throw std::runtime_error(
"all 'my_matrices' should have the same number of " + (my_by_row ? std::string(
"columns") : std::string(
"rows")));
// Compact the retained matrices in place; the filtering condition is not visible
// here — presumably matrices with zero extent are dropped. Confirm in full source.
598 my_matrices[sofar] = std::move(current);
600 my_cumulative.push_back(my_cumulative.back() + primary);
605 my_matrices.resize(sofar);
// Map every combined-dimension position to the index of the matrix that owns it.
612 my_mapping.reserve(my_cumulative.back());
613 for (
decltype(nmats) i = 0; i < nmats; ++i) {
614 my_mapping.insert(my_mapping.end(), (my_by_row ? my_matrices[i]->nrow() : my_matrices[i]->ncol()), i);
// Element-count-weighted averages of sparsity and row-preference across matrices;
// 'denom' is presumably the summed element counts (declaration outside this view).
618 for (
const auto& x : my_matrices) {
619 double total =
static_cast<double>(x->nrow()) *
static_cast<double>(x->ncol());
621 my_sparse_prop += total * x->is_sparse_proportion();
622 my_by_row_prop += total * x->prefer_rows_proportion();
625 my_sparse_prop /= denom;
626 my_by_row_prop /= denom;
// A dimension supports oracle-driven extraction if any bound matrix does.
629 for (
int d = 0; d < 2; ++d) {
630 my_uses_oracle[d] =
false;
631 for (
const auto& x : my_matrices) {
632 if (x->uses_oracle(d)) {
633 my_uses_oracle[d] =
true;
// Delegating constructor: accepts non-const matrix pointers by upcasting to const.
647 DelayedBind(std::vector<std::shared_ptr<const
Matrix<Value_, Index_> > >(matrices.begin(), matrices.end()), by_row) {}
// --- members ---
650 std::vector<std::shared_ptr<const Matrix<Value_, Index_> > > my_matrices;
// Shared extent along the non-combined dimension.
653 Index_ my_otherdim = 0;
// my_cumulative[i] = start offset of matrix i on the combined dimension
// (size = number of matrices + 1).
654 std::vector<Index_> my_cumulative;
// For each combined-dimension position, the index of its owning matrix.
655 std::vector<Index_> my_mapping;
657 double my_sparse_prop = 0, my_by_row_prop = 0;
658 std::array<bool, 2> my_uses_oracle;
// --- Matrix interface (most signatures elided in this excerpt) ---
// nrow()/ncol(): one branch of each returns the combined extent below; the
// sibling branch returning my_otherdim lies outside this view — confirm.
663 return my_cumulative.back();
673 return my_cumulative.back();
678 return my_sparse_prop > 0.5;
682 return my_sparse_prop;
686 return my_by_row_prop > 0.5;
690 return my_by_row_prop;
694 return my_uses_oracle[row];
699 using Matrix<Value_, Index_>::sparse;
// Myopic dense extractor factories.  Dispatch used by every factory below:
// a single bound matrix is a pure passthrough; extraction along the combined
// dimension (row == my_by_row) uses a "perpendicular" extractor that routes each
// request to one matrix; otherwise a "parallel" extractor stitches a slice from
// every matrix.
705 std::unique_ptr<MyopicDenseExtractor<Value_, Index_> >
dense(
bool row,
const Options& opt)
const {
706 if (my_matrices.size() == 1) {
707 return my_matrices[0]->dense(row, opt);
708 }
else if (row == my_by_row) {
709 return std::make_unique<DelayedBind_internal::MyopicPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, opt);
711 return std::make_unique<DelayedBind_internal::ParallelDense<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row,
false, opt);
715 std::unique_ptr<MyopicDenseExtractor<Value_, Index_> >
dense(
bool row, Index_ block_start, Index_ block_length,
const Options& opt)
const {
716 if (my_matrices.size() == 1) {
717 return my_matrices[0]->dense(row, block_start, block_length, opt);
718 }
else if (row == my_by_row) {
719 return std::make_unique<DelayedBind_internal::MyopicPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, block_start, block_length, opt);
721 return std::make_unique<DelayedBind_internal::ParallelDense<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row,
false, block_start, block_length, opt);
// Indexed myopic dense factory (signature elided in this excerpt).
726 if (my_matrices.size() == 1) {
727 return my_matrices[0]->dense(row, std::move(indices_ptr), opt);
728 }
else if (row == my_by_row) {
729 return std::make_unique<DelayedBind_internal::MyopicPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(indices_ptr), opt);
731 return std::make_unique<DelayedBind_internal::ParallelDense<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row,
false, std::move(indices_ptr), opt);
// Myopic sparse extractor factories; same three-way dispatch as dense() above.
739 std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(
bool row,
const Options& opt)
const {
740 if (my_matrices.size() == 1) {
741 return my_matrices[0]->sparse(row, opt);
742 }
else if (row == my_by_row) {
743 return std::make_unique<DelayedBind_internal::MyopicPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, opt);
745 return std::make_unique<DelayedBind_internal::ParallelFullSparse<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row,
false, opt);
749 std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(
bool row, Index_ block_start, Index_ block_length,
const Options& opt)
const {
750 if (my_matrices.size() == 1) {
751 return my_matrices[0]->sparse(row, block_start, block_length, opt);
752 }
else if (row == my_by_row) {
753 return std::make_unique<DelayedBind_internal::MyopicPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, block_start, block_length, opt);
755 return std::make_unique<DelayedBind_internal::ParallelBlockSparse<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row,
false, block_start, block_length, opt);
759 std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(
bool row,
VectorPtr<Index_> indices_ptr,
const Options& opt)
const {
760 if (my_matrices.size() == 1) {
761 return my_matrices[0]->sparse(row, std::move(indices_ptr), opt);
762 }
else if (row == my_by_row) {
763 return std::make_unique<DelayedBind_internal::MyopicPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(indices_ptr), opt);
765 return std::make_unique<DelayedBind_internal::ParallelIndexSparse<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row,
false, std::move(indices_ptr), opt);
// Oracular dense factories.  One extra branch: if no bound matrix can use the
// oracle for this dimension, wrap the myopic extractor in a pseudo-oracular shim.
773 std::unique_ptr<OracularDenseExtractor<Value_, Index_> >
dense(
bool row, std::shared_ptr<
const Oracle<Index_> > oracle,
const Options& opt)
const {
774 if (my_matrices.size() == 1) {
775 return my_matrices[0]->dense(row, std::move(oracle), opt);
776 }
else if (!my_uses_oracle[row]) {
777 return std::make_unique<PseudoOracularDenseExtractor<Value_, Index_> >(std::move(oracle),
dense(row, opt));
778 }
else if (row == my_by_row) {
779 return std::make_unique<DelayedBind_internal::OracularPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), opt);
781 return std::make_unique<DelayedBind_internal::ParallelDense<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), opt);
785 std::unique_ptr<OracularDenseExtractor<Value_, Index_> >
dense(
bool row, std::shared_ptr<
const Oracle<Index_> > oracle, Index_ block_start, Index_ block_length,
const Options& opt)
const {
786 if (my_matrices.size() == 1) {
787 return my_matrices[0]->dense(row, std::move(oracle), block_start, block_length, opt);
788 }
else if (!my_uses_oracle[row]) {
789 return std::make_unique<PseudoOracularDenseExtractor<Value_, Index_> >(std::move(oracle),
dense(row, block_start, block_length, opt));
790 }
else if (row == my_by_row) {
791 return std::make_unique<DelayedBind_internal::OracularPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), block_start, block_length, opt);
793 return std::make_unique<DelayedBind_internal::ParallelDense<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), block_start, block_length, opt);
// Indexed oracular dense factory (signature elided in this excerpt).
798 if (my_matrices.size() == 1) {
799 return my_matrices[0]->dense(row, std::move(oracle), std::move(indices_ptr), opt);
800 }
else if (!my_uses_oracle[row]) {
801 return std::make_unique<PseudoOracularDenseExtractor<Value_, Index_> >(std::move(oracle),
dense(row, std::move(indices_ptr), opt));
802 }
else if (row == my_by_row) {
803 return std::make_unique<DelayedBind_internal::OracularPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), std::move(indices_ptr), opt);
805 return std::make_unique<DelayedBind_internal::ParallelDense<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), std::move(indices_ptr), opt);
// Oracular sparse factories; same four-way dispatch as the oracular dense ones.
813 std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(
bool row, std::shared_ptr<
const Oracle<Index_> > oracle,
const Options& opt)
const {
814 if (my_matrices.size() == 1) {
815 return my_matrices[0]->sparse(row, std::move(oracle), opt);
816 }
else if (!my_uses_oracle[row]) {
817 return std::make_unique<PseudoOracularSparseExtractor<Value_, Index_> >(std::move(oracle), sparse(row, opt));
818 }
else if (row == my_by_row) {
819 return std::make_unique<DelayedBind_internal::OracularPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), opt);
821 return std::make_unique<DelayedBind_internal::ParallelFullSparse<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), opt);
825 std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(
bool row, std::shared_ptr<
const Oracle<Index_> > oracle, Index_ block_start, Index_ block_length,
const Options& opt)
const {
826 if (my_matrices.size() == 1) {
827 return my_matrices[0]->sparse(row, std::move(oracle), block_start, block_length, opt);
828 }
else if (!my_uses_oracle[row]) {
829 return std::make_unique<PseudoOracularSparseExtractor<Value_, Index_> >(std::move(oracle), sparse(row, block_start, block_length, opt));
830 }
else if (row == my_by_row) {
831 return std::make_unique<DelayedBind_internal::OracularPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), block_start, block_length, opt);
833 return std::make_unique<DelayedBind_internal::ParallelBlockSparse<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), block_start, block_length, opt);
837 std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(
bool row, std::shared_ptr<
const Oracle<Index_> > oracle,
VectorPtr<Index_> indices_ptr,
const Options& opt)
const {
838 if (my_matrices.size() == 1) {
839 return my_matrices[0]->sparse(row, std::move(oracle), std::move(indices_ptr), opt);
840 }
else if (!my_uses_oracle[row]) {
841 return std::make_unique<PseudoOracularSparseExtractor<Value_, Index_> >(std::move(oracle), sparse(row, std::move(indices_ptr), opt));
842 }
else if (row == my_by_row) {
843 return std::make_unique<DelayedBind_internal::OracularPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), std::move(indices_ptr), opt);
845 return std::make_unique<DelayedBind_internal::ParallelIndexSparse<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), std::move(indices_ptr), opt);
// Factory: wraps a vector of const matrices in a DelayedBind.
// 'row' selects combining by rows (true) or by columns (false).
// NOTE(review): the closing brace lies outside this excerpt.
854template<
typename Value_,
typename Index_>
855std::shared_ptr<Matrix<Value_, Index_> > make_DelayedBind(std::vector<std::shared_ptr<
const Matrix<Value_, Index_> > > matrices,
bool row) {
856 return std::shared_ptr<Matrix<Value_, Index_> >(
new DelayedBind<Value_, Index_>(std::move(matrices), row));
// Factory overload for non-const matrix pointers; the DelayedBind constructor
// performs the upcast to const.
// NOTE(review): the closing brace lies outside this excerpt.
859template<
typename Value_,
typename Index_>
860std::shared_ptr<Matrix<Value_, Index_> > make_DelayedBind(std::vector<std::shared_ptr<Matrix<Value_, Index_> > > matrices,
bool row) {
861 return std::shared_ptr<Matrix<Value_, Index_> >(
new DelayedBind<Value_, Index_>(std::move(matrices), row));
// Factory with the combining dimension chosen at compile time:
// margin_ == 0 combines by rows, otherwise by columns.
// NOTE(review): the closing brace lies outside this excerpt.
864template<
int margin_,
typename Value_,
typename Index_>
865std::shared_ptr<Matrix<Value_, Index_> > make_DelayedBind(std::vector<std::shared_ptr<
const Matrix<Value_, Index_> > > matrices) {
866 return make_DelayedBind(std::move(matrices), margin_ == 0);
// Compile-time-margin factory overload for non-const matrix pointers;
// margin_ == 0 combines by rows, otherwise by columns.
// NOTE(review): the closing brace lies outside this excerpt.
869template<
int margin_,
typename Value_,
typename Index_>
870std::shared_ptr<Matrix<Value_, Index_> > make_DelayedBind(std::vector<std::shared_ptr<Matrix<Value_, Index_> > > matrices) {
871 return make_DelayedBind(std::move(matrices), margin_ == 0);
Iterate across consecutive elements of the target dimension.
Iterate across a fixed sequence of elements on the target dimension.
Virtual class for a matrix of some numeric type.
Delayed combining of multiple matrices along a single dimension.
Definition DelayedBind.hpp:563
DelayedBind(const std::vector< std::shared_ptr< Matrix< Value_, Index_ > > > &matrices, bool by_row)
Definition DelayedBind.hpp:646
std::unique_ptr< OracularDenseExtractor< Value_, Index_ > > dense(bool row, std::shared_ptr< const Oracle< Index_ > > oracle, VectorPtr< Index_ > indices_ptr, const Options &opt) const
Definition DelayedBind.hpp:797
std::unique_ptr< MyopicDenseExtractor< Value_, Index_ > > dense(bool row, const Options &opt) const
Definition DelayedBind.hpp:705
double is_sparse_proportion() const
Definition DelayedBind.hpp:681
std::unique_ptr< MyopicDenseExtractor< Value_, Index_ > > dense(bool row, VectorPtr< Index_ > indices_ptr, const Options &opt) const
Definition DelayedBind.hpp:725
std::unique_ptr< OracularDenseExtractor< Value_, Index_ > > dense(bool row, std::shared_ptr< const Oracle< Index_ > > oracle, const Options &opt) const
Definition DelayedBind.hpp:773
Index_ nrow() const
Definition DelayedBind.hpp:661
bool is_sparse() const
Definition DelayedBind.hpp:677
std::unique_ptr< OracularDenseExtractor< Value_, Index_ > > dense(bool row, std::shared_ptr< const Oracle< Index_ > > oracle, Index_ block_start, Index_ block_length, const Options &opt) const
Definition DelayedBind.hpp:785
Index_ ncol() const
Definition DelayedBind.hpp:669
DelayedBind(std::vector< std::shared_ptr< const Matrix< Value_, Index_ > > > matrices, bool by_row)
Definition DelayedBind.hpp:571
bool prefer_rows() const
Definition DelayedBind.hpp:685
std::unique_ptr< MyopicDenseExtractor< Value_, Index_ > > dense(bool row, Index_ block_start, Index_ block_length, const Options &opt) const
Definition DelayedBind.hpp:715
double prefer_rows_proportion() const
Definition DelayedBind.hpp:689
bool uses_oracle(bool row) const
Definition DelayedBind.hpp:693
Virtual class for a matrix.
Definition Matrix.hpp:59
Predict future access requests on the target dimension.
Definition Oracle.hpp:23
Copy data from one buffer to another.
Flexible representations for matrix data.
Definition Extractor.hpp:15
std::shared_ptr< const std::vector< Index_ > > VectorPtr
Definition Matrix.hpp:26
typename std::conditional< oracle_, OracularDenseExtractor< Value_, Index_ >, MyopicDenseExtractor< Value_, Index_ > >::type DenseExtractor
Definition Extractor.hpp:273
Value_ * copy_n(const Value_ *input, Size_ n, Value_ *output)
Definition copy.hpp:25
Options for accessing data from a Matrix instance.
Definition Options.hpp:30