#ifndef TATAMI_DELAYED_BIND_HPP
#define TATAMI_DELAYED_BIND_HPP

// Standard library headers used by the code below; tatami-internal headers are omitted here.
#include <vector>
#include <memory>
#include <array>
#include <string>
#include <stdexcept>
#include <numeric>
#include <utility>

namespace DelayedBind_internal {
template<typename Index_, class Initialize_>
Index_ initialize_parallel_block(
    const std::vector<Index_>& cumulative,
    const std::vector<Index_>& mapping,
    const Index_ block_start,
    const Index_ block_length,
    const Initialize_ init)
{
    if (mapping.empty()) {
        return 0;
    }

    // 'mapping' takes a position on the combined dimension to the index of the
    // underlying matrix, while 'cumulative' holds the start of each matrix on
    // that dimension; together they locate the first matrix overlapping the block.
    const Index_ start_index = mapping[block_start];
    Index_ actual_start = block_start - cumulative[start_index];
    const Index_ block_end = block_start + block_length;

    const Index_ nmats = cumulative.size() - 1;
    for (Index_ index = start_index; index < nmats; ++index) {
        const Index_ submat_end = cumulative[index + 1];
        const bool not_final = (block_end > submat_end);
        const Index_ actual_end = (not_final ? submat_end : block_end) - cumulative[index];
        init(index, actual_start, actual_end - actual_start);
        if (!not_final) {
            break;
        }
        actual_start = 0; // later matrices are covered from their first position.
    }

    return start_index;
}
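// A small worked example (illustrative only): suppose three matrices contribute
// 5, 3 and 4 elements to the combined dimension, so that
//     cumulative = {0, 5, 8, 12}
//     mapping    = {0,0,0,0,0, 1,1,1, 2,2,2,2}
// Requesting the block [4, 10) (block_start = 4, block_length = 6) invokes 'init' as:
//     init(0, 4, 1);  // last element of matrix 0
//     init(1, 0, 3);  // all of matrix 1
//     init(2, 0, 2);  // first two elements of matrix 2
// and the function returns 0, the index of the first overlapping matrix.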
template<typename Index_, class Initialize_>
void initialize_parallel_index(
    const std::vector<Index_>& cumulative,
    const std::vector<Index_>& mapping,
    const std::vector<Index_>& indices,
    const Initialize_ init)
{
    Index_ counter = 0;
    const Index_ il = indices.size();
    while (counter < il) {
        const Index_ first_index = indices[counter];
        const Index_ bind_index = mapping[first_index];
        const Index_ lower = cumulative[bind_index];
        const Index_ upper = cumulative[bind_index + 1];

        // Creating the slice with one element already inside it.
        auto slice_ptr = std::make_shared<std::vector<Index_> >(1, first_index - lower);
        ++counter;

        // Pulling in all subsequent indices that belong to the same matrix.
        while (counter < il && indices[counter] < upper) {
            slice_ptr->push_back(indices[counter] - lower);
            ++counter;
        }

        init(bind_index, std::move(slice_ptr));
    }
}
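// Another illustrative example, using the same layout as above
// (cumulative = {0, 5, 8, 12}, mapping as before): the sorted, unique global
// indices {1, 4, 6, 9, 11} are split by matrix and shifted to local coordinates,
// so 'init' is called as:
//     init(0, {1, 4});
//     init(1, {1});
//     init(2, {1, 3});
// where each vector is passed as a shared pointer.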
template<bool oracle_, typename Value_, typename Index_>
class ParallelDense final : public DenseExtractor<oracle_, Value_, Index_> {
public:
    // Full extraction along the non-combined dimension.
    ParallelDense(
        const std::vector<Index_>&, // cumulative; unused, kept for a consistent signature.
        const std::vector<Index_>&, // mapping; unused, kept for a consistent signature.
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        const bool row,
        MaybeOracle<oracle_, Index_> oracle,
        const Options& opt)
    {
        my_exts.reserve(matrices.size());
        my_count.reserve(matrices.size());
        for (const auto& m : matrices) {
            my_count.emplace_back(row ? m->ncol() : m->nrow());
            my_exts.emplace_back(new_extractor<false, oracle_>(m.get(), row, oracle, opt));
        }
    }

    // Block extraction along the non-combined dimension.
    ParallelDense(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>& mapping,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        const bool row,
        MaybeOracle<oracle_, Index_> oracle,
        const Index_ block_start,
        const Index_ block_length,
        const Options& opt)
    {
        my_exts.reserve(matrices.size());
        my_count.reserve(matrices.size());
        initialize_parallel_block(
            cumulative,
            mapping,
            block_start,
            block_length,
            [&](const Index_ i, const Index_ sub_block_start, const Index_ sub_block_length) -> void {
                my_count.emplace_back(sub_block_length);
                my_exts.emplace_back(new_extractor<false, oracle_>(matrices[i].get(), row, oracle, sub_block_start, sub_block_length, opt));
            }
        );
    }

    // Indexed extraction along the non-combined dimension.
    ParallelDense(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>& mapping,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        const bool row,
        MaybeOracle<oracle_, Index_> oracle,
        VectorPtr<Index_> indices_ptr,
        const Options& opt)
    {
        my_exts.reserve(matrices.size());
        my_count.reserve(matrices.size());
        initialize_parallel_index(
            cumulative,
            mapping,
            *indices_ptr,
            [&](const Index_ i, VectorPtr<Index_> sub_indices_ptr) -> void {
                my_count.emplace_back(sub_indices_ptr->size());
                my_exts.emplace_back(new_extractor<false, oracle_>(matrices[i].get(), row, oracle, std::move(sub_indices_ptr), opt));
            }
        );
    }

public:
    const Value_* fetch(const Index_ i, Value_* const buffer) {
        // Each submatrix fills its own stretch of the output buffer.
        auto copy = buffer;
        const Index_ nmats = my_count.size();
        for (Index_ x = 0; x < nmats; ++x) {
            const auto ptr = my_exts[x]->fetch(i, copy);
            const auto num = my_count[x];
            copy_n(ptr, num, copy);
            copy += num;
        }
        return buffer;
    }

private:
    std::vector<std::unique_ptr<DenseExtractor<oracle_, Value_, Index_> > > my_exts;
    std::vector<Index_> my_count;
};
template<bool oracle_, typename Value_, typename Index_>
class ParallelFullSparse final : public SparseExtractor<oracle_, Value_, Index_> {
public:
    ParallelFullSparse(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>&, // mapping; unused, kept for a consistent signature.
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        const bool row,
        MaybeOracle<oracle_, Index_> oracle,
        const Options& opt) :
        my_cumulative(cumulative),
        my_needs_value(opt.sparse_extract_value),
        my_needs_index(opt.sparse_extract_index)
    {
        my_exts.reserve(matrices.size());
        for (const auto& m : matrices) {
            my_exts.emplace_back(new_extractor<true, oracle_>(m.get(), row, oracle, opt));
        }
    }

    SparseRange<Value_, Index_> fetch(const Index_ i, Value_* const value_buffer, Index_* const index_buffer) {
        auto vcopy = value_buffer;
        auto icopy = index_buffer;
        Index_ accumulated = 0;

        const Index_ nmats = my_exts.size();
        for (Index_ x = 0; x < nmats; ++x) {
            const auto range = my_exts[x]->fetch(i, vcopy, icopy);
            accumulated += range.number;
            if (my_needs_value) {
                copy_n(range.value, range.number, vcopy);
                vcopy += range.number;
            }
            if (my_needs_index) {
                // Shift the submatrix's indices into the combined coordinate system.
                const auto offset = my_cumulative[x];
                for (Index_ y = 0; y < range.number; ++y) {
                    icopy[y] = range.index[y] + offset;
                }
                icopy += range.number;
            }
        }

        return SparseRange<Value_, Index_>(accumulated, (my_needs_value ? value_buffer : NULL), (my_needs_index ? index_buffer : NULL));
    }

private:
    const std::vector<Index_>& my_cumulative;
    bool my_needs_value, my_needs_index;
    std::vector<std::unique_ptr<SparseExtractor<oracle_, Value_, Index_> > > my_exts;
};
template<bool oracle_, typename Value_, typename Index_>
class ParallelBlockSparse final : public SparseExtractor<oracle_, Value_, Index_> {
public:
    ParallelBlockSparse(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>& mapping,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        const bool row,
        MaybeOracle<oracle_, Index_> oracle,
        const Index_ block_start,
        const Index_ block_length,
        const Options& opt) :
        my_cumulative(cumulative),
        my_needs_value(opt.sparse_extract_value),
        my_needs_index(opt.sparse_extract_index)
    {
        my_exts.reserve(matrices.size());
        my_start_matrix = initialize_parallel_block(
            cumulative,
            mapping,
            block_start,
            block_length,
            [&](const Index_ i, const Index_ sub_block_start, const Index_ sub_block_length) -> void {
                my_exts.emplace_back(new_extractor<true, oracle_>(matrices[i].get(), row, oracle, sub_block_start, sub_block_length, opt));
            }
        );
    }

    SparseRange<Value_, Index_> fetch(const Index_ i, Value_* const value_buffer, Index_* const index_buffer) {
        auto vcopy = value_buffer;
        auto icopy = index_buffer;
        Index_ count = 0;

        const Index_ nmats = my_exts.size();
        for (Index_ x = 0; x < nmats; ++x) {
            const auto range = my_exts[x]->fetch(i, vcopy, icopy);
            count += range.number;
            if (my_needs_value) {
                copy_n(range.value, range.number, vcopy);
                vcopy += range.number;
            }
            if (my_needs_index) {
                // 'x' is relative to the first matrix overlapping the block, hence the shift by 'my_start_matrix'.
                const Index_ offset = my_cumulative[x + my_start_matrix];
                for (Index_ y = 0; y < range.number; ++y) {
                    icopy[y] = range.index[y] + offset;
                }
                icopy += range.number;
            }
        }

        return SparseRange<Value_, Index_>(count, (my_needs_value ? value_buffer : NULL), (my_needs_index ? index_buffer : NULL));
    }

private:
    const std::vector<Index_>& my_cumulative;
    bool my_needs_value, my_needs_index;
    std::vector<std::unique_ptr<SparseExtractor<oracle_, Value_, Index_> > > my_exts;
    Index_ my_start_matrix;
};
template<bool oracle_, typename Value_, typename Index_>
class ParallelIndexSparse final : public SparseExtractor<oracle_, Value_, Index_> {
public:
    ParallelIndexSparse(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>& mapping,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        const bool row,
        MaybeOracle<oracle_, Index_> oracle,
        VectorPtr<Index_> indices_ptr,
        const Options& opt) :
        my_cumulative(cumulative),
        my_needs_value(opt.sparse_extract_value),
        my_needs_index(opt.sparse_extract_index)
    {
        my_exts.reserve(matrices.size());
        my_which_matrix.reserve(matrices.size());
        initialize_parallel_index(
            cumulative,
            mapping,
            *indices_ptr,
            [&](const Index_ i, VectorPtr<Index_> sub_indices_ptr) -> void {
                my_which_matrix.emplace_back(i);
                my_exts.emplace_back(new_extractor<true, oracle_>(matrices[i].get(), row, oracle, std::move(sub_indices_ptr), opt));
            }
        );
    }

    SparseRange<Value_, Index_> fetch(const Index_ i, Value_* const value_buffer, Index_* const index_buffer) {
        auto vcopy = value_buffer;
        auto icopy = index_buffer;
        Index_ count = 0;

        const Index_ nmats = my_which_matrix.size();
        for (Index_ x = 0; x < nmats; ++x) {
            const auto range = my_exts[x]->fetch(i, vcopy, icopy);
            count += range.number;
            if (my_needs_value) {
                copy_n(range.value, range.number, vcopy);
                vcopy += range.number;
            }
            if (my_needs_index) {
                // Only matrices that received at least one index have extractors, so look up the original matrix.
                const Index_ offset = my_cumulative[my_which_matrix[x]];
                for (Index_ y = 0; y < range.number; ++y) {
                    icopy[y] = range.index[y] + offset;
                }
                icopy += range.number;
            }
        }

        return SparseRange<Value_, Index_>(count, (my_needs_value ? value_buffer : NULL), (my_needs_index ? index_buffer : NULL));
    }

private:
    const std::vector<Index_>& my_cumulative;
    bool my_needs_value, my_needs_index;
    std::vector<std::unique_ptr<SparseExtractor<oracle_, Value_, Index_> > > my_exts;
    std::vector<Index_> my_which_matrix;
};
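// The "parallel" extractors above are used when iterating along the dimension that was
// NOT combined: each fetched vector spans all underlying matrices, so the per-matrix
// results are concatenated (with index offsets for sparse output). The "perpendicular"
// extractors below are used when iterating along the combined dimension itself: each
// fetched element belongs to exactly one underlying matrix, identified via 'mapping',
// and its position within that matrix is recovered by subtracting 'cumulative'.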
template<typename Value_, typename Index_>
class MyopicPerpendicularDense final : public MyopicDenseExtractor<Value_, Index_> {
public:
    template<typename ... Args_>
    MyopicPerpendicularDense(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>& mapping,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        const bool row,
        const Args_& ... args) :
        my_cumulative(cumulative),
        my_mapping(mapping)
    {
        my_exts.reserve(matrices.size());
        for (const auto& m : matrices) {
            my_exts.emplace_back(m->dense(row, args...));
        }
    }

    const Value_* fetch(const Index_ i, Value_* const buffer) {
        // Dispatch to the single matrix that owns position 'i' on the combined dimension.
        const Index_ chosen = my_mapping[i];
        return my_exts[chosen]->fetch(i - my_cumulative[chosen], buffer);
    }

private:
    const std::vector<Index_>& my_cumulative;
    const std::vector<Index_>& my_mapping;
    std::vector<std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > > my_exts;
};
template<typename Value_, typename Index_>
class MyopicPerpendicularSparse final : public MyopicSparseExtractor<Value_, Index_> {
public:
    template<typename ... Args_>
    MyopicPerpendicularSparse(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>& mapping,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        const bool row,
        const Args_& ... args) :
        my_cumulative(cumulative),
        my_mapping(mapping)
    {
        my_exts.reserve(matrices.size());
        for (const auto& m : matrices) {
            my_exts.emplace_back(m->sparse(row, args...));
        }
    }

    SparseRange<Value_, Index_> fetch(const Index_ i, Value_* const vbuffer, Index_* const ibuffer) {
        const Index_ chosen = my_mapping[i];
        return my_exts[chosen]->fetch(i - my_cumulative[chosen], vbuffer, ibuffer);
    }

private:
    const std::vector<Index_>& my_cumulative;
    const std::vector<Index_>& my_mapping;
    std::vector<std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > > my_exts;
};
template<typename Index_, class Initialize_>
void initialize_perp_oracular(
    const std::vector<Index_>& cumulative,
    const std::vector<Index_>& mapping,
    const Oracle<Index_>& oracle,
    std::vector<Index_>& chosen,
    const Initialize_ init)
{
    const auto ntotal = oracle.total();
    chosen.reserve(ntotal);

    // Split the oracle's predictions by underlying matrix, remembering whether each
    // matrix's share of the predictions forms a single consecutive run.
    struct Predictions {
        bool consecutive = true;
        Index_ start = 0;
        Index_ number = 0;
        std::vector<Index_> predictions;

        void add(const Index_ p) {
            if (consecutive) {
                if (number == 0) {
                    start = p;
                    number = 1;
                    return;
                }
                if (number + start == p) {
                    ++number;
                    return;
                }
                // No longer consecutive, so fall back to an explicit vector of predictions.
                consecutive = false;
                predictions.resize(number);
                std::iota(predictions.begin(), predictions.end(), start);
            }
            predictions.push_back(p);
        }
    };

    const auto nmats = cumulative.size() - 1;
    auto predictions = create_container_of_Index_size<std::vector<Predictions> >(nmats);
    for (I<decltype(ntotal)> i = 0; i < ntotal; ++i) {
        const auto prediction = oracle.get(i);
        const Index_ choice = mapping[prediction];
        chosen.push_back(choice);
        predictions[choice].add(prediction - cumulative[choice]);
    }

    for (I<decltype(nmats)> x = 0; x < nmats; ++x) {
        auto& current = predictions[x];
        if (current.consecutive) {
            if (current.number) {
                init(x, std::make_shared<ConsecutiveOracle<Index_> >(current.start, current.number));
            }
        } else {
            if (!current.predictions.empty()) {
                init(x, std::make_shared<FixedVectorOracle<Index_> >(std::move(current.predictions)));
            }
        }
    }
}
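// For example (illustrative only), with cumulative = {0, 5, 8} and an oracle predicting
// the global sequence 5, 6, 7, 0, 2: matrix 1 receives the consecutive run {0, 1, 2}
// and gets a ConsecutiveOracle(0, 3), while matrix 0 receives the non-consecutive
// predictions {0, 2} and gets a FixedVectorOracle({0, 2}). 'chosen' records the
// per-prediction matrix assignments, {1, 1, 1, 0, 0}, for use at fetch time.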
template<typename Value_, typename Index_>
class OracularPerpendicularDense final : public OracularDenseExtractor<Value_, Index_> {
public:
    template<typename ... Args_>
    OracularPerpendicularDense(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>& mapping,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        const bool row,
        std::shared_ptr<const Oracle<Index_> > ora,
        const Args_& ... args)
    {
        my_exts.resize(matrices.size());
        initialize_perp_oracular(
            cumulative,
            mapping,
            *ora,
            my_segments,
            [&](const Index_ x, std::shared_ptr<const Oracle<Index_> > subora) -> void {
                my_exts[x] = matrices[x]->dense(row, std::move(subora), args...);
            }
        );
    }

    const Value_* fetch(const Index_ i, Value_* const buffer) {
        // 'my_segments' records which matrix services each successive prediction.
        const auto chosen = my_segments[my_used];
        const auto output = my_exts[chosen]->fetch(i, buffer);
        ++my_used;
        return output;
    }

private:
    std::vector<Index_> my_segments;
    std::vector<std::unique_ptr<OracularDenseExtractor<Value_, Index_> > > my_exts;
    PredictionIndex my_used = 0;
};
template<typename Value_, typename Index_>
class OracularPerpendicularSparse final : public OracularSparseExtractor<Value_, Index_> {
public:
    template<typename ... Args_>
    OracularPerpendicularSparse(
        const std::vector<Index_>& cumulative,
        const std::vector<Index_>& mapping,
        const std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >& matrices,
        const bool row,
        std::shared_ptr<const Oracle<Index_> > ora,
        const Args_& ... args)
    {
        my_exts.resize(matrices.size());
        initialize_perp_oracular(
            cumulative,
            mapping,
            *ora,
            my_segments,
            [&](const Index_ x, std::shared_ptr<const Oracle<Index_> > subora) -> void {
                my_exts[x] = matrices[x]->sparse(row, std::move(subora), args...);
            }
        );
    }

    SparseRange<Value_, Index_> fetch(Index_ i, Value_* vbuffer, Index_* ibuffer) {
        const auto chosen = my_segments[my_used];
        const auto output = my_exts[chosen]->fetch(i, vbuffer, ibuffer);
        ++my_used;
        return output;
    }

private:
    std::vector<Index_> my_segments;
    std::vector<std::unique_ptr<OracularSparseExtractor<Value_, Index_> > > my_exts;
    PredictionIndex my_used = 0;
};

} // namespace DelayedBind_internal
/**
 * @brief Delayed combining of multiple matrices along one dimension.
 */
template<typename Value_, typename Index_>
class DelayedBind final : public Matrix<Value_, Index_> {
public:
    DelayedBind(std::vector<std::shared_ptr<const Matrix<Value_, Index_> > > matrices, const bool by_row) :
        my_matrices(std::move(matrices)), my_by_row(by_row)
    {
        auto nmats = my_matrices.size();
        my_cumulative.reserve(sanisizer::sum<I<decltype(my_cumulative.size())> >(nmats, 1));
        I<decltype(nmats)> sofar = 0;
        my_cumulative.push_back(0);

        for (I<decltype(nmats)> i = 0; i < nmats; ++i) {
            auto& current = my_matrices[i];
            Index_ primary, secondary;
            if (my_by_row) {
                primary = current->nrow();
                secondary = current->ncol();
            } else {
                primary = current->ncol();
                secondary = current->nrow();
            }

            if (i == 0) {
                my_otherdim = secondary;
            } else if (my_otherdim != secondary) {
                throw std::runtime_error("all 'matrices' should have the same number of " + (my_by_row ? std::string("columns") : std::string("rows")));
            }

            // Dropping matrices that contribute nothing to the combined dimension.
            if (primary > 0) {
                my_matrices[sofar] = std::move(current);
                ++sofar;
                my_cumulative.push_back(sanisizer::sum<Index_>(attest_for_Index(my_cumulative.back()), attest_for_Index(primary)));
            }
        }

        my_matrices.resize(sofar);
        nmats = sofar;

        my_mapping.reserve(my_cumulative.back());
        for (I<decltype(nmats)> i = 0; i < nmats; ++i) {
            my_mapping.insert(my_mapping.end(), (my_by_row ? my_matrices[i]->nrow() : my_matrices[i]->ncol()), i);
        }

        // Weighted averages of the sparsity and row-preference proportions,
        // using each matrix's size as its weight.
        double denom = 0;
        for (const auto& x : my_matrices) {
            const double total = static_cast<double>(x->nrow()) * static_cast<double>(x->ncol());
            denom += total;
            my_sparse_prop += total * x->is_sparse_proportion();
            my_by_row_prop += total * x->prefer_rows_proportion();
        }
        if (denom) {
            my_sparse_prop /= denom;
            my_by_row_prop /= denom;
        }

        for (int d = 0; d < 2; ++d) {
            my_uses_oracle[d] = false;
            for (const auto& x : my_matrices) {
                if (x->uses_oracle(d)) {
                    my_uses_oracle[d] = true;
                    break;
                }
            }
        }
    }

    DelayedBind(const std::vector<std::shared_ptr<Matrix<Value_, Index_> > >& matrices, const bool by_row) :
        DelayedBind(std::vector<std::shared_ptr<const Matrix<Value_, Index_> > >(matrices.begin(), matrices.end()), by_row) {}

private:
    std::vector<std::shared_ptr<const Matrix<Value_, Index_> > > my_matrices;
    bool my_by_row;
    Index_ my_otherdim = 0;
    std::vector<Index_> my_cumulative;
    std::vector<Index_> my_mapping;
    double my_sparse_prop = 0, my_by_row_prop = 0;
    std::array<bool, 2> my_uses_oracle;

public:
    Index_ nrow() const {
        return my_by_row ? my_cumulative.back() : my_otherdim;
    }

    Index_ ncol() const {
        return my_by_row ? my_otherdim : my_cumulative.back();
    }

    bool is_sparse() const {
        return my_sparse_prop > 0.5;
    }

    double is_sparse_proportion() const {
        return my_sparse_prop;
    }

    bool prefer_rows() const {
        return my_by_row_prop > 0.5;
    }

    double prefer_rows_proportion() const {
        return my_by_row_prop;
    }

    bool uses_oracle(const bool row) const {
        return my_uses_oracle[row];
    }

    using Matrix<Value_, Index_>::dense;

    using Matrix<Value_, Index_>::sparse;
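    /*
     * Extractor dispatch: every overload below follows the same pattern.
     * - With a single underlying matrix, the request is forwarded directly to it.
     * - If the requested dimension matches the combined dimension (row == my_by_row),
     *   a "perpendicular" extractor routes each fetch to the one matrix that owns it.
     * - Otherwise, a "parallel" extractor queries every matrix and stitches the
     *   results together along the combined dimension.
     */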
    std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > dense(const bool row, const Options& opt) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->dense(row, opt);
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::MyopicPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, opt);
        } else {
            return std::make_unique<DelayedBind_internal::ParallelDense<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, false, opt);
        }
    }

    std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > dense(const bool row, const Index_ block_start, const Index_ block_length, const Options& opt) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->dense(row, block_start, block_length, opt);
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::MyopicPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, block_start, block_length, opt);
        } else {
            return std::make_unique<DelayedBind_internal::ParallelDense<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, false, block_start, block_length, opt);
        }
    }

    std::unique_ptr<MyopicDenseExtractor<Value_, Index_> > dense(const bool row, VectorPtr<Index_> indices_ptr, const Options& opt) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->dense(row, std::move(indices_ptr), opt);
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::MyopicPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(indices_ptr), opt);
        } else {
            return std::make_unique<DelayedBind_internal::ParallelDense<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, false, std::move(indices_ptr), opt);
        }
    }
    std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(const bool row, const Options& opt) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->sparse(row, opt);
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::MyopicPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, opt);
        } else {
            return std::make_unique<DelayedBind_internal::ParallelFullSparse<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, false, opt);
        }
    }

    std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(const bool row, const Index_ block_start, const Index_ block_length, const Options& opt) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->sparse(row, block_start, block_length, opt);
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::MyopicPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, block_start, block_length, opt);
        } else {
            return std::make_unique<DelayedBind_internal::ParallelBlockSparse<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, false, block_start, block_length, opt);
        }
    }

    std::unique_ptr<MyopicSparseExtractor<Value_, Index_> > sparse(const bool row, VectorPtr<Index_> indices_ptr, const Options& opt) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->sparse(row, std::move(indices_ptr), opt);
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::MyopicPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(indices_ptr), opt);
        } else {
            return std::make_unique<DelayedBind_internal::ParallelIndexSparse<false, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, false, std::move(indices_ptr), opt);
        }
    }
    std::unique_ptr<OracularDenseExtractor<Value_, Index_> > dense(const bool row, std::shared_ptr<const Oracle<Index_> > oracle, const Options& opt) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->dense(row, std::move(oracle), opt);
        } else if (!my_uses_oracle[row]) {
            // None of the underlying matrices benefit from predictions, so just wrap a myopic extractor.
            return std::make_unique<PseudoOracularDenseExtractor<Value_, Index_> >(std::move(oracle), dense(row, opt));
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::OracularPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), opt);
        } else {
            return std::make_unique<DelayedBind_internal::ParallelDense<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), opt);
        }
    }

    std::unique_ptr<OracularDenseExtractor<Value_, Index_> > dense(const bool row, std::shared_ptr<const Oracle<Index_> > oracle, const Index_ block_start, const Index_ block_length, const Options& opt) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->dense(row, std::move(oracle), block_start, block_length, opt);
        } else if (!my_uses_oracle[row]) {
            return std::make_unique<PseudoOracularDenseExtractor<Value_, Index_> >(std::move(oracle), dense(row, block_start, block_length, opt));
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::OracularPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), block_start, block_length, opt);
        } else {
            return std::make_unique<DelayedBind_internal::ParallelDense<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), block_start, block_length, opt);
        }
    }

    std::unique_ptr<OracularDenseExtractor<Value_, Index_> > dense(const bool row, std::shared_ptr<const Oracle<Index_> > oracle, VectorPtr<Index_> indices_ptr, const Options& opt) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->dense(row, std::move(oracle), std::move(indices_ptr), opt);
        } else if (!my_uses_oracle[row]) {
            return std::make_unique<PseudoOracularDenseExtractor<Value_, Index_> >(std::move(oracle), dense(row, std::move(indices_ptr), opt));
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::OracularPerpendicularDense<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), std::move(indices_ptr), opt);
        } else {
            return std::make_unique<DelayedBind_internal::ParallelDense<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), std::move(indices_ptr), opt);
        }
    }
    std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(const bool row, std::shared_ptr<const Oracle<Index_> > oracle, const Options& opt) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->sparse(row, std::move(oracle), opt);
        } else if (!my_uses_oracle[row]) {
            return std::make_unique<PseudoOracularSparseExtractor<Value_, Index_> >(std::move(oracle), sparse(row, opt));
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::OracularPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), opt);
        } else {
            return std::make_unique<DelayedBind_internal::ParallelFullSparse<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), opt);
        }
    }

    std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(const bool row, std::shared_ptr<const Oracle<Index_> > oracle, const Index_ block_start, const Index_ block_length, const Options& opt) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->sparse(row, std::move(oracle), block_start, block_length, opt);
        } else if (!my_uses_oracle[row]) {
            return std::make_unique<PseudoOracularSparseExtractor<Value_, Index_> >(std::move(oracle), sparse(row, block_start, block_length, opt));
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::OracularPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), block_start, block_length, opt);
        } else {
            return std::make_unique<DelayedBind_internal::ParallelBlockSparse<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), block_start, block_length, opt);
        }
    }

    std::unique_ptr<OracularSparseExtractor<Value_, Index_> > sparse(const bool row, std::shared_ptr<const Oracle<Index_> > oracle, VectorPtr<Index_> indices_ptr, const Options& opt) const {
        if (my_matrices.size() == 1) {
            return my_matrices[0]->sparse(row, std::move(oracle), std::move(indices_ptr), opt);
        } else if (!my_uses_oracle[row]) {
            return std::make_unique<PseudoOracularSparseExtractor<Value_, Index_> >(std::move(oracle), sparse(row, std::move(indices_ptr), opt));
        } else if (row == my_by_row) {
            return std::make_unique<DelayedBind_internal::OracularPerpendicularSparse<Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), std::move(indices_ptr), opt);
        } else {
            return std::make_unique<DelayedBind_internal::ParallelIndexSparse<true, Value_, Index_> >(my_cumulative, my_mapping, my_matrices, row, std::move(oracle), std::move(indices_ptr), opt);
        }
    }
};
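/*
 * A minimal usage sketch (illustrative only, not part of the library): given two
 * existing matrices with the same number of columns, bind them by row using the
 * factory functions defined below, then pull out a single row. This assumes the
 * enclosing 'tatami' namespace and the Matrix/Options interfaces used above.
 *
 *     std::shared_ptr<const tatami::Matrix<double, int> > first, second; // created elsewhere
 *     std::vector<std::shared_ptr<const tatami::Matrix<double, int> > > inputs{ first, second };
 *     auto combined = tatami::make_DelayedBind(std::move(inputs), true); // true = bind by row
 *
 *     tatami::Options opt;
 *     auto ext = combined->dense(true, opt);              // row-wise myopic extraction
 *     std::vector<double> buffer(combined->ncol());
 *     const double* row0 = ext->fetch(0, buffer.data());  // row 0 of the combined matrix
 */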
template<typename Value_, typename Index_>
std::shared_ptr<Matrix<Value_, Index_> > make_DelayedBind(std::vector<std::shared_ptr<const Matrix<Value_, Index_> > > matrices, bool row) {
    return std::shared_ptr<Matrix<Value_, Index_> >(new DelayedBind<Value_, Index_>(std::move(matrices), row));
}

template<typename Value_, typename Index_>
std::shared_ptr<Matrix<Value_, Index_> > make_DelayedBind(std::vector<std::shared_ptr<Matrix<Value_, Index_> > > matrices, bool row) {
    return std::shared_ptr<Matrix<Value_, Index_> >(new DelayedBind<Value_, Index_>(std::move(matrices), row));
}

template<int margin_, typename Value_, typename Index_>
std::shared_ptr<Matrix<Value_, Index_> > make_DelayedBind(std::vector<std::shared_ptr<const Matrix<Value_, Index_> > > matrices) {
    return make_DelayedBind(std::move(matrices), margin_ == 0);
}

template<int margin_, typename Value_, typename Index_>
std::shared_ptr<Matrix<Value_, Index_> > make_DelayedBind(std::vector<std::shared_ptr<Matrix<Value_, Index_> > > matrices) {
    return make_DelayedBind(std::move(matrices), margin_ == 0);
}

#endif