tatami_chunked
Helpers to create custom chunked tatami matrices
custom_internals.hpp
#ifndef TATAMI_CHUNKED_CUSTOM_CHUNK_COORDINATOR_HPP
#define TATAMI_CHUNKED_CUSTOM_CHUNK_COORDINATOR_HPP

#include "tatami/tatami.hpp"
#include "DenseSlabFactory.hpp"
#include "SparseSlabFactory.hpp"
#include "OracularSlabCache.hpp"
#include "OracularSubsettedSlabCache.hpp"
#include "ChunkDimensionStats.hpp"

#include <vector>
#include <type_traits>
#include <algorithm>
#include <tuple>

#include "sanisizer/sanisizer.hpp"

namespace tatami_chunked {

namespace CustomChunkedMatrix_internal {

/******************
 *** Workspaces ***
 ******************/

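// A dense "workspace" for single-element extraction is just a flat scratch
// buffer: fetch_single() expands each chunk into it and then copies out the
// one row/column of interest.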
template<typename ChunkValue_>
using DenseSingleWorkspace = std::vector<ChunkValue_>;

template<typename ChunkValue_, typename Index_>
class SparseSingleWorkspace {
public:
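    // Pre-allocate one 'non_target_chunkdim'-long stretch of the value/index
    // pools for each of the 'target_chunkdim' positions, so that a chunk can
    // deposit its sparse contents without any per-call allocations.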
    SparseSingleWorkspace(Index_ target_chunkdim, Index_ non_target_chunkdim, bool needs_value, bool needs_index) : my_number(target_chunkdim) {
        if (needs_value) {
            my_value_pool.resize(sanisizer::product<decltype(my_value_pool.size())>(target_chunkdim, non_target_chunkdim));
            my_values.reserve(target_chunkdim);
            auto vptr = my_value_pool.data();
            for (Index_ p = 0; p < target_chunkdim; ++p, vptr += non_target_chunkdim) {
                my_values.push_back(vptr);
            }
        }
        if (needs_index) {
            my_index_pool.resize(sanisizer::product<decltype(my_index_pool.size())>(target_chunkdim, non_target_chunkdim));
            my_indices.reserve(target_chunkdim);
            auto iptr = my_index_pool.data();
            for (Index_ p = 0; p < target_chunkdim; ++p, iptr += non_target_chunkdim) {
                my_indices.push_back(iptr);
            }
        }
    }

    // Delete the copy constructor and copy assignment, as we're handing out
    // pointers into the pools; a copy's pointers would still refer to the
    // original's pools.
    SparseSingleWorkspace(const SparseSingleWorkspace&) = delete;
    SparseSingleWorkspace& operator=(const SparseSingleWorkspace&) = delete;

    // Moves are okay, though, as moving a vector preserves its allocation
    // (and thus the validity of our cached pointers).
    SparseSingleWorkspace(SparseSingleWorkspace&&) = default;
    SparseSingleWorkspace& operator=(SparseSingleWorkspace&&) = default;

private:
    std::vector<ChunkValue_> my_value_pool;
    std::vector<Index_> my_index_pool;
    std::vector<ChunkValue_*> my_values;
    std::vector<Index_*> my_indices;
    std::vector<Index_> my_number;

public:
    std::vector<ChunkValue_*>& get_values() {
        return my_values;
    }

    std::vector<Index_*>& get_indices() {
        return my_indices;
    }

    std::vector<Index_>& get_number() {
        return my_number;
    }
};

/*******************
 *** Coordinator ***
 *******************/

template<bool sparse_, class ChunkValue_, typename Index_>
class ChunkCoordinator {
public:
    ChunkCoordinator(ChunkDimensionStats<Index_> row_stats, ChunkDimensionStats<Index_> col_stats) :
        my_row_stats(std::move(row_stats)),
        my_col_stats(std::move(col_stats))
    {}

private:
    ChunkDimensionStats<Index_> my_row_stats;
    ChunkDimensionStats<Index_> my_col_stats;

public:
    // Number of chunks along the rows is equal to the number of chunks for
    // each column, and vice versa; hence the flipped definitions.
    Index_ get_num_chunks_per_row() const {
        return my_col_stats.num_chunks;
    }

    Index_ get_num_chunks_per_column() const {
        return my_row_stats.num_chunks;
    }

    Index_ get_nrow() const {
        return my_row_stats.dimension_extent;
    }

    Index_ get_ncol() const {
        return my_col_stats.dimension_extent;
    }

    Index_ get_chunk_nrow() const {
        return my_row_stats.chunk_length;
    }

    Index_ get_chunk_ncol() const {
        return my_col_stats.chunk_length;
    }

public:
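    // In all of the following methods, the "target" dimension is the one being
    // iterated over (rows if 'row = true', columns otherwise), while the
    // "non-target" dimension is the one from which a block/subset is extracted.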
    Index_ get_non_target_dim(bool row) const {
        if (row) {
            return my_col_stats.dimension_extent;
        } else {
            return my_row_stats.dimension_extent;
        }
    }

    Index_ get_target_chunkdim(bool row) const {
        if (row) {
            return my_row_stats.chunk_length;
        } else {
            return my_col_stats.chunk_length;
        }
    }

    Index_ get_non_target_chunkdim(bool row) const {
        if (row) {
            return my_col_stats.chunk_length;
        } else {
            return my_row_stats.chunk_length;
        }
    }

    // Overload that handles the truncated chunk at the bottom/right edges of each matrix.
    Index_ get_target_chunkdim(bool row, Index_ chunk_id) const {
        return get_chunk_length(row ? my_row_stats : my_col_stats, chunk_id);
    }

private:
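    // Loop over all non-target chunks overlapping a contiguous block, calling
    // 'extract' once per chunk with the chunk-local [from, from + len) range.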
    template<class ExtractFunction_>
    void extract_non_target_block(
        bool row,
        Index_ target_chunk_id,
        Index_ non_target_block_start,
        Index_ non_target_block_length,
        ExtractFunction_ extract)
    const {
        auto non_target_chunkdim = get_non_target_chunkdim(row);
        Index_ non_target_start_chunk_index = non_target_block_start / non_target_chunkdim;
        Index_ non_target_start_pos = non_target_start_chunk_index * non_target_chunkdim;
        Index_ non_target_block_end = non_target_block_start + non_target_block_length;
        Index_ non_target_end_chunk_index = non_target_block_end / non_target_chunkdim + (non_target_block_end % non_target_chunkdim > 0); // i.e., integer ceiling.
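        // For example, with non_target_chunkdim = 10, a block [25, 35) touches
        // chunks 2 and 3: the first iteration covers [5, 10) of chunk 2 and the
        // second covers [0, 5) of chunk 3.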

        for (Index_ non_target_chunk_id = non_target_start_chunk_index; non_target_chunk_id < non_target_end_chunk_index; ++non_target_chunk_id) {
            Index_ from = (non_target_chunk_id == non_target_start_chunk_index ? non_target_block_start - non_target_start_pos : 0);
            Index_ to = (non_target_chunk_id + 1 == non_target_end_chunk_index ? non_target_block_end - non_target_start_pos : non_target_chunkdim);
            Index_ len = to - from;

            auto row_id = (row ? target_chunk_id : non_target_chunk_id);
            auto col_id = (row ? non_target_chunk_id : target_chunk_id);

            // No need to protect against a zero length, as it should be impossible
            // here (otherwise, start_chunk_index == end_chunk_index and we'd never iterate).
            if constexpr(sparse_) {
                extract(row_id, col_id, from, len, non_target_start_pos);
            } else {
                extract(row_id, col_id, from, len);
            }

            // This is deliberate: adding 'to' either advances to the start of the next
            // chunk (where 'to' is 'non_target_chunkdim') or lands exactly on
            // 'non_target_block_end' for the final chunk, avoiding overflow past the
            // dimension extent.
            non_target_start_pos += to;
        }
    }

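    // As above, but for an arbitrary subset of non-target indices (assumed to be
    // sorted and unique, per the usual tatami conventions): consecutive indices
    // falling in the same chunk are grouped into a single call to 'extract'.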
    template<class ExtractFunction_>
    void extract_non_target_index(
        bool row,
        Index_ target_chunk_id,
        const std::vector<Index_>& non_target_indices,
        std::vector<Index_>& chunk_indices_buffer,
        ExtractFunction_ extract)
    const {
        auto non_target_chunkdim = get_non_target_chunkdim(row);
        auto non_target_dim = get_non_target_dim(row);
        auto iIt = non_target_indices.begin();
        auto iEnd = non_target_indices.end();

        while (iIt != iEnd) {
            Index_ non_target_chunk_id = *iIt / non_target_chunkdim;
            Index_ non_target_start_pos = non_target_chunk_id * non_target_chunkdim;
            Index_ non_target_end_pos = std::min(non_target_dim - non_target_start_pos, non_target_chunkdim) + non_target_start_pos; // this convoluted method avoids overflow.

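            // Sweep up all requested indices that fall into the current chunk,
            // converting each to a chunk-local coordinate as we go.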
            chunk_indices_buffer.clear();
            do {
                chunk_indices_buffer.push_back(*iIt - non_target_start_pos);
                ++iIt;
            } while (iIt != iEnd && *iIt < non_target_end_pos);

            auto row_id = (row ? target_chunk_id : non_target_chunk_id);
            auto col_id = (row ? non_target_chunk_id : target_chunk_id);
            if constexpr(sparse_) {
                extract(row_id, col_id, chunk_indices_buffer, non_target_start_pos);
            } else {
                extract(row_id, col_id, chunk_indices_buffer);
            }
        }
    }

    typedef typename std::conditional<sparse_, typename SparseSlabFactory<ChunkValue_, Index_>::Slab, typename DenseSlabFactory<ChunkValue_>::Slab>::type Slab;
    typedef typename std::conditional<sparse_, SparseSingleWorkspace<ChunkValue_, Index_>, DenseSingleWorkspace<ChunkValue_> >::type SingleWorkspace;
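    // 'Slab' is the cached representation of one chunk's worth of rows/columns,
    // as allocated by the corresponding slab factory; 'SingleWorkspace' is the
    // scratch space for the cache-bypassing single-element queries below.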

public:
    // Extract a single element of the target dimension, using a contiguous
    // block on the non_target dimension.
    template<class ChunkWorkspace_>
    std::pair<const Slab*, Index_> fetch_single(
        bool row,
        Index_ i,
        Index_ non_target_block_start,
        Index_ non_target_block_length,
        ChunkWorkspace_& chunk_workspace,
        SingleWorkspace& tmp_work,
        Slab& final_slab)
    const {
        Index_ target_chunkdim = get_target_chunkdim(row);
        Index_ target_chunk_id = i / target_chunkdim;
        Index_ target_chunk_offset = i % target_chunkdim;

        if constexpr(sparse_) {
            auto& final_num = *final_slab.number;
            final_num = 0;
            bool needs_value = !final_slab.values.empty();
            bool needs_index = !final_slab.indices.empty();

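            // Results from each non-target chunk are appended after those of the
            // previous chunk, with 'final_num' tracking the running total.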
            extract_non_target_block(
                row,
                target_chunk_id,
                non_target_block_start,
                non_target_block_length,
                [&](Index_ row_id, Index_ column_id, Index_ from, Index_ len, Index_ non_target_start_pos) -> void {
                    auto& tmp_values = tmp_work.get_values();
                    auto& tmp_indices = tmp_work.get_indices();
                    auto& tmp_number = tmp_work.get_number();

                    tmp_number[target_chunk_offset] = 0;
                    chunk_workspace.extract(
                        row_id,
                        column_id,
                        row,
                        target_chunk_offset,
                        static_cast<Index_>(1),
                        from,
                        len,
                        tmp_values,
                        tmp_indices,
                        tmp_number.data(),
                        non_target_start_pos
                    );

                    auto count = tmp_number[target_chunk_offset];
                    if (needs_value) {
                        std::copy_n(tmp_values[target_chunk_offset], count, final_slab.values[0] + final_num);
                    }
                    if (needs_index) {
                        std::copy_n(tmp_indices[target_chunk_offset], count, final_slab.indices[0] + final_num);
                    }
                    final_num += count;
                }
            );

        } else {
            auto final_slab_ptr = final_slab.data;
            auto tmp_buffer_ptr = tmp_work.data();
            typedef decltype(tmp_work.size()) Size;

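            // By convention, the chunk writes target element 'p' at offset
            // 'p * stride' in the output buffer; with a stride of 'len', our
            // element of interest thus lives at 'len * target_chunk_offset'.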
            extract_non_target_block(
                row,
                target_chunk_id,
                non_target_block_start,
                non_target_block_length,
                [&](Index_ row_id, Index_ column_id, Index_ from, Index_ len) -> void {
                    chunk_workspace.extract(
                        row_id,
                        column_id,
                        row,
                        target_chunk_offset,
                        static_cast<Index_>(1),
                        from,
                        len,
                        tmp_buffer_ptr,
                        len
                    );

                    Size tmp_offset = sanisizer::product_unsafe<Size>(len, target_chunk_offset);
                    std::copy_n(tmp_buffer_ptr + tmp_offset, len, final_slab_ptr);
                    final_slab_ptr += len;
                }
            );
        }

        return std::make_pair(&final_slab, static_cast<Index_>(0));
    }

    // Extract a single element of the target dimension, using an indexed
    // subset on the non_target dimension.
    template<class ChunkWorkspace_>
    std::pair<const Slab*, Index_> fetch_single(
        bool row,
        Index_ i,
        const std::vector<Index_>& non_target_indices,
        std::vector<Index_>& chunk_indices_buffer,
        ChunkWorkspace_& chunk_workspace,
        SingleWorkspace& tmp_work,
        Slab& final_slab)
    const {
        Index_ target_chunkdim = get_target_chunkdim(row);
        Index_ target_chunk_id = i / target_chunkdim;
        Index_ target_chunk_offset = i % target_chunkdim;

        if constexpr(sparse_) {
            auto& final_num = *final_slab.number;
            final_num = 0;
            bool needs_value = !final_slab.values.empty();
            bool needs_index = !final_slab.indices.empty();

            extract_non_target_index(
                row,
                target_chunk_id,
                non_target_indices,
                chunk_indices_buffer,
                [&](Index_ row_id, Index_ column_id, const std::vector<Index_>& chunk_indices, Index_ non_target_start_pos) -> void {
                    auto& tmp_values = tmp_work.get_values();
                    auto& tmp_indices = tmp_work.get_indices();
                    auto& tmp_number = tmp_work.get_number();

                    tmp_number[target_chunk_offset] = 0;
                    chunk_workspace.extract(
                        row_id,
                        column_id,
                        row,
                        target_chunk_offset,
                        static_cast<Index_>(1),
                        chunk_indices,
                        tmp_values,
                        tmp_indices,
                        tmp_number.data(),
                        non_target_start_pos
                    );

                    auto count = tmp_number[target_chunk_offset];
                    if (needs_value) {
                        std::copy_n(tmp_values[target_chunk_offset], count, final_slab.values[0] + final_num);
                    }
                    if (needs_index) {
                        std::copy_n(tmp_indices[target_chunk_offset], count, final_slab.indices[0] + final_num);
                    }
                    final_num += count;
                }
            );

        } else {
            auto final_slab_ptr = final_slab.data;
            auto tmp_buffer_ptr = tmp_work.data();
            typedef decltype(tmp_work.size()) Size;

            extract_non_target_index(
                row,
                target_chunk_id,
                non_target_indices,
                chunk_indices_buffer,
                [&](Index_ row_id, Index_ column_id, const std::vector<Index_>& chunk_indices) -> void {
                    auto nidx = chunk_indices.size();
                    chunk_workspace.extract(
                        row_id,
                        column_id,
                        row,
                        target_chunk_offset,
                        static_cast<Index_>(1),
                        chunk_indices,
                        tmp_buffer_ptr,
                        nidx
                    );

                    Size tmp_offset = static_cast<Size>(nidx) * static_cast<Size>(target_chunk_offset);
                    std::copy_n(tmp_buffer_ptr + tmp_offset, nidx, final_slab_ptr);
                    final_slab_ptr += nidx;
                }
            );
        }

        return std::make_pair(&final_slab, static_cast<Index_>(0));
    }

private:
    // Extract a contiguous block of the target dimension, using a contiguous block on the non_target dimension.
    template<class ChunkWorkspace_>
    void fetch_block(
        bool row,
        Index_ target_chunk_id,
        Index_ target_chunk_offset,
        Index_ target_chunk_length,
        Index_ non_target_block_start,
        Index_ non_target_block_length,
        Slab& slab,
        ChunkWorkspace_& chunk_workspace)
    const {
        if constexpr(sparse_) {
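            // Reset the per-element counts first; each call to extract() then
            // appends its chunk's results cumulatively into 'slab.number'.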
            std::fill_n(slab.number, get_target_chunkdim(row), 0);

            extract_non_target_block(
                row,
                target_chunk_id,
                non_target_block_start,
                non_target_block_length,
                [&](Index_ row_id, Index_ column_id, Index_ from, Index_ len, Index_ non_target_start_pos) -> void {
                    chunk_workspace.extract(
                        row_id,
                        column_id,
                        row,
                        target_chunk_offset,
                        target_chunk_length,
                        from,
                        len,
                        slab.values,
                        slab.indices,
                        slab.number,
                        non_target_start_pos
                    );
                }
            );

        } else {
            auto slab_ptr = slab.data;

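            // Dense extraction uses a stride equal to the full block length, so
            // the columns from successive chunks sit side by side within each
            // row of the slab; only 'slab_ptr' shifts between chunks.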
            extract_non_target_block(
                row,
                target_chunk_id,
                non_target_block_start,
                non_target_block_length,
                [&](Index_ row_id, Index_ column_id, Index_ from, Index_ len) -> void {
                    chunk_workspace.extract(
                        row_id,
                        column_id,
                        row,
                        target_chunk_offset,
                        target_chunk_length,
                        from,
                        len,
                        slab_ptr,
                        non_target_block_length
                    );
                    slab_ptr += len;
                }
            );
        }
    }

    // Extract a contiguous block of the target dimension, using an indexed subset on the non_target dimension.
    template<class ChunkWorkspace_>
    void fetch_block(
        bool row,
        Index_ target_chunk_id,
        Index_ target_chunk_offset,
        Index_ target_chunk_length,
        const std::vector<Index_>& non_target_indices,
        std::vector<Index_>& chunk_indices_buffer,
        Slab& slab,
        ChunkWorkspace_& chunk_workspace)
    const {
        if constexpr(sparse_) {
            std::fill_n(slab.number, get_target_chunkdim(row), 0);

            extract_non_target_index(
                row,
                target_chunk_id,
                non_target_indices,
                chunk_indices_buffer,
                [&](Index_ row_id, Index_ column_id, const std::vector<Index_>& chunk_indices, Index_ non_target_start_pos) -> void {
                    chunk_workspace.extract(
                        row_id,
                        column_id,
                        row,
                        target_chunk_offset,
                        target_chunk_length,
                        chunk_indices,
                        slab.values,
                        slab.indices,
                        slab.number,
                        non_target_start_pos
                    );
                }
            );

        } else {
            auto slab_ptr = slab.data;
            Index_ stride = non_target_indices.size();
            extract_non_target_index(
                row,
                target_chunk_id,
                non_target_indices,
                chunk_indices_buffer,
                [&](Index_ row_id, Index_ column_id, const std::vector<Index_>& chunk_indices) -> void {
                    chunk_workspace.extract(
                        row_id,
                        column_id,
                        row,
                        target_chunk_offset,
                        target_chunk_length,
                        chunk_indices,
                        slab_ptr,
                        stride
                    );
                    slab_ptr += chunk_indices.size();
                }
            );
        }
    }

private:
    // Extract an indexed subset of the target dimension, using a contiguous block on the non_target dimension.
    template<class ChunkWorkspace_>
    void fetch_index(
        bool row,
        Index_ target_chunk_id,
        const std::vector<Index_>& target_indices,
        Index_ non_target_block_start,
        Index_ non_target_block_length,
        Slab& slab,
        ChunkWorkspace_& chunk_workspace)
    const {
        if constexpr(sparse_) {
            std::fill_n(slab.number, get_target_chunkdim(row), 0);
            extract_non_target_block(
                row,
                target_chunk_id,
                non_target_block_start,
                non_target_block_length,
                [&](Index_ row_id, Index_ column_id, Index_ from, Index_ len, Index_ non_target_start_pos) -> void {
                    chunk_workspace.extract(
                        row_id,
                        column_id,
                        row,
                        target_indices,
                        from,
                        len,
                        slab.values,
                        slab.indices,
                        slab.number,
                        non_target_start_pos
                    );
                }
            );

        } else {
            auto slab_ptr = slab.data;
            extract_non_target_block(
                row,
                target_chunk_id,
                non_target_block_start,
                non_target_block_length,
                [&](Index_ row_id, Index_ column_id, Index_ from, Index_ len) -> void {
                    chunk_workspace.extract(
                        row_id,
                        column_id,
                        row,
                        target_indices,
                        from,
                        len,
                        slab_ptr,
                        non_target_block_length
                    );
                    slab_ptr += len;
                }
            );
        }
    }

    // Extract an indexed subset of the target dimension, using an indexed subset on the non_target dimension.
    template<class ChunkWorkspace_>
    void fetch_index(
        bool row,
        Index_ target_chunk_id,
        const std::vector<Index_>& target_indices,
        const std::vector<Index_>& non_target_indices,
        std::vector<Index_>& chunk_indices_buffer,
        Slab& slab,
        ChunkWorkspace_& chunk_workspace)
    const {
        if constexpr(sparse_) {
            std::fill_n(slab.number, get_target_chunkdim(row), 0);
            extract_non_target_index(
                row,
                target_chunk_id,
                non_target_indices,
                chunk_indices_buffer,
                [&](Index_ row_id, Index_ column_id, const std::vector<Index_>& chunk_indices, Index_ non_target_start_pos) -> void {
                    chunk_workspace.extract(
                        row_id,
                        column_id,
                        row,
                        target_indices,
                        chunk_indices,
                        slab.values,
                        slab.indices,
                        slab.number,
                        non_target_start_pos
                    );
                }
            );

        } else {
            auto slab_ptr = slab.data;
            Index_ stride = non_target_indices.size();
            extract_non_target_index(
                row,
                target_chunk_id,
                non_target_indices,
                chunk_indices_buffer,
                [&](Index_ row_id, Index_ column_id, const std::vector<Index_>& chunk_indices) -> void {
                    chunk_workspace.extract(
                        row_id,
                        column_id,
                        row,
                        target_indices,
                        chunk_indices,
                        slab_ptr,
                        stride
                    );
                    slab_ptr += chunk_indices.size();
                }
            );
        }
    }

public:
    // Obtain the slab containing the 'i'-th element of the target dimension.
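    // On a cache miss, 'create' allocates a new slab via the factory and
    // 'populate' fills it with the entire chunk's worth of data, so subsequent
    // requests for neighboring 'i' can be served from the cache.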
    template<class ChunkWorkspace_, class Cache_, class Factory_>
    std::pair<const Slab*, Index_> fetch_myopic(
        bool row,
        Index_ i,
        Index_ block_start,
        Index_ block_length,
        ChunkWorkspace_& chunk_workspace,
        Cache_& cache,
        Factory_& factory)
    const {
        Index_ target_chunkdim = get_target_chunkdim(row);
        Index_ target_chunk_id = i / target_chunkdim;
        Index_ target_chunk_offset = i % target_chunkdim;
        auto& out = cache.find(
            target_chunk_id,
            /* create = */ [&]() -> Slab {
                return factory.create();
            },
            /* populate = */ [&](Index_ id, Slab& slab) -> void {
                fetch_block(row, id, 0, get_target_chunkdim(row, id), block_start, block_length, slab, chunk_workspace);
            }
        );
        return std::make_pair(&out, target_chunk_offset);
    }

    template<class ChunkWorkspace_, class Cache_, class Factory_>
    std::pair<const Slab*, Index_> fetch_myopic(
        bool row,
        Index_ i,
        const std::vector<Index_>& indices,
        std::vector<Index_>& tmp_indices,
        ChunkWorkspace_& chunk_workspace,
        Cache_& cache,
        Factory_& factory)
    const {
        Index_ target_chunkdim = get_target_chunkdim(row);
        Index_ target_chunk_id = i / target_chunkdim;
        Index_ target_chunk_offset = i % target_chunkdim;
        auto& out = cache.find(
            target_chunk_id,
            /* create = */ [&]() -> Slab {
                return factory.create();
            },
            /* populate = */ [&](Index_ id, Slab& slab) -> void {
                fetch_block(row, id, 0, get_target_chunkdim(row, id), indices, tmp_indices, slab, chunk_workspace);
            }
        );
        return std::make_pair(&out, target_chunk_offset);
    }

public:
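    // Oracle-aware variants: the cache knows the future access sequence, so it
    // can ask for multiple slabs to be populated in one go via 'to_populate'.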
    template<class ChunkWorkspace_, class Cache_, class Factory_>
    std::pair<const Slab*, Index_> fetch_oracular(
        bool row,
        Index_ block_start,
        Index_ block_length,
        ChunkWorkspace_& chunk_workspace,
        Cache_& cache,
        Factory_& factory)
    const {
        Index_ target_chunkdim = get_target_chunkdim(row);
        return cache.next(
            /* identify = */ [&](Index_ i) -> std::pair<Index_, Index_> {
                return std::pair<Index_, Index_>(i / target_chunkdim, i % target_chunkdim);
            },
            /* create = */ [&]() -> Slab {
                return factory.create();
            },
            /* populate = */ [&](std::vector<std::pair<Index_, Slab*> >& to_populate) -> void {
                for (auto& p : to_populate) {
                    fetch_block(row, p.first, 0, get_target_chunkdim(row, p.first), block_start, block_length, *(p.second), chunk_workspace);
                }
            }
        );
    }

    template<class ChunkWorkspace_, class Cache_, class Factory_>
    std::pair<const Slab*, Index_> fetch_oracular(
        bool row,
        const std::vector<Index_>& indices,
        std::vector<Index_>& chunk_indices_buffer,
        ChunkWorkspace_& chunk_workspace,
        Cache_& cache,
        Factory_& factory)
    const {
        Index_ target_chunkdim = get_target_chunkdim(row);
        return cache.next(
            /* identify = */ [&](Index_ i) -> std::pair<Index_, Index_> {
                return std::pair<Index_, Index_>(i / target_chunkdim, i % target_chunkdim);
            },
            /* create = */ [&]() -> Slab {
                return factory.create();
            },
            /* populate = */ [&](std::vector<std::pair<Index_, Slab*> >& to_populate) -> void {
                for (auto& p : to_populate) {
                    fetch_block(row, p.first, 0, get_target_chunkdim(row, p.first), indices, chunk_indices_buffer, *(p.second), chunk_workspace);
                }
            }
        );
    }

public:
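    // Subsetted variants: the oracle-aware cache additionally reports which part
    // of each slab will actually be used, so population can be restricted to a
    // block or indexed subset of the target dimension instead of the full chunk.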
    template<class ChunkWorkspace_, class Cache_, class Factory_>
    std::pair<const Slab*, Index_> fetch_oracular_subsetted(
        bool row,
        Index_ block_start,
        Index_ block_length,
        ChunkWorkspace_& chunk_workspace,
        Cache_& cache,
        Factory_& factory)
    const {
        Index_ target_chunkdim = get_target_chunkdim(row);
        return cache.next(
            /* identify = */ [&](Index_ i) -> std::pair<Index_, Index_> {
                return std::pair<Index_, Index_>(i / target_chunkdim, i % target_chunkdim);
            },
            /* create = */ [&]() -> Slab {
                return factory.create();
            },
            /* populate = */ [&](std::vector<std::tuple<Index_, Slab*, const OracularSubsettedSlabCacheSelectionDetails<Index_>*> >& in_need) -> void {
                for (const auto& p : in_need) {
                    auto id = std::get<0>(p);
                    auto ptr = std::get<1>(p);
                    auto sub = std::get<2>(p);
                    switch (sub->selection) {
                        case OracularSubsettedSlabCacheSelectionType::FULL:
                            fetch_block(row, id, 0, get_target_chunkdim(row, id), block_start, block_length, *ptr, chunk_workspace);
                            break;
                        case OracularSubsettedSlabCacheSelectionType::BLOCK:
                            fetch_block(row, id, sub->block_start, sub->block_length, block_start, block_length, *ptr, chunk_workspace);
                            break;
                        case OracularSubsettedSlabCacheSelectionType::INDEX:
                            fetch_index(row, id, sub->indices, block_start, block_length, *ptr, chunk_workspace);
                            break;
                    }
                }
            }
        );
    }

    template<class ChunkWorkspace_, class Cache_, class Factory_>
    std::pair<const Slab*, Index_> fetch_oracular_subsetted(
        bool row,
        const std::vector<Index_>& indices,
        std::vector<Index_>& chunk_indices_buffer,
        ChunkWorkspace_& chunk_workspace,
        Cache_& cache,
        Factory_& factory)
    const {
        Index_ target_chunkdim = get_target_chunkdim(row);
        return cache.next(
            /* identify = */ [&](Index_ i) -> std::pair<Index_, Index_> {
                return std::pair<Index_, Index_>(i / target_chunkdim, i % target_chunkdim);
            },
            /* create = */ [&]() -> Slab {
                return factory.create();
            },
            /* populate = */ [&](std::vector<std::tuple<Index_, Slab*, const OracularSubsettedSlabCacheSelectionDetails<Index_>*> >& in_need) -> void {
                for (const auto& p : in_need) {
                    auto id = std::get<0>(p);
                    auto ptr = std::get<1>(p);
                    auto sub = std::get<2>(p);
                    switch (sub->selection) {
                        case OracularSubsettedSlabCacheSelectionType::FULL:
                            fetch_block(row, id, 0, get_target_chunkdim(row, id), indices, chunk_indices_buffer, *ptr, chunk_workspace);
                            break;
                        case OracularSubsettedSlabCacheSelectionType::BLOCK:
                            fetch_block(row, id, sub->block_start, sub->block_length, indices, chunk_indices_buffer, *ptr, chunk_workspace);
                            break;
                        case OracularSubsettedSlabCacheSelectionType::INDEX:
                            fetch_index(row, id, sub->indices, indices, chunk_indices_buffer, *ptr, chunk_workspace);
                            break;
                    }
                }
            }
        );
    }
};

}

}

#endif
Factory for dense slabs.
Create an oracle-aware cache for slabs.
Create an oracle-aware cache with subsets.
Factory for sparse slabs.
Methods to handle chunked tatami matrices.
Definition ChunkDimensionStats.hpp:4
Index_ get_chunk_length(const ChunkDimensionStats< Index_ > &stats, Index_ i)
Definition ChunkDimensionStats.hpp:85