tatami_chunked
Helpers to create custom chunked tatami matrices
custom_internals.hpp
#ifndef TATAMI_CHUNKED_CUSTOM_CHUNK_COORDINATOR_HPP
#define TATAMI_CHUNKED_CUSTOM_CHUNK_COORDINATOR_HPP

#include "tatami/tatami.hpp"
#include "DenseSlabFactory.hpp"
#include "SparseSlabFactory.hpp"
#include "OracularSubsettedSlabCache.hpp"
#include "ChunkDimensionStats.hpp"

#include <vector>
#include <algorithm>
#include <utility>
#include <tuple>
#include <type_traits>
#include <stdexcept>
#include <cstddef>

namespace tatami_chunked {

namespace CustomChunkedMatrix_internal {

/******************
 *** Workspaces ***
 ******************/

template<typename CachedValue_>
using DenseSingleWorkspace = std::vector<CachedValue_>;

template<typename CachedValue_, typename Index_>
class SparseSingleWorkspace {
public:
    SparseSingleWorkspace(size_t primary_chunkdim, size_t secondary_chunkdim, bool needs_value, bool needs_index) : my_number(primary_chunkdim) {
        size_t total_size = primary_chunkdim * secondary_chunkdim;
        if (needs_value) {
            my_value_pool.resize(total_size);
            my_values.reserve(primary_chunkdim);
            auto vptr = my_value_pool.data();
            for (size_t p = 0; p < primary_chunkdim; ++p, vptr += secondary_chunkdim) {
                my_values.push_back(vptr);
            }
        }
        if (needs_index) {
            my_index_pool.resize(total_size);
            my_indices.reserve(primary_chunkdim);
            auto iptr = my_index_pool.data();
            for (size_t p = 0; p < primary_chunkdim; ++p, iptr += secondary_chunkdim) {
                my_indices.push_back(iptr);
            }
        }
    }

    // Delete the copy constructors as we're passing out pointers.
    SparseSingleWorkspace(const SparseSingleWorkspace&) = delete;
    SparseSingleWorkspace& operator=(const SparseSingleWorkspace&) = delete;

    // Move constructors are okay though.
    SparseSingleWorkspace(SparseSingleWorkspace&&) = default;
    SparseSingleWorkspace& operator=(SparseSingleWorkspace&&) = default;

private:
    std::vector<CachedValue_> my_value_pool;
    std::vector<Index_> my_index_pool;
    std::vector<CachedValue_*> my_values;
    std::vector<Index_*> my_indices;
    std::vector<Index_> my_number;

public:
    std::vector<CachedValue_*>& get_values() {
        return my_values;
    }

    std::vector<Index_*>& get_indices() {
        return my_indices;
    }

    std::vector<Index_>& get_number() {
        return my_number;
    }
};
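// Illustrative layout: for primary_chunkdim = 3 and secondary_chunkdim = 4,
// 'my_value_pool' holds 3 * 4 = 12 contiguous slots and 'my_values' holds
// three pointers into it at offsets 0, 4 and 8, i.e., one full secondary
// dimension's worth of space per primary element. 'my_number[p]' then records
// how many of the slots for primary element 'p' are actually occupied.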

/*******************
 *** Coordinator ***
 *******************/

template<typename Index_, bool sparse_, class Chunk_>
class ChunkCoordinator {
public:
    ChunkCoordinator(ChunkDimensionStats<Index_> row_stats, ChunkDimensionStats<Index_> col_stats, std::vector<Chunk_> chunk_array, bool row_major) :
        my_row_stats(std::move(row_stats)), my_col_stats(std::move(col_stats)), my_chunk_array(std::move(chunk_array)), my_row_major(row_major)
    {
        if (static_cast<size_t>(my_row_stats.num_chunks) * static_cast<size_t>(my_col_stats.num_chunks) != my_chunk_array.size()) {
            throw std::runtime_error("length of 'chunks' should be equal to the product of the number of chunks along each row and column");
        }
    }

private:
    ChunkDimensionStats<Index_> my_row_stats;
    ChunkDimensionStats<Index_> my_col_stats;
    std::vector<Chunk_> my_chunk_array;
    bool my_row_major;

public:
    // The number of chunks spanning a single row is the number of chunks
    // along the column dimension, and vice versa; hence the flipped definitions.
    Index_ get_num_chunks_per_row() const {
        return my_col_stats.num_chunks;
    }

    Index_ get_num_chunks_per_column() const {
        return my_row_stats.num_chunks;
    }

    Index_ get_nrow() const {
        return my_row_stats.dimension_extent;
    }

    Index_ get_ncol() const {
        return my_col_stats.dimension_extent;
    }

    bool prefer_rows_internal() const {
        // Prefer rows if we have to extract fewer chunks per row.
        return get_num_chunks_per_column() > get_num_chunks_per_row();
    }

    Index_ get_chunk_nrow() const {
        return my_row_stats.chunk_length;
    }

    Index_ get_chunk_ncol() const {
        return my_col_stats.chunk_length;
    }
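
    // Worked example of the geometry: a 10 x 20 matrix covered by 4 x 5
    // chunks has my_row_stats.num_chunks = 3 (ceiling of 10/4) and
    // my_col_stats.num_chunks = 4 (ceiling of 20/5), so 'my_chunk_array'
    // must hold 12 chunks. Each row spans 4 chunks while each column spans 3,
    // which is why get_num_chunks_per_row() reports my_col_stats.num_chunks.
    // The trailing chunks are truncated, e.g., the last row of chunks only
    // contains 10 - 2 * 4 = 2 rows; see the get_primary_chunkdim() overload below.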

public:
    Index_ get_secondary_dim(bool row) const {
        if (row) {
            return my_col_stats.dimension_extent;
        } else {
            return my_row_stats.dimension_extent;
        }
    }

    Index_ get_primary_chunkdim(bool row) const {
        if (row) {
            return my_row_stats.chunk_length;
        } else {
            return my_col_stats.chunk_length;
        }
    }

    Index_ get_secondary_chunkdim(bool row) const {
        if (row) {
            return my_col_stats.chunk_length;
        } else {
            return my_row_stats.chunk_length;
        }
    }

    // Overload that handles the truncated chunk at the bottom/right edge of the matrix.
    Index_ get_primary_chunkdim(bool row, Index_ chunk_id) const {
        return get_chunk_length(row ? my_row_stats : my_col_stats, chunk_id);
    }

private:
    std::pair<size_t, size_t> offset_and_increment(bool row, Index_ chunk_id) const {
        size_t num_chunks = (my_row_major ? get_num_chunks_per_row() : get_num_chunks_per_column()); // use size_t to avoid overflow.
        if (row == my_row_major) {
            return std::pair<size_t, size_t>(static_cast<size_t>(chunk_id) * num_chunks, 1);
        } else {
            return std::pair<size_t, size_t>(chunk_id, num_chunks);
        }
    }
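
    // Continuing the worked example above (12 chunks as a 3 x 4 row-major
    // grid, i.e., my_row_major = true): a row-based query on chunk row 2 gets
    // offset_and_increment(true, 2) == (8, 1), visiting chunks 8, 9, 10, 11;
    // a column-based query on chunk column 1 gets
    // offset_and_increment(false, 1) == (1, 4), visiting chunks 1, 5, 9.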

    template<class ExtractFunction_>
    void extract_secondary_block(
        bool row,
        Index_ chunk_id,
        Index_ secondary_block_start,
        Index_ secondary_block_length,
        ExtractFunction_ extract)
    const {
        auto secondary_chunkdim = get_secondary_chunkdim(row);
        Index_ start_chunk_index = secondary_block_start / secondary_chunkdim;
        Index_ secondary_start_pos = start_chunk_index * secondary_chunkdim;
        Index_ secondary_block_end = secondary_block_start + secondary_block_length;
        Index_ end_chunk_index = secondary_block_end / secondary_chunkdim + (secondary_block_end % secondary_chunkdim > 0); // i.e., integer ceiling.

        auto oi = offset_and_increment(row, chunk_id);
        auto offset = std::get<0>(oi);
        auto increment = std::get<1>(oi);
        offset += increment * static_cast<size_t>(start_chunk_index); // size_t to avoid integer overflow.

        for (Index_ c = start_chunk_index; c < end_chunk_index; ++c) {
            const auto& chunk = my_chunk_array[offset];
            Index_ from = (c == start_chunk_index ? secondary_block_start - secondary_start_pos : 0);
            Index_ to = (c + 1 == end_chunk_index ? secondary_block_end - secondary_start_pos : secondary_chunkdim);
            Index_ len = to - from;

            // No need to protect against a zero length, as it should be impossible
            // here (otherwise, start_chunk_index == end_chunk_index and we'd never iterate).
            if constexpr(sparse_) {
                extract(chunk, from, len, secondary_start_pos);
            } else {
                extract(chunk, from, len);
            }

            secondary_start_pos += to; // yes, this is deliberate; '+ to' means that either we add 'secondary_chunkdim' or set it to 'secondary_block_end', the latter of which avoids overflow.
            offset += increment;
        }
    }
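
    // Worked example: with secondary_chunkdim = 5, a block starting at 7
    // with length 6 covers [7, 13), so start_chunk_index = 1 and
    // end_chunk_index = ceil(13 / 5) = 3. The loop then visits chunk 1 with
    // (from = 2, len = 3) for positions [7, 10) and chunk 2 with
    // (from = 0, len = 3) for positions [10, 13).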

    template<class ExtractFunction_>
    void extract_secondary_index(
        bool row,
        Index_ chunk_id,
        const std::vector<Index_>& secondary_indices,
        std::vector<Index_>& chunk_indices_buffer,
        ExtractFunction_ extract)
    const {
        if (secondary_indices.empty()) {
            return;
        }

        auto secondary_chunkdim = get_secondary_chunkdim(row);
        Index_ start_chunk_index = secondary_indices.front() / secondary_chunkdim; // 'secondary_indices' is guaranteed to be non-empty at this point.
        Index_ secondary_start_pos = start_chunk_index * secondary_chunkdim;

        auto oi = offset_and_increment(row, chunk_id);
        auto offset = std::get<0>(oi);
        auto increment = std::get<1>(oi);
        offset += increment * static_cast<size_t>(start_chunk_index); // size_t to avoid integer overflow.

        auto secondary_dim = get_secondary_dim(row);
        auto iIt = secondary_indices.begin();
        auto iEnd = secondary_indices.end();
        while (iIt != iEnd) {
            const auto& chunk = my_chunk_array[offset];

            Index_ secondary_end_pos = std::min(secondary_dim - secondary_start_pos, secondary_chunkdim) + secondary_start_pos; // this convoluted method avoids overflow.
            chunk_indices_buffer.clear();
            while (iIt != iEnd && *iIt < secondary_end_pos) {
                chunk_indices_buffer.push_back(*iIt - secondary_start_pos);
                ++iIt;
            }

            if (!chunk_indices_buffer.empty()) {
                if constexpr(sparse_) {
                    extract(chunk, chunk_indices_buffer, secondary_start_pos);
                } else {
                    extract(chunk, chunk_indices_buffer);
                }
            }

            secondary_start_pos = secondary_end_pos;
            offset += increment;
        }
    }
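
    // Worked example: with secondary_chunkdim = 5 and secondary_indices =
    // {1, 4, 9, 11}, chunk 0 is called with local indices {1, 4}, chunk 1
    // with {4} (from 9 - 5) and chunk 2 with {1} (from 11 - 10). In the
    // sparse case, each call also receives its 'secondary_start_pos' (0, 5
    // and 10, respectively) so that reported indices can be shifted back
    // into the full matrix's coordinates.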

    typedef typename Chunk_::Workspace ChunkWork;
    typedef typename Chunk_::value_type ChunkValue;
    typedef typename std::conditional<sparse_, typename SparseSlabFactory<ChunkValue, Index_>::Slab, typename DenseSlabFactory<ChunkValue>::Slab>::type Slab;
    typedef typename std::conditional<sparse_, SparseSingleWorkspace<ChunkValue, Index_>, DenseSingleWorkspace<ChunkValue> >::type SingleWorkspace;

public:
    // Extract a single element of the primary dimension, using a contiguous
    // block on the secondary dimension.
    //
    // Unfortunately, we can't just re-use the fetch_block() functions with a
    // length of 1, because some chunks do not support partial extraction; this
    // requires special handling to extract the full chunk into 'tmp_work', and
    // then pull out what we need into 'final_slab'.
    //
    // Even the use_subset = true chunks that do support partial extraction
    // require a workspace involving the full chunk size, so we end up needing
    // the 'tmp_work' workspace anyway.
    std::pair<const Slab*, Index_> fetch_single(
        bool row,
        Index_ i,
        Index_ secondary_block_start,
        Index_ secondary_block_length,
        ChunkWork& chunk_workspace,
        SingleWorkspace& tmp_work,
        Slab& final_slab)
    const {
        Index_ primary_chunkdim = get_primary_chunkdim(row);
        Index_ chunk_id = i / primary_chunkdim;
        Index_ chunk_offset = i % primary_chunkdim;

        if constexpr(sparse_) {
            auto& final_num = *final_slab.number;
            final_num = 0;
            bool needs_value = !final_slab.values.empty();
            bool needs_index = !final_slab.indices.empty();

            extract_secondary_block(
                row, chunk_id, secondary_block_start, secondary_block_length,
                [&](const Chunk_& chunk, Index_ from, Index_ len, Index_ secondary_start_pos) -> void {
                    auto& tmp_values = tmp_work.get_values();
                    auto& tmp_indices = tmp_work.get_indices();
                    auto& tmp_number = tmp_work.get_number();
                    std::fill_n(tmp_number.begin(), primary_chunkdim, 0);

                    if constexpr(Chunk_::use_subset) {
                        chunk.extract(row, chunk_offset, 1, from, len, chunk_workspace, tmp_values, tmp_indices, tmp_number.data(), secondary_start_pos);
                    } else {
                        chunk.extract(row, from, len, chunk_workspace, tmp_values, tmp_indices, tmp_number.data(), secondary_start_pos);
                    }

                    auto count = tmp_number[chunk_offset];
                    if (needs_value) {
                        std::copy_n(tmp_values[chunk_offset], count, final_slab.values[0] + final_num);
                    }
                    if (needs_index) {
                        std::copy_n(tmp_indices[chunk_offset], count, final_slab.indices[0] + final_num);
                    }
                    final_num += count;
                }
            );

        } else {
            auto final_slab_ptr = final_slab.data;
            auto tmp_buffer_ptr = tmp_work.data();

            extract_secondary_block(
                row, chunk_id, secondary_block_start, secondary_block_length,
                [&](const Chunk_& chunk, Index_ from, Index_ len) -> void {
                    if constexpr(Chunk_::use_subset) {
                        chunk.extract(row, chunk_offset, 1, from, len, chunk_workspace, tmp_buffer_ptr, len);
                    } else {
                        chunk.extract(row, from, len, chunk_workspace, tmp_buffer_ptr, len);
                    }

                    size_t tmp_offset = static_cast<size_t>(len) * static_cast<size_t>(chunk_offset);
                    std::copy_n(tmp_buffer_ptr + tmp_offset, len, final_slab_ptr);
                    final_slab_ptr += len;
                }
            );
        }

        return std::make_pair(&final_slab, static_cast<Index_>(0));
    }
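
    // Illustrative trace of the dense branch above: with primary_chunkdim = 4,
    // the temporary buffer has room for 4 rows of 'len' values, and the chunk
    // writes each extracted primary element 'p' at 'tmp_buffer_ptr + p * len'.
    // Only the row for 'chunk_offset' is then copied into 'final_slab', so a
    // chunk without subset support can simply fill all 4 rows.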

    // Extract a single element of the primary dimension, using an indexed
    // subset on the secondary dimension.
    std::pair<const Slab*, Index_> fetch_single(
        bool row,
        Index_ i,
        const std::vector<Index_>& secondary_indices,
        std::vector<Index_>& chunk_indices_buffer,
        ChunkWork& chunk_workspace,
        SingleWorkspace& tmp_work,
        Slab& final_slab)
    const {
        Index_ primary_chunkdim = get_primary_chunkdim(row);
        Index_ chunk_id = i / primary_chunkdim;
        Index_ chunk_offset = i % primary_chunkdim;

        if constexpr(sparse_) {
            auto& final_num = *final_slab.number;
            final_num = 0;
            bool needs_value = !final_slab.values.empty();
            bool needs_index = !final_slab.indices.empty();

            extract_secondary_index(
                row, chunk_id, secondary_indices, chunk_indices_buffer,
                [&](const Chunk_& chunk, const std::vector<Index_>& chunk_indices, Index_ secondary_start_pos) -> void {
                    auto& tmp_values = tmp_work.get_values();
                    auto& tmp_indices = tmp_work.get_indices();
                    auto& tmp_number = tmp_work.get_number();
                    std::fill_n(tmp_number.begin(), primary_chunkdim, 0);

                    if constexpr(Chunk_::use_subset) {
                        chunk.extract(row, chunk_offset, 1, chunk_indices, chunk_workspace, tmp_values, tmp_indices, tmp_number.data(), secondary_start_pos);
                    } else {
                        chunk.extract(row, chunk_indices, chunk_workspace, tmp_values, tmp_indices, tmp_number.data(), secondary_start_pos);
                    }

                    auto count = tmp_number[chunk_offset];
                    if (needs_value) {
                        std::copy_n(tmp_values[chunk_offset], count, final_slab.values[0] + final_num);
                    }
                    if (needs_index) {
                        std::copy_n(tmp_indices[chunk_offset], count, final_slab.indices[0] + final_num);
                    }
                    final_num += count;
                }
            );

        } else {
            auto final_slab_ptr = final_slab.data;
            auto tmp_buffer_ptr = tmp_work.data();

            extract_secondary_index(
                row, chunk_id, secondary_indices, chunk_indices_buffer,
                [&](const Chunk_& chunk, const std::vector<Index_>& chunk_indices) -> void {
                    size_t nidx = chunk_indices.size();
                    if constexpr(Chunk_::use_subset) {
                        chunk.extract(row, chunk_offset, 1, chunk_indices, chunk_workspace, tmp_buffer_ptr, nidx);
                    } else {
                        chunk.extract(row, chunk_indices, chunk_workspace, tmp_buffer_ptr, nidx);
                    }

                    size_t tmp_offset = nidx * static_cast<size_t>(chunk_offset);
                    std::copy_n(tmp_buffer_ptr + tmp_offset, nidx, final_slab_ptr);
                    final_slab_ptr += nidx;
                }
            );
        }

        return std::make_pair(&final_slab, static_cast<Index_>(0));
    }

private:
    // Extract a contiguous block of the primary dimension, using a contiguous block on the secondary dimension.
    void fetch_block(
        bool row,
        Index_ chunk_id,
        Index_ chunk_offset,
        Index_ chunk_length,
        Index_ secondary_block_start,
        Index_ secondary_block_length,
        Slab& slab,
        ChunkWork& chunk_workspace)
    const {
        if constexpr(sparse_) {
            std::fill_n(slab.number, get_primary_chunkdim(row), 0);

            extract_secondary_block(
                row, chunk_id, secondary_block_start, secondary_block_length,
                [&](const Chunk_& chunk, Index_ from, Index_ len, Index_ secondary_start_pos) -> void {
                    if constexpr(Chunk_::use_subset) {
                        chunk.extract(row, chunk_offset, chunk_length, from, len, chunk_workspace, slab.values, slab.indices, slab.number, secondary_start_pos);
                    } else {
                        chunk.extract(row, from, len, chunk_workspace, slab.values, slab.indices, slab.number, secondary_start_pos);
                    }
                }
            );

        } else {
            auto slab_ptr = slab.data;
            size_t stride = secondary_block_length;

            extract_secondary_block(
                row, chunk_id, secondary_block_start, secondary_block_length,
                [&](const Chunk_& chunk, Index_ from, Index_ len) -> void {
                    if constexpr(Chunk_::use_subset) {
                        chunk.extract(row, chunk_offset, chunk_length, from, len, chunk_workspace, slab_ptr, stride);
                    } else {
                        chunk.extract(row, from, len, chunk_workspace, slab_ptr, stride);
                    }
                    slab_ptr += len;
                }
            );
        }
    }

    // Extract a contiguous block of the primary dimension, using an indexed subset on the secondary dimension.
    void fetch_block(
        bool row,
        Index_ chunk_id,
        Index_ chunk_offset,
        Index_ chunk_length,
        const std::vector<Index_>& secondary_indices,
        std::vector<Index_>& chunk_indices_buffer,
        Slab& slab,
        ChunkWork& chunk_workspace)
    const {
        if constexpr(sparse_) {
            std::fill_n(slab.number, get_primary_chunkdim(row), 0);

            extract_secondary_index(
                row, chunk_id, secondary_indices, chunk_indices_buffer,
                [&](const Chunk_& chunk, const std::vector<Index_>& chunk_indices, Index_ secondary_start_pos) -> void {
                    if constexpr(Chunk_::use_subset) {
                        chunk.extract(row, chunk_offset, chunk_length, chunk_indices, chunk_workspace, slab.values, slab.indices, slab.number, secondary_start_pos);
                    } else {
                        chunk.extract(row, chunk_indices, chunk_workspace, slab.values, slab.indices, slab.number, secondary_start_pos);
                    }
                }
            );

        } else {
            auto slab_ptr = slab.data;
            size_t stride = secondary_indices.size();

            extract_secondary_index(
                row, chunk_id, secondary_indices, chunk_indices_buffer,
                [&](const Chunk_& chunk, const std::vector<Index_>& chunk_indices) -> void {
                    if constexpr(Chunk_::use_subset) {
                        chunk.extract(row, chunk_offset, chunk_length, chunk_indices, chunk_workspace, slab_ptr, stride);
                    } else {
                        chunk.extract(row, chunk_indices, chunk_workspace, slab_ptr, stride);
                    }
                    slab_ptr += chunk_indices.size();
                }
            );
        }
    }

private:
    // Extract an indexed subset of the primary dimension, using a contiguous block on the secondary dimension.
    void fetch_index(
        bool row,
        Index_ chunk_id,
        const std::vector<Index_>& primary_indices,
        Index_ secondary_block_start,
        Index_ secondary_block_length,
        Slab& slab,
        ChunkWork& chunk_workspace)
    const {
        if constexpr(sparse_) {
            std::fill_n(slab.number, get_primary_chunkdim(row), 0);

            extract_secondary_block(
                row, chunk_id, secondary_block_start, secondary_block_length,
                [&](const Chunk_& chunk, Index_ from, Index_ len, Index_ secondary_start_pos) -> void {
                    if constexpr(Chunk_::use_subset) {
                        chunk.extract(row, primary_indices, from, len, chunk_workspace, slab.values, slab.indices, slab.number, secondary_start_pos);
                    } else {
                        chunk.extract(row, from, len, chunk_workspace, slab.values, slab.indices, slab.number, secondary_start_pos);
                    }
                }
            );

        } else {
            auto slab_ptr = slab.data;
            size_t stride = secondary_block_length;

            extract_secondary_block(
                row, chunk_id, secondary_block_start, secondary_block_length,
                [&](const Chunk_& chunk, Index_ from, Index_ len) -> void {
                    if constexpr(Chunk_::use_subset) {
                        chunk.extract(row, primary_indices, from, len, chunk_workspace, slab_ptr, stride);
                    } else {
                        chunk.extract(row, from, len, chunk_workspace, slab_ptr, stride);
                    }
                    slab_ptr += len;
                }
            );
        }
    }

    // Extract an indexed subset of the primary dimension, using an indexed subset on the secondary dimension.
    void fetch_index(
        bool row,
        Index_ chunk_id,
        const std::vector<Index_>& primary_indices,
        const std::vector<Index_>& secondary_indices,
        std::vector<Index_>& chunk_indices_buffer,
        Slab& slab,
        ChunkWork& chunk_workspace)
    const {
        if constexpr(sparse_) {
            std::fill_n(slab.number, get_primary_chunkdim(row), 0);

            extract_secondary_index(
                row, chunk_id, secondary_indices, chunk_indices_buffer,
                [&](const Chunk_& chunk, const std::vector<Index_>& chunk_indices, Index_ secondary_start_pos) -> void {
                    if constexpr(Chunk_::use_subset) {
                        chunk.extract(row, primary_indices, chunk_indices, chunk_workspace, slab.values, slab.indices, slab.number, secondary_start_pos);
                    } else {
                        chunk.extract(row, chunk_indices, chunk_workspace, slab.values, slab.indices, slab.number, secondary_start_pos);
                    }
                }
            );

        } else {
            auto slab_ptr = slab.data;
            size_t stride = secondary_indices.size();

            extract_secondary_index(
                row, chunk_id, secondary_indices, chunk_indices_buffer,
                [&](const Chunk_& chunk, const std::vector<Index_>& chunk_indices) -> void {
                    if constexpr(Chunk_::use_subset) {
                        chunk.extract(row, primary_indices, chunk_indices, chunk_workspace, slab_ptr, stride);
                    } else {
                        chunk.extract(row, chunk_indices, chunk_workspace, slab_ptr, stride);
                    }
                    slab_ptr += chunk_indices.size();
                }
            );
        }
    }

public:
    // Obtain the slab containing the 'i'-th element of the primary dimension.
    template<class Cache_, class Factory_>
    std::pair<const Slab*, Index_> fetch_myopic(
        bool row,
        Index_ i,
        Index_ block_start,
        Index_ block_length,
        ChunkWork& chunk_workspace,
        Cache_& cache,
        Factory_& factory)
    const {
        Index_ primary_chunkdim = get_primary_chunkdim(row);
        Index_ chunk_id = i / primary_chunkdim;
        Index_ chunk_offset = i % primary_chunkdim;
        auto& out = cache.find(
            chunk_id,
            /* create = */ [&]() -> Slab {
                return factory.create();
            },
            /* populate = */ [&](Index_ id, Slab& slab) -> void {
                fetch_block(row, id, 0, get_primary_chunkdim(row, id), block_start, block_length, slab, chunk_workspace);
            }
        );
        return std::make_pair(&out, chunk_offset);
    }

    template<class Cache_, class Factory_>
    std::pair<const Slab*, Index_> fetch_myopic(
        bool row,
        Index_ i,
        const std::vector<Index_>& indices,
        std::vector<Index_>& tmp_indices,
        ChunkWork& chunk_workspace,
        Cache_& cache,
        Factory_& factory)
    const {
        Index_ primary_chunkdim = get_primary_chunkdim(row);
        Index_ chunk_id = i / primary_chunkdim;
        Index_ chunk_offset = i % primary_chunkdim;
        auto& out = cache.find(
            chunk_id,
            /* create = */ [&]() -> Slab {
                return factory.create();
            },
            /* populate = */ [&](Index_ id, Slab& slab) -> void {
                fetch_block(row, id, 0, get_primary_chunkdim(row, id), indices, tmp_indices, slab, chunk_workspace);
            }
        );
        return std::make_pair(&out, chunk_offset);
    }
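
    // Both fetch_myopic() overloads assume a cache exposing a
    // find(id, create, populate) method, presumably this library's
    // LruSlabCache: find() returns the cached slab for 'chunk_id' if present,
    // otherwise it calls create() to obtain an empty slab and populate() to
    // fill it before caching it.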

private:
    template<class Cache_, class Factory_, typename PopulateBlock_, typename PopulateIndex_>
    std::pair<const Slab*, Index_> fetch_oracular_core(
        bool row,
        Cache_& cache,
        Factory_& factory,
        PopulateBlock_ populate_block,
        PopulateIndex_ populate_index)
    const {
        Index_ primary_chunkdim = get_primary_chunkdim(row);
        if constexpr(Chunk_::use_subset) {
            return cache.next(
                /* identify = */ [&](Index_ i) -> std::pair<Index_, Index_> {
                    return std::pair<Index_, Index_>(i / primary_chunkdim, i % primary_chunkdim);
                },
                /* create = */ [&]() -> Slab {
                    return factory.create();
                },
                /* populate = */ [&](std::vector<std::tuple<Index_, Slab*, const OracularSubsettedSlabCacheSelectionDetails<Index_>*> >& in_need) -> void {
                    for (const auto& p : in_need) {
                        auto id = std::get<0>(p);
                        auto ptr = std::get<1>(p);
                        auto sub = std::get<2>(p);
                        switch (sub->selection) {
                            case OracularSubsettedSlabCacheSelectionType::FULL:
                                populate_block(id, 0, get_primary_chunkdim(row, id), *ptr);
                                break;
                            case OracularSubsettedSlabCacheSelectionType::BLOCK:
                                populate_block(id, sub->block_start, sub->block_length, *ptr);
                                break;
                            case OracularSubsettedSlabCacheSelectionType::INDEX:
                                populate_index(id, sub->indices, *ptr);
                                break;
                        }
                    }
                }
            );

        } else {
            return cache.next(
                /* identify = */ [&](Index_ i) -> std::pair<Index_, Index_> {
                    return std::pair<Index_, Index_>(i / primary_chunkdim, i % primary_chunkdim);
                },
                /* create = */ [&]() -> Slab {
                    return factory.create();
                },
                /* populate = */ [&](std::vector<std::pair<Index_, Slab*> >& to_populate) -> void {
                    for (auto& p : to_populate) {
                        populate_block(p.first, 0, get_primary_chunkdim(row, p.first), *(p.second));
                    }
                }
            );
        }
    }
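
    // The two branches above correspond to the two oracle-aware caches in
    // this library. When Chunk_::use_subset is true, the subsetted cache
    // reports which part of each slab's primary dimension will actually be
    // used (FULL, a contiguous BLOCK, or an INDEX subset), so only that part
    // needs to be populated; otherwise, the plain oracular cache always
    // populates the full slab.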

public:
    template<class Cache_, class Factory_>
    std::pair<const Slab*, Index_> fetch_oracular(
        bool row,
        Index_ block_start,
        Index_ block_length,
        ChunkWork& chunk_workspace,
        Cache_& cache,
        Factory_& factory)
    const {
        return fetch_oracular_core(
            row,
            cache,
            factory,
            [&](Index_ pid, Index_ pstart, Index_ plen, Slab& slab) -> void {
                fetch_block(row, pid, pstart, plen, block_start, block_length, slab, chunk_workspace);
            },
            [&](Index_ pid, const std::vector<Index_>& pindices, Slab& slab) -> void {
                fetch_index(row, pid, pindices, block_start, block_length, slab, chunk_workspace);
            }
        );
    }

    template<class Cache_, class Factory_>
    std::pair<const Slab*, Index_> fetch_oracular(
        bool row,
        const std::vector<Index_>& indices,
        std::vector<Index_>& chunk_indices_buffer,
        ChunkWork& chunk_workspace,
        Cache_& cache,
        Factory_& factory)
    const {
        return fetch_oracular_core(
            row,
            cache,
            factory,
            [&](Index_ pid, Index_ pstart, Index_ plen, Slab& slab) -> void {
                fetch_block(row, pid, pstart, plen, indices, chunk_indices_buffer, slab, chunk_workspace);
            },
            [&](Index_ pid, const std::vector<Index_>& pindices, Slab& slab) -> void {
                fetch_index(row, pid, pindices, indices, chunk_indices_buffer, slab, chunk_workspace);
            }
        );
    }
};

}

}

#endif
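
For orientation, the following sketch shows how a chunk type plugs into this coordinator. It is hypothetical and not part of the header: the MockDenseChunk name, its payload layout and the example dimensions are invented for illustration, with only the extract() signatures taken from the non-subset dense calls made above.

#include <vector>
#include <cstddef>

struct MockDenseChunk {
    typedef double value_type;
    struct Workspace {}; // no scratch space needed for this simple chunk.
    static constexpr bool use_subset = false;

    int chunk_nrow = 0, chunk_ncol = 0;
    std::vector<double> payload; // row-major contents of this chunk.

    // Non-subset dense extraction of a contiguous secondary block: for every
    // element of the primary dimension, copy the requested secondary range
    // into 'output', with consecutive primary elements 'stride' apart.
    template<typename Index_>
    void extract(bool row, Index_ start, Index_ length, Workspace&, double* output, size_t stride) const {
        Index_ primary_dim = (row ? chunk_nrow : chunk_ncol);
        for (Index_ p = 0; p < primary_dim; ++p, output += stride) {
            for (Index_ s = 0; s < length; ++s) {
                Index_ sec = start + s;
                output[s] = (row ? payload[static_cast<size_t>(p) * chunk_ncol + sec] : payload[static_cast<size_t>(sec) * chunk_ncol + p]);
            }
        }
    }

    // Indexed variant over the secondary dimension.
    template<typename Index_>
    void extract(bool row, const std::vector<Index_>& indices, Workspace&, double* output, size_t stride) const {
        Index_ primary_dim = (row ? chunk_nrow : chunk_ncol);
        for (Index_ p = 0; p < primary_dim; ++p, output += stride) {
            for (size_t s = 0; s < indices.size(); ++s) {
                Index_ sec = indices[s];
                output[s] = (row ? payload[static_cast<size_t>(p) * chunk_ncol + sec] : payload[static_cast<size_t>(sec) * chunk_ncol + p]);
            }
        }
    }
};

A dense ChunkCoordinator<int, false, MockDenseChunk> over a 10 x 20 matrix of 4 x 5 chunks would then be constructed from ChunkDimensionStats<int>(10, 4), ChunkDimensionStats<int>(20, 5) and a row-major vector of 12 such chunks (assuming the two-argument ChunkDimensionStats constructor; see ChunkDimensionStats.hpp for its actual interface).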