tatami_tiledb
tatami bindings for TileDB-backed matrices
Loading...
Searching...
No Matches
SparseMatrix.hpp
Go to the documentation of this file.
1#ifndef TATAMI_TILEDB_SPARSE_MATRIX_HPP
2#define TATAMI_TILEDB_SPARSE_MATRIX_HPP
3
4
5#include "serialize.hpp"
6
7#include <string>
8#include <memory>
9#include <vector>
10#include <stdexcept>
11#include <type_traits>
12#include <cstddef>
13
14#include <tiledb/tiledb>
15#include "tatami_chunked/tatami_chunked.hpp"
16#include "sanisizer/sanisizer.hpp"
17
23namespace tatami_tiledb {
24
40 std::size_t maximum_cache_size = sanisizer::cap<std::size_t>(100000000);
41
48};
49
53namespace SparseMatrix_internal {
54
55typedef ::tatami_tiledb::internal::Components Components;
56typedef ::tatami_tiledb::internal::VariablyTypedDimension Dimension;
57typedef ::tatami_tiledb::internal::VariablyTypedVector CacheBuffer;
58
59struct Workspace {
60 CacheBuffer values;
61 CacheBuffer target_indices;
62 CacheBuffer non_target_indices;
63};
64
65inline std::size_t execute_query(
66 const Components& tdb_comp,
67 tiledb::Subarray& subarray,
68 const std::string& attribute,
69 bool row,
70 const std::string& target_dimname,
71 const std::string& non_target_dimname,
72 Workspace& work,
73 std::size_t general_work_offset,
74 std::size_t target_index_work_offset,
75 std::size_t work_length,
76 bool needs_value,
77 bool needs_index)
78{
79 tiledb::Query query(tdb_comp.ctx, tdb_comp.array);
80 query.set_subarray(subarray);
81 query.set_layout(row ? TILEDB_ROW_MAJOR : TILEDB_COL_MAJOR);
82
83 work.target_indices.set_data_buffer(query, target_dimname, target_index_work_offset, work_length);
84 if (needs_value) {
85 work.values.set_data_buffer(query, attribute, general_work_offset, work_length);
86 }
87 if (needs_index) {
88 work.non_target_indices.set_data_buffer(query, non_target_dimname, general_work_offset, work_length);
89 }
90
91 if (query.submit() != tiledb::Query::Status::COMPLETE) {
92 throw std::runtime_error("failed to read sparse data from TileDB");
93 }
94
95 return query.result_buffer_elements()[target_dimname].second;
96}
97
98/********************
99 *** Core classes ***
100 ********************/
101
// Cache geometry for myopic (oracle-free) extraction.
template<typename Index_>
struct MyopicCacheParameters {
    Index_ chunk_length; // number of target-dimension elements per slab.
    std::size_t slab_size_in_elements; // capacity of each slab, in non-zero elements.
    Index_ max_slabs_in_cache; // number of slabs held by the LRU cache.
};
108
109template<typename Index_>
110class MyopicCore {
111public:
112 MyopicCore(
113 const Components& tdb_comp,
114 const std::string& attribute,
115 bool row,
116 Index_ target_dim_extent,
117 const std::string& target_dimname,
118 const Dimension& tdb_target_dim,
119 const std::string& non_target_dimname,
120 const Dimension& tdb_non_target_dim,
121 tiledb_datatype_t tdb_type,
122 [[maybe_unused]] Index_ non_target_length, // provided for consistency with the other constructors.
123 [[maybe_unused]] tatami::MaybeOracle<false, Index_> oracle,
124 const MyopicCacheParameters<Index_>& cache_stats,
125 bool needs_value,
126 bool needs_index
127 ) :
128 my_tdb_comp(tdb_comp),
129 my_attribute(attribute),
130 my_row(row),
131 my_target_dim_extent(target_dim_extent),
132 my_tdb_target_dim(tdb_target_dim),
133 my_target_dimname(target_dimname),
134 my_tdb_non_target_dim(tdb_non_target_dim),
135 my_non_target_dimname(non_target_dimname),
136 my_target_chunk_length(cache_stats.chunk_length),
137 my_slab_size(cache_stats.slab_size_in_elements),
138 my_needs_value(needs_value),
139 my_needs_index(needs_index),
140 my_cache(cache_stats.max_slabs_in_cache)
141 {
142 // Only storing one slab at a time for the target indices.
143 my_work.target_indices.reset(my_tdb_target_dim.type(), my_slab_size);
144
145 auto total_cache_size = sanisizer::product<std::size_t>(my_slab_size, cache_stats.max_slabs_in_cache);
146 if (my_needs_value) {
147 my_work.values.reset(tdb_type, total_cache_size);
148 }
149 if (my_needs_index) {
150 my_work.non_target_indices.reset(my_tdb_non_target_dim.type(), total_cache_size);
151 }
152
153 // Check that the indptrs will not overflow on resize() in the populate function of fetch_raw().
154 sanisizer::sum<typename decltype(Slab::indptrs)::size_type>(my_target_chunk_length, 1);
155 }
156
157private:
158 const Components& my_tdb_comp;
159 const std::string& my_attribute;
160
161 bool my_row;
162 Index_ my_target_dim_extent;
163 const Dimension& my_tdb_target_dim;
164 const std::string& my_target_dimname;
165 const Dimension& my_tdb_non_target_dim;
166 const std::string& my_non_target_dimname;
167
168 Index_ my_target_chunk_length;
169 std::size_t my_slab_size;
170 bool my_needs_value;
171 bool my_needs_index;
172 Workspace my_work;
173 std::vector<std::pair<Index_, Index_> > my_counts;
174
175 struct Slab {
176 std::size_t offset;
177 std::vector<std::size_t> indptrs;
178 };
179 std::size_t my_offset = 0;
180 tatami_chunked::LruSlabCache<Index_, Slab> my_cache;
181
182private:
183 template<class Configure_>
184 std::pair<std::size_t, std::size_t> fetch_raw(Index_ i, Configure_ configure) {
185 Index_ chunk = i / my_target_chunk_length;
186 Index_ index = i % my_target_chunk_length;
187
188 const auto& info = my_cache.find(
189 chunk,
190 /* create = */ [&]() -> Slab {
191 Slab output;
192 output.offset = my_offset;
193 my_offset += my_slab_size;
194 return output;
195 },
196 /* populate = */ [&](Index_ id, Slab& contents) -> void {
197 Index_ chunk_start = id * my_target_chunk_length;
198 Index_ chunk_length = std::min(my_target_dim_extent - chunk_start, my_target_chunk_length);
199
200 std::size_t num_nonzero = 0;
201 serialize([&]() -> void {
202 tiledb::Subarray subarray(my_tdb_comp.ctx, my_tdb_comp.array);
203 int rowdex = my_row;
204 my_tdb_target_dim.add_range(subarray, 1 - rowdex, chunk_start, chunk_length);
205 configure(subarray, rowdex);
206 num_nonzero = execute_query(
207 my_tdb_comp,
208 subarray,
209 my_attribute,
210 my_row,
211 my_target_dimname,
212 my_non_target_dimname,
213 my_work,
214 contents.offset,
215 0,
216 my_slab_size,
217 my_needs_value,
218 my_needs_index
219 );
220 });
221
222 auto& indptrs = contents.indptrs;
223 indptrs.clear();
224 indptrs.resize(static_cast<decltype(indptrs.size())>(chunk_length) + 1); // cast is safe, we already know it won't overflow (see constructor).
225
226 if (num_nonzero) {
227 my_work.target_indices.compact(0, num_nonzero, my_tdb_target_dim, my_counts);
228 for (const auto& cnts : my_counts) {
229 indptrs[cnts.first - chunk_start + 1] = cnts.second;
230 }
231 for (Index_ i = 1; i <= chunk_length; ++i) {
232 indptrs[i] += indptrs[i - 1];
233 }
234 }
235 }
236 );
237
238 auto start = info.indptrs[index];
239 return std::make_pair(info.offset + start, info.indptrs[index + 1] - start);
240 }
241
242public:
243 std::pair<std::size_t, std::size_t> fetch_block(Index_ i, Index_ block_start, Index_ block_length) {
244 return fetch_raw(
245 i,
246 [&](tiledb::Subarray& subarray, int rowdex) -> void {
247 my_tdb_non_target_dim.add_range(subarray, rowdex, block_start, block_length);
248 }
249 );
250 }
251
252 std::pair<std::size_t, std::size_t> fetch_indices(Index_ i, const std::vector<Index_>& indices) {
253 return fetch_raw(
254 i,
255 [&](tiledb::Subarray& subarray, int rowdex) -> void {
257 indices.data(),
258 indices.size(),
259 [&](Index_ s, Index_ l) -> void {
260 my_tdb_non_target_dim.add_range(subarray, rowdex, s, l);
261 }
262 );
263 }
264 );
265 }
266
267public:
268 const Workspace& get_workspace() const {
269 return my_work;
270 }
271
272 bool get_needs_value() const {
273 return my_needs_value;
274 }
275
276 bool get_needs_index() const {
277 return my_needs_index;
278 }
279
280 const Dimension& get_tdb_non_target_dim() const {
281 return my_tdb_non_target_dim;
282 }
283};
284
285// The general idea with the oracular extractors is to either:
286//
287// - Extract each target dimension element directly, if the cell order within each tile corresponds to the desired target dimension (i.e., 'row').
288// - Extract the tile-wise chunk of target dimension elements, if the cell order within each tile is not the same as the target dimension.
289//
290// This means that we need to vary the chunk length of each slab from 1 or the tile extent, depending on the cell order of the TileDB array.
291// In addition, we use a variable slab cache that adjusts to the number of non-zero elements in each slab.
292
// Cache geometry for oracular extraction; unlike the myopic case, the cache
// is a single element pool whose slabs have variable (data-dependent) sizes.
template<typename Index_>
struct OracularCacheParameters {
    Index_ chunk_length; // number of target-dimension elements per slab.
    std::size_t max_cache_size_in_elements; // total pool capacity, in non-zero elements.
};
298
299template<typename Index_>
300class OracularCore {
301public:
302 OracularCore(
303 const Components& tdb_comp,
304 const std::string& attribute,
305 bool row,
306 Index_ target_dim_extent,
307 const std::string& target_dimname,
308 const Dimension& tdb_target_dim,
309 const std::string& non_target_dimname,
310 const Dimension& tdb_non_target_dim,
311 tiledb_datatype_t tdb_type,
312 Index_ non_target_length,
314 const OracularCacheParameters<Index_>& cache_stats,
315 bool needs_value,
316 bool needs_index) :
317 my_tdb_comp(tdb_comp),
318 my_attribute(attribute),
319 my_row(row),
320 my_target_dim_extent(target_dim_extent),
321 my_tdb_target_dim(tdb_target_dim),
322 my_target_dimname(target_dimname),
323 my_tdb_non_target_dim(tdb_non_target_dim),
324 my_non_target_dimname(non_target_dimname),
325 my_target_chunk_length(cache_stats.chunk_length),
326 my_max_slab_size(sanisizer::product<std::size_t>(non_target_length, my_target_chunk_length)),
327 my_needs_value(needs_value),
328 my_needs_index(needs_index),
329 my_cache(std::move(oracle), cache_stats.max_cache_size_in_elements)
330 {
331 my_work.target_indices.reset(my_tdb_target_dim.type(), cache_stats.max_cache_size_in_elements);
332 if (my_needs_value) {
333 my_work.values.reset(tdb_type, cache_stats.max_cache_size_in_elements);
334 }
335 if (my_needs_index) {
336 my_work.non_target_indices.reset(my_tdb_non_target_dim.type(), cache_stats.max_cache_size_in_elements);
337 }
338
339 // Check that the indptrs will not overflow on resize() in the populate function of fetch_raw().
340 sanisizer::sum<typename decltype(Slab::indptrs)::size_type>(my_target_chunk_length, 1);
341 }
342
343private:
344 const Components& my_tdb_comp;
345 const std::string& my_attribute;
346
347 bool my_row;
348 Index_ my_target_dim_extent;
349 const Dimension& my_tdb_target_dim;
350 const std::string& my_target_dimname;
351 const Dimension& my_tdb_non_target_dim;
352 const std::string& my_non_target_dimname;
353
354 Index_ my_target_chunk_length;
355 std::size_t my_max_slab_size;
356 bool my_needs_value;
357 bool my_needs_index;
358 Workspace my_work;
359 std::vector<std::pair<Index_, Index_> > my_counts;
360
361 struct Slab {
362 std::size_t offset;
363 std::vector<std::size_t> indptrs;
364 };
365 tatami_chunked::OracularVariableSlabCache<Index_, Index_, Slab, std::size_t> my_cache;
366
367private:
368 template<class Function_>
369 static void sort_by_field(std::vector<std::pair<Index_, std::size_t> >& indices, Function_ field) {
370 auto comp = [&field](const std::pair<Index_, std::size_t>& l, const std::pair<Index_, std::size_t>& r) -> bool {
371 return field(l) < field(r);
372 };
373 if (!std::is_sorted(indices.begin(), indices.end(), comp)) {
374 std::sort(indices.begin(), indices.end(), comp);
375 }
376 }
377
378 template<class Configure_>
379 std::pair<std::size_t, std::size_t> fetch_raw([[maybe_unused]] Index_ i, Configure_ configure) {
380 auto info = my_cache.next(
381 /* identify = */ [&](Index_ current) -> std::pair<Index_, Index_> {
382 return std::pair<Index_, Index_>(current / my_target_chunk_length, current % my_target_chunk_length);
383 },
384 /* upper_size = */ [&](Index_) -> std::size_t {
385 return my_max_slab_size;
386 },
387 /* actual_size = */ [&](Index_, const Slab& slab) -> std::size_t {
388 return slab.indptrs.back();
389 },
390 /* create = */ [&]() -> Slab {
391 return Slab();
392 },
393 /* populate = */ [&](std::vector<std::pair<Index_, std::size_t> >& to_populate, std::vector<std::pair<Index_, std::size_t> >& to_reuse, std::vector<Slab>& all_slabs) -> void {
394 // Defragmenting the existing chunks. We sort by offset to make
395 // sure that we're not clobbering in-use slabs during the copy().
396 sort_by_field(to_reuse, [&](const std::pair<Index_, std::size_t>& x) -> std::size_t { return all_slabs[x.second].offset; });
397 std::size_t running_offset = 0;
398 for (auto& x : to_reuse) {
399 auto& reused_slab = all_slabs[x.second];
400 auto& cur_offset = reused_slab.offset;
401 auto num_nonzero = reused_slab.indptrs.back();
402 if (cur_offset != running_offset) {
403 if (my_needs_value) {
404 my_work.values.shift(cur_offset, num_nonzero, running_offset);
405 }
406 if (my_needs_index) {
407 my_work.non_target_indices.shift(cur_offset, num_nonzero, running_offset);
408 }
409 cur_offset = running_offset;
410 }
411 running_offset += num_nonzero;
412 }
413
414 // Collapsing runs of consecutive ranges into a single range;
415 // otherwise, making union of ranges. This allows a single TileDb call
416 // to populate the contiguous memory pool that we made available after
417 // defragmentation; then we just update the slab pointers to refer
418 // to the slices of memory corresponding to each slab.
419 sort_by_field(to_populate, [](const std::pair<Index_, std::size_t>& x) -> Index_ { return x.first; });
420
421 std::size_t num_nonzero = 0;
422 serialize([&]() -> void {
423 tiledb::Subarray subarray(my_tdb_comp.ctx, my_tdb_comp.array);
424 int rowdex = my_row;
425 configure(subarray, rowdex);
426
427 Index_ run_chunk_id = to_populate.front().first;
428 Index_ run_chunk_start = run_chunk_id * my_target_chunk_length;
429 Index_ run_length = std::min(my_target_dim_extent - run_chunk_start, my_target_chunk_length);
430
431 int dimdex = 1 - rowdex;
432 for (decltype(to_populate.size()) ci = 1, cend = to_populate.size(); ci < cend; ++ci) {
433 Index_ current_chunk_id = to_populate[ci].first;
434 Index_ current_chunk_start = current_chunk_id * my_target_chunk_length;
435
436 if (current_chunk_id - run_chunk_id > 1) { // save the existing run of to_populate as one range, and start a new run.
437 my_tdb_target_dim.add_range(subarray, dimdex, run_chunk_start, run_length);
438 run_chunk_id = current_chunk_id;
439 run_chunk_start = current_chunk_start;
440 run_length = 0;
441 }
442
443 run_length += std::min(my_target_dim_extent - current_chunk_start, my_target_chunk_length);
444 }
445
446 my_tdb_target_dim.add_range(subarray, dimdex, run_chunk_start, run_length);
447 num_nonzero = execute_query(
448 my_tdb_comp,
449 subarray,
450 my_attribute,
451 my_row,
452 my_target_dimname,
453 my_non_target_dimname,
454 my_work,
455 running_offset,
456 running_offset,
457 sanisizer::product_unsafe<std::size_t>(to_populate.size(), my_max_slab_size),
458 my_needs_value,
459 my_needs_index
460 );
461 });
462
463 my_work.target_indices.compact(running_offset, num_nonzero, my_tdb_target_dim, my_counts);
464
465 auto cIt = my_counts.begin(), cEnd = my_counts.end();
466 for (auto& si : to_populate) {
467 auto& populate_slab = all_slabs[si.second];
468 populate_slab.offset = running_offset;
469
470 Index_ chunk_start = si.first * my_target_chunk_length;
471 Index_ chunk_length = std::min(my_target_dim_extent - chunk_start, my_target_chunk_length);
472 Index_ chunk_end = chunk_start + chunk_length;
473
474 auto& slab_indptrs = populate_slab.indptrs;
475 slab_indptrs.clear();
476 slab_indptrs.resize(static_cast<decltype(slab_indptrs.size())>(chunk_length) + 1); // cast is safe, we already know it won't overflow.
477
478 while (cIt != cEnd && cIt->first < chunk_end) {
479 slab_indptrs[cIt->first - chunk_start + 1] = cIt->second;
480 ++cIt;
481 }
482
483 for (Index_ i = 1; i <= chunk_length; ++i) {
484 slab_indptrs[i] += slab_indptrs[i - 1];
485 }
486 running_offset += slab_indptrs.back();
487 }
488 }
489 );
490
491 const auto& indptrs = info.first->indptrs;
492 auto start = indptrs[info.second];
493 return std::make_pair(info.first->offset + start, indptrs[info.second + 1] - start);
494 }
495
496public:
497 std::pair<std::size_t, std::size_t> fetch_block(Index_ i, Index_ block_start, Index_ block_length) {
498 return fetch_raw(
499 i,
500 [&](tiledb::Subarray& subarray, int rowdex) -> void {
501 my_tdb_non_target_dim.add_range(subarray, rowdex, block_start, block_length);
502 }
503 );
504 }
505
506 std::pair<std::size_t, std::size_t> fetch_indices(Index_ i, const std::vector<Index_>& indices) {
507 return fetch_raw(
508 i,
509 [&](tiledb::Subarray& subarray, int rowdex) -> void {
511 indices.data(),
512 indices.size(),
513 [&](Index_ s, Index_ l) -> void {
514 my_tdb_non_target_dim.add_range(subarray, rowdex, s, l);
515 }
516 );
517 }
518 );
519 }
520
521public:
522 const Workspace& get_workspace() const {
523 return my_work;
524 }
525
526 bool get_needs_value() const {
527 return my_needs_value;
528 }
529
530 bool get_needs_index() const {
531 return my_needs_index;
532 }
533
534 const Dimension& get_tdb_non_target_dim() const {
535 return my_tdb_non_target_dim;
536 }
537};
538
539template<bool oracle_, typename Index_>
540using SparseCore = typename std::conditional<oracle_, OracularCore<Index_>, MyopicCore<Index_> >::type;
541
542template<bool oracle_, typename Index_>
543using CacheParameters = typename std::conditional<oracle_, OracularCacheParameters<Index_>, MyopicCacheParameters<Index_> >::type;
544
545/*************************
546 *** Sparse subclasses ***
547 *************************/
548
549template<typename Value_, typename Index_>
551 const Workspace& work,
552 std::size_t work_start,
553 std::size_t work_length,
554 const Dimension& non_target_dim,
555 Value_* vbuffer,
556 Index_* ibuffer,
557 bool needs_value,
558 bool needs_index)
559{
561 output.number = work_length;
562 if (needs_value) {
563 work.values.copy(work_start, work_length, vbuffer);
564 output.value = vbuffer;
565 }
566 if (needs_index) {
567 work.non_target_indices.copy(work_start, work_length, non_target_dim, ibuffer);
568 output.index = ibuffer;
569 }
570 return output;
571}
572
573template<bool oracle_, typename Value_, typename Index_>
574class SparseFull final : public tatami::SparseExtractor<oracle_, Value_, Index_> {
575public:
576 SparseFull(
577 const Components& tdb_comp,
578 const std::string& attribute,
579 bool row,
580 Index_ target_dim_extent,
581 const std::string& target_dimname,
582 const Dimension& tdb_target_dim,
583 const std::string& non_target_dimname,
584 const Dimension& tdb_non_target_dim,
585 tiledb_datatype_t tdb_type,
587 Index_ non_target_dim,
588 const CacheParameters<oracle_, Index_>& cache_parameters,
589 bool needs_value,
590 bool needs_index) :
591 my_core(
592 tdb_comp,
593 attribute,
594 row,
595 target_dim_extent,
596 target_dimname,
597 tdb_target_dim,
598 non_target_dimname,
599 tdb_non_target_dim,
600 tdb_type,
601 non_target_dim,
602 std::move(oracle),
603 cache_parameters,
604 needs_value,
605 needs_index
606 ),
607 my_non_target_dim(non_target_dim)
608 {}
609
610 tatami::SparseRange<Value_, Index_> fetch(Index_ i, Value_* vbuffer, Index_* ibuffer) {
611 auto info = my_core.fetch_block(i, 0, my_non_target_dim);
612 return fill_sparse_range(my_core.get_workspace(), info.first, info.second, my_core.get_tdb_non_target_dim(), vbuffer, ibuffer, my_core.get_needs_value(), my_core.get_needs_index());
613 }
614
615private:
616 SparseCore<oracle_, Index_> my_core;
617 Index_ my_non_target_dim;
618};
619
620template<bool oracle_, typename Value_, typename Index_>
621class SparseBlock final : public tatami::SparseExtractor<oracle_, Value_, Index_> {
622public:
623 SparseBlock(
624 const Components& tdb_comp,
625 const std::string& attribute,
626 bool row,
627 Index_ target_dim_extent,
628 const std::string& target_dimname,
629 const Dimension& tdb_target_dim,
630 const std::string& non_target_dimname,
631 const Dimension& tdb_non_target_dim,
632 tiledb_datatype_t tdb_type,
634 Index_ block_start,
635 Index_ block_length,
636 const CacheParameters<oracle_, Index_>& cache_parameters,
637 bool needs_value,
638 bool needs_index) :
639 my_core(
640 tdb_comp,
641 attribute,
642 row,
643 target_dim_extent,
644 target_dimname,
645 tdb_target_dim,
646 non_target_dimname,
647 tdb_non_target_dim,
648 tdb_type,
649 block_length,
650 std::move(oracle),
651 cache_parameters,
652 needs_value,
653 needs_index
654 ),
655 my_block_start(block_start),
656 my_block_length(block_length)
657 {}
658
659 tatami::SparseRange<Value_, Index_> fetch(Index_ i, Value_* vbuffer, Index_* ibuffer) {
660 auto info = my_core.fetch_block(i, my_block_start, my_block_length);
661 return fill_sparse_range(my_core.get_workspace(), info.first, info.second, my_core.get_tdb_non_target_dim(), vbuffer, ibuffer, my_core.get_needs_value(), my_core.get_needs_index());
662 }
663
664private:
665 SparseCore<oracle_, Index_> my_core;
666 Index_ my_block_start, my_block_length;
667};
668
669template<bool oracle_, typename Value_, typename Index_>
670class SparseIndex final : public tatami::SparseExtractor<oracle_, Value_, Index_> {
671public:
672 SparseIndex(
673 const Components& tdb_comp,
674 const std::string& attribute,
675 bool row,
676 Index_ target_dim_extent,
677 const std::string& target_dimname,
678 const Dimension& tdb_target_dim,
679 const std::string& non_target_dimname,
680 const Dimension& tdb_non_target_dim,
681 tiledb_datatype_t tdb_type,
683 tatami::VectorPtr<Index_> indices_ptr,
684 const CacheParameters<oracle_, Index_>& cache_parameters,
685 bool needs_value,
686 bool needs_index) :
687 my_core(
688 tdb_comp,
689 attribute,
690 row,
691 target_dim_extent,
692 target_dimname,
693 tdb_target_dim,
694 non_target_dimname,
695 tdb_non_target_dim,
696 tdb_type,
697 indices_ptr->size(),
698 std::move(oracle),
699 cache_parameters,
700 needs_value,
701 needs_index
702 ),
703 my_indices_ptr(std::move(indices_ptr))
704 {}
705
706 tatami::SparseRange<Value_, Index_> fetch(Index_ i, Value_* vbuffer, Index_* ibuffer) {
707 auto info = my_core.fetch_indices(i, *my_indices_ptr);
708 return fill_sparse_range(my_core.get_workspace(), info.first, info.second, my_core.get_tdb_non_target_dim(), vbuffer, ibuffer, my_core.get_needs_value(), my_core.get_needs_index());
709 }
710
711private:
712 SparseCore<oracle_, Index_> my_core;
713 tatami::VectorPtr<Index_> my_indices_ptr;
714};
715
716/************************
717 *** Dense subclasses ***
718 ************************/
719
720template<bool oracle_, typename Value_, typename Index_>
721class DenseFull final : public tatami::DenseExtractor<oracle_, Value_, Index_> {
722public:
723 DenseFull(
724 const Components& tdb_comp,
725 const std::string& attribute,
726 bool row,
727 Index_ target_dim_extent,
728 const std::string& target_dimname,
729 const Dimension& tdb_target_dim,
730 const std::string& non_target_dimname,
731 const Dimension& tdb_non_target_dim,
732 tiledb_datatype_t tdb_type,
734 Index_ non_target_dim_extent,
735 const CacheParameters<oracle_, Index_>& cache_parameters,
736 [[maybe_unused]] bool needs_value, // for consistency with Sparse* constructors.
737 [[maybe_unused]] bool needs_index) :
738 my_core(
739 tdb_comp,
740 attribute,
741 row,
742 target_dim_extent,
743 target_dimname,
744 tdb_target_dim,
745 non_target_dimname,
746 tdb_non_target_dim,
747 tdb_type,
748 non_target_dim_extent,
749 std::move(oracle),
750 cache_parameters,
751 /* needs_value = */ true,
752 /* needs_index = */ true
753 ),
754 my_non_target_dim_extent(non_target_dim_extent),
755 my_holding_value(my_non_target_dim_extent),
756 my_holding_index(my_non_target_dim_extent)
757 {}
758
759 const Value_* fetch(Index_ i, Value_* buffer) {
760 auto info = my_core.fetch_block(i, 0, my_non_target_dim_extent);
761 const auto& work = my_core.get_workspace();
762 work.values.copy(info.first, info.second, my_holding_value.data());
763 work.non_target_indices.copy(info.first, info.second, my_core.get_tdb_non_target_dim(), my_holding_index.data());
764 std::fill_n(buffer, my_non_target_dim_extent, 0);
765 for (decltype(info.second) i = 0; i < info.second; ++i) {
766 buffer[my_holding_index[i]] = my_holding_value[i];
767 }
768 return buffer;
769 }
770
771private:
772 SparseCore<oracle_, Index_> my_core;
773 Index_ my_non_target_dim_extent;
774 std::vector<Value_> my_holding_value;
775 std::vector<Index_> my_holding_index;
776};
777
778template<bool oracle_, typename Value_, typename Index_>
779class DenseBlock final : public tatami::DenseExtractor<oracle_, Value_, Index_> {
780public:
781 DenseBlock(
782 const Components& tdb_comp,
783 const std::string& attribute,
784 bool row,
785 Index_ target_dim_extent,
786 const std::string& target_dimname,
787 const Dimension& tdb_target_dim,
788 const std::string& non_target_dimname,
789 const Dimension& tdb_non_target_dim,
790 tiledb_datatype_t tdb_type,
792 Index_ block_start,
793 Index_ block_length,
794 const CacheParameters<oracle_, Index_>& cache_parameters,
795 [[maybe_unused]] bool needs_value, // for consistency with Sparse* constructors.
796 [[maybe_unused]] bool needs_index) :
797 my_core(
798 tdb_comp,
799 attribute,
800 row,
801 target_dim_extent,
802 target_dimname,
803 tdb_target_dim,
804 non_target_dimname,
805 tdb_non_target_dim,
806 tdb_type,
807 block_length,
808 std::move(oracle),
809 cache_parameters,
810 /* needs_value = */ true,
811 /* needs_index = */ true
812 ),
813 my_block_start(block_start),
814 my_block_length(block_length),
815 my_holding_value(block_length),
816 my_holding_index(block_length)
817 {}
818
819 const Value_* fetch(Index_ i, Value_* buffer) {
820 auto info = my_core.fetch_block(i, my_block_start, my_block_length);
821 const auto& work = my_core.get_workspace();
822 work.values.copy(info.first, info.second, my_holding_value.data());
823 work.non_target_indices.copy(info.first, info.second, my_core.get_tdb_non_target_dim(), my_holding_index.data());
824 std::fill_n(buffer, my_block_length, 0);
825 for (decltype(info.second) i = 0; i < info.second; ++i) {
826 buffer[my_holding_index[i] - my_block_start] = my_holding_value[i];
827 }
828 return buffer;
829 }
830
831private:
832 SparseCore<oracle_, Index_> my_core;
833 Index_ my_block_start, my_block_length;
834 std::vector<Value_> my_holding_value;
835 std::vector<Index_> my_holding_index;
836};
837
838template<bool oracle_, typename Value_, typename Index_>
839class DenseIndex final : public tatami::DenseExtractor<oracle_, Value_, Index_> {
840public:
841 DenseIndex(
842 const Components& tdb_comp,
843 const std::string& attribute,
844 bool row,
845 Index_ target_dim_extent,
846 const std::string& target_dimname,
847 const Dimension& tdb_target_dim,
848 const std::string& non_target_dimname,
849 const Dimension& tdb_non_target_dim,
850 tiledb_datatype_t tdb_type,
852 tatami::VectorPtr<Index_> indices_ptr,
853 const CacheParameters<oracle_, Index_>& cache_parameters,
854 [[maybe_unused]] bool needs_value, // for consistency with Sparse* constructors.
855 [[maybe_unused]] bool needs_index) :
856 my_core(
857 tdb_comp,
858 attribute,
859 row,
860 target_dim_extent,
861 target_dimname,
862 tdb_target_dim,
863 non_target_dimname,
864 tdb_non_target_dim,
865 tdb_type,
866 indices_ptr->size(),
867 std::move(oracle),
868 cache_parameters,
869 /* needs_value = */ true,
870 /* needs_index = */ true
871 ),
872 my_indices_ptr(std::move(indices_ptr)),
873 my_holding_value(my_indices_ptr->size()),
874 my_holding_index(my_indices_ptr->size())
875 {
876 const auto& indices = *my_indices_ptr;
877 if (!indices.empty()) {
878 auto idx_start = indices.front();
879 tatami::resize_container_to_Index_size(my_remapping, indices.back() - idx_start + 1);
880 for (decltype(indices.size()) j = 0, end = indices.size(); j < end; ++j) {
881 my_remapping[indices[j] - idx_start] = j;
882 }
883 }
884 }
885
886 const Value_* fetch(Index_ i, Value_* buffer) {
887 const auto& indices = *my_indices_ptr;
888
889 if (!indices.empty()) {
890 auto info = my_core.fetch_indices(i, indices);
891 const auto& work = my_core.get_workspace();
892 work.values.copy(info.first, info.second, my_holding_value.data());
893 work.non_target_indices.copy(info.first, info.second, my_core.get_tdb_non_target_dim(), my_holding_index.data());
894 auto idx_start = indices.front();
895 std::fill_n(buffer, indices.size(), 0);
896 for (decltype(info.second) i = 0; i < info.second; ++i) {
897 buffer[my_remapping[my_holding_index[i] - idx_start]] = my_holding_value[i];
898 }
899 }
900
901 return buffer;
902 }
903
904private:
905 SparseCore<oracle_, Index_> my_core;
906 tatami::VectorPtr<Index_> my_indices_ptr;
907 std::vector<Index_> my_remapping;
908 std::vector<Value_> my_holding_value;
909 std::vector<Index_> my_holding_index;
910};
911
912}
931template<typename Value_, typename Index_>
932class SparseMatrix final : public tatami::Matrix<Value_, Index_> {
933public:
940 SparseMatrix(const std::string& uri, std::string attribute, tiledb::Context ctx, const SparseMatrixOptions& options) : my_attribute(std::move(attribute)) {
941 initialize(uri, std::move(ctx), options);
942 }
943
949 SparseMatrix(const std::string& uri, std::string attribute, const SparseMatrixOptions& options) : my_attribute(std::move(attribute)) {
950 initialize(uri, false, options);
951 }
952
957 SparseMatrix(const std::string& uri, std::string attribute) : SparseMatrix(uri, std::move(attribute), SparseMatrixOptions()) {}
958
959private:
960 template<class PossibleContext_>
961 void initialize(const std::string& uri, PossibleContext_ ctx, const SparseMatrixOptions& options) {
962 serialize([&]() -> void {
963 my_tdb_comp.reset(
964 [&]{
965 // If we have to create our own Context_ object, we do so inside the serialized
966 // section, rather than using a delegating constructor.
967 if constexpr(std::is_same<PossibleContext_, tiledb::Context>::value) {
968 return new SparseMatrix_internal::Components(std::move(ctx), uri);
969 } else {
970 return new SparseMatrix_internal::Components(uri);
971 }
972 }(),
973 [](SparseMatrix_internal::Components* ptr) {
974 // Serializing the deleter, for completeness's sake.
975 serialize([&]() -> void {
976 delete ptr;
977 });
978 }
979 );
980
981 auto schema = my_tdb_comp->array.schema();
982 if (schema.array_type() != TILEDB_SPARSE) {
983 throw std::runtime_error("TileDB array should be sparse");
984 }
985 my_cell_order = schema.cell_order();
986
987 my_cache_size_in_bytes = options.maximum_cache_size;
988 my_require_minimum_cache = options.require_minimum_cache;
989
990 if (!schema.has_attribute(my_attribute)) {
991 throw std::runtime_error("no attribute '" + my_attribute + "' is present in the TileDB array");
992 }
993 auto attr = schema.attribute(my_attribute);
994 my_tdb_type = attr.type();
995
996 tiledb::Domain domain = schema.domain();
997 if (domain.ndim() != 2) {
998 throw std::runtime_error("TileDB array should have exactly two dimensions");
999 }
1000
1001 tiledb::Dimension first_dim = domain.dimension(0);
1002 my_first_dimname = first_dim.name();
1003 my_tdb_first_dim.reset(first_dim);
1004 Index_ first_extent = my_tdb_first_dim.extent<Index_>();
1005 Index_ first_tile = my_tdb_first_dim.tile<Index_>();
1006 my_firstdim_stats = tatami_chunked::ChunkDimensionStats<Index_>(first_extent, first_tile);
1007
1008 tiledb::Dimension second_dim = domain.dimension(1);
1009 my_second_dimname = second_dim.name();
1010 my_tdb_second_dim.reset(second_dim);
1011 Index_ second_extent = my_tdb_second_dim.extent<Index_>();
1012 Index_ second_tile = my_tdb_second_dim.tile<Index_>();
1013 my_seconddim_stats = tatami_chunked::ChunkDimensionStats<Index_>(second_extent, second_tile);
1014
1015 // Favoring extraction on the dimension that involves pulling out fewer chunks per dimension element.
1016 auto tiles_per_firstdim = (second_extent / second_tile) + (second_extent % second_tile > 0);
1017 auto tiles_per_seconddim = (first_extent / first_tile) + (first_extent % first_tile > 0);
1018 my_prefer_firstdim = tiles_per_firstdim <= tiles_per_seconddim;
1019 });
1020 }
1021
private:
    // Handle to the TileDB context + array, shared with all extractors
    // created from this matrix (see *my_tdb_comp in populate()).
    std::shared_ptr<SparseMatrix_internal::Components> my_tdb_comp;
    tiledb_layout_t my_cell_order; // cell order reported by the array schema.
    tiledb_datatype_t my_tdb_type; // TileDB type of the extracted attribute.

    std::string my_attribute; // name of the attribute holding the matrix values.
    std::size_t my_cache_size_in_bytes; // from SparseMatrixOptions::maximum_cache_size.
    bool my_require_minimum_cache; // from SparseMatrixOptions::require_minimum_cache.

    std::string my_first_dimname, my_second_dimname; // dimension names, used to bind query buffers.
    SparseMatrix_internal::Dimension my_tdb_first_dim, my_tdb_second_dim; // type-erased dimension wrappers.
    tatami_chunked::ChunkDimensionStats<Index_> my_firstdim_stats, my_seconddim_stats; // extent + tile geometry per dimension.

    // True when extraction along the first dimension crosses no more tiles
    // per element than extraction along the second (set in the constructor).
    bool my_prefer_firstdim;
1036
1037private:
Index_ nrow_internal() const {
    // The first TileDB dimension is mapped to tatami's rows.
    return my_firstdim_stats.dimension_extent;
}
1041
Index_ ncol_internal() const {
    // The second TileDB dimension is mapped to tatami's columns.
    return my_seconddim_stats.dimension_extent;
}
1045
1046public:
// Number of rows, i.e., the extent of the first TileDB dimension.
Index_ nrow() const {
    return nrow_internal();
}
1050
// Number of columns, i.e., the extent of the second TileDB dimension.
Index_ ncol() const {
    return ncol_internal();
}
1054
// Always sparse: the constructor rejects arrays that are not TILEDB_SPARSE.
bool is_sparse() const {
    return true;
}
1058
// Entire matrix is sparse, so the sparse proportion is exactly 1.
double is_sparse_proportion() const {
    return 1;
}
1062
// Prefer row extraction when it touches no more tiles per element than
// column extraction (decided once in the constructor).
bool prefer_rows() const {
    return my_prefer_firstdim;
}
1066
1067 double prefer_rows_proportion() const {
1068 return static_cast<double>(my_prefer_firstdim);
1069 }
1070
bool uses_oracle(bool) const {
    // An oracle is only useful when there is a cache for it to fill; with a
    // zero-byte cache the oracle's predictions definitely won't be used.
    // (With a non-empty cache, it still might not be, but it could be.)
    return my_cache_size_in_bytes > 0;
}
1076
1077private:
1078 template<
1079 bool oracle_,
1080 template<typename, typename> class Interface_,
1081 template<bool, typename, typename> class Extractor_,
1082 typename ... Args_
1083 >
1084 std::unique_ptr<Interface_<Value_, Index_> > populate(
1085 bool row,
1086 Index_ non_target_length,
1088 const tatami::Options& opt,
1089 Args_&& ... args)
1090 const {
1091 const auto& target_dim_stats = (row ? my_firstdim_stats : my_seconddim_stats);
1092 const auto& target_dimname = (row ? my_first_dimname : my_second_dimname);
1093 const auto& non_target_dimname = (row ? my_second_dimname : my_first_dimname);
1094 const auto& tdb_target_dim = (row ? my_tdb_first_dim : my_tdb_second_dim);
1095 const auto& tdb_non_target_dim = (row ? my_tdb_second_dim : my_tdb_first_dim);
1096
1097 std::size_t nonzero_size = 0;
1098 if (opt.sparse_extract_value) {
1099 nonzero_size += ::tatami_tiledb::internal::determine_type_size(my_tdb_type);
1100 }
1101 if (opt.sparse_extract_index) {
1102 nonzero_size += ::tatami_tiledb::internal::determine_type_size(tdb_non_target_dim.type());
1103 }
1104
1105 if constexpr(oracle_) {
1106 // Add the target index size because we always need it for bulk
1107 // reads in the oracular case. This is not needed in the
1108 // myopic case because we only read one slab at a time.
1109 nonzero_size += ::tatami_tiledb::internal::determine_type_size(tdb_target_dim.type());
1110
1111 SparseMatrix_internal::OracularCacheParameters<Index_> cache_params;
1112 cache_params.max_cache_size_in_elements = my_cache_size_in_bytes / nonzero_size;
1113
1114 // If we're asking for rows and the cell order is row-major or
1115 // we want columns and the cell order is column-major, each
1116 // element of the target dimension has its contents stored
1117 // contiguously in TileDB's data tiles and can be easily
1118 // extracted on an individual basis; thus each element is
1119 // considered a separate slab and we set the chunk_length to 1.
1120 //
1121 // Otherwise, it's likely that an element of the target
1122 // dimension will overlap multiple data tiles within each space
1123 // tile, so we might as well extract the entire space tile's
1124 // elements on the target dimension.
1125 cache_params.chunk_length = (row == (my_cell_order == TILEDB_ROW_MAJOR) ? 1 : target_dim_stats.chunk_length);
1126
1127 // Ensure that there's enough space for every dimension element.
1128 // If this can't be guaranteed, we set the cache to only be able to
1129 // hold a single dimension element. This is effectively the same as
1130 // not doing any caching at all, as a hypothetical SoloCore would
1131 // still need to allocate enough memory for a single dimension
1132 // element to create a buffer for the TileDB libary.
1133 auto max_slab_size = sanisizer::product<std::size_t>(non_target_length, cache_params.chunk_length);
1134 if (my_require_minimum_cache) {
1135 cache_params.max_cache_size_in_elements = std::max(cache_params.max_cache_size_in_elements, max_slab_size);
1136 } else if (cache_params.max_cache_size_in_elements < max_slab_size) {
1137 cache_params.max_cache_size_in_elements = non_target_length;
1138 cache_params.chunk_length = 1;
1139 }
1140
1141 return std::make_unique<Extractor_<oracle_, Value_, Index_> >(
1142 *my_tdb_comp,
1143 my_attribute,
1144 row,
1145 target_dim_stats.dimension_extent,
1146 target_dimname,
1147 tdb_target_dim,
1148 non_target_dimname,
1149 tdb_non_target_dim,
1150 my_tdb_type,
1151 std::move(oracle),
1152 std::forward<Args_>(args)...,
1153 cache_params,
1156 );
1157
1158 } else {
1159 tatami_chunked::SlabCacheStats<Index_> raw_params(
1160 target_dim_stats.chunk_length,
1161 non_target_length,
1162 target_dim_stats.num_chunks,
1163 my_cache_size_in_bytes,
1164 nonzero_size,
1165 my_require_minimum_cache
1166 );
1167
1168 // No need to have a dedicated SoloCore for uncached extraction,
1169 // because it would still need to hold a single Workspace. We
1170 // instead reuse the MyopicCore's code with a chunk length of 1 to
1171 // achieve the same memory usage. This has a mild perf hit from the
1172 // LRU but perf already sucks without caching so who cares.
1173 SparseMatrix_internal::MyopicCacheParameters<Index_> cache_params;
1174 if (raw_params.max_slabs_in_cache > 0) {
1175 cache_params.chunk_length = target_dim_stats.chunk_length;
1176 cache_params.slab_size_in_elements = raw_params.slab_size_in_elements;
1177 cache_params.max_slabs_in_cache = raw_params.max_slabs_in_cache;
1178 } else {
1179 cache_params.chunk_length = 1;
1180 cache_params.slab_size_in_elements = non_target_length;
1181 cache_params.max_slabs_in_cache = 1;
1182 }
1183
1184 return std::make_unique<Extractor_<oracle_, Value_, Index_> >(
1185 *my_tdb_comp,
1186 my_attribute,
1187 row,
1188 target_dim_stats.dimension_extent,
1189 target_dimname,
1190 tdb_target_dim,
1191 non_target_dimname,
1192 tdb_non_target_dim,
1193 my_tdb_type,
1194 std::move(oracle),
1195 std::forward<Args_>(args)...,
1196 cache_params,
1199 );
1200 }
1201 }
1202
1203 static tatami::Options set_extract_all(tatami::Options opt) {
1204 // Resetting these options so that the slab size estimates are
1205 // correctly estimated for dense extractors, regardless of 'opt'.
1206 opt.sparse_extract_value = true;
1207 opt.sparse_extract_index = true;
1208 return opt;
1209 }
1210
1211 /********************
1212 *** Myopic dense ***
1213 ********************/
1214public:
1215 std::unique_ptr<tatami::MyopicDenseExtractor<Value_, Index_> > dense(bool row, const tatami::Options& opt) const {
1216 Index_ full_non_target = (row ? ncol_internal() : nrow_internal());
1217 return populate<false, tatami::MyopicDenseExtractor, SparseMatrix_internal::DenseFull>(row, full_non_target, false, set_extract_all(opt), full_non_target);
1218 }
1219
1220 std::unique_ptr<tatami::MyopicDenseExtractor<Value_, Index_> > dense(bool row, Index_ block_start, Index_ block_length, const tatami::Options& opt) const {
1221 return populate<false, tatami::MyopicDenseExtractor, SparseMatrix_internal::DenseBlock>(row, block_length, false, set_extract_all(opt), block_start, block_length);
1222 }
1223
1224 std::unique_ptr<tatami::MyopicDenseExtractor<Value_, Index_> > dense(bool row, tatami::VectorPtr<Index_> indices_ptr, const tatami::Options& opt) const {
1225 auto nidx = indices_ptr->size();
1226 return populate<false, tatami::MyopicDenseExtractor, SparseMatrix_internal::DenseIndex>(row, nidx, false, set_extract_all(opt), std::move(indices_ptr));
1227 }
1228
1229 /*********************
1230 *** Myopic sparse ***
1231 *********************/
1232public:
1233 std::unique_ptr<tatami::MyopicSparseExtractor<Value_, Index_> > sparse(bool row, const tatami::Options& opt) const {
1234 Index_ full_non_target = (row ? ncol_internal() : nrow_internal());
1235 return populate<false, tatami::MyopicSparseExtractor, SparseMatrix_internal::SparseFull>(row, full_non_target, false, opt, full_non_target);
1236 }
1237
1238 std::unique_ptr<tatami::MyopicSparseExtractor<Value_, Index_> > sparse(bool row, Index_ block_start, Index_ block_length, const tatami::Options& opt) const {
1239 return populate<false, tatami::MyopicSparseExtractor, SparseMatrix_internal::SparseBlock>(row, block_length, false, opt, block_start, block_length);
1240 }
1241
1242 std::unique_ptr<tatami::MyopicSparseExtractor<Value_, Index_> > sparse(bool row, tatami::VectorPtr<Index_> indices_ptr, const tatami::Options& opt) const {
1243 auto nidx = indices_ptr->size();
1244 return populate<false, tatami::MyopicSparseExtractor, SparseMatrix_internal::SparseIndex>(row, nidx, false, opt, std::move(indices_ptr));
1245 }
1246
1247 /**********************
1248 *** Oracular dense ***
1249 **********************/
1250public:
1251 std::unique_ptr<tatami::OracularDenseExtractor<Value_, Index_> > dense(
1252 bool row,
1253 std::shared_ptr<const tatami::Oracle<Index_> > oracle,
1254 const tatami::Options& opt)
1255 const {
1256 Index_ full_non_target = (row ? ncol_internal() : nrow_internal());
1257 return populate<true, tatami::OracularDenseExtractor, SparseMatrix_internal::DenseFull>(row, full_non_target, std::move(oracle), set_extract_all(opt), full_non_target);
1258 }
1259
1260 std::unique_ptr<tatami::OracularDenseExtractor<Value_, Index_> > dense(
1261 bool row,
1262 std::shared_ptr<const tatami::Oracle<Index_> > oracle,
1263 Index_ block_start,
1264 Index_ block_length,
1265 const tatami::Options& opt)
1266 const {
1267 return populate<true, tatami::OracularDenseExtractor, SparseMatrix_internal::DenseBlock>(row, block_length, std::move(oracle), set_extract_all(opt), block_start, block_length);
1268 }
1269
1270 std::unique_ptr<tatami::OracularDenseExtractor<Value_, Index_> > dense(
1271 bool row,
1272 std::shared_ptr<const tatami::Oracle<Index_> > oracle,
1273 tatami::VectorPtr<Index_> indices_ptr,
1274 const tatami::Options& opt)
1275 const {
1276 auto nidx = indices_ptr->size();
1277 return populate<true, tatami::OracularDenseExtractor, SparseMatrix_internal::DenseIndex>(row, nidx, std::move(oracle), set_extract_all(opt), std::move(indices_ptr));
1278 }
1279
1280 /***********************
1281 *** Oracular sparse ***
1282 ***********************/
1283public:
1284 std::unique_ptr<tatami::OracularSparseExtractor<Value_, Index_> > sparse(
1285 bool row,
1286 std::shared_ptr<const tatami::Oracle<Index_> > oracle,
1287 const tatami::Options& opt)
1288 const {
1289 Index_ full_non_target = (row ? ncol_internal() : nrow_internal());
1290 return populate<true, tatami::OracularSparseExtractor, SparseMatrix_internal::SparseFull>(row, full_non_target, std::move(oracle), opt, full_non_target);
1291 }
1292
1293 std::unique_ptr<tatami::OracularSparseExtractor<Value_, Index_> > sparse(
1294 bool row,
1295 std::shared_ptr<const tatami::Oracle<Index_> > oracle,
1296 Index_ block_start,
1297 Index_ block_length,
1298 const tatami::Options& opt)
1299 const {
1300 return populate<true, tatami::OracularSparseExtractor, SparseMatrix_internal::SparseBlock>(row, block_length, std::move(oracle), opt, block_start, block_length);
1301 }
1302
1303 std::unique_ptr<tatami::OracularSparseExtractor<Value_, Index_> > sparse(
1304 bool row,
1305 std::shared_ptr<const tatami::Oracle<Index_> > oracle,
1306 tatami::VectorPtr<Index_> indices_ptr,
1307 const tatami::Options& opt)
1308 const {
1309 auto nidx = indices_ptr->size();
1310 return populate<true, tatami::OracularSparseExtractor, SparseMatrix_internal::SparseIndex>(row, nidx, std::move(oracle), opt, std::move(indices_ptr));
1311 }
1312};
1313
1314}
1315
1316#endif
TileDB-backed sparse matrix.
Definition SparseMatrix.hpp:932
SparseMatrix(const std::string &uri, std::string attribute, const SparseMatrixOptions &options)
Definition SparseMatrix.hpp:949
SparseMatrix(const std::string &uri, std::string attribute)
Definition SparseMatrix.hpp:957
SparseMatrix(const std::string &uri, std::string attribute, tiledb::Context ctx, const SparseMatrixOptions &options)
Definition SparseMatrix.hpp:940
tatami bindings for TileDB matrices.
Definition DenseMatrix.hpp:22
void serialize(Function_ fun)
Definition serialize.hpp:20
std::shared_ptr< const std::vector< Index_ > > VectorPtr
typename std::conditional< oracle_, OracularSparseExtractor< Value_, Index_ >, MyopicSparseExtractor< Value_, Index_ > >::type SparseExtractor
typename std::conditional< oracle_, std::shared_ptr< const Oracle< Index_ > >, bool >::type MaybeOracle
typename std::conditional< oracle_, OracularDenseExtractor< Value_, Index_ >, MyopicDenseExtractor< Value_, Index_ > >::type DenseExtractor
void process_consecutive_indices(const Index_ *indices, Index_ length, Function_ fun)
Locking for serial access.
bool sparse_extract_index
bool sparse_extract_value
const Value_ * value
const Index_ * index
Options for sparse TileDB extraction.
Definition SparseMatrix.hpp:30
bool require_minimum_cache
Definition SparseMatrix.hpp:47
std::size_t maximum_cache_size
Definition SparseMatrix.hpp:40