#ifndef TATAMI_HDF5_DENSE_MATRIX_HPP
#define TATAMI_HDF5_DENSE_MATRIX_HPP

#include "H5Cpp.h"

#include <string>
#include <vector>
#include <memory>
#include <algorithm>
#include <utility>
#include <type_traits>
#include <cstddef>

#include "tatami/tatami.hpp"
#include "tatami_chunked/tatami_chunked.hpp"
#include "sanisizer/sanisizer.hpp"

#include "serialize.hpp"
#include "utils.hpp"
namespace tatami_hdf5 {

/**
 * @brief Options for `DenseMatrix` extraction.
 */
struct DenseMatrixOptions {
    /**
     * Size of the in-memory cache in bytes.
     * Larger caches reduce the number of reads from the HDF5 file, at the cost of more memory.
     */
    std::size_t maximum_cache_size = 100000000;

    /**
     * Whether to enforce a minimum cache size that can hold at least one slab,
     * regardless of `maximum_cache_size`.
     */
    bool require_minimum_cache = true;
};

namespace DenseMatrix_internal {

// All of the HDF5 handles needed to perform a read from the dataset.
struct Components {
    H5::H5File file;
    H5::DataSet dataset;
    H5::DataSpace dataspace;
    H5::DataSpace memspace;
};
template<typename Index_, typename OutputValue_>
void extract_block(bool h5_row_is_target, Index_ cache_start, Index_ cache_length, Index_ block_start, Index_ block_length, OutputValue_* buffer, Components& comp) {
    hsize_t offset[2];
    hsize_t count[2];

    // Dimension 0 of the dataset corresponds to the HDF5 rows, dimension 1 to the HDF5 columns.
    int target_dim = 1 - h5_row_is_target;
    offset[target_dim] = cache_start;
    count[target_dim] = cache_length;

    int non_target_dim = h5_row_is_target;
    offset[non_target_dim] = block_start;
    count[non_target_dim] = block_length;
    comp.dataspace.selectHyperslab(H5S_SELECT_SET, count, offset);

    // Matching the memory space's dimensionality to that of the file space,
    // to avoid any rearrangement inside the HDF5 library during the read.
    comp.memspace.setExtentSimple(2, count);
    comp.memspace.selectAll();

    comp.dataset.read(buffer, define_mem_type<OutputValue_>(), comp.memspace, comp.dataspace);
}
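// For example (a sketch): calling extract_block() with h5_row_is_target = true, cache_start = 10,
// cache_length = 1, block_start = 5 and block_length = 20 selects the hyperslab at offset = {10, 5}
// with count = {1, 20}, i.e., columns [5, 25) of row 10; the memory space is given the same 1-by-20
// extent so the values land contiguously in 'buffer'.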
template<typename Index_, typename OutputValue_>
void extract_indices(bool h5_row_is_target, Index_ cache_start, Index_ cache_length, const std::vector<Index_>& indices, OutputValue_* buffer, Components& comp) {
    hsize_t offset[2];
    hsize_t count[2];

    int target_dim = 1 - h5_row_is_target;
    offset[target_dim] = cache_start;
    count[target_dim] = cache_length;

    int non_target_dim = h5_row_is_target;

    // Building a union of hyperslabs, one for each contiguous run of indices.
    comp.dataspace.selectNone();
    tatami::process_consecutive_indices<Index_>(
        indices.data(),
        static_cast<Index_>(indices.size()),
        [&](Index_ start, Index_ length) -> void {
            offset[non_target_dim] = start;
            count[non_target_dim] = length;
            comp.dataspace.selectHyperslab(H5S_SELECT_OR, count, offset);
        }
    );

    count[non_target_dim] = indices.size();
    comp.memspace.setExtentSimple(2, count);
    comp.memspace.selectAll();

    comp.dataset.read(buffer, define_mem_type<OutputValue_>(), comp.memspace, comp.dataspace);
}
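// For example (a sketch): with h5_row_is_target = true, cache_start = 7, cache_length = 1 and sorted
// indices {2, 3, 4, 10}, the runs (2, 3) and (10, 1) produce two OR'ed hyperslabs on row 7; the memory
// space extent is {1, 4}, so the four selected values are read into 'buffer' in index order.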
inline void initialize(const std::string& file_name, const std::string& dataset_name, std::unique_ptr<Components>& h5comp) {
    serialize([&]() -> void {
        h5comp.reset(new Components);

        // Disabling the HDF5 chunk cache, as we do our own caching of slabs;
        // otherwise we would be paying for two layers of redundant copies.
        H5::FileAccPropList fapl(H5::FileAccPropList::DEFAULT.getId());
        fapl.setCache(0, 0, 0, 0);

        h5comp->file.openFile(file_name, H5F_ACC_RDONLY, fapl);
        h5comp->dataset = h5comp->file.openDataSet(dataset_name);
        h5comp->dataspace = h5comp->dataset.getSpace();
    });
}

inline void destroy(std::unique_ptr<Components>& h5comp) {
    serialize([&]() -> void {
        h5comp.reset();
    });
}
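// Four flavors of extraction "core" are defined below, all exposing the same fetch_block()/fetch_indices()
// interface:
// - SoloCore performs one HDF5 read per requested element, for when the cache cannot hold even one slab.
// - MyopicCore keeps an LRU cache of slabs, one slab per chunk along the target dimension.
// - OracularCoreNormal uses the oracle's predictions to read batches of chunks into a single contiguous
//   memory pool, for the case where the target dimension lies on the HDF5 rows.
// - OracularCoreTransposed handles the other orientation, transposing each slab after it is read.
// The DenseCore alias further below selects between them at compile time.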
template<bool oracle_, bool by_h5_row_, typename Index_>
class SoloCore {
public:
    SoloCore(
        const std::string& file_name,
        const std::string& dataset_name,
        [[maybe_unused]] tatami_chunked::ChunkDimensionStats<Index_> target_dim_stats,
        tatami::MaybeOracle<oracle_, Index_> oracle,
        [[maybe_unused]] const tatami_chunked::SlabCacheStats<Index_>& slab_stats) :
        my_oracle(std::move(oracle))
    {
        initialize(file_name, dataset_name, my_h5comp);
    }

    ~SoloCore() {
        destroy(my_h5comp);
    }

private:
    std::unique_ptr<Components> my_h5comp;
    tatami::MaybeOracle<oracle_, Index_> my_oracle;
    typename std::conditional<oracle_, tatami::PredictionIndex, bool>::type my_counter = 0;

public:
    template<typename Value_>
    const Value_* fetch_block(Index_ i, Index_ block_start, Index_ block_length, Value_* buffer) {
        if constexpr(oracle_) {
            i = my_oracle->get(my_counter++);
        }
        serialize([&]() -> void {
            extract_block(by_h5_row_, i, static_cast<Index_>(1), block_start, block_length, buffer, *my_h5comp);
        });
        return buffer;
    }

    template<typename Value_>
    const Value_* fetch_indices(Index_ i, const std::vector<Index_>& indices, Value_* buffer) {
        if constexpr(oracle_) {
            i = my_oracle->get(my_counter++);
        }
        serialize([&]() -> void {
            extract_indices(by_h5_row_, i, static_cast<Index_>(1), indices, buffer, *my_h5comp);
        });
        return buffer;
    }
};
template<bool by_h5_row_, typename Index_, typename CachedValue_>
class MyopicCore {
public:
    MyopicCore(
        const std::string& file_name,
        const std::string& dataset_name,
        tatami_chunked::ChunkDimensionStats<Index_> target_dim_stats,
        [[maybe_unused]] tatami::MaybeOracle<false, Index_> oracle, // for consistency with the oracular constructors.
        const tatami_chunked::SlabCacheStats<Index_>& slab_stats) :
        my_dim_stats(std::move(target_dim_stats)),
        my_factory(slab_stats),
        my_cache(slab_stats.max_slabs_in_cache)
    {
        initialize(file_name, dataset_name, my_h5comp);
        if constexpr(!by_h5_row_) {
            my_transposition_buffer.resize(sanisizer::cast<decltype(my_transposition_buffer.size())>(slab_stats.slab_size_in_elements));
        }
    }

    ~MyopicCore() {
        destroy(my_h5comp);
    }

private:
    std::unique_ptr<Components> my_h5comp;
    tatami_chunked::ChunkDimensionStats<Index_> my_dim_stats;

    tatami_chunked::DenseSlabFactory<CachedValue_> my_factory;
    typedef typename decltype(my_factory)::Slab Slab;
    tatami_chunked::LruSlabCache<Index_, Slab> my_cache;

    // Only used when the target dimension lies on the HDF5 columns, in which case
    // each freshly read slab needs to be transposed before caching.
    typename std::conditional<by_h5_row_, bool, std::vector<CachedValue_> >::type my_transposition_buffer;

    template<typename Value_, class Extract_>
    void fetch_raw(Index_ i, Value_* buffer, Index_ non_target_length, Extract_ extract) {
        Index_ chunk = i / my_dim_stats.chunk_length;
        Index_ index = i % my_dim_stats.chunk_length;

        const auto& info = my_cache.find(
            chunk,
            /* create = */ [&]() -> Slab {
                return my_factory.create();
            },
            /* populate = */ [&](Index_ id, Slab& contents) -> void {
                auto curdim = tatami_chunked::get_chunk_length(my_dim_stats, id);

                if constexpr(by_h5_row_) {
                    serialize([&]() -> void {
                        extract(id * my_dim_stats.chunk_length, curdim, contents.data);
                    });
                } else {
                    // Reading into a scratch buffer and transposing, so that the target
                    // dimension is the slower-changing dimension within the cached slab.
                    auto tptr = my_transposition_buffer.data();
                    serialize([&]() -> void {
                        extract(id * my_dim_stats.chunk_length, curdim, tptr);
                    });
                    tatami::transpose(tptr, non_target_length, curdim, contents.data);
                }
            }
        );

        auto ptr = info.data + sanisizer::product_unsafe<std::size_t>(non_target_length, index);
        std::copy_n(ptr, non_target_length, buffer);
    }

public:
    template<typename Value_>
    const Value_* fetch_block(Index_ i, Index_ block_start, Index_ block_length, Value_* buffer) {
        fetch_raw(i, buffer, block_length,
            [&](Index_ start, Index_ length, CachedValue_* buf) -> void {
                extract_block(by_h5_row_, start, length, block_start, block_length, buf, *my_h5comp);
            }
        );
        return buffer;
    }

    template<typename Value_>
    const Value_* fetch_indices(Index_ i, const std::vector<Index_>& indices, Value_* buffer) {
        fetch_raw(i, buffer, static_cast<Index_>(indices.size()),
            [&](Index_ start, Index_ length, CachedValue_* buf) -> void {
                extract_indices(by_h5_row_, start, length, indices, buf, *my_h5comp);
            }
        );
        return buffer;
    }
};
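// As an illustration: with my_dim_stats.chunk_length = 100, a request for target element i = 250 maps to
// chunk = 2 and index = 50, so the slab covering target positions [200, 300) is read (or found in the LRU
// cache) and the row at position 50 within that slab is copied into the caller's buffer.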
template<typename Index_, typename CachedValue_>
class OracularCoreNormal {
public:
    OracularCoreNormal(
        const std::string& file_name,
        const std::string& dataset_name,
        tatami_chunked::ChunkDimensionStats<Index_> target_dim_stats,
        tatami::MaybeOracle<true, Index_> oracle,
        const tatami_chunked::SlabCacheStats<Index_>& slab_stats) :
        my_dim_stats(std::move(target_dim_stats)),
        my_cache(std::move(oracle), slab_stats.max_slabs_in_cache),
        my_slab_size(slab_stats.slab_size_in_elements),
        my_memory_pool(sanisizer::product<decltype(my_memory_pool.size())>(slab_stats.max_slabs_in_cache, my_slab_size))
    {
        initialize(file_name, dataset_name, my_h5comp);
    }

    ~OracularCoreNormal() {
        destroy(my_h5comp);
    }

private:
    std::unique_ptr<Components> my_h5comp;
    tatami_chunked::ChunkDimensionStats<Index_> my_dim_stats;

    // Each slab just records its offset into the shared memory pool.
    struct Slab {
        std::size_t offset;
    };
    tatami_chunked::OracularSlabCache<Index_, Index_, Slab, true> my_cache;
    std::size_t my_slab_size;
    std::vector<CachedValue_> my_memory_pool;
    std::size_t my_offset = 0;
    template<class Function_>
    static void sort_by_field(std::vector<std::pair<Index_, Slab*> >& indices, Function_ field) {
        auto comp = [&field](const std::pair<Index_, Slab*>& l, const std::pair<Index_, Slab*>& r) -> bool {
            return field(l) < field(r);
        };
        if (!std::is_sorted(indices.begin(), indices.end(), comp)) {
            std::sort(indices.begin(), indices.end(), comp);
        }
    }
    template<typename Value_, class Unionize_>
    void fetch_raw([[maybe_unused]] Index_ i, Value_* buffer, Index_ non_target_length, Unionize_ unionize) {
        auto info = my_cache.next(
            /* identify = */ [&](Index_ current) -> std::pair<Index_, Index_> {
                return std::pair<Index_, Index_>(current / my_dim_stats.chunk_length, current % my_dim_stats.chunk_length);
            },
            /* create = */ [&]() -> Slab {
                Slab output;
                output.offset = my_offset;
                my_offset += my_slab_size;
                return output;
            },
            /* populate = */ [&](std::vector<std::pair<Index_, Slab*> >& chunks, std::vector<std::pair<Index_, Slab*> >& to_reuse) -> void {
                // Defragmenting the already-cached slabs to the front of the memory pool, so that
                // the newly requested chunks can be read into one contiguous region after them.
                sort_by_field(to_reuse, [](const std::pair<Index_, Slab*>& x) -> std::size_t { return x.second->offset; });

                auto dest = my_memory_pool.data();
                std::size_t running_offset = 0;
                for (auto& x : to_reuse) {
                    auto& cur_offset = x.second->offset;
                    if (cur_offset != running_offset) {
                        std::copy_n(dest + cur_offset, my_slab_size, dest + running_offset);
                        cur_offset = running_offset;
                    }
                    running_offset += my_slab_size;
                }

                // Sorting the new chunks by their position in the dataset, so that adjacent chunks
                // can be merged into longer hyperslab runs and fetched in a single read call.
                sort_by_field(chunks, [](const std::pair<Index_, Slab*>& x) -> Index_ { return x.first; });

                serialize([&]() -> void {
                    auto& components = *my_h5comp;
                    auto& dspace = my_h5comp->dataspace;
                    dspace.selectNone();

                    Index_ run_chunk_id = chunks.front().first;
                    Index_ chunk_length = tatami_chunked::get_chunk_length(my_dim_stats, run_chunk_id);
                    Index_ run_length = chunk_length;
                    Index_ total_length = chunk_length;
                    chunks.front().second->offset = running_offset;
                    auto start_offset = running_offset;
                    running_offset += my_slab_size;

                    for (decltype(chunks.size()) ci = 1, cend = chunks.size(); ci < cend; ++ci) {
                        auto& current_chunk = chunks[ci];
                        Index_ current_chunk_id = current_chunk.first;

                        if (current_chunk_id - run_chunk_id > 1) { // close off the current run and start a new one at this chunk.
                            unionize(dspace, run_chunk_id * my_dim_stats.chunk_length, run_length);
                            run_chunk_id = current_chunk_id;
                            run_length = 0;
                        }

                        Index_ current_length = tatami_chunked::get_chunk_length(my_dim_stats, current_chunk_id);
                        run_length += current_length;
                        total_length += current_length;
                        current_chunk.second->offset = running_offset;
                        running_offset += my_slab_size;
                    }

                    unionize(dspace, run_chunk_id * my_dim_stats.chunk_length, run_length);

                    hsize_t count[2];
                    count[0] = total_length;
                    count[1] = non_target_length;
                    components.memspace.setExtentSimple(2, count);
                    components.memspace.selectAll();
                    components.dataset.read(dest + start_offset, define_mem_type<CachedValue_>(), components.memspace, dspace);
                });
            }
        );

        auto ptr = my_memory_pool.data() + info.first->offset + sanisizer::product_unsafe<std::size_t>(non_target_length, info.second);
        std::copy_n(ptr, non_target_length, buffer);
    }
public:
    template<typename Value_>
    const Value_* fetch_block(Index_ i, Index_ block_start, Index_ block_length, Value_* buffer) {
        fetch_raw(i, buffer, block_length,
            [&](H5::DataSpace& dspace, Index_ run_start, Index_ run_length) -> void {
                hsize_t offset[2];
                hsize_t count[2];
                offset[0] = run_start;
                offset[1] = block_start;
                count[0] = run_length;
                count[1] = block_length;
                dspace.selectHyperslab(H5S_SELECT_OR, count, offset);
            }
        );
        return buffer;
    }
    template<typename Value_>
    const Value_* fetch_indices(Index_ i, const std::vector<Index_>& indices, Value_* buffer) {
        fetch_raw(i, buffer, static_cast<Index_>(indices.size()),
            [&](H5::DataSpace& dspace, Index_ run_start, Index_ run_length) -> void {
                hsize_t offset[2];
                hsize_t count[2];
                offset[0] = run_start;
                count[0] = run_length;

                tatami::process_consecutive_indices<Index_>(
                    indices.data(),
                    static_cast<Index_>(indices.size()),
                    [&](Index_ start, Index_ length) -> void {
                        offset[1] = start;
                        count[1] = length;
                        dspace.selectHyperslab(H5S_SELECT_OR, count, offset);
                    }
                );
            }
        );
        return buffer;
    }
};
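// To illustrate the oracular path (a sketch): if the oracle's upcoming predictions need chunks {0, 1, 5}
// with chunk_length = 100, the reused slabs are first compacted to the front of the memory pool; chunks 0
// and 1 are then unionized into one hyperslab covering target positions [0, 200) and chunk 5 into another
// covering [500, 600), after which a single dataset.read() deposits all 300 target positions contiguously
// into the free region of the pool, with each chunk's slab offset recorded for later fetches.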
template<typename Index_, typename CachedValue_>
class OracularCoreTransposed {
public:
    OracularCoreTransposed(
        const std::string& file_name,
        const std::string& dataset_name,
        tatami_chunked::ChunkDimensionStats<Index_> target_dim_stats,
        tatami::MaybeOracle<true, Index_> oracle,
        const tatami_chunked::SlabCacheStats<Index_>& slab_stats) :
        my_dim_stats(std::move(target_dim_stats)),
        my_factory(slab_stats),
        my_cache(std::move(oracle), slab_stats.max_slabs_in_cache),
        my_transposition_buffer(sanisizer::cast<decltype(my_transposition_buffer.size())>(slab_stats.slab_size_in_elements)),
        my_transposition_buffer_ptr(my_transposition_buffer.data())
    {
        initialize(file_name, dataset_name, my_h5comp);
        my_cache_transpose_info.reserve(slab_stats.max_slabs_in_cache);
    }

    ~OracularCoreTransposed() {
        destroy(my_h5comp);
    }

private:
    std::unique_ptr<Components> my_h5comp;
    tatami_chunked::ChunkDimensionStats<Index_> my_dim_stats;

    tatami_chunked::DenseSlabFactory<CachedValue_> my_factory;
    typedef typename decltype(my_factory)::Slab Slab;
    tatami_chunked::OracularSlabCache<Index_, Index_, Slab> my_cache;

    std::vector<CachedValue_> my_transposition_buffer;
    CachedValue_* my_transposition_buffer_ptr;
    std::vector<std::pair<Slab*, Index_> > my_cache_transpose_info;
    template<typename Value_, class Extract_>
    void fetch_raw([[maybe_unused]] Index_ i, Value_* buffer, Index_ non_target_length, Extract_ extract) {
        auto info = my_cache.next(
            /* identify = */ [&](Index_ current) -> std::pair<Index_, Index_> {
                return std::pair<Index_, Index_>(current / my_dim_stats.chunk_length, current % my_dim_stats.chunk_length);
            },
            /* create = */ [&]() -> Slab {
                return my_factory.create();
            },
            /* populate = */ [&](std::vector<std::pair<Index_, Slab*> >& chunks) -> void {
                my_cache_transpose_info.clear();

                serialize([&]() -> void {
                    for (const auto& c : chunks) {
                        auto curdim = tatami_chunked::get_chunk_length(my_dim_stats, c.first);
                        extract(c.first * my_dim_stats.chunk_length, curdim, c.second->data);
                        my_cache_transpose_info.emplace_back(c.second, curdim);
                    }
                });

                // Transposing each newly read slab so that the target dimension becomes the
                // slower-changing dimension. The pointer swap avoids an extra copy: the slab
                // adopts the transposed buffer and donates its old buffer as the next scratch space.
                if (non_target_length != 1) {
                    for (const auto& c : my_cache_transpose_info) {
                        tatami::transpose(c.first->data, non_target_length, c.second, my_transposition_buffer_ptr);
                        std::swap(c.first->data, my_transposition_buffer_ptr);
                    }
                }
            }
        );

        auto ptr = info.first->data + sanisizer::product_unsafe<std::size_t>(non_target_length, info.second);
        std::copy_n(ptr, non_target_length, buffer);
    }
public:
    template<typename Value_>
    const Value_* fetch_block(Index_ i, Index_ block_start, Index_ block_length, Value_* buffer) {
        fetch_raw(i, buffer, block_length,
            [&](Index_ start, Index_ length, CachedValue_* buf) -> void {
                extract_block(false, start, length, block_start, block_length, buf, *my_h5comp);
            }
        );
        return buffer;
    }

    template<typename Value_>
    const Value_* fetch_indices(Index_ i, const std::vector<Index_>& indices, Value_* buffer) {
        fetch_raw(i, buffer, static_cast<Index_>(indices.size()),
            [&](Index_ start, Index_ length, CachedValue_* buf) -> void {
                extract_indices(false, start, length, indices, buf, *my_h5comp);
            }
        );
        return buffer;
    }
};
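// The split between the two oracular cores mirrors the dataset orientation: OracularCoreNormal is used when
// the target dimension lies on the HDF5 rows, so each slab already comes out of the file in target-major
// order and can live in one shared pool; OracularCoreTransposed is used for the other orientation and pays
// for an in-memory transposition per slab so that the per-element fetch remains a single std::copy_n.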
template<bool solo_, bool oracle_, bool by_h5_row_, typename Index_, typename CachedValue_>
using DenseCore = typename std::conditional<solo_,
    SoloCore<oracle_, by_h5_row_, Index_>,
    typename std::conditional<!oracle_,
        MyopicCore<by_h5_row_, Index_, CachedValue_>,
        typename std::conditional<by_h5_row_,
            OracularCoreNormal<Index_, CachedValue_>,
            OracularCoreTransposed<Index_, CachedValue_>
        >::type
    >::type
>::type;
template<bool solo_, bool oracle_, bool by_h5_row_, typename Value_, typename Index_, typename CachedValue_>
class Full : public tatami::DenseExtractor<oracle_, Value_, Index_> {
public:
    Full(
        const std::string& file_name,
        const std::string& dataset_name,
        tatami_chunked::ChunkDimensionStats<Index_> target_dim_stats,
        tatami::MaybeOracle<oracle_, Index_> oracle,
        Index_ non_target_dim,
        const tatami_chunked::SlabCacheStats<Index_>& slab_stats) :
        my_core(
            file_name,
            dataset_name,
            std::move(target_dim_stats),
            std::move(oracle),
            slab_stats
        ),
        my_non_target_dim(non_target_dim)
    {}

    const Value_* fetch(Index_ i, Value_* buffer) {
        return my_core.fetch_block(i, 0, my_non_target_dim, buffer);
    }

private:
    DenseCore<solo_, oracle_, by_h5_row_, Index_, CachedValue_> my_core;
    Index_ my_non_target_dim;
};
template<bool solo_, bool oracle_, bool by_h5_row_, typename Value_, typename Index_, typename CachedValue_>
class Block : public tatami::DenseExtractor<oracle_, Value_, Index_> {
public:
    Block(
        const std::string& file_name,
        const std::string& dataset_name,
        tatami_chunked::ChunkDimensionStats<Index_> target_dim_stats,
        tatami::MaybeOracle<oracle_, Index_> oracle,
        Index_ block_start,
        Index_ block_length,
        const tatami_chunked::SlabCacheStats<Index_>& slab_stats) :
        my_core(
            file_name,
            dataset_name,
            std::move(target_dim_stats),
            std::move(oracle),
            slab_stats
        ),
        my_block_start(block_start),
        my_block_length(block_length)
    {}

    const Value_* fetch(Index_ i, Value_* buffer) {
        return my_core.fetch_block(i, my_block_start, my_block_length, buffer);
    }

private:
    DenseCore<solo_, oracle_, by_h5_row_, Index_, CachedValue_> my_core;
    Index_ my_block_start, my_block_length;
};
template<bool solo_, bool oracle_, bool by_h5_row_, typename Value_, typename Index_, typename CachedValue_>
class Index : public tatami::DenseExtractor<oracle_, Value_, Index_> {
public:
    Index(
        const std::string& file_name,
        const std::string& dataset_name,
        tatami_chunked::ChunkDimensionStats<Index_> target_dim_stats,
        tatami::MaybeOracle<oracle_, Index_> oracle,
        tatami::VectorPtr<Index_> indices_ptr,
        const tatami_chunked::SlabCacheStats<Index_>& slab_stats) :
        my_core(
            file_name,
            dataset_name,
            std::move(target_dim_stats),
            std::move(oracle),
            slab_stats
        ),
        my_indices_ptr(std::move(indices_ptr))
    {}

    const Value_* fetch(Index_ i, Value_* buffer) {
        return my_core.fetch_indices(i, *my_indices_ptr, buffer);
    }

private:
    DenseCore<solo_, oracle_, by_h5_row_, Index_, CachedValue_> my_core;
    tatami::VectorPtr<Index_> my_indices_ptr;
};

}
// end of DenseMatrix_internal
/**
 * @brief Dense matrix backed by a DataSet in a HDF5 file.
 *
 * Values are read from the file on demand, with chunk-aligned slabs cached in memory to reduce
 * the number of calls into the HDF5 library.
 */
template<typename Value_, typename Index_, typename CachedValue_ = Value_>
class DenseMatrix : public tatami::Matrix<Value_, Index_> {
private:
    std::string my_file_name, my_dataset_name;
    bool my_transpose;

    std::size_t my_cache_size_in_elements;
    bool my_require_minimum_cache;

    tatami_chunked::ChunkDimensionStats<Index_> my_firstdim_stats, my_seconddim_stats;
    bool my_prefer_firstdim;
public:
    DenseMatrix(std::string file, std::string name, bool transpose, const DenseMatrixOptions& options) :
        my_file_name(std::move(file)),
        my_dataset_name(std::move(name)),
        my_transpose(transpose),
        my_cache_size_in_elements(options.maximum_cache_size / sizeof(CachedValue_)),
        my_require_minimum_cache(options.require_minimum_cache)
    {
        serialize([&]() -> void {
            H5::H5File fhandle(my_file_name, H5F_ACC_RDONLY);
            auto dhandle = open_and_check_dataset<false>(fhandle, my_dataset_name);
            auto dims = get_array_dimensions<2>(dhandle, my_dataset_name);

            hsize_t chunk_dims[2];
            auto dparms = dhandle.getCreatePlist();
            if (dparms.getLayout() != H5D_CHUNKED) {
                // For contiguous layouts, each row of the dataset is treated as its own "chunk".
                chunk_dims[0] = 1;
                chunk_dims[1] = dims[1];
            } else {
                dparms.getChunk(2, chunk_dims);
            }

            my_firstdim_stats = tatami_chunked::ChunkDimensionStats<Index_>(
                sanisizer::cast<Index_>(dims[0]),
                sanisizer::cast<Index_>(chunk_dims[0])
            );
            my_seconddim_stats = tatami_chunked::ChunkDimensionStats<Index_>(
                sanisizer::cast<Index_>(dims[1]),
                sanisizer::cast<Index_>(chunk_dims[1])
            );
        });

        // Prefer the dimension that touches fewer chunks per element of the other dimension,
        // i.e., extract along the first dimension if the second dimension has fewer chunks.
        my_prefer_firstdim = (my_firstdim_stats.num_chunks > my_seconddim_stats.num_chunks);
    }
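    // As an illustration of the preference heuristic: a 10000-by-100 dataset stored with 100-by-100 chunks
    // has 100 chunks along the first dimension and 1 along the second, so my_prefer_firstdim is set to true;
    // each element of the first dimension then only touches a single chunk along the second dimension,
    // whereas the reverse orientation would touch 100.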
    /**
     * Overload that uses the default `DenseMatrixOptions`.
     */
    DenseMatrix(std::string file, std::string name, bool transpose) :
        DenseMatrix(std::move(file), std::move(name), transpose, DenseMatrixOptions()) {}

private:
    bool prefer_rows_internal() const {
        if (my_transpose) {
            return !my_prefer_firstdim;
        } else {
            return my_prefer_firstdim;
        }
    }

    Index_ nrow_internal() const {
        if (my_transpose) {
            return my_seconddim_stats.dimension_extent;
        } else {
            return my_firstdim_stats.dimension_extent;
        }
    }

    Index_ ncol_internal() const {
        if (my_transpose) {
            return my_firstdim_stats.dimension_extent;
        } else {
            return my_seconddim_stats.dimension_extent;
        }
    }

public:
    Index_ nrow() const {
        return nrow_internal();
    }

    Index_ ncol() const {
        return ncol_internal();
    }

    bool prefer_rows() const {
        return prefer_rows_internal();
    }

    double prefer_rows_proportion() const {
        return static_cast<double>(prefer_rows_internal());
    }

    bool uses_oracle(bool) const {
        return true;
    }

    bool is_sparse() const {
        return false;
    }

    double is_sparse_proportion() const {
        return 0;
    }
private:
    template<bool oracle_, template<bool, bool, bool, typename, typename, typename> class Extractor_, typename ... Args_>
    std::unique_ptr<tatami::DenseExtractor<oracle_, Value_, Index_> > populate(bool row, Index_ non_target_length, tatami::MaybeOracle<oracle_, Index_> oracle, Args_&& ... args) const {
        bool by_h5_row = (row != my_transpose);
        const auto& dim_stats = (by_h5_row ? my_firstdim_stats : my_seconddim_stats);

        tatami_chunked::SlabCacheStats<Index_> slab_stats(dim_stats.chunk_length, non_target_length, dim_stats.num_chunks, my_cache_size_in_elements, my_require_minimum_cache);
        if (slab_stats.max_slabs_in_cache > 0) {
            if (by_h5_row) {
                return std::make_unique<Extractor_<false, oracle_, true, Value_, Index_, CachedValue_> >(
                    my_file_name, my_dataset_name, dim_stats, std::move(oracle), std::forward<Args_>(args)..., slab_stats
                );
            } else {
                return std::make_unique<Extractor_<false, oracle_, false, Value_, Index_, CachedValue_> >(
                    my_file_name, my_dataset_name, dim_stats, std::move(oracle), std::forward<Args_>(args)..., slab_stats
                );
            }
        } else {
            if (by_h5_row) {
                return std::make_unique<Extractor_<true, oracle_, true, Value_, Index_, CachedValue_> >(
                    my_file_name, my_dataset_name, dim_stats, std::move(oracle), std::forward<Args_>(args)..., slab_stats
                );
            } else {
                return std::make_unique<Extractor_<true, oracle_, false, Value_, Index_, CachedValue_> >(
                    my_file_name, my_dataset_name, dim_stats, std::move(oracle), std::forward<Args_>(args)..., slab_stats
                );
            }
        }
    }
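    // For instance (a sketch, assuming SlabCacheStats rounds the cache size down to whole slabs): with
    // chunk_length = 100 and non_target_length = 1000, one slab needs 100,000 cached elements; if
    // my_cache_size_in_elements is only 50,000 and my_require_minimum_cache is false, max_slabs_in_cache
    // is zero and the uncached SoloCore-based extractor is chosen. Otherwise a cached extractor is built,
    // with the third template argument recording whether the target dimension lies on the HDF5 rows.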
public:
    std::unique_ptr<tatami::MyopicDenseExtractor<Value_, Index_> > dense(bool row, const tatami::Options&) const {
        Index_ full_non_target = (row ? ncol_internal() : nrow_internal());
        return populate<false, DenseMatrix_internal::Full>(row, full_non_target, false, full_non_target);
    }

    std::unique_ptr<tatami::MyopicDenseExtractor<Value_, Index_> > dense(bool row, Index_ block_start, Index_ block_length, const tatami::Options&) const {
        return populate<false, DenseMatrix_internal::Block>(row, block_length, false, block_start, block_length);
    }

    std::unique_ptr<tatami::MyopicDenseExtractor<Value_, Index_> > dense(bool row, tatami::VectorPtr<Index_> indices_ptr, const tatami::Options&) const {
        auto nidx = indices_ptr->size();
        return populate<false, DenseMatrix_internal::Index>(row, nidx, false, std::move(indices_ptr));
    }
    std::unique_ptr<tatami::MyopicSparseExtractor<Value_, Index_> > sparse(bool row, const tatami::Options& opt) const {
        Index_ full_non_target = (row ? ncol_internal() : nrow_internal());
        return std::make_unique<tatami::FullSparsifiedWrapper<false, Value_, Index_> >(dense(row, opt), full_non_target, opt);
    }

    std::unique_ptr<tatami::MyopicSparseExtractor<Value_, Index_> > sparse(bool row, Index_ block_start, Index_ block_length, const tatami::Options& opt) const {
        return std::make_unique<tatami::BlockSparsifiedWrapper<false, Value_, Index_> >(dense(row, block_start, block_length, opt), block_start, block_length, opt);
    }

    std::unique_ptr<tatami::MyopicSparseExtractor<Value_, Index_> > sparse(bool row, tatami::VectorPtr<Index_> indices_ptr, const tatami::Options& opt) const {
        auto ptr = dense(row, indices_ptr, opt);
        return std::make_unique<tatami::IndexSparsifiedWrapper<false, Value_, Index_> >(std::move(ptr), std::move(indices_ptr), opt);
    }
    std::unique_ptr<tatami::OracularDenseExtractor<Value_, Index_> > dense(bool row, std::shared_ptr<const tatami::Oracle<Index_> > oracle, const tatami::Options&) const {
        Index_ full_non_target = (row ? ncol_internal() : nrow_internal());
        return populate<true, DenseMatrix_internal::Full>(row, full_non_target, std::move(oracle), full_non_target);
    }

    std::unique_ptr<tatami::OracularDenseExtractor<Value_, Index_> > dense(bool row, std::shared_ptr<const tatami::Oracle<Index_> > oracle, Index_ block_start, Index_ block_length, const tatami::Options&) const {
        return populate<true, DenseMatrix_internal::Block>(row, block_length, std::move(oracle), block_start, block_length);
    }

    std::unique_ptr<tatami::OracularDenseExtractor<Value_, Index_> > dense(bool row, std::shared_ptr<const tatami::Oracle<Index_> > oracle, tatami::VectorPtr<Index_> indices_ptr, const tatami::Options&) const {
        auto nidx = indices_ptr->size();
        return populate<true, DenseMatrix_internal::Index>(row, nidx, std::move(oracle), std::move(indices_ptr));
    }
    std::unique_ptr<tatami::OracularSparseExtractor<Value_, Index_> > sparse(bool row, std::shared_ptr<const tatami::Oracle<Index_> > oracle, const tatami::Options& opt) const {
        Index_ full_non_target = (row ? ncol_internal() : nrow_internal());
        return std::make_unique<tatami::FullSparsifiedWrapper<true, Value_, Index_> >(dense(row, std::move(oracle), opt), full_non_target, opt);
    }

    std::unique_ptr<tatami::OracularSparseExtractor<Value_, Index_> > sparse(bool row, std::shared_ptr<const tatami::Oracle<Index_> > oracle, Index_ block_start, Index_ block_length, const tatami::Options& opt) const {
        return std::make_unique<tatami::BlockSparsifiedWrapper<true, Value_, Index_> >(dense(row, std::move(oracle), block_start, block_length, opt), block_start, block_length, opt);
    }

    std::unique_ptr<tatami::OracularSparseExtractor<Value_, Index_> > sparse(bool row, std::shared_ptr<const tatami::Oracle<Index_> > oracle, tatami::VectorPtr<Index_> indices_ptr, const tatami::Options& opt) const {
        auto ptr = dense(row, std::move(oracle), indices_ptr, opt);
        return std::make_unique<tatami::IndexSparsifiedWrapper<true, Value_, Index_> >(std::move(ptr), std::move(indices_ptr), opt);
    }
};
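// Example usage (a sketch; assumes "data.h5" contains a two-dimensional numeric DataSet named "matrix"):
//
//     tatami_hdf5::DenseMatrix<double, int> mat("data.h5", "matrix", /* transpose = */ false);
//     auto wrk = mat.dense(/* row = */ true, tatami::Options());
//     std::vector<double> buffer(mat.ncol());
//     const double* row0 = wrk->fetch(0, buffer.data()); // reads the first row through the slab cache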
}

#endif