#ifndef TATAMI_WRITE_SPARSE_MATRIX_TO_HDF5_HPP
#define TATAMI_WRITE_SPARSE_MATRIX_TO_HDF5_HPP
inline H5::DataSet create_1d_compressed_hdf5_dataset(H5::Group& location, const H5::DataType& dtype, const std::string& name, hsize_t length, int deflate_level, hsize_t chunk) {
    H5::DataSpace dspace(1, &length);
    H5::DSetCreatPropList plist;

    if (deflate_level >= 0 && length) {
        plist.setDeflate(deflate_level);
        if (chunk > length) {
            // Cap the chunk size at the dataset length.
            plist.setChunk(1, &length);
        } else {
            plist.setChunk(1, &chunk);
        }
    }

    return location.createDataSet(name, dtype, dspace, plist);
}
inline H5::DataSet create_1d_compressed_hdf5_dataset(H5::Group& location, WriteStorageType type, const std::string& name, hsize_t length, int deflate_level, hsize_t chunk) {
    const H5::PredType* dtype;
    switch (type) {
        case WriteStorageType::INT8:
            dtype = &(H5::PredType::NATIVE_INT8);
            break;
        case WriteStorageType::UINT8:
            dtype = &(H5::PredType::NATIVE_UINT8);
            break;
        case WriteStorageType::INT16:
            dtype = &(H5::PredType::NATIVE_INT16);
            break;
        case WriteStorageType::UINT16:
            dtype = &(H5::PredType::NATIVE_UINT16);
            break;
        case WriteStorageType::INT32:
            dtype = &(H5::PredType::NATIVE_INT32);
            break;
        case WriteStorageType::UINT32:
            dtype = &(H5::PredType::NATIVE_UINT32);
            break;
        case WriteStorageType::DOUBLE:
            dtype = &(H5::PredType::NATIVE_DOUBLE);
            break;
        default:
            throw std::runtime_error("automatic HDF5 output type must be resolved before creating a HDF5 dataset");
    }

    return create_1d_compressed_hdf5_dataset(location, *dtype, name, length, deflate_level, chunk);
}
template<typename Native>
bool fits_upper_limit(int64_t max) {
    int64_t limit = std::numeric_limits<Native>::max();
    return max <= limit;
}

template<typename Native>
bool fits_lower_limit(int64_t min) {
    int64_t limit = std::numeric_limits<Native>::min();
    return min >= limit;
}
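// Illustrative checks for the helpers above: uint8_t has a maximum of 255 and int8_t a minimum of
// -128, so fits_upper_limit<uint8_t>(255) is true, fits_upper_limit<uint8_t>(256) is false, and
// fits_lower_limit<int8_t>(-129) is false.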
template<typename Value_, typename Index_>
struct WriteSparseHdf5Statistics {
    Value_ lower_data = 0;
    Value_ upper_data = 0;
    Index_ upper_index = 0;
    hsize_t non_zeros = 0;
    bool non_integer = false;

    void add_value(Value_ val) {
        if constexpr(!std::is_integral<Value_>::value) {
            if (std::trunc(val) != val || !std::isfinite(val)) {
                non_integer = true;
            }
        }

        if (val < lower_data) {
            lower_data = val;
        } else if (val > upper_data) {
            upper_data = val;
        }
    }

    void add_index(Index_ idx) {
        if (idx > upper_index) {
            upper_index = idx;
        }
    }
};
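// Illustrative behavior of the accumulator above: for a floating-point Value_, calling add_value()
// with -3, 2.5 and 7 leaves lower_data = -3, upper_data = 7 and non_integer = true (as 2.5 is not
// integral), while add_index(19) followed by add_index(4) leaves upper_index = 19.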
template<typename Value_, typename Index_>
void update_hdf5_stats(const tatami::SparseRange<Value_, Index_>& extracted, WriteSparseHdf5Statistics<Value_, Index_>& output, bool infer_value, bool infer_index) {
    output.non_zeros += extracted.number;

    if (infer_value) {
        for (Index_ i = 0; i < extracted.number; ++i) {
            output.add_value(extracted.value[i]);
        }
    }

    if (infer_index) {
        for (Index_ i = 0; i < extracted.number; ++i) {
            output.add_index(extracted.index[i]);
        }
    }
}
template<typename Value_, typename Index_>
void update_hdf5_stats(const Value_* extracted, Index_ n, WriteSparseHdf5Statistics<Value_, Index_>& output) {
    for (Index_ i = 0; i < n; ++i) {
        auto val = extracted[i];
        if (!val) {
            continue; // skip zeros, as they are not stored in the sparse output.
        }
        output.add_value(val);
        ++output.non_zeros;
    }
}
template<typename Value_, typename Index_>
WriteSparseHdf5Statistics<Value_, Index_> write_sparse_hdf5_statistics(const tatami::Matrix<Value_, Index_>* mat, bool infer_value, bool infer_index, int nthreads) {
    auto NR = mat->nrow(), NC = mat->ncol();
    std::vector<WriteSparseHdf5Statistics<Value_, Index_> > collected(nthreads);

    if (mat->sparse()) {
        tatami::Options opt;
        opt.sparse_extract_value = infer_value;
        opt.sparse_extract_index = infer_index;

        if (mat->prefer_rows()) {
            tatami::parallelize([&](size_t t, Index_ start, Index_ len) -> void {
                auto wrk = tatami::consecutive_extractor<true>(mat, true, start, len, opt);
                std::vector<Value_> xbuffer(NC);
                std::vector<Index_> ibuffer(NC);
                for (size_t r = start, end = start + len; r < end; ++r) {
                    auto extracted = wrk->fetch(r, xbuffer.data(), ibuffer.data());
                    update_hdf5_stats(extracted, collected[t], infer_value, infer_index);
                }
            }, NR, nthreads);

        } else {
            tatami::parallelize([&](size_t t, Index_ start, Index_ len) -> void {
                auto wrk = tatami::consecutive_extractor<true>(mat, false, start, len, opt);
                std::vector<Value_> xbuffer(NR);
                std::vector<Index_> ibuffer(NR);
                for (size_t c = start, end = start + len; c < end; ++c) {
                    auto extracted = wrk->fetch(c, xbuffer.data(), ibuffer.data());
                    update_hdf5_stats(extracted, collected[t], infer_value, infer_index);
                }
            }, NC, nthreads);
        }

    } else {
        if (mat->prefer_rows()) {
            tatami::parallelize([&](size_t t, Index_ start, Index_ len) -> void {
                auto wrk = tatami::consecutive_extractor<false>(mat, true, start, len);
                std::vector<Value_> xbuffer(NC);
                for (size_t r = start, end = start + len; r < end; ++r) {
                    auto extracted = wrk->fetch(r, xbuffer.data());
                    update_hdf5_stats(extracted, NC, collected[t]);
                }
            }, NR, nthreads);

        } else {
            tatami::parallelize([&](size_t t, Index_ start, Index_ len) -> void {
                auto wrk = tatami::consecutive_extractor<false>(mat, false, start, len);
                std::vector<Value_> xbuffer(NR);
                for (size_t c = start, end = start + len; c < end; ++c) {
                    auto extracted = wrk->fetch(c, xbuffer.data());
                    update_hdf5_stats(extracted, NR, collected[t]);
                }
            }, NC, nthreads);
        }
    }

    // Merge the per-thread statistics into a single summary.
    auto& first = collected.front();
    for (int i = 1; i < nthreads; ++i) {
        auto& current = collected[i];
        first.lower_data = std::min(first.lower_data, current.lower_data);
        first.upper_data = std::max(first.upper_data, current.upper_data);
        first.upper_index = std::max(first.upper_index, current.upper_index);
        first.non_zeros += current.non_zeros;
        first.non_integer = first.non_integer || current.non_integer;
    }

    return std::move(first);
}
template<typename Value_, typename Index_>
void write_compressed_sparse_matrix(const tatami::Matrix<Value_, Index_>* mat, H5::Group& location, const WriteCompressedSparseMatrixOptions& params) {
    auto data_type = params.data_type;
    auto index_type = params.index_type;

    auto use_auto_data_type = (data_type == WriteStorageType::AUTOMATIC);
    auto use_auto_index_type = (index_type == WriteStorageType::AUTOMATIC);
    auto stats = write_sparse_hdf5_statistics(mat, use_auto_data_type, use_auto_index_type, params.num_threads);

    // Resolving the automatic data type from the observed range of values.
    if (use_auto_data_type) {
        if (stats.non_integer && !params.force_integer) {
            data_type = WriteStorageType::DOUBLE;
        } else {
            auto lower_data = stats.lower_data;
            auto upper_data = stats.upper_data;
            if (lower_data < 0) {
                if (fits_lower_limit<int8_t>(lower_data) && fits_upper_limit<int8_t>(upper_data)) {
                    data_type = WriteStorageType::INT8;
                } else if (fits_lower_limit<int16_t>(lower_data) && fits_upper_limit<int16_t>(upper_data)) {
                    data_type = WriteStorageType::INT16;
                } else {
                    data_type = WriteStorageType::INT32;
                }
            } else {
                if (fits_upper_limit<uint8_t>(upper_data)) {
                    data_type = WriteStorageType::UINT8;
                } else if (fits_upper_limit<uint16_t>(upper_data)) {
                    data_type = WriteStorageType::UINT16;
                } else {
                    data_type = WriteStorageType::UINT32;
                }
            }
        }
    }
    // Resolving the automatic index type; indices are never negative.
    if (use_auto_index_type) {
        auto upper_index = stats.upper_index;
        if (fits_upper_limit<uint8_t>(upper_index)) {
            index_type = WriteStorageType::UINT8;
        } else if (fits_upper_limit<uint16_t>(upper_index)) {
            index_type = WriteStorageType::UINT16;
        } else {
            index_type = WriteStorageType::UINT32;
        }
    }

    // Resolving the automatic layout from the matrix's preferred access pattern.
    auto layout = params.columnar;
    if (layout == WriteStorageLayout::AUTOMATIC) {
        if (mat->prefer_rows()) {
            layout = WriteStorageLayout::ROW;
        } else {
            layout = WriteStorageLayout::COLUMN;
        }
    }
    // Creating the output datasets and the reusable dataspaces.
    auto non_zeros = stats.non_zeros;
    H5::DataSet data_ds = create_1d_compressed_hdf5_dataset(location, data_type, params.data_name, non_zeros, params.deflate_level, params.chunk_size);
    H5::DataSet index_ds = create_1d_compressed_hdf5_dataset(location, index_type, params.index_name, non_zeros, params.deflate_level, params.chunk_size);
    hsize_t offset = 0;
    H5::DataSpace inspace(1, &non_zeros);
    H5::DataSpace outspace(1, &non_zeros);
    const auto& dstype = define_mem_type<Value_>();
    const auto& ixtype = define_mem_type<Index_>();

    size_t NR = mat->nrow(), NC = mat->ncol();
    std::vector<hsize_t> ptrs;

    auto fill_datasets = [&](const Value_* vptr, const Index_* iptr, hsize_t count) -> void {
        if (count) {
            inspace.setExtentSimple(1, &count);
            outspace.selectHyperslab(H5S_SELECT_SET, &count, &offset);
            data_ds.write(vptr, dstype, inspace, outspace);
            index_ds.write(iptr, ixtype, inspace, outspace);
            offset += count;
        }
    };
    if (mat->sparse()) {
        if (layout == WriteStorageLayout::ROW) {
            ptrs.resize(NR + 1);
            std::vector<Value_> xbuffer(NC);
            std::vector<Index_> ibuffer(NC);

            auto wrk = tatami::consecutive_extractor<true>(mat, true, static_cast<Index_>(0), static_cast<Index_>(NR));
            for (size_t r = 0; r < NR; ++r) {
                auto extracted = wrk->fetch(r, xbuffer.data(), ibuffer.data());
                fill_datasets(extracted.value, extracted.index, extracted.number);
                ptrs[r+1] = ptrs[r] + extracted.number;
            }

        } else {
            ptrs.resize(NC + 1);
            std::vector<Value_> xbuffer(NR);
            std::vector<Index_> ibuffer(NR);

            auto wrk = tatami::consecutive_extractor<true>(mat, false, static_cast<Index_>(0), static_cast<Index_>(NC));
            for (size_t c = 0; c < NC; ++c) {
                auto extracted = wrk->fetch(c, xbuffer.data(), ibuffer.data());
                fill_datasets(extracted.value, extracted.index, extracted.number);
                ptrs[c+1] = ptrs[c] + extracted.number;
            }
        }
    } else {
        std::vector<Value_> sparse_xbuffer;
        std::vector<Index_> sparse_ibuffer;
        auto fill_datasets_from_dense = [&](const Value_* extracted, Index_ n) -> hsize_t {
            sparse_xbuffer.clear();
            sparse_ibuffer.clear();
            for (Index_ i = 0; i < n; ++i) {
                if (extracted[i]) {
                    sparse_xbuffer.push_back(extracted[i]);
                    sparse_ibuffer.push_back(i);
                }
            }

            hsize_t count = sparse_xbuffer.size();
            fill_datasets(sparse_xbuffer.data(), sparse_ibuffer.data(), count);
            return count;
        };

        if (layout == WriteStorageLayout::ROW) {
            ptrs.resize(NR + 1);
            std::vector<Value_> dbuffer(NC);
            auto wrk = tatami::consecutive_extractor<false>(mat, true, static_cast<Index_>(0), static_cast<Index_>(NR));
            for (size_t r = 0; r < NR; ++r) {
                auto extracted = wrk->fetch(r, dbuffer.data());
                auto count = fill_datasets_from_dense(extracted, NC);
                ptrs[r+1] = ptrs[r] + count;
            }

        } else {
            ptrs.resize(NC + 1);
            std::vector<Value_> dbuffer(NR);
            auto wrk = tatami::consecutive_extractor<false>(mat, false, static_cast<Index_>(0), static_cast<Index_>(NC));
            for (size_t c = 0; c < NC; ++c) {
                auto extracted = wrk->fetch(c, dbuffer.data());
                auto count = fill_datasets_from_dense(extracted, NR);
                ptrs[c+1] = ptrs[c] + count;
            }
        }
    }
    // Saving the pointers for the compressed sparse layout.
    hsize_t ptr_len = ptrs.size();
    H5::DataSet ptr_ds = create_1d_compressed_hdf5_dataset(location, H5::PredType::NATIVE_HSIZE, params.ptr_name, ptr_len, params.deflate_level, params.chunk_size);
    H5::DataSpace ptr_space(1, &ptr_len);
    ptr_ds.write(ptrs.data(), H5::PredType::NATIVE_HSIZE, ptr_space);
}
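// An illustrative usage sketch, assuming that this header's contents live in the tatami_hdf5
// namespace and that 'mat' is a pointer to an existing tatami::Matrix<double, int>:
//
//     H5::H5File fhandle("matrix.h5", H5F_ACC_TRUNC);
//     H5::Group ghandle = fhandle.createGroup("my_matrix");
//     tatami_hdf5::WriteCompressedSparseMatrixOptions params;
//     params.num_threads = 4;
//     tatami_hdf5::write_compressed_sparse_matrix(mat, ghandle, params);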