tatami_hdf5
tatami bindings for HDF5-backed matrices
write_compressed_sparse_matrix.hpp
#ifndef TATAMI_WRITE_SPARSE_MATRIX_TO_HDF5_HPP
#define TATAMI_WRITE_SPARSE_MATRIX_TO_HDF5_HPP

#include "tatami/tatami.hpp"
#include "utils.hpp"

#include "H5Cpp.h"

#include "sanisizer/sanisizer.hpp" // assumed header path for the sanisizer:: helpers used below

#include <cstdint>
#include <string>
#include <vector>
#include <cmath>
#include <limits>
#include <type_traits> // for std::is_signed, std::make_unsigned, std::is_integral

namespace tatami_hdf5 {

// Parameters for write_compressed_sparse_matrix().
struct WriteCompressedSparseMatrixOptions {
    WriteCompressedSparseMatrixOptions() : data_name("data"), index_name("indices"), ptr_name("indptr") {}

    // Name of the 1-dimensional dataset holding the values of the non-zero elements.
    std::string data_name;

    // Name of the 1-dimensional dataset holding the indices of the non-zero elements.
    std::string index_name;

    // Name of the 1-dimensional dataset holding the pointers to the start of each row/column.
    std::string ptr_name;

    // Layout to use: by row, by column, or AUTOMATIC to follow the matrix's preferred dimension.
    WriteStorageLayout columnar = WriteStorageLayout::AUTOMATIC;

    // Storage type for the data values; AUTOMATIC picks the smallest type that fits the observed range.
    WriteStorageType data_type = WriteStorageType::AUTOMATIC;

    // Whether to store the data in an integer type even if non-integer values are detected.
    bool force_integer = false;

    // Storage type for the indices; AUTOMATIC picks the smallest type that fits the largest index.
    WriteStorageType index_type = WriteStorageType::AUTOMATIC;

    // DEFLATE compression level for each dataset.
    // (The declaration is elided in this listing; a default of 6 is assumed here.)
    int deflate_level = 6;

    // Chunk size for the compressed datasets.
    hsize_t chunk_size = sanisizer::cap<hsize_t>(100000);

    // Number of threads to use when collecting statistics from the matrix.
    int num_threads = 1;
};

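// Internal helper: creates a 1-dimensional dataset of the given length, enabling DEFLATE
// compression and chunking when a non-negative compression level is supplied. The chunk size
// is capped at the dataset length so that HDF5 does not reject an oversized chunk.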
inline H5::DataSet create_1d_compressed_hdf5_dataset(H5::Group& location, const H5::DataType& dtype, const std::string& name, hsize_t length, int deflate_level, hsize_t chunk) {
    H5::DataSpace dspace(1, &length);
    H5::DSetCreatPropList plist;

    if (deflate_level >= 0 && length) {
        plist.setDeflate(deflate_level);
        if (chunk > length) {
            plist.setChunk(1, &length);
        } else {
            plist.setChunk(1, &chunk);
        }
    }

    return location.createDataSet(name, dtype, dspace, plist);
}

inline H5::DataSet create_1d_compressed_hdf5_dataset(H5::Group& location, WriteStorageType type, const std::string& name, hsize_t length, int deflate_level, hsize_t chunk) {
    const H5::PredType* dtype;
    switch (type) {
        case WriteStorageType::INT8:
            dtype = &(H5::PredType::NATIVE_INT8);
            break;
        case WriteStorageType::UINT8:
            dtype = &(H5::PredType::NATIVE_UINT8);
            break;
        case WriteStorageType::INT16:
            dtype = &(H5::PredType::NATIVE_INT16);
            break;
        case WriteStorageType::UINT16:
            dtype = &(H5::PredType::NATIVE_UINT16);
            break;
        case WriteStorageType::INT32:
            dtype = &(H5::PredType::NATIVE_INT32);
            break;
        case WriteStorageType::UINT32:
            dtype = &(H5::PredType::NATIVE_UINT32);
            break;
        case WriteStorageType::DOUBLE:
            dtype = &(H5::PredType::NATIVE_DOUBLE);
            break;
        default:
            throw std::runtime_error("automatic HDF5 output type must be resolved before creating a HDF5 dataset");
    }
    return create_1d_compressed_hdf5_dataset(location, *dtype, name, length, deflate_level, chunk);
}

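// Internal helpers: compare values across potentially mismatched signed/unsigned types without
// tripping implicit conversion, and check whether the observed minima/maxima fit within the
// representable range of a candidate storage type.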
template<typename Left_, typename Right_>
bool is_less_than_or_equal(Left_ l, Right_ r) {
    constexpr bool lsigned = std::is_signed<Left_>::value;
    constexpr bool rsigned = std::is_signed<Right_>::value;
    if constexpr(lsigned == rsigned) {
        return l <= r;
    } else if constexpr(lsigned) {
        return l <= 0 || static_cast<typename std::make_unsigned<Left_>::type>(l) <= r;
    } else {
        return r >= 0 && l <= static_cast<typename std::make_unsigned<Right_>::type>(r);
    }
}

template<typename Native_, typename Max_>
bool fits_upper_limit(Max_ max) {
    constexpr auto native_max = std::numeric_limits<Native_>::max();
    if constexpr(std::is_integral<Max_>::value) { // Native_ is already integral, so no need to check that.
        return is_less_than_or_equal(max, native_max);
    } else {
        return max <= static_cast<double>(native_max);
    }
}

template<typename Native_, typename Min_>
bool fits_lower_limit(Min_ min) {
    constexpr auto native_min = std::numeric_limits<Native_>::min();
    if constexpr(std::is_integral<Min_>::value) {
        return is_less_than_or_equal(native_min, min);
    } else {
        return min >= static_cast<double>(native_min);
    }
}

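// Internal helper: accumulates the range of the data values, the largest index, the total number
// of non-zero elements, and whether any value is non-integer. These statistics drive the
// automatic choice of storage types and the sizing of the output datasets.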
template<typename Value_, typename Index_>
struct WriteSparseHdf5Statistics {
    Value_ lower_data = 0;
    Value_ upper_data = 0;
    Index_ upper_index = 0;
    hsize_t non_zeros = 0;
    bool non_integer = false;

    void add_value(Value_ val) {
        if constexpr(!std::is_integral<Value_>::value) {
            if (std::trunc(val) != val || !std::isfinite(val)) {
                non_integer = true;
            }
        }

        if (val < lower_data) {
            lower_data = val;
        } else if (val > upper_data) {
            upper_data = val;
        }
    }

    void add_index(Index_ idx) {
        if (idx > upper_index) {
            upper_index = idx;
        }
    }
};

template<typename Value_, typename Index_>
void update_hdf5_stats(const tatami::SparseRange<Value_, Index_>& extracted, WriteSparseHdf5Statistics<Value_, Index_>& output, bool infer_value, bool infer_index) {
    // We need to protect the addition just in case it overflows from having too many non-zero elements.
    output.non_zeros = sanisizer::sum<hsize_t>(output.non_zeros, extracted.number);

    if (infer_value) {
        for (Index_ i = 0; i < extracted.number; ++i) {
            output.add_value(extracted.value[i]);
        }
    }

    if (infer_index) {
        for (Index_ i = 0; i < extracted.number; ++i) {
            output.add_index(extracted.index[i]);
        }
    }
}

template<typename Value_, typename Index_>
void update_hdf5_stats(const Value_* extracted, Index_ n, WriteSparseHdf5Statistics<Value_, Index_>& output) {
    Index_ local_nonzero = 0;
    for (Index_ i = 0; i < n; ++i) {
        auto val = extracted[i];
        if (val == 0) {
            continue;
        }
        ++local_nonzero;
        output.add_value(val);
        output.add_index(i);
    }

    // Checking that there aren't overflows, but doing so outside of the hot loop for perf.
    output.non_zeros = sanisizer::sum<hsize_t>(output.non_zeros, local_nonzero);
}

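// Internal helper: performs a (possibly parallel) pass over the matrix, using sparse or dense
// extraction as appropriate, and then merges the per-thread statistics into a single summary.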
template<typename Value_, typename Index_>
WriteSparseHdf5Statistics<Value_, Index_> write_sparse_hdf5_statistics(const tatami::Matrix<Value_, Index_>* mat, bool infer_value, bool infer_index, int nthreads) {
    auto NR = mat->nrow(), NC = mat->ncol();
    std::vector<WriteSparseHdf5Statistics<Value_, Index_> > collected(nthreads);

    if (mat->sparse()) {
        tatami::Options opt;
        opt.sparse_extract_index = infer_index;
        opt.sparse_extract_value = infer_value;

        if (mat->prefer_rows()) {
            tatami::parallelize([&](int t, Index_ start, Index_ len) -> void {
                auto wrk = tatami::consecutive_extractor<true>(mat, true, start, len, opt);
                std::vector<Value_> xbuffer(NC);
                std::vector<Index_> ibuffer(NC);
                for (Index_ r = start, end = start + len; r < end; ++r) {
                    auto extracted = wrk->fetch(r, xbuffer.data(), ibuffer.data());
                    update_hdf5_stats(extracted, collected[t], infer_value, infer_index);
                }
            }, NR, nthreads);

        } else {
            tatami::parallelize([&](int t, Index_ start, Index_ len) -> void {
                auto wrk = tatami::consecutive_extractor<true>(mat, false, start, len, opt);
                std::vector<Value_> xbuffer(NR);
                std::vector<Index_> ibuffer(NR);
                for (Index_ c = start, end = start + len; c < end; ++c) {
                    auto extracted = wrk->fetch(c, xbuffer.data(), ibuffer.data());
                    update_hdf5_stats(extracted, collected[t], infer_value, infer_index);
                }
            }, NC, nthreads);
        }

    } else {
        if (mat->prefer_rows()) {
            tatami::parallelize([&](int t, Index_ start, Index_ len) -> void {
                auto wrk = tatami::consecutive_extractor<false>(mat, true, start, len);
                std::vector<Value_> xbuffer(NC);
                for (Index_ r = start, end = start + len; r < end; ++r) {
                    auto extracted = wrk->fetch(r, xbuffer.data());
                    update_hdf5_stats(extracted, NC, collected[t]);
                }
            }, NR, nthreads);

        } else {
            tatami::parallelize([&](int t, Index_ start, Index_ len) -> void {
                auto wrk = tatami::consecutive_extractor<false>(mat, false, start, len);
                std::vector<Value_> xbuffer(NR);
                for (Index_ c = start, end = start + len; c < end; ++c) {
                    auto extracted = wrk->fetch(c, xbuffer.data());
                    update_hdf5_stats(extracted, NR, collected[t]);
                }
            }, NC, nthreads);
        }
    }

    auto& first = collected.front();
    for (int i = 1; i < nthreads; ++i) {
        auto& current = collected[i];
        first.lower_data = std::min(first.lower_data, current.lower_data);
        first.upper_data = std::max(first.upper_data, current.upper_data);
        first.upper_index = std::max(first.upper_index, current.upper_index);
        first.non_zeros = sanisizer::sum<hsize_t>(first.non_zeros, current.non_zeros);
        first.non_integer = first.non_integer || current.non_integer;
    }

    return std::move(first); // better be at least one thread.
}
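
// Writes the matrix to the HDF5 group as three 1-dimensional datasets (values, indices and
// pointers) in compressed sparse row or column layout, choosing the layout and storage types
// automatically when the corresponding options are left as AUTOMATIC.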
template<typename Value_, typename Index_>
void write_compressed_sparse_matrix(const tatami::Matrix<Value_, Index_>* mat, H5::Group& location, const WriteCompressedSparseMatrixOptions& params) {
    auto data_type = params.data_type;
    auto index_type = params.index_type;
    auto use_auto_data_type = (data_type == WriteStorageType::AUTOMATIC);
    auto use_auto_index_type = (index_type == WriteStorageType::AUTOMATIC);
    auto stats = write_sparse_hdf5_statistics(mat, use_auto_data_type, use_auto_index_type, params.num_threads);

    // Choosing the types.
    if (use_auto_data_type) {
        if (stats.non_integer && !params.force_integer) {
            data_type = WriteStorageType::DOUBLE;
        } else {
            auto lower_data = stats.lower_data;
            auto upper_data = stats.upper_data;
            if (lower_data < 0) {
                if (fits_lower_limit<std::int8_t>(lower_data) && fits_upper_limit<std::int8_t>(upper_data)) {
                    data_type = WriteStorageType::INT8;
                } else if (fits_lower_limit<std::int16_t>(lower_data) && fits_upper_limit<std::int16_t>(upper_data)) {
                    data_type = WriteStorageType::INT16;
                } else {
                    data_type = WriteStorageType::INT32;
                }
            } else {
                if (fits_upper_limit<std::uint8_t>(upper_data)) {
                    data_type = WriteStorageType::UINT8;
                } else if (fits_upper_limit<std::uint16_t>(upper_data)) {
                    data_type = WriteStorageType::UINT16;
                } else {
                    data_type = WriteStorageType::UINT32;
                }
            }
        }
    }

    if (use_auto_index_type) {
        auto upper_index = stats.upper_index;
        if (fits_upper_limit<std::uint8_t>(upper_index)) {
            index_type = WriteStorageType::UINT8;
        } else if (fits_upper_limit<std::uint16_t>(upper_index)) {
            index_type = WriteStorageType::UINT16;
        } else {
            index_type = WriteStorageType::UINT32;
        }
    }

    // Choosing the layout.
    auto layout = params.columnar;
    if (layout == WriteStorageLayout::AUTOMATIC) {
        if (mat->prefer_rows()) {
            layout = WriteStorageLayout::ROW;
        } else {
            layout = WriteStorageLayout::COLUMN;
        }
    }

    // And then saving it. This time we have no choice but to iterate by the desired dimension.
    auto non_zeros = stats.non_zeros;
    H5::DataSet data_ds = create_1d_compressed_hdf5_dataset(location, data_type, params.data_name, non_zeros, params.deflate_level, params.chunk_size);
    H5::DataSet index_ds = create_1d_compressed_hdf5_dataset(location, index_type, params.index_name, non_zeros, params.deflate_level, params.chunk_size);
    hsize_t offset = 0;
    H5::DataSpace inspace(1, &non_zeros);
    H5::DataSpace outspace(1, &non_zeros);
    const auto& dstype = define_mem_type<Value_>();
    const auto& ixtype = define_mem_type<Index_>();

    Index_ NR = mat->nrow(), NC = mat->ncol();
    std::vector<hsize_t> ptrs;

    auto fill_datasets = [&](const Value_* vptr, const Index_* iptr, hsize_t count) -> void {
        if (count) {
            inspace.setExtentSimple(1, &count);
            outspace.selectHyperslab(H5S_SELECT_SET, &count, &offset);
            data_ds.write(vptr, dstype, inspace, outspace);
            index_ds.write(iptr, ixtype, inspace, outspace);
            offset += count;
        }
    };

    if (mat->sparse()) {
        if (layout == WriteStorageLayout::ROW) {
            ptrs.resize(sanisizer::sum<decltype(ptrs.size())>(NR, 1));
            // The extraction buffer declarations are elided in this listing; plain vectors of
            // the relevant dimension extent are assumed here.
            std::vector<Value_> xbuffer(NC);
            std::vector<Index_> ibuffer(NC);

            auto wrk = tatami::consecutive_extractor<true>(mat, true, static_cast<Index_>(0), NR);
            for (Index_ r = 0; r < NR; ++r) {
                auto extracted = wrk->fetch(r, xbuffer.data(), ibuffer.data());
                fill_datasets(extracted.value, extracted.index, extracted.number);
                ptrs[r+1] = ptrs[r] + extracted.number;
            }

        } else {
            ptrs.resize(sanisizer::sum<decltype(ptrs.size())>(NC, 1));
            // As above, assumed declarations for the elided buffer lines.
            std::vector<Value_> xbuffer(NR);
            std::vector<Index_> ibuffer(NR);

            auto wrk = tatami::consecutive_extractor<true>(mat, false, static_cast<Index_>(0), NC);
            for (Index_ c = 0; c < NC; ++c) {
                auto extracted = wrk->fetch(c, xbuffer.data(), ibuffer.data());
                fill_datasets(extracted.value, extracted.index, extracted.number);
                ptrs[c+1] = ptrs[c] + extracted.number;
            }
        }

    } else {
        std::vector<Value_> sparse_xbuffer;
        std::vector<Index_> sparse_ibuffer;
        auto fill_datasets_from_dense = [&](const Value_* extracted, Index_ n) -> hsize_t {
            sparse_xbuffer.clear();
            sparse_ibuffer.clear();
            for (Index_ i = 0; i < n; ++i) {
                if (extracted[i]) {
                    sparse_xbuffer.push_back(extracted[i]);
                    sparse_ibuffer.push_back(i);
                }
            }

            hsize_t count = sparse_xbuffer.size();
            fill_datasets(sparse_xbuffer.data(), sparse_ibuffer.data(), count);
            return count;
        };

        if (layout == WriteStorageLayout::ROW) {
            ptrs.resize(sanisizer::sum<decltype(ptrs.size())>(NR, 1));
            // Assumed declaration for the elided buffer line.
            std::vector<Value_> dbuffer(NC);
            auto wrk = tatami::consecutive_extractor<false>(mat, true, static_cast<Index_>(0), NR);
            for (Index_ r = 0; r < NR; ++r) {
                auto extracted = wrk->fetch(r, dbuffer.data());
                auto count = fill_datasets_from_dense(extracted, NC);
                ptrs[r+1] = ptrs[r] + count;
            }

        } else {
            ptrs.resize(sanisizer::sum<decltype(ptrs.size())>(NC, 1));
            // Assumed declaration for the elided buffer line.
            std::vector<Value_> dbuffer(NR);
            auto wrk = tatami::consecutive_extractor<false>(mat, false, static_cast<Index_>(0), NC);
            for (Index_ c = 0; c < NC; ++c) {
                auto extracted = wrk->fetch(c, dbuffer.data());
                auto count = fill_datasets_from_dense(extracted, NR);
                ptrs[c+1] = ptrs[c] + count;
            }
        }
    }

    // Saving the pointers.
    auto ptr_len = sanisizer::cast<hsize_t>(ptrs.size());
    H5::DataSet ptr_ds = create_1d_compressed_hdf5_dataset(location, H5::PredType::NATIVE_HSIZE, params.ptr_name, ptr_len, params.deflate_level, params.chunk_size);
    H5::DataSpace ptr_space(1, &ptr_len);
    ptr_ds.write(ptrs.data(), H5::PredType::NATIVE_HSIZE, ptr_space);

    return;
}

template<typename Value_, typename Index_>
void write_compressed_sparse_matrix(const tatami::Matrix<Value_, Index_>* mat, H5::Group& location) {
    // Convenience overload with default options; its signature and local declaration are elided
    // in this listing and are reconstructed here from the forwarding call below.
    WriteCompressedSparseMatrixOptions params;
    write_compressed_sparse_matrix(mat, location, params);
    return;
}

}

#endif
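
As a quick orientation for readers of this listing, the sketch below shows one way the writer might be called. It is not part of the header: the include path, the use of tatami::DenseRowMatrix as an example input, and the file and group names are illustrative assumptions; any tatami::Matrix and any writable H5::Group should work the same way.

#include "tatami/tatami.hpp"
#include "tatami_hdf5/write_compressed_sparse_matrix.hpp" // assumed install path for this header
#include "H5Cpp.h"
#include <vector>
#include <utility>

int main() {
    // Mock up a small input; constructor signature (nrow, ncol, values) is assumed for DenseRowMatrix.
    std::vector<double> values(10 * 20);
    values[3] = 1.5;
    values[42] = -2;
    tatami::DenseRowMatrix<double, int> mat(10, 20, std::move(values));

    // Create the output file and a group to hold the three datasets.
    H5::H5File handle("output.h5", H5F_ACC_TRUNC);
    H5::Group group = handle.createGroup("matrix");

    // With everything left as AUTOMATIC, the layout and storage types are chosen from the data.
    tatami_hdf5::WriteCompressedSparseMatrixOptions opt;
    opt.num_threads = 2;
    tatami_hdf5::write_compressed_sparse_matrix(&mat, group, opt);

    return 0;
}

After this runs, the group contains the three datasets named in the options ("data", "indices" and "indptr" by default), holding the non-zero values, their indices and the per-row or per-column pointers.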