tatami
C++ API for different matrix representations
Loading...
Searching...
No Matches
convert_to_fragmented_sparse.hpp
Go to the documentation of this file.
1#ifndef TATAMI_CONVERT_TO_FRAGMENTED_SPARSE_H
2#define TATAMI_CONVERT_TO_FRAGMENTED_SPARSE_H
3
7#include "../utils/Index_to_container.hpp"
8
9#include <memory>
10#include <vector>
11#include <cstddef>
12
19namespace tatami {
20
31template<typename Value_, typename Index_>
36 FragmentedSparseContents(Index_ n) :
39 {}
48 std::vector<std::vector<Value_> > value;
49
55 std::vector<std::vector<Index_> > index;
56};
57
/**
 * @brief Options for `retrieve_fragmented_sparse_contents()`.
 */
struct RetrieveFragmentedSparseContentsOptions {
    /**
     * Number of threads to use, for parallelization via `parallelize()`.
     */
    int num_threads = 1;
};
80template<typename StoredValue_, typename StoredIndex_, typename InputValue_, typename InputIndex_>
83 bool row,
85{
86 InputIndex_ NR = matrix.nrow();
87 InputIndex_ NC = matrix.ncol();
88 InputIndex_ primary = (row ? NR : NC);
89 InputIndex_ secondary = (row ? NC : NR);
90
92 auto& store_v = output.value;
93 auto& store_i = output.index;
94
95 if (row == matrix.prefer_rows()) {
96 if (matrix.is_sparse()) {
97 parallelize([&](int, InputIndex_ start, InputIndex_ length) -> void {
98 auto wrk = consecutive_extractor<true>(matrix, row, start, length);
101
102 for (InputIndex_ p = start, pe = start + length; p < pe; ++p) {
103 auto range = wrk->fetch(buffer_v.data(), buffer_i.data());
104 auto& sv = store_v[p];
105 auto& si = store_i[p];
106 sv.reserve(range.number);
107 si.reserve(range.number);
108
109 for (InputIndex_ i = 0; i < range.number; ++i, ++range.value, ++range.index) {
110 if (*range.value) {
111 sv.push_back(*range.value);
112 si.push_back(*range.index);
113 }
114 }
115 }
116 }, primary, options.num_threads);
117
118 } else {
119 parallelize([&](int, InputIndex_ start, InputIndex_ length) -> void {
120 auto wrk = consecutive_extractor<false>(matrix, row, start, length);
122
123 // Special conversion from dense to save ourselves from having to make
124 // indices that we aren't really interested in.
125 for (InputIndex_ p = start, pe = start + length; p < pe; ++p) {
126 auto ptr = wrk->fetch(buffer_v.data());
127 auto& sv = store_v[p];
128 auto& si = store_i[p];
129
130 for (InputIndex_ s = 0; s < secondary; ++s, ++ptr) {
131 if (*ptr) {
132 sv.push_back(*ptr);
133 si.push_back(s);
134 }
135 }
136 }
137 }, primary, options.num_threads);
138 }
139
140 } else {
141 // We iterate on the matrix matrix's preferred dimension, under the
142 // assumption that it may be arbitrarily costly to extract in the
143 // non-preferred dim; it is thus cheaper to do cache-unfriendly inserts
144 // into the output buffers.
145
146 if (matrix.is_sparse()) {
147 parallelize([&](int, InputIndex_ start, InputIndex_ length) -> void {
148 auto wrk = consecutive_extractor<true>(matrix, !row, static_cast<InputIndex_>(0), secondary, start, length);
151
152 for (InputIndex_ x = 0; x < secondary; ++x) {
153 auto range = wrk->fetch(buffer_v.data(), buffer_i.data());
154 for (InputIndex_ i = 0; i < range.number; ++i, ++range.value, ++range.index) {
155 if (*range.value) {
156 store_v[*range.index].push_back(*range.value);
157 store_i[*range.index].push_back(x);
158 }
159 }
160 }
161 }, primary, options.num_threads);
162
163 } else {
164 parallelize([&](int, InputIndex_ start, InputIndex_ length) -> void {
165 auto wrk = consecutive_extractor<false>(matrix, !row, static_cast<InputIndex_>(0), secondary, start, length);
167
168 for (InputIndex_ x = 0; x < secondary; ++x) {
169 auto ptr = wrk->fetch(buffer_v.data());
170 for (InputIndex_ p = start, pe = start + length; p < pe; ++p, ++ptr) {
171 if (*ptr) {
172 store_v[p].push_back(*ptr);
173 store_i[p].push_back(x);
174 }
175 }
176 }
177 }, primary, options.num_threads);
178 }
179 }
180
181 return output;
182}
183
/**
 * @brief Options for `convert_to_fragmented_sparse()`.
 */
struct ConvertToFragmentedSparseOptions {
    /**
     * Number of threads to use, for parallelization via `parallelize()`.
     */
    int num_threads = 1;
};
209template<
210 typename Value_,
211 typename Index_,
212 typename StoredValue_ = Value_,
213 typename StoredIndex_ = Index_,
214 typename InputValue_,
215 typename InputIndex_
216>
217std::shared_ptr<Matrix<Value_, Index_> > convert_to_fragmented_sparse(
219 bool row,
221{
223 matrix,
224 row,
225 [&]{
227 ropt.num_threads = options.num_threads;
228 return ropt;
229 }()
230 );
231 return std::shared_ptr<Matrix<Value_, Index_> >(
233 Value_,
234 Index_,
235 std::vector<std::vector<StoredValue_> >,
236 std::vector<std::vector<StoredIndex_> >
237 >(
238 matrix.nrow(),
239 matrix.ncol(),
240 std::move(frag.value),
241 std::move(frag.index),
242 row,
243 []{
244 FragmentedSparseMatrixOptions fopt;
245 fopt.check = false; // no need for checks, as we guarantee correctness.
246 return fopt;
247 }()
248 )
249 );
250}
251
255// Backwards compatbility.
256template<typename Value_, typename Index_, typename StoredValue_ = Value_, typename StoredIndex_ = Index_, typename InputValue_, typename InputIndex_>
257std::shared_ptr<Matrix<Value_, Index_> > convert_to_fragmented_sparse(const Matrix<InputValue_, InputIndex_>* matrix, bool row, int threads = 1) {
259 *matrix,
260 row,
261 [&]{
262 ConvertToFragmentedSparseOptions opt;
263 opt.num_threads = threads;
264 return opt;
265 }()
266 );
267}
268
269template<typename StoredValue_, typename StoredIndex_, typename InputValue_, typename InputIndex_>
270FragmentedSparseContents<StoredValue_, StoredIndex_> retrieve_fragmented_sparse_contents(const Matrix<InputValue_, InputIndex_>* matrix, bool row, int threads = 1) {
272 *matrix,
273 row,
274 [&]{
275 RetrieveFragmentedSparseContentsOptions opt;
276 opt.num_threads = threads;
277 return opt;
278 }()
279 );
280}
281
282template <bool row_, typename StoredValue_, typename StoredIndex_, typename InputValue_, typename InputIndex_>
283FragmentedSparseContents<StoredValue_, StoredIndex_> retrieve_fragmented_sparse_contents(const Matrix<InputValue_, InputIndex_>* matrix, int threads = 1) {
285}
286
287template <bool row_, typename Value_, typename Index_, typename StoredValue_ = Value_, typename StoredIndex_ = Index_, typename InputValue_, typename InputIndex_>
288std::shared_ptr<Matrix<Value_, Index_> > convert_to_fragmented_sparse(const Matrix<InputValue_, InputIndex_>* matrix, int threads = 1) {
290}
295}
296
297#endif
Fragmented sparse matrix representation.
Fragmented sparse matrix representation.
Definition FragmentedSparseMatrix.hpp:464
Virtual class for a matrix.
Definition Matrix.hpp:59
virtual Index_ ncol() const =0
virtual Index_ nrow() const =0
virtual bool prefer_rows() const =0
virtual bool is_sparse() const =0
Templated construction of a new consecutive extractor.
Flexible representations for matrix data.
Definition Extractor.hpp:15
Container_ create_container_of_Index_size(Index_ x)
Definition Index_to_container.hpp:69
decltype(std::declval< Container_ >().size()) cast_Index_to_container_size(Index_ x)
Definition Index_to_container.hpp:54
void parallelize(Function_ fun, Index_ tasks, int threads)
Definition parallelize.hpp:42
FragmentedSparseContents< StoredValue_, StoredIndex_ > retrieve_fragmented_sparse_contents(const Matrix< InputValue_, InputIndex_ > &matrix, bool row, const RetrieveFragmentedSparseContentsOptions &options)
Definition convert_to_fragmented_sparse.hpp:81
std::shared_ptr< Matrix< Value_, Index_ > > convert_to_fragmented_sparse(const Matrix< InputValue_, InputIndex_ > &matrix, bool row, const ConvertToFragmentedSparseOptions &options)
Definition convert_to_fragmented_sparse.hpp:217
auto consecutive_extractor(const Matrix< Value_, Index_ > &matrix, bool row, Index_ iter_start, Index_ iter_length, Args_ &&... args)
Definition consecutive_extractor.hpp:35
Parallelized iteration over a tatami::Matrix.
Options for convert_to_fragmented_sparse().
Definition convert_to_fragmented_sparse.hpp:187
int num_threads
Definition convert_to_fragmented_sparse.hpp:191
Fragmented sparse contents.
Definition convert_to_fragmented_sparse.hpp:32
std::vector< std::vector< Value_ > > value
Definition convert_to_fragmented_sparse.hpp:48
std::vector< std::vector< Index_ > > index
Definition convert_to_fragmented_sparse.hpp:55
Options for retrieve_fragmented_sparse_contents().
Definition convert_to_fragmented_sparse.hpp:61
int num_threads
Definition convert_to_fragmented_sparse.hpp:65