tatami_mult
Multiply tatami matrices
Loading...
Searching...
No Matches
tatami_mult.hpp
Go to the documentation of this file.
1#ifndef TATAMI_MULT_HPP
2#define TATAMI_MULT_HPP
3
4#include "tatami/tatami.hpp"
5
6#include "dense_row.hpp"
7#include "sparse_row.hpp"
8#include "dense_column.hpp"
9#include "sparse_column.hpp"
10
20namespace tatami_mult {
21
/**
 * @brief Options for `multiply()`.
 */
struct Options {
    /**
     * Number of threads to use for the multiplication, passed through to the
     * underlying row/column kernels.
     */
    int num_threads = 1;

    /**
     * In the matrix-matrix `multiply()` overload, whether to prefer iterating
     * over the larger of the two operands (via a delayed transposition of the
     * product) when `left.nrow() < right.ncol()`.
     */
    bool prefer_larger = true;

    /**
     * Whether the output of the matrix-matrix `multiply()` overload should be
     * stored in column-major order; if `false`, row-major order is used.
     *
     * NOTE(review): this member is read by `multiply()` as
     * `opt.column_major_output` but was missing from the extracted struct
     * definition (embedded line numbering jumps 39 -> 46); restored here.
     * The default of `true` is assumed from the upstream library — confirm.
     */
    bool column_major_output = true;
};
47
63template<typename Value_, typename Index_, typename Right_, typename Output_>
64void multiply(const tatami::Matrix<Value_, Index_>& left, const Right_* right, Output_* output, const Options& opt) {
65 if (left.sparse()) {
66 if (left.prefer_rows()) {
67 internal::sparse_row_vector(left, right, output, opt.num_threads);
68 } else {
69 internal::sparse_column_vector(left, right, output, opt.num_threads);
70 }
71 } else {
72 if (left.prefer_rows()) {
73 internal::dense_row_vector(left, right, output, opt.num_threads);
74 } else {
75 internal::dense_column_vector(left, right, output, opt.num_threads);
76 }
77 }
78}
79
95template<typename Left_, typename Value_, typename Index_, typename Output_>
96void multiply(const Left_* left, const tatami::Matrix<Value_, Index_>& right, Output_* output, const Options& opt) {
98 if (tright->sparse()) {
99 if (tright->prefer_rows()) {
100 internal::sparse_row_vector(*tright, left, output, opt.num_threads);
101 } else {
102 internal::sparse_column_vector(*tright, left, output, opt.num_threads);
103 }
104 } else {
105 if (tright->prefer_rows()) {
106 internal::dense_row_vector(*tright, left, output, opt.num_threads);
107 } else {
108 internal::dense_column_vector(*tright, left, output, opt.num_threads);
109 }
110 }
111}
112
128template<typename Value_, typename Index_, typename Right_, typename Output_>
129void multiply(const tatami::Matrix<Value_, Index_>& left, const std::vector<Right_*>& right, const std::vector<Output_*>& output, const Options& opt) {
130 if (left.sparse()) {
131 if (left.prefer_rows()) {
132 internal::sparse_row_vectors(left, right, output, opt.num_threads);
133 } else {
134 internal::sparse_column_vectors(left, right, output, opt.num_threads);
135 }
136 } else {
137 if (left.prefer_rows()) {
138 internal::dense_row_vectors(left, right, output, opt.num_threads);
139 } else {
140 internal::dense_column_vectors(left, right, output, opt.num_threads);
141 }
142 }
143}
144
160template<typename Left_, typename Value_, typename Index_, typename Output_>
161void multiply(const std::vector<Left_*>& left, const tatami::Matrix<Value_, Index_>& right, const std::vector<Output_*>& output, const Options& opt) {
163 if (tright->sparse()) {
164 if (tright->prefer_rows()) {
165 internal::sparse_row_vectors(*tright, left, output, opt.num_threads);
166 } else {
167 internal::sparse_column_vectors(*tright, left, output, opt.num_threads);
168 }
169 } else {
170 if (tright->prefer_rows()) {
171 internal::dense_row_vectors(*tright, left, output, opt.num_threads);
172 } else {
173 internal::dense_column_vectors(*tright, left, output, opt.num_threads);
174 }
175 }
176}
177
181namespace internal {
182
183template<typename LeftValue_, typename LeftIndex_, typename RightValue_, typename RightIndex_, typename Output_>
184void multiply(const tatami::Matrix<LeftValue_, LeftIndex_>& left, const tatami::Matrix<RightValue_, RightIndex_>& right, Output_* output, bool column_major_out, int num_threads) {
185 size_t row_shift, col_shift;
186 if (column_major_out) {
187 row_shift = 1;
188 col_shift = left.nrow();
189 } else {
190 row_shift = right.ncol();
191 col_shift = 1;
192 }
193
194 if (left.sparse()) {
195 if (left.prefer_rows()) {
196 if (right.sparse()) {
197 internal::sparse_row_tatami_sparse(left, right, output, row_shift, col_shift, num_threads);
198 } else {
199 internal::sparse_row_tatami_dense(left, right, output, row_shift, col_shift, num_threads);
200 }
201 } else {
202 if (right.sparse()) {
203 internal::sparse_column_tatami_sparse(left, right, output, row_shift, col_shift, num_threads);
204 } else {
205 internal::sparse_column_tatami_dense(left, right, output, row_shift, col_shift, num_threads);
206 }
207 }
208 } else {
209 if (left.prefer_rows()) {
210 if (right.sparse()) {
211 internal::dense_row_tatami_sparse(left, right, output, row_shift, col_shift, num_threads);
212 } else {
213 internal::dense_row_tatami_dense(left, right, output, row_shift, col_shift, num_threads);
214 }
215 } else {
216 if (right.sparse()) {
217 internal::dense_column_tatami_sparse(left, right, output, row_shift, col_shift, num_threads);
218 } else {
219 internal::dense_column_tatami_dense(left, right, output, row_shift, col_shift, num_threads);
220 }
221 }
222 }
223}
224
225}
246template<typename LeftValue_, typename LeftIndex_, typename RightValue_, typename RightIndex_, typename Output_>
247void multiply(const tatami::Matrix<LeftValue_, LeftIndex_>& left, const tatami::Matrix<RightValue_, RightIndex_>& right, Output_* output, const Options& opt) {
248 if (opt.prefer_larger) {
249 if (left.nrow() < right.ncol()) {
252 internal::multiply(*tright, *tleft, output, !opt.column_major_output, opt.num_threads);
253 return;
254 }
255 }
256
257 internal::multiply(left, right, output, opt.column_major_output, opt.num_threads);
258}
259
260}
261
262#endif
virtual Index_ ncol() const=0
virtual Index_ nrow() const=0
virtual bool prefer_rows() const=0
virtual std::unique_ptr< MyopicSparseExtractor< Value_, Index_ > > sparse(bool row, const Options &opt) const=0
Multiplication of tatami matrices.
void multiply(const tatami::Matrix< Value_, Index_ > &left, const Right_ *right, Output_ *output, const Options &opt)
Definition tatami_mult.hpp:64
std::shared_ptr< const Matrix< Value_, Index_ > > wrap_shared_ptr(const Matrix< Value_, Index_ > *ptr)
std::shared_ptr< Matrix< Value_, Index_ > > make_DelayedTranspose(std::shared_ptr< const Matrix< Value_, Index_ > > matrix)
Options for multiply().
Definition tatami_mult.hpp:25
bool prefer_larger
Definition tatami_mult.hpp:38
int num_threads
Definition tatami_mult.hpp:29
bool column_major_output
Definition tatami_mult.hpp:45