IgANets - Isogeometric Analysis Networks
blocktensor.hpp
1
15#pragma once
16
17#include <array>
18#include <exception>
19#include <initializer_list>
20#include <memory>
21#include <type_traits>
22
23#include <core.hpp>
24#include <utils/fqn.hpp>
25
26namespace iganet {
27namespace utils {
28
31template <typename T> struct is_shared_ptr : std::false_type {};
32
33template <typename T>
34struct is_shared_ptr<std::shared_ptr<T>> : std::true_type {};
36
38template <typename T> inline auto make_shared(T &&arg) {
39 if constexpr (is_shared_ptr<typename std::decay<T>::type>::value)
40 return std::forward<typename std::decay<T>::type>(arg);
41 else
42 return std::make_shared<typename std::decay<T>::type>(std::forward<T>(arg));
43}
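// --- Illustrative usage sketch (editorial addition, not part of the header) ---
// make_shared() either forwards an object that already is a std::shared_ptr or
// wraps a plain value into a new std::shared_ptr.  Hypothetical example with
// T = torch::Tensor:
//
//   auto p = std::make_shared<torch::Tensor>(torch::ones({2, 2}));
//   auto q = iganet::utils::make_shared(std::move(p));        // passed through
//   auto r = iganet::utils::make_shared(torch::ones({2, 2})); // newly wrapped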
44
46template <typename T, std::size_t... Dims> class BlockTensor;
47
49template <typename T, std::size_t... Dims>
50class BlockTensorCore : public utils::FullQualifiedName {
52protected:
54 std::array<std::shared_ptr<T>, (Dims * ...)> data_;
55
56public:
58 BlockTensorCore() = default;
59
61 template <typename... Ts, std::size_t... dims>
62 BlockTensorCore(BlockTensorCore<Ts, dims...> &&...other) {
63 auto it = data_.begin();
64 (std::transform(other.data().begin(), other.data().end(), it,
65 [&it](auto &&d) {
66 it++;
67 return std::move(d);
68 }),
69 ...);
70 }
71
73 template <typename... Ts, std::size_t... dims>
74 BlockTensorCore(BlockTensor<Ts, dims...> &&...other) {
75 auto it = data_.begin();
76 (std::transform(other.data().begin(), other.data().end(), it,
77 [&it](auto &&d) {
78 it++;
79 return std::move(d);
80 }),
81 ...);
82 }
83
85 template <typename... Ts>
86 explicit BlockTensorCore(Ts &&...data)
87 : data_({make_shared<Ts>(std::forward<Ts>(data))...}) {}
88
90 inline static constexpr auto dims() {
91 return std::array<std::size_t, sizeof...(Dims)>({Dims...});
92 }
93
95 template <std::size_t i> inline static constexpr std::size_t dim() {
96 if constexpr (i < sizeof...(Dims))
97 return std::get<i>(std::forward_as_tuple(Dims...));
98 else
99 return 0;
100 }
101
103 inline static constexpr std::size_t size() { return sizeof...(Dims); }
104
106 inline static constexpr std::size_t entries() { return (Dims * ...); }
107
109 inline const std::array<std::shared_ptr<T>, (Dims * ...)> &data() const {
110 return data_;
111 }
112
114 inline std::array<std::shared_ptr<T>, (Dims * ...)> &data() { return data_; }
115
117 inline const std::shared_ptr<T> &operator[](std::size_t idx) const {
118 assert(0 <= idx && idx < (Dims * ...));
119 return data_[idx];
120 }
121
123 inline std::shared_ptr<T> &operator[](std::size_t idx) {
124 assert(0 <= idx && idx < (Dims * ...));
125 return data_[idx];
126 }
127
129 inline const T &operator()(std::size_t idx) const {
130 assert(0 <= idx && idx < (Dims * ...));
131 return *data_[idx];
132 }
133
135 inline T &operator()(std::size_t idx) {
136 assert(0 <= idx && idx < (Dims * ...));
137 return *data_[idx];
138 }
139
141 template <typename Data> inline T &set(std::size_t idx, Data &&data) {
142 assert(0 <= idx && idx < (Dims * ...));
143 data_[idx] = make_shared<Data>(std::forward<Data>(data));
144 return *data_[idx];
145 }
146
148 inline virtual void
149 pretty_print(std::ostream &os = Log(log::info)) const noexcept = 0;
150};
151
153template <typename T, std::size_t... Dims>
154inline std::ostream &operator<<(std::ostream &os,
155 const BlockTensorCore<T, Dims...> &obj) {
156 obj.pretty_print(os);
157 return os;
158}
159
161template <typename T, std::size_t Rows>
162class BlockTensor<T, Rows> : public BlockTensorCore<T, Rows> {
163private:
165 using Base = BlockTensorCore<T, Rows>;
166public:
167 using BlockTensorCore<T, Rows>::BlockTensorCore;
168
170 inline static constexpr std::size_t rows() { return Rows; }
171
173 inline virtual void
174 pretty_print(std::ostream &os = Log(log::info)) const noexcept override {
175 os << Base::name() << "\n";
176 for (std::size_t row = 0; row < Rows; ++row)
177 os << "[" << row << "] = \n" << *Base::data_[row] << "\n";
178 }
179};
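// --- Illustrative usage sketch (editorial addition, not part of the header) ---
// A rank-1 block tensor behaves like a fixed-size block vector whose entries
// are shared torch::Tensor objects.  Hypothetical example:
//
//   iganet::utils::BlockTensor<torch::Tensor, 2> v(torch::ones({3}),
//                                                  torch::zeros({3}));
//   v(1) += 2.0;     // mutate the second block in place
//   std::cout << v;  // pretty-prints "[0] = ..." and "[1] = ..."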
180
186template <typename T, std::size_t Rows, std::size_t Cols>
187class BlockTensor<T, Rows, Cols> : public BlockTensorCore<T, Rows, Cols> {
188private:
190 using Base = BlockTensorCore<T, Rows, Cols>;
191public:
192 using BlockTensorCore<T, Rows, Cols>::BlockTensorCore;
193
195 inline static constexpr std::size_t rows() { return Rows; }
196
198 inline static constexpr std::size_t cols() { return Cols; }
199
200 using Base::operator();
201
203 inline const T &operator()(std::size_t row, std::size_t col) const {
204 assert(0 <= row && row < Rows && 0 <= col && col < Cols);
205 return *Base::data_[Cols * row + col];
206 }
207
209 inline T &operator()(std::size_t row, std::size_t col) {
210 assert(0 <= row && row < Rows && 0 <= col && col < Cols);
211 return *Base::data_[Cols * row + col];
212 }
213
214 using Base::set;
215
217 template <typename D>
218 inline T &set(std::size_t row, std::size_t col, D &&data) {
219 assert(0 <= row && row < Rows && 0 <= col && col < Cols);
220 Base::data_[Cols * row + col] = make_shared<D>(std::forward<D>(data));
221 return *Base::data_[Cols * row + col];
222 }
223
225 inline auto tr() const {
226 BlockTensor<T, Cols, Rows> result;
227 for (std::size_t row = 0; row < Rows; ++row)
228 for (std::size_t col = 0; col < Cols; ++col)
229 result[Rows * col + row] = Base::data_[Cols * row + col];
230 return result;
231 }
232
236
237 inline auto det() const {
238 if constexpr (Rows == 1 && Cols == 1) {
239 auto result = *Base::data_[0];
240 return result;
241 } else if constexpr (Rows == 2 && Cols == 2) {
242 auto result = torch::mul(*Base::data_[0], *Base::data_[3]) -
243 torch::mul(*Base::data_[1], *Base::data_[2]);
244 return result;
245 } else if constexpr (Rows == 3 && Cols == 3) {
246 auto result =
247 torch::mul(*Base::data_[0],
248 torch::mul(*Base::data_[4], *Base::data_[8]) -
249 torch::mul(*Base::data_[5], *Base::data_[7])) -
250 torch::mul(*Base::data_[1],
251 torch::mul(*Base::data_[3], *Base::data_[8]) -
252 torch::mul(*Base::data_[5], *Base::data_[6])) +
253 torch::mul(*Base::data_[2],
254 torch::mul(*Base::data_[3], *Base::data_[7]) -
255 torch::mul(*Base::data_[4], *Base::data_[6]));
256 return result;
257 } else if constexpr (Rows == 4 && Cols == 4) {
258 auto a11 = torch::mul(*Base::data_[5],
259 (torch::mul(*Base::data_[10], *Base::data_[15]) -
260 torch::mul(*Base::data_[11], *Base::data_[14]))) -
261 torch::mul(*Base::data_[9],
262 (torch::mul(*Base::data_[6], *Base::data_[15]) -
263 torch::mul(*Base::data_[7], *Base::data_[14]))) -
264 torch::mul(*Base::data_[13],
265 (torch::mul(*Base::data_[7], *Base::data_[10]) -
266 torch::mul(*Base::data_[6], *Base::data_[11])));
267
268 auto a21 = torch::mul(*Base::data_[4],
269 (torch::mul(*Base::data_[11], *Base::data_[14]) -
270 torch::mul(*Base::data_[10], *Base::data_[15]))) -
271 torch::mul(*Base::data_[8],
272 (torch::mul(*Base::data_[7], *Base::data_[14]) -
273 torch::mul(*Base::data_[6], *Base::data_[15]))) -
274 torch::mul(*Base::data_[12],
275 (torch::mul(*Base::data_[6], *Base::data_[11]) -
276 torch::mul(*Base::data_[7], *Base::data_[10])));
277
278 auto a31 = torch::mul(*Base::data_[4],
279 (torch::mul(*Base::data_[9], *Base::data_[15]) -
280 torch::mul(*Base::data_[11], *Base::data_[13]))) -
281 torch::mul(*Base::data_[8],
282 (torch::mul(*Base::data_[5], *Base::data_[15]) -
283 torch::mul(*Base::data_[7], *Base::data_[13]))) -
284 torch::mul(*Base::data_[12],
285 (torch::mul(*Base::data_[7], *Base::data_[9]) -
286 torch::mul(*Base::data_[5], *Base::data_[11])));
287
288 auto a41 = torch::mul(*Base::data_[4],
289 (torch::mul(*Base::data_[10], *Base::data_[13]) -
290 torch::mul(*Base::data_[9], *Base::data_[14]))) -
291 torch::mul(*Base::data_[8],
292 (torch::mul(*Base::data_[6], *Base::data_[13]) -
293 torch::mul(*Base::data_[5], *Base::data_[14]))) -
294 torch::mul(*Base::data_[12],
295 (torch::mul(*Base::data_[5], *Base::data_[10]) -
296 torch::mul(*Base::data_[6], *Base::data_[9])));
297
298 auto result =
299 torch::mul(*Base::data_[0], a11) + torch::mul(*Base::data_[1], a21) +
300 torch::mul(*Base::data_[2], a31) + torch::mul(*Base::data_[3], a41);
301
302 return result;
303 } else {
304 throw std::runtime_error("Unsupported block tensor dimension");
305 return *this;
306 }
307 }
308
312 inline auto inv() const {
313
314 auto det_ = this->det();
315
316 if constexpr (Rows == 1 && Cols == 1) {
317 BlockTensor<T, Rows, Cols> result;
318 result[0] = std::make_shared<T>(torch::reciprocal(*Base::data_[0]));
319 return result;
320 } else if constexpr (Rows == 2 && Cols == 2) {
321
322 BlockTensor<T, Rows, Cols> result;
323 result[0] = std::make_shared<T>(torch::div(*Base::data_[3], det_));
324 result[1] = std::make_shared<T>(torch::div(*Base::data_[2], -det_));
325 result[2] = std::make_shared<T>(torch::div(*Base::data_[1], -det_));
326 result[3] = std::make_shared<T>(torch::div(*Base::data_[0], det_));
327 return result;
328 } else if constexpr (Rows == 3 && Cols == 3) {
329
330 auto a11 = torch::mul(*Base::data_[4], *Base::data_[8]) -
331 torch::mul(*Base::data_[5], *Base::data_[7]);
332 auto a12 = torch::mul(*Base::data_[2], *Base::data_[7]) -
333 torch::mul(*Base::data_[1], *Base::data_[8]);
334 auto a13 = torch::mul(*Base::data_[1], *Base::data_[5]) -
335 torch::mul(*Base::data_[2], *Base::data_[4]);
336 auto a21 = torch::mul(*Base::data_[5], *Base::data_[6]) -
337 torch::mul(*Base::data_[3], *Base::data_[8]);
338 auto a22 = torch::mul(*Base::data_[0], *Base::data_[8]) -
339 torch::mul(*Base::data_[2], *Base::data_[6]);
340 auto a23 = torch::mul(*Base::data_[2], *Base::data_[3]) -
341 torch::mul(*Base::data_[0], *Base::data_[5]);
342 auto a31 = torch::mul(*Base::data_[3], *Base::data_[7]) -
343 torch::mul(*Base::data_[4], *Base::data_[6]);
344 auto a32 = torch::mul(*Base::data_[1], *Base::data_[6]) -
345 torch::mul(*Base::data_[0], *Base::data_[7]);
346 auto a33 = torch::mul(*Base::data_[0], *Base::data_[4]) -
347 torch::mul(*Base::data_[1], *Base::data_[3]);
348
349 BlockTensor<T, Rows, Cols> result;
350 result[0] = std::make_shared<T>(torch::div(a11, det_));
351 result[1] = std::make_shared<T>(torch::div(a12, det_));
352 result[2] = std::make_shared<T>(torch::div(a13, det_));
353 result[3] = std::make_shared<T>(torch::div(a21, det_));
354 result[4] = std::make_shared<T>(torch::div(a22, det_));
355 result[5] = std::make_shared<T>(torch::div(a23, det_));
356 result[6] = std::make_shared<T>(torch::div(a31, det_));
357 result[7] = std::make_shared<T>(torch::div(a32, det_));
358 result[8] = std::make_shared<T>(torch::div(a33, det_));
359 return result;
360 } else if constexpr (Rows == 4 && Cols == 4) {
361 auto a11 = torch::mul(*Base::data_[5],
362 (torch::mul(*Base::data_[10], *Base::data_[15]) -
363 torch::mul(*Base::data_[11], *Base::data_[14]))) -
364 torch::mul(*Base::data_[9],
365 (torch::mul(*Base::data_[6], *Base::data_[15]) -
366 torch::mul(*Base::data_[7], *Base::data_[14]))) -
367 torch::mul(*Base::data_[13],
368 (torch::mul(*Base::data_[7], *Base::data_[10]) -
369 torch::mul(*Base::data_[6], *Base::data_[11])));
370
371 auto a12 = torch::mul(*Base::data_[1],
372 (torch::mul(*Base::data_[11], *Base::data_[14]) -
373 torch::mul(*Base::data_[10], *Base::data_[15]))) -
374 torch::mul(*Base::data_[9],
375 (torch::mul(*Base::data_[3], *Base::data_[14]) -
376 torch::mul(*Base::data_[2], *Base::data_[15]))) -
377 torch::mul(*Base::data_[13],
378 (torch::mul(*Base::data_[2], *Base::data_[11]) -
379 torch::mul(*Base::data_[3], *Base::data_[10])));
380
381 auto a13 = torch::mul(*Base::data_[1],
382 (torch::mul(*Base::data_[6], *Base::data_[15]) -
383 torch::mul(*Base::data_[7], *Base::data_[14]))) -
384 torch::mul(*Base::data_[5],
385 (torch::mul(*Base::data_[2], *Base::data_[15]) -
386 torch::mul(*Base::data_[3], *Base::data_[14]))) -
387 torch::mul(*Base::data_[13],
388 (torch::mul(*Base::data_[3], *Base::data_[6]) -
389 torch::mul(*Base::data_[2], *Base::data_[7])));
390
391 auto a14 = torch::mul(*Base::data_[1],
392 (torch::mul(*Base::data_[7], *Base::data_[10]) -
393 torch::mul(*Base::data_[6], *Base::data_[11]))) -
394 torch::mul(*Base::data_[5],
395 (torch::mul(*Base::data_[3], *Base::data_[10]) -
396 torch::mul(*Base::data_[2], *Base::data_[11]))) -
397 torch::mul(*Base::data_[9],
398 (torch::mul(*Base::data_[2], *Base::data_[7]) -
399 torch::mul(*Base::data_[3], *Base::data_[6])));
400
401 auto a21 = torch::mul(*Base::data_[4],
402 (torch::mul(*Base::data_[11], *Base::data_[14]) -
403 torch::mul(*Base::data_[10], *Base::data_[15]))) -
404 torch::mul(*Base::data_[8],
405 (torch::mul(*Base::data_[7], *Base::data_[14]) -
406 torch::mul(*Base::data_[6], *Base::data_[15]))) -
407 torch::mul(*Base::data_[12],
408 (torch::mul(*Base::data_[6], *Base::data_[11]) -
409 torch::mul(*Base::data_[7], *Base::data_[10])));
410
411 auto a22 = torch::mul(*Base::data_[0],
412 (torch::mul(*Base::data_[10], *Base::data_[15]) -
413 torch::mul(*Base::data_[11], *Base::data_[14]))) -
414 torch::mul(*Base::data_[8],
415 (torch::mul(*Base::data_[2], *Base::data_[15]) -
416 torch::mul(*Base::data_[3], *Base::data_[14]))) -
417 torch::mul(*Base::data_[12],
418 (torch::mul(*Base::data_[3], *Base::data_[10]) -
419 torch::mul(*Base::data_[2], *Base::data_[11])));
420
421 auto a23 = torch::mul(*Base::data_[0],
422 (torch::mul(*Base::data_[7], *Base::data_[14]) -
423 torch::mul(*Base::data_[6], *Base::data_[15]))) -
424 torch::mul(*Base::data_[4],
425 (torch::mul(*Base::data_[3], *Base::data_[14]) -
426 torch::mul(*Base::data_[2], *Base::data_[15]))) -
427 torch::mul(*Base::data_[12],
428 (torch::mul(*Base::data_[2], *Base::data_[7]) -
429 torch::mul(*Base::data_[3], *Base::data_[6])));
430
431 auto a24 = torch::mul(*Base::data_[0],
432 (torch::mul(*Base::data_[6], *Base::data_[11]) -
433 torch::mul(*Base::data_[7], *Base::data_[10]))) -
434 torch::mul(*Base::data_[4],
435 (torch::mul(*Base::data_[2], *Base::data_[11]) -
436 torch::mul(*Base::data_[3], *Base::data_[10]))) -
437 torch::mul(*Base::data_[8],
438 (torch::mul(*Base::data_[3], *Base::data_[6]) -
439 torch::mul(*Base::data_[2], *Base::data_[7])));
440
441 auto a31 = torch::mul(*Base::data_[4],
442 (torch::mul(*Base::data_[9], *Base::data_[15]) -
443 torch::mul(*Base::data_[11], *Base::data_[13]))) -
444 torch::mul(*Base::data_[8],
445 (torch::mul(*Base::data_[5], *Base::data_[15]) -
446 torch::mul(*Base::data_[7], *Base::data_[13]))) -
447 torch::mul(*Base::data_[12],
448 (torch::mul(*Base::data_[7], *Base::data_[9]) -
449 torch::mul(*Base::data_[5], *Base::data_[11])));
450
451 auto a32 = torch::mul(*Base::data_[0],
452 (torch::mul(*Base::data_[11], *Base::data_[13]) -
453 torch::mul(*Base::data_[9], *Base::data_[15]))) -
454 torch::mul(*Base::data_[8],
455 (torch::mul(*Base::data_[3], *Base::data_[13]) -
456 torch::mul(*Base::data_[1], *Base::data_[15]))) -
457 torch::mul(*Base::data_[12],
458 (torch::mul(*Base::data_[1], *Base::data_[11]) -
459 torch::mul(*Base::data_[3], *Base::data_[9])));
460
461 auto a33 = torch::mul(*Base::data_[0],
462 (torch::mul(*Base::data_[5], *Base::data_[15]) -
463 torch::mul(*Base::data_[7], *Base::data_[13]))) -
464 torch::mul(*Base::data_[4],
465 (torch::mul(*Base::data_[1], *Base::data_[15]) -
466 torch::mul(*Base::data_[3], *Base::data_[13]))) -
467 torch::mul(*Base::data_[12],
468 (torch::mul(*Base::data_[3], *Base::data_[5]) -
469 torch::mul(*Base::data_[1], *Base::data_[7])));
470
471 auto a34 = torch::mul(*Base::data_[0],
472 (torch::mul(*Base::data_[7], *Base::data_[9]) -
473 torch::mul(*Base::data_[5], *Base::data_[11]))) -
474 torch::mul(*Base::data_[4],
475 (torch::mul(*Base::data_[3], *Base::data_[9]) -
476 torch::mul(*Base::data_[1], *Base::data_[11]))) -
477 torch::mul(*Base::data_[8],
478 (torch::mul(*Base::data_[1], *Base::data_[7]) -
479 torch::mul(*Base::data_[3], *Base::data_[5])));
480
481 auto a41 = torch::mul(*Base::data_[4],
482 (torch::mul(*Base::data_[10], *Base::data_[13]) -
483 torch::mul(*Base::data_[9], *Base::data_[14]))) -
484 torch::mul(*Base::data_[8],
485 (torch::mul(*Base::data_[6], *Base::data_[13]) -
486 torch::mul(*Base::data_[5], *Base::data_[14]))) -
487 torch::mul(*Base::data_[12],
488 (torch::mul(*Base::data_[5], *Base::data_[10]) -
489 torch::mul(*Base::data_[6], *Base::data_[9])));
490
491 auto a42 = torch::mul(*Base::data_[0],
492 (torch::mul(*Base::data_[9], *Base::data_[14]) -
493 torch::mul(*Base::data_[10], *Base::data_[13]))) -
494 torch::mul(*Base::data_[8],
495 (torch::mul(*Base::data_[1], *Base::data_[14]) -
496 torch::mul(*Base::data_[2], *Base::data_[13]))) -
497 torch::mul(*Base::data_[12],
498 (torch::mul(*Base::data_[2], *Base::data_[9]) -
499 torch::mul(*Base::data_[1], *Base::data_[10])));
500
501 auto a43 = torch::mul(*Base::data_[0],
502 (torch::mul(*Base::data_[6], *Base::data_[13]) -
503 torch::mul(*Base::data_[5], *Base::data_[14]))) -
504 torch::mul(*Base::data_[4],
505 (torch::mul(*Base::data_[2], *Base::data_[13]) -
506 torch::mul(*Base::data_[1], *Base::data_[14]))) -
507 torch::mul(*Base::data_[12],
508 (torch::mul(*Base::data_[1], *Base::data_[6]) -
509 torch::mul(*Base::data_[2], *Base::data_[5])));
510
511 auto a44 = torch::mul(*Base::data_[0],
512 (torch::mul(*Base::data_[5], *Base::data_[10]) -
513 torch::mul(*Base::data_[6], *Base::data_[9]))) -
514 torch::mul(*Base::data_[4],
515 (torch::mul(*Base::data_[1], *Base::data_[10]) -
516 torch::mul(*Base::data_[2], *Base::data_[9]))) -
517 torch::mul(*Base::data_[8],
518 (torch::mul(*Base::data_[2], *Base::data_[5]) -
519 torch::mul(*Base::data_[1], *Base::data_[6])));
520 BlockTensor<T, Rows, Cols> result;
521 result[0] = std::make_shared<T>(torch::div(a11, det_));
522 result[1] = std::make_shared<T>(torch::div(a12, det_));
523 result[2] = std::make_shared<T>(torch::div(a13, det_));
524 result[3] = std::make_shared<T>(torch::div(a14, det_));
525 result[4] = std::make_shared<T>(torch::div(a21, det_));
526 result[5] = std::make_shared<T>(torch::div(a22, det_));
527 result[6] = std::make_shared<T>(torch::div(a23, det_));
528 result[7] = std::make_shared<T>(torch::div(a24, det_));
529 result[8] = std::make_shared<T>(torch::div(a31, det_));
530 result[9] = std::make_shared<T>(torch::div(a32, det_));
531 result[10] = std::make_shared<T>(torch::div(a33, det_));
532 result[11] = std::make_shared<T>(torch::div(a34, det_));
533 result[12] = std::make_shared<T>(torch::div(a41, det_));
534 result[13] = std::make_shared<T>(torch::div(a42, det_));
535 result[14] = std::make_shared<T>(torch::div(a43, det_));
536 result[15] = std::make_shared<T>(torch::div(a44, det_));
537 return result;
538 } else {
539 throw std::runtime_error("Unsupported block tensor dimension");
540 return *this;
541 }
542 }
543
551 inline auto ginv() const {
552 if constexpr (Rows == Cols)
553 return this->inv();
554 else
555 // Compute the generalized inverse, i.e. (A^T A)^{-1} A^T
556 return (this->tr() * (*this)).inv() * this->tr();
557 }
558
564 inline auto invtr() const {
565
566 auto det_ = this->det();
567
568 if constexpr (Rows == 1 && Cols == 1) {
569 BlockTensor<T, Cols, Rows> result;
570 result[0] = std::make_shared<T>(torch::reciprocal(*Base::data_[0]));
571 return result;
572 } else if constexpr (Rows == 2 && Cols == 2) {
573
574 BlockTensor<T, Cols, Rows> result;
575 result[0] = std::make_shared<T>(torch::div(*Base::data_[3], det_));
576 result[1] = std::make_shared<T>(torch::div(*Base::data_[1], -det_));
577 result[2] = std::make_shared<T>(torch::div(*Base::data_[2], -det_));
578 result[3] = std::make_shared<T>(torch::div(*Base::data_[0], det_));
579 return result;
580 } else if constexpr (Rows == 3 && Cols == 3) {
581
582 auto a11 = torch::mul(*Base::data_[4], *Base::data_[8]) -
583 torch::mul(*Base::data_[5], *Base::data_[7]);
584 auto a12 = torch::mul(*Base::data_[2], *Base::data_[7]) -
585 torch::mul(*Base::data_[1], *Base::data_[8]);
586 auto a13 = torch::mul(*Base::data_[1], *Base::data_[5]) -
587 torch::mul(*Base::data_[2], *Base::data_[4]);
588 auto a21 = torch::mul(*Base::data_[5], *Base::data_[6]) -
589 torch::mul(*Base::data_[3], *Base::data_[8]);
590 auto a22 = torch::mul(*Base::data_[0], *Base::data_[8]) -
591 torch::mul(*Base::data_[2], *Base::data_[6]);
592 auto a23 = torch::mul(*Base::data_[2], *Base::data_[3]) -
593 torch::mul(*Base::data_[0], *Base::data_[5]);
594 auto a31 = torch::mul(*Base::data_[3], *Base::data_[7]) -
595 torch::mul(*Base::data_[4], *Base::data_[6]);
596 auto a32 = torch::mul(*Base::data_[1], *Base::data_[6]) -
597 torch::mul(*Base::data_[0], *Base::data_[7]);
598 auto a33 = torch::mul(*Base::data_[0], *Base::data_[4]) -
599 torch::mul(*Base::data_[1], *Base::data_[3]);
600
601 BlockTensor<T, Cols, Rows> result;
602 result[0] = std::make_shared<T>(torch::div(a11, det_));
603 result[1] = std::make_shared<T>(torch::div(a21, det_));
604 result[2] = std::make_shared<T>(torch::div(a31, det_));
605 result[3] = std::make_shared<T>(torch::div(a12, det_));
606 result[4] = std::make_shared<T>(torch::div(a22, det_));
607 result[5] = std::make_shared<T>(torch::div(a32, det_));
608 result[6] = std::make_shared<T>(torch::div(a13, det_));
609 result[7] = std::make_shared<T>(torch::div(a23, det_));
610 result[8] = std::make_shared<T>(torch::div(a33, det_));
611 return result;
612 } else if constexpr (Rows == 4 && Cols == 4) {
613
614 auto a11 = torch::mul(*Base::data_[5],
615 (torch::mul(*Base::data_[10], *Base::data_[15]) -
616 torch::mul(*Base::data_[11], *Base::data_[14]))) -
617 torch::mul(*Base::data_[9],
618 (torch::mul(*Base::data_[6], *Base::data_[15]) -
619 torch::mul(*Base::data_[7], *Base::data_[14]))) -
620 torch::mul(*Base::data_[13],
621 (torch::mul(*Base::data_[7], *Base::data_[10]) -
622 torch::mul(*Base::data_[6], *Base::data_[11])));
623
624 auto a12 = torch::mul(*Base::data_[1],
625 (torch::mul(*Base::data_[11], *Base::data_[14]) -
626 torch::mul(*Base::data_[10], *Base::data_[15]))) -
627 torch::mul(*Base::data_[9],
628 (torch::mul(*Base::data_[3], *Base::data_[14]) -
629 torch::mul(*Base::data_[2], *Base::data_[15]))) -
630 torch::mul(*Base::data_[13],
631 (torch::mul(*Base::data_[2], *Base::data_[11]) -
632 torch::mul(*Base::data_[3], *Base::data_[10])));
633
634 auto a13 = torch::mul(*Base::data_[1],
635 (torch::mul(*Base::data_[6], *Base::data_[15]) -
636 torch::mul(*Base::data_[7], *Base::data_[14]))) -
637 torch::mul(*Base::data_[5],
638 (torch::mul(*Base::data_[2], *Base::data_[15]) -
639 torch::mul(*Base::data_[3], *Base::data_[14]))) -
640 torch::mul(*Base::data_[13],
641 (torch::mul(*Base::data_[3], *Base::data_[6]) -
642 torch::mul(*Base::data_[2], *Base::data_[7])));
643
644 auto a14 = torch::mul(*Base::data_[1],
645 (torch::mul(*Base::data_[7], *Base::data_[10]) -
646 torch::mul(*Base::data_[6], *Base::data_[11]))) -
647 torch::mul(*Base::data_[5],
648 (torch::mul(*Base::data_[3], *Base::data_[10]) -
649 torch::mul(*Base::data_[2], *Base::data_[11]))) -
650 torch::mul(*Base::data_[9],
651 (torch::mul(*Base::data_[2], *Base::data_[7]) -
652 torch::mul(*Base::data_[3], *Base::data_[6])));
653
654 auto a21 = torch::mul(*Base::data_[4],
655 (torch::mul(*Base::data_[11], *Base::data_[14]) -
656 torch::mul(*Base::data_[10], *Base::data_[15]))) -
657 torch::mul(*Base::data_[8],
658 (torch::mul(*Base::data_[7], *Base::data_[14]) -
659 torch::mul(*Base::data_[6], *Base::data_[15]))) -
660 torch::mul(*Base::data_[12],
661 (torch::mul(*Base::data_[6], *Base::data_[11]) -
662 torch::mul(*Base::data_[7], *Base::data_[10])));
663
664 auto a22 = torch::mul(*Base::data_[0],
665 (torch::mul(*Base::data_[10], *Base::data_[15]) -
666 torch::mul(*Base::data_[11], *Base::data_[14]))) -
667 torch::mul(*Base::data_[8],
668 (torch::mul(*Base::data_[2], *Base::data_[15]) -
669 torch::mul(*Base::data_[3], *Base::data_[14]))) -
670 torch::mul(*Base::data_[12],
671 (torch::mul(*Base::data_[3], *Base::data_[10]) -
672 torch::mul(*Base::data_[2], *Base::data_[11])));
673
674 auto a23 = torch::mul(*Base::data_[0],
675 (torch::mul(*Base::data_[7], *Base::data_[14]) -
676 torch::mul(*Base::data_[6], *Base::data_[15]))) -
677 torch::mul(*Base::data_[4],
678 (torch::mul(*Base::data_[3], *Base::data_[14]) -
679 torch::mul(*Base::data_[2], *Base::data_[15]))) -
680 torch::mul(*Base::data_[12],
681 (torch::mul(*Base::data_[2], *Base::data_[7]) -
682 torch::mul(*Base::data_[3], *Base::data_[6])));
683
684 auto a24 = torch::mul(*Base::data_[0],
685 (torch::mul(*Base::data_[6], *Base::data_[11]) -
686 torch::mul(*Base::data_[7], *Base::data_[10]))) -
687 torch::mul(*Base::data_[4],
688 (torch::mul(*Base::data_[2], *Base::data_[11]) -
689 torch::mul(*Base::data_[3], *Base::data_[10]))) -
690 torch::mul(*Base::data_[8],
691 (torch::mul(*Base::data_[3], *Base::data_[6]) -
692 torch::mul(*Base::data_[2], *Base::data_[7])));
693
694 auto a31 = torch::mul(*Base::data_[4],
695 (torch::mul(*Base::data_[9], *Base::data_[15]) -
696 torch::mul(*Base::data_[11], *Base::data_[13]))) -
697 torch::mul(*Base::data_[8],
698 (torch::mul(*Base::data_[5], *Base::data_[15]) -
699 torch::mul(*Base::data_[7], *Base::data_[13]))) -
700 torch::mul(*Base::data_[12],
701 (torch::mul(*Base::data_[7], *Base::data_[9]) -
702 torch::mul(*Base::data_[5], *Base::data_[11])));
703
704 auto a32 = torch::mul(*Base::data_[0],
705 (torch::mul(*Base::data_[11], *Base::data_[13]) -
706 torch::mul(*Base::data_[9], *Base::data_[15]))) -
707 torch::mul(*Base::data_[8],
708 (torch::mul(*Base::data_[3], *Base::data_[13]) -
709 torch::mul(*Base::data_[1], *Base::data_[15]))) -
710 torch::mul(*Base::data_[12],
711 (torch::mul(*Base::data_[1], *Base::data_[11]) -
712 torch::mul(*Base::data_[3], *Base::data_[9])));
713
714 auto a33 = torch::mul(*Base::data_[0],
715 (torch::mul(*Base::data_[5], *Base::data_[15]) -
716 torch::mul(*Base::data_[7], *Base::data_[13]))) -
717 torch::mul(*Base::data_[4],
718 (torch::mul(*Base::data_[1], *Base::data_[15]) -
719 torch::mul(*Base::data_[3], *Base::data_[13]))) -
720 torch::mul(*Base::data_[12],
721 (torch::mul(*Base::data_[3], *Base::data_[5]) -
722 torch::mul(*Base::data_[1], *Base::data_[7])));
723
724 auto a34 = torch::mul(*Base::data_[0],
725 (torch::mul(*Base::data_[7], *Base::data_[9]) -
726 torch::mul(*Base::data_[5], *Base::data_[11]))) -
727 torch::mul(*Base::data_[4],
728 (torch::mul(*Base::data_[3], *Base::data_[9]) -
729 torch::mul(*Base::data_[1], *Base::data_[11]))) -
730 torch::mul(*Base::data_[8],
731 (torch::mul(*Base::data_[1], *Base::data_[7]) -
732 torch::mul(*Base::data_[3], *Base::data_[5])));
733
734 auto a41 = torch::mul(*Base::data_[4],
735 (torch::mul(*Base::data_[10], *Base::data_[13]) -
736 torch::mul(*Base::data_[9], *Base::data_[14]))) -
737 torch::mul(*Base::data_[8],
738 (torch::mul(*Base::data_[6], *Base::data_[13]) -
739 torch::mul(*Base::data_[5], *Base::data_[14]))) -
740 torch::mul(*Base::data_[12],
741 (torch::mul(*Base::data_[5], *Base::data_[10]) -
742 torch::mul(*Base::data_[6], *Base::data_[9])));
743
744 auto a42 = torch::mul(*Base::data_[0],
745 (torch::mul(*Base::data_[9], *Base::data_[14]) -
746 torch::mul(*Base::data_[10], *Base::data_[13]))) -
747 torch::mul(*Base::data_[8],
748 (torch::mul(*Base::data_[1], *Base::data_[14]) -
749 torch::mul(*Base::data_[2], *Base::data_[13]))) -
750 torch::mul(*Base::data_[12],
751 (torch::mul(*Base::data_[2], *Base::data_[9]) -
752 torch::mul(*Base::data_[1], *Base::data_[10])));
753
754 auto a43 = torch::mul(*Base::data_[0],
755 (torch::mul(*Base::data_[6], *Base::data_[13]) -
756 torch::mul(*Base::data_[5], *Base::data_[14]))) -
757 torch::mul(*Base::data_[4],
758 (torch::mul(*Base::data_[2], *Base::data_[13]) -
759 torch::mul(*Base::data_[1], *Base::data_[14]))) -
760 torch::mul(*Base::data_[12],
761 (torch::mul(*Base::data_[1], *Base::data_[6]) -
762 torch::mul(*Base::data_[2], *Base::data_[5])));
763
764 auto a44 = torch::mul(*Base::data_[0],
765 (torch::mul(*Base::data_[5], *Base::data_[10]) -
766 torch::mul(*Base::data_[6], *Base::data_[9]))) -
767 torch::mul(*Base::data_[4],
768 (torch::mul(*Base::data_[1], *Base::data_[10]) -
769 torch::mul(*Base::data_[2], *Base::data_[9]))) -
770 torch::mul(*Base::data_[8],
771 (torch::mul(*Base::data_[2], *Base::data_[5]) -
772 torch::mul(*Base::data_[1], *Base::data_[6])));
773
774 BlockTensor<T, Cols, Rows> result;
775 result[0] = std::make_shared<T>(torch::div(a11, det_));
776 result[1] = std::make_shared<T>(torch::div(a21, det_));
777 result[2] = std::make_shared<T>(torch::div(a31, det_));
778 result[3] = std::make_shared<T>(torch::div(a41, det_));
779 result[4] = std::make_shared<T>(torch::div(a12, det_));
780 result[5] = std::make_shared<T>(torch::div(a22, det_));
781 result[6] = std::make_shared<T>(torch::div(a32, det_));
782 result[7] = std::make_shared<T>(torch::div(a42, det_));
783 result[8] = std::make_shared<T>(torch::div(a13, det_));
784 result[9] = std::make_shared<T>(torch::div(a23, det_));
785 result[10] = std::make_shared<T>(torch::div(a33, det_));
786 result[11] = std::make_shared<T>(torch::div(a43, det_));
787 result[12] = std::make_shared<T>(torch::div(a14, det_));
788 result[13] = std::make_shared<T>(torch::div(a24, det_));
789 result[14] = std::make_shared<T>(torch::div(a34, det_));
790 result[15] = std::make_shared<T>(torch::div(a44, det_));
791 return result;
792 } else {
793 throw std::runtime_error("Unsupported block tensor dimension");
794 return *this;
795 }
796 }
797
807 inline auto ginvtr() const {
808 if constexpr (Rows == Cols)
809 return this->invtr();
810 else
811 // Compute the transpose of the generalized inverse, i.e. A (A^T A)^{-T}
812 return (*this) * (this->tr() * (*this)).invtr();
813 }
814
816 inline auto trace() const {
817 static_assert(Rows == Cols, "trace(.) requires square block tensor");
818
819 if constexpr (Rows == 1)
820 return BlockTensor<T, 1, 1>(*Base::data_[0]);
821
822 else if constexpr (Rows == 2)
823 return BlockTensor<T, 1, 1>(*Base::data_[0] + *Base::data_[3]);
824
825 else if constexpr (Rows == 3)
826 return BlockTensor<T, 1, 1>(*Base::data_[0] + *Base::data_[4] +
827 *Base::data_[8]);
828
829 else if constexpr (Rows == 4)
830 return BlockTensor<T, 1, 1>(*Base::data_[0] + *Base::data_[5] +
831 *Base::data_[10] + *Base::data_[15]);
832
833 else
834 throw std::runtime_error("Unsupported block tensor dimension");
835 }
836
838 inline virtual void
839 pretty_print(std::ostream &os = Log(log::info)) const noexcept override {
840 os << Base::name() << "\n";
841 for (std::size_t row = 0; row < Rows; ++row)
842 for (std::size_t col = 0; col < Cols; ++col)
843 os << "[" << row << "," << col << "] = \n"
844 << *Base::data_[Cols * row + col] << "\n";
845 }
846};
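// --- Illustrative usage sketch (editorial addition, not part of the header) ---
// A rank-2 block tensor acts as a small matrix whose entries are tensors;
// det(), inv(), tr() and trace() operate block-wise and element-wise.
// Hypothetical example with 2x2 blocks of shape {N}:
//
//   const int64_t N = 10;
//   iganet::utils::BlockTensor<torch::Tensor, 2, 2> A(
//       torch::full({N}, 2.0), torch::zeros({N}),
//       torch::zeros({N}), torch::full({N}, 3.0));
//   auto d    = A.det();    // tensor of shape {N}, all entries 6.0
//   auto Ainv = A.inv();    // 2x2 block tensor of the adjugate divided by det
//   auto At   = A.tr();     // block-transposed copy (shares the blocks)
//   auto t    = A.trace();  // 1x1 block tensor holding 2.0 + 3.0 = 5.0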
847
850template <typename T, typename U, std::size_t Rows, std::size_t Common,
851 std::size_t Cols>
852inline auto operator*(const BlockTensor<T, Rows, Common> &lhs,
853 const BlockTensor<U, Common, Cols> &rhs) {
854 BlockTensor<typename std::common_type<T, U>::type, Rows, Cols> result;
855 for (std::size_t row = 0; row < Rows; ++row)
856 for (std::size_t col = 0; col < Cols; ++col) {
857 T tmp =
858 (lhs[Common * row]->dim() > rhs[col]->dim()
859 ? torch::mul(*lhs[Common * row], rhs[col]->unsqueeze(-1))
860 : (lhs[Common * row]->dim() < rhs[col]->dim()
861 ? torch::mul(lhs[Common * row]->unsqueeze(-1), *rhs[col])
862 : torch::mul(*lhs[Common * row], *rhs[col])));
863 for (std::size_t idx = 1; idx < Common; ++idx)
864 tmp += (lhs[Common * row]->dim() > rhs[col]->dim()
865 ? torch::mul(*lhs[Common * row + idx],
866 rhs[Cols * idx + col]->unsqueeze(-1))
867 : (lhs[Common * row]->dim() < rhs[col]->dim()
868 ? torch::mul(lhs[Common * row + idx]->unsqueeze(-1),
869 *rhs[Cols * idx + col])
870 : torch::mul(*lhs[Common * row + idx],
871 *rhs[Cols * idx + col])));
872 result[Cols * row + col] = std::make_shared<T>(tmp);
873 }
874 return result;
875}
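// --- Illustrative usage sketch (editorial addition, not part of the header) ---
// The overload above contracts the Common index block-wise: each block-block
// product is an element-wise torch::mul (with unsqueeze-based broadcasting
// when the two blocks differ in rank).  Hypothetical example:
//
//   iganet::utils::BlockTensor<torch::Tensor, 2, 2> A(
//       torch::ones({5}), torch::zeros({5}),
//       torch::zeros({5}), torch::ones({5}));
//   iganet::utils::BlockTensor<torch::Tensor, 2, 1> x(torch::full({5}, 2.0),
//                                                     torch::full({5}, 3.0));
//   auto y = A * x;  // 2x1 block tensor with y(0) == 2, y(1) == 3 element-wise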
876
883template <typename T, std::size_t Rows, std::size_t Cols, std::size_t Slices>
884class BlockTensor<T, Rows, Cols, Slices>
885 : public BlockTensorCore<T, Rows, Cols, Slices> {
886private:
888 using Base = BlockTensorCore<T, Rows, Cols, Slices>;
889public:
890 using BlockTensorCore<T, Rows, Cols, Slices>::BlockTensorCore;
891
893 inline static constexpr std::size_t rows() { return Rows; }
894
896 inline static constexpr std::size_t cols() { return Cols; }
897
899 inline static constexpr std::size_t slices() { return Slices; }
900
901 using Base::operator();
902
904 inline const T &operator()(std::size_t row, std::size_t col,
905 std::size_t slice) const {
906 assert(0 <= row && row < Rows && 0 <= col && col < Cols && 0 <= slice &&
907 slice < Slices);
908 return *Base::data_[Rows * Cols * slice + Cols * row + col];
909 }
910
912 inline T &operator()(std::size_t row, std::size_t col, std::size_t slice) {
913 assert(0 <= row && row < Rows && 0 <= col && col < Cols && 0 <= slice &&
914 slice < Slices);
915 return *Base::data_[Rows * Cols * slice + Cols * row + col];
916 }
917
918 using Base::set;
919
921 template <typename D>
922 inline T &set(std::size_t row, std::size_t col, std::size_t slice, D &&data) {
923 Base::data_[Rows * Cols * slice + Cols * row + col] =
924 make_shared<D>(std::forward<D>(data));
925 return *Base::data_[Rows * Cols * slice + Cols * row + col];
926 }
927
929 inline auto slice(std::size_t slice) const {
930 assert(0 <= slice && slice < Slices);
931 BlockTensor<T, Rows, Cols> result;
932 for (std::size_t row = 0; row < Rows; ++row)
933 for (std::size_t col = 0; col < Cols; ++col)
934 result[Cols * row + col] =
935 Base::data_[Rows * Cols * slice + Cols * row + col];
936 return result;
937 }
938
941 inline auto reorder_ikj() const {
942 BlockTensor<T, Rows, Slices, Cols> result;
943 for (std::size_t slice = 0; slice < Slices; ++slice)
944 for (std::size_t row = 0; row < Rows; ++row)
945 for (std::size_t col = 0; col < Cols; ++col)
946 result[Rows * Slices * col + Slices * row + slice] =
947 Base::data_[Rows * Cols * slice + Cols * row + col];
948 return result;
949 }
950
954 inline auto reorder_jik() const {
955 BlockTensor<T, Cols, Rows, Slices> result;
956 for (std::size_t slice = 0; slice < Slices; ++slice)
957 for (std::size_t row = 0; row < Rows; ++row)
958 for (std::size_t col = 0; col < Cols; ++col)
959 result[Rows * Cols * slice + Rows * col + row] =
960 Base::data_[Rows * Cols * slice + Cols * row + col];
961 return result;
962 }
963
966 inline auto reorder_kji() const {
967 BlockTensor<T, Slices, Cols, Rows> result;
968 for (std::size_t slice = 0; slice < Slices; ++slice)
969 for (std::size_t row = 0; row < Rows; ++row)
970 for (std::size_t col = 0; col < Cols; ++col)
971 result[Slices * Cols * row + Cols * slice + col] =
972 Base::data_[Rows * Cols * slice + Cols * row + col];
973 return result;
974 }
975
978 inline auto reorder_kij() const {
979 BlockTensor<T, Slices, Rows, Cols> result;
980 for (std::size_t slice = 0; slice < Slices; ++slice)
981 for (std::size_t row = 0; row < Rows; ++row)
982 for (std::size_t col = 0; col < Cols; ++col)
983 result[Slices * Rows * col + Rows * slice + row] =
984 Base::data_[Rows * Cols * slice + Cols * row + col];
985 return result;
986 }
987
989 inline virtual void
990 pretty_print(std::ostream &os = Log(log::info)) const noexcept override {
991 os << Base::name() << "\n";
992 for (std::size_t slice = 0; slice < Slices; ++slice)
993 for (std::size_t row = 0; row < Rows; ++row)
994 for (std::size_t col = 0; col < Cols; ++col)
995 os << "[" << row << "," << col << "," << slice << "] = \n"
996 << *Base::data_[Rows * Cols * slice + Cols * row + col] << "\n";
997 }
998};
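// --- Illustrative usage sketch (editorial addition, not part of the header) ---
// A rank-3 block tensor stores Rows x Cols x Slices blocks; slice(k) extracts
// the k-th Rows x Cols block matrix and the reorder_* members permute the
// three index directions.  Hypothetical example:
//
//   iganet::utils::BlockTensor<torch::Tensor, 2, 2, 3> H;
//   for (std::size_t k = 0; k < 3; ++k)
//     for (std::size_t i = 0; i < 2; ++i)
//       for (std::size_t j = 0; j < 2; ++j)
//         H.set(i, j, k, torch::rand({4}));
//   auto H0 = H.slice(0);       // BlockTensor<torch::Tensor, 2, 2>
//   auto Hp = H.reorder_jik();  // row and column directions swapped per slice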
999
1002template <typename T, typename U, std::size_t Rows, std::size_t Common,
1003 std::size_t Cols, std::size_t Slices>
1004inline auto operator*(const BlockTensor<T, Rows, Common> &lhs,
1005 const BlockTensor<U, Common, Cols, Slices> &rhs) {
1006 BlockTensor<typename std::common_type<T, U>::type, Rows, Cols, Slices> result;
1007 for (std::size_t slice = 0; slice < Slices; ++slice)
1008 for (std::size_t row = 0; row < Rows; ++row)
1009 for (std::size_t col = 0; col < Cols; ++col) {
1010 T tmp =
1011 (lhs[Common * row]->dim() > rhs[Rows * Cols * slice + col]->dim()
1012 ? torch::mul(*lhs[Common * row],
1013 rhs[Rows * Cols * slice + col]->unsqueeze(-1))
1014 : (lhs[Common * row]->dim() <
1015 rhs[Rows * Cols * slice + col]->dim()
1016 ? torch::mul(lhs[Common * row]->unsqueeze(-1),
1017 *rhs[Rows * Cols * slice + col])
1018 : torch::mul(*lhs[Common * row],
1019 *rhs[Rows * Cols * slice + col])));
1020 for (std::size_t idx = 1; idx < Common; ++idx)
1021 tmp +=
1022 (lhs[Common * row]->dim() > rhs[Rows * Cols * slice + col]->dim()
1023 ? torch::mul(
1024 *lhs[Common * row + idx],
1025 rhs[Rows * Cols * slice + Cols * idx + col]->unsqueeze(
1026 -1))
1027 : (lhs[Common * row]->dim() <
1028 rhs[Rows * Cols * slice + col]->dim()
1029 ? torch::mul(
1030 lhs[Common * row + idx]->unsqueeze(-1),
1031 *rhs[Rows * Cols * slice + Cols * idx + col])
1032 : torch::mul(
1033 *lhs[Common * row + idx],
1034 *rhs[Rows * Cols * slice + Cols * idx + col])));
1035 result[Rows * Cols * slice + Cols * row + col] =
1036 std::make_shared<T>(tmp);
1037 }
1038 return result;
1039}
1040
1043template <typename T, typename U, std::size_t Rows, std::size_t Common,
1044 std::size_t Cols, std::size_t Slices>
1045inline auto operator*(const BlockTensor<T, Rows, Common, Slices> &lhs,
1046 const BlockTensor<U, Common, Cols> &rhs) {
1047 BlockTensor<typename std::common_type<T, U>::type, Rows, Cols, Slices> result;
1048 for (std::size_t slice = 0; slice < Slices; ++slice)
1049 for (std::size_t row = 0; row < Rows; ++row)
1050 for (std::size_t col = 0; col < Cols; ++col) {
1051 T tmp =
1052 (lhs[Rows * Cols * slice + Common * row]->dim() > rhs[col]->dim()
1053 ? torch::mul(*lhs[Rows * Cols * slice + Common * row],
1054 rhs[col]->unsqueeze(-1))
1055 : (lhs[Rows * Cols * slice + Common * row]->dim() <
1056 rhs[col]->dim()
1057 ? torch::mul(lhs[Rows * Cols * slice + Common * row]
1058 ->unsqueeze(-1),
1059 *rhs[col])
1060 : torch::mul(*lhs[Rows * Cols * slice + Common * row],
1061 *rhs[col])));
1062 for (std::size_t idx = 1; idx < Common; ++idx)
1063 tmp +=
1064 (lhs[Rows * Cols * slice + Common * row + idx]->dim() >
1065 rhs[Cols * idx + col]->dim()
1066 ? torch::mul(*lhs[Rows * Cols * slice + Common * row + idx],
1067 rhs[Cols * idx + col])
1068 ->unsqueeze(-1)
1069 : (lhs[Rows * Cols * slice + Common * row + idx]->dim() <
1070 rhs[Cols * idx + col]->dim()
1071 ? torch::mul(
1072 lhs[Rows * Cols * slice + Common * row + idx]
1073 ->unsqueeze(-1),
1074 *rhs[Cols * idx + col])
1075 : torch::mul(
1076 *lhs[Rows * Cols * slice + Common * row + idx],
1077 *rhs[Cols * idx + col])));
1078 result[Rows * Cols * slice + Cols * row + col] =
1079 std::make_shared<T>(tmp);
1080 }
1081 return result;
1082}
1083
1084#define blocktensor_unary_op(name) \
1085 template <typename T, std::size_t... Dims> \
1086 inline auto name(const BlockTensor<T, Dims...> &input) { \
1087 BlockTensor<T, Dims...> result; \
1088 for (std::size_t idx = 0; idx < (Dims * ...); ++idx) \
1089 result[idx] = std::make_shared<T>(torch::name(*input[idx])); \
1090 return result; \
1091 }
1092
1093#define blocktensor_unary_special_op(name) \
1094 template <typename T, std::size_t... Dims> \
1095 inline auto name(const BlockTensor<T, Dims...> &input) { \
1096 BlockTensor<T, Dims...> result; \
1097 for (std::size_t idx = 0; idx < (Dims * ...); ++idx) \
1098 result[idx] = std::make_shared<T>(torch::special::name(*input[idx])); \
1099 return result; \
1100 }
1101
1102#define blocktensor_binary_op(name) \
1103 template <typename T, typename U, std::size_t... Dims> \
1104 inline auto name(const BlockTensor<T, Dims...> &input, \
1105 const BlockTensor<U, Dims...> &other) { \
1106 BlockTensor<typename std::common_type<T, U>::type, Dims...> result; \
1107 for (std::size_t idx = 0; idx < (Dims * ...); ++idx) \
1108 result[idx] = \
1109 std::make_shared<T>(torch::name(*input[idx], *other[idx])); \
1110 return result; \
1111 }
1112
1113#define blocktensor_binary_special_op(name) \
1114 template <typename T, typename U, std::size_t... Dims> \
1115 inline auto name(const BlockTensor<T, Dims...> &input, \
1116 const BlockTensor<U, Dims...> &other) { \
1117 BlockTensor<typename std::common_type<T, U>::type, Dims...> result; \
1118 for (std::size_t idx = 0; idx < (Dims * ...); ++idx) \
1119 result[idx] = \
1120 std::make_shared<T>(torch::special::name(*input[idx], *other[idx])); \
1121 return result; \
1122 }
1123
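// --- Illustrative note (editorial addition, not part of the header) ---
// Each blocktensor_unary_op(name) invocation below generates a free function
// that applies torch::name to every block.  For instance, an invocation such
// as blocktensor_unary_op(sqrt) would expand to roughly:
//
//   template <typename T, std::size_t... Dims>
//   inline auto sqrt(const BlockTensor<T, Dims...> &input) {
//     BlockTensor<T, Dims...> result;
//     for (std::size_t idx = 0; idx < (Dims * ...); ++idx)
//       result[idx] = std::make_shared<T>(torch::sqrt(*input[idx]));
//     return result;
//   }
//
// The binary and "special" variants follow the same pattern with two block
// tensor arguments and torch::special::name, respectively.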
1127
1130
1134
1137
1141
1144
1147template <typename T, typename U, typename V, std::size_t... Dims>
1148inline auto add(const BlockTensor<T, Dims...> &input,
1149 const BlockTensor<U, Dims...> &other, V alpha = 1.0) {
1150 BlockTensor<typename std::common_type<T, U>::type, Dims...> result;
1151 for (std::size_t idx = 0; idx < (Dims * ...); ++idx)
1152 result[idx] =
1153 std::make_shared<T>(torch::add(*input[idx], *other[idx], alpha));
1154 return result;
1155}
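// --- Illustrative usage sketch (editorial addition, not part of the header) ---
// As with torch::add, alpha scales the second operand, i.e. each result block
// holds input[idx] + alpha * other[idx].  Hypothetical example:
//
//   iganet::utils::BlockTensor<torch::Tensor, 2> a(torch::ones({3}),
//                                                  torch::ones({3}));
//   iganet::utils::BlockTensor<torch::Tensor, 2> b(torch::full({3}, 4.0),
//                                                  torch::full({3}, 4.0));
//   auto c = iganet::utils::add(a, b, 0.5);  // every block equals 3.0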
1156
1159template <typename T, typename U, typename V, std::size_t... Dims>
1160inline auto add(const BlockTensor<T, Dims...> &input, U other, V alpha = 1.0) {
1161 BlockTensor<T, Dims...> result;
1162 for (std::size_t idx = 0; idx < (Dims * ...); ++idx)
1163 result[idx] = std::make_shared<T>(torch::add(*input[idx], other, alpha));
1164 return result;
1165}
1166
1169template <typename T, typename U, typename V, std::size_t... Dims>
1170inline auto add(T input, const BlockTensor<U, Dims...> &other, V alpha = 1.0) {
1171 BlockTensor<U, Dims...> result;
1172 for (std::size_t idx = 0; idx < (Dims * ...); ++idx)
1173 result[idx] = std::make_shared<T>(torch::add(input, *other[idx], alpha));
1174 return result;
1175}
1176
1180template <typename T, typename U, typename V, typename W, std::size_t... Dims>
1181inline auto addcdiv(const BlockTensor<T, Dims...> &input,
1182 const BlockTensor<U, Dims...> &tensor1,
1183 const BlockTensor<V, Dims...> &tensor2, W value = 1.0) {
1184 BlockTensor<typename std::common_type<T, U>::type, Dims...> result;
1185 for (std::size_t idx = 0; idx < (Dims * ...); ++idx)
1186 result[idx] = std::make_shared<T>(
1187 torch::addcdiv(*input[idx], *tensor1[idx], *tensor2[idx], value));
1188 return result;
1189}
1190
1195template <typename T, typename U, typename V, typename W, std::size_t... Dims>
1196inline auto addcmul(const BlockTensor<T, Dims...> &input,
1197 const BlockTensor<U, Dims...> &tensor1,
1198 const BlockTensor<V, Dims...> &tensor2, W value = 1.0) {
1199 BlockTensor<typename std::common_type<T, U>::type, Dims...> result;
1200 for (std::size_t idx = 0; idx < (Dims * ...); ++idx)
1201 result[idx] = std::make_shared<T>(
1202 torch::addcmul(*input[idx], *tensor1[idx], *tensor2[idx], value));
1203 return result;
1204}
1205
1209
1213
1216
1220
1223
1227
1230
1234
1235
1237
1242
1243#if TORCH_VERSION_MAJOR >= 1 && TORCH_VERSION_MINOR >= 11 || \
1244 TORCH_VERSION_MAJOR >= 2
1247#endif
1248
1252
1256
1260
1264
1268
1272
1277
1280template <typename T, typename U, std::size_t... Dims>
1281inline auto clamp(const BlockTensor<T, Dims...> &input, U min, U max) {
1282 BlockTensor<T, Dims...> result;
1283 for (std::size_t idx = 0; idx < (Dims * ...); ++idx)
1284 result[idx] = std::make_shared<T>(torch::clamp(*input[idx], min, max));
1285 return result;
1286}
1287
1289template <typename T, typename U, std::size_t... Dims>
1290inline auto clip(const BlockTensor<T, Dims...> &input, U min, U max) {
1291 BlockTensor<T, Dims...> result;
1292 for (std::size_t idx = 0; idx < (Dims * ...); ++idx)
1293 result[idx] = std::make_shared<T>(torch::clip(*input[idx], min, max));
1294 return result;
1295}
1296
1300
1304
1308
1312
1316
1317
1320
1323
1327
1331
1335
1339
1343
1347
1351
1354
1359
1363
1367
1371
1375
1379
1383
1388
1392
1396
1400
1404
1408
1412
1416
1420
1424
1428
1430
1433
1438
1442
1445
1449
1452
1456
1459
1463
1466
1470
1473
1477
1481
1485
1489
1493
1497
1501
1505
1508
1512
1516
1520
1524
1528
1532
1536
1540
1542template <typename T, typename U, typename V, std::size_t... Dims>
1543inline auto sub(const BlockTensor<T, Dims...> &input,
1544 const BlockTensor<U, Dims...> &other, V alpha = 1.0) {
1545 BlockTensor<typename std::common_type<T, U>::type, Dims...> result;
1546 for (std::size_t idx = 0; idx < (Dims * ...); ++idx)
1547 result[idx] =
1548 std::make_shared<T>(torch::sub(*input[idx], *other[idx], alpha));
1549 return result;
1550}
1551
1553template <typename T, typename U, typename V, std::size_t... Dims>
1554inline auto subtract(const BlockTensor<T, Dims...> &input,
1555 const BlockTensor<U, Dims...> &other, V alpha = 1.0) {
1556 BlockTensor<typename std::common_type<T, U>::type, Dims...> result;
1557 for (std::size_t idx = 0; idx < (Dims * ...); ++idx)
1558 result[idx] =
1559 std::make_shared<T>(torch::sub(*input[idx], *other[idx], alpha));
1560 return result;
1561}
1562
1566
1570
1574
1575
1577
1580template <typename T, typename U, std::size_t... Dims>
1581inline auto operator+(const BlockTensor<T, Dims...> &lhs,
1582 const BlockTensor<U, Dims...> &rhs) {
1583 BlockTensor<typename std::common_type<T, U>::type, Dims...> result;
1584 for (std::size_t idx = 0; idx < (Dims * ...); ++idx)
1585 result[idx] = std::make_shared<T>(*lhs[idx] + *rhs[idx]);
1586 return result;
1587}
1588
1591template <typename T, typename U, std::size_t... Dims>
1592inline auto operator+(const BlockTensor<T, Dims...> &lhs, const U &rhs) {
1593 BlockTensor<T, Dims...> result;
1594 for (std::size_t idx = 0; idx < (Dims * ...); ++idx)
1595 result[idx] = std::make_shared<T>(*lhs[idx] + rhs);
1596 return result;
1597}
1598
1601template <typename T, typename U, std::size_t... Dims>
1602inline auto operator+(const T &lhs, const BlockTensor<U, Dims...> &rhs) {
1603 BlockTensor<U, Dims...> result;
1604 for (std::size_t idx = 0; idx < (Dims * ...); ++idx)
1605 result[idx] = std::make_shared<U>(lhs + *rhs[idx]);
1606 return result;
1607}
1608
1610template <typename T, typename U, std::size_t... Dims>
1611inline auto operator+=(BlockTensor<T, Dims...> &lhs,
1612 const BlockTensor<U, Dims...> &rhs) {
1613 for (std::size_t idx = 0; idx < (Dims * ...); ++idx)
1614 lhs[idx] = std::make_shared<T>(*lhs[idx] + *rhs[idx]);
1615 return lhs;
1616}
1617
1619template <typename T, typename U, std::size_t... Dims>
1620inline auto operator+=(BlockTensor<T, Dims...> &lhs, const U &rhs) {
1621 for (std::size_t idx = 0; idx < (Dims * ...); ++idx)
1622 lhs[idx] = std::make_shared<T>(*lhs[idx] + rhs);
1623 return lhs;
1624}
1625
1628template <typename T, typename U, std::size_t... Dims>
1629inline auto operator-(const BlockTensor<T, Dims...> &lhs,
1630 const BlockTensor<U, Dims...> &rhs) {
1631 BlockTensor<typename std::common_type<T, U>::type, Dims...> result;
1632 for (std::size_t idx = 0; idx < (Dims * ...); ++idx)
1633 result[idx] = std::make_shared<T>(*lhs[idx] - *rhs[idx]);
1634 return result;
1635}
1636
1639template <typename T, typename U, std::size_t... Dims>
1640inline auto operator-(const BlockTensor<T, Dims...> &lhs, const U &rhs) {
1641 BlockTensor<T, Dims...> result;
1642 for (std::size_t idx = 0; idx < (Dims * ...); ++idx)
1643 result[idx] = std::make_shared<T>(*lhs[idx] - rhs);
1644 return result;
1645}
1646
1649template <typename T, typename U, std::size_t... Dims>
1650inline auto operator-(const T &lhs, const BlockTensor<U, Dims...> &rhs) {
1651 BlockTensor<U, Dims...> result;
1652 for (std::size_t idx = 0; idx < (Dims * ...); ++idx)
1653 result[idx] = std::make_shared<U>(lhs - *rhs[idx]);
1654 return result;
1655}
1656
1658template <typename T, typename U, std::size_t... Dims>
1659inline auto operator-=(BlockTensor<T, Dims...> &lhs,
1660 const BlockTensor<U, Dims...> &rhs) {
1661 for (std::size_t idx = 0; idx < (Dims * ...); ++idx)
1662 lhs[idx] = std::make_shared<T>(*lhs[idx] - *rhs[idx]);
1663 return lhs;
1664}
1665
1667template <typename T, typename U, std::size_t... Dims>
1668inline auto operator-=(BlockTensor<T, Dims...> &lhs, const U &rhs) {
1669 for (std::size_t idx = 0; idx < (Dims * ...); ++idx)
1670 lhs[idx] = std::make_shared<T>(*lhs[idx] - rhs);
1671 return lhs;
1672}
1673
1676template <typename T, typename U, std::size_t... Dims>
1677inline auto operator*(const BlockTensor<T, Dims...> &lhs, const U &rhs) {
1678 BlockTensor<T, Dims...> result;
1679 for (std::size_t idx = 0; idx < (Dims * ...); ++idx)
1680 result[idx] =
1681 (lhs[idx]->dim() > rhs.dim()
1682 ? std::make_shared<T>(*lhs[idx] * rhs.unsqueeze(-1))
1683 : (lhs[idx]->dim() < rhs.dim()
1684 ? std::make_shared<T>(lhs[idx]->unsqueeze(-1) * rhs)
1685 : std::make_shared<T>(*lhs[idx] * rhs)));
1686 ;
1687 return result;
1688}
1689
1692template <typename T, typename U, std::size_t... Dims>
1693inline auto operator*(const T &lhs, const BlockTensor<U, Dims...> &rhs) {
1694 BlockTensor<U, Dims...> result;
1695 for (std::size_t idx = 0; idx < (Dims * ...); ++idx)
1696 result[idx] =
1697 (lhs.dim() > rhs[idx]->dim()
1698 ? std::make_shared<U>(lhs * rhs[idx]->unsqueeze(-1))
1699 : (lhs.dim() < rhs[idx]->dim()
1700 ? std::make_shared<U>(lhs.unsqueeze(-1) * *rhs[idx])
1701 : std::make_shared<U>(lhs * *rhs[idx])));
1702 return result;
1703}
1704
1706template <typename T, typename U, std::size_t... TDims, std::size_t... UDims>
1707inline bool operator==(const BlockTensor<T, TDims...> &lhs,
1708 const BlockTensor<U, UDims...> &rhs) {
1709 if constexpr ((sizeof...(TDims) != sizeof...(UDims)) ||
1710 ((TDims != UDims) || ...))
1711 return false;
1712
1713 bool result = true;
1714 for (std::size_t idx = 0; idx < (TDims * ...); ++idx)
1715 result = result && torch::equal(*lhs[idx], *rhs[idx]);
1716
1717 return result;
1718}
1719
1721template <typename T, typename U, std::size_t... TDims, std::size_t... UDims>
1722inline bool operator!=(const BlockTensor<T, TDims...> &lhs,
1723 const BlockTensor<U, UDims...> &rhs) {
1724 return !(lhs == rhs);
1725}
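// --- Illustrative usage sketch (editorial addition, not part of the header) ---
// Equality first checks that both operands have identical compile-time
// dimensions and then compares all blocks with torch::equal.  Hypothetical:
//
//   iganet::utils::BlockTensor<torch::Tensor, 2> a(torch::ones({3}),
//                                                  torch::zeros({3}));
//   iganet::utils::BlockTensor<torch::Tensor, 2> b(torch::ones({3}),
//                                                  torch::zeros({3}));
//   bool same      = (a == b);  // true: same dimensions, equal blocks
//   bool different = (a != b);  // false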
1726
1727} // namespace utils
1728} // namespace iganet
#define blocktensor_unary_op(name)
Definition blocktensor.hpp:1084
#define blocktensor_unary_special_op(name)
Definition blocktensor.hpp:1093
#define blocktensor_binary_op(name)
Definition blocktensor.hpp:1102
#define blocktensor_binary_special_op(name)
Definition blocktensor.hpp:1113
static constexpr std::size_t slices()
Returns the number of slices.
Definition blocktensor.hpp:899
auto reorder_jik() const
Returns a new block vector with rows and columns transposed and slices remaining fixed....
Definition blocktensor.hpp:954
virtual void pretty_print(std::ostream &os=Log(log::info)) const noexcept override
Returns a string representation of the BSplineCommon object.
Definition blocktensor.hpp:990
const T & operator()(std::size_t row, std::size_t col, std::size_t slice) const
Returns a constant reference to entry (row, col, slice)
Definition blocktensor.hpp:904
T & set(std::size_t row, std::size_t col, std::size_t slice, D &&data)
Stores the given data object at the given position.
Definition blocktensor.hpp:922
auto reorder_kij() const
Returns a new block vector with rows, columns, and slices permuted according to (i,...
Definition blocktensor.hpp:978
auto reorder_ikj() const
Returns a new block vector with rows, columns, and slices permuted according to (i,...
Definition blocktensor.hpp:941
static constexpr std::size_t rows()
Returns the number of rows.
Definition blocktensor.hpp:893
T & operator()(std::size_t row, std::size_t col, std::size_t slice)
Returns a non-constant reference to entry (row, col, slice)
Definition blocktensor.hpp:912
auto reorder_kji() const
Returns a new block vector with rows, columns, and slices permuted according to (i,...
Definition blocktensor.hpp:966
auto slice(std::size_t slice) const
Returns a rank-2 tensor of the k-th slice.
Definition blocktensor.hpp:929
static constexpr std::size_t cols()
Returns the number of columns.
Definition blocktensor.hpp:896
auto inv() const
Returns the inverse of the block tensor.
Definition blocktensor.hpp:312
auto ginv() const
Returns the (generalized) inverse of the block tensor.
Definition blocktensor.hpp:551
auto ginvtr() const
Returns the transpose of the (generalized) inverse of the block tensor.
Definition blocktensor.hpp:807
auto tr() const
Returns the transpose of the block tensor.
Definition blocktensor.hpp:225
auto invtr() const
Returns the transpose of the inverse of the block tensor.
Definition blocktensor.hpp:564
static constexpr std::size_t cols()
Returns the number of columns.
Definition blocktensor.hpp:198
static constexpr std::size_t rows()
Returns the number of rows.
Definition blocktensor.hpp:195
const T & operator()(std::size_t row, std::size_t col) const
Returns a constant reference to entry (row, col)
Definition blocktensor.hpp:203
T & set(std::size_t row, std::size_t col, D &&data)
Stores the given data object at the given position.
Definition blocktensor.hpp:218
virtual void pretty_print(std::ostream &os=Log(log::info)) const noexcept override
Returns a string representation of the BSplineCommon object.
Definition blocktensor.hpp:839
auto trace() const
Returns the trace of the block tensor.
Definition blocktensor.hpp:816
auto det() const
Returns the determinant of a square block tensor.
Definition blocktensor.hpp:237
T & operator()(std::size_t row, std::size_t col)
Returns a non-constant reference to entry (row, col)
Definition blocktensor.hpp:209
static constexpr std::size_t rows()
Returns the number of rows.
Definition blocktensor.hpp:170
virtual void pretty_print(std::ostream &os=Log(log::info)) const noexcept override
Returns a string representation of the BlockTensor object.
Definition blocktensor.hpp:174
Compile-time block tensor core.
Definition blocktensor.hpp:50
const std::array< std::shared_ptr< T >,(Dims *...)> & data() const
Returns a constant reference to the data array.
Definition blocktensor.hpp:109
static constexpr auto dims()
Returns all dimensions as array.
Definition blocktensor.hpp:90
BlockTensorCore(BlockTensorCore< Ts, dims... > &&...other)
Constructur from BlockTensorCore objects.
Definition blocktensor.hpp:62
BlockTensorCore(Ts &&...data)
Constructor from variadic templates.
Definition blocktensor.hpp:86
BlockTensorCore()=default
Default constructor.
std::shared_ptr< T > & operator[](std::size_t idx)
Returns a non-constant shared pointer to entry (idx)
Definition blocktensor.hpp:123
T & set(std::size_t idx, Data &&data)
Stores the given data object at the given index.
Definition blocktensor.hpp:141
virtual void pretty_print(std::ostream &os=Log(log::info)) const noexcept=0
Returns a string representation of the BlockTensorCore object.
static constexpr std::size_t dim()
Returns the i-th dimension.
Definition blocktensor.hpp:95
const std::shared_ptr< T > & operator[](std::size_t idx) const
Returns a constant shared pointer to entry (idx)
Definition blocktensor.hpp:117
BlockTensorCore(BlockTensor< Ts, dims... > &&...other)
Constructur from BlockTensor objects.
Definition blocktensor.hpp:74
static constexpr std::size_t entries()
Returns the total number of entries.
Definition blocktensor.hpp:106
std::array< std::shared_ptr< T >,(Dims *...)> & data()
Returns a non-constant reference to the data array.
Definition blocktensor.hpp:114
static constexpr std::size_t size()
Returns the number of dimensions.
Definition blocktensor.hpp:103
const T & operator()(std::size_t idx) const
Returns a constant reference to entry (idx)
Definition blocktensor.hpp:129
std::array< std::shared_ptr< T >,(Dims *...)> data_
Array storing the data.
Definition blocktensor.hpp:54
T & operator()(std::size_t idx)
Returns a non-constant reference to entry (idx)
Definition blocktensor.hpp:135
Full qualified name descriptor.
Definition fqn.hpp:26
Core components.
Full qualified name utility functions.
auto addcmul(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &tensor1, const BlockTensor< V, Dims... > &tensor2, W value=1.0)
Returns a new block tensor with the elements of tensor1 multiplied by the elements of tensor2,...
Definition blocktensor.hpp:1196
auto log2(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the logarithm to the base-2 of the elements of input
Definition blocktensor.hpp:1403
auto tan(const BlockTensor< T, Dims... > &input)
Returns a new tensor with the tangent of the elements of input.
Definition blocktensor.hpp:1565
auto square(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the square of the elements of input
Definition blocktensor.hpp:1539
auto mul(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other)
Returns a new block tensor with the product of each element of input and other
Definition blocktensor.hpp:1455
auto divide(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other)
Alias for div()
Definition blocktensor.hpp:1322
auto exp2(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the base-2 exponential of the elements of input
Definition blocktensor.hpp:1346
auto frexp(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the decomposition of the elements of input into mantissae and exponen...
Definition blocktensor.hpp:1374
auto xlogy(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other)
Computes input * log(other)
Definition blocktensor.hpp:1576
auto operator-(const BlockTensor< T, Dims... > &lhs, const BlockTensor< U, Dims... > &rhs)
Subtracts one compile-time block tensor from another and returns a new compile-time block tensor.
Definition blocktensor.hpp:1629
auto floor(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the floor of the elements of input, the largest integer less than or ...
Definition blocktensor.hpp:1362
auto bitwise_not(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the bitwise NOT of the elements of input
Definition blocktensor.hpp:1251
auto i0(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the element-wise zeroth order modified Bessel function of the first k...
Definition blocktensor.hpp:1437
auto float_power(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other)
Returns a new block tensor with the elements of input raised to the power of exponent,...
Definition blocktensor.hpp:1358
auto round(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the elements of input rounded to the nearest integer.
Definition blocktensor.hpp:1496
auto hypot(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other)
logit
Definition blocktensor.hpp:1432
auto imag(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the imaginary values of the elements of input
Definition blocktensor.hpp:1378
auto atan(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the arctangent of the elements of input
Definition blocktensor.hpp:1226
auto copysign(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other)
Returns a new block tensor with the magnitude of the elements of input and the sign of the elements o...
Definition blocktensor.hpp:1303
auto add(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other, V alpha=1.0)
Returns a new block tensor with the elements of other, scaled by alpha, added to the elements of inpu...
Definition blocktensor.hpp:1148
auto asin(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the arcsine of the elements of input
Definition blocktensor.hpp:1212
bool operator==(const BlockTensor< T, TDims... > &lhs, const BlockTensor< U, UDims... > &rhs)
Returns true if both compile-time block tensors are equal.
Definition blocktensor.hpp:1707
auto angle(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the angle (in radians) of the elements of input
Definition blocktensor.hpp:1208
auto nextafter(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other)
Return a new block tensor with the next elementwise floating-point value after input towards other
Definition blocktensor.hpp:1469
auto arcsinh(const BlockTensor< T, Dims... > &input)
Alias for asinh()
Definition blocktensor.hpp:1222
auto sub(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other, V alpha=1.0)
Subtracts other, scaled by alpha, from input.
Definition blocktensor.hpp:1543
auto sign(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the signs of the elements of input
Definition blocktensor.hpp:1511
auto logical_and(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other)
Returns a new block tensor with the element-wise logical AND of the elements of input and other
Definition blocktensor.hpp:1415
auto absolute(const BlockTensor< T, Dims... > &input)
Alias for abs()
Definition blocktensor.hpp:1129
auto fix(const BlockTensor< T, Dims... > &input)
Alias for trunc()
Definition blocktensor.hpp:1353
auto sinc(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the normalized sinc of the elements of input
Definition blocktensor.hpp:1527
auto logical_not(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the element-wise logical NOT of the elements of input
Definition blocktensor.hpp:1419
auto positive(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the input
Definition blocktensor.hpp:1472
auto sqrt(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the square-root of the elements of input
Definition blocktensor.hpp:1535
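The unary element-wise functions on this page (abs, sqrt, sin, exp, ...) all follow the same pattern: they take a block tensor and return a new one of the same compile-time shape. A short continuation of the sketch above:

// Chain unary functions block-wise; the compile-time shape <torch::Tensor, 2, 2> is preserved.
auto magnitudes = iganet::utils::sqrt(iganet::utils::abs(diff));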
auto reciprocal(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the reciprocal of the elements of input
Definition blocktensor.hpp:1488
auto clip(const BlockTensor< T, Dims... > &input, U min, U max)
Alias for clamp()
Definition blocktensor.hpp:1290
auto arcsin(const BlockTensor< T, Dims... > &input)
Alias for asin()
Definition blocktensor.hpp:1215
auto bitwise_or(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other)
Returns a new block tensor with the bitwise OR of the elements of input and other
Definition blocktensor.hpp:1259
auto atanh(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the inverse hyperbolic tangent of the elements of input
Definition blocktensor.hpp:1233
auto subtract(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other, V alpha=1.0)
Alias for sub()
Definition blocktensor.hpp:1554
auto atan2(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other)
Returns a new block tensor with the arctangent of the elements in input and other with consideration of the quadrant
Definition blocktensor.hpp:1241
auto expit(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the expit (also known as the logistic sigmoid function) of the elements of input
Definition blocktensor.hpp:1504
auto rsqrt(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the reciprocal of the square-root of the elements of input
Definition blocktensor.hpp:1500
auto sin(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the sine of the elements of input
Definition blocktensor.hpp:1523
auto cosh(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the hyperbolic cosine of the elements of input
Definition blocktensor.hpp:1311
std::ostream & operator<<(std::ostream &os, const BlockTensorCore< T, Dims... > &obj)
Prints (as string) a compile-time block tensor object.
Definition blocktensor.hpp:154
auto bitwise_and(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other)
Returns a new block tensor with the bitwise AND of the elements of input and other
Definition blocktensor.hpp:1255
auto erfc(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the complementary error function of the elements of input
Definition blocktensor.hpp:1334
auto gammainc(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other)
Returns a new block tensor with the regularized lower incomplete gamma function of each element of input and other
Definition blocktensor.hpp:1441
auto operator-=(BlockTensor< T, Dims... > &lhs, const BlockTensor< U, Dims... > &rhs)
Decrements one compile-time block tensor by another.
Definition blocktensor.hpp:1659
auto arccos(const BlockTensor< T, Dims... > &input)
Alias for acos()
Definition blocktensor.hpp:1136
auto negative(const BlockTensor< T, Dims... > &input)
Alias for neg()
Definition blocktensor.hpp:1465
auto multiply(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other)
Alias for mul()
Definition blocktensor.hpp:1458
auto logaddexp2(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other)
Returns a new block tensor with the logarithm of the sum of exponentiations of the elements of input and other in base-2
Definition blocktensor.hpp:1411
auto pow(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other)
Returns a new block tensor with the power of each element in input with exponent other
Definition blocktensor.hpp:1476
auto bitwise_xor(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other)
Returns a new block tensor with the bitwise XOR of the elements of input and other
Definition blocktensor.hpp:1263
auto ldexp(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other)
Returns a new block tensor with the elements of input multiplied by 2**other.
Definition blocktensor.hpp:1382
auto igammac(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other)
Alias for gammaincc()
Definition blocktensor.hpp:1451
auto neg(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the negative of the elements of input
Definition blocktensor.hpp:1462
auto exp(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the exponential of the elements of input
Definition blocktensor.hpp:1342
auto arctan(const BlockTensor< T, Dims... > &input)
Alias for atan()
Definition blocktensor.hpp:1229
auto log1p(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the natural logarithm of (1 + the elements of input)
Definition blocktensor.hpp:1399
auto arctanh(const BlockTensor< T, Dims... > &input)
Alias for atanh()
Definition blocktensor.hpp:1236
auto ceil(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the ceil of the elements of input, the smallest integer greater than or equal to each element
Definition blocktensor.hpp:1276
auto trunc(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the truncated integer values of the elements of input.
Definition blocktensor.hpp:1573
auto cos(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the cosine of the elements of input
Definition blocktensor.hpp:1307
auto erfinv(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the inverse error function of the elements of input
Definition blocktensor.hpp:1338
bool operator!=(const BlockTensor< T, TDims... > &lhs, const BlockTensor< U, UDims... > &rhs)
Returns true if both compile-time block tensors are not equal.
Definition blocktensor.hpp:1722
auto expm1(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the exponential minus 1 of the elements of input
Definition blocktensor.hpp:1350
auto signbit(const BlockTensor< T, Dims... > &input)
Tests if each element of input has its sign bit set (is less than zero) or not.
Definition blocktensor.hpp:1519
auto conj_physical(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the conjugate of the elements of input tensor.
Definition blocktensor.hpp:1299
auto sinh(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the hyperbolic sine of the elements of input
Definition blocktensor.hpp:1531
auto remainder(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other)
Returns a new block tensor with the modulus of the elements of input
Definition blocktensor.hpp:1492
auto digamma(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the logarithmic derivative of the gamma function of the elements of input
Definition blocktensor.hpp:1326
auto asinh(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the inverse hyperbolic sine of the elements of input
Definition blocktensor.hpp:1219
auto div(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other)
Returns a new block tensor with the elements of input divided by the elements of other
Definition blocktensor.hpp:1319
auto igamma(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other)
Alias for gammainc()
Definition blocktensor.hpp:1444
auto bitwise_left_shift(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other)
Returns a new block tensor with the left arithmetic shift of the elements of input by other bits.
Definition blocktensor.hpp:1267
auto rad2deg(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with each of the elements of input converted from angles in radians to degrees
Definition blocktensor.hpp:1480
auto real(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the real values of the elements of input
Definition blocktensor.hpp:1484
auto lgamma(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the natural logarithm of the absolute value of the gamma function of the elements of input
Definition blocktensor.hpp:1387
auto gammaincc(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other)
Returns a new block tensor with the regularized upper incomplete gamma function of each element of input and other
Definition blocktensor.hpp:1448
auto acos(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the inverse cosine of the elements of input
Definition blocktensor.hpp:1133
auto fmod(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other)
Returns a new block tensor with the fmod of the elements of input and other
Definition blocktensor.hpp:1366
auto bitwise_right_shift(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other)
Returns a new block tensor with the right arithmetic shift of the elements of input by other bits.
Definition blocktensor.hpp:1271
auto operator+=(BlockTensor< T, Dims... > &lhs, const BlockTensor< U, Dims... > &rhs)
Increments one compile-time block tensor by another.
Definition blocktensor.hpp:1611
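The compound assignment operators modify their left-hand operand in place. Continuing the sketch:

lhs += rhs; // block-wise increment of lhs by rhs
lhs -= rhs; // block-wise decrement; with the values used above this restores lhs exactly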
auto sgn(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the signs of the elements of input, extended to complex values.
Definition blocktensor.hpp:1515
auto arccosh(const BlockTensor< T, Dims... > &input)
Alias for acosh().
Definition blocktensor.hpp:1143
auto abs(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the absolute value of the elements of input
Definition blocktensor.hpp:1126
auto logical_xor(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other)
Returns a new block tensor with the element-wise logical XOR of the elements of input and other
Definition blocktensor.hpp:1427
auto addcdiv(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &tensor1, const BlockTensor< V, Dims... > &tensor2, W value=1.0)
Returns a new block tensor with the elements of tensor1 divided by the elements of tensor2, scaled by value, and added to the elements of input
Definition blocktensor.hpp:1181
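addcdiv() is the usual fused multiply-divide-add; because the brief above is truncated in this listing, the formula in the comment (input + value * tensor1 / tensor2, block-wise) is an assumption based on that reading rather than a statement from the header.

// Assumed semantics: fused = lhs + 0.5 * (lhs / rhs), block by block.
auto fused = iganet::utils::addcdiv(lhs, lhs, rhs, /*value=*/0.5);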
auto deg2rad(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the elements of input converted from angles in degrees to radians.
Definition blocktensor.hpp:1315
auto logaddexp(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other)
Returns a new block tensor with the logarithm of the sum of exponentiations of the elements of input and other
Definition blocktensor.hpp:1407
auto erf(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the error function of the elements of input
Definition blocktensor.hpp:1330
auto operator*(const BlockTensor< T, Rows, Common > &lhs, const BlockTensor< U, Common, Cols > &rhs)
Multiplies one compile-time rank-2 block tensor with another compile-time rank-2 block tensor.
Definition blocktensor.hpp:852
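operator* contracts the inner block dimension: a Rows x Common block tensor times a Common x Cols block tensor yields a Rows x Cols block tensor. The continuation below only illustrates the compile-time dimensions; how the individual blocks are combined is not spelled out in the brief above.

iganet::utils::BlockTensor<torch::Tensor, 2, 3> A(
    torch::ones({3}), torch::ones({3}), torch::ones({3}),
    torch::ones({3}), torch::ones({3}), torch::ones({3}));
iganet::utils::BlockTensor<torch::Tensor, 3, 2> B(
    torch::ones({3}), torch::ones({3}),
    torch::ones({3}), torch::ones({3}),
    torch::ones({3}), torch::ones({3}));
auto C = A * B; // compile-time shape 2x2, i.e. C.dims() == {2, 2}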
auto log10(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the logarithm to the base-10 of the elements of input
Definition blocktensor.hpp:1395
auto make_shared(T &&arg)
Returns an std::shared_ptr<T> object from arg.
Definition blocktensor.hpp:38
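make_shared() is the helper the block-tensor constructors use internally: a plain value is wrapped into a std::shared_ptr, whereas an argument that already is a std::shared_ptr is forwarded unchanged. A small sketch, still assuming torch::Tensor blocks:

auto p = iganet::utils::make_shared(torch::ones({3}));
// p is a std::shared_ptr<torch::Tensor> wrapping the tensor.

auto q = std::make_shared<torch::Tensor>(torch::zeros({3}));
auto r = iganet::utils::make_shared(std::move(q));
// r reuses the existing shared_ptr; no second allocation takes place.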
auto acosh(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the inverse hyperbolic cosine of the elements of input
Definition blocktensor.hpp:1140
auto clamp(const BlockTensor< T, Dims... > &input, U min, U max)
Returns a new block tensor with the elements of input clamped into the range [min, max]
Definition blocktensor.hpp:1281
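Unlike most binary functions on this page, clamp() takes scalar bounds rather than a second block tensor. Continuing the sketch, with arbitrary bounds:

// Each block of diff is clamped element-wise into [0, 1].
auto bounded = iganet::utils::clamp(diff, /*min=*/0.0, /*max=*/1.0);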
auto logical_or(const BlockTensor< T, Dims... > &input, const BlockTensor< U, Dims... > &other)
Returns a new block tensor with the element-wise logical OR of the elements of input and other
Definition blocktensor.hpp:1423
auto frac(const BlockTensor< T, Dims... > &input)
Returns a new block tensor with the fractional portion of the elements of input
Definition blocktensor.hpp:1370
Forward declaration of BlockTensor.
Definition blocktensor.hpp:46
Definition boundary.hpp:22
constexpr auto operator+(deriv lhs, deriv rhs)
Adds two enumerators for specifying the derivative of B-spline evaluation.
Definition bspline.hpp:91
constexpr bool is_SplineType_v
Alias to the value of is_SplineType.
Definition bspline.hpp:3243
struct iganet::@0 Log
Logger.
log
Enumerator for specifying the logging level.
Definition core.hpp:90
STL namespace.
Type trait that checks if the template argument is of type std::shared_ptr<T>
Definition blocktensor.hpp:31