IgANets - Isogeometric Analysis Networks

iganet.hpp
15#pragma once
16
17#include <any>
18
19#include <boundary.hpp>
20#include <functionspace.hpp>
21#include <igabase.hpp>
22#include <layer.hpp>
23#include <optimizer.hpp>
24#include <utils/container.hpp>
25#include <utils/fqn.hpp>
26#include <utils/tuple.hpp>
27#include <utils/zip.hpp>
28
29namespace iganet {
30
32struct IgANetOptions {
33  TORCH_ARG(int64_t, max_epoch) = 100;
34 TORCH_ARG(int64_t, batch_size) = 1000;
35 TORCH_ARG(double, min_loss) = 1e-4;
36};
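// Usage sketch (editorial example, not part of the original source): the
// TORCH_ARG macro generates fluent setter/getter pairs, so the training
// defaults above can be adjusted as, e.g.,
//
//   iganet::IgANetOptions opts;
//   opts.max_epoch(500).batch_size(64).min_loss(1e-6);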
37
47template <typename real_t>
48class IgANetGeneratorImpl : public torch::nn::Module {
49public:
51  IgANetGeneratorImpl() = default;
52
54  IgANetGeneratorImpl(
55      const std::vector<int64_t> &layers,
56 const std::vector<std::vector<std::any>> &activations,
57 Options<real_t> options = Options<real_t>{}) {
58 assert(layers.size() == activations.size() + 1);
59
60 // Generate vector of linear layers and register them as layer[i]
61    for (std::size_t i = 0; i < layers.size() - 1; ++i) {
62 layers_.emplace_back(
63 register_module("layer[" + std::to_string(i) + "]",
64 torch::nn::Linear(layers[i], layers[i + 1])));
65 layers_.back()->to(options.device(), options.dtype(), true);
66
67 torch::nn::init::xavier_uniform_(layers_.back()->weight);
68 torch::nn::init::constant_(layers_.back()->bias, 0.0);
69 }
70
71 // Generate vector of activation functions
72 for (const auto &a : activations)
73 switch (std::any_cast<activation>(a[0])) {
74 // No activation function
75      case activation::none:
76        switch (a.size()) {
77 case 1:
78 activations_.emplace_back(new None{});
79 break;
80 default:
81 throw std::runtime_error("Invalid number of parameters");
82 }
83 break;
84
85 // Batch Normalization
86      case activation::batch_norm:
87        switch (a.size()) {
88 case 8:
89 activations_.emplace_back(new BatchNorm{
90 std::any_cast<torch::Tensor>(a[1]),
91 std::any_cast<torch::Tensor>(a[2]),
92 std::any_cast<torch::Tensor>(a[3]),
93 std::any_cast<torch::Tensor>(a[4]), std::any_cast<double>(a[5]),
94 std::any_cast<double>(a[6]), std::any_cast<bool>(a[7])});
95 break;
96 case 7:
97 activations_.emplace_back(new BatchNorm{
98 std::any_cast<torch::Tensor>(a[1]),
99 std::any_cast<torch::Tensor>(a[2]),
100 std::any_cast<torch::Tensor>(a[3]),
101 std::any_cast<torch::Tensor>(a[4]), std::any_cast<double>(a[5]),
102 std::any_cast<double>(a[6])});
103 break;
104 case 4:
105 activations_.emplace_back(new BatchNorm{
106 std::any_cast<torch::Tensor>(a[1]),
107 std::any_cast<torch::Tensor>(a[2]),
108 std::any_cast<torch::nn::functional::BatchNormFuncOptions>(
109 a[3])});
110 break;
111 case 3:
112 activations_.emplace_back(
113 new BatchNorm{std::any_cast<torch::Tensor>(a[1]),
114 std::any_cast<torch::Tensor>(a[2])});
115 break;
116 default:
117 throw std::runtime_error("Invalid number of parameters");
118 }
119 break;
120
121 // CELU
122 case activation::celu:
123 switch (a.size()) {
124 case 3:
125 activations_.emplace_back(
126 new CELU{std::any_cast<double>(a[1]), std::any_cast<bool>(a[2])});
127 break;
128 case 2:
129 try {
130 activations_.emplace_back(new CELU{
131 std::any_cast<torch::nn::functional::CELUFuncOptions>(a[1])});
132 } catch (...) {
133 activations_.emplace_back(new CELU{std::any_cast<double>(a[1])});
134 }
135 break;
136 case 1:
137 activations_.emplace_back(new CELU{});
138 break;
139 default:
140 throw std::runtime_error("Invalid number of parameters");
141 }
142 break;
143
144 // ELU
145 case activation::elu:
146 switch (a.size()) {
147 case 3:
148 activations_.emplace_back(
149 new ELU{std::any_cast<double>(a[1]), std::any_cast<bool>(a[2])});
150 break;
151 case 2:
152 try {
153 activations_.emplace_back(new ELU{
154 std::any_cast<torch::nn::functional::ELUFuncOptions>(a[1])});
155 } catch (...) {
156 activations_.emplace_back(new ELU{std::any_cast<double>(a[1])});
157 }
158 break;
159 case 1:
160 activations_.emplace_back(new ELU{});
161 break;
162 default:
163 throw std::runtime_error("Invalid number of parameters");
164 }
165 break;
166
167 // GELU
168 case activation::gelu:
169 switch (a.size()) {
170 case 1:
171 activations_.emplace_back(new GELU{});
172 break;
173 default:
174 throw std::runtime_error("Invalid number of parameters");
175 }
176 break;
177
178 // GLU
179 case activation::glu:
180 switch (a.size()) {
181 case 2:
182 try {
183 activations_.emplace_back(new GLU{
184 std::any_cast<torch::nn::functional::GLUFuncOptions>(a[1])});
185 } catch (...) {
186 activations_.emplace_back(new GLU{std::any_cast<int64_t>(a[1])});
187 }
188 break;
189 case 1:
190 activations_.emplace_back(new GLU{});
191 break;
192 default:
193 throw std::runtime_error("Invalid number of parameters");
194 }
195 break;
196
197 // Group Normalization
198      case activation::group_norm:
199        switch (a.size()) {
200 case 5:
201 activations_.emplace_back(new GroupNorm{
202 std::any_cast<int64_t>(a[1]), std::any_cast<torch::Tensor>(a[2]),
203 std::any_cast<torch::Tensor>(a[3]), std::any_cast<double>(a[4])});
204 break;
205 case 2:
206 try {
207 activations_.emplace_back(new GroupNorm{
208 std::any_cast<torch::nn::functional::GroupNormFuncOptions>(
209 a[1])});
210 } catch (...) {
211 activations_.emplace_back(
212 new GroupNorm{std::any_cast<int64_t>(a[1])});
213 }
214 break;
215 default:
216 throw std::runtime_error("Invalid number of parameters");
217 }
218 break;
219
220 // Gumbel-Softmax
221      case activation::gumbel_softmax:
222        switch (a.size()) {
223 case 4:
224 activations_.emplace_back(new GumbelSoftmax{
225 std::any_cast<double>(a[1]), std::any_cast<int>(a[2]),
226 std::any_cast<bool>(a[3])});
227 break;
228 case 2:
229 activations_.emplace_back(new GumbelSoftmax{
230 std::any_cast<torch::nn::functional::GumbelSoftmaxFuncOptions>(
231 a[1])});
232 break;
233 case 1:
234 activations_.emplace_back(new GumbelSoftmax{});
235 break;
236 default:
237 throw std::runtime_error("Invalid number of parameters");
238 }
239 break;
240
241      // Hardshrink
242      case activation::hardshrink:
243        switch (a.size()) {
244 case 2:
245 try {
246 activations_.emplace_back(new Hardshrink{
247 std::any_cast<torch::nn::functional::HardshrinkFuncOptions>(
248 a[1])});
249 } catch (...) {
250 activations_.emplace_back(
251 new Hardshrink{std::any_cast<double>(a[1])});
252 }
253 break;
254 case 1:
255 activations_.emplace_back(new Hardshrink{});
256 break;
257 default:
258 throw std::runtime_error("Invalid number of parameters");
259 }
260 break;
261
262 // Hardsigmoid
263      case activation::hardsigmoid:
264        switch (a.size()) {
265 case 1:
266 activations_.emplace_back(new Hardsigmoid{});
267 break;
268 default:
269 throw std::runtime_error("Invalid number of parameters");
270 }
271 break;
272
273 // Hardswish
274      case activation::hardswish:
275        switch (a.size()) {
276 case 1:
277 activations_.emplace_back(new Hardswish{});
278 break;
279 default:
280 throw std::runtime_error("Invalid number of parameters");
281 }
282 break;
283
284 // Hardtanh
285      case activation::hardtanh:
286        switch (a.size()) {
287 case 4:
288 activations_.emplace_back(new Hardtanh{std::any_cast<double>(a[1]),
289 std::any_cast<double>(a[2]),
290 std::any_cast<bool>(a[3])});
291 break;
292 case 3:
293 activations_.emplace_back(new Hardtanh{std::any_cast<double>(a[1]),
294 std::any_cast<double>(a[2])});
295 break;
296 case 2:
297 activations_.emplace_back(new Hardtanh{
298 std::any_cast<torch::nn::functional::HardtanhFuncOptions>(a[1])});
299 break;
300 case 1:
301 activations_.emplace_back(new Hardtanh{});
302 break;
303 default:
304 throw std::runtime_error("Invalid number of parameters");
305 }
306 break;
307
308 // Instance Normalization
309      case activation::instance_norm:
310        switch (a.size()) {
311 case 8:
312 activations_.emplace_back(new InstanceNorm{
313 std::any_cast<torch::Tensor>(a[1]),
314 std::any_cast<torch::Tensor>(a[2]),
315 std::any_cast<torch::Tensor>(a[3]),
316 std::any_cast<torch::Tensor>(a[4]), std::any_cast<double>(a[5]),
317 std::any_cast<double>(a[6]), std::any_cast<bool>(a[7])});
318 break;
319 case 7:
320 activations_.emplace_back(new InstanceNorm{
321 std::any_cast<torch::Tensor>(a[1]),
322 std::any_cast<torch::Tensor>(a[2]),
323 std::any_cast<torch::Tensor>(a[3]),
324 std::any_cast<torch::Tensor>(a[4]), std::any_cast<double>(a[5]),
325 std::any_cast<double>(a[6])});
326 break;
327 case 2:
328 activations_.emplace_back(new InstanceNorm{
329 std::any_cast<torch::nn::functional::InstanceNormFuncOptions>(
330 a[1])});
331 break;
332 case 1:
333 activations_.emplace_back(new InstanceNorm{});
334 break;
335 default:
336 throw std::runtime_error("Invalid number of parameters");
337 }
338 break;
339
340 // Layer Normalization
341      case activation::layer_norm:
342        switch (a.size()) {
343 case 5:
344 activations_.emplace_back(new LayerNorm{
345 std::any_cast<std::vector<int64_t>>(a[1]),
346 std::any_cast<torch::Tensor>(a[2]),
347 std::any_cast<torch::Tensor>(a[3]), std::any_cast<double>(a[4])});
348 break;
349 case 2:
350 try {
351 activations_.emplace_back(new LayerNorm{
352 std::any_cast<torch::nn::functional::LayerNormFuncOptions>(
353 a[1])});
354 } catch (...) {
355 activations_.emplace_back(
356 new LayerNorm{std::any_cast<std::vector<int64_t>>(a[1])});
357 }
358 break;
359 default:
360 throw std::runtime_error("Invalid number of parameters");
361 }
362 break;
363
364 // Leaky ReLU
365      case activation::leaky_relu:
366        switch (a.size()) {
367 case 3:
368 activations_.emplace_back(new LeakyReLU{std::any_cast<double>(a[1]),
369 std::any_cast<bool>(a[2])});
370 break;
371 case 2:
372 try {
373 activations_.emplace_back(new LeakyReLU{
374 std::any_cast<torch::nn::functional::LeakyReLUFuncOptions>(
375 a[1])});
376 } catch (...) {
377 activations_.emplace_back(
378 new LeakyReLU{std::any_cast<double>(a[1])});
379 }
380 break;
381 case 1:
382 activations_.emplace_back(new LeakyReLU{});
383 break;
384 default:
385 throw std::runtime_error("Invalid number of parameters");
386 }
387 break;
388
389      // Local Response Normalization
390      case activation::local_response_norm:
391        switch (a.size()) {
392 case 5:
393 activations_.emplace_back(new LocalResponseNorm{
394 std::any_cast<int64_t>(a[1]), std::any_cast<double>(a[2]),
395 std::any_cast<double>(a[3]), std::any_cast<double>(a[4])});
396 break;
397 case 2:
398 try {
399 activations_.emplace_back(new LocalResponseNorm{std::any_cast<
400 torch::nn::functional::LocalResponseNormFuncOptions>(a[1])});
401 } catch (...) {
402 activations_.emplace_back(
403 new LocalResponseNorm{std::any_cast<int64_t>(a[1])});
404 }
405 break;
406 default:
407 throw std::runtime_error("Invalid number of parameters");
408 }
409 break;
410
411 // LogSigmoid
412      case activation::log_sigmoid:
413        switch (a.size()) {
414 case 1:
415 activations_.emplace_back(new LogSigmoid{});
416 break;
417 default:
418 throw std::runtime_error("Invalid number of parameters");
419 }
420 break;
421
422 // LogSoftmax
423      case activation::log_softmax:
424        switch (a.size()) {
425 case 2:
426 try {
427 activations_.emplace_back(new LogSoftmax{
428 std::any_cast<torch::nn::functional::LogSoftmaxFuncOptions>(
429 a[1])});
430 } catch (...) {
431 activations_.emplace_back(
432 new LogSoftmax{std::any_cast<int64_t>(a[1])});
433 }
434 break;
435 default:
436 throw std::runtime_error("Invalid number of parameters");
437 }
438 break;
439
440 // Mish
441 case activation::mish:
442 switch (a.size()) {
443 case 1:
444 activations_.emplace_back(new Mish{});
445 break;
446 default:
447 throw std::runtime_error("Invalid number of parameters");
448 }
449 break;
450
451 // Lp Normalization
452      case activation::normalize:
453        switch (a.size()) {
454 case 4:
455 activations_.emplace_back(new Normalize{
456 std::any_cast<double>(a[1]), std::any_cast<double>(a[2]),
457 std::any_cast<int64_t>(a[3])});
458 break;
459 case 2:
460 activations_.emplace_back(new Normalize{
461 std::any_cast<torch::nn::functional::NormalizeFuncOptions>(
462 a[1])});
463 break;
464 case 1:
465 activations_.emplace_back(new Normalize{});
466 break;
467 default:
468 throw std::runtime_error("Invalid number of parameters");
469 }
470 break;
471
472 // PReLU
473      case activation::prelu:
474        switch (a.size()) {
475 case 2:
476 activations_.emplace_back(
477 new PReLU{std::any_cast<torch::Tensor>(a[1])});
478 break;
479 default:
480 throw std::runtime_error("Invalid number of parameters");
481 }
482 break;
483
484 // ReLU
485 case activation::relu:
486 switch (a.size()) {
487 case 2:
488 try {
489 activations_.emplace_back(new ReLU{
490 std::any_cast<torch::nn::functional::ReLUFuncOptions>(a[1])});
491 } catch (...) {
492 activations_.emplace_back(new ReLU{std::any_cast<bool>(a[1])});
493 }
494 break;
495 case 1:
496 activations_.emplace_back(new ReLU{});
497 break;
498 default:
499 throw std::runtime_error("Invalid number of parameters");
500 }
501 break;
502
503      // ReLU6
504      case activation::relu6:
505        switch (a.size()) {
506 case 2:
507 try {
508 activations_.emplace_back(new ReLU6{
509 std::any_cast<torch::nn::functional::ReLU6FuncOptions>(a[1])});
510 } catch (...) {
511 activations_.emplace_back(new ReLU6{std::any_cast<bool>(a[1])});
512 }
513 break;
514 case 1:
515 activations_.emplace_back(new ReLU6{});
516 break;
517 default:
518 throw std::runtime_error("Invalid number of parameters");
519 }
520 break;
521
522 // Randomized ReLU
523      case activation::rrelu:
524        switch (a.size()) {
525 case 4:
526 activations_.emplace_back(new RReLU{std::any_cast<double>(a[1]),
527 std::any_cast<double>(a[2]),
528 std::any_cast<bool>(a[3])});
529 break;
530 case 3:
531 activations_.emplace_back(new RReLU{std::any_cast<double>(a[1]),
532 std::any_cast<double>(a[2])});
533 break;
534 case 2:
535 activations_.emplace_back(new RReLU{
536 std::any_cast<torch::nn::functional::RReLUFuncOptions>(a[1])});
537 break;
538 case 1:
539 activations_.emplace_back(new RReLU{});
540 break;
541 default:
542 throw std::runtime_error("Invalid number of parameters");
543 }
544 break;
545
546 // SELU
547 case activation::selu:
548 switch (a.size()) {
549 case 2:
550 try {
551 activations_.emplace_back(new SELU{
552 std::any_cast<torch::nn::functional::SELUFuncOptions>(a[1])});
553 } catch (...) {
554 activations_.emplace_back(new SELU{std::any_cast<bool>(a[1])});
555 }
556 break;
557 case 1:
558 activations_.emplace_back(new SELU{});
559 break;
560 default:
561 throw std::runtime_error("Invalid number of parameters");
562 }
563 break;
564
565 // Sigmoid
566      case activation::sigmoid:
567        switch (a.size()) {
568 case 1:
569 activations_.emplace_back(new Sigmoid{});
570 break;
571 default:
572 throw std::runtime_error("Invalid number of parameters");
573 }
574 break;
575
576 // SiLU
577 case activation::silu:
578 switch (a.size()) {
579 case 1:
580 activations_.emplace_back(new SiLU{});
581 break;
582 default:
583 throw std::runtime_error("Invalid number of parameters");
584 }
585 break;
586
587 // Softmax
588      case activation::softmax:
589        switch (a.size()) {
590 case 2:
591 try {
592 activations_.emplace_back(new Softmax{
593 std::any_cast<torch::nn::functional::SoftmaxFuncOptions>(
594 a[1])});
595 } catch (...) {
596 activations_.emplace_back(
597 new Softmax{std::any_cast<int64_t>(a[1])});
598 }
599 break;
600 default:
601 throw std::runtime_error("Invalid number of parameters");
602 }
603 break;
604
605 // Softmin
606      case activation::softmin:
607        switch (a.size()) {
608 case 2:
609 try {
610 activations_.emplace_back(new Softmin{
611 std::any_cast<torch::nn::functional::SoftminFuncOptions>(
612 a[1])});
613 } catch (...) {
614 activations_.emplace_back(
615 new Softmin{std::any_cast<int64_t>(a[1])});
616 }
617 break;
618 default:
619 throw std::runtime_error("Invalid number of parameters");
620 }
621 break;
622
623 // Softplus
624      case activation::softplus:
625        switch (a.size()) {
626 case 3:
627 activations_.emplace_back(new Softplus{std::any_cast<double>(a[1]),
628 std::any_cast<double>(a[2])});
629 break;
630 case 2:
631 activations_.emplace_back(new Softplus{
632 std::any_cast<torch::nn::functional::SoftplusFuncOptions>(a[1])});
633 break;
634 case 1:
635 activations_.emplace_back(new Softplus{});
636 break;
637 default:
638 throw std::runtime_error("Invalid number of parameters");
639 }
640 break;
641
642 // Softshrink
643      case activation::softshrink:
644        switch (a.size()) {
645 case 2:
646 try {
647 activations_.emplace_back(new Softshrink{
648 std::any_cast<torch::nn::functional::SoftshrinkFuncOptions>(
649 a[1])});
650 } catch (...) {
651 activations_.emplace_back(
652 new Softshrink{std::any_cast<double>(a[1])});
653 }
654 break;
655 case 1:
656 activations_.emplace_back(new Softshrink{});
657 break;
658 default:
659 throw std::runtime_error("Invalid number of parameters");
660 }
661 break;
662
663 // Softsign
664      case activation::softsign:
665        switch (a.size()) {
666 case 1:
667 activations_.emplace_back(new Softsign{});
668 break;
669 default:
670 throw std::runtime_error("Invalid number of parameters");
671 }
672 break;
673
674 // Tanh
675 case activation::tanh:
676 switch (a.size()) {
677 case 1:
678 activations_.emplace_back(new Tanh{});
679 break;
680 default:
681 throw std::runtime_error("Invalid number of parameters");
682 }
683 break;
684
685 // Tanhshrink
686      case activation::tanhshrink:
687        switch (a.size()) {
688 case 1:
689 activations_.emplace_back(new Tanhshrink{});
690 break;
691 default:
692 throw std::runtime_error("Invalid number of parameters");
693 }
694 break;
695
696 // Threshold
697      case activation::threshold:
698        switch (a.size()) {
699 case 4:
700 activations_.emplace_back(new Threshold{std::any_cast<double>(a[1]),
701 std::any_cast<double>(a[2]),
702 std::any_cast<bool>(a[3])});
703 break;
704 case 3:
705 activations_.emplace_back(new Threshold{std::any_cast<double>(a[1]),
706 std::any_cast<double>(a[2])});
707 break;
708 case 2:
709 activations_.emplace_back(new Threshold{
710 std::any_cast<torch::nn::functional::ThresholdFuncOptions>(
711 a[1])});
712 break;
713 default:
714 throw std::runtime_error("Invalid number of parameters");
715 }
716 break;
717
718 default:
719 throw std::runtime_error("Invalid activation function");
720 }
721 }
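// Construction sketch (editorial example, not part of the original source):
// `layers` lists the widths of all linear layers, and each entry of
// `activations` starts with an activation enum value followed by its optional
// parameters; layers.size() must equal activations.size() + 1. A 2-32-32-1
// network with tanh hidden layers could be set up as
//
//   IgANetGeneratorImpl<double> net(
//       {2, 32, 32, 1},
//       {{activation::tanh}, {activation::tanh}, {activation::none}});
//   torch::Tensor y = net.forward(torch::rand({8, 2}));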
722
724 torch::Tensor forward(torch::Tensor x) {
725 torch::Tensor x_in = x.clone();
726
727 // Standard feed-forward neural network
728 for (auto [layer, activation] : utils::zip(layers_, activations_))
729 x = activation->apply(layer->forward(x));
730
731 return x;
732 }
733
735 inline torch::serialize::OutputArchive &
736 write(torch::serialize::OutputArchive &archive,
737 const std::string &key = "iganet") const {
738 assert(layers_.size() == activations_.size());
739
740 archive.write(key + ".layers", torch::full({1}, (int64_t)layers_.size()));
741 for (std::size_t i = 0; i < layers_.size(); ++i) {
742 archive.write(
743 key + ".layer[" + std::to_string(i) + "].in_features",
744 torch::full({1}, (int64_t)layers_[i]->options.in_features()));
745 archive.write(
746 key + ".layer[" + std::to_string(i) + "].outputs_features",
747 torch::full({1}, (int64_t)layers_[i]->options.out_features()));
748 archive.write(key + ".layer[" + std::to_string(i) + "].bias",
749 torch::full({1}, (int64_t)layers_[i]->options.bias()));
750
751 activations_[i]->write(archive, key + ".layer[" + std::to_string(i) +
752 "].activation");
753 }
754
755 return archive;
756 }
757
759 inline torch::serialize::InputArchive &
760 read(torch::serialize::InputArchive &archive,
761 const std::string &key = "iganet") {
762 torch::Tensor layers, in_features, outputs_features, bias, activation;
763
764 archive.read(key + ".layers", layers);
765 for (int64_t i = 0; i < layers.item<int64_t>(); ++i) {
766 archive.read(key + ".layer[" + std::to_string(i) + "].in_features",
767 in_features);
768 archive.read(key + ".layer[" + std::to_string(i) + "].outputs_features",
769 outputs_features);
770 archive.read(key + ".layer[" + std::to_string(i) + "].bias", bias);
771 layers_.emplace_back(register_module(
772 "layer[" + std::to_string(i) + "]",
773 torch::nn::Linear(
774 torch::nn::LinearOptions(in_features.item<int64_t>(),
775 outputs_features.item<int64_t>())
776 .bias(bias.item<bool>()))));
777
778 archive.read(key + ".layer[" + std::to_string(i) + "].activation.type",
779 activation);
780 switch (static_cast<enum activation>(activation.item<int64_t>())) {
781 case activation::none:
782 activations_.emplace_back(new None{});
783 break;
784      case activation::batch_norm:
785        activations_.emplace_back(
786 new BatchNorm{torch::Tensor{}, torch::Tensor{}});
787 break;
788 case activation::celu:
789 activations_.emplace_back(new CELU{});
790 break;
791 case activation::elu:
792 activations_.emplace_back(new ELU{});
793 break;
794 case activation::gelu:
795 activations_.emplace_back(new GELU{});
796 break;
797 case activation::glu:
798 activations_.emplace_back(new GLU{});
799 break;
800      case activation::group_norm:
801        activations_.emplace_back(new GroupNorm{0});
802 break;
803      case activation::gumbel_softmax:
804        activations_.emplace_back(new GumbelSoftmax{});
805 break;
806      case activation::hardshrink:
807        activations_.emplace_back(new Hardshrink{});
808 break;
809      case activation::hardsigmoid:
810        activations_.emplace_back(new Hardsigmoid{});
811 break;
812      case activation::hardswish:
813        activations_.emplace_back(new Hardswish{});
814 break;
815      case activation::hardtanh:
816        activations_.emplace_back(new Hardtanh{});
817 break;
818      case activation::instance_norm:
819        activations_.emplace_back(new InstanceNorm{});
820 break;
821      case activation::layer_norm:
822        activations_.emplace_back(new LayerNorm{{}});
823 break;
824      case activation::leaky_relu:
825        activations_.emplace_back(new LeakyReLU{});
826 break;
827      case activation::local_response_norm:
828        activations_.emplace_back(new LocalResponseNorm{0});
829 break;
830      case activation::log_sigmoid:
831        activations_.emplace_back(new LogSigmoid{});
832 break;
833      case activation::log_softmax:
834        activations_.emplace_back(new LogSoftmax{0});
835 break;
836 case activation::mish:
837 activations_.emplace_back(new Mish{});
838 break;
839      case activation::normalize:
840        activations_.emplace_back(new Normalize{0, 0, 0});
841 break;
842      case activation::prelu:
843        activations_.emplace_back(new PReLU{torch::Tensor{}});
844 break;
845 case activation::relu:
846 activations_.emplace_back(new ReLU{});
847 break;
848      case activation::relu6:
849        activations_.emplace_back(new ReLU6{});
850 break;
851      case activation::rrelu:
852        activations_.emplace_back(new RReLU{});
853 break;
854 case activation::selu:
855 activations_.emplace_back(new SELU{});
856 break;
857      case activation::sigmoid:
858        activations_.emplace_back(new Sigmoid{});
859 break;
860 case activation::silu:
861 activations_.emplace_back(new SiLU{});
862 break;
863      case activation::softmax:
864        activations_.emplace_back(new Softmax{0});
865 break;
866      case activation::softmin:
867        activations_.emplace_back(new Softmin{0});
868 break;
869      case activation::softplus:
870        activations_.emplace_back(new Softplus{});
871 break;
872      case activation::softshrink:
873        activations_.emplace_back(new Softshrink{});
874 break;
875      case activation::softsign:
876        activations_.emplace_back(new Softsign{});
877 break;
878 case activation::tanh:
879 activations_.emplace_back(new Tanh{});
880 break;
881      case activation::tanhshrink:
882        activations_.emplace_back(new Tanhshrink{});
883 break;
884      case activation::threshold:
885        activations_.emplace_back(new Threshold{0, 0});
886 break;
887 default:
888 throw std::runtime_error("Invalid activation function");
889 }
890 activations_.back()->read(archive, key + ".layer[" + std::to_string(i) +
891 "].activation");
892 }
893 return archive;
894 }
895
896 inline virtual void
897 pretty_print(std::ostream &os = Log(log::info)) const noexcept override {
898 os << "(\n";
899
900 int i = 0;
901 for (const auto &activation : activations_)
902 os << "activation[" << i++ << "] = " << *activation << "\n";
903 os << ")\n";
904 }
905
906private:
908 std::vector<torch::nn::Linear> layers_;
909
911 std::vector<std::unique_ptr<iganet::ActivationFunction>> activations_;
912};
913
919template <typename real_t>
920class IgANetGenerator
921    : public torch::nn::ModuleHolder<IgANetGeneratorImpl<real_t>> {
922
923public:
924 using torch::nn::ModuleHolder<IgANetGeneratorImpl<real_t>>::ModuleHolder;
926};
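// Sketch (editorial example): as with all torch::nn module holders,
// IgANetGenerator wraps the implementation in a shared pointer, so instances
// are cheap to copy and are dereferenced with operator-> to reach the
// implementation:
//
//   IgANetGenerator<double> net(
//       std::vector<int64_t>{2, 16, 1},
//       std::vector<std::vector<std::any>>{{activation::tanh},
//                                          {activation::none}});
//   std::size_t n = net->parameters().size();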
927
931template <typename Optimizer, typename GeometryMap, typename Variable,
932 template <typename, typename> typename IgABase = ::iganet::IgABase>
933  requires OptimizerType<Optimizer> && FunctionSpaceType<GeometryMap> && FunctionSpaceType<Variable>
934class IgANet : public IgABase<GeometryMap, Variable>,
935               public utils::Serializable,
936               private utils::FullQualifiedName {
937public:
939  using Base = IgABase<GeometryMap, Variable>;
940
942 using optimizer_type = Optimizer;
943
946
947protected:
949  IgANetGenerator<typename Base::value_type> net_;
950
952 std::unique_ptr<optimizer_type> opt_;
953
955  IgANetOptions options_;
956
957public:
959 explicit IgANet(IgANetOptions defaults = {},
960                  Options<typename Base::value_type> options =
961                      Options<typename Base::value_type>{})
962      : // Construct the base class
963 Base(),
964 // Construct the optimizer
965 opt_(std::make_unique<optimizer_type>(net_->parameters())),
966 // Set options
967 options_(defaults) {}
968
973 template <std::size_t NumCoeffs>
974 IgANet(const std::vector<int64_t> &layers,
975 const std::vector<std::vector<std::any>> &activations,
976 std::array<int64_t, NumCoeffs> numCoeffs, IgANetOptions defaults = {},
977         Options<typename Base::value_type> options =
978             Options<typename Base::value_type>{})
979      : IgANet(layers, activations, std::tuple{numCoeffs}, std::tuple{numCoeffs},
980 defaults, options) {}
981
982 template <std::size_t... NumCoeffs>
983 IgANet(const std::vector<int64_t> &layers,
984 const std::vector<std::vector<std::any>> &activations,
985 std::tuple<std::array<int64_t, NumCoeffs>...> numCoeffs,
986 IgANetOptions defaults = {},
987         Options<typename Base::value_type> options =
988             Options<typename Base::value_type>{})
989      : IgANet(layers, activations, numCoeffs, numCoeffs, defaults, options) {}
991
996 template <std::size_t GeometryMapNumCoeffs, std::size_t VariableNumCoeffs>
997 IgANet(const std::vector<int64_t> &layers,
998 const std::vector<std::vector<std::any>> &activations,
999 std::array<int64_t, GeometryMapNumCoeffs> geometryMapNumCoeffs,
1000 std::array<int64_t, VariableNumCoeffs> variableNumCoeffs,
1001 IgANetOptions defaults = {},
1002         Options<typename Base::value_type> options =
1003             Options<typename Base::value_type>{})
1004      : IgANet(layers, activations, std::tuple{geometryMapNumCoeffs},
1005 std::tuple{variableNumCoeffs}, defaults, options) {}
1006
1007 template <std::size_t... GeometryMapNumCoeffs,
1008 std::size_t... VariableNumCoeffs>
1009  IgANet(
1010      const std::vector<int64_t> &layers,
1011 const std::vector<std::vector<std::any>> &activations,
1012 std::tuple<std::array<int64_t, GeometryMapNumCoeffs>...>
1013 geometryMapNumCoeffs,
1014 std::tuple<std::array<int64_t, VariableNumCoeffs>...> variableNumCoeffs,
1015 IgANetOptions defaults = {},
1016      Options<typename Base::value_type> options =
1017          Options<typename Base::value_type>{})
1018      : // Construct the base class
1019 Base(geometryMapNumCoeffs, variableNumCoeffs, options),
1020 // Construct the deep neural network
1021 net_(utils::concat(std::vector<int64_t>{inputs(/* epoch */ 0).size(0)},
1022 layers,
1023 std::vector<int64_t>{Base::u_.as_tensor_size()}),
1024 activations, options),
1025
1026 // Construct the optimizer
1027 opt_(std::make_unique<optimizer_type>(net_->parameters())),
1028
1029 // Set options
1030 options_(defaults) {}
1031
1033  inline IgANetGenerator<typename Base::value_type> &net() {
1034    return net_;
1035 }
1036
1039
1041 inline const optimizer_type &optimizer() const { return *opt_; }
1042
1044 inline optimizer_type &optimizer() { return *opt_; }
1045
1049 inline void optimizerReset(bool resetOptions = true) {
1050 if (resetOptions)
1051 opt_ = std::make_unique<optimizer_type>(net_->parameters());
1052 else {
1053 std::vector<optimizer_options_type> options;
1054 for (auto & group : opt_->param_groups())
1055 options.push_back(static_cast<optimizer_options_type&>(group.options()));
1056 opt_ = std::make_unique<optimizer_type>(net_->parameters());
1057 for (auto [group, options] : utils::zip(opt_->param_groups(), options))
1058 static_cast<optimizer_options_type&>(group.options()) = options;
1059 }
1060 }
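// Usage sketch (editorial example; assumes `mynet` is an instance of a
// concrete IgANet subclass and that optimizer_options_type exposes lr(), as
// the torch::optim options classes do): a reset rebuilds the optimizer's
// internal state (e.g. Adam moment estimates) for the current network
// parameters, and resetOptions = false carries the tuned per-group options
// over to the new optimizer:
//
//   mynet.optimizerOptions().lr(1e-3);
//   mynet.optimizerReset(/* resetOptions = */ false);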
1061
1063  inline void optimizerReset(const optimizer_options_type &optimizerOptions) {
1064    opt_ = std::make_unique<optimizer_type>(net_->parameters(), optimizerOptions);
1065 }
1066
1068 inline optimizer_options_type &optimizerOptions(std::size_t param_group = 0) {
1069 if (param_group < opt_->param_groups().size())
1070 return static_cast<optimizer_options_type&>(opt_->param_groups()[param_group].options());
1071 else
1072 throw std::runtime_error("Index exceeds number of parameter groups");
1073 }
1074
1076 inline const optimizer_options_type &optimizerOptions(std::size_t param_group = 0) const {
1077 if (param_group < opt_->param_groups().size())
1078 return static_cast<optimizer_options_type&>(opt_->param_groups()[param_group].options());
1079 else
1080 throw std::runtime_error("Index exceeds number of parameter groups");
1081 }
1082
1084  inline void optimizerOptionsReset(const optimizer_options_type &options) {
1085    for (auto &group : opt_->param_groups())
1086 static_cast<optimizer_options_type&>(group.options()) = options;
1087 }
1088
1090  inline void optimizerOptionsReset(optimizer_options_type &&options) {
1091    for (auto &group : opt_->param_groups())
1092 static_cast<optimizer_options_type&>(group.options()) = options;
1093 }
1094
1096 inline void optimizerOptionsReset(const optimizer_options_type& options, std::size_t param_group) {
1097 if (param_group < opt_->param_groups().size())
1098      static_cast<optimizer_options_type&>(opt_->param_groups()[param_group].options()) = options;
1099 else
1100 throw std::runtime_error("Index exceeds number of parameter groups");
1101 }
1102
1104 inline void optimizerOptionsReset(optimizer_options_type&& options, std::size_t param_group) {
1105 if (param_group < opt_->param_groups().size())
1106      static_cast<optimizer_options_type&>(opt_->param_groups()[param_group].options()) = options;
1107 else
1108 throw std::runtime_error("Index exceeds number of parameter groups");
1109 }
1110
1112 inline const auto &options() const { return options_; }
1113
1115 inline auto &options() { return options_; }
1116
1123 virtual torch::Tensor inputs(int64_t epoch) const {
1124    if constexpr (Base::has_GeometryMap && Base::has_RefData)
1125      return torch::cat({Base::G_.as_tensor(), Base::f_.as_tensor()});
1126 else if constexpr (Base::has_GeometryMap && !Base::has_RefData)
1127 return Base::G_.as_tensor();
1128 else if constexpr (!Base::has_GeometryMap && Base::has_RefData)
1129 return Base::f_.as_tensor();
1130 else
1131 return torch::empty({0});
1132 }
1133
1135 virtual bool epoch(int64_t) = 0;
1136
1138 virtual torch::Tensor loss(const torch::Tensor &, int64_t) = 0;
1139
1141 virtual void train(
1142#ifdef IGANET_WITH_MPI
1143 c10::intrusive_ptr<c10d::ProcessGroupMPI> pg =
1144 c10d::ProcessGroupMPI::createProcessGroupMPI()
1145#endif
1146 ) {
1147 torch::Tensor inputs, outputs, loss;
1148 typename Base::value_type previous_loss(-1.0);
1149
1150 // Loop over epochs
1151 for (int64_t epoch = 0; epoch != options_.max_epoch(); ++epoch) {
1152
1153 // Update epoch and inputs
1154 if (this->epoch(epoch))
1155 inputs = this->inputs(epoch);
1156
1157 auto closure = [&]() {
1158 // Reset gradients
1159 net_->zero_grad();
1160
1161 // Execute the model on the inputs
1162 outputs = net_->forward(inputs);
1163
1164 // Compute the loss value
1165 loss = this->loss(outputs, epoch);
1166
1167 // Compute gradients of the loss w.r.t. the model parameters
1168 loss.backward({}, true, false);
1169
1170 return loss;
1171 };
1172
1173#ifdef IGANET_WITH_MPI
1174 // Averaging the gradients of the parameters in all the processors
1175 // Note: This may lag behind DistributedDataParallel (DDP) in performance
1176 // since this synchronizes parameters after backward pass while DDP
1177 // overlaps synchronizing parameters and computing gradients in backward
1178 // pass
1179 std::vector<c10::intrusive_ptr<::c10d::Work>> works;
1180 for (auto &param : net_->named_parameters()) {
1181 std::vector<torch::Tensor> tmp = {param.value().grad()};
1182 works.emplace_back(pg->allreduce(tmp));
1183 }
1184
1185 waitWork(pg, works);
1186
1187 for (auto &param : net_->named_parameters()) {
1188 param.value().grad().data() =
1189 param.value().grad().data() / pg->getSize();
1190 }
1191#endif
1192
1193 // Update the parameters based on the calculated gradients
1194 opt_->step(closure);
1195
1196 typename Base::value_type current_loss = loss.template item<typename Base::value_type>();
1197 Log(log::verbose) << "Epoch " << std::to_string(epoch) << ": "
1198 << current_loss
1199 << std::endl;
1200
1201 if (current_loss <
1202 options_.min_loss()) {
1203 Log(log::info) << "Total epochs: " << epoch << ", loss: "
1204 << current_loss
1205 << std::endl;
1206 break;
1207 }
1208
1209 if (current_loss == previous_loss || std::abs(current_loss-previous_loss) < previous_loss/10) {
1210 Log(log::info) << "Total epochs: " << epoch << ", loss: "
1211 << current_loss
1212 << std::endl;
1213 break;
1214 }
1215
1216 if (loss.isnan().template item<bool>()) {
1217 Log(log::info) << "Total epochs: " << epoch << ", loss: "
1218 << current_loss
1219 << std::endl;
1220 break;
1221 }
1222 previous_loss = current_loss;
1223 }
1224 }
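// Minimal usage sketch (editorial example; MyNet and its loss are
// hypothetical, not part of the library): concrete networks derive from
// IgANet and implement epoch() and loss(). epoch() must return true whenever
// the inputs changed (at least at epoch 0) so that train() refreshes them.
//
//   template <typename Optimizer, typename GeometryMap, typename Variable>
//   struct MyNet : public iganet::IgANet<Optimizer, GeometryMap, Variable> {
//     using Base = iganet::IgANet<Optimizer, GeometryMap, Variable>;
//     using Base::Base;
//
//     bool epoch(int64_t e) override { return e == 0; }
//
//     torch::Tensor loss(const torch::Tensor &outputs, int64_t) override {
//       Base::u_.from_tensor(outputs);
//       // ... evaluate the PDE residual here; zero target only for brevity
//       return torch::mse_loss(outputs, torch::zeros_like(outputs));
//     }
//   };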
1225
1227 template <typename DataLoader>
1228 void train(DataLoader &loader
1229#ifdef IGANET_WITH_MPI
1230 ,
1231 c10::intrusive_ptr<c10d::ProcessGroupMPI> pg =
1232 c10d::ProcessGroupMPI::createProcessGroupMPI()
1233#endif
1234 ) {
1235 torch::Tensor inputs, outputs, loss;
1236 typename Base::value_type previous_loss(-1.0);
1237
1238 // Loop over epochs
1239 for (int64_t epoch = 0; epoch != options_.max_epoch(); ++epoch) {
1240
1241 typename Base::value_type Loss(0);
1242
1243 for (auto &batch : loader) {
1244 inputs = batch.data;
1245
1246 if (inputs.dim() > 0) {
1247 if constexpr (Base::has_GeometryMap && Base::has_RefData) {
1248 Base::G_.from_tensor(
1249 inputs.slice(1, 0, Base::G_.as_tensor_size()).t());
1250 Base::f_.from_tensor(inputs
1251 .slice(1, Base::G_.as_tensor_size(),
1252 Base::G_.as_tensor_size() +
1253 Base::f_.as_tensor_size())
1254 .t());
1255 } else if constexpr (Base::has_GeometryMap && !Base::has_RefData)
1256 Base::G_.from_tensor(
1257 inputs.slice(1, 0, Base::G_.as_tensor_size()).t());
1258 else if constexpr (!Base::has_GeometryMap && Base::has_RefData)
1259 Base::f_.from_tensor(
1260 inputs.slice(1, 0, Base::f_.as_tensor_size()).t());
1261
1262 } else {
1263 if constexpr (Base::has_GeometryMap && Base::has_RefData) {
1264 Base::G_.from_tensor(
1265 inputs.slice(1, 0, Base::G_.as_tensor_size()).flatten());
1266 Base::f_.from_tensor(inputs
1267 .slice(1, Base::G_.as_tensor_size(),
1268 Base::G_.as_tensor_size() +
1269 Base::f_.as_tensor_size())
1270 .flatten());
1271 } else if constexpr (Base::has_GeometryMap && !Base::has_RefData)
1272 Base::G_.from_tensor(
1273 inputs.slice(1, 0, Base::G_.as_tensor_size()).flatten());
1274 else if constexpr (!Base::has_GeometryMap && Base::has_RefData)
1275 Base::f_.from_tensor(
1276 inputs.slice(1, 0, Base::f_.as_tensor_size()).flatten());
1277 }
1278
1279 this->epoch(epoch);
1280
1281 auto closure = [&]() {
1282 // Reset gradients
1283 net_->zero_grad();
1284
1285 // Execute the model on the inputs
1286 outputs = net_->forward(inputs);
1287
1288 // Compute the loss value
1289 loss = this->loss(outputs, epoch);
1290
1291 // Compute gradients of the loss w.r.t. the model parameters
1292 loss.backward({}, true, false);
1293
1294 return loss;
1295 };
1296
1297 // Update the parameters based on the calculated gradients
1298 opt_->step(closure);
1299
1300 Loss += loss.template item<typename Base::value_type>();
1301 }
1302
1303 Log(log::verbose) << "Epoch " << std::to_string(epoch) << ": " << Loss
1304 << std::endl;
1305
1306 if (Loss < options_.min_loss()) {
1307 Log(log::info) << "Total epochs: " << epoch << ", loss: " << Loss
1308 << std::endl;
1309 break;
1310 }
1311
1312 if (Loss == previous_loss) {
1313 Log(log::info) << "Total epochs: " << epoch << ", loss: " << Loss
1314 << std::endl;
1315 break;
1316 }
1317 previous_loss = Loss;
1318
1319 if (epoch == options_.max_epoch() - 1)
1320 Log(log::warning) << "Total epochs: " << epoch << ", loss: " << Loss
1321 << std::endl;
1322 }
1323 }
1324
1326 void eval() {
1327 torch::Tensor inputs = this->inputs(0);
1328 torch::Tensor outputs = net_->forward(inputs);
1329 Base::u_.from_tensor(outputs);
1330 }
1331
1333 inline virtual nlohmann::json to_json() const override {
1334 return "Not implemented yet";
1335 }
1336
1338 inline std::vector<torch::Tensor> parameters() const noexcept {
1339 return net_->parameters();
1340 }
1341
1344 inline torch::OrderedDict<std::string, torch::Tensor>
1345 named_parameters() const noexcept {
1346 return net_->named_parameters();
1347 }
1348
1350 inline std::size_t nparameters() const noexcept {
1351 std::size_t result = 0;
1352 for (const auto &param : this->parameters()) {
1353 result += param.numel();
1354 }
1355 return result;
1356 }
1357
1359 inline virtual void
1360 pretty_print(std::ostream &os = Log(log::info)) const noexcept override {
1361 os << name() << "(\n"
1362 << "net = " << net_ << "\n";
1363 if constexpr (Base::has_GeometryMap)
1364 os << "G = " << Base::G_ << "\n";
1365 if constexpr (Base::has_RefData)
1366 os << "f = " << Base::f_ << "\n";
1367 if constexpr (Base::has_Solution)
1368 os << "u = " << Base::u_ << "\n)";
1369 }
1370
1372 inline void save(const std::string &filename,
1373 const std::string &key = "iganet") const {
1374 torch::serialize::OutputArchive archive;
1375 write(archive, key).save_to(filename);
1376 }
1377
1379 inline void load(const std::string &filename,
1380 const std::string &key = "iganet") {
1381 torch::serialize::InputArchive archive;
1382 archive.load_from(filename);
1383 read(archive, key);
1384 }
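// Sketch (editorial example): save()/load() round-trip the complete state
// (spline coefficients, network weights, and optimizer state) under a common
// key, e.g.
//
//   mynet.save("mynet.pt");  // writes "iganet.geo", "iganet.net", "iganet.opt", ...
//   mynet.load("mynet.pt");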
1385
1387 inline torch::serialize::OutputArchive &
1388 write(torch::serialize::OutputArchive &archive,
1389 const std::string &key = "iganet") const {
1390 if constexpr (Base::has_GeometryMap)
1391 Base::G_.write(archive, key + ".geo");
1392 if constexpr (Base::has_RefData)
1393 Base::f_.write(archive, key + ".ref");
1394 if constexpr (Base::has_Solution)
1395 Base::u_.write(archive, key + ".out");
1396
1397 net_->write(archive, key + ".net");
1398 torch::serialize::OutputArchive archive_net;
1399 net_->save(archive_net);
1400 archive.write(key + ".net.data", archive_net);
1401
1402 torch::serialize::OutputArchive archive_opt;
1403 opt_->save(archive_opt);
1404 archive.write(key + ".opt", archive_opt);
1405
1406 return archive;
1407 }
1408
1410 inline torch::serialize::InputArchive &
1411 read(torch::serialize::InputArchive &archive,
1412 const std::string &key = "iganet") {
1413 if constexpr (Base::has_GeometryMap)
1414 Base::G_.read(archive, key + ".geo");
1415 if constexpr (Base::has_RefData)
1416 Base::f_.read(archive, key + ".ref");
1417 if constexpr (Base::has_Solution)
1418 Base::u_.read(archive, key + ".out");
1419
1420 net_->read(archive, key + ".net");
1421 torch::serialize::InputArchive archive_net;
1422 archive.read(key + ".net.data", archive_net);
1423 net_->load(archive_net);
1424
1425 opt_->add_parameters(net_->parameters());
1426 torch::serialize::InputArchive archive_opt;
1427 archive.read(key + ".opt", archive_opt);
1428 opt_->load(archive_opt);
1429
1430 return archive;
1431 }
1432
1434 bool operator==(const IgANet &other) const {
1435 bool result(true);
1436
1437 if constexpr (Base::has_GeometryMap)
1438 result *= (Base::G_ == other.G());
1439 if constexpr (Base::has_RefData)
1440 result *= (Base::f_ == other.f());
1441 if constexpr (Base::has_Solution)
1442 result *= (Base::u_ == other.u());
1443
1444 return result;
1445 }
1446
1448  bool operator!=(const IgANet &other) const { return !(*this == other); }
1449
1450#ifdef IGANET_WITH_MPI
1451private:
1453 static void waitWork(c10::intrusive_ptr<c10d::ProcessGroupMPI> pg,
1454 std::vector<c10::intrusive_ptr<c10d::Work>> works) {
1455 for (auto &work : works) {
1456 try {
1457 work->wait();
1458 } catch (const std::exception &ex) {
1459 Log(log::error) << "Exception received during waitWork: " << ex.what()
1460 << std::endl;
1461 pg->abort();
1462 }
1463 }
1464 }
1465#endif
1466};
1467
1469 template <typename Optimizer, typename GeometryMap, typename Variable>
1470 requires OptimizerType<Optimizer> && FunctionSpaceType<GeometryMap> && FunctionSpaceType<Variable>
1471inline std::ostream &
1472operator<<(std::ostream &os,
1473           const IgANet<Optimizer, GeometryMap, Variable> &obj) {
1474  obj.pretty_print(os);
1475 return os;
1476}
1477
1483 template <typename GeometryMap, typename Variable>
1484 requires FunctionSpaceType<GeometryMap> && FunctionSpaceType<Variable>
1485class IgANetCustomizable {
1486public:
1488  using geometryMap_interior_knot_indices_type =
1489      decltype(std::declval<GeometryMap>()
1490 .template find_knot_indices<functionspace::interior>(
1491 std::declval<typename GeometryMap::eval_type>()));
1492
1494  using geometryMap_boundary_knot_indices_type =
1495      decltype(std::declval<GeometryMap>()
1496 .template find_knot_indices<functionspace::boundary>(
1497 std::declval<
1498 typename GeometryMap::boundary_eval_type>()));
1499
1501  using variable_interior_knot_indices_type =
1502      decltype(std::declval<Variable>()
1503 .template find_knot_indices<functionspace::interior>(
1504 std::declval<typename Variable::eval_type>()));
1505
1507  using variable_boundary_knot_indices_type =
1508      decltype(std::declval<Variable>()
1509 .template find_knot_indices<functionspace::boundary>(
1510 std::declval<typename Variable::boundary_eval_type>()));
1511
1513  using geometryMap_interior_coeff_indices_type =
1514      decltype(std::declval<GeometryMap>()
1515 .template find_coeff_indices<functionspace::interior>(
1516 std::declval<typename GeometryMap::eval_type>()));
1517
1519  using geometryMap_boundary_coeff_indices_type =
1520      decltype(std::declval<GeometryMap>()
1521 .template find_coeff_indices<functionspace::boundary>(
1522 std::declval<
1523 typename GeometryMap::boundary_eval_type>()));
1524
1526  using variable_interior_coeff_indices_type =
1527      decltype(std::declval<Variable>()
1528 .template find_coeff_indices<functionspace::interior>(
1529 std::declval<typename Variable::eval_type>()));
1530
1532  using variable_boundary_coeff_indices_type =
1533      decltype(std::declval<Variable>()
1534 .template find_coeff_indices<functionspace::boundary>(
1535 std::declval<typename Variable::boundary_eval_type>()));
1536};
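// Sketch (editorial example): derived networks typically cache these index
// types once, since knot and coefficient indices stay fixed while the network
// weights are trained, e.g. for the interior of the variable space:
//
//   using Customizable = iganet::IgANetCustomizable<GeometryMap, Variable>;
//   typename Customizable::variable_interior_knot_indices_type  var_knot_idx_;
//   typename Customizable::variable_interior_coeff_indices_type var_coeff_idx_;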
1537
1541template <typename Optimizer, typename Inputs, typename Outputs, typename CollPts = void>
1542  requires OptimizerType<Optimizer>
1543class IgANet2 : public IgABase2<Inputs, Outputs, CollPts>,
1544                 public utils::Serializable,
1545                 private utils::FullQualifiedName {
1546public:
1548  using Base = IgABase2<Inputs, Outputs, CollPts>;
1549
1551 using optimizer_type = Optimizer;
1552
1555
1556protected:
1558  IgANetGenerator<typename Base::value_type> net_;
1559
1561 std::unique_ptr<optimizer_type> opt_;
1562
1564  IgANetOptions options_;
1565
1566public:
1568 explicit IgANet2(IgANetOptions defaults = {},
1569                    Options<typename Base::value_type> options =
1570                        Options<typename Base::value_type>{})
1571      : // Construct the base class
1572 Base(),
1573 // Construct the optimizer
1574 opt_(std::make_unique<optimizer_type>(net_->parameters())),
1575 // Set options
1576 options_(defaults) {}
1577
1580 template <typename NumCoeffs>
1581 IgANet2(const std::vector<int64_t> &layers,
1582 const std::vector<std::vector<std::any>> &activations,
1583 const NumCoeffs &numCoeffs,
1584 enum init init = init::greville,
1585 IgANetOptions defaults = {},
1586           Options<typename Base::value_type> options =
1587               Options<typename Base::value_type>{})
1588      : IgANet2(layers, activations, numCoeffs, numCoeffs, init, defaults, options)
1589 {}
1590
1593 template <typename NumCoeffsInputs, typename NumCoeffsOutputs>
1594 IgANet2(const std::vector<int64_t> &layers,
1595 const std::vector<std::vector<std::any>> &activations,
1596 const NumCoeffsInputs &numCoeffsInputs,
1597 const NumCoeffsOutputs &numCoeffsOutputs,
1598 enum init init = init::greville,
1599 IgANetOptions defaults = {},
1600           Options<typename Base::value_type> options =
1601               Options<typename Base::value_type>{})
1602      : // Construct the base class
1603 Base(numCoeffsInputs, numCoeffsOutputs, init, options),
1604 // Construct the deep neural network
1605 net_(utils::concat(std::vector<int64_t>{inputs(/* epoch */ 0).size(0)},
1606 layers,
1607 std::vector<int64_t>{outputs(/* epoch */ 0).size(0)}),
1608 activations, options),
1609
1610 // Construct the optimizer
1611 opt_(std::make_unique<optimizer_type>(net_->parameters())),
1612
1613 // Set options
1614 options_(defaults) {}
1615
1617  inline IgANetGenerator<typename Base::value_type> &net() {
1618    return net_;
1619 }
1620
1623
1625 inline const optimizer_type &optimizer() const { return *opt_; }
1626
1628 inline optimizer_type &optimizer() { return *opt_; }
1629
1633 inline void optimizerReset(bool resetOptions = true) {
1634 if (resetOptions)
1635 opt_ = std::make_unique<optimizer_type>(net_->parameters());
1636 else {
1637 std::vector<optimizer_options_type> options;
1638 for (auto & group : opt_->param_groups())
1639 options.push_back(static_cast<optimizer_options_type&>(group.options()));
1640 opt_ = std::make_unique<optimizer_type>(net_->parameters());
1641 for (auto [group, options] : utils::zip(opt_->param_groups(), options))
1642 static_cast<optimizer_options_type&>(group.options()) = options;
1643 }
1644 }
1645
1647  inline void optimizerReset(const optimizer_options_type &optimizerOptions) {
1648    opt_ = std::make_unique<optimizer_type>(net_->parameters(), optimizerOptions);
1649 }
1650
1652 inline optimizer_options_type &optimizerOptions(std::size_t param_group = 0) {
1653 if (param_group < opt_->param_groups().size())
1654 return static_cast<optimizer_options_type&>(opt_->param_groups()[param_group].options());
1655 else
1656 throw std::runtime_error("Index exceeds number of parameter groups");
1657 }
1658
1660 inline const optimizer_options_type &optimizerOptions(std::size_t param_group = 0) const {
1661 if (param_group < opt_->param_groups().size())
1662 return static_cast<optimizer_options_type&>(opt_->param_groups()[param_group].options());
1663 else
1664 throw std::runtime_error("Index exceeds number of parameter groups");
1665 }
1666
1668  inline void optimizerOptionsReset(const optimizer_options_type &options) {
1669    for (auto &group : opt_->param_groups())
1670 static_cast<optimizer_options_type&>(group.options()) = options;
1671 }
1672
1674  inline void optimizerOptionsReset(optimizer_options_type &&options) {
1675    for (auto &group : opt_->param_groups())
1676 static_cast<optimizer_options_type&>(group.options()) = options;
1677 }
1678
1680 inline void optimizerOptionsReset(const optimizer_options_type& options, std::size_t param_group) {
1681 if (param_group < opt_->param_groups().size())
1682      static_cast<optimizer_options_type&>(opt_->param_groups()[param_group].options()) = options;
1683 else
1684 throw std::runtime_error("Index exceeds number of parameter groups");
1685 }
1686
1688 inline void optimizerOptionsReset(optimizer_options_type&& options, std::size_t param_group) {
1689 if (param_group < opt_->param_groups().size())
1690      static_cast<optimizer_options_type&>(opt_->param_groups()[param_group].options()) = options;
1691 else
1692 throw std::runtime_error("Index exceeds number of parameter groups");
1693 }
1694
1696 inline const auto &options() const { return options_; }
1697
1699 inline auto &options() { return options_; }
1700
1702 virtual torch::Tensor inputs(int64_t epoch) const {
1703 return utils::cat_tuple_into_tensor(Base::inputs_, [](const auto& obj){ return obj.as_tensor(); });
1704 }
1705
1707 virtual torch::Tensor outputs(int64_t epoch) const {
1708 return utils::cat_tuple_into_tensor(Base::outputs_, [](const auto& obj){ return obj.as_tensor(); });
1709 }
1710
1712 virtual void inputs(const torch::Tensor& tensor) {
1713 utils::slice_tensor_into_tuple(Base::inputs_, tensor,
1714 [](const auto& obj){ return obj.as_tensor_size(); },
1715 [](auto& obj, const auto& tensor){ return obj.from_tensor(tensor); });
1716 }
1717
1719 virtual void outputs(const torch::Tensor& tensor) {
1720 utils::slice_tensor_into_tuple(Base::outputs_, tensor,
1721 [](const auto& obj){ return obj.as_tensor_size(); },
1722 [](auto& obj, const auto& tensor){ return obj.from_tensor(tensor); });
1723 }
1724
1726 virtual bool epoch(int64_t) = 0;
1727
1729 virtual torch::Tensor loss(const torch::Tensor &, int64_t) = 0;
1730
1732 virtual void train(
1733#ifdef IGANET_WITH_MPI
1734 c10::intrusive_ptr<c10d::ProcessGroupMPI> pg =
1735 c10d::ProcessGroupMPI::createProcessGroupMPI()
1736#endif
1737 ) {
1738 torch::Tensor inputs, outputs, loss;
1739 typename Base::value_type previous_loss(-1.0);
1740
1741 // Loop over epochs
1742 for (int64_t epoch = 0; epoch != options_.max_epoch(); ++epoch) {
1743
1744 // Update epoch and inputs
1745 if (this->epoch(epoch))
1746 inputs = this->inputs(epoch);
1747
1748 auto closure = [&]() {
1749 // Reset gradients
1750 net_->zero_grad();
1751
1752 // Execute the model on the inputs
1753 outputs = net_->forward(inputs);
1754
1755 // Compute the loss value
1756 loss = this->loss(outputs, epoch);
1757
1758 // Compute gradients of the loss w.r.t. the model parameters
1759 loss.backward({}, true, false);
1760
1761 return loss;
1762 };
1763
1764#ifdef IGANET_WITH_MPI
1765 // Averaging the gradients of the parameters in all the processors
1766 // Note: This may lag behind DistributedDataParallel (DDP) in performance
1767 // since this synchronizes parameters after backward pass while DDP
1768 // overlaps synchronizing parameters and computing gradients in backward
1769 // pass
1770 std::vector<c10::intrusive_ptr<::c10d::Work>> works;
1771 for (auto &param : net_->named_parameters()) {
1772 std::vector<torch::Tensor> tmp = {param.value().grad()};
1773 works.emplace_back(pg->allreduce(tmp));
1774 }
1775
1776 waitWork(pg, works);
1777
1778 for (auto &param : net_->named_parameters()) {
1779 param.value().grad().data() =
1780 param.value().grad().data() / pg->getSize();
1781 }
1782#endif
1783
1784 // Update the parameters based on the calculated gradients
1785 opt_->step(closure);
1786
1787 typename Base::value_type current_loss = loss.template item<typename Base::value_type>();
1788 Log(log::verbose) << "Epoch " << std::to_string(epoch) << ": "
1789 << current_loss
1790 << std::endl;
1791
1792 if (current_loss <
1793 options_.min_loss()) {
1794 Log(log::info) << "Total epochs: " << epoch << ", loss: "
1795 << current_loss
1796 << std::endl;
1797 break;
1798 }
1799
1800 if (current_loss == previous_loss || std::abs(current_loss-previous_loss) < previous_loss/10) {
1801 Log(log::info) << "Total epochs: " << epoch << ", loss: "
1802 << current_loss
1803 << std::endl;
1804 break;
1805 }
1806
1807 if (loss.isnan().template item<bool>()) {
1808 Log(log::info) << "Total epochs: " << epoch << ", loss: "
1809 << current_loss
1810 << std::endl;
1811 break;
1812 }
1813 previous_loss = current_loss;
1814 }
1815 }
1816
1818 template <typename DataLoader>
1819 void train(DataLoader &loader
1820#ifdef IGANET_WITH_MPI
1821 ,
1822 c10::intrusive_ptr<c10d::ProcessGroupMPI> pg =
1823 c10d::ProcessGroupMPI::createProcessGroupMPI()
1824#endif
1825 ) {
1826 torch::Tensor inputs, outputs, loss;
1827 typename Base::value_type previous_loss(-1.0);
1828
1829 // Loop over epochs
1830 for (int64_t epoch = 0; epoch != options_.max_epoch(); ++epoch) {
1831
1832 typename Base::value_type Loss(0);
1833
1834 for (auto &batch : loader) {
1835 inputs = batch.data;
1836
1837 if (inputs.dim() > 0) {
1838 // if constexpr (Base::has_GeometryMap && Base::has_RefData) {
1839 // Base::G_.from_tensor(
1840 // inputs.slice(1, 0, Base::G_.as_tensor_size()).t());
1841 // Base::f_.from_tensor(inputs
1842 // .slice(1, Base::G_.as_tensor_size(),
1843 // Base::G_.as_tensor_size() +
1844 // Base::f_.as_tensor_size())
1845 // .t());
1846 // } else if constexpr (Base::has_GeometryMap && !Base::has_RefData)
1847 // Base::G_.from_tensor(
1848 // inputs.slice(1, 0, Base::G_.as_tensor_size()).t());
1849 // else if constexpr (!Base::has_GeometryMap && Base::has_RefData)
1850 // Base::f_.from_tensor(
1851 // inputs.slice(1, 0, Base::f_.as_tensor_size()).t());
1852
1853 } else {
1854 // if constexpr (Base::has_GeometryMap && Base::has_RefData) {
1855 // Base::G_.from_tensor(
1856 // inputs.slice(1, 0, Base::G_.as_tensor_size()).flatten());
1857 // Base::f_.from_tensor(inputs
1858 // .slice(1, Base::G_.as_tensor_size(),
1859 // Base::G_.as_tensor_size() +
1860 // Base::f_.as_tensor_size())
1861 // .flatten());
1862 // } else if constexpr (Base::has_GeometryMap && !Base::has_RefData)
1863 // Base::G_.from_tensor(
1864 // inputs.slice(1, 0, Base::G_.as_tensor_size()).flatten());
1865 // else if constexpr (!Base::has_GeometryMap && Base::has_RefData)
1866 // Base::f_.from_tensor(
1867 // inputs.slice(1, 0, Base::f_.as_tensor_size()).flatten());
1868 }
1869
1870 this->epoch(epoch);
1871
1872 auto closure = [&]() {
1873 // Reset gradients
1874 net_->zero_grad();
1875
1876 // Execute the model on the inputs
1877 outputs = net_->forward(inputs);
1878
1879 // Compute the loss value
1880 loss = this->loss(outputs, epoch);
1881
1882 // Compute gradients of the loss w.r.t. the model parameters
1883 loss.backward({}, true, false);
1884
1885 return loss;
1886 };
1887
1888 // Update the parameters based on the calculated gradients
1889 opt_->step(closure);
1890
1891 Loss += loss.template item<typename Base::value_type>();
1892 }
1893
1894 Log(log::verbose) << "Epoch " << std::to_string(epoch) << ": " << Loss
1895 << std::endl;
1896
1897 if (Loss < options_.min_loss()) {
1898 Log(log::info) << "Total epochs: " << epoch << ", loss: " << Loss
1899 << std::endl;
1900 break;
1901 }
1902
1903 if (Loss == previous_loss) {
1904 Log(log::info) << "Total epochs: " << epoch << ", loss: " << Loss
1905 << std::endl;
1906 break;
1907 }
1908 previous_loss = Loss;
1909
1910 if (epoch == options_.max_epoch() - 1)
1911 Log(log::warning) << "Total epochs: " << epoch << ", loss: " << Loss
1912 << std::endl;
1913 }
1914 }
1915
1917 void eval() {
1918 torch::Tensor inputs = this->inputs(0);
1919 torch::Tensor outputs = net_->forward(inputs);
1920 this->outputs(outputs);
1921 }
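// Usage sketch (editorial example; `net2` is a hypothetical instance of a
// concrete IgANet2 subclass): eval() is the inference path in coefficient
// space. It flattens Base::inputs_ into one tensor, runs the network once,
// and scatters the result back into Base::outputs_ via the outputs(tensor)
// setter:
//
//   net2.eval();  // afterwards Base::outputs_ holds the predicted coefficients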
1922
1924 inline virtual nlohmann::json to_json() const override {
1925 return "Not implemented yet";
1926 }
1927
1929 inline std::vector<torch::Tensor> parameters() const noexcept {
1930 return net_->parameters();
1931 }
1932
1935 inline torch::OrderedDict<std::string, torch::Tensor>
1936 named_parameters() const noexcept {
1937 return net_->named_parameters();
1938 }
1939
1941 inline std::size_t nparameters() const noexcept {
1942 std::size_t result = 0;
1943 for (const auto &param : this->parameters()) {
1944 result += param.numel();
1945 }
1946 return result;
1947 }
1948
1950 inline virtual void
1951 pretty_print(std::ostream &os = Log(log::info)) const noexcept override {
1952 os << name() << "(\n"
1953 << "net = " << net_ << "\n";
1954 // if constexpr (Base::has_GeometryMap)
1955 // os << "G = " << Base::G_ << "\n";
1956 // if constexpr (Base::has_RefData)
1957 // os << "f = " << Base::f_ << "\n";
1958 // if constexpr (Base::has_Solution)
1959 // os << "u = " << Base::u_ << "\n)";
1960 }
1961
1963 inline void save(const std::string &filename,
1964 const std::string &key = "iganet") const {
1965 torch::serialize::OutputArchive archive;
1966 write(archive, key).save_to(filename);
1967 }
1968
1970 inline void load(const std::string &filename,
1971 const std::string &key = "iganet") {
1972 torch::serialize::InputArchive archive;
1973 archive.load_from(filename);
1974 read(archive, key);
1975 }
1976
1978 inline torch::serialize::OutputArchive &
1979 write(torch::serialize::OutputArchive &archive,
1980 const std::string &key = "iganet") const {
1981 // if constexpr (Base::has_GeometryMap)
1982 // Base::G_.write(archive, key + ".geo");
1983 // if constexpr (Base::has_RefData)
1984 // Base::f_.write(archive, key + ".ref");
1985 // if constexpr (Base::has_Solution)
1986 // Base::u_.write(archive, key + ".out");
1987
1988 net_->write(archive, key + ".net");
1989 torch::serialize::OutputArchive archive_net;
1990 net_->save(archive_net);
1991 archive.write(key + ".net.data", archive_net);
1992
1993 torch::serialize::OutputArchive archive_opt;
1994 opt_->save(archive_opt);
1995 archive.write(key + ".opt", archive_opt);
1996
1997 return archive;
1998 }
1999
2001 inline torch::serialize::InputArchive &
2002 read(torch::serialize::InputArchive &archive,
2003 const std::string &key = "iganet") {
2004 // if constexpr (Base::has_GeometryMap)
2005 // Base::G_.read(archive, key + ".geo");
2006 // if constexpr (Base::has_RefData)
2007 // Base::f_.read(archive, key + ".ref");
2008 // if constexpr (Base::has_Solution)
2009 // Base::u_.read(archive, key + ".out");
2010
2011 net_->read(archive, key + ".net");
2012 torch::serialize::InputArchive archive_net;
2013 archive.read(key + ".net.data", archive_net);
2014 net_->load(archive_net);
2015
2016 opt_->add_parameters(net_->parameters());
2017 torch::serialize::InputArchive archive_opt;
2018 archive.read(key + ".opt", archive_opt);
2019 opt_->load(archive_opt);
2020
2021 return archive;
2022 }
2023
2025 bool operator==(const IgANet2 &other) const {
2026 bool result(true);
2027
2028 // if constexpr (Base::has_GeometryMap)
2029 // result *= (Base::G_ == other.G());
2030 // if constexpr (Base::has_RefData)
2031 // result *= (Base::f_ == other.f());
2032 // if constexpr (Base::has_Solution)
2033 // result *= (Base::u_ == other.u());
2034
2035 return result;
2036 }
2037
2040  bool operator!=(const IgANet2 &other) const { return !(*this == other); }
2040
2041#ifdef IGANET_WITH_MPI
2042private:
2044 static void waitWork(c10::intrusive_ptr<c10d::ProcessGroupMPI> pg,
2045 std::vector<c10::intrusive_ptr<c10d::Work>> works) {
2046 for (auto &work : works) {
2047 try {
2048 work->wait();
2049 } catch (const std::exception &ex) {
2050 Log(log::error) << "Exception received during waitWork: " << ex.what()
2051 << std::endl;
2052 pg->abort();
2053 }
2054 }
2055 }
2056#endif
2057};
2058
2060template <typename Optimizer, typename Inputs, typename Outputs, typename CollPts>
2061requires OptimizerType<Optimizer>
2062inline std::ostream &
2063operator<<(std::ostream &os,
2064           const IgANet2<Optimizer, Inputs, Outputs, CollPts> &obj) {
2065  // obj.pretty_print(os);
2066 return os;
2067}
2068
2076template <typename, typename, typename = void>
2077class IgANetCustomizable2;
2078
2079template <detail::HasAsTensor... Inputs,
2080 detail::HasAsTensor... Outputs>
2081class IgANetCustomizable2<std::tuple<Inputs...>,
2082 std::tuple<Outputs...>, void> {
2083public:
2085  using inputs_interior_knot_indices_type =
2086      std::tuple<decltype(std::declval<Inputs>()
2087 .template find_knot_indices<functionspace::interior>(
2088 std::declval<typename Inputs::eval_type>()))...>;
2089
2091 template<std::size_t index>
2092 using input_interior_knot_indices_t = typename std::tuple_element_t<index, inputs_interior_knot_indices_type>;
2093
2095  using inputs_boundary_knot_indices_type =
2096      std::tuple<decltype(std::declval<Inputs>()
2097 .template find_knot_indices<functionspace::boundary>(
2098 std::declval<
2099 typename Inputs::boundary_eval_type>()))...>;
2100
2102 template<std::size_t index>
2103 using input_boundary_knot_indices_t = typename std::tuple_element_t<index, inputs_boundary_knot_indices_type>;
2104
2106  using outputs_interior_knot_indices_type =
2107      std::tuple<decltype(std::declval<Outputs>()
2108 .template find_knot_indices<functionspace::interior>(
2109 std::declval<typename Outputs::eval_type>()))...>;
2110
2112 template<std::size_t index>
2113 using output_interior_knot_indices_t = typename std::tuple_element_t<index, outputs_interior_knot_indices_type>;
2114
2116  using outputs_boundary_knot_indices_type =
2117      std::tuple<decltype(std::declval<Outputs>()
2118 .template find_knot_indices<functionspace::boundary>(
2119 std::declval<
2120 typename Outputs::boundary_eval_type>()))...>;
2121
2123 template<std::size_t index>
2124 using output_boundary_knot_indices_t = typename std::tuple_element_t<index, outputs_boundary_knot_indices_type>;
2125
2127  using inputs_interior_coeff_indices_type =
2128      std::tuple<decltype(std::declval<Inputs>()
2129 .template find_coeff_indices<functionspace::interior>(
2130 std::declval<typename Inputs::eval_type>()))...>;
2131
2133 template<std::size_t index>
2134 using input_interior_coeff_indices_t = typename std::tuple_element_t<index, inputs_interior_coeff_indices_type>;
2135
2137  using inputs_boundary_coeff_indices_type =
2138      std::tuple<decltype(std::declval<Inputs>()
2139 .template find_coeff_indices<functionspace::boundary>(
2140 std::declval<
2141 typename Inputs::boundary_eval_type>()))...>;
2142
2144 template<std::size_t index>
2145 using input_boundary_coeff_indices_t = typename std::tuple_element_t<index, inputs_boundary_coeff_indices_type>;
2146
2149 using outputs_interior_coeff_indices_type = std::tuple<decltype(std::declval<Outputs>()
2150 .template find_coeff_indices<functionspace::interior>(
2151 std::declval<typename Outputs::eval_type>()))...>;
2152
2154 template<std::size_t index>
2155 using output_interior_coeff_indices_t = typename std::tuple_element_t<index, outputs_interior_coeff_indices_type>;
2156
2159 using outputs_boundary_coeff_indices_type = std::tuple<decltype(std::declval<Outputs>()
2160 .template find_coeff_indices<functionspace::boundary>(
2161 std::declval<
2162 typename Outputs::boundary_eval_type>()))...>;
2163
2165 template<std::size_t index>
2166 using output_boundary_coeff_indices_t = typename std::tuple_element_t<index, outputs_boundary_coeff_indices_type>;
2167};
2168
2169template <detail::HasAsTensor... Inputs,
2170 detail::HasAsTensor... Outputs,
2171 detail::HasAsTensor... CollPts>
2172class IgANetCustomizable2<std::tuple<Inputs...>,
2173 std::tuple<Outputs...>,
2174 std::tuple<CollPts...>> : public IgANetCustomizable2<std::tuple<Inputs...>,
2175 std::tuple<Outputs...>, void> {
2176public:
2179 using collPts_interior_knot_indices_type = std::tuple<decltype(std::declval<CollPts>()
2180 .template find_knot_indices<functionspace::interior>(
2181 std::declval<typename CollPts::eval_type>()))...>;
2182
2185 using collPts_boundary_knot_indices_type = std::tuple<decltype(std::declval<CollPts>()
2186 .template find_knot_indices<functionspace::boundary>(
2187 std::declval<
2188 typename CollPts::boundary_eval_type>()))...>;
2189
2192 using collPts_interior_coeff_indices_type = std::tuple<decltype(std::declval<CollPts>()
2193 .template find_coeff_indices<functionspace::interior>(
2194 std::declval<typename CollPts::eval_type>()))...>;
2195
2198 using collPts_boundary_coeff_indices_type = std::tuple<decltype(std::declval<CollPts>()
2199 .template find_coeff_indices<functionspace::boundary>(
2200 std::declval<
2201 typename CollPts::boundary_eval_type>()))...>;
2202};
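The alias types collected by IgANetCustomizable2 exist so that derived classes can precompute the knot and coefficient indices of their function spaces once and reuse them across evaluations. A hedged sketch of that caching pattern (IndexCache and its member names are illustrative; the tuple parameters must hold types satisfying detail::HasAsTensor for the partial specializations above to match):

#include <tuple>

template <typename InputsTuple, typename OutputsTuple, typename CollPtsTuple>
struct IndexCache
    : public iganet::IgANetCustomizable2<InputsTuple, OutputsTuple, CollPtsTuple> {
  using Customizable =
      iganet::IgANetCustomizable2<InputsTuple, OutputsTuple, CollPtsTuple>;

  // Caches for the interior indices; the alias types come from the base class.
  typename Customizable::inputs_interior_knot_indices_type inputsKnotIdx;
  typename Customizable::inputs_interior_coeff_indices_type inputsCoeffIdx;
  typename Customizable::collPts_interior_knot_indices_type collPtsKnotIdx;
  typename Customizable::collPts_interior_coeff_indices_type collPtsCoeffIdx;
};

For a single space, the per-index aliases, e.g. input_interior_knot_indices_t<0>, select the corresponding element type from these tuples.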
2203
2205
2206} // namespace iganet
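To close, a hedged end-to-end sketch of the workflow documented in the reference below (constructor, train(), save(), load()). MyIgANet2 refers to the hypothetical subclass sketched earlier; Net, numCoeffs, the layer widths, and the activation choices are illustrative assumptions, not library defaults:

using Net = MyIgANet2</* concrete Optimizer, Inputs, Outputs, CollPts */>;

iganet::IgANetOptions defaults;
defaults.max_epoch(500).min_loss(1e-6);  // TORCH_ARG-generated fluent setters

Net net({2, 32, 32, 1},                  // layer widths
        {{iganet::activation::tanh},     // one activation per layer transition
         {iganet::activation::tanh},
         {iganet::activation::none}},    // enum values assumed to exist
        numCoeffs,                       // spline coefficients per space
        iganet::init::greville, defaults);

net.train();        // iterates until max_epoch or min_loss is reached
net.save("net.pt"); // serializes under the default key "iganet"
net.load("net.pt"); // restores the network weights and the optimizer state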
Boundary treatment.
Batch Normalization as described in the paper.
Definition layer.hpp:134
Continuously Differentiable Exponential Linear Units activation function.
Definition layer.hpp:264
Exponential Linear Units activation function.
Definition layer.hpp:341
Gaussian Error Linear Units activation function.
Definition layer.hpp:417
Gated Linear Units activation function.
Definition layer.hpp:468
Group Normalization over a mini-batch of inputs as described in the paper Group Normalization,...
Definition layer.hpp:532
Gumbel-Softmax distribution activation function.
Definition layer.hpp:615
Hardshrink activation function.
Definition layer.hpp:692
Hardsigmoid activation function.
Definition layer.hpp:769
Hardswish activation function.
Definition layer.hpp:822
Hardtanh activation function.
Definition layer.hpp:875
namespace detail
Definition igabase.hpp:47
IgA base class.
Definition igabase.hpp:1195
Variable f_
Spline representation of the reference data.
Definition igabase.hpp:1226
typename Base::value_type value_type
Value type.
Definition igabase.hpp:1201
static bool constexpr has_GeometryMap
Indicates whether this class provides a geometry map.
Definition igabase.hpp:1216
static bool constexpr has_Solution
Indicates whether this class provides a solution.
Definition igabase.hpp:1222
static bool constexpr has_RefData
Indicates whether this class provides a reference solution.
Definition igabase.hpp:1219
GeometryMap G_
Spline representation of the geometry map.
Definition igabase.hpp:809
Variable u_
Spline representation of the solution.
Definition igabase.hpp:812
IgANet2.
Definition iganet.hpp:1545
virtual torch::Tensor inputs(int64_t epoch) const
Returns the network inputs as tensor.
Definition iganet.hpp:1702
optimizer_options_type & optimizerOptions(std::size_t param_group=0)
Returns a non-constant reference to the optimizer options.
Definition iganet.hpp:1652
void optimizerOptionsReset(optimizer_options_type &&options)
Resets the optimizer options.
Definition iganet.hpp:1674
void train(DataLoader &loader)
Trains the IgANet.
Definition iganet.hpp:1819
const auto & options() const
Returns a constant reference to the options structure.
Definition iganet.hpp:1696
IgANetOptions options_
Options.
Definition iganet.hpp:1564
torch::OrderedDict< std::string, torch::Tensor > named_parameters() const noexcept
Returns a constant reference to the named parameters of the IgANet object.
Definition iganet.hpp:1936
void optimizerOptionsReset(optimizer_options_type &&options, std::size_t param_group)
Resets the optimizer options.
Definition iganet.hpp:1688
IgANet2(const std::vector< int64_t > &layers, const std::vector< std::vector< std::any > > &activations, const NumCoeffs &numCoeffs, enum init init=init::greville, IgANetOptions defaults={}, iganet::Options< typename Base::value_type > options=iganet::Options< typename Base::value_type >{})
Constructor: number of layers, activation functions, and number of spline coefficients (same for all ...
Definition iganet.hpp:1581
void save(const std::string &filename, const std::string &key="iganet") const
Saves the IgANet to file.
Definition iganet.hpp:1963
const optimizer_options_type & optimizerOptions(std::size_t param_group=0) const
Returns a constant reference to the optimizer options.
Definition iganet.hpp:1660
std::unique_ptr< optimizer_type > opt_
Optimizer.
Definition iganet.hpp:1561
void optimizerOptionsReset(const optimizer_options_type &options, std::size_t param_group)
Resets the optimizer options.
Definition iganet.hpp:1680
void eval()
Evaluate IgANet.
Definition iganet.hpp:1917
IgANetGenerator< typename Base::value_type > & net()
Returns a non-constant reference to the IgANet generator.
Definition iganet.hpp:1622
virtual void train()
Trains the IgANet.
Definition iganet.hpp:1732
auto & options()
Returns a non-constant reference to the options structure.
Definition iganet.hpp:1699
void load(const std::string &filename, const std::string &key="iganet")
Loads the IgANet from file.
Definition iganet.hpp:1970
typename optimizer_options_type< Optimizer >::type optimizer_options_type
Type of the optimizer options.
Definition iganet.hpp:1554
void optimizerReset(const optimizer_options_type &optimizerOptions)
Resets the optimizer.
Definition iganet.hpp:1647
const IgANetGenerator< typename Base::value_type > & net() const
Returns a constant reference to the IgANet generator.
Definition iganet.hpp:1617
virtual bool epoch(int64_t)=0
Initializes epoch.
void optimizerOptionsReset(const optimizer_options_type &options)
Resets the optimizer options.
Definition iganet.hpp:1668
bool operator==(const IgANet2 &other) const
Returns true if both IgANet objects are the same.
Definition iganet.hpp:2025
bool operator!=(const IgANet2 &other) const
Returns true if both IgANet objects are different.
Definition iganet.hpp:2039
virtual void pretty_print(std::ostream &os=Log(log::info)) const noexcept override
Returns a string representation of the IgANet object.
Definition iganet.hpp:1951
IgABase2< Inputs, Outputs, CollPts > Base
Base type.
Definition iganet.hpp:1548
const optimizer_type & optimizer() const
Returns a constant reference to the optimizer.
Definition iganet.hpp:1625
IgANet2(const std::vector< int64_t > &layers, const std::vector< std::vector< std::any > > &activations, const NumCoeffsInputs &numCoeffsInputs, const NumCoeffsOutputs &numCoeffsOutputs, enum init init=init::greville, IgANetOptions defaults={}, iganet::Options< typename Base::value_type > options=iganet::Options< typename Base::value_type >{})
Constructor: number of layers, activation functions, and number of spline coefficients (same for all ...
Definition iganet.hpp:1594
optimizer_type & optimizer()
Returns a non-constant reference to the optimizer.
Definition iganet.hpp:1628
IgANetGenerator< typename Base::value_type > net_
IgANet generator.
Definition iganet.hpp:1558
std::vector< torch::Tensor > parameters() const noexcept
Returns a constant reference to the parameters of the IgANet object.
Definition iganet.hpp:1929
virtual void outputs(const torch::Tensor &tensor)
Attaches the given tensor to the outputs.
Definition iganet.hpp:1719
torch::serialize::OutputArchive & write(torch::serialize::OutputArchive &archive, const std::string &key="iganet") const
Writes the IgANet into a torch::serialize::OutputArchive object.
Definition iganet.hpp:1979
virtual void inputs(const torch::Tensor &tensor)
Attaches the given tensor to the inputs.
Definition iganet.hpp:1712
Optimizer optimizer_type
Type of the optimizer.
Definition iganet.hpp:1551
torch::serialize::InputArchive & read(torch::serialize::InputArchive &archive, const std::string &key="iganet")
Loads the IgANet from a torch::serialize::InputArchive object.
Definition iganet.hpp:2002
IgANet2(IgANetOptions defaults={}, iganet::Options< typename Base::value_type > options=iganet::Options< typename Base::value_type >{})
Default constructor.
Definition iganet.hpp:1568
void optimizerReset(bool resetOptions=true)
Resets the optimizer.
Definition iganet.hpp:1633
virtual torch::Tensor outputs(int64_t epoch) const
Returns the network outputs as tensor.
Definition iganet.hpp:1707
virtual nlohmann::json to_json() const override
Returns the IgANet object as a JSON object.
Definition iganet.hpp:1924
virtual torch::Tensor loss(const torch::Tensor &, int64_t)=0
Computes the loss function.
std::size_t nparameters() const noexcept
Returns the total number of parameters of the IgANet object.
Definition iganet.hpp:1941
std::tuple< decltype(std::declval< Inputs >() .template find_coeff_indices< functionspace::boundary >(std::declval< typename Inputs::boundary_eval_type >()))... > inputs_boundary_coeff_indices_type
Type of the coefficient indices of the inputs at the boundary.
Definition iganet.hpp:2141
std::tuple< decltype(std::declval< Outputs >() .template find_coeff_indices< functionspace::interior >(std::declval< typename Outputs::eval_type >()))... > outputs_interior_coeff_indices_type
Type of the coefficient indices of the outputs in the interior.
Definition iganet.hpp:2151
typename std::tuple_element_t< index, inputs_boundary_coeff_indices_type > input_boundary_coeff_indices_t
Type alias for the type of the index-th coefficient indices of the inputs at the boundary.
Definition iganet.hpp:2145
typename std::tuple_element_t< index, outputs_boundary_coeff_indices_type > output_boundary_coeff_indices_t
Type alias for the type of the index-th coefficient indices of the outputs at the boundary.
Definition iganet.hpp:2166
typename std::tuple_element_t< index, outputs_interior_coeff_indices_type > output_interior_coeff_indices_t
Type alias for the type of the index-th coefficient indices of the outputs in the interior.
Definition iganet.hpp:2155
std::tuple< decltype(std::declval< Outputs >() .template find_coeff_indices< functionspace::boundary >(std::declval< typename Outputs::boundary_eval_type >()))... > outputs_boundary_coeff_indices_type
Type of the coefficient indices of the outputs at the boundary.
Definition iganet.hpp:2162
typename std::tuple_element_t< index, inputs_boundary_knot_indices_type > input_boundary_knot_indices_t
Type alias for the type of the index-th knot indices of the inputs at the boundary.
Definition iganet.hpp:2103
std::tuple< decltype(std::declval< Inputs >() .template find_coeff_indices< functionspace::interior >(std::declval< typename Inputs::eval_type >()))... > inputs_interior_coeff_indices_type
Type of the coefficient indices of the inputs in the interior.
Definition iganet.hpp:2130
std::tuple< decltype(std::declval< Outputs >() .template find_knot_indices< functionspace::interior >(std::declval< typename Outputs::eval_type >()))... > outputs_interior_knot_indices_type
Type of the knot indices of the outputs in the interior.
Definition iganet.hpp:2109
std::tuple< decltype(std::declval< Inputs >() .template find_knot_indices< functionspace::interior >(std::declval< typename Inputs::eval_type >()))... > inputs_interior_knot_indices_type
Type of the knot indices of the inputs in the interior.
Definition iganet.hpp:2088
typename std::tuple_element_t< index, inputs_interior_coeff_indices_type > input_interior_coeff_indices_t
Type alias for the type of the index-th coefficient indices of the inputs in the interior.
Definition iganet.hpp:2134
typename std::tuple_element_t< index, inputs_interior_knot_indices_type > input_interior_knot_indices_t
Type alias for the type of the index-th knot indices of the inputs in the interior.
Definition iganet.hpp:2092
std::tuple< decltype(std::declval< Outputs >() .template find_knot_indices< functionspace::boundary >(std::declval< typename Outputs::boundary_eval_type >()))... > outputs_boundary_knot_indices_type
Type of the knot indices of the outputs at the boundary.
Definition iganet.hpp:2120
typename std::tuple_element_t< index, outputs_interior_knot_indices_type > output_interior_knot_indices_t
Type alias for the type of the index-th knot indices of the outputs in the interior.
Definition iganet.hpp:2113
typename std::tuple_element_t< index, outputs_boundary_knot_indices_type > output_boundary_knot_indices_t
Type alias for the type of the index-th knot indices of the outputs at the boundary.
Definition iganet.hpp:2124
std::tuple< decltype(std::declval< Inputs >() .template find_knot_indices< functionspace::boundary >(std::declval< typename Inputs::boundary_eval_type >()))... > inputs_boundary_knot_indices_type
Type of the knot indices of the inputs at the boundary.
Definition iganet.hpp:2099
std::tuple< decltype(std::declval< CollPts >() .template find_coeff_indices< functionspace::boundary >(std::declval< typename CollPts::boundary_eval_type >()))... > collPts_boundary_coeff_indices_type
Type of the coefficient indices of the collocation-point objects at the boundary.
Definition iganet.hpp:2201
std::tuple< decltype(std::declval< CollPts >() .template find_knot_indices< functionspace::boundary >(std::declval< typename CollPts::boundary_eval_type >()))... > collPts_boundary_knot_indices_type
Type of the knot indices of the collocation-point objects at the boundary.
Definition iganet.hpp:2188
std::tuple< decltype(std::declval< CollPts >() .template find_knot_indices< functionspace::interior >(std::declval< typename CollPts::eval_type >()))... > collPts_interior_knot_indices_type
Type of the knot indices of the collocation-point objects in the interior.
Definition iganet.hpp:2181
std::tuple< decltype(std::declval< CollPts >() .template find_coeff_indices< functionspace::interior >(std::declval< typename CollPts::eval_type >()))... > collPts_interior_coeff_indices_type
Type of the coefficient indices of the collocation-point objects in the interior.
Definition iganet.hpp:2194
IgANetGenerator.
Definition iganet.hpp:921
IgANetGeneratorImpl.
Definition iganet.hpp:48
torch::serialize::InputArchive & read(torch::serialize::InputArchive &archive, const std::string &key="iganet")
Reads the IgANet from a torch::serialize::InputArchive object.
Definition iganet.hpp:760
IgANetGeneratorImpl()=default
Default constructor.
IgANetGeneratorImpl(const std::vector< int64_t > &layers, const std::vector< std::vector< std::any > > &activations, Options< real_t > options=Options< real_t >{})
Constructor.
Definition iganet.hpp:54
std::vector< std::unique_ptr< iganet::ActivationFunction > > activations_
Vector of activation functions.
Definition iganet.hpp:911
virtual void pretty_print(std::ostream &os=Log(log::info)) const noexcept override
Definition iganet.hpp:897
torch::Tensor forward(torch::Tensor x)
Forward evaluation.
Definition iganet.hpp:724
std::vector< torch::nn::Linear > layers_
Vector of linear layers.
Definition iganet.hpp:908
torch::serialize::OutputArchive & write(torch::serialize::OutputArchive &archive, const std::string &key="iganet") const
Writes the IgANet into a torch::serialize::OutputArchive object.
Definition iganet.hpp:736
IgANet.
Definition iganet.hpp:936
const auto & options() const
Returns a constant reference to the options structure.
Definition iganet.hpp:1112
void optimizerOptionsReset(optimizer_options_type &&options)
Resets the optimizer options.
Definition iganet.hpp:1090
void save(const std::string &filename, const std::string &key="iganet") const
Saves the IgANet to file.
Definition iganet.hpp:1372
void load(const std::string &filename, const std::string &key="iganet")
Loads the IgANet from file.
Definition iganet.hpp:1379
torch::OrderedDict< std::string, torch::Tensor > named_parameters() const noexcept
Returns a constant reference to the named parameters of the IgANet object.
Definition iganet.hpp:1345
void optimizerOptionsReset(const optimizer_options_type &options)
Resets the optimizer options.
Definition iganet.hpp:1084
virtual torch::Tensor loss(const torch::Tensor &, int64_t)=0
Computes the loss function.
IgANet(const std::vector< int64_t > &layers, const std::vector< std::vector< std::any > > &activations, std::array< int64_t, NumCoeffs > numCoeffs, IgANetOptions defaults={}, iganet::Options< typename Base::value_type > options=iganet::Options< typename Base::value_type >{})
Constructor: number of layers, activation functions, and number of spline coefficients (same for geom...
Definition iganet.hpp:974
torch::serialize::InputArchive & read(torch::serialize::InputArchive &archive, const std::string &key="iganet")
Loads the IgANet from a torch::serialize::InputArchive object.
Definition iganet.hpp:1411
IgABase< GeometryMap, Variable > Base
Base type.
Definition iganet.hpp:939
IgANet(const std::vector< int64_t > &layers, const std::vector< std::vector< std::any > > &activations, std::tuple< std::array< int64_t, GeometryMapNumCoeffs >... > geometryMapNumCoeffs, std::tuple< std::array< int64_t, VariableNumCoeffs >... > variableNumCoeffs, IgANetOptions defaults={}, iganet::Options< typename Base::value_type > options=iganet::Options< typename Base::value_type >{})
Constructor: number of layers, activation functions, and number of spline coefficients (different for...
Definition iganet.hpp:1009
std::vector< torch::Tensor > parameters() const noexcept
Returns a constant reference to the parameters of the IgANet object.
Definition iganet.hpp:1338
std::unique_ptr< optimizer_type > opt_
Optimizer.
Definition iganet.hpp:952
std::size_t nparameters() const noexcept
Returns the total number of parameters of the IgANet object.
Definition iganet.hpp:1350
optimizer_type & optimizer()
Returns a non-constant reference to the optimizer.
Definition iganet.hpp:1044
virtual void train()
Trains the IgANet.
Definition iganet.hpp:1141
virtual void pretty_print(std::ostream &os=Log(log::info)) const noexcept override
Returns a string representation of the IgANet object.
Definition iganet.hpp:1360
Optimizer optimizer_type
Type of the optimizer.
Definition iganet.hpp:942
torch::serialize::OutputArchive & write(torch::serialize::OutputArchive &archive, const std::string &key="iganet") const
Writes the IgANet into a torch::serialize::OutputArchive object.
Definition iganet.hpp:1388
void optimizerOptionsReset(optimizer_options_type &&options, std::size_t param_group)
Resets the optimizer options.
Definition iganet.hpp:1104
virtual torch::Tensor inputs(int64_t epoch) const
Returns the network inputs.
Definition iganet.hpp:1123
auto & options()
Returns a non-constant reference to the options structure.
Definition iganet.hpp:1115
void optimizerReset(bool resetOptions=true)
Resets the optimizer.
Definition iganet.hpp:1049
IgANetGenerator< typename Base::value_type > & net()
Returns a non-constant reference to the IgANet generator.
Definition iganet.hpp:1038
const IgANetGenerator< typename Base::value_type > & net() const
Returns a constant reference to the IgANet generator.
Definition iganet.hpp:1033
const optimizer_options_type & optimizerOptions(std::size_t param_group=0) const
Returns a constant reference to the optimizer options.
Definition iganet.hpp:1076
virtual bool epoch(int64_t)=0
Initializes epoch.
IgANet(IgANetOptions defaults={}, iganet::Options< typename Base::value_type > options=iganet::Options< typename Base::value_type >{})
Default constructor.
Definition iganet.hpp:959
IgANet(const std::vector< int64_t > &layers, const std::vector< std::vector< std::any > > &activations, std::array< int64_t, GeometryMapNumCoeffs > geometryMapNumCoeffs, std::array< int64_t, VariableNumCoeffs > variableNumCoeffs, IgANetOptions defaults={}, iganet::Options< typename Base::value_type > options=iganet::Options< typename Base::value_type >{})
Constructor: number of layers, activation functions, and number of spline coefficients (different for...
Definition iganet.hpp:997
typename optimizer_options_type< Optimizer >::type optimizer_options_type
Type of the optimizer options.
Definition iganet.hpp:945
void train(DataLoader &loader)
Trains the IgANet.
Definition iganet.hpp:1228
IgANetOptions options_
Options.
Definition iganet.hpp:955
virtual nlohmann::json to_json() const override
Returns the IgANet object as a JSON object.
Definition iganet.hpp:1333
IgANetGenerator< typename Base::value_type > net_
IgANet generator.
Definition iganet.hpp:949
void optimizerOptionsReset(const optimizer_options_type &options, std::size_t param_group)
Resets the optimizer options.
Definition iganet.hpp:1096
void eval()
Evaluate IgANet.
Definition iganet.hpp:1326
optimizer_options_type & optimizerOptions(std::size_t param_group=0)
Returns a non-constant reference to the optimizer options.
Definition iganet.hpp:1068
const optimizer_type & optimizer() const
Returns a constant reference to the optimizer.
Definition iganet.hpp:1041
IgANet(const std::vector< int64_t > &layers, const std::vector< std::vector< std::any > > &activations, std::tuple< std::array< int64_t, NumCoeffs >... > numCoeffs, IgANetOptions defaults={}, iganet::Options< typename Base::value_type > options=iganet::Options< typename Base::value_type >{})
Constructor: number of layers, activation functions, and number of spline coefficients (same for geom...
Definition iganet.hpp:983
bool operator==(const IgANet &other) const
Returns true if both IgANet objects are the same.
Definition iganet.hpp:1434
void optimizerReset(const optimizer_options_type &optimizerOptions)
Resets the optimizer.
Definition iganet.hpp:1063
bool operator!=(const IgANet &other) const
Returns true if both IgANet objects are different.
Definition iganet.hpp:1448
Instance Normalization as described in the paper.
Definition layer.hpp:958
Layer Normalization as described in the paper.
Definition layer.hpp:1064
Leaky ReLU activation function.
Definition layer.hpp:1159
Local response Normalization.
Definition layer.hpp:1234
LogSigmoid activation function.
Definition layer.hpp:1326
LogSoftmax activation function.
Definition layer.hpp:1377
Mish activation function.
Definition layer.hpp:1444
No-op activation function.
Definition layer.hpp:92
Lp Normalization.
Definition layer.hpp:1487
The Options class handles the automated determination of dtype from the template argument and the sel...
Definition options.hpp:107
PReLU activation function.
Definition layer.hpp:1562
Randomized ReLU activation function.
Definition layer.hpp:1764
ReLU6 activation function.
Definition layer.hpp:1692
ReLU activation function.
Definition layer.hpp:1624
SELU activation function.
Definition layer.hpp:1847
Sigmoid Linear Unit activation function.
Definition layer.hpp:1959
Sigmoid activation function.
Definition layer.hpp:1915
Softmax activation function.
Definition layer.hpp:2004
Softmin activation function.
Definition layer.hpp:2075
Softplus activation function.
Definition layer.hpp:2146
Softshrink activation function.
Definition layer.hpp:2228
Softsign activation function.
Definition layer.hpp:2300
Tanh activation function.
Definition layer.hpp:2344
Tanhshrink activation function.
Definition layer.hpp:2387
Threshold activation function.
Definition layer.hpp:2435
Fully qualified name descriptor.
Definition fqn.hpp:26
virtual const std::string & name() const noexcept
Returns the fully qualified name of the object.
Definition fqn.hpp:31
Concept to identify template parameters that are derived from iganet::details::FunctionSpaceType.
Definition functionspace.hpp:3118
Concept to identify template parameters that are derived from torch::optim::Optimizer.
Definition optimizer.hpp:21
Definition igabase.hpp:37
Container utility functions.
Fully qualified name utility functions.
Function spaces.
Isogeometric analysis base class.
Network layer.
auto zip(T &&...seqs)
Definition zip.hpp:97
void slice_tensor_into_tuple(std::tuple< Tensors... > &tuple, const torch::Tensor &tensor, FuncSize &&funcSize, FuncAssign &&funcAssign, int64_t &offset, int64_t dim=0)
Slices the given tensor into the objects of the std::tuple.
Definition tuple.hpp:61
torch::Tensor cat_tuple_into_tensor(const std::tuple< Tensors... > &tensors, int64_t dim=0)
Concatenates the entries of an std::tuple object into a single Torch tensor along the given dimension...
Definition tuple.hpp:26
Definition boundary.hpp:22
decltype(std::declval< Variable >() .template find_knot_indices< functionspace::interior >(std::declval< typename Variable::eval_type >())) variable_interior_knot_indices_type
Type of the knot indices of the variables in the interior.
Definition iganet.hpp:1504
decltype(std::declval< GeometryMap >() .template find_coeff_indices< functionspace::boundary >(std::declval< typename GeometryMap::boundary_eval_type >())) geometryMap_boundary_coeff_indices_type
Type of the coefficient indices of the geometry map at the boundary.
Definition iganet.hpp:1523
decltype(std::declval< Variable >() .template find_knot_indices< functionspace::boundary >(std::declval< typename Variable::boundary_eval_type >())) variable_boundary_knot_indices_type
Type of the knot indices of the variables at the boundary.
Definition iganet.hpp:1510
decltype(std::declval< Variable >() .template find_coeff_indices< functionspace::interior >(std::declval< typename Variable::eval_type >())) variable_interior_coeff_indices_type
Type of the coefficient indices of the variables in the interior.
Definition iganet.hpp:1529
decltype(std::declval< GeometryMap >() .template find_knot_indices< functionspace::boundary >(std::declval< typename GeometryMap::boundary_eval_type >())) geometryMap_boundary_knot_indices_type
Type of the knot indices of the geometry map at the boundary.
Definition iganet.hpp:1498
decltype(std::declval< GeometryMap >() .template find_coeff_indices< functionspace::interior >(std::declval< typename GeometryMap::eval_type >())) geometryMap_interior_coeff_indices_type
Type of the coefficient indices of the geometry map in the interior.
Definition iganet.hpp:1516
struct iganet::@0 Log
Logger.
init
Enumerator for specifying the initialization of B-spline coefficients.
Definition bspline.hpp:55
decltype(std::declval< Variable >() .template find_coeff_indices< functionspace::boundary >(std::declval< typename Variable::boundary_eval_type >())) variable_boundary_coeff_indices_type
Type of the coefficient indices of the variables at the boundary.
Definition iganet.hpp:1535
activation
Enumerator for nonlinear activation functions.
Definition layer.hpp:23
std::ostream & operator<<(std::ostream &os, const Boundary< Spline > &obj)
Print (as string) a Boundary object.
Definition boundary.hpp:1963
decltype(std::declval< GeometryMap >() .template find_knot_indices< functionspace::interior >(std::declval< typename GeometryMap::eval_type >())) geometryMap_interior_knot_indices_type
Type of the knot indices of the geometry map in the interior.
Definition iganet.hpp:1491
IgANetCustomizable.
Definition iganet.hpp:1485
IgANetCustomizable2.
Definition iganet.hpp:2077
STL namespace.
IgANetOptions.
Definition iganet.hpp:32
TORCH_ARG(int64_t, batch_size)
TORCH_ARG(double, min_loss)
TORCH_ARG(int64_t, max_epoch)
Serialization prototype.
Definition serialize.hpp:31
Tuple utility functions.
Zip utility function.