#include <cstdint>
#include <cstring>
#include <regex>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>

#include <nlohmann/json.hpp>
34 virtual nlohmann::json
to_json()
const = 0;
41template <
typename T, std::
size_t N>
42inline auto to_json(
const torch::TensorAccessor<T, N> &accessor) {
43 auto json = nlohmann::json::array();
45 if constexpr (N == 1) {
46 for (int64_t i = 0; i < accessor.size(0); ++i)
47 json.push_back(accessor[i]);
48 }
else if constexpr (N == 2) {
49 for (int64_t i = 0; i < accessor.size(0); ++i)
50 for (int64_t j = 0; j < accessor.size(1); ++j)
51 json.push_back(accessor[i][j]);
52 }
else if constexpr (N == 3) {
53 for (int64_t i = 0; i < accessor.size(0); ++i)
54 for (int64_t j = 0; j < accessor.size(1); ++j)
55 for (int64_t k = 0; k < accessor.size(2); ++k)
56 json.push_back(accessor[i][j][k]);
57 }
else if constexpr (N == 4) {
58 for (int64_t i = 0; i < accessor.size(0); ++i)
59 for (int64_t j = 0; j < accessor.size(1); ++j)
60 for (int64_t k = 0; k < accessor.size(2); ++k)
61 for (int64_t l = 0; l < accessor.size(3); ++l)
62 json.push_back(accessor[i][j][k][l]);
69template <
typename T, std::
size_t N>
70inline auto to_json(
const torch::Tensor &tensor) {
71 if (tensor.is_cuda()) {
72 auto [tensor_cpu, accessor] = to_tensorAccessor<T, N>(tensor, torch::kCPU);
75 auto accessor = to_tensorAccessor<T, N>(tensor);
82template <
typename T, std::
size_t N, std::
size_t M>
84 auto json = nlohmann::json::array();
86 for (std::size_t i = 0; i < M; ++i) {
87 if (tensors[i].is_cuda()) {
88 auto [tensor_cpu, accessor] =
89 to_tensorAccessor<T, N>(tensors[i], torch::kCPU);
90 json.push_back(to_json<T, N>(accessor));
92 auto accessor = to_tensorAccessor<T, N>(tensors[i]);
93 json.push_back(to_json<T, N>(accessor));
100#ifdef IGANET_WITH_GISMO
102template <
typename T,
int Rows,
int Cols,
int Options>
103inline auto to_json(
const gismo::gsMatrix<T, Rows, Cols, Options> &matrix,
104 bool flatten =
false,
bool transpose =
false) {
105 auto json = nlohmann::json::array();
107 if constexpr (
Options == gismo::RowMajor) {
110 for (std::size_t j = 0; j < matrix.cols(); ++j)
111 for (std::size_t i = 0; i < matrix.rows(); ++i)
112 json.push_back(matrix(i, j));
114 for (std::size_t i = 0; i < matrix.rows(); ++i)
115 for (std::size_t j = 0; j < matrix.cols(); ++j)
116 json.push_back(matrix(i, j));
120 for (std::size_t j = 0; j < matrix.cols(); ++j) {
121 auto data = nlohmann::json::array();
122 for (std::size_t i = 0; i < matrix.rows(); ++i) {
123 data.push_back(matrix(i, j));
125 json.emplace_back(data);
128 for (std::size_t i = 0; i < matrix.rows(); ++i) {
129 auto data = nlohmann::json::array();
130 for (std::size_t j = 0; j < matrix.cols(); ++j) {
131 data.push_back(matrix(i, j));
133 json.emplace_back(data);
138 }
else if constexpr (Options == gismo::ColMajor) {
141 for (std::size_t i = 0; i < matrix.rows(); ++i)
142 for (std::size_t j = 0; j < matrix.cols(); ++j)
143 json.push_back(matrix(i, j));
145 for (std::size_t j = 0; j < matrix.cols(); ++j)
146 for (std::size_t i = 0; i < matrix.rows(); ++i)
147 json.push_back(matrix(i, j));
151 for (std::size_t i = 0; i < matrix.rows(); ++i) {
152 auto data = nlohmann::json::array();
153 for (std::size_t j = 0; j < matrix.cols(); ++j) {
154 data.push_back(matrix(i, j));
156 json.emplace_back(data);
159 for (std::size_t j = 0; j < matrix.cols(); ++j) {
160 auto data = nlohmann::json::array();
161 for (std::size_t i = 0; i < matrix.rows(); ++i) {
162 data.push_back(matrix(i, j));
164 json.emplace_back(data);
170 throw std::runtime_error(
"Invalid matrix options");
176template <
typename T>
inline auto to_json(
const gismo::gsBSpline<T> &bspline) {
177 auto json = nlohmann::json();
179 json[
"degrees"] = nlohmann::json::array();
181 for (std::size_t i = 0; i < bspline.parDim(); ++i)
182 json[
"degrees"].push_back(bspline.degree(i));
184 json[
"geoDim"] = bspline.geoDim();
185 json[
"parDim"] = bspline.parDim();
187 json[
"ncoeffs"] = nlohmann::json::array();
188 for (std::size_t i = 0; i < bspline.parDim(); ++i)
189 json[
"ncoeffs"].push_back(bspline.basis().size(i));
191 json[
"coeffs"] =
to_json(bspline.coefs());
193 json[
"nknots"] = nlohmann::json::array();
194 for (std::size_t i = 0; i < bspline.parDim(); ++i)
195 json[
"nknots"].push_back(bspline.knots(i).size());
197 json[
"knots"] = nlohmann::json::array();
198 for (std::size_t i = 0; i < bspline.parDim(); ++i)
199 json[
"knots"].push_back(bspline.knots(i));
205template <
int d,
typename T>
206inline auto to_json(
const gismo::gsTensorBSpline<d, T> &bspline) {
207 auto json = nlohmann::json();
209 json[
"degrees"] = nlohmann::json::array();
211 for (std::size_t i = 0; i < bspline.parDim(); ++i)
212 json[
"degrees"].push_back(bspline.degree(i));
214 json[
"geoDim"] = bspline.geoDim();
215 json[
"parDim"] = bspline.parDim();
217 json[
"ncoeffs"] = nlohmann::json::array();
218 for (std::size_t i = 0; i < bspline.parDim(); ++i)
219 json[
"ncoeffs"].push_back(bspline.basis().size(i));
221 json[
"coeffs"] =
to_json(bspline.coefs());
223 json[
"nknots"] = nlohmann::json::array();
224 for (std::size_t i = 0; i < bspline.parDim(); ++i)
225 json[
"nknots"].push_back(bspline.knots(i).size());
227 json[
"knots"] = nlohmann::json::array();
228 for (std::size_t i = 0; i < bspline.parDim(); ++i)
229 json[
"knots"].push_back(bspline.knots(i));
236inline auto to_json(
const gismo::gsGeometry<T> &geometry) {
238 if (
auto patch =
dynamic_cast<const gismo::gsBSpline<T> *
>(&geometry))
240 else if (
auto patch =
241 dynamic_cast<const gismo::gsTensorBSpline<2, T> *
>(&geometry))
243 else if (
auto patch =
244 dynamic_cast<const gismo::gsTensorBSpline<3, T> *
>(&geometry))
246 else if (
auto patch =
247 dynamic_cast<const gismo::gsTensorBSpline<4, T> *
>(&geometry))
250 return nlohmann::json(
"{ Invalid patch type }");
256to_json(
const typename gismo::gsMultiPatch<T>::ifContainer &interfaces) {
258 auto json = nlohmann::json::array();
260 for (
auto const &interface : interfaces) {
261 auto interface_json = nlohmann::json();
263 interface_json[
"patches"] = {interface.first().patchIndex(),
264 interface.second().patchIndex()};
265 interface_json[
"sides"] = {interface.first().side().index(),
266 interface.second().side().index()};
267 interface_json[
"direction"] =
"NOT IMPLEMENTED YET";
268 interface_json[
"orientation"] =
"NOT IMPLEMENTED YET";
270 json.push_back(interface_json);
279to_json(
const typename gismo::gsMultiPatch<T>::bContainer &boundaries) {
281 auto json = nlohmann::json::array();
283 for (
auto const &
boundary : boundaries) {
284 auto boundary_json = nlohmann::json();
286 boundary_json[
"patch"] =
boundary.patchIndex();
287 boundary_json[
"side"] =
boundary.side().index();
289 json.push_back(boundary_json);
297inline auto to_json(
const gismo::gsMultiPatch<T> &mp,
bool verbose =
false) {
299 auto json = nlohmann::json();
302 auto patches_json = nlohmann::json::array();
303 for (std::size_t i = 0; i < mp.nPatches(); ++i)
304 patches_json.push_back(i);
306 json[
"patches"] = patches_json;
307 json[
"interfaces"] = to_json<T>(mp.interfaces());
308 json[
"boundaries"] = to_json<T>(mp.boundaries());
311 auto patches_json = nlohmann::json::array();
313 for (std::size_t i = 0; i < mp.nPatches(); ++i)
314 patches_json.push_back(
to_json(mp.patch(i)));
316 json[
"patches"] = patches_json;
324template <
typename T, std::
size_t N>
325inline pugi::xml_document
to_xml(
const torch::TensorAccessor<T, N> &accessor,
326 torch::IntArrayRef sizes,
327 std::string tag =
"Matrix",
int id = 0,
328 std::string label =
"",
int index = -1) {
329 pugi::xml_document doc;
330 pugi::xml_node root = doc.append_child(
"xml");
331 to_xml(accessor, sizes, root,
id, label, index);
337template <
typename T, std::
size_t N>
338inline pugi::xml_node &
to_xml(
const torch::TensorAccessor<T, N> &accessor,
339 torch::IntArrayRef sizes, pugi::xml_node &root,
340 std::string tag =
"Matrix",
int id = 0,
341 std::string label =
"",
int index = -1) {
344 pugi::xml_node node = root.append_child(tag.c_str());
347 node.append_attribute(
"id") = id;
350 node.append_attribute(
"index") = index;
353 node.append_attribute(
"label") = label.c_str();
356 if (tag ==
"Matrix") {
357 if constexpr (N == 1) {
358 node.append_attribute(
"rows") = sizes[0];
359 node.append_attribute(
"cols") = 1;
361 std::stringstream ss;
362 for (std::size_t i = 0; i < sizes[0]; ++i)
363 ss << std::to_string(accessor[i]) << (i < sizes[0] - 1 ?
" " :
"");
364 node.append_child(pugi::node_pcdata).set_value(ss.str().c_str());
365 }
else if constexpr (N == 2) {
366 node.append_attribute(
"rows") = sizes[0];
367 node.append_attribute(
"cols") = sizes[1];
369 std::stringstream ss;
370 for (std::size_t i = 0; i < sizes[0]; ++i)
371 for (std::size_t j = 0; j < sizes[1]; ++j)
372 ss << std::to_string(accessor[i][j])
373 << (j < sizes[1] - 1 ?
" " : (i < sizes[0] - 1 ?
" " :
""));
374 node.append_child(pugi::node_pcdata).set_value(ss.str().c_str());
376 throw std::runtime_error(
377 "Tag \"Matrix\" only supports 1- and 2-dimensional tensors");
379 std::stringstream ss;
380 for (
const auto &size : sizes)
381 ss << std::to_string(size) <<
" ";
383 pugi::xml_node dims = node.append_child(
"Dimensions");
384 dims.append_child(pugi::node_pcdata).set_value(ss.str().c_str());
387 if constexpr (N == 1) {
388 for (std::size_t i = 0; i < sizes[0]; ++i)
389 ss << std::to_string(accessor[i]) <<
" ";
390 }
else if constexpr (N == 2) {
391 for (std::size_t i = 0; i < sizes[0]; ++i)
392 for (std::size_t j = 0; j < sizes[1]; ++j)
393 ss << std::to_string(accessor[i][j]) <<
" ";
394 }
else if constexpr (N == 3) {
395 for (std::size_t i = 0; i < sizes[0]; ++i)
396 for (std::size_t j = 0; j < sizes[1]; ++j)
397 for (std::size_t k = 0; j < sizes[2]; ++k)
398 ss << std::to_string(accessor[i][j][k]) <<
" ";
399 }
else if constexpr (N == 4) {
400 for (std::size_t i = 0; i < sizes[0]; ++i)
401 for (std::size_t j = 0; j < sizes[1]; ++j)
402 for (std::size_t k = 0; k < sizes[2]; ++k)
403 for (std::size_t l = 0; l < sizes[3]; ++l)
404 ss << std::to_string(accessor[i][j][k][l]) <<
" ";
406 }
else if constexpr (N == 5) {
407 for (std::size_t i = 0; i < sizes[0]; ++i)
408 for (std::size_t j = 0; j < sizes[1]; ++j)
409 for (std::size_t k = 0; k < sizes[2]; ++k)
410 for (std::size_t l = 0; l < sizes[3]; ++l)
411 for (std::size_t m = 0; m < sizes[4]; ++m)
412 ss << std::to_string(accessor[i][j][k][l][m]) <<
" ";
413 }
else if constexpr (N == 6) {
414 for (std::size_t i = 0; i < sizes[0]; ++i)
415 for (std::size_t j = 0; j < sizes[1]; ++j)
416 for (std::size_t k = 0; k < sizes[2]; ++k)
417 for (std::size_t l = 0; l < sizes[3]; ++l)
418 for (std::size_t m = 0; m < sizes[4]; ++m)
419 for (std::size_t n = 0; n < sizes[5]; ++n)
420 ss << std::to_string(accessor[i][j][k][l][m][n]) <<
" ";
422 throw std::runtime_error(
423 "Dimensions higher than 4 are not implemented yet");
425 pugi::xml_node data = node.append_child(
"Data");
426 data.append_child(pugi::node_pcdata).set_value(ss.str().c_str());
433template <
typename T, std::
size_t N>
434inline pugi::xml_document
to_xml(
const torch::Tensor &tensor,
435 std::string tag =
"Matrix",
int id = 0,
436 std::string label =
"",
int index = -1) {
437 pugi::xml_document doc;
438 pugi::xml_node root = doc.append_child(
"xml");
439 to_xml<T, N>(tensor, root,
id, label, index);
445template <
typename T, std::
size_t N>
446inline pugi::xml_node &
to_xml(
const torch::Tensor &tensor, pugi::xml_node &root,
447 std::string tag =
"Matrix",
int id = 0,
448 std::string label =
"",
int index = -1) {
450 if (tensor.is_cuda()) {
451 auto [tensor_cpu, accessor] = to_tensorAccessor<T, N>(tensor, torch::kCPU);
452 return to_xml(accessor, tensor.sizes(), root, tag,
id, label, index);
454 auto accessor = to_tensorAccessor<T, N>(tensor);
455 return to_xml(accessor, tensor.sizes(), root, tag,
id, label, index);
461template <
typename T, std::
size_t N, std::
size_t M>
463 std::string tag =
"Matrix",
int id = 0,
464 std::string label =
"",
int index = -1) {
465 pugi::xml_document doc;
466 pugi::xml_node root = doc.append_child(
"xml");
467 to_xml<T, N>(tensors, root,
id, label, index);
474template <
typename T, std::
size_t N, std::
size_t M>
476 pugi::xml_node &root, std::string tag =
"Matrix",
477 int id = 0, std::string label =
"") {
479 for (std::size_t i = 0; i < M; ++i) {
480 if (tensors[i].is_cuda()) {
481 auto [tensor_cpu, accessor] =
482 to_tensorAccessor<T, N>(tensors[i], torch::kCPU);
483 to_xml(accessor, tensors[i].sizes(), root, tag,
id, label, i);
485 auto accessor = to_tensorAccessor<T, N>(tensors[i]);
486 to_xml(accessor, tensors[i].sizes(), root, tag,
id, label, i);
494template <
typename T, std::
size_t N>
495inline torch::TensorAccessor<T, N> &
496from_xml(
const pugi::xml_document &doc, torch::TensorAccessor<T, N> &accessor,
497 torch::IntArrayRef sizes, std::string tag =
"Matrix",
int id = 0,
498 std::string label =
"",
int index = -1) {
499 return from_xml(doc.child(
"xml"), accessor, sizes, tag,
id, label, index);
/// @brief Converts an XML node to a torch::TensorAccessor object.
///
/// NOTE(review): only the signature of this overload is visible in
/// this excerpt — the function body is missing here and must be taken
/// from the original source.
503template <
typename T, std::
size_t N>
504inline torch::TensorAccessor<T, N> &
505from_xml(
const pugi::xml_node &root, torch::TensorAccessor<T, N> &accessor,
506 torch::IntArrayRef sizes, std::string tag =
"Matrix",
int id = 0,
507 std::string label =
"",
int index = -1) {
513template <
typename T, std::
size_t N>
514inline torch::Tensor &
515from_xml(
const pugi::xml_document &doc, torch::Tensor &tensor,
516 std::string tag =
"Matrix",
int id = 0, std::string label =
"",
517 bool alloc =
true,
int index = -1) {
518 return from_xml<T, N>(doc.child(
"xml"), tensor, tag,
id, label, index);
522template <
typename T, std::
size_t N>
523inline torch::Tensor &
524from_xml(
const pugi::xml_node &root, torch::Tensor &tensor,
525 std::string tag =
"Matrix",
int id = 0, std::string label =
"",
526 bool alloc =
true,
int index = -1) {
529 for (pugi::xml_node node : root.children(tag.c_str())) {
531 if ((
id >= 0 ? node.attribute(
"id").as_int() ==
id :
true) &&
532 (index >= 0 ? node.attribute(
"index").as_int() == index :
true) &&
533 (!label.empty() ? node.attribute(
"label").value() == label :
true)) {
535 if (tag ==
"Matrix") {
537 int64_t rows = node.attribute(
"rows").as_int();
538 int64_t cols = node.attribute(
"cols").as_int();
540 if (!alloc && (tensor.size(0) != rows || tensor.size(1) != cols))
541 throw std::runtime_error(
"Invalid matrix dimensions");
543 else if (alloc && (tensor.size(0) != rows || tensor.size(1) != cols))
544 tensor = torch::zeros({rows, cols}, tensor.options());
546 std::string values = std::regex_replace(
547 node.text().get(), std::regex(
"[\t\r\n\a]+| +"),
" ");
549 auto [tensor_cpu, accessor] =
550 to_tensorAccessor<T, N>(tensor, torch::kCPU);
551 auto value = strtok(&values[0],
" ");
553 for (int64_t i = 0; i < rows; ++i)
554 for (int64_t j = 0; j < cols; ++j) {
555 if (value ==
nullptr)
556 throw std::runtime_error(
557 "XML object does not provide enough coefficients");
559 accessor[i][j] =
static_cast<T
>(std::stod(value));
560 value = strtok(
nullptr,
" ");
563 if (value !=
nullptr)
564 throw std::runtime_error(
"XML object provides too many coefficients");
566 if (tensor.device().type() != torch::kCPU)
567 tensor = std::move(tensor_cpu);
574 if (pugi::xml_node dims = node.child(
"Dimensions")) {
575 std::vector<int64_t> sizes;
577 std::string values = std::regex_replace(
578 dims.text().get(), std::regex(
"[\t\r\n\a]+| +"),
" ");
579 for (
auto value = strtok(&values[0],
" "); value !=
nullptr;
580 value = strtok(
nullptr,
" "))
581 sizes.push_back(
static_cast<std::size_t
>(std::stoi(value)));
583 if (!alloc && (tensor.sizes() != sizes))
584 throw std::runtime_error(
"Invalid tensor dimensions");
586 else if (alloc && (tensor.sizes() != sizes))
587 tensor = torch::zeros(torch::IntArrayRef{sizes}, tensor.options());
589 if (sizes.size() != N)
590 throw std::runtime_error(
"Invalid tensor dimensions");
593 if (pugi::xml_node data = node.child(
"Data")) {
594 std::string values = std::regex_replace(
595 data.text().get(), std::regex(
"[\t\r\n\a]+| +"),
" ");
597 auto [tensor_cpu, accessor] =
598 to_tensorAccessor<T, N>(tensor, torch::kCPU);
599 auto value = strtok(&values[0],
" ");
601 if constexpr (N == 1) {
602 for (int64_t i = 0; i < sizes[0]; ++i) {
603 if (value ==
nullptr)
604 throw std::runtime_error(
605 "XML object does not provide enough coefficients");
607 accessor[i] =
static_cast<T
>(std::stod(value));
608 value = strtok(
nullptr,
" ");
610 }
else if constexpr (N == 2) {
611 for (int64_t i = 0; i < sizes[0]; ++i)
612 for (int64_t j = 0; j < sizes[1]; ++j) {
613 if (value ==
nullptr)
614 throw std::runtime_error(
615 "XML object does not provide enough coefficients");
617 accessor[i][j] =
static_cast<T
>(std::stod(value));
618 value = strtok(
nullptr,
" ");
620 }
else if constexpr (N == 3) {
621 for (int64_t i = 0; i < sizes[0]; ++i)
622 for (int64_t j = 0; j < sizes[1]; ++j)
623 for (int64_t k = 0; k < sizes[2]; ++k) {
624 if (value ==
nullptr)
625 throw std::runtime_error(
626 "XML object does not provide enough coefficients");
628 accessor[i][j][k] =
static_cast<T
>(std::stod(value));
629 value = strtok(
nullptr,
" ");
631 }
else if constexpr (N == 4) {
632 for (int64_t i = 0; i < sizes[0]; ++i)
633 for (int64_t j = 0; j < sizes[1]; ++j)
634 for (int64_t k = 0; k < sizes[2]; ++k)
635 for (int64_t l = 0; l < sizes[3]; ++l) {
636 if (value ==
nullptr)
637 throw std::runtime_error(
638 "XML object does not provide enough coefficients");
640 accessor[i][j][k][l] =
static_cast<T
>(std::stod(value));
641 value = strtok(
nullptr,
" ");
643 }
else if constexpr (N == 5) {
644 for (int64_t i = 0; i < sizes[0]; ++i)
645 for (int64_t j = 0; j < sizes[1]; ++j)
646 for (int64_t k = 0; k < sizes[2]; ++k)
647 for (int64_t l = 0; l < sizes[3]; ++l)
648 for (int64_t m = 0; m < sizes[4]; ++m) {
649 if (value ==
nullptr)
650 throw std::runtime_error(
651 "XML object does not provide enough "
654 accessor[i][j][k][l][m] =
655 static_cast<T
>(std::stod(value));
656 value = strtok(
nullptr,
" ");
658 }
else if constexpr (N == 6) {
659 for (int64_t i = 0; i < sizes[0]; ++i)
660 for (int64_t j = 0; j < sizes[1]; ++j)
661 for (int64_t k = 0; k < sizes[2]; ++k)
662 for (int64_t l = 0; l < sizes[3]; ++l)
663 for (int64_t m = 0; m < sizes[4]; ++m)
664 for (int64_t n = 0; n < sizes[5]; ++n) {
665 if (value ==
nullptr)
666 throw std::runtime_error(
667 "XML object does not provide enough "
670 accessor[i][j][k][l][m][n] =
671 static_cast<T
>(std::stod(value));
672 value = strtok(
nullptr,
" ");
676 if (value !=
nullptr)
677 throw std::runtime_error(
678 "XML object provides too many coefficients");
680 if (tensor.device().type() != torch::kCPU)
681 tensor = std::move(tensor_cpu);
687 throw std::runtime_error(
688 "XML object does not provide a \"Dimensions\" tag");
696 throw std::runtime_error(
697 "XML object does not provide tag with given id, index, and/or label");
703template <
typename T, std::
size_t N, std::
size_t M>
706 std::string tag =
"Matrix",
int id = 0,
bool alloc =
true,
707 std::string label =
"") {
709 return from_xml<T, N>(doc.child(
"xml"), tensors, tag,
id, label, alloc);
713template <
typename T, std::
size_t N, std::
size_t M>
716 std::string tag =
"Matrix",
int id = 0,
bool alloc =
true,
717 std::string label =
"") {
719 for (std::size_t i = 0; i < M; ++i) {
720 from_xml<T, N>(root, tensors[i], tag,
id, label, alloc, i);
The Options class handles the automated determination of dtype from the template argument and the sel...
Definition options.hpp:104
Definition blocktensor.hpp:24
pugi::xml_document to_xml(const torch::TensorAccessor< T, N > &accessor, torch::IntArrayRef sizes, std::string tag="Matrix", int id=0, std::string label="", int index=-1)
Converts a torch::TensorAccessor object to an XML document object.
Definition serialize.hpp:325
auto to_json(const torch::TensorAccessor< T, N > &accessor)
Converts a torch::TensorAccessor object to a JSON object.
Definition serialize.hpp:42
torch::TensorAccessor< T, N > & from_xml(const pugi::xml_document &doc, torch::TensorAccessor< T, N > &accessor, torch::IntArrayRef sizes, std::string tag="Matrix", int id=0, std::string label="", int index=-1)
Converts an XML document object to a torch::TensorAccessor object.
Definition serialize.hpp:496
std::array< torch::Tensor, N > TensorArray
Definition tensorarray.hpp:26
struct iganet::@0 Log
Logger.
Serialization prototype.
Definition serialize.hpp:29
virtual void pretty_print(std::ostream &os=Log(log::info)) const =0
Returns a string representation of the object.
virtual nlohmann::json to_json() const =0
Returns the object as JSON object.
virtual ~Serializable()=default
Destructor.
TensorArray utility functions.