19#include <nlohmann/json.hpp>
22#include <torch/torch.h>
/// @brief Returns the object as a JSON object.
///
/// Pure virtual: every serializable subclass must provide its own JSON
/// representation.
virtual nlohmann::json to_json() const = 0;
40template <
typename T, std::
size_t N>
41inline auto to_json(
const torch::TensorAccessor<T, N> &accessor) {
42 auto json = nlohmann::json::array();
44 if constexpr (N == 1) {
45 for (int64_t i = 0; i < accessor.size(0); ++i)
46 json.push_back(accessor[i]);
47 }
else if constexpr (N == 2) {
48 for (int64_t i = 0; i < accessor.size(0); ++i)
49 for (int64_t j = 0; j < accessor.size(1); ++j)
50 json.push_back(accessor[i][j]);
51 }
else if constexpr (N == 3) {
52 for (int64_t i = 0; i < accessor.size(0); ++i)
53 for (int64_t j = 0; j < accessor.size(1); ++j)
54 for (int64_t k = 0; k < accessor.size(2); ++k)
55 json.push_back(accessor[i][j][k]);
56 }
else if constexpr (N == 4) {
57 for (int64_t i = 0; i < accessor.size(0); ++i)
58 for (int64_t j = 0; j < accessor.size(1); ++j)
59 for (int64_t k = 0; k < accessor.size(2); ++k)
60 for (int64_t l = 0; l < accessor.size(3); ++l)
61 json.push_back(accessor[i][j][k][l]);
68template <
typename T, std::
size_t N>
69inline auto to_json(
const torch::Tensor &tensor) {
70 if (tensor.is_cuda()) {
71 auto [tensor_cpu, accessor] = to_tensorAccessor<T, N>(tensor, torch::kCPU);
74 auto accessor = to_tensorAccessor<T, N>(tensor);
81template <
typename T, std::
size_t N, std::
size_t M>
83 auto json = nlohmann::json::array();
85 for (std::size_t i = 0; i < M; ++i) {
86 if (tensors[i].is_cuda()) {
87 auto [tensor_cpu, accessor] =
88 to_tensorAccessor<T, N>(tensors[i], torch::kCPU);
89 json.push_back(to_json<T, N>(accessor));
91 auto accessor = to_tensorAccessor<T, N>(tensors[i]);
92 json.push_back(to_json<T, N>(accessor));
99#ifdef IGANET_WITH_GISMO
101template <
typename T,
int Rows,
int Cols,
int Options>
102inline auto to_json(
const gismo::gsMatrix<T, Rows, Cols, Options> &matrix,
103 bool flatten =
false,
bool transpose =
false) {
104 auto json = nlohmann::json::array();
106 if constexpr (
Options == gismo::RowMajor) {
109 for (std::size_t j = 0; j < matrix.cols(); ++j)
110 for (std::size_t i = 0; i < matrix.rows(); ++i)
111 json.push_back(matrix(i, j));
113 for (std::size_t i = 0; i < matrix.rows(); ++i)
114 for (std::size_t j = 0; j < matrix.cols(); ++j)
115 json.push_back(matrix(i, j));
119 for (std::size_t j = 0; j < matrix.cols(); ++j) {
120 auto data = nlohmann::json::array();
121 for (std::size_t i = 0; i < matrix.rows(); ++i) {
122 data.push_back(matrix(i, j));
124 json.emplace_back(data);
127 for (std::size_t i = 0; i < matrix.rows(); ++i) {
128 auto data = nlohmann::json::array();
129 for (std::size_t j = 0; j < matrix.cols(); ++j) {
130 data.push_back(matrix(i, j));
132 json.emplace_back(data);
137 }
else if constexpr (Options == gismo::ColMajor) {
140 for (std::size_t i = 0; i < matrix.rows(); ++i)
141 for (std::size_t j = 0; j < matrix.cols(); ++j)
142 json.push_back(matrix(i, j));
144 for (std::size_t j = 0; j < matrix.cols(); ++j)
145 for (std::size_t i = 0; i < matrix.rows(); ++i)
146 json.push_back(matrix(i, j));
150 for (std::size_t i = 0; i < matrix.rows(); ++i) {
151 auto data = nlohmann::json::array();
152 for (std::size_t j = 0; j < matrix.cols(); ++j) {
153 data.push_back(matrix(i, j));
155 json.emplace_back(data);
158 for (std::size_t j = 0; j < matrix.cols(); ++j) {
159 auto data = nlohmann::json::array();
160 for (std::size_t i = 0; i < matrix.rows(); ++i) {
161 data.push_back(matrix(i, j));
163 json.emplace_back(data);
169 throw std::runtime_error(
"Invalid matrix options");
175template <
typename T>
inline auto to_json(
const gismo::gsBSpline<T> &bspline) {
176 auto json = nlohmann::json();
178 json[
"degrees"] = nlohmann::json::array();
180 for (std::size_t i = 0; i < bspline.parDim(); ++i)
181 json[
"degrees"].push_back(bspline.degree(i));
183 json[
"geoDim"] = bspline.geoDim();
184 json[
"parDim"] = bspline.parDim();
186 json[
"ncoeffs"] = nlohmann::json::array();
187 for (std::size_t i = 0; i < bspline.parDim(); ++i)
188 json[
"ncoeffs"].push_back(bspline.basis().size(i));
190 json[
"coeffs"] =
to_json(bspline.coefs());
192 json[
"nknots"] = nlohmann::json::array();
193 for (std::size_t i = 0; i < bspline.parDim(); ++i)
194 json[
"nknots"].push_back(bspline.knots(i).size());
196 json[
"knots"] = nlohmann::json::array();
197 for (std::size_t i = 0; i < bspline.parDim(); ++i)
198 json[
"knots"].push_back(bspline.knots(i));
204template <
int d,
typename T>
205inline auto to_json(
const gismo::gsTensorBSpline<d, T> &bspline) {
206 auto json = nlohmann::json();
208 json[
"degrees"] = nlohmann::json::array();
210 for (std::size_t i = 0; i < bspline.parDim(); ++i)
211 json[
"degrees"].push_back(bspline.degree(i));
213 json[
"geoDim"] = bspline.geoDim();
214 json[
"parDim"] = bspline.parDim();
216 json[
"ncoeffs"] = nlohmann::json::array();
217 for (std::size_t i = 0; i < bspline.parDim(); ++i)
218 json[
"ncoeffs"].push_back(bspline.basis().size(i));
220 json[
"coeffs"] =
to_json(bspline.coefs());
222 json[
"nknots"] = nlohmann::json::array();
223 for (std::size_t i = 0; i < bspline.parDim(); ++i)
224 json[
"nknots"].push_back(bspline.knots(i).size());
226 json[
"knots"] = nlohmann::json::array();
227 for (std::size_t i = 0; i < bspline.parDim(); ++i)
228 json[
"knots"].push_back(bspline.knots(i));
235inline auto to_json(
const gismo::gsGeometry<T> &geometry) {
237 if (
auto patch =
dynamic_cast<const gismo::gsBSpline<T> *
>(&geometry))
239 else if (
auto patch =
dynamic_cast<const gismo::gsTensorBSpline<2, T> *
>(&geometry))
241 else if (
auto patch =
dynamic_cast<const gismo::gsTensorBSpline<3, T> *
>(&geometry))
243 else if (
auto patch =
dynamic_cast<const gismo::gsTensorBSpline<4, T> *
>(&geometry))
246 return nlohmann::json(
"{ Invalid patch type }");
251inline auto to_json(
const typename gismo::gsMultiPatch<T>::ifContainer &interfaces) {
253 auto json = nlohmann::json::array();
255 for (
auto const &interface : interfaces) {
256 auto interface_json = nlohmann::json();
258 interface_json[
"patches"] = { interface.first().patchIndex(), interface.second().patchIndex() };
259 interface_json[
"sides"] = { interface.first().side().index(), interface.second().side().index() };
260 interface_json[
"direction"] =
"NOT IMPLEMENTED YET";
261 interface_json[
"orientation"] =
"NOT IMPLEMENTED YET";
263 json.push_back(interface_json);
271inline auto to_json(
const typename gismo::gsMultiPatch<T>::bContainer &boundaries) {
273 auto json = nlohmann::json::array();
275 for (
auto const &
boundary : boundaries) {
276 auto boundary_json = nlohmann::json();
278 boundary_json[
"patch"] =
boundary.patchIndex();
279 boundary_json[
"side"] =
boundary.side().index();
281 json.push_back(boundary_json);
288 template <
typename T>
inline auto to_json(
const gismo::gsMultiPatch<T> &mp,
bool verbose=
false) {
290 auto json = nlohmann::json();
293 auto patches_json = nlohmann::json::array();
294 for (std::size_t i = 0; i < mp.nPatches(); ++i)
295 patches_json.push_back(i);
297 json[
"patches"] = patches_json;
298 json[
"interfaces"] = to_json<T>(mp.interfaces());
299 json[
"boundaries"] = to_json<T>(mp.boundaries());
302 auto patches_json = nlohmann::json::array();
304 for (std::size_t i = 0; i < mp.nPatches(); ++i)
305 patches_json.push_back(
to_json(mp.patch(i)));
307 json[
"patches"] = patches_json;
315template <
typename T, std::
size_t N>
316inline pugi::xml_document
to_xml(
const torch::TensorAccessor<T, N> &accessor,
317 torch::IntArrayRef sizes,
318 std::string tag =
"Matrix",
int id = 0,
319 std::string label =
"",
int index = -1) {
320 pugi::xml_document doc;
321 pugi::xml_node root = doc.append_child(
"xml");
322 to_xml(accessor, sizes, root,
id, label, index);
328template <
typename T, std::
size_t N>
329inline pugi::xml_node &
to_xml(
const torch::TensorAccessor<T, N> &accessor,
330 torch::IntArrayRef sizes, pugi::xml_node &root,
331 std::string tag =
"Matrix",
int id = 0,
332 std::string label =
"",
int index = -1) {
335 pugi::xml_node node = root.append_child(tag.c_str());
338 node.append_attribute(
"id") = id;
341 node.append_attribute(
"index") = index;
344 node.append_attribute(
"label") = label.c_str();
347 if (tag ==
"Matrix") {
348 if constexpr (N == 1) {
349 node.append_attribute(
"rows") = sizes[0];
350 node.append_attribute(
"cols") = 1;
352 std::stringstream ss;
353 for (std::size_t i = 0; i < sizes[0]; ++i)
354 ss << std::to_string(accessor[i]) << (i < sizes[0] - 1 ?
" " :
"");
355 node.append_child(pugi::node_pcdata).set_value(ss.str().c_str());
356 }
else if constexpr (N == 2) {
357 node.append_attribute(
"rows") = sizes[0];
358 node.append_attribute(
"cols") = sizes[1];
360 std::stringstream ss;
361 for (std::size_t i = 0; i < sizes[0]; ++i)
362 for (std::size_t j = 0; j < sizes[1]; ++j)
363 ss << std::to_string(accessor[i][j])
364 << (j < sizes[1] - 1 ?
" " : (i < sizes[0] - 1 ?
" " :
""));
365 node.append_child(pugi::node_pcdata).set_value(ss.str().c_str());
367 throw std::runtime_error(
368 "Tag \"Matrix\" only supports 1- and 2-dimensional tensors");
370 std::stringstream ss;
371 for (
const auto &size : sizes)
372 ss << std::to_string(size) <<
" ";
374 pugi::xml_node dims = node.append_child(
"Dimensions");
375 dims.append_child(pugi::node_pcdata).set_value(ss.str().c_str());
378 if constexpr (N == 1) {
379 for (std::size_t i = 0; i < sizes[0]; ++i)
380 ss << std::to_string(accessor[i]) <<
" ";
381 }
else if constexpr (N == 2) {
382 for (std::size_t i = 0; i < sizes[0]; ++i)
383 for (std::size_t j = 0; j < sizes[1]; ++j)
384 ss << std::to_string(accessor[i][j]) <<
" ";
385 }
else if constexpr (N == 3) {
386 for (std::size_t i = 0; i < sizes[0]; ++i)
387 for (std::size_t j = 0; j < sizes[1]; ++j)
388 for (std::size_t k = 0; j < sizes[2]; ++k)
389 ss << std::to_string(accessor[i][j][k]) <<
" ";
390 }
else if constexpr (N == 4) {
391 for (std::size_t i = 0; i < sizes[0]; ++i)
392 for (std::size_t j = 0; j < sizes[1]; ++j)
393 for (std::size_t k = 0; k < sizes[2]; ++k)
394 for (std::size_t l = 0; l < sizes[3]; ++l)
395 ss << std::to_string(accessor[i][j][k][l]) <<
" ";
397 }
else if constexpr (N == 5) {
398 for (std::size_t i = 0; i < sizes[0]; ++i)
399 for (std::size_t j = 0; j < sizes[1]; ++j)
400 for (std::size_t k = 0; k < sizes[2]; ++k)
401 for (std::size_t l = 0; l < sizes[3]; ++l)
402 for (std::size_t m = 0; m < sizes[4]; ++m)
403 ss << std::to_string(accessor[i][j][k][l][m]) <<
" ";
404 }
else if constexpr (N == 6) {
405 for (std::size_t i = 0; i < sizes[0]; ++i)
406 for (std::size_t j = 0; j < sizes[1]; ++j)
407 for (std::size_t k = 0; k < sizes[2]; ++k)
408 for (std::size_t l = 0; l < sizes[3]; ++l)
409 for (std::size_t m = 0; m < sizes[4]; ++m)
410 for (std::size_t n = 0; n < sizes[5]; ++n)
411 ss << std::to_string(accessor[i][j][k][l][m][n]) <<
" ";
413 throw std::runtime_error(
414 "Dimensions higher than 4 are not implemented yet");
416 pugi::xml_node data = node.append_child(
"Data");
417 data.append_child(pugi::node_pcdata).set_value(ss.str().c_str());
424template <
typename T, std::
size_t N>
425inline pugi::xml_document
to_xml(
const torch::Tensor &tensor,
426 std::string tag =
"Matrix",
int id = 0,
427 std::string label =
"",
int index = -1) {
428 pugi::xml_document doc;
429 pugi::xml_node root = doc.append_child(
"xml");
430 to_xml<T, N>(tensor, root,
id, label, index);
436template <
typename T, std::
size_t N>
437inline pugi::xml_node &
to_xml(
const torch::Tensor &tensor, pugi::xml_node &root,
438 std::string tag =
"Matrix",
int id = 0,
439 std::string label =
"",
int index = -1) {
441 if (tensor.is_cuda()) {
442 auto [tensor_cpu, accessor] = to_tensorAccessor<T, N>(tensor, torch::kCPU);
443 return to_xml(accessor, tensor.sizes(), root, tag,
id, label, index);
445 auto accessor = to_tensorAccessor<T, N>(tensor);
446 return to_xml(accessor, tensor.sizes(), root, tag,
id, label, index);
452template <
typename T, std::
size_t N, std::
size_t M>
454 std::string tag =
"Matrix",
int id = 0,
455 std::string label =
"",
int index = -1) {
456 pugi::xml_document doc;
457 pugi::xml_node root = doc.append_child(
"xml");
458 to_xml<T, N>(tensors, root,
id, label, index);
465template <
typename T, std::
size_t N, std::
size_t M>
467 pugi::xml_node &root, std::string tag =
"Matrix",
468 int id = 0, std::string label =
"") {
470 for (std::size_t i = 0; i < M; ++i) {
471 if (tensors[i].is_cuda()) {
472 auto [tensor_cpu, accessor] =
473 to_tensorAccessor<T, N>(tensors[i], torch::kCPU);
474 to_xml(accessor, tensors[i].sizes(), root, tag,
id, label, i);
476 auto accessor = to_tensorAccessor<T, N>(tensors[i]);
477 to_xml(accessor, tensors[i].sizes(), root, tag,
id, label, i);
485template <
typename T, std::
size_t N>
486inline torch::TensorAccessor<T, N> &
487from_xml(
const pugi::xml_document &doc, torch::TensorAccessor<T, N> &accessor,
488 torch::IntArrayRef sizes, std::string tag =
"Matrix",
int id = 0,
489 std::string label =
"",
int index = -1) {
490 return from_xml(doc.child(
"xml"), accessor, sizes, tag,
id, label, index);
/// @brief Converts an XML node object to a torch::TensorAccessor object.
///
/// NOTE(review): the body of this overload is not visible in this extract;
/// only the signature is reproduced here — restore the body from the
/// original header before building.
template <typename T, std::size_t N>
inline torch::TensorAccessor<T, N> &
from_xml(const pugi::xml_node &root, torch::TensorAccessor<T, N> &accessor,
         torch::IntArrayRef sizes, std::string tag = "Matrix", int id = 0,
         std::string label = "", int index = -1)
504template <
typename T, std::
size_t N>
505inline torch::Tensor &
506from_xml(
const pugi::xml_document &doc, torch::Tensor &tensor,
507 std::string tag =
"Matrix",
int id = 0, std::string label =
"",
508 bool alloc =
true,
int index = -1) {
509 return from_xml<T, N>(doc.child(
"xml"), tensor, tag,
id, label, index);
513template <
typename T, std::
size_t N>
514inline torch::Tensor &
515from_xml(
const pugi::xml_node &root, torch::Tensor &tensor,
516 std::string tag =
"Matrix",
int id = 0, std::string label =
"",
517 bool alloc =
true,
int index = -1) {
520 for (pugi::xml_node node : root.children(tag.c_str())) {
522 if ((
id >= 0 ? node.attribute(
"id").as_int() ==
id :
true) &&
523 (index >= 0 ? node.attribute(
"index").as_int() == index :
true) &&
524 (!label.empty() ? node.attribute(
"label").value() == label :
true)) {
526 if (tag ==
"Matrix") {
528 int64_t rows = node.attribute(
"rows").as_int();
529 int64_t cols = node.attribute(
"cols").as_int();
531 if (!alloc && (tensor.size(0) != rows || tensor.size(1) != cols))
532 throw std::runtime_error(
"Invalid matrix dimensions");
534 else if (alloc && (tensor.size(0) != rows || tensor.size(1) != cols))
535 tensor = torch::zeros({rows, cols}, tensor.options());
537 std::string values = std::regex_replace(
538 node.text().get(), std::regex(
"[\t\r\n\a]+| +"),
" ");
540 auto [tensor_cpu, accessor] =
541 to_tensorAccessor<T, N>(tensor, torch::kCPU);
542 auto value = strtok(&values[0],
" ");
544 for (int64_t i = 0; i < rows; ++i)
545 for (int64_t j = 0; j < cols; ++j) {
547 throw std::runtime_error(
548 "XML object does not provide enough coefficients");
550 accessor[i][j] =
static_cast<T
>(std::stod(value));
551 value = strtok(NULL,
" ");
555 throw std::runtime_error(
"XML object provides too many coefficients");
557 if (tensor.device().type() != torch::kCPU)
558 tensor = std::move(tensor_cpu);
564 std::vector<int64_t> sizes;
567 if (pugi::xml_node dims = node.child(
"Dimensions")) {
569 std::string values = std::regex_replace(
570 dims.text().get(), std::regex(
"[\t\r\n\a]+| +"),
" ");
571 for (
auto value = strtok(&values[0],
" "); value != NULL;
572 value = strtok(NULL,
" "))
573 sizes.push_back(
static_cast<std::size_t
>(std::stoi(value)));
575 if (!alloc && (tensor.sizes() != sizes))
576 throw std::runtime_error(
"Invalid tensor dimensions");
578 else if (alloc && (tensor.sizes() != sizes))
579 tensor = torch::zeros(torch::IntArrayRef{sizes}, tensor.options());
581 if (sizes.size() != N)
582 throw std::runtime_error(
"Invalid tensor dimensions");
585 if (pugi::xml_node data = node.child(
"Data")) {
586 std::string values = std::regex_replace(
587 data.text().get(), std::regex(
"[\t\r\n\a]+| +"),
" ");
589 auto [tensor_cpu, accessor] =
590 to_tensorAccessor<T, N>(tensor, torch::kCPU);
591 auto value = strtok(&values[0],
" ");
593 if constexpr (N == 1) {
594 for (int64_t i = 0; i < sizes[0]; ++i) {
596 throw std::runtime_error(
597 "XML object does not provide enough coefficients");
599 accessor[i] =
static_cast<T
>(std::stod(value));
600 value = strtok(NULL,
" ");
602 }
else if constexpr (N == 2) {
603 for (int64_t i = 0; i < sizes[0]; ++i)
604 for (int64_t j = 0; j < sizes[1]; ++j) {
606 throw std::runtime_error(
607 "XML object does not provide enough coefficients");
609 accessor[i][j] =
static_cast<T
>(std::stod(value));
610 value = strtok(NULL,
" ");
612 }
else if constexpr (N == 3) {
613 for (int64_t i = 0; i < sizes[0]; ++i)
614 for (int64_t j = 0; j < sizes[1]; ++j)
615 for (int64_t k = 0; k < sizes[2]; ++k) {
617 throw std::runtime_error(
618 "XML object does not provide enough coefficients");
620 accessor[i][j][k] =
static_cast<T
>(std::stod(value));
621 value = strtok(NULL,
" ");
623 }
else if constexpr (N == 4) {
624 for (int64_t i = 0; i < sizes[0]; ++i)
625 for (int64_t j = 0; j < sizes[1]; ++j)
626 for (int64_t k = 0; k < sizes[2]; ++k)
627 for (int64_t l = 0; l < sizes[3]; ++l) {
629 throw std::runtime_error(
630 "XML object does not provide enough coefficients");
632 accessor[i][j][k][l] =
static_cast<T
>(std::stod(value));
633 value = strtok(NULL,
" ");
635 }
else if constexpr (N == 5) {
636 for (int64_t i = 0; i < sizes[0]; ++i)
637 for (int64_t j = 0; j < sizes[1]; ++j)
638 for (int64_t k = 0; k < sizes[2]; ++k)
639 for (int64_t l = 0; l < sizes[3]; ++l)
640 for (int64_t m = 0; m < sizes[4]; ++m) {
642 throw std::runtime_error(
643 "XML object does not provide enough "
646 accessor[i][j][k][l][m] =
647 static_cast<T
>(std::stod(value));
648 value = strtok(NULL,
" ");
650 }
else if constexpr (N == 6) {
651 for (int64_t i = 0; i < sizes[0]; ++i)
652 for (int64_t j = 0; j < sizes[1]; ++j)
653 for (int64_t k = 0; k < sizes[2]; ++k)
654 for (int64_t l = 0; l < sizes[3]; ++l)
655 for (int64_t m = 0; m < sizes[4]; ++m)
656 for (int64_t n = 0; n < sizes[5]; ++n) {
658 throw std::runtime_error(
659 "XML object does not provide enough "
662 accessor[i][j][k][l][m][n] =
663 static_cast<T
>(std::stod(value));
664 value = strtok(NULL,
" ");
669 throw std::runtime_error(
670 "XML object provides too many coefficients");
672 if (tensor.device().type() != torch::kCPU)
673 tensor = std::move(tensor_cpu);
679 throw std::runtime_error(
680 "XML object does not provide a \"Dimensions\" tag");
688 throw std::runtime_error(
689 "XML object does not provide tag with given id, index, and/or label");
695template <
typename T, std::
size_t N, std::
size_t M>
698 std::string tag =
"Matrix",
int id = 0,
bool alloc =
true,
699 std::string label =
"") {
701 return from_xml<T, N>(doc.child(
"xml"), tensors, tag,
id, label, alloc);
705template <
typename T, std::
size_t N, std::
size_t M>
708 std::string tag =
"Matrix",
int id = 0,
bool alloc =
true,
709 std::string label =
"") {
711 for (std::size_t i = 0; i < M; ++i) {
712 from_xml<T, N>(root, tensors[i], tag,
id, label, alloc, i);
The Options class handles the automated determination of dtype from the template argument and the sel...
Definition options.hpp:107
pugi::xml_document to_xml(const torch::TensorAccessor< T, N > &accessor, torch::IntArrayRef sizes, std::string tag="Matrix", int id=0, std::string label="", int index=-1)
Converts a torch::TensorAccessor object to an XML document object.
Definition serialize.hpp:316
auto to_json(const torch::TensorAccessor< T, N > &accessor)
Converts a torch::TensorAccessor object to a JSON object.
Definition serialize.hpp:41
torch::TensorAccessor< T, N > & from_xml(const pugi::xml_document &doc, torch::TensorAccessor< T, N > &accessor, torch::IntArrayRef sizes, std::string tag="Matrix", int id=0, std::string label="", int index=-1)
Converts an XML document object to a torch::TensorAccessor object.
Definition serialize.hpp:487
std::array< torch::Tensor, N > TensorArray
Definition tensorarray.hpp:28
Definition boundary.hpp:22
struct iganet::@0 Log
Logger.
Serialization prototype.
Definition serialize.hpp:31
virtual void pretty_print(std::ostream &os=Log(log::info)) const =0
Returns a string representation of the object.
virtual nlohmann::json to_json() const =0
Returns the object as JSON object.
TensorArray utility functions.