svMultiPhysics — Tensor4.h
1// SPDX-FileCopyrightText: Copyright (c) Stanford University, The Regents of the University of California, and others.
2// SPDX-License-Identifier: BSD-3-Clause
3
4#ifndef TENSOR4_H
5#define TENSOR4_H
6
7#include <cstring>
8#include <iostream>
9
10#include "Vector.h"
11
12#ifdef ENABLE_ARRAY_INDEX_CHECKING
13#define Tensor4_check_enabled
14#endif
15
16/// @brief The Tensor4 template class implements a simple interface to 4th order tensors.
17//
/// @brief The Tensor4 template class implements a simple interface to 4th order tensors.
///
/// Elements are stored contiguously in a single heap array; element (i,j,k,l)
/// is addressed as i + j*ni_ + k*(ni_*nj_) + l*(ni_*nj_*nk_).
///
/// @note Copying uses memcpy(), so T is assumed to be trivially copyable
///       (e.g. double) — TODO confirm no non-trivial T is used by callers.
//
template<typename T>
class Tensor4
{
  public:
    std::string name_ = "";
    int ni_ = 0;
    int nj_ = 0;
    int nk_ = 0;
    int nl_ = 0;
    int p1_ = 0;        // Stride of the k index: ni_ * nj_.
    int p2_ = 0;        // Stride of the l index: ni_ * nj_ * nk_.
    int size_ = 0;      // Total number of elements.
    T *data_ = nullptr;

    /// @brief Default constructor: members are set by their in-class initializers.
    ///
    /// The previous implementation declared function-local variables that
    /// shadowed every data member and had no effect; they have been removed.
    Tensor4() = default;

    /// @brief Construct a tensor with the given dimensions, zero-initialized.
    Tensor4(const int num_i, const int num_j, const int num_k, const int num_l)
    {
      allocate(num_i, num_j, num_k, num_l);
    }

    ~Tensor4()
    {
      if (data_ != nullptr) {
        delete [] data_;
        data_ = nullptr;
      }
    }

    /// @brief Copy constructor: deep-copies the source data.
    Tensor4(const Tensor4& rhs)
    {
      if (rhs.ni_ <= 0 || rhs.nj_ <= 0 || rhs.nk_ <= 0 || rhs.nl_ <= 0) {
        return;
      }
      allocate(rhs.ni_, rhs.nj_, rhs.nk_, rhs.nl_);
      memcpy(data_, rhs.data_, size_*sizeof(T));
    }

    /// @brief Move constructor: steals the source buffer (Rule of Five).
    ///
    /// Added because the class defines a destructor and copy operations;
    /// without it, temporaries were always deep-copied.
    Tensor4(Tensor4&& rhs) noexcept
      : ni_(rhs.ni_), nj_(rhs.nj_), nk_(rhs.nk_), nl_(rhs.nl_),
        p1_(rhs.p1_), p2_(rhs.p2_), size_(rhs.size_), data_(rhs.data_)
    {
      name_.swap(rhs.name_);   // swap() is noexcept, unlike string copy
      rhs.data_ = nullptr;
      rhs.ni_ = rhs.nj_ = rhs.nk_ = rhs.nl_ = 0;
      rhs.p1_ = rhs.p2_ = rhs.size_ = 0;
    }

    /// @brief Move assignment: frees our buffer and steals the source's.
    Tensor4& operator=(Tensor4&& rhs) noexcept
    {
      if (this != &rhs) {
        clear();
        name_.swap(rhs.name_);
        ni_ = rhs.ni_; nj_ = rhs.nj_; nk_ = rhs.nk_; nl_ = rhs.nl_;
        p1_ = rhs.p1_; p2_ = rhs.p2_; size_ = rhs.size_;
        data_ = rhs.data_;
        rhs.data_ = nullptr;
        rhs.ni_ = rhs.nj_ = rhs.nk_ = rhs.nl_ = 0;
        rhs.p1_ = rhs.p2_ = rhs.size_ = 0;
      }
      return *this;
    }

    int num_i() const { return ni_; }
    int num_j() const { return nj_; }
    int num_k() const { return nk_; }
    int num_l() const { return nl_; }

    /// @brief Tensor4 assignment: deep-copies rhs, reallocating on dimension change.
    ///
    /// A rhs with any non-positive dimension is ignored (this is left unchanged),
    /// matching the copy constructor's behavior.
    Tensor4& operator=(const Tensor4& rhs)
    {
      if (this == &rhs) {
        return *this;
      }

      if (rhs.ni_ <= 0 || rhs.nj_ <= 0 || rhs.nk_ <= 0 || rhs.nl_ <= 0) {
        return *this;
      }

      // [fix] This comparison previously read 'nl_ != rhs.nl_ <= 0', which
      // parses as 'nl_ != (rhs.nl_ <= 0)' and compared nl_ against 0 or 1,
      // so a change in the l dimension alone could skip reallocation.
      if (ni_ != rhs.ni_ || nj_ != rhs.nj_ || nk_ != rhs.nk_ || nl_ != rhs.nl_) {
        clear();
        allocate(rhs.ni_, rhs.nj_, rhs.nk_, rhs.nl_);
      }

      memcpy(data_, rhs.data_, sizeof(T) * size_);
      return *this;
    }

    /// @brief Fill every element with the given scalar value.
    Tensor4& operator=(const double value)
    {
      for (int i = 0; i < size_; i++) {
        data_[i] = value;
      }
      return *this;
    }

    /// @brief Write the tensor to a binary file named '<prefix>_cm.bin'.
    ///
    /// The prefix is built from 'label': '[' and ' ' are dropped, ']' and ':'
    /// become '_'. The file holds the element count followed by the raw data.
    ///
    /// @param label  Used to build the output file name.
    /// @param memory Unused; kept for interface compatibility.
    /// @param offset Unused; kept for interface compatibility.
    /// @throws std::runtime_error if the file cannot be opened.
    void write(const std::string& label, bool memory=true, T offset={}) const
    {
      (void)memory;
      (void)offset;

      // [fix] Build the prefix in a std::string; the previous fixed-size
      // char[1000] buffer could overflow on a long label.
      std::string file_prefix;
      file_prefix.reserve(label.size());
      for (auto c : label) {
        if (c == '[' || c == ' ') {
          continue;
        }
        if (c == ']' || c == ':') {
          file_prefix += '_';
          continue;
        }
        file_prefix += c;
      }

      // Write binary file.
      std::string file_name = file_prefix + "_cm.bin";
      std::cout << "[Tensor4::write] file_name: " << file_name << std::endl;
      FILE* fp = fopen(file_name.c_str(), "wb");
      if (fp == nullptr) {
        // [fix] fopen() was unchecked; fwrite on a null FILE* is UB.
        throw std::runtime_error("[Tensor4::write] Unable to open the file '" + file_name + "'.");
      }
      fwrite(&size_, sizeof(int), 1, fp);
      // [fix] Element size was hard-coded as sizeof(double); use sizeof(T) so
      // non-double instantiations write the correct number of bytes.
      fwrite(data_, sizeof(T), size_, fp);
      fclose(fp);
    }

    /// @brief Stream all elements as a comma-separated list.
    friend std::ostream& operator << (std::ostream& out, const Tensor4<T>& lhs)
    {
      for (int i = 0; i < lhs.size_; i++) {
        out << lhs.data_[i];
        if (i != lhs.size_-1) {
          out << ", ";
        }
      }
      return out;
    }

    //----------
    // allocate
    //----------
    /// @brief Allocate zero-initialized storage for the given dimensions.
    ///
    /// Does not free any existing buffer; callers (resize, operator=) must
    /// release it first.
    void allocate(const int num_i, const int num_j, const int num_k, const int num_l)
    {
      ni_ = num_i;
      nj_ = num_j;
      nk_ = num_k;
      nl_ = num_l;
      p1_ = num_i * num_j;
      // [fix] The l-stride must be ni*nj*nk. It was previously computed as
      // p1_ * num_l, which made distinct (k,l) index pairs alias the same
      // element whenever num_k != num_l (non-cubic tensors).
      p2_ = p1_ * num_k;
      size_ = ni_ * nj_ * nk_ * nl_;
      // Value-initialization zeroes arithmetic types and is well-defined for
      // any T, unlike the previous memset().
      data_ = new T [size_]();
    }

    //-------------
    // check_index
    //-------------
    /// @brief Throw std::runtime_error on a null buffer or out-of-range index.
    void check_index(const int i, const int j, const int k, const int l) const
    {
      if (data_ == nullptr) {
        throw std::runtime_error(name_+"Accessing null data in Tensor4.");
      }
      if ((i < 0) || (i >= ni_) || (j < 0) || (j >= nj_) || (k < 0) || (k >= nk_) || (l < 0) || (l >= nl_)) {
        auto i_str = std::to_string(ni_);
        auto j_str = std::to_string(nj_);
        auto k_str = std::to_string(nk_);
        auto l_str = std::to_string(nl_);
        auto dims = i_str + " x " + j_str + " x " + k_str + " x " + l_str;
        auto index_str = " " + std::to_string(i) + "," + std::to_string(j) + "," + std::to_string(k) + "," + std::to_string(l);
        throw std::runtime_error("Index (i,j,k,l)=" + index_str + " is out of bounds for " + dims + " array.");
      }
    }

    //-------
    // clear
    //-------
    /// @brief Free the array data and reset all dimensions.
    ///
    /// This is to replicate the Fortran DEALLOCATE().
    void clear()
    {
      if (data_ != nullptr) {
        delete [] data_;
      }

      ni_ = 0;
      nj_ = 0;
      nk_ = 0;
      nl_ = 0;
      p1_ = 0;
      p2_ = 0;
      size_ = 0;
      data_ = nullptr;
    }

    //--------
    // resize
    //--------
    /// @brief Resize the array; existing contents are discarded.
    void resize(const int num_i, const int num_j, const int num_k, const int num_l)
    {
      if (data_ != nullptr) {
        delete [] data_;
        data_ = nullptr;
      }

      allocate(num_i, num_j, num_k, num_l);
    }

    /////////////////////////
    //  O p e r a t o r s  //
    /////////////////////////

    const T& operator()(const int i, const int j, const int k, const int l) const
    {
      #ifdef Tensor4_check_enabled
      check_index(i, j, k, l);
      #endif
      return data_[i + j*ni_ + p1_*k + p2_*l];
    }

    T& operator()(const int i, const int j, const int k, const int l)
    {
      #ifdef Tensor4_check_enabled
      check_index(i, j, k, l);
      #endif
      return data_[i + j*ni_ + p1_*k + p2_*l];
    }

    ///////////////////////////////////////////////////
    //  M a t h e m a t i c a l   o p e r a t o r s  //
    ///////////////////////////////////////////////////

    /// @brief Multiply by a scalar.
    Tensor4<T> operator*(const T value) const
    {
      Tensor4<T> result(ni_, nj_, nk_, nl_);
      for (int i = 0; i < size_; i++) {
        result.data_[i] = value * data_[i];
      }
      return result;
    }

    friend const Tensor4<T> operator * (const T value, const Tensor4& rhs)
    {
      if (rhs.data_ == nullptr) {
        throw std::runtime_error("Null data for rhs Tensor4.");
      }
      Tensor4<T> result(rhs.ni_, rhs.nj_, rhs.nk_, rhs.nl_);
      for (int i = 0; i < rhs.size_; i++) {
        result.data_[i] = value * rhs.data_[i];
      }
      return result;
    }

    // [fix] The element-wise operators below previously indexed rhs.data_
    // over this->size_ without checking dimensions, reading out of bounds
    // on a mismatch; they now throw instead.
    void check_same_dims(const Tensor4<T>& rhs, const char* op) const
    {
      if (ni_ != rhs.ni_ || nj_ != rhs.nj_ || nk_ != rhs.nk_ || nl_ != rhs.nl_) {
        throw std::runtime_error(std::string("Tensor4 ") + op + ": dimension mismatch.");
      }
    }

    /// @brief Compound add assignment.
    ///
    /// @note Declared const (mutates through the non-const data_ pointer) and
    ///       returns a copy; kept as-is for interface compatibility.
    Tensor4<T> operator+=(const Tensor4<T>& rhs) const
    {
      check_same_dims(rhs, "operator+=");
      for (int i = 0; i < size_; i++) {
        data_[i] += rhs.data_[i];
      }
      return *this;
    }

    /// @brief Compound subtract assignment (same const/return caveats as +=).
    Tensor4<T> operator-=(const Tensor4<T>& rhs) const
    {
      check_same_dims(rhs, "operator-=");
      for (int i = 0; i < size_; i++) {
        data_[i] -= rhs.data_[i];
      }
      return *this;
    }

    /// @brief Element-wise addition.
    Tensor4<T> operator+(const Tensor4<T>& rhs) const
    {
      check_same_dims(rhs, "operator+");
      Tensor4<T> result(ni_, nj_, nk_, nl_);
      for (int i = 0; i < size_; i++) {
        result.data_[i] = data_[i] + rhs.data_[i];
      }
      return result;
    }

    /// @brief Element-wise subtraction.
    Tensor4<T> operator-(const Tensor4<T>& rhs) const
    {
      check_same_dims(rhs, "operator-");
      Tensor4<T> result(ni_, nj_, nk_, nl_);
      for (int i = 0; i < size_; i++) {
        result.data_[i] = data_[i] - rhs.data_[i];
      }
      return result;
    }

};
321
322#endif
323
The Tensor4 template class implements a simple interface to 4th order tensors (defined in Tensor4.h, line 20).