symmetric_tensor.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2005 - 2021 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 #ifndef dealii_symmetric_tensor_h
17 #define dealii_symmetric_tensor_h
18 
19 
20 #include <deal.II/base/config.h>
21 
22 #include <deal.II/base/numbers.h>
25 #include <deal.II/base/tensor.h>
26 
27 #include <algorithm>
28 #include <array>
29 #include <functional>
30 
32 
33 // Forward declaration
34 #ifndef DOXYGEN
35 template <int rank, int dim, typename Number = double>
36 class SymmetricTensor;
37 #endif
38 
39 template <int dim, typename Number>
40 constexpr SymmetricTensor<2, dim, Number>
41  unit_symmetric_tensor();
42 
43 template <int dim, typename Number>
44 constexpr SymmetricTensor<4, dim, Number>
45  deviator_tensor();
46 
47 template <int dim, typename Number>
48 constexpr SymmetricTensor<4, dim, Number>
49  identity_tensor();
50 
51 template <int dim, typename Number>
52 constexpr SymmetricTensor<2, dim, Number>
53  invert(const SymmetricTensor<2, dim, Number> &);
54 
55 template <int dim, typename Number>
56 constexpr SymmetricTensor<4, dim, Number>
57  invert(const SymmetricTensor<4, dim, Number> &);
58 
59 template <int dim2, typename Number>
60 constexpr inline DEAL_II_ALWAYS_INLINE Number
61  trace(const SymmetricTensor<2, dim2, Number> &);
62 
63 template <int dim, typename Number>
64 constexpr SymmetricTensor<2, dim, Number>
65  deviator(const SymmetricTensor<2, dim, Number> &);
66 
67 template <int dim, typename Number>
68 constexpr inline DEAL_II_ALWAYS_INLINE Number
69  determinant(const SymmetricTensor<2, dim, Number> &);
70 
71 
72 
73 namespace internal
74 {
75  // Workaround: The following 4 overloads are necessary to be able to
76  // compile the library with Apple Clang 8 and older. We should remove
77  // these overloads again when we bump the minimal required version to
78  // something later than clang-3.6 / Apple Clang 6.3.
79  template <int rank, int dim, typename T, typename U>
80  struct ProductTypeImpl<SymmetricTensor<rank, dim, T>, std::complex<U>>
81  {
82  using type =
83  SymmetricTensor<rank,
84  dim,
85  std::complex<typename ProductType<T, U>::type>>;
86  };
87 
88  template <int rank, int dim, typename T, typename U>
89  struct ProductTypeImpl<SymmetricTensor<rank, dim, std::complex<T>>,
90  std::complex<U>>
91  {
92  using type =
93  SymmetricTensor<rank,
94  dim,
95  std::complex<typename ProductType<T, U>::type>>;
96  };
97 
98  template <typename T, int rank, int dim, typename U>
99  struct ProductTypeImpl<std::complex<T>, SymmetricTensor<rank, dim, U>>
100  {
101  using type =
102  SymmetricTensor<rank,
103  dim,
104  std::complex<typename ProductType<T, U>::type>>;
105  };
106 
107  template <int rank, int dim, typename T, typename U>
108  struct ProductTypeImpl<std::complex<T>,
109  SymmetricTensor<rank, dim, std::complex<U>>>
110  {
111  using type =
112  SymmetricTensor<rank,
113  dim,
114  std::complex<typename ProductType<T, U>::type>>;
115  };
116  // end workaround
117 
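 // Illustration (not part of this file): with the overloads above in place,
 // scaling a real-valued symmetric tensor by a complex scalar yields a
 // complex-valued symmetric tensor of the same rank and dimension. A sketch,
 // assuming the usual ProductType trait and the scalar-times-tensor operator
 // provided by this header:
 //
 // @code
 //   SymmetricTensor<2, 3, double> t;
 //   const std::complex<double>    c(0., 1.);
 //
 //   // ProductType<std::complex<double>,
 //   //             SymmetricTensor<2, 3, double>>::type is
 //   // SymmetricTensor<2, 3, std::complex<double>>:
 //   const SymmetricTensor<2, 3, std::complex<double>> u = c * t;
 // @endcode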
122  namespace SymmetricTensorImplementation
123  {
128  template <int rank, int dim, typename Number>
129  struct Inverse;
130  } // namespace SymmetricTensorImplementation
131 
136  namespace SymmetricTensorAccessors
137  {
144  constexpr inline TableIndices<2>
145  merge(const TableIndices<2> &previous_indices,
146  const unsigned int new_index,
147  const unsigned int position)
148  {
149  AssertIndexRange(position, 2);
150 
151  if (position == 0)
152  return {new_index, numbers::invalid_unsigned_int};
153  else
154  return {previous_indices[0], new_index};
155  }
156 
157 
158 
165  constexpr inline TableIndices<4>
166  merge(const TableIndices<4> &previous_indices,
167  const unsigned int new_index,
168  const unsigned int position)
169  {
170  AssertIndexRange(position, 4);
171 
172  switch (position)
173  {
174  case 0:
175  return {new_index,
176  numbers::invalid_unsigned_int,
177  numbers::invalid_unsigned_int,
178  numbers::invalid_unsigned_int};
179  case 1:
180  return {previous_indices[0],
181  new_index,
182  numbers::invalid_unsigned_int,
183  numbers::invalid_unsigned_int};
184  case 2:
185  return {previous_indices[0],
186  previous_indices[1],
187  new_index,
188  numbers::invalid_unsigned_int};
189  case 3:
190  return {previous_indices[0],
191  previous_indices[1],
192  previous_indices[2],
193  new_index};
194  default:
195  Assert(false, ExcInternalError());
196  return {};
197  }
198  }
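 // Example of how merge() builds up an index set one position at a time
 // (illustration only): starting from the partially filled set (0, 1, *, *),
 // adding index 2 at position 2 gives (0, 1, 2, *):
 //
 // @code
 //   const TableIndices<4> previous(0,
 //                                  1,
 //                                  numbers::invalid_unsigned_int,
 //                                  numbers::invalid_unsigned_int);
 //   const TableIndices<4> next = merge(previous, 2, 2);
 //   // next now holds {0, 1, 2, numbers::invalid_unsigned_int}
 // @endcode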
199 
200 
207  template <int rank1,
208  int rank2,
209  int dim,
210  typename Number,
211  typename OtherNumber = Number>
212  struct double_contraction_result
213  {
214  using value_type = typename ProductType<Number, OtherNumber>::type;
215  using type =
216  ::SymmetricTensor<rank1 + rank2 - 4, dim, value_type>;
217  };
218 
219 
226  template <int dim, typename Number, typename OtherNumber>
227  struct double_contraction_result<2, 2, dim, Number, OtherNumber>
228  {
229  using type = typename ProductType<Number, OtherNumber>::type;
230  };
231 
232 
233 
246  template <int rank, int dim, typename Number>
247  struct StorageType;
248 
252  template <int dim, typename Number>
253  struct StorageType<2, dim, Number>
254  {
259  static const unsigned int n_independent_components =
260  (dim * dim + dim) / 2;
261 
265  using base_tensor_type = Tensor<1, n_independent_components, Number>;
266  };
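 // Worked example (illustration only): a rank-2 symmetric tensor in dim=3
 // has (3*3+3)/2 = 6 independent components, stored as a Tensor<1,6> in the
 // order xx, yy, zz, xy, xz, yz -- diagonal entries first, then the
 // off-diagonal entries row by row. This is the ordering the accessors and
 // the unrolling functions further down in this file rely on:
 //
 // @code
 //   SymmetricTensor<2, 3> t;
 //   t[0][1] = 2.;                 // the (x,y) entry ...
 //   t.access_raw_entry(3);        // ... lives in packed component 3
 // @endcode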
267 
268 
269 
273  template <int dim, typename Number>
274  struct StorageType<4, dim, Number>
275  {
281  static const unsigned int n_rank2_components = (dim * dim + dim) / 2;
282 
286  static const unsigned int n_independent_components =
287  (n_rank2_components *
288  StorageType<2, dim, Number>::n_independent_components);
289 
296  using base_tensor_type = Tensor<2, n_rank2_components, Number>;
297  };
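 // Illustration (not part of this file): for dim=3 a rank-4 symmetric tensor
 // is stored as a 6x6 matrix acting on packed rank-2 tensors, i.e. as a
 // Tensor<2,6> with 6*6 = 36 independent entries. The pairwise symmetries
 // C_ijkl = C_jikl = C_ijlk are built into this representation, but the
 // major symmetry C_ijkl = C_klij is not assumed, which is why the full
 // matrix is stored:
 //
 // @code
 //   SymmetricTensor<4, 3> C;
 //   C[0][1][2][0] = 1.;
 //   // by the built-in pairwise symmetries, C[1][0][0][2] is now also 1.
 // @endcode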
298 
299 
300 
305  template <int rank, int dim, bool constness, typename Number>
306  struct AccessorTypes;
307 
314  template <int rank, int dim, typename Number>
315  struct AccessorTypes<rank, dim, true, Number>
316  {
317  using tensor_type = const ::SymmetricTensor<rank, dim, Number>;
318 
319  using reference = Number;
320  };
321 
328  template <int rank, int dim, typename Number>
329  struct AccessorTypes<rank, dim, false, Number>
330  {
331  using tensor_type = ::SymmetricTensor<rank, dim, Number>;
332 
333  using reference = Number &;
334  };
335 
336 
369  template <int rank, int dim, bool constness, int P, typename Number>
370  class Accessor
371  {
372  public:
376  using reference =
377  typename AccessorTypes<rank, dim, constness, Number>::reference;
378  using tensor_type =
379  typename AccessorTypes<rank, dim, constness, Number>::tensor_type;
380 
381  private:
400  constexpr Accessor(tensor_type & tensor,
401  const TableIndices<rank> &previous_indices);
402 
406  constexpr DEAL_II_ALWAYS_INLINE
407  Accessor(const Accessor &) = default;
408 
409  public:
413  constexpr Accessor<rank, dim, constness, P - 1, Number>
414  operator[](const unsigned int i);
415 
419  constexpr Accessor<rank, dim, constness, P - 1, Number>
420  operator[](const unsigned int i) const;
421 
422  private:
426  tensor_type &            tensor;
427  const TableIndices<rank> previous_indices;
428 
429  // Declare some other classes as friends. Make sure to work around bugs
430  // in some compilers:
431  template <int, int, typename>
432  friend class ::SymmetricTensor;
433  template <int, int, bool, int, typename>
434  friend class Accessor;
435  friend class ::SymmetricTensor<rank, dim, Number>;
436  friend class Accessor<rank, dim, constness, P + 1, Number>;
437  };
438 
439 
440 
448  template <int rank, int dim, bool constness, typename Number>
449  class Accessor<rank, dim, constness, 1, Number>
450  {
451  public:
455  using reference =
456  typename AccessorTypes<rank, dim, constness, Number>::reference;
457  using tensor_type =
458  typename AccessorTypes<rank, dim, constness, Number>::tensor_type;
459 
460  private:
482  constexpr Accessor(tensor_type & tensor,
483  const TableIndices<rank> &previous_indices);
484 
488  constexpr DEAL_II_ALWAYS_INLINE
489  Accessor(const Accessor &) = default;
490 
491  public:
495  constexpr reference operator[](const unsigned int);
496 
500  constexpr reference operator[](const unsigned int) const;
501 
502  private:
506  tensor_type &            tensor;
507  const TableIndices<rank> previous_indices;
508 
509  // Declare some other classes as friends. Make sure to work around bugs
510  // in some compilers:
511  template <int, int, typename>
512  friend class ::SymmetricTensor;
513  template <int, int, bool, int, typename>
514  friend class Accessor;
515  friend class ::SymmetricTensor<rank, dim, Number>;
516  friend class SymmetricTensorAccessors::
517  Accessor<rank, dim, constness, 2, Number>;
518  };
519  } // namespace SymmetricTensorAccessors
520 } // namespace internal
521 
522 
523 
596 template <int rank_, int dim, typename Number>
597 class SymmetricTensor
598 {
599 public:
600  static_assert(rank_ % 2 == 0, "A SymmetricTensor must have even rank!");
601 
610  static const unsigned int dimension = dim;
611 
615  static const unsigned int rank = rank_;
616 
622  static constexpr unsigned int n_independent_components =
623  internal::SymmetricTensorAccessors::StorageType<rank_, dim, Number>::
624  n_independent_components;
625 
629  constexpr DEAL_II_ALWAYS_INLINE
630  SymmetricTensor() = default;
631 
645  template <typename OtherNumber>
646  explicit SymmetricTensor(const Tensor<2, dim, OtherNumber> &t);
647 
663  constexpr SymmetricTensor(const Number (&array)[n_independent_components]);
664 
670  template <typename OtherNumber>
671  constexpr explicit SymmetricTensor(
672  const SymmetricTensor<rank_, dim, OtherNumber> &initializer);
673 
677  Number *
678  begin_raw();
679 
683  const Number *
684  begin_raw() const;
685 
689  Number *
690  end_raw();
691 
696  const Number *
697  end_raw() const;
698 
705  template <typename OtherNumber>
706  constexpr SymmetricTensor &
707  operator=(const SymmetricTensor<rank_, dim, OtherNumber> &rhs);
708 
715  constexpr SymmetricTensor &
716  operator=(const Number &d);
717 
722  constexpr operator Tensor<rank_, dim, Number>() const;
723 
727  constexpr bool
728  operator==(const SymmetricTensor &) const;
729 
733  constexpr bool
734  operator!=(const SymmetricTensor &) const;
735 
739  template <typename OtherNumber>
740  constexpr SymmetricTensor &
741  operator+=(const SymmetricTensor<rank_, dim, OtherNumber> &);
742 
746  template <typename OtherNumber>
747  constexpr SymmetricTensor &
748  operator-=(const SymmetricTensor<rank_, dim, OtherNumber> &);
749 
754  template <typename OtherNumber>
755  constexpr SymmetricTensor &
756  operator*=(const OtherNumber &factor);
757 
761  template <typename OtherNumber>
762  constexpr SymmetricTensor &
763  operator/=(const OtherNumber &factor);
764 
768  constexpr SymmetricTensor
769  operator-() const;
770 
797  template <typename OtherNumber>
798  constexpr typename internal::SymmetricTensorAccessors::
799  double_contraction_result<rank_, 2, dim, Number, OtherNumber>::type
800  operator*(const SymmetricTensor<2, dim, OtherNumber> &s) const;
801 
806  template <typename OtherNumber>
807  constexpr typename internal::SymmetricTensorAccessors::
808  double_contraction_result<rank_, 4, dim, Number, OtherNumber>::type
809  operator*(const SymmetricTensor<4, dim, OtherNumber> &s) const;
810 
814  constexpr Number &
815  operator()(const TableIndices<rank_> &indices);
816 
820  constexpr const Number &
821  operator()(const TableIndices<rank_> &indices) const;
822 
827  constexpr internal::SymmetricTensorAccessors::
828  Accessor<rank_, dim, true, rank_ - 1, Number>
829  operator[](const unsigned int row) const;
830 
835  constexpr internal::SymmetricTensorAccessors::
836  Accessor<rank_, dim, false, rank_ - 1, Number>
837  operator[](const unsigned int row);
838 
844  constexpr const Number &operator[](const TableIndices<rank_> &indices) const;
845 
851  constexpr Number &operator[](const TableIndices<rank_> &indices);
852 
859  constexpr const Number &
860  access_raw_entry(const unsigned int unrolled_index) const;
861 
868  constexpr Number &
869  access_raw_entry(const unsigned int unrolled_index);
870 
880  constexpr typename numbers::NumberTraits<Number>::real_type
881  norm() const;
882 
890  static constexpr unsigned int
891  component_to_unrolled_index(const TableIndices<rank_> &indices);
892 
898  static constexpr TableIndices<rank_>
899  unrolled_to_component_indices(const unsigned int i);
900 
913  constexpr void
914  clear();
915 
920  static constexpr std::size_t
921  memory_consumption();
922 
928  template <class Archive>
929  void
930  serialize(Archive &ar, const unsigned int version);
931 
932 private:
936  using base_tensor_descriptor =
937  internal::SymmetricTensorAccessors::StorageType<rank_, dim, Number>;
938 
942  using base_tensor_type = typename base_tensor_descriptor::base_tensor_type;
943 
947  base_tensor_type data;
948 
949  // Make all other symmetric tensors friends.
950  template <int, int, typename>
951  friend class SymmetricTensor;
952 
953  // Make a few more functions friends.
954  template <int dim2, typename Number2>
955  friend constexpr Number2
956  trace(const SymmetricTensor<2, dim2, Number2> &);
957 
958  template <int dim2, typename Number2>
959  friend constexpr Number2
960  determinant(const SymmetricTensor<2, dim2, Number2> &);
961 
962  template <int dim2, typename Number2>
963  friend constexpr SymmetricTensor<2, dim2, Number2>
964  deviator(const SymmetricTensor<2, dim2, Number2> &);
965 
966  template <int dim2, typename Number2>
967  friend constexpr SymmetricTensor<2, dim2, Number2>
968  unit_symmetric_tensor();
969 
970  template <int dim2, typename Number2>
971  friend constexpr SymmetricTensor<4, dim2, Number2>
972  deviator_tensor();
973 
974  template <int dim2, typename Number2>
975  friend constexpr SymmetricTensor<4, dim2, Number2>
976  identity_tensor();
977 
978 
979  // Make a few helper classes friends as well.
980  friend struct internal::SymmetricTensorImplementation::
981  Inverse<2, dim, Number>;
982 
983  friend struct internal::SymmetricTensorImplementation::
984  Inverse<4, dim, Number>;
985 };
986 
987 
988 
989 // ------------------------- inline functions ------------------------
990 
991 #ifndef DOXYGEN
992 
993 // provide declarations for static members
994 template <int rank, int dim, typename Number>
995 const unsigned int SymmetricTensor<rank, dim, Number>::dimension;
996 
997 template <int rank_, int dim, typename Number>
998 constexpr unsigned int
999  SymmetricTensor<rank_, dim, Number>::n_independent_components;
1000 
1001 namespace internal
1002 {
1003  namespace SymmetricTensorAccessors
1004  {
1005  template <int rank_, int dim, bool constness, int P, typename Number>
1006  constexpr DEAL_II_ALWAYS_INLINE
1007  Accessor<rank_, dim, constness, P, Number>::Accessor(
1008  tensor_type & tensor,
1009  const TableIndices<rank_> &previous_indices)
1010  : tensor(tensor)
1011  , previous_indices(previous_indices)
1012  {}
1013 
1014 
1015 
1016  template <int rank_, int dim, bool constness, int P, typename Number>
1017  constexpr inline DEAL_II_ALWAYS_INLINE
1018  Accessor<rank_, dim, constness, P - 1, Number>
1019  Accessor<rank_, dim, constness, P, Number>::
1020  operator[](const unsigned int i)
1021  {
1022  return Accessor<rank_, dim, constness, P - 1, Number>(
1023  tensor, merge(previous_indices, i, rank_ - P));
1024  }
1025 
1026 
1027 
1028  template <int rank_, int dim, bool constness, int P, typename Number>
1029  constexpr DEAL_II_ALWAYS_INLINE
1030  Accessor<rank_, dim, constness, P - 1, Number>
1031  Accessor<rank_, dim, constness, P, Number>::
1032  operator[](const unsigned int i) const
1033  {
1034  return Accessor<rank_, dim, constness, P - 1, Number>(
1035  tensor, merge(previous_indices, i, rank_ - P));
1036  }
1037 
1038 
1039 
1040  template <int rank_, int dim, bool constness, typename Number>
1041  constexpr DEAL_II_ALWAYS_INLINE
1042  Accessor<rank_, dim, constness, 1, Number>::Accessor(
1043  tensor_type & tensor,
1044  const TableIndices<rank_> &previous_indices)
1045  : tensor(tensor)
1046  , previous_indices(previous_indices)
1047  {}
1048 
1049 
1050 
1051  template <int rank_, int dim, bool constness, typename Number>
1052  constexpr inline DEAL_II_ALWAYS_INLINE
1053  typename Accessor<rank_, dim, constness, 1, Number>::reference
1054  Accessor<rank_, dim, constness, 1, Number>::
1055  operator[](const unsigned int i)
1056  {
1057  return tensor(merge(previous_indices, i, rank_ - 1));
1058  }
1059 
1060 
1061  template <int rank_, int dim, bool constness, typename Number>
1062  constexpr DEAL_II_ALWAYS_INLINE
1063  typename Accessor<rank_, dim, constness, 1, Number>::reference
1064  Accessor<rank_, dim, constness, 1, Number>::
1065  operator[](const unsigned int i) const
1066  {
1067  return tensor(merge(previous_indices, i, rank_ - 1));
1068  }
1069  } // namespace SymmetricTensorAccessors
1070 } // namespace internal
1071 
1072 
1073 
1074 template <int rank_, int dim, typename Number>
1075 template <typename OtherNumber>
1076 inline DEAL_II_ALWAYS_INLINE
1077  SymmetricTensor<rank_, dim, Number>::SymmetricTensor(
1078  const Tensor<2, dim, OtherNumber> &t)
1079 {
1080  static_assert(rank == 2, "This function is only implemented for rank==2");
1081  for (unsigned int d = 0; d < dim; ++d)
1082  for (unsigned int e = 0; e < d; ++e)
1083  Assert(t[d][e] == t[e][d],
1084  ExcMessage("The incoming Tensor must be exactly symmetric."));
1085 
1086  for (unsigned int d = 0; d < dim; ++d)
1087  data[d] = t[d][d];
1088 
1089  for (unsigned int d = 0, c = 0; d < dim; ++d)
1090  for (unsigned int e = d + 1; e < dim; ++e, ++c)
1091  data[dim + c] = t[d][e];
1092 }
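// Usage sketch (illustration only): this constructor requires the argument
// to be exactly symmetric. For a general, only approximately symmetric
// tensor one would typically use the symmetrize() function provided by this
// header instead of this constructor:
//
// @code
//   Tensor<2, 3> a;
//   a[0][1] = a[1][0] = 1.;              // exactly symmetric by construction
//   const SymmetricTensor<2, 3> s1(a);   // uses this constructor
//
//   Tensor<2, 3> b;
//   b[0][1] = 1.;                        // not symmetric
//   const SymmetricTensor<2, 3> s2 = symmetrize(b);   // 0.5 * (b + b^T)
// @endcode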
1093 
1094 
1095 
1096 template <int rank_, int dim, typename Number>
1097 template <typename OtherNumber>
1098 constexpr DEAL_II_ALWAYS_INLINE
1099  SymmetricTensor<rank_, dim, Number>::SymmetricTensor(
1100  const SymmetricTensor<rank_, dim, OtherNumber> &initializer)
1101  : data(initializer.data)
1102 {}
1103 
1104 
1105 
1106 template <int rank_, int dim, typename Number>
1107 constexpr inline DEAL_II_ALWAYS_INLINE
1108  SymmetricTensor<rank_, dim, Number>::SymmetricTensor(
1109  const Number (&array)[n_independent_components])
1110  : data(
1111  *reinterpret_cast<const typename base_tensor_type::array_type *>(array))
1112 {
1113  // ensure that the reinterpret_cast above actually works
1114  Assert(sizeof(typename base_tensor_type::array_type) == sizeof(array),
1115  ExcInternalError());
1116 }
1117 
1118 
1119 
1120 template <int rank_, int dim, typename Number>
1121 template <typename OtherNumber>
1125 {
1126  data = t.data;
1127  return *this;
1128 }
1129 
1130 
1131 
1132 template <int rank_, int dim, typename Number>
1135 {
1137  ExcMessage("Only assignment with zero is allowed"));
1138  (void)d;
1139 
1141 
1142  return *this;
1143 }
1144 
1145 
1146 namespace internal
1147 {
1148  namespace SymmetricTensorImplementation
1149  {
1150  template <int dim, typename Number>
1151  constexpr inline DEAL_II_ALWAYS_INLINE ::Tensor<2, dim, Number>
1152  convert_to_tensor(const ::SymmetricTensor<2, dim, Number> &s)
1153  {
1154  Tensor<2, dim, Number> t;
1155 
1156  // diagonal entries are stored first
1157  for (unsigned int d = 0; d < dim; ++d)
1158  t[d][d] = s.access_raw_entry(d);
1159 
1160  // off-diagonal entries come next, row by row
1161  for (unsigned int d = 0, c = 0; d < dim; ++d)
1162  for (unsigned int e = d + 1; e < dim; ++e, ++c)
1163  {
1164  t[d][e] = s.access_raw_entry(dim + c);
1165  t[e][d] = s.access_raw_entry(dim + c);
1166  }
1167  return t;
1168  }
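 // Illustration (not part of this file): this helper backs the conversion
 // operator of SymmetricTensor, so a symmetric tensor can be handed to code
 // expecting a full Tensor simply by converting it:
 //
 // @code
 //   SymmetricTensor<2, 3> s;
 //   s[1][2] = 3.;
 //   const Tensor<2, 3> t = static_cast<Tensor<2, 3, double>>(s);
 //   // t[1][2] == t[2][1] == 3.
 // @endcode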
1169 
1170 
1171  template <int dim, typename Number>
1172  constexpr ::Tensor<4, dim, Number>
1173  convert_to_tensor(const ::SymmetricTensor<4, dim, Number> &st)
1174  {
1175  // utilize the symmetry properties of SymmetricTensor<4,dim>
1176  // discussed in the class documentation to avoid accessing all
1177  // independent elements of the input tensor more than once
1178  Tensor<4, dim, Number> t;
1179 
1180  for (unsigned int i = 0; i < dim; ++i)
1181  for (unsigned int j = i; j < dim; ++j)
1182  for (unsigned int k = 0; k < dim; ++k)
1183  for (unsigned int l = k; l < dim; ++l)
1184  t[TableIndices<4>(i, j, k, l)] = t[TableIndices<4>(i, j, l, k)] =
1185  t[TableIndices<4>(j, i, k, l)] =
1186  t[TableIndices<4>(j, i, l, k)] =
1187  st[TableIndices<4>(i, j, k, l)];
1188 
1189  return t;
1190  }
1191 
1192 
1193  template <typename Number>
1194  struct Inverse<2, 1, Number>
1195  {
1196  constexpr static inline DEAL_II_ALWAYS_INLINE
1197  ::SymmetricTensor<2, 1, Number>
1198  value(const ::SymmetricTensor<2, 1, Number> &t)
1199  {
1200  SymmetricTensor<2, 1, Number> tmp;
1201 
1202  tmp[0][0] = 1.0 / t[0][0];
1203 
1204  return tmp;
1205  }
1206  };
1207 
1208 
1209  template <typename Number>
1210  struct Inverse<2, 2, Number>
1211  {
1212  constexpr static inline DEAL_II_ALWAYS_INLINE
1213  ::SymmetricTensor<2, 2, Number>
1214  value(const ::SymmetricTensor<2, 2, Number> &t)
1215  {
1216  SymmetricTensor<2, 2, Number> tmp;
1217 
1218  // Sympy result: ([
1219  // [ t11/(t00*t11 - t01**2), -t01/(t00*t11 - t01**2)],
1220  // [-t01/(t00*t11 - t01**2), t00/(t00*t11 - t01**2)] ])
1221  const TableIndices<2> idx_00(0, 0);
1222  const TableIndices<2> idx_01(0, 1);
1223  const TableIndices<2> idx_11(1, 1);
1224  const Number inv_det_t =
1225  1.0 / (t[idx_00] * t[idx_11] - t[idx_01] * t[idx_01]);
1226  tmp[idx_00] = t[idx_11];
1227  tmp[idx_01] = -t[idx_01];
1228  tmp[idx_11] = t[idx_00];
1229  tmp *= inv_det_t;
1230 
1231  return tmp;
1232  }
1233  };
1234 
1235 
1236  template <typename Number>
1237  struct Inverse<2, 3, Number>
1238  {
1239  constexpr static ::SymmetricTensor<2, 3, Number>
1240  value(const ::SymmetricTensor<2, 3, Number> &t)
1241  {
1242  SymmetricTensor<2, 3, Number> tmp;
1243 
1244  // Sympy result: ([
1245  // [ (t11*t22 - t12**2)/(t00*t11*t22 - t00*t12**2 - t01**2*t22 +
1246  // 2*t01*t02*t12 - t02**2*t11),
1247  // (-t01*t22 + t02*t12)/(t00*t11*t22 - t00*t12**2 - t01**2*t22 +
1248  // 2*t01*t02*t12 - t02**2*t11),
1249  // (t01*t12 - t02*t11)/(t00*t11*t22 - t00*t12**2 - t01**2*t22 +
1250  // 2*t01*t02*t12 - t02**2*t11)],
1251  // [ (-t01*t22 + t02*t12)/(t00*t11*t22 - t00*t12**2 - t01**2*t22 +
1252  // 2*t01*t02*t12 - t02**2*t11),
1253  // (t00*t22 - t02**2)/(t00*t11*t22 - t00*t12**2 - t01**2*t22 +
1254  // 2*t01*t02*t12 - t02**2*t11),
1255  // (t00*t12 - t01*t02)/(-t00*t11*t22 + t00*t12**2 + t01**2*t22 -
1256  // 2*t01*t02*t12 + t02**2*t11)],
1257  // [ (t01*t12 - t02*t11)/(t00*t11*t22 - t00*t12**2 - t01**2*t22 +
1258  // 2*t01*t02*t12 - t02**2*t11),
1259  // (t00*t12 - t01*t02)/(-t00*t11*t22 + t00*t12**2 + t01**2*t22 -
1260  // 2*t01*t02*t12 + t02**2*t11),
1261  // (-t00*t11 + t01**2)/(-t00*t11*t22 + t00*t12**2 + t01**2*t22 -
1262  // 2*t01*t02*t12 + t02**2*t11)] ])
1263  //
1264  // =
1265  //
1266  // [ (t11*t22 - t12**2)/det_t,
1267  // (-t01*t22 + t02*t12)/det_t,
1268  // (t01*t12 - t02*t11)/det_t],
1269  // [ (-t01*t22 + t02*t12)/det_t,
1270  // (t00*t22 - t02**2)/det_t,
1271  // (-t00*t12 + t01*t02)/det_t],
1272  // [ (t01*t12 - t02*t11)/det_t,
1273  // (-t00*t12 + t01*t02)/det_t,
1274  // (t00*t11 - t01**2)/det_t] ])
1275  //
1276  // with det_t = (t00*t11*t22 - t00*t12**2 - t01**2*t22 +
1277  // 2*t01*t02*t12 - t02**2*t11)
1278  const TableIndices<2> idx_00(0, 0);
1279  const TableIndices<2> idx_01(0, 1);
1280  const TableIndices<2> idx_02(0, 2);
1281  const TableIndices<2> idx_11(1, 1);
1282  const TableIndices<2> idx_12(1, 2);
1283  const TableIndices<2> idx_22(2, 2);
1284  const Number inv_det_t =
1285  1.0 / (t[idx_00] * t[idx_11] * t[idx_22] -
1286  t[idx_00] * t[idx_12] * t[idx_12] -
1287  t[idx_01] * t[idx_01] * t[idx_22] +
1288  2.0 * t[idx_01] * t[idx_02] * t[idx_12] -
1289  t[idx_02] * t[idx_02] * t[idx_11]);
1290  tmp[idx_00] = t[idx_11] * t[idx_22] - t[idx_12] * t[idx_12];
1291  tmp[idx_01] = -t[idx_01] * t[idx_22] + t[idx_02] * t[idx_12];
1292  tmp[idx_02] = t[idx_01] * t[idx_12] - t[idx_02] * t[idx_11];
1293  tmp[idx_11] = t[idx_00] * t[idx_22] - t[idx_02] * t[idx_02];
1294  tmp[idx_12] = -t[idx_00] * t[idx_12] + t[idx_01] * t[idx_02];
1295  tmp[idx_22] = t[idx_00] * t[idx_11] - t[idx_01] * t[idx_01];
1296  tmp *= inv_det_t;
1297 
1298  return tmp;
1299  }
1300  };
1301 
1302 
1303  template <typename Number>
1304  struct Inverse<4, 1, Number>
1305  {
1306  constexpr static inline ::SymmetricTensor<4, 1, Number>
1307  value(const ::SymmetricTensor<4, 1, Number> &t)
1308  {
1309  SymmetricTensor<4, 1, Number> tmp;
1310  tmp.data[0][0] = 1.0 / t.data[0][0];
1311  return tmp;
1312  }
1313  };
1314 
1315 
1316  template <typename Number>
1317  struct Inverse<4, 2, Number>
1318  {
1319  constexpr static inline ::SymmetricTensor<4, 2, Number>
1320  value(const ::SymmetricTensor<4, 2, Number> &t)
1321  {
1322  SymmetricTensor<4, 2, Number> tmp;
1323 
1324  // Inverting this tensor is a little more complicated than necessary,
1325  // since we store the data of 't' as a 3x3 matrix t.data, but the
1326  // product between a rank-4 and a rank-2 tensor is really not the
1327  // product between this matrix and the 3-vector of a rhs, but rather
1328  //
1329  // B.vec = t.data * mult * A.vec
1330  //
1331  // where mult is a 3x3 matrix with entries [[1,0,0],[0,1,0],[0,0,2]] to
1332  // capture the fact that we need to add up both the c_ij12*a_12 and the
1333  // c_ij21*a_21 terms.
1334  //
1335  // In addition, in this scheme, the identity tensor has the matrix
1336  // representation mult^-1.
1337  //
1338  // The inverse of 't' therefore has the matrix representation
1339  //
1340  // inv.data = mult^-1 * t.data^-1 * mult^-1
1341  //
1342  // in order to compute it, let's first compute the inverse of t.data and
1343  // put it into tmp.data; at the end of the function we then scale the
1344  // last row and column of the inverse by 1/2, corresponding to the left
1345  // and right multiplication with mult^-1.
1346  const Number t4 = t.data[0][0] * t.data[1][1],
1347  t6 = t.data[0][0] * t.data[1][2],
1348  t8 = t.data[0][1] * t.data[1][0],
1349  t00 = t.data[0][2] * t.data[1][0],
1350  t01 = t.data[0][1] * t.data[2][0],
1351  t04 = t.data[0][2] * t.data[2][0],
1352  t07 = 1.0 / (t4 * t.data[2][2] - t6 * t.data[2][1] -
1353  t8 * t.data[2][2] + t00 * t.data[2][1] +
1354  t01 * t.data[1][2] - t04 * t.data[1][1]);
1355  tmp.data[0][0] =
1356  (t.data[1][1] * t.data[2][2] - t.data[1][2] * t.data[2][1]) * t07;
1357  tmp.data[0][1] =
1358  -(t.data[0][1] * t.data[2][2] - t.data[0][2] * t.data[2][1]) * t07;
1359  tmp.data[0][2] =
1360  -(-t.data[0][1] * t.data[1][2] + t.data[0][2] * t.data[1][1]) * t07;
1361  tmp.data[1][0] =
1362  -(t.data[1][0] * t.data[2][2] - t.data[1][2] * t.data[2][0]) * t07;
1363  tmp.data[1][1] = (t.data[0][0] * t.data[2][2] - t04) * t07;
1364  tmp.data[1][2] = -(t6 - t00) * t07;
1365  tmp.data[2][0] =
1366  -(-t.data[1][0] * t.data[2][1] + t.data[1][1] * t.data[2][0]) * t07;
1367  tmp.data[2][1] = -(t.data[0][0] * t.data[2][1] - t01) * t07;
1368  tmp.data[2][2] = (t4 - t8) * t07;
1369 
1370  // scale last row and column as mentioned
1371  // above
1372  tmp.data[2][0] /= 2;
1373  tmp.data[2][1] /= 2;
1374  tmp.data[0][2] /= 2;
1375  tmp.data[1][2] /= 2;
1376  tmp.data[2][2] /= 4;
1377 
1378  return tmp;
1379  }
1380  };
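 // Illustration of the property established above (not part of this file):
 // the rank-4 inverse is defined through the double contraction, so applying
 // a tensor and then its inverse returns the original rank-2 argument. A
 // sketch, assuming the invert(), identity_tensor() and double-contraction
 // operator* declared earlier in this header:
 //
 // @code
 //   const SymmetricTensor<4, 2> C     = 2. * identity_tensor<2>();
 //   const SymmetricTensor<4, 2> C_inv = invert(C);
 //
 //   SymmetricTensor<2, 2> s;
 //   s[0][0] = 1.;
 //   s[0][1] = 2.;
 //
 //   // double contraction with C and then with its inverse gives s back:
 //   const SymmetricTensor<2, 2> r = C_inv * (C * s);   // r == s
 // @endcode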
1381 
1382 
1383  template <typename Number>
1384  struct Inverse<4, 3, Number>
1385  {
1386  static ::SymmetricTensor<4, 3, Number>
1387  value(const ::SymmetricTensor<4, 3, Number> &t)
1388  {
1389  SymmetricTensor<4, 3, Number> tmp = t;
1390 
1391  // This function follows the exact same scheme as the 2d case, except
1392  // that hardcoding the inverse of a 6x6 matrix is pretty wasteful.
1393  // Instead, we use the Gauss-Jordan algorithm implemented for
1394  // FullMatrix. For historical reasons the following code is copied from
1395  // there, with the tangential benefit that we do not need to copy the
1396  // tensor entries to and from the FullMatrix.
1397  const unsigned int N = 6;
1398 
1399  // First get an estimate of the size of the elements of this matrix,
1400  // for later checks whether the pivot element is large enough, or
1401  // whether we have to fear that the matrix is not regular.
1402  Number diagonal_sum = internal::NumberType<Number>::value(0.0);
1403  for (unsigned int i = 0; i < N; ++i)
1404  diagonal_sum += std::fabs(tmp.data[i][i]);
1405  const Number typical_diagonal_element =
1406  diagonal_sum / static_cast<double>(N);
1407  (void)typical_diagonal_element;
1408 
1409  unsigned int p[N];
1410  for (unsigned int i = 0; i < N; ++i)
1411  p[i] = i;
1412 
1413  for (unsigned int j = 0; j < N; ++j)
1414  {
1415  // Pivot search: search that part of the line on and right of the
1416  // diagonal for the largest element.
1417  Number max = std::fabs(tmp.data[j][j]);
1418  unsigned int r = j;
1419  for (unsigned int i = j + 1; i < N; ++i)
1420  if (std::fabs(tmp.data[i][j]) > max)
1421  {
1422  max = std::fabs(tmp.data[i][j]);
1423  r = i;
1424  }
1425 
1426  // Check whether the pivot is too small
1427  Assert(max > 1.e-16 * typical_diagonal_element,
1428  ExcMessage("This tensor seems to be noninvertible"));
1429 
1430  // Row interchange
1431  if (r > j)
1432  {
1433  for (unsigned int k = 0; k < N; ++k)
1434  std::swap(tmp.data[j][k], tmp.data[r][k]);
1435 
1436  std::swap(p[j], p[r]);
1437  }
1438 
1439  // Transformation
1440  const Number hr = 1. / tmp.data[j][j];
1441  tmp.data[j][j] = hr;
1442  for (unsigned int k = 0; k < N; ++k)
1443  {
1444  if (k == j)
1445  continue;
1446  for (unsigned int i = 0; i < N; ++i)
1447  {
1448  if (i == j)
1449  continue;
1450  tmp.data[i][k] -= tmp.data[i][j] * tmp.data[j][k] * hr;
1451  }
1452  }
1453  for (unsigned int i = 0; i < N; ++i)
1454  {
1455  tmp.data[i][j] *= hr;
1456  tmp.data[j][i] *= -hr;
1457  }
1458  tmp.data[j][j] = hr;
1459  }
1460 
1461  // Column interchange
1462  Number hv[N];
1463  for (unsigned int i = 0; i < N; ++i)
1464  {
1465  for (unsigned int k = 0; k < N; ++k)
1466  hv[p[k]] = tmp.data[i][k];
1467  for (unsigned int k = 0; k < N; ++k)
1468  tmp.data[i][k] = hv[k];
1469  }
1470 
1471  // Scale rows and columns. The mult matrix
1472  // here is diag[1, 1, 1, 1/2, 1/2, 1/2].
1473  for (unsigned int i = 3; i < 6; ++i)
1474  for (unsigned int j = 0; j < 3; ++j)
1475  tmp.data[i][j] /= 2;
1476 
1477  for (unsigned int i = 0; i < 3; ++i)
1478  for (unsigned int j = 3; j < 6; ++j)
1479  tmp.data[i][j] /= 2;
1480 
1481  for (unsigned int i = 3; i < 6; ++i)
1482  for (unsigned int j = 3; j < 6; ++j)
1483  tmp.data[i][j] /= 4;
1484 
1485  return tmp;
1486  }
1487  };
1488 
1489  } // namespace SymmetricTensorImplementation
1490 } // namespace internal
1491 
1492 
1493 
1494 template <int rank_, int dim, typename Number>
1496  operator Tensor<rank_, dim, Number>() const
1497 {
1498  return internal::SymmetricTensorImplementation::convert_to_tensor(*this);
1499 }
1500 
1501 
1502 
1503 template <int rank_, int dim, typename Number>
1504 constexpr bool
1507 {
1508  return data == t.data;
1509 }
1510 
1511 
1512 
1513 template <int rank_, int dim, typename Number>
1514 constexpr bool
1517 {
1518  return data != t.data;
1519 }
1520 
1521 
1522 
1523 template <int rank_, int dim, typename Number>
1524 template <typename OtherNumber>
1528 {
1529  data += t.data;
1530  return *this;
1531 }
1532 
1533 
1534 
1535 template <int rank_, int dim, typename Number>
1536 template <typename OtherNumber>
1540 {
1541  data -= t.data;
1542  return *this;
1543 }
1544 
1545 
1546 
1547 template <int rank_, int dim, typename Number>
1548 template <typename OtherNumber>
1551 {
1552  data *= d;
1553  return *this;
1554 }
1555 
1556 
1557 
1558 template <int rank_, int dim, typename Number>
1559 template <typename OtherNumber>
1562 {
1563  data /= d;
1564  return *this;
1565 }
1566 
1567 
1568 
1569 template <int rank_, int dim, typename Number>
1572 {
1573  SymmetricTensor tmp = *this;
1574  tmp.data = -tmp.data;
1575  return tmp;
1576 }
1577 
1578 
1579 
1580 template <int rank_, int dim, typename Number>
1581 constexpr inline DEAL_II_ALWAYS_INLINE void
1583 {
1584  data.clear();
1585 }
1586 
1587 
1588 
1589 template <int rank_, int dim, typename Number>
1590 constexpr std::size_t
1592 {
1593  // all memory consists of statically allocated memory of the current
1594  // object, no pointers
1595  return sizeof(SymmetricTensor<rank_, dim, Number>);
1596 }
1597 
1598 
1599 
1600 namespace internal
1601 {
1602  template <int dim, typename Number, typename OtherNumber = Number>
1606  perform_double_contraction(
1607  const typename SymmetricTensorAccessors::StorageType<2, dim, Number>::
1608  base_tensor_type &data,
1609  const typename SymmetricTensorAccessors::
1610  StorageType<2, dim, OtherNumber>::base_tensor_type &sdata)
1611  {
1612  using result_type = typename SymmetricTensorAccessors::
1614 
1615  switch (dim)
1616  {
1617  case 1:
1618  return data[0] * sdata[0];
1619  default:
1620  // Start with the non-diagonal part to avoid some multiplications by
1621  // 2.
1622 
1623  result_type sum = data[dim] * sdata[dim];
1624  for (unsigned int d = dim + 1; d < (dim * (dim + 1) / 2); ++d)
1625  sum += data[d] * sdata[d];
1626  sum += sum; // sum = sum * 2.;
1627 
1628  // Now add the contributions from the diagonal
1629  for (unsigned int d = 0; d < dim; ++d)
1630  sum += data[d] * sdata[d];
1631  return sum;
1632  }
1633  }
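 // Worked example for the loop above (illustration only): for dim=3 the
 // scalar result of the double contraction t:s is
 //
 //   t:s = sum_ij t_ij s_ij
 //       = t_00 s_00 + t_11 s_11 + t_22 s_22
 //         + 2 (t_01 s_01 + t_02 s_02 + t_12 s_12),
 //
 // which is why the packed off-diagonal entries are summed first and doubled
 // before the diagonal contributions are added.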
1634 
1635 
1636 
1637  template <int dim, typename Number, typename OtherNumber = Number>
1641  perform_double_contraction(
1642  const typename SymmetricTensorAccessors::StorageType<4, dim, Number>::
1643  base_tensor_type &data,
1644  const typename SymmetricTensorAccessors::
1645  StorageType<2, dim, OtherNumber>::base_tensor_type &sdata)
1646  {
1647  using result_type = typename SymmetricTensorAccessors::
1649  using value_type = typename SymmetricTensorAccessors::
1651 
1652  const unsigned int data_dim = SymmetricTensorAccessors::
1653  StorageType<2, dim, value_type>::n_independent_components;
1654  value_type tmp[data_dim]{};
1655  for (unsigned int i = 0; i < data_dim; ++i)
1656  tmp[i] =
1657  perform_double_contraction<dim, Number, OtherNumber>(data[i], sdata);
1658  return result_type(tmp);
1659  }
1660 
1661 
1662 
1663  template <int dim, typename Number, typename OtherNumber = Number>
1665  typename SymmetricTensorAccessors::StorageType<
1666  2,
1667  dim,
1670  base_tensor_type
1671  perform_double_contraction(
1672  const typename SymmetricTensorAccessors::StorageType<2, dim, Number>::
1673  base_tensor_type &data,
1674  const typename SymmetricTensorAccessors::
1675  StorageType<4, dim, OtherNumber>::base_tensor_type &sdata)
1676  {
1677  using value_type = typename SymmetricTensorAccessors::
1679  using base_tensor_type = typename SymmetricTensorAccessors::
1680  StorageType<2, dim, value_type>::base_tensor_type;
1681 
1682  base_tensor_type tmp;
1683  for (unsigned int i = 0; i < tmp.dimension; ++i)
1684  {
1685  // Start with the non-diagonal part
1686  value_type sum = data[dim] * sdata[dim][i];
1687  for (unsigned int d = dim + 1; d < (dim * (dim + 1) / 2); ++d)
1688  sum += data[d] * sdata[d][i];
1689  sum += sum; // sum = sum * 2.;
1690 
1691  // Now add the contributions from the diagonal
1692  for (unsigned int d = 0; d < dim; ++d)
1693  sum += data[d] * sdata[d][i];
1694  tmp[i] = sum;
1695  }
1696  return tmp;
1697  }
1698 
1699 
1700 
1701  template <int dim, typename Number, typename OtherNumber = Number>
1703  typename SymmetricTensorAccessors::StorageType<
1704  4,
1705  dim,
1708  base_tensor_type
1709  perform_double_contraction(
1710  const typename SymmetricTensorAccessors::StorageType<4, dim, Number>::
1711  base_tensor_type &data,
1712  const typename SymmetricTensorAccessors::
1713  StorageType<4, dim, OtherNumber>::base_tensor_type &sdata)
1714  {
1715  using value_type = typename SymmetricTensorAccessors::
1717  using base_tensor_type = typename SymmetricTensorAccessors::
1718  StorageType<4, dim, value_type>::base_tensor_type;
1719 
1720  const unsigned int data_dim = SymmetricTensorAccessors::
1721  StorageType<2, dim, value_type>::n_independent_components;
1722  base_tensor_type tmp;
1723  for (unsigned int i = 0; i < data_dim; ++i)
1724  for (unsigned int j = 0; j < data_dim; ++j)
1725  {
1726  // Start with the non-diagonal part
1727  for (unsigned int d = dim; d < (dim * (dim + 1) / 2); ++d)
1728  tmp[i][j] += data[i][d] * sdata[d][j];
1729  tmp[i][j] += tmp[i][j]; // tmp[i][j] = tmp[i][j] * 2;
1730 
1731  // Now add the contributions from the diagonal
1732  for (unsigned int d = 0; d < dim; ++d)
1733  tmp[i][j] += data[i][d] * sdata[d][j];
1734  }
1735  return tmp;
1736  }
1737 
1738 } // end of namespace internal
1739 
1740 
1741 
1742 template <int rank_, int dim, typename Number>
1743 template <typename OtherNumber>
1749 {
1750  // need to have two different function calls
1751  // because a scalar and rank-2 tensor are not
1752  // the same data type (see internal function
1753  // above)
1754  return internal::perform_double_contraction<dim, Number, OtherNumber>(data,
1755  s.data);
1756 }
1757 
1758 
1759 
1760 template <int rank_, int dim, typename Number>
1761 template <typename OtherNumber>
1766 {
1769  tmp.data =
1770  internal::perform_double_contraction<dim, Number, OtherNumber>(data,
1771  s.data);
1772  return tmp;
1773 }
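// Usage sketch (illustration only): the most common use of this operator is
// applying a constitutive tensor to a strain to obtain a stress, e.g. with
// an isotropic elasticity tensor built from the helpers in this header
// (lambda and mu are made-up material parameters for this example):
//
// @code
//   const double lambda = 1., mu = 1.;
//   const SymmetricTensor<4, 3> C =
//     lambda * outer_product(unit_symmetric_tensor<3>(),
//                            unit_symmetric_tensor<3>()) +
//     2. * mu * identity_tensor<3>();
//
//   SymmetricTensor<2, 3> eps;
//   eps[0][0] = 0.1;
//   eps[0][1] = 0.05;
//
//   const SymmetricTensor<2, 3> sigma = C * eps;   // double contraction
// @endcode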
1774 
1775 
1776 
1777 // internal namespace to switch between the
1778 // access of different tensors. There used to
1779 // be explicit instantiations before for
1780 // different ranks and dimensions, but since
1781 // we now allow for templates on the data
1782 // type, and since we cannot partially
1783 // specialize the implementation, this got
1784 // into a separate namespace
1785 namespace internal
1786 {
1787  // The variables within this struct will be referenced in the next functions.
1788  // It is a workaround that allows returning a reference to a static variable
1789  // while allowing constexpr evaluation of the function.
1790  // It has to be defined outside the function because constexpr functions
1791  // cannot define static variables.
1792  // A similar struct has also been defined in tensor.h
1793  template <typename Type>
1794  struct Uninitialized
1795  {
1796  static Type value;
1797  };
1798 
1799  template <typename Type>
1800  Type Uninitialized<Type>::value;
1801 
1802  template <int dim, typename Number>
1803  constexpr inline DEAL_II_ALWAYS_INLINE Number &
1804  symmetric_tensor_access(const TableIndices<2> &indices,
1805  typename SymmetricTensorAccessors::
1806  StorageType<2, dim, Number>::base_tensor_type &data)
1807  {
1808  // 1d is very simple and done first
1809  if (dim == 1)
1810  return data[0];
1811 
1812  // first treat the main diagonal elements, which are stored consecutively
1813  // at the beginning
1814  if (indices[0] == indices[1])
1815  return data[indices[0]];
1816 
1817  // the rest is messier and requires a few switches.
1818  switch (dim)
1819  {
1820  case 2:
1821  // at least for the 2x2 case it is reasonably simple
1822  Assert(((indices[0] == 1) && (indices[1] == 0)) ||
1823  ((indices[0] == 0) && (indices[1] == 1)),
1824  ExcInternalError());
1825  return data[2];
1826 
1827  default:
1828  // to do the rest, sort our indices before comparing
1829  {
1830  TableIndices<2> sorted_indices(std::min(indices[0], indices[1]),
1831  std::max(indices[0], indices[1]));
1832  for (unsigned int d = 0, c = 0; d < dim; ++d)
1833  for (unsigned int e = d + 1; e < dim; ++e, ++c)
1834  if ((sorted_indices[0] == d) && (sorted_indices[1] == e))
1835  return data[dim + c];
1836  Assert(false, ExcInternalError());
1837  }
1838  }
1839 
1840  // The code should never reach this point.
1841  // Return a dummy reference to a dummy variable just to make the
1842  // compiler happy.
1843  return Uninitialized<Number>::value;
1844  }
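 // Example of the index-to-storage mapping implemented above (illustration
 // only), for dim=3: (0,0), (1,1), (2,2) map to data[0], data[1], data[2],
 // while (0,1)/(1,0) map to data[3], (0,2)/(2,0) to data[4], and
 // (1,2)/(2,1) to data[5].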
1845 
1846 
1847 
1848  template <int dim, typename Number>
1849  constexpr inline DEAL_II_ALWAYS_INLINE const Number &
1850  symmetric_tensor_access(const TableIndices<2> &indices,
1851  const typename SymmetricTensorAccessors::
1852  StorageType<2, dim, Number>::base_tensor_type &data)
1853  {
1854  // 1d is very simple and done first
1855  if (dim == 1)
1856  return data[0];
1857 
1858  // first treat the main diagonal elements, which are stored consecutively
1859  // at the beginning
1860  if (indices[0] == indices[1])
1861  return data[indices[0]];
1862 
1863  // the rest is messier and requires a few switches.
1864  switch (dim)
1865  {
1866  case 2:
1867  // at least for the 2x2 case it is reasonably simple
1868  Assert(((indices[0] == 1) && (indices[1] == 0)) ||
1869  ((indices[0] == 0) && (indices[1] == 1)),
1870  ExcInternalError());
1871  return data[2];
1872 
1873  default:
1874  // to do the rest, sort our indices before comparing
1875  {
1876  TableIndices<2> sorted_indices(std::min(indices[0], indices[1]),
1877  std::max(indices[0], indices[1]));
1878  for (unsigned int d = 0, c = 0; d < dim; ++d)
1879  for (unsigned int e = d + 1; e < dim; ++e, ++c)
1880  if ((sorted_indices[0] == d) && (sorted_indices[1] == e))
1881  return data[dim + c];
1882  Assert(false, ExcInternalError());
1883  }
1884  }
1885 
1886  // The code should never reach this point.
1887  // Return a dummy reference to a dummy variable just to make the
1888  // compiler happy.
1889  return Uninitialized<Number>::value;
1890  }
1891 
1892 
1893 
1894  template <int dim, typename Number>
1895  constexpr inline Number &
1896  symmetric_tensor_access(const TableIndices<4> &indices,
1897  typename SymmetricTensorAccessors::
1898  StorageType<4, dim, Number>::base_tensor_type &data)
1899  {
1900  switch (dim)
1901  {
1902  case 1:
1903  return data[0][0];
1904 
1905  case 2:
1906  // each entry of the tensor can be thought of as an entry in a
1907  // matrix that maps the rolled-out rank-2 tensors into rolled-out
1908  // rank-2 tensors. this is the format in which we store rank-4
1909  // tensors. determine which position the present entry is
1910  // stored in
1911  {
1912  constexpr std::size_t base_index[2][2] = {{0, 2}, {2, 1}};
1913  return data[base_index[indices[0]][indices[1]]]
1914  [base_index[indices[2]][indices[3]]];
1915  }
1916  case 3:
1917  // each entry of the tensor can be thought of as an entry in a
1918  // matrix that maps the rolled-out rank-2 tensors into rolled-out
1919  // rank-2 tensors. this is the format in which we store rank-4
1920  // tensors. determine which position the present entry is
1921  // stored in
1922  {
1923  constexpr std::size_t base_index[3][3] = {{0, 3, 4},
1924  {3, 1, 5},
1925  {4, 5, 2}};
1926  return data[base_index[indices[0]][indices[1]]]
1927  [base_index[indices[2]][indices[3]]];
1928  }
1929 
1930  default:
1931  Assert(false, ExcNotImplemented());
1932  }
1933 
1934  // The code should never reach this point.
1935  // Return a dummy reference to a dummy variable just to make the
1936  // compiler happy.
1937  return Uninitialized<Number>::value;
1938  }
1939 
1940 
1941  template <int dim, typename Number>
1942  constexpr inline DEAL_II_ALWAYS_INLINE const Number &
1943  symmetric_tensor_access(const TableIndices<4> &indices,
1944  const typename SymmetricTensorAccessors::
1945  StorageType<4, dim, Number>::base_tensor_type &data)
1946  {
1947  switch (dim)
1948  {
1949  case 1:
1950  return data[0][0];
1951 
1952  case 2:
1953  // each entry of the tensor can be thought of as an entry in a
1954  // matrix that maps the rolled-out rank-2 tensors into rolled-out
1955  // rank-2 tensors. this is the format in which we store rank-4
1956  // tensors. determine which position the present entry is
1957  // stored in
1958  {
1959  constexpr std::size_t base_index[2][2] = {{0, 2}, {2, 1}};
1960  return data[base_index[indices[0]][indices[1]]]
1961  [base_index[indices[2]][indices[3]]];
1962  }
1963  case 3:
1964  // each entry of the tensor can be thought of as an entry in a
1965  // matrix that maps the rolled-out rank-2 tensors into rolled-out
1966  // rank-2 tensors. this is the format in which we store rank-4
1967  // tensors. determine which position the present entry is
1968  // stored in
1969  {
1970  constexpr std::size_t base_index[3][3] = {{0, 3, 4},
1971  {3, 1, 5},
1972  {4, 5, 2}};
1973  return data[base_index[indices[0]][indices[1]]]
1974  [base_index[indices[2]][indices[3]]];
1975  }
1976 
1977  default:
1978  Assert(false, ExcNotImplemented());
1979  }
1980 
1981  // The code should never reach this point.
1982  // Return a dummy reference to a dummy variable just to make the
1983  // compiler happy.
1984  return Uninitialized<Number>::value;
1985  }
1986 
1987 } // end of namespace internal
1988 
1989 
1990 
1991 template <int rank_, int dim, typename Number>
1992 constexpr inline DEAL_II_ALWAYS_INLINE Number &
1994  operator()(const TableIndices<rank_> &indices)
1995 {
1996  for (unsigned int r = 0; r < rank; ++r)
1997  AssertIndexRange(indices[r], dimension);
1998  return internal::symmetric_tensor_access<dim, Number>(indices, data);
1999 }
2000 
2001 
2002 
2003 template <int rank_, int dim, typename Number>
2004 constexpr inline DEAL_II_ALWAYS_INLINE const Number &
2006  operator()(const TableIndices<rank_> &indices) const
2007 {
2008  for (unsigned int r = 0; r < rank; ++r)
2009  AssertIndexRange(indices[r], dimension);
2010  return internal::symmetric_tensor_access<dim, Number>(indices, data);
2011 }
2012 
2013 
2014 
2015 namespace internal
2016 {
2017  namespace SymmetricTensorImplementation
2018  {
2019  template <int rank_>
2020  constexpr TableIndices<rank_>
2021  get_partially_filled_indices(const unsigned int row,
2022  const std::integral_constant<int, 2> &)
2023  {
2024  return TableIndices<rank_>(row, numbers::invalid_unsigned_int);
2025  }
2026 
2027 
2028  template <int rank_>
2029  constexpr TableIndices<rank_>
2030  get_partially_filled_indices(const unsigned int row,
2031  const std::integral_constant<int, 4> &)
2032  {
2033  return TableIndices<rank_>(row,
2034  numbers::invalid_unsigned_int,
2035  numbers::invalid_unsigned_int,
2036  numbers::invalid_unsigned_int);
2037  }
2038  } // namespace SymmetricTensorImplementation
2039 } // namespace internal
2040 
2041 
2042 template <int rank_, int dim, typename Number>
2043 constexpr DEAL_II_ALWAYS_INLINE internal::SymmetricTensorAccessors::
2044  Accessor<rank_, dim, true, rank_ - 1, Number>
2046  operator[](const unsigned int row) const
2047 {
2048  return internal::SymmetricTensorAccessors::
2049  Accessor<rank_, dim, true, rank_ - 1, Number>(
2050  *this,
2051  internal::SymmetricTensorImplementation::get_partially_filled_indices<
2052  rank_>(row, std::integral_constant<int, rank_>()));
2053 }
2054 
2055 
2056 
2057 template <int rank_, int dim, typename Number>
2058 constexpr inline DEAL_II_ALWAYS_INLINE internal::SymmetricTensorAccessors::
2059  Accessor<rank_, dim, false, rank_ - 1, Number>
2060  SymmetricTensor<rank_, dim, Number>::operator[](const unsigned int row)
2061 {
2062  return internal::SymmetricTensorAccessors::
2063  Accessor<rank_, dim, false, rank_ - 1, Number>(
2064  *this,
2065  internal::SymmetricTensorImplementation::get_partially_filled_indices<
2066  rank_>(row, std::integral_constant<int, rank_>()));
2067 }
2068 
2069 
2070 
2071 template <int rank_, int dim, typename Number>
2072 constexpr DEAL_II_ALWAYS_INLINE const Number &
2074  operator[](const TableIndices<rank_> &indices) const
2075 {
2076  return operator()(indices);
2077 }
2078 
2079 
2080 
2081 template <int rank_, int dim, typename Number>
2082 constexpr inline DEAL_II_ALWAYS_INLINE Number &
2084  operator[](const TableIndices<rank_> &indices)
2085 {
2086  return operator()(indices);
2087 }
2088 
2089 
2090 
2091 template <int rank_, int dim, typename Number>
2092 inline Number *
2094 {
2095  return std::addressof(this->access_raw_entry(0));
2096 }
2097 
2098 
2099 
2100 template <int rank_, int dim, typename Number>
2101 inline const Number *
2103 {
2104  return std::addressof(this->access_raw_entry(0));
2105 }
2106 
2107 
2108 
2109 template <int rank_, int dim, typename Number>
2110 inline Number *
2112 {
2113  return begin_raw() + n_independent_components;
2114 }
2115 
2116 
2117 
2118 template <int rank_, int dim, typename Number>
2119 inline const Number *
2121 {
2122  return begin_raw() + n_independent_components;
2123 }
2124 
2125 
2126 
2127 namespace internal
2128 {
2129  namespace SymmetricTensorImplementation
2130  {
2131  template <int dim, typename Number>
2132  constexpr unsigned int
2133  entry_to_indices(const ::SymmetricTensor<2, dim, Number> &,
2134  const unsigned int index)
2135  {
2136  return index;
2137  }
2138 
2139 
2140  template <int dim, typename Number>
2141  constexpr ::TableIndices<2>
2142  entry_to_indices(const ::SymmetricTensor<4, dim, Number> &,
2143  const unsigned int index)
2144  {
2147  }
2148 
2149  } // namespace SymmetricTensorImplementation
2150 } // namespace internal
2151 
2152 
2153 
2154 template <int rank_, int dim, typename Number>
2155 constexpr inline const Number &
2157  const unsigned int index) const
2158 {
2159  AssertIndexRange(index, n_independent_components);
2160  return data[internal::SymmetricTensorImplementation::entry_to_indices(*this,
2161  index)];
2162 }
2163 
2164 
2165 
2166 template <int rank_, int dim, typename Number>
2167 constexpr inline Number &
2169 {
2170  AssertIndexRange(index, n_independent_components);
2171  return data[internal::SymmetricTensorImplementation::entry_to_indices(*this,
2172  index)];
2173 }
2174 
2175 
2176 
2177 namespace internal
2178 {
2179  template <int dim, typename Number>
2180  constexpr inline typename numbers::NumberTraits<Number>::real_type
2181  compute_norm(const typename SymmetricTensorAccessors::
2182  StorageType<2, dim, Number>::base_tensor_type &data)
2183  {
2184  switch (dim)
2185  {
2186  case 1:
2187  return numbers::NumberTraits<Number>::abs(data[0]);
2188 
2189  case 2:
2190  return std::sqrt(
2194 
2195  case 3:
2196  return std::sqrt(
2203 
2204  default:
2205  {
2206  typename numbers::NumberTraits<Number>::real_type return_value =
2208 
2209  for (unsigned int d = 0; d < dim; ++d)
2210  return_value +=
2212  for (unsigned int d = dim; d < (dim * dim + dim) / 2; ++d)
2213  return_value +=
2215 
2216  return std::sqrt(return_value);
2217  }
2218  }
2219  }
2220 
2221 
2222 
2223  template <int dim, typename Number>
2224  constexpr inline typename numbers::NumberTraits<Number>::real_type
2225  compute_norm(const typename SymmetricTensorAccessors::
2226  StorageType<4, dim, Number>::base_tensor_type &data)
2227  {
2228  switch (dim)
2229  {
2230  case 1:
2231  return numbers::NumberTraits<Number>::abs(data[0][0]);
2232 
2233  default:
2234  {
2235  typename numbers::NumberTraits<Number>::real_type return_value =
2237 
2238  const unsigned int n_independent_components = data.dimension;
2239 
2240  for (unsigned int i = 0; i < dim; ++i)
2241  for (unsigned int j = 0; j < dim; ++j)
2242  return_value +=
2244  for (unsigned int i = 0; i < dim; ++i)
2245  for (unsigned int j = dim; j < n_independent_components; ++j)
2246  return_value +=
2248  for (unsigned int i = dim; i < n_independent_components; ++i)
2249  for (unsigned int j = 0; j < dim; ++j)
2250  return_value +=
2252  for (unsigned int i = dim; i < n_independent_components; ++i)
2253  for (unsigned int j = dim; j < n_independent_components; ++j)
2254  return_value +=
2256 
2257  return std::sqrt(return_value);
2258  }
2259  }
2260  }
2261 
2262 } // end of namespace internal
2263 
2264 
2265 
2266 template <int rank_, int dim, typename Number>
2269 {
2270  return internal::compute_norm<dim, Number>(data);
2271 }
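// Usage sketch (illustration only): norm() is the Frobenius norm
// sqrt(sum_ij t_ij^2), with off-diagonal entries counted twice, so for the
// rank-2 unit tensor in 3d it equals sqrt(3):
//
// @code
//   const SymmetricTensor<2, 3> I = unit_symmetric_tensor<3>();
//   const double n = I.norm();      // sqrt(1^2 + 1^2 + 1^2) = sqrt(3)
// @endcode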
2272 
2273 
2274 
2275 namespace internal
2276 {
2277  namespace SymmetricTensorImplementation
2278  {
2279  // a function to do the unrolling from a set of indices to a
2280  // scalar index into the array in which we store the elements of
2281  // a symmetric tensor
2282  //
2283  // this function is for rank-2 tensors
2284  template <int dim>
2285  constexpr inline DEAL_II_ALWAYS_INLINE unsigned int
2286  component_to_unrolled_index(const TableIndices<2> &indices)
2287  {
2288  AssertIndexRange(indices[0], dim);
2289  AssertIndexRange(indices[1], dim);
2290 
2291  switch (dim)
2292  {
2293  case 1:
2294  {
2295  return 0;
2296  }
2297 
2298  case 2:
2299  {
2300  constexpr unsigned int table[2][2] = {{0, 2}, {2, 1}};
2301  return table[indices[0]][indices[1]];
2302  }
2303 
2304  case 3:
2305  {
2306  constexpr unsigned int table[3][3] = {{0, 3, 4},
2307  {3, 1, 5},
2308  {4, 5, 2}};
2309  return table[indices[0]][indices[1]];
2310  }
2311 
2312  case 4:
2313  {
2314  constexpr unsigned int table[4][4] = {{0, 4, 5, 6},
2315  {4, 1, 7, 8},
2316  {5, 7, 2, 9},
2317  {6, 8, 9, 3}};
2318  return table[indices[0]][indices[1]];
2319  }
2320 
2321  default:
2322  // for the remainder, manually figure out the numbering
2323  {
2324  if (indices[0] == indices[1])
2325  return indices[0];
2326 
2327  TableIndices<2> sorted_indices(indices);
2328  sorted_indices.sort();
2329 
2330  for (unsigned int d = 0, c = 0; d < dim; ++d)
2331  for (unsigned int e = d + 1; e < dim; ++e, ++c)
2332  if ((sorted_indices[0] == d) && (sorted_indices[1] == e))
2333  return dim + c;
2334 
2335  // should never get here:
2336  Assert(false, ExcInternalError());
2337  return 0;
2338  }
2339  }
2340  }
2341 
2342  // a function to do the unrolling from a set of indices to a
2343  // scalar index into the array in which we store the elements of
2344  // a symmetric tensor
2345  //
2346  // this function is for tensors of ranks not already handled
2347  // above
2348  template <int dim, int rank_>
2349  constexpr inline unsigned int
2350  component_to_unrolled_index(const TableIndices<rank_> &indices)
2351  {
2352  (void)indices;
2353  Assert(false, ExcNotImplemented());
2354  return numbers::invalid_unsigned_int;
2355  }
2356  } // namespace SymmetricTensorImplementation
2357 } // namespace internal
2358 
2359 
2360 template <int rank_, int dim, typename Number>
2361 constexpr unsigned int
2363  const TableIndices<rank_> &indices)
2364 {
2365  return internal::SymmetricTensorImplementation::component_to_unrolled_index<
2366  dim>(indices);
2367 }
2368 
2369 
2370 
2371 namespace internal
2372 {
2373  namespace SymmetricTensorImplementation
2374  {
2375  // a function to do the inverse of the unrolling from a set of
2376  // indices to a scalar index into the array in which we store
2377  // the elements of a symmetric tensor. in other words, it goes
2378  // from the scalar index into the array to a set of indices of
2379  // the tensor
2380  //
2381  // this function is for rank-2 tensors
2382  template <int dim>
2383  constexpr inline DEAL_II_ALWAYS_INLINE TableIndices<2>
2384  unrolled_to_component_indices(const unsigned int i,
2385  const std::integral_constant<int, 2> &)
2386  {
2387  Assert(
2389  ExcIndexRange(
2390  i,
2391  0,
2393  switch (dim)
2394  {
2395  case 1:
2396  {
2397  return {0, 0};
2398  }
2399 
2400  case 2:
2401  {
2402  const TableIndices<2> table[3] = {TableIndices<2>(0, 0),
2403  TableIndices<2>(1, 1),
2404  TableIndices<2>(0, 1)};
2405  return table[i];
2406  }
2407 
2408  case 3:
2409  {
2410  const TableIndices<2> table[6] = {TableIndices<2>(0, 0),
2411  TableIndices<2>(1, 1),
2412  TableIndices<2>(2, 2),
2413  TableIndices<2>(0, 1),
2414  TableIndices<2>(0, 2),
2415  TableIndices<2>(1, 2)};
2416  return table[i];
2417  }
2418 
2419  default:
2420  if (i < dim)
2421  return {i, i};
2422 
2423  for (unsigned int d = 0, c = dim; d < dim; ++d)
2424  for (unsigned int e = d + 1; e < dim; ++e, ++c)
2425  if (c == i)
2426  return {d, e};
2427 
2428  // should never get here:
2429  Assert(false, ExcInternalError());
2430  return {0, 0};
2431  }
2432  }
2433 
2434  // a function to do the inverse of the unrolling from a set of
2435  // indices to a scalar index into the array in which we store
2436  // the elements of a symmetric tensor. in other words, it goes
2437  // from the scalar index into the array to a set of indices of
2438  // the tensor
2439  //
2440  // this function is for tensors of a rank not already handled
2441  // above
2442  template <int dim, int rank_>
2443  constexpr inline
2444  typename std::enable_if<rank_ != 2, TableIndices<rank_>>::type
2445  unrolled_to_component_indices(const unsigned int i,
2446  const std::integral_constant<int, rank_> &)
2447  {
2448  (void)i;
2449  Assert(
2450  (i <
2452  ExcIndexRange(i,
2453  0,
2455  n_independent_components));
2456  Assert(false, ExcNotImplemented());
2457  return TableIndices<rank_>();
2458  }
2459 
2460  } // namespace SymmetricTensorImplementation
2461 } // namespace internal
2462 
2463 template <int rank_, int dim, typename Number>
2464 constexpr TableIndices<rank_>
2465 SymmetricTensor<rank_, dim, Number>::unrolled_to_component_indices(
2466  const unsigned int i)
2467 {
2468  return internal::SymmetricTensorImplementation::unrolled_to_component_indices<
2469  dim>(i, std::integral_constant<int, rank_>());
2470 }
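// Illustration (not part of this file): component_to_unrolled_index() and
// unrolled_to_component_indices() are inverses of each other. For a rank-2
// tensor in 3d, for example:
//
// @code
//   using S = SymmetricTensor<2, 3>;
//   const unsigned int    n  = S::component_to_unrolled_index(
//                                TableIndices<2>(1, 2));    // n == 5
//   const TableIndices<2> ij = S::unrolled_to_component_indices(5);
//   // ij == TableIndices<2>(1, 2)
// @endcode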
2471 
2472 
2473 
2474 template <int rank_, int dim, typename Number>
2475 template <class Archive>
2476 inline void
2477 SymmetricTensor<rank_, dim, Number>::serialize(Archive &ar, const unsigned int)
2478 {
2479  ar &data;
2480 }
2481 
2482 
2483 #endif // DOXYGEN
2484 
2485 /* ----------------- Non-member functions operating on tensors. ------------ */
2486 
2487 
2500 template <int rank_, int dim, typename Number, typename OtherNumber>
2501 constexpr inline DEAL_II_ALWAYS_INLINE
2502  SymmetricTensor<rank_, dim, typename ProductType<Number, OtherNumber>::type>
2503  operator+(const SymmetricTensor<rank_, dim, Number> &   left,
2504  const SymmetricTensor<rank_, dim, OtherNumber> &right)
2505 {
2506  SymmetricTensor<rank_, dim, typename ProductType<Number, OtherNumber>::type>
2507  tmp = left;
2508  tmp += right;
2509  return tmp;
2510 }
2511 
2512 
2525 template <int rank_, int dim, typename Number, typename OtherNumber>
2526 constexpr inline DEAL_II_ALWAYS_INLINE
2527  SymmetricTensor<rank_, dim, typename ProductType<Number, OtherNumber>::type>
2528  operator-(const SymmetricTensor<rank_, dim, Number> &   left,
2529  const SymmetricTensor<rank_, dim, OtherNumber> &right)
2530 {
2531  SymmetricTensor<rank_, dim, typename ProductType<Number, OtherNumber>::type>
2532  tmp = left;
2533  tmp -= right;
2534  return tmp;
2535 }
2536 
2537 
2545 template <int rank_, int dim, typename Number, typename OtherNumber>
2546 constexpr DEAL_II_ALWAYS_INLINE
2549  const Tensor<rank_, dim, OtherNumber> & right)
2550 {
2551  return Tensor<rank_, dim, Number>(left) + right;
2552 }
2553 
2554 
2562 template <int rank_, int dim, typename Number, typename OtherNumber>
2563 constexpr DEAL_II_ALWAYS_INLINE
2567 {
2568  return left + Tensor<rank_, dim, OtherNumber>(right);
2569 }
2570 
2571 
2579 template <int rank_, int dim, typename Number, typename OtherNumber>
2580 constexpr DEAL_II_ALWAYS_INLINE
2583  const Tensor<rank_, dim, OtherNumber> & right)
2584 {
2585  return Tensor<rank_, dim, Number>(left) - right;
2586 }
2587 
2588 
2596 template <int rank_, int dim, typename Number, typename OtherNumber>
2597 constexpr DEAL_II_ALWAYS_INLINE
2601 {
2602  return left - Tensor<rank_, dim, OtherNumber>(right);
2603 }
2604 
2605 
2606 
2620 template <int dim, typename Number>
2621 constexpr inline DEAL_II_ALWAYS_INLINE Number
2622 determinant(const SymmetricTensor<2, dim, Number> &t)
2623 {
2624  switch (dim)
2625  {
2626  case 1:
2627  return t.data[0];
2628  case 2:
2629  return (t.data[0] * t.data[1] - t.data[2] * t.data[2]);
2630  case 3:
2631  {
2632  // in analogy to general tensors, but
2633  // there's something to be simplified for
2634  // the present case
2635  const Number tmp = t.data[3] * t.data[4] * t.data[5];
2636  return (tmp + tmp + t.data[0] * t.data[1] * t.data[2] -
2637  t.data[0] * t.data[5] * t.data[5] -
2638  t.data[1] * t.data[4] * t.data[4] -
2639  t.data[2] * t.data[3] * t.data[3]);
2640  }
2641  default:
2642  Assert(false, ExcNotImplemented());
2644  }
2645 }
2646 
2647 
2648 
2660 template <int dim, typename Number>
2661 constexpr DEAL_II_ALWAYS_INLINE Number
2662 third_invariant(const SymmetricTensor<2, dim, Number> &t)
2663 {
2664  return determinant(t);
2665 }
2666 
2667 
2668 
2678 template <int dim, typename Number>
2679 constexpr inline DEAL_II_ALWAYS_INLINE Number
2680 trace(const SymmetricTensor<2, dim, Number> &d)
2681 {
2682  Number t = d.data[0];
2683  for (unsigned int i = 1; i < dim; ++i)
2684  t += d.data[i];
2685  return t;
2686 }
2687 
2688 
2700 template <int dim, typename Number>
2701 constexpr Number
2702 first_invariant(const SymmetricTensor<2, dim, Number> &t)
2703 {
2704  return trace(t);
2705 }
2706 
2707 
2719 template <typename Number>
2720 constexpr DEAL_II_ALWAYS_INLINE Number
2721 second_invariant(const SymmetricTensor<2, 1, Number> &)
2722 {
2723  return internal::NumberType<Number>::value(0.0);
2724 }
2725 
2726 
2727 
2746 template <typename Number>
2747 constexpr DEAL_II_ALWAYS_INLINE Number
2748 second_invariant(const SymmetricTensor<2, 2, Number> &t)
2749 {
2750  return t[0][0] * t[1][1] - t[0][1] * t[0][1];
2751 }
2752 
2753 
2754 
2763 template <typename Number>
2764 constexpr DEAL_II_ALWAYS_INLINE Number
2765 second_invariant(const SymmetricTensor<2, 3, Number> &t)
2766 {
2767  return (t[0][0] * t[1][1] + t[1][1] * t[2][2] + t[2][2] * t[0][0] -
2768  t[0][1] * t[0][1] - t[0][2] * t[0][2] - t[1][2] * t[1][2]);
2769 }
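// Relation between the invariants (a worked note, valid for dim == 3): the
// values computed by first_invariant(), second_invariant() and
// third_invariant() are the coefficients I1, I2, I3 of the characteristic
// polynomial
//
//   det(t - lambda * I) = -lambda^3 + I1 * lambda^2 - I2 * lambda + I3,
//
// with I1 = trace(t) and I3 = determinant(t); the eigenvalues returned by
// eigenvalues() below are the roots of this cubic.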
2770 
2771 
2772 
2780 template <typename Number>
2781 std::array<Number, 1>
2782 eigenvalues(const SymmetricTensor<2, 1, Number> &T);
2783 
2784 
2785 
2808 template <typename Number>
2809 std::array<Number, 2>
2810 eigenvalues(const SymmetricTensor<2, 2, Number> &T);
2811 
2812 
2813 
2836 template <typename Number>
2837 std::array<Number, 3>
2838 eigenvalues(const SymmetricTensor<2, 3, Number> &T);
2839 
2840 
2841 
2842 namespace internal
2843 {
2844  namespace SymmetricTensorImplementation
2845  {
2883  template <int dim, typename Number>
2884  void
2885  tridiagonalize(const ::SymmetricTensor<2, dim, Number> &A,
2886  ::Tensor<2, dim, Number> & Q,
2887  std::array<Number, dim> & d,
2888  std::array<Number, dim - 1> & e);
2889 
2890 
2891 
2931  template <int dim, typename Number>
2932  std::array<std::pair<Number, Tensor<1, dim, Number>>, dim>
2933  ql_implicit_shifts(const ::SymmetricTensor<2, dim, Number> &A);
2934 
2935 
2936 
2976  template <int dim, typename Number>
2977  std::array<std::pair<Number, Tensor<1, dim, Number>>, dim>
2978  jacobi(::SymmetricTensor<2, dim, Number> A);
2979 
2980 
2981 
2995  template <typename Number>
2996  std::array<std::pair<Number, Tensor<1, 2, Number>>, 2>
2997  hybrid(const ::SymmetricTensor<2, 2, Number> &A);
2998 
2999 
3000 
3033  template <typename Number>
3034  std::array<std::pair<Number, Tensor<1, 3, Number>>, 3>
3035  hybrid(const ::SymmetricTensor<2, 3, Number> &A);
3036 
3041  template <int dim, typename Number>
3042  struct SortEigenValuesVectors
3043  {
3044  using EigValsVecs = std::pair<Number, Tensor<1, dim, Number>>;
3045  bool
3046  operator()(const EigValsVecs &lhs, const EigValsVecs &rhs)
3047  {
3048  return lhs.first > rhs.first;
3049  }
3050  };
3051 
3052  } // namespace SymmetricTensorImplementation
3053 
3054 } // namespace internal
3055 
3056 
3057 
3058 // The line below is to ensure that doxygen puts the full description
3059 // of this global enumeration into the documentation
3060 // See https://stackoverflow.com/a/1717984
3089 enum struct SymmetricTensorEigenvectorMethod
3090 {
3100  hybrid,
3110  ql_implicit_shifts,
3118  jacobi
3119 };
3120 
3121 
3122 
3151 template <int dim, typename Number>
3152 std::array<std::pair<Number, Tensor<1, dim, Number>>,
3153  std::integral_constant<int, dim>::value>
3154 eigenvectors(const SymmetricTensor<2, dim, Number> &T,
3155  const SymmetricTensorEigenvectorMethod method =
3156  SymmetricTensorEigenvectorMethod::ql_implicit_shifts);
3157 
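// Usage sketch (illustrative, not part of symmetric_tensor.h; assumes
// Number == double and dim == 3, variable names are made up):
//
//   dealii::SymmetricTensor<2, 3> A;
//   A[0][0] = 2.; A[1][1] = 3.; A[2][2] = 4.; A[0][1] = 1.;
//   const std::array<double, 3> lambda = dealii::eigenvalues(A);
//   const auto eig = dealii::eigenvectors(
//     A, dealii::SymmetricTensorEigenvectorMethod::ql_implicit_shifts);
//   // eig[k].first is the k-th eigenvalue (sorted in descending order),
//   // eig[k].second the corresponding Tensor<1, 3, double> eigenvector.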
3158 
3159 
3168 template <int rank_, int dim, typename Number>
3169 constexpr DEAL_II_ALWAYS_INLINE SymmetricTensor<rank_, dim, Number>
3170 transpose(const SymmetricTensor<rank_, dim, Number> &t)
3171 {
3172  return t;
3173 }
3174 
3175 
3176 
3187 template <int dim, typename Number>
3188 constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor<2, dim, Number>
3189 deviator(const SymmetricTensor<2, dim, Number> &t)
3190 {
3191  SymmetricTensor<2, dim, Number> tmp = t;
3192 
3193  // subtract scaled trace from the diagonal
3194  const Number tr = trace(t) / dim;
3195  for (unsigned int i = 0; i < dim; ++i)
3196  tmp.data[i] -= tr;
3197 
3198  return tmp;
3199 }
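// Usage sketch (illustrative, not part of symmetric_tensor.h; the variable
// names eps and dev_eps are made up): the result is trace-free, so the
// original tensor splits into a deviatoric and a volumetric part:
//
//   dealii::SymmetricTensor<2, 3> eps;
//   eps[0][0] = 1.; eps[1][1] = 2.; eps[2][2] = 3.; eps[0][1] = 0.5;
//   const dealii::SymmetricTensor<2, 3> dev_eps = dealii::deviator(eps);
//   // trace(dev_eps) == 0 up to roundoff, and
//   // eps == dev_eps + (trace(eps)/3.) * unit_symmetric_tensor<3>()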
3200 
3201 
3202 
3209 template <int dim, typename Number>
3210 constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor<2, dim, Number>
3211 unit_symmetric_tensor()
3212 {
3213  // create a default constructed matrix filled with
3214  // zeros, then set the diagonal elements to one
3215  SymmetricTensor<2, dim, Number> tmp;
3216  switch (dim)
3217  {
3218  case 1:
3219  tmp.data[0] = internal::NumberType<Number>::value(1.);
3220  break;
3221  case 2:
3222  tmp.data[0] = tmp.data[1] = internal::NumberType<Number>::value(1.);
3223  break;
3224  case 3:
3225  tmp.data[0] = tmp.data[1] = tmp.data[2] =
3226  internal::NumberType<Number>::value(1.);
3227  break;
3228  default:
3229  for (unsigned int d = 0; d < dim; ++d)
3230  tmp.data[d] = internal::NumberType<Number>::value(1.);
3231  }
3232  return tmp;
3233 }
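// Usage sketch (illustrative, not part of symmetric_tensor.h): the returned
// tensor is the rank-2 identity, i.e. the Kronecker delta in the requested
// number type:
//
//   const dealii::SymmetricTensor<2, 3> I = dealii::unit_symmetric_tensor<3>();
//   // I[i][j] == (i == j ? 1. : 0.), hence trace(I) == 3.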
3234 
3235 
3236 
3244 template <int dim>
3245 constexpr DEAL_II_ALWAYS_INLINE SymmetricTensor<2, dim>
3246 unit_symmetric_tensor()
3247 {
3248  return unit_symmetric_tensor<dim, double>();
3249 }
3250 
3251 
3252 
3281 template <int dim, typename Number>
3282 constexpr inline SymmetricTensor<4, dim, Number>
3283 deviator_tensor()
3284 {
3285  SymmetricTensor<4, dim, Number> tmp;
3286 
3287  // fill the elements treating the diagonal
3288  for (unsigned int i = 0; i < dim; ++i)
3289  for (unsigned int j = 0; j < dim; ++j)
3290  tmp.data[i][j] =
3291  internal::NumberType<Number>::value((i == j ? 1. : 0.) - 1. / dim);
3292 
3293  // then fill the ones that copy over the
3294  // non-diagonal elements. note that during
3295  // the double-contraction, we handle the
3296  // off-diagonal elements twice, so simply
3297  // copying requires a weight of 1/2
3298  for (unsigned int i = dim;
3299  i < internal::SymmetricTensorAccessors::StorageType<4, dim, Number>::
3300  n_rank2_components;
3301  ++i)
3302  tmp.data[i][i] = internal::NumberType<Number>::value(0.5);
3303 
3304  return tmp;
3305 }
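// Usage sketch (illustrative, not part of symmetric_tensor.h; variable names
// are made up): the rank-4 tensor built above implements the linear map
// t -> deviator(t) through double contraction:
//
//   const dealii::SymmetricTensor<4, 3> P_dev = dealii::deviator_tensor<3>();
//   dealii::SymmetricTensor<2, 3> eps;
//   eps[0][0] = 1.; eps[1][1] = 2.; eps[0][2] = 0.25;
//   // P_dev * eps equals deviator(eps) up to roundoff.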
3306 
3307 
3308 
3316 template <int dim>
3317 constexpr SymmetricTensor<4, dim>
3318 deviator_tensor()
3319 {
3320  return deviator_tensor<dim, double>();
3321 }
3322 
3323 
3324 
3362 template <int dim, typename Number>
3363 constexpr inline SymmetricTensor<4, dim, Number>
3364 identity_tensor()
3365 {
3366  SymmetricTensor<4, dim, Number> tmp;
3367 
3368  // fill the elements treating the diagonal
3369  for (unsigned int i = 0; i < dim; ++i)
3370  tmp.data[i][i] = internal::NumberType<Number>::value(1.);
3371 
3372  // then fill the ones that copy over the
3373  // non-diagonal elements. note that during
3374  // the double-contraction, we handle the
3375  // off-diagonal elements twice, so simply
3376  // copying requires a weight of 1/2
3377  for (unsigned int i = dim;
3378  i < internal::SymmetricTensorAccessors::StorageType<4, dim, Number>::
3379  n_rank2_components;
3380  ++i)
3381  tmp.data[i][i] = internal::NumberType<Number>::value(0.5);
3382 
3383  return tmp;
3384 }
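// Usage sketch (illustrative, not part of symmetric_tensor.h; variable names
// are made up): identity_tensor() is the identity map on symmetric rank-2
// tensors under double contraction:
//
//   const dealii::SymmetricTensor<4, 2> Id = dealii::identity_tensor<2>();
//   dealii::SymmetricTensor<2, 2> s;
//   s[0][0] = 1.; s[1][1] = -2.; s[0][1] = 0.5;
//   // Id * s reproduces s component by component.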
3385 
3386 
3387 
3395 template <int dim>
3396 constexpr SymmetricTensor<4, dim>
3397 identity_tensor()
3398 {
3399  return identity_tensor<dim, double>();
3400 }
3401 
3402 
3403 
3413 template <int dim, typename Number>
3414 constexpr inline SymmetricTensor<2, dim, Number>
3415 invert(const SymmetricTensor<2, dim, Number> &t)
3416 {
3417  return internal::SymmetricTensorImplementation::Inverse<2, dim, Number>::
3418  value(t);
3419 }
3420 
3421 
3422 
3433 template <int dim, typename Number>
3434 constexpr inline SymmetricTensor<4, dim, Number>
3435 invert(const SymmetricTensor<4, dim, Number> &t)
3436 {
3437  return internal::SymmetricTensorImplementation::Inverse<4, dim, Number>::
3438  value(t);
3439 }
3440 
3441 
3442 
3464 template <int dim, typename Number>
3465 constexpr inline SymmetricTensor<4, dim, Number>
3466 outer_product(const SymmetricTensor<2, dim, Number> &t1,
3467  const SymmetricTensor<2, dim, Number> &t2)
3468 {
3469  SymmetricTensor<4, dim, Number> tmp;
3470 
3471  // fill only the elements really needed
3472  for (unsigned int i = 0; i < dim; ++i)
3473  for (unsigned int j = i; j < dim; ++j)
3474  for (unsigned int k = 0; k < dim; ++k)
3475  for (unsigned int l = k; l < dim; ++l)
3476  tmp[i][j][k][l] = t1[i][j] * t2[k][l];
3477 
3478  return tmp;
3479 }
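// Usage sketch (illustrative, not part of symmetric_tensor.h; variable names
// are made up): the outer product satisfies (t1 (x) t2) : c == t1 * (t2 : c),
// which is how rank-one contributions to elasticity tensors are often built:
//
//   const dealii::SymmetricTensor<2, 3> I = dealii::unit_symmetric_tensor<3>();
//   const dealii::SymmetricTensor<4, 3> IxI = dealii::outer_product(I, I);
//   // For every SymmetricTensor<2, 3> c, IxI * c equals trace(c) * I.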
3480 
3481 
3482 
3490 template <int dim, typename Number>
3491 constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor<2, dim, Number>
3492 symmetrize(const Tensor<2, dim, Number> &t)
3493 {
3494  SymmetricTensor<2, dim, Number> result;
3495  for (unsigned int d = 0; d < dim; ++d)
3496  result[d][d] = t[d][d];
3497 
3498  const Number half = internal::NumberType<Number>::value(0.5);
3499  for (unsigned int d = 0; d < dim; ++d)
3500  for (unsigned int e = d + 1; e < dim; ++e)
3501  result[d][e] = (t[d][e] + t[e][d]) * half;
3502  return result;
3503 }
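// Usage sketch (illustrative, not part of symmetric_tensor.h; the variable
// name grad_u is made up): symmetrize() returns (t + transpose(t)) / 2, for
// example the symmetric gradient of a displacement field:
//
//   dealii::Tensor<2, 2> grad_u;                  // zero-initialized
//   grad_u[0][1] = 1.;                            // non-symmetric input
//   const dealii::SymmetricTensor<2, 2> e = dealii::symmetrize(grad_u);
//   // e[0][1] == e[1][0] == 0.5, the diagonal entries stay 0.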
3504 
3505 
3506 
3514 template <int rank_, int dim, typename Number>
3515 constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor<rank_, dim, Number>
3516  operator*(const SymmetricTensor<rank_, dim, Number> &t, const Number &factor)
3517 {
3518  SymmetricTensor<rank_, dim, Number> tt = t;
3519  tt *= factor;
3520  return tt;
3521 }
3522 
3523 
3524 
3532 template <int rank_, int dim, typename Number>
3533 constexpr DEAL_II_ALWAYS_INLINE SymmetricTensor<rank_, dim, Number>
3534  operator*(const Number &factor, const SymmetricTensor<rank_, dim, Number> &t)
3535 {
3536  // simply forward to the other operator
3537  return t * factor;
3538 }
3539 
3540 
3541 
3567 template <int rank_, int dim, typename Number, typename OtherNumber>
3568 constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor<
3569  rank_,
3570  dim,
3571  typename ProductType<Number,
3572  typename EnableIfScalar<OtherNumber>::type>::type>
3573 operator*(const SymmetricTensor<rank_, dim, Number> &t,
3574  const OtherNumber & factor)
3575 {
3576  // form the product. we have to convert the two factors into the final
3577  // type via explicit casts because, for awkward reasons, the C++
3578  // standard committee saw it fit to not define an
3579  // operator*(float,std::complex<double>)
3580  // (as well as with switched arguments and double<->float).
3581  using product_type = typename ProductType<Number, OtherNumber>::type;
3582  SymmetricTensor<rank_, dim, product_type> tt(t);
3583  tt *= internal::NumberType<product_type>::value(factor);
3584  return tt;
3585 }
3586 
3587 
3588 
3597 template <int rank_, int dim, typename Number, typename OtherNumber>
3598 constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor<
3599  rank_,
3600  dim,
3601  typename ProductType<OtherNumber,
3602  typename EnableIfScalar<Number>::type>::type>
3603 operator*(const Number & factor,
3604  const SymmetricTensor<rank_, dim, OtherNumber> &t)
3605 {
3606  // simply forward to the other operator with switched arguments
3607  return (t * factor);
3608 }
3609 
3610 
3611 
3617 template <int rank_, int dim, typename Number, typename OtherNumber>
3618 constexpr inline SymmetricTensor<
3619  rank_,
3620  dim,
3621  typename ProductType<Number,
3622  typename EnableIfScalar<OtherNumber>::type>::type>
3623 operator/(const SymmetricTensor<rank_, dim, Number> &t,
3624  const OtherNumber & factor)
3625 {
3626  using product_type = typename ProductType<Number, OtherNumber>::type;
3627  SymmetricTensor<rank_, dim, product_type> tt(t);
3628  tt /= internal::NumberType<product_type>::value(factor);
3629  return tt;
3630 }
3631 
3632 
3633 
3640 template <int rank_, int dim>
3641 constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor<rank_, dim>
3642  operator*(const SymmetricTensor<rank_, dim> &t, const double factor)
3643 {
3644  SymmetricTensor<rank_, dim> tt = t;
3645  tt *= factor;
3646  return tt;
3647 }
3648 
3649 
3650 
3657 template <int rank_, int dim>
3658 constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor<rank_, dim>
3659  operator*(const double factor, const SymmetricTensor<rank_, dim> &t)
3660 {
3661  SymmetricTensor<rank_, dim> tt = t;
3662  tt *= factor;
3663  return tt;
3664 }
3665 
3666 
3667 
3673 template <int rank_, int dim>
3674 constexpr inline SymmetricTensor<rank_, dim>
3675 operator/(const SymmetricTensor<rank_, dim> &t, const double factor)
3676 {
3677  SymmetricTensor<rank_, dim> tt = t;
3678  tt /= factor;
3679  return tt;
3680 }
3681 
3691 template <int dim, typename Number, typename OtherNumber>
3692 constexpr DEAL_II_ALWAYS_INLINE typename ProductType<Number, OtherNumber>::type
3693 scalar_product(const SymmetricTensor<2, dim, Number> & t1,
3694  const SymmetricTensor<2, dim, OtherNumber> &t2)
3695 {
3696  return (t1 * t2);
3697 }
3698 
3699 
3713 template <int dim, typename Number, typename OtherNumber>
3714 constexpr inline DEAL_II_ALWAYS_INLINE
3715  typename ProductType<Number, OtherNumber>::type
3716  scalar_product(const SymmetricTensor<2, dim, Number> &t1,
3717  const Tensor<2, dim, OtherNumber> & t2)
3718 {
3719  typename ProductType<Number, OtherNumber>::type s = internal::NumberType<
3720  typename ProductType<Number, OtherNumber>::type>::value(0.0);
3721  for (unsigned int i = 0; i < dim; ++i)
3722  for (unsigned int j = 0; j < dim; ++j)
3723  s += t1[i][j] * t2[i][j];
3724  return s;
3725 }
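// Usage sketch (illustrative, not part of symmetric_tensor.h; variable names
// are made up): scalar_product() is the full double contraction
// sum_{i,j} t1[i][j] * t2[i][j]; for two symmetric arguments it agrees with
// the rank-2 operator* defined in the class itself:
//
//   dealii::SymmetricTensor<2, 2> a, b;
//   a[0][0] = 1.; a[0][1] = 2.;
//   b[1][1] = 3.; b[0][1] = 1.;
//   const double s = dealii::scalar_product(a, b);  // 1*0 + 2*(2*1) + 0*3 = 4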
3726 
3727 
3741 template <int dim, typename Number, typename OtherNumber>
3742 constexpr DEAL_II_ALWAYS_INLINE typename ProductType<Number, OtherNumber>::type
3743 scalar_product(const Tensor<2, dim, Number> & t1,
3744  const SymmetricTensor<2, dim, OtherNumber> &t2)
3745 {
3746  return scalar_product(t2, t1);
3747 }
3748 
3749 
3764 template <typename Number, typename OtherNumber>
3765 constexpr inline DEAL_II_ALWAYS_INLINE void double_contract(
3766  SymmetricTensor<2, 1, typename ProductType<Number, OtherNumber>::type> &tmp,
3767  const SymmetricTensor<4, 1, Number> & t,
3768  const SymmetricTensor<2, 1, OtherNumber> & s)
3769 {
3770  tmp[0][0] = t[0][0][0][0] * s[0][0];
3771 }
3772 
3773 
3774 
3789 template <typename Number, typename OtherNumber>
3790 constexpr inline void double_contract(
3791  SymmetricTensor<2, 1, typename ProductType<Number, OtherNumber>::type> &tmp,
3792  const SymmetricTensor<2, 1, Number> & s,
3793  const SymmetricTensor<4, 1, OtherNumber> & t)
3794 {
3795  tmp[0][0] = t[0][0][0][0] * s[0][0];
3796 }
3797 
3798 
3799 
3814 template <typename Number, typename OtherNumber>
3815 constexpr inline void double_contract(
3816  SymmetricTensor<2, 2, typename ProductType<Number, OtherNumber>::type> &tmp,
3817  const SymmetricTensor<4, 2, Number> & t,
3818  const SymmetricTensor<2, 2, OtherNumber> & s)
3819 {
3820  const unsigned int dim = 2;
3821 
3822  for (unsigned int i = 0; i < dim; ++i)
3823  for (unsigned int j = i; j < dim; ++j)
3824  tmp[i][j] = t[i][j][0][0] * s[0][0] + t[i][j][1][1] * s[1][1] +
3825  2 * t[i][j][0][1] * s[0][1];
3826 }
3827 
3828 
3829 
3844 template <typename Number, typename OtherNumber>
3845 constexpr inline void double_contract(
3846  SymmetricTensor<2, 2, typename ProductType<Number, OtherNumber>::type> &tmp,
3847  const SymmetricTensor<2, 2, Number> & s,
3848  const SymmetricTensor<4, 2, OtherNumber> & t)
3849 {
3850  const unsigned int dim = 2;
3851 
3852  for (unsigned int i = 0; i < dim; ++i)
3853  for (unsigned int j = i; j < dim; ++j)
3854  tmp[i][j] = s[0][0] * t[0][0][i][j] + s[1][1] * t[1][1][i][j] +
3855  2 * s[0][1] * t[0][1][i][j];
3856 }
3857 
3858 
3859 
3874 template <typename Number, typename OtherNumber>
3875 constexpr inline void double_contract(
3876  SymmetricTensor<2, 3, typename ProductType<Number, OtherNumber>::type> &tmp,
3877  const SymmetricTensor<4, 3, Number> & t,
3878  const SymmetricTensor<2, 3, OtherNumber> & s)
3879 {
3880  const unsigned int dim = 3;
3881 
3882  for (unsigned int i = 0; i < dim; ++i)
3883  for (unsigned int j = i; j < dim; ++j)
3884  tmp[i][j] = t[i][j][0][0] * s[0][0] + t[i][j][1][1] * s[1][1] +
3885  t[i][j][2][2] * s[2][2] + 2 * t[i][j][0][1] * s[0][1] +
3886  2 * t[i][j][0][2] * s[0][2] + 2 * t[i][j][1][2] * s[1][2];
3887 }
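// Usage sketch (illustrative, not part of symmetric_tensor.h; variable names
// are made up): double_contract() writes C : eps into its first argument
// without creating a temporary, e.g. when evaluating a stress from a
// stiffness tensor:
//
//   const dealii::SymmetricTensor<4, 3> C = dealii::identity_tensor<3>();
//   dealii::SymmetricTensor<2, 3> eps, sigma;
//   eps[0][0] = 1.; eps[1][2] = 0.5;
//   dealii::double_contract(sigma, C, eps);       // sigma now equals C : eps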
3888 
3889 
3890 
3905 template <typename Number, typename OtherNumber>
3906 constexpr inline void double_contract(
3907  SymmetricTensor<2, 3, typename ProductType<Number, OtherNumber>::type> &tmp,
3908  const SymmetricTensor<2, 3, Number> & s,
3909  const SymmetricTensor<4, 3, OtherNumber> & t)
3910 {
3911  const unsigned int dim = 3;
3912 
3913  for (unsigned int i = 0; i < dim; ++i)
3914  for (unsigned int j = i; j < dim; ++j)
3915  tmp[i][j] = s[0][0] * t[0][0][i][j] + s[1][1] * t[1][1][i][j] +
3916  s[2][2] * t[2][2][i][j] + 2 * s[0][1] * t[0][1][i][j] +
3917  2 * s[0][2] * t[0][2][i][j] + 2 * s[1][2] * t[1][2][i][j];
3918 }
3919 
3920 
3921 
3928 template <int dim, typename Number, typename OtherNumber>
3929 constexpr Tensor<1, dim, typename ProductType<Number, OtherNumber>::type>
3930  operator*(const SymmetricTensor<2, dim, Number> &src1,
3931  const Tensor<1, dim, OtherNumber> & src2)
3932 {
3933  Tensor<1, dim, typename ProductType<Number, OtherNumber>::type> dest;
3934  for (unsigned int i = 0; i < dim; ++i)
3935  for (unsigned int j = 0; j < dim; ++j)
3936  dest[i] += src1[i][j] * src2[j];
3937  return dest;
3938 }
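// Usage sketch (illustrative, not part of symmetric_tensor.h; variable names
// are made up): a symmetric rank-2 tensor acts on a vector like an ordinary
// matrix-vector product:
//
//   dealii::SymmetricTensor<2, 3> A;
//   A[0][0] = 2.; A[0][1] = 1.; A[1][1] = 3.;
//   dealii::Tensor<1, 3> v;
//   v[0] = 1.; v[1] = 1.;
//   const dealii::Tensor<1, 3> w = A * v;         // w == (3, 4, 0)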
3939 
3940 
3947 template <int dim, typename Number, typename OtherNumber>
3948 constexpr Tensor<1, dim, typename ProductType<Number, OtherNumber>::type>
3949  operator*(const Tensor<1, dim, Number> & src1,
3950  const SymmetricTensor<2, dim, OtherNumber> &src2)
3951 {
3952  // this is easy for symmetric tensors:
3953  return src2 * src1;
3954 }
3955 
3956 
3957 
3977 template <int rank_1,
3978  int rank_2,
3979  int dim,
3980  typename Number,
3981  typename OtherNumber>
3982 constexpr DEAL_II_ALWAYS_INLINE
3983  typename Tensor<rank_1 + rank_2 - 2,
3984  dim,
3985  typename ProductType<Number, OtherNumber>::type>::tensor_type
3986  operator*(const Tensor<rank_1, dim, Number> & src1,
3987  const SymmetricTensor<rank_2, dim, OtherNumber> &src2)
3988 {
3989  return src1 * Tensor<rank_2, dim, OtherNumber>(src2);
3990 }
3991 
3992 
3993 
4013 template <int rank_1,
4014  int rank_2,
4015  int dim,
4016  typename Number,
4017  typename OtherNumber>
4018 constexpr DEAL_II_ALWAYS_INLINE
4019  typename Tensor<rank_1 + rank_2 - 2,
4020  dim,
4021  typename ProductType<Number, OtherNumber>::type>::tensor_type
4022  operator*(const SymmetricTensor<rank_1, dim, Number> &src1,
4023  const Tensor<rank_2, dim, OtherNumber> & src2)
4024 {
4025  return Tensor<rank_1, dim, Number>(src1) * src2;
4026 }
4027 
4028 
4029 
4039 template <int dim, typename Number>
4040 inline std::ostream &
4041 operator<<(std::ostream &out, const SymmetricTensor<2, dim, Number> &t)
4042 {
4043  // make our lives a bit simpler by outputting
4044  // the tensor through the operator for the
4045  // general Tensor class
4046  Tensor<2, dim, Number> tt;
4047 
4048  for (unsigned int i = 0; i < dim; ++i)
4049  for (unsigned int j = 0; j < dim; ++j)
4050  tt[i][j] = t[i][j];
4051 
4052  return out << tt;
4053 }
4054 
4055 
4056 
4066 template <int dim, typename Number>
4067 inline std::ostream &
4068 operator<<(std::ostream &out, const SymmetricTensor<4, dim, Number> &t)
4069 {
4070  // make our lives a bit simpler by outputting
4071  // the tensor through the operator for the
4072  // general Tensor class
4073  Tensor<4, dim, Number> tt;
4074 
4075  for (unsigned int i = 0; i < dim; ++i)
4076  for (unsigned int j = 0; j < dim; ++j)
4077  for (unsigned int k = 0; k < dim; ++k)
4078  for (unsigned int l = 0; l < dim; ++l)
4079  tt[i][j][k][l] = t[i][j][k][l];
4080 
4081  return out << tt;
4082 }
4083 
4084 
4085 DEAL_II_NAMESPACE_CLOSE
4086 
4087 #endif