Medusa  1.1
Coordinate Free Meshless Method implementation
computeShapes.hpp
Go to the documentation of this file.
#ifndef MEDUSA_BITS_OPERATORS_COMPUTESHAPES_HPP_
#define MEDUSA_BITS_OPERATORS_COMPUTESHAPES_HPP_

#include "computeShapes_fwd.hpp"
// NOTE(review): the Doxygen listing also links DomainDiscretization.hpp,
// stdtypesutils.hpp and assert.hpp as includes of this file -- confirm against
// the original header; their exact include paths are not visible here.
#include <Eigen/Core>
#include <tuple>
#include <type_traits>

namespace mm {

20 template <class vec_t>
21 template <sh::shape_flags mask, typename approx_t>
22 RaggedShapeStorage<vec_t, typename sh::operator_tuple<mask, vec_t::dim>::type>
23 DomainDiscretization<vec_t>::computeShapes(approx_t approx, const indexes_t& indexes) const {
24  static_assert(static_cast<int>(vec_t::dim) == static_cast<int>(approx_t::dim),
25  "Domain and approximation engine dimensions do not match");
26  RaggedShapeStorage<vec_t, typename sh::operator_tuple<mask, vec_t::dim>::type> storage;
27  storage.resize(supportSizes());
28  typename sh::operator_tuple<mask, vec_t::dim>::type operators{};
29  if (indexes.empty()) mm::computeShapes(*this, approx, all(), operators , &storage);
30  else mm::computeShapes(*this, approx, indexes, operators , &storage);
31  return storage;
32 }
33 
35 template <class vec_t>
36 template <typename OperatorTuple, typename approx_t>
37 RaggedShapeStorage<vec_t, OperatorTuple>
38 DomainDiscretization<vec_t>::computeShapes(approx_t approx, const indexes_t& indexes) const {
39  static_assert(static_cast<int>(vec_t::dim) == static_cast<int>(approx_t::dim),
40  "Domain and approximation engine dimensions do not match");
41  RaggedShapeStorage<vec_t, OperatorTuple> storage;
42  OperatorTuple operators{};
43  storage.resize(supportSizes());
44  if (indexes.empty()) mm::computeShapes(*this, approx, all(), operators , &storage);
45  else mm::computeShapes(*this, approx, indexes, operators , &storage);
46  return storage;
47 }
48 
50 template <class vec_t>
51 template <typename OperatorTuple, typename approx_t>
52 RaggedShapeStorage<vec_t, OperatorTuple>
53 DomainDiscretization<vec_t>::computeShapes(OperatorTuple operators, approx_t approx,
54 const indexes_t& indexes) const {
55  static_assert(static_cast<int>(vec_t::dim) == static_cast<int>(approx_t::dim),
56  "Domain and approximation engine dimensions do not match");
57  RaggedShapeStorage<vec_t, OperatorTuple> storage;
58  storage.resize(supportSizes());
59  if (indexes.empty()) mm::computeShapes(*this, approx, all(), operators , &storage);
60  else mm::computeShapes(*this, approx, indexes, operators , &storage);
61  return storage;
62 }
63 
64 namespace shapes_internal {
65 
67 template <typename approx_t, typename shape_storage_t, typename OperatorTuple, std::size_t N>
68 struct ShapeComputer {
69  static void compute(const OperatorTuple& operators, const approx_t& approx, int node,
70  shape_storage_t* storage) {
71  // do computation for previous ones
72  ShapeComputer<approx_t, shape_storage_t, OperatorTuple, N-1>::compute(
73  operators, approx, node, storage);
74 
75  // Do computation for current operator family:
76  // get family type (for storage indexing)
77  typedef typename std::tuple_element<N-1, OperatorTuple>::type op_family_t;
78  // get family value, to produce operators
79  auto op_family = std::get<N-1>(operators);
80  // count operators (requirements ensure compatibility with `index` method used in `apply`)
81  int i = 0;
82  for (const auto op : op_family.operators()) {
83  // Access storage by family type, not by family index (e.g. using N-1), since given
84  // operators may only be a subset of total operators in storage, and using
85  // N-1 is wrong.
86  storage->template setShape<op_family_t>(i, node, approx.getShape(op));
87  ++i;
88  }
89  }
90 };
91 
92 template <typename approx_t, typename shape_storage_t, typename OperatorTuple>
93 struct ShapeComputer<approx_t, shape_storage_t, OperatorTuple, 0> {
94  static void compute(const OperatorTuple&, const approx_t&, int, shape_storage_t*) {}
95 };
96 
97 } // namespace shapes_internal
99 
100 template <typename approx_t, typename shape_storage_t, typename ...Operators>
101 void computeShapes(const DomainDiscretization<typename approx_t::vector_t>& domain,
102  approx_t approx, const indexes_t& indexes,
103  const std::tuple<Operators...>& operators, shape_storage_t* storage) {
104  int N = domain.size();
105  assert_msg(domain.supports().size() == N,
106  "domain.support.size = %d and domain.size = %d, but should be the same. "
107  "Did you forget to find support before computing shapes?",
108  domain.supports().size(), domain.size());
109  for (const auto& c : indexes) {
110  assert_msg(0 <= c && c < N,
111  "Index %d is not a valid index of a point in the domain, must be in range "
112  "[%d, %d).", c, 0, N);
113  assert_msg(!domain.support(c).empty(),
114  "Node %d has empty support! Did you forget to find support before "
115  "computing shapes?", c);
116  }
117  assert_msg(storage->size() == N,
118  "Storage must of appropriate size before calling computeShapes(), got size %d, "
119  "expected %d.", storage->size(), N);
120 
121  // construct shape functions for every point specified
122  int cc;
123  int isize = static_cast<int>(indexes.size());
124  // create local copies of mls for each thread
125  #if !defined(_OPENMP)
126  approx_t& local_approx = approx;
127  #else
128  std::vector<approx_t> approx_copies(omp_get_max_threads(), approx);
129  #pragma omp parallel for private(cc) schedule(static)
130  #endif
131  for (cc = 0; cc < isize; ++cc) {
132  int node = indexes[cc];
133  // store local copy of approximation engine -- for parallel computing
134  #if defined(_OPENMP)
135  approx_t& local_approx = approx_copies[omp_get_thread_num()];
136  #endif
137  assert_msg(storage->supportSize(node) == domain.supportSize(node),
138  "Storage must of appropriate size before calling computeShapes(). "
139  "Got support size %d at node %d, expected %d.",
140  storage->supportSize(node), node, domain.supportSize(node));
141  // Preps local 1D support domain vector (cache friendly storage).
142  storage->setSupport(node, domain.support(node));
143  // Resets local approximation engine with local parameters.
144  Range<typename approx_t::vector_t> supp_domain = domain.supportNodes(node);
145  local_approx.compute(supp_domain[0], supp_domain);
146  // Iterate over all operator families to compute approximations.
147  shapes_internal::ShapeComputer<
148  approx_t, shape_storage_t, std::tuple<Operators...>, sizeof...(Operators)
149  >::compute(operators, local_approx, node, storage);
150  }
151 }

} // namespace mm

#endif  // MEDUSA_BITS_OPERATORS_COMPUTESHAPES_HPP_
mm
Root namespace for the whole library.
Definition: Gaussian.hpp:14
DomainDiscretization.hpp
stdtypesutils.hpp
computeShapes_fwd.hpp
mm::computeShapes
void computeShapes(const DomainDiscretization< typename approx_t::vector_t > &domain, approx_t approx, const indexes_t &indexes, const std::tuple< OpFamilies... > &operators, shape_storage_t *storage)
Computes shape functions (stencil weights) for given nodes using support domains from domain and appr...
dim
@ dim
Number of elements of this matrix.
Definition: MatrixBaseAddons.hpp:14
assert_msg
#define assert_msg(cond,...)
Assert with better error reporting.
Definition: assert.hpp:75
mm::sh::all
static const shape_flags all
Indicates to prepare all shapes, default.
Definition: shape_flags.hpp:29
mm::sh::operator_tuple::type
void type
The type of the operator tuple.
Definition: shape_flags.hpp:43
mm::indexes_t
std::vector< int > indexes_t
Class representing a collection of indices.
Definition: Config.hpp:36
assert.hpp
mm::DomainDiscretization::computeShapes
RaggedShapeStorage< vec_t, typename sh::operator_tuple< mask, vec_t::dim >::type > computeShapes(approx_t approx, const indexes_t &indexes={}) const
Compute shapes, specified with shape flags, for this domain with given approximation for given indexe...