KASKADE 7 development version
dune_bridge.hh
1#ifndef DUNE_BRIDGE_HH
2#define DUNE_BRIDGE_HH
3
4#include <cmath>
5#include <memory>
6#include <utility>
7#include <dune/istl/operators.hh>
9#include "linalg/triplet.hh"
10#include "fem/assemble.hh"
11#include "fem/linearspace.hh"
12#include "fem/functional_aux.hh"
14
15namespace Kaskade
16{
17 // forward declaration
18 namespace Bridge { template <class> class Linearization; }
19
20template<class Functional>
21struct DiscreteBlockTraits
22{
23 typedef typename Functional::AnsatzVars::VariableSet Vars;
24 typedef typename Functional::Scalar Scalar;
25
26 static constexpr bool anyPresent = false;
27 static void getMatrixBlock(MatrixAsTriplet<Scalar>& mat,
28 Functional const& fu,
29 Vars const& origin,
30 int row,
31 int col)
32 {}
33 static void getRHSBlock(std::vector<Scalar>& rhs,
34 Functional const& fu,
35 Vars const& origin,
36 int row)
37 {}
38
39 static Scalar getValue(Functional const& fu,
40 Vars const& origin)
41 {
42 return 0.0;
43 }
44};
45
46template <typename Scalar>
47std::ostream& operator<<(std::ostream &s, std::vector<Scalar> const& vec)
48{
49 s << "[\n";
50 for(size_t i=0; i<vec.size(); ++i)
51 s << " " << vec[i] << std::endl;
52 s << "]\n";
53 return s;
54}
55
56template<class Functional, class DomainElement>
57struct InDomainTraits
58{
59 static bool inDomain(DomainElement const& x){ return true; }
60};
61
62
63namespace Bridge{
64 template <class> class Vector;
65
66 template<class Implementation> Implementation& getImpl(AbstractFunctionSpaceElement& v);
67 template<class Implementation> Implementation const& getImpl(AbstractFunctionSpaceElement const& v);
68
69// /// Bridge::Linearization class that uses a VariationalFunctionalAssembler to create linear systems
70// /** Implements AbstractLinearization */
71// template<class Functional>
72// class KaskadeLinearization : public AbstractLinearization, public SparseLinearSystem
73// {
74// public:
75//
76// static const int nThreads = 16;
77//
78// typedef typename Functional::AnsatzVars::VariableSet DomainElement;
79// typedef typename Functional::TestVars::VariableSet ImageElement;
80// typedef typename Functional::Scalar Scalar;
81// typedef LinearizationAt<Functional> Implementation;
82// typedef VariationalFunctionalAssembler<Implementation> Assembler;
83// typedef typename DomainElement::Descriptions::template CoefficientVectorRepresentation<>::type CoefficientVector;
84// typedef Dune::LinearOperator<CoefficientVector, CoefficientVector> OperatorType;
85// typedef OperatorType Operator;
86//
87// KaskadeLinearization() = delete;
88//
89// /// Creation of a linearization for a functional fu at x_
90// KaskadeLinearization(Functional const& fu_, DomainElement const& x_)
91// : x(x_), fu(fu_), lin(fu_,x), ass(x.descriptions.spaces)
92// {
93// flush();
94// }
95//
96// virtual ~KaskadeLinearization() { changed(); flushconn.disconnect(); }
97//
98// /// Number of columns of components [cbegin, cend)
99// int cols(int cbegin, int cend) const
100// {
101// if(cbegin < cend)
102// return x.descriptions.degreesOfFreedom(cbegin,cend);
103// return 0;
104// }
105//
106// /// Number of rows of components [rbegin, rend)
107// int rows(int rbegin, int rend) const
108// {
109// if(rbegin < rend)
110// return x.descriptions.degreesOfFreedom(rbegin,rend);
111// return 0;
112// }
113//
114// void precompute() {
115// doAssemble(Assembler::VALUE | Assembler::RHS | Assembler::MATRIX);
116// }
117//
118// /// write blocks of the hessian matrix into mat
119// void getMatrixBlocks(MatrixAsTriplet<Scalar>& mat, int rbegin, int rend, int colbegin, int colend) const
120// {
121// doAssemble(Assembler::VALUE | Assembler::RHS | Assembler::MATRIX);
122// mat = ass.template get<MatrixAsTriplet<Scalar> >(false,rbegin,rend,colbegin,colend);
123//
124// if(DiscreteBlockTraits<Functional>::anyPresent)
125// {
126// MatrixAsTriplet<Scalar> matD;
127// getDiscreteMatrixBlocks(matD,rbegin,rend,colbegin,colend);
128// mat+=matD;
129// }
130// }
131//
132// /// write components of the gradient into rhs
133// void getRHSBlocks(std::vector<Scalar>& rhs, int rbegin, int rend) const
134// {
135// doAssemble(Assembler::VALUE | Assembler::RHS);
136// rhs.resize(rows(rbegin,rend),0.0);
137// ass.toSequence(rbegin,rend,rhs.begin());
138// if(DiscreteBlockTraits<Functional>::anyPresent)
139// {
140// addDiscreteRHSBlocks(rhs,rbegin,rend);
141// }
142// }
143//
144// MatrixAsTriplet<Scalar> assembleGradientCheck(int const lastBlockId)
145// {
146// int const firstBlockId = 0;
147// std::cout << "assembling for gradient check" << std::flush;
148//
149// MatrixAsTriplet<Scalar> result, reference;
150// flush();
151// getMatrixBlocks(reference, 0, lastBlockId, 0, lastBlockId);
152// std::vector<Scalar> ref_sol, tmp;
153// size_t numRows = rows(firstBlockId, lastBlockId), numCom = Functional::TestVars::template Components<0>::m;
154// std::cout << "1: numRows: " << numRows << ", " << numCom << std::endl;
155// Scalar const increment = 1.0e-9;
156//
157// tmp.resize(numRows, 0.0);
158// ref_sol.resize(numRows, 0.0), tmp.resize(numRows,0.0);
159// flush();
160// doAssemble(Assembler::RHS);
161// ass.toSequence(firstBlockId,lastBlockId, ref_sol.begin());
162// for(size_t i=0; i<boost::fusion::at_c<0>(x.data).coefficients().N(); ++i)
163// {
164// for(int j=0; j<numCom; ++j){
165// tmp.clear();
166// tmp.resize(numRows,0.0);
167// boost::fusion::at_c<0>(x.data).coefficients()[i][j] += increment;
168// flush();
169// doAssemble(Assembler::RHS);
170// boost::fusion::at_c<0>(x.data).coefficients()[i][j] -= increment;
171//
172// ass.toSequence(firstBlockId, lastBlockId, tmp.begin());
173//
174// for(size_t k=0; k<tmp.size(); ++k)
175// {
176// result.addEntry(k,i*numCom+j, (tmp[k] - ref_sol[k])/increment );
177// }
178// }
179// std::cout << "." << std::flush;
180// }
181// std::cout << "done." << std::flush << std::endl << std::endl;
182//
183// result *= -1.0;
184// result += reference;
185//
186// std::cout << "Error:" << std::endl;
187// std::cout << result << std::endl;
188//
189// return result;
190// }
191//
192// std::vector<Scalar> checkRealGradient(int const lastBlockId)
193// {
194// std::vector<Scalar> result, reference;
195// int const firstBlockId = 0;
196// getRHSBlocks(reference, firstBlockId, lastBlockId);
197// size_t numRows = rows(firstBlockId, lastBlockId);
198// Scalar const increment = 1.0e-9;
199//
200// result.resize(numRows,0.0);
201// flush();
202// Scalar const value = getValue();
203// Scalar tmp(0);
204//
205// for(size_t i=0; i<boost::fusion::at_c<0>(x.data).coefficients().N(); ++i)
206// {
207// boost::fusion::at_c<0>(x.data).coefficients()[i][0] += increment;
208// flush();
209// tmp = getValue();
210// boost::fusion::at_c<0>(x.data).coefficients()[i][0] -= increment;
211// result[i] = (tmp - value)/increment;
212// }
213// std::cout << "Error\n";
214// for(size_t i=0; i<result.size(); ++i){
215// std::cout << result[i] << " : " << reference[i] << " -> " << (result[i] + reference[i]) << std::endl;
216// result[i] += reference[i];
217// if(fabs(result[i]) > 1.0e-8)
218// std::cout << " " << result[i] << std::endl;
219// }
220// // std::cout << result << std::endl;
221//
222// return result;
223// }
224//
225//
226// /// return number of columns
227// constexpr int nColBlocks() const {return Functional::AnsatzVars::noOfVariables;}
228//
229// /// return number of rows
230// constexpr int nRowBlocks() const { return Functional::TestVars::noOfVariables; }
231//
232// /// return point of linearization
233// DomainElement const& getOriginImpl() const {return x;}
234//
235// // return point of linearization
236// DomainElement getX() const { return x; }
237//
238// void setX(DomainElement const& x_){ x = x_; }
239//
240// /// return the implementation
241// Implementation const& getLinImpl() const {return lin; }
242//
243// /// flush all data, gathered so far
244// void flush() { changed(); ass.flush((Assembler::VALUE | Assembler::RHS | Assembler::MATRIX)); }
245//
246// /// return whether x is in the domain of definition
247// bool inDomain(DomainElement const& x) { return InDomainTraits<Functional,DomainElement>::inDomain(x); }
248//
249// /// return the current value Functional(Origin)
250// double getValue() const
251// {
252// doAssemble(Assembler::VALUE);
253// Scalar value =ass.functional();
254// if(DiscreteBlockTraits<Functional>::anyPresent)
255// value += DiscreteBlockTraits<Functional>::getValue(fu,x);
256// return value;
257// }
258//
259// double eval() const { return getValue(); }
260// double evalL1norm() const { return getL1norm(); }
261// void evald(AbstractFunctionSpaceElement& v, int rbegin=0, int rend=nRowBlocks()) const
262// {
263// assert(rbegin<=rend);
264// std::vector<Scalar> rhs(rows(rbegin,rend),0.0);
265// getRHSBlocks(rhs,rbegin,rend);
266// dynamic_cast<Vector<ImageElement>& >(v).read(rhs,rbegin,rend);
267// }
268// void ddxpy(AbstractFunctionSpaceElement& y, AbstractFunctionSpaceElement const& x, int rbegin=0, int rend=nRowBlocks(), int cbegin=0, int cend=nColBlocks()) const
269// {
270// assert(cbegin<=cend);
271// assert(rbegin<=rend);
272// std::vector<Scalar> result, argument;
273// dynamic_cast<Vector<ImageElement> const& >(x).write(argument,cbegin,cend);
274// dynamic_cast<Vector<ImageElement>& >(y).write(result,rbegin,rend);
275// MatrixAsTriplet<Scalar> mat;
276// getMatrixBlocks(mat,rbegin,rend,cbegin,cend);
277// mat.axpy(result, argument);
278// dynamic_cast<Vector<ImageElement>& >(y).read(result,rbegin,rend);
279// }
280// AbstractFunctionSpaceElement const& getOrigin() const
281// {
282// xptr.reset(new Vector<DomainElement>(x));
283// return *xptr;
284// }
285// void connectToSignalForFlush(boost::signals2::signal0<void>& sig) {
286// if(flushconn.connected()) flushconn.disconnect();
287// flushconn=sig.connect(boost::bind(&KaskadeLinearization::flush, this));
288// }
289//
290// double getL1norm() const
291// {
292// doAssemble(Assembler::VALUE);
293// Scalar value =0;//ass.fL1norm();
294// if(DiscreteBlockTraits<Functional>::anyPresent)
295// value += std::fabs(DiscreteBlockTraits<Functional>::getValue(fu,x));
296// return value;
297// }
298//
299// Assembler& getValidAssembler() const
300// {
301// doAssemble((Assembler::VALUE | Assembler::RHS | Assembler::MATRIX));
302// return ass;
303// }
304//
305// Functional const& getFunctional() const
306// {
307// return fu;
308// }
309//
310// private:
311// void addDiscreteRHSBlocks(std::vector<Scalar>& rhs, int rbegin, int rend) const
312// {
313// for(int i=rbegin; i<rend; ++i)
314// {
315// std::vector<Scalar> rhsD;
316// DiscreteBlockTraits<Functional>::getRHSBlock(rhsD,fu,x,i);
317// if(rhsD.size() > 0)
318// {
319// assert(rhsD.size() == rows(i,i+1));
320// for(int k=rows(rbegin,i), l=0;k<rows(rbegin,i+1);++k, ++l)
321// rhs[k]+=rhsD[l];
322// }
323// }
324// }
325//
326// void getDiscreteMatrixBlocks(MatrixAsTriplet<Scalar>& mat, int rbegin, int rend, int colbegin, int colend) const
327// {
328// for(int i=rbegin; i<rend; ++i)
329// for(int j=colbegin; j<colend; ++j)
330// {
331// MatrixAsTriplet<Scalar> matD;
332// DiscreteBlockTraits<Functional>::getMatrixBlock(matD, fu,x,i,j);
333// matD.shiftIndices(rows(rbegin,i),cols(colbegin,j));
334// mat+=matD;
335// }
336// }
337//
338// void doAssemble(int flags) const
339// {
340// int toDoFlag= ((~ass.valid()) & flags);
341// if(toDoFlag!=0)
342// {
343// ass.assemble(lin,toDoFlag,nThreads);
344// }
345// }
346//
347//
348// DomainElement x;
349// Functional const& fu;
350// Implementation lin;
351// mutable Assembler ass;
352// mutable std::unique_ptr<Vector<DomainElement> > xptr;
353// boost::signals2::connection flushconn;
354//};
355
356// template<class Functional>
357// class LinearizationTraits<typename Functional::AnsatzVars::VariableSet, Functional>
358// {
359// public:
360// typedef KaskadeLinearization<Functional> Linearization;
361// };
362
363
365
373 template<typename Implementation>
374 class ErrorEstimator : public AbstractErrorEstimator
375 {
376 typedef typename Implementation::DomainElement VectorImplementation;
377 public:
378 typedef typename Implementation::Estimate Estimate;
379
380 ErrorEstimator(std::unique_ptr<Implementation>& imp) : myImplementation(std::move(imp)) {}
381
382 virtual std::unique_ptr<AbstractErrorEstimate> createEstimate(AbstractFunctionSpaceElement const& correction,
383 AbstractLinearization const& lin) const
384 {
385 VectorImplementation const& ci = getImpl<VectorImplementation>(correction);
387 return myImplementation->createEstimate(ci,dynamic_cast<Linearization<LinImpl> const&>(lin));
388 }
389 private:
390 std::unique_ptr<Implementation> myImplementation;
391 };
392
393 template<class ErrorEst, class GridMan>
394 ErrorEst extendMarkings(ErrorEst const&, GridMan &);
395
396
398
400 template <class GridManager, class Estimate>
401 class AdaptiveGrid : public AbstractAdaptiveGrid
402 {
403 public:
404 AdaptiveGrid(GridManager& gm)
405 : gridMan(gm), nMarked(0)
406 {
407 gridChange=gm.signals.informAboutRefinement.connect(
410 }
411
412 virtual int size() { return gridMan.grid().size(0); }
413
414 virtual void adapt() { gridMan.preAdapt(); gridMan.adapt(); gridMan.postAdapt(); nMarked = 0; };
415
416 virtual void mark(AbstractErrorEstimate& estimate, double portion)
417 {
418 Estimate cdmarks(CellData<typename GridManager::Grid,int>(markByBulkCriterion(dynamic_cast<Estimate&>(estimate).getData(),portion)));
419
420 nMarked = cdmarks.getData().sum();
421 // if(cdmarks.getData().sum() >= 00)
422 gridMan.mark(cdmarks.getData());
423 // else
424 // {
425 // Estimate
426 // extMarks(extendMarkings<Estimate, GridManager>(cdmarks,gridMan));
427 // gridMan.mark(extMarks.getData());
428 // }
429 }
430
431 virtual int getNMarked() { return nMarked; }
432
433 virtual void flushMarks() { gridMan.flushMarks(); nMarked = 0; }
434
435 virtual ~AdaptiveGrid() { gridChange.disconnect(); }
436
437 private:
438 void onGridChange(GridSignals::Status stat)
439 {
441 }
442
443 GridManager& gridMan;
444 int nMarked;
445 boost::signals2::connection gridChange;
446 };
447
448
449 //--------------------------------------------------------------------------------
450
451 /// Convenience routine: makes an ErrorEstimator of the right type.
452 template<class Est>
453 std::unique_ptr<ErrorEstimator<Est> > makeErrorEstimator(Est* est){return makeUP(new ErrorEstimator<Est>(makeUP(est)));}
454
455}
456
457}// namespace Kaskade
458#endif
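DiscreteBlockTraits is the customization point through which purely discrete (non-finite-element) contributions enter the assembled systems: the primary template above sets anyPresent = false and its hooks do nothing, and the (currently commented-out) KaskadeLinearization only folds extra matrix and right-hand-side blocks into the system when anyPresent is true. The following is a minimal sketch of what a specialization can look like; the functional type MyFunctional, its penaltyWeight() member, and the block contents are made-up placeholders, not part of Kaskade.

// Sketch of a DiscreteBlockTraits specialization for a hypothetical functional type.
#include "dune_bridge.hh"

namespace Kaskade
{
  template <>
  struct DiscreteBlockTraits<MyFunctional>
  {
    typedef MyFunctional::AnsatzVars::VariableSet Vars;
    typedef MyFunctional::Scalar                  Scalar;

    // Signal that discrete blocks exist so that they are assembled at all.
    static constexpr bool anyPresent = true;

    // Extra contribution to the (row,col) block of the matrix.
    static void getMatrixBlock(MatrixAsTriplet<Scalar>& mat, MyFunctional const& fu,
                               Vars const& origin, int row, int col)
    {
      if (row == 0 && col == 0)
        mat.addEntry(0, 0, fu.penaltyWeight());   // illustrative single entry
    }

    // Extra contribution to block 'row' of the right-hand side; its size must
    // match the number of degrees of freedom of that block.
    static void getRHSBlock(std::vector<Scalar>& rhs, MyFunctional const& fu,
                            Vars const& origin, int row)
    {
      if (row == 0)
        rhs.assign(1, -fu.penaltyWeight());       // assumes a scalar block
    }

    // Extra contribution to the functional value at 'origin'.
    static Scalar getValue(MyFunctional const& fu, Vars const& origin)
    {
      return Scalar(0);                           // nothing extra in this sketch
    }
  };
}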
Cross-reference index (from the generated documentation):

Defined in this file:
  ErrorEstimator: Bridge class for an error estimator, implements AbstractErrorEstimator. Definition: dune_bridge.hh:375
  ErrorEstimator::Estimate: Implementation::Estimate. Definition: dune_bridge.hh:378
  ErrorEstimator::ErrorEstimator(std::unique_ptr<Implementation>& imp). Definition: dune_bridge.hh:380
  ErrorEstimator::createEstimate(AbstractFunctionSpaceElement const& correction, AbstractLinearization const& lin): virtual std::unique_ptr<AbstractErrorEstimate>. Definition: dune_bridge.hh:382
  AdaptiveGrid: Implements AbstractAdaptiveGrid, uses a bulk criterion as a marking strategy. Definition: dune_bridge.hh:402
  AdaptiveGrid::AdaptiveGrid(GridManager& gm). Definition: dune_bridge.hh:404
  AdaptiveGrid::size(): Number of patches. Definition: dune_bridge.hh:412
  AdaptiveGrid::adapt(): Change grid according to marked elements. Definition: dune_bridge.hh:414
  AdaptiveGrid::mark(AbstractErrorEstimate& estimate, double portion): Mark elements, using an error estimate that holds error indicators. Definition: dune_bridge.hh:416
  AdaptiveGrid::flushMarks(): Remove all marks in the grid. Definition: dune_bridge.hh:433
  makeErrorEstimator(Est* est): Convenience routine: makes an ErrorEstimator of the right type. Definition: dune_bridge.hh:453
  extendMarkings(ErrorEst const&, GridMan&). Definition: celldata.hh:377
  operator<<(std::ostream& s, std::vector<Scalar> const& vec). Definition: dune_bridge.hh:47
  DiscreteBlockTraits::Vars: Functional::AnsatzVars::VariableSet. Definition: dune_bridge.hh:23
  DiscreteBlockTraits::Scalar: Functional::Scalar. Definition: dune_bridge.hh:24
  DiscreteBlockTraits::anyPresent: static constexpr bool. Definition: dune_bridge.hh:26
  DiscreteBlockTraits::getMatrixBlock(MatrixAsTriplet<Scalar>& mat, Functional const& fu, Vars const& origin, int row, int col). Definition: dune_bridge.hh:27
  DiscreteBlockTraits::getRHSBlock(std::vector<Scalar>& rhs, Functional const& fu, Vars const& origin, int row). Definition: dune_bridge.hh:33
  DiscreteBlockTraits::getValue(Functional const& fu, Vars const& origin). Definition: dune_bridge.hh:39
  InDomainTraits::inDomain(DomainElement const& x). Definition: dune_bridge.hh:59

Referenced from other headers:
  Interfaces for function space oriented algorithms:
    AbstractAdaptiveGrid: Representation of an adaptive grid and a simple set of operations thereon.
    gridWillChange (boost::signals2::signal<void()>): Inform others that the grid will change.
    AbstractErrorEstimate: Representation of an error estimate, i.e. the output of an error estimator.
    AbstractErrorEstimator: Representation of an error estimator.
    AbstractFunctionSpaceElement: Abstract Vector for function space algorithms.
    getImpl(AbstractFunctionSpaceElement& v): Get the implementation of an AbstractFunctionSpaceElement.
    makeUP(T* t): Convenience routine: makes a unique_ptr of the right type.
  Grid management (gridmanager.hh):
    GridManager: Class that takes care of the transfer of data (of each FunctionSpaceElement) after modification of th... Definition: gridmanager.hh:680
    Grid const& grid(): Returns a const reference on the owned grid. Definition: gridmanager.hh:292
    bool mark(int refCount, typename Grid::template Codim<0>::Entity const& cell): Marks the given cell for refinement or coarsening. Definition: gridmanager.hh:330
    bool preAdapt(): Calls grid.preAdapt(). Definition: gridmanager.hh:365
    bool adapt(): Refines each marked leaf cell of the grid and prolongates registered FE functions to the new leaf gri... Definition: gridmanager.hh:413
    void postAdapt(): Calls grid.postAdapt(). Definition: gridmanager.hh:374
    void flushMarks(): Remove all cell marks from the grid. Definition: gridmanager.hh:402
    GridSignals::informAboutRefinement (boost::signals2::signal<void(Status)>): A signal that is emitted thrice on grid modifications, once before adaptation takes place and twice a... Definition: gridmanager.hh:74
    GridSignals::Status: The argument type of the signal that is emitted before and after grid adaptation. Definition: gridmanager.hh:64
  Cell data (celldata.hh):
    CellData: Class that stores information for each cell of a grid. Definition: celldata.hh:37
    markByBulkCriterion(CellData<Grid,T>& ic, double Theta): Create a CellDataVector, in which the largest entities of *this are marked by 1, using the "bulk crit... Definition: celldata.hh:224
  Utility classes for the definition and use of variational functionals.
  Vector: typedef for Dune::FieldVector<Scalar, dim>.
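The bridge classes are driven from generic, interface-based algorithms. As a rough sketch of the calling side (not part of this header): wrap a GridManager in Bridge::AdaptiveGrid, pass an error estimate to mark() together with a bulk-criterion portion, and call adapt() to refine the marked cells. The helper markAndRefine, its template parameters, and the default portion of 0.7 are assumptions made for illustration.

// Hypothetical driver; GridMan is assumed to be a Kaskade::GridManager<...>, and
// Estimate an error-estimate type derived from AbstractErrorEstimate (for example
// the Estimate type of an ErrorEstimator created via makeErrorEstimator).
#include "dune_bridge.hh"

template <class GridMan, class Estimate>
int markAndRefine(GridMan& gridManager, Estimate& estimate, double portion = 0.7)
{
  // Wrap the grid manager; AdaptiveGrid listens to its refinement signal and
  // forwards mark/adapt/flushMarks calls to it (see lines 400-446 above).
  Kaskade::Bridge::AdaptiveGrid<GridMan, Estimate> grid(gridManager);

  // Mark the cells carrying roughly the given portion of the estimated error
  // (bulk criterion), then refine if anything was marked.
  grid.mark(estimate, portion);
  int const nMarked = grid.getNMarked();
  if (nMarked > 0)
    grid.adapt();        // calls preAdapt(), adapt(), postAdapt() on the GridManager
  else
    grid.flushMarks();

  return nMarked;
}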