Commit 0f388379 authored by René Heß

Merge branch 'master' into 'bugfix/newton-time-tracking'

# Conflicts:
#   dune/pdelab/solver/newton.hh
#   dune/pdelab/solver/terminate.hh
parents f4b8bd21 08b9965e
Pipeline #31310 passed with stage in 32 minutes
@@ -168,6 +168,13 @@
#include <dune/pdelab/backend/istl/blockmatrixdiagonal.hh>
#include <dune/pdelab/backend/istl/dunefunctions.hh>
#include <dune/pdelab/backend/istl/istlsolverbackend.hh>
#include <dune/pdelab/backend/istl/matrixfree/assembledblockjacobipreconditioner.hh>
#include <dune/pdelab/backend/istl/matrixfree/backends.hh>
#include <dune/pdelab/backend/istl/matrixfree/checklopinterface.hh>
#include <dune/pdelab/backend/istl/matrixfree/blocksorpreconditioner.hh>
#include <dune/pdelab/backend/istl/matrixfree/gridoperatorpreconditioner.hh>
#include <dune/pdelab/backend/istl/matrixfree/iterativeblockjacobipreconditioner.hh>
#include <dune/pdelab/backend/istl/matrixfree/solverstatistics.hh>
#include <dune/pdelab/backend/istl/matrixhelpers.hh>
#include <dune/pdelab/backend/istl/geneo/subdomainbasis.hh>
#include <dune/pdelab/backend/istl/geneo/localoperator_ovlp_region.hh>
@@ -58,6 +58,19 @@ X sarkisPartitionOfUnity(const GFS& gfs, LFS& lfs, const CC& cc, int cells_x, in
int my_rank = gfs.gridView().comm().rank();
const int dim = 2;
// throw exception if the setup is currently not supported by the Sarkis partition of unity
typedef typename GFS::Traits::GridView::Grid::ctype DF;
if (gfs.gridView().grid().dimension != dim)
DUNE_THROW(Dune::NotImplemented, "Currently, Sarkis partition of unity is only supported for 2 dimensional grids.");
if (GFS::Traits::FiniteElement::Traits::LocalBasisType::order() != 1)
DUNE_THROW(Dune::NotImplemented, "Currently, Sarkis partition of unity is only supported for polynomial bases of order 1");
// we assume the grid is instantiated as a (0.0, 1.0) x (0.0, 1.0) grid, so we do an exact floating point comparison here
if (gfs.gridView().grid().domainSize() != Dune::FieldVector<DF, 2>(1.0))
DUNE_THROW(Dune::NotImplemented, "Currently, Sarkis partition of unity is only supported for the (0,1) x (0,1) unit square.");
X part_unity(gfs, 1);
for (auto it = gfs.gridView().template begin<0>(); it != gfs.gridView().template end<0>(); ++it) {
install(FILES
assembledblockjacobipreconditioner.hh
backends.hh
checklopinterface.hh
blocksorpreconditioner.hh
gridoperatorpreconditioner.hh
iterativeblockjacobipreconditioner.hh
solverstatistics.hh
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dune/pdelab/backend/istl/matrixfree)
// -*- tab-width: 2; indent-tabs-mode: nil -*-
// vi: set et ts=2 sw=2 sts=2:
#ifndef DUNE_PDELAB_BACKEND_ISTL_MATRIXFREE_ASSEMBLEDBLOCKJACOBIPRECONDITIONER_HH
#define DUNE_PDELAB_BACKEND_ISTL_MATRIXFREE_ASSEMBLEDBLOCKJACOBIPRECONDITIONER_HH
#if HAVE_EIGEN
#include<dune/grid/common/scsgmapper.hh>
#include <dune/pdelab/localoperator/blockdiagonalwrapper.hh>
#include<Eigen/Dense>
namespace Dune {
namespace PDELab {
namespace impl {
// Below we will assemble a block of the matrix as an Eigen matrix. This
// accumulation view makes sure that we apply the weights.
template<typename C>
class WeightedEigenMatrixAccumulationView
{
public :
typedef C Container;
typedef typename Container::Scalar value_type;
using size_type = typename Container::StorageIndex;
using weight_type = value_type;
const weight_type& weight()
{
return _weight;
}
auto data()
{
return _container.data();
}
const auto data() const
{
return _container.data();
}
auto& container()
{
return _container;
}
template <typename LFSV, typename LFSU>
void accumulate(const LFSV& lfsv, size_type i, const LFSU& lfsu, size_type j, value_type value)
{
_container(i,j) += _weight * value;
}
WeightedEigenMatrixAccumulationView(Container& container, weight_type weight)
: _container(container)
, _weight(weight)
{}
private :
Container& _container;
weight_type _weight;
};
}
/**\brief Local operator that can be used to create a partially matrix-free Jacobi preconditioner
*
* Partially matrix-free means the following: A matrix-based block Jacobi
* preconditioner would need to invert the block diagonal matrix D that
* comes from the block decomposition A=L+D+U into the lower triangular,
* block diagonal and upper triangular parts.
*
* Instead of assembling the block diagonal matrix D, inverting it and
* applying it to a vector in the Krylov solver (i.e. computing D^{-1}v),
* we never assemble D or D^{-1} and do the application in a matrix-free way.
*
* Since the inverse of a block diagonal matrix is just the block diagonal
* matrix of the inverses of the blocks, we need a way to apply the inverse
* of a single block (we denote a block by D_i). This local operator
* assembles each diagonal block and applies its inverse via an LU
* decomposition.
*
* In this sense the resulting preconditioner is partially matrix-free: we
* never assemble D as a global matrix, but we do assemble all the diagonal
* blocks D_i. For a completely matrix-free implementation have a look at
* the class IterativeBlockJacobiPreconditionerLocalOperator.
*
* On the technical side this operator calculates the LU decomposition
* of the diagonal blocks in the alpha_volume method. Later on the inverse
* can be applied through the jacobian_apply_volume methods.
*
* For examples see dune-pdelab/dune/pdelab/test/matrixfree/.
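*
* A minimal construction sketch (MyLOP, myLop, GFS and gfs are placeholders
* for a user-defined local operator, an instance of it, a grid function
* space type and an instance of it; they are not defined in this header):
* \code
* using WrappedLOP = BlockDiagonalLocalOperatorWrapper<MyLOP>;
* WrappedLOP wrappedLop(myLop);
* using PrecLOP = AssembledBlockJacobiPreconditionerLocalOperator<WrappedLOP, GFS, double>;
* PrecLOP precLop(wrappedLop, gfs);
* \endcode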
*/
template<class BlockDiagonalLocalOperator, class GridFunctionSpace, class DomainField>
class AssembledBlockJacobiPreconditionerLocalOperator
: public Dune::PDELab::FullVolumePattern
, public Dune::PDELab::LocalOperatorDefaultFlags
{
// Extract some types
using GridView = typename GridFunctionSpace::Traits::GridViewType;
using Grid = typename GridView::Traits::Grid;
using EntityType = typename Grid::template Codim<0>::Entity;
using value_type = DomainField;
using DiagonalBlock = ::Eigen::Matrix<value_type, ::Eigen::Dynamic, ::Eigen::Dynamic, ::Eigen::RowMajor>;
using PermutationIndices = ::Eigen::Matrix<int, ::Eigen::Dynamic, 1>;
using TupleType = std::tuple<DiagonalBlock, PermutationIndices>;
public :
// Since this class implements something like D^{-1}v for a diagonal
// block matrix D we will only have volume methods. The underlying local
// operator that describes the block diagonal might of course have
// skeleton or boundary parts.
static constexpr bool doPatternVolume = true;
static constexpr bool doAlphaVolume = true;
static constexpr bool isLinear = BlockDiagonalLocalOperator::isLinear;
/**\brief Constructor
*
* \param blockDiagonalLocalOperator A local operator that can be used to
* make a local assembly of a diagonal block through the
* <assembleLocalDiagonalBlock> function. You can create such a local
* operator by wrapping your local operator into a
* <BlockDiagonalLocalOperatorWrapper>
* \param gridFunctionSpace A grid function space
* \param verbose Controls the amount of output
*/
AssembledBlockJacobiPreconditionerLocalOperator(const BlockDiagonalLocalOperator& blockDiagonalLocalOperator,
const GridFunctionSpace& gridFunctionSpace,
const bool verbose=0)
: _blockDiagonalLocalOperator(blockDiagonalLocalOperator)
, _gridFunctionSpace(gridFunctionSpace)
, _mapper(gridFunctionSpace.gridView().grid())
, _precCache(_mapper.size())
, _verbose(verbose)
, _requireSetup(true)
{
}
// Before we can call the jacobian_apply methods we need to compute the
// LU decomposition of the diagonal block. We use the bool _requireSetup
// to make sure that we have computed this decomposition before we try to
// use it.
bool requireSetup()
{
return _requireSetup;
}
void setRequireSetup(bool requireSetup)
{
_requireSetup = requireSetup;
}
/** \brief Prepare partially matrix-free preconditioner
*
* This assembles the local block diagonal and computes an
* LU decomposition with partial pivoting.
*/
template<typename EG, typename LFSU, typename X, typename LFSV, typename Y>
void alpha_volume(const EG& eg, const LFSU& lfsu, const X& x, const LFSV& lfsv, Y& y) const
{
// Matrix for storing the LU decomposition of the block diagonal
const int size = lfsu.size();
assert(lfsv.size() == size);
// Assemble local block diagonal
DiagonalBlock diagonalBlock = DiagonalBlock::Constant(size, size, 0.0);
impl::WeightedEigenMatrixAccumulationView<DiagonalBlock> viewDiagonalBlock(diagonalBlock, y.weight());
assembleLocalDiagonalBlock(_blockDiagonalLocalOperator, eg, lfsu, x, lfsv, viewDiagonalBlock);
// Compute the LU-factorization directly inside the matrix
::Eigen::PartialPivLU<::Eigen::Ref<DiagonalBlock>> lu(diagonalBlock);
auto inversePermutationIndices = lu.permutationP().indices();
// We need the inverse of the permutation matrix that Eigen gives
PermutationIndices permutationIndices(size);
for(int i=0; i<inversePermutationIndices.size(); ++i)
permutationIndices[inversePermutationIndices[i]] = i;
// Fill up correct entry of preconditioner cache
std::size_t cache_idx = _mapper.index(eg.entity());
_precCache[cache_idx] = std::make_tuple(diagonalBlock, permutationIndices);
}
template<typename EG, typename LFSU, typename X, typename Z, typename LFSV, typename Y>
void jacobian_apply_volume(const EG& eg, const LFSU& lfsu, const X& x, const Z& z, const LFSV& lfsv, Y& y) const
{
// We don't need the linearization point as the preconditioner is
// already set up
jacobian_apply_volume(eg,lfsu,z,lfsv,y);
}
/**\brief Apply partially matrix-free preconditioner
*
* This computes D_i^{-1}z using the LU-decomposition from the
* preparation step.
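*
* Concretely, given the cached factorization of the diagonal block, the
* right-hand side z is first permuted according to the pivoting, then a
* forward solve with the unit lower triangle and a backward solve with the
* upper triangle are performed in the loops below.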
*/
template<typename EG, typename LFSU, typename Z, typename LFSV, typename Y>
void jacobian_apply_volume(const EG& eg, const LFSU& lfsu, const Z& z, const LFSV& lfsv, Y& y) const
{
assert(not _requireSetup);
// Get correct LU decomposition of the block diagonal from the cache
std::size_t cache_idx = _mapper.index(eg.entity());
const auto& cacheEntry = _precCache[cache_idx];
const auto& diagonalBlockLU = std::get<0>(cacheEntry);
const auto& permutationIndices = std::get<1>(cacheEntry);
// Apply the preconditioner
const int size = lfsu.size();
const auto& z_container = accessBaseContainer(z);
std::vector<value_type> ztmp_container(size);
auto& y_container = accessBaseContainer(y);
// Forward solve with lower triangle
for(int i=0; i<size; ++i){
value_type rhs(z_container[permutationIndices[i]]);
for(int j=0; j<i; ++j)
rhs -= diagonalBlockLU(i, j) * ztmp_container[j];
ztmp_container[i] = rhs; // L has ones on the diagonal
}
// Backward solve with upper triangle
for(int i=size-1; i>=0; i--){
value_type rhs(ztmp_container[i]);
for(int j=size-1; j>i; j--)
rhs -= diagonalBlockLU(i, j) * y_container[j];
y_container[i] = rhs/diagonalBlockLU(i,i);
}
}
private :
BlockDiagonalLocalOperator _blockDiagonalLocalOperator;
const GridFunctionSpace& _gridFunctionSpace;
Dune::LeafSingleCodimSingleGeomTypeMapper<Grid, 0> _mapper;
mutable std::vector<TupleType> _precCache;
const int _verbose;
bool _requireSetup;
};
} // namespace PDELab
} // namespace Dune
#endif // HAVE_EIGEN
#endif
// -*- tab-width: 2; indent-tabs-mode: nil -*-
// vi: set et ts=2 sw=2 sts=2:
#ifndef DUNE_PDELAB_BACKEND_ISTL_MATRIXFREE_BACKENDS_HH
#define DUNE_PDELAB_BACKEND_ISTL_MATRIXFREE_BACKENDS_HH
#include<dune/grid/common/rangegenerators.hh>
#include<dune/istl/preconditioner.hh>
#include<dune/istl/operators.hh>
#include<dune/istl/solvers.hh>
#include<dune/pdelab/backend/istl/matrixfree/gridoperatorpreconditioner.hh>
#include<dune/pdelab/gridoperator/fastdg.hh>
#include<dune/pdelab/backend/istl/matrixfree/blocksorpreconditioner.hh>
#include<dune/pdelab/backend/istl/matrixfree/checklopinterface.hh>
namespace Dune {
namespace PDELab {
namespace impl{
// Can be used to check if a local operator is a
// BlockSORPreconditionerLocalOperator
template <typename>
struct isBlockSORPreconditionerLocalOperator : std::false_type {};
template <typename JacobianLOP, typename BlockOffDiagonalLOP, typename GridFunctionSpace>
struct isBlockSORPreconditionerLocalOperator<BlockSORPreconditionerLocalOperator<JacobianLOP,
BlockOffDiagonalLOP,
GridFunctionSpace>>
: std::true_type {};
// Can be used to check if a grid operator is a FastDGGridOperator
template <typename>
struct isFastDGGridOperator : std::false_type {};
template<typename GFSU, typename GFSV, typename LOP,
typename MB, typename DF, typename RF, typename JF,
typename CU, typename CV>
struct isFastDGGridOperator<FastDGGridOperator<GFSU, GFSV, LOP, MB, DF, RF, JF, CU, CV >>
: std::true_type {};
}
/**\brief Sequential matrix-free solver backend
*
* This can be used with a combination of {CGSolver, BiCGSTABSolver,
* MINRESSolver} as a solver and a preconditioner grid operator built from
* one of the following local operators:
*
* - AssembledBlockJacobiPreconditionerLocalOperator for partially matrix-free Jacobi
* - IterativeBlockJacobiPreconditionerLocalOperator for fully matrix-free Jacobi
* - BlockSORPreconditionerLocalOperator for matrix-free SOR
*
* Note: If you use BlockSORPreconditionerLocalOperator you need to use
* FastDGGridOperator!
* \tparam GO Grid operator implementing the matrix-free operator application
* \tparam PrecGO Grid operator implementing matrix-free preconditioning
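*
* A minimal usage sketch (go, precgo, u, z and r are placeholders for
* already constructed grid operators and vectors; they are not defined
* here):
* \code
* ISTLBackend_SEQ_MatrixFree_Base<GO, PrecGO, Dune::CGSolver> backend(go, precgo);
* backend.setLinearizationPoint(u); // must be called before apply()
* backend.apply(z, r, 1e-10);
* \endcode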
*/
template<class GO, class PrecGO,
template<class> class Solver>
class ISTLBackend_SEQ_MatrixFree_Base
: public Dune::PDELab::SequentialNorm
, public Dune::PDELab::LinearResultStorage
{
using V = typename GO::Traits::Domain;
using W = typename GO::Traits::Range;
using Operator = Dune::PDELab::OnTheFlyOperator<V,W,GO>;
using SeqPrec = GridOperatorPreconditioner<PrecGO>;
public :
ISTLBackend_SEQ_MatrixFree_Base(const GO& go, const PrecGO& precgo,
unsigned maxiter=5000, int verbose=1)
: opa_(go), seqprec_(precgo)
, maxiter_(maxiter), verbose_(verbose)
{
// First let's break that down: The case we want to avoid is using SOR
// with a regular grid operator. We check for SOR combined with a non-fast
// grid operator and assert that this is not the case.
//
// For more information why this is not working have a look at the
// documentation of the class BlockSORPreconditionerLocalOperator
static_assert(not(impl::isBlockSORPreconditionerLocalOperator<
typename PrecGO::Traits::LocalAssembler::LocalOperator>::value
and not impl::isFastDGGridOperator<PrecGO>::value),
"If you use the BlockSORPreconditioner you need to use FastDGGridOperator!");
// We need to assert that the local operator uses the new interface and not the old one
static_assert(!decltype(Dune::PDELab::impl::hasOldLOPInterface(go.localAssembler().localOperator()))::value,
"\n"
"In order to use the matrix-free solvers your grid operator must follow the new\n"
"local operator interface for nonlinear jacobian applys, where the current\n"
"solution and the linearization point can have different types. This is best\n"
"explained by an example.\n\n"
"old: void jacobian_apply_volume (const EG& eg, const LFSU& lfsu, const X& x, const X& z, const LFSV& lfsv, Y& y) const {}\n"
"new: void jacobian_apply_volume (const EG& eg, const LFSU& lfsu, const X& x, const Z& z, const LFSV& lfsv, Y& y) const {}\n\n"
"Note that in the new interface the type of z is Z and doesn't have to be X.\n"
"You need to adjust all the nonlinear jacobian apply methods in your local operator\n"
"in a similar way.");
static_assert(!decltype(Dune::PDELab::impl::hasOldLOPInterface(precgo.localAssembler().localOperator()))::value,
"\n"
"In order to use the matrix-free solvers your grid operator must follow the new\n"
"local operator interface for nonlinear jacobian applys, where the current\n"
"solution and the linearization point can have different types. This is best\n"
"explained by an example.\n\n"
"old: void jacobian_apply_volume (const EG& eg, const LFSU& lfsu, const X& x, const X& z, const LFSV& lfsv, Y& y) const {}\n"
"new: void jacobian_apply_volume (const EG& eg, const LFSU& lfsu, const X& x, const Z& z, const LFSV& lfsv, Y& y) const {}\n\n"
"Note that in the new interface the type of z is Z and doesn't have to be X.\n"
"You need to adjust all the nonlinear jacobian apply methods in your local operator\n"
"in a similar way.");
}
void apply(V& z, W& r, typename V::ElementType reduction)
{
Solver<V> solver(opa_,seqprec_,reduction,maxiter_,verbose_);
Dune::InverseOperatorResult stat;
solver.apply(z, r, stat);
res.converged = stat.converged;
res.iterations = stat.iterations;
res.elapsed = stat.elapsed;
res.reduction = stat.reduction;
res.conv_rate = stat.conv_rate;
}
//! Set position of jacobian.
//! Must be called before apply().
void setLinearizationPoint(const V& u)
{
opa_.setLinearizationPoint(u);
seqprec_.setLinearizationPoint(u);
}
constexpr static bool isMatrixFree {true};
private :
Operator opa_;
SeqPrec seqprec_;
unsigned maxiter_;
int verbose_;
}; // end class ISTLBackend_SEQ_MatrixFree_Base
// A matrix-based SOR backend for comparison with the matrix-free version
class ISTLBackend_SEQ_BCGS_SOR
: public ISTLBackend_SEQ_Base<Dune::SeqSOR, Dune::BiCGSTABSolver>
{
public:
/*! \brief make a linear solver object
\param[in] maxiter_ maximum number of iterations to do
\param[in] verbose_ print messages if true
*/
explicit ISTLBackend_SEQ_BCGS_SOR (unsigned maxiter_=5000, int verbose_=1)
: ISTLBackend_SEQ_Base<Dune::SeqSOR, Dune::BiCGSTABSolver>(maxiter_, verbose_)
{}
};
} // namespace PDELab
} // namespace Dune
#endif
// -*- tab-width: 2; indent-tabs-mode: nil -*-
// vi: set et ts=2 sw=2 sts=2:
#ifndef DUNE_PDELAB_BACKEND_ISTL_MATRIXFREE_BLOCKSORPRECONDITIONER_HH
#define DUNE_PDELAB_BACKEND_ISTL_MATRIXFREE_BLOCKSORPRECONDITIONER_HH
namespace Dune {
namespace PDELab {
/** \brief Turn a matrix-free Jacobi-type local preconditioner into a SOR one
*
* This preconditioner assumes that we have a discretization that leads to
* a block structure, e.g. DG methods. It can be used to do a matrix-free
* block-SOR preconditioner step.
*
* Given a linear system of equations Ax=b, a preconditioner step solves
* Wv=d for a given vector d and an approximation $W \approx A$.
*
* Using the block decomposition $A=D+L+U$, block-SOR can be implemented in
* the following way (approximately solving Av=d):
*
* for element T_i, i=1,...,m do:
* (1) a_i = d_i - \sum_{j<i} A_{ij} v_j^{(k)} - \sum_{j>i} A_{ij} v_j^{(k-1)}
* (2) Solve D_i b_i = a_i
* (3) Update v_i^{(k)} = (1-\omega) v_i^{(k-1)} + \omega b_i
*
* Here d_i, a_i, ... denote a local vector from the global block vector
* and A_{ij} is the block (i,j) of the global block matrix A.
*
* See the article "Matrix-free multigrid block-preconditioners for higher
* order discontinuous Galerkin discretisations" by P. Bastian, E. Mueller,
* S. Muething and M. Piatkowski.
*
* \tparam JacobianLOP Type of the Jacobi preconditioner local operator
* \tparam BlockOffDiagonalLOP Type of the local operator for assembling the block off diagonal
* \tparam GridFunctionSpace The type of grid function space.
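*
* A minimal construction sketch (jacobianLop, offDiagonalLop and gfs are
* placeholders for an already constructed Jacobi preconditioner local
* operator, an off-diagonal local operator and a grid function space):
* \code
* using SORLOP = BlockSORPreconditionerLocalOperator<JacobianLOP, BlockOffDiagonalLOP, GFS>;
* SORLOP sorLop(jacobianLop, offDiagonalLop, gfs, 1.0);
* \endcode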
*/
template<typename JacobianLOP, typename BlockOffDiagonalLOP, typename GridFunctionSpace>
class BlockSORPreconditionerLocalOperator
: public Dune::PDELab::FullVolumePattern
, public Dune::PDELab::LocalOperatorDefaultFlags
{
using value_type = typename GridFunctionSpace::Traits::GridView::Grid::ctype;
using LocalVector = Dune::PDELab::LocalVector<value_type>;
using W = typename LocalVector::WeightedAccumulationView;
public :
// A SOR preconditioner includes non-diagonal blocks so we need volume
// and skeleton methods. We do two-sided assembly!
static constexpr bool doPatternVolume = true;
static constexpr bool doPatternSkeleton = true;
static constexpr bool doPatternVolumePostSkeleton = true;
static constexpr bool doAlphaVolume = true;
static constexpr bool doAlphaSkeleton = true;
static constexpr bool doAlphaVolumePostSkeleton = true;
static constexpr bool doSkeletonTwoSided = true;
static constexpr bool isLinear = JacobianLOP::isLinear;
BlockSORPreconditionerLocalOperator(JacobianLOP& jacobianlop,
BlockOffDiagonalLOP& blockOffDiagonalLOP,
const GridFunctionSpace& gridFunctionSpace,
const double omega=1.0)
: _jacobianlop(jacobianlop)
, _blockOffDiagonalLOP(blockOffDiagonalLOP)
, _omega(omega)
, _a_i(gridFunctionSpace.ordering().maxLocalSize())
, _b_i(gridFunctionSpace.ordering().maxLocalSize())
{}
/** We use a Jacobi preconditioner that requires a setup. The setup will
* be done in the alpha-volume method and later be used during the apply
* methods.
*/
bool requireSetup()
{
return _jacobianlop.requireSetup();
}
void setRequireSetup(bool v)
{
_jacobianlop.setRequireSetup(v);
}
//! Prepare underlying diagonal block preconditioner
template<typename EG, typename LFSU, typename X, typename LFSV, typename Y>
void alpha_volume(const EG& eg, const LFSU& lfsu, const X& x, const LFSV& lfsv, Y& y) const
{
_jacobianlop.alpha_volume(eg,lfsu,x,lfsv,y);
}
//! Provide this method, but it actually does nothing
template<typename IG, typename LFSU, typename X, typename LFSV, typename Y>
void alpha_skeleton(const IG& ig,
const LFSU& lfsu_s, const X& x_s, const LFSV& lfsv_s,
const LFSU& lfsu_n, const X& x_n, const LFSV& lfsv_n,
Y& y_s, Y& y_n) const
{}
//! Provide this method, but it actually does nothing
template<typename EG, typename LFSU, typename X, typename LFSV, typename Y>
void alpha_volume_post_skeleton(const EG& eg, const LFSU& lfsu, const X& x, const LFSV& lfsv, Y& y) const
{}
//! Linear operator application, volume terms
template<typename EG, typename LFSU, typename X, typename LFSV, typename Y>
void jacobian_apply_volume(const EG& eg,
const LFSU& lfsu, const X& x,
const LFSV& lfsv, Y& y) const
{
//====================================
// How does this preconditioner work
//====================================
//
// When jacobian_apply is called on the grid operator the assembly
// machinery will iterate over the grid. Since we have set
// doSkeletonTwoSided to true, the order of methods will be:
//
// jacobian_apply_volume
// jacobian_apply_skeleton for each intersection
// jacobian_apply_volume_post_skeleton
//
// In the volume part we set _a_i to zero. During the skeleton part we
// apply the off-diagonal blocks to the old and new solution, step (1)