Commit 4ad19e8e authored by Oliver Sander's avatar Oliver Sander

moving the multigrid transfer stuff into my application to get them out of the way

[[Imported from SVN: r4460]]
parent ebf2f456
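multigridtransfer.cc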
// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
// vi: set et ts=4 sw=2 sts=2:
#include <dune/common/fvector.hh>
#include <dune/istl/matrixindexset.hh>
#include <dune/disc/shapefunctions/lagrangeshapefunctions.hh>
template<class DiscFuncType>
template<class FunctionSpaceType>
void Dune::MultiGridTransfer<DiscFuncType>::setup(const FunctionSpaceType& coarseFSpace,
const FunctionSpaceType& fineFSpace)
{
int cL = coarseFSpace.level();
int fL = fineFSpace.level();
if (fL != cL+1)
DUNE_THROW(Exception, "The two function spaces don't belong to consecutive levels!");
typedef typename FunctionSpaceType::GridType GridType;
const GridType& grid = coarseFSpace.grid();
if (&grid != &(fineFSpace.grid()))
DUNE_THROW(Exception, "The two function spaces don't belong to the same grid!");
int rows = fineFSpace.size();
int cols = coarseFSpace.size();
// Make identity matrix
MatrixBlock identity(0);
for (int i=0; i<blocksize; i++)
identity[i][i] = 1;
//
OperatorType mat(rows, cols, OperatorType::random);
mat = 0;
typedef typename GridType::template Codim<0>::LevelIterator ElementIterator;
typedef typename FunctionSpaceType::BaseFunctionSetType BaseFunctionSetType;
ElementIterator cIt = grid.template lbegin<0>(cL);
ElementIterator cEndIt = grid.template lend<0>(cL);
// ///////////////////////////////////////////
// Determine the indices present in the matrix
// /////////////////////////////////////////////////
MatrixIndexSet indices(rows, cols);
for (; cIt != cEndIt; ++cIt) {
const BaseFunctionSetType& coarseBaseSet = coarseFSpace.getBaseFunctionSet( *cIt );
const int numCoarseBaseFct = coarseBaseSet.numBaseFunctions();
typedef typename GridType::template Codim<0>::Entity EntityType;
typedef typename EntityType::HierarchicIterator HierarchicIterator;
HierarchicIterator fIt = cIt->hbegin(fL);
HierarchicIterator fEndIt = cIt->hend(fL);
for (; fIt != fEndIt; ++fIt) {
if (fIt->level()==cIt->level())
continue;
const BaseFunctionSetType& fineBaseSet = fineFSpace.getBaseFunctionSet( *fIt );
const int numFineBaseFct = fineBaseSet.numBaseFunctions();
for (int i=0; i<numCoarseBaseFct; i++) {
int globalCoarse = coarseFSpace.mapToGlobal(*cIt, i);
for (int j=0; j<numFineBaseFct; j++) {
int globalFine = fineFSpace.mapToGlobal(*fIt, j);
FieldVector<double, GridType::dimension> local = cIt->geometry().local(fIt->geometry()[j]);
// Evaluate coarse grid base function
typename FunctionSpaceType::RangeType value;
FieldVector<int, 0> diffVariable;
coarseBaseSet.evaluate(i, diffVariable, local, value);
if (value[0] > 0.001)
indices.add(globalFine, globalCoarse);
}
}
}
}
indices.exportIdx(mat);
// /////////////////////////////////////////////
// Compute the matrix
// /////////////////////////////////////////////
cIt = grid.template lbegin<0>(cL);
for (; cIt != cEndIt; ++cIt) {
const BaseFunctionSetType& coarseBaseSet = coarseFSpace.getBaseFunctionSet( *cIt );
const int numCoarseBaseFct = coarseBaseSet.numBaseFunctions();
typedef typename GridType::template Codim<0>::Entity EntityType;
typedef typename EntityType::HierarchicIterator HierarchicIterator;
HierarchicIterator fIt = cIt->hbegin(fL);
HierarchicIterator fEndIt = cIt->hend(fL);
for (; fIt != fEndIt; ++fIt) {
if (fIt->level()==cIt->level())
continue;
const BaseFunctionSetType& fineBaseSet = fineFSpace.getBaseFunctionSet( *fIt );
const int numFineBaseFct = fineBaseSet.numBaseFunctions();
for (int i=0; i<numCoarseBaseFct; i++) {
int globalCoarse = coarseFSpace.mapToGlobal(*cIt, i);
for (int j=0; j<numFineBaseFct; j++) {
int globalFine = fineFSpace.mapToGlobal(*fIt, j);
// Evaluate coarse grid base function at the location of the fine grid dof
// first determine local fine grid dof position
FieldVector<double, GridType::dimension> local = cIt->geometry().local(fIt->geometry()[j]);
// Evaluate coarse grid base function
typename FunctionSpaceType::RangeType value;
FieldVector<int, 0> diffVariable;
coarseBaseSet.evaluate(i, diffVariable, local, value);
// The following conditional is a hack: evaluating the coarse
// grid base function will often return 0. However, we don't
// want explicit zero entries in our prolongation matrix. Since
// the whole code works for P1 elements only anyway, we know
// that the value can only be 0, 0.5, or 1. Thus testing for nonzero
// by testing > 0.001 is safe.
if (value[0] > 0.001) {
MatrixBlock matValue = identity;
matValue *= value[0];
mat[globalFine][globalCoarse] = matValue;
}
}
}
}
}
matrix_ = mat;
}
template<class DiscFuncType>
template<class GridType>
void Dune::MultiGridTransfer<DiscFuncType>::setup(const GridType& grid, int cL, int fL)
{
if (fL != cL+1)
DUNE_THROW(Exception, "The two function spaces don't belong to consecutive levels!");
const int dim = GridType::dimension;
const typename GridType::Traits::LevelIndexSet& coarseIndexSet = grid.levelIndexSet(cL);
const typename GridType::Traits::LevelIndexSet& fineIndexSet = grid.levelIndexSet(fL);
int rows = grid.size(fL, dim);
int cols = grid.size(cL, dim);
// Make identity matrix
MatrixBlock identity(0);
for (int i=0; i<blocksize; i++)
identity[i][i] = 1;
//
OperatorType mat(rows, cols, OperatorType::random);
mat = 0;
typedef typename GridType::template Codim<0>::LevelIterator ElementIterator;
ElementIterator cIt = grid.template lbegin<0>(cL);
ElementIterator cEndIt = grid.template lend<0>(cL);
// ///////////////////////////////////////////
// Determine the indices present in the matrix
// /////////////////////////////////////////////////
MatrixIndexSet indices(rows, cols);
for (; cIt != cEndIt; ++cIt) {
const LagrangeShapeFunctionSet<double, double, dim>& coarseBaseSet
= Dune::LagrangeShapeFunctions<double, double, dim>::general(cIt->geometry().type(), 1);
const int numCoarseBaseFct = coarseBaseSet.size();
typedef typename GridType::template Codim<0>::Entity EntityType;
typedef typename EntityType::HierarchicIterator HierarchicIterator;
HierarchicIterator fIt = cIt->hbegin(fL);
HierarchicIterator fEndIt = cIt->hend(fL);
for (; fIt != fEndIt; ++fIt) {
if (fIt->level()==cIt->level())
continue;
const typename EntityType::Geometry& fGeometryInFather = fIt->geometryInFather();
const LagrangeShapeFunctionSet<double, double, dim>& fineBaseSet
= Dune::LagrangeShapeFunctions<double, double, dim>::general(fIt->geometry().type(), 1);
const int numFineBaseFct = fineBaseSet.size();
for (int i=0; i<numCoarseBaseFct; i++) {
int globalCoarse = coarseIndexSet.template subIndex<dim>(*cIt,i);
for (int j=0; j<numFineBaseFct; j++) {
int globalFine = fineIndexSet.template subIndex<dim>(*fIt,j);
FieldVector<double, dim> local = fGeometryInFather.global(fineBaseSet[j].position());
// Evaluate coarse grid base function
double value = coarseBaseSet[i].evaluateFunction(0, local);
if (value > 0.001)
indices.add(globalFine, globalCoarse);
}
}
}
}
indices.exportIdx(mat);
// /////////////////////////////////////////////
// Compute the matrix
// /////////////////////////////////////////////
cIt = grid.template lbegin<0>(cL);
for (; cIt != cEndIt; ++cIt) {
const LagrangeShapeFunctionSet<double, double, dim>& coarseBaseSet
= Dune::LagrangeShapeFunctions<double, double, dim>::general(cIt->geometry().type(), 1);
const int numCoarseBaseFct = coarseBaseSet.size();
typedef typename GridType::template Codim<0>::Entity EntityType;
typedef typename EntityType::HierarchicIterator HierarchicIterator;
HierarchicIterator fIt = cIt->hbegin(fL);
HierarchicIterator fEndIt = cIt->hend(fL);
for (; fIt != fEndIt; ++fIt) {
if (fIt->level()==cIt->level())
continue;
const typename EntityType::Geometry& fGeometryInFather = fIt->geometryInFather();
const LagrangeShapeFunctionSet<double, double, dim>& fineBaseSet
= Dune::LagrangeShapeFunctions<double, double, dim>::general(fIt->geometry().type(), 1);
const int numFineBaseFct = fineBaseSet.size();
for (int i=0; i<numCoarseBaseFct; i++) {
int globalCoarse = coarseIndexSet.template subIndex<dim>(*cIt,i);
for (int j=0; j<numFineBaseFct; j++) {
int globalFine = fineIndexSet.template subIndex<dim>(*fIt,j);
// Evaluate coarse grid base function at the location of the fine grid dof
// first determine local fine grid dof position
FieldVector<double, dim> local = fGeometryInFather.global(fineBaseSet[j].position());
// Evaluate coarse grid base function
double value = coarseBaseSet[i].evaluateFunction(0, local);
// The following conditional is a hack: evaluating the coarse
// grid base function will often return 0. However, we don't
// want explicit zero entries in our prolongation matrix. Since
// the whole code works for P1 elements only anyway, we know
// that the value can only be 0, 0.5, or 1. Thus testing for nonzero
// by testing > 0.001 is safe.
if (value > 0.001) {
MatrixBlock matValue = identity;
matValue *= value;
mat[globalFine][globalCoarse] = matValue;
}
}
}
}
}
matrix_ = mat;
}
// Multiply the prolongation matrix by the vector f from the right
template<class DiscFuncType>
void Dune::MultiGridTransfer<DiscFuncType>::prolong(const DiscFuncType& f, DiscFuncType& t) const
{
if (f.size() != matrix_.M())
DUNE_THROW(Exception, "Number of entries in the coarse grid vector is not equal "
<< "to the number of columns of the interpolation matrix!");
t.resize(matrix_.N());
typedef typename DiscFuncType::Iterator Iterator;
typedef typename DiscFuncType::ConstIterator ConstIterator;
typedef typename OperatorType::row_type RowType;
typedef typename RowType::ConstIterator ColumnIterator;
Iterator tIt = t.begin();
ConstIterator fIt = f.begin();
for(int rowIdx=0; rowIdx<matrix_.N(); rowIdx++) {
const RowType& row = matrix_[rowIdx];
*tIt = 0.0;
ColumnIterator cIt = row.begin();
ColumnIterator cEndIt = row.end();
for(; cIt!=cEndIt; ++cIt) {
cIt->umv(f[cIt.index()], *tIt);
}
++tIt;
}
}
// Multiply the transpose of the prolongation matrix by the vector f from the right
template<class DiscFuncType>
void Dune::MultiGridTransfer<DiscFuncType>::restrict (const DiscFuncType & f, DiscFuncType& t) const
{
if (f.size() != matrix_.N())
DUNE_THROW(Exception, "Fine grid vector has " << f.size() << " entries "
<< "but the interpolation matrix has " << matrix_.N() << " rows!");
t.resize(matrix_.M());
t = 0;
typedef typename DiscFuncType::Iterator Iterator;
typedef typename DiscFuncType::ConstIterator ConstIterator;
typedef typename OperatorType::row_type RowType;
typedef typename RowType::ConstIterator ColumnIterator;
Iterator tIt = t.begin();
ConstIterator fIt = f.begin();
for (int rowIdx=0; rowIdx<matrix_.N(); rowIdx++) {
const RowType& row = matrix_[rowIdx];
ColumnIterator cIt = row.begin();
ColumnIterator cEndIt = row.end();
for(; cIt!=cEndIt; ++cIt) {
cIt->umtv(f[rowIdx], t[cIt.index()]);
}
}
}
// Restrict a bitfield: a coarse grid dof gets set if any fine grid dof in its support is set
template<class DiscFuncType>
void Dune::MultiGridTransfer<DiscFuncType>::restrict (const Dune::BitField& f, Dune::BitField& t) const
{
if (f.size() != (unsigned int)matrix_.N())
DUNE_THROW(Exception, "Fine grid bitfield has " << f.size() << " entries "
<< "but the interpolation matrix has " << matrix_.N() << " rows!");
t.resize(matrix_.M());
t.unsetAll();
typedef typename OperatorType::row_type RowType;
typedef typename RowType::ConstIterator ColumnIterator;
for (int rowIdx=0; rowIdx<matrix_.N(); rowIdx++) {
if (!f[rowIdx])
continue;
const RowType& row = matrix_[rowIdx];
ColumnIterator cIt = row.begin();
ColumnIterator cEndIt = row.end();
for(; cIt!=cEndIt; ++cIt)
t[cIt.index()] = true;
}
}
template<class DiscFuncType>
void Dune::MultiGridTransfer<DiscFuncType>::
galerkinRestrict(const OperatorType& fineMat, OperatorType& coarseMat) const
{
// ////////////////////////
// Nonsymmetric case
// ////////////////////////
typedef typename OperatorType::row_type RowType;
typedef typename RowType::Iterator ColumnIterator;
typedef typename RowType::ConstIterator ConstColumnIterator;
// Clear coarse matrix
coarseMat = 0;
// Loop over all rows of the stiffness matrix
for (int v=0; v<fineMat.N(); v++) {
const RowType& row = fineMat[v];
// Loop over all columns of the stiffness matrix
ConstColumnIterator m = row.begin();
ConstColumnIterator mEnd = row.end();
for (; m!=mEnd; ++m) {
int w = m.index();
// Loop over all coarse grid vectors iv that have v in their support
ConstColumnIterator im = matrix_[v].begin();
ConstColumnIterator imEnd = matrix_[v].end();
for (; im!=imEnd; ++im) {
int iv = im.index();
// Loop over all coarse grid vectors jv that have w in their support
ConstColumnIterator jm = matrix_[w].begin();
ConstColumnIterator jmEnd = matrix_[w].end();
for (; jm!=jmEnd; ++jm) {
int jv = jm.index();
MatrixBlock& cm = coarseMat[iv][jv];
// Compute cm = im^T * m * jm
for (int i=0; i<blocksize; i++) {
for (int j=0; j<blocksize; j++) {
for (int k=0; k<blocksize; k++) {
for (int l=0; l<blocksize; l++)
cm[i][j] += (*im)[k][i] * (*m)[k][l] * (*jm)[l][j];
}
}
}
}
}
}
}
}
// Set the occupation (sparsity pattern) of the Galerkin-restricted coarse stiffness matrix
template<class DiscFuncType>
void Dune::MultiGridTransfer<DiscFuncType>::
galerkinRestrictSetOccupation(const OperatorType& fineMat, OperatorType& coarseMat) const
{
// ////////////////////////
// Nonsymmetric case
// ////////////////////////
typedef typename OperatorType::row_type RowType;
typedef typename RowType::ConstIterator ConstColumnIterator;
// Create index set
Dune::MatrixIndexSet indices(matrix_.M(), matrix_.M());
// Loop over all rows of the fine matrix
for (int v=0; v<fineMat.N(); v++) {
const RowType& row = fineMat[v];
// Loop over all columns of the fine matrix
ConstColumnIterator m = row.begin();
ConstColumnIterator mEnd = row.end();
for (; m!=mEnd; ++m) {
int w = m.index();
// Loop over all coarse grid vectors iv that have v in their support
ConstColumnIterator im = matrix_[v].begin();
ConstColumnIterator imEnd = matrix_[v].end();
for (; im!=imEnd; ++im) {
int iv = im.index();
// Loop over all coarse grid vectors jv that have w in their support
ConstColumnIterator jm = matrix_[w].begin();
ConstColumnIterator jmEnd = matrix_[w].end();
for (; jm!=jmEnd; ++jm)
indices.add(iv, jm.index());
}
}
}
indices.exportIdx(coarseMat);
}
/***************************************************************************/
/***************************************************************************/
/* The following functions are the deprecated ones for DiscFuncArrays */
/***************************************************************************/
/***************************************************************************/
template<class DiscFuncType>
void Dune::MultiGridTransfer<DiscFuncType>::prolongDFA(const DiscFuncType& f, DiscFuncType& t) const
{
assert(t.getFunctionSpace().size() == matrix_.N());
assert(f.getFunctionSpace().size() == matrix_.M());
typedef typename DiscFuncType::DofIteratorType Iterator;
typedef typename DiscFuncType::ConstDofIteratorType ConstIterator;
typedef typename OperatorType::row_type RowType;
typedef typename RowType::ConstIterator ConstColumnIterator;
Iterator tIt = t.dbegin();
ConstIterator fIt = f.dbegin();
for(int row=0; row<matrix_.N(); row++) {
*tIt = 0.0;
ConstColumnIterator cIt = matrix_[row].begin();
ConstColumnIterator cEndIt = matrix_[row].end();
for(; cIt!=cEndIt; ++cIt)
*tIt += *cIt * fIt[cIt.index()];
++tIt;
}
}
template<class DiscFuncType>
void Dune::MultiGridTransfer<DiscFuncType>::restrictDFA(const DiscFuncType& f, DiscFuncType& t) const
{
assert(f.getFunctionSpace().size() == matrix_.N());
assert(t.getFunctionSpace().size() == matrix_.M());
typedef typename DiscFuncType::DofIteratorType IteratorType;
typedef typename DiscFuncType::ConstDofIteratorType ConstIteratorType;
typedef typename OperatorType::row_type RowType;
typedef typename RowType::ConstIterator ConstColumnIterator;
t.clear();
IteratorType tIt = t.dbegin();
ConstIteratorType fIt = f.dbegin();
for (int row=0; row<matrix_.N(); row++) {
ConstColumnIterator cIt = matrix_[row].begin();
ConstColumnIterator cEndIt = matrix_[row].end();
for(; cIt!=cEndIt; ++cIt)
tIt[cIt.index()] += fIt[row] * (*cIt);
}
}
template<class DiscFuncType>
Dune::SparseRowMatrix<double> Dune::MultiGridTransfer<DiscFuncType>::
galerkinRestrict(const Dune::SparseRowMatrix<double>& fineMat) const
{
typedef typename OperatorType::row_type RowType;
typedef typename RowType::ConstIterator ConstColumnIterator;
typedef typename SparseRowMatrix<double>::ColumnIterator ColumnIterator;
SparseRowMatrix<double> result(matrix_.M(), matrix_.M() , fineMat.numNonZeros());
for (int rowIdx=0; rowIdx<fineMat.rows(); rowIdx++) {
ColumnIterator cIt = const_cast<SparseRowMatrix<double>&>(fineMat).template rbegin(rowIdx);
ColumnIterator cEndIt = const_cast<SparseRowMatrix<double>&>(fineMat).template rend(rowIdx);
for(; cIt!=cEndIt; ++cIt) {
double mvalue = *cIt;
const RowType& row = matrix_[rowIdx];
ConstColumnIterator tciIt = row.begin();
ConstColumnIterator tciEndIt = row.end();
for (; tciIt!=tciEndIt; ++tciIt) {
double fac = mvalue * ((*tciIt)[0][0]);
ConstColumnIterator tcjIt = matrix_[cIt.col()].begin();
ConstColumnIterator tcjEndIt = matrix_[cIt.col()].end();
for (; tcjIt!=tcjEndIt; ++tcjIt)
result.add( tcjIt.index(), tciIt.index(), fac * ((*tcjIt)[0][0]));
}
}
}
return result;
}
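multigridtransfer.hh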
// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
// vi: set et ts=4 sw=2 sts=2:
#ifndef DUNE_MULTIGRID_TRANSFER_HH
#define DUNE_MULTIGRID_TRANSFER_HH
#include <dune/istl/bcrsmatrix.hh>
#include <dune/common/fmatrix.hh>
#include <dune/common/bitfield.hh>
// for backward compatibility
#include <dune/fem/feop/spmatrix.hh>
namespace Dune {
/** \brief Restriction and prolongation operator for standard multigrid
*
* This class implements the prolongation and restriction operators
* used by standard multigrid solvers. Restriction and prolongation
* of block vectors are provided. Internally, the operator is stored
* as a BCRSMatrix. Therefore, the template parameter DiscFuncType
* has to comply with the ISTL requirements.
* \todo Currently only works for first-order Lagrangian elements!
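*
* A minimal usage sketch (grid, coarseLevel, and the vector names are
* placeholders; the vector type is assumed to be an ISTL BlockVector):
* \code
* typedef Dune::BlockVector<Dune::FieldVector<double,1> > VectorType;
* Dune::MultiGridTransfer<VectorType> transfer;
* transfer.setup(grid, coarseLevel, coarseLevel+1);   // operator between two consecutive levels
*
* VectorType coarseFunction, fineFunction;
* transfer.prolong(coarseFunction, fineFunction);     // interpolate to the fine level
* transfer.restrict(fineFunction, coarseFunction);    // restrict back to the coarse level
* \endcode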
*/
template<class DiscFuncType>
class MultiGridTransfer {
enum {blocksize = DiscFuncType::block_type::size};
/** \todo Should we extract the value type from DiscFuncType? */
typedef FieldMatrix<double, blocksize, blocksize> MatrixBlock;
public:
typedef BCRSMatrix<MatrixBlock> OperatorType;
virtual ~MultiGridTransfer() {}
/** \brief Sets up the operator between two given function spaces
*
* It is implicitly assumed that the two function spaces are nested.
*
* \param coarseFSpace The coarse grid function space
* \param fineFSpace The fine grid function space
*/
template <class FunctionSpaceType>
void setup(const FunctionSpaceType& coarseFSpace,
const FunctionSpaceType& fineFSpace);
/** \brief Sets up the operator between two P1 spaces
*
* Does not use any of the Freiburg fem function space classes.
* \param grid The grid
* \param cL The coarse grid level
* \param fL The fine grid level
*/
template <class GridType>
void setup(const GridType& grid, int cL, int fL);
/** \brief Restrict a function from the fine onto the coarse grid
*/
virtual void restrict (const DiscFuncType & f, DiscFuncType &t) const;
/** \brief Restrict a bitfield from the fine onto the coarse grid
*/
virtual void restrict (const BitField & f, BitField& t) const;
/** \brief Prolong a function from the coarse onto the fine grid
*/
virtual void prolong(const DiscFuncType& f, DiscFuncType &t) const;
/** \brief Galerkin assemble a coarse stiffness matrix
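*
* Computes the coarse matrix as the triple product coarseMat = P^T * fineMat * P,
* where P is the prolongation matrix stored in this class.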
*/
virtual void galerkinRestrict(const OperatorType& fineMat, OperatorType& coarseMat) const;
/** \brief Set Occupation of Galerkin restricted coarse stiffness matrix
*
* Sets the occupation (sparsity pattern) of the Galerkin-restricted coarse
* matrix. Call this before galerkinRestrict to ensure all nonzeroes are present.
* \param fineMat The fine level matrix
* \param coarseMat The coarse level matrix
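*
* A short sketch of the intended call order (fineMatrix and coarseMatrix
* stand for assembled OperatorType objects):
* \code
* transfer.galerkinRestrictSetOccupation(fineMatrix, coarseMatrix);  // allocate the sparsity pattern
* transfer.galerkinRestrict(fineMatrix, coarseMatrix);               // then compute the entries
* \endcode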
*/
void galerkinRestrictSetOccupation(const OperatorType& fineMat, OperatorType& coarseMat) const;
/** \brief Direct access to the operator matrix, if you absolutely want it! */
virtual const OperatorType& getMatrix() const {return matrix_;}
/** \brief Galerkin assemble a coarse stiffness matrix
\deprecated Only exists for backward compatibility
*/
SparseRowMatrix<double> galerkinRestrict(const SparseRowMatrix<double>& fineMat) const;
/** \brief Restrict a DiscFuncArray from the fine onto the coarse grid
\deprecated Only exists for backward compatibility
*/
void restrictDFA(const DiscFuncType& f, DiscFuncType &t) const;
/** \brief Prolong a DiscFuncArray from the coarse onto the fine grid
\deprecated Only exists for backward compatibility
*/
void prolongDFA(const DiscFuncType& f, DiscFuncType &t) const;
protected:
OperatorType matrix_;
};
} // end namespace Dune
#include "multigridtransfer.cc"
#endif
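truncatedtransfer.cc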
// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
// vi: set et ts=4 sw=2 sts=2:
template<class DiscFuncType>
void Dune::TruncatedMGTransfer<DiscFuncType>::prolong(const DiscFuncType& f, DiscFuncType& t,
const Dune::BitField& critical) const
{
if (f.size() != this->matrix_.M())
DUNE_THROW(Dune::Exception, "Number of entries in the coarse grid vector is not equal "
<< "to the number of columns of the prolongation matrix!");
if (((unsigned int)this->matrix_.N()*blocksize) != critical.size())
DUNE_THROW(Dune::Exception, "Number of entries in the critical is not equal "
<< "to the number of rows of the prolongation matrix!");
t.resize(this->matrix_.N());
typedef typename DiscFuncType::Iterator Iterator;
typedef typename DiscFuncType::ConstIterator ConstIterator;
typedef typename OperatorType::row_type RowType;
typedef typename RowType::ConstIterator ColumnIterator;
Iterator tIt = t.begin();
ConstIterator fIt = f.begin();
for(int rowIdx=0; rowIdx<this->matrix_.N(); rowIdx++) {
const RowType& row = this->matrix_[rowIdx];
*tIt = 0.0;
ColumnIterator cIt = row.begin();
ColumnIterator cEndIt = row.end();
for(; cIt!=cEndIt; ++cIt) {
// The following lines are a matrix-vector loop, but rows belonging
// to critical dofs are left out
for (int i=0; i<blocksize; i++) {
for (int j=0; j<blocksize; j++) {
if (!critical[rowIdx*blocksize + i])
(*tIt)[i] += (*cIt)[i][j] * f[cIt.index()][j];
}
}
}
++tIt;
}
}
template<class DiscFuncType>
void Dune::TruncatedMGTransfer<DiscFuncType>::restrict (const DiscFuncType & f, DiscFuncType& t,
const Dune::BitField& critical) const
{
if (f.size() != this->matrix_.N())
DUNE_THROW(Dune::Exception, "Fine grid vector has " << f.size() << " entries "
<< "but the interpolation matrix has " << this->matrix_.N() << " rows!");
if (((unsigned int)this->matrix_.N()*blocksize) != critical.size())
DUNE_THROW(Dune::Exception, "Number of entries in the critical is not equal "
<< "to the number of rows of the prolongation matrix!");
t.resize(this->matrix_.M());
t = 0;
typedef typename DiscFuncType::Iterator Iterator;
typedef typename DiscFuncType::ConstIterator ConstIterator;
typedef typename OperatorType::row_type RowType;
typedef typename RowType::ConstIterator ColumnIterator;
Iterator tIt = t.begin();
ConstIterator fIt = f.begin();
for (int rowIdx=0; rowIdx<this->matrix_.N(); rowIdx++) {
const RowType& row = this->matrix_[rowIdx];
ColumnIterator cIt = row.begin();
ColumnIterator cEndIt = row.end();
for(; cIt!=cEndIt; ++cIt) {
// The following lines are a transposed matrix-vector loop; contributions
// from critical fine grid dofs are left out
typename DiscFuncType::block_type& tEntry = t[cIt.index()];
for (int i=0; i<blocksize; i++) {
for (int j=0; j<blocksize; j++) {
if (!critical[rowIdx*blocksize + j])
tEntry[i] += (*cIt)[j][i] * f[rowIdx][j];
}
}
}
}
}
template<class DiscFuncType>
void Dune::TruncatedMGTransfer<DiscFuncType>::
galerkinRestrict(const OperatorType& fineMat, OperatorType& coarseMat,
const Dune::BitField& critical) const
{
if (recompute_ != NULL && recompute_->size() != (unsigned int)this->matrix_.M())
DUNE_THROW(Exception, "The size of the recompute_-bitfield doesn't match the "
<< "size of the coarse grid space!");
// ////////////////////////
// Nonsymmetric case
// ////////////////////////
typedef typename OperatorType::row_type RowType;
typedef typename RowType::Iterator ColumnIterator;
typedef typename RowType::ConstIterator ConstColumnIterator;
// Clear coarse matrix
if (recompute_ == NULL)
coarseMat = 0;
else {
for (int i=0; i<coarseMat.N(); i++) {
RowType& row = coarseMat[i];
// Loop over all columns of the stiffness matrix
ColumnIterator m = row.begin();
ColumnIterator mEnd = row.end();
for (; m!=mEnd; ++m) {
if ((*recompute_)[i] || (*recompute_)[m.index()])
*m = 0;
}
}
}
// Loop over all rows of the stiffness matrix
for (int v=0; v<fineMat.N(); v++) {
const RowType& row = fineMat[v];
// Loop over all columns of the stiffness matrix
ConstColumnIterator m = row.begin();
ConstColumnIterator mEnd = row.end();
for (; m!=mEnd; ++m) {
int w = m.index();
// Loop over all coarse grid vectors iv that have v in their support
ConstColumnIterator im = this->matrix_[v].begin();
ConstColumnIterator imEnd = this->matrix_[v].end();
for (; im!=imEnd; ++im) {
int iv = im.index();
// Loop over all coarse grid vectors jv that have w in their support
ConstColumnIterator jm = this->matrix_[w].begin();
ConstColumnIterator jmEnd = this->matrix_[w].end();
for (; jm!=jmEnd; ++jm) {
int jv = jm.index();
if (recompute_ && !((*recompute_)[iv]) && !((*recompute_)[jv]))
continue;
MatrixBlock& cm = coarseMat[iv][jv];
// Compute cm += im^T * m * jm, but omit the critical entries
for (int i=0; i<blocksize; i++) {
for (int j=0; j<blocksize; j++) {
double sum = 0.0;
for (int k=0; k<blocksize; k++) {
for (int l=0; l<blocksize; l++) {
// Truncated Multigrid: Omit coupling if at least
// one of the two vectors is critical
if (!critical[v*blocksize+k] && !critical[w*blocksize+l]) {
sum += (*im)[k][i] * (*m)[k][l] * (*jm)[l][j];
}
}
}
cm[i][j] += sum;
}
}
}
}
}
}
}
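truncatedtransfer.hh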
// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
// vi: set et ts=4 sw=2 sts=2:
#ifndef DUNE_TRUNCATED_MG_TRANSFER_HH
#define DUNE_TRUNCATED_MG_TRANSFER_HH
#include <dune/istl/bcrsmatrix.hh>
#include <dune/fem/transfer/multigridtransfer.hh>
namespace Dune {
/** \brief Restriction and prolongation operator for truncated multigrid
*
* This class provides prolongation and restriction operators for truncated
* multigrid. That means that you can explicitly switch off certain
* fine grid degrees-of-freedom. This often leads to better coarse
* grid corrections when treating obstacle problems. For an introduction
* to the theory of truncated multigrid see 'Adaptive Monotone Multigrid
* Methods for Nonlinear Variational Problems' by R. Kornhuber.
*
* Currently only works for first-order Lagrangian elements!
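*
* A minimal usage sketch (transfer, the vectors, and numFineDofs are
* placeholder names; the bitfield needs one bit per fine grid degree of
* freedom, i.e. the number of fine grid vector blocks times the block size):
* \code
* Dune::BitField critical;
* critical.resize(numFineDofs);
* critical.unsetAll();
* // ... set the bits of the dofs that shall be truncated ...
* transfer.restrict(fineResidual, coarseResidual, critical);
* transfer.prolong(coarseCorrection, fineCorrection, critical);
* \endcode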
*/
template<class DiscFuncType>
class TruncatedMGTransfer : public MultiGridTransfer<DiscFuncType> {
enum {blocksize = DiscFuncType::block_type::size};
/** \todo Should we extract the value type from DiscFuncType? */
typedef Dune::FieldMatrix<double, blocksize, blocksize> MatrixBlock;
public:
typedef Dune::BCRSMatrix<MatrixBlock> OperatorType;
/** \brief Default constructor */
TruncatedMGTransfer() : recompute_(NULL)
{}
/** \brief Restrict a fine grid function f and store the result in the coarse grid function t
*
* \param critical Has to contain an entry for each degree of freedom.
* Those dofs with a set bit are treated as critical.
*/
void restrict (const DiscFuncType & f, DiscFuncType &t, const Dune::BitField& critical) const;
/** \brief Restriction of MultiGridTransfer*/
using Dune::MultiGridTransfer< DiscFuncType >::restrict;
/** \brief Prolong a coarse grid function f and store the result in the fine grid function t
*
* \param critical Has to contain an entry for each degree of freedom.
* Those dofs with a set bit are treated as critical.
*/
void prolong(const DiscFuncType& f, DiscFuncType &t, const Dune::BitField& critical) const;
/** \brief Prolongation of MultiGridTransfer*/
using Dune::MultiGridTransfer< DiscFuncType >::prolong;
/** \brief Galerkin assemble a coarse stiffness matrix
*
* \param critical Has to contain an entry for each degree of freedom.
* Those dofs with a set bit are treated as critical.
*/
void galerkinRestrict(const OperatorType& fineMat, OperatorType& coarseMat,
const Dune::BitField& critical) const;
/** \brief Galerkin restriction of MultiGridTransfer*/
using Dune::MultiGridTransfer< DiscFuncType >::galerkinRestrict;
/** \brief Bitfield specifying a subset of dofs which need to be recomputed
* when doing Galerkin restriction
*
* If this pointer is NULL it has no effect. Otherwise it is expected
* to point to a bitfield with the size of the coarse function space.
* Then, when calling galerkinRestrict(), only those matrix entries which
* involve at least one dof which has a set bit get recomputed. Depending
* on the problem this can lead to considerable time savings.
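*
* A sketch of the intended use (recomputeBits, fineMatrix, coarseMatrix,
* and critical are placeholder names):
* \code
* Dune::BitField recomputeBits;
* recomputeBits.resize(coarseMatrix.N());   // one bit per coarse grid block
* recomputeBits.unsetAll();
* // ... set the bits of the coarse dofs whose matrix entries shall be recomputed ...
* transfer.recompute_ = &recomputeBits;
* transfer.galerkinRestrict(fineMatrix, coarseMatrix, critical);
* transfer.recompute_ = NULL;   // subsequent calls recompute everything again
* \endcode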
*/
const Dune::BitField* recompute_;
};
} // end namespace Dune
#include "truncatedtransfer.cc"
#endif