From e83857ff3666e916faec9feeb1d1337f057e4e00 Mon Sep 17 00:00:00 2001
From: Oliver Sander <sander@dune-project.org>
Date: Wed, 24 Nov 2010 17:37:08 +0000
Subject: [PATCH] Move the tests for the remote index sets from dune-istl to
 dune-common.

[[Imported from SVN: r6265]]
---
 configure.ac                               |   1 +
 dune/common/parallel/Makefile.am           |   2 +-
 dune/common/parallel/test/Makefile.am      |  52 ++
 dune/common/parallel/test/indexsettest.cc  |  76 +++
 dune/common/parallel/test/indicestest.cc   | 710 +++++++++++++++++++++
 dune/common/parallel/test/selectiontest.cc |  92 +++
 dune/common/parallel/test/syncertest.cc    | 358 +++++++++++
 7 files changed, 1290 insertions(+), 1 deletion(-)
 create mode 100644 dune/common/parallel/test/Makefile.am
 create mode 100644 dune/common/parallel/test/indexsettest.cc
 create mode 100644 dune/common/parallel/test/indicestest.cc
 create mode 100644 dune/common/parallel/test/selectiontest.cc
 create mode 100644 dune/common/parallel/test/syncertest.cc

diff --git a/configure.ac b/configure.ac
index a0c64e2fa..ccb829ecd 100644
--- a/configure.ac
+++ b/configure.ac
@@ -27,6 +27,7 @@ AC_CONFIG_FILES([Makefile
      dune/common/test/Makefile
      dune/common/exprtmpl/Makefile
      dune/common/parallel/Makefile
+     dune/common/parallel/test/Makefile
      doc/Makefile
      doc/devel/Makefile
      doc/layout/Makefile
diff --git a/dune/common/parallel/Makefile.am b/dune/common/parallel/Makefile.am
index d72ca0be3..f54d90396 100644
--- a/dune/common/parallel/Makefile.am
+++ b/dune/common/parallel/Makefile.am
@@ -1,6 +1,6 @@
 # $Id$
 
-SUBDIRS =
+SUBDIRS = test
 
 parallelincludedir = $(includedir)/dune/common/parallel
 parallelinclude_HEADERS = communicator.hh     \
diff --git a/dune/common/parallel/test/Makefile.am b/dune/common/parallel/test/Makefile.am
new file mode 100644
index 000000000..12c631c00
--- /dev/null
+++ b/dune/common/parallel/test/Makefile.am
@@ -0,0 +1,52 @@
+# $Id$
+
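+# The tests in MPITESTS are only built and run when MPI support was
+# detected at configure time.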
+if MPI
+  MPITESTS = indicestest indexsettest syncertest selectiontest
+endif
+
+# tests where the program to build and run are the same
+NORMALTESTS =
+
+# list of tests to run (indicestest is a special case)
+TESTS = $(NORMALTESTS) $(MPITESTS)
+
+# programs just to build when "make check" is used
+check_PROGRAMS = $(NORMALTESTS) $(MPITESTS)
+
+# define the programs
+
+if MPI
+  indicestest_SOURCES = indicestest.cc
+  indicestest_CPPFLAGS = $(AM_CPPFLAGS)		\
+	$(DUNEMPICPPFLAGS)
+  indicestest_LDFLAGS = $(AM_LDFLAGS)		\
+	$(DUNEMPILDFLAGS)
+  indicestest_LDADD =				\
+	$(DUNEMPILIBS)				\
+	$(LDADD)
+
+  selectiontest_SOURCES = selectiontest.cc
+  selectiontest_CPPFLAGS = $(AM_CPPFLAGS)	\
+	$(DUNEMPICPPFLAGS)
+  selectiontest_LDFLAGS = $(AM_LDFLAGS)		\
+	$(DUNEMPILDFLAGS)
+  selectiontest_LDADD =				\
+	$(DUNEMPILIBS)				\
+	$(LDADD)
+
+  indexsettest_SOURCES = indexsettest.cc
+
+  syncertest_SOURCES = syncertest.cc
+  syncertest_CPPFLAGS = $(AM_CPPFLAGS)		\
+	$(DUNEMPICPPFLAGS)			\
+	$(DUNE_COMMON_CPPFLAGS)
+  syncertest_LDFLAGS = $(AM_LDFLAGS)		\
+	$(DUNEMPILDFLAGS)			\
+	$(DUNE_COMMON_LDFLAGS)
+  syncertest_LDADD =					\
+	$(DUNE_COMMON_LDFLAGS) $(DUNE_COMMON_LIBS)	\
+	$(DUNEMPILIBS)					\
+	$(LDADD)
+endif
+
+include $(top_srcdir)/am/global-rules
diff --git a/dune/common/parallel/test/indexsettest.cc b/dune/common/parallel/test/indexsettest.cc
new file mode 100644
index 000000000..009304c5c
--- /dev/null
+++ b/dune/common/parallel/test/indexsettest.cc
@@ -0,0 +1,76 @@
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#include "config.h"
+#include <iostream>
+#include <cassert>
+#include <dune/common/parallel/indexset.hh>
+
+int testDeleteIndices()
+{
+  Dune::ParallelIndexSet<int,Dune::LocalIndex,15> indexSet;
+  Dune::ParallelIndexSet<int,Dune::LocalIndex,25> indexSet1;
+
+  indexSet.beginResize();
+  indexSet1.beginResize();
+
+  for(int i=0; i< 10; i++) {
+    indexSet.add(i, Dune::LocalIndex (i));
+    indexSet1.add(i, Dune::LocalIndex (i));
+  }
+
+  indexSet.endResize();
+  indexSet1.endResize();
+
+  typedef Dune::ParallelIndexSet<int,Dune::LocalIndex,15>::iterator
+  Iterator;
+
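+  // Entries may only be deleted during a resize phase, i.e. between
+  // beginResize() and endResize().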
+  Iterator entry = indexSet.begin();
+  indexSet.beginResize();
+
+  for(int i=0; i < 5; i++)
+    ++entry;
+
+  indexSet.markAsDeleted(entry);
+
+  indexSet.endResize();
+
+  std::cout<<"Unchanged: "<<indexSet1<<std::endl;
+  std::cout<<"Deleted:   "<<indexSet<<std::endl;
+
+  Iterator end = indexSet.end();
+  int i=0, ret=0;
+
+  for(entry = indexSet.begin(); entry != end; ++entry,++i)
+    if(entry->global()==5) {
+      std::cerr<<"Entry was not deleted!"<<std::endl;
+      ret++;
+    }
+
+  if(i!=9) {
+    std::cerr<<"Number of entries not correct!"<<std::endl;
+    ret++;
+  }
+
+  Dune::ParallelIndexSet<int,Dune::LocalIndex,25>::iterator iter=indexSet1.begin();
+
+  // Check that the surviving entries kept their global and local indices
+  for(entry = indexSet.begin(); entry != end; ++entry) {
+    while(iter->global() < entry->global())
+      iter++;
+    if(iter->global() != entry->global()) {
+      std::cerr <<" Global indices do not match!"<<std::endl;
+      ++ret;
+    }
+    if(iter->local() != entry->local()) {
+      std::cerr <<" Local indices do not match!"<<std::endl;
+      ++ret;
+    }
+  }
+
+  return ret;
+}
+
+int main(int argc, char **argv)
+{
+  return testDeleteIndices();
+}
diff --git a/dune/common/parallel/test/indicestest.cc b/dune/common/parallel/test/indicestest.cc
new file mode 100644
index 000000000..a7c59a1bd
--- /dev/null
+++ b/dune/common/parallel/test/indicestest.cc
@@ -0,0 +1,710 @@
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#include "config.h"
+#include <dune/common/parallel/indexset.hh>
+#include <dune/common/parallel/communicator.hh>
+#include <dune/common/parallel/remoteindices.hh>
+#include <dune/common/enumset.hh>
+#include <algorithm>
+#include <iostream>
+#include "mpi.h"
+
+enum GridFlags {
+  owner, overlap, border
+};
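+// Attributes of the grid points: owner marks points the process owns,
+// overlap marks copies of points owned by a neighbouring process
+// (border is unused in this test).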
+
+class Array;
+
+std::ostream& operator<<(std::ostream& os, const Array& a);
+
+class Array
+{
+  friend std::ostream& operator<<(std::ostream& os, const Array& a);
+public:
+  typedef double value_type;
+  Array() : vals_(0), size_(0)
+  {}
+
+  Array(int size) : size_(size)
+  {
+    vals_ = new double[size];
+  }
+
+  void build(int size)
+  {
+    vals_ = new double[size];
+    size_ = size;
+  }
+
+  Array& operator+=(double d)
+  {
+    for(int i=0; i < size_; i++)
+      vals_[i]+=d;
+    return *this;
+  }
+
+  ~Array()
+  {
+    if(vals_!=0)
+      delete[] vals_;
+  }
+
+  const double& operator[](int i) const
+  {
+    return vals_[i];
+  }
+
+  double& operator[](int i)
+  {
+    return vals_[i];
+  }
+private:
+  // Copying is forbidden; the copy constructor is private and leaves the
+  // array empty.
+  Array(const Array&) : vals_(0), size_(0)
+  {}
+  double *vals_;
+  int size_;
+};
+
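+// Policy class for the communicators below: gather(a,i) extracts the value
+// for local index i on the sending side, scatter(a,v,i) stores the received
+// value v at local index i on the receiving side.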
+struct ArrayGatherScatter
+{
+  static double gather(const Array& a, int i);
+
+  static void scatter(Array& a, double v, int i);
+
+};
+
+
+inline double ArrayGatherScatter::gather(const Array& a, int i)
+{
+  return a[i];
+}
+
+inline void ArrayGatherScatter::scatter(Array& a, double v, int i)
+{
+  a[i]=v;
+
+}
+
+std::ostream& operator<<(std::ostream& os, const Array& a)
+{
+  if(a.size_>0)
+    os<< "{ "<<a.vals_[0];
+
+  for(int i=1; i<a.size_; i++)
+    os <<", "<< a.vals_[i];
+
+  os << " }";
+  return os;
+}
+
+void testIndices(MPI_Comm comm)
+{
+  //using namespace Dune;
+
+  // The global grid size
+  const int Nx = 20;
+  const int Ny = 2;
+
+  // Process configuration
+  int procs, rank, master=0;
+  MPI_Comm_size(comm, &procs);
+  MPI_Comm_rank(comm, &rank);
+
+  // shift the ranks
+  //rank = (rank + 1) % procs;
+  //master= (master+1) %procs;
+
+  // The local grid
+  int nx = Nx/procs;
+  // distributed indexset
+  //  typedef ParallelLocalIndex<GridFlags> LocalIndexType;
+
+  typedef Dune::ParallelIndexSet<int,Dune::ParallelLocalIndex<GridFlags>,45> ParallelIndexSet;
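+  // (the third template parameter is the size of the arrays used by the
+  // underlying ArrayList)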
+
+  ParallelIndexSet distIndexSet;
+  // global indexset
+  ParallelIndexSet globalIndexSet;
+
+  // Set up the indexsets.
+  int start = std::max(rank*nx-1,0);
+  int end = std::min((rank + 1) * nx+1, Nx);
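+  // Each process gets the slice [rank*nx, (rank+1)*nx) of the grid plus
+  // one column of overlap towards each neighbouring process.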
+
+  distIndexSet.beginResize();
+
+  int localIndex=0;
+  int size = Ny*(end-start);
+  Array distArray(size);
+  Array* globalArray;
+  int index=0;
+
+  std::cout<<rank<<": Nx="<<Nx<<" Ny="<<Ny<<" size="<<size<<std::endl;
+
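+  // The global index of grid point (i,j) is i+j*Nx. Points close to the
+  // subdomain boundary are marked public so that they take part in the
+  // communication; the outermost points of the strip get the overlap flag.
+  // The overlap entries of distArray are initialized with negated values
+  // so that a successful exchange is visible in the output.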
+  for(int j=0; j<Ny; j++)
+    for(int i=start; i<end; i++) {
+      bool isPublic = (i<=start+1)||(i>=end-2);
+      GridFlags flag = owner;
+      if((i==start && i!=0)||(i==end-1 && i!=Nx-1)) {
+        distArray[index++]=-(i+j*Nx+rank*Nx*Ny);
+        flag = overlap;
+      }else
+        distArray[index++]=i+j*Nx+rank*Nx*Ny;
+
+      distIndexSet.add(i+j*Nx, Dune::ParallelLocalIndex<GridFlags> (localIndex++,flag,isPublic));
+    }
+
+  distIndexSet.endResize();
+
+  if(rank==master) {
+    // build global indexset on first process
+    globalIndexSet.beginResize();
+    globalArray=new Array(Nx*Ny);
+    int k=0;
+    for(int j=0; j<Ny; j++)
+      for(int i=0; i<Nx; i++) {
+        globalIndexSet.add(i+j*Nx, Dune::ParallelLocalIndex<GridFlags> (i+j*Nx,owner,false));
+        globalArray->operator[](i+j*Nx)=-(i+j*Nx);
+        k++;
+
+      }
+
+    globalIndexSet.endResize();
+  }else
+    globalArray=new Array(0);
+
+  typedef Dune::RemoteIndices<ParallelIndexSet> RemoteIndices;
+
+  RemoteIndices accuIndices(distIndexSet, globalIndexSet,  comm);
+  RemoteIndices overlapIndices(distIndexSet, distIndexSet, comm);
+  accuIndices.rebuild<true>();
+  overlapIndices.rebuild<false>();
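+  // The boolean template parameter of rebuild selects whether information
+  // about all indices is exchanged (true) or only about those marked as
+  // public (false).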
+
+  Dune::DatatypeCommunicator<ParallelIndexSet> accumulator, overlapExchanger;
+
+  Dune::EnumItem<GridFlags,owner> sourceFlags;
+  Dune::Combine<Dune::EnumItem<GridFlags,overlap>,Dune::EnumItem<GridFlags,owner>,GridFlags> destFlags;
+
+  accumulator.build(accuIndices, sourceFlags, distArray, destFlags, *globalArray);
+
+  overlapExchanger.build(overlapIndices, Dune::EnumItem<GridFlags,owner>(), distArray, Dune::EnumItem<GridFlags,overlap>(), distArray);
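+  // The DatatypeCommunicator constructs MPI datatypes that address the
+  // selected array entries in place, so the communication needs no
+  // extra pack/unpack buffers.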
+
+  std::cout<< rank<<": before forward distArray="<< distArray<<std::endl;
+
+  // Exchange the overlap
+  overlapExchanger.forward();
+
+  std::cout<<rank<<": overlap exchanged distArray"<< distArray<<std::endl;
+
+  if(rank==master)
+    std::cout<<": before forward globalArray="<< *globalArray<<std::endl;
+
+  accumulator.forward();
+
+
+  if(rank==master) {
+    std::cout<<"after forward global: "<<*globalArray<<std::endl;
+    *globalArray+=1;
+    std::cout<<" added one: globalArray="<<*globalArray<<std::endl;
+  }
+
+  accumulator.backward();
+  std::cout<< rank<<": after backward distArray"<< distArray<<std::endl;
+
+
+  // Exchange the overlap
+  overlapExchanger.forward();
+
+  std::cout<<rank<<": overlap exchanged distArray"<< distArray<<std::endl;
+
+  //std::cout << rank<<": source and dest are the same:"<<std::endl;
+  //std::cout << remote<<std::endl<<std::flush;
+  // the Array(0) allocated on the non-master ranks has to be freed, too
+  delete globalArray;
+}
+
+
+template<int NX, int NY, typename TG, typename TA>
+void setupDistributed(Array& distArray, Dune::ParallelIndexSet<TG,Dune::ParallelLocalIndex<TA> >& distIndexSet,
+                      int rank, int procs)
+{
+  // The local grid
+  int nx = NX/procs;
+  int mod = NX%procs;
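+  // Distribute the NX grid points as evenly as possible: the first
+  // NX%procs processes receive one point more than the others.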
+
+  // Set up the indexsets.
+  int start, end;
+  int ostart, oend;
+
+  if(rank<mod) {
+    start = rank * (nx + 1);
+    end   = start + (nx + 1);
+  }else{
+    start = mod + rank * nx;
+    end   = start + nx;
+  }
+
+  if(rank>0)
+    ostart = start - 1;
+  else
+    ostart = start;
+
+  if(rank<procs-1)
+    oend = end+1;
+  else
+    oend = end;
+
+  distIndexSet.beginResize();
+
+  int localIndex=0;
+  int size = NY*(oend-ostart);
+
+  distArray.build(size);
+
+  for(int j=0; j<NY; j++)
+    for(int i=ostart; i<oend; i++) {
+      bool isPublic = (i<=start+1)||(i>=end-1);
+      GridFlags flag = owner;
+      if((i<start || i>=end)) {
+        distArray[localIndex]=-(i+j*NX+rank*NX*NY);
+        flag = overlap;
+      }else
+        distArray[localIndex]=i+j*NX+rank*NX*NY;
+
+      distIndexSet.add(i+j*NX, Dune::ParallelLocalIndex<GridFlags> (localIndex++,flag,isPublic));
+    }
+
+  distIndexSet.endResize();
+
+
+}
+
+template<int NX,int NY, typename TG, typename TA>
+void setupGlobal(Array& globalArray, Dune::ParallelIndexSet<TG,Dune::ParallelLocalIndex<TA> >& globalIndexSet)
+{
+  // build global indexset on first process
+  globalIndexSet.beginResize();
+  globalArray.build(NX*NY);
+  int k=0;
+  for(int j=0; j<NY; j++)
+    for(int i=0; i<NX; i++) {
+      globalIndexSet.add(i+j*NX, Dune::ParallelLocalIndex<GridFlags> (i+j*NX,owner,false));
+      globalArray[i+j*NX]=-(i+j*NX);
+      k++;
+
+    }
+
+  globalIndexSet.endResize();
+}
+
+void testIndicesBuffered(MPI_Comm comm)
+{
+  //using namespace Dune;
+
+  // The global grid size
+  const int Nx = 8;
+  const int Ny = 1;
+
+  // Process configuration
+  int procs, rank, master=0;
+  MPI_Comm_size(comm, &procs);
+  MPI_Comm_rank(comm, &rank);
+
+  typedef Dune::ParallelIndexSet<int,Dune::ParallelLocalIndex<GridFlags> > ParallelIndexSet;
+
+  ParallelIndexSet distIndexSet;
+  // global indexset
+  ParallelIndexSet globalIndexSet;
+
+  Array distArray;
+  Array globalArray;
+
+  setupDistributed<Nx,Ny>(distArray, distIndexSet, rank, procs);
+
+
+  if(rank==master) {
+    setupGlobal<Nx,Ny>(globalArray, globalIndexSet);
+  }
+
+  typedef Dune::RemoteIndices<ParallelIndexSet> RemoteIndices;
+
+  RemoteIndices accuIndices(distIndexSet, globalIndexSet, comm);
+
+  accuIndices.rebuild<true>();
+  std::cout<<"dist "<<rank<<": "<<distIndexSet<<std::endl;
+  std::cout<<"global "<<rank<<": "<<globalIndexSet<<std::endl;
+  std::cout << accuIndices<<std::endl;
+  std::cout <<" end remote indices"<<std::endl;
+
+  RemoteIndices overlapIndices(distIndexSet, distIndexSet, comm);
+  overlapIndices.rebuild<false>();
+
+  Dune::Interface accuInterface;
+  Dune::Interface overlapInterface;
+  Dune::EnumItem<GridFlags,owner> sourceFlags;
+  Dune::Combine<Dune::EnumItem<GridFlags,overlap>,Dune::EnumItem<GridFlags,owner>,GridFlags> destFlags;
+  //    Dune::Bool2Type<true> flag;
+
+  accuInterface.build(accuIndices, sourceFlags, destFlags);
+  overlapInterface.build(overlapIndices, Dune::EnumItem<GridFlags,owner>(),
+                         Dune::EnumItem<GridFlags,overlap>());
+  overlapInterface.print();
+  accuInterface.print();
+
+  //accuInterface.print();
+
+  Dune::BufferedCommunicator accumulator, overlapExchanger;
+
+  accumulator.build<Array>(accuInterface);
+
+  overlapExchanger.build<Array>(overlapInterface);
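+  // In contrast to the DatatypeCommunicator used in testIndices, the
+  // BufferedCommunicator gathers the data into a message buffer on the
+  // sending side and scatters it on the receiving side via the
+  // ArrayGatherScatter policy passed to forward() and backward().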
+
+  std::cout<< rank<<": before forward distArray="<< distArray<<std::endl;
+
+  // Exchange the overlap
+  overlapExchanger.forward<ArrayGatherScatter>(distArray, distArray);
+
+  std::cout<<rank<<": overlap exchanged distArray"<< distArray<<std::endl;
+
+  if(rank==master)
+    std::cout<<": before forward globalArray="<< globalArray<<std::endl;
+
+  accumulator.forward<ArrayGatherScatter>(distArray, globalArray);
+
+
+  if(rank==master) {
+    std::cout<<"after forward global: "<<globalArray<<std::endl;
+    globalArray+=1;
+    std::cout<<" added one: globalArray="<<globalArray<<std::endl;
+  }
+
+  accumulator.backward<ArrayGatherScatter>(distArray, globalArray);
+  std::cout<< rank<<": after backward distArray="<< distArray<<std::endl;
+
+
+  // Exchange the overlap
+  overlapExchanger.forward<ArrayGatherScatter>(distArray);
+
+  std::cout<<rank<<": overlap exchanged distArray="<< distArray<<std::endl;
+
+  //std::cout << rank<<": source and dest are the same:"<<std::endl;
+  //std::cout << remote<<std::endl<<std::flush;
+}
+
+
+void testRedistributeIndices(MPI_Comm comm)
+{
+  using namespace Dune;
+
+  // The global grid size
+  const int Nx = 20;
+  const int Ny = 2;
+
+  // Process configuration
+  int procs, rank;
+  MPI_Comm_size(comm, &procs);
+  MPI_Comm_rank(comm, &rank);
+
+  // The local grid
+  int nx = Nx/procs;
+  // distributed indexset
+
+  typedef ParallelIndexSet<int,ParallelLocalIndex<GridFlags> > ParallelIndexSet;
+  ParallelIndexSet sendIndexSet;
+  // global indexset
+  ParallelIndexSet receiveIndexSet;
+
+  Array array, redistributedArray;
+
+  // Set up the indexsets.
+  {
+
+    int start = std::max(rank*nx-1,0);
+    int end = std::min((rank + 1) * nx+1, Nx);
+
+    sendIndexSet.beginResize();
+
+
+    array.build(Ny*(end-start));
+
+    for(int j=0, localIndex=0; j<Ny; j++)
+      for(int i=start; i<end; i++, localIndex++) {
+        bool isPublic = (i<=start+1)||(i>=end-2);
+        GridFlags flag = owner;
+
+        if((i==start && i!=0)||(i==end-1 && i!=Nx-1))
+          flag = overlap;
+
+        sendIndexSet.add(i+j*Nx, ParallelLocalIndex<GridFlags> (localIndex,flag,isPublic));
+        array[localIndex]=i+j*Nx+rank*Nx*Ny;
+      }
+
+    sendIndexSet.endResize();
+  }
+  {
+    int newrank = (rank + 1) % procs;
+
+    int start = std::max(newrank*nx-1,0);
+    int end = std::min((newrank + 1) * nx+1, Nx);
+
+    std::cout<<rank<<": "<<newrank<<" start="<<start<<" end"<<end<<std::endl;
+
+    redistributedArray.build(Ny*(end-start));
+
+    receiveIndexSet.beginResize();
+
+    for(int j=0, localIndex=0; j<Ny; j++)
+      for(int i=start; i<end; i++, localIndex++) {
+        bool isPublic = (i<=start+1)||(i>=end-2);
+        GridFlags flag = owner;
+
+        if((i==start && i!=0)||(i==end-1 && i!=Nx-1))
+          flag = overlap;
+
+        receiveIndexSet.add(i+j*Nx, ParallelLocalIndex<GridFlags> (localIndex,flag,isPublic));
+        redistributedArray[localIndex]=-1;
+      }
+
+    receiveIndexSet.endResize();
+  }
+
+
+  std::cout<< rank<<": distributed and global index set!"<<std::endl<<std::flush;
+  typedef RemoteIndices<ParallelIndexSet> RemoteIndices;
+
+  RemoteIndices redistributeIndices(sendIndexSet,
+                                    receiveIndexSet, comm);
+  RemoteIndices overlapIndices(receiveIndexSet, receiveIndexSet, comm);
+
+  redistributeIndices.rebuild<true>();
+  overlapIndices.rebuild<false>();
+
+  DatatypeCommunicator<ParallelIndexSet> redistribute, overlapComm;
+  EnumItem<GridFlags,owner> fowner;
+  EnumItem<GridFlags,overlap> foverlap;
+
+  redistribute.build(redistributeIndices, fowner, array, fowner, redistributedArray);
+
+  overlapComm.build(overlapIndices, fowner, redistributedArray, foverlap, redistributedArray);
+  std::cout<<rank<<": initial array: "<<array<<std::endl;
+
+  redistribute.forward();
+
+  std::cout<<rank<<": redistributed array: "<<redistributedArray<<std::endl;
+
+  overlapComm.forward();
+
+  std::cout<<rank<<": redistributed array with overlap communicated: "<<redistributedArray<<std::endl;
+}
+
+void testRedistributeIndicesBuffered(MPI_Comm comm)
+{
+  using namespace Dune;
+
+  // The global grid size
+  const int Nx = 20;
+  const int Ny = 2;
+
+  // Process configuration
+  int procs, rank;
+  MPI_Comm_size(comm, &procs);
+  MPI_Comm_rank(comm, &rank);
+
+  // The local grid
+  int nx = Nx/procs;
+  // distributed indexset
+
+  typedef ParallelIndexSet<int,ParallelLocalIndex<GridFlags> > ParallelIndexSet;
+  ParallelIndexSet sendIndexSet;
+  // global indexset
+  ParallelIndexSet receiveIndexSet;
+
+  Array array, redistributedArray;
+
+  std::vector<int> neighbours;
+
+  // Set up the indexsets.
+  {
+
+    int start = std::max(rank*nx-1,0);
+    int end = std::min((rank + 1) * nx+1, Nx);
+
+    neighbours.reserve(2);
+
+    if(rank>0) neighbours.push_back(rank-1);
+    if(rank<procs-1) neighbours.push_back(rank+1);
+
+    sendIndexSet.beginResize();
+
+
+    array.build(Ny*(end-start));
+
+    for(int j=0, localIndex=0; j<Ny; j++)
+      for(int i=start; i<end; i++, localIndex++) {
+        bool isPublic = (i<=start+1)||(i>=end-2);
+        GridFlags flag = owner;
+
+        if((i==start && i!=0)||(i==end-1 && i!=Nx-1))
+          flag = overlap;
+
+        sendIndexSet.add(i+j*Nx, ParallelLocalIndex<GridFlags> (localIndex,flag,isPublic));
+        array[localIndex]=i+j*Nx; //+rank*Nx*Ny;
+        if(flag==overlap)
+          array[localIndex]=-array[localIndex];
+      }
+
+    sendIndexSet.endResize();
+  }
+  {
+    int newrank = (rank + 1) % procs;
+
+    int start = std::max(newrank*nx-1,0);
+    int end = std::min((newrank + 1) * nx+1, Nx);
+
+    std::cout<<rank<<": "<<newrank<<" start="<<start<<" end"<<end<<std::endl;
+
+    redistributedArray.build(Ny*(end-start));
+
+    receiveIndexSet.beginResize();
+
+    for(int j=0, localIndex=0; j<Ny; j++)
+      for(int i=start; i<end; i++, localIndex++) {
+        bool isPublic = (i<=start+1)||(i>=end-2);
+        GridFlags flag = owner;
+
+        if((i==start && i!=0)||(i==end-1 && i!=Nx-1))
+          flag = overlap;
+
+        receiveIndexSet.add(i+j*Nx, ParallelLocalIndex<GridFlags> (localIndex,flag,isPublic));
+        redistributedArray[localIndex]=-1;
+      }
+
+    receiveIndexSet.endResize();
+  }
+
+
+  std::cout<< rank<<": distributed and global index set!"<<std::endl<<std::flush;
+
+  typedef RemoteIndices<ParallelIndexSet> RemoteIndices;
+  RemoteIndices redistributeIndices(sendIndexSet,
+                                    receiveIndexSet, comm);
+  RemoteIndices overlapIndices(receiveIndexSet, receiveIndexSet, comm);
+  RemoteIndices sendIndices(sendIndexSet,
+                            sendIndexSet, comm, neighbours);
+  RemoteIndices sendIndices1(sendIndexSet,
+                             sendIndexSet, comm);
+  overlapIndices.rebuild<false>();
+  redistributeIndices.rebuild<true>();
+  sendIndices.rebuild<true>();
+  sendIndices1.rebuild<true>();
+
+  if(rank==0)
+    std::cout<<sendIndices<<std::endl<<sendIndices1<<std::endl;
+
+  assert(sendIndices==sendIndices1);
+
+  std::cout<<redistributeIndices<<std::endl;
+
+  Interface redistributeInterface, overlapInterface;
+  EnumItem<GridFlags,owner> fowner;
+  EnumItem<GridFlags,overlap> foverlap;
+
+  redistributeInterface.build(redistributeIndices, fowner, fowner);
+  overlapInterface.build(overlapIndices, fowner, foverlap);
+
+  BufferedCommunicator redistribute;
+  BufferedCommunicator overlapComm;
+
+  redistribute.build(array, redistributedArray, redistributeInterface);
+  overlapComm.build<Array>(overlapInterface);
+
+  std::cout<<rank<<": initial array: "<<array<<std::endl;
+
+  redistribute.forward<ArrayGatherScatter>(array, redistributedArray);
+
+  std::cout<<rank<<": redistributed array: "<<redistributedArray<<std::endl;
+
+  redistributedArray +=1;
+
+  std::cout<<rank<<": redistributed array (added one): "<<redistributedArray<<std::endl;
+
+  overlapComm.forward<ArrayGatherScatter>(redistributedArray);
+
+  std::cout<<rank<<": redistributed array with overlap communicated: "<<redistributedArray<<std::endl;
+
+  redistribute.backward<ArrayGatherScatter>(array, redistributedArray);
+
+  std::cout<<rank<<": final array: "<<array<<std::endl;
+
+  redistribute.forward<ArrayGatherScatter>(array, redistributedArray);
+
+  std::cout<<rank<<": redistributed array after final forward: "<<redistributedArray<<std::endl;
+}
+
+
+/**
+ * @brief MPI Error.
+ * Thrown when an mpi error occurs.
+ */
+class MPIError {
+public:
+  /** @brief Constructor. */
+  MPIError(std::string s, int e) : errorstring(s), errorcode(e){}
+  /** @brief The error string. */
+  std::string errorstring;
+  /** @brief The mpi error code. */
+  int errorcode;
+};
+
+void MPI_err_handler(MPI_Comm *comm, int *err_code, ...){
+  char *err_string=new char[MPI_MAX_ERROR_STRING];
+  int err_length;
+  MPI_Error_string(*err_code, err_string, &err_length);
+  std::string s(err_string, err_length);
+  int rank;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  std::cerr << rank<<": An MPI error occurred:"<<std::endl<<s<<std::endl;
+  delete[] err_string;
+  throw MPIError(s, *err_code);
+}
+
+int main(int argc, char **argv)
+{
+  MPI_Init(&argc, &argv);
+  MPI_Errhandler handler;
+  MPI_Errhandler_create(MPI_err_handler, &handler);
+  MPI_Errhandler_set(MPI_COMM_WORLD, handler);
+  int rank;
+  int size;
+  const int firstRank=2;
+  MPI_Comm_size(MPI_COMM_WORLD, &size);
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+  MPI_Comm comm;
+  int key = rank;
+
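+  // Swap the positions of rank 0 and rank firstRank in the new
+  // communicator, so that the master of the tests is not rank 0 of
+  // MPI_COMM_WORLD.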
+  if(size>firstRank) {
+    if(rank==0)
+      key = firstRank;
+    if(rank==firstRank)
+      key=0;
+  }
+
+  MPI_Comm_split(MPI_COMM_WORLD, 0, key, &comm);
+
+#ifdef DEBUG
+  // Busy-wait so that a debugger can attach; set wait to false from the
+  // debugger to continue.
+  volatile bool wait=true;
+  while(size>1 && wait) ;
+#endif
+
+  //  testIndices(comm);
+  testIndicesBuffered(comm);
+
+  if(rank==0)
+    std::cout<<std::endl<<"Redistributing!"<<std::endl<<std::endl;
+  MPI_Barrier(comm);
+
+
+  //  testRedistributeIndices(comm);
+  testRedistributeIndicesBuffered(comm);
+  MPI_Comm_free(&comm);
+  MPI_Finalize();
+}
diff --git a/dune/common/parallel/test/selectiontest.cc b/dune/common/parallel/test/selectiontest.cc
new file mode 100644
index 000000000..b6db58fe7
--- /dev/null
+++ b/dune/common/parallel/test/selectiontest.cc
@@ -0,0 +1,92 @@
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#include "config.h"
+#include <iostream>
+#include <dune/common/timer.hh>
+#include <dune/common/enumset.hh>
+#include <dune/common/parallel/remoteindices.hh>
+#include <dune/common/parallel/selection.hh>
+
+enum GridFlags {
+  owner, overlap, border
+};
+
+template<class T>
+int measure(const T& selection)
+{
+  typedef typename T::const_iterator iterator;
+
+  const iterator end = selection.end();
+
+  int count=0;
+  Dune::Timer timer;
+  timer.reset();
+  for(int i=0; i<10; i++)
+    for(iterator iter = selection.begin(); iter != end; ++iter)
+      count+=*iter;
+
+  std::cout<<" took "<< timer.elapsed()<<" seconds"<<std::endl;
+
+  return count;
+}
+
+template<int SIZE>
+void test()
+{
+  const int Nx = SIZE;
+  const int Ny = SIZE;
+
+  // Array list chunk size for the index set and the selections below
+  const int ALSIZE=55;
+
+  Dune::ParallelIndexSet<int,Dune::ParallelLocalIndex<GridFlags>,ALSIZE> distIndexSet;
+
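+  // Mark the boundary points of the SIZE x SIZE grid as overlap and all
+  // interior points as owner.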
+  distIndexSet.beginResize();
+
+  for(int y=0, i=0; y < Ny; y++)
+    for(int x=0; x < Nx; x++, i++) {
+      GridFlags flag = owner;
+      if(x==0 || x == Nx-1 || y ==0 || y==Ny-1)
+        flag = overlap;
+
+      distIndexSet.add(i, Dune::ParallelLocalIndex<GridFlags> (i, flag, true));
+    }
+
+  distIndexSet.endResize();
+
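+  // A Selection caches the matching indices when it is built, whereas an
+  // UncachedSelection filters the index set anew on every traversal; the
+  // timings below compare the two variants for both attribute sets.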
+  Dune::UncachedSelection<Dune::EnumItem<GridFlags,owner>,int,Dune::ParallelLocalIndex<GridFlags>,ALSIZE>
+  ownerUncached(distIndexSet);
+
+  Dune::Selection<Dune::EnumItem<GridFlags,owner>,int,Dune::ParallelLocalIndex<GridFlags>,ALSIZE>
+  ownerCached(distIndexSet);
+
+  Dune::UncachedSelection<Dune::EnumItem<GridFlags,overlap>,int,Dune::ParallelLocalIndex<GridFlags>,ALSIZE>
+  overlapUncached(distIndexSet);
+
+  Dune::Selection<Dune::EnumItem<GridFlags,overlap>,int,Dune::ParallelLocalIndex<GridFlags>,ALSIZE>
+  overlapCached(distIndexSet);
+
+  int count=0;
+
+  std::cout<<" Owner selection uncached:";
+  count+=meassure(ownerUncached);
+  std::cout<<" Owner selection cached:";
+  count+=meassure(ownerCached);
+  std::cout<<" Overlap selection uncached:";
+  count+=meassure(overlapUncached);
+  std::cout<<" Overlap selection cached:";
+  count+=meassure(overlapCached);
+  std::cout<<count<<std::endl;
+}
+
+int main()
+{
+  test<1000>();
+}
diff --git a/dune/common/parallel/test/syncertest.cc b/dune/common/parallel/test/syncertest.cc
new file mode 100644
index 000000000..3fabd704b
--- /dev/null
+++ b/dune/common/parallel/test/syncertest.cc
@@ -0,0 +1,358 @@
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#include "config.h"
+#include <dune/common/parallel/indicessyncer.hh>
+#include <dune/common/sllist.hh>
+#include <string>
+#include <iostream>
+
+enum GridFlags {
+  owner, overlap, border
+};
+
+template<typename T>
+void deleteOverlapEntries(T& indices,
+                          Dune::RemoteIndices<T>& remoteIndices)
+{
+  typedef typename T::iterator IndexIterator;
+  typedef typename T::GlobalIndex GlobalIndex;
+  typedef typename T::LocalIndex::Attribute Attribute;
+  typedef Dune::RemoteIndices<T> RemoteIndices;
+  typedef typename RemoteIndices::RemoteIndexList::ModifyIterator RemoteModifier;
+  typedef typename RemoteIndices::RemoteIndexList::const_iterator RemoteIterator;
+  typedef Dune::SLList<std::pair<GlobalIndex,Attribute>, typename RemoteIndices::RemoteIndexList::Allocator> GlobalList;
+  typedef typename GlobalList::ModifyIterator GlobalModifier;
+  typedef Dune::tuple<RemoteModifier,GlobalModifier,const RemoteIterator,const typename GlobalList::const_iterator,
+      const GlobalList*, const typename RemoteIndices::RemoteIndexList*> IteratorTuple;
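+  // For each neighbouring process the tuple bundles: modify iterators into
+  // the remote index list and the list of global indices, the corresponding
+  // end iterators, and pointers to both lists.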
+  typedef std::map<int,IteratorTuple> IteratorMap;
+  typedef typename RemoteIndices::const_iterator RemoteMapIterator;
+
+  int rank;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+  std::map<int,GlobalList> globalLists;
+
+  IteratorMap iterators;
+  RemoteMapIterator rmEnd = remoteIndices.end();
+
+  for(RemoteMapIterator remote = remoteIndices.begin();
+      remote != rmEnd; ++remote) {
+    // Initialize global indices
+    GlobalList& gList=globalLists[remote->first];
+    const RemoteIterator rend = remote->second.first->end();
+
+    for(RemoteIterator index= remote->second.first->begin();
+        index != rend; ++index)
+      gList.push_back(std::make_pair(index->localIndexPair().global(),
+                                     index->localIndexPair().local().attribute()));
+
+    assert(gList.size()==remote->second.first->size());
+    std::cout << "Size of remote indices is "<<gList.size()<<std::endl;
+
+    iterators.insert(std::make_pair(remote->first,
+                                    IteratorTuple(remote->second.first->beginModify(),
+                                                  gList.beginModify(),
+                                                  rend,
+                                                  gList.end(),
+                                                  &gList,
+                                                  remote->second.first)));
+  }
+
+  indices.beginResize();
+
+  const IndexIterator endIndex = indices.end();
+  for(IndexIterator index = indices.begin(); index != endIndex; ++index) {
+    if(index->local().attribute()==overlap) {
+      std::cout << rank<<": Deleting "<<*index<<std::endl;
+
+      indices.markAsDeleted(index);
+
+      // Delete corresponding indices in all remote index lists.
+      typedef typename IteratorMap::iterator iterator;
+      iterator end = iterators.end();
+
+      for(iterator remote = iterators.begin(); remote != end; ++remote) {
+
+        // Search for the index
+        while(Dune::Element<0>::get(remote->second) != Dune::Element<2>::get(remote->second)
+              && *(Dune::Element<1>::get(remote->second)) < *index) {
+          // increment all iterators
+          ++(Dune::Element<0>::get(remote->second));
+          ++(Dune::Element<1>::get(remote->second));
+          if(Dune::Element<0>::get(remote->second)!=Dune::Element<2>::get(remote->second))
+            assert(Dune::Element<1>::get(remote->second)!=Dune::Element<3>::get(remote->second));
+        }
+
+        // Delete the entry if present
+        if(Dune::Element<0>::get(remote->second) != Dune::Element<2>::get(remote->second)) {
+          assert(Dune::Element<1>::get(remote->second) != Dune::Element<3>::get(remote->second));
+
+          if(*(Dune::Element<1>::get(remote->second)) == *index) {
+
+            std::cout<<rank<<": Deleting remote "<<
+            Dune::Element<1>::get(remote->second)->first<<", "<<
+            Dune::Element<1>::get(remote->second)->second<<" of process "
+            << remote->first<<std::endl;
+
+            // Delete entries
+            Dune::Element<0>::get(remote->second).remove();
+            Dune::Element<1>::get(remote->second).remove();
+            assert(Dune::Element<4>::get(remote->second)->size()==Dune::Element<5>::get(remote->second)->size());
+          }
+        }
+      }
+    }
+  }
+
+  indices.endResize();
+
+  // endResize() may reallocate the index set and thereby invalidate the
+  // pointers stored in the remote index lists; repair them using the
+  // saved lists of global indices.
+  Dune::repairLocalIndexPointers(globalLists, remoteIndices, indices);
+  globalLists.clear();
+}
+
+
+template<typename T>
+bool areEqual(T& indices,
+              Dune::RemoteIndices<T>& remoteIndices,
+              T& oIndices,
+              Dune::RemoteIndices<T>& oRemoteIndices){
+
+  typedef typename T::iterator IndexIterator;
+  typedef Dune::RemoteIndices<T> RemoteIndices;
+  typedef typename RemoteIndices::RemoteIndexList::iterator RemoteIterator;
+
+  IndexIterator iEnd = indices.end();
+  bool ret=true;
+  int rank;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+  // Test the index sets
+  if(indices.size() != oIndices.size()) {
+    std::cerr<< rank<<": Sizes of the index sets differ!"<<std::endl;
+    ret= false;
+  }
+  for(IndexIterator index = indices.begin(), oIndex = oIndices.begin();
+      index != iEnd; ++index, ++oIndex) {
+    if( index->global() != oIndex->global()) {
+      std::cerr<<rank<<": Entry for "<<index->global() <<" is missing!"<<std::endl;
+      ret = false;
+    }
+    else if(index->local().attribute() !=oIndex->local().attribute()) {
+      std::cerr<<rank<<": Entry for "<<index->global() <<" has wrong attribute: "<<
+      index->local().attribute()<< "!= "<<oIndex->local().attribute()<<std::endl;
+      ret = false;
+    }
+  }
+
+  // Test the remote index lists
+  typedef typename RemoteIndices::RemoteIndexMap::const_iterator RemoteMapIterator;
+
+  RemoteMapIterator rmEnd = remoteIndices.end();
+
+  for(RemoteMapIterator remote = remoteIndices.begin(),
+      oRemote = oRemoteIndices.begin();
+      remote != rmEnd; ++remote, ++oRemote) {
+    if(oRemote->second.first->size() != remote->second.first->size()) {
+      std::cerr <<rank<<": Size of remote index list for process "<<remote->first
+                <<" does not match!"<<std::endl;
+      ret=false;
+    }
+
+    RemoteIterator rEnd = oRemote->second.first->end();
+    for(RemoteIterator rIndex= remote->second.first->begin(),
+        oRIndex = oRemote->second.first->begin(); oRIndex != rEnd;
+        ++rIndex, ++oRIndex) {
+
+      if(rIndex->localIndexPair().global() != oRIndex->localIndexPair().global()) {
+
+        std::cerr<<rank<<": Remote Entry for "<< rIndex->localIndexPair().global()
+                 <<" is missing for process "<<remote->first<<std::endl;
+        ret = false;
+      }
+
+      if(rIndex->attribute() != oRIndex->attribute()) {
+        std::cerr<<rank<<": Attribute for entry "<<rIndex->localIndexPair().global()
+                 <<" for process "<< remote->first<<" is wrong: "
+                 <<rIndex->attribute()<<" != "<<oRIndex->attribute()<<std::endl;
+        ret = false;
+      }
+    }
+  }
+
+  return ret;
+}
+
+template<typename T>
+void addFakeRemoteIndices(T& indices,
+                          T& oIndices,
+                          Dune::RemoteIndices<T>& remoteIndices,
+                          Dune::RemoteIndices<T>& oRemoteIndices){
+  typedef typename T::iterator IndexIterator;
+  typedef typename T::GlobalIndex GlobalIndex;
+  typedef typename T::LocalIndex::Attribute Attribute;
+  typedef typename Dune::RemoteIndices<T>::RemoteIndexList RemoteIndexList;
+  assert(remoteIndices.neighbours()==0 && oRemoteIndices.neighbours()==0);
+
+  RemoteIndexList* rlist = new RemoteIndexList();
+  RemoteIndexList* orlist = new RemoteIndexList();
+  int added=0;
+  IndexIterator iEnd = indices.end();
+
+  for(IndexIterator index = indices.begin(), oIndex = oIndices.begin();
+      index != iEnd; ++index, ++oIndex) {
+    assert(*index == *oIndex);
+    if(index->local().attribute()==overlap) {
+      added++;
+      rlist->push_back(Dune::RemoteIndex<GlobalIndex,Attribute>(owner,&(*index)));
+      orlist->push_back(Dune::RemoteIndex<GlobalIndex,Attribute>(owner,&(*oIndex)));
+    }
+  }
+
+
+  remoteIndices.remoteIndices_.insert(std::make_pair(1,std::make_pair(rlist,rlist)));
+  oRemoteIndices.remoteIndices_.insert(std::make_pair(1,std::make_pair(orlist,orlist)));
+
+  std::cout<<"Added "<<added<<" fake remote indices!"<<std::endl;
+}
+
+bool testIndicesSyncer()
+{
+  //using namespace Dune;
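+  // Build two identical index sets, delete all overlap entries from one of
+  // them, let the IndicesSyncer restore them, and check that both index
+  // sets and their remote index lists coincide again afterwards.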
+
+  // The global grid size
+  const int Nx = 6;
+  const int Ny = 1;
+
+  // Process configuration
+  int procs, rank;
+  MPI_Comm_size(MPI_COMM_WORLD, &procs);
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+  // The local grid
+  int nx = Nx/procs;
+  int first=Nx%procs;
+  // distributed indexset
+  //  typedef ParallelLocalIndex<GridFlags> LocalIndexType;
+
+  typedef Dune::ParallelIndexSet<int,Dune::ParallelLocalIndex<GridFlags> > ParallelIndexSet;
+  ParallelIndexSet indexSet, changedIndexSet;
+
+  // Set up the indexsets.
+  int start,end, ostart, oend;
+  if(rank<first) {
+    start = rank*nx+rank;
+    end = rank +rank * nx+nx+1;
+  }else{
+    start = first+rank*nx;
+    end = first +rank*nx +nx;
+  }
+
+  if(rank>0 && start<Nx)
+    ostart=start-1;
+  else
+    ostart=start;
+
+  if(rank<procs-1 && end<Nx)
+    oend=end+1;
+  else
+    oend=end;
+
+  std::cout<<rank<<": ostart="<<ostart<<" start="<<start<<" end="<<end<<" oend="<<oend<<std::endl;
+  //return true;
+
+  indexSet.beginResize();
+  changedIndexSet.beginResize();
+
+  int localIndex=0;
+
+  for(int j=0; j<Ny; j++)
+    for(int i=ostart; i<oend; i++) {
+      bool isPublic = (i<start+1)||(i>=end-1);
+      GridFlags flag = owner;
+      if((i==ostart && (i!=0))||(i==end && (i!=Nx-1))) {
+        flag = overlap;
+      }
+
+      indexSet.add(i+j*Nx, Dune::ParallelLocalIndex<GridFlags> (localIndex,flag,isPublic));
+      changedIndexSet.add(i+j*Nx, Dune::ParallelLocalIndex<GridFlags> (localIndex++,flag,isPublic));
+    }
+
+  indexSet.endResize();
+  changedIndexSet.endResize();
+
+  Dune::RemoteIndices<ParallelIndexSet> remoteIndices(indexSet, indexSet, MPI_COMM_WORLD);
+  Dune::RemoteIndices<ParallelIndexSet> changedRemoteIndices(changedIndexSet, changedIndexSet, MPI_COMM_WORLD);
+
+  remoteIndices.rebuild<false>();
+  changedRemoteIndices.rebuild<false>();
+
+
+  std::cout<<rank<<": Unchanged: "<<indexSet<<std::endl<<remoteIndices<<std::endl;
+  assert(areEqual(indexSet, remoteIndices,changedIndexSet, changedRemoteIndices));
+
+  std::cout<<"Deleting entries!"<<std::endl;
+
+  //if(procs==1)
+  //addFakeRemoteIndices(indexSet, changedIndexSet, remoteIndices, changedRemoteIndices);
+
+  deleteOverlapEntries(changedIndexSet, changedRemoteIndices);
+  std::cout<<rank<<": Changed:   "<<changedIndexSet<<std::endl<<changedRemoteIndices<<std::endl;
+
+  Dune::IndicesSyncer<ParallelIndexSet> syncer(changedIndexSet, changedRemoteIndices);
+  //  return 0;
+
+  std::cout<<"Syncing!"<<std::endl;
+
+  syncer.sync();
+
+  std::cout<<rank<<": Synced:   "<<changedIndexSet<<std::endl<<changedRemoteIndices<<std::endl;
+  if( areEqual(indexSet, remoteIndices,changedIndexSet, changedRemoteIndices))
+    return true;
+  else{
+    std::cerr<<"Output not equal!"<<std::endl;
+    return false;
+  }
+
+
+}
+
+/**
+ * @brief MPI Error.
+ * Thrown when an mpi error occurs.
+ */
+class MPIError {
+public:
+  /** @brief Constructor. */
+  MPIError(std::string s, int e) : errorstring(s), errorcode(e){}
+  /** @brief The error string. */
+  std::string errorstring;
+  /** @brief The mpi error code. */
+  int errorcode;
+};
+
+void MPI_err_handler(MPI_Comm *comm, int *err_code, ...){
+  char *err_string=new char[MPI_MAX_ERROR_STRING];
+  int err_length;
+  MPI_Error_string(*err_code, err_string, &err_length);
+  std::string s(err_string, err_length);
+  std::cerr << "An MPI Error ocurred:"<<std::endl<<s<<std::endl;
+  delete[] err_string;
+  throw MPIError(s, *err_code);
+}
+
+int main(int argc, char** argv){
+  MPI_Init(&argc, &argv);
+  MPI_Errhandler handler;
+  MPI_Errhandler_create(MPI_err_handler, &handler);
+  MPI_Errhandler_set(MPI_COMM_WORLD, handler);
+  int procs, rank;
+  MPI_Comm_size(MPI_COMM_WORLD, &procs);
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  bool ret=testIndicesSyncer();
+  MPI_Barrier(MPI_COMM_WORLD);
+  std::cout<<rank<<": ENd="<<ret<<std::endl;
+  if(!ret)
+    MPI_Abort(MPI_COMM_WORLD, 1);
+  MPI_Finalize();
+}
-- 
GitLab