diff --git a/dune/istl/owneroverlapcopy.hh b/dune/istl/owneroverlapcopy.hh
index d93fb9783698bd1b3547412d3f2e7bfe934804cf..fde36b7238cf8e83e0d284a685ce0509ea3246f0 100644
--- a/dune/istl/owneroverlapcopy.hh
+++ b/dune/istl/owneroverlapcopy.hh
@@ -294,7 +294,7 @@ namespace Dune {
       return category_;
     }
 
-    const CollectiveCommunication<MPI_Comm>& communicator() const
+    const Communication<MPI_Comm>& communicator() const
     {
       return cc;
     }
@@ -673,7 +673,7 @@ namespace Dune {
     OwnerOverlapCopyCommunication (const OwnerOverlapCopyCommunication&)
     {}
     MPI_Comm comm;
-    CollectiveCommunication<MPI_Comm> cc;
+    Communication<MPI_Comm> cc;
     PIS pis;
     RI ri;
     mutable IF OwnerToAllInterface;
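
Note (not part of the patch): downstream code that spells out the return type of communicator() needs the same one-word rename, while code using auto is unaffected; the collective interface of the returned object stays the same. A minimal sketch, assuming an MPI build; the alias OOComm, the function globalSum and its arguments are illustrative, not taken from the patch:

    #include <dune/istl/owneroverlapcopy.hh>

    // OOComm stands for any concrete OwnerOverlapCopyCommunication instantiation.
    template<class OOComm>
    double globalSum(const OOComm& oocc, double localValue)
    {
      // was: const Dune::CollectiveCommunication<MPI_Comm>& cc = oocc.communicator();
      const Dune::Communication<MPI_Comm>& cc = oocc.communicator();
      return cc.sum(localValue);   // rank(), size(), sum(), ... behave exactly as before
    }
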
diff --git a/dune/istl/paamg/construction.hh b/dune/istl/paamg/construction.hh
index a25a81d861b658a382b047f06a0dee412949b90b..072c49a8e29cdedf81921fd1e9daf9e06a1248ab 100644
--- a/dune/istl/paamg/construction.hh
+++ b/dune/istl/paamg/construction.hh
@@ -88,11 +88,11 @@ namespace Dune
 
     struct SequentialCommunicationArgs
     {
-      SequentialCommunicationArgs(CollectiveCommunication<void*> comm, [[maybe_unused]] int cat)
+      SequentialCommunicationArgs(Communication<void*> comm, [[maybe_unused]] int cat)
         : comm_(comm)
       {}
 
-      CollectiveCommunication<void*> comm_;
+      Communication<void*> comm_;
     };
 
   } // end Amg namspace
diff --git a/dune/istl/paamg/pinfo.hh b/dune/istl/paamg/pinfo.hh
index ae97fc5b4fbe641c64b451a0dd02c7cf53bc1be6..8563054d86dc719588eab4cbc1711618eeaaec1a 100644
--- a/dune/istl/paamg/pinfo.hh
+++ b/dune/istl/paamg/pinfo.hh
@@ -25,7 +25,7 @@ namespace Dune
     class SequentialInformation
     {
     public:
-      typedef CollectiveCommunication<void*> MPICommunicator;
+      typedef Communication<void*> MPICommunicator;
       typedef EmptySet<int> CopyFlags;
       typedef AllSet<int> OwnerSet;
 
@@ -83,7 +83,7 @@ namespace Dune
       }
 
       template<class T>
-      SequentialInformation(const CollectiveCommunication<T>&)
+      SequentialInformation(const Communication<T>&)
       {}
 
       SequentialInformation()
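
Note (not part of the patch): SequentialInformation keeps both ways of being built, so sequential AMG setups continue to compile after the rename. A minimal sketch; the helper function makeSequentialInfo is illustrative, not from the patch:

    #include <dune/common/parallel/mpihelper.hh>
    #include <dune/istl/paamg/pinfo.hh>

    // Requires MPIHelper::instance(argc, argv) to have been called first in an MPI build.
    inline Dune::Amg::SequentialInformation makeSequentialInfo()
    {
      // The converting constructor now accepts any Dune::Communication<T>,
      // e.g. the object returned by MPIHelper::getCommunication();
      // default construction is untouched.
      return Dune::Amg::SequentialInformation(Dune::MPIHelper::getCommunication());
    }
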
diff --git a/dune/istl/paamg/test/amgtest.cc b/dune/istl/paamg/test/amgtest.cc
index 1b046173dfc4d8a2cfaf978bcc2e7a7d7c8a1f7a..b8dbd0e9a52f8ad16b931e04602a6fe2a8d6ac81 100644
--- a/dune/istl/paamg/test/amgtest.cc
+++ b/dune/istl/paamg/test/amgtest.cc
@@ -87,7 +87,7 @@ void testAMG(int N, int coarsenTarget, int ml)
 
   ParallelIndexSet indices;
   typedef Dune::MatrixAdapter<Matrix,Vector,Vector> Operator;
-  typedef Dune::CollectiveCommunication<void*> Comm;
+  typedef Dune::Communication<void*> Comm;
   int n;
 
   Comm c;
diff --git a/dune/istl/paamg/test/anisotropic.hh b/dune/istl/paamg/test/anisotropic.hh
index d9e2a928d9708f4ffc2243fc70e7f199e02d09cc..344099202a88e5bd9b08696f7046163ea256cf0b 100644
--- a/dune/istl/paamg/test/anisotropic.hh
+++ b/dune/istl/paamg/test/anisotropic.hh
@@ -151,7 +151,7 @@ void setBoundary(V& lhs, V& rhs, const G& N)
  * \tparam M A matrix type
  */
 template<class MatrixEntry, class G, class L, class C, int s>
-Dune::BCRSMatrix<MatrixEntry> setupAnisotropic2d(int N, Dune::ParallelIndexSet<G,L,s>& indices, const Dune::CollectiveCommunication<C>& p, int *nout, typename Dune::BCRSMatrix<MatrixEntry>::field_type eps=1.0)
+Dune::BCRSMatrix<MatrixEntry> setupAnisotropic2d(int N, Dune::ParallelIndexSet<G,L,s>& indices, const Dune::Communication<C>& p, int *nout, typename Dune::BCRSMatrix<MatrixEntry>::field_type eps=1.0)
 {
   int procs=p.size(), rank=p.rank();
 
diff --git a/dune/istl/paamg/test/fastamg.cc b/dune/istl/paamg/test/fastamg.cc
index 1457170558a6028da613f307f4621d7e68ebffe7..c6be46419894a06bc57910e585b41f8212cfc328 100644
--- a/dune/istl/paamg/test/fastamg.cc
+++ b/dune/istl/paamg/test/fastamg.cc
@@ -50,7 +50,7 @@ void testAMG(int N, int coarsenTarget, int ml)
   typedef Dune::FieldVector<double,BS> VectorBlock;
   typedef Dune::BlockVector<VectorBlock> Vector;
   typedef Dune::MatrixAdapter<BCRSMat,Vector,Vector> Operator;
-  typedef Dune::CollectiveCommunication<void*> Comm;
+  typedef Dune::Communication<void*> Comm;
   int n;
 
   Comm c;
diff --git a/dune/istl/paamg/test/galerkintest.cc b/dune/istl/paamg/test/galerkintest.cc
index f10b23b126ed70101c8b328922020e4891305c32..7033559372b83293af1bc7e9f54235e58ed3a791 100644
--- a/dune/istl/paamg/test/galerkintest.cc
+++ b/dune/istl/paamg/test/galerkintest.cc
@@ -38,7 +38,7 @@ void testCoarsenIndices(int N)
   ParallelIndexSet& indices = pinfo.indexSet();
   RemoteIndices& remoteIndices = pinfo.remoteIndices();
 
-  typedef Dune::CollectiveCommunication<MPI_Comm> Comm;
+  typedef Dune::Communication<MPI_Comm> Comm;
   Comm cc(MPI_COMM_WORLD);
 
   BCRSMat mat = setupAnisotropic2d<Block>(N, indices, cc, &n);
diff --git a/dune/istl/paamg/test/hierarchytest.cc b/dune/istl/paamg/test/hierarchytest.cc
index 37f6df9422a8fbe20fac0274a9166e6bbb06299e..1d263718bdce1214e06eaafcf6150179a961a524 100644
--- a/dune/istl/paamg/test/hierarchytest.cc
+++ b/dune/istl/paamg/test/hierarchytest.cc
@@ -30,7 +30,7 @@ void testHierarchy(int N)
   typedef Dune::RemoteIndices<ParallelIndexSet> RemoteIndices;
   RemoteIndices& remoteIndices = pinfo.remoteIndices();
 
-  BCRSMat mat = setupAnisotropic2d<MatrixBlock>(N, indices, Dune::MPIHelper::getCollectiveCommunication(), &n);
+  BCRSMat mat = setupAnisotropic2d<MatrixBlock>(N, indices, Dune::MPIHelper::getCommunication(), &n);
   Vector b(indices.size());
 
   remoteIndices.rebuild<false>();
diff --git a/dune/istl/paamg/test/kamgtest.cc b/dune/istl/paamg/test/kamgtest.cc
index 222f2972dac2f82df852d6a948272c1bbaabf519..d9805ae91734e3f1d34f5205963091ad72c63a24 100644
--- a/dune/istl/paamg/test/kamgtest.cc
+++ b/dune/istl/paamg/test/kamgtest.cc
@@ -67,7 +67,7 @@ void testAMG(int N, int coarsenTarget, int ml)
   typedef Dune::FieldVector<double,BS> VectorBlock;
   typedef Dune::BlockVector<VectorBlock> Vector;
   typedef Dune::MatrixAdapter<BCRSMat,Vector,Vector> Operator;
-  typedef Dune::CollectiveCommunication<void*> Comm;
+  typedef Dune::Communication<void*> Comm;
   int n;
 
   Comm c;
diff --git a/dune/istl/paamg/test/pamgmmtest.cc b/dune/istl/paamg/test/pamgmmtest.cc
index 80b577aef8124c46912855de9bd4076c68488f6c..afeb26900359c8b705f254c4d6b38802bf261f14 100644
--- a/dune/istl/paamg/test/pamgmmtest.cc
+++ b/dune/istl/paamg/test/pamgmmtest.cc
@@ -25,7 +25,7 @@ std::string matrixfile = "gr_30_30.mtx";
 
 void loadMatrix(std::shared_ptr<BCRSMat>& pA){
   pA = std::make_shared<BCRSMat>();
-  if(MPIHelper::getCollectiveCommunication().rank() == 0){
+  if(MPIHelper::getCommunication().rank() == 0){
     Dune::loadMatrixMarket(*pA, matrixfile);
   }
 }
@@ -47,7 +47,7 @@ std::shared_ptr<Comm> repartMatrix(const std::shared_ptr<BCRSMat>& pA_orig, std:
 
 int main(int argc, char** argv){
   auto& mpihelper = MPIHelper::instance(argc, argv);
-  auto world = mpihelper.getCollectiveCommunication();
+  auto world = mpihelper.getCommunication();
 
   int size = mpihelper.size();
   int rank = mpihelper.rank();
diff --git a/dune/istl/paamg/test/pthreadamgtest.cc b/dune/istl/paamg/test/pthreadamgtest.cc
index 19f67f5e19552315946326b97ab73ce3e2933f95..b9567516b29b9bf3eed51af4b4a486d50d85e7e8 100644
--- a/dune/istl/paamg/test/pthreadamgtest.cc
+++ b/dune/istl/paamg/test/pthreadamgtest.cc
@@ -66,7 +66,7 @@ typedef Dune::BCRSMatrix<MatrixBlock> BCRSMat;
 typedef Dune::FieldVector<XREAL,1> VectorBlock;
 typedef Dune::BlockVector<VectorBlock> Vector;
 typedef Dune::MatrixAdapter<BCRSMat,Vector,Vector> Operator;
-typedef Dune::CollectiveCommunication<void*> Comm;
+typedef Dune::Communication<void*> Comm;
 typedef Dune::SeqSSOR<BCRSMat,Vector,Vector> Smoother;
 typedef Dune::Amg::SmootherTraits<Smoother>::Arguments SmootherArgs;
 
diff --git a/dune/istl/paamg/test/pthreadtwoleveltest.cc b/dune/istl/paamg/test/pthreadtwoleveltest.cc
index f4bc528461448fad317096412bfa5537144ca569..9ebaf0f254d8fd4b9cea94294f88f940c9086030 100644
--- a/dune/istl/paamg/test/pthreadtwoleveltest.cc
+++ b/dune/istl/paamg/test/pthreadtwoleveltest.cc
@@ -32,7 +32,7 @@ typedef Dune::BCRSMatrix<MatrixBlock> BCRSMat;
 typedef Dune::FieldVector<XREAL,1> VectorBlock;
 typedef Dune::BlockVector<VectorBlock> Vector;
 typedef Dune::MatrixAdapter<BCRSMat,Vector,Vector> Operator;
-typedef Dune::CollectiveCommunication<void*> Comm;
+typedef Dune::Communication<void*> Comm;
 typedef Dune::SeqSSOR<BCRSMat,Vector,Vector> Smoother;
 typedef Dune::Amg::SmootherTraits<Smoother>::Arguments SmootherArgs;
 #ifndef USE_OVERLAPPINGSCHWARZ
@@ -98,7 +98,7 @@ void testTwoLevelMethod()
     typedef Dune::FieldVector<double,BS> VectorBlock;
     typedef Dune::BlockVector<VectorBlock> Vector;
     typedef Dune::MatrixAdapter<BCRSMat,Vector,Vector> Operator;
-    typedef Dune::CollectiveCommunication<void*> Comm;
+    typedef Dune::Communication<void*> Comm;
     Comm c;
     int n;
     BCRSMat mat = setupAnisotropic2d<MatrixBlock>(N, indices, c, &n, 1);
diff --git a/dune/istl/paamg/test/twolevelmethodtest.cc b/dune/istl/paamg/test/twolevelmethodtest.cc
index d9c2fa746b67132ce99276776c18e1bcfaea1aeb..7327e5082836171b4ecec0a9a208fd2170d8b4c2 100644
--- a/dune/istl/paamg/test/twolevelmethodtest.cc
+++ b/dune/istl/paamg/test/twolevelmethodtest.cc
@@ -34,7 +34,7 @@ void testTwoLevelMethod()
     typedef Dune::FieldVector<double,BS> VectorBlock;
     typedef Dune::BlockVector<VectorBlock> Vector;
     typedef Dune::MatrixAdapter<BCRSMat,Vector,Vector> Operator;
-    typedef Dune::CollectiveCommunication<void*> Comm;
+    typedef Dune::Communication<void*> Comm;
     Comm c;
     int n;
     BCRSMat mat = setupAnisotropic2d<MatrixBlock>(N, indices, c, &n, 1);
diff --git a/dune/istl/test/solverfactorytest.cc.in b/dune/istl/test/solverfactorytest.cc.in
index 5870c25e6d5f94e8a5e23584fb960882a90f4518..37c1c5b663a464714b3592c3953d99a0c531e9e1 100644
--- a/dune/istl/test/solverfactorytest.cc.in
+++ b/dune/istl/test/solverfactorytest.cc.in
@@ -158,11 +158,11 @@ int main(int argc, char** argv){
   Dune::initSolverFactories<Dune::NonoverlappingSchwarzOperator<Matrix,Vector,Vector,Communication>>();
 
   std::cout << std::endl << " Testing sequential tests... " << std::endl;
-  testSeq(config.sub("sequential"), mpihelper.getCollectiveCommunication());
+  testSeq(config.sub("sequential"), mpihelper.getCommunication());
   std::cout << std::endl << " Testing overlapping tests... " << std::endl;
-  testOverlapping(config.sub("overlapping"), mpihelper.getCollectiveCommunication());
+  testOverlapping(config.sub("overlapping"), mpihelper.getCommunication());
   // Still need to implement proper discretization for non-overlapping
   // std::cout << std::endl << " Testing nonoverlapping tests... " << std::endl;
-  // testNonoverlapping(config.sub("overlapping"), mpihelper.getCollectiveCommunication());
+  // testNonoverlapping(config.sub("overlapping"), mpihelper.getCommunication());
   return 0;
 }
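
Note (not part of the patch): taken together, the change is a pure spelling update: Dune::CollectiveCommunication<T> becomes Dune::Communication<T>, and MPIHelper::getCollectiveCommunication() becomes MPIHelper::getCommunication(); the returned objects behave as before. A minimal, self-contained sketch of the new spellings at a typical call site (the variable names are illustrative):

    #include <dune/common/parallel/mpihelper.hh>

    int main(int argc, char** argv)
    {
      Dune::MPIHelper& helper = Dune::MPIHelper::instance(argc, argv);

      // was: auto world = helper.getCollectiveCommunication();
      auto world = helper.getCommunication();

      // was: typedef Dune::CollectiveCommunication<void*> Comm;   (sequential fake communicator)
      typedef Dune::Communication<void*> Comm;
      Comm seq;

      // the collective interface is unchanged
      int total = world.sum(1);
      return (total == world.size() && seq.size() == 1) ? 0 : 1;
    }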