Commit b9deda92 authored by Christoph Grüninger's avatar Christoph Grüninger Committed by Christoph Grüninger

Fix Wshadow warnings in headers

parent 994902f9
......@@ -38,10 +38,10 @@ namespace Dune {
typedef Dune::BitSetVector<block_size, Alloc> BitSetVector;
friend class Dune::BitSetVector<block_size, Alloc>;
BitSetVectorConstReference(const BitSetVector& blockBitField, int block_number) :
blockBitField(blockBitField),
block_number(block_number)
{};
BitSetVectorConstReference(const BitSetVector& blockBitField_, int block_number_) :
blockBitField(blockBitField_),
block_number(block_number_)
{}
//! hide assignment operator
BitSetVectorConstReference& operator=(const BitSetVectorConstReference & b);
......@@ -212,10 +212,10 @@ namespace Dune {
typedef Dune::BitSetVectorConstReference<block_size,Alloc> BitSetVectorConstReference;
BitSetVectorReference(BitSetVector& blockBitField, int block_number) :
BitSetVectorConstReference(blockBitField, block_number),
blockBitField(blockBitField)
{};
BitSetVectorReference(BitSetVector& blockBitField_, int block_number_) :
BitSetVectorConstReference(blockBitField_, block_number_),
blockBitField(blockBitField_)
{}
public:
typedef std::bitset<block_size> bitset;
......
......@@ -1095,8 +1095,8 @@ namespace Dune
int messageLength;
MPI_Error_string(status[i].MPI_ERROR, message, &messageLength);
std::cerr<<" source="<<status[i].MPI_SOURCE<<" message: ";
for(int i=0; i< messageLength; i++)
std::cout<<message[i];
for(int j = 0; j < messageLength; j++)
std::cout << message[j];
}
std::cerr<<std::endl;
success=0;
......@@ -1113,8 +1113,8 @@ namespace Dune
int messageLength;
MPI_Error_string(status[i].MPI_ERROR, message, &messageLength);
std::cerr<<" source="<<status[i].MPI_SOURCE<<" message: ";
for(int i=0; i< messageLength; i++)
std::cerr<<message[i];
for(int j = 0; j < messageLength; j++)
std::cerr << message[j];
}
std::cerr<<std::endl;
success=0;
......
......@@ -776,13 +776,13 @@ namespace Dune
std::size_t noOldNeighbours = remoteIndices_.neighbours();
int* oldNeighbours = new int[noOldNeighbours];
sendBufferSizes_ = new std::size_t[noOldNeighbours];
std::size_t i=0;
std::size_t neighbourI = 0;
for(RemoteIterator remote = remoteIndices_.begin(); remote != end; ++remote, ++i) {
for(RemoteIterator remote = remoteIndices_.begin(); remote != end; ++remote, ++neighbourI) {
typedef typename RemoteIndices::RemoteIndexList::const_iterator
RemoteIndexIterator;
oldNeighbours[i]=remote->first;
oldNeighbours[neighbourI] = remote->first;
// Make sure we only have one remote index list.
assert(remote->second.first==remote->second.second);
......@@ -824,7 +824,7 @@ namespace Dune
Dune::dverb<<rank_<<": Neighbours: ";
for(i = 0; i<noOldNeighbours; ++i)
for(std::size_t i = 0; i<noOldNeighbours; ++i)
Dune::dverb<<oldNeighbours[i]<<" ";
Dune::dverb<<std::endl;
......@@ -833,11 +833,11 @@ namespace Dune
MPI_Status* statuses = new MPI_Status[noOldNeighbours];
// Pack Message data and start the sends
for(i = 0; i<noOldNeighbours; ++i)
for(std::size_t i = 0; i<noOldNeighbours; ++i)
packAndSend(oldNeighbours[i], sendBuffers_[i], sendBufferSizes_[i], requests[i]);
// Probe for incoming messages, receive and unpack them
for(i = 0; i<noOldNeighbours; ++i)
for(std::size_t i = 0; i<noOldNeighbours; ++i)
recvAndUnpack(numberer);
// }else{
// recvAndUnpack(oldNeighbours[i], numberer);
......@@ -851,7 +851,7 @@ namespace Dune
// Wait for completion of sends
if(MPI_SUCCESS!=MPI_Waitall(noOldNeighbours, requests, statuses)) {
std::cerr<<": MPI_Error occurred while sending message"<<std::endl;
for(i=0; i< noOldNeighbours; i++)
for(std::size_t i=0; i< noOldNeighbours; i++)
if(MPI_SUCCESS!=statuses[i].MPI_ERROR)
std::cerr<<"Destination "<<statuses[i].MPI_SOURCE<<" error code: "<<statuses[i].MPI_ERROR<<std::endl;
}
......@@ -961,9 +961,9 @@ namespace Dune
assert(pairs <= infoSend_[destination].pairs);
MPI_Pack(&process, 1, MPI_INT, buffer, bufferSize, &bpos,
remoteIndices_.communicator());
char attr = iterators->second.remoteIndex().attribute();
char attr2 = iterators->second.remoteIndex().attribute();
MPI_Pack(&attr, 1, MPI_CHAR, buffer, bufferSize, &bpos,
MPI_Pack(&attr2, 1, MPI_CHAR, buffer, bufferSize, &bpos,
remoteIndices_.communicator());
--indices;
}
......
......@@ -1120,7 +1120,7 @@ namespace Dune {
template<typename T, typename A>
template<bool ignorePublic>
inline void RemoteIndices<T,A>::buildRemote(bool includeSelf)
inline void RemoteIndices<T,A>::buildRemote(bool includeSelf_)
{
// Processor configuration
int rank, procs;
......@@ -1134,7 +1134,7 @@ namespace Dune {
// Do we need to send two index sets?
char sendTwo = (source_ != target_);
if(procs==1 && !(sendTwo || includeSelf))
if(procs==1 && !(sendTwo || includeSelf_))
// Nothing to communicate
return;
......@@ -1209,9 +1209,9 @@ namespace Dune {
// Update remote indices for ourself
if(sendTwo|| includeSelf)
if(sendTwo|| includeSelf_)
unpackCreateRemote(buffer[0], sourcePairs, destPairs, rank, sourcePublish,
destPublish, bufferSize, sendTwo, includeSelf);
destPublish, bufferSize, sendTwo, includeSelf_);
neighbourIds.erase(rank);
......@@ -1866,11 +1866,8 @@ namespace Dune {
if(!rindex->second.second->empty()) {
os<<rank<<": Prozess "<<rindex->first<<": "<<"receive: ";
const typename RList::const_iterator rend= rindex->second.second->end();
for(typename RList::const_iterator index = rindex->second.second->begin();
index != rend; ++index)
os<<*index<<" ";
for(const auto& index : *(rindex->second.second))
os << index << " ";
}
os<<std::endl<<std::flush;
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment