Commit e58bb18

Replace inlined code with a call to selectWinnerPointIds and remove an unnecessary #if HAVE_MPI
1 parent ab94935 commit e58bb18

File tree

2 files changed, +57 -35 lines


opm/grid/CpGrid.hpp

Lines changed: 4 additions & 0 deletions
@@ -480,6 +480,10 @@ namespace Dune
                                   int min_globalId_point_in_proc,
                                   const std::vector<std::array<int,3>>& cells_per_dim_vec) const;
 
+        void selectWinnerPointIds(std::vector<std::vector<int>>& localToGlobal_points_per_level,
+                                  const std::vector<std::tuple<int,std::vector<int>>>& parent_to_children,
+                                  const std::vector<std::array<int,3>>& cells_per_dim_vec) const;
+
         /// --------------- Adaptivity (begin) ---------------
         /// @brief Mark entity for refinement (or coarsening).

opm/grid/cpgrid/CpGrid.cpp

Lines changed: 53 additions & 35 deletions
@@ -1349,7 +1349,6 @@ void CpGrid::collectCellIdsAndCandidatePointIds( std::vector<std::vector<int>>&
                                                  int min_globalId_point_in_proc,
                                                  const std::vector<std::array<int,3>>& cells_per_dim_vec ) const
 {
-#if HAVE_MPI
     for (std::size_t level = 1; level < cells_per_dim_vec.size()+1; ++level) {
         localToGlobal_cells_per_level[level-1].resize((*current_data_)[level]->size(0));
         localToGlobal_points_per_level[level-1].resize((*current_data_)[level]->size(3));
@@ -1380,6 +1379,43 @@ void CpGrid::collectCellIdsAndCandidatePointIds( std::vector<std::vector<int>>&
             }
         }
     }
+}
+
+void CpGrid::selectWinnerPointIds(std::vector<std::vector<int>>& localToGlobal_points_per_level,
+                                  const std::vector<std::tuple<int,std::vector<int>>>& parent_to_children,
+                                  const std::vector<std::array<int,3>>& cells_per_dim_vec) const
+{
+#if HAVE_MPI
+    // To store cell_to_point_ information of all refined level grids.
+    std::vector<std::vector<std::array<int,8>>> level_cell_to_point(cells_per_dim_vec.size());
+    // To decide which "candidate" point global id wins, the rank is stored. The smallest rank wins,
+    // i.e., the other non-selected candidates get rewritten with the values from the smallest (winner) rank.
+    std::vector<std::vector<int>> level_winning_ranks(cells_per_dim_vec.size());
+
+    for (std::size_t level = 1; level < cells_per_dim_vec.size()+1; ++level) {
+
+        level_cell_to_point[level-1] = currentData()[level]->cell_to_point_;
+        // Set std::numeric_limits<int>::max() to make sure that, during communication, the rank of the interior cell
+        // wins (int between 0 and comm().size()).
+        level_winning_ranks[level-1].resize(currentData()[level]->size(3), std::numeric_limits<int>::max());
+
+        for (const auto& element : elements(levelGridView(level))) {
+            // For interior cells, rewrite the rank value - later used in the "point global id competition".
+            if (element.partitionType() == InteriorEntity) {
+                for (const auto& corner : currentData()[level]->cell_to_point_[element.index()]) {
+                    int rank = comm().rank();
+                    level_winning_ranks[level-1][corner] = rank;
+                }
+            }
+        }
+    }
+    ParentToChildCellToPointGlobalIdHandle parentToChildCellToPointGlobalId_handle(parent_to_children,
+                                                                                   level_cell_to_point,
+                                                                                   level_winning_ranks,
+                                                                                   localToGlobal_points_per_level);
+    currentData().front()->communicate(parentToChildCellToPointGlobalId_handle,
+                                       Dune::InteriorBorder_All_Interface,
+                                       Dune::ForwardCommunication);
 #endif
 }
 
@@ -2224,7 +2260,8 @@ void CpGrid::addLgrsUpdateLeafView(const std::vector<std::array<int,3>>& cells_p
                                    const std::vector<std::array<int,3>>& endIJK_vec,
                                    const std::vector<std::string>& lgr_name_vec)
 {
-    // For parallel run, level zero grid is stored in distributed_data_[0]. If CpGrid::scatterGrid has been invoked, then current_view_data_ == distributed_data_[0].
+    // For parallel run, level zero grid is stored in distributed_data_[0]. If CpGrid::scatterGrid has been invoked,
+    // then current_view_data_ == distributed_data_[0].
     // For serial run, level zero grid is stored in data_[0]. In this case, current_view_data_ == data_[0].
     // Note: currentData() returns data_ (if grid is not distributed) or distributed_data_ otherwise.
 
@@ -2329,13 +2366,13 @@ void CpGrid::addLgrsUpdateLeafView(const std::vector<std::array<int,3>>& cells_p
                                           min_globalId_point_in_proc,
                                           cells_per_dim_vec);
 
-
+
     const auto& parent_to_children = current_data_->front()->parent_to_children_cells_;
     ParentToChildrenCellGlobalIdHandle parentToChildrenGlobalId_handle(parent_to_children, localToGlobal_cells_per_level);
     currentData().front()->communicate(parentToChildrenGlobalId_handle,
                                        Dune::InteriorBorder_All_Interface,
                                        Dune::ForwardCommunication );
-
+
     // After assigning global IDs to points in refined-level grids, a single point may have
     // a "unique" global ID in each local leaf grid view for every process to which it belongs.
     // To ensure true uniqueness, since global IDs must be distinct across the global leaf view
@@ -2351,37 +2388,18 @@ void CpGrid::addLgrsUpdateLeafView(const std::vector<std::array<int,3>>& cells_p
     // is always the same, across all processes.
     // Even though the ordering of the corners in cell_to_point_ is the same across all processes,
     // this may not be enough to correctly overwrite the "winner" point global ids for refined cells.
-
-    // To store cell_to_point_ information of all refined level grids.
-    std::vector<std::vector<std::array<int,8>>> level_cell_to_point(cells_per_dim_vec.size());
-    // To decide which "candidate" point global id wins, the rank is stored. The smallest rank wins,
-    // i.e., the other non-selected candidates get rewritten with the values from the smallest (winner) rank.
-    std::vector<std::vector<int>> level_winning_ranks(cells_per_dim_vec.size());
-
-    for (std::size_t level = 1; level < cells_per_dim_vec.size()+1; ++level) {
-
-        level_cell_to_point[level-1] = currentData()[level]->cell_to_point_;
-        // Set std::numeric_limits<int>::max() to make sure that, during communication, the rank of the interior cell
-        // wins (int between 0 and comm().size()).
-        level_winning_ranks[level-1].resize(currentData()[level]->size(3), std::numeric_limits<int>::max());
-
-        for (const auto& element : elements(levelGridView(level))) {
-            // For interior cells, rewrite the rank value - later used in the "point global id competition".
-            if (element.partitionType() == InteriorEntity) {
-                for (const auto& corner : currentData()[level]->cell_to_point_[element.index()]) {
-                    int rank = comm().rank();
-                    level_winning_ranks[level-1][corner] = rank;
-                }
-            }
-        }
-    }
-    ParentToChildCellToPointGlobalIdHandle parentToChildCellToPointGlobalId_handle(parent_to_children,
-                                                                                   level_cell_to_point,
-                                                                                   level_winning_ranks,
-                                                                                   localToGlobal_points_per_level);
-    currentData().front()->communicate(parentToChildCellToPointGlobalId_handle,
-                                       Dune::InteriorBorder_All_Interface,
-                                       Dune::ForwardCommunication );
+    //
+    /** The current approach avoids duplicated point ids when
+        1. the LGR is distributed in P_{i_0}, ..., P_{i_n}, with n+1 < comm().size(),
+        AND
+        2. there is no coarse cell seen by a process P with P != P_{i_j}, j = 0, ..., n.
+        Otherwise, there will be duplicated point ids.
+
+        Reason: neighboring cells that only share corners (not faces) are NOT considered in the
+        overlap layer of the process. */
+    selectWinnerPointIds(localToGlobal_points_per_level,
+                         parent_to_children,
+                         cells_per_dim_vec);
 
     for (std::size_t level = 1; level < cells_per_dim_vec.size()+1; ++level) {
         // For the general case where the LGRs might be also distributed, a communication step is needed to assign global ids
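For readers skimming the diff, the standalone sketch below illustrates the "smallest rank wins" rule that selectWinnerPointIds relies on. It is a simplified illustration with hypothetical names (Candidate, selectWinnerId) and made-up ids; it does not use the CpGrid/DUNE machinery, where ParentToChildCellToPointGlobalIdHandle performs the actual selection during communicate().

// Simplified sketch (not the CpGrid API): each process proposes a (rank, id)
// candidate for a shared refined point. Processes where the point is not
// interior propose with rank std::numeric_limits<int>::max(); interior owners
// propose with their own rank. After exchanging candidates, the id proposed by
// the smallest rank wins, so every process ends up with the same global id.
#include <iostream>
#include <limits>
#include <vector>

struct Candidate {
    int rank; // proposing process rank (max() if the point is not interior there)
    int id;   // point global id proposed by that process
};

int selectWinnerId(const std::vector<Candidate>& candidates)
{
    Candidate best{std::numeric_limits<int>::max(), -1};
    for (const auto& c : candidates) {
        if (c.rank < best.rank) {
            best = c; // a smaller rank overrides the previous candidate
        }
    }
    return best.id;
}

int main()
{
    // Candidates for one shared point as proposed by three processes.
    std::vector<Candidate> candidates = { {2, 907}, {0, 451}, {1, 623} };
    std::cout << "winner id: " << selectWinnerId(candidates) << "\n"; // prints 451: rank 0 wins
}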

0 commit comments