1 change: 1 addition & 0 deletions CMakeLists_files.cmake
@@ -202,6 +202,7 @@ list (APPEND PUBLIC_HEADER_FILES
opm/grid/LookUpData.hh
opm/grid/cpgrid/OrientedEntityTable.hpp
opm/grid/cpgrid/ParentToChildrenCellGlobalIdHandle.hpp
opm/grid/cpgrid/ParentToChildCellToPointGlobalIdHandle.hpp
opm/grid/cpgrid/PartitionIteratorRule.hpp
opm/grid/cpgrid/PartitionTypeIndicator.hpp
opm/grid/cpgrid/PersistentContainer.hpp
50 changes: 46 additions & 4 deletions opm/grid/cpgrid/CpGrid.cpp
@@ -51,6 +51,7 @@

#include "../CpGrid.hpp"
#include "ParentToChildrenCellGlobalIdHandle.hpp"
#include "ParentToChildCellToPointGlobalIdHandle.hpp"
#include <opm/grid/common/MetisPartition.hpp>
#include <opm/grid/common/ZoltanPartition.hpp>
//#include <opm/grid/common/ZoltanGraphFunctions.hpp>
@@ -2298,11 +2299,52 @@ void CpGrid::addLgrsUpdateLeafView(const std::vector<std::array<int,3>>& cells_p
Dune::InteriorBorder_All_Interface,
Dune::ForwardCommunication );

// After assigning global IDs to points in refined-level grids, a single point may have
// a "unique" global ID in each local leaf grid view for every process to which it belongs.
// To ensure true uniqueness, since global IDs must be distinct across the global leaf view
// and consistent across each refined-level grid, we rewrite the entries in
// localToGlobal_points_per_level.
//
// This correction is done using cell_to_point_ of all refined cells through
// communication: gathering the 8 corner points of each interior cell and scattering the
// 8 corner points of overlap cells, for all child cells of a parent cell in the level-zero grid.
//
// Design decision: why we communicate via the level-zero grid instead of via each refined-level grid.
// The reason is that the ordering in which children are stored in parent_to_children_cells_
// is always the same across all processes.
// Even though the ordering of the corners in cell_to_point_ is also the same across all processes,
// that alone may not be enough to correctly overwrite the "winner" point global ids for refined cells.
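//
// Illustrative example (hypothetical ranks and ids): a refined point shared by ranks 1 and 3 may first
// carry two "candidate" global ids, e.g. 107 on rank 1 and 512 on rank 3. After the communication below,
// rank 1 is the smallest (winner) rank, so both processes end up storing 107 for that point.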

// To store cell_to_point_ information of all refined level grids.
std::vector<std::vector<std::array<int,8>>> level_cell_to_point(cells_per_dim_vec.size());
// To decide which "candidate" point global id wins, the rank is stored. The smallest rank wins,
// i.e., the non-selected candidates get overwritten with the values from the smallest (winner) rank.
std::vector<std::vector<int>> level_winning_ranks(cells_per_dim_vec.size());

for (std::size_t level = 1; level < cells_per_dim_vec.size()+1; ++level) {

level_cell_to_point[level - 1] = currentData()[level]->cell_to_point_;
// Initialize with std::numeric_limits<int>::max() to make sure that, during communication, the rank
// of the interior cell wins (ranks are ints between 0 and comm().size()-1).
level_winning_ranks[level-1].resize(currentData()[level]->size(3), std::numeric_limits<int>::max());

for (const auto& element : elements(levelGridView(level))) {
// For interior cells, rewrite the rank value; it is later used in the "point global id competition".
if (element.partitionType() == InteriorEntity) {
for (const auto& corner : currentData()[level]->cell_to_point_[element.index()]) {
level_winning_ranks[level - 1][corner] = comm().rank();
}
}
}
}
ParentToChildCellToPointGlobalIdHandle parentToChildCellToPointGlobalId_handle(parent_to_children,
level_cell_to_point,
level_winning_ranks,
localToGlobal_points_per_level);
currentData().front()->communicate(parentToChildCellToPointGlobalId_handle,
Dune::InteriorBorder_All_Interface,
Dune::ForwardCommunication );
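// Note: with InteriorBorder_All_Interface and ForwardCommunication, gather() is invoked for
// interior/border parent cells on the sending process and scatter() for their (typically overlap)
// copies on the receiving process, which matches the assumptions made in ParentToChildCellToPointGlobalIdHandle.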

for (std::size_t level = 1; level < cells_per_dim_vec.size()+1; ++level) {
// For the general case where the LGRs might also be distributed, a communication step is needed to assign global ids
172 changes: 172 additions & 0 deletions opm/grid/cpgrid/ParentToChildCellToPointGlobalIdHandle.hpp
@@ -0,0 +1,172 @@
//===========================================================================
//
// File: ParentToChildCellToPointGlobalIdHandle.hpp
//
// Created: November 19 2024
//
// Author(s): Antonella Ritorto <[email protected]>
// Markus Blatt <[email protected]>
//
// $Date$
//
// $Revision$
//
//===========================================================================

/*
Copyright 2024 Equinor ASA
This file is part of The Open Porous Media project (OPM).
OPM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OPM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/

#ifndef OPM_PARENTTOCHILDCELLTOPOINTGLOBALIDHANDLE_HEADER
#define OPM_PARENTTOCHILDCELLTOPOINTGLOBALIDHANDLE_HEADER


#include <opm/grid/cpgrid/Entity.hpp>

#include <array>
#include <tuple>
#include <vector>


namespace
{
#if HAVE_MPI

/// \brief Handle for assignment of point global ids of refined cells.
struct ParentToChildCellToPointGlobalIdHandle {
// - The container used for gather and scatter contains "candidate" point global ids for interior elements of the refined level grids (LGRs).
// Access is done via the local index of the parent cell and its list of children:
// level_point_global_ids[ level-1 ][ level_cell_to_point[ level-1 ][ child_cell_local_index ][ corner ] ] = "candidate" point global id,
// when child_cell_local_index belongs to the children_local_index_list in
// parent_to_children_[ element.index() ] = { level, children_local_index_list }
// and corner = 0, ..., 7.
// To decide which "candidate" point global id wins, we use the rank. The smallest rank wins,
// i.e., the non-selected candidates get overwritten with the values from the smallest (winner) rank.
// - In the scatter method, the "winner" rank and the 8 point global ids of each child cell get rewritten.
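//
// Resulting buffer layout per interior parent cell with children (see size(), gather(), and scatter() below):
// [ winner rank, child_0 corner_0..7 global ids, child_1 corner_0..7 global ids, ... ]
// For non-interior parent cells or parent cells without children, a single dummy entry ('42') is sent.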

using DataType = int;

/// \param parent_to_children Map from parent index to all its children, and the level where they are stored.
/// parent_to_children_[ element.index() ] = { level, children_list local indices }
/// \param level_cell_to_point For each refined level grid, its cell_to_point_ (the 8 corner point local indices per cell).
/// \param level_winning_ranks For each refined level grid, the rank currently "winning" each point global id (the smallest rank wins).
/// \param level_point_global_ids For each refined level grid, the candidate point global ids of all its points.
ParentToChildCellToPointGlobalIdHandle(const std::vector<std::tuple<int, std::vector<int>>>& parent_to_children,
const std::vector<std::vector<std::array<int,8>>>& level_cell_to_point,
std::vector<std::vector<DataType>>& level_winning_ranks,
std::vector<std::vector<DataType>>& level_point_global_ids)
: parent_to_children_(parent_to_children)
, level_cell_to_point_(level_cell_to_point)
, level_winning_ranks_(level_winning_ranks)
, level_point_global_ids_(level_point_global_ids)
{
}

// Not every cell has children, and when it does, the number of children may vary.
bool fixedSize(std::size_t, std::size_t)
{
return false;
}
// Only communicate values attached to cells.
bool contains(std::size_t, std::size_t codim)
{
return codim == 0;
}
// Communicate variable size: 1 (rank) + (8 * number of child cells) from an interior parent cell of the level-zero grid.
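// Example: a parent cell refined into 2x2x2 = 8 children yields a message of 1 + 8*8 = 65 ints.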
template <class T> // T = Entity<0>
std::size_t size(const T& element)
{
// Skip cells that are not interior or that have no children (in that case, 'invalid' level = -1).
const auto& [level, children] = parent_to_children_[element.index()];
// [Bug in dune-common] VariableSizeCommunicator will deadlock if a process attempts to send a message of size zero.
// This can happen if the size method returns zero for all entities that are shared with another process.
// Therefore, when skipping cells without children or for overlap cells, we set the size to 1.
if ( (element.partitionType() != Dune::InteriorEntity) || (level == -1))
return 1;
return 1 + ( 8*children.size()); // rank + 8 "winner" point global ids per child cell
}

// Gather global ids of child cells of a coarse interior parent cell
template <class B, class T> // T = Entity<0>
void gather(B& buffer, const T& element)
{
// Skip cells that are not interior or that have no children (in that case, 'invalid' level = -1).
const auto& [level, children] = parent_to_children_[element.index()];
// [Bug in dune-common] VariableSizeCommunicator will deadlock if a process tries to send a message with size zero.
// To avoid this, for cells without children or for overlap cells, we set the size to 1 and write a single DataType
// value (e.g., '42').
if ( (element.partitionType() != Dune::InteriorEntity) || (level==-1)) {
buffer.write(42);
return;
}
// Store the children's corner global ids in the buffer when the element is interior and has children.
// Write the rank first, for example via the "corner 0" of cell_to_point_ of the first child:
// First child: children[0]
// First corner of first child: level_cell_to_point_[ level -1 ][children[0]] [0]
buffer.write(level_winning_ranks_[level-1][ level_cell_to_point_[level-1][children[0]][0] ]); // winner rank
for (const auto& child : children)
for (const auto& corner : level_cell_to_point_[level -1][child])
buffer.write(level_point_global_ids_[level-1][corner]);
}

// Scatter global ids of child cells of a coarse overlap parent cell
template <class B, class T> // T = Entity<0>
void scatter(B& buffer, const T& element, std::size_t num_children) // num_children = number of data items written by the sender (see size())
{
const auto& [level, children] = parent_to_children_[element.index()];
// Read all values to advance the pointer used by the buffer to the correct index.
// (Skip overlap-cells-without-children and interior-cells).
if ( ( (element.partitionType() == Dune::OverlapEntity) && (level==-1) ) || (element.partitionType() == Dune::InteriorEntity ) ) {
// Read all values to advance the pointer used by the buffer
// to the correct index
for (std::size_t item = 0; item < num_children; ++item) { // num_children is 1, or 1 + (8 * total children)
DataType tmp;
buffer.read(tmp);
}
}
else { // Overlap cell with children.
// Read and store the values in the correct location directly.
// The order of the children is the same on each process.
assert(children.size() > 0);
assert(level > 0);
DataType tmp_rank;
buffer.read(tmp_rank);
for (const auto& child : children) {
for (const auto& corner : level_cell_to_point_[level -1][child]) {
auto& min_rank = level_winning_ranks_[level-1][corner];
// Rewrite the rank (smaller rank wins)
if (tmp_rank < min_rank) {
min_rank = tmp_rank;
auto& target_entry = level_point_global_ids_[level-1][corner];
buffer.read(target_entry);
} else {
DataType rubbish;
buffer.read(rubbish);
}
}
}
}
}

private:
const std::vector<std::tuple<int, std::vector<int>>>& parent_to_children_;
const std::vector<std::vector<std::array<int,8>>>& level_cell_to_point_;
std::vector<std::vector<DataType>>& level_winning_ranks_;
std::vector<std::vector<DataType>>& level_point_global_ids_;
};
#endif // HAVE_MPI
} // namespace
#endif