
Commit 5e1246d

Merge branch 'main' into main
2 parents 6fda65d + 4ae2d4b commit 5e1246d


43 files changed: +2002 / -1112 lines

.github/workflows/sonarcloud.yml

Lines changed: 4 additions & 4 deletions
@@ -17,7 +17,7 @@ jobs:
 container: fenicsproject/test-env:current-mpich
 env:
 SONAR_SCANNER_VERSION:
-5.0.1.3006 # Find the latest version at:
+6.1.0.4477 # Find the latest version at:
 # https://github.com/SonarSource/sonar-scanner-cli/tags
 SONAR_SERVER_URL: "https://sonarcloud.io"
 BUILD_WRAPPER_OUT_DIR: build_wrapper_output_directory # Directory where build-wrapper output will be placed
@@ -43,12 +43,12 @@ jobs:
 restore-keys: ${{ runner.os }}-sonar
 - name: Download and set up sonar-scanner
 env:
-SONAR_SCANNER_DOWNLOAD_URL: https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${{ env.SONAR_SCANNER_VERSION }}-linux.zip
+SONAR_SCANNER_DOWNLOAD_URL: https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${{ env.SONAR_SCANNER_VERSION }}-linux-x64.zip
 run: |
 mkdir -p $HOME/.sonar
 wget -O $HOME/.sonar/sonar-scanner.zip ${{ env.SONAR_SCANNER_DOWNLOAD_URL }}
 unzip -o $HOME/.sonar/sonar-scanner.zip -d $HOME/.sonar/
-echo "$HOME/.sonar/sonar-scanner-${{ env.SONAR_SCANNER_VERSION }}-linux/bin" >> $GITHUB_PATH
+echo "$HOME/.sonar/sonar-scanner-${{ env.SONAR_SCANNER_VERSION }}-linux-x64/bin" >> $GITHUB_PATH
 - name: Download and set up build-wrapper
 env:
 BUILD_WRAPPER_DOWNLOAD_URL: ${{ env.SONAR_SERVER_URL }}/static/cpp/build-wrapper-linux-x86.zip
@@ -71,4 +71,4 @@ jobs:
 GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
 SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
 run: |
-sonar-scanner --define sonar.host.url="${{ env.SONAR_SERVER_URL }}" --define sonar.cfamily.build-wrapper-output="${{ env.BUILD_WRAPPER_OUT_DIR }}"
+sonar-scanner --define sonar.host.url="${{ env.SONAR_SERVER_URL }}" --define sonar.cfamily.compile-commands="${{ env.BUILD_WRAPPER_OUT_DIR }}/compile_commands.json"

cpp/demo/interpolation-io/main.cpp

Lines changed: 2 additions & 4 deletions
@@ -29,8 +29,6 @@ using namespace dolfinx;
 /// and outputs the finite element function to a VTX file for
 /// visualisation.
 ///
-/// Also shows how to create a finite element using Basix.
-///
 /// @tparam T Scalar type of the finite element function.
 /// @tparam U Float type for the finite element basis and the mesh.
 /// @param mesh Mesh.
@@ -190,8 +188,8 @@ void interpolate_nedelec(std::shared_ptr<mesh::Mesh<U>> mesh,
 #endif
 }

-/// @brief This program shows how to create finite element spaces without FFCx
-/// generated code.
+/// @brief This program shows how to interpolate functions into different types
+/// of finite element spaces and output the result to file for visualisation.
 int main(int argc, char* argv[])
 {
 dolfinx::init_logging(argc, argv);

cpp/dolfinx/common/IndexMap.cpp

Lines changed: 16 additions & 16 deletions
@@ -19,7 +19,6 @@ using namespace dolfinx::common;

 namespace
 {
-
 /// @brief Given source ranks (ranks that own indices ghosted by the
 /// calling rank), compute ranks that ghost indices owned by the calling
 /// rank.
@@ -156,6 +155,7 @@ communicate_ghosts_to_owners(MPI_Comm comm, std::span<const int> src,
 /// Given an index map and a subset of local indices (can be owned or
 /// ghost but must be unique and sorted), compute the owned, ghost and
 /// ghost owners in the submap.
+///
 /// @param[in] imap An index map.
 /// @param[in] indices List of entity indices (indices local to the
 /// process).
@@ -175,9 +175,6 @@ compute_submap_indices(const IndexMap& imap,
 std::span<const std::int32_t> indices,
 IndexMapOrder order, bool allow_owner_change)
 {
-std::span<const int> src = imap.src();
-std::span<const int> dest = imap.dest();
-
 // Create lookup array to determine if an index is in the sub-map
 std::vector<std::uint8_t> is_in_submap(imap.size_local() + imap.num_ghosts(),
 0);
@@ -210,7 +207,9 @@ compute_submap_indices(const IndexMap& imap,
 // processes that can own them in the submap.
 std::vector<std::pair<std::int64_t, int>> global_idx_to_possible_owner;
 const std::array local_range = imap.local_range();
+
 // Loop through the received indices
+std::span<const int> dest = imap.dest();
 for (std::size_t i = 0; i < recv_disp.size() - 1; ++i)
 {
 for (int j = recv_disp[i]; j < recv_disp[i + 1]; ++j)
@@ -249,16 +248,16 @@ compute_submap_indices(const IndexMap& imap,
 // load balancing, though the impact is probably only very small
 auto it = std::ranges::lower_bound(global_idx_to_possible_owner, idx,
 std::ranges::less(),
-[](auto& e) { return e.first; });
+[](auto e) { return e.first; });
 assert(it != global_idx_to_possible_owner.end() and it->first == idx);
 send_owners.push_back(it->second);
 }

 // Create neighbourhood comm (owner -> ghost)
 MPI_Comm comm1;
 int ierr = MPI_Dist_graph_create_adjacent(
-imap.comm(), src.size(), src.data(), MPI_UNWEIGHTED, dest.size(),
-dest.data(), MPI_UNWEIGHTED, MPI_INFO_NULL, false, &comm1);
+imap.comm(), imap.src().size(), imap.src().data(), MPI_UNWEIGHTED,
+dest.size(), dest.data(), MPI_UNWEIGHTED, MPI_INFO_NULL, false, &comm1);
 dolfinx::MPI::check_error(imap.comm(), ierr);

 // Send the data
@@ -350,7 +349,7 @@ compute_submap_indices(const IndexMap& imap,
 }

 // Compute submap destination ranks
-// FIXME Remove call to NBX
+// FIXME: Remove call to NBX
 std::vector<int> submap_dest
 = dolfinx::MPI::compute_graph_edges_nbx(imap.comm(), submap_src);
 std::ranges::sort(submap_dest);
@@ -379,7 +378,7 @@ compute_submap_ghost_indices(std::span<const int> submap_src,
 std::span<const std::int32_t> submap_owned,
 std::span<const std::int64_t> submap_ghosts_global,
 std::span<const std::int32_t> submap_ghost_owners,
-int submap_offset, const IndexMap& imap)
+std::int64_t submap_offset, const IndexMap& imap)
 {
 // --- Step 1 ---: Send global ghost indices (w.r.t. original imap) to
 // owning rank
@@ -397,15 +396,15 @@ compute_submap_ghost_indices(std::span<const int> submap_src,
 std::vector<std::int64_t> send_gidx;
 {
 send_gidx.reserve(recv_indices.size());
-// NOTE: Received indices are owned by this process in the submap, but not
-// necessarily in the original imap, so we must use global_to_local to
-// convert rather than subtracting local_range[0]
-// TODO Convert recv_indices or submap_owned?
-std::vector<int32_t> recv_indices_local(recv_indices.size());
+// NOTE: Received indices are owned by this process in the submap,
+// but not necessarily in the original imap, so we must use
+// global_to_local to convert rather than subtracting local_range[0]
+// TODO: Convert recv_indices or submap_owned?
+std::vector<std::int32_t> recv_indices_local(recv_indices.size());
 imap.global_to_local(recv_indices, recv_indices_local);

 // Compute submap global index
-for (auto idx : recv_indices_local)
+for (std::int32_t idx : recv_indices_local)
 {
 // Could avoid search by creating look-up array
 auto it = std::ranges::lower_bound(submap_owned, idx);
@@ -415,7 +414,8 @@ compute_submap_ghost_indices(std::span<const int> submap_src,
 }
 }

-// --- Step 3 ---: Send submap global indices to process that ghost them
+// --- Step 3 ---: Send submap global indices to process that ghost
+// them

 std::vector<std::int64_t> recv_gidx(send_disp.back());
 {

cpp/dolfinx/fem/Expression.h

Lines changed: 1 addition & 2 deletions
@@ -199,7 +199,7 @@ class Expression
 num_argument_dofs
 = _argument_function_space->dofmap()->element_dof_layout().num_dofs();
 auto element = _argument_function_space->element();
-
+num_argument_dofs *= _argument_function_space->dofmap()->bs();
 assert(element);
 if (element->needs_dof_transformations())
 {
@@ -244,7 +244,6 @@ class Expression
 std::ranges::fill(values_local, 0);
 _fn(values_local.data(), coeff_cell, constant_data.data(),
 coord_dofs.data(), entity_index, nullptr);
-
 post_dof_transform(values_local, cell_info, e, size0);
 for (std::size_t j = 0; j < values_local.size(); ++j)
 values[e * vshape[1] + j] = values_local[j];

cpp/dolfinx/fem/interpolate.h

Lines changed: 44 additions & 21 deletions
@@ -134,12 +134,12 @@ using mdspan_t = MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan<

 /// @brief Scatter data into non-contiguous memory.
 ///
-/// Scatter blocked data `send_values` to its corresponding `src_rank` and
-/// insert the data into `recv_values`. The insert location in
+/// Scatter blocked data `send_values` to its corresponding `src_rank`
+/// and insert the data into `recv_values`. The insert location in
 /// `recv_values` is determined by `dest_ranks`. If the j-th dest rank
 /// is -1, then `recv_values[j*block_size:(j+1)*block_size]) = 0`.
 ///
-/// @param[in] comm The MPI communicator
+/// @param[in] comm The MPI communicator.
 /// @param[in] src_ranks Rank owning the values of each row in
 /// `send_values`.
 /// @param[in] dest_ranks List of ranks receiving data. Size of array is
@@ -194,9 +194,11 @@ void scatter_values(MPI_Comm comm, std::span<const std::int32_t> src_ranks,
 std::vector<std::int32_t> recv_offsets(in_ranks.size() + 1, 0);
 {
 // Build map from parent to neighborhood communicator ranks
-std::map<std::int32_t, std::int32_t> rank_to_neighbor;
+std::vector<std::pair<std::int32_t, std::int32_t>> rank_to_neighbor;
+rank_to_neighbor.reserve(in_ranks.size());
 for (std::size_t i = 0; i < in_ranks.size(); i++)
-rank_to_neighbor[in_ranks[i]] = i;
+rank_to_neighbor.push_back({in_ranks[i], i});
+std::ranges::sort(rank_to_neighbor);

 // Compute receive sizes
 std::ranges::for_each(
@@ -205,8 +207,11 @@ void scatter_values(MPI_Comm comm, std::span<const std::int32_t> src_ranks,
 {
 if (rank >= 0)
 {
-const int neighbor = rank_to_neighbor[rank];
-recv_sizes[neighbor] += block_size;
+auto it = std::ranges::lower_bound(rank_to_neighbor, rank,
+std::ranges::less(),
+[](auto e) { return e.first; });
+assert(it != rank_to_neighbor.end() and it->first == rank);
+recv_sizes[it->second] += block_size;
 }
 });

@@ -221,26 +226,42 @@ void scatter_values(MPI_Comm comm, std::span<const std::int32_t> src_ranks,
 {
 if (const std::int32_t rank = dest_ranks[i]; rank >= 0)
 {
-const int neighbor = rank_to_neighbor[rank];
-int insert_pos = recv_offsets[neighbor] + recv_counter[neighbor];
+auto it = std::ranges::lower_bound(rank_to_neighbor, rank,
+std::ranges::less(),
+[](auto e) { return e.first; });
+assert(it != rank_to_neighbor.end() and it->first == rank);
+int insert_pos = recv_offsets[it->second] + recv_counter[it->second];
 comm_to_output[insert_pos / block_size] = i * block_size;
-recv_counter[neighbor] += block_size;
+recv_counter[it->second] += block_size;
 }
 }
 }

 std::vector<std::int32_t> send_sizes(out_ranks.size());
 send_sizes.reserve(1);
 {
-// Compute map from parent mpi rank to neigbor rank for outgoing data
-std::map<std::int32_t, std::int32_t> rank_to_neighbor;
+// Compute map from parent MPI rank to neighbor rank for outgoing
+// data. `out_ranks` is sorted, so rank_to_neighbor will be sorted
+// too.
+std::vector<std::pair<std::int32_t, std::int32_t>> rank_to_neighbor;
+rank_to_neighbor.reserve(out_ranks.size());
 for (std::size_t i = 0; i < out_ranks.size(); i++)
-rank_to_neighbor[out_ranks[i]] = i;
+rank_to_neighbor.push_back({out_ranks[i], i});

-// Compute send sizes
+// Compute send sizes. As `src_ranks` is sorted, we can move 'start'
+// in search forward.
+auto start = rank_to_neighbor.begin();
 std::ranges::for_each(
-src_ranks, [&rank_to_neighbor, &send_sizes, block_size](auto rank)
-{ send_sizes[rank_to_neighbor[rank]] += block_size; });
+src_ranks,
+[&rank_to_neighbor, &send_sizes, block_size, &start](auto rank)
+{
+auto it = std::ranges::lower_bound(start, rank_to_neighbor.end(),
+rank, std::ranges::less(),
+[](auto e) { return e.first; });
+assert(it != rank_to_neighbor.end() and it->first == rank);
+send_sizes[it->second] += block_size;
+start = it;
+});
 }

 // Compute sending offsets
@@ -257,7 +278,8 @@ void scatter_values(MPI_Comm comm, std::span<const std::int32_t> src_ranks,
 dolfinx::MPI::mpi_type<T>(), reverse_comm);
 MPI_Comm_free(&reverse_comm);

-// Insert values received from neighborhood communicator in output span
+// Insert values received from neighborhood communicator in output
+// span
 std::ranges::fill(recv_values, T(0));
 for (std::size_t i = 0; i < comm_to_output.size(); i++)
 {
@@ -1117,10 +1139,11 @@ void interpolate(Function<T, U>& u, const Function<T, U>& v,
 assert(element_u);
 const std::size_t value_size = u.function_space()->value_size();

-auto& dest_ranks = interpolation_data.src_owner;
-auto& src_ranks = interpolation_data.dest_owners;
-auto& recv_points = interpolation_data.dest_points;
-auto& evaluation_cells = interpolation_data.dest_cells;
+const std::vector<int>& dest_ranks = interpolation_data.src_owner;
+const std::vector<int>& src_ranks = interpolation_data.dest_owners;
+const std::vector<U>& recv_points = interpolation_data.dest_points;
+const std::vector<std::int32_t>& evaluation_cells
+= interpolation_data.dest_cells;

 // Evaluate the interpolating function where possible
 std::vector<T> send_values(recv_points.size() / 3 * value_size);
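
The interpolate.h changes above replace `std::map` rank lookups with a sorted `std::vector` of (parent rank, neighbourhood index) pairs searched via `std::ranges::lower_bound` with a projection. A minimal, self-contained sketch of that lookup pattern follows; it is illustrative only, not DOLFINx code, and the ranks and variable names are made up.

// Sketch (C++20): map a parent MPI rank to its neighbourhood index using a
// sorted vector of pairs and a projected binary search, as in the patch above.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <iostream>
#include <utility>
#include <vector>

int main()
{
  // Hypothetical parent ranks, in neighbourhood order.
  std::vector<std::int32_t> in_ranks = {7, 2, 5};

  // Build (parent rank, neighbourhood index) pairs and sort by rank.
  std::vector<std::pair<std::int32_t, std::int32_t>> rank_to_neighbor;
  rank_to_neighbor.reserve(in_ranks.size());
  for (std::size_t i = 0; i < in_ranks.size(); ++i)
    rank_to_neighbor.push_back({in_ranks[i], static_cast<std::int32_t>(i)});
  std::ranges::sort(rank_to_neighbor);

  // Binary search on the first element of each pair via a projection.
  std::int32_t rank = 5;
  auto it = std::ranges::lower_bound(rank_to_neighbor, rank, std::ranges::less(),
                                     [](auto e) { return e.first; });
  assert(it != rank_to_neighbor.end() and it->first == rank);
  std::cout << "rank " << rank << " -> neighbourhood index " << it->second << "\n";
  return 0;
}

Compared with `std::map`, the sorted-vector form keeps the lookup table contiguous and avoids per-node allocations, which is presumably the motivation for the change.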

cpp/dolfinx/mesh/Geometry.h

Lines changed: 1 addition & 1 deletion
@@ -113,7 +113,7 @@ class Geometry
 /// Move Assignment
 Geometry& operator=(Geometry&&) = default;

-/// Return Euclidean dimension of coordinate system
+/// Return dimension of the Euclidean coordinate system
 int dim() const { return _dim; }

 /// @brief DofMap for the geometry

cpp/dolfinx/mesh/Topology.h

Lines changed: 7 additions & 5 deletions
@@ -151,16 +151,18 @@ class Topology
 /// @brief Returns the permutation information
 const std::vector<std::uint32_t>& get_cell_permutation_info() const;

-/// @brief Get the permutation number to apply to a facet.
+/// @brief Get the numbers that encode the number of permutations to apply to
+/// facets.
 ///
-/// The permutations are numbered so that:
+/// The permutations are encoded so that:
 ///
 /// - `n % 2` gives the number of reflections to apply
 /// - `n // 2` gives the number of rotations to apply
 ///
-/// Each column of the returned array represents a cell, and each row
-/// a facet of that cell.
-/// @return The permutation number
+/// The data is stored in a flattened 2D array, so that `data[cell_index *
+/// facets_per_cell + facet_index]` contains the facet with index
+/// `facet_index` of the cell with index `cell_index`.
+/// @return The encoded permutation info
 /// @note An exception is raised if the permutations have not been
 /// computed
 const std::vector<std::uint8_t>& get_facet_permutations() const;
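
The updated doc comment describes how the facet permutation numbers are packed: for a value `n` stored at `data[cell_index * facets_per_cell + facet_index]`, `n % 2` is the number of reflections and `n / 2` the number of rotations. A minimal decoding sketch under those assumptions follows; the array contents and `facets_per_cell` value are hypothetical, not taken from DOLFINx.

// Sketch: decode a facet permutation number from the flattened layout
// described in the doc comment (reflections = n % 2, rotations = n / 2).
#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
  // Hypothetical data for 2 cells with 4 facets each, flattened as
  // data[cell_index * facets_per_cell + facet_index].
  const std::size_t facets_per_cell = 4;
  std::vector<std::uint8_t> data = {0, 1, 2, 3, 4, 5, 6, 7};

  std::size_t cell_index = 1, facet_index = 2;
  std::uint8_t n = data[cell_index * facets_per_cell + facet_index];
  int reflections = n % 2; // apply this many reflections
  int rotations = n / 2;   // then this many rotations
  std::cout << "n = " << int(n) << ": " << reflections << " reflection(s), "
            << rotations << " rotation(s)\n";
  return 0;
}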
