Skip to content

Add missing p4est functions with nonconservative terms #2308

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 29 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
29 commits
Select commit Hold shift + click to select a range
5997d89
add nonconservative term to calc_boundary_flux!
benegee Mar 20, 2025
7e6e3cf
fixup
benegee Mar 20, 2025
6e03d8b
P4est 2D mpi with noncons
benegee Apr 13, 2025
2222011
format
benegee Apr 13, 2025
45ebf61
Merge branch 'main' into bg/noncons-boundary-flux-3d
benegee Apr 13, 2025
599f2fe
fix variable name
benegee Apr 14, 2025
2c0fb81
Merge branch 'bg/noncons-boundary-flux-3d' of github.com:trixi-framew…
benegee Apr 14, 2025
1ce508e
Add passive tracers
Arpit-Babbar Apr 16, 2025
53c074b
Add missing tests
Arpit-Babbar Apr 16, 2025
257338c
Merge branch 'passive_tracer' into bg/noncons-boundary-flux-3d
benegee Apr 16, 2025
3e3abc8
Merge branch 'main' into bg/noncons-boundary-flux-3d
amrueda Apr 23, 2025
18a75c5
Modified calc_mpi_mortar_flux! and mpi_mortar_fluxes_to_elements to c…
amrueda Apr 23, 2025
8d319cf
propagate have_nonconservative_terms
benegee Apr 24, 2025
5a06b6b
Merge branch 'main' into bg/noncons-boundary-flux-3d
benegee Apr 30, 2025
2bce36f
Merge branch 'bg/noncons-boundary-flux-3d' of github.com:trixi-framew…
benegee Apr 30, 2025
d2d613e
missed in merge
benegee Apr 30, 2025
c3ab942
add 2d nonconforming MHD test
benegee Apr 30, 2025
c85a259
Merge branch 'bg/noncons-boundary-flux-3d' of github.com:trixi-framew…
benegee Apr 30, 2025
5e0be26
fix integrate_via_indices as in 3D
benegee Apr 30, 2025
0d450e8
add nonconforming MHD elixir to serial test
benegee Apr 30, 2025
a6f4a4a
tolerances
benegee Apr 30, 2025
7dc1b29
Update test/test_mpi_p4est_2d.jl
benegee Apr 30, 2025
87512ae
references
benegee Apr 30, 2025
b394034
Update test/test_p4est_2d.jl
benegee Apr 30, 2025
0018f11
add test with boundary conditions
benegee May 14, 2025
c6f98df
update reference errors
benegee May 15, 2025
910409b
Merge branch 'main' into bg/noncons-boundary-flux-3d
benegee May 15, 2025
b9009cd
Merge branch 'main' into bg/noncons-boundary-flux-3d
ranocha May 16, 2025
c7b36ee
Merge branch 'main' into bg/noncons-boundary-flux-3d
ranocha May 19, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
84 changes: 84 additions & 0 deletions examples/p4est_2d_dgsem/elixir_mhd_alfven_wave_nonconforming.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,84 @@
using OrdinaryDiffEqSSPRK, OrdinaryDiffEqLowStorageRK
using Trixi

###############################################################################
# semidiscretization of the compressible ideal GLM-MHD equations

gamma = 5 / 3 # ratio of specific heats
equations = IdealGlmMhdEquations2D(gamma)

# Manufactured/convergence-test solution provided by Trixi.jl
initial_condition = initial_condition_convergence_test

# Two-point volume flux combined with the Powell nonconservative term;
# the surface flux pairs HLLE with the same nonconservative term.
volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell)
solver = DGSEM(polydeg = 3,
               surface_flux = (flux_hlle,
                               flux_nonconservative_powell),
               volume_integral = VolumeIntegralFluxDifferencing(volume_flux))

coordinates_min = (-1.0, -1.0)
coordinates_max = (1.0, 1.0)

# Create P4estMesh with 2 x 2 trees, uniformly refined twice, fully periodic
trees_per_dimension = (2, 2)
mesh = P4estMesh(trees_per_dimension,
                 polydeg = 3, initial_refinement_level = 2,
                 coordinates_min = coordinates_min, coordinates_max = coordinates_max,
                 periodicity = true)

# OBS! Workaround to add a refinement patch after the mesh is constructed,
# which creates nonconforming interfaces.
# Refine the bottom-left quadrant of each tree to level 4
# Refinement callback handed to `Trixi.refine_p4est!` below via `@cfunction`.
#
# Arguments (matching the C callback signature, see the `@cfunction` call):
# - `p4est`:      pointer to the 2D `p4est_t` forest (unused here)
# - `which_tree`: tree index containing `quadrant` (unused here)
# - `quadrant`:   pointer to the `p4est_quadrant_t` under consideration
#
# Returns `Cint(1)` if the quadrant shall be refined, `Cint(0)` otherwise.
#
# NOTE: the first parameter was previously named `p8est` — a leftover from the
# 3D (p8est) version of this elixir; renamed to `p4est` for the 2D mesh here.
function refine_fn(p4est, which_tree, quadrant)
    # Load the quadrant struct behind the pointer to inspect coordinates/level
    quadrant_obj = unsafe_load(quadrant)
    # Refine only the bottom-left quadrant of each tree (x == y == 0)
    # until it has reached refinement level 4
    if quadrant_obj.x == 0 && quadrant_obj.y == 0 &&
       quadrant_obj.level < 4
        # return true (refine)
        return Cint(1)
    else
        # return false (don't refine)
        return Cint(0)
    end
end

# Refine recursively until each bottom-left quadrant of a tree has level 4.
# The mesh will be rebalanced before the simulation starts.
refine_fn_c = @cfunction(refine_fn, Cint,
                         (Ptr{Trixi.p4est_t}, Ptr{Trixi.p4est_topidx_t},
                          Ptr{Trixi.p4est_quadrant_t}))
Trixi.refine_p4est!(mesh.p4est, true, refine_fn_c, C_NULL)

semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver)

###############################################################################
# ODE solvers, callbacks etc.

tspan = (0.0, 1.0)
ode = semidiscretize(semi, tspan)

summary_callback = SummaryCallback()

# Compute error norms etc. every `analysis_interval` time steps
analysis_interval = 100
analysis_callback = AnalysisCallback(semi, interval = analysis_interval)
alive_callback = AliveCallback(analysis_interval = analysis_interval)

# Write solution files (primitive variables) every 100 time steps
save_solution = SaveSolutionCallback(interval = 100,
                                     save_initial_solution = true,
                                     save_final_solution = true,
                                     solution_variables = cons2prim)

cfl = 1.0
stepsize_callback = StepsizeCallback(cfl = cfl)

# Adapt the GLM divergence-cleaning speed; see the Trixi.jl docs for
# `GlmSpeedCallback`
glm_speed_callback = GlmSpeedCallback(glm_scale = 0.5, cfl = cfl)

callbacks = CallbackSet(summary_callback,
                        analysis_callback, alive_callback,
                        save_solution,
                        stepsize_callback,
                        glm_speed_callback)

###############################################################################
# run the simulation

sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false);
            dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
            ode_default_options()..., callback = callbacks);
71 changes: 71 additions & 0 deletions examples/p4est_3d_dgsem/elixir_mhd_alfven_wave_nonperiodic.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
using OrdinaryDiffEqSSPRK, OrdinaryDiffEqLowStorageRK
using Trixi

###############################################################################
# semidiscretization of the compressible ideal GLM-MHD equations

equations = IdealGlmMhdEquations3D(5 / 3) # ratio of specific heats gamma = 5/3

# Manufactured/convergence-test solution provided by Trixi.jl
initial_condition = initial_condition_convergence_test

# Dirichlet boundary conditions built from the convergence-test solution,
# applied on all six faces of the cube (the mesh below is non-periodic)
boundary_condition = BoundaryConditionDirichlet(initial_condition)
boundary_conditions = Dict(:x_neg => boundary_condition,
                           :x_pos => boundary_condition,
                           :y_neg => boundary_condition,
                           :y_pos => boundary_condition,
                           :z_neg => boundary_condition,
                           :z_pos => boundary_condition)

# Two-point volume flux combined with the Powell nonconservative term;
# the surface flux pairs HLLE with the same nonconservative term.
volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell)
solver = DGSEM(polydeg = 3,
               surface_flux = (flux_hlle,
                               flux_nonconservative_powell),
               volume_integral = VolumeIntegralFluxDifferencing(volume_flux))

coordinates_min = (-1.0, -1.0, -1.0)
coordinates_max = (1.0, 1.0, 1.0)

# Create P4estMesh with 2 x 2 x 2 trees, uniformly refined twice, non-periodic
# so that the boundary conditions above are actually used
trees_per_dimension = (2, 2, 2)
mesh = P4estMesh(trees_per_dimension,
                 polydeg = 3, initial_refinement_level = 2,
                 coordinates_min = coordinates_min, coordinates_max = coordinates_max,
                 periodicity = false)

semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
                                    boundary_conditions = boundary_conditions)

###############################################################################
# ODE solvers, callbacks etc.

tspan = (0.0, 1.0)
ode = semidiscretize(semi, tspan)

summary_callback = SummaryCallback()

# Compute error norms etc. every `analysis_interval` time steps
analysis_interval = 100
analysis_callback = AnalysisCallback(semi, interval = analysis_interval)
alive_callback = AliveCallback(analysis_interval = analysis_interval)

# Write solution files (primitive variables) every 100 time steps
save_solution = SaveSolutionCallback(interval = 100,
                                     save_initial_solution = true,
                                     save_final_solution = true,
                                     solution_variables = cons2prim)

cfl = 1.0
stepsize_callback = StepsizeCallback(cfl = cfl)

# Adapt the GLM divergence-cleaning speed; see the Trixi.jl docs for
# `GlmSpeedCallback`
glm_speed_callback = GlmSpeedCallback(glm_scale = 0.5, cfl = cfl)

callbacks = CallbackSet(summary_callback,
                        analysis_callback, alive_callback,
                        save_solution,
                        stepsize_callback,
                        glm_speed_callback)

###############################################################################
# run the simulation

sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false);
            dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
            ode_default_options()..., callback = callbacks);
8 changes: 8 additions & 0 deletions src/callbacks_step/analysis_dg2d.jl
Original file line number Diff line number Diff line change
Expand Up @@ -360,6 +360,10 @@ function analyze(::Val{:linf_divb}, du, u, t,
linf_divb = max(linf_divb, abs(divb))
end
end
if mpi_isparallel()
# Base.max instead of max needed, see comment in src/auxiliary/math.jl
linf_divb = MPI.Allreduce!(Ref(linf_divb), Base.max, mpi_comm())[]
end

return linf_divb
end
Expand Down Expand Up @@ -398,6 +402,10 @@ function analyze(::Val{:linf_divb}, du, u, t,
linf_divb = max(linf_divb, abs(divb))
end
end
if mpi_isparallel()
# Base.max instead of max needed, see comment in src/auxiliary/math.jl
linf_divb = MPI.Allreduce!(Ref(linf_divb), Base.max, mpi_comm())[]
end

return linf_divb
end
Expand Down
10 changes: 6 additions & 4 deletions src/callbacks_step/analysis_dg2d_parallel.jl
Original file line number Diff line number Diff line change
Expand Up @@ -180,10 +180,12 @@ function integrate_via_indices(func::Func, u,
@unpack weights = dg.basis

# Initialize integral with zeros of the right shape
# Pass `zero(SVector{nvariables(equations), eltype(u))}` to `func` since `u` might be empty, if the
# current rank has no elements, see also https://github.com/trixi-framework/Trixi.jl/issues/1096.
integral = zero(func(zero(SVector{nvariables(equations), eltype(u)}), 1, 1, 1,
equations, dg, args...))
# Pass `zeros(eltype(u), nvariables(equations), nnodes(dg), nnodes(dg), 1)`
# to `func` since `u` might be empty, if the current rank has no elements.
# See also https://github.com/trixi-framework/Trixi.jl/issues/1096, and
# https://github.com/trixi-framework/Trixi.jl/pull/2126/files/7cbc57cfcba93e67353566e10fce1f3edac27330#r1814483243.
integral = zero(func(zeros(eltype(u), nvariables(equations), nnodes(dg), nnodes(dg),
1), 1, 1, 1, equations, dg, args...))
volume = zero(real(mesh))

# Use quadrature to numerically integrate over entire domain
Expand Down
89 changes: 78 additions & 11 deletions src/solvers/dgsem_p4est/dg_2d_parallel.jl
Original file line number Diff line number Diff line change
Expand Up @@ -133,6 +133,39 @@ end
end
end

# Inlined version of the MPI interface flux computation for equations with
# nonconservative terms.
#
# Only the locally owned side of an MPI interface is updated: the computed
# flux (conservative part plus one half of the nonconservative part) is
# written into `surface_flux_values` for `local_element_index` only.
@inline function calc_mpi_interface_flux!(surface_flux_values,
                                          mesh::Union{ParallelP4estMesh{2},
                                                      ParallelT8codeMesh{2}},
                                          nonconservative_terms::True, equations,
                                          surface_integral, dg::DG, cache,
                                          interface_index, normal_direction,
                                          interface_node_index, local_side,
                                          surface_node_index, local_direction_index,
                                          local_element_index)
    @unpack u = cache.mpi_interfaces
    # With nonconservative terms, `surface_flux` is a tuple
    # (conservative flux, nonconservative flux)
    surface_flux, nonconservative_flux = surface_integral.surface_flux

    u_ll, u_rr = get_surface_node_vars(u, equations, dg,
                                       interface_node_index,
                                       interface_index)

    # Compute flux and non-conservative term for this side of the interface
    if local_side == 1
        flux_ = surface_flux(u_ll, u_rr, normal_direction, equations)
        noncons_flux_ = nonconservative_flux(u_ll, u_rr, normal_direction, equations)
    else # local_side == 2
        # The normal direction points towards side 2, so flip it and negate the
        # result to obtain the flux in outward direction of the local element.
        # The nonconservative flux additionally swaps the argument order
        # (`u_rr`, `u_ll`) since it is evaluated from the local element's side.
        flux_ = -surface_flux(u_ll, u_rr, -normal_direction, equations)
        noncons_flux_ = -nonconservative_flux(u_rr, u_ll, -normal_direction, equations)
    end

    # Store the conservative flux plus one half of the nonconservative part
    for v in eachvariable(equations)
        surface_flux_values[v, surface_node_index,
                            local_direction_index, local_element_index] = flux_[v] +
                                                                          0.5f0 * noncons_flux_[v]
    end
end

function prolong2mpimortars!(cache, u,
mesh::Union{ParallelP4estMesh{2}, ParallelT8codeMesh{2}},
equations,
Expand Down Expand Up @@ -208,12 +241,15 @@ function calc_mpi_mortar_flux!(surface_flux_values,
@unpack local_neighbor_ids, local_neighbor_positions, node_indices = cache.mpi_mortars
@unpack contravariant_vectors = cache.elements
@unpack fstar_primary_upper_threaded, fstar_primary_lower_threaded = cache
@unpack fstar_secondary_upper_threaded, fstar_secondary_lower_threaded = cache
index_range = eachnode(dg)

@threaded for mortar in eachmpimortar(dg, cache)
# Choose thread-specific pre-allocated container
fstar = (fstar_primary_lower_threaded[Threads.threadid()],
fstar_primary_upper_threaded[Threads.threadid()])
fstar_primary = (fstar_primary_lower_threaded[Threads.threadid()],
fstar_primary_upper_threaded[Threads.threadid()])
fstar_secondary = (fstar_secondary_lower_threaded[Threads.threadid()],
fstar_secondary_upper_threaded[Threads.threadid()])

# Get index information on the small elements
small_indices = node_indices[1, mortar]
Expand All @@ -231,7 +267,8 @@ function calc_mpi_mortar_flux!(surface_flux_values,
normal_direction = get_normal_direction(cache.mpi_mortars, node,
position, mortar)

calc_mpi_mortar_flux!(fstar, mesh, nonconservative_terms, equations,
calc_mpi_mortar_flux!(fstar_primary, fstar_secondary, mesh,
nonconservative_terms, equations,
surface_integral, dg, cache,
mortar, position, normal_direction, node)

Expand All @@ -246,14 +283,14 @@ function calc_mpi_mortar_flux!(surface_flux_values,

mpi_mortar_fluxes_to_elements!(surface_flux_values,
mesh, equations, mortar_l2, dg, cache,
mortar, fstar, u_buffer)
mortar, fstar_primary, fstar_secondary, u_buffer)
end

return nothing
end

# Inlined version of the mortar flux computation on small elements for conservation laws
@inline function calc_mpi_mortar_flux!(fstar,
@inline function calc_mpi_mortar_flux!(fstar_primary, fstar_secondary,
mesh::Union{ParallelP4estMesh{2},
ParallelT8codeMesh{2}},
nonconservative_terms::False, equations,
Expand All @@ -269,15 +306,45 @@ end
flux = surface_flux(u_ll, u_rr, normal_direction, equations)

# Copy flux to buffer
set_node_vars!(fstar[position_index], flux, equations, dg, node_index)
set_node_vars!(fstar_primary[position_index], flux, equations, dg, node_index)
set_node_vars!(fstar_secondary[position_index], flux, equations, dg, node_index)
end

# Inlined version of the mortar flux computation on small elements for
# equations with nonconservative terms.
#
# Since the nonconservative flux is not symmetric in its arguments, two
# buffers are filled: `fstar_primary` (used for the small elements, see
# `mpi_mortar_fluxes_to_elements!`) with the flux evaluated as (u_ll, u_rr),
# and `fstar_secondary` (projected onto the large element) with the flux
# evaluated as (u_rr, u_ll).
@inline function calc_mpi_mortar_flux!(fstar_primary, fstar_secondary,
                                       mesh::Union{ParallelP4estMesh{2},
                                                   ParallelT8codeMesh{2}},
                                       nonconservative_terms::True, equations,
                                       surface_integral, dg::DG, cache,
                                       mortar_index, position_index, normal_direction,
                                       node_index)
    @unpack u = cache.mpi_mortars
    # With nonconservative terms, `surface_flux` is a tuple
    # (conservative flux, nonconservative flux)
    surface_flux, nonconservative_flux = surface_integral.surface_flux

    u_ll, u_rr = get_surface_node_vars(u, equations, dg, position_index, node_index,
                                       mortar_index)

    flux = surface_flux(u_ll, u_rr, normal_direction, equations)
    # Evaluate the nonconservative flux once per argument order (see above)
    noncons_flux_primary = nonconservative_flux(u_ll, u_rr, normal_direction, equations)
    noncons_flux_secondary = nonconservative_flux(u_rr, u_ll, normal_direction,
                                                  equations)

    # Store the conservative flux plus one half of the respective
    # nonconservative contribution in each buffer
    for v in eachvariable(equations)
        fstar_primary[position_index][v, node_index] = flux[v] +
                                                       0.5f0 * noncons_flux_primary[v]
        fstar_secondary[position_index][v, node_index] = flux[v] +
                                                         0.5f0 *
                                                         noncons_flux_secondary[v]
    end
end

@inline function mpi_mortar_fluxes_to_elements!(surface_flux_values,
mesh::Union{ParallelP4estMesh{2},
ParallelT8codeMesh{2}},
equations,
mortar_l2::LobattoLegendreMortarL2,
dg::DGSEM, cache, mortar, fstar,
dg::DGSEM, cache, mortar, fstar_primary,
fstar_secondary,
u_buffer)
@unpack local_neighbor_ids, local_neighbor_positions, node_indices = cache.mpi_mortars

Expand All @@ -291,8 +358,8 @@ end
if position == 3 # -> large element
# Project small fluxes to large element.
multiply_dimensionwise!(u_buffer,
mortar_l2.reverse_upper, fstar[2],
mortar_l2.reverse_lower, fstar[1])
mortar_l2.reverse_upper, fstar_secondary[2],
mortar_l2.reverse_lower, fstar_secondary[1])
# The flux is calculated in the outward direction of the small elements,
# so the sign must be switched to get the flux in outward direction
# of the large element.
Expand Down Expand Up @@ -324,8 +391,8 @@ end
# Copy solution small to small
for i in eachnode(dg)
for v in eachvariable(equations)
surface_flux_values[v, i, small_direction, element] = fstar[position][v,
i]
surface_flux_values[v, i, small_direction, element] = fstar_primary[position][v,
i]
end
end
end
Expand Down
Loading
Loading