diff --git a/examples/p4est_2d_dgsem/elixir_mhd_alfven_wave_nonconforming.jl b/examples/p4est_2d_dgsem/elixir_mhd_alfven_wave_nonconforming.jl
new file mode 100644
index 00000000000..af857f5dfaf
--- /dev/null
+++ b/examples/p4est_2d_dgsem/elixir_mhd_alfven_wave_nonconforming.jl
@@ -0,0 +1,84 @@
+using OrdinaryDiffEqSSPRK, OrdinaryDiffEqLowStorageRK
+using Trixi
+
+###############################################################################
+# semidiscretization of the compressible ideal GLM-MHD equations
+
+gamma = 5 / 3
+equations = IdealGlmMhdEquations2D(gamma)
+
+initial_condition = initial_condition_convergence_test
+
+volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell)
+solver = DGSEM(polydeg = 3,
+               surface_flux = (flux_hlle,
+                               flux_nonconservative_powell),
+               volume_integral = VolumeIntegralFluxDifferencing(volume_flux))
+
+coordinates_min = (-1.0, -1.0)
+coordinates_max = (1.0, 1.0)
+
+# Create P4estMesh with 2 x 2 trees
+trees_per_dimension = (2, 2)
+mesh = P4estMesh(trees_per_dimension,
+                 polydeg = 3, initial_refinement_level = 2,
+                 coordinates_min = coordinates_min, coordinates_max = coordinates_max,
+                 periodicity = true)
+
+# OBS! Workaround to add a refinement patch after mesh is constructed
+# Refine bottom left quadrant of each tree to level 4
+function refine_fn(p4est, which_tree, quadrant)
+    quadrant_obj = unsafe_load(quadrant)
+    if quadrant_obj.x == 0 && quadrant_obj.y == 0 &&
+       quadrant_obj.level < 4
+        # return true (refine)
+        return Cint(1)
+    else
+        # return false (don't refine)
+        return Cint(0)
+    end
+end
+
+# Refine recursively until each bottom left quadrant of a tree has level 4
+# The mesh will be rebalanced before the simulation starts
+refine_fn_c = @cfunction(refine_fn, Cint,
+                         (Ptr{Trixi.p4est_t}, Ptr{Trixi.p4est_topidx_t},
+                          Ptr{Trixi.p4est_quadrant_t}))
+Trixi.refine_p4est!(mesh.p4est, true, refine_fn_c, C_NULL)
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 1.0)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi, interval = analysis_interval)
+alive_callback = AliveCallback(analysis_interval = analysis_interval)
+
+save_solution = SaveSolutionCallback(interval = 100,
+                                     save_initial_solution = true,
+                                     save_final_solution = true,
+                                     solution_variables = cons2prim)
+
+cfl = 1.0
+stepsize_callback = StepsizeCallback(cfl = cfl)
+
+glm_speed_callback = GlmSpeedCallback(glm_scale = 0.5, cfl = cfl)
+
+callbacks = CallbackSet(summary_callback,
+                        analysis_callback, alive_callback,
+                        save_solution,
+                        stepsize_callback,
+                        glm_speed_callback)
+
+###############################################################################
+# run the simulation
+
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false);
+            dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+            ode_default_options()..., callback = callbacks);
diff --git a/examples/p4est_3d_dgsem/elixir_mhd_alfven_wave_nonperiodic.jl b/examples/p4est_3d_dgsem/elixir_mhd_alfven_wave_nonperiodic.jl
new file mode 100644
index 00000000000..d9d6f2829d9
--- /dev/null
+++ b/examples/p4est_3d_dgsem/elixir_mhd_alfven_wave_nonperiodic.jl
@@ -0,0 +1,71 @@
+using OrdinaryDiffEqSSPRK, OrdinaryDiffEqLowStorageRK
+using Trixi
+
+###############################################################################
+# semidiscretization of the compressible ideal GLM-MHD equations
+
+equations = IdealGlmMhdEquations3D(5 / 3)
+
+initial_condition = initial_condition_convergence_test
+
+boundary_condition = BoundaryConditionDirichlet(initial_condition)
+boundary_conditions = Dict(:x_neg => boundary_condition,
+                           :x_pos => boundary_condition,
+                           :y_neg => boundary_condition,
+                           :y_pos => boundary_condition,
+                           :z_neg => boundary_condition,
+                           :z_pos => boundary_condition)
+
+volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell)
+solver = DGSEM(polydeg = 3,
+               surface_flux = (flux_hlle,
+                               flux_nonconservative_powell),
+               volume_integral = VolumeIntegralFluxDifferencing(volume_flux))
+
+coordinates_min = (-1.0, -1.0, -1.0)
+coordinates_max = (1.0, 1.0, 1.0)
+
+# Create P4estMesh with 2 x 2 x 2 trees
+trees_per_dimension = (2, 2, 2)
+mesh = P4estMesh(trees_per_dimension,
+                 polydeg = 3, initial_refinement_level = 2,
+                 coordinates_min = coordinates_min, coordinates_max = coordinates_max,
+                 periodicity = false)
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+                                    boundary_conditions = boundary_conditions)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 1.0)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi, interval = analysis_interval)
+alive_callback = AliveCallback(analysis_interval = analysis_interval)
+
+save_solution = SaveSolutionCallback(interval = 100,
+                                     save_initial_solution = true,
+                                     save_final_solution = true,
+                                     solution_variables = cons2prim)
+
+cfl = 1.0
+stepsize_callback = StepsizeCallback(cfl = cfl)
+
+glm_speed_callback = GlmSpeedCallback(glm_scale = 0.5, cfl = cfl)
+
+callbacks = CallbackSet(summary_callback,
+                        analysis_callback, alive_callback,
+                        save_solution,
+                        stepsize_callback,
+                        glm_speed_callback)
+
+###############################################################################
+# run the simulation
+
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false);
+            dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+            ode_default_options()..., callback = callbacks);
diff --git a/src/callbacks_step/analysis_dg2d.jl b/src/callbacks_step/analysis_dg2d.jl
index dd166bbe389..0b26c349a17 100644
--- a/src/callbacks_step/analysis_dg2d.jl
+++ b/src/callbacks_step/analysis_dg2d.jl
@@ -360,6 +360,10 @@ function analyze(::Val{:linf_divb}, du, u, t,
             linf_divb = max(linf_divb, abs(divb))
         end
     end
+    if mpi_isparallel()
+        # Base.max instead of max needed, see comment in src/auxiliary/math.jl
+        linf_divb = MPI.Allreduce!(Ref(linf_divb), Base.max, mpi_comm())[]
+    end

     return linf_divb
 end
@@ -398,6 +402,10 @@ function analyze(::Val{:linf_divb}, du, u, t,
             linf_divb = max(linf_divb, abs(divb))
         end
     end
+    if mpi_isparallel()
+        # Base.max instead of max needed, see comment in src/auxiliary/math.jl
+        linf_divb = MPI.Allreduce!(Ref(linf_divb), Base.max, mpi_comm())[]
+    end

     return linf_divb
 end
diff --git a/src/callbacks_step/analysis_dg2d_parallel.jl b/src/callbacks_step/analysis_dg2d_parallel.jl
index ba2989f721d..dad1f01ee6f 100644
--- a/src/callbacks_step/analysis_dg2d_parallel.jl
+++ b/src/callbacks_step/analysis_dg2d_parallel.jl
@@ -180,10 +180,12 @@ function integrate_via_indices(func::Func, u,
     @unpack weights = dg.basis

     # Initialize integral with zeros of the right shape
-    # Pass `zero(SVector{nvariables(equations), eltype(u))}` to `func` since `u` might be empty, if the
-    # current rank has no elements, see also https://github.com/trixi-framework/Trixi.jl/issues/1096.
-    integral = zero(func(zero(SVector{nvariables(equations), eltype(u)}), 1, 1, 1,
-                         equations, dg, args...))
+    # Pass `zeros(eltype(u), nvariables(equations), nnodes(dg), nnodes(dg), 1)`
+    # to `func` since `u` might be empty, if the current rank has no elements.
+    # See also https://github.com/trixi-framework/Trixi.jl/issues/1096, and
+    # https://github.com/trixi-framework/Trixi.jl/pull/2126/files/7cbc57cfcba93e67353566e10fce1f3edac27330#r1814483243.
+    integral = zero(func(zeros(eltype(u), nvariables(equations), nnodes(dg), nnodes(dg),
+                               1), 1, 1, 1, equations, dg, args...))
     volume = zero(real(mesh))

     # Use quadrature to numerically integrate over entire domain
diff --git a/src/solvers/dgsem_p4est/dg_2d_parallel.jl b/src/solvers/dgsem_p4est/dg_2d_parallel.jl
index 8ea7be27610..1491cd50c6c 100644
--- a/src/solvers/dgsem_p4est/dg_2d_parallel.jl
+++ b/src/solvers/dgsem_p4est/dg_2d_parallel.jl
@@ -133,6 +133,39 @@ end
     end
 end

+# Inlined version of the interface flux computation for non-conservative equations
+@inline function calc_mpi_interface_flux!(surface_flux_values,
+                                          mesh::Union{ParallelP4estMesh{2},
+                                                      ParallelT8codeMesh{2}},
+                                          nonconservative_terms::True, equations,
+                                          surface_integral, dg::DG, cache,
+                                          interface_index, normal_direction,
+                                          interface_node_index, local_side,
+                                          surface_node_index, local_direction_index,
+                                          local_element_index)
+    @unpack u = cache.mpi_interfaces
+    surface_flux, nonconservative_flux = surface_integral.surface_flux
+
+    u_ll, u_rr = get_surface_node_vars(u, equations, dg,
+                                       interface_node_index,
+                                       interface_index)
+
+    # Compute flux and non-conservative term for this side of the interface
+    if local_side == 1
+        flux_ = surface_flux(u_ll, u_rr, normal_direction, equations)
+        noncons_flux_ = nonconservative_flux(u_ll, u_rr, normal_direction, equations)
+    else # local_side == 2
+        flux_ = -surface_flux(u_ll, u_rr, -normal_direction, equations)
+        noncons_flux_ = -nonconservative_flux(u_rr, u_ll, -normal_direction, equations)
+    end
+
+    for v in eachvariable(equations)
+        surface_flux_values[v, surface_node_index,
+                            local_direction_index, local_element_index] = flux_[v] +
+                                                                          0.5f0 * noncons_flux_[v]
+    end
+end
+
 function prolong2mpimortars!(cache, u,
                              mesh::Union{ParallelP4estMesh{2},
                                          ParallelT8codeMesh{2}},
                              equations,
@@ -208,12 +241,15 @@ function calc_mpi_mortar_flux!(surface_flux_values,
     @unpack local_neighbor_ids, local_neighbor_positions, node_indices = cache.mpi_mortars
     @unpack contravariant_vectors = cache.elements
     @unpack fstar_primary_upper_threaded, fstar_primary_lower_threaded = cache
+    @unpack fstar_secondary_upper_threaded, fstar_secondary_lower_threaded = cache
     index_range = eachnode(dg)

     @threaded for mortar in eachmpimortar(dg, cache)
         # Choose thread-specific pre-allocated container
-        fstar = (fstar_primary_lower_threaded[Threads.threadid()],
-                 fstar_primary_upper_threaded[Threads.threadid()])
+        fstar_primary = (fstar_primary_lower_threaded[Threads.threadid()],
+                         fstar_primary_upper_threaded[Threads.threadid()])
+        fstar_secondary = (fstar_secondary_lower_threaded[Threads.threadid()],
+                           fstar_secondary_upper_threaded[Threads.threadid()])

         # Get index information on the small elements
         small_indices = node_indices[1, mortar]
@@ -231,7 +267,8 @@ function calc_mpi_mortar_flux!(surface_flux_values,
                 normal_direction = get_normal_direction(cache.mpi_mortars, node,
                                                         position, mortar)

-                calc_mpi_mortar_flux!(fstar, mesh, nonconservative_terms, equations,
+                calc_mpi_mortar_flux!(fstar_primary, fstar_secondary, mesh,
+                                      nonconservative_terms, equations,
                                       surface_integral, dg, cache,
                                       mortar, position, normal_direction,
                                       node)
@@ -246,14 +283,14 @@ function calc_mpi_mortar_flux!(surface_flux_values,

         mpi_mortar_fluxes_to_elements!(surface_flux_values,
                                        mesh, equations, mortar_l2, dg, cache,
-                                       mortar, fstar, u_buffer)
+                                       mortar, fstar_primary, fstar_secondary, u_buffer)
     end

     return nothing
 end

 # Inlined version of the mortar flux computation on small elements for conservation laws
-@inline function calc_mpi_mortar_flux!(fstar,
+@inline function calc_mpi_mortar_flux!(fstar_primary, fstar_secondary,
                                        mesh::Union{ParallelP4estMesh{2},
                                                    ParallelT8codeMesh{2}},
                                        nonconservative_terms::False, equations,
@@ -269,7 +306,36 @@ end
     flux = surface_flux(u_ll, u_rr, normal_direction, equations)

     # Copy flux to buffer
-    set_node_vars!(fstar[position_index], flux, equations, dg, node_index)
+    set_node_vars!(fstar_primary[position_index], flux, equations, dg, node_index)
+    set_node_vars!(fstar_secondary[position_index], flux, equations, dg, node_index)
+end
+
+# Inlined version of the mortar flux computation on small elements for non-conservative equations
+@inline function calc_mpi_mortar_flux!(fstar_primary, fstar_secondary,
+                                       mesh::Union{ParallelP4estMesh{2},
+                                                   ParallelT8codeMesh{2}},
+                                       nonconservative_terms::True, equations,
+                                       surface_integral, dg::DG, cache,
+                                       mortar_index, position_index, normal_direction,
+                                       node_index)
+    @unpack u = cache.mpi_mortars
+    surface_flux, nonconservative_flux = surface_integral.surface_flux
+
+    u_ll, u_rr = get_surface_node_vars(u, equations, dg, position_index, node_index,
+                                       mortar_index)
+
+    flux = surface_flux(u_ll, u_rr, normal_direction, equations)
+    noncons_flux_primary = nonconservative_flux(u_ll, u_rr, normal_direction, equations)
+    noncons_flux_secondary = nonconservative_flux(u_rr, u_ll, normal_direction,
+                                                  equations)
+
+    for v in eachvariable(equations)
+        fstar_primary[position_index][v, node_index] = flux[v] +
+                                                       0.5f0 * noncons_flux_primary[v]
+        fstar_secondary[position_index][v, node_index] = flux[v] +
+                                                         0.5f0 *
+                                                         noncons_flux_secondary[v]
+    end
 end

 @inline function mpi_mortar_fluxes_to_elements!(surface_flux_values,
@@ -277,7 +343,8 @@ end
                                                 ParallelT8codeMesh{2}},
                                                 equations,
                                                 mortar_l2::LobattoLegendreMortarL2,
-                                                dg::DGSEM, cache, mortar, fstar,
+                                                dg::DGSEM, cache, mortar, fstar_primary,
+                                                fstar_secondary,
                                                 u_buffer)
     @unpack local_neighbor_ids, local_neighbor_positions, node_indices = cache.mpi_mortars

@@ -291,8 +358,8 @@ end
         if position == 3 # -> large element
             # Project small fluxes to large element.
             multiply_dimensionwise!(u_buffer,
-                                    mortar_l2.reverse_upper, fstar[2],
-                                    mortar_l2.reverse_lower, fstar[1])
+                                    mortar_l2.reverse_upper, fstar_secondary[2],
+                                    mortar_l2.reverse_lower, fstar_secondary[1])
             # The flux is calculated in the outward direction of the small elements,
             # so the sign must be switched to get the flux in outward direction
             # of the large element.
@@ -324,8 +391,8 @@ end
             # Copy solution small to small
             for i in eachnode(dg)
                 for v in eachvariable(equations)
-                    surface_flux_values[v, i, small_direction, element] = fstar[position][v,
-                                                                                          i]
+                    surface_flux_values[v, i, small_direction, element] = fstar_primary[position][v,
+                                                                                                   i]
                 end
             end
         end
diff --git a/src/solvers/dgsem_p4est/dg_3d.jl b/src/solvers/dgsem_p4est/dg_3d.jl
index 64494a024cf..d918314dbbb 100644
--- a/src/solvers/dgsem_p4est/dg_3d.jl
+++ b/src/solvers/dgsem_p4est/dg_3d.jl
@@ -353,12 +353,11 @@ function prolong2boundaries!(cache, u,
     return nothing
 end

-function calc_boundary_flux!(cache, t, boundary_condition, boundary_indexing,
+function calc_boundary_flux!(cache, t, boundary_condition::BC, boundary_indexing,
                              mesh::Union{P4estMesh{3}, T8codeMesh{3}},
-                             equations, surface_integral, dg::DG)
+                             equations, surface_integral, dg::DG) where {BC}
     @unpack boundaries = cache
-    @unpack surface_flux_values, node_coordinates, contravariant_vectors = cache.elements
-    @unpack surface_flux = surface_integral
+    @unpack surface_flux_values = cache.elements
     index_range = eachnode(dg)

     @threaded for local_index in eachindex(boundary_indexing)
@@ -384,26 +383,10 @@ function calc_boundary_flux!(cache, t, boundary_condition, boundary_indexing,
         k_node = k_node_start
         for j in eachnode(dg)
             for i in eachnode(dg)
-                # Extract solution data from boundary container
-                u_inner = get_node_vars(boundaries.u, equations, dg, i, j, boundary)
-
-                # Outward-pointing normal direction (not normalized)
-                normal_direction = get_normal_direction(direction,
-                                                        contravariant_vectors,
-                                                        i_node, j_node, k_node, element)
-
-                # Coordinates at boundary node
-                x = get_node_coords(node_coordinates, equations, dg,
-                                    i_node, j_node, k_node, element)
-
-                flux_ = boundary_condition(u_inner, normal_direction, x, t,
-                                           surface_flux, equations)
-
-                # Copy flux to element storage in the correct orientation
-                for v in eachvariable(equations)
-                    surface_flux_values[v, i, j, direction, element] = flux_[v]
-                end
-
+                calc_boundary_flux!(surface_flux_values, t, boundary_condition, mesh,
+                                    have_nonconservative_terms(equations), equations,
+                                    surface_integral, dg, cache, i_node, j_node, k_node,
+                                    i, j, direction, element, boundary)
                 i_node += i_node_step_i
                 j_node += j_node_step_i
                 k_node += k_node_step_i
@@ -415,6 +398,80 @@ function calc_boundary_flux!(cache, t, boundary_condition, boundary_indexing,
     end
 end

+# inlined version of the boundary flux calculation along a physical interface
+@inline function calc_boundary_flux!(surface_flux_values, t, boundary_condition,
+                                     mesh::Union{P4estMesh{3}, T8codeMesh{3}},
+                                     nonconservative_terms::False, equations,
+                                     surface_integral, dg::DG, cache, i_index, j_index,
+                                     k_index, i_node_index, j_node_index,
+                                     direction_index,
+                                     element_index, boundary_index)
+    @unpack boundaries = cache
+    @unpack node_coordinates, contravariant_vectors = cache.elements
+    @unpack surface_flux = surface_integral
+
+    # Extract solution data from boundary container
+    u_inner = get_node_vars(boundaries.u, equations, dg, i_node_index, j_node_index,
+                            boundary_index)
+
+    # Outward-pointing normal direction (not normalized)
+    normal_direction = get_normal_direction(direction_index, contravariant_vectors,
+                                            i_index, j_index, k_index, element_index)
+
+    # Coordinates at boundary node
+    x = get_node_coords(node_coordinates, equations, dg,
+                        i_index, j_index, k_index, element_index)
+
+    flux_ = boundary_condition(u_inner, normal_direction, x, t,
+                               surface_flux, equations)
+
+    # Copy flux to element storage in the correct orientation
+    for v in eachvariable(equations)
+        surface_flux_values[v, i_node_index, j_node_index,
+                            direction_index, element_index] = flux_[v]
+    end
+end
+
+# inlined version of the boundary flux calculation along a physical interface
+@inline function calc_boundary_flux!(surface_flux_values, t, boundary_condition,
+                                     mesh::Union{P4estMesh{3}, T8codeMesh{3}},
+                                     nonconservative_terms::True, equations,
+                                     surface_integral, dg::DG, cache, i_index, j_index,
+                                     k_index, i_node_index, j_node_index,
+                                     direction_index,
+                                     element_index, boundary_index)
+    @unpack boundaries = cache
+    @unpack node_coordinates, contravariant_vectors = cache.elements
+    @unpack surface_flux = surface_integral
+
+    # Extract solution data from boundary container
+    u_inner = get_node_vars(boundaries.u, equations, dg, i_node_index, j_node_index,
+                            boundary_index)
+
+    # Outward-pointing normal direction (not normalized)
+    normal_direction = get_normal_direction(direction_index, contravariant_vectors,
+                                            i_index, j_index, k_index, element_index)
+
+    # Coordinates at boundary node
+    x = get_node_coords(node_coordinates, equations, dg,
+                        i_index, j_index, k_index, element_index)
+
+    # Call pointwise numerical flux functions for the conservative and nonconservative part
+    # in the normal direction on the boundary
+    flux, noncons_flux = boundary_condition(u_inner, normal_direction, x, t,
+                                            surface_flux, equations)
+
+    # Copy flux to element storage in the correct orientation
+    for v in eachvariable(equations)
+        # Note the factor 0.5 necessary for the nonconservative fluxes based on
+        # the interpretation of global SBP operators coupled discontinuously via
+        # central fluxes/SATs
+        surface_flux_values[v, i_node_index, j_node_index,
+                            direction_index, element_index] = flux[v] + 0.5f0 *
+                                                                        noncons_flux[v]
+    end
+end
+
 function prolong2mortars!(cache, u,
                           mesh::Union{P4estMesh{3}, T8codeMesh{3}}, equations,
                           mortar_l2::LobattoLegendreMortarL2,
diff --git a/test/test_mpi_p4est_2d.jl b/test/test_mpi_p4est_2d.jl
index 651be3e1755..4b2b8dc61e3 100644
--- a/test/test_mpi_p4est_2d.jl
+++ b/test/test_mpi_p4est_2d.jl
@@ -211,6 +211,74 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem")
             @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000
         end
     end
+
+    @trixi_testset "elixir_mhd_alfven_wave_nonconforming.jl" begin
+        @test_trixi_include(joinpath(EXAMPLES_DIR,
+                                     "elixir_mhd_alfven_wave_nonconforming.jl"),
+                            l2=[0.032257043714485005,
+                                0.0698809831015213,
+                                0.07024507293378073,
+                                0.09318700512682686,
+                                0.04075287377819964,
+                                0.06598033890138222,
+                                0.06584394125943109,
+                                0.09317325194007701,
+                                0.001603893541181234],
+                            linf=[0.17598491051066556,
+                                  0.13831592490115455,
+                                  0.14124330399841845,
+                                  0.17293937185553027,
+                                  0.1332948089388849,
+                                  0.16128651157312346,
+                                  0.15572969249532598,
+                                  0.1810247231315753,
+                                  0.01967917976620706],
+                            tspan=(0.0, 0.25),)
+        # Ensure that we do not have excessive memory allocations
+        # (e.g., from type instabilities)
+        let
+            t = sol.t[end]
+            u_ode = sol.u[end]
+            du_ode = similar(u_ode)
+            @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000
+        end
+    end
+
+    # Same test as above but with only one tree in the mesh
+    # We use it to test meshes with elements of different size in each partition
+    @test_trixi_include(joinpath(EXAMPLES_DIR,
+                                 "elixir_mhd_alfven_wave_nonconforming.jl"),
+                        l2=[
+                            0.02918489280986602,
+                            0.06894247101538993,
+                            0.06934211084749892,
+                            0.09143968257530088,
+                            0.038237912462171675,
+                            0.06509909515945271,
+                            0.06502196369480336,
+                            0.0915205366320386,
+                            0.0023325491966802855
+                        ],
+                        linf=[
+                            0.10312055320325908,
+                            0.13916440641975747,
+                            0.14191886090656713,
+                            0.16048337905766766,
+                            0.12403522681540824,
+                            0.14689365133406318,
+                            0.1568420189383094,
+                            0.16311092390521648,
+                            0.01959765683054841
+                        ],
+                        tspan=(0.0, 0.25), trees_per_dimension=(1, 1),)
+    # Ensure that we do not have excessive memory allocations
+    # (e.g., from type instabilities)
+    let
+        t = sol.t[end]
+        u_ode = sol.u[end]
+        du_ode = similar(u_ode)
+        @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000
+    end
 end
 end # P4estMesh MPI
diff --git a/test/test_p4est_2d.jl b/test/test_p4est_2d.jl
index 4b1c7f5caca..21b075d537a 100644
--- a/test/test_p4est_2d.jl
+++ b/test/test_p4est_2d.jl
@@ -666,6 +666,38 @@ end
     end
 end

+@trixi_testset "elixir_mhd_alfven_wave_nonconforming.jl" begin
+    @test_trixi_include(joinpath(EXAMPLES_DIR,
+                                 "elixir_mhd_alfven_wave_nonconforming.jl"),
+                        l2=[0.032257043714485005,
+                            0.0698809831015213,
+                            0.07024507293378073,
+                            0.09318700512682686,
+                            0.04075287377819964,
+                            0.06598033890138222,
+                            0.06584394125943109,
+                            0.09317325194007701,
+                            0.001603893541181234],
+                        linf=[0.17598491051066556,
+                              0.13831592490115455,
+                              0.14124330399841845,
+                              0.17293937185553027,
+                              0.1332948089388849,
+                              0.16128651157312346,
+                              0.15572969249532598,
+                              0.1810247231315753,
+                              0.01967917976620706],
+                        tspan=(0.0, 0.25))
+    # Ensure that we do not have excessive memory allocations
+    # (e.g., from type instabilities)
+    let
+        t = sol.t[end]
+        u_ode = sol.u[end]
+        du_ode = similar(u_ode)
+        @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000
+    end
+end
+
 @trixi_testset "elixir_mhd_rotor.jl" begin
     @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_rotor.jl"),
                         l2=[0.4551839744017604, 0.8917986079085971, 0.832474072904728,
diff --git a/test/test_p4est_3d.jl b/test/test_p4est_3d.jl
index e42ad6f3e48..852ec52bcfd 100644
--- a/test/test_p4est_3d.jl
+++ b/test/test_p4est_3d.jl
@@ -587,6 +587,42 @@ end
     end
 end

+@trixi_testset "elixir_mhd_alfven_wave_nonperiodic.jl" begin
+    @test_trixi_include(joinpath(EXAMPLES_DIR,
+                                 "elixir_mhd_alfven_wave_nonperiodic.jl"),
+                        l2=[
+                            0.00017912812934894293,
+                            0.000630910737693146,
+                            0.0002256138768371346,
+                            0.0007301686017397987,
+                            0.0006647296256552257,
+                            0.0006409790941359089,
+                            0.00033986873316986315,
+                            0.0007277161123570452,
+                            1.3184121257198033e-5
+                        ],
+                        linf=[
+                            0.0012248374096375247,
+                            0.004857541490859554,
+                            0.001813452620706816,
+                            0.004803571938364726,
+                            0.005271403957646026,
+                            0.004571200760744465,
+                            0.002618188297242474,
+                            0.005010126350015381,
+                            6.309149507784953e-5
+                        ],
+                        tspan=(0.0, 0.25),)
+    # Ensure that we do not have excessive memory allocations
+    # (e.g., from type instabilities)
+    let
+        t = sol.t[end]
+        u_ode = sol.u[end]
+        du_ode = similar(u_ode)
+        @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000
+    end
+end
+
 @trixi_testset "elixir_mhd_shockcapturing_amr.jl" begin
     @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_shockcapturing_amr.jl"),
                         l2=[