From 85accc271a99329a7969d9378cf266ca254b4c3e Mon Sep 17 00:00:00 2001
From: TheRealMDoerr
Date: Fri, 31 Oct 2025 15:05:57 +0100
Subject: [PATCH] Backport e6ef74bd722c69f8b0cf144e0b5eba95d30dcd39

---
 src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp | 12 ++++++++++++
 src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp |  2 ++
 .../cpu/aarch64/templateTable_aarch64.cpp       | 17 +++++++++++++++--
 3 files changed, 29 insertions(+), 2 deletions(-)

diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
index 1db103730f1..31076a35c4f 100644
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
@@ -1850,3 +1850,15 @@ void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register t
     bind(profile_continue);
   }
 }
+
+#ifdef ASSERT
+void InterpreterMacroAssembler::verify_field_offset(Register reg) {
+  // Verify the field offset is not in the header, implicitly checks for 0
+  Label L;
+  subs(zr, reg, static_cast<int>(sizeof(markWord) + (UseCompressedClassPointers ? sizeof(narrowKlass) : sizeof(Klass*))));
+  br(Assembler::GE, L);
+  stop("bad field offset");
+  bind(L);
+}
+#endif
+
diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp
index f8d0b4adf92..b111ca69d92 100644
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp
@@ -292,6 +292,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
     set_last_Java_frame(esp, rfp, (address) pc(), rscratch1);
     MacroAssembler::_call_Unimplemented(call_site);
   }
+
+  void verify_field_offset(Register reg) NOT_DEBUG_RETURN;
 };
 
 #endif // CPU_AARCH64_INTERP_MASM_AARCH64_HPP
diff --git a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
index ffbab796600..84b0bd4cabf 100644
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
@@ -165,6 +165,7 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                    int byte_no)
 {
+  assert_different_registers(bc_reg, temp_reg);
   if (!RewriteBytecodes)  return;
   Label L_patch_done;
 
@@ -222,8 +223,12 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
   __ bind(L_okay);
 #endif
 
-  // patch bytecode
-  __ strb(bc_reg, at_bcp(0));
+  // Patch bytecode with release store to coordinate with ConstantPoolCacheEntry loads
+  // in fast bytecode codelets. The fast bytecode codelets have a memory barrier that gains
+  // the needed ordering, together with control dependency on entering the fast codelet
+  // itself.
+  __ lea(temp_reg, at_bcp(0));
+  __ stlrb(bc_reg, temp_reg);
 
   __ bind(L_patch_done);
 }
@@ -2914,6 +2919,7 @@ void TemplateTable::fast_storefield(TosState state)
 
   // replace index with field offset from cache entry
   __ ldr(r1, Address(r2, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
+  __ verify_field_offset(r1);
 
   {
     Label notVolatile;
@@ -3007,6 +3013,8 @@ void TemplateTable::fast_accessfield(TosState state)
 
   __ ldr(r1, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
                                   ConstantPoolCacheEntry::f2_offset())));
+  __ verify_field_offset(r1);
+
   __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
                                    ConstantPoolCacheEntry::flags_offset())));
 
@@ -3074,8 +3082,13 @@ void TemplateTable::fast_xaccess(TosState state)
   __ ldr(r0, aaddress(0));
   // access constant pool cache
   __ get_cache_and_index_at_bcp(r2, r3, 2);
+
+  // Must prevent reordering of the following cp cache loads with bytecode load
+  __ membar(MacroAssembler::LoadLoad);
+
   __ ldr(r1, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
                                   ConstantPoolCacheEntry::f2_offset())));
+  __ verify_field_offset(r1);
 
   // 8179954: We need to make sure that the code generated for
   // volatile accesses forms a sequentially-consistent set of
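
Note on the ordering scheme, for readers of the diff: patch_bytecode now
publishes the rewritten bytecode with a store-release (stlrb) instead of a
plain strb, and fast_xaccess pairs that with a LoadLoad barrier ahead of the
ConstantPoolCacheEntry loads, so a thread that observes the fast bytecode
also observes the resolved f2 offset. A minimal C++ sketch of the same
publish/consume idiom, using std::atomic as a stand-in for the stlrb/membar
pair; the names (Entry, patch_bytecode_entry, run_fast_codelet) are
illustrative, not HotSpot code:

    #include <atomic>
    #include <cstdint>

    struct Entry { uint64_t f2; };        // stands in for the resolved cp cache entry

    Entry cache_entry;                    // written before the bytecode is patched
    std::atomic<uint8_t> bytecode{0};     // 0 = not rewritten, 1 = fast codelet

    // Resolver side: fill in the entry, then publish the fast bytecode with
    // a release store (the stlrb in patch_bytecode plays this role).
    void patch_bytecode_entry(uint64_t offset) {
      cache_entry.f2 = offset;
      bytecode.store(1, std::memory_order_release);
    }

    // Interpreter side: the acquire load stands in for the LoadLoad barrier
    // (plus control dependency) that the patch puts before the cp cache loads.
    uint64_t run_fast_codelet() {
      if (bytecode.load(std::memory_order_acquire) == 1) {
        return cache_entry.f2;            // guaranteed to see the resolved offset
      }
      return 0;                           // not rewritten yet: take the slow path
    }

On AArch64 a compiler would typically lower the release store to stlrb and
the acquire load to ldarb; the patch instead keeps the bytecode load in the
dispatch loop plain and pays for the ordering only inside the fast codelets,
via the explicit LoadLoad membar.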
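
A second note, on the new assert: verify_field_offset checks that the f2
value loaded from the cache entry is at least header-sized, since every real
instance field lives past the object header (mark word plus klass pointer,
or narrow klass when compressed class pointers are on); an unresolved entry
still holding 0 fails the same comparison. A back-of-the-envelope sketch of
the bound, assuming the usual 64-bit HotSpot layout; the constants and the
helper name are illustrative:

    #include <cstdint>

    // 8-byte mark word, then a 4-byte compressed or 8-byte full class pointer.
    bool field_offset_plausible(int64_t offset, bool compressed_class_ptrs) {
      const int64_t header = 8 + (compressed_class_ptrs ? 4 : 8);
      // A valid instance field offset starts past the header; this also
      // rejects 0, i.e. an f2 slot that was never resolved.
      return offset >= header;
    }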