diff --git a/Backport-JDK-8269870-PS-Membar-in-PSPromotionManager-copy_unmarke.patch b/Backport-JDK-8269870-PS-Membar-in-PSPromotionManager-copy_unmarke.patch new file mode 100644 index 0000000000000000000000000000000000000000..aac2c3b874ac8d83c6fe8af65d303155bb24ed43 --- /dev/null +++ b/Backport-JDK-8269870-PS-Membar-in-PSPromotionManager-copy_unmarke.patch @@ -0,0 +1,50 @@ +Date: Tue, 25 Nov 2025 19:59:03 +0800 +Subject: [PATCH 6/8] 8269870: PS: Membar in + PSPromotionManager::copy_unmarked_to_survivor_space could be relaxed + +--- + .../gc/parallel/psPromotionManager.inline.hpp | 14 +++++--------- + 1 file changed, 5 insertions(+), 9 deletions(-) + +diff --git a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp +index 231aec305..7614ca65c 100644 +--- a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp ++++ b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp +@@ -147,10 +147,6 @@ inline oop PSPromotionManager::copy_to_survivor_space(oop o) { + if (!m.is_marked()) { + return copy_unmarked_to_survivor_space(o, m); + } else { +- // Ensure any loads from the forwardee follow all changes that precede +- // the release-cmpxchg that performed the forwarding, possibly in some +- // other thread. +- OrderAccess::acquire(); + // Return the already installed forwardee. + return o->forwardee(m); + } +@@ -281,8 +277,11 @@ inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o, + } + + // Now we have to CAS in the header. +- // Make copy visible to threads reading the forwardee. +- oop forwardee = o->forward_to_atomic(new_obj, test_mark, memory_order_release); ++ // Because the forwarding is done with memory_order_relaxed there is no ++ // ordering with the above copy. Clients that get the forwardee must not ++ // examine its contents without other synchronization, since the contents ++ // may not be up to date for them. ++ oop forwardee = o->forward_to_atomic(new_obj, test_mark, memory_order_relaxed); + if (forwardee == nullptr) { // forwardee is null when forwarding is successful + // We won any races, we "own" this object. + assert(new_obj == o->forwardee(), "Sanity"); +@@ -318,9 +317,6 @@ inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o, + return new_obj; + } else { + // We lost, someone else "owns" this object. +- // Ensure loads from the forwardee follow all changes that preceded the +- // release-cmpxchg that performed the forwarding in another thread. 
+- OrderAccess::acquire(); + + assert(o->is_forwarded(), "Object must be forwarded if the cas failed."); + assert(o->forwardee() == forwardee, "invariant"); +-- +2.34.1 + diff --git a/huawei-AbortVMOnException-option-support-matching-multiple-.patch b/huawei-AbortVMOnException-option-support-matching-multiple-.patch new file mode 100644 index 0000000000000000000000000000000000000000..2d376cac502f02a97027ad95b88b731d270c0fe7 --- /dev/null +++ b/huawei-AbortVMOnException-option-support-matching-multiple-.patch @@ -0,0 +1,24 @@ +Date: Tue, 25 Nov 2025 19:59:45 +0800 +Subject: [PATCH 7/8] AbortVMOnException option support matching multiple + exception + +--- + src/hotspot/share/utilities/exceptions.cpp | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/hotspot/share/utilities/exceptions.cpp b/src/hotspot/share/utilities/exceptions.cpp +index ef9a16ce0..dde68c98e 100644 +--- a/src/hotspot/share/utilities/exceptions.cpp ++++ b/src/hotspot/share/utilities/exceptions.cpp +@@ -539,7 +539,7 @@ ExceptionMark::~ExceptionMark() { + // caller frees value_string if necessary + void Exceptions::debug_check_abort(const char *value_string, const char* message) { + if (AbortVMOnException != nullptr && value_string != nullptr && +- strstr(value_string, AbortVMOnException)) { ++ strstr(AbortVMOnException, value_string)) { + if (AbortVMOnExceptionMessage == nullptr || (message != nullptr && + strstr(message, AbortVMOnExceptionMessage))) { + if (message == nullptr) { +-- +2.34.1 + diff --git a/huawei-Add-Compact-Object-Headers-feature-for-AArch64.patch b/huawei-Add-Compact-Object-Headers-feature-for-AArch64.patch new file mode 100644 index 0000000000000000000000000000000000000000..688b58b7d02027c17e3b5c312b653027d18791ed --- /dev/null +++ b/huawei-Add-Compact-Object-Headers-feature-for-AArch64.patch @@ -0,0 +1,10360 @@ +Date: Tue, 25 Nov 2025 19:55:45 +0800 +Subject: [PATCH 3/8] Add Compact Object Headers feature for AArch64 + +--- + make/Images.gmk | 20 +- + make/autoconf/configure.ac | 1 + + make/autoconf/jdk-options.m4 | 26 ++ + make/autoconf/spec.gmk.in | 1 + + src/hotspot/cpu/aarch64/aarch64.ad | 53 ++- + .../cpu/aarch64/c1_CodeStubs_aarch64.cpp | 8 + + .../cpu/aarch64/c1_LIRAssembler_aarch64.cpp | 55 +-- + .../cpu/aarch64/c1_MacroAssembler_aarch64.cpp | 55 ++- + .../cpu/aarch64/c1_MacroAssembler_aarch64.hpp | 2 +- + .../cpu/aarch64/c2_CodeStubs_aarch64.cpp | 11 + + .../cpu/aarch64/c2_MacroAssembler_aarch64.cpp | 328 ++++++++++++-- + .../cpu/aarch64/c2_MacroAssembler_aarch64.hpp | 8 +- + src/hotspot/cpu/aarch64/globals_aarch64.hpp | 2 + + .../cpu/aarch64/interp_masm_aarch64.cpp | 19 +- + .../cpu/aarch64/macroAssembler_aarch64.cpp | 211 ++++++--- + .../cpu/aarch64/macroAssembler_aarch64.hpp | 8 +- + .../cpu/aarch64/sharedRuntime_aarch64.cpp | 5 +- + .../cpu/aarch64/templateTable_aarch64.cpp | 28 +- + .../cpu/aarch64/vm_version_aarch64.hpp | 3 +- + src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp | 4 + + src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp | 2 +- + src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp | 4 + + src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp | 2 +- + src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp | 18 +- + src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.hpp | 2 +- + src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp | 5 + + .../cpu/riscv/c1_LIRAssembler_riscv.cpp | 2 +- + .../cpu/riscv/c1_MacroAssembler_riscv.cpp | 13 +- + .../cpu/riscv/c1_MacroAssembler_riscv.hpp | 2 +- + src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp | 5 + + src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp | 2 +- + 
.../cpu/s390/c1_MacroAssembler_s390.cpp | 10 +- + .../cpu/s390/c1_MacroAssembler_s390.hpp | 2 +- + src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp | 2 +- + src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp | 17 +- + src/hotspot/cpu/x86/c1_MacroAssembler_x86.hpp | 2 +- + src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp | 12 + + src/hotspot/share/c1/c1_CodeStubs.hpp | 18 + + src/hotspot/share/c1/c1_LIR.cpp | 7 + + src/hotspot/share/c1/c1_LIR.hpp | 7 +- + src/hotspot/share/c1/c1_LIRGenerator.cpp | 3 +- + src/hotspot/share/cds/archiveBuilder.cpp | 7 + + src/hotspot/share/cds/archiveHeapWriter.cpp | 40 +- + src/hotspot/share/cds/filemap.cpp | 10 + + src/hotspot/share/cds/filemap.hpp | 2 + + src/hotspot/share/ci/ciKlass.cpp | 22 + + src/hotspot/share/ci/ciKlass.hpp | 5 + + src/hotspot/share/gc/g1/g1CollectedHeap.cpp | 3 + + src/hotspot/share/gc/g1/g1FullCollector.cpp | 31 +- + src/hotspot/share/gc/g1/g1FullCollector.hpp | 4 + + .../share/gc/g1/g1FullGCAdjustTask.cpp | 42 +- + .../share/gc/g1/g1FullGCAdjustTask.hpp | 3 +- + .../share/gc/g1/g1FullGCCompactTask.cpp | 48 +- + .../share/gc/g1/g1FullGCCompactTask.hpp | 6 + + .../share/gc/g1/g1FullGCCompactionPoint.cpp | 19 +- + .../share/gc/g1/g1FullGCCompactionPoint.hpp | 2 + + .../share/gc/g1/g1FullGCOopClosures.hpp | 1 + + .../gc/g1/g1FullGCOopClosures.inline.hpp | 16 +- + .../share/gc/g1/g1FullGCPrepareTask.cpp | 17 +- + .../share/gc/g1/g1FullGCPrepareTask.hpp | 2 + + .../gc/g1/g1FullGCPrepareTask.inline.hpp | 10 +- + .../share/gc/g1/g1OopClosures.inline.hpp | 2 +- + .../share/gc/g1/g1ParScanThreadState.cpp | 28 +- + .../share/gc/g1/g1ParScanThreadState.hpp | 4 +- + .../share/gc/parallel/mutableSpace.cpp | 15 +- + src/hotspot/share/gc/parallel/psOldGen.cpp | 4 +- + .../share/gc/parallel/psPromotionManager.cpp | 4 +- + .../share/gc/parallel/psPromotionManager.hpp | 2 +- + .../gc/parallel/psPromotionManager.inline.hpp | 44 +- + .../share/gc/serial/defNewGeneration.cpp | 5 +- + src/hotspot/share/gc/serial/genMarkSweep.cpp | 35 +- + src/hotspot/share/gc/serial/markSweep.cpp | 29 +- + src/hotspot/share/gc/serial/markSweep.hpp | 13 +- + .../share/gc/serial/markSweep.inline.hpp | 24 +- + src/hotspot/share/gc/shared/collectedHeap.cpp | 15 +- + src/hotspot/share/gc/shared/collectedHeap.hpp | 2 +- + src/hotspot/share/gc/shared/gc_globals.hpp | 8 +- + .../share/gc/shared/genCollectedHeap.cpp | 3 + + src/hotspot/share/gc/shared/memAllocator.cpp | 16 +- + .../share/gc/shared/preservedMarks.cpp | 16 +- + .../share/gc/shared/preservedMarks.hpp | 3 + + .../share/gc/shared/preservedMarks.inline.hpp | 1 + + .../share/gc/shared/slidingForwarding.cpp | 123 +++++ + .../share/gc/shared/slidingForwarding.hpp | 181 ++++++++ + .../gc/shared/slidingForwarding.inline.hpp | 171 +++++++ + src/hotspot/share/gc/shared/space.cpp | 50 +- + src/hotspot/share/gc/shared/space.hpp | 16 +- + .../share/gc/shenandoah/shenandoahAsserts.cpp | 4 +- + .../share/gc/shenandoah/shenandoahFullGC.cpp | 106 ++++- + .../share/gc/shenandoah/shenandoahFullGC.hpp | 5 + + .../share/gc/shenandoah/shenandoahHeap.cpp | 3 + + .../gc/shenandoah/shenandoahHeap.inline.hpp | 27 +- + .../gc/shenandoah/shenandoahVerifier.cpp | 17 +- + src/hotspot/share/gc/x/c2/xBarrierSetC2.cpp | 2 +- + src/hotspot/share/gc/x/xObjArrayAllocator.cpp | 23 +- + src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp | 2 +- + src/hotspot/share/gc/z/zObjArrayAllocator.cpp | 32 +- + src/hotspot/share/gc/z/zRelocate.cpp | 3 +- + .../interpreter/zero/bytecodeInterpreter.cpp | 11 +- + .../chains/objectSampleMarker.hpp | 2 +- + 
src/hotspot/share/jvmci/jvmciCompilerToVM.cpp | 2 +- + .../share/jvmci/jvmciCompilerToVMInit.cpp | 1 - + src/hotspot/share/jvmci/jvmci_globals.cpp | 1 - + src/hotspot/share/jvmci/jvmci_globals.hpp | 3 - + src/hotspot/share/jvmci/vmStructs_jvmci.cpp | 10 + + src/hotspot/share/memory/universe.cpp | 12 +- + src/hotspot/share/oops/arrayOop.hpp | 77 ++-- + src/hotspot/share/oops/instanceOop.hpp | 17 +- + src/hotspot/share/oops/klass.cpp | 17 + + src/hotspot/share/oops/klass.hpp | 11 + + src/hotspot/share/oops/klass.inline.hpp | 7 + + src/hotspot/share/oops/markWord.hpp | 85 +++- + src/hotspot/share/oops/markWord.inline.hpp | 70 +++ + src/hotspot/share/oops/objArrayKlass.cpp | 3 +- + .../share/oops/objArrayKlass.inline.hpp | 3 +- + src/hotspot/share/oops/objArrayOop.hpp | 37 +- + src/hotspot/share/oops/oop.cpp | 8 +- + src/hotspot/share/oops/oop.hpp | 68 ++- + src/hotspot/share/oops/oop.inline.hpp | 215 ++++++++- + src/hotspot/share/oops/typeArrayKlass.cpp | 3 +- + .../share/oops/typeArrayKlass.inline.hpp | 3 +- + src/hotspot/share/opto/c2_CodeStubs.hpp | 12 +- + src/hotspot/share/opto/callnode.cpp | 13 +- + src/hotspot/share/opto/compile.cpp | 6 + + src/hotspot/share/opto/library_call.cpp | 4 +- + src/hotspot/share/opto/macro.cpp | 4 +- + src/hotspot/share/opto/memnode.cpp | 20 +- + src/hotspot/share/opto/runtime.cpp | 13 +- + src/hotspot/share/opto/type.cpp | 4 +- + src/hotspot/share/prims/unsafe.cpp | 2 +- + src/hotspot/share/prims/whitebox.cpp | 11 + + .../share/runtime/abstract_vm_version.hpp | 5 +- + src/hotspot/share/runtime/arguments.cpp | 45 +- + src/hotspot/share/runtime/basicLock.cpp | 35 +- + src/hotspot/share/runtime/deoptimization.cpp | 6 +- + src/hotspot/share/runtime/globals.hpp | 1 + + src/hotspot/share/runtime/lockStack.cpp | 23 +- + src/hotspot/share/runtime/lockStack.hpp | 37 +- + .../share/runtime/lockStack.inline.hpp | 144 +++++- + src/hotspot/share/runtime/objectMonitor.cpp | 67 ++- + src/hotspot/share/runtime/objectMonitor.hpp | 5 +- + .../share/runtime/objectMonitor.inline.hpp | 8 +- + src/hotspot/share/runtime/sharedRuntime.cpp | 26 +- + src/hotspot/share/runtime/synchronizer.cpp | 264 +++++++---- + src/hotspot/share/runtime/synchronizer.hpp | 23 +- + src/hotspot/share/runtime/vmStructs.cpp | 5 + + src/hotspot/share/utilities/fastHash.hpp | 97 ++++ + .../share/utilities/globalDefinitions.hpp | 3 +- + .../jvm/hotspot/debugger/DebuggerBase.java | 13 +- + .../hotspot/debugger/MachineDescription.java | 5 + + .../debugger/MachineDescriptionAArch64.java | 4 + + .../sun/jvm/hotspot/memory/Universe.java | 7 - + .../classes/sun/jvm/hotspot/oops/Array.java | 41 +- + .../sun/jvm/hotspot/oops/Instance.java | 4 +- + .../classes/sun/jvm/hotspot/oops/Mark.java | 28 +- + .../classes/sun/jvm/hotspot/oops/Oop.java | 41 +- + .../classes/sun/jvm/hotspot/runtime/VM.java | 21 + + .../utilities/RobustOopDeterminator.java | 27 +- + .../gtest/gc/shared/test_preservedMarks.cpp | 3 + + .../gc/shared/test_slidingForwarding.cpp | 124 +++++ + test/hotspot/gtest/oops/test_arrayOop.cpp | 67 ++- + test/hotspot/gtest/oops/test_objArrayOop.cpp | 69 +++ + test/hotspot/gtest/oops/test_typeArrayOop.cpp | 6 +- + test/hotspot/gtest/runtime/test_lockStack.cpp | 427 ++++++++++++++++++ + test/hotspot/jtreg/TEST.groups | 1 + + .../c2/irTests/TestVectorizationNotRun.java | 3 +- + .../lib/ir_framework/TestFramework.java | 3 +- + ...tIndependentPacksWithCyclicDependency.java | 3 +- + .../jtreg/gc/TestAllocHumongousFragment.java | 12 + + .../jtreg/gc/g1/plab/TestPLABPromotion.java | 8 +- + 
.../stress/systemgc/TestSystemGCWithG1.java | 13 +- + .../systemgc/TestSystemGCWithSerial.java | 33 +- + .../systemgc/TestSystemGCWithShenandoah.java | 16 + + test/hotspot/jtreg/gtest/ArrayTests.java | 56 +++ + test/hotspot/jtreg/gtest/LockStackGtests.java | 32 ++ + test/hotspot/jtreg/gtest/ObjArrayTests.java | 85 ++++ + .../runtime/FieldLayout/BaseOffsets.java | 130 ++++++ + .../runtime/FieldLayout/OldLayoutCheck.java | 32 +- + .../cds/CdsDifferentCompactObjectHeaders.java | 66 +++ + .../runtime/cds/appcds/TestZGCWithCDS.java | 15 + + ...toCreateSharedArchiveNoDefaultArchive.java | 1 + + .../lockStack/TestLockStackCapacity.java | 108 +++++ + test/jdk/com/sun/jdi/EATests.java | 157 +++++++ + .../GetObjectSizeIntrinsicsTest.java | 36 +- + .../tools/jlink/plugins/CDSPluginTest.java | 21 +- + test/lib/jdk/test/whitebox/WhiteBox.java | 6 +- + 186 files changed, 4768 insertions(+), 818 deletions(-) + create mode 100644 src/hotspot/share/gc/shared/slidingForwarding.cpp + create mode 100644 src/hotspot/share/gc/shared/slidingForwarding.hpp + create mode 100644 src/hotspot/share/gc/shared/slidingForwarding.inline.hpp + create mode 100644 src/hotspot/share/oops/markWord.inline.hpp + create mode 100644 src/hotspot/share/utilities/fastHash.hpp + create mode 100644 test/hotspot/gtest/gc/shared/test_slidingForwarding.cpp + create mode 100644 test/hotspot/gtest/oops/test_objArrayOop.cpp + create mode 100644 test/hotspot/gtest/runtime/test_lockStack.cpp + create mode 100644 test/hotspot/jtreg/gtest/ArrayTests.java + create mode 100644 test/hotspot/jtreg/gtest/LockStackGtests.java + create mode 100644 test/hotspot/jtreg/gtest/ObjArrayTests.java + create mode 100644 test/hotspot/jtreg/runtime/FieldLayout/BaseOffsets.java + create mode 100644 test/hotspot/jtreg/runtime/cds/CdsDifferentCompactObjectHeaders.java + create mode 100644 test/hotspot/jtreg/runtime/lockStack/TestLockStackCapacity.java + +diff --git a/make/Images.gmk b/make/Images.gmk +index 225d9a93d..6414b60cf 100644 +--- a/make/Images.gmk ++++ b/make/Images.gmk +@@ -123,10 +123,16 @@ CDS_DUMP_FLAGS = -Xmx128M -Xms128M + # Helper function for creating the CDS archives for the JDK and JRE + # + # Param1 - VM variant (e.g., server, client, zero, ...) +-# Param2 - _nocoops, or empty ++# Param2 - _nocoops, _coh, _nocoops_coh, or empty + define CreateCDSArchive +- $1_$2_DUMP_EXTRA_ARG := $(if $(filter _nocoops, $2),-XX:-UseCompressedOops,) +- $1_$2_DUMP_TYPE := $(if $(filter _nocoops, $2),-NOCOOPS,) ++ $1_$2_COOPS_OPTION := $(if $(findstring _nocoops, $2),-XX:-UseCompressedOops) ++ # enable and also explicitly disable coh as needed. ++ ifeq ($(call isTargetCpuArch, aarch64), true) ++ $1_$2_COH_OPTION := -XX:+UnlockExperimentalVMOptions \ ++ $(if $(findstring _coh, $2),-XX:+UseCompactObjectHeaders,-XX:-UseCompactObjectHeaders) ++ endif ++ $1_$2_DUMP_EXTRA_ARG := $$($1_$2_COOPS_OPTION) $$($1_$2_COH_OPTION) ++ $1_$2_DUMP_TYPE := $(if $(findstring _nocoops, $2),-NOCOOPS,)$(if $(findstring _coh, $2),-COH,) + + # Only G1 supports dumping the shared heap, so explicitly use G1 if the JVM supports it. 
+ $1_$2_CDS_DUMP_FLAGS := $(CDS_DUMP_FLAGS) $(if $(filter g1gc, $(JVM_FEATURES_$1)),-XX:+UseG1GC) +@@ -173,6 +179,14 @@ ifeq ($(BUILD_CDS_ARCHIVE), true) + $(foreach v, $(JVM_VARIANTS), \ + $(eval $(call CreateCDSArchive,$v,_nocoops)) \ + ) ++ ifeq ($(BUILD_CDS_ARCHIVE_COH), true) ++ $(foreach v, $(JVM_VARIANTS), \ ++ $(eval $(call CreateCDSArchive,$v,_coh)) \ ++ ) ++ $(foreach v, $(JVM_VARIANTS), \ ++ $(eval $(call CreateCDSArchive,$v,_nocoops_coh)) \ ++ ) ++ endif + endif + endif + +diff --git a/make/autoconf/configure.ac b/make/autoconf/configure.ac +index 55201277c..371afa71e 100644 +--- a/make/autoconf/configure.ac ++++ b/make/autoconf/configure.ac +@@ -261,6 +261,7 @@ JDKOPT_ENABLE_DISABLE_GENERATE_CLASSLIST + JDKOPT_EXCLUDE_TRANSLATIONS + JDKOPT_ENABLE_DISABLE_MANPAGES + JDKOPT_ENABLE_DISABLE_CDS_ARCHIVE ++JDKOPT_ENABLE_DISABLE_CDS_ARCHIVE_COH + JDKOPT_ENABLE_DISABLE_COMPATIBLE_CDS_ALIGNMENT + JDKOPT_SETUP_MACOSX_SIGNING + +diff --git a/make/autoconf/jdk-options.m4 b/make/autoconf/jdk-options.m4 +index 0a530cad0..0d39b06d1 100644 +--- a/make/autoconf/jdk-options.m4 ++++ b/make/autoconf/jdk-options.m4 +@@ -704,6 +704,32 @@ AC_DEFUN([JDKOPT_ENABLE_DISABLE_CDS_ARCHIVE], + AC_SUBST(BUILD_CDS_ARCHIVE) + ]) + ++################################################################################ ++# ++# Enable or disable the default CDS archive generation for Compact Object Headers ++# ++AC_DEFUN([JDKOPT_ENABLE_DISABLE_CDS_ARCHIVE_COH], ++[ ++ UTIL_ARG_ENABLE(NAME: cds-archive-coh, DEFAULT: auto, RESULT: BUILD_CDS_ARCHIVE_COH, ++ DESC: [enable generation of default CDS archives for compact object headers (requires --enable-cds-archive)], ++ DEFAULT_DESC: [auto], ++ CHECKING_MSG: [if default CDS archives for compact object headers should be generated], ++ CHECK_AVAILABLE: [ ++ AC_MSG_CHECKING([if CDS archive with compact object headers is available]) ++ if test "x$BUILD_CDS_ARCHIVE" = "xfalse"; then ++ AC_MSG_RESULT([no (CDS default archive generation is disabled)]) ++ AVAILABLE=false ++ elif test "x$OPENJDK_TARGET_CPU" != "xaarch64"; then ++ AC_MSG_RESULT([no (compact object headers not supported for this platform)]) ++ AVAILABLE=false ++ else ++ AC_MSG_RESULT([yes]) ++ AVAILABLE=true ++ fi ++ ]) ++ AC_SUBST(BUILD_CDS_ARCHIVE_COH) ++]) ++ + ################################################################################ + # + # Enable the alternative CDS core region alignment +diff --git a/make/autoconf/spec.gmk.in b/make/autoconf/spec.gmk.in +index 26fc4fb7f..708dd697d 100644 +--- a/make/autoconf/spec.gmk.in ++++ b/make/autoconf/spec.gmk.in +@@ -368,6 +368,7 @@ EXCLUDE_TRANSLATIONS := @EXCLUDE_TRANSLATIONS@ + BUILD_MANPAGES := @BUILD_MANPAGES@ + + BUILD_CDS_ARCHIVE := @BUILD_CDS_ARCHIVE@ ++BUILD_CDS_ARCHIVE_COH := @BUILD_CDS_ARCHIVE_COH@ + + ENABLE_COMPATIBLE_CDS_ALIGNMENT := @ENABLE_COMPATIBLE_CDS_ALIGNMENT@ + +diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad +index 69114031a..f2ea19f53 100644 +--- a/src/hotspot/cpu/aarch64/aarch64.ad ++++ b/src/hotspot/cpu/aarch64/aarch64.ad +@@ -7142,7 +7142,7 @@ instruct loadKlass(iRegPNoSp dst, memory8 mem) + instruct loadNKlass(iRegNNoSp dst, memory4 mem) + %{ + match(Set dst (LoadNKlass mem)); +- predicate(!needs_acquiring_load(n)); ++ predicate(!needs_acquiring_load(n) && !UseCompactObjectHeaders); + + ins_cost(4 * INSN_COST); + format %{ "ldrw $dst, $mem\t# compressed class ptr" %} +@@ -7152,6 +7152,20 @@ instruct loadNKlass(iRegNNoSp dst, memory4 mem) + ins_pipe(iload_reg_mem); + %} + ++instruct 
loadNKlassCompactHeaders(iRegNNoSp dst, memory4 mem, rFlagsReg cr) ++%{ ++ match(Set dst (LoadNKlass mem)); ++ effect(KILL cr); ++ predicate(!needs_acquiring_load(n) && UseCompactObjectHeaders); ++ ++ ins_cost(4 * INSN_COST); ++ format %{ "ldrw $dst, $mem\t# compressed class ptr" %} ++ ins_encode %{ ++ __ load_nklass_compact($dst$$Register, $mem$$base$$Register, $mem$$index$$Register, $mem$$scale, $mem$$disp); ++ %} ++ ins_pipe(pipe_slow); ++%} ++ + // Load Float + instruct loadF(vRegF dst, memory4 mem) + %{ +@@ -16433,13 +16447,12 @@ instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl) + + instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3) + %{ ++ predicate(LockingMode != LM_LIGHTWEIGHT); + match(Set cr (FastLock object box)); + effect(TEMP tmp, TEMP tmp2, TEMP tmp3); + +- // TODO +- // identify correct cost + ins_cost(5 * INSN_COST); +- format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %} ++ format %{ "fastlock $object,$box\t! kills $tmp,$tmp2,$tmp3" %} + + ins_encode %{ + __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register); +@@ -16450,6 +16463,7 @@ instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegP + + instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2) + %{ ++ predicate(LockingMode != LM_LIGHTWEIGHT); + match(Set cr (FastUnlock object box)); + effect(TEMP tmp, TEMP tmp2); + +@@ -16463,6 +16477,37 @@ instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRe + ins_pipe(pipe_serial); + %} + ++instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2) ++%{ ++ predicate(LockingMode == LM_LIGHTWEIGHT); ++ match(Set cr (FastLock object box)); ++ effect(TEMP tmp, TEMP tmp2); ++ ++ ins_cost(5 * INSN_COST); ++ format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %} ++ ++ ins_encode %{ ++ __ fast_lock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register); ++ %} ++ ++ ins_pipe(pipe_serial); ++%} ++ ++instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2) ++%{ ++ predicate(LockingMode == LM_LIGHTWEIGHT); ++ match(Set cr (FastUnlock object box)); ++ effect(TEMP tmp, TEMP tmp2); ++ ++ ins_cost(5 * INSN_COST); ++ format %{ "fastunlock $object,$box\t! 
kills $tmp, $tmp2" %} ++ ++ ins_encode %{ ++ __ fast_unlock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register); ++ %} ++ ++ ins_pipe(pipe_serial); ++%} + + // ============================================================================ + // Safepoint Instructions +diff --git a/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp +index ca175fe1c..8dbb28d6f 100644 +--- a/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp ++++ b/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp +@@ -32,6 +32,7 @@ + #include "c1/c1_Runtime1.hpp" + #include "classfile/javaClasses.hpp" + #include "nativeInst_aarch64.hpp" ++#include "runtime/objectMonitor.hpp" + #include "runtime/sharedRuntime.hpp" + #include "vmreg_aarch64.inline.hpp" + +@@ -233,6 +234,13 @@ void MonitorExitStub::emit_code(LIR_Assembler* ce) { + __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id))); + } + ++void LoadKlassStub::emit_code(LIR_Assembler* ce) { ++ assert(UseCompactObjectHeaders, "Only use with compact object headers"); ++ __ bind(_entry); ++ Register d = _result->as_register(); ++ __ ldr(d, Address(d, OM_OFFSET_NO_MONITOR_VALUE_TAG(header))); ++ __ b(_continuation); ++} + + // Implementation of patching: + // - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes) +diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp +index c42c93cc2..594673db7 100644 +--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp ++++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp +@@ -1230,7 +1230,7 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { + len, + tmp1, + tmp2, +- arrayOopDesc::header_size(op->type()), ++ arrayOopDesc::base_offset_in_bytes(op->type()), + array_element_size(op->type()), + op->klass()->as_register(), + *op->stub()->entry()); +@@ -2290,8 +2290,6 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { + + Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes()); + Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes()); +- Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes()); +- Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes()); + + // test for null + if (flags & LIR_OpArrayCopy::src_null_check) { +@@ -2352,15 +2350,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { + // We don't know the array types are compatible + if (basic_type != T_OBJECT) { + // Simple test for basic type arrays +- if (UseCompressedClassPointers) { +- __ ldrw(tmp, src_klass_addr); +- __ ldrw(rscratch1, dst_klass_addr); +- __ cmpw(tmp, rscratch1); +- } else { +- __ ldr(tmp, src_klass_addr); +- __ ldr(rscratch1, dst_klass_addr); +- __ cmp(tmp, rscratch1); +- } ++ __ cmp_klass(src, dst, tmp, rscratch1); + __ br(Assembler::NE, *stub->entry()); + } else { + // For object arrays, if src is a sub class of dst then we can +@@ -2482,36 +2472,14 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { + // but not necessarily exactly of type default_type. 
+ Label known_ok, halt; + __ mov_metadata(tmp, default_type->constant_encoding()); +- if (UseCompressedClassPointers) { +- __ encode_klass_not_null(tmp); +- } + + if (basic_type != T_OBJECT) { +- +- if (UseCompressedClassPointers) { +- __ ldrw(rscratch1, dst_klass_addr); +- __ cmpw(tmp, rscratch1); +- } else { +- __ ldr(rscratch1, dst_klass_addr); +- __ cmp(tmp, rscratch1); +- } ++ __ cmp_klass(dst, tmp, rscratch1); + __ br(Assembler::NE, halt); +- if (UseCompressedClassPointers) { +- __ ldrw(rscratch1, src_klass_addr); +- __ cmpw(tmp, rscratch1); +- } else { +- __ ldr(rscratch1, src_klass_addr); +- __ cmp(tmp, rscratch1); +- } ++ __ cmp_klass(src, tmp, rscratch1); + __ br(Assembler::EQ, known_ok); + } else { +- if (UseCompressedClassPointers) { +- __ ldrw(rscratch1, dst_klass_addr); +- __ cmpw(tmp, rscratch1); +- } else { +- __ ldr(rscratch1, dst_klass_addr); +- __ cmp(tmp, rscratch1); +- } ++ __ cmp_klass(dst, tmp, rscratch1); + __ br(Assembler::EQ, known_ok); + __ cmp(src, dst); + __ br(Assembler::EQ, known_ok); +@@ -2593,7 +2561,18 @@ void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) { + } + + if (UseCompressedClassPointers) { +- __ ldrw(result, Address (obj, oopDesc::klass_offset_in_bytes())); ++ if (UseCompactObjectHeaders) { ++ // Check if we can take the (common) fast path, if obj is unlocked. ++ __ ldr(result, Address(obj, oopDesc::mark_offset_in_bytes())); ++ __ tst(result, markWord::monitor_value); ++ __ br(Assembler::NE, *op->stub()->entry()); ++ __ bind(*op->stub()->continuation()); ++ ++ // Shift to get proper narrow Klass*. ++ __ lsr(result, result, markWord::klass_shift); ++ } else { ++ __ ldrw(result, Address (obj, oopDesc::klass_offset_in_bytes())); ++ } + __ decode_klass_not_null(result); + } else { + __ ldr(result, Address (obj, oopDesc::klass_offset_in_bytes())); +diff --git a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp +index d3a746178..5d997e074 100644 +--- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp ++++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * +@@ -80,12 +80,12 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr + br(Assembler::NE, slow_case); + } + +- // Load object header +- ldr(hdr, Address(obj, hdr_offset)); + if (LockingMode == LM_LIGHTWEIGHT) { + lightweight_lock(obj, hdr, temp, rscratch2, slow_case); + } else if (LockingMode == LM_LEGACY) { + Label done; ++ // Load object header ++ ldr(hdr, Address(obj, hdr_offset)); + // and mark it as unlocked + orr(hdr, hdr, markWord::unlocked_value); + // save unlocked object header into the displaced header location on the stack +@@ -144,11 +144,6 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_ + verify_oop(obj); + + if (LockingMode == LM_LIGHTWEIGHT) { +- ldr(hdr, Address(obj, oopDesc::mark_offset_in_bytes())); +- // We cannot use tbnz here, the target might be too far away and cannot +- // be encoded. 
+- tst(hdr, markWord::monitor_value); +- br(Assembler::NE, slow_case); + lightweight_unlock(obj, hdr, temp, rscratch2, slow_case); + } else if (LockingMode == LM_LEGACY) { + // test if object header is pointing to the displaced header, and if so, restore +@@ -180,20 +175,25 @@ void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, i + + void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) { + assert_different_registers(obj, klass, len); +- // This assumes that all prototype bits fit in an int32_t +- mov(t1, (int32_t)(intptr_t)markWord::prototype().value()); +- str(t1, Address(obj, oopDesc::mark_offset_in_bytes())); +- +- if (UseCompressedClassPointers) { // Take care not to kill klass +- encode_klass_not_null(t1, klass); +- strw(t1, Address(obj, oopDesc::klass_offset_in_bytes())); ++ if (UseCompactObjectHeaders) { ++ ldr(t1, Address(klass, Klass::prototype_header_offset())); ++ str(t1, Address(obj, oopDesc::mark_offset_in_bytes())); + } else { +- str(klass, Address(obj, oopDesc::klass_offset_in_bytes())); ++ // This assumes that all prototype bits fit in an int32_t ++ mov(t1, (int32_t)(intptr_t)markWord::prototype().value()); ++ str(t1, Address(obj, oopDesc::mark_offset_in_bytes())); ++ ++ if (UseCompressedClassPointers) { // Take care not to kill klass ++ encode_klass_not_null(t1, klass); ++ strw(t1, Address(obj, oopDesc::klass_offset_in_bytes())); ++ } else { ++ str(klass, Address(obj, oopDesc::klass_offset_in_bytes())); ++ } + } + + if (len->is_valid()) { + strw(len, Address(obj, arrayOopDesc::length_offset_in_bytes())); +- } else if (UseCompressedClassPointers) { ++ } else if (UseCompressedClassPointers && !UseCompactObjectHeaders) { + store_klass_gap(obj, zr); + } + } +@@ -271,7 +271,7 @@ void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register + + verify_oop(obj); + } +-void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int header_size, int f, Register klass, Label& slow_case) { ++void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int base_offset_in_bytes, int f, Register klass, Label& slow_case) { + assert_different_registers(obj, len, t1, t2, klass); + + // determine alignment mask +@@ -284,7 +284,7 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, + + const Register arr_size = t2; // okay to be the same + // align object end +- mov(arr_size, (int32_t)header_size * BytesPerWord + MinObjAlignmentInBytesMask); ++ mov(arr_size, (int32_t)base_offset_in_bytes + MinObjAlignmentInBytesMask); + add(arr_size, arr_size, len, ext::uxtw, f); + andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask); + +@@ -292,8 +292,19 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, + + initialize_header(obj, klass, len, t1, t2); + ++ // Clear leading 4 bytes, if necessary. ++ // TODO: This could perhaps go into initialize_body() and also clear the leading 4 bytes ++ // for non-array objects, thereby replacing the klass-gap clearing code in initialize_header(). 
++ int base_offset = base_offset_in_bytes; ++ if (!is_aligned(base_offset, BytesPerWord)) { ++ assert(is_aligned(base_offset, BytesPerInt), "must be 4-byte aligned"); ++ strw(zr, Address(obj, base_offset)); ++ base_offset += BytesPerInt; ++ } ++ assert(is_aligned(base_offset, BytesPerWord), "must be word-aligned"); ++ + // clear rest of allocated space +- initialize_body(obj, arr_size, header_size * BytesPerWord, t1, t2); ++ initialize_body(obj, arr_size, base_offset, t1, t2); + if (Compilation::current()->bailed_out()) { + return; + } +@@ -312,9 +323,7 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, + void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) { + verify_oop(receiver); + // explicit null check not needed since load from [klass_offset] causes a trap +- // check against inline cache +- assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check"); +- ++ // check against inline cache. This is checked in Universe::genesis().. + cmp_klass(receiver, iCache, rscratch1); + } + +diff --git a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp +index 4aa6206aa..5eaa41bc8 100644 +--- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp ++++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp +@@ -100,7 +100,7 @@ using MacroAssembler::null_check; + // header_size: size of object header in words + // f : element scale factor + // slow_case : exit to slow case implementation if fast allocation fails +- void allocate_array(Register obj, Register len, Register t, Register t2, int header_size, int f, Register klass, Label& slow_case); ++ void allocate_array(Register obj, Register len, Register t, Register t2, int base_offset_in_bytes, int f, Register klass, Label& slow_case); + + int rsp_offset() const { return _rsp_offset; } + void set_rsp_offset(int n) { _rsp_offset = n; } +diff --git a/src/hotspot/cpu/aarch64/c2_CodeStubs_aarch64.cpp b/src/hotspot/cpu/aarch64/c2_CodeStubs_aarch64.cpp +index 69ea37fa4..337a66f76 100644 +--- a/src/hotspot/cpu/aarch64/c2_CodeStubs_aarch64.cpp ++++ b/src/hotspot/cpu/aarch64/c2_CodeStubs_aarch64.cpp +@@ -91,4 +91,15 @@ void C2HandleAnonOMOwnerStub::emit(C2_MacroAssembler& masm) { + __ b(continuation()); + } + ++int C2LoadNKlassStub::max_size() const { ++ return 8; ++} ++ ++void C2LoadNKlassStub::emit(C2_MacroAssembler& masm) { ++ __ bind(entry()); ++ Register d = dst(); ++ __ ldr(d, Address(d, OM_OFFSET_NO_MONITOR_VALUE_TAG(header))); ++ __ b(continuation()); ++} ++ + #undef __ +diff --git a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp +index 6a2f892a5..5923bbf89 100644 +--- a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp ++++ b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp +@@ -33,6 +33,7 @@ + #include "opto/subnode.hpp" + #include "runtime/stubRoutines.hpp" + #include "utilities/powerOfTwo.hpp" ++#include "utilities/globalDefinitions.hpp" + + #ifdef PRODUCT + #define BLOCK_COMMENT(str) /* nothing */ +@@ -157,6 +158,7 @@ void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg, Register + Label object_has_monitor; + Label count, no_count; + ++ assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_lock_lightweight"); + assert_different_registers(oop, box, tmp, disp_hdr); + + // Load markWord from object into displaced_header. 
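For orientation before the locking changes: the C2LoadNKlassStub above (and MacroAssembler::load_nklass_compact later in this patch) implement the same decode. With compact headers the narrow Klass pointer lives in the upper bits of the mark word, except when the object is monitor-locked; then the original mark has to be fetched from the monitor's displaced header. A minimal C++ sketch of that logic, under assumed constants (klass_shift = 42 and the stand-in Monitor struct below are illustrative; the VM uses markWord::klass_shift and ObjectMonitor):

#include <cstdint>

constexpr uintptr_t monitor_value = 0b10;  // lock bits: object is inflated
constexpr int       klass_shift   = 42;    // assumed; see markWord::klass_shift

struct Monitor {                           // stand-in for ObjectMonitor
  uintptr_t displaced_header;              // original mark word of the locked object
};

inline uint32_t narrow_klass(uintptr_t mark) {
  if (mark & monitor_value) {
    // Inflated: the mark is a tagged Monitor*; the displaced header still
    // carries the klass bits.
    const Monitor* m = reinterpret_cast<const Monitor*>(mark - monitor_value);
    mark = m->displaced_header;
  }
  return static_cast<uint32_t>(mark >> klass_shift);
}

The common case is a single load plus a shift; only inflated objects take the out-of-line stub.
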
+@@ -175,7 +177,8 @@ void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg, Register + if (LockingMode == LM_MONITOR) { + tst(oop, oop); // Set NE to indicate 'failure' -> take slow-path. We know that oop != 0. + b(cont); +- } else if (LockingMode == LM_LEGACY) { ++ } else { ++ assert(LockingMode == LM_LEGACY, "must be"); + // Set tmp to be (markWord of object | UNLOCK_VALUE). + orr(tmp, disp_hdr, markWord::unlocked_value); + +@@ -204,10 +207,6 @@ void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg, Register + ands(tmp/*==0?*/, disp_hdr, tmp); // Sets flags for result + str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes())); + b(cont); +- } else { +- assert(LockingMode == LM_LIGHTWEIGHT, "must be"); +- lightweight_lock(oop, disp_hdr, tmp, tmp3Reg, no_count); +- b(count); + } + + // Handle existing monitor. +@@ -221,14 +220,13 @@ void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg, Register + cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true, + /*release*/ true, /*weak*/ false, tmp3Reg); // Sets flags for result + +- if (LockingMode != LM_LIGHTWEIGHT) { +- // Store a non-null value into the box to avoid looking like a re-entrant +- // lock. The fast-path monitor unlock code checks for +- // markWord::monitor_value so use markWord::unused_mark which has the +- // relevant bit set, and also matches ObjectSynchronizer::enter. +- mov(tmp, (address)markWord::unused_mark().value()); +- str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes())); +- } ++ // Store a non-null value into the box to avoid looking like a re-entrant ++ // lock. The fast-path monitor unlock code checks for ++ // markWord::monitor_value so use markWord::unused_mark which has the ++ // relevant bit set, and also matches ObjectSynchronizer::enter. ++ mov(tmp, (address)markWord::unused_mark().value()); ++ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes())); ++ + br(Assembler::EQ, cont); // CAS success means locking succeeded + + cmp(tmp3Reg, rthread); +@@ -259,6 +257,7 @@ void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg, Registe + Label object_has_monitor; + Label count, no_count; + ++ assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_unlock_lightweight"); + assert_different_registers(oop, box, tmp, disp_hdr); + + if (LockingMode == LM_LEGACY) { +@@ -277,7 +276,8 @@ void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg, Registe + if (LockingMode == LM_MONITOR) { + tst(oop, oop); // Set NE to indicate 'failure' -> take slow-path. We know that oop != 0. + b(cont); +- } else if (LockingMode == LM_LEGACY) { ++ } else { ++ assert(LockingMode == LM_LEGACY, "must be"); + // Check if it is still a light weight lock, this is is true if we + // see the stack address of the basicLock in the markWord of the + // object. 
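The LM_LEGACY paths kept above follow the classic displaced-header protocol: lock by saving mark|1 into the on-stack BasicLock ("box") and CAS-ing the object's mark to the box address; unlock by CAS-ing the box address back to the saved header. A hedged C++ model of just those two transitions (recursion and the inflated-monitor cases elided; BasicLock here is a stand-in for the VM type, and the memory orders shown are the minimal pairing, while the generated cmpxchg may be stronger):

#include <atomic>
#include <cstdint>

constexpr uintptr_t unlocked_value = 0b01;

struct BasicLock { uintptr_t displaced; };  // lives in the caller's frame

bool legacy_lock(std::atomic<uintptr_t>& mark, BasicLock* box) {
  uintptr_t unlocked = mark.load(std::memory_order_relaxed) | unlocked_value;
  box->displaced = unlocked;                // save displaced header on the stack
  uintptr_t expected = unlocked;
  // CAS mark -> box address; success means this frame owns the stack lock.
  return mark.compare_exchange_strong(expected, reinterpret_cast<uintptr_t>(box),
                                      std::memory_order_acquire,
                                      std::memory_order_relaxed);
}

bool legacy_unlock(std::atomic<uintptr_t>& mark, BasicLock* box) {
  uintptr_t expected = reinterpret_cast<uintptr_t>(box);  // still our stack lock?
  return mark.compare_exchange_strong(expected, box->displaced,
                                      std::memory_order_release,
                                      std::memory_order_relaxed);
}

If either CAS fails, the mark no longer has the expected shape (recursive stack lock or inflated monitor) and control falls through to the monitor code that follows.
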
+@@ -285,10 +285,6 @@ void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg, Registe + cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false, + /*release*/ true, /*weak*/ false, tmp); + b(cont); +- } else { +- assert(LockingMode == LM_LIGHTWEIGHT, "must be"); +- lightweight_unlock(oop, tmp, box, disp_hdr, no_count); +- b(count); + } + + assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0"); +@@ -298,19 +294,6 @@ void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg, Registe + STATIC_ASSERT(markWord::monitor_value <= INT_MAX); + add(tmp, tmp, -(int)markWord::monitor_value); // monitor + +- if (LockingMode == LM_LIGHTWEIGHT) { +- // If the owner is anonymous, we need to fix it -- in an outline stub. +- Register tmp2 = disp_hdr; +- ldr(tmp2, Address(tmp, ObjectMonitor::owner_offset())); +- // We cannot use tbnz here, the target might be too far away and cannot +- // be encoded. +- tst(tmp2, (uint64_t)ObjectMonitor::ANONYMOUS_OWNER); +- C2HandleAnonOMOwnerStub* stub = new (Compile::current()->comp_arena()) C2HandleAnonOMOwnerStub(tmp, tmp2); +- Compile::current()->output()->add_stub(stub); +- br(Assembler::NE, stub->entry()); +- bind(stub->continuation()); +- } +- + ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset())); + + Label notRecursive; +@@ -343,6 +326,262 @@ void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg, Registe + bind(no_count); + } + ++void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register t1, ++ Register t2, Register t3) { ++ assert(LockingMode == LM_LIGHTWEIGHT, "must be"); ++ assert_different_registers(obj, t1, t2, t3); ++ ++ // Handle inflated monitor. ++ Label inflated; ++ // Finish fast lock successfully. MUST branch to with flag == EQ ++ Label locked; ++ // Finish fast lock unsuccessfully. MUST branch to with flag == NE ++ Label slow_path; ++ ++ if (DiagnoseSyncOnValueBasedClasses != 0) { ++ load_klass(t1, obj); ++ ldrw(t1, Address(t1, Klass::access_flags_offset())); ++ tstw(t1, JVM_ACC_IS_VALUE_BASED_CLASS); ++ br(Assembler::NE, slow_path); ++ } ++ ++ const Register t1_mark = t1; ++ ++ { // Lightweight locking ++ ++ // Push lock to the lock stack and finish successfully. MUST branch to with flag == EQ ++ Label push; ++ ++ const Register t2_top = t2; ++ const Register t3_t = t3; ++ ++ // Check if lock-stack is full. ++ ldrw(t2_top, Address(rthread, JavaThread::lock_stack_top_offset())); ++ cmpw(t2_top, (unsigned)LockStack::end_offset() - 1); ++ br(Assembler::GT, slow_path); ++ ++ // Check if recursive. ++ subw(t3_t, t2_top, oopSize); ++ ldr(t3_t, Address(rthread, t3_t)); ++ cmp(obj, t3_t); ++ br(Assembler::EQ, push); ++ ++ // Relaxed normal load to check for monitor. Optimization for monitor case. ++ ldr(t1_mark, Address(obj, oopDesc::mark_offset_in_bytes())); ++ tbnz(t1_mark, exact_log2(markWord::monitor_value), inflated); ++ ++ // Not inflated ++ assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid a lea"); ++ ++ // Try to lock. Transition lock-bits 0b01 => 0b00 ++ orr(t1_mark, t1_mark, markWord::unlocked_value); ++ eor(t3_t, t1_mark, markWord::unlocked_value); ++ cmpxchg(/*addr*/ obj, /*expected*/ t1_mark, /*new*/ t3_t, Assembler::xword, ++ /*acquire*/ true, /*release*/ false, /*weak*/ false, noreg); ++ br(Assembler::NE, slow_path); ++ ++ bind(push); ++ // After successful lock, push object on lock-stack. 
++ str(obj, Address(rthread, t2_top)); ++ addw(t2_top, t2_top, oopSize); ++ strw(t2_top, Address(rthread, JavaThread::lock_stack_top_offset())); ++ b(locked); ++ } ++ ++ { // Handle inflated monitor. ++ bind(inflated); ++ ++ // mark contains the tagged ObjectMonitor*. ++ const Register t1_tagged_monitor = t1_mark; ++ const uintptr_t monitor_tag = markWord::monitor_value; ++ const Register t2_owner_addr = t2; ++ const Register t3_owner = t3; ++ ++ // Compute owner address. ++ lea(t2_owner_addr, Address(t1_tagged_monitor, (in_bytes(ObjectMonitor::owner_offset()) - monitor_tag))); ++ ++ // CAS owner (null => current thread). ++ cmpxchg(t2_owner_addr, zr, rthread, Assembler::xword, /*acquire*/ true, ++ /*release*/ false, /*weak*/ false, t3_owner); ++ br(Assembler::EQ, locked); ++ ++ // Check if recursive. ++ cmp(t3_owner, rthread); ++ br(Assembler::NE, slow_path); ++ ++ // Recursive. ++ increment(Address(t1_tagged_monitor, in_bytes(ObjectMonitor::recursions_offset()) - monitor_tag), 1); ++ } ++ ++ bind(locked); ++ increment(Address(rthread, JavaThread::held_monitor_count_offset())); ++ ++#ifdef ASSERT ++ // Check that locked label is reached with Flags == EQ. ++ Label flag_correct; ++ br(Assembler::EQ, flag_correct); ++ stop("Fast Lock Flag != EQ"); ++#endif ++ ++ bind(slow_path); ++#ifdef ASSERT ++ // Check that slow_path label is reached with Flags == NE. ++ br(Assembler::NE, flag_correct); ++ stop("Fast Lock Flag != NE"); ++ bind(flag_correct); ++#endif ++ // C2 uses the value of Flags (NE vs EQ) to determine the continuation. ++} ++ ++void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register t1, Register t2, ++ Register t3) { ++ assert(LockingMode == LM_LIGHTWEIGHT, "must be"); ++ assert_different_registers(obj, t1, t2, t3); ++ ++ // Handle inflated monitor. ++ Label inflated, inflated_load_monitor; ++ // Finish fast unlock successfully. MUST branch to with flag == EQ ++ Label unlocked; ++ // Finish fast unlock unsuccessfully. MUST branch to with flag == NE ++ Label slow_path; ++ ++ const Register t1_mark = t1; ++ const Register t2_top = t2; ++ const Register t3_t = t3; ++ ++ { // Lightweight unlock ++ ++ // Check if obj is top of lock-stack. ++ ldrw(t2_top, Address(rthread, JavaThread::lock_stack_top_offset())); ++ subw(t2_top, t2_top, oopSize); ++ ldr(t3_t, Address(rthread, t2_top)); ++ cmp(obj, t3_t); ++ // Top of lock stack was not obj. Must be monitor. ++ br(Assembler::NE, inflated_load_monitor); ++ ++ // Pop lock-stack. ++ DEBUG_ONLY(str(zr, Address(rthread, t2_top));) ++ strw(t2_top, Address(rthread, JavaThread::lock_stack_top_offset())); ++ ++ // Check if recursive. ++ subw(t3_t, t2_top, oopSize); ++ ldr(t3_t, Address(rthread, t3_t)); ++ cmp(obj, t3_t); ++ br(Assembler::EQ, unlocked); ++ ++ // Not recursive. ++ // Load Mark. ++ ldr(t1_mark, Address(obj, oopDesc::mark_offset_in_bytes())); ++ ++ // Check header for monitor (0b10). ++ tbnz(t1_mark, exact_log2(markWord::monitor_value), inflated); ++ ++ // Try to unlock. Transition lock bits 0b00 => 0b01 ++ assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea"); ++ orr(t3_t, t1_mark, markWord::unlocked_value); ++ cmpxchg(/*addr*/ obj, /*expected*/ t1_mark, /*new*/ t3_t, Assembler::xword, ++ /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg); ++ br(Assembler::EQ, unlocked); ++ ++ // Compare and exchange failed. ++ // Restore lock-stack and handle the unlock in runtime. 
++ DEBUG_ONLY(str(obj, Address(rthread, t2_top));) ++ addw(t2_top, t2_top, oopSize); ++ str(t2_top, Address(rthread, JavaThread::lock_stack_top_offset())); ++ b(slow_path); ++ } ++ ++ ++ { // Handle inflated monitor. ++ bind(inflated_load_monitor); ++ ldr(t1_mark, Address(obj, oopDesc::mark_offset_in_bytes())); ++#ifdef ASSERT ++ tbnz(t1_mark, exact_log2(markWord::monitor_value), inflated); ++ stop("Fast Unlock not monitor"); ++#endif ++ ++ bind(inflated); ++ ++#ifdef ASSERT ++ Label check_done; ++ subw(t2_top, t2_top, oopSize); ++ cmpw(t2_top, in_bytes(JavaThread::lock_stack_base_offset())); ++ br(Assembler::LT, check_done); ++ ldr(t3_t, Address(rthread, t2_top)); ++ cmp(obj, t3_t); ++ br(Assembler::NE, inflated); ++ stop("Fast Unlock lock on stack"); ++ bind(check_done); ++#endif ++ ++ // mark contains the tagged ObjectMonitor*. ++ const Register t1_monitor = t1_mark; ++ const uintptr_t monitor_tag = markWord::monitor_value; ++ ++ // Untag the monitor. ++ sub(t1_monitor, t1_mark, monitor_tag); ++ ++ const Register t2_recursions = t2; ++ Label not_recursive; ++ ++ // Check if recursive. ++ ldr(t2_recursions, Address(t1_monitor, ObjectMonitor::recursions_offset())); ++ cbz(t2_recursions, not_recursive); ++ ++ // Recursive unlock. ++ sub(t2_recursions, t2_recursions, 1u); ++ str(t2_recursions, Address(t1_monitor, ObjectMonitor::recursions_offset())); ++ // Set flag == EQ ++ cmp(t2_recursions, t2_recursions); ++ b(unlocked); ++ ++ bind(not_recursive); ++ ++ Label release; ++ const Register t2_owner_addr = t2; ++ ++ // Compute owner address. ++ lea(t2_owner_addr, Address(t1_monitor, ObjectMonitor::owner_offset())); ++ ++ // Check if the entry lists are empty. ++ ldr(rscratch1, Address(t1_monitor, ObjectMonitor::EntryList_offset())); ++ ldr(t3_t, Address(t1_monitor, ObjectMonitor::cxq_offset())); ++ orr(rscratch1, rscratch1, t3_t); ++ cmp(rscratch1, zr); ++ br(Assembler::EQ, release); ++ ++ // The owner may be anonymous and we removed the last obj entry in ++ // the lock-stack. This loses the information about the owner. ++ // Write the thread to the owner field so the runtime knows the owner. ++ str(rthread, Address(t2_owner_addr)); ++ b(slow_path); ++ ++ bind(release); ++ // Set owner to null. ++ // Release to satisfy the JMM ++ stlr(zr, t2_owner_addr); ++ } ++ ++ bind(unlocked); ++ decrement(Address(rthread, JavaThread::held_monitor_count_offset())); ++ ++#ifdef ASSERT ++ // Check that unlocked label is reached with Flags == EQ. ++ Label flag_correct; ++ br(Assembler::EQ, flag_correct); ++ stop("Fast Unlock Flag != EQ"); ++#endif ++ ++ bind(slow_path); ++#ifdef ASSERT ++ // Check that slow_path label is reached with Flags == NE. ++ br(Assembler::NE, flag_correct); ++ stop("Fast Unlock Flag != NE"); ++ bind(flag_correct); ++#endif ++ // C2 uses the value of Flags (NE vs EQ) to determine the continuation. ++} ++ + // Search for str1 in str2 and return index or -1 + // Clobbers: rscratch1, rscratch2, rflags. May also clobber v0-v1, when icnt1==-1. + void C2_MacroAssembler::string_indexof(Register str2, Register str1, +@@ -2356,3 +2595,30 @@ bool C2_MacroAssembler::in_scratch_emit_size() { + } + return MacroAssembler::in_scratch_emit_size(); + } ++ ++void C2_MacroAssembler::load_nklass_compact(Register dst, Register obj, Register index, int scale, int disp) { ++ C2LoadNKlassStub* stub = new (Compile::current()->comp_arena()) C2LoadNKlassStub(dst); ++ Compile::current()->output()->add_stub(stub); ++ ++ // Note: Don't clobber obj anywhere in that method! 
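Both new routines, fast_lock_lightweight and fast_unlock_lightweight, revolve around the per-thread lock-stack. A rough C++ model of that discipline, assuming a small fixed capacity (illustrative only; the real LockStack stores oops at offsets from the JavaThread, with the limit given by LockStack::end_offset()):

#include <cstddef>

struct LockStackModel {
  static const int CAPACITY = 8;     // assumption for the sketch
  void* _elems[CAPACITY] = {};
  int   _top = 0;                    // index of the next free slot

  // fast-lock: bail out if full (caller inflates); a recursive acquire is
  // detected by the same object already sitting on top.
  bool can_push() const { return _top < CAPACITY; }
  bool is_recursive(void* obj) const {
    return _top > 0 && _elems[_top - 1] == obj;
  }
  void push(void* obj) { _elems[_top++] = obj; }

  // fast-unlock: obj must be top-of-stack; otherwise the lock is inflated
  // (or locking was non-symmetric) and the slow path takes over.
  bool try_pop(void* obj) {
    if (_top == 0 || _elems[_top - 1] != obj) return false;
    _elems[--_top] = nullptr;
    return true;
  }
};

Recursion is represented by pushing the same object again, which is why the unlock paths above re-check the slot below the one just popped before touching the mark word.
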
++ ++ // The incoming address is pointing into obj-start + klass_offset_in_bytes. We need to extract ++ // obj-start, so that we can load from the object's mark-word instead. Usually the address ++ // comes as obj-start in obj and klass_offset_in_bytes in disp. However, sometimes C2 ++ // emits code that pre-computes obj-start + klass_offset_in_bytes into a register, and ++ // then passes that register as obj and 0 in disp. The following code extracts the base ++ // and offset to load the mark-word. ++ int offset = oopDesc::mark_offset_in_bytes() + disp - oopDesc::klass_offset_in_bytes(); ++ if (index == noreg) { ++ ldr(dst, Address(obj, offset)); ++ } else { ++ lea(dst, Address(obj, index, Address::lsl(scale))); ++ ldr(dst, Address(dst, offset)); ++ } ++ // NOTE: We can't use tbnz here, because the target is sometimes too far away ++ // and cannot be encoded. ++ tst(dst, markWord::monitor_value); ++ br(Assembler::NE, stub->entry()); ++ bind(stub->continuation()); ++ lsr(dst, dst, markWord::klass_shift); ++} +diff --git a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp +index bc8d769b8..f1f96ab64 100644 +--- a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp ++++ b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it +@@ -43,9 +43,11 @@ + BasicType eltype); + + // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file. +- // See full description in macroAssembler_aarch64.cpp. + void fast_lock(Register object, Register box, Register tmp, Register tmp2, Register tmp3); + void fast_unlock(Register object, Register box, Register tmp, Register tmp2); ++ // Code used by cmpFastLockLightweight and cmpFastUnlockLightweight mach instructions in .ad file. 
++ void fast_lock_lightweight(Register object, Register t1, Register t2, Register t3); ++ void fast_unlock_lightweight(Register object, Register t1, Register t2, Register t3); + + void string_compare(Register str1, Register str2, + Register cnt1, Register cnt2, Register result, +@@ -184,4 +186,6 @@ + void vector_signum_sve(FloatRegister dst, FloatRegister src, FloatRegister zero, + FloatRegister one, FloatRegister vtmp, PRegister pgtmp, SIMD_RegVariant T); + ++ void load_nklass_compact(Register dst, Register obj, Register index, int scale, int disp); ++ + #endif // CPU_AARCH64_C2_MACROASSEMBLER_AARCH64_HPP +diff --git a/src/hotspot/cpu/aarch64/globals_aarch64.hpp b/src/hotspot/cpu/aarch64/globals_aarch64.hpp +index b26eaa4bf..2f1e1f4e6 100644 +--- a/src/hotspot/cpu/aarch64/globals_aarch64.hpp ++++ b/src/hotspot/cpu/aarch64/globals_aarch64.hpp +@@ -107,6 +107,8 @@ define_pd_global(intx, InlineSmallCode, 1000); + product(uint, UseSVE, 0, \ + "Highest supported SVE instruction set version") \ + range(0, 2) \ ++ product(bool, UseCompactObjectHeaders, false, EXPERIMENTAL, \ ++ "Use compact 64-bit object headers in 64-bit VM") \ + product(bool, UseBlockZeroing, true, \ + "Use DC ZVA for block zeroing") \ + product(intx, BlockZeroingLowLimit, 256, \ +diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp +index 9e69c913a..66a71e166 100644 +--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp ++++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * +@@ -767,7 +767,6 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) + } + + if (LockingMode == LM_LIGHTWEIGHT) { +- ldr(tmp, Address(obj_reg, oopDesc::mark_offset_in_bytes())); + lightweight_lock(obj_reg, tmp, tmp2, tmp3, slow_case); + b(count); + } else if (LockingMode == LM_LEGACY) { +@@ -884,22 +883,6 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) + + if (LockingMode == LM_LIGHTWEIGHT) { + Label slow_case; +- +- // Check for non-symmetric locking. This is allowed by the spec and the interpreter +- // must handle it. +- Register tmp = rscratch1; +- // First check for lock-stack underflow. +- ldrw(tmp, Address(rthread, JavaThread::lock_stack_top_offset())); +- cmpw(tmp, (unsigned)LockStack::start_offset()); +- br(Assembler::LE, slow_case); +- // Then check if the top of the lock-stack matches the unlocked object. +- subw(tmp, tmp, oopSize); +- ldr(tmp, Address(rthread, tmp)); +- cmpoop(tmp, obj_reg); +- br(Assembler::NE, slow_case); +- +- ldr(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes())); +- tbnz(header_reg, exact_log2(markWord::monitor_value), slow_case); + lightweight_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case); + b(count); + bind(slow_case); +diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp +index 8ec1af1bd..e4180386d 100644 +--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp ++++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * +@@ -23,8 +23,6 @@ + * + */ + +-#include +- + #include "precompiled.hpp" + #include "asm/assembler.hpp" + #include "asm/assembler.inline.hpp" +@@ -54,6 +52,7 @@ + #include "runtime/jniHandles.inline.hpp" + #include "runtime/sharedRuntime.hpp" + #include "runtime/stubRoutines.hpp" ++#include "utilities/globalDefinitions.hpp" + #include "utilities/powerOfTwo.hpp" + #ifdef COMPILER1 + #include "c1/c1_LIRAssembler.hpp" +@@ -65,6 +64,8 @@ + #include "opto/output.hpp" + #endif + ++#include ++ + #ifdef PRODUCT + #define BLOCK_COMMENT(str) /* nothing */ + #else +@@ -4664,8 +4665,30 @@ void MacroAssembler::load_method_holder(Register holder, Register method) { + ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass* + } + ++// Loads the obj's Klass* into dst. ++// Preserves all registers (incl src, rscratch1 and rscratch2). ++void MacroAssembler::load_nklass_compact(Register dst, Register src) { ++ assert(UseCompactObjectHeaders, "expects UseCompactObjectHeaders"); ++ ++ Label fast; ++ ++ // Check if we can take the (common) fast path, if obj is unlocked. ++ ldr(dst, Address(src, oopDesc::mark_offset_in_bytes())); ++ tbz(dst, exact_log2(markWord::monitor_value), fast); ++ ++ // Fetch displaced header ++ ldr(dst, Address(dst, OM_OFFSET_NO_MONITOR_VALUE_TAG(header))); ++ ++ // Fast-path: shift to get narrowKlass. ++ bind(fast); ++ lsr(dst, dst, markWord::klass_shift); ++} ++ + void MacroAssembler::load_klass(Register dst, Register src) { +- if (UseCompressedClassPointers) { ++ if (UseCompactObjectHeaders) { ++ load_nklass_compact(dst, src); ++ decode_klass_not_null(dst); ++ } else if (UseCompressedClassPointers) { + ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes())); + decode_klass_not_null(dst); + } else { +@@ -4721,8 +4744,13 @@ void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, R + } + + void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) { ++ assert_different_registers(oop, trial_klass, tmp); + if (UseCompressedClassPointers) { +- ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes())); ++ if (UseCompactObjectHeaders) { ++ load_nklass_compact(tmp, oop); ++ } else { ++ ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes())); ++ } + if (CompressedKlassPointers::base() == nullptr) { + cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift()); + return; +@@ -4739,9 +4767,26 @@ void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) + cmp(trial_klass, tmp); + } + ++void MacroAssembler::cmp_klass(Register src, Register dst, Register tmp1, Register tmp2) { ++ if (UseCompactObjectHeaders) { ++ load_nklass_compact(tmp1, src); ++ load_nklass_compact(tmp2, dst); ++ cmpw(tmp1, tmp2); ++ } else if (UseCompressedClassPointers) { ++ ldrw(tmp1, Address(src, oopDesc::klass_offset_in_bytes())); ++ ldrw(tmp2, Address(dst, oopDesc::klass_offset_in_bytes())); ++ cmpw(tmp1, tmp2); ++ } else { ++ ldr(tmp1, Address(src, oopDesc::klass_offset_in_bytes())); ++ ldr(tmp2, Address(dst, oopDesc::klass_offset_in_bytes())); ++ cmp(tmp1, tmp2); ++ } ++} ++ + void MacroAssembler::store_klass(Register dst, Register src) { + // FIXME: Should this be a store release? concurrent gcs assumes + // klass length is valid if klass field is not null. 
++ assert(!UseCompactObjectHeaders, "not with compact headers");
+ if (UseCompressedClassPointers) {
+ encode_klass_not_null(src);
+ strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
+@@ -4751,6 +4796,7 @@ void MacroAssembler::store_klass(Register dst, Register src) {
+ }
+
+ void MacroAssembler::store_klass_gap(Register dst, Register src) {
++ assert(!UseCompactObjectHeaders, "not with compact headers");
+ if (UseCompressedClassPointers) {
+ // Store to klass gap in destination
+ strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
+@@ -6573,97 +6619,122 @@ void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
+ }
+
+ // Implements lightweight-locking.
+-// Branches to slow upon failure to lock the object, with ZF cleared.
+-// Falls through upon success with ZF set.
+ //
+ // - obj: the object to be locked
+-// - hdr: the header, already loaded from obj, will be destroyed
+-// - t1, t2: temporary registers, will be destroyed
+-void MacroAssembler::lightweight_lock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
++// - t1, t2, t3: temporary registers, will be destroyed
++// - slow: branched to if locking fails, absolute offset may be larger than 32KB (imm14 encoding).
++void MacroAssembler::lightweight_lock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
+ assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
+- assert_different_registers(obj, hdr, t1, t2, rscratch1);
+-
+- // Check if we would have space on lock-stack for the object.
+- ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
+- cmpw(t1, (unsigned)LockStack::end_offset() - 1);
+- br(Assembler::GT, slow);
+-
+- // Load (object->mark() | 1) into hdr
+- orr(hdr, hdr, markWord::unlocked_value);
+- // Clear lock-bits, into t2
+- eor(t2, hdr, markWord::unlocked_value);
+- // Try to swing header from unlocked to locked
+- // Clobbers rscratch1 when UseLSE is false
+- cmpxchg(/*addr*/ obj, /*expected*/ hdr, /*new*/ t2, Assembler::xword,
+- /*acquire*/ true, /*release*/ true, /*weak*/ false, t1);
++ assert_different_registers(obj, t1, t2, t3, rscratch1);
++
++ Label push;
++ const Register top = t1;
++ const Register mark = t2;
++ const Register t = t3;
++
++ // Preload the markWord. It is important that this is the first
++ // instruction emitted as it is part of C1's null check semantics.
++ ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
++
++ // Check if the lock-stack is full.
++ ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
++ cmpw(top, (unsigned)LockStack::end_offset());
++ br(Assembler::GE, slow);
++
++ // Check for recursion.
++ subw(t, top, oopSize);
++ ldr(t, Address(rthread, t));
++ cmp(obj, t);
++ br(Assembler::EQ, push);
++
++ // Check header for monitor (0b10).
++ tst(mark, markWord::monitor_value);
+ br(Assembler::NE, slow);
+
+- // After successful lock, push object on lock-stack
+- ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
+- str(obj, Address(rthread, t1));
+- addw(t1, t1, oopSize);
+- strw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
++ // Try to lock. 
Transition lock bits 0b01 => 0b00
++ assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
++ orr(mark, mark, markWord::unlocked_value);
++ eor(t, mark, markWord::unlocked_value);
++ cmpxchg(/*addr*/ obj, /*expected*/ mark, /*new*/ t, Assembler::xword,
++ /*acquire*/ true, /*release*/ false, /*weak*/ false, noreg);
++ br(Assembler::NE, slow);
++
++ bind(push);
++ // After successful lock, push object on lock-stack.
++ str(obj, Address(rthread, top));
++ addw(top, top, oopSize);
++ strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
+ }
+
+ // Implements lightweight-unlocking.
+-// Branches to slow upon failure, with ZF cleared.
+-// Falls through upon success, with ZF set.
+ //
+ // - obj: the object to be unlocked
+-// - hdr: the (pre-loaded) header of the object
+-// - t1, t2: temporary registers
+-void MacroAssembler::lightweight_unlock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
++// - t1, t2, t3: temporary registers
++// - slow: branched to if unlocking fails, absolute offset may be larger than 32KB (imm14 encoding).
++void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
+ assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
+- assert_different_registers(obj, hdr, t1, t2, rscratch1);
++ // cmpxchg clobbers rscratch1.
++ assert_different_registers(obj, t1, t2, t3, rscratch1);
+
+ #ifdef ASSERT
+ {
+- // The following checks rely on the fact that LockStack is only ever modified by
+- // its owning thread, even if the lock got inflated concurrently; removal of LockStack
+- // entries after inflation will happen delayed in that case.
+-
+ // Check for lock-stack underflow.
+ Label stack_ok;
+ ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
+ cmpw(t1, (unsigned)LockStack::start_offset());
+- br(Assembler::GT, stack_ok);
++ br(Assembler::GE, stack_ok);
+ STOP("Lock-stack underflow");
+ bind(stack_ok);
+ }
+- {
+- // Check if the top of the lock-stack matches the unlocked object.
+- Label tos_ok;
+- subw(t1, t1, oopSize);
+- ldr(t1, Address(rthread, t1));
+- cmpoop(t1, obj);
+- br(Assembler::EQ, tos_ok);
+- STOP("Top of lock-stack does not match the unlocked object");
+- bind(tos_ok);
+- }
+- {
+- // Check that hdr is fast-locked.
+- Label hdr_ok;
+- tst(hdr, markWord::lock_mask_in_place);
+- br(Assembler::EQ, hdr_ok);
+- STOP("Header is not fast-locked");
+- bind(hdr_ok);
+- }
+ #endif
+
+- // Load the new header (unlocked) into t1
+- orr(t1, hdr, markWord::unlocked_value);
++ Label unlocked, push_and_slow;
++ const Register top = t1;
++ const Register mark = t2;
++ const Register t = t3;
+
+- // Try to swing header from locked to unlocked
+- // Clobbers rscratch1 when UseLSE is false
+- cmpxchg(obj, hdr, t1, Assembler::xword,
+- /*acquire*/ true, /*release*/ true, /*weak*/ false, t2);
++ // Check if obj is top of lock-stack.
++ ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
++ subw(top, top, oopSize);
++ ldr(t, Address(rthread, top));
++ cmp(obj, t);
+ br(Assembler::NE, slow);
+
+- // After successful unlock, pop object from lock-stack
+- ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
+- subw(t1, t1, oopSize);
++ // Pop lock-stack.
++ DEBUG_ONLY(str(zr, Address(rthread, top));)
++ strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
++
++ // Check if recursive.
++ subw(t, top, oopSize);
++ ldr(t, Address(rthread, t));
++ cmp(obj, t);
++ br(Assembler::EQ, unlocked);
++
++ // Not recursive. 
Check header for monitor (0b10). ++ ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes())); ++ tbnz(mark, log2i_exact(markWord::monitor_value), push_and_slow); ++ + #ifdef ASSERT +- str(zr, Address(rthread, t1)); ++ // Check header not unlocked (0b01). ++ Label not_unlocked; ++ tbz(mark, log2i_exact(markWord::unlocked_value), not_unlocked); ++ stop("lightweight_unlock already unlocked"); ++ bind(not_unlocked); + #endif +- strw(t1, Address(rthread, JavaThread::lock_stack_top_offset())); ++ ++ // Try to unlock. Transition lock bits 0b00 => 0b01 ++ assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea"); ++ orr(t, mark, markWord::unlocked_value); ++ cmpxchg(obj, mark, t, Assembler::xword, ++ /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg); ++ br(Assembler::EQ, unlocked); ++ ++ bind(push_and_slow); ++ // Restore lock-stack and handle the unlock in runtime. ++ DEBUG_ONLY(str(obj, Address(rthread, top));) ++ addw(top, top, oopSize); ++ strw(top, Address(rthread, JavaThread::lock_stack_top_offset())); ++ b(slow); ++ ++ bind(unlocked); + } +diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp +index 71d4f74d6..966c1aab3 100644 +--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp ++++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * +@@ -862,9 +862,11 @@ public: + void load_method_holder(Register holder, Register method); + + // oop manipulations ++ void load_nklass_compact(Register dst, Register src); + void load_klass(Register dst, Register src); + void store_klass(Register dst, Register src); + void cmp_klass(Register oop, Register trial_klass, Register tmp); ++ void cmp_klass(Register src, Register dst, Register tmp1, Register tmp2); + + void resolve_weak_handle(Register result, Register tmp1, Register tmp2); + void resolve_oop_handle(Register result, Register tmp1, Register tmp2); +@@ -1648,8 +1650,8 @@ public: + // Code for java.lang.Thread::onSpinWait() intrinsic. + void spin_wait(); + +- void lightweight_lock(Register obj, Register hdr, Register t1, Register t2, Label& slow); +- void lightweight_unlock(Register obj, Register hdr, Register t1, Register t2, Label& slow); ++ void lightweight_lock(Register obj, Register t1, Register t2, Register t3, Label& slow); ++ void lightweight_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow); + + private: + // Check the current thread doesn't need a cross modify fence. +diff --git a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp +index bdf4ca35f..aae9aee10 100644 +--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp ++++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved. + * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+@@ -1817,7 +1817,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, + __ br(Assembler::NE, slow_path_lock); + } else { + assert(LockingMode == LM_LIGHTWEIGHT, "must be"); +- __ ldr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes())); + __ lightweight_lock(obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock); + } + __ bind(count); +@@ -1960,8 +1959,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, + __ decrement(Address(rthread, JavaThread::held_monitor_count_offset())); + } else { + assert(LockingMode == LM_LIGHTWEIGHT, ""); +- __ ldr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes())); +- __ tbnz(old_hdr, exact_log2(markWord::monitor_value), slow_path_unlock); + __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock); + __ decrement(Address(rthread, JavaThread::held_monitor_count_offset())); + } +diff --git a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp +index 240190ab6..7f8cbedc0 100644 +--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp ++++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp +@@ -3573,12 +3573,22 @@ void TemplateTable::_new() { + + // The object is initialized before the header. If the object size is + // zero, go directly to the header initialization. +- __ sub(r3, r3, sizeof(oopDesc)); ++ if (UseCompactObjectHeaders) { ++ assert(is_aligned(oopDesc::base_offset_in_bytes(), BytesPerLong), "oop base offset must be 8-byte-aligned"); ++ __ sub(r3, r3, oopDesc::base_offset_in_bytes()); ++ } else { ++ __ sub(r3, r3, sizeof(oopDesc)); ++ } + __ cbz(r3, initialize_header); + + // Initialize object fields + { +- __ add(r2, r0, sizeof(oopDesc)); ++ if (UseCompactObjectHeaders) { ++ assert(is_aligned(oopDesc::base_offset_in_bytes(), BytesPerLong), "oop base offset must be 8-byte-aligned"); ++ __ add(r2, r0, oopDesc::base_offset_in_bytes()); ++ } else { ++ __ add(r2, r0, sizeof(oopDesc)); ++ } + Label loop; + __ bind(loop); + __ str(zr, Address(__ post(r2, BytesPerLong))); +@@ -3588,11 +3598,15 @@ void TemplateTable::_new() { + + // initialize object header only. + __ bind(initialize_header); +- __ mov(rscratch1, (intptr_t)markWord::prototype().value()); +- __ str(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes())); +- __ store_klass_gap(r0, zr); // zero klass gap for compressed oops +- __ store_klass(r0, r4); // store klass last +- ++ if (UseCompactObjectHeaders) { ++ __ ldr(rscratch1, Address(r4, Klass::prototype_header_offset())); ++ __ str(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes())); ++ } else { ++ __ mov(rscratch1, (intptr_t)markWord::prototype().value()); ++ __ str(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes())); ++ __ store_klass_gap(r0, zr); // zero klass gap for compressed oops ++ __ store_klass(r0, r4); // store klass last ++ } + { + SkipIfEqual skip(_masm, &DTraceAllocProbes, false); + // Trigger dtrace event for fastpath +diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp +index a274565e5..3f2a3ac56 100644 +--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp ++++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * +@@ -176,6 +176,7 @@ enum Ampere_CPU_Model { + + static bool supports_fast_class_init_checks() { return true; } + constexpr static bool supports_stack_watermark_barrier() { return true; } ++ constexpr static bool supports_recursive_lightweight_locking() { return true; } + + static void get_compatible_board(char *buf, int buflen); + +diff --git a/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp b/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp +index 3d8dbc380..a23e4938d 100644 +--- a/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp ++++ b/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp +@@ -217,6 +217,10 @@ void MonitorExitStub::emit_code(LIR_Assembler* ce) { + __ b(_continuation); + } + ++void LoadKlassStub::emit_code(LIR_Assembler* ce) { ++ // Currently not needed. ++ Unimplemented(); ++} + + // Call return is directly after patch word + int PatchingStub::_patch_info_offset = 0; +diff --git a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp +index 457868984..4eb7907f1 100644 +--- a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp ++++ b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp +@@ -971,7 +971,7 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { + op->tmp1()->as_register(), + op->tmp2()->as_register(), + op->tmp3()->as_register(), +- arrayOopDesc::header_size(op->type()), ++ arrayOopDesc::base_offset_in_bytes(op->type()), + type2aelembytes(op->type()), + op->klass()->as_register(), + *op->stub()->entry()); +diff --git a/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp b/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp +index dc70c73d4..c330394f0 100644 +--- a/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp ++++ b/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp +@@ -298,6 +298,10 @@ void MonitorExitStub::emit_code(LIR_Assembler* ce) { + __ b(_continuation); + } + ++void LoadKlassStub::emit_code(LIR_Assembler* ce) { ++ // Currently not needed. ++ Unimplemented(); ++} + + // Implementation of patching: + // - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes). +diff --git a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp +index 1d97a43c9..a259a38a5 100644 +--- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp ++++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp +@@ -2328,7 +2328,7 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { + op->tmp1()->as_register(), + op->tmp2()->as_register(), + op->tmp3()->as_register(), +- arrayOopDesc::header_size(op->type()), ++ arrayOopDesc::base_offset_in_bytes(op->type()), + type2aelembytes(op->type()), + op->klass()->as_register(), + *op->stub()->entry()); +diff --git a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp +index 1ba786a6c..4267426cc 100644 +--- a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp ++++ b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp +@@ -337,7 +337,7 @@ void C1_MacroAssembler::allocate_array( + Register t1, // temp register + Register t2, // temp register + Register t3, // temp register +- int hdr_size, // object header size in words ++ int base_offset_in_bytes, // elements offset in bytes + int elt_size, // element size in bytes + Register klass, // object klass + Label& slow_case // continuation point if fast allocation fails +@@ -369,7 +369,7 @@ void C1_MacroAssembler::allocate_array( + sldi(t1, len, log2_elt_size); + arr_len_in_bytes = t1; + } +- addi(arr_size, arr_len_in_bytes, hdr_size * wordSize + MinObjAlignmentInBytesMask); // Add space for header & alignment. 
++ addi(arr_size, arr_len_in_bytes, base_offset_in_bytes + MinObjAlignmentInBytesMask); // Add space for header & alignment. + clrrdi(arr_size, arr_size, LogMinObjAlignmentInBytes); // Align array size. + + // Allocate space & initialize header. +@@ -379,8 +379,18 @@ void C1_MacroAssembler::allocate_array( + // Initialize body. + const Register base = t2; + const Register index = t3; +- addi(base, obj, hdr_size * wordSize); // compute address of first element +- addi(index, arr_size, -(hdr_size * wordSize)); // compute index = number of bytes to clear ++ addi(base, obj, base_offset_in_bytes); // compute address of first element ++ addi(index, arr_size, -(base_offset_in_bytes)); // compute index = number of bytes to clear ++ ++ // Zero first 4 bytes, if start offset is not word aligned. ++ if (!is_aligned(base_offset_in_bytes, BytesPerWord)) { ++ assert(is_aligned(base_offset_in_bytes, BytesPerInt), "must be 4-byte aligned"); ++ li(t1, 0); ++ stw(t1, 0, base); ++ addi(base, base, BytesPerInt); ++ // Note: initialize_body will align index down, no need to correct it here. ++ } ++ + initialize_body(base, index); + + if (CURRENT_ENV->dtrace_alloc_probes()) { +diff --git a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.hpp b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.hpp +index 5fa19d5fd..ed4380b3d 100644 +--- a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.hpp ++++ b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.hpp +@@ -80,7 +80,7 @@ + Register t1, // temp register + Register t2, // temp register + Register t3, // temp register +- int hdr_size, // object header size in words ++ int base_offset_in_bytes, // elements offset in bytes + int elt_size, // element size in bytes + Register klass, // object klass + Label& slow_case // continuation point if fast allocation fails +diff --git a/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp b/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp +index 530b99dd0..bb0fad59a 100644 +--- a/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp ++++ b/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp +@@ -228,6 +228,11 @@ void MonitorExitStub::emit_code(LIR_Assembler* ce) { + __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id))); + } + ++void LoadKlassStub::emit_code(LIR_Assembler* ce) { ++ // Currently not needed. 
++ Unimplemented(); ++} ++ + // Implementation of patching: + // - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes) + // - Replace original code with a call to the stub +diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp +index 8e52a6775..deeef2c17 100644 +--- a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp ++++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp +@@ -1040,7 +1040,7 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { + len, + tmp1, + tmp2, +- arrayOopDesc::header_size(op->type()), ++ arrayOopDesc::base_offset_in_bytes(op->type()), + array_element_size(op->type()), + op->klass()->as_register(), + *op->stub()->entry()); +diff --git a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp +index 6c1dce0de..88d2eb144 100644 +--- a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp ++++ b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp +@@ -181,6 +181,10 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register + + if (len->is_valid()) { + sw(len, Address(obj, arrayOopDesc::length_offset_in_bytes())); ++ if (!is_aligned(arrayOopDesc::header_size_in_bytes(), BytesPerWord)) { ++ assert(is_aligned(arrayOopDesc::header_size_in_bytes(), BytesPerInt), "must be 4-byte aligned"); ++ sw(zr, Address(obj, arrayOopDesc::header_size_in_bytes())); ++ } + } else if (UseCompressedClassPointers) { + store_klass_gap(obj, zr); + } +@@ -280,7 +284,7 @@ void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register + verify_oop(obj); + } + +-void C1_MacroAssembler::allocate_array(Register obj, Register len, Register tmp1, Register tmp2, int header_size, int f, Register klass, Label& slow_case) { ++void C1_MacroAssembler::allocate_array(Register obj, Register len, Register tmp1, Register tmp2, int base_offset_in_bytes, int f, Register klass, Label& slow_case) { + assert_different_registers(obj, len, tmp1, tmp2, klass); + + // determine alignment mask +@@ -292,7 +296,7 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register tmp1 + + const Register arr_size = tmp2; // okay to be the same + // align object end +- mv(arr_size, (int32_t)header_size * BytesPerWord + MinObjAlignmentInBytesMask); ++ mv(arr_size, (int32_t)base_offset_in_bytes + MinObjAlignmentInBytesMask); + shadd(arr_size, len, arr_size, t0, f); + andi(arr_size, arr_size, ~(uint)MinObjAlignmentInBytesMask); + +@@ -302,7 +306,10 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register tmp1 + + // clear rest of allocated space + const Register len_zero = len; +- initialize_body(obj, arr_size, header_size * BytesPerWord, len_zero); ++ // We align-up the header size to word-size, because we clear the ++ // possible alignment gap in initialize_header(). 
++ int hdr_size = align_up(base_offset_in_bytes, BytesPerWord); ++ initialize_body(obj, arr_size, hdr_size, len_zero); + + membar(MacroAssembler::StoreStore); + +diff --git a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.hpp +index b737a4385..71f1305ce 100644 +--- a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.hpp ++++ b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.hpp +@@ -101,7 +101,7 @@ using MacroAssembler::null_check; + // header_size: size of object header in words + // f : element scale factor + // slow_case : exit to slow case implementation if fast allocation fails +- void allocate_array(Register obj, Register len, Register tmp1, Register tmp2, int header_size, int f, Register klass, Label& slow_case); ++ void allocate_array(Register obj, Register len, Register tmp1, Register tmp2, int base_offset_in_bytes, int f, Register klass, Label& slow_case); + + int rsp_offset() const { return _rsp_offset; } + +diff --git a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp +index b7f1d3605..aa2392e47 100644 +--- a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp ++++ b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp +@@ -253,6 +253,11 @@ void MonitorExitStub::emit_code(LIR_Assembler* ce) { + __ branch_optimized(Assembler::bcondAlways, _continuation); + } + ++void LoadKlassStub::emit_code(LIR_Assembler* ce) { ++ // Currently not needed. ++ Unimplemented(); ++} ++ + // Implementation of patching: + // - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes). + // - Replace original code with a call to the stub. +diff --git a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp +index 52e8b0b54..580122f2b 100644 +--- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp ++++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp +@@ -2403,7 +2403,7 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { + op->len()->as_register(), + op->tmp1()->as_register(), + op->tmp2()->as_register(), +- arrayOopDesc::header_size(op->type()), ++ arrayOopDesc::base_offset_in_bytes(op->type()), + type2aelembytes(op->type()), + op->klass()->as_register(), + *op->stub()->entry()); +diff --git a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp +index 63201a564..b9784213e 100644 +--- a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp ++++ b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp +@@ -300,7 +300,7 @@ void C1_MacroAssembler::allocate_array( + Register len, // array length + Register t1, // temp register + Register t2, // temp register +- int hdr_size, // object header size in words ++ int base_offset_in_bytes, // elements offset in bytes + int elt_size, // element size in bytes + Register klass, // object klass + Label& slow_case // Continuation point if fast allocation fails. +@@ -326,8 +326,8 @@ void C1_MacroAssembler::allocate_array( + case 8: z_sllg(arr_size, len, 3); break; + default: ShouldNotReachHere(); + } +- add2reg(arr_size, hdr_size * wordSize + MinObjAlignmentInBytesMask); // Add space for header & alignment. +- z_nill(arr_size, (~MinObjAlignmentInBytesMask) & 0xffff); // Align array size. ++ add2reg(arr_size, base_offset_in_bytes + MinObjAlignmentInBytesMask); // Add space for header & alignment. ++ z_nill(arr_size, (~MinObjAlignmentInBytesMask) & 0xffff); // Align array size. 
+ + try_allocate(obj, arr_size, 0, t1, slow_case); + +@@ -337,9 +337,9 @@ void C1_MacroAssembler::allocate_array( + Label done; + Register object_fields = t1; + Register Rzero = Z_R1_scratch; +- z_aghi(arr_size, -(hdr_size * BytesPerWord)); ++ z_aghi(arr_size, -base_offset_in_bytes); + z_bre(done); // Jump if size of fields is zero. +- z_la(object_fields, hdr_size * BytesPerWord, obj); ++ z_la(object_fields, base_offset_in_bytes, obj); + z_xgr(Rzero, Rzero); + initialize_body(object_fields, arr_size, Rzero); + bind(done); +diff --git a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.hpp b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.hpp +index 7a4f76af1..a48b5a9b0 100644 +--- a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.hpp ++++ b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.hpp +@@ -70,7 +70,7 @@ + Register obj, // result: Pointer to object after successful allocation. + Register t1, // temp register + Register t2, // temp register +- int hdr_size, // object header size in words ++ int base_offset_in_bytes, // elements offset in bytes + int obj_size, // object size in words + Register klass, // object klass + Label& slow_case // Continuation point if fast allocation fails. +diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp +index 57da38603..aa022afde 100644 +--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp ++++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp +@@ -1635,7 +1635,7 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { + len, + tmp1, + tmp2, +- arrayOopDesc::header_size(op->type()), ++ arrayOopDesc::base_offset_in_bytes(op->type()), + array_element_size(op->type()), + op->klass()->as_register(), + *op->stub()->entry()); +diff --git a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp +index ce9133986..0815b9d42 100644 +--- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp ++++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp +@@ -178,6 +178,14 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register + + if (len->is_valid()) { + movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len); ++#ifdef _LP64 ++ if (!is_aligned(arrayOopDesc::header_size_in_bytes(), BytesPerWord)) { ++ assert(is_aligned(arrayOopDesc::header_size_in_bytes(), BytesPerInt), "must be 4-byte aligned"); ++ movl(Address(obj, arrayOopDesc::header_size_in_bytes()), 0); ++ } ++#else ++ assert(is_aligned(arrayOopDesc::header_size_in_bytes(), BytesPerInt), "must be 4-byte aligned"); ++#endif + } + #ifdef _LP64 + else if (UseCompressedClassPointers) { +@@ -261,7 +269,7 @@ void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register + verify_oop(obj); + } + +-void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int header_size, Address::ScaleFactor f, Register klass, Label& slow_case) { ++void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int base_offset_in_bytes, Address::ScaleFactor f, Register klass, Label& slow_case) { + assert(obj == rax, "obj must be in rax, for cmpxchg"); + assert_different_registers(obj, len, t1, t2, klass); + +@@ -274,7 +282,7 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, + + const Register arr_size = t2; // okay to be the same + // align object end +- movptr(arr_size, header_size * BytesPerWord + MinObjAlignmentInBytesMask); ++ movptr(arr_size, base_offset_in_bytes + MinObjAlignmentInBytesMask); + lea(arr_size, 
Address(arr_size, len, f)); + andptr(arr_size, ~MinObjAlignmentInBytesMask); + +@@ -284,7 +292,10 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, + + // clear rest of allocated space + const Register len_zero = len; +- initialize_body(obj, arr_size, header_size * BytesPerWord, len_zero); ++ // We align-up the header size to word-size, because we clear the ++ // possible alignment gap in initialize_header(). ++ int hdr_size = align_up(base_offset_in_bytes, BytesPerWord); ++ initialize_body(obj, arr_size, hdr_size, len_zero); + + if (CURRENT_ENV->dtrace_alloc_probes()) { + assert(obj == rax, "must be"); +diff --git a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.hpp b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.hpp +index b3593feb0..d4a19279f 100644 +--- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.hpp ++++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.hpp +@@ -76,7 +76,7 @@ + // header_size: size of object header in words + // object_size: total size of object in words + // slow_case : exit to slow case implementation if fast allocation fails +- void allocate_object(Register obj, Register t1, Register t2, int header_size, int object_size, Register klass, Label& slow_case); ++ void allocate_object(Register obj, Register t1, Register t2, int base_offset_in_bytes, int object_size, Register klass, Label& slow_case); + + enum { + max_array_allocation_length = 0x00FFFFFF +diff --git a/src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp b/src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp +index cd5e87b29..a2f60bd23 100644 +--- a/src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp ++++ b/src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp +@@ -93,6 +93,18 @@ void C2HandleAnonOMOwnerStub::emit(C2_MacroAssembler& masm) { + #endif + __ jmp(continuation()); + } ++ ++int C2LoadNKlassStub::max_size() const { ++ return 10; ++} ++ ++void C2LoadNKlassStub::emit(C2_MacroAssembler& masm) { ++ __ bind(entry()); ++ Register d = dst(); ++ __ movq(d, Address(d, OM_OFFSET_NO_MONITOR_VALUE_TAG(header))); ++ __ jmp(continuation()); ++} ++ + #endif + + #undef __ +diff --git a/src/hotspot/share/c1/c1_CodeStubs.hpp b/src/hotspot/share/c1/c1_CodeStubs.hpp +index 04e379842..4c30b93e3 100644 +--- a/src/hotspot/share/c1/c1_CodeStubs.hpp ++++ b/src/hotspot/share/c1/c1_CodeStubs.hpp +@@ -581,4 +581,22 @@ class ArrayCopyStub: public CodeStub { + #endif // PRODUCT + }; + ++class LoadKlassStub: public CodeStub { ++private: ++ LIR_Opr _result; ++ ++public: ++ LoadKlassStub(LIR_Opr result) : ++ CodeStub(), _result(result) {}; ++ ++ virtual void emit_code(LIR_Assembler* e); ++ virtual void visit(LIR_OpVisitState* visitor) { ++ visitor->do_temp(_result); ++ visitor->do_output(_result); ++ } ++#ifndef PRODUCT ++ virtual void print_name(outputStream* out) const { out->print("LoadKlassStub"); } ++#endif // PRODUCT ++}; ++ + #endif // SHARE_C1_C1_CODESTUBS_HPP +diff --git a/src/hotspot/share/c1/c1_LIR.cpp b/src/hotspot/share/c1/c1_LIR.cpp +index dee208c11..a5fb9c390 100644 +--- a/src/hotspot/share/c1/c1_LIR.cpp ++++ b/src/hotspot/share/c1/c1_LIR.cpp +@@ -890,6 +890,7 @@ void LIR_OpVisitState::visit(LIR_Op* op) { + + do_input(opLoadKlass->_obj); + do_output(opLoadKlass->_result); ++ if (opLoadKlass->_stub) do_stub(opLoadKlass->_stub); + if (opLoadKlass->_info) do_info(opLoadKlass->_info); + break; + } +@@ -1070,6 +1071,9 @@ void LIR_OpLock::emit_code(LIR_Assembler* masm) { + + void LIR_OpLoadKlass::emit_code(LIR_Assembler* masm) { + masm->emit_load_klass(this); ++ if (stub()) { ++ masm->append_code_stub(stub()); ++ } + } + + #ifdef ASSERT +@@ -2046,6 
+2050,9 @@ void LIR_OpLock::print_instr(outputStream* out) const { + void LIR_OpLoadKlass::print_instr(outputStream* out) const { + obj()->print(out); out->print(" "); + result_opr()->print(out); out->print(" "); ++ if (stub()) { ++ out->print("[lbl:" INTPTR_FORMAT "]", p2i(stub()->entry())); ++ } + } + + #ifdef ASSERT +diff --git a/src/hotspot/share/c1/c1_LIR.hpp b/src/hotspot/share/c1/c1_LIR.hpp +index 6f527135f..a44378c23 100644 +--- a/src/hotspot/share/c1/c1_LIR.hpp ++++ b/src/hotspot/share/c1/c1_LIR.hpp +@@ -1903,13 +1903,16 @@ class LIR_OpLoadKlass: public LIR_Op { + + private: + LIR_Opr _obj; ++ CodeStub* _stub; + public: +- LIR_OpLoadKlass(LIR_Opr obj, LIR_Opr result, CodeEmitInfo* info) ++ LIR_OpLoadKlass(LIR_Opr obj, LIR_Opr result, CodeEmitInfo* info, CodeStub* stub) + : LIR_Op(lir_load_klass, result, info) + , _obj(obj) ++ , _stub(stub) + {} + + LIR_Opr obj() const { return _obj; } ++ CodeStub* stub() const { return _stub; } + + virtual LIR_OpLoadKlass* as_OpLoadKlass() { return this; } + virtual void emit_code(LIR_Assembler* masm); +@@ -2375,7 +2378,7 @@ class LIR_List: public CompilationResourceObj { + void xadd(LIR_Opr src, LIR_Opr add, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xadd, src, add, res, tmp)); } + void xchg(LIR_Opr src, LIR_Opr set, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xchg, src, set, res, tmp)); } + +- void load_klass(LIR_Opr obj, LIR_Opr result, CodeEmitInfo* info) { append(new LIR_OpLoadKlass(obj, result, info)); } ++ void load_klass(LIR_Opr obj, LIR_Opr result, CodeEmitInfo* info, CodeStub* stub) { append(new LIR_OpLoadKlass(obj, result, info, stub)); } + + #ifdef ASSERT + void lir_assert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, const char* msg, bool halt) { append(new LIR_OpAssert(condition, opr1, opr2, msg, halt)); } +diff --git a/src/hotspot/share/c1/c1_LIRGenerator.cpp b/src/hotspot/share/c1/c1_LIRGenerator.cpp +index 5dbb6d657..eb4598831 100644 +--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp ++++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp +@@ -1243,7 +1243,8 @@ void LIRGenerator::do_isInstance(Intrinsic* x) { + } + + void LIRGenerator::load_klass(LIR_Opr obj, LIR_Opr klass, CodeEmitInfo* null_check_info) { +- __ load_klass(obj, klass, null_check_info); ++ CodeStub* slow_path = AARCH64_ONLY(UseCompactObjectHeaders ? new LoadKlassStub(klass) :) nullptr; ++ __ load_klass(obj, klass, null_check_info, slow_path); + } + + // Example: object.getClass () +diff --git a/src/hotspot/share/cds/archiveBuilder.cpp b/src/hotspot/share/cds/archiveBuilder.cpp +index 263efbeec..245c77339 100644 +--- a/src/hotspot/share/cds/archiveBuilder.cpp ++++ b/src/hotspot/share/cds/archiveBuilder.cpp +@@ -686,6 +686,13 @@ void ArchiveBuilder::make_klasses_shareable() { + const char* generated = ""; + Klass* k = get_buffered_addr(klasses()->at(i)); + k->remove_java_mirror(); ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders) { ++ Klass* requested_k = to_requested(k); ++ narrowKlass nk = CompressedKlassPointers::encode_not_null(requested_k, _requested_static_archive_bottom); ++ k->set_prototype_header(markWord::prototype().set_narrow_klass(nk)); ++ } ++#endif //AARCH64 + if (k->is_objArray_klass()) { + // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info + // on their array classes. 
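
The CDS hunks above and below all depend on one invariant of the compact-header scheme: the narrowKlass lives in the upper bits of the 64-bit mark word, and each Klass caches a precomputed prototype header (markWord::prototype().set_narrow_klass(nk)) so that initializing an object header is a single store, and load_nklass_compact() is just a load plus a shift by markWord::klass_shift. The following is a minimal standalone C++ sketch of that encoding, not HotSpot code; the shift value, field widths, and names in the sketch namespace are illustrative assumptions, not the authoritative HotSpot constants.

    // Sketch of the compact-header mark word layout assumed by the patch.
    #include <cstdint>
    #include <cassert>

    namespace sketch {
    constexpr int      klass_shift    = 32;   // assumed position of the narrowKlass field
    constexpr uint64_t unlocked_value = 0b01; // low lock bits, as in markWord
    using narrowKlass = uint32_t;

    // Per-class prototype header: narrowKlass shifted into place plus the
    // "unlocked" lock bits -- what Klass::prototype_header() caches so that
    // TemplateTable::_new can initialize a header with a single str.
    constexpr uint64_t make_prototype(narrowKlass nk) {
      return (uint64_t(nk) << klass_shift) | unlocked_value;
    }

    // Fast path of load_nklass_compact(): no monitor installed, so the klass
    // bits come straight out of the mark word.
    constexpr narrowKlass load_nklass(uint64_t mark) {
      return narrowKlass(mark >> klass_shift);
    }
    } // namespace sketch

    int main() {
      sketch::narrowKlass nk = 0x1234;
      uint64_t mark = sketch::make_prototype(nk);
      assert(sketch::load_nklass(mark) == nk); // header round-trips the class id
      return 0;
    }

This also explains why the archive writer below must fold the narrowKlass into the mark word when UseCompactObjectHeaders is on: there is no separate klass field left to patch.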
+diff --git a/src/hotspot/share/cds/archiveHeapWriter.cpp b/src/hotspot/share/cds/archiveHeapWriter.cpp
+index 185fdbce0..db4bed5a9 100644
+--- a/src/hotspot/share/cds/archiveHeapWriter.cpp
++++ b/src/hotspot/share/cds/archiveHeapWriter.cpp
+@@ -193,8 +193,15 @@ void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
+ memset(mem, 0, byte_size);
+ {
+ // This is copied from MemAllocator::finish
+- oopDesc::set_mark(mem, markWord::prototype());
+- oopDesc::release_set_klass(mem, k);
++#ifdef AARCH64
++ if (UseCompactObjectHeaders) {
++ oopDesc::release_set_mark(mem, k->prototype_header());
++ } else
++#endif
++ {
++ oopDesc::set_mark(mem, markWord::prototype());
++ oopDesc::release_set_klass(mem, k);
++ }
+ }
+ {
+ // This is copied from ObjArrayAllocator::initialize
+@@ -260,9 +267,16 @@ void ArchiveHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t
+ Klass* oak = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
+ HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
+ memset(mem, 0, fill_bytes);
+- oopDesc::set_mark(mem, markWord::prototype());
+ narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(oak);
+- cast_to_oop(mem)->set_narrow_klass(nk);
++#ifdef AARCH64
++ if (UseCompactObjectHeaders) {
++ oopDesc::release_set_mark(mem, markWord::prototype().set_narrow_klass(nk));
++ } else
++#endif
++ {
++ oopDesc::set_mark(mem, markWord::prototype());
++ cast_to_oop(mem)->set_narrow_klass(nk);
++ }
+ arrayOopDesc::set_length(mem, array_length);
+ }
+
+@@ -422,13 +436,27 @@ void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop s
+ address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>
(requested_obj)); + + oop fake_oop = cast_to_oop(buffered_addr); +- fake_oop->set_narrow_klass(nk); ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders) { ++ fake_oop->set_mark(fake_oop->mark().set_narrow_klass(nk)); ++ } else ++#endif ++ { ++ fake_oop->set_narrow_klass(nk); ++ } + + // We need to retain the identity_hash, because it may have been used by some hashtables + // in the shared heap. + if (src_obj != nullptr && !src_obj->fast_no_hash_check()) { + int src_hash = src_obj->identity_hash(); +- fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash)); ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders) { ++ fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash)); ++ } else ++#endif ++ { ++ fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash)); ++ } + assert(fake_oop->mark().is_unlocked(), "sanity"); + + DEBUG_ONLY(int archived_hash = fake_oop->identity_hash()); +diff --git a/src/hotspot/share/cds/filemap.cpp b/src/hotspot/share/cds/filemap.cpp +index 2edc83bac..74a9d03d3 100644 +--- a/src/hotspot/share/cds/filemap.cpp ++++ b/src/hotspot/share/cds/filemap.cpp +@@ -203,6 +203,7 @@ void FileMapHeader::populate(FileMapInfo *info, size_t core_region_alignment, + _core_region_alignment = core_region_alignment; + _obj_alignment = ObjectAlignmentInBytes; + _compact_strings = CompactStrings; ++ _compact_headers = AARCH64_ONLY(UseCompactObjectHeaders) NOT_AARCH64(false); + if (DumpSharedSpaces && HeapShared::can_write()) { + _narrow_oop_mode = CompressedOops::mode(); + _narrow_oop_base = CompressedOops::base(); +@@ -281,6 +282,7 @@ void FileMapHeader::print(outputStream* st) { + st->print_cr("- narrow_oop_base: " INTPTR_FORMAT, p2i(_narrow_oop_base)); + st->print_cr("- narrow_oop_shift %d", _narrow_oop_shift); + st->print_cr("- compact_strings: %d", _compact_strings); ++ st->print_cr("- compact_headers: %d", _compact_headers); + st->print_cr("- max_heap_size: " UINTX_FORMAT, _max_heap_size); + st->print_cr("- narrow_oop_mode: %d", _narrow_oop_mode); + st->print_cr("- narrow_klass_shift: %d", _narrow_klass_shift); +@@ -2419,6 +2421,14 @@ bool FileMapHeader::validate() { + return false; + } + ++ if (compact_headers() != AARCH64_ONLY(UseCompactObjectHeaders) NOT_AARCH64(false)) { ++ log_info(cds)("The shared archive file's UseCompactObjectHeaders setting (%s)" ++ " does not equal the current UseCompactObjectHeaders setting (%s).", ++ _compact_headers ? "enabled" : "disabled", ++ AARCH64_ONLY(UseCompactObjectHeaders ? 
"enabled" : "disabled") NOT_AARCH64("disabled")); ++ return false; ++ } ++ + if (!_use_optimized_module_handling) { + MetaspaceShared::disable_optimized_module_handling(); + log_info(cds)("optimized module handling: disabled because archive was created without optimized module handling"); +diff --git a/src/hotspot/share/cds/filemap.hpp b/src/hotspot/share/cds/filemap.hpp +index 2c715aeb8..4d7907b72 100644 +--- a/src/hotspot/share/cds/filemap.hpp ++++ b/src/hotspot/share/cds/filemap.hpp +@@ -187,6 +187,7 @@ private: + address _narrow_oop_base; // compressed oop encoding base + int _narrow_oop_shift; // compressed oop encoding shift + bool _compact_strings; // value of CompactStrings ++ bool _compact_headers; // value of UseCompactObjectHeaders + uintx _max_heap_size; // java max heap size during dumping + CompressedOops::Mode _narrow_oop_mode; // compressed oop encoding mode + int _narrow_klass_shift; // save narrow klass base and shift +@@ -256,6 +257,7 @@ public: + address narrow_oop_base() const { return _narrow_oop_base; } + int narrow_oop_shift() const { return _narrow_oop_shift; } + bool compact_strings() const { return _compact_strings; } ++ bool compact_headers() const { return _compact_headers; } + uintx max_heap_size() const { return _max_heap_size; } + CompressedOops::Mode narrow_oop_mode() const { return _narrow_oop_mode; } + int narrow_klass_shift() const { return _narrow_klass_shift; } +diff --git a/src/hotspot/share/ci/ciKlass.cpp b/src/hotspot/share/ci/ciKlass.cpp +index 6e70d69f0..307b2939c 100644 +--- a/src/hotspot/share/ci/ciKlass.cpp ++++ b/src/hotspot/share/ci/ciKlass.cpp +@@ -249,3 +249,25 @@ const char* ciKlass::external_name() const { + return get_Klass()->external_name(); + ) + } ++ ++// ------------------------------------------------------------------ ++// ciKlass::prototype_header_offset ++juint ciKlass::prototype_header_offset() { ++ assert(is_loaded(), "must be loaded"); ++ ++ VM_ENTRY_MARK; ++ Klass* this_klass = get_Klass(); ++ return in_bytes(this_klass->prototype_header_offset()); ++} ++ ++// ------------------------------------------------------------------ ++// ciKlass::prototype_header ++#ifdef AARCH64 ++uintptr_t ciKlass::prototype_header() { ++ assert(is_loaded(), "must be loaded"); ++ ++ VM_ENTRY_MARK; ++ Klass* this_klass = get_Klass(); ++ return (uintptr_t)this_klass->prototype_header().to_pointer(); ++} ++#endif +diff --git a/src/hotspot/share/ci/ciKlass.hpp b/src/hotspot/share/ci/ciKlass.hpp +index 2dd5a5e2c..be8187672 100644 +--- a/src/hotspot/share/ci/ciKlass.hpp ++++ b/src/hotspot/share/ci/ciKlass.hpp +@@ -129,6 +129,11 @@ public: + void print_name_on(outputStream* st); + + const char* external_name() const; ++ ++ juint prototype_header_offset(); ++#ifdef AARCH64 ++ uintptr_t prototype_header(); ++#endif + }; + + #endif // SHARE_CI_CIKLASS_HPP +diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp +index d3ab782f5..bb9f5f0b4 100644 +--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp ++++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp +@@ -88,6 +88,7 @@ + #include "gc/shared/oopStorageParState.hpp" + #include "gc/shared/preservedMarks.inline.hpp" + #include "gc/shared/referenceProcessor.inline.hpp" ++#include "gc/shared/slidingForwarding.hpp" + #include "gc/shared/suspendibleThreadSet.hpp" + #include "gc/shared/taskqueue.inline.hpp" + #include "gc/shared/taskTerminator.hpp" +@@ -1530,6 +1531,8 @@ jint G1CollectedHeap::initialize() { + + G1InitLogger::print(); + ++ 
SlidingForwarding::initialize(heap_rs.region(), HeapRegion::GrainWords);
++
+ return JNI_OK;
+ }
+
+diff --git a/src/hotspot/share/gc/g1/g1FullCollector.cpp b/src/hotspot/share/gc/g1/g1FullCollector.cpp
+index df1afe0d3..99e67f1c7 100644
+--- a/src/hotspot/share/gc/g1/g1FullCollector.cpp
++++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp
+@@ -40,6 +40,7 @@
+ #include "gc/shared/preservedMarks.inline.hpp"
+ #include "gc/shared/classUnloadingContext.hpp"
+ #include "gc/shared/referenceProcessor.hpp"
++#include "gc/shared/slidingForwarding.hpp"
+ #include "gc/shared/verifyOption.hpp"
+ #include "gc/shared/weakProcessor.inline.hpp"
+ #include "gc/shared/workerPolicy.hpp"
+@@ -209,6 +210,8 @@ void G1FullCollector::collect() {
+ // Don't add any more derived pointers during later phases
+ deactivate_derived_pointers();
+
++ SlidingForwarding::begin();
++
+ phase2_prepare_compaction();
+
+ if (has_compaction_targets()) {
+@@ -221,6 +224,8 @@ void G1FullCollector::collect() {
+ log_info(gc, phases) ("No Regions selected for compaction. Skipping Phase 3: Adjust pointers and Phase 4: Compact heap");
+ }
+
++ SlidingForwarding::end();
++
+ phase5_reset_metadata();
+
+ G1CollectedHeap::finish_codecache_marking_cycle();
+@@ -389,7 +394,8 @@ uint G1FullCollector::truncate_parallel_cps() {
+ return lowest_current;
+ }
+
+-void G1FullCollector::phase2c_prepare_serial_compaction() {
++template <bool ALT_FWD>
++void G1FullCollector::phase2c_prepare_serial_compaction_impl() {
+ GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare serial compaction", scope()->timer());
+ // At this point, we know that after parallel compaction there will be regions that
+ // are partially compacted into. Thus, the last compaction region of all
+@@ -414,7 +420,7 @@ void G1FullCollector::phase2c_prepare_serial_compaction() {
+ serial_cp->initialize(start_hr);
+
+ HeapWord* dense_prefix_top = compaction_top(start_hr);
+- G1SerialRePrepareClosure re_prepare(serial_cp, dense_prefix_top);
++ G1SerialRePrepareClosure<ALT_FWD> re_prepare(serial_cp, dense_prefix_top);
+
+ for (uint i = start_serial + 1; i < _heap->max_reserved_regions(); i++) {
+ if (is_compaction_target(i)) {
+@@ -427,7 +433,16 @@ void G1FullCollector::phase2c_prepare_serial_compaction() {
+ serial_cp->update();
+ }
+
+-void G1FullCollector::phase2d_prepare_humongous_compaction() {
++void G1FullCollector::phase2c_prepare_serial_compaction() {
++ if (UseAltGCForwarding) {
++ phase2c_prepare_serial_compaction_impl<true>();
++ } else {
++ phase2c_prepare_serial_compaction_impl<false>();
++ }
++}
++
++template <bool ALT_FWD>
++void G1FullCollector::phase2d_prepare_humongous_compaction_impl() {
+ GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare humongous compaction", scope()->timer());
+ G1FullGCCompactionPoint* serial_cp = serial_compaction_point();
+ assert(serial_cp->has_regions(), "Sanity!" );
+@@ -445,7 +460,7 @@ void G1FullCollector::phase2d_prepare_humongous_compaction() {
+ region_index++;
+ continue;
+ } else if (hr->is_starts_humongous()) {
+- uint num_regions = humongous_cp->forward_humongous(hr);
++ uint num_regions = humongous_cp->forward_humongous<ALT_FWD>(hr);
+ region_index += num_regions; // Skip over the continues humongous regions. 
+ continue;
+ } else if (is_compaction_target(region_index)) {
+@@ -456,6 +471,14 @@
+ }
+ }
+
++void G1FullCollector::phase2d_prepare_humongous_compaction() {
++ if (UseAltGCForwarding) {
++ phase2d_prepare_humongous_compaction_impl<true>();
++ } else {
++ phase2d_prepare_humongous_compaction_impl<false>();
++ }
++}
++
+ void G1FullCollector::phase3_adjust_pointers() {
+ // Adjust the pointers to reflect the new locations
+ GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers", scope()->timer());
+diff --git a/src/hotspot/share/gc/g1/g1FullCollector.hpp b/src/hotspot/share/gc/g1/g1FullCollector.hpp
+index b1ceb599f..ef7bcad44 100644
+--- a/src/hotspot/share/gc/g1/g1FullCollector.hpp
++++ b/src/hotspot/share/gc/g1/g1FullCollector.hpp
+@@ -158,7 +158,11 @@ private:
+
+ void phase2a_determine_worklists();
+ bool phase2b_forward_oops();
++ template <bool ALT_FWD>
++ void phase2c_prepare_serial_compaction_impl();
+ void phase2c_prepare_serial_compaction();
++ template <bool ALT_FWD>
++ void phase2d_prepare_humongous_compaction_impl();
+ void phase2d_prepare_humongous_compaction();
+
+ void phase3_adjust_pointers();
+diff --git a/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp b/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp
+index a45c3eb17..43d02556f 100644
+--- a/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp
++++ b/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp
+@@ -40,10 +40,11 @@
+ #include "memory/iterator.inline.hpp"
+ #include "runtime/atomic.hpp"
+
++template <bool ALT_FWD>
+ class G1AdjustLiveClosure : public StackObj {
+- G1AdjustClosure* _adjust_closure;
++ G1AdjustClosure<ALT_FWD>* _adjust_closure;
+ public:
+- G1AdjustLiveClosure(G1AdjustClosure* cl) :
++ G1AdjustLiveClosure(G1AdjustClosure<ALT_FWD>* cl) :
+ _adjust_closure(cl) { }
+
+ size_t apply(oop object) {
+@@ -62,7 +63,17 @@ class G1AdjustRegionClosure : public HeapRegionClosure {
+ _worker_id(worker_id) { }
+
+ bool do_heap_region(HeapRegion* r) {
+- G1AdjustClosure cl(_collector);
++ if (UseAltGCForwarding) {
++ return do_heap_region_impl<true>(r);
++ } else {
++ return do_heap_region_impl<false>(r);
++ }
++ }
++
++private:
++ template <bool ALT_FWD>
++ bool do_heap_region_impl(HeapRegion* r) {
++ G1AdjustClosure<ALT_FWD> cl(_collector);
+ if (r->is_humongous()) {
+ // Special handling for humongous regions to get somewhat better
+ // work distribution.
+@@ -70,7 +81,7 @@ class G1AdjustRegionClosure : public HeapRegionClosure {
+ obj->oop_iterate(&cl, MemRegion(r->bottom(), r->top()));
+ } else if (!r->is_free()) {
+ // Free regions do not contain objects to iterate. So skip them. 
+- G1AdjustLiveClosure adjust(&cl);
++ G1AdjustLiveClosure<ALT_FWD> adjust(&cl);
+ r->apply_to_marked_objects(_bitmap, &adjust);
+ }
+ return false;
+@@ -81,12 +92,12 @@ G1FullGCAdjustTask::G1FullGCAdjustTask(G1FullCollector* collector) :
+ G1FullGCTask("G1 Adjust", collector),
+ _root_processor(G1CollectedHeap::heap(), collector->workers()),
+ _weak_proc_task(collector->workers()),
+- _hrclaimer(collector->workers()),
+- _adjust(collector) {
++ _hrclaimer(collector->workers()) {
+ ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);
+ }
+
+-void G1FullGCAdjustTask::work(uint worker_id) {
++template <bool ALT_FWD>
++void G1FullGCAdjustTask::work_impl(uint worker_id) {
+ Ticks start = Ticks::now();
+ ResourceMark rm;
+
+@@ -94,18 +105,27 @@ void G1FullGCAdjustTask::work(uint worker_id) {
+ G1FullGCMarker* marker = collector()->marker(worker_id);
+ marker->preserved_stack()->adjust_during_full_gc();
+
++ G1AdjustClosure<ALT_FWD> adjust(collector());
+ {
+ // Adjust the weak roots.
+ AlwaysTrueClosure always_alive;
+- _weak_proc_task.work(worker_id, &always_alive, &_adjust);
++ _weak_proc_task.work(worker_id, &always_alive, &adjust);
+ }
+
+- CLDToOopClosure adjust_cld(&_adjust, ClassLoaderData::_claim_stw_fullgc_adjust);
+- CodeBlobToOopClosure adjust_code(&_adjust, CodeBlobToOopClosure::FixRelocations);
+- _root_processor.process_all_roots(&_adjust, &adjust_cld, &adjust_code);
++ CLDToOopClosure adjust_cld(&adjust, ClassLoaderData::_claim_stw_fullgc_adjust);
++ CodeBlobToOopClosure adjust_code(&adjust, CodeBlobToOopClosure::FixRelocations);
++ _root_processor.process_all_roots(&adjust, &adjust_cld, &adjust_code);
+
+ // Now adjust pointers region by region
+ G1AdjustRegionClosure blk(collector(), worker_id);
+ G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&blk, &_hrclaimer, worker_id);
+ log_task("Adjust task", worker_id, start);
+ }
++
++void G1FullGCAdjustTask::work(uint worker_id) {
++ if (UseAltGCForwarding) {
++ work_impl<true>(worker_id);
++ } else {
++ work_impl<false>(worker_id);
++ }
++}
+diff --git a/src/hotspot/share/gc/g1/g1FullGCAdjustTask.hpp b/src/hotspot/share/gc/g1/g1FullGCAdjustTask.hpp
+index c9b190acd..04aac0e02 100644
+--- a/src/hotspot/share/gc/g1/g1FullGCAdjustTask.hpp
++++ b/src/hotspot/share/gc/g1/g1FullGCAdjustTask.hpp
+@@ -38,8 +38,9 @@ class G1FullGCAdjustTask : public G1FullGCTask {
+ G1RootProcessor _root_processor;
+ WeakProcessor::Task _weak_proc_task;
+ HeapRegionClaimer _hrclaimer;
+- G1AdjustClosure _adjust;
+
++ template <bool ALT_FWD>
++ void work_impl(uint worker_id);
+ public:
+ G1FullGCAdjustTask(G1FullCollector* collector);
+ void work(uint worker_id);
+diff --git a/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp b/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp
+index 8c4fa4eb2..2c9ef23f1 100644
+--- a/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp
++++ b/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp
+@@ -30,19 +30,22 @@
+ #include "gc/g1/g1FullGCCompactTask.hpp"
+ #include "gc/g1/heapRegion.inline.hpp"
+ #include "gc/shared/gcTraceTime.inline.hpp"
++#include "gc/shared/slidingForwarding.inline.hpp"
+ #include "logging/log.hpp"
+ #include "oops/oop.inline.hpp"
+ #include "utilities/ticks.hpp"
+
+-void G1FullGCCompactTask::G1CompactRegionClosure::clear_in_bitmap(oop obj) {
++template <bool ALT_FWD>
++void G1FullGCCompactTask::G1CompactRegionClosure<ALT_FWD>::clear_in_bitmap(oop obj) {
+ assert(_bitmap->is_marked(obj), "Should only compact marked objects");
+ _bitmap->clear(obj);
+ }
+
+-size_t G1FullGCCompactTask::G1CompactRegionClosure::apply(oop obj) {
++template <bool ALT_FWD>
++size_t G1FullGCCompactTask::G1CompactRegionClosure<ALT_FWD>::apply(oop obj) {
+ size_t size = obj->size();
+- if (obj->is_forwarded()) {
+- G1FullGCCompactTask::copy_object_to_new_location(obj);
++ if (SlidingForwarding::is_forwarded(obj)) {
++ G1FullGCCompactTask::copy_object_to_new_location<ALT_FWD>(obj);
+ }
+
+ // Clear the mark for the compacted object to allow reuse of the
+@@ -51,14 +54,15 @@ size_t G1FullGCCompactTask::G1CompactRegionClosure::apply(oop obj) {
+ return size;
+ }
+
++template <bool ALT_FWD>
+ void G1FullGCCompactTask::copy_object_to_new_location(oop obj) {
+- assert(obj->is_forwarded(), "Sanity!");
+- assert(obj->forwardee() != obj, "Object must have a new location");
++ assert(SlidingForwarding::is_forwarded(obj), "Sanity!");
++ assert(SlidingForwarding::forwardee<ALT_FWD>(obj) != obj, "Object must have a new location");
+
+ size_t size = obj->size();
+ // Copy object and reinit its mark.
+ HeapWord* obj_addr = cast_from_oop<HeapWord*>(obj);
+- HeapWord* destination = cast_from_oop<HeapWord*>(obj->forwardee());
++ HeapWord* destination = cast_from_oop<HeapWord*>(SlidingForwarding::forwardee<ALT_FWD>(obj));
+ Copy::aligned_conjoint_words(obj_addr, destination, size);
+
+ // There is no need to transform stack chunks - marking already did that.
+@@ -77,8 +81,13 @@ void G1FullGCCompactTask::compact_region(HeapRegion* hr) {
+ // showed that it was better overall to clear bit by bit, compared
+ // to clearing the whole region at the end. This difference was
+ // clearly seen for regions with few marks.
+- G1CompactRegionClosure compact(collector()->mark_bitmap());
+- hr->apply_to_marked_objects(collector()->mark_bitmap(), &compact);
++ if (UseAltGCForwarding) {
++ G1CompactRegionClosure<true> compact(collector()->mark_bitmap());
++ hr->apply_to_marked_objects(collector()->mark_bitmap(), &compact);
++ } else {
++ G1CompactRegionClosure<false> compact(collector()->mark_bitmap());
++ hr->apply_to_marked_objects(collector()->mark_bitmap(), &compact);
++ }
+ }
+
+ hr->reset_compacted_after_full_gc(_collector->compaction_top(hr));
+@@ -104,15 +113,24 @@ void G1FullGCCompactTask::serial_compaction() {
+ }
+ }
+
+-void G1FullGCCompactTask::humongous_compaction() {
+- GCTraceTime(Debug, gc, phases) tm("Phase 4: Humonguous Compaction", collector()->scope()->timer());
+-
++template <bool ALT_FWD>
++void G1FullGCCompactTask::humongous_compaction_impl() {
+ for (HeapRegion* hr : collector()->humongous_compaction_regions()) {
+ assert(collector()->is_compaction_target(hr->hrm_index()), "Sanity");
+- compact_humongous_obj(hr);
++ compact_humongous_obj<ALT_FWD>(hr);
++ }
++}
++
++void G1FullGCCompactTask::humongous_compaction() {
++ GCTraceTime(Debug, gc, phases) tm("Phase 4: Humonguous Compaction", collector()->scope()->timer());
++ if (UseAltGCForwarding) {
++ humongous_compaction_impl<true>();
++ } else {
++ humongous_compaction_impl<false>();
+ }
+ }
+
++template <bool ALT_FWD>
+ void G1FullGCCompactTask::compact_humongous_obj(HeapRegion* src_hr) {
+ assert(src_hr->is_starts_humongous(), "Should be start region of the humongous object");
+
+@@ -120,12 +138,12 @@ void G1FullGCCompactTask::compact_humongous_obj(HeapRegion* src_hr) {
+ size_t word_size = obj->size();
+
+ uint num_regions = (uint)G1CollectedHeap::humongous_obj_size_in_regions(word_size);
+- HeapWord* destination = cast_from_oop<HeapWord*>(obj->forwardee());
++ HeapWord* destination = cast_from_oop<HeapWord*>(SlidingForwarding::forwardee<ALT_FWD>(obj));
+
+ assert(collector()->mark_bitmap()->is_marked(obj), "Should only compact marked objects");
+ collector()->mark_bitmap()->clear(obj);
+
+- copy_object_to_new_location(obj);
++ copy_object_to_new_location<ALT_FWD>(obj);
+
+ uint dest_start_idx = 
_g1h->addr_to_region(destination);
+ // Update the metadata for the destination regions.
+diff --git a/src/hotspot/share/gc/g1/g1FullGCCompactTask.hpp b/src/hotspot/share/gc/g1/g1FullGCCompactTask.hpp
+index a7e2ea38c..e2ebe1f4f 100644
+--- a/src/hotspot/share/gc/g1/g1FullGCCompactTask.hpp
++++ b/src/hotspot/share/gc/g1/g1FullGCCompactTask.hpp
+@@ -41,11 +41,16 @@ class G1FullGCCompactTask : public G1FullGCTask {
+ G1CollectedHeap* _g1h;
+
+ void compact_region(HeapRegion* hr);
++ template <bool ALT_FWD>
+ void compact_humongous_obj(HeapRegion* hr);
+ void free_non_overlapping_regions(uint src_start_idx, uint dest_start_idx, uint num_regions);
+
++ template <bool ALT_FWD>
+ static void copy_object_to_new_location(oop obj);
+
++ template <bool ALT_FWD>
++ void humongous_compaction_impl();
++
+ public:
+ G1FullGCCompactTask(G1FullCollector* collector) :
+ G1FullGCTask("G1 Compact Task", collector),
+@@ -57,6 +62,7 @@ public:
+ void serial_compaction();
+ void humongous_compaction();
+
++ template <bool ALT_FWD>
+ class G1CompactRegionClosure : public StackObj {
+ G1CMBitMap* _bitmap;
+ void clear_in_bitmap(oop object);
+diff --git a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp
+index dabff2bc9..4788b9fd3 100644
+--- a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp
++++ b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp
+@@ -27,6 +27,7 @@
+ #include "gc/g1/g1FullGCCompactionPoint.hpp"
+ #include "gc/g1/heapRegion.hpp"
+ #include "gc/shared/preservedMarks.inline.hpp"
++#include "gc/shared/slidingForwarding.inline.hpp"
+ #include "oops/oop.inline.hpp"
+ #include "utilities/debug.hpp"
+
+@@ -92,6 +93,7 @@ void G1FullGCCompactionPoint::switch_region() {
+ initialize_values();
+ }
+
++template <bool ALT_FWD>
+ void G1FullGCCompactionPoint::forward(oop object, size_t size) {
+ assert(_current_region != nullptr, "Must have been initialized");
+
+@@ -102,10 +104,10 @@ void G1FullGCCompactionPoint::forward(oop object, size_t size) {
+
+ // Store a forwarding pointer if the object should be moved.
+ if (cast_from_oop<HeapWord*>(object) != _compaction_top) {
+- object->forward_to(cast_to_oop(_compaction_top));
+- assert(object->is_forwarded(), "must be forwarded");
++ SlidingForwarding::forward_to<ALT_FWD>(object, cast_to_oop(_compaction_top));
++ assert(SlidingForwarding::is_forwarded(object), "must be forwarded");
+ } else {
+- assert(!object->is_forwarded(), "must not be forwarded");
++ assert(SlidingForwarding::is_not_forwarded(object), "must not be forwarded");
+ }
+
+ // Update compaction values. 
+diff --git a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp
+index dabff2bc9..4788b9fd3 100644
+--- a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp
++++ b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp
+@@ -27,6 +27,7 @@
+ #include "gc/g1/g1FullGCCompactionPoint.hpp"
+ #include "gc/g1/heapRegion.hpp"
+ #include "gc/shared/preservedMarks.inline.hpp"
++#include "gc/shared/slidingForwarding.inline.hpp"
+ #include "oops/oop.inline.hpp"
+ #include "utilities/debug.hpp"
+ 
+@@ -92,6 +93,7 @@ void G1FullGCCompactionPoint::switch_region() {
+   initialize_values();
+ }
+ 
++template <bool ALT_FWD>
+ void G1FullGCCompactionPoint::forward(oop object, size_t size) {
+   assert(_current_region != nullptr, "Must have been initialized");
+ 
+@@ -102,10 +104,10 @@ void G1FullGCCompactionPoint::forward(oop object, size_t size) {
+ 
+   // Store a forwarding pointer if the object should be moved.
+   if (cast_from_oop<HeapWord*>(object) != _compaction_top) {
+-    object->forward_to(cast_to_oop(_compaction_top));
+-    assert(object->is_forwarded(), "must be forwarded");
++    SlidingForwarding::forward_to<ALT_FWD>(object, cast_to_oop(_compaction_top));
++    assert(SlidingForwarding::is_forwarded(object), "must be forwarded");
+   } else {
+-    assert(!object->is_forwarded(), "must not be forwarded");
++    assert(SlidingForwarding::is_not_forwarded(object), "must not be forwarded");
+   }
+ 
+   // Update compaction values.
+@@ -113,6 +115,9 @@ void G1FullGCCompactionPoint::forward(oop object, size_t size) {
+     _current_region->update_bot_for_block(_compaction_top - size, _compaction_top);
+ }
+ 
++template void G1FullGCCompactionPoint::forward<true>(oop object, size_t size);
++template void G1FullGCCompactionPoint::forward<false>(oop object, size_t size);
++
+ void G1FullGCCompactionPoint::add(HeapRegion* hr) {
+   _compaction_regions->append(hr);
+ }
+@@ -145,6 +150,7 @@ void G1FullGCCompactionPoint::add_humongous(HeapRegion* hr) {
+   });
+ }
+ 
++template <bool ALT_FWD>
+ uint G1FullGCCompactionPoint::forward_humongous(HeapRegion* hr) {
+   assert(hr->is_starts_humongous(), "Sanity!");
+ 
+@@ -168,8 +174,8 @@ uint G1FullGCCompactionPoint::forward_humongous(HeapRegion* hr) {
+   _collector->marker(0)->preserved_stack()->push_if_necessary(obj, obj->mark());
+ 
+   HeapRegion* dest_hr = _compaction_regions->at(range_begin);
+-  obj->forward_to(cast_to_oop(dest_hr->bottom()));
+-  assert(obj->is_forwarded(), "Object must be forwarded!");
++  SlidingForwarding::forward_to<ALT_FWD>(obj, cast_to_oop(dest_hr->bottom()));
++  assert(SlidingForwarding::is_forwarded(obj), "Object must be forwarded!");
+ 
+   // Add the humongous object regions to the compaction point.
+   add_humongous(hr);
+@@ -180,6 +186,9 @@ uint G1FullGCCompactionPoint::forward_humongous(HeapRegion* hr) {
+   return num_regions;
+ }
+ 
++template uint G1FullGCCompactionPoint::forward_humongous<true>(HeapRegion* hr);
++template uint G1FullGCCompactionPoint::forward_humongous<false>(HeapRegion* hr);
++
+ uint G1FullGCCompactionPoint::find_contiguous_before(HeapRegion* hr, uint num_regions) {
+   assert(num_regions > 0, "Sanity!");
+   assert(has_regions(), "Sanity!");
+diff --git a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.hpp b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.hpp
+index ea8351f12..b77bf0365 100644
+--- a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.hpp
++++ b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.hpp
+@@ -54,7 +54,9 @@ public:
+   bool is_initialized();
+   void initialize(HeapRegion* hr);
+   void update();
++  template <bool ALT_FWD>
+   void forward(oop object, size_t size);
++  template <bool ALT_FWD>
+   uint forward_humongous(HeapRegion* hr);
+   void add(HeapRegion* hr);
+   void add_humongous(HeapRegion* hr);
+diff --git a/src/hotspot/share/gc/g1/g1FullGCOopClosures.hpp b/src/hotspot/share/gc/g1/g1FullGCOopClosures.hpp
+index 388f8032d..ce82b834a 100644
+--- a/src/hotspot/share/gc/g1/g1FullGCOopClosures.hpp
++++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.hpp
+@@ -73,6 +73,7 @@ public:
+   virtual void do_oop(narrowOop* p);
+ };
+ 
++template <bool ALT_FWD>
+ class G1AdjustClosure : public BasicOopIterateClosure {
+   G1FullCollector* _collector;
+ 
+diff --git a/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp b/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp
+index aa194d162..3fe1615ce 100644
+--- a/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp
++++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
++ * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+  *
+  * This code is free software; you can redistribute it and/or modify it
+@@ -32,6 +32,7 @@
+ #include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
+ #include "gc/g1/g1FullGCMarker.inline.hpp"
+ #include "gc/g1/heapRegionRemSet.inline.hpp"
++#include "gc/shared/slidingForwarding.inline.hpp"
+ #include "memory/iterator.inline.hpp"
+ #include "memory/universe.hpp"
+ #include "oops/access.inline.hpp"
+@@ -51,7 +52,8 @@ inline void G1MarkAndPushClosure::do_oop(narrowOop* p) {
+   do_oop_work(p);
+ }
+ 
+-template <class T> inline void G1AdjustClosure::adjust_pointer(T* p) {
++template <bool ALT_FWD>
++template <class T> inline void G1AdjustClosure<ALT_FWD>::adjust_pointer(T* p) {
+   T heap_oop = RawAccess<>::oop_load(p);
+   if (CompressedOops::is_null(heap_oop)) {
+     return;
+@@ -65,8 +67,8 @@ template <class T> inline void G1AdjustClosure::adjust_pointer(T* p) {
+     return;
+   }
+ 
+-  if (obj->is_forwarded()) {
+-    oop forwardee = obj->forwardee();
++  if (SlidingForwarding::is_forwarded(obj)) {
++    oop forwardee = SlidingForwarding::forwardee<ALT_FWD>(obj);
+     // Forwarded, just update.
+     assert(G1CollectedHeap::heap()->is_in_reserved(forwardee), "should be in object space");
+     RawAccess<IS_NOT_NULL>::oop_store(p, forwardee);
+@@ -74,8 +76,10 @@ template <class T> inline void G1AdjustClosure::adjust_pointer(T* p) {
+ 
+ }
+ 
+-inline void G1AdjustClosure::do_oop(oop* p)       { do_oop_work(p); }
+-inline void G1AdjustClosure::do_oop(narrowOop* p) { do_oop_work(p); }
++template <bool ALT_FWD>
++inline void G1AdjustClosure<ALT_FWD>::do_oop(oop* p)       { do_oop_work(p); }
++template <bool ALT_FWD>
++inline void G1AdjustClosure<ALT_FWD>::do_oop(narrowOop* p) { do_oop_work(p); }
+ 
+ inline bool G1IsAliveClosure::do_object_b(oop p) {
+   return _bitmap->is_marked(p);
+diff --git a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp
+index 7cef60293..faa0270a6 100644
+--- a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp
++++ b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp
+@@ -104,18 +104,25 @@ G1FullGCPrepareTask::G1CalculatePointersClosure::G1CalculatePointersClosure(G1Fu
+     _cp(cp) { }
+ 
+ 
+-G1FullGCPrepareTask::G1PrepareCompactLiveClosure::G1PrepareCompactLiveClosure(G1FullGCCompactionPoint* cp) :
++template <bool ALT_FWD>
++G1FullGCPrepareTask::G1PrepareCompactLiveClosure<ALT_FWD>::G1PrepareCompactLiveClosure(G1FullGCCompactionPoint* cp) :
+     _cp(cp) { }
+ 
+-size_t G1FullGCPrepareTask::G1PrepareCompactLiveClosure::apply(oop object) {
++template <bool ALT_FWD>
++size_t G1FullGCPrepareTask::G1PrepareCompactLiveClosure<ALT_FWD>::apply(oop object) {
+   size_t size = object->size();
+-  _cp->forward(object, size);
++  _cp->forward<ALT_FWD>(object, size);
+   return size;
+ }
+ 
+ void G1FullGCPrepareTask::G1CalculatePointersClosure::prepare_for_compaction(HeapRegion* hr) {
+   if (!_collector->is_free(hr->hrm_index())) {
+-    G1PrepareCompactLiveClosure prepare_compact(_cp);
+-    hr->apply_to_marked_objects(_bitmap, &prepare_compact);
++    if (UseAltGCForwarding) {
++      G1PrepareCompactLiveClosure<true> prepare_compact(_cp);
++      hr->apply_to_marked_objects(_bitmap, &prepare_compact);
++    } else {
++      G1PrepareCompactLiveClosure<false> prepare_compact(_cp);
++      hr->apply_to_marked_objects(_bitmap, &prepare_compact);
++    }
+   }
+ }
+diff --git a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.hpp b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.hpp
+index 7f09f0553..2895b45d6 100644
+--- a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.hpp
++++ b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.hpp
+@@ -89,6 +89,7 @@ private:
+     bool do_heap_region(HeapRegion* hr);
+   };
+ 
++  template <bool ALT_FWD>
+   class G1PrepareCompactLiveClosure : public StackObj {
+     G1FullGCCompactionPoint* _cp;
+ 
+@@ -100,6 +101,7 @@ private:
+ 
+ // Closure to re-prepare objects in the serial compaction point queue regions for
+ // serial compaction.
++template <bool ALT_FWD>
+ class G1SerialRePrepareClosure : public StackObj {
+   G1FullGCCompactionPoint* _cp;
+   HeapWord* _dense_prefix_top;
+diff --git a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.inline.hpp b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.inline.hpp
+index 76a647a3d..d4939e39f 100644
+--- a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.inline.hpp
++++ b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.inline.hpp
+@@ -32,6 +32,7 @@
+ #include "gc/g1/g1FullGCCompactionPoint.hpp"
+ #include "gc/g1/g1FullGCScope.hpp"
+ #include "gc/g1/heapRegion.inline.hpp"
++#include "gc/shared/slidingForwarding.inline.hpp"
+ 
+ void G1DetermineCompactionQueueClosure::free_empty_humongous_region(HeapRegion* hr) {
+   _g1h->free_humongous_region(hr, nullptr);
+@@ -101,18 +102,19 @@ inline bool G1DetermineCompactionQueueClosure::do_heap_region(HeapRegion* hr) {
+   return false;
+ }
+ 
+-inline size_t G1SerialRePrepareClosure::apply(oop obj) {
+-  if (obj->is_forwarded()) {
++template <bool ALT_FWD>
++inline size_t G1SerialRePrepareClosure<ALT_FWD>::apply(oop obj) {
++  if (SlidingForwarding::is_forwarded(obj)) {
+     // We skip objects compiled into the first region or
+     // into regions not part of the serial compaction point.
+-    if (cast_from_oop<HeapWord*>(obj->forwardee()) < _dense_prefix_top) {
++    if (cast_from_oop<HeapWord*>(SlidingForwarding::forwardee<ALT_FWD>(obj)) < _dense_prefix_top) {
+       return obj->size();
+     }
+   }
+ 
+   // Get size and forward.
+   size_t size = obj->size();
+-  _cp->forward(obj, size);
++  _cp->forward<ALT_FWD>(obj, size);
+ 
+   return size;
+ }
+diff --git a/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp b/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp
+index ea7f690d3..b77056d25 100644
+--- a/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp
++++ b/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp
+@@ -228,7 +228,7 @@ void G1ParCopyClosure::do_oop_work(T* p) {
+     oop forwardee;
+     markWord m = obj->mark();
+     if (m.is_marked()) {
+-      forwardee = cast_to_oop(m.decode_pointer());
++      forwardee = obj->forwardee(m);
+     } else {
+       forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
+     }
+diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
+index 4985c2135..a1d9daca3 100644
+--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
++++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
+@@ -207,7 +207,7 @@ void G1ParScanThreadState::do_oop_evac(T* p) {
+ 
+   markWord m = obj->mark();
+   if (m.is_marked()) {
+-    obj = cast_to_oop(m.decode_pointer());
++    obj = obj->forwardee(m);
+   } else {
+     obj = do_copy_to_survivor_space(region_attr, obj, m);
+   }
+@@ -221,7 +221,7 @@ void G1ParScanThreadState::do_partial_array(PartialArrayScanTask task) {
+   oop from_obj = task.to_source_array();
+ 
+   assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
+-  assert(from_obj->is_objArray(), "must be obj array");
++  assert(from_obj->forward_safe_klass()->is_objArray_klass(), "must be obj array");
+   assert(from_obj->is_forwarded(), "must be forwarded");
+ 
+   oop to_obj = from_obj->forwardee();
+@@ -251,7 +251,7 @@ MAYBE_INLINE_EVACUATION
+ void G1ParScanThreadState::start_partial_objarray(G1HeapRegionAttr dest_attr,
+                                                   oop from_obj,
+                                                   oop to_obj) {
+-  assert(from_obj->is_objArray(), "precondition");
++  assert(from_obj->forward_safe_klass()->is_objArray_klass(), "precondition");
+   assert(from_obj->is_forwarded(), "precondition");
+   assert(from_obj->forwardee() == to_obj, "precondition");
+   assert(from_obj != to_obj, "should not be scanning self-forwarded objects");
+@@ -378,22 +378,22 @@ G1HeapRegionAttr G1ParScanThreadState::next_region_attr(G1HeapRegionAttr const r
+ }
+ 
+ void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_attr,
+-                                                  oop const old, size_t word_sz, uint age,
++                                                  Klass* klass, size_t word_sz, uint age,
+                                                   HeapWord * const obj_ptr, uint node_index) const {
+   PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr, node_index);
+   if (alloc_buf->contains(obj_ptr)) {
+-    _g1h->gc_tracer_stw()->report_promotion_in_new_plab_event(old->klass(), word_sz * HeapWordSize, age,
++    _g1h->gc_tracer_stw()->report_promotion_in_new_plab_event(klass, word_sz * HeapWordSize, age,
+                                                               dest_attr.type() == G1HeapRegionAttr::Old,
+                                                               alloc_buf->word_sz() * HeapWordSize);
+   } else {
+-    _g1h->gc_tracer_stw()->report_promotion_outside_plab_event(old->klass(), word_sz * HeapWordSize, age,
++    _g1h->gc_tracer_stw()->report_promotion_outside_plab_event(klass, word_sz * HeapWordSize, age,
+                                                                dest_attr.type() == G1HeapRegionAttr::Old);
+   }
+ }
+ 
+ NOINLINE
+ HeapWord* G1ParScanThreadState::allocate_copy_slow(G1HeapRegionAttr* dest_attr,
+-                                                   oop old,
++                                                   Klass* klass,
+                                                    size_t word_sz,
+                                                    uint age,
+                                                    uint node_index) {
+@@ -416,7 +416,7 @@ HeapWord* G1ParScanThreadState::allocate_copy_slow(G1HeapRegionAttr* dest_attr,
+     update_numa_stats(node_index);
+     if (_g1h->gc_tracer_stw()->should_report_promotion_events()) {
+       // The events are checked individually as part of the actual commit
+-      report_promotion_event(*dest_attr, old, word_sz, age, obj_ptr, node_index);
++      report_promotion_event(*dest_attr, klass, word_sz, age, obj_ptr, node_index);
+     }
+   }
+   return obj_ptr;
+@@ -453,7 +453,13 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio
+ 
+   // Get the klass once.  We'll need it again later, and this avoids
+   // re-decoding when it's compressed.
+-  Klass* klass = old->klass();
++  // NOTE: With compact headers, it is not safe to load the Klass* from o, because
++  // that would access the mark-word, and the mark-word might change at any time by
++  // concurrent promotion. The promoted mark-word would point to the forwardee, which
++  // may not yet have completed copying. Therefore we must load the Klass* from
++  // the mark-word that we have already loaded. This is safe, because we have checked
++  // that this is not yet forwarded in the caller.
++  Klass* klass = old->forward_safe_klass(old_mark);
+   const size_t word_sz = old->size_given_klass(klass);
+ 
+   uint age = 0;
+@@ -466,7 +472,7 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio
+   // PLAB allocations should succeed most of the time, so we'll
+   // normally check against null once and that's it.
+   if (obj_ptr == nullptr) {
+-    obj_ptr = allocate_copy_slow(&dest_attr, old, word_sz, age, node_index);
++    obj_ptr = allocate_copy_slow(&dest_attr, klass, word_sz, age, node_index);
+     if (obj_ptr == nullptr) {
+       // This will either forward-to-self, or detect that someone else has
+       // installed a forwarding pointer.
+@@ -620,7 +626,7 @@ NOINLINE
+ oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, size_t word_sz) {
+   assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));
+ 
+-  oop forward_ptr = old->forward_to_atomic(old, m, memory_order_relaxed);
++  oop forward_ptr = old->forward_to_self_atomic(m, memory_order_relaxed);
+   if (forward_ptr == nullptr) {
+     // Forward-to-self succeeded. We are the "owner" of the object.
+     HeapRegion* r = _g1h->heap_region_containing(old);
+diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp
+index fd9430c02..931bc64f4 100644
+--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp
++++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp
+@@ -161,7 +161,7 @@ private:
+   void start_partial_objarray(G1HeapRegionAttr dest_dir, oop from, oop to);
+ 
+   HeapWord* allocate_copy_slow(G1HeapRegionAttr* dest_attr,
+-                               oop old,
++                               Klass* klass,
+                                size_t word_sz,
+                                uint age,
+                                uint node_index);
+@@ -196,7 +196,7 @@ private:
+   inline G1HeapRegionAttr next_region_attr(G1HeapRegionAttr const region_attr, markWord const m, uint& age);
+ 
+   void report_promotion_event(G1HeapRegionAttr const dest_attr,
+-                              oop const old, size_t word_sz, uint age,
++                              Klass* klass, size_t word_sz, uint age,
+                               HeapWord * const obj_ptr, uint node_index) const;
+ 
+   void trim_queue_to_threshold(uint threshold);
+diff --git a/src/hotspot/share/gc/parallel/mutableSpace.cpp b/src/hotspot/share/gc/parallel/mutableSpace.cpp
+index cdbd0ac48..442bd0e08 100644
+--- a/src/hotspot/share/gc/parallel/mutableSpace.cpp
++++ b/src/hotspot/share/gc/parallel/mutableSpace.cpp
+@@ -240,15 +240,16 @@ void MutableSpace::object_iterate(ObjectClosure* cl) {
+     // When promotion-failure occurs during Young GC, eden/from space is not cleared,
+     // so we can encounter objects with "forwarded" markword.
+     // They are essentially dead, so skipping them
+-    if (!obj->is_forwarded()) {
+-      cl->do_object(obj);
+-    }
+-#ifdef ASSERT
+-    else {
++    if (obj->is_forwarded()) {
+       assert(obj->forwardee() != obj, "must not be self-forwarded");
++      // It is safe to use the forwardee here. Parallel GC only uses
++      // header-based forwarding during promotion. Full GC doesn't
++      // use the object header for forwarding at all.
++      p += obj->forwardee()->size();
++    } else {
++      cl->do_object(obj);
++      p += obj->size();
+     }
+-#endif
+-    p += cast_to_oop(p)->size();
+   }
+ }
+ 
+diff --git a/src/hotspot/share/gc/parallel/psOldGen.cpp b/src/hotspot/share/gc/parallel/psOldGen.cpp
+index 18c4285b9..51bfbc110 100644
+--- a/src/hotspot/share/gc/parallel/psOldGen.cpp
++++ b/src/hotspot/share/gc/parallel/psOldGen.cpp
+@@ -399,7 +399,9 @@ class VerifyObjectStartArrayClosure : public ObjectClosure {
+     _start_array(start_array) { }
+ 
+   virtual void do_object(oop obj)  {
+-    HeapWord* test_addr = cast_from_oop<HeapWord*>(obj) + 1;
++    // With compact headers, the objects can be one-word sized.
++    size_t int_off = AARCH64_ONLY(UseCompactObjectHeaders ? MIN2((size_t)1, obj->size() - 1) :) 1;
++    HeapWord* test_addr = cast_from_oop<HeapWord*>(obj) + int_off;
+     guarantee(_start_array->object_start(test_addr) == cast_from_oop<HeapWord*>(obj), "ObjectStartArray cannot find start of object");
+     guarantee(_start_array->is_block_allocated(cast_from_oop<HeapWord*>(obj)), "ObjectStartArray missing block allocation");
+   }
+diff --git a/src/hotspot/share/gc/parallel/psPromotionManager.cpp b/src/hotspot/share/gc/parallel/psPromotionManager.cpp
+index 20ee8ec1f..a8f937c89 100644
+--- a/src/hotspot/share/gc/parallel/psPromotionManager.cpp
++++ b/src/hotspot/share/gc/parallel/psPromotionManager.cpp
+@@ -295,7 +295,7 @@ void PSPromotionManager::process_array_chunk(PartialArrayScanTask task) {
+   assert(PSChunkLargeArrays, "invariant");
+ 
+   oop old = task.to_source_array();
+-  assert(old->is_objArray(), "invariant");
++  assert(old->forward_safe_klass()->is_objArray_klass(), "invariant");
+   assert(old->is_forwarded(), "invariant");
+ 
+   TASKQUEUE_STATS_ONLY(++_array_chunks_processed);
+@@ -333,7 +333,7 @@ oop PSPromotionManager::oop_promotion_failed(oop obj, markWord obj_mark) {
+     // this started.  If it is the same (i.e., no forwarding
+     // pointer has been installed), then this thread owns
+     // it.
+-    if (obj->forward_to_atomic(obj, obj_mark) == nullptr) {
++    if (obj->forward_to_self_atomic(obj_mark) == nullptr) {
+       // We won any races, we "own" this object.
+       assert(obj == obj->forwardee(), "Sanity");
+ 
+diff --git a/src/hotspot/share/gc/parallel/psPromotionManager.hpp b/src/hotspot/share/gc/parallel/psPromotionManager.hpp
+index d053ffb6c..c4056145b 100644
+--- a/src/hotspot/share/gc/parallel/psPromotionManager.hpp
++++ b/src/hotspot/share/gc/parallel/psPromotionManager.hpp
+@@ -105,7 +105,7 @@ class PSPromotionManager {
+ 
+   void push_depth(ScannerTask task);
+ 
+-  inline void promotion_trace_event(oop new_obj, oop old_obj, size_t obj_size,
++  inline void promotion_trace_event(oop new_obj, Klass* klass, size_t obj_size,
+                                     uint age, bool tenured,
+                                     const PSPromotionLAB* lab);
+ 
+diff --git a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp
+index c1cbeb0f5..231aec305 100644
+--- a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp
++++ b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp
+@@ -63,7 +63,7 @@ inline void PSPromotionManager::claim_or_forward_depth(T* p) {
+   push_depth(ScannerTask(p));
+ }
+ 
+-inline void PSPromotionManager::promotion_trace_event(oop new_obj, oop old_obj,
++inline void PSPromotionManager::promotion_trace_event(oop new_obj, Klass* klass,
+                                                       size_t obj_size,
+                                                       uint age, bool tenured,
+                                                       const PSPromotionLAB* lab) {
+@@ -76,14 +76,14 @@ inline void PSPromotionManager::promotion_trace_event(oop new_obj, oop old_obj,
+     if (gc_tracer->should_report_promotion_in_new_plab_event()) {
+       size_t obj_bytes = obj_size * HeapWordSize;
+       size_t lab_size = lab->capacity();
+-      gc_tracer->report_promotion_in_new_plab_event(old_obj->klass(), obj_bytes,
++      gc_tracer->report_promotion_in_new_plab_event(klass, obj_bytes,
+                                                     age, tenured, lab_size);
+     }
+   } else {
+     // Promotion of object directly to heap
+     if (gc_tracer->should_report_promotion_outside_plab_event()) {
+       size_t obj_bytes = obj_size * HeapWordSize;
+-      gc_tracer->report_promotion_outside_plab_event(old_obj->klass(), obj_bytes,
++      gc_tracer->report_promotion_outside_plab_event(klass, obj_bytes,
+                                                      age, tenured);
+     }
+   }
+@@ -152,7 +152,7 @@ inline oop PSPromotionManager::copy_to_survivor_space(oop o) {
+     // other thread.
+     OrderAccess::acquire();
+     // Return the already installed forwardee.
+-    return cast_to_oop(m.decode_pointer());
++    return o->forwardee(m);
+   }
+ }
+ 
+@@ -168,7 +168,14 @@ inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o,
+ 
+   oop new_obj = nullptr;
+   bool new_obj_is_tenured = false;
+-  size_t new_obj_size = o->size();
++  // NOTE: With compact headers, it is not safe to load the Klass* from o, because
++  // that would access the mark-word, and the mark-word might change at any time by
++  // concurrent promotion. The promoted mark-word would point to the forwardee, which
++  // may not yet have completed copying. Therefore we must load the Klass* from
++  // the mark-word that we have already loaded. This is safe, because we have checked
++  // that this is not yet forwarded in the caller.
++  Klass* klass = o->forward_safe_klass(test_mark);
++  size_t new_obj_size = o->size_given_klass(klass);
+ 
+   // Find the objects age, MT safe.
+   uint age = (test_mark.has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
+@@ -183,7 +190,7 @@ inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o,
+       if (new_obj_size > (YoungPLABSize / 2)) {
+         // Allocate this object directly
+         new_obj = cast_to_oop(young_space()->cas_allocate(new_obj_size));
+-        promotion_trace_event(new_obj, o, new_obj_size, age, false, nullptr);
++        promotion_trace_event(new_obj, klass, new_obj_size, age, false, nullptr);
+       } else {
+         // Flush and fill
+         _young_lab.flush();
+@@ -193,7 +200,7 @@ inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o,
+           _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
+           // Try the young lab allocation again.
+           new_obj = cast_to_oop(_young_lab.allocate(new_obj_size));
+-          promotion_trace_event(new_obj, o, new_obj_size, age, false, &_young_lab);
++          promotion_trace_event(new_obj, klass, new_obj_size, age, false, &_young_lab);
+         } else {
+           _young_gen_is_full = true;
+         }
+@@ -219,7 +226,7 @@ inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o,
+       if (new_obj_size > (OldPLABSize / 2)) {
+         // Allocate this object directly
+         new_obj = cast_to_oop(old_gen()->allocate(new_obj_size));
+-        promotion_trace_event(new_obj, o, new_obj_size, age, true, nullptr);
++        promotion_trace_event(new_obj, klass, new_obj_size, age, true, nullptr);
+       } else {
+         // Flush and fill
+         _old_lab.flush();
+@@ -229,7 +236,7 @@ inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o,
+           _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
+           // Try the old lab allocation again.
+           new_obj = cast_to_oop(_old_lab.allocate(new_obj_size));
+-          promotion_trace_event(new_obj, o, new_obj_size, age, true, &_old_lab);
++          promotion_trace_event(new_obj, klass, new_obj_size, age, true, &_old_lab);
+         }
+       }
+     }
+@@ -254,7 +261,24 @@ inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o,
+ 
+     // Parallel GC claims with a release - so other threads might access this object
+     // after claiming and they should see the "completed" object.
+-    ContinuationGCSupport::transform_stack_chunk(new_obj);
++#ifdef AARCH64
++    if (UseCompactObjectHeaders) {
++      // The copy above is not atomic. Make sure we have seen the proper mark
++      // and re-install it into the copy, so that Klass* is guaranteed to be correct.
++      markWord mark = o->mark();
++      if (!mark.is_marked()) {
++        new_obj->set_mark(mark);
++        ContinuationGCSupport::transform_stack_chunk(new_obj);
++      } else {
++        // If we copied a mark-word that indicates 'forwarded' state, the object
++        // installation would not succeed. We cannot access Klass* anymore either.
++        // Skip the transformation.
++      }
++    } else
++#endif
++    {
++      ContinuationGCSupport::transform_stack_chunk(new_obj);
++    }
+ 
+     // Now we have to CAS in the header.
+     // Make copy visible to threads reading the forwardee.
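The AARCH64 block above re-reads the source mark after the raw copy because the copy itself is not atomic: the copied header may be torn or stale. A compilable toy sketch of that promotion protocol follows; ToyObj, the bit encoding, and promote() are stand-ins invented for illustration, not the HotSpot implementation.

    #include <atomic>
    #include <cstdint>
    #include <cstring>

    struct ToyObj {
      std::atomic<uintptr_t> header;  // mark word; carries klass bits when compact
      char payload[24];
    };

    constexpr uintptr_t kForwardedBits = 3;  // toy encoding: low bits 11 = forwarded

    // Copy 'from' into the pre-allocated 'to' and try to install forwarding.
    // Returns the copy the caller must use afterwards.
    ToyObj* promote(ToyObj* from, ToyObj* to) {
      std::memcpy(to, from, sizeof(ToyObj));  // non-atomic; header bits may be torn

      uintptr_t mark = from->header.load(std::memory_order_acquire);
      if ((mark & kForwardedBits) == kForwardedBits) {
        // Another thread forwarded the object mid-copy; our copy is garbage
        // and its header must not be interpreted. Use the winner's copy.
        return reinterpret_cast<ToyObj*>(mark & ~kForwardedBits);
      }
      // Not yet forwarded: re-install the freshly loaded, known-good mark into
      // the copy, repairing any torn klass bits picked up by memcpy.
      to->header.store(mark, std::memory_order_relaxed);

      uintptr_t fwd = reinterpret_cast<uintptr_t>(to) | kForwardedBits;
      uintptr_t expected = mark;
      if (from->header.compare_exchange_strong(expected, fwd,
                                               std::memory_order_release)) {
        return to;  // we won the race and own the forwarding
      }
      // Lost the race: 'expected' now holds the winning forwarding word.
      return reinterpret_cast<ToyObj*>(expected & ~kForwardedBits);
    }

    int main() {
      ToyObj from{}, to{};
      from.header.store(0x40, std::memory_order_relaxed);  // unmarked toy mark
      return promote(&from, &to) == &to ? 0 : 1;
    }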
+diff --git a/src/hotspot/share/gc/serial/defNewGeneration.cpp b/src/hotspot/share/gc/serial/defNewGeneration.cpp
+index bd256c9c5..3c181e018 100644
+--- a/src/hotspot/share/gc/serial/defNewGeneration.cpp
++++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp
+@@ -882,7 +882,7 @@ void DefNewGeneration::remove_forwarding_pointers() {
+   struct ResetForwardedMarkWord : ObjectClosure {
+     void do_object(oop obj) override {
+       if (obj->is_forwarded()) {
+-        obj->init_mark();
++        obj->forward_safe_init_mark();
+       }
+     }
+   } cl;
+@@ -905,8 +905,7 @@ void DefNewGeneration::handle_promotion_failure(oop old) {
+ 
+   ContinuationGCSupport::transform_stack_chunk(old);
+ 
+-  // forward to self
+-  old->forward_to(old);
++  old->forward_to_self();
+ 
+   _promo_failure_scan_stack.push(old);
+ 
+diff --git a/src/hotspot/share/gc/serial/genMarkSweep.cpp b/src/hotspot/share/gc/serial/genMarkSweep.cpp
+index 7d06fe588..523ce0561 100644
+--- a/src/hotspot/share/gc/serial/genMarkSweep.cpp
++++ b/src/hotspot/share/gc/serial/genMarkSweep.cpp
+@@ -46,6 +46,7 @@
+ #include "gc/shared/modRefBarrierSet.hpp"
+ #include "gc/shared/referencePolicy.hpp"
+ #include "gc/shared/referenceProcessorPhaseTimes.hpp"
++#include "gc/shared/slidingForwarding.hpp"
+ #include "gc/shared/space.hpp"
+ #include "gc/shared/strongRootsScope.hpp"
+ #include "gc/shared/weakProcessor.hpp"
+@@ -88,6 +89,8 @@ void GenMarkSweep::invoke_at_safepoint(bool clear_all_softrefs) {
+ 
+   mark_sweep_phase1(clear_all_softrefs);
+ 
++  SlidingForwarding::begin();
++
+   mark_sweep_phase2();
+ 
+   // Don't add any more derived pointers during phase3
+@@ -106,6 +109,8 @@ void GenMarkSweep::invoke_at_safepoint(bool clear_all_softrefs) {
+   // (Should this be in general part?)
+   gch->save_marks();
+ 
++  SlidingForwarding::end();
++
+   deallocate_stacks();
+ 
+   MarkSweep::_string_dedup_requests->flush();
+@@ -260,15 +265,27 @@ void GenMarkSweep::mark_sweep_phase3() {
+ 
+   ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);
+ 
+-  CodeBlobToOopClosure code_closure(&adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
+-  gch->process_roots(GenCollectedHeap::SO_AllCodeCache,
+-                     &adjust_pointer_closure,
+-                     &adjust_cld_closure,
+-                     &adjust_cld_closure,
+-                     &code_closure);
+-
+-  gch->gen_process_weak_roots(&adjust_pointer_closure);
+-
++  if (UseAltGCForwarding) {
++    AdjustPointerClosure<true> adjust_pointer_closure;
++    CLDToOopClosure adjust_cld_closure(&adjust_pointer_closure, ClassLoaderData::_claim_stw_fullgc_adjust);
++    CodeBlobToOopClosure code_closure(&adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
++    gch->process_roots(GenCollectedHeap::SO_AllCodeCache,
++                       &adjust_pointer_closure,
++                       &adjust_cld_closure,
++                       &adjust_cld_closure,
++                       &code_closure);
++    gch->gen_process_weak_roots(&adjust_pointer_closure);
++  } else {
++    AdjustPointerClosure<false> adjust_pointer_closure;
++    CLDToOopClosure adjust_cld_closure(&adjust_pointer_closure, ClassLoaderData::_claim_stw_fullgc_adjust);
++    CodeBlobToOopClosure code_closure(&adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
++    gch->process_roots(GenCollectedHeap::SO_AllCodeCache,
++                       &adjust_pointer_closure,
++                       &adjust_cld_closure,
++                       &adjust_cld_closure,
++                       &code_closure);
++    gch->gen_process_weak_roots(&adjust_pointer_closure);
++  }
+   adjust_marks();
+   GenAdjustPointersClosure blk;
+   gch->generation_iterate(&blk, true);
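The begin()/end() calls inserted above bracket exactly the phases that need forwarding data: the side tables exist only between address computation and the end of compaction. A compilable sketch of the lifecycle, with stubbed phase bodies standing in for the real ones:

    // Minimal sketch of the bracketing added above. The phase bodies are empty
    // stand-ins; begin()/end() mirror the entry points introduced by this patch.
    struct SlidingForwardingStub {
      static void begin() { /* allocate 2*N target-base entries, all UNUSED_BASE */ }
      static void end()   { /* free the table and the optional fallback hashtable */ }
    };

    static void mark_phase() {}             // forwarding info not needed yet
    static void compute_new_addresses() {}  // forward() records compressed targets
    static void adjust_pointers() {}        // forwardee() decodes them
    static void compact() {}

    void full_gc_cycle() {
      mark_phase();
      SlidingForwardingStub::begin();  // side tables live only between phases
      compute_new_addresses();
      adjust_pointers();
      compact();
      SlidingForwardingStub::end();
    }

    int main() { full_gc_cycle(); return 0; }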
+diff --git a/src/hotspot/share/gc/serial/markSweep.cpp b/src/hotspot/share/gc/serial/markSweep.cpp
+index d1dc1c07d..4af0c2d35 100644
+--- a/src/hotspot/share/gc/serial/markSweep.cpp
++++ b/src/hotspot/share/gc/serial/markSweep.cpp
+@@ -60,7 +60,6 @@ MarkSweep::FollowRootClosure MarkSweep::follow_root_closure;
+ 
+ MarkAndPushClosure MarkSweep::mark_and_push_closure(ClassLoaderData::_claim_stw_fullgc_mark);
+ CLDToOopClosure MarkSweep::follow_cld_closure(&mark_and_push_closure, ClassLoaderData::_claim_stw_fullgc_mark);
+-CLDToOopClosure MarkSweep::adjust_cld_closure(&adjust_pointer_closure, ClassLoaderData::_claim_stw_fullgc_adjust);
+ 
+ template <class T> void MarkSweep::KeepAliveClosure::do_oop_work(T* p) {
+   mark_and_push(p);
+@@ -142,8 +141,9 @@ template <class T> void MarkSweep::follow_root(T* p) {
+ void MarkSweep::FollowRootClosure::do_oop(oop* p)       { follow_root(p); }
+ void MarkSweep::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }
+ 
++template <bool ALT_FWD>
+ void PreservedMark::adjust_pointer() {
+-  MarkSweep::adjust_pointer(&_obj);
++  MarkSweep::adjust_pointer<ALT_FWD>(&_obj);
+ }
+ 
+ void PreservedMark::restore() {
+@@ -172,12 +172,14 @@ void MarkSweep::mark_object(oop obj) {
+     _string_dedup_requests->add(obj);
+   }
+ 
++  // Do the transform while we still have the header intact,
++  // which might include important class information.
++  ContinuationGCSupport::transform_stack_chunk(obj);
++
+   // some marks may contain information we need to preserve so we store them away
+   // and overwrite the mark.  We'll restore it at the end of markSweep.
+   markWord mark = obj->mark();
+-  obj->set_mark(markWord::prototype().set_marked());
+-
+-  ContinuationGCSupport::transform_stack_chunk(obj);
++  obj->set_mark(obj->prototype_mark().set_marked());
+ 
+   if (obj->mark_must_be_preserved(mark)) {
+     preserve_mark(obj, mark);
+@@ -200,19 +202,26 @@ void MarkAndPushClosure::do_oop_work(T* p) { MarkSweep::mark_and_push(p); }
+ void MarkAndPushClosure::do_oop(      oop* p) { do_oop_work(p); }
+ void MarkAndPushClosure::do_oop(narrowOop* p) { do_oop_work(p); }
+ 
+-AdjustPointerClosure MarkSweep::adjust_pointer_closure;
+-
+-void MarkSweep::adjust_marks() {
++template <bool ALT_FWD>
++void MarkSweep::adjust_marks_impl() {
+   // adjust the oops we saved earlier
+   for (size_t i = 0; i < _preserved_count; i++) {
+-    _preserved_marks[i].adjust_pointer();
++    _preserved_marks[i].adjust_pointer<ALT_FWD>();
+   }
+ 
+   // deal with the overflow stack
+   StackIterator<PreservedMark, mtGC> iter(_preserved_overflow_stack);
+   while (!iter.is_empty()) {
+     PreservedMark* p = iter.next_addr();
+-    p->adjust_pointer();
++    p->adjust_pointer<ALT_FWD>();
++  }
++}
++
++void MarkSweep::adjust_marks() {
++  if (UseAltGCForwarding) {
++    adjust_marks_impl<true>();
++  } else {
++    adjust_marks_impl<false>();
+   }
+ }
+ 
+diff --git a/src/hotspot/share/gc/serial/markSweep.hpp b/src/hotspot/share/gc/serial/markSweep.hpp
+index 60360c86d..254caf009 100644
+--- a/src/hotspot/share/gc/serial/markSweep.hpp
++++ b/src/hotspot/share/gc/serial/markSweep.hpp
+@@ -50,7 +50,6 @@ class STWGCTimer;
+ // declared at end
+ class PreservedMark;
+ class MarkAndPushClosure;
+-class AdjustPointerClosure;
+ 
+ class MarkSweep : AllStatic {
+   //
+@@ -84,7 +83,6 @@ class MarkSweep : AllStatic {
+   //
+   // Friend decls
+   //
+-  friend class AdjustPointerClosure;
+   friend class KeepAliveClosure;
+ 
+   //
+@@ -124,8 +122,6 @@ class MarkSweep : AllStatic {
+   static MarkAndPushClosure   mark_and_push_closure;
+   static FollowStackClosure   follow_stack_closure;
+   static CLDToOopClosure      follow_cld_closure;
+-  static AdjustPointerClosure adjust_pointer_closure;
+-  static CLDToOopClosure      adjust_cld_closure;
+ 
+   // Accessors
+   static uint total_invocations() { return _total_invocations; }
+@@ -141,16 +137,21 @@ class MarkSweep : AllStatic {
+   static void adjust_marks();   // Adjust the pointers in the preserved marks table
+   static void restore_marks();  // Restore the marks that we saved in preserve_mark
+ 
++  template <bool ALT_FWD>
+   static size_t adjust_pointers(oop obj);
+ 
+   static void follow_stack();   // Empty marking stack.
+ 
+-  template <class T> static inline void adjust_pointer(T* p);
++  template <bool ALT_FWD, class T>
++  static void adjust_pointer(T* p);
+ 
+   // Check mark and maybe push on marking stack
+   template <class T> static void mark_and_push(T* p);
+ 
+ private:
++  template <bool ALT_FWD>
++  static void adjust_marks_impl();
++
+   // Call backs for marking
+   static void mark_object(oop obj);
+   // Mark pointer and follow contents.  Empty marking stack afterwards.
+@@ -178,6 +179,7 @@ public:
+   }
+ };
+ 
++template <bool ALT_FWD>
+ class AdjustPointerClosure: public BasicOopIterateClosure {
+  public:
+   template <typename T> void do_oop_work(T* p);
+@@ -193,6 +195,7 @@ private:
+ 
+ public:
+   PreservedMark(oop obj, markWord mark) : _obj(obj), _mark(mark) {}
++  template <bool ALT_FWD>
+   void adjust_pointer();
+   void restore();
+ };
+diff --git a/src/hotspot/share/gc/serial/markSweep.inline.hpp b/src/hotspot/share/gc/serial/markSweep.inline.hpp
+index 97283e987..f804eb93b 100644
+--- a/src/hotspot/share/gc/serial/markSweep.inline.hpp
++++ b/src/hotspot/share/gc/serial/markSweep.inline.hpp
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
++ * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+  *
+  * This code is free software; you can redistribute it and/or modify it
+@@ -31,6 +31,7 @@
+ #include "classfile/javaClasses.inline.hpp"
+ #include "gc/shared/continuationGCSupport.inline.hpp"
+ #include "gc/serial/serialStringDedup.hpp"
++#include "gc/shared/slidingForwarding.inline.hpp"
+ #include "memory/universe.hpp"
+ #include "oops/markWord.hpp"
+ #include "oops/access.inline.hpp"
+@@ -39,27 +40,34 @@
+ #include "utilities/align.hpp"
+ #include "utilities/stack.inline.hpp"
+ 
+-template <class T> inline void MarkSweep::adjust_pointer(T* p) {
++template <bool ALT_FWD, class T>
++inline void MarkSweep::adjust_pointer(T* p) {
+   T heap_oop = RawAccess<>::oop_load(p);
+   if (!CompressedOops::is_null(heap_oop)) {
+     oop obj = CompressedOops::decode_not_null(heap_oop);
+     assert(Universe::heap()->is_in(obj), "should be in heap");
+ 
+-    if (obj->is_forwarded()) {
+-      oop new_obj = obj->forwardee();
++    if (SlidingForwarding::is_forwarded(obj)) {
++      oop new_obj = SlidingForwarding::forwardee<ALT_FWD>(obj);
+       assert(is_object_aligned(new_obj), "oop must be aligned");
+       RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
+     }
+   }
+ }
+ 
++template <bool ALT_FWD>
+ template <typename T>
+-void AdjustPointerClosure::do_oop_work(T* p)           { MarkSweep::adjust_pointer(p); }
+-inline void AdjustPointerClosure::do_oop(oop* p)       { do_oop_work(p); }
+-inline void AdjustPointerClosure::do_oop(narrowOop* p) { do_oop_work(p); }
++void AdjustPointerClosure<ALT_FWD>::do_oop_work(T* p)  { MarkSweep::adjust_pointer<ALT_FWD>(p); }
++template <bool ALT_FWD>
++inline void AdjustPointerClosure<ALT_FWD>::do_oop(oop* p)       { do_oop_work(p); }
+ 
++template <bool ALT_FWD>
++inline void AdjustPointerClosure<ALT_FWD>::do_oop(narrowOop* p) { do_oop_work(p); }
++
++template <bool ALT_FWD>
+ inline size_t MarkSweep::adjust_pointers(oop obj) {
+-  return obj->oop_iterate_size(&MarkSweep::adjust_pointer_closure);
++  AdjustPointerClosure<ALT_FWD> adjust_pointer_closure;
++  return obj->oop_iterate_size(&adjust_pointer_closure);
+ }
+ 
+ #endif // SHARE_GC_SERIAL_MARKSWEEP_INLINE_HPP
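After this change the adjust closure is a stack-allocated instantiation instead of a static singleton, but what it does per reference slot is unchanged. A small self-contained sketch of that slot adjustment, with an unordered_map standing in for the real forwarding lookup (the map and names are illustrative only):

    #include <unordered_map>

    // Toy stand-ins for SlidingForwarding::is_forwarded/forwardee.
    static std::unordered_map<void*, void*> g_forwarding;

    static bool is_forwarded(void* obj) { return g_forwarding.count(obj) != 0; }
    static void* forwardee(void* obj)   { return g_forwarding[obj]; }

    // What adjust_pointer() boils down to for one slot: if the referent has
    // been assigned a new address, rewrite the slot in place.
    static void adjust_slot(void** slot) {
      void* obj = *slot;
      if (obj != nullptr && is_forwarded(obj)) {
        *slot = forwardee(obj);
      }
    }

    int main() {
      int a = 1, b = 2;
      void* slot = &a;
      g_forwarding[&a] = &b;      // pretend 'a' was compacted to b's address
      adjust_slot(&slot);
      return slot == &b ? 0 : 1;  // slot now points at the new location
    }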
+diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp
+index ac3a7b8d6..de43717cd 100644
+--- a/src/hotspot/share/gc/shared/collectedHeap.cpp
++++ b/src/hotspot/share/gc/shared/collectedHeap.cpp
+@@ -228,7 +228,9 @@ bool CollectedHeap::is_oop(oop object) const {
+     return false;
+   }
+ 
+-  if (!Metaspace::contains(object->klass_raw())) {
++  // With compact headers, we can't safely access the class, due
++  // to possibly forwarded objects.
++  if (AARCH64_ONLY(!UseCompactObjectHeaders &&) !Metaspace::contains(object->klass_raw())) {
+     return false;
+   }
+ 
+@@ -401,6 +403,13 @@ void CollectedHeap::set_gc_cause(GCCause::Cause v) {
+   _gc_cause = v;
+ }
+ 
++// Returns the header size in words aligned to the requirements of the
++// array object type.
++static int int_array_header_size() {
++  size_t typesize_in_bytes = arrayOopDesc::header_size_in_bytes();
++  return (int)align_up(typesize_in_bytes, HeapWordSize)/HeapWordSize;
++}
++
+ size_t CollectedHeap::max_tlab_size() const {
+   // TLABs can't be bigger than we can fill with a int[Integer.MAX_VALUE].
+   // This restriction could be removed by enabling filling with multiple arrays.
+@@ -410,14 +419,14 @@
+   // We actually lose a little by dividing first,
+   // but that just makes the TLAB somewhat smaller than the biggest array,
+   // which is fine, since we'll be able to fill that.
+-  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
++  size_t max_int_size = int_array_header_size() +
+               sizeof(jint) *
+               ((juint) max_jint / (size_t) HeapWordSize);
+   return align_down(max_int_size, MinObjAlignment);
+ }
+ 
+ size_t CollectedHeap::filler_array_hdr_size() {
+-  return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
++  return align_object_offset(int_array_header_size()); // align to Long
+ }
+ 
+ size_t CollectedHeap::filler_array_min_size() {
+diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp
+index bd675760c..5784b7077 100644
+--- a/src/hotspot/share/gc/shared/collectedHeap.hpp
++++ b/src/hotspot/share/gc/shared/collectedHeap.hpp
+@@ -309,7 +309,7 @@ class CollectedHeap : public CHeapObj<mtGC> {
+   }
+ 
+   virtual void fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap);
+-  static constexpr size_t min_dummy_object_size() {
++  static size_t min_dummy_object_size() {
+     return oopDesc::header_size();
+   }
+ 
+diff --git a/src/hotspot/share/gc/shared/gc_globals.hpp b/src/hotspot/share/gc/shared/gc_globals.hpp
+index 0cdd68b03..94c917fc8 100644
+--- a/src/hotspot/share/gc/shared/gc_globals.hpp
++++ b/src/hotspot/share/gc/shared/gc_globals.hpp
+@@ -708,8 +708,12 @@
+   product(uint, GCCardSizeInBytes, 512,                                     \
+           "Card table entry size (in bytes) for card based collectors")    \
+           range(128, NOT_LP64(512) LP64_ONLY(1024))                        \
+-          constraint(GCCardSizeInBytesConstraintFunc,AtParse)
+-  // end of GC_FLAGS
++          constraint(GCCardSizeInBytesConstraintFunc,AtParse)              \
++                                                                           \
++  product(bool, UseAltGCForwarding, false, EXPERIMENTAL,                   \
++          "Use alternative GC forwarding that preserves object headers")   \
++
++// end of GC_FLAGS
+ 
+ DECLARE_FLAGS(GC_FLAGS)
+ 
+diff --git a/src/hotspot/share/gc/shared/genCollectedHeap.cpp b/src/hotspot/share/gc/shared/genCollectedHeap.cpp
+index fd61c4b45..ead8111bf 100644
+--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp
++++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp
+@@ -55,6 +55,7 @@
+ #include "gc/shared/oopStorageParState.inline.hpp"
+ #include "gc/shared/oopStorageSet.inline.hpp"
+ #include "gc/shared/scavengableNMethods.hpp"
++#include "gc/shared/slidingForwarding.hpp"
+ #include "gc/shared/space.hpp"
+ #include "gc/shared/strongRootsScope.hpp"
+ #include "gc/shared/weakProcessor.hpp"
+@@ -133,6 +134,8 @@ jint GenCollectedHeap::initialize() {
+ 
+   GCInitLogger::print();
+ 
++  SlidingForwarding::initialize(_reserved, SpaceAlignment / HeapWordSize);
++
+   return JNI_OK;
+ }
+ 
+diff --git a/src/hotspot/share/gc/shared/memAllocator.cpp b/src/hotspot/share/gc/shared/memAllocator.cpp
+index b56afc016..058dae1e0 100644
+--- a/src/hotspot/share/gc/shared/memAllocator.cpp
++++ b/src/hotspot/share/gc/shared/memAllocator.cpp
+@@ -377,18 +377,26 @@ void MemAllocator::mem_clear(HeapWord* mem) const {
+   assert(mem != nullptr, "cannot initialize null object");
+   const size_t hs = oopDesc::header_size();
+   assert(_word_size >= hs, "unexpected object size");
+-  oopDesc::set_klass_gap(mem, 0);
++  if (AARCH64_ONLY(!UseCompactObjectHeaders) NOT_AARCH64(true)) {
++    oopDesc::set_klass_gap(mem, 0);
++  }
+   Copy::fill_to_aligned_words(mem + hs, _word_size - hs);
+ }
+ 
+ oop MemAllocator::finish(HeapWord* mem) const {
+   assert(mem != nullptr, "null object pointer");
+-  // May be bootstrapping
+-  oopDesc::set_mark(mem, markWord::prototype());
+   // Need a release store to ensure array/class length, mark word, and
+   // object zeroing are visible before setting the klass non-null, for
+   // concurrent collectors.
+-  oopDesc::release_set_klass(mem, _klass);
++#ifdef AARCH64
++  if (UseCompactObjectHeaders) {
++    oopDesc::release_set_mark(mem, _klass->prototype_header());
++  } else
++#endif
++  {
++    oopDesc::set_mark(mem, markWord::prototype());
++    oopDesc::release_set_klass(mem, _klass);
++  }
+   return cast_to_oop(mem);
+ }
+ 
+diff --git a/src/hotspot/share/gc/shared/preservedMarks.cpp b/src/hotspot/share/gc/shared/preservedMarks.cpp
+index 9003ccb16..623b0da65 100644
+--- a/src/hotspot/share/gc/shared/preservedMarks.cpp
++++ b/src/hotspot/share/gc/shared/preservedMarks.cpp
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
++ * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+  *
+  * This code is free software; you can redistribute it and/or modify it
+@@ -24,6 +24,7 @@
+ 
+ #include "precompiled.hpp"
+ #include "gc/shared/preservedMarks.inline.hpp"
++#include "gc/shared/slidingForwarding.inline.hpp"
+ #include "gc/shared/workerThread.hpp"
+ #include "gc/shared/workerUtils.hpp"
+ #include "memory/allocation.inline.hpp"
+@@ -40,18 +41,27 @@ void PreservedMarks::restore() {
+   assert_empty();
+ }
+ 
+-void PreservedMarks::adjust_during_full_gc() {
++template <bool ALT_FWD>
++void PreservedMarks::adjust_during_full_gc_impl() {
+   StackIterator<OopAndMarkWord, mtGC> iter(_stack);
+   while (!iter.is_empty()) {
+     OopAndMarkWord* elem = iter.next_addr();
+ 
+     oop obj = elem->get_oop();
+     if (obj->is_forwarded()) {
+-      elem->set_oop(obj->forwardee());
++      elem->set_oop(SlidingForwarding::forwardee<ALT_FWD>(obj));
+     }
+   }
+ }
+ 
++void PreservedMarks::adjust_during_full_gc() {
++  if (UseAltGCForwarding) {
++    adjust_during_full_gc_impl<true>();
++  } else {
++    adjust_during_full_gc_impl<false>();
++  }
++}
++
+ void PreservedMarks::restore_and_increment(volatile size_t* const total_size_addr) {
+   const size_t stack_size = size();
+   restore();
+diff --git a/src/hotspot/share/gc/shared/preservedMarks.hpp b/src/hotspot/share/gc/shared/preservedMarks.hpp
+index c96124eff..227f39a42 100644
+--- a/src/hotspot/share/gc/shared/preservedMarks.hpp
++++ b/src/hotspot/share/gc/shared/preservedMarks.hpp
+@@ -54,6 +54,9 @@ private:
+ 
+   inline bool should_preserve_mark(oop obj, markWord m) const;
+ 
++  template <bool ALT_FWD>
++  void adjust_during_full_gc_impl();
++
+ public:
+   size_t size() const { return _stack.size(); }
+   inline void push_if_necessary(oop obj, markWord m);
+diff --git a/src/hotspot/share/gc/shared/preservedMarks.inline.hpp b/src/hotspot/share/gc/shared/preservedMarks.inline.hpp
+index 107acdba5..faef25afa 100644
+--- a/src/hotspot/share/gc/shared/preservedMarks.inline.hpp
++++ b/src/hotspot/share/gc/shared/preservedMarks.inline.hpp
+@@ -26,6 +26,7 @@
+ #define SHARE_GC_SHARED_PRESERVEDMARKS_INLINE_HPP
+ 
+ #include "gc/shared/preservedMarks.hpp"
++#include "gc/shared/slidingForwarding.inline.hpp"
+ 
+ #include "logging/log.hpp"
+ #include "oops/oop.inline.hpp"
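The new slidingForwarding.cpp below sizes its side table from the heap geometry. Some worked numbers under assumed inputs may help; the 1 MiB region size is illustrative only (the real caller passes SpaceAlignment / HeapWordSize), and the bit constants follow the header introduced after it.

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Bit positions per the SlidingForwarding header: lock bits 0-1,
      // fallback bit 2, alternate-region bit 3, offset from bit 4.
      const unsigned OFFSET_BITS_SHIFT = 4;
      const unsigned NUM_OFFSET_BITS   = 32 - OFFSET_BITS_SHIFT;  // 28

      // A region spans at most 2^28 words = 2 GiB with 8-byte words, so a
      // Serial GC heap up to that size collapses to a single biased region.
      const uint64_t max_region_words = uint64_t(1) << NUM_OFFSET_BITS;
      std::printf("max words per region: %llu (= %llu GiB)\n",
                  (unsigned long long)max_region_words,
                  (unsigned long long)(max_region_words * 8 >> 30));

      // Otherwise: a 1 GiB heap carved into (assumed) 1 MiB regions needs
      // 2 * 1024 base-table entries, i.e. 16 KiB of side table.
      const uint64_t num_regions = (uint64_t(1) << 30) / (uint64_t(1) << 20);
      std::printf("entries: %llu, table: %llu KiB\n",
                  (unsigned long long)(2 * num_regions),
                  (unsigned long long)(2 * num_regions * 8 >> 10));
      return 0;
    }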
+diff --git a/src/hotspot/share/gc/shared/slidingForwarding.cpp b/src/hotspot/share/gc/shared/slidingForwarding.cpp
+new file mode 100644
+index 000000000..ca7a5ef92
+--- /dev/null
++++ b/src/hotspot/share/gc/shared/slidingForwarding.cpp
+@@ -0,0 +1,123 @@
++/*
++ * Copyright (c) 2021, Red Hat, Inc. All rights reserved.
++ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
++ * or visit www.oracle.com if you need additional information or have any
++ * questions.
++ *
++ */
++
++#include "precompiled.hpp"
++#include "gc/shared/gc_globals.hpp"
++#include "gc/shared/slidingForwarding.hpp"
++#include "utilities/ostream.hpp"
++#include "utilities/powerOfTwo.hpp"
++
++// We cannot use 0, because that may already be a valid base address in zero-based heaps.
++// 0x1 is safe because heap base addresses must be aligned by much larger alignment
++HeapWord* const SlidingForwarding::UNUSED_BASE = reinterpret_cast<HeapWord*>(0x1);
++
++HeapWord* SlidingForwarding::_heap_start = nullptr;
++size_t SlidingForwarding::_region_size_words = 0;
++size_t SlidingForwarding::_heap_start_region_bias = 0;
++size_t SlidingForwarding::_num_regions = 0;
++uint SlidingForwarding::_region_size_bytes_shift = 0;
++uintptr_t SlidingForwarding::_region_mask = 0;
++HeapWord** SlidingForwarding::_biased_bases[SlidingForwarding::NUM_TARGET_REGIONS] = { nullptr, nullptr };
++HeapWord** SlidingForwarding::_bases_table = nullptr;
++SlidingForwarding::FallbackTable* SlidingForwarding::_fallback_table = nullptr;
++
++void SlidingForwarding::initialize(MemRegion heap, size_t region_size_words) {
++#ifdef _LP64
++  if (UseAltGCForwarding) {
++    _heap_start = heap.start();
++
++    // If the heap is small enough to fit directly into the available offset bits,
++    // and we are running Serial GC, we can treat the whole heap as a single region
++    // if it happens to be aligned to allow biasing.
++    size_t rounded_heap_size = round_up_power_of_2(heap.byte_size());
++
++    if (UseSerialGC && (heap.word_size() <= (1 << NUM_OFFSET_BITS)) &&
++        is_aligned((uintptr_t)_heap_start, rounded_heap_size)) {
++      _num_regions = 1;
++      _region_size_words = heap.word_size();
++      _region_size_bytes_shift = log2i_exact(rounded_heap_size);
++    } else {
++      _num_regions = align_up(pointer_delta(heap.end(), heap.start()), region_size_words) / region_size_words;
++      _region_size_words = region_size_words;
++      _region_size_bytes_shift = log2i_exact(_region_size_words) + LogHeapWordSize;
++    }
++    _heap_start_region_bias = (uintptr_t)_heap_start >> _region_size_bytes_shift;
++    _region_mask = ~((uintptr_t(1) << _region_size_bytes_shift) - 1);
++
++    guarantee((_heap_start_region_bias << _region_size_bytes_shift) == (uintptr_t)_heap_start, "must be aligned: _heap_start_region_bias: " SIZE_FORMAT ", _region_size_byte_shift: %u, _heap_start: " PTR_FORMAT, _heap_start_region_bias, _region_size_bytes_shift, p2i(_heap_start));
++
++    assert(_region_size_words >= 1, "regions must be at least a word large");
++    assert(_bases_table == nullptr, "should not be initialized yet");
++    assert(_fallback_table == nullptr, "should not be initialized yet");
++  }
++#endif
++}
++
++void SlidingForwarding::begin() {
++#ifdef _LP64
++  if (UseAltGCForwarding) {
++    assert(_bases_table == nullptr, "should not be initialized yet");
++    assert(_fallback_table == nullptr, "should not be initialized yet");
++
++    size_t max = _num_regions * NUM_TARGET_REGIONS;
++    _bases_table = NEW_C_HEAP_ARRAY(HeapWord*, max, mtGC);
++    HeapWord** biased_start = _bases_table - _heap_start_region_bias;
++    _biased_bases[0] = biased_start;
++    _biased_bases[1] = biased_start + _num_regions;
++    for (size_t i = 0; i < max; i++) {
++      _bases_table[i] = UNUSED_BASE;
++    }
++  }
++#endif
++}
++
++void SlidingForwarding::end() {
++#ifdef _LP64
++  if (UseAltGCForwarding) {
++    assert(_bases_table != nullptr, "should be initialized");
++    FREE_C_HEAP_ARRAY(HeapWord*, _bases_table);
++    _bases_table = nullptr;
++    delete _fallback_table;
++    _fallback_table = nullptr;
++  }
++#endif
++}
++
++void SlidingForwarding::fallback_forward_to(HeapWord* from, HeapWord* to) {
++  if (_fallback_table == nullptr) {
++    _fallback_table = new (mtGC) FallbackTable();
++  }
++  _fallback_table->put_when_absent(from, to);
++}
++
++HeapWord* SlidingForwarding::fallback_forwardee(HeapWord* from) {
++  assert(_fallback_table != nullptr, "fallback table must be present");
++  HeapWord** found = _fallback_table->get(from);
++  if (found != nullptr) {
++    return *found;
++  } else {
++    return nullptr;
++  }
++}
+diff --git a/src/hotspot/share/gc/shared/slidingForwarding.hpp b/src/hotspot/share/gc/shared/slidingForwarding.hpp
+new file mode 100644
+index 000000000..12468e34c
+--- /dev/null
++++ b/src/hotspot/share/gc/shared/slidingForwarding.hpp
+@@ -0,0 +1,181 @@
++/*
++ * Copyright (c) 2021, Red Hat, Inc. All rights reserved.
++ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
++ * or visit www.oracle.com if you need additional information or have any
++ * questions.
++ *
++ */
++
++#ifndef SHARE_GC_SHARED_SLIDINGFORWARDING_HPP
++#define SHARE_GC_SHARED_SLIDINGFORWARDING_HPP
++
++#include "memory/allocation.hpp"
++#include "memory/memRegion.hpp"
++#include "oops/markWord.hpp"
++#include "oops/oopsHierarchy.hpp"
++#include "utilities/fastHash.hpp"
++#include "utilities/resourceHash.hpp"
++
++/**
++ * SlidingForwarding is a method to store forwarding information in a compressed form into the object header,
++ * that has been specifically designed for sliding compaction GCs and compact object headers. With compact object
++ * headers, we store the compressed class pointer in the header, which would be overwritten by full forwarding
++ * pointer, if we allow the legacy forwarding code to act. This would lose the class information for the object,
++ * which is required later in GC cycle to iterate the reference fields and get the object size for copying.
++ *
++ * SlidingForwarding requires only small side tables and guarantees constant-time access and modification.
++ *
++ * The idea is to use a pointer compression scheme very similar to the one that is used for compressed oops.
++ * We divide the heap into number of logical regions. Each region spans maximum of 2^NUM_OFFSET_BITS words.
++ *
++ * The key advantage of sliding compaction for encoding efficiency: it can forward objects from one region to a
++ * maximum of two regions. This is an intuitive property: when we slide the compact region full of data, it can
++ * only span two adjacent regions. This property allows us to use the off-side table to record the addresses of
++ * two target regions. The table holds N*2 entries for N logical regions. For each region, it gives the base
++ * address of the two target regions, or a special placeholder if not used. A single bit in forwarding would
++ * indicate to which of the two "to" regions the object is forwarded into.
++ *
++ * This encoding efficiency allows to store the forwarding information in the object header _together_ with the
++ * compressed class pointer.
++ *
++ * When recording the sliding forwarding, the mark word would look roughly like this:
++ *
++ *   64                               32                              0
++ *    [................................OOOOOOOOOOOOOOOOOOOOOOOOOOOOAFTT]
++ *                                                                    ^----- normal lock bits, would record "object is forwarded"
++ *                                                                  ^------- fallback bit (explained below)
++ *                                                                 ^-------- alternate region select
++ *                                     ^------------------------------------ in-region offset
++ *     ^-------------------------------------------------------------------- protected area, *not touched* by this code, useful for
++ *                                                                            compressed class pointer with compact object headers
++ *
++ * Adding a forwarding then generally works as follows:
++ *   1. Compute the "to" offset in the "to" region, this gives "offset".
++ *   2. Check if the primary "from" offset at base table contains "to" region base, use it.
++ *      If not usable, continue to next step. If usable, set "alternate" = "false" and jump to (4).
++ *   3. Check if the alternate "from" offset at base table contains "to" region base, use it.
++ *      This gives us "alternate" = "true". This should always complete for sliding forwarding.
++ *   4. Compute the mark word from "offset" and "alternate", write it out
++ *
++ * Similarly, looking up the target address, given an original object address generally works as follows:
++ *   1. Load the mark from object, and decode "offset" and "alternate" from there
++ *   2. Compute the "from" base offset from the object
++ *   3. Look up "to" region base from the base table either at primary or alternate indices, using "alternate" flag
++ *   4. Compute the "to" address from "to" region base and "offset"
++ *
++ * This algorithm is broken by G1 last-ditch serial compaction: there, object from a single region can be
++ * forwarded to multiple, more than two regions. To deal with that, we initialize a fallback-hashtable for
++ * storing those extra forwardings, and set another bit in the header to indicate that the forwardee is not
++ * encoded but should be looked-up in the hashtable. G1 serial compaction is not very common - it is the
++ * last-last-ditch GC that is used when the JVM is scrambling to squeeze more space out of the heap, and at
++ * that point, ultimate performance is no longer the main concern.
++ */
++class SlidingForwarding : public AllStatic {
++private:
++
++  /*
++   * A simple hash-table that acts as fallback for the sliding forwarding.
++   * This is used in the case of G1 serial compaction, which violates the
++   * assumption of sliding forwarding that each object of any region is only
++   * ever forwarded to one of two target regions. At this point, the GC is
++   * scrambling to free up more Java heap memory, and therefore performance
++   * is not the major concern.
++   *
++   * The implementation is a straightforward open hashtable.
++   * It is a single-threaded (not thread-safe) implementation, and that
++   * is sufficient because G1 serial compaction is single-threaded.
++   */
++  inline static unsigned hash(HeapWord* const& from) {
++    uint64_t val = reinterpret_cast<uint64_t>(from);
++    uint64_t hash = FastHash::get_hash64(val, UCONST64(0xAAAAAAAAAAAAAAAA));
++    return checked_cast<unsigned>(hash >> 32);
++  }
++  inline static bool equals(HeapWord* const& lhs, HeapWord* const& rhs) {
++    return lhs == rhs;
++  }
++  typedef ResourceHashtable<HeapWord*, HeapWord*, 1024, AnyObj::C_HEAP, mtGC, hash, equals> FallbackTable;
++
++  static const uintptr_t MARK_LOWER_HALF_MASK = right_n_bits(32);
++
++  // We need the lowest two bits to indicate a forwarded object.
++  // The next bit indicates that the forwardee should be looked-up in a fallback-table.
++  static const int FALLBACK_SHIFT = markWord::lock_bits;
++  static const int FALLBACK_BITS = 1;
++  static const int FALLBACK_MASK = right_n_bits(FALLBACK_BITS) << FALLBACK_SHIFT;
++
++  // Next bit selects the target region
++  static const int ALT_REGION_SHIFT = FALLBACK_SHIFT + FALLBACK_BITS;
++  static const int ALT_REGION_BITS = 1;
++  // This will be "2" always, but expose it as named constant for clarity
++  static const size_t NUM_TARGET_REGIONS = 1 << ALT_REGION_BITS;
++
++  // The offset bits start then
++  static const int OFFSET_BITS_SHIFT = ALT_REGION_SHIFT + ALT_REGION_BITS;
++
++  // How many bits we use for the offset
++  static const int NUM_OFFSET_BITS = 32 - OFFSET_BITS_SHIFT;
++
++  // Indicates an unused base address in the target base table.
++  static HeapWord* const UNUSED_BASE;
++
++  static HeapWord* _heap_start;
++  static size_t _region_size_words;
++
++  static size_t _heap_start_region_bias;
++  static size_t _num_regions;
++  static uint _region_size_bytes_shift;
++  static uintptr_t _region_mask;
++
++  // The target base table memory.
++  static HeapWord** _bases_table;
++  // Entries into the target base tables, biased to the start of the heap.
++  static HeapWord** _biased_bases[NUM_TARGET_REGIONS];
++
++  static FallbackTable* _fallback_table;
++
++  static inline size_t biased_region_index_containing(HeapWord* addr);
++
++  static inline uintptr_t encode_forwarding(HeapWord* from, HeapWord* to);
++  static inline HeapWord* decode_forwarding(HeapWord* from, uintptr_t encoded);
++
++  static void fallback_forward_to(HeapWord* from, HeapWord* to);
++  static HeapWord* fallback_forwardee(HeapWord* from);
++
++  static inline void forward_to_impl(oop from, oop to);
++  static inline oop forwardee_impl(oop from);
++
++public:
++  static void initialize(MemRegion heap, size_t region_size_words);
++
++  static void begin();
++  static void end();
++
++  static inline bool is_forwarded(oop obj);
++  static inline bool is_not_forwarded(oop obj);
++
++  template <bool ALT_FWD>
++  static inline void forward_to(oop from, oop to);
++  template <bool ALT_FWD>
++  static inline oop forwardee(oop from);
++};
++
++#endif // SHARE_GC_SHARED_SLIDINGFORWARDING_HPP
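The bit layout documented in the header above can be exercised standalone. The constants below mirror the documented positions (lock bits 0-1, fallback bit 2, alternate bit 3, offset from bit 4); this is a demonstration of the arithmetic, not patch code:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t marked_value      = 3;  // lock bits say "forwarded"
      const unsigned FALLBACK_SHIFT    = 2;
      const unsigned ALT_REGION_SHIFT  = 3;
      const unsigned OFFSET_BITS_SHIFT = 4;

      // Forward an object to word-offset 0x1234 inside the *alternate*
      // target region of its source region:
      uint64_t offset    = 0x1234;
      uint64_t alternate = 1;
      uint64_t encoded = (offset << OFFSET_BITS_SHIFT) |
                         (alternate << ALT_REGION_SHIFT) |
                         marked_value;
      std::printf("encoded: 0x%llx\n", (unsigned long long)encoded);

      // Decoding inverts the shifts; the upper 32 bits stay untouched and
      // remain free for the compressed class pointer under compact headers.
      assert((encoded >> OFFSET_BITS_SHIFT) == offset);
      assert(((encoded >> ALT_REGION_SHIFT) & 1) == alternate);
      assert((encoded & (1u << FALLBACK_SHIFT)) == 0);  // not fallback-forwarded
      assert(encoded >> 32 == 0);
      return 0;
    }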
++ static HeapWord* const UNUSED_BASE; ++ ++ static HeapWord* _heap_start; ++ static size_t _region_size_words; ++ ++ static size_t _heap_start_region_bias; ++ static size_t _num_regions; ++ static uint _region_size_bytes_shift; ++ static uintptr_t _region_mask; ++ ++ // The target base table memory. ++ static HeapWord** _bases_table; ++ // Entries into the target base tables, biased to the start of the heap. ++ static HeapWord** _biased_bases[NUM_TARGET_REGIONS]; ++ ++ static FallbackTable* _fallback_table; ++ ++ static inline size_t biased_region_index_containing(HeapWord* addr); ++ ++ static inline uintptr_t encode_forwarding(HeapWord* from, HeapWord* to); ++ static inline HeapWord* decode_forwarding(HeapWord* from, uintptr_t encoded); ++ ++ static void fallback_forward_to(HeapWord* from, HeapWord* to); ++ static HeapWord* fallback_forwardee(HeapWord* from); ++ ++ static inline void forward_to_impl(oop from, oop to); ++ static inline oop forwardee_impl(oop from); ++ ++public: ++ static void initialize(MemRegion heap, size_t region_size_words); ++ ++ static void begin(); ++ static void end(); ++ ++ static inline bool is_forwarded(oop obj); ++ static inline bool is_not_forwarded(oop obj); ++ ++ template ++ static inline void forward_to(oop from, oop to); ++ template ++ static inline oop forwardee(oop from); ++}; ++ ++#endif // SHARE_GC_SHARED_SLIDINGFORWARDING_HPP +diff --git a/src/hotspot/share/gc/shared/slidingForwarding.inline.hpp b/src/hotspot/share/gc/shared/slidingForwarding.inline.hpp +new file mode 100644 +index 000000000..e81b345d2 +--- /dev/null ++++ b/src/hotspot/share/gc/shared/slidingForwarding.inline.hpp +@@ -0,0 +1,171 @@ ++/* ++ * Copyright (c) 2021, Red Hat, Inc. All rights reserved. ++ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ */
++
++#ifndef SHARE_GC_SHARED_SLIDINGFORWARDING_INLINE_HPP
++#define SHARE_GC_SHARED_SLIDINGFORWARDING_INLINE_HPP
++
++#include "gc/shared/gc_globals.hpp"
++#include "gc/shared/slidingForwarding.hpp"
++#include "oops/markWord.hpp"
++#include "oops/oop.inline.hpp"
++#include "utilities/macros.hpp"
++
++inline bool SlidingForwarding::is_forwarded(oop obj) {
++  return obj->is_forwarded();
++}
++
++inline bool SlidingForwarding::is_not_forwarded(oop obj) {
++  return !obj->is_forwarded();
++}
++
++size_t SlidingForwarding::biased_region_index_containing(HeapWord* addr) {
++  return (uintptr_t)addr >> _region_size_bytes_shift;
++}
++
++uintptr_t SlidingForwarding::encode_forwarding(HeapWord* from, HeapWord* to) {
++  static_assert(NUM_TARGET_REGIONS == 2, "Only implemented for this amount");
++
++  size_t from_reg_idx = biased_region_index_containing(from);
++  HeapWord* to_region_base = (HeapWord*)((uintptr_t)to & _region_mask);
++
++  HeapWord** base = &_biased_bases[0][from_reg_idx];
++  uintptr_t alternate = 0;
++  if (*base == to_region_base) {
++    // Primary is good
++  } else if (*base == UNUSED_BASE) {
++    // Primary is free
++    *base = to_region_base;
++  } else {
++    base = &_biased_bases[1][from_reg_idx];
++    if (*base == to_region_base) {
++      // Alternate is good
++    } else if (*base == UNUSED_BASE) {
++      // Alternate is free
++      *base = to_region_base;
++    } else {
++      // Both primary and alternate are not fitting
++      // This happens only in the following rare situations:
++      // - In Serial GC, sometimes when compact-top switches spaces, because the
++      //   region boundaries are virtual and objects can cross regions
++      // - In G1 serial compaction, because tails of various compaction chains
++      //   are distributed across the remainders of already compacted regions.
++      return (1 << FALLBACK_SHIFT) | markWord::marked_value;
++    }
++    alternate = 1;
++  }
++
++  size_t offset = pointer_delta(to, to_region_base);
++  assert(offset < _region_size_words, "Offset should be within the region. from: " PTR_FORMAT
++         ", to: " PTR_FORMAT ", to_region_base: " PTR_FORMAT ", offset: " SIZE_FORMAT,
++         p2i(from), p2i(to), p2i(to_region_base), offset);
++
++  uintptr_t encoded = (offset << OFFSET_BITS_SHIFT) |
++                      (alternate << ALT_REGION_SHIFT) |
++                      markWord::marked_value;
++
++  assert(to == decode_forwarding(from, encoded), "must be reversible");
++  assert((encoded & ~MARK_LOWER_HALF_MASK) == 0, "must encode to lowest 32 bits");
++  return encoded;
++}
++
++HeapWord* SlidingForwarding::decode_forwarding(HeapWord* from, uintptr_t encoded) {
++  assert((encoded & markWord::lock_mask_in_place) == markWord::marked_value, "must be marked as forwarded");
++  assert((encoded & FALLBACK_MASK) == 0, "must not be fallback-forwarded");
++  assert((encoded & ~MARK_LOWER_HALF_MASK) == 0, "must decode from lowest 32 bits");
++  size_t alternate = (encoded >> ALT_REGION_SHIFT) & right_n_bits(ALT_REGION_BITS);
++  assert(alternate < NUM_TARGET_REGIONS, "Sanity");
++  uintptr_t offset = (encoded >> OFFSET_BITS_SHIFT);
++
++  size_t from_idx = biased_region_index_containing(from);
++  HeapWord* base = _biased_bases[alternate][from_idx];
++  assert(base != UNUSED_BASE, "must not be unused base");
++  HeapWord* decoded = base + offset;
++  assert(decoded >= _heap_start,
++         "Address must be above heap start. encoded: " INTPTR_FORMAT ", alt_region: " SIZE_FORMAT ", base: " PTR_FORMAT,
++         encoded, alternate, p2i(base));
++
++  return decoded;
++}
++
++inline void SlidingForwarding::forward_to_impl(oop from, oop to) {
++  assert(_bases_table != nullptr, "call begin() before forwarding");
++
++  markWord from_header = from->mark();
++  if (from_header.has_displaced_mark_helper()) {
++    from_header = from_header.displaced_mark_helper();
++  }
++
++  HeapWord* from_hw = cast_from_oop<HeapWord*>(from);
++  HeapWord* to_hw = cast_from_oop<HeapWord*>(to);
++  uintptr_t encoded = encode_forwarding(from_hw, to_hw);
++  markWord new_header = markWord((from_header.value() & ~MARK_LOWER_HALF_MASK) | encoded);
++  from->set_mark(new_header);
++
++  if ((encoded & FALLBACK_MASK) != 0) {
++    fallback_forward_to(from_hw, to_hw);
++  }
++}
++
++template <bool ALT_FWD>
++inline void SlidingForwarding::forward_to(oop obj, oop fwd) {
++#ifdef _LP64
++  if (ALT_FWD) {
++    assert(_bases_table != nullptr, "expect sliding forwarding initialized");
++    forward_to_impl(obj, fwd);
++    assert(forwardee<ALT_FWD>(obj) == fwd, "must be forwarded to correct forwardee");
++  } else
++#endif
++  {
++    obj->forward_to(fwd);
++  }
++}
++
++inline oop SlidingForwarding::forwardee_impl(oop from) {
++  assert(_bases_table != nullptr, "call begin() before asking for forwarding");
++
++  markWord header = from->mark();
++  HeapWord* from_hw = cast_from_oop<HeapWord*>(from);
++  if ((header.value() & FALLBACK_MASK) != 0) {
++    HeapWord* to = fallback_forwardee(from_hw);
++    return cast_to_oop(to);
++  }
++  uintptr_t encoded = header.value() & MARK_LOWER_HALF_MASK;
++  HeapWord* to = decode_forwarding(from_hw, encoded);
++  return cast_to_oop(to);
++}
++
++template <bool ALT_FWD>
++inline oop SlidingForwarding::forwardee(oop obj) {
++#ifdef _LP64
++  if (ALT_FWD) {
++    assert(_bases_table != nullptr, "expect sliding forwarding initialized");
++    return forwardee_impl(obj);
++  } else
++#endif
++  {
++    return obj->forwardee();
++  }
++}
++
++#endif // SHARE_GC_SHARED_SLIDINGFORWARDING_INLINE_HPP
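
For readers following the encoding above, here is a minimal, self-contained sketch of the same bit packing (lock:2, fallback:1, alternate:1, then the region-relative word offset in the remaining low bits), outside of HotSpot. The values mirror markWord::lock_bits == 2 and markWord::marked_value == 3; all names and the simplified layout are illustrative only, not part of the patch.

    #include <cassert>
    #include <cstdint>

    static const int       LOCK_BITS         = 2;  // markWord::lock_bits
    static const uintptr_t MARKED_VALUE      = 3;  // markWord::marked_value
    static const int       FALLBACK_SHIFT    = LOCK_BITS;
    static const int       ALT_REGION_SHIFT  = FALLBACK_SHIFT + 1;
    static const int       OFFSET_BITS_SHIFT = ALT_REGION_SHIFT + 1;

    // Pack a region-relative word offset and the primary/alternate selector
    // into the low bits of a mark word, keeping the "forwarded" lock bits set.
    static uintptr_t encode(uintptr_t offset, uintptr_t alternate) {
      return (offset << OFFSET_BITS_SHIFT) | (alternate << ALT_REGION_SHIFT) | MARKED_VALUE;
    }

    static uintptr_t decode_offset(uintptr_t enc)    { return enc >> OFFSET_BITS_SHIFT; }
    static uintptr_t decode_alternate(uintptr_t enc) { return (enc >> ALT_REGION_SHIFT) & 1; }

    int main() {
      uintptr_t enc = encode(1000, 1);   // word offset 1000, alternate region
      assert(decode_offset(enc) == 1000);
      assert(decode_alternate(enc) == 1);
      assert((enc & 3) == MARKED_VALUE); // low bits still read as "forwarded"
      return 0;
    }
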
+diff --git a/src/hotspot/share/gc/shared/space.cpp b/src/hotspot/share/gc/shared/space.cpp
+index 48a1f01b9..ad523483d 100644
+--- a/src/hotspot/share/gc/shared/space.cpp
++++ b/src/hotspot/share/gc/shared/space.cpp
+@@ -27,6 +27,7 @@
+ #include "classfile/vmSymbols.hpp"
+ #include "gc/shared/collectedHeap.inline.hpp"
+ #include "gc/shared/genCollectedHeap.hpp"
++#include "gc/shared/slidingForwarding.inline.hpp"
+ #include "gc/shared/space.hpp"
+ #include "gc/shared/space.inline.hpp"
+ #include "gc/shared/spaceDecorator.inline.hpp"
+@@ -244,7 +245,7 @@ void ContiguousSpace::mangle_unused_area_complete() {
+ }
+ #endif // NOT_PRODUCT
+ 
+-
++template <bool ALT_FWD>
+ HeapWord* ContiguousSpace::forward(oop q, size_t size,
+                                    CompactPoint* cp, HeapWord* compact_top) {
+   // q is alive
+@@ -269,13 +270,13 @@ HeapWord* ContiguousSpace::forward(oop q, size_t size,
+ 
+   // store the forwarding pointer into the mark word
+   if (cast_from_oop<HeapWord*>(q) != compact_top) {
+-    q->forward_to(cast_to_oop(compact_top));
++    SlidingForwarding::forward_to<ALT_FWD>(q, cast_to_oop(compact_top));
+     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
+   } else {
+     // if the object isn't moving we can just set the mark to the default
+     // mark and handle it specially later on.
+     q->init_mark();
+-    assert(!q->is_forwarded(), "should not be forwarded");
++    assert(SlidingForwarding::is_not_forwarded(q), "should not be forwarded");
+   }
+ 
+   compact_top += size;
+@@ -289,7 +290,8 @@ HeapWord* ContiguousSpace::forward(oop q, size_t size,
+ 
+ #if INCLUDE_SERIALGC
+ 
+-void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
++template <bool ALT_FWD>
++void ContiguousSpace::prepare_for_compaction_impl(CompactPoint* cp) {
+   // Compute the new addresses for the live objects and store it in the mark
+   // Used by universe::mark_sweep_phase2()
+ 
+@@ -322,7 +324,7 @@ void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
+       // prefetch beyond cur_obj
+       Prefetch::write(cur_obj, interval);
+       size_t size = cast_to_oop(cur_obj)->size();
+-      compact_top = cp->space->forward(cast_to_oop(cur_obj), size, cp, compact_top);
++      compact_top = cp->space->forward<ALT_FWD>(cast_to_oop(cur_obj), size, cp, compact_top);
+       cur_obj += size;
+       end_of_live = cur_obj;
+     } else {
+@@ -338,7 +340,7 @@ void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
+       // we don't have to compact quite as often.
+       if (cur_obj == compact_top && dead_spacer.insert_deadspace(cur_obj, end)) {
+         oop obj = cast_to_oop(cur_obj);
+-        compact_top = cp->space->forward(obj, obj->size(), cp, compact_top);
++        compact_top = cp->space->forward<ALT_FWD>(obj, obj->size(), cp, compact_top);
+         end_of_live = end;
+       } else {
+         // otherwise, it really is a free region.
+@@ -369,7 +371,16 @@ void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
+   cp->space->set_compaction_top(compact_top);
+ }
+ 
+-void ContiguousSpace::adjust_pointers() {
++void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
++  if (UseAltGCForwarding) {
++    prepare_for_compaction_impl<true>(cp);
++  } else {
++    prepare_for_compaction_impl<false>(cp);
++  }
++}
++
++template <bool ALT_FWD>
++void ContiguousSpace::adjust_pointers_impl() {
+   // Check first is there is any work to do.
+   if (used() == 0) {
+     return;   // Nothing to do.
+@@ -392,7 +403,7 @@ void ContiguousSpace::adjust_pointers() {
+     if (cur_obj < first_dead || cast_to_oop(cur_obj)->is_gc_marked()) {
+       // cur_obj is alive
+       // point all the oops to the new location
+-      size_t size = MarkSweep::adjust_pointers(cast_to_oop(cur_obj));
++      size_t size = MarkSweep::adjust_pointers<ALT_FWD>(cast_to_oop(cur_obj));
+       debug_only(prev_obj = cur_obj);
+       cur_obj += size;
+     } else {
+@@ -406,7 +417,16 @@ void ContiguousSpace::adjust_pointers() {
+   assert(cur_obj == end_of_live, "just checking");
+ }
+ 
+-void ContiguousSpace::compact() {
++void ContiguousSpace::adjust_pointers() {
++  if (UseAltGCForwarding) {
++    adjust_pointers_impl<true>();
++  } else {
++    adjust_pointers_impl<false>();
++  }
++}
++
++template <bool ALT_FWD>
++void ContiguousSpace::compact_impl() {
+   // Copy all live objects to their new location
+   // Used by MarkSweep::mark_sweep_phase4()
+ 
+@@ -435,7 +455,7 @@ void ContiguousSpace::compact() {
+ 
+   debug_only(HeapWord* prev_obj = nullptr);
+   while (cur_obj < end_of_live) {
+-    if (!cast_to_oop(cur_obj)->is_forwarded()) {
++    if (SlidingForwarding::is_not_forwarded(cast_to_oop(cur_obj))) {
+       debug_only(prev_obj = cur_obj);
+       // The first word of the dead object contains a pointer to the next live object or end of space.
+       cur_obj = *(HeapWord**)cur_obj;
+@@ -446,7 +466,7 @@
+ 
+     // size and destination
+     size_t size = cast_to_oop(cur_obj)->size();
+-    HeapWord* compaction_top = cast_from_oop<HeapWord*>(cast_to_oop(cur_obj)->forwardee());
++    HeapWord* compaction_top = cast_from_oop<HeapWord*>(SlidingForwarding::forwardee<ALT_FWD>(cast_to_oop(cur_obj)));
+ 
+     // prefetch beyond compaction_top
+     Prefetch::write(compaction_top, copy_interval);
+@@ -469,6 +489,14 @@
+   clear_empty_region(this);
+ }
+ 
++void ContiguousSpace::compact() {
++  if (UseAltGCForwarding) {
++    compact_impl<true>();
++  } else {
++    compact_impl<false>();
++  }
++}
++
+ #endif // INCLUDE_SERIALGC
+ 
+ void Space::print_short() const { print_short_on(tty); }
+diff --git a/src/hotspot/share/gc/shared/space.hpp b/src/hotspot/share/gc/shared/space.hpp
+index 7282b4de2..a254134c4 100644
+--- a/src/hotspot/share/gc/shared/space.hpp
++++ b/src/hotspot/share/gc/shared/space.hpp
+@@ -311,7 +311,18 @@ private:
+ 
+   static inline void clear_empty_region(ContiguousSpace* space);
+ 
+-  protected:
++#if INCLUDE_SERIALGC
++  template <bool ALT_FWD>
++  void prepare_for_compaction_impl(CompactPoint* cp);
++
++  template <bool ALT_FWD>
++  void adjust_pointers_impl();
++
++  template <bool ALT_FWD>
++  void compact_impl();
++#endif
++
++protected:
+   HeapWord* _top;
+   // A helper for mangling the unused area of the space in debug builds.
+   GenSpaceMangler* _mangler;
+@@ -398,7 +409,8 @@ private:
+   // and then forward. In either case, returns the new value of "compact_top".
+   // Invokes the "alloc_block" function of the then-current compaction
+   // space.
+-  virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
++  template <bool ALT_FWD>
++  HeapWord* forward(oop q, size_t size, CompactPoint* cp,
+                             HeapWord* compact_top);
+ 
+   // Accessors
+diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp
+index 3699e2b3e..2102aa8f6 100644
+--- a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp
++++ b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp
+@@ -197,7 +197,7 @@ void ShenandoahAsserts::assert_correct(void* interior_loc, oop obj, const char*
+                   file, line);
+   }
+ 
+-  Klass* obj_klass = obj->klass_or_null();
++  Klass* obj_klass = obj->forward_safe_klass();
+   if (obj_klass == nullptr) {
+     print_failure(_safe_unknown, obj, interior_loc, nullptr, "Shenandoah assert_correct failed",
+                   "Object klass pointer should not be null",
+@@ -235,7 +235,7 @@ void ShenandoahAsserts::assert_correct(void* interior_loc, oop obj, const char*
+                   file, line);
+   }
+ 
+-  if (obj_klass != fwd->klass()) {
++  if (obj_klass != fwd->forward_safe_klass()) {
+     print_failure(_safe_oop, obj, interior_loc, nullptr, "Shenandoah assert_correct failed",
+                   "Forwardee klass disagrees with object class",
+                   file, line);
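
The space.cpp/space.hpp hunks above establish the pattern the rest of the patch repeats: each hot loop body is compiled twice via a template<bool ALT_FWD> *_impl function, and the UseAltGCForwarding flag is tested once at the entry point instead of per object. A minimal sketch of that dispatch, with placeholder names for everything except the flag:

    static bool UseAltGCForwarding = false;   // stand-in for the real VM flag

    template <bool ALT_FWD>
    static long work_impl(int n) {
      long sum = 0;
      for (int i = 0; i < n; i++) {
        if (ALT_FWD) {      // compile-time constant: folded away per instantiation
          sum += 2 * i;     // ... sliding-forwarding path ...
        } else {
          sum += i;         // ... legacy forwarding-pointer path ...
        }
      }
      return sum;
    }

    static long work(int n) {
      // One runtime test selects a fully specialized instantiation,
      // keeping the flag check out of the per-object loop.
      return UseAltGCForwarding ? work_impl<true>(n) : work_impl<false>(n);
    }

    int main() { return work(10) == 45 ? 0 : 1; }
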
+diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
+index 07f62d56a..de4dfaea1 100644
+--- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
++++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
+@@ -28,6 +28,7 @@
+ #include "gc/shared/continuationGCSupport.hpp"
+ #include "gc/shared/gcTraceTime.inline.hpp"
+ #include "gc/shared/preservedMarks.inline.hpp"
++#include "gc/shared/slidingForwarding.inline.hpp"
+ #include "gc/shared/tlab_globals.hpp"
+ #include "gc/shared/workerThread.hpp"
+ #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
+@@ -221,6 +222,8 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
+     // until all phases run together.
+     ShenandoahHeapLocker lock(heap->lock());
+ 
++    SlidingForwarding::begin();
++
+     phase2_calculate_target_addresses(worker_slices);
+ 
+     OrderAccess::fence();
+@@ -234,6 +237,7 @@
+     // Epilogue
+     _preserved_marks->restore(heap->workers());
+     _preserved_marks->reclaim();
++    SlidingForwarding::end();
+   }
+ 
+   // Resize metaspace
+@@ -295,6 +299,7 @@ void ShenandoahFullGC::phase1_mark_heap() {
+     heap->parallel_cleaning(true /* full_gc */);
+ }
+ 
++template <bool ALT_FWD>
+ class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
+ private:
+   PreservedMarks* const _preserved_marks;
+@@ -364,7 +369,7 @@ public:
+     shenandoah_assert_not_forwarded(nullptr, p);
+     if (_compact_point != cast_from_oop<HeapWord*>(p)) {
+       _preserved_marks->push_if_necessary(p, p->mark());
+-      p->forward_to(cast_to_oop(_compact_point));
++      SlidingForwarding::forward_to<ALT_FWD>(p, cast_to_oop(_compact_point));
+     }
+     _compact_point += obj_size;
+   }
+@@ -396,6 +401,16 @@ public:
+   }
+ 
+   void work(uint worker_id) {
++    if (UseAltGCForwarding) {
++      work_impl<true>(worker_id);
++    } else {
++      work_impl<false>(worker_id);
++    }
++  }
++
++private:
++  template <bool ALT_FWD>
++  void work_impl(uint worker_id) {
+     ShenandoahParallelWorkerSession worker_session(worker_id);
+     ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
+     ShenandoahHeapRegionSetIterator it(slice);
+@@ -411,7 +426,7 @@
+ 
+     GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());
+ 
+-    ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);
++    ShenandoahPrepareForCompactionObjectClosure<ALT_FWD> cl(_preserved_marks->get(worker_id), empty_regions, from_region);
+ 
+     while (from_region != nullptr) {
+       assert(is_candidate_region(from_region), "Sanity");
+@@ -437,7 +452,8 @@
+   }
+ };
+ 
+-void ShenandoahFullGC::calculate_target_humongous_objects() {
++template <bool ALT_FWD>
++void ShenandoahFullGC::calculate_target_humongous_objects_impl() {
+   ShenandoahHeap* heap = ShenandoahHeap::heap();
+ 
+   // Compute the new addresses for humongous objects. We need to do this after addresses
+@@ -473,7 +489,7 @@
+     if (start >= to_begin && start != r->index()) {
+       // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
+       _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
+-      old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom()));
++      SlidingForwarding::forward_to<ALT_FWD>(old_obj, cast_to_oop(heap->get_region(start)->bottom()));
+       to_end = start;
+       continue;
+     }
+@@ -485,6 +501,14 @@
+   }
+ }
+ 
++void ShenandoahFullGC::calculate_target_humongous_objects() {
++  if (UseAltGCForwarding) {
++    calculate_target_humongous_objects_impl<true>();
++  } else {
++    calculate_target_humongous_objects_impl<false>();
++  }
++}
++
+ class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
+ private:
+   ShenandoahHeap* const _heap;
+@@ -722,6 +746,7 @@ void ShenandoahFullGC::phase2_calculate_target_addresses(ShenandoahHeapRegionSet
+   }
+ }
+ 
++template <bool ALT_FWD>
+ class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
+ private:
+   ShenandoahHeap* const _heap;
+@@ -733,8 +758,8 @@ private:
+     if (!CompressedOops::is_null(o)) {
+       oop obj = CompressedOops::decode_not_null(o);
+       assert(_ctx->is_marked(obj), "must be marked");
+-      if (obj->is_forwarded()) {
+-        oop forw = obj->forwardee();
++      if (SlidingForwarding::is_forwarded(obj)) {
++        oop forw = SlidingForwarding::forwardee<ALT_FWD>(obj);
+         RawAccess<IS_NOT_NULL>::oop_store(p, forw);
+       }
+     }
+@@ -751,10 +776,11 @@ public:
+   void do_nmethod(nmethod* nm) {}
+ };
+ 
++template <bool ALT_FWD>
+ class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
+ private:
+   ShenandoahHeap* const _heap;
+-  ShenandoahAdjustPointersClosure _cl;
++  ShenandoahAdjustPointersClosure<ALT_FWD> _cl;
+ 
+ public:
+   ShenandoahAdjustPointersObjectClosure() :
+@@ -777,9 +803,11 @@ public:
+     _heap(ShenandoahHeap::heap()) {
+   }
+ 
+-  void work(uint worker_id) {
++private:
++  template <bool ALT_FWD>
++  void work_impl(uint worker_id) {
+     ShenandoahParallelWorkerSession worker_session(worker_id);
+-    ShenandoahAdjustPointersObjectClosure obj_cl;
++    ShenandoahAdjustPointersObjectClosure<ALT_FWD> obj_cl;
+     ShenandoahHeapRegion* r = _regions.next();
+     while (r != nullptr) {
+       if (!r->is_humongous_continuation() && r->has_live()) {
+@@ -788,24 +816,45 @@ public:
+       r = _regions.next();
+     }
+   }
++
++public:
++  void work(uint worker_id) {
++    if (UseAltGCForwarding) {
++      work_impl<true>(worker_id);
++    } else {
++      work_impl<false>(worker_id);
++    }
++  }
+ };
+ 
+ class ShenandoahAdjustRootPointersTask : public WorkerTask {
+ private:
+   ShenandoahRootAdjuster* _rp;
+   PreservedMarksSet* _preserved_marks;
++
+ public:
+   ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
+     WorkerTask("Shenandoah Adjust Root Pointers"),
+     _rp(rp),
+     _preserved_marks(preserved_marks) {}
+ 
+-  void work(uint worker_id) {
++private:
++  template <bool ALT_FWD>
++  void work_impl(uint worker_id) {
+     ShenandoahParallelWorkerSession worker_session(worker_id);
+-    ShenandoahAdjustPointersClosure cl;
++    ShenandoahAdjustPointersClosure<ALT_FWD> cl;
+     _rp->roots_do(worker_id, &cl);
+     _preserved_marks->get(worker_id)->adjust_during_full_gc();
+   }
++
++public:
++  void work(uint worker_id) {
++    if (UseAltGCForwarding) {
++      work_impl<true>(worker_id);
++    } else {
++      work_impl<false>(worker_id);
++    }
++  }
+ };
+ 
+ void ShenandoahFullGC::phase3_update_references() {
+@@ -832,6 +881,7 @@ void ShenandoahFullGC::phase3_update_references() {
+   workers->run_task(&adjust_pointers_task);
+ }
+ 
++template <bool ALT_FWD>
+ class ShenandoahCompactObjectsClosure : public ObjectClosure {
+ private:
+   ShenandoahHeap* const _heap;
+@@ -844,9 +894,9 @@ public:
+   void do_object(oop p) {
+     assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
+     size_t size = p->size();
+-    if (p->is_forwarded()) {
++    if (SlidingForwarding::is_forwarded(p)) {
+       HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
+-      HeapWord* compact_to = cast_from_oop<HeapWord*>(p->forwardee());
++      HeapWord* compact_to = cast_from_oop<HeapWord*>(SlidingForwarding::forwardee<ALT_FWD>(p));
+       assert(compact_from != compact_to, "Forwarded object should move");
+       Copy::aligned_conjoint_words(compact_from, compact_to, size);
+       oop new_obj = cast_to_oop(compact_to);
+@@ -869,11 +919,13 @@ public:
+       _worker_slices(worker_slices) {
+   }
+ 
+-  void work(uint worker_id) {
++private:
++  template <bool ALT_FWD>
++  void work_impl(uint worker_id) {
+     ShenandoahParallelWorkerSession worker_session(worker_id);
+     ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);
+ 
+-    ShenandoahCompactObjectsClosure cl(worker_id);
++    ShenandoahCompactObjectsClosure<ALT_FWD> cl(worker_id);
+     ShenandoahHeapRegion* r = slice.next();
+     while (r != nullptr) {
+       assert(!r->is_humongous(), "must not get humongous regions here");
+@@ -884,6 +936,15 @@
+       r = slice.next();
+     }
+   }
++
++public:
++  void work(uint worker_id) {
++    if (UseAltGCForwarding) {
++      work_impl<true>(worker_id);
++    } else {
++      work_impl<false>(worker_id);
++    }
++  }
+ };
+ 
+ class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
+@@ -939,7 +1000,8 @@ public:
+   }
+ };
+ 
+-void ShenandoahFullGC::compact_humongous_objects() {
++template <bool ALT_FWD>
++void ShenandoahFullGC::compact_humongous_objects_impl() {
+   // Compact humongous regions, based on their fwdptr objects.
+   //
+   // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
+@@ -952,7 +1014,7 @@
+     ShenandoahHeapRegion* r = heap->get_region(c - 1);
+     if (r->is_humongous_start()) {
+       oop old_obj = cast_to_oop(r->bottom());
+-      if (!old_obj->is_forwarded()) {
++      if (SlidingForwarding::is_not_forwarded(old_obj)) {
+         // No need to move the object, it stays at the same slot
+         continue;
+       }
+@@ -961,7 +1023,7 @@
+ 
+       size_t old_start = r->index();
+       size_t old_end   = old_start + num_regions - 1;
+-      size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
++      size_t new_start = heap->heap_region_index_containing(SlidingForwarding::forwardee<ALT_FWD>(old_obj));
+       size_t new_end   = new_start + num_regions - 1;
+       assert(old_start != new_start, "must be real move");
+       assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());
+@@ -1002,6 +1064,14 @@
+   }
+ }
+ 
++void ShenandoahFullGC::compact_humongous_objects() {
++  if (UseAltGCForwarding) {
++    compact_humongous_objects_impl<true>();
++  } else {
++    compact_humongous_objects_impl<false>();
++  }
++}
++
+ // This is slightly different to ShHeap::reset_next_mark_bitmap:
+ // we need to remain able to walk pinned regions.
+ // Since pinned region do not move and don't get compacted, we will get holes with
+diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.hpp
+index 1c1653e59..af8c25bc1 100644
+--- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.hpp
++++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.hpp
+@@ -55,6 +55,7 @@ class VM_ShenandoahFullGC;
+ class ShenandoahDegenGC;
+ 
+ class ShenandoahFullGC : public ShenandoahGC {
++  template <bool ALT_FWD>
+   friend class ShenandoahPrepareForCompactionObjectClosure;
+   friend class VM_ShenandoahFullGC;
+   friend class ShenandoahDegenGC;
+@@ -83,7 +84,11 @@ private:
+   void phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices);
+ 
+   void distribute_slices(ShenandoahHeapRegionSet** worker_slices);
++  template <bool ALT_FWD>
++  void calculate_target_humongous_objects_impl();
+   void calculate_target_humongous_objects();
++  template <bool ALT_FWD>
++  void compact_humongous_objects_impl();
+   void compact_humongous_objects();
+ };
+ 
+diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
+index 8a395b6f7..daca7cc6d 100644
+--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
++++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
+@@ -34,6 +34,7 @@
+ #include "gc/shared/locationPrinter.inline.hpp"
+ #include "gc/shared/memAllocator.hpp"
+ #include "gc/shared/plab.hpp"
++#include "gc/shared/slidingForwarding.hpp"
+ #include "gc/shared/tlab_globals.hpp"
+ 
+ #include "gc/shenandoah/shenandoahBarrierSet.hpp"
+@@ -404,6 +405,8 @@ jint ShenandoahHeap::initialize() {
+ 
+   ShenandoahInitLogger::print();
+ 
++  SlidingForwarding::initialize(_heap_region, ShenandoahHeapRegion::region_size_words());
++
+   return JNI_OK;
+ }
+ 
+diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
+index 226190822..31d51c732 100644
+--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
++++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
+@@ -284,7 +284,7 @@ inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
+ 
+   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
+ 
+-  size_t size = p->size();
++  size_t size = p->forward_safe_size();
+ 
+   assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");
+ 
+@@ -319,11 +319,28 @@
+ 
+   // Copy the object:
+   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
+-
+-  // Try to install the new forwarding pointer.
+   oop copy_val = cast_to_oop(copy);
+-  ContinuationGCSupport::relativize_stack_chunk(copy_val);
+ 
++#ifdef AARCH64
++  if (UseCompactObjectHeaders) {
++    // The copy above is not atomic. Make sure we have seen the proper mark
++    // and re-install it into the copy, so that Klass* is guaranteed to be correct.
++    markWord mark = copy_val->mark();
++    if (!mark.is_marked()) {
++      copy_val->set_mark(mark);
++      ContinuationGCSupport::relativize_stack_chunk(copy_val);
++    } else {
++      // If we copied a mark-word that indicates 'forwarded' state, the object
++      // installation would not succeed. We cannot access Klass* anymore either.
++      // Skip the transformation.
++    }
++  } else
++#endif
++  {
++    ContinuationGCSupport::relativize_stack_chunk(copy_val);
++  }
++
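
The evacuation hunk above deals with a copy that races with other evacuating threads: each thread copies privately, and only the winner of the subsequent CAS publishes its copy, which is why a torn mark word in a not-yet-published copy can simply be skipped. A simplified sketch of that copy-then-CAS publication, with a plain pointer CAS standing in for the real mark-word CAS and all names illustrative:

    #include <atomic>
    #include <cassert>

    struct Obj {
      std::atomic<Obj*> forwardee{nullptr};
      int payload = 0;
    };

    static Obj* evacuate(Obj* from, Obj* my_copy) {
      my_copy->payload = from->payload;   // racy private copy
      Obj* expected = nullptr;
      if (from->forwardee.compare_exchange_strong(expected, my_copy)) {
        return my_copy;                   // won the race; our copy is public
      }
      return expected;                    // lost; discard our copy, use winner's
    }

    int main() {
      Obj a, c1, c2;
      a.payload = 42;
      assert(evacuate(&a, &c1) == &c1);   // first installer wins
      assert(evacuate(&a, &c2) == &c1);   // later caller sees the winner's copy
      return 0;
    }
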
++  // Try to install the new forwarding pointer.
+   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
+   if (result == copy_val) {
+     // Successfully evacuated. Our copy is now the public one!
+@@ -499,7 +516,7 @@ inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region,
+       oop obj = cast_to_oop(cs);
+       assert(oopDesc::is_oop(obj), "sanity");
+       assert(ctx->is_marked(obj), "object expected to be marked");
+-      size_t size = obj->size();
++      size_t size = obj->forward_safe_size();
+       cl->do_object(obj);
+       cs += size;
+     }
+diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
+index 159cbf7b6..285fa4508 100644
+--- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
++++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
+@@ -102,7 +102,7 @@ private:
+     T o = RawAccess<>::oop_load(p);
+     if (!CompressedOops::is_null(o)) {
+       oop obj = CompressedOops::decode_not_null(o);
+-      if (is_instance_ref_klass(obj->klass())) {
++      if (is_instance_ref_klass(obj->forward_safe_klass())) {
+         obj = ShenandoahForwarding::get_forwardee(obj);
+       }
+       // Single threaded verification can use faster non-atomic stack and bitmap
+@@ -129,7 +129,7 @@
+           "oop must be aligned");
+ 
+     ShenandoahHeapRegion *obj_reg = _heap->heap_region_containing(obj);
+-    Klass* obj_klass = obj->klass_or_null();
++    Klass* obj_klass = obj->forward_safe_klass();
+ 
+     // Verify that obj is not in dead space:
+     {
+@@ -144,11 +144,11 @@
+             "Object start should be within the region");
+ 
+     if (!obj_reg->is_humongous()) {
+-      check(ShenandoahAsserts::_safe_unknown, obj, (obj_addr + obj->size()) <= obj_reg->top(),
++      check(ShenandoahAsserts::_safe_unknown, obj, (obj_addr + obj->forward_safe_size()) <= obj_reg->top(),
+             "Object end should be within the region");
+     } else {
+       size_t humongous_start = obj_reg->index();
+-      size_t humongous_end = humongous_start + (obj->size() >> ShenandoahHeapRegion::region_size_words_shift());
++      size_t humongous_end = humongous_start + (obj->forward_safe_size() >> ShenandoahHeapRegion::region_size_words_shift());
+       for (size_t idx = humongous_start + 1; idx < humongous_end; idx++) {
+         check(ShenandoahAsserts::_safe_unknown, obj, _heap->get_region(idx)->is_humongous_continuation(),
+               "Humongous object is in continuation that fits it");
+@@ -165,7 +165,7 @@
+       // skip
+       break;
+     case ShenandoahVerifier::_verify_liveness_complete:
+-      Atomic::add(&_ld[obj_reg->index()], (uint) obj->size(), memory_order_relaxed);
++      Atomic::add(&_ld[obj_reg->index()], (uint) obj->forward_safe_size(), memory_order_relaxed);
+       // fallthrough for fast failure for un-live regions:
+     case ShenandoahVerifier::_verify_liveness_conservative:
+       check(ShenandoahAsserts::_safe_oop, obj, obj_reg->has_live(),
+@@ -209,7 +209,7 @@
+     HeapWord *fwd_addr = cast_from_oop<HeapWord*>(fwd);
+     check(ShenandoahAsserts::_safe_oop, obj, fwd_addr < fwd_reg->top(),
+           "Forwardee start should be within the region");
+-    check(ShenandoahAsserts::_safe_oop, obj, (fwd_addr + fwd->size()) <= fwd_reg->top(),
++    check(ShenandoahAsserts::_safe_oop, obj, (fwd_addr + fwd->forward_safe_size()) <= fwd_reg->top(),
+           "Forwardee end should be within the region");
+ 
+     oop fwd2 = ShenandoahForwarding::get_forwardee_raw_unchecked(fwd);
+@@ -312,7 +312,8 @@ public:
+    */
+   void verify_oops_from(oop obj) {
+     _loc = obj;
+-    obj->oop_iterate(this);
++    Klass* klass = obj->forward_safe_klass();
++    obj->oop_iterate_backwards(this, klass);
+     _loc = nullptr;
+   }
+ 
+@@ -592,7 +593,7 @@ public:
+ 
+       // Verify everything reachable from that object too, hopefully realizing
+       // everything was already marked, and never touching further:
+-      if (!is_instance_ref_klass(obj->klass())) {
++      if (!is_instance_ref_klass(obj->forward_safe_klass())) {
+         cl.verify_oops_from(obj);
+         (*processed)++;
+       }
+diff --git a/src/hotspot/share/gc/x/c2/xBarrierSetC2.cpp b/src/hotspot/share/gc/x/c2/xBarrierSetC2.cpp
+index d006b37e7..06de533d2 100644
+--- a/src/hotspot/share/gc/x/c2/xBarrierSetC2.cpp
++++ b/src/hotspot/share/gc/x/c2/xBarrierSetC2.cpp
+@@ -298,7 +298,7 @@ void XBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* a
+     assert(src_offset == dest_offset, "should be equal");
+     jlong offset = src_offset->get_long();
+     if (offset != arrayOopDesc::base_offset_in_bytes(T_OBJECT)) {
+-      assert(!UseCompressedClassPointers, "should only happen without compressed class pointers");
++      assert(!UseCompressedClassPointers AARCH64_ONLY(|| UseCompactObjectHeaders), "should only happen without compressed class pointers");
+       assert((arrayOopDesc::base_offset_in_bytes(T_OBJECT) - offset) == BytesPerLong, "unexpected offset");
+       length = phase->transform_later(new SubLNode(length, phase->longcon(1))); // Size is in longs
+       src_offset = phase->longcon(arrayOopDesc::base_offset_in_bytes(T_OBJECT));
+diff --git a/src/hotspot/share/gc/x/xObjArrayAllocator.cpp b/src/hotspot/share/gc/x/xObjArrayAllocator.cpp
+index 9408e027c..0a0c545f8 100644
+--- a/src/hotspot/share/gc/x/xObjArrayAllocator.cpp
++++ b/src/hotspot/share/gc/x/xObjArrayAllocator.cpp
+@@ -50,7 +50,17 @@ oop XObjArrayAllocator::initialize(HeapWord* mem) const {
+   // time and time-to-safepoint
+   const size_t segment_max = XUtils::bytes_to_words(64 * K);
+   const BasicType element_type = ArrayKlass::cast(_klass)->element_type();
+-  const size_t header = arrayOopDesc::header_size(element_type);
++
++  // Clear leading 32 bits, if necessary.
++  int base_offset = arrayOopDesc::base_offset_in_bytes(element_type);
++  if (!is_aligned(base_offset, HeapWordSize)) {
++    assert(is_aligned(base_offset, BytesPerInt), "array base must be 32 bit aligned");
++    *reinterpret_cast<int*>(reinterpret_cast<char*>(mem) + base_offset) = 0;
++    base_offset += BytesPerInt;
++  }
++  assert(is_aligned(base_offset, HeapWordSize), "remaining array base must be 64 bit aligned");
++
++  const size_t header = heap_word_size(base_offset);
+   const size_t payload_size = _word_size - header;
+ 
+   if (payload_size <= segment_max) {
+@@ -63,8 +73,15 @@ oop XObjArrayAllocator::initialize(HeapWord* mem) const {
+     // The array is going to be exposed before it has been completely
+     // cleared, therefore we can't expose the header at the end of this
+     // function. Instead explicitly initialize it according to our needs.
+-    arrayOopDesc::set_mark(mem, markWord::prototype());
+-    arrayOopDesc::release_set_klass(mem, _klass);
++#ifdef AARCH64
++    if (UseCompactObjectHeaders) {
++      arrayOopDesc::release_set_mark(mem, _klass->prototype_header());
++    } else
++#endif
++    {
++      arrayOopDesc::set_mark(mem, markWord::prototype());
++      arrayOopDesc::release_set_klass(mem, _klass);
++    }
+     assert(_length >= 0, "length should be non-negative");
+     arrayOopDesc::set_length(mem, _length);
+ 
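
The same base-offset fix-up recurs in the Z allocator below. As a worked example of the arithmetic, under the assumption of 8-byte compact headers an int-array base lands at 8 (mark) + 4 (length) = 12 bytes, which is 4-byte but not 8-byte aligned; the 4 slack bytes at offset 12 are zeroed and the header is then treated as 16 bytes (2 heap words) for segmented clearing. A small sketch with illustrative constants:

    #include <cassert>
    #include <cstring>

    static const int HeapWordSize = 8;
    static const int BytesPerInt  = 4;

    // Mirrors the hunks above and below: if the element base is only 4-byte
    // aligned, zero the slack bytes and round the header up to whole words.
    static int header_words(char* mem, int base_offset) {
      if (base_offset % HeapWordSize != 0) {
        assert(base_offset % BytesPerInt == 0);
        std::memset(mem + base_offset, 0, BytesPerInt);  // clear leading 32 bits
        base_offset += BytesPerInt;
      }
      assert(base_offset % HeapWordSize == 0);
      return base_offset / HeapWordSize;
    }

    int main() {
      char obj[32] = {0};
      assert(header_words(obj, 12) == 2);  // compact header: 12 -> clear -> 16
      assert(header_words(obj, 16) == 2);  // already aligned: nothing to clear
      return 0;
    }
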
+diff --git a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp
+index b9aced7d6..f436be90b 100644
+--- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp
++++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp
+@@ -483,7 +483,7 @@ void ZBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* a
+     assert(src_offset == dest_offset, "should be equal");
+     const jlong offset = src_offset->get_long();
+     if (offset != arrayOopDesc::base_offset_in_bytes(T_OBJECT)) {
+-      assert(!UseCompressedClassPointers, "should only happen without compressed class pointers");
++      assert(!UseCompressedClassPointers AARCH64_ONLY(|| UseCompactObjectHeaders), "should only happen without compressed class pointers");
+       assert((arrayOopDesc::base_offset_in_bytes(T_OBJECT) - offset) == BytesPerLong, "unexpected offset");
+       length = phase->transform_later(new SubLNode(length, phase->longcon(1))); // Size is in longs
+       src_offset = phase->longcon(arrayOopDesc::base_offset_in_bytes(T_OBJECT));
+diff --git a/src/hotspot/share/gc/z/zObjArrayAllocator.cpp b/src/hotspot/share/gc/z/zObjArrayAllocator.cpp
+index c65f0d613..59054ca82 100644
+--- a/src/hotspot/share/gc/z/zObjArrayAllocator.cpp
++++ b/src/hotspot/share/gc/z/zObjArrayAllocator.cpp
+@@ -50,7 +50,17 @@ oop ZObjArrayAllocator::initialize(HeapWord* mem) const {
+   // time and time-to-safepoint
+   const size_t segment_max = ZUtils::bytes_to_words(64 * K);
+   const BasicType element_type = ArrayKlass::cast(_klass)->element_type();
+-  const size_t header = arrayOopDesc::header_size(element_type);
++
++  // Clear leading 32 bits, if necessary.
++  int base_offset = arrayOopDesc::base_offset_in_bytes(element_type);
++  if (!is_aligned(base_offset, HeapWordSize)) {
++    assert(is_aligned(base_offset, BytesPerInt), "array base must be 32 bit aligned");
++    *reinterpret_cast<int*>(reinterpret_cast<char*>(mem) + base_offset) = 0;
++    base_offset += BytesPerInt;
++  }
++  assert(is_aligned(base_offset, HeapWordSize), "remaining array base must be 64 bit aligned");
++
++  const size_t header = heap_word_size(base_offset);
+   const size_t payload_size = _word_size - header;
+ 
+   if (payload_size <= segment_max) {
+@@ -66,8 +76,15 @@ oop ZObjArrayAllocator::initialize(HeapWord* mem) const {
+ 
+     // Signal to the ZIterator that this is an invisible root, by setting
+     // the mark word to "marked". Reset to prototype() after the clearing.
+- arrayOopDesc::set_mark(mem, markWord::prototype().set_marked()); +- arrayOopDesc::release_set_klass(mem, _klass); ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders) { ++ arrayOopDesc::release_set_mark(mem, _klass->prototype_header().set_marked()); ++ } else ++#endif ++ { ++ arrayOopDesc::set_mark(mem, markWord::prototype().set_marked()); ++ arrayOopDesc::release_set_klass(mem, _klass); ++ } + assert(_length >= 0, "length should be non-negative"); + arrayOopDesc::set_length(mem, _length); + +@@ -135,7 +152,14 @@ oop ZObjArrayAllocator::initialize(HeapWord* mem) const { + ZThreadLocalData::clear_invisible_root(_thread); + + // Signal to the ZIterator that this is no longer an invisible root +- oopDesc::release_set_mark(mem, markWord::prototype()); ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders) { ++ oopDesc::release_set_mark(mem, _klass->prototype_header()); ++ } else ++#endif ++ { ++ oopDesc::release_set_mark(mem, markWord::prototype()); ++ } + + return cast_to_oop(mem); + } +diff --git a/src/hotspot/share/gc/z/zRelocate.cpp b/src/hotspot/share/gc/z/zRelocate.cpp +index 281553f7f..d84eab920 100644 +--- a/src/hotspot/share/gc/z/zRelocate.cpp ++++ b/src/hotspot/share/gc/z/zRelocate.cpp +@@ -621,7 +621,6 @@ private: + zaddress try_relocate_object_inner(zaddress from_addr) { + ZForwardingCursor cursor; + +- const size_t size = ZUtils::object_size(from_addr); + ZPage* const to_page = target(_forwarding->to_age()); + + // Lookup forwarding +@@ -629,12 +628,14 @@ private: + const zaddress to_addr = forwarding_find(_forwarding, from_addr, &cursor); + if (!is_null(to_addr)) { + // Already relocated ++ const size_t size = ZUtils::object_size(to_addr); + increase_other_forwarded(size); + return to_addr; + } + } + + // Allocate object ++ const size_t size = ZUtils::object_size(from_addr); + const zaddress allocated_addr = _allocator->alloc_object(to_page, size); + if (is_null(allocated_addr)) { + // Allocation failed +diff --git a/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp b/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp +index 42a8c6cab..fefcdd8c3 100644 +--- a/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp ++++ b/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp +@@ -1997,10 +1997,13 @@ run: + } + + // Initialize header, mirrors MemAllocator. +- oopDesc::set_mark(result, markWord::prototype()); +- oopDesc::set_klass_gap(result, 0); +- oopDesc::release_set_klass(result, ik); +- ++ if (UseCompactObjectHeaders) { ++ oopDesc::release_set_mark(result, ik->prototype_header()); ++ } else { ++ oopDesc::set_mark(result, markWord::prototype()); ++ oopDesc::set_klass_gap(result, 0); ++ oopDesc::release_set_klass(result, ik); ++ } + oop obj = cast_to_oop(result); + + // Must prevent reordering of stores for object initialization +diff --git a/src/hotspot/share/jfr/leakprofiler/chains/objectSampleMarker.hpp b/src/hotspot/share/jfr/leakprofiler/chains/objectSampleMarker.hpp +index 13b55c34e..dd8fc2cc4 100644 +--- a/src/hotspot/share/jfr/leakprofiler/chains/objectSampleMarker.hpp ++++ b/src/hotspot/share/jfr/leakprofiler/chains/objectSampleMarker.hpp +@@ -70,7 +70,7 @@ class ObjectSampleMarker : public StackObj { + // now we will set the mark word to "marked" in order to quickly + // identify sample objects during the reachability search from gc roots. 
+ assert(!obj->mark().is_marked(), "should only mark an object once"); +- obj->set_mark(markWord::prototype().set_marked()); ++ obj->set_mark(obj->prototype_mark().set_marked()); + assert(obj->mark().is_marked(), "invariant"); + } + }; +diff --git a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp +index df9f866b9..9645b4a35 100644 +--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp ++++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp +@@ -2388,7 +2388,7 @@ C2V_END + + C2V_VMENTRY_0(jint, arrayBaseOffset, (JNIEnv* env, jobject, jchar type_char)) + BasicType type = JVMCIENV->typeCharToBasicType(type_char, JVMCI_CHECK_0); +- return arrayOopDesc::header_size(type) * HeapWordSize; ++ return arrayOopDesc::base_offset_in_bytes(type); + C2V_END + + C2V_VMENTRY_0(jint, arrayIndexScale, (JNIEnv* env, jobject, jchar type_char)) +diff --git a/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp b/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp +index 1b36905fe..b1222f9fb 100644 +--- a/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp ++++ b/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp +@@ -275,7 +275,6 @@ JVMCIObjectArray CompilerToVM::initialize_intrinsics(JVMCI_TRAPS) { + do_bool_flag(Inline) \ + do_intx_flag(JVMCICounterSize) \ + do_bool_flag(JVMCIPrintProperties) \ +- do_bool_flag(JVMCIUseFastLocking) \ + do_int_flag(ObjectAlignmentInBytes) \ + do_bool_flag(PrintInlining) \ + do_bool_flag(ReduceInitialCardMarks) \ +diff --git a/src/hotspot/share/jvmci/jvmci_globals.cpp b/src/hotspot/share/jvmci/jvmci_globals.cpp +index c8e504b24..4a8f2dcbd 100644 +--- a/src/hotspot/share/jvmci/jvmci_globals.cpp ++++ b/src/hotspot/share/jvmci/jvmci_globals.cpp +@@ -123,7 +123,6 @@ bool JVMCIGlobals::check_jvmci_flags_are_consistent() { + CHECK_NOT_SET(JVMCITraceLevel, EnableJVMCI) + CHECK_NOT_SET(JVMCICounterSize, EnableJVMCI) + CHECK_NOT_SET(JVMCICountersExcludeCompiler, EnableJVMCI) +- CHECK_NOT_SET(JVMCIUseFastLocking, EnableJVMCI) + CHECK_NOT_SET(JVMCINMethodSizeLimit, EnableJVMCI) + CHECK_NOT_SET(JVMCIPrintProperties, EnableJVMCI) + CHECK_NOT_SET(JVMCIThreadsPerNativeLibraryRuntime, EnableJVMCI) +diff --git a/src/hotspot/share/jvmci/jvmci_globals.hpp b/src/hotspot/share/jvmci/jvmci_globals.hpp +index bb6eeb8c8..c2e8954c0 100644 +--- a/src/hotspot/share/jvmci/jvmci_globals.hpp ++++ b/src/hotspot/share/jvmci/jvmci_globals.hpp +@@ -127,9 +127,6 @@ class fileStream; + product(bool, JVMCICountersExcludeCompiler, true, EXPERIMENTAL, \ + "Exclude JVMCI compiler threads from benchmark counters") \ + \ +- develop(bool, JVMCIUseFastLocking, true, \ +- "Use fast inlined locking code") \ +- \ + product(intx, JVMCINMethodSizeLimit, (80*K)*wordSize, EXPERIMENTAL, \ + "Maximum size of a compiled method.") \ + range(0, max_jint) \ +diff --git a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp +index a4195a04f..add524d92 100644 +--- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp ++++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp +@@ -216,9 +216,12 @@ + nonstatic_field(JavaThread, _poll_data, SafepointMechanism::ThreadData) \ + nonstatic_field(JavaThread, _stack_overflow_state._reserved_stack_activation, address) \ + nonstatic_field(JavaThread, _held_monitor_count, int64_t) \ ++ nonstatic_field(JavaThread, _lock_stack, LockStack) \ + JVMTI_ONLY(nonstatic_field(JavaThread, _is_in_VTMS_transition, bool)) \ + JVMTI_ONLY(nonstatic_field(JavaThread, _is_in_tmp_VTMS_transition, bool)) \ + \ ++ nonstatic_field(LockStack, _top, uint32_t) \ ++ \ + 
JVMTI_ONLY(static_field(JvmtiVTMSTransitionDisabler, _VTMS_notify_jvmti_events, bool)) \
+                                                                           \
+   static_field(java_lang_Class, _klass_offset, int)                       \
+@@ -493,6 +496,7 @@
+   declare_constant(BranchData::not_taken_off_set)                         \
+                                                                           \
+   declare_constant_with_value("CardTable::dirty_card", CardTable::dirty_card_val()) \
++  declare_constant_with_value("LockStack::_end_offset", LockStack::end_offset()) \
+                                                                           \
+   declare_constant(CodeInstaller::VERIFIED_ENTRY)                         \
+   declare_constant(CodeInstaller::UNVERIFIED_ENTRY)                       \
+@@ -677,6 +681,10 @@
+   declare_constant(InstanceKlass::being_initialized)                      \
+   declare_constant(InstanceKlass::fully_initialized)                      \
+                                                                           \
++  declare_constant(LockingMode::LM_MONITOR)                               \
++  declare_constant(LockingMode::LM_LEGACY)                                \
++  declare_constant(LockingMode::LM_LIGHTWEIGHT)                           \
++                                                                          \
+   /*********************************/                                     \
+   /* InstanceKlass _misc_flags */                                         \
+   /*********************************/                                     \
+@@ -724,6 +732,8 @@
+   AARCH64_ONLY(declare_constant(NMethodPatchingType::conc_instruction_and_data_patch)) \
+   AARCH64_ONLY(declare_constant(NMethodPatchingType::conc_data_patch))    \
+                                                                           \
++  declare_constant(ObjectMonitor::ANONYMOUS_OWNER)                        \
++                                                                          \
+   declare_constant(ReceiverTypeData::nonprofiled_count_off_set)           \
+   declare_constant(ReceiverTypeData::receiver_type_row_cell_count)        \
+   declare_constant(ReceiverTypeData::receiver0_offset)                    \
+diff --git a/src/hotspot/share/memory/universe.cpp b/src/hotspot/share/memory/universe.cpp
+index 4b47cd0bc..f1a61cb61 100644
+--- a/src/hotspot/share/memory/universe.cpp
++++ b/src/hotspot/share/memory/universe.cpp
+@@ -325,8 +325,16 @@ void Universe::genesis(TRAPS) {
+   HandleMark hm(THREAD);
+ 
+   // Explicit null checks are needed if these offsets are not smaller than the page size
+-  assert(oopDesc::klass_offset_in_bytes() < static_cast<intptr_t>(os::vm_page_size()),
+-         "Klass offset is expected to be less than the page size");
++#ifdef AARCH64
++  if (UseCompactObjectHeaders) {
++    assert(oopDesc::mark_offset_in_bytes() < static_cast<intptr_t>(os::vm_page_size()),
++           "Mark offset is expected to be less than the page size");
++  } else
++#endif
++  {
++    assert(oopDesc::klass_offset_in_bytes() < static_cast<intptr_t>(os::vm_page_size()),
++           "Klass offset is expected to be less than the page size");
++  }
+   assert(arrayOopDesc::length_offset_in_bytes() < static_cast<intptr_t>(os::vm_page_size()),
+          "Array length offset is expected to be less than the page size");
+ 
+diff --git a/src/hotspot/share/oops/arrayOop.hpp b/src/hotspot/share/oops/arrayOop.hpp
+index 0d265ea70..6e7f2c075 100644
+--- a/src/hotspot/share/oops/arrayOop.hpp
++++ b/src/hotspot/share/oops/arrayOop.hpp
+@@ -27,6 +27,7 @@
+ 
+ #include "oops/oop.hpp"
+ #include "utilities/align.hpp"
++#include "utilities/globalDefinitions.hpp"
+ 
+ // arrayOopDesc is the abstract baseclass for all arrays.  It doesn't
+ // declare pure virtual to enforce this because that would allocate a vtbl
+@@ -45,47 +46,60 @@ class arrayOopDesc : public oopDesc {
+ 
+   // Interpreter/Compiler offsets
+ 
+-  // Header size computation.
+-  // The header is considered the oop part of this type plus the length.
+-  // Returns the aligned header_size_in_bytes. This is not equivalent to
+-  // sizeof(arrayOopDesc) which should not appear in the code.
+-  static int header_size_in_bytes() {
+-    size_t hs = align_up(length_offset_in_bytes() + sizeof(int),
+-                         HeapWordSize);
+-#ifdef ASSERT
+-    // make sure it isn't called before UseCompressedOops is initialized.
+-    static size_t arrayoopdesc_hs = 0;
+-    if (arrayoopdesc_hs == 0) arrayoopdesc_hs = hs;
+-    assert(arrayoopdesc_hs == hs, "header size can't change");
+-#endif // ASSERT
+-    return (int)hs;
+-  }
+-
++private:
+   // Returns the address of the length "field".  See length_offset_in_bytes().
+   static int* length_addr_impl(void* obj_ptr) {
+     char* ptr = static_cast<char*>(obj_ptr);
+     return reinterpret_cast<int*>(ptr + length_offset_in_bytes());
+   }
+ 
+-  // Check whether an element of a typeArrayOop with the given type must be
+-  // aligned 0 mod 8.  The typeArrayOop itself must be aligned at least this
++  // Check whether an element of an arrayOop with the given type must be
++  // aligned 0 mod 8.  The arrayOop itself must be aligned at least this
+   // strongly.
+   static bool element_type_should_be_aligned(BasicType type) {
++#ifdef _LP64
++    if (type == T_OBJECT || type == T_ARRAY) {
++      return !UseCompressedOops;
++    }
++#endif
+     return type == T_DOUBLE || type == T_LONG;
+   }
+ 
+  public:
++  // Header size computation.
++  // The header is considered the oop part of this type plus the length.
++  // This is not equivalent to sizeof(arrayOopDesc) which should not appear in the code.
++  static int header_size_in_bytes() {
++    size_t hs = length_offset_in_bytes() + sizeof(int);
++#ifdef ASSERT
++    // make sure it isn't called before UseCompressedOops is initialized.
++    static size_t arrayoopdesc_hs = 0;
++    if (arrayoopdesc_hs == 0) arrayoopdesc_hs = hs;
++    assert(arrayoopdesc_hs == hs, "header size can't change");
++#endif // ASSERT
++    return (int)hs;
++  }
++
+   // The _length field is not declared in C++.  It is allocated after the
+   // declared nonstatic fields in arrayOopDesc if not compressed, otherwise
+   // it occupies the second half of the _klass field in oopDesc.
+   static int length_offset_in_bytes() {
+-    return UseCompressedClassPointers ? klass_gap_offset_in_bytes() :
+-                                        sizeof(arrayOopDesc);
++#ifdef AARCH64
++    if (UseCompactObjectHeaders) {
++      return oopDesc::base_offset_in_bytes();
++    } else
++#endif
++    if (UseCompressedClassPointers) {
++      return klass_gap_offset_in_bytes();
++    } else {
++      return sizeof(arrayOopDesc);
++    }
+   }
+ 
+   // Returns the offset of the first element.
+   static int base_offset_in_bytes(BasicType type) {
+-    return header_size(type) * HeapWordSize;
++    size_t hs = header_size_in_bytes();
++    return (int)(element_type_should_be_aligned(type) ? align_up(hs, BytesPerLong) : hs);
+   }
+ 
+   // Returns the address of the first element.  The elements in the array will not
+@@ -122,18 +136,7 @@
+     *length_addr_impl(mem) = length;
+   }
+ 
+-  // Should only be called with constants as argument
+-  // (will not constant fold otherwise)
+-  // Returns the header size in words aligned to the requirements of the
+-  // array object type.
+-  static int header_size(BasicType type) {
+-    size_t typesize_in_bytes = header_size_in_bytes();
+-    return (int)(element_type_should_be_aligned(type)
+-                 ? align_object_offset(typesize_in_bytes/HeapWordSize)
+-                 : typesize_in_bytes/HeapWordSize);
+-  }
+-
+-  // Return the maximum length of an array of BasicType. The length can passed
++  // Return the maximum length of an array of BasicType.  The length can be passed
+   // to typeArrayOop::object_size(scale, length, header_size) without causing an
+   // overflow. We also need to make sure that this will not overflow a size_t on
+   // 32 bit platforms when we convert it to a byte size.
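
A worked example of the new base_offset_in_bytes() arithmetic above: the header ends 4 bytes after the _length field, and only 8-byte element types get the base rounded up. The sample offsets assume a 64-bit VM (8-byte mark word, 4-byte _length) and are illustrative:

    #include <cassert>

    // hs = offset of _length + 4; 8-byte element types round the base up.
    static int base_offset(int length_offset, bool align8) {
      int hs = length_offset + 4;            // header ends right after _length
      return align8 ? ((hs + 7) & ~7) : hs;  // align_up(hs, BytesPerLong)
    }

    int main() {
      // Compact headers: _length at 8 -> int-array base 12, long-array base 16.
      assert(base_offset(8, false) == 12);
      assert(base_offset(8, true)  == 16);
      // Compressed-class headers: _length at 12 -> base 16 for both.
      assert(base_offset(12, false) == 16);
      assert(base_offset(12, true)  == 16);
      return 0;
    }
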
+@@ -141,8 +144,12 @@ class arrayOopDesc : public oopDesc { + assert(type >= 0 && type < T_CONFLICT, "wrong type"); + assert(type2aelembytes(type) != 0, "wrong type"); + ++ size_t hdr_size_in_bytes = base_offset_in_bytes(type); ++ // This is rounded-up and may overlap with the first array elements. ++ size_t hdr_size_in_words = align_up(hdr_size_in_bytes, HeapWordSize) / HeapWordSize; ++ + const size_t max_element_words_per_size_t = +- align_down((SIZE_MAX/HeapWordSize - header_size(type)), MinObjAlignment); ++ align_down((SIZE_MAX/HeapWordSize - hdr_size_in_words), MinObjAlignment); + const size_t max_elements_per_size_t = + HeapWordSize * max_element_words_per_size_t / type2aelembytes(type); + if ((size_t)max_jint < max_elements_per_size_t) { +@@ -150,7 +157,7 @@ class arrayOopDesc : public oopDesc { + // (CollectedHeap, Klass::oop_oop_iterate(), and more) uses an int for + // passing around the size (in words) of an object. So, we need to avoid + // overflowing an int when we add the header. See CRs 4718400 and 7110613. +- return align_down(max_jint - header_size(type), MinObjAlignment); ++ return align_down(max_jint - hdr_size_in_words, MinObjAlignment); + } + return (int32_t)max_elements_per_size_t; + } +diff --git a/src/hotspot/share/oops/instanceOop.hpp b/src/hotspot/share/oops/instanceOop.hpp +index 8de3b1a74..bd7950b24 100644 +--- a/src/hotspot/share/oops/instanceOop.hpp ++++ b/src/hotspot/share/oops/instanceOop.hpp +@@ -33,15 +33,18 @@ + + class instanceOopDesc : public oopDesc { + public: +- // aligned header size. +- static int header_size() { return sizeof(instanceOopDesc)/HeapWordSize; } +- + // If compressed, the offset of the fields of the instance may not be aligned. + static int base_offset_in_bytes() { +- return (UseCompressedClassPointers) ? +- klass_gap_offset_in_bytes() : +- sizeof(instanceOopDesc); +- ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders) { ++ return oopDesc::base_offset_in_bytes(); ++ } else ++#endif ++ if (UseCompressedClassPointers) { ++ return klass_gap_offset_in_bytes(); ++ } else { ++ return sizeof(instanceOopDesc); ++ } + } + }; + +diff --git a/src/hotspot/share/oops/klass.cpp b/src/hotspot/share/oops/klass.cpp +index 668dc6d2c..490c94a9d 100644 +--- a/src/hotspot/share/oops/klass.cpp ++++ b/src/hotspot/share/oops/klass.cpp +@@ -251,11 +251,22 @@ void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word + return Metaspace::allocate(loader_data, word_size, MetaspaceObj::ClassType, THREAD); + } + ++static markWord make_prototype(Klass* kls) { ++ markWord prototype = markWord::prototype(); ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders) { ++ prototype = prototype.set_klass(kls); ++ } ++#endif ++ return prototype; ++} ++ + // "Normal" instantiation is preceded by a MetaspaceObj allocation + // which zeros out memory - calloc equivalent. + // The constructor is also used from CppVtableCloner, + // which doesn't zero out the memory before calling the constructor. 
+ Klass::Klass(KlassKind kind) : _kind(kind), ++ _prototype_header(make_prototype(this)), + _shared_class_path_index(-1) { + CDS_ONLY(_shared_class_flags = 0;) + CDS_JAVA_HEAP_ONLY(_archived_mirror_index = -1;) +@@ -960,6 +971,12 @@ void Klass::oop_print_on(oop obj, outputStream* st) { + // print header + obj->mark().print_on(st); + st->cr(); ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders) { ++ st->print(BULLET"prototype_header: " INTPTR_FORMAT, _prototype_header.value()); ++ st->cr(); ++ } ++#endif + } + + // print class +diff --git a/src/hotspot/share/oops/klass.hpp b/src/hotspot/share/oops/klass.hpp +index ae9a731be..2706c56ce 100644 +--- a/src/hotspot/share/oops/klass.hpp ++++ b/src/hotspot/share/oops/klass.hpp +@@ -171,6 +171,8 @@ class Klass : public Metadata { + // contention that may happen when a nearby object is modified. + AccessFlags _access_flags; // Access flags. The class/interface distinction is stored here. + ++ markWord _prototype_header; // Used to initialize objects' header ++ + JFR_ONLY(DEFINE_TRACE_ID_FIELD;) + + private: +@@ -705,6 +707,15 @@ protected: + bool is_cloneable() const; + void set_is_cloneable(); + ++#ifdef AARCH64 ++ markWord prototype_header() const { ++ assert(UseCompactObjectHeaders, "only use with compact object headers"); ++ return _prototype_header; ++ } ++ inline void set_prototype_header(markWord header); ++#endif ++ static ByteSize prototype_header_offset() { return in_ByteSize(offset_of(Klass, _prototype_header)); } ++ + JFR_ONLY(DEFINE_TRACE_ID_METHODS;) + + virtual void metaspace_pointers_do(MetaspaceClosure* iter); +diff --git a/src/hotspot/share/oops/klass.inline.hpp b/src/hotspot/share/oops/klass.inline.hpp +index a72868a08..85f124596 100644 +--- a/src/hotspot/share/oops/klass.inline.hpp ++++ b/src/hotspot/share/oops/klass.inline.hpp +@@ -52,6 +52,13 @@ inline bool Klass::is_loader_alive() const { + return class_loader_data()->is_alive(); + } + ++#ifdef AARCH64 ++inline void Klass::set_prototype_header(markWord header) { ++ assert(UseCompactObjectHeaders, "only with compact headers"); ++ _prototype_header = header; ++} ++#endif ++ + inline oop Klass::java_mirror() const { + return _java_mirror.resolve(); + } +diff --git a/src/hotspot/share/oops/markWord.hpp b/src/hotspot/share/oops/markWord.hpp +index d7dc61d80..002fab662 100644 +--- a/src/hotspot/share/oops/markWord.hpp ++++ b/src/hotspot/share/oops/markWord.hpp +@@ -25,6 +25,7 @@ + #ifndef SHARE_OOPS_MARKWORD_HPP + #define SHARE_OOPS_MARKWORD_HPP + ++#include "gc/shared/gc_globals.hpp" + #include "metaprogramming/primitiveConversions.hpp" + #include "oops/oopsHierarchy.hpp" + #include "runtime/globals.hpp" +@@ -43,6 +44,10 @@ + // -------- + // unused:25 hash:31 -->| unused_gap:1 age:4 unused_gap:1 lock:2 (normal object) + // ++// 64 bits (with compact headers): ++// ------------------------------- ++// nklass:32 hash:25 -->| unused_gap:1 age:4 self-fwded:1 lock:2 (normal object) ++// + // - hash contains the identity hash value: largest value is + // 31 bits, see os::random(). 
Also, 64-bit vm's require + // a hash value no bigger than 32 bits because they will not +@@ -103,21 +108,43 @@ class markWord { + // Constants + static const int age_bits = 4; + static const int lock_bits = 2; +- static const int first_unused_gap_bits = 1; +- static const int max_hash_bits = BitsPerWord - age_bits - lock_bits - first_unused_gap_bits; ++ static const int self_forwarded_bits = 1; ++ static const int max_hash_bits = BitsPerWord - age_bits - lock_bits - self_forwarded_bits; + static const int hash_bits = max_hash_bits > 31 ? 31 : max_hash_bits; +- static const int second_unused_gap_bits = LP64_ONLY(1) NOT_LP64(0); ++ static const int hash_bits_compact = max_hash_bits > 25 ? 25 : max_hash_bits; ++ // Used only without compact headers. ++ static const int unused_gap_bits = LP64_ONLY(1) NOT_LP64(0); ++#ifdef AARCH64 ++ // Used only with compact headers. ++ static const int klass_bits = 32; ++#endif + + static const int lock_shift = 0; +- static const int age_shift = lock_bits + first_unused_gap_bits; +- static const int hash_shift = age_shift + age_bits + second_unused_gap_bits; ++ static const int self_forwarded_shift = lock_shift + lock_bits; ++ static const int age_shift = self_forwarded_shift + self_forwarded_bits; ++ static const int hash_shift = age_shift + age_bits + unused_gap_bits; ++ static const int hash_shift_compact = age_shift + age_bits; ++#ifdef AARCH64 ++ // Used only with compact headers. ++ static const int klass_shift = hash_shift_compact + hash_bits_compact; ++#endif + + static const uintptr_t lock_mask = right_n_bits(lock_bits); + static const uintptr_t lock_mask_in_place = lock_mask << lock_shift; ++ static const uintptr_t self_forwarded_mask = right_n_bits(self_forwarded_bits); ++ static const uintptr_t self_forwarded_mask_in_place = self_forwarded_mask << self_forwarded_shift; + static const uintptr_t age_mask = right_n_bits(age_bits); + static const uintptr_t age_mask_in_place = age_mask << age_shift; + static const uintptr_t hash_mask = right_n_bits(hash_bits); + static const uintptr_t hash_mask_in_place = hash_mask << hash_shift; ++ static const uintptr_t hash_mask_compact = right_n_bits(hash_bits_compact); ++ static const uintptr_t hash_mask_compact_in_place = hash_mask_compact << hash_shift_compact; ++#ifdef AARCH64 ++ // Used only with compact headers. 
++ static const uintptr_t klass_mask = right_n_bits(klass_bits); ++ static const uintptr_t klass_mask_in_place = klass_mask << klass_shift; ++#endif ++ + + static const uintptr_t locked_value = 0; + static const uintptr_t unlocked_value = 1; +@@ -205,9 +232,19 @@ class markWord { + markWord displaced_mark_helper() const; + void set_displaced_mark_helper(markWord m) const; + markWord copy_set_hash(intptr_t hash) const { +- uintptr_t tmp = value() & (~hash_mask_in_place); +- tmp |= ((hash & hash_mask) << hash_shift); +- return markWord(tmp); ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders) { ++ uintptr_t tmp = value() & (~hash_mask_compact_in_place); ++ tmp |= ((hash & hash_mask_compact) << hash_shift_compact); ++ return markWord(tmp); ++ } else { ++#endif ++ uintptr_t tmp = value() & (~hash_mask_in_place); ++ tmp |= ((hash & hash_mask) << hash_shift); ++ return markWord(tmp); ++#ifdef AARCH64 ++ } ++#endif + } + // it is only used to be stored into BasicLock as the + // indicator that the lock is using heavyweight monitor +@@ -240,13 +277,30 @@ class markWord { + + // hash operations + intptr_t hash() const { +- return mask_bits(value() >> hash_shift, hash_mask); ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders) { ++ return mask_bits(value() >> hash_shift_compact, hash_mask_compact); ++ } else { ++#endif ++ return mask_bits(value() >> hash_shift, hash_mask); ++#ifdef AARCH64 ++ } ++#endif + } + + bool has_no_hash() const { + return hash() == no_hash; + } + ++#ifdef AARCH64 ++ inline markWord actual_mark() const; ++ inline Klass* klass() const; ++ inline Klass* klass_or_null() const; ++ inline narrowKlass narrow_klass() const; ++ inline markWord set_narrow_klass(narrowKlass nklass) const; ++ inline markWord set_klass(Klass* klass) const; ++#endif ++ + // Prototype mark for initialization + static markWord prototype() { + return markWord( no_hash_in_place | no_lock_in_place ); +@@ -260,6 +314,19 @@ class markWord { + + // Recover address of oop from encoded form used in mark + inline void* decode_pointer() { return (void*)clear_lock_bits().value(); } ++ ++#ifdef _LP64 ++ inline bool self_forwarded() const { ++ bool self_fwd = mask_bits(value(), self_forwarded_mask_in_place) != 0; ++ assert(!self_fwd || UseAltGCForwarding, "Only set self-fwd bit when using alt GC forwarding"); ++ return self_fwd; ++ } ++ ++ inline markWord set_self_forwarded() const { ++ assert(UseAltGCForwarding, "Only call this with alt GC forwarding"); ++ return markWord(value() | self_forwarded_mask_in_place | marked_value); ++ } ++#endif + }; + + // Support atomic operations. +diff --git a/src/hotspot/share/oops/markWord.inline.hpp b/src/hotspot/share/oops/markWord.inline.hpp +new file mode 100644 +index 000000000..0e874fed8 +--- /dev/null ++++ b/src/hotspot/share/oops/markWord.inline.hpp +@@ -0,0 +1,70 @@ ++/* ++ * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). 
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
++ * or visit www.oracle.com if you need additional information or have any
++ * questions.
++ *
++ */
++
++#ifndef SHARE_OOPS_MARKWORD_INLINE_HPP
++#define SHARE_OOPS_MARKWORD_INLINE_HPP
++
++#include "oops/markWord.hpp"
++#include "oops/compressedOops.inline.hpp"
++
++#ifdef AARCH64
++markWord markWord::actual_mark() const {
++ assert(UseCompactObjectHeaders, "only safe when using compact headers");
++ if (has_displaced_mark_helper()) {
++ return displaced_mark_helper();
++ } else {
++ return *this;
++ }
++}
++
++Klass* markWord::klass() const {
++ assert(UseCompactObjectHeaders, "only used with compact object headers");
++ assert(!CompressedKlassPointers::is_null(narrow_klass()), "narrow klass must not be null: " INTPTR_FORMAT, value());
++ return CompressedKlassPointers::decode_not_null(narrow_klass());
++}
++
++Klass* markWord::klass_or_null() const {
++ assert(UseCompactObjectHeaders, "only used with compact object headers");
++ return CompressedKlassPointers::decode(narrow_klass());
++}
++
++narrowKlass markWord::narrow_klass() const {
++ assert(UseCompactObjectHeaders, "only used with compact object headers");
++ return narrowKlass(value() >> klass_shift);
++}
++
++markWord markWord::set_narrow_klass(narrowKlass nklass) const {
++ assert(UseCompactObjectHeaders, "only used with compact object headers");
++ return markWord((value() & ~klass_mask_in_place) | ((uintptr_t) nklass << klass_shift));
++}
++
++markWord markWord::set_klass(Klass* klass) const {
++ assert(UseCompactObjectHeaders, "only used with compact object headers");
++ assert(UseCompressedClassPointers, "expect compressed klass pointers");
++ narrowKlass nklass = CompressedKlassPointers::encode(const_cast<Klass*>(klass));
++ return set_narrow_klass(nklass);
++}
++#endif // AARCH64
++
++#endif // SHARE_OOPS_MARKWORD_INLINE_HPP
+diff --git a/src/hotspot/share/oops/objArrayKlass.cpp b/src/hotspot/share/oops/objArrayKlass.cpp
+index 46a7c299f..d34c31972 100644
+--- a/src/hotspot/share/oops/objArrayKlass.cpp
++++ b/src/hotspot/share/oops/objArrayKlass.cpp
+@@ -156,7 +156,8 @@ ObjArrayKlass::ObjArrayKlass(int n, Klass* element_klass, Symbol* name) : ArrayK
+ }
+
+ size_t ObjArrayKlass::oop_size(oop obj) const {
+- assert(obj->is_objArray(), "must be object array");
++ // In this assert, we cannot safely access the Klass* with compact headers.
++ assert(AARCH64_ONLY(UseCompactObjectHeaders ||) obj->is_objArray(), "must be object array");
+ return objArrayOop(obj)->object_size();
+ }
+
+diff --git a/src/hotspot/share/oops/objArrayKlass.inline.hpp b/src/hotspot/share/oops/objArrayKlass.inline.hpp
+index 6c9165509..ed98d1f7d 100644
+--- a/src/hotspot/share/oops/objArrayKlass.inline.hpp
++++ b/src/hotspot/share/oops/objArrayKlass.inline.hpp
+@@ -70,7 +70,8 @@ void ObjArrayKlass::oop_oop_iterate_elements_bounded(
+
+ template <typename T, class OopClosureType>
+ void ObjArrayKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
+- assert (obj->is_array(), "obj must be array");
++ // In this assert, we cannot safely access the Klass* with compact headers.
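++ // (During GC the mark word may hold a forwarding pointer rather than the
++ // narrow Klass* bits, so decoding the klass from it here could read garbage.)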
++ assert (AARCH64_ONLY(UseCompactObjectHeaders ||) obj->is_array(), "obj must be array"); + objArrayOop a = objArrayOop(obj); + + if (Devirtualizer::do_metadata(closure)) { +diff --git a/src/hotspot/share/oops/objArrayOop.hpp b/src/hotspot/share/oops/objArrayOop.hpp +index de6d4d3d0..0b8bf99f2 100644 +--- a/src/hotspot/share/oops/objArrayOop.hpp ++++ b/src/hotspot/share/oops/objArrayOop.hpp +@@ -51,32 +51,6 @@ class objArrayOopDesc : public arrayOopDesc { + return base_offset_in_bytes() + sizeof(T) * index; + } + +-private: +- // Give size of objArrayOop in HeapWords minus the header +- static int array_size(int length) { +- const uint OopsPerHeapWord = HeapWordSize/heapOopSize; +- assert(OopsPerHeapWord >= 1 && (HeapWordSize % heapOopSize == 0), +- "Else the following (new) computation would be in error"); +- uint res = ((uint)length + OopsPerHeapWord - 1)/OopsPerHeapWord; +-#ifdef ASSERT +- // The old code is left in for sanity-checking; it'll +- // go away pretty soon. XXX +- // Without UseCompressedOops, this is simply: +- // oop->length() * HeapWordsPerOop; +- // With narrowOops, HeapWordsPerOop is 1/2 or equal 0 as an integer. +- // The oop elements are aligned up to wordSize +- const uint HeapWordsPerOop = heapOopSize/HeapWordSize; +- uint old_res; +- if (HeapWordsPerOop > 0) { +- old_res = length * HeapWordsPerOop; +- } else { +- old_res = align_up((uint)length, OopsPerHeapWord)/OopsPerHeapWord; +- } +- assert(res == old_res, "Inconsistency between old and new."); +-#endif // ASSERT +- return res; +- } +- + public: + // Returns the offset of the first element. + static int base_offset_in_bytes() { +@@ -94,16 +68,15 @@ private: + oop replace_if_null(int index, oop exchange_value); + + // Sizing +- static int header_size() { return arrayOopDesc::header_size(T_OBJECT); } + size_t object_size() { return object_size(length()); } + + static size_t object_size(int length) { + // This returns the object size in HeapWords. +- uint asz = array_size(length); +- uint osz = align_object_size(header_size() + asz); +- assert(osz >= asz, "no overflow"); +- assert((int)osz > 0, "no overflow"); +- return (size_t)osz; ++ size_t asz = (size_t)length * heapOopSize; ++ size_t size_words = heap_word_size(base_offset_in_bytes() + asz); ++ size_t osz = align_object_size(size_words); ++ assert(osz < max_jint, "no overflow"); ++ return osz; + } + + Klass* element_klass(); +diff --git a/src/hotspot/share/oops/oop.cpp b/src/hotspot/share/oops/oop.cpp +index 68dab1fee..b89990878 100644 +--- a/src/hotspot/share/oops/oop.cpp ++++ b/src/hotspot/share/oops/oop.cpp +@@ -156,7 +156,8 @@ bool oopDesc::is_typeArray_noinline() const { return is_typeArray(); } + + bool oopDesc::has_klass_gap() { + // Only has a klass gap when compressed class pointers are used. +- return UseCompressedClassPointers; ++ // Except when using compact headers. 
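++ // (With compact headers the narrow Klass* lives in the mark word, so there
++ // is no separate klass field and hence no gap behind it.)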
++ return UseCompressedClassPointers AARCH64_ONLY(&& !UseCompactObjectHeaders); + } + + #if INCLUDE_CDS_JAVA_HEAP +@@ -168,6 +169,11 @@ void oopDesc::set_narrow_klass(narrowKlass nk) { + #endif + + void* oopDesc::load_klass_raw(oop obj) { ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders) { ++ return obj->klass(); ++ } else ++#endif + if (UseCompressedClassPointers) { + narrowKlass narrow_klass = obj->_metadata._compressed_klass; + if (narrow_klass == 0) return nullptr; +diff --git a/src/hotspot/share/oops/oop.hpp b/src/hotspot/share/oops/oop.hpp +index f71c09055..19482330f 100644 +--- a/src/hotspot/share/oops/oop.hpp ++++ b/src/hotspot/share/oops/oop.hpp +@@ -79,6 +79,11 @@ class oopDesc { + inline markWord cas_set_mark(markWord new_mark, markWord old_mark); + inline markWord cas_set_mark(markWord new_mark, markWord old_mark, atomic_memory_order order); + ++ inline markWord resolve_mark() const; ++ ++ // Returns the prototype mark that should be used for this object. ++ inline markWord prototype_mark() const; ++ + // Used only to re-initialize the mark word (e.g., of promoted + // objects during a GC) -- requires a valid klass pointer + inline void init_mark(); +@@ -97,7 +102,17 @@ class oopDesc { + static inline void set_klass_gap(HeapWord* mem, int z); + + // size of object header, aligned to platform wordSize +- static constexpr int header_size() { return sizeof(oopDesc)/HeapWordSize; } ++ static int header_size() { ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders) { ++ return sizeof(markWord) / HeapWordSize; ++ } else { ++#endif ++ return sizeof(oopDesc)/HeapWordSize; ++#ifdef AARCH64 ++ } ++#endif ++ } + + // Returns whether this is an instance of k or an instance of a subclass of k + inline bool is_a(Klass* k) const; +@@ -109,6 +124,20 @@ class oopDesc { + // to be able to figure out the size of an object knowing its klass. + inline size_t size_given_klass(Klass* klass); + ++ // The following set of methods is used to access the mark-word and related ++ // properties when the object may be forwarded. Be careful where and when ++ // using this method. It assumes that the forwardee is installed in ++ // the header as a plain pointer (or self-forwarded). In particular, ++ // those methods can not deal with the sliding-forwarding that is used ++ // in Serial, G1 and Shenandoah full-GCs. ++private: ++ inline Klass* forward_safe_klass_impl(markWord m) const; ++public: ++ inline Klass* forward_safe_klass() const; ++ inline Klass* forward_safe_klass(markWord m) const; ++ inline size_t forward_safe_size(); ++ inline void forward_safe_init_mark(); ++ + // type test operations (inlined in oop.inline.hpp) + inline bool is_instance() const; + inline bool is_instanceRef() const; +@@ -261,14 +290,17 @@ class oopDesc { + inline bool is_forwarded() const; + + inline void forward_to(oop p); ++ inline void forward_to_self(); + + // Like "forward_to", but inserts the forwarding pointer atomically. + // Exactly one thread succeeds in inserting the forwarding pointer, and + // this call returns null for that thread; any other thread has the + // value of the forwarding pointer returned and does not modify "this". 
+ inline oop forward_to_atomic(oop p, markWord compare, atomic_memory_order order = memory_order_conservative); ++ inline oop forward_to_self_atomic(markWord compare, atomic_memory_order order = memory_order_conservative); + + inline oop forwardee() const; ++ inline oop forwardee(markWord header) const; + + // Age of object during scavenge + inline uint age() const; +@@ -312,12 +344,44 @@ class oopDesc { + + // for code generation + static int mark_offset_in_bytes() { return (int)offset_of(oopDesc, _mark); } +- static int klass_offset_in_bytes() { return (int)offset_of(oopDesc, _metadata._klass); } ++ static int klass_offset_in_bytes() { ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders) { ++ STATIC_ASSERT(markWord::klass_shift % 8 == 0); ++ return mark_offset_in_bytes() + markWord::klass_shift / 8; ++ } else ++#endif ++ { ++ return (int)offset_of(oopDesc, _metadata._klass); ++ } ++ } + static int klass_gap_offset_in_bytes() { + assert(has_klass_gap(), "only applicable to compressed klass pointers"); ++#ifdef AARCH64 ++ assert(!UseCompactObjectHeaders, "don't use klass_offset_in_bytes() with compact headers"); ++#endif + return klass_offset_in_bytes() + sizeof(narrowKlass); + } + ++ static int base_offset_in_bytes() { ++#ifdef _LP64 ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders) { ++ // With compact headers, the Klass* field is not used for the Klass* ++ // and is used for the object fields instead. ++ STATIC_ASSERT(sizeof(markWord) == 8); ++ return sizeof(markWord); ++ } else ++#endif // AARCH64 ++ if (UseCompressedClassPointers) { ++ return sizeof(markWord) + sizeof(narrowKlass); ++ } else ++#endif // _LP64 ++ { ++ return sizeof(oopDesc); ++ } ++ } ++ + // for error reporting + static void* load_klass_raw(oop obj); + static void* load_oop_raw(oop obj, int offset); +diff --git a/src/hotspot/share/oops/oop.inline.hpp b/src/hotspot/share/oops/oop.inline.hpp +index 1a3ae0067..4ad69cdd4 100644 +--- a/src/hotspot/share/oops/oop.inline.hpp ++++ b/src/hotspot/share/oops/oop.inline.hpp +@@ -34,10 +34,11 @@ + #include "oops/arrayOop.hpp" + #include "oops/compressedOops.inline.hpp" + #include "oops/instanceKlass.hpp" +-#include "oops/markWord.hpp" ++#include "oops/markWord.inline.hpp" + #include "oops/oopsHierarchy.hpp" + #include "runtime/atomic.hpp" + #include "runtime/globals.hpp" ++#include "runtime/safepoint.hpp" + #include "utilities/align.hpp" + #include "utilities/debug.hpp" + #include "utilities/macros.hpp" +@@ -66,14 +67,14 @@ void oopDesc::set_mark(HeapWord* mem, markWord m) { + *(markWord*)(((char*)mem) + mark_offset_in_bytes()) = m; + } + +-void oopDesc::release_set_mark(HeapWord* mem, markWord m) { +- Atomic::release_store((markWord*)(((char*)mem) + mark_offset_in_bytes()), m); +-} +- + void oopDesc::release_set_mark(markWord m) { + Atomic::release_store(&_mark, m); + } + ++void oopDesc::release_set_mark(HeapWord* mem, markWord m) { ++ Atomic::release_store((markWord*)(((char*)mem) + mark_offset_in_bytes()), m); ++} ++ + markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark) { + return Atomic::cmpxchg(&_mark, old_mark, new_mark); + } +@@ -82,36 +83,99 @@ markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark, atomic_memo + return Atomic::cmpxchg(&_mark, old_mark, new_mark, order); + } + ++markWord oopDesc::resolve_mark() const { ++ assert(LockingMode != LM_LEGACY, "Not safe with legacy stack-locking"); ++ markWord m = mark(); ++ if (m.has_displaced_mark_helper()) { ++ m = m.displaced_mark_helper(); ++ } ++ return m; ++} ++ ++markWord 
oopDesc::prototype_mark() const { ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders) { ++ return klass()->prototype_header(); ++ } else { ++#endif ++ return markWord::prototype(); ++#ifdef AARCH64 ++ } ++#endif ++} ++ + void oopDesc::init_mark() { +- set_mark(markWord::prototype()); ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders) { ++ set_mark(klass()->prototype_header()); ++ } else ++#endif ++ { ++ set_mark(markWord::prototype()); ++ } + } + + Klass* oopDesc::klass() const { ++#ifdef _LP64 ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders) { ++ markWord m = resolve_mark(); ++ return m.klass(); ++ } else ++#endif // AARCH64 + if (UseCompressedClassPointers) { + return CompressedKlassPointers::decode_not_null(_metadata._compressed_klass); +- } else { ++ } else ++#endif // _LP64 ++ { + return _metadata._klass; + } + } + + Klass* oopDesc::klass_or_null() const { ++#ifdef _LP64 ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders) { ++ markWord m = resolve_mark(); ++ return m.klass_or_null(); ++ } else ++#endif // AARCH64 + if (UseCompressedClassPointers) { + return CompressedKlassPointers::decode(_metadata._compressed_klass); +- } else { ++ } else ++#endif // _LP64 ++ { + return _metadata._klass; + } + } + + Klass* oopDesc::klass_or_null_acquire() const { ++#ifdef _LP64 ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders) { ++ markWord m = mark_acquire(); ++ if (m.has_displaced_mark_helper()) { ++ m = m.displaced_mark_helper(); ++ } ++ return m.klass_or_null(); ++ } else ++#endif // AARCH64 + if (UseCompressedClassPointers) { +- narrowKlass nklass = Atomic::load_acquire(&_metadata._compressed_klass); +- return CompressedKlassPointers::decode(nklass); +- } else { ++ narrowKlass nklass = Atomic::load_acquire(&_metadata._compressed_klass); ++ return CompressedKlassPointers::decode(nklass); ++ } else ++#endif // _LP64 ++ { + return Atomic::load_acquire(&_metadata._klass); + } + } + + Klass* oopDesc::klass_raw() const { ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders) { ++ return klass(); ++ } else ++#endif + if (UseCompressedClassPointers) { + return CompressedKlassPointers::decode_raw(_metadata._compressed_klass); + } else { +@@ -121,6 +185,9 @@ Klass* oopDesc::klass_raw() const { + + void oopDesc::set_klass(Klass* k) { + assert(Universe::is_bootstrapping() || (k != nullptr && k->is_klass()), "incorrect Klass"); ++#ifdef AARCH64 ++ assert(!UseCompactObjectHeaders, "don't set Klass* with compact headers"); ++#endif + if (UseCompressedClassPointers) { + _metadata._compressed_klass = CompressedKlassPointers::encode_not_null(k); + } else { +@@ -130,6 +197,9 @@ void oopDesc::set_klass(Klass* k) { + + void oopDesc::release_set_klass(HeapWord* mem, Klass* k) { + assert(Universe::is_bootstrapping() || (k != nullptr && k->is_klass()), "incorrect Klass"); ++#ifdef AARCH64 ++ assert(!UseCompactObjectHeaders, "don't set Klass* with compact headers"); ++#endif + char* raw_mem = ((char*)mem + klass_offset_in_bytes()); + if (UseCompressedClassPointers) { + Atomic::release_store((narrowKlass*)raw_mem, +@@ -140,6 +210,9 @@ void oopDesc::release_set_klass(HeapWord* mem, Klass* k) { + } + + void oopDesc::set_klass_gap(HeapWord* mem, int v) { ++#ifdef AARCH64 ++ assert(!UseCompactObjectHeaders, "don't set Klass* gap with compact headers"); ++#endif + if (UseCompressedClassPointers) { + *(int*)(((char*)mem) + klass_gap_offset_in_bytes()) = v; + } +@@ -202,6 +275,56 @@ size_t oopDesc::size_given_klass(Klass* klass) { + return s; + } + ++#ifdef AARCH64 ++Klass* oopDesc::forward_safe_klass_impl(markWord m) const { ++ 
assert(UseCompactObjectHeaders, "Only get here with compact headers"); ++ if (m.is_marked()) { ++ oop fwd = forwardee(m); ++ markWord m2 = fwd->mark(); ++ assert(!m2.is_marked() || m2.self_forwarded(), "no double forwarding: this: " PTR_FORMAT " (" INTPTR_FORMAT "), fwd: " PTR_FORMAT " (" INTPTR_FORMAT ")", p2i(this), m.value(), p2i(fwd), m2.value()); ++ m = m2; ++ } ++ return m.actual_mark().klass(); ++} ++#endif ++ ++Klass* oopDesc::forward_safe_klass(markWord m) const { ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders) { ++ return forward_safe_klass_impl(m); ++ } else ++#endif ++ { ++ return klass(); ++ } ++} ++ ++Klass* oopDesc::forward_safe_klass() const { ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders) { ++ return forward_safe_klass_impl(mark()); ++ } else ++#endif ++ { ++ return klass(); ++ } ++} ++ ++size_t oopDesc::forward_safe_size() { ++ return size_given_klass(forward_safe_klass()); ++} ++ ++void oopDesc::forward_safe_init_mark() { ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders) { ++ set_mark(forward_safe_klass()->prototype_header()); ++ } else ++#endif ++ { ++ set_mark(markWord::prototype()); ++ } ++} ++ + bool oopDesc::is_instance() const { return klass()->is_instance_klass(); } + bool oopDesc::is_instanceRef() const { return klass()->is_reference_instance_klass(); } + bool oopDesc::is_stackChunk() const { return klass()->is_stack_chunk_instance_klass(); } +@@ -272,19 +395,79 @@ bool oopDesc::is_forwarded() const { + + // Used by scavengers + void oopDesc::forward_to(oop p) { ++ assert(p != cast_to_oop(this) || !UseAltGCForwarding, "Must not be called with self-forwarding"); + markWord m = markWord::encode_pointer_as_mark(p); +- assert(m.decode_pointer() == p, "encoding must be reversible"); ++ assert(forwardee(m) == p, "encoding must be reversible"); + set_mark(m); + } + ++void oopDesc::forward_to_self() { ++#ifdef _LP64 ++ if (UseAltGCForwarding) { ++ markWord m = mark(); ++ // If mark is displaced, we need to preserve the real header during GC. ++ // It will be restored to the displaced header after GC. ++ assert(SafepointSynchronize::is_at_safepoint(), "we can only safely fetch the displaced header at safepoint"); ++ if (m.has_displaced_mark_helper()) { ++ m = m.displaced_mark_helper(); ++ } ++ m = m.set_self_forwarded(); ++ assert(forwardee(m) == cast_to_oop(this), "encoding must be reversible"); ++ set_mark(m); ++ } else ++#endif ++ { ++ forward_to(oop(this)); ++ } ++} ++ + oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order order) { ++ assert(p != cast_to_oop(this) || !UseAltGCForwarding, "Must not be called with self-forwarding"); + markWord m = markWord::encode_pointer_as_mark(p); + assert(m.decode_pointer() == p, "encoding must be reversible"); + markWord old_mark = cas_set_mark(m, compare, order); + if (old_mark == compare) { + return nullptr; + } else { +- return cast_to_oop(old_mark.decode_pointer()); ++ return forwardee(old_mark); ++ } ++} ++ ++oop oopDesc::forward_to_self_atomic(markWord compare, atomic_memory_order order) { ++#ifdef _LP64 ++ if (UseAltGCForwarding) { ++ markWord m = compare; ++ // If mark is displaced, we need to preserve the real header during GC. ++ // It will be restored to the displaced header after GC. 
++ assert(SafepointSynchronize::is_at_safepoint(), "we can only safely fetch the displaced header at safepoint");
++ if (m.has_displaced_mark_helper()) {
++ m = m.displaced_mark_helper();
++ }
++ m = m.set_self_forwarded();
++ assert(forwardee(m) == cast_to_oop(this), "encoding must be reversible");
++ markWord old_mark = cas_set_mark(m, compare, order);
++ if (old_mark == compare) {
++ return nullptr;
++ } else {
++ assert(old_mark.is_marked(), "must be marked here");
++ return forwardee(old_mark);
++ }
++ } else
++#endif
++ {
++ return forward_to_atomic(cast_to_oop(this), compare, order);
++ }
++}
++
++oop oopDesc::forwardee(markWord header) const {
++ assert(header.is_marked(), "only decode when actually forwarded");
++#ifdef _LP64
++ if (header.self_forwarded()) {
++ return cast_to_oop(this);
++ } else
++#endif
++ {
++ return cast_to_oop(header.decode_pointer());
+ }
+ }
+
+@@ -292,8 +475,7 @@ oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order orde
+ // The forwardee is used when copying during scavenge and mark-sweep.
+ // It does need to clear the low two locking- and GC-related bits.
+ oop oopDesc::forwardee() const {
+- assert(is_forwarded(), "only decode when actually forwarded");
+- return cast_to_oop(mark().decode_pointer());
++ return forwardee(mark());
+ }
+
+ // The following method needs to be MT safe.
+@@ -348,7 +530,8 @@ void oopDesc::oop_iterate_backwards(OopClosureType* cl) {
+
+ template <typename OopClosureType>
+ void oopDesc::oop_iterate_backwards(OopClosureType* cl, Klass* k) {
+- assert(k == klass(), "wrong klass");
++ // In this assert, we cannot safely access the Klass* with compact headers.
++ assert(AARCH64_ONLY(UseCompactObjectHeaders ||) k == klass(), "wrong klass");
+ OopIteratorClosureDispatch::oop_oop_iterate_backwards(cl, this, k);
+ }
+
+diff --git a/src/hotspot/share/oops/typeArrayKlass.cpp b/src/hotspot/share/oops/typeArrayKlass.cpp
+index 899531d33..6e040d7d4 100644
+--- a/src/hotspot/share/oops/typeArrayKlass.cpp
++++ b/src/hotspot/share/oops/typeArrayKlass.cpp
+@@ -228,7 +228,8 @@ Klass* TypeArrayKlass::array_klass_or_null() {
+ }
+
+ size_t TypeArrayKlass::oop_size(oop obj) const {
+- assert(obj->is_typeArray(),"must be a type array");
++ // In this assert, we cannot safely access the Klass* with compact headers.
++ assert(AARCH64_ONLY(UseCompactObjectHeaders ||) obj->is_typeArray(),"must be a type array");
+ typeArrayOop t = typeArrayOop(obj);
+ return t->object_size(this);
+ }
+diff --git a/src/hotspot/share/oops/typeArrayKlass.inline.hpp b/src/hotspot/share/oops/typeArrayKlass.inline.hpp
+index 098f9e739..06839077e 100644
+--- a/src/hotspot/share/oops/typeArrayKlass.inline.hpp
++++ b/src/hotspot/share/oops/typeArrayKlass.inline.hpp
+@@ -35,7 +35,8 @@
+ class OopIterateClosure;
+
+ inline void TypeArrayKlass::oop_oop_iterate_impl(oop obj, OopIterateClosure* closure) {
+- assert(obj->is_typeArray(),"must be a type array");
++ // In this assert, we cannot safely access the Klass* with compact headers.
++ assert(AARCH64_ONLY(UseCompactObjectHeaders ||) obj->is_typeArray(),"must be a type array");
+ // Performance tweak: We skip processing the klass pointer since all
+ // TypeArrayKlasses are guaranteed processed via the null class loader.
+ } +diff --git a/src/hotspot/share/opto/c2_CodeStubs.hpp b/src/hotspot/share/opto/c2_CodeStubs.hpp +index 3df1e4b72..fe9ebe862 100644 +--- a/src/hotspot/share/opto/c2_CodeStubs.hpp ++++ b/src/hotspot/share/opto/c2_CodeStubs.hpp +@@ -110,7 +110,17 @@ public: + int max_size() const; + void emit(C2_MacroAssembler& masm); + }; +-#endif ++ ++class C2LoadNKlassStub : public C2CodeStub { ++private: ++ Register _dst; ++public: ++ C2LoadNKlassStub(Register dst) : C2CodeStub(), _dst(dst) {} ++ Register dst() { return _dst; } ++ int max_size() const; ++ void emit(C2_MacroAssembler& masm); ++}; ++#endif // _LP64 + + //-----------------------------C2GeneralStub----------------------------------- + // A generalized stub that can be used to implement an arbitrary stub in a +diff --git a/src/hotspot/share/opto/callnode.cpp b/src/hotspot/share/opto/callnode.cpp +index a53412ed2..8eafc92d8 100644 +--- a/src/hotspot/share/opto/callnode.cpp ++++ b/src/hotspot/share/opto/callnode.cpp +@@ -1575,8 +1575,17 @@ void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer) + } + Node *AllocateNode::make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem) { + Node* mark_node = nullptr; +- // For now only enable fast locking for non-array types +- mark_node = phase->MakeConX(markWord::prototype().value()); ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders) { ++ Node* klass_node = in(AllocateNode::KlassNode); ++ Node* proto_adr = phase->transform(new AddPNode(klass_node, klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset())))); ++ mark_node = LoadNode::make(*phase, control, mem, proto_adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered); ++ } else ++#endif ++ { ++ // For now only enable fast locking for non-array types ++ mark_node = phase->MakeConX(markWord::prototype().value()); ++ } + return mark_node; + } + +diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp +index cbe7b929c..a3300c16c 100644 +--- a/src/hotspot/share/opto/compile.cpp ++++ b/src/hotspot/share/opto/compile.cpp +@@ -1696,6 +1696,12 @@ Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_cr + } + } + if (flat->isa_klassptr()) { ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders) { ++ if (flat->offset() == in_bytes(Klass::prototype_header_offset())) ++ alias_type(idx)->set_rewritable(false); ++ } ++#endif + if (flat->offset() == in_bytes(Klass::super_check_offset_offset())) + alias_type(idx)->set_rewritable(false); + if (flat->offset() == in_bytes(Klass::modifier_flags_offset())) +diff --git a/src/hotspot/share/opto/library_call.cpp b/src/hotspot/share/opto/library_call.cpp +index 7ca86d094..d4edd74e7 100644 +--- a/src/hotspot/share/opto/library_call.cpp ++++ b/src/hotspot/share/opto/library_call.cpp +@@ -4543,8 +4543,8 @@ bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) { + // We depend on hash_mask being at most 32 bits and avoid the use of + // hash_mask_in_place because it could be larger than 32 bits in a 64-bit + // vm: see markWord.hpp. +- Node *hash_mask = _gvn.intcon(markWord::hash_mask); +- Node *hash_shift = _gvn.intcon(markWord::hash_shift); ++ Node *hash_mask = _gvn.intcon(AARCH64_ONLY(UseCompactObjectHeaders ? markWord::hash_mask_compact :) markWord::hash_mask); ++ Node *hash_shift = _gvn.intcon(AARCH64_ONLY(UseCompactObjectHeaders ? 
markWord::hash_shift_compact :) markWord::hash_shift); + Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift)); + // This hack lets the hash bits live anywhere in the mark object now, as long + // as the shift drops the relevant bits into the low 32 bits. Note that +diff --git a/src/hotspot/share/opto/macro.cpp b/src/hotspot/share/opto/macro.cpp +index f6d9fcc6b..1ffeb4bdc 100644 +--- a/src/hotspot/share/opto/macro.cpp ++++ b/src/hotspot/share/opto/macro.cpp +@@ -1661,7 +1661,9 @@ PhaseMacroExpand::initialize_object(AllocateNode* alloc, + } + rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, TypeX_X->basic_type()); + +- rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA); ++ if (AARCH64_ONLY(!UseCompactObjectHeaders) NOT_AARCH64(true)) { ++ rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA); ++ } + int header_size = alloc->minimum_header_size(); // conservatively small + + // Array length +diff --git a/src/hotspot/share/opto/memnode.cpp b/src/hotspot/share/opto/memnode.cpp +index def2dbb71..637d3d2c2 100644 +--- a/src/hotspot/share/opto/memnode.cpp ++++ b/src/hotspot/share/opto/memnode.cpp +@@ -1885,6 +1885,15 @@ Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) { + const Type* + LoadNode::load_array_final_field(const TypeKlassPtr *tkls, + ciKlass* klass) const { ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders) { ++ if (tkls->offset() == in_bytes(Klass::prototype_header_offset())) { ++ // The field is Klass::_prototype_header. Return its (constant) value. ++ assert(this->Opcode() == Op_LoadX, "must load a proper type from _prototype_header"); ++ return TypeX::make(klass->prototype_header()); ++ } ++ } ++#endif + if (tkls->offset() == in_bytes(Klass::modifier_flags_offset())) { + // The field is Klass::_modifier_flags. Return its (constant) value. + // (Folds up the 2nd indirection in aClassConstant.getModifiers().) +@@ -2057,6 +2066,15 @@ const Type* LoadNode::Value(PhaseGVN* phase) const { + assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset"); + return TypeInt::make(klass->super_check_offset()); + } ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders) { ++ if (tkls->offset() == in_bytes(Klass::prototype_header_offset())) { ++ // The field is Klass::_prototype_header. Return its (constant) value. ++ assert(this->Opcode() == Op_LoadX, "must load a proper type from _prototype_header"); ++ return TypeX::make(klass->prototype_header()); ++ } ++ } ++#endif + // Compute index into primary_supers array + juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*); + // Check for overflowing; use unsigned compare to handle the negative case. +@@ -2147,7 +2165,7 @@ const Type* LoadNode::Value(PhaseGVN* phase) const { + } + + Node* alloc = is_new_object_mark_load(phase); +- if (alloc != nullptr) { ++ if (AARCH64_ONLY(!UseCompactObjectHeaders &&) alloc != nullptr) { + return TypeX::make(markWord::prototype().value()); + } + +diff --git a/src/hotspot/share/opto/runtime.cpp b/src/hotspot/share/opto/runtime.cpp +index bb79da326..f4b7bc4fd 100644 +--- a/src/hotspot/share/opto/runtime.cpp ++++ b/src/hotspot/share/opto/runtime.cpp +@@ -320,14 +320,17 @@ JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_nozero_C(Klass* array_type, int len + // Zero array here if the caller is deoptimized. 
+ const size_t size = TypeArrayKlass::cast(array_type)->oop_size(result);
+ BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
+- const size_t hs = arrayOopDesc::header_size(elem_type);
+- // Align to next 8 bytes to avoid trashing arrays's length.
+- const size_t aligned_hs = align_object_offset(hs);
++ size_t hs_bytes = arrayOopDesc::base_offset_in_bytes(elem_type);
++ assert(is_aligned(hs_bytes, BytesPerInt), "must be 4 byte aligned");
+ HeapWord* obj = cast_from_oop<HeapWord*>(result);
+- if (aligned_hs > hs) {
+- Copy::zero_to_words(obj+hs, aligned_hs-hs);
++ if (!is_aligned(hs_bytes, BytesPerLong)) {
++ *reinterpret_cast<jint*>(reinterpret_cast<char*>(obj) + hs_bytes) = 0;
++ hs_bytes += BytesPerInt;
+ }
++
+ // Optimized zeroing.
++ assert(is_aligned(hs_bytes, BytesPerLong), "must be 8-byte aligned");
++ const size_t aligned_hs = hs_bytes / BytesPerLong;
+ Copy::fill_to_aligned_words(obj+aligned_hs, size-aligned_hs);
+ }
+
+diff --git a/src/hotspot/share/opto/type.cpp b/src/hotspot/share/opto/type.cpp
+index 1dddea3ef..28ce28e28 100644
+--- a/src/hotspot/share/opto/type.cpp
++++ b/src/hotspot/share/opto/type.cpp
+@@ -5181,12 +5181,12 @@ void TypeAryPtr::dump2( Dict &d, uint depth, outputStream *st ) const {
+ }
+
+ if( _offset != 0 ) {
+- int header_size = objArrayOopDesc::header_size() * wordSize;
++ BasicType basic_elem_type = elem()->basic_type();
++ int header_size = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
+ if( _offset == OffsetTop ) st->print("+undefined");
+ else if( _offset == OffsetBot ) st->print("+any");
+ else if( _offset < header_size ) st->print("+%d", _offset);
+ else {
+- BasicType basic_elem_type = elem()->basic_type();
+ if (basic_elem_type == T_ILLEGAL) {
+ st->print("+any");
+ } else {
+diff --git a/src/hotspot/share/prims/unsafe.cpp b/src/hotspot/share/prims/unsafe.cpp
+index 4eb1c96a6..174b9f6e6 100644
+--- a/src/hotspot/share/prims/unsafe.cpp
++++ b/src/hotspot/share/prims/unsafe.cpp
+@@ -68,7 +68,7 @@
+
+
+ #define MAX_OBJECT_SIZE \
+- ( arrayOopDesc::header_size(T_DOUBLE) * HeapWordSize \
++ ( arrayOopDesc::base_offset_in_bytes(T_DOUBLE) \
+ + ((julong)max_jint * sizeof(double)) )
+
+
+diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp
+index 50410d1fa..c066746c0 100644
+--- a/src/hotspot/share/prims/whitebox.cpp
++++ b/src/hotspot/share/prims/whitebox.cpp
+@@ -82,6 +82,7 @@
+ #include "runtime/javaCalls.hpp"
+ #include "runtime/javaThread.inline.hpp"
+ #include "runtime/jniHandles.inline.hpp"
++#include "runtime/lockStack.hpp"
+ #include "runtime/os.hpp"
+ #include "runtime/stackFrameStream.inline.hpp"
+ #include "runtime/synchronizer.hpp"
+@@ -1856,6 +1857,14 @@ WB_ENTRY(jboolean, WB_IsUbsanEnabled(JNIEnv* env))
+ return (jboolean) WhiteBox::is_ubsan_enabled();
+ WB_END
+
++WB_ENTRY(jint, WB_getLockStackCapacity(JNIEnv* env))
++ return (jint) LockStack::CAPACITY;
++WB_END
++
++WB_ENTRY(jboolean, WB_supportsRecursiveLightweightLocking(JNIEnv* env))
++ return (jboolean) VM_Version::supports_recursive_lightweight_locking();
++WB_END
++
+ WB_ENTRY(jboolean, WB_DeflateIdleMonitors(JNIEnv* env, jobject wb))
+ log_info(monitorinflation)("WhiteBox initiated DeflateIdleMonitors");
+ return ObjectSynchronizer::request_deflate_idle_monitors_from_wb();
+@@ -2784,6 +2793,8 @@ static JNINativeMethod methods[] = {
+ {CC"isMonitorInflated0", CC"(Ljava/lang/Object;)Z", (void*)&WB_IsMonitorInflated },
+ {CC"isAsanEnabled", CC"()Z", (void*)&WB_IsAsanEnabled },
+ {CC"isUbsanEnabled", CC"()Z", (void*)&WB_IsUbsanEnabled },
++
{CC"getLockStackCapacity", CC"()I", (void*)&WB_getLockStackCapacity }, ++ {CC"supportsRecursiveLightweightLocking", CC"()Z", (void*)&WB_supportsRecursiveLightweightLocking }, + {CC"forceSafepoint", CC"()V", (void*)&WB_ForceSafepoint }, + {CC"forceClassLoaderStatsSafepoint", CC"()V", (void*)&WB_ForceClassLoaderStatsSafepoint }, + {CC"getConstantPool0", CC"(Ljava/lang/Class;)J", (void*)&WB_GetConstantPool }, +diff --git a/src/hotspot/share/runtime/abstract_vm_version.hpp b/src/hotspot/share/runtime/abstract_vm_version.hpp +index 4bf0741a2..fb5db3f47 100644 +--- a/src/hotspot/share/runtime/abstract_vm_version.hpp ++++ b/src/hotspot/share/runtime/abstract_vm_version.hpp +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it +@@ -183,6 +183,9 @@ class Abstract_VM_Version: AllStatic { + // Does platform support secondary supers table lookup? + constexpr static bool supports_secondary_supers_table() { return false; } + ++ // Is recursive lightweight locking implemented for this platform? ++ constexpr static bool supports_recursive_lightweight_locking() { return false; } ++ + // Does platform support float16 instructions? + static bool supports_float16() { return false; } + +diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp +index 0056821fe..130730b22 100644 +--- a/src/hotspot/share/runtime/arguments.cpp ++++ b/src/hotspot/share/runtime/arguments.cpp +@@ -3108,6 +3108,28 @@ jint Arguments::finalize_vm_init_args(bool patch_mod_javabase) { + UNSUPPORTED_OPTION(ShowRegistersOnAssert); + #endif // CAN_SHOW_REGISTERS_ON_ASSERT + ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders && UseZGC && !ZGenerational) { ++ if (FLAG_IS_CMDLINE(UseCompactObjectHeaders)) { ++ warning("Single-generational ZGC does not work with compact object headers, disabling UseCompactObjectHeaders"); ++ } ++ FLAG_SET_DEFAULT(UseCompactObjectHeaders, false); ++ } ++ if (UseCompactObjectHeaders && FLAG_IS_CMDLINE(UseCompressedClassPointers) && !UseCompressedClassPointers) { ++ warning("Compact object headers require compressed class pointers. Disabling compact object headers."); ++ FLAG_SET_DEFAULT(UseCompactObjectHeaders, false); ++ } ++ if (UseCompactObjectHeaders && LockingMode == LM_LEGACY) { ++ FLAG_SET_DEFAULT(LockingMode, LM_LIGHTWEIGHT); ++ } ++ if (UseCompactObjectHeaders && !UseAltGCForwarding) { ++ FLAG_SET_DEFAULT(UseAltGCForwarding, true); ++ } ++ if (UseCompactObjectHeaders && !UseCompressedClassPointers) { ++ FLAG_SET_DEFAULT(UseCompressedClassPointers, true); ++ } ++#endif ++ + return JNI_OK; + } + +@@ -3398,13 +3420,22 @@ char* Arguments::get_default_shared_archive_path() { + os::jvm_path(jvm_path, sizeof(jvm_path)); + char *end = strrchr(jvm_path, *os::file_separator()); + if (end != nullptr) *end = '\0'; +- size_t jvm_path_len = strlen(jvm_path); +- size_t file_sep_len = strlen(os::file_separator()); +- const size_t len = jvm_path_len + file_sep_len + 20; +- _default_shared_archive_path = NEW_C_HEAP_ARRAY(char, len, mtArguments); +- jio_snprintf(_default_shared_archive_path, len, +- LP64_ONLY(!UseCompressedOops ? 
"%s%sclasses_nocoops.jsa":) "%s%sclasses.jsa", +- jvm_path, os::file_separator()); ++ stringStream tmp; ++ tmp.print("%s%sclasses", jvm_path, os::file_separator()); ++#ifdef _LP64 ++ if (!UseCompressedOops) { ++ tmp.print_raw("_nocoops"); ++ } ++#ifdef AARCH64 ++ if (UseCompactObjectHeaders) { ++ // Note that generation of xxx_coh.jsa variants require ++ // --enable-cds-archive-coh at build time ++ tmp.print_raw("_coh"); ++ } ++#endif // AARCH64 ++#endif // _LP64 ++ tmp.print_raw(".jsa"); ++ _default_shared_archive_path = os::strdup(tmp.base()); + } + return _default_shared_archive_path; + } +diff --git a/src/hotspot/share/runtime/basicLock.cpp b/src/hotspot/share/runtime/basicLock.cpp +index 40ba712bf..b6cb4b075 100644 +--- a/src/hotspot/share/runtime/basicLock.cpp ++++ b/src/hotspot/share/runtime/basicLock.cpp +@@ -66,19 +66,26 @@ void BasicLock::move_to(oop obj, BasicLock* dest) { + // is small (given the support for inflated fast-path locking in the fast_lock, etc) + // we'll leave that optimization for another time. + +- if (displaced_header().is_neutral()) { +- // The object is locked and the resulting ObjectMonitor* will also be +- // locked so it can't be async deflated until ownership is dropped. +- ObjectSynchronizer::inflate_helper(obj); +- // WARNING: We cannot put a check here, because the inflation +- // will not update the displaced header. Once BasicLock is inflated, +- // no one should ever look at its content. +- } else { +- // Typically the displaced header will be 0 (recursive stack lock) or +- // unused_mark. Naively we'd like to assert that the displaced mark +- // value is either 0, neutral, or 3. But with the advent of the +- // store-before-CAS avoidance in fast_lock/compiler_lock_object +- // we can find any flavor mark in the displaced mark. ++ if (LockingMode == LM_LEGACY) { ++ if (displaced_header().is_neutral()) { ++ // The object is locked and the resulting ObjectMonitor* will also be ++ // locked so it can't be async deflated until ownership is dropped. ++ ObjectSynchronizer::inflate_helper(obj); ++ // WARNING: We cannot put a check here, because the inflation ++ // will not update the displaced header. Once BasicLock is inflated, ++ // no one should ever look at its content. ++ } else { ++ // Typically the displaced header will be 0 (recursive stack lock) or ++ // unused_mark. Naively we'd like to assert that the displaced mark ++ // value is either 0, neutral, or 3. But with the advent of the ++ // store-before-CAS avoidance in fast_lock/compiler_lock_object ++ // we can find any flavor mark in the displaced mark. 
++ }
++ dest->set_displaced_header(displaced_header());
+ }
+- dest->set_displaced_header(displaced_header());
++#ifdef ASSERT
++ else {
++ dest->set_displaced_header(markWord(badDispHeaderDeopt));
++ }
++#endif
+ }
+diff --git a/src/hotspot/share/runtime/deoptimization.cpp b/src/hotspot/share/runtime/deoptimization.cpp
+index 2058da0ff..4f82836a4 100644
+--- a/src/hotspot/share/runtime/deoptimization.cpp
++++ b/src/hotspot/share/runtime/deoptimization.cpp
+@@ -1665,13 +1665,13 @@ bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
+ assert(mon_info->owner()->is_locked(), "object must be locked now");
+- ObjectMonitor* mon = ObjectSynchronizer::inflate(deoptee_thread, obj(), ObjectSynchronizer::inflate_cause_vm_internal);
++ ObjectMonitor* mon = ObjectSynchronizer::inflate_for(deoptee_thread, obj(), ObjectSynchronizer::inflate_cause_vm_internal);
+ assert(mon->owner() == deoptee_thread, "must be");
+ } else {
+ BasicLock* lock = mon_info->lock();
+- ObjectSynchronizer::enter(obj, lock, deoptee_thread);
++ ObjectSynchronizer::enter_for(obj, lock, deoptee_thread);
+ assert(mon_info->owner()->is_locked(), "object must be locked now");
+ }
+ }
+diff --git a/src/hotspot/share/runtime/globals.hpp b/src/hotspot/share/runtime/globals.hpp
+index 55e664a18..e1662bb5c 100644
+--- a/src/hotspot/share/runtime/globals.hpp
++++ b/src/hotspot/share/runtime/globals.hpp
+@@ -147,6 +147,7 @@ const size_t minimumSymbolTableSize = 1024;
+ constraint)
+ const bool UseCompressedOops = false;
+ const bool UseCompressedClassPointers = false;
++const bool UseCompactObjectHeaders = false;
+ const int ObjectAlignmentInBytes = 8;
+
+ #endif // _LP64
+diff --git a/src/hotspot/share/runtime/lockStack.cpp b/src/hotspot/share/runtime/lockStack.cpp
+index b4a3bf1e8..d7dcbdda7 100644
+--- a/src/hotspot/share/runtime/lockStack.cpp
++++ b/src/hotspot/share/runtime/lockStack.cpp
+@@ -1,6 +1,7 @@
+ /*
+ * Copyright (c) 2022, Red Hat, Inc. All rights reserved.
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
++ * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+@@ -25,20 +26,30 @@
+
+ #include "precompiled.hpp"
+ #include "memory/allocation.hpp"
++#include "runtime/globals.hpp"
+ #include "runtime/lockStack.inline.hpp"
+ #include "runtime/safepoint.hpp"
+ #include "runtime/stackWatermark.hpp"
+ #include "runtime/stackWatermarkSet.inline.hpp"
+ #include "runtime/thread.hpp"
+ #include "utilities/copy.hpp"
++#include "utilities/debug.hpp"
++#include "utilities/globalDefinitions.hpp"
+ #include "utilities/ostream.hpp"
+
++#include <type_traits>
++
+ const int LockStack::lock_stack_offset = in_bytes(JavaThread::lock_stack_offset());
+ const int LockStack::lock_stack_top_offset = in_bytes(JavaThread::lock_stack_top_offset());
+ const int LockStack::lock_stack_base_offset = in_bytes(JavaThread::lock_stack_base_offset());
+
+ LockStack::LockStack(JavaThread* jt) :
+ _top(lock_stack_base_offset), _base() {
++ // Make sure the layout of the object is compatible with the emitted code's assumptions.
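++ // In particular, _bad_oop_sentinel must sit exactly one oop slot below
++ // _base[0], so that generated code can probe the slot below the top of
++ // the lock-stack without an explicit underflow check.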
++ STATIC_ASSERT(sizeof(_bad_oop_sentinel) == oopSize);
++ STATIC_ASSERT(sizeof(_base[0]) == oopSize);
++ STATIC_ASSERT(std::is_standard_layout<LockStack>::value);
++ STATIC_ASSERT(offsetof(LockStack, _bad_oop_sentinel) == offsetof(LockStack, _base) - oopSize);
+ #ifdef ASSERT
+ for (int i = 0; i < CAPACITY; i++) {
+ _base[i] = nullptr;
+@@ -62,11 +73,21 @@ uint32_t LockStack::end_offset() {
+ void LockStack::verify(const char* msg) const {
+ assert(LockingMode == LM_LIGHTWEIGHT, "never use lock-stack when light weight locking is disabled");
+ assert((_top <= end_offset()), "lockstack overflow: _top %d end_offset %d", _top, end_offset());
+- assert((_top >= start_offset()), "lockstack underflow: _top %d end_offset %d", _top, start_offset());
++ assert((_top >= start_offset()), "lockstack underflow: _top %d start_offset %d", _top, start_offset());
+ if (SafepointSynchronize::is_at_safepoint() || (Thread::current()->is_Java_thread() && is_owning_thread())) {
+ int top = to_index(_top);
+ for (int i = 0; i < top; i++) {
+ assert(_base[i] != nullptr, "no zapped before top");
++ if (VM_Version::supports_recursive_lightweight_locking()) {
++ oop o = _base[i];
++ for (; i < top - 1; i++) {
++ // Consecutive entries may be the same
++ if (_base[i + 1] != o) {
++ break;
++ }
++ }
++ }
++
+ for (int j = i + 1; j < top; j++) {
+ assert(_base[i] != _base[j], "entries must be unique: %s", msg);
+ }
+diff --git a/src/hotspot/share/runtime/lockStack.hpp b/src/hotspot/share/runtime/lockStack.hpp
+index 25ab7a8de..17b0a1ca8 100644
+--- a/src/hotspot/share/runtime/lockStack.hpp
++++ b/src/hotspot/share/runtime/lockStack.hpp
+@@ -1,6 +1,7 @@
+ /*
+ * Copyright (c) 2022, Red Hat, Inc. All rights reserved.
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
++ * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+@@ -35,9 +36,12 @@ class OopClosure;
+ class outputStream;
+
+ class LockStack {
++ friend class LockStackTest;
+ friend class VMStructs;
+-private:
++ JVMCI_ONLY(friend class JVMCIVMStructs;)
++public:
+ static const int CAPACITY = 8;
++private:
+
+ // TODO: It would be very useful if JavaThread::lock_stack_offset() and friends were constexpr,
+ // but this is currently not the case because we're using offset_of() which is non-constexpr,
+@@ -50,6 +54,9 @@ private:
+ // We do this instead of a simple index into the array because this allows for
+ // efficient addressing in generated code.
+ uint32_t _top;
++ // The _bad_oop_sentinel acts as a sentinel value to elide underflow checks in generated code.
++ // The correct layout is statically asserted in the constructor.
++ const uintptr_t _bad_oop_sentinel = badOopVal;
+ oop _base[CAPACITY];
+
+ // Get the owning thread of this lock-stack.
+@@ -74,17 +81,35 @@ public:
+ static uint32_t start_offset();
+ static uint32_t end_offset();
+
+- // Return true if we have room to push onto this lock-stack, false otherwise.
+- inline bool can_push() const;
++ // Returns true if the lock-stack is full. False otherwise.
++ inline bool is_full() const;
+
+ // Pushes an oop on this lock-stack.
+ inline void push(oop o);
+
+- // Pops an oop from this lock-stack.
+- inline oop pop();
++ // Get the oldest oop from this lock-stack.
++ // Precondition: This lock-stack must not be empty.
++ inline oop bottom() const;
++
++ // Is the lock-stack empty.
++ inline bool is_empty() const; ++ ++ // Check if object is recursive. ++ // Precondition: This lock-stack must contain the oop. ++ inline bool is_recursive(oop o) const; ++ ++ // Try recursive enter. ++ // Precondition: This lock-stack must not be full. ++ inline bool try_recursive_enter(oop o); ++ ++ // Try recursive exit. ++ // Precondition: This lock-stack must contain the oop. ++ inline bool try_recursive_exit(oop o); + + // Removes an oop from an arbitrary location of this lock-stack. +- inline void remove(oop o); ++ // Precondition: This lock-stack must contain the oop. ++ // Returns the number of oops removed. ++ inline size_t remove(oop o); + + // Tests whether the oop is on this lock-stack. + inline bool contains(oop o) const; +diff --git a/src/hotspot/share/runtime/lockStack.inline.hpp b/src/hotspot/share/runtime/lockStack.inline.hpp +index b36be2f72..7a9874a92 100644 +--- a/src/hotspot/share/runtime/lockStack.inline.hpp ++++ b/src/hotspot/share/runtime/lockStack.inline.hpp +@@ -1,6 +1,7 @@ + /* + * Copyright (c) 2022, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. ++ * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it +@@ -26,14 +27,20 @@ + #ifndef SHARE_RUNTIME_LOCKSTACK_INLINE_HPP + #define SHARE_RUNTIME_LOCKSTACK_INLINE_HPP + ++#include "runtime/lockStack.hpp" ++ + #include "memory/iterator.hpp" + #include "runtime/javaThread.hpp" +-#include "runtime/lockStack.hpp" + #include "runtime/safepoint.hpp" + #include "runtime/stackWatermark.hpp" + #include "runtime/stackWatermarkSet.inline.hpp" ++#include "utilities/align.hpp" ++#include "utilities/globalDefinitions.hpp" + + inline int LockStack::to_index(uint32_t offset) { ++ assert(is_aligned(offset, oopSize), "Bad alignment: %u", offset); ++ assert((offset <= end_offset()), "lockstack overflow: offset %d end_offset %d", offset, end_offset()); ++ assert((offset >= start_offset()), "lockstack underflow: offset %d start_offset %d", offset, start_offset()); + return (offset - lock_stack_base_offset) / oopSize; + } + +@@ -42,8 +49,8 @@ JavaThread* LockStack::get_thread() const { + return reinterpret_cast(addr - lock_stack_offset); + } + +-inline bool LockStack::can_push() const { +- return to_index(_top) < CAPACITY; ++inline bool LockStack::is_full() const { ++ return to_index(_top) == CAPACITY; + } + + inline bool LockStack::is_owning_thread() const { +@@ -61,45 +68,132 @@ inline void LockStack::push(oop o) { + verify("pre-push"); + assert(oopDesc::is_oop(o), "must be"); + assert(!contains(o), "entries must be unique"); +- assert(can_push(), "must have room"); ++ assert(!is_full(), "must have room"); + assert(_base[to_index(_top)] == nullptr, "expect zapped entry"); + _base[to_index(_top)] = o; + _top += oopSize; + verify("post-push"); + } + +-inline oop LockStack::pop() { +- verify("pre-pop"); +- assert(to_index(_top) > 0, "underflow, probably unbalanced push/pop"); ++inline oop LockStack::bottom() const { ++ assert(to_index(_top) > 0, "must contain an oop"); ++ return _base[0]; ++} ++ ++inline bool LockStack::is_empty() const { ++ return to_index(_top) == 0; ++} ++ ++inline bool LockStack::is_recursive(oop o) const { ++ if (!VM_Version::supports_recursive_lightweight_locking()) { ++ return false; ++ } ++ verify("pre-is_recursive"); ++ ++ // This will succeed iff there is a consecutive run of oops on the ++ // 
// lock-stack with a length of at least 2.
++
++ assert(contains(o), "at least one entry must exist");
++ int end = to_index(_top);
++ // Start iterating from the top because the runtime code is more
++ // interested in the balanced locking case when the top oop on the
++ // lock-stack matches o. This will cause the for loop to break out
++ // in the first loop iteration if it is non-recursive.
++ for (int i = end - 1; i > 0; i--) {
++ if (_base[i - 1] == o && _base[i] == o) {
++ verify("post-is_recursive");
++ return true;
++ }
++ if (_base[i] == o) {
++ // o can only occur in one consecutive run on the lock-stack.
++ // Only one of the two oops checked matched o, so this run
++ // must be of length 1 and thus not be recursive. Stop the search.
++ break;
++ }
++ }
++
++ verify("post-is_recursive");
++ return false;
++}
++
++inline bool LockStack::try_recursive_enter(oop o) {
++ if (!VM_Version::supports_recursive_lightweight_locking()) {
++ return false;
++ }
++ verify("pre-try_recursive_enter");
++
++ // This will succeed iff the top oop on the stack matches o.
++ // When successful o will be pushed to the lock-stack, creating
++ // a consecutive run of at least 2 oops that match o on top of
++ // the lock-stack.
++
++ assert(!is_full(), "precond");
++
++ int end = to_index(_top);
++ if (end == 0 || _base[end - 1] != o) {
++ // Topmost oop does not match o.
++ verify("post-try_recursive_enter");
++ return false;
++ }
++
++ _base[end] = o;
++ _top += oopSize;
++ verify("post-try_recursive_enter");
++ return true;
++}
++
++inline bool LockStack::try_recursive_exit(oop o) {
++ if (!VM_Version::supports_recursive_lightweight_locking()) {
++ return false;
++ }
++ verify("pre-try_recursive_exit");
++
++ // This will succeed iff the top two oops on the stack match o.
++ // When successful the top oop will be popped off the lock-stack.
++ // When unsuccessful the lock may still be recursive, in which
++ // case the locking is unbalanced. This case is handled externally.
++
++ assert(contains(o), "entries must exist");
++
++ int end = to_index(_top);
++ if (end <= 1 || _base[end - 1] != o || _base[end - 2] != o) {
++ // The two topmost oops do not match o.
++ verify("post-try_recursive_exit"); ++ return false; ++ } ++ + _top -= oopSize; +- oop o = _base[to_index(_top)]; +-#ifdef ASSERT +- _base[to_index(_top)] = nullptr; +-#endif +- assert(!contains(o), "entries must be unique: " PTR_FORMAT, p2i(o)); +- verify("post-pop"); +- return o; ++ DEBUG_ONLY(_base[to_index(_top)] = nullptr;) ++ verify("post-try_recursive_exit"); ++ return true; + } + +-inline void LockStack::remove(oop o) { ++inline size_t LockStack::remove(oop o) { + verify("pre-remove"); + assert(contains(o), "entry must be present: " PTR_FORMAT, p2i(o)); ++ + int end = to_index(_top); ++ int inserted = 0; + for (int i = 0; i < end; i++) { +- if (_base[i] == o) { +- int last = end - 1; +- for (; i < last; i++) { +- _base[i] = _base[i + 1]; ++ if (_base[i] != o) { ++ if (inserted != i) { ++ _base[inserted] = _base[i]; + } +- _top -= oopSize; +-#ifdef ASSERT +- _base[to_index(_top)] = nullptr; +-#endif +- break; ++ inserted++; + } + } +- assert(!contains(o), "entries must be unique: " PTR_FORMAT, p2i(o)); ++ ++#ifdef ASSERT ++ for (int i = inserted; i < end; i++) { ++ _base[i] = nullptr; ++ } ++#endif ++ ++ uint32_t removed = end - inserted; ++ _top -= removed * oopSize; ++ assert(!contains(o), "entry must have been removed: " PTR_FORMAT, p2i(o)); + verify("post-remove"); ++ return removed; + } + + inline bool LockStack::contains(oop o) const { +diff --git a/src/hotspot/share/runtime/objectMonitor.cpp b/src/hotspot/share/runtime/objectMonitor.cpp +index ee0f754b8..696803bbe 100644 +--- a/src/hotspot/share/runtime/objectMonitor.cpp ++++ b/src/hotspot/share/runtime/objectMonitor.cpp +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it +@@ -39,6 +39,7 @@ + #include "prims/jvmtiDeferredUpdates.hpp" + #include "prims/jvmtiExport.hpp" + #include "runtime/atomic.hpp" ++#include "runtime/globals.hpp" + #include "runtime/handles.inline.hpp" + #include "runtime/interfaceSupport.inline.hpp" + #include "runtime/javaThread.inline.hpp" +@@ -53,6 +54,7 @@ + #include "runtime/sharedRuntime.hpp" + #include "services/threadService.hpp" + #include "utilities/dtrace.hpp" ++#include "utilities/globalDefinitions.hpp" + #include "utilities/macros.hpp" + #include "utilities/preserveException.hpp" + #if INCLUDE_JFR +@@ -312,7 +314,70 @@ void ObjectMonitor::ClearSuccOnSuspend::operator()(JavaThread* current) { + // ----------------------------------------------------------------------------- + // Enter support + ++bool ObjectMonitor::enter_for(JavaThread* locking_thread) { ++ // Used by ObjectSynchronizer::enter_for to enter for another thread. ++ // The monitor is private to or already owned by locking_thread which must be suspended. ++ // So this code may only contend with deflation. ++ assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be"); ++ ++ // Block out deflation as soon as possible. ++ add_to_contentions(1); ++ ++ bool success = false; ++ if (!is_being_async_deflated()) { ++ void* prev_owner = try_set_owner_from(nullptr, locking_thread); ++ ++ if (prev_owner == nullptr) { ++ assert(_recursions == 0, "invariant"); ++ success = true; ++ } else if (prev_owner == locking_thread) { ++ _recursions++; ++ success = true; ++ } else if (prev_owner == DEFLATER_MARKER) { ++ // Racing with deflation. 
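++ // Attempt to cancel the deflation by claiming the monitor from the deflater.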
++ prev_owner = try_set_owner_from(DEFLATER_MARKER, locking_thread); ++ if (prev_owner == DEFLATER_MARKER) { ++ // Cancelled deflation. Increment contentions as part of the deflation protocol. ++ add_to_contentions(1); ++ success = true; ++ } else if (prev_owner == nullptr) { ++ // At this point we cannot race with deflation as we have both incremented ++ // contentions, seen contention > 0 and seen a DEFLATER_MARKER. ++ // success will only be false if this races with something other than ++ // deflation. ++ prev_owner = try_set_owner_from(nullptr, locking_thread); ++ success = prev_owner == nullptr; ++ } ++ } else if (LockingMode == LM_LEGACY && locking_thread->is_lock_owned((address)prev_owner)) { ++ assert(_recursions == 0, "must be"); ++ _recursions = 1; ++ set_owner_from_BasicLock(prev_owner, locking_thread); ++ success = true; ++ } ++ assert(success, "Failed to enter_for: locking_thread=" INTPTR_FORMAT ++ ", this=" INTPTR_FORMAT "{owner=" INTPTR_FORMAT "}, observed owner: " INTPTR_FORMAT, ++ p2i(locking_thread), p2i(this), p2i(owner_raw()), p2i(prev_owner)); ++ } else { ++ // Async deflation is in progress and our contentions increment ++ // above lost the race to async deflation. Undo the work and ++ // force the caller to retry. ++ const oop l_object = object(); ++ if (l_object != nullptr) { ++ // Attempt to restore the header/dmw to the object's header so that ++ // we only retry once if the deflater thread happens to be slow. ++ install_displaced_markword_in_object(l_object); ++ } ++ } ++ ++ add_to_contentions(-1); ++ ++ assert(!success || owner_raw() == locking_thread, "must be"); ++ ++ return success; ++} ++ + bool ObjectMonitor::enter(JavaThread* current) { ++ assert(current == JavaThread::current(), "must be"); + // The following code is ordered to check the most common cases first + // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors. + +diff --git a/src/hotspot/share/runtime/objectMonitor.hpp b/src/hotspot/share/runtime/objectMonitor.hpp +index d6c0e31f7..e0ffd70b1 100644 +--- a/src/hotspot/share/runtime/objectMonitor.hpp ++++ b/src/hotspot/share/runtime/objectMonitor.hpp +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ *
+ * This code is free software; you can redistribute it and/or modify it
+@@ -217,6 +217,7 @@ private:
+
+ static int Knob_SpinLimit;
+
++ static ByteSize header_offset() { return byte_offset_of(ObjectMonitor, _header); }
+ static ByteSize owner_offset() { return byte_offset_of(ObjectMonitor, _owner); }
+ static ByteSize recursions_offset() { return byte_offset_of(ObjectMonitor, _recursions); }
+ static ByteSize cxq_offset() { return byte_offset_of(ObjectMonitor, _cxq); }
+@@ -298,6 +299,7 @@ private:
+ int contentions() const;
+ void add_to_contentions(int value);
+ intx recursions() const { return _recursions; }
++ void set_recursions(size_t recursions);
+
+ // JVM/TI GetObjectMonitorUsage() needs this:
+ ObjectWaiter* first_waiter() { return _WaitSet; }
+@@ -332,6 +334,7 @@ private:
+ void operator()(JavaThread* current);
+ };
+ public:
++ bool enter_for(JavaThread* locking_thread);
+ bool enter(JavaThread* current);
+ void exit(JavaThread* current, bool not_suspended = true);
+ void wait(jlong millis, bool interruptible, TRAPS);
+diff --git a/src/hotspot/share/runtime/objectMonitor.inline.hpp b/src/hotspot/share/runtime/objectMonitor.inline.hpp
+index 36790925b..b371663ee 100644
+--- a/src/hotspot/share/runtime/objectMonitor.inline.hpp
++++ b/src/hotspot/share/runtime/objectMonitor.inline.hpp
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
++ * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+@@ -102,6 +102,12 @@ inline void ObjectMonitor::add_to_contentions(int value) {
+ Atomic::add(&_contentions, value);
+ }
+
++inline void ObjectMonitor::set_recursions(size_t recursions) {
++ assert(_recursions == 0, "must be");
++ assert(has_owner(), "must be owned");
++ _recursions = checked_cast<intx>(recursions);
++}
++
+ // Clear _owner field; current value must match old_value.
+ inline void ObjectMonitor::release_clear_owner(void* old_value) {
+ #ifdef ASSERT
+diff --git a/src/hotspot/share/runtime/sharedRuntime.cpp b/src/hotspot/share/runtime/sharedRuntime.cpp
+index 3b3cfb0a9..8a55492d9 100644
+--- a/src/hotspot/share/runtime/sharedRuntime.cpp
++++ b/src/hotspot/share/runtime/sharedRuntime.cpp
+@@ -3257,16 +3257,24 @@ JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *current) )
+ kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) {
+ if (kptr2->obj() != nullptr) { // Avoid 'holes' in the monitor array
+ BasicLock *lock = kptr2->lock();
+- // Inflate so the object's header no longer refers to the BasicLock.
+- if (lock->displaced_header().is_unlocked()) {
+- // The object is locked and the resulting ObjectMonitor* will also be
+- // locked so it can't be async deflated until ownership is dropped.
+- // See the big comment in basicLock.cpp: BasicLock::move_to().
+- ObjectSynchronizer::inflate_helper(kptr2->obj());
++ if (LockingMode == LM_LEGACY) {
++ // Inflate so the object's header no longer refers to the BasicLock.
++ if (lock->displaced_header().is_unlocked()) {
++ // The object is locked and the resulting ObjectMonitor* will also be
++ // locked so it can't be async deflated until ownership is dropped.
++ // See the big comment in basicLock.cpp: BasicLock::move_to().
++ ObjectSynchronizer::inflate_helper(kptr2->obj());
++ }
++ // Now the displaced header is free to move because the
++ // object's header no longer refers to it.
++ buf[i] = (intptr_t)lock->displaced_header().value();
+ }
+- // Now the displaced header is free to move because the
+- // object's header no longer refers to it.
+- buf[i++] = (intptr_t)lock->displaced_header().value();
++#ifdef ASSERT
++ else {
++ buf[i] = badDispHeaderOSR;
++ }
++#endif
++ i++;
+ buf[i++] = cast_from_oop<intptr_t>(kptr2->obj());
+ }
+ }
+diff --git a/src/hotspot/share/runtime/synchronizer.cpp b/src/hotspot/share/runtime/synchronizer.cpp
+index cc73082ed..10edd920e 100644
+--- a/src/hotspot/share/runtime/synchronizer.cpp
++++ b/src/hotspot/share/runtime/synchronizer.cpp
+@@ -36,6 +36,7 @@
+ #include "oops/oop.inline.hpp"
+ #include "runtime/atomic.hpp"
+ #include "runtime/frame.inline.hpp"
++#include "runtime/globals.hpp"
+ #include "runtime/handles.inline.hpp"
+ #include "runtime/handshake.hpp"
+ #include "runtime/interfaceSupport.inline.hpp"
+@@ -60,6 +61,7 @@
+ #include "utilities/align.hpp"
+ #include "utilities/dtrace.hpp"
+ #include "utilities/events.hpp"
++#include "utilities/globalDefinitions.hpp"
+ #include "utilities/linkedlist.hpp"
+ #include "utilities/preserveException.hpp"
+
+@@ -384,6 +386,19 @@ bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
+ return false;
+ }
+
++ if (LockingMode == LM_LIGHTWEIGHT) {
++ LockStack& lock_stack = current->lock_stack();
++ if (lock_stack.is_full()) {
++ // Always go into runtime if the lock stack is full.
++ return false;
++ }
++ if (lock_stack.try_recursive_enter(obj)) {
++ // Recursive lock successful.
++ current->inc_held_monitor_count();
++ return true;
++ }
++ }
++
+ const markWord mark = obj->mark();
+
+ if (mark.has_monitor()) {
+@@ -437,8 +452,9 @@ bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
+ }
+
+ // Handle notifications when synchronizing on value based classes
+-void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* current) {
+- frame last_frame = current->last_frame();
++void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* locking_thread) {
++ assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
++ frame last_frame = locking_thread->last_frame();
+ bool bcp_was_adjusted = false;
+ // Don't decrement bcp if it points to the frame's first instruction. This happens when
+ // handle_sync_on_value_based_class() is called because of a synchronized method.
There +@@ -451,9 +467,9 @@ void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread + } + + if (DiagnoseSyncOnValueBasedClasses == FATAL_EXIT) { +- ResourceMark rm(current); ++ ResourceMark rm; + stringStream ss; +- current->print_active_stack_on(&ss); ++ locking_thread->print_active_stack_on(&ss); + char* base = (char*)strstr(ss.base(), "at"); + char* newline = (char*)strchr(ss.base(), '\n'); + if (newline != nullptr) { +@@ -462,13 +478,13 @@ void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread + fatal("Synchronizing on object " INTPTR_FORMAT " of klass %s %s", p2i(obj()), obj->klass()->external_name(), base); + } else { + assert(DiagnoseSyncOnValueBasedClasses == LOG_WARNING, "invalid value for DiagnoseSyncOnValueBasedClasses"); +- ResourceMark rm(current); ++ ResourceMark rm; + Log(valuebasedclasses) vblog; + + vblog.info("Synchronizing on object " INTPTR_FORMAT " of klass %s", p2i(obj()), obj->klass()->external_name()); +- if (current->has_last_Java_frame()) { ++ if (locking_thread->has_last_Java_frame()) { + LogStream info_stream(vblog.info()); +- current->print_active_stack_on(&info_stream); ++ locking_thread->print_active_stack_on(&info_stream); + } else { + vblog.info("Cannot find the last Java frame"); + } +@@ -495,38 +511,111 @@ static bool useHeavyMonitors() { + + // ----------------------------------------------------------------------------- + // Monitor Enter/Exit ++ ++void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) { ++ // When called with locking_thread != Thread::current() some mechanism must synchronize ++ // the locking_thread with respect to the current thread. Currently only used when ++ // deoptimizing and re-locking locks. See Deoptimization::relock_objects ++ assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be"); ++ if (!enter_fast_impl(obj, lock, locking_thread)) { ++ // Inflated ObjectMonitor::enter_for is required ++ ++ // An async deflation can race after the inflate_for() call and before ++ // enter_for() can make the ObjectMonitor busy. enter_for() returns false ++ // if we have lost the race to async deflation and we simply try again. ++ while (true) { ++ ObjectMonitor* monitor = inflate_for(locking_thread, obj(), inflate_cause_monitor_enter); ++ if (monitor->enter_for(locking_thread)) { ++ return; ++ } ++ assert(monitor->is_being_async_deflated(), "must be"); ++ } ++ } ++} ++ ++void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) { ++ assert(current == Thread::current(), "must be"); ++ if (!enter_fast_impl(obj, lock, current)) { ++ // Inflated ObjectMonitor::enter is required ++ ++ // An async deflation can race after the inflate() call and before ++ // enter() can make the ObjectMonitor busy. enter() returns false if ++ // we have lost the race to async deflation and we simply try again. ++ while (true) { ++ ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter); ++ if (monitor->enter(current)) { ++ return; ++ } ++ } ++ } ++} ++ + // The interpreter and compiler assembly code tries to lock using the fast path + // of this algorithm. Make sure to update that code if the following function is + // changed. The implementation is extremely sensitive to race condition. Be careful. 
++bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
+
+-void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
+ if (obj->klass()->is_value_based()) {
+- handle_sync_on_value_based_class(obj, current);
++ handle_sync_on_value_based_class(obj, locking_thread);
+ }
+
+- current->inc_held_monitor_count();
++ locking_thread->inc_held_monitor_count();
+
+ if (!useHeavyMonitors()) {
+ if (LockingMode == LM_LIGHTWEIGHT) {
+ // Fast-locking does not use the 'lock' argument.
+- LockStack& lock_stack = current->lock_stack();
+- if (lock_stack.can_push()) {
+- markWord mark = obj()->mark_acquire();
+- while (mark.is_neutral()) {
+- // Retry until a lock state change has been observed. cas_set_mark() may collide with non lock bits modifications.
+- // Try to swing into 'fast-locked' state.
+- assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
+- const markWord locked_mark = mark.set_fast_locked();
+- const markWord old_mark = obj()->cas_set_mark(locked_mark, mark);
+- if (old_mark == mark) {
+- // Successfully fast-locked, push object to lock-stack and return.
+- lock_stack.push(obj());
+- return;
+- }
+- mark = old_mark;
++ LockStack& lock_stack = locking_thread->lock_stack();
++ if (lock_stack.is_full()) {
++ // We unconditionally make room on the lock stack by inflating
++ // the least recently locked object on the lock stack.
++
++ // About the choice to inflate the least recently locked object.
++ // First we must choose to inflate a lock, either some lock on
++ // the lock-stack or the lock that is currently being entered
++ // (which may or may not be on the lock-stack).
++ // Second the best lock to inflate is a lock which is entered
++ // in a control flow where there are only a very few locks being
++ // used, as the costly part of inflated locking is inflation,
++ // not locking. But this property is entirely program dependent.
++ // Third inflating the lock currently being entered on when it
++ // is not present on the lock-stack will result in a still full
++ // lock-stack. This creates a scenario where every deeper nested
++ // monitorenter must call into the runtime.
++ // The rationale here is as follows:
++ // Because we cannot (currently) figure out the second, and want
++ // to avoid the third, we inflate a lock on the lock-stack.
++ // The least recently locked lock is chosen as it is the lock
++ // with the longest critical section.
++
++ log_info(monitorinflation)("LockStack capacity exceeded, inflating.");
++ ObjectMonitor* monitor = inflate_for(locking_thread, lock_stack.bottom(), inflate_cause_vm_internal);
++ assert(monitor->owner() == Thread::current(), "must be owner=" PTR_FORMAT " current=" PTR_FORMAT " mark=" PTR_FORMAT,
++ p2i(monitor->owner()), p2i(Thread::current()), monitor->object()->mark_acquire().value());
++ assert(!lock_stack.is_full(), "must have made room here");
++ }
++
++ markWord mark = obj()->mark_acquire();
++ while (mark.is_neutral()) {
++ // Retry until a lock state change has been observed. cas_set_mark() may collide with non lock bits modifications.
++ // Try to swing into 'fast-locked' state.
++ assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
++ const markWord locked_mark = mark.set_fast_locked();
++ const markWord old_mark = obj()->cas_set_mark(locked_mark, mark);
++ if (old_mark == mark) {
++ // Successfully fast-locked, push object to lock-stack and return.
++ lock_stack.push(obj()); ++ return true; + } ++ mark = old_mark; + } +- // All other paths fall-through to inflate-enter. ++ ++ if (mark.is_fast_locked() && lock_stack.try_recursive_enter(obj())) { ++ // Recursive lock successful. ++ return true; ++ } ++ ++ // Failed to fast lock. ++ return false; + } else if (LockingMode == LM_LEGACY) { + markWord mark = obj->mark(); + if (mark.is_neutral()) { +@@ -534,15 +623,14 @@ void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) + // be visible <= the ST performed by the CAS. + lock->set_displaced_header(mark); + if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) { +- return; ++ return true; + } +- // Fall through to inflate() ... + } else if (mark.has_locker() && +- current->is_lock_owned((address) mark.locker())) { ++ locking_thread->is_lock_owned((address) mark.locker())) { + assert(lock != mark.locker(), "must not re-lock the same lock"); + assert(lock != (BasicLock*) obj->mark().value(), "don't relock with same BasicLock"); + lock->set_displaced_header(markWord::from_pointer(nullptr)); +- return; ++ return true; + } + + // The object header will never be displaced to this lock, +@@ -550,20 +638,15 @@ void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) + // must be non-zero to avoid looking like a re-entrant lock, + // and must not look locked either. + lock->set_displaced_header(markWord::unused_mark()); ++ ++ // Failed to fast lock. ++ return false; + } + } else if (VerifyHeavyMonitors) { + guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked"); + } + +- // An async deflation can race after the inflate() call and before +- // enter() can make the ObjectMonitor busy. enter() returns false if +- // we have lost the race to async deflation and we simply try again. +- while (true) { +- ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter); +- if (monitor->enter(current)) { +- return; +- } +- } ++ return false; + } + + void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) { +@@ -573,15 +656,28 @@ void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) + markWord mark = object->mark(); + if (LockingMode == LM_LIGHTWEIGHT) { + // Fast-locking does not use the 'lock' argument. +- while (mark.is_fast_locked()) { +- // Retry until a lock state change has been observed. cas_set_mark() may collide with non lock bits modifications. +- const markWord unlocked_mark = mark.set_unlocked(); +- const markWord old_mark = object->cas_set_mark(unlocked_mark, mark); +- if (old_mark == mark) { +- current->lock_stack().remove(object); +- return; ++ LockStack& lock_stack = current->lock_stack(); ++ if (mark.is_fast_locked() && lock_stack.try_recursive_exit(object)) { ++ // Recursively unlocked. ++ return; ++ } ++ ++ if (mark.is_fast_locked() && lock_stack.is_recursive(object)) { ++ // This lock is recursive but is not at the top of the lock stack so we're ++ // doing an unbalanced exit. We have to fall thru to inflation below and ++ // let ObjectMonitor::exit() do the unlock. ++ } else { ++ while (mark.is_fast_locked()) { ++ // Retry until a lock state change has been observed. cas_set_mark() may collide with non lock bits modifications. 
++ const markWord unlocked_mark = mark.set_unlocked(); ++ const markWord old_mark = object->cas_set_mark(unlocked_mark, mark); ++ if (old_mark == mark) { ++ size_t recursions = lock_stack.remove(object) - 1; ++ assert(recursions == 0, "must not be recursive here"); ++ return; ++ } ++ mark = old_mark; + } +- mark = old_mark; + } + } else if (LockingMode == LM_LEGACY) { + markWord dhw = lock->displaced_header(); +@@ -631,13 +727,7 @@ void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) + // The ObjectMonitor* can't be async deflated until ownership is + // dropped inside exit() and the ObjectMonitor* must be !is_busy(). + ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal); +- if (LockingMode == LM_LIGHTWEIGHT && monitor->is_owner_anonymous()) { +- // It must be owned by us. Pop lock object from lock stack. +- LockStack& lock_stack = current->lock_stack(); +- oop popped = lock_stack.pop(); +- assert(popped == object, "must be owned by this thread"); +- monitor->set_owner_from_anonymous(current); +- } ++ assert(!monitor->is_owner_anonymous(), "must not be"); + monitor->exit(current); + } + +@@ -905,7 +995,7 @@ static inline intptr_t get_next_hash(Thread* current, oop obj) { + value = v; + } + +- value &= markWord::hash_mask; ++ value &= AARCH64_ONLY(UseCompactObjectHeaders ? markWord::hash_mask_compact :) markWord::hash_mask; + if (value == 0) value = 0xBAD; + assert(value != markWord::no_hash, "invariant"); + return value; +@@ -1313,15 +1403,28 @@ void ObjectSynchronizer::inflate_helper(oop obj) { + (void)inflate(Thread::current(), obj, inflate_cause_vm_internal); + } + +-// Can be called from non JavaThreads (e.g., VMThread) for FastHashCode +-// calculations as part of JVM/TI tagging. +-static bool is_lock_owned(Thread* thread, oop obj) { +- assert(LockingMode == LM_LIGHTWEIGHT, "only call this with new lightweight locking enabled"); +- return thread->is_Java_thread() ? JavaThread::cast(thread)->lock_stack().contains(obj) : false; ++ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop obj, const InflateCause cause) { ++ assert(current == Thread::current(), "must be"); ++ if (LockingMode == LM_LIGHTWEIGHT && current->is_Java_thread()) { ++ return inflate_impl(JavaThread::cast(current), obj, cause); ++ } ++ return inflate_impl(nullptr, obj, cause); ++} ++ ++ObjectMonitor* ObjectSynchronizer::inflate_for(JavaThread* thread, oop obj, const InflateCause cause) { ++ assert(thread == Thread::current() || thread->is_obj_deopt_suspend(), "must be"); ++ return inflate_impl(thread, obj, cause); + } + +-ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object, +- const InflateCause cause) { ++ObjectMonitor* ObjectSynchronizer::inflate_impl(JavaThread* inflating_thread, oop object, const InflateCause cause) { ++ // The JavaThread* inflating_thread parameter is only used by LM_LIGHTWEIGHT and requires ++ // that the inflating_thread == Thread::current() or is suspended throughout the call by ++ // some other mechanism. ++ // Even with LM_LIGHTWEIGHT the thread might be nullptr when called from a non ++ // JavaThread. (As may still be the case from FastHashCode). However it is only ++ // important for the correctness of the LM_LIGHTWEIGHT algorithm that the thread ++ // is set when called from ObjectSynchronizer::enter from the owning thread, ++ // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit. 
+ EventJavaMonitorInflate event; + + for (;;) { +@@ -1330,10 +1433,10 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object, + // The mark can be in one of the following states: + // * inflated - Just return if using stack-locking. + // If using fast-locking and the ObjectMonitor owner +- // is anonymous and the current thread owns the +- // object lock, then we make the current thread the +- // ObjectMonitor owner and remove the lock from the +- // current thread's lock stack. ++ // is anonymous and the inflating_thread owns the ++ // object lock, then we make the inflating_thread ++ // the ObjectMonitor owner and remove the lock from ++ // the inflating_thread's lock stack. + // * fast-locked - Coerce it to inflated from fast-locked. + // * stack-locked - Coerce it to inflated from stack-locked. + // * INFLATING - Busy wait for conversion from stack-locked to +@@ -1345,9 +1448,11 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object, + ObjectMonitor* inf = mark.monitor(); + markWord dmw = inf->header(); + assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value()); +- if (LockingMode == LM_LIGHTWEIGHT && inf->is_owner_anonymous() && is_lock_owned(current, object)) { +- inf->set_owner_from_anonymous(current); +- JavaThread::cast(current)->lock_stack().remove(object); ++ if (LockingMode == LM_LIGHTWEIGHT && inf->is_owner_anonymous() && ++ inflating_thread != nullptr && inflating_thread->lock_stack().contains(object)) { ++ inf->set_owner_from_anonymous(inflating_thread); ++ size_t removed = inflating_thread->lock_stack().remove(object); ++ inf->set_recursions(removed - 1); + } + return inf; + } +@@ -1367,12 +1472,12 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object, + } + + // CASE: fast-locked +- // Could be fast-locked either by current or by some other thread. ++ // Could be fast-locked either by the inflating_thread or by some other thread. + // + // Note that we allocate the ObjectMonitor speculatively, _before_ + // attempting to set the object's mark to the new ObjectMonitor. If +- // this thread owns the monitor, then we set the ObjectMonitor's +- // owner to this thread. Otherwise, we set the ObjectMonitor's owner ++ // the inflating_thread owns the monitor, then we set the ObjectMonitor's ++ // owner to the inflating_thread. Otherwise, we set the ObjectMonitor's owner + // to anonymous. If we lose the race to set the object's mark to the + // new ObjectMonitor, then we just delete it and loop around again. + // +@@ -1380,10 +1485,10 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object, + if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) { + ObjectMonitor* monitor = new ObjectMonitor(object); + monitor->set_header(mark.set_unlocked()); +- bool own = is_lock_owned(current, object); ++ bool own = inflating_thread != nullptr && inflating_thread->lock_stack().contains(object); + if (own) { +- // Owned by us. +- monitor->set_owner_from(nullptr, current); ++ // Owned by inflating_thread. ++ monitor->set_owner_from(nullptr, inflating_thread); + } else { + // Owned by somebody else. + monitor->set_owner_anonymous(); +@@ -1393,7 +1498,8 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object, + if (old_mark == mark) { + // Success! Return inflated monitor. 
+ if (own) {
+- JavaThread::cast(current)->lock_stack().remove(object);
++ size_t removed = inflating_thread->lock_stack().remove(object);
++ monitor->set_recursions(removed - 1);
+ }
+ // Once the ObjectMonitor is configured and object is associated
+ // with the ObjectMonitor, it is safe to allow async deflation:
+@@ -1403,7 +1509,7 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
+ // cache lines to avoid false sharing on MP systems ...
+ OM_PERFDATA_OP(Inflations, inc());
+ if (log_is_enabled(Trace, monitorinflation)) {
+- ResourceMark rm(current);
++ ResourceMark rm;
+ lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
+ INTPTR_FORMAT ", type='%s'", p2i(object),
+ object->mark().value(), object->klass()->external_name());
+@@ -1502,7 +1608,7 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
+ // to avoid false sharing on MP systems ...
+ OM_PERFDATA_OP(Inflations, inc());
+ if (log_is_enabled(Trace, monitorinflation)) {
+- ResourceMark rm(current);
++ ResourceMark rm;
+ lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
+ INTPTR_FORMAT ", type='%s'", p2i(object),
+ object->mark().value(), object->klass()->external_name());
+@@ -1546,7 +1652,7 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
+ // cache lines to avoid false sharing on MP systems ...
+ OM_PERFDATA_OP(Inflations, inc());
+ if (log_is_enabled(Trace, monitorinflation)) {
+- ResourceMark rm(current);
++ ResourceMark rm;
+ lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
+ INTPTR_FORMAT ", type='%s'", p2i(object),
+ object->mark().value(), object->klass()->external_name());
+diff --git a/src/hotspot/share/runtime/synchronizer.hpp b/src/hotspot/share/runtime/synchronizer.hpp
+index e983aeb9d..f1a14e362 100644
+--- a/src/hotspot/share/runtime/synchronizer.hpp
++++ b/src/hotspot/share/runtime/synchronizer.hpp
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
++ * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+@@ -92,7 +92,18 @@ class ObjectSynchronizer : AllStatic {
+ // This is the "slow path" version of monitor enter and exit.
+ static void enter(Handle obj, BasicLock* lock, JavaThread* current);
+ static void exit(oop obj, BasicLock* lock, JavaThread* current);
++ // Used to enter a monitor for another thread. This requires that the
++ // locking_thread is suspended, and that entering a potentially
++ // inflated monitor may only contend with deflation. That is, the obj
++ // being locked is either already locked by the locking_thread or cannot
++ // escape the locking_thread.
++ static void enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread);
++private:
++ // Shared implementation for enter and enter_for. Performs all but the
++ // inflated monitor enter.
++ static bool enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread);
+
++public:
+ // Used only to handle jni locks or other unmatched monitor enter/exit
+ // Internally they will use heavy weight monitor.
+ static void jni_enter(Handle obj, JavaThread* current);
+@@ -113,6 +124,14 @@ class ObjectSynchronizer : AllStatic {
+
+ // Inflate light weight monitor to heavy weight monitor
+ static ObjectMonitor* inflate(Thread* current, oop obj, const InflateCause cause);
++ // Used to inflate a monitor as if it were done by the given JavaThread.
++ static ObjectMonitor* inflate_for(JavaThread* thread, oop obj, const InflateCause cause);
++
++private:
++ // Shared implementation between the different LockingModes.
++ static ObjectMonitor* inflate_impl(JavaThread* thread, oop obj, const InflateCause cause);
++
++public:
+ // This version is only for internal use
+ static void inflate_helper(oop obj);
+ static const char* inflate_cause_name(const InflateCause cause);
+@@ -193,7 +212,7 @@ class ObjectSynchronizer : AllStatic {
+ static size_t get_gvars_size();
+ static u_char* get_gvars_stw_random_addr();
+
+- static void handle_sync_on_value_based_class(Handle obj, JavaThread* current);
++ static void handle_sync_on_value_based_class(Handle obj, JavaThread* locking_thread);
+ };
+
+ // ObjectLocker enforces balanced locking and can never throw an
+diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp
+index abd3a90d4..aa33e6303 100644
+--- a/src/hotspot/share/runtime/vmStructs.cpp
++++ b/src/hotspot/share/runtime/vmStructs.cpp
+@@ -2591,10 +2591,13 @@
+ declare_constant(markWord::lock_bits) \
+ declare_constant(markWord::max_hash_bits) \
+ declare_constant(markWord::hash_bits) \
++ declare_constant(markWord::hash_bits_compact) \
+ \
+ declare_constant(markWord::lock_shift) \
+ declare_constant(markWord::age_shift) \
+ declare_constant(markWord::hash_shift) \
++ declare_constant(markWord::hash_shift_compact) \
++ AARCH64_ONLY(declare_constant(markWord::klass_shift)) \
+ \
+ declare_constant(markWord::lock_mask) \
+ declare_constant(markWord::lock_mask_in_place) \
+@@ -2602,6 +2605,8 @@
+ declare_constant(markWord::age_mask_in_place) \
+ declare_constant(markWord::hash_mask) \
+ declare_constant(markWord::hash_mask_in_place) \
++ declare_constant(markWord::hash_mask_compact) \
++ declare_constant(markWord::hash_mask_compact_in_place) \
+ \
+ declare_constant(markWord::locked_value) \
+ declare_constant(markWord::unlocked_value) \
+diff --git a/src/hotspot/share/utilities/fastHash.hpp b/src/hotspot/share/utilities/fastHash.hpp
+new file mode 100644
+index 000000000..86b1dcf2b
+--- /dev/null
++++ b/src/hotspot/share/utilities/fastHash.hpp
+@@ -0,0 +1,97 @@
++/*
++ * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
++ * or visit www.oracle.com if you need additional information or have any
++ * questions.
++ *
++ */
++
++#ifndef SHARE_UTILITIES_FASTHASH_HPP
++#define SHARE_UTILITIES_FASTHASH_HPP
++
++#include "memory/allStatic.hpp"
++
++class FastHash : public AllStatic {
++private:
++ static void fullmul64(uint64_t& hi, uint64_t& lo, uint64_t op1, uint64_t op2) {
++#if defined(__SIZEOF_INT128__)
++ __uint128_t prod = static_cast<__uint128_t>(op1) * static_cast<__uint128_t>(op2);
++ hi = static_cast<uint64_t>(prod >> 64);
++ lo = static_cast<uint64_t>(prod >> 0);
++#else
++ /* First calculate all of the cross products. */
++ uint64_t lo_lo = (op1 & 0xFFFFFFFF) * (op2 & 0xFFFFFFFF);
++ uint64_t hi_lo = (op1 >> 32) * (op2 & 0xFFFFFFFF);
++ uint64_t lo_hi = (op1 & 0xFFFFFFFF) * (op2 >> 32);
++ uint64_t hi_hi = (op1 >> 32) * (op2 >> 32);
++
++ /* Now add the products together. These will never overflow. */
++ uint64_t cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
++ uint64_t upper = (hi_lo >> 32) + (cross >> 32) + hi_hi;
++ hi = upper;
++ lo = (cross << 32) | (lo_lo & 0xFFFFFFFF);
++#endif
++ }
++
++ static void fullmul32(uint32_t& hi, uint32_t& lo, uint32_t op1, uint32_t op2) {
++ uint64_t x64 = op1, y64 = op2, xy64 = x64 * y64;
++ hi = (uint32_t)(xy64 >> 32);
++ lo = (uint32_t)(xy64 >> 0);
++ }
++
++ static uint64_t ror(uint64_t x, uint64_t distance) {
++ distance = distance & 0x3F;
++ return (x >> distance) | (x << (64 - distance));
++ }
++
++public:
++ static uint64_t get_hash64(uint64_t x, uint64_t y) {
++ const uint64_t M = 0x8ADAE89C337954D5;
++ const uint64_t A = 0xAAAAAAAAAAAAAAAA; // REPAA
++ const uint64_t H0 = (x ^ y), L0 = (x ^ A);
++
++ uint64_t U0, V0; fullmul64(U0, V0, L0, M);
++ const uint64_t Q0 = (H0 * M);
++ const uint64_t L1 = (Q0 ^ U0);
++
++ uint64_t U1, V1; fullmul64(U1, V1, L1, M);
++ const uint64_t P1 = (V0 ^ M);
++ const uint64_t Q1 = ror(P1, L1);
++ const uint64_t L2 = (Q1 ^ U1);
++ return V1 ^ L2;
++ }
++
++ static uint32_t get_hash32(uint32_t x, uint32_t y) {
++ const uint32_t M = 0x337954D5;
++ const uint32_t A = 0xAAAAAAAA; // REPAA
++ const uint32_t H0 = (x ^ y), L0 = (x ^ A);
++
++ uint32_t U0, V0; fullmul32(U0, V0, L0, M);
++ const uint32_t Q0 = (H0 * M);
++ const uint32_t L1 = (Q0 ^ U0);
++
++ uint32_t U1, V1; fullmul32(U1, V1, L1, M);
++ const uint32_t P1 = (V0 ^ M);
++ const uint32_t Q1 = ror(P1, L1);
++ const uint32_t L2 = (Q1 ^ U1);
++ return V1 ^ L2;
++ }
++};
++
++#endif // SHARE_UTILITIES_FASTHASH_HPP
+diff --git a/src/hotspot/share/utilities/globalDefinitions.hpp b/src/hotspot/share/utilities/globalDefinitions.hpp
+index 625fdcc41..eec3dd468 100644
+--- a/src/hotspot/share/utilities/globalDefinitions.hpp
++++ b/src/hotspot/share/utilities/globalDefinitions.hpp
+@@ -1084,7 +1084,8 @@ const juint badHeapWordVal = 0xBAADBABE; // value used to zap
+ const juint badMetaWordVal = 0xBAADFADE; // value used to zap metadata heap after GC
+ const int badCodeHeapNewVal= 0xCC; // value used to zap Code heap at allocation
+ const int badCodeHeapFreeVal = 0xDD; // value used to zap Code heap at deallocation
+-
++const intptr_t badDispHeaderDeopt = 0xDE0BD000; // value to fill unused displaced header during deoptimization
++const intptr_t badDispHeaderOSR = 0xDEAD05A0; // value to fill unused displaced header during OSR
+
+ // (These must be implemented as #defines because C++ compilers are
+ // not obligated to inline non-integral constants!)
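The portable branch of FastHash::fullmul64 above builds the full 128-bit product of two 64-bit operands from four 32x32-bit partial products. A minimal standalone sketch (a local copy of that fallback, named fullmul64_portable here for illustration, and assuming a GCC/Clang-style compiler that provides __uint128_t for the cross-check) that verifies the decomposition against the wide-multiply path:

// Standalone check: the 32x32 cross-product fallback computes the same
// 128-bit product as the __uint128_t path it replaces when
// __SIZEOF_INT128__ is unavailable.
#include <cassert>
#include <cstdint>
#include <cstdio>

static void fullmul64_portable(uint64_t& hi, uint64_t& lo, uint64_t op1, uint64_t op2) {
  uint64_t lo_lo = (op1 & 0xFFFFFFFF) * (op2 & 0xFFFFFFFF);
  uint64_t hi_lo = (op1 >> 32) * (op2 & 0xFFFFFFFF);
  uint64_t lo_hi = (op1 & 0xFFFFFFFF) * (op2 >> 32);
  uint64_t hi_hi = (op1 >> 32) * (op2 >> 32);
  // Each sum fits in 64 bits: the carries from the low halves cannot
  // overflow the headroom left by the 32x32-bit partial products.
  uint64_t cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
  hi = (hi_lo >> 32) + (cross >> 32) + hi_hi;
  lo = (cross << 32) | (lo_lo & 0xFFFFFFFF);
}

int main() {
  const uint64_t samples[] = { 0, 1, 0x8ADAE89C337954D5ULL, 0xAAAAAAAAAAAAAAAAULL, UINT64_MAX };
  for (uint64_t a : samples) {
    for (uint64_t b : samples) {
      uint64_t hi, lo;
      fullmul64_portable(hi, lo, a, b);
      __uint128_t prod = static_cast<__uint128_t>(a) * b;
      assert(hi == static_cast<uint64_t>(prod >> 64));
      assert(lo == static_cast<uint64_t>(prod));
    }
  }
  printf("fullmul64 fallback matches the __uint128_t wide multiply\n");
  return 0;
}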
+diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/DebuggerBase.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/DebuggerBase.java +index 67bba3311..2be7089ac 100644 +--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/DebuggerBase.java ++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/DebuggerBase.java +@@ -24,6 +24,9 @@ + + package sun.jvm.hotspot.debugger; + ++import sun.jvm.hotspot.oops.Mark; ++import sun.jvm.hotspot.runtime.VM; ++ + /**

DebuggerBase is a recommended base class for debugger
+ implementations. It can use a PageCache to cache data from the
+ target process. Note that this class would not be suitable if the
+@@ -394,7 +397,15 @@ public abstract class DebuggerBase implements Debugger {
+
+ protected long readCompKlassAddressValue(long address)
+ throws UnmappedAddressException, UnalignedAddressException {
+- long value = readCInteger(address, getKlassPtrSize(), true);
++ long value;
++ if (VM.getVM().isCompactObjectHeadersEnabled()) {
++ // With compact headers, the compressed Klass* is currently read from the mark
++ // word. We need to load the whole mark, and shift the upper parts.
++ value = readCInteger(address, machDesc.getAddressSize(), true);
++ value = value >>> Mark.getKlassShift();
++ } else {
++ value = readCInteger(address, getKlassPtrSize(), true);
++ }
+ if (value != 0) {
+ value = (long)(narrowKlassBase + (long)(value << narrowKlassShift));
+ }
+diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/MachineDescription.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/MachineDescription.java
+index e83dd248c..ed7502188 100644
+--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/MachineDescription.java
++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/MachineDescription.java
+@@ -62,4 +62,9 @@ public interface MachineDescription extends Serializable {
+ default public boolean supports32bitAlignmentOf64bitTypes() {
+ return false;
+ }
++
++ /** Indicates whether the underlying machine is an AArch64 platform. */
++ default public boolean isAarch64() {
++ return false;
++ }
+ }
+diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionAArch64.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionAArch64.java
+index 449ab72be..fa3186aba 100644
+--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionAArch64.java
++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionAArch64.java
+@@ -36,4 +36,8 @@ public class MachineDescriptionAArch64 extends MachineDescriptionTwosComplement
+ public boolean isBigEndian() {
+ return false;
+ }
++
++ public boolean isAarch64() {
++ return true;
++ }
+ }
+diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Universe.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Universe.java
+index f2f5422ad..4b27e8bed 100644
+--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Universe.java
++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Universe.java
+@@ -116,13 +116,6 @@ public class Universe {
+ heap().printOn(tty);
+ }
+
+- // Check whether an element of a typeArrayOop with the given type must be
+- // aligned 0 mod 8. The typeArrayOop itself must be aligned at least this
+- // strongly.
+- public static boolean elementTypeShouldBeAligned(BasicType type) {
+- return type == BasicType.T_DOUBLE || type == BasicType.T_LONG;
+- }
+-
+ // Check whether an object field (static/non-static) of the given type must be
+ // aligned 0 mod 8.
+ public static boolean fieldTypeShouldBeAligned(BasicType type) {
+diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Array.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Array.java
+index 6ba23c9ea..413764c47 100644
+--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Array.java
++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Array.java
+@@ -57,32 +57,33 @@ public class Array extends Oop {
+ private static long lengthOffsetInBytes=0;
+ private static long typeSize;
+
++ // Check whether an element of an arrayOop with the given type must be
++ // aligned 0 mod 8. The arrayOop itself must be aligned at least this
++ // strongly.
++ private static boolean elementTypeShouldBeAligned(BasicType type) {
++ if (VM.getVM().isLP64()) {
++ if (type == BasicType.T_OBJECT || type == BasicType.T_ARRAY) {
++ return !VM.getVM().isCompressedOopsEnabled();
++ }
++ }
++ return type == BasicType.T_DOUBLE || type == BasicType.T_LONG;
++ }
++
+ private static long headerSizeInBytes() {
+ if (headerSize != 0) {
+ return headerSize;
+ }
+- if (VM.getVM().isCompressedKlassPointersEnabled()) {
+- headerSize = typeSize;
+- } else {
+- headerSize = VM.getVM().alignUp(typeSize + VM.getVM().getIntSize(),
+- VM.getVM().getHeapWordSize());
+- }
++ headerSize = lengthOffsetInBytes() + VM.getVM().getIntSize();
+ return headerSize;
+ }
+
+- private static long headerSize(BasicType type) {
+- if (Universe.elementTypeShouldBeAligned(type)) {
+- return alignObjectSize(headerSizeInBytes())/VM.getVM().getHeapWordSize();
+- } else {
+- return headerSizeInBytes()/VM.getVM().getHeapWordSize();
+- }
+- }
+-
+- private long lengthOffsetInBytes() {
++ private static long lengthOffsetInBytes() {
+ if (lengthOffsetInBytes != 0) {
+ return lengthOffsetInBytes;
+ }
+- if (VM.getVM().isCompressedKlassPointersEnabled()) {
++ if (VM.getVM().isCompactObjectHeadersEnabled()) {
++ lengthOffsetInBytes = Oop.getHeaderSize();
++ } else if (VM.getVM().isCompressedKlassPointersEnabled()) {
+ lengthOffsetInBytes = typeSize - VM.getVM().getIntSize();
+ } else {
+ lengthOffsetInBytes = typeSize;
+@@ -108,7 +109,13 @@ public class Array extends Oop {
+ }
+
+ public static long baseOffsetInBytes(BasicType type) {
+- return headerSize(type) * VM.getVM().getHeapWordSize();
++ long typeSizeInBytes = headerSizeInBytes();
++ if (elementTypeShouldBeAligned(type)) {
++ VM vm = VM.getVM();
++ return vm.alignUp(typeSizeInBytes, vm.getHeapWordSize());
++ } else {
++ return typeSizeInBytes;
++ }
+ }
+
+ public boolean isArray() { return true; }
+diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Instance.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Instance.java
+index b837d869e..fd364d6a1 100644
+--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Instance.java
++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Instance.java
+@@ -55,7 +55,9 @@ public class Instance extends Oop {
+
+ // Returns header size in bytes.
+ public static long getHeaderSize() { +- if (VM.getVM().isCompressedKlassPointersEnabled()) { ++ if (VM.getVM().isCompactObjectHeadersEnabled()) { ++ return Oop.getHeaderSize(); ++ } else if (VM.getVM().isCompressedKlassPointersEnabled()) { + return typeSize - VM.getVM().getIntSize(); + } else { + return typeSize; +diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java +index 7512257a1..c1c6438ee 100644 +--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java ++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java +@@ -51,15 +51,22 @@ public class Mark extends VMObject { + lockBits = db.lookupLongConstant("markWord::lock_bits").longValue(); + maxHashBits = db.lookupLongConstant("markWord::max_hash_bits").longValue(); + hashBits = db.lookupLongConstant("markWord::hash_bits").longValue(); ++ hashBitsCompact = db.lookupLongConstant("markWord::hash_bits_compact").longValue(); + lockShift = db.lookupLongConstant("markWord::lock_shift").longValue(); + ageShift = db.lookupLongConstant("markWord::age_shift").longValue(); + hashShift = db.lookupLongConstant("markWord::hash_shift").longValue(); ++ hashShiftCompact = db.lookupLongConstant("markWord::hash_shift_compact").longValue(); ++ if (VM.getVM().isAarch64()) { ++ klassShift = db.lookupLongConstant("markWord::klass_shift").longValue(); ++ } + lockMask = db.lookupLongConstant("markWord::lock_mask").longValue(); + lockMaskInPlace = db.lookupLongConstant("markWord::lock_mask_in_place").longValue(); + ageMask = db.lookupLongConstant("markWord::age_mask").longValue(); + ageMaskInPlace = db.lookupLongConstant("markWord::age_mask_in_place").longValue(); + hashMask = db.lookupLongConstant("markWord::hash_mask").longValue(); + hashMaskInPlace = db.lookupLongConstant("markWord::hash_mask_in_place").longValue(); ++ hashMaskCompact = db.lookupLongConstant("markWord::hash_mask_compact").longValue(); ++ hashMaskCompactInPlace = db.lookupLongConstant("markWord::hash_mask_compact_in_place").longValue(); + lockedValue = db.lookupLongConstant("markWord::locked_value").longValue(); + unlockedValue = db.lookupLongConstant("markWord::unlocked_value").longValue(); + monitorValue = db.lookupLongConstant("markWord::monitor_value").longValue(); +@@ -78,10 +85,13 @@ public class Mark extends VMObject { + private static long lockBits; + private static long maxHashBits; + private static long hashBits; ++ private static long hashBitsCompact; + + private static long lockShift; + private static long ageShift; + private static long hashShift; ++ private static long hashShiftCompact; ++ private static long klassShift; + + private static long lockMask; + private static long lockMaskInPlace; +@@ -89,6 +99,8 @@ public class Mark extends VMObject { + private static long ageMaskInPlace; + private static long hashMask; + private static long hashMaskInPlace; ++ private static long hashMaskCompact; ++ private static long hashMaskCompactInPlace; + + private static long lockedValue; + private static long unlockedValue; +@@ -102,6 +114,10 @@ public class Mark extends VMObject { + + private static long maxAge; + ++ public static long getKlassShift() { ++ return klassShift; ++ } ++ + public Mark(Address addr) { + super(addr); + } +@@ -174,13 +190,23 @@ public class Mark extends VMObject { + + // hash operations + public long hash() { +- return Bits.maskBitsLong(value() >> hashShift, hashMask); ++ if (VM.getVM().isCompactObjectHeadersEnabled()) { ++ return 
Bits.maskBitsLong(value() >> hashShiftCompact, hashMaskCompact); ++ } else { ++ return Bits.maskBitsLong(value() >> hashShift, hashMask); ++ } + } + + public boolean hasNoHash() { + return hash() == noHash; + } + ++ public Klass getKlass() { ++ assert(VM.getVM().isCompactObjectHeadersEnabled()); ++ assert(!hasMonitor()); ++ return (Klass)Metadata.instantiateWrapperFor(addr.getCompKlassAddressAt(0)); ++ } ++ + // Debugging + public void printOn(PrintStream tty) { + if (isLocked()) { +diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Oop.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Oop.java +index bf957941a..c0c0441d3 100644 +--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Oop.java ++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Oop.java +@@ -46,9 +46,14 @@ public class Oop { + private static synchronized void initialize(TypeDataBase db) throws WrongTypeException { + Type type = db.lookupType("oopDesc"); + mark = new CIntField(type.getCIntegerField("_mark"), 0); +- klass = new MetadataField(type.getAddressField("_metadata._klass"), 0); +- compressedKlass = new NarrowKlassField(type.getAddressField("_metadata._compressed_klass"), 0); +- headerSize = type.getSize(); ++ if (VM.getVM().isCompactObjectHeadersEnabled()) { ++ Type markType = db.lookupType("markWord"); ++ headerSize = markType.getSize(); ++ } else { ++ headerSize = type.getSize(); ++ klass = new MetadataField(type.getAddressField("_metadata._klass"), 0); ++ compressedKlass = new NarrowKlassField(type.getAddressField("_metadata._compressed_klass"), 0); ++ } + } + + private OopHandle handle; +@@ -75,8 +80,21 @@ public class Oop { + + // Accessors for declared fields + public Mark getMark() { return new Mark(getHandle()); } ++ ++ private static Klass getKlass(Mark mark) { ++ assert(VM.getVM().isCompactObjectHeadersEnabled()); ++ if (mark.hasMonitor()) { ++ ObjectMonitor mon = mark.monitor(); ++ mark = mon.header(); ++ } ++ return mark.getKlass(); ++ } ++ + public Klass getKlass() { +- if (VM.getVM().isCompressedKlassPointersEnabled()) { ++ if (VM.getVM().isCompactObjectHeadersEnabled()) { ++ assert(VM.getVM().isCompressedKlassPointersEnabled()); ++ return getKlass(getMark()); ++ } else if (VM.getVM().isCompressedKlassPointersEnabled()) { + return (Klass)compressedKlass.getValue(getHandle()); + } else { + return (Klass)klass.getValue(getHandle()); +@@ -147,10 +165,12 @@ public class Oop { + void iterateFields(OopVisitor visitor, boolean doVMFields) { + if (doVMFields) { + visitor.doCInt(mark, true); +- if (VM.getVM().isCompressedKlassPointersEnabled()) { +- visitor.doMetadata(compressedKlass, true); +- } else { +- visitor.doMetadata(klass, true); ++ if (!VM.getVM().isCompactObjectHeadersEnabled()) { ++ if (VM.getVM().isCompressedKlassPointersEnabled()) { ++ visitor.doMetadata(compressedKlass, true); ++ } else { ++ visitor.doMetadata(klass, true); ++ } + } + } + } +@@ -206,7 +226,10 @@ public class Oop { + if (handle == null) { + return null; + } +- if (VM.getVM().isCompressedKlassPointersEnabled()) { ++ if (VM.getVM().isCompactObjectHeadersEnabled()) { ++ Mark mark = new Mark(handle); ++ return getKlass(mark); ++ } else if (VM.getVM().isCompressedKlassPointersEnabled()) { + return (Klass)Metadata.instantiateWrapperFor(handle.getCompKlassAddressAt(compressedKlass.getOffset())); + } else { + return (Klass)Metadata.instantiateWrapperFor(handle.getAddressAt(klass.getOffset())); +diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java 
b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java
+index 8a5d704de..dd9b45e6b 100644
+--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java
++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java
+@@ -96,6 +96,7 @@ public class VM {
+ private boolean usingServerCompiler;
+ /** alignment constants */
+ private boolean isLP64;
++ private boolean isAarch64;
+ private int bytesPerLong;
+ private int bytesPerWord;
+ private int logBytesPerWord;
+@@ -148,6 +149,7 @@ public class VM {
+ private Boolean sharingEnabled;
+ private Boolean compressedOopsEnabled;
+ private Boolean compressedKlassPointersEnabled;
++ private Boolean compactObjectHeadersEnabled;
+
+ // command line flags supplied to VM - see struct JVMFlag in jvmFlag.hpp
+ public static final class Flag {
+@@ -475,6 +477,7 @@ public class VM {
+
+ if (debugger != null) {
+ isLP64 = debugger.getMachineDescription().isLP64();
++ isAarch64 = debugger.getMachineDescription().isAarch64();
+ }
+ bytesPerLong = db.lookupIntConstant("BytesPerLong").intValue();
+ bytesPerWord = db.lookupIntConstant("BytesPerWord").intValue();
+@@ -679,6 +682,15 @@ public class VM {
+ return isLP64;
+ }
+
++ /** Indicates whether the underlying machine is an AArch64
++ platform. This is needed for conditionalizing code in a few places */
++ public boolean isAarch64() {
++ if (Assert.ASSERTS_ENABLED) {
++ Assert.that(isDebugging(), "Debugging system only for now");
++ }
++ return isAarch64;
++ }
++
+ /** Get bytes-per-long == long/double natural alignment. */
+ public int getBytesPerLong() {
+ return bytesPerLong;
+@@ -970,6 +982,15 @@ public class VM {
+ return compressedKlassPointersEnabled.booleanValue();
+ }
+
++ public boolean isCompactObjectHeadersEnabled() {
++ if (compactObjectHeadersEnabled == null) {
++ Flag flag = getCommandLineFlag("UseCompactObjectHeaders");
++ compactObjectHeadersEnabled = (flag == null) ? Boolean.FALSE:
++ (flag.getBool()? Boolean.TRUE: Boolean.FALSE);
++ }
++ return compactObjectHeadersEnabled.booleanValue();
++ }
++
+ public int getObjectAlignmentInBytes() {
+ if (objectAlignmentInBytes == 0) {
+ Flag flag = getCommandLineFlag("ObjectAlignmentInBytes");
+diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/RobustOopDeterminator.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/RobustOopDeterminator.java
+index 3f701b8d2..6a8b794a9 100644
+--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/RobustOopDeterminator.java
++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/RobustOopDeterminator.java
+@@ -26,6 +26,7 @@ package sun.jvm.hotspot.utilities;
+
+ import sun.jvm.hotspot.debugger.*;
+ import sun.jvm.hotspot.oops.Metadata;
++import sun.jvm.hotspot.oops.Oop;
+ import sun.jvm.hotspot.runtime.*;
+ import sun.jvm.hotspot.types.*;
+
+@@ -37,26 +38,6 @@ import sun.jvm.hotspot.types.*;
+ states than the ObjectHeap code.
*/ + + public class RobustOopDeterminator { +- private static AddressField klassField; +- +- static { +- VM.registerVMInitializedObserver(new Observer() { +- public void update(Observable o, Object data) { +- initialize(VM.getVM().getTypeDataBase()); +- } +- }); +- } +- +- private static void initialize(TypeDataBase db) { +- Type type = db.lookupType("oopDesc"); +- +- if (VM.getVM().isCompressedKlassPointersEnabled()) { +- klassField = type.getAddressField("_metadata._compressed_klass"); +- } else { +- klassField = type.getAddressField("_metadata._klass"); +- } +- } +- + public static boolean oopLooksValid(OopHandle oop) { + if (oop == null) { + return false; +@@ -66,11 +47,7 @@ public class RobustOopDeterminator { + } + try { + // Try to instantiate the Klass +- if (VM.getVM().isCompressedKlassPointersEnabled()) { +- Metadata.instantiateWrapperFor(oop.getCompKlassAddressAt(klassField.getOffset())); +- } else { +- Metadata.instantiateWrapperFor(klassField.getValue(oop)); +- } ++ Oop.getKlassForOopHandle(oop); + return true; + } catch (AddressException | WrongTypeException e) { + return false; +diff --git a/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp b/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp +index 5f9a36110..17658d794 100644 +--- a/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp ++++ b/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp +@@ -22,6 +22,7 @@ + */ + + #include "precompiled.hpp" ++#include "gc/shared/gc_globals.hpp" + #include "gc/shared/preservedMarks.inline.hpp" + #include "oops/oop.inline.hpp" + #include "unittest.hpp" +@@ -55,6 +56,8 @@ TEST_VM(PreservedMarks, iterate_and_restore) { + FakeOop o3; + FakeOop o4; + ++ FlagSetting fs(UseAltGCForwarding, false); ++ + // Make sure initial marks are correct. + ASSERT_MARK_WORD_EQ(o1.mark(), FakeOop::originalMark()); + ASSERT_MARK_WORD_EQ(o2.mark(), FakeOop::originalMark()); +diff --git a/test/hotspot/gtest/gc/shared/test_slidingForwarding.cpp b/test/hotspot/gtest/gc/shared/test_slidingForwarding.cpp +new file mode 100644 +index 000000000..418b27bf4 +--- /dev/null ++++ b/test/hotspot/gtest/gc/shared/test_slidingForwarding.cpp +@@ -0,0 +1,124 @@ ++/* ++ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ */ ++ ++#include "precompiled.hpp" ++#include "gc/shared/gc_globals.hpp" ++#include "gc/shared/slidingForwarding.inline.hpp" ++#include "oops/markWord.hpp" ++#include "oops/oop.inline.hpp" ++#include "utilities/align.hpp" ++#include "unittest.hpp" ++ ++#ifdef _LP64 ++#ifndef PRODUCT ++ ++static uintptr_t make_mark(uintptr_t target_region, uintptr_t offset) { ++ return (target_region) << 3 | (offset << 4) | 3 /* forwarded */; ++} ++ ++static uintptr_t make_fallback() { ++ return ((uintptr_t(1) << 2) /* fallback */ | 3 /* forwarded */); ++} ++ ++// Test simple forwarding within the same region. ++TEST_VM(SlidingForwarding, simple) { ++ FlagSetting fs(UseAltGCForwarding, true); ++ HeapWord fakeheap[32] = { nullptr }; ++ HeapWord* heap = align_up(fakeheap, 8 * sizeof(HeapWord)); ++ oop obj1 = cast_to_oop(&heap[2]); ++ oop obj2 = cast_to_oop(&heap[0]); ++ SlidingForwarding::initialize(MemRegion(&heap[0], &heap[16]), 8); ++ obj1->set_mark(markWord::prototype()); ++ SlidingForwarding::begin(); ++ ++ SlidingForwarding::forward_to(obj1, obj2); ++ ASSERT_EQ(obj1->mark().value(), make_mark(0 /* target_region */, 0 /* offset */)); ++ ASSERT_EQ(SlidingForwarding::forwardee(obj1), obj2); ++ ++ SlidingForwarding::end(); ++} ++ ++// Test forwardings crossing 2 regions. ++TEST_VM(SlidingForwarding, tworegions) { ++ FlagSetting fs(UseAltGCForwarding, true); ++ HeapWord fakeheap[32] = { nullptr }; ++ HeapWord* heap = align_up(fakeheap, 8 * sizeof(HeapWord)); ++ oop obj1 = cast_to_oop(&heap[14]); ++ oop obj2 = cast_to_oop(&heap[2]); ++ oop obj3 = cast_to_oop(&heap[10]); ++ SlidingForwarding::initialize(MemRegion(&heap[0], &heap[16]), 8); ++ obj1->set_mark(markWord::prototype()); ++ SlidingForwarding::begin(); ++ ++ SlidingForwarding::forward_to(obj1, obj2); ++ ASSERT_EQ(obj1->mark().value(), make_mark(0 /* target_region */, 2 /* offset */)); ++ ASSERT_EQ(SlidingForwarding::forwardee(obj1), obj2); ++ ++ SlidingForwarding::forward_to(obj1, obj3); ++ ASSERT_EQ(obj1->mark().value(), make_mark(1 /* target_region */, 2 /* offset */)); ++ ASSERT_EQ(SlidingForwarding::forwardee(obj1), obj3); ++ ++ SlidingForwarding::end(); ++} ++ ++// Test fallback forwardings crossing 4 regions. 
++TEST_VM(SlidingForwarding, fallback) { ++ FlagSetting fs(UseAltGCForwarding, true); ++ HeapWord fakeheap[32] = { nullptr }; ++ HeapWord* heap = align_up(fakeheap, 8 * sizeof(HeapWord)); ++ oop s_obj1 = cast_to_oop(&heap[12]); ++ oop s_obj2 = cast_to_oop(&heap[13]); ++ oop s_obj3 = cast_to_oop(&heap[14]); ++ oop s_obj4 = cast_to_oop(&heap[15]); ++ oop t_obj1 = cast_to_oop(&heap[2]); ++ oop t_obj2 = cast_to_oop(&heap[4]); ++ oop t_obj3 = cast_to_oop(&heap[10]); ++ oop t_obj4 = cast_to_oop(&heap[12]); ++ SlidingForwarding::initialize(MemRegion(&heap[0], &heap[16]), 4); ++ s_obj1->set_mark(markWord::prototype()); ++ s_obj2->set_mark(markWord::prototype()); ++ s_obj3->set_mark(markWord::prototype()); ++ s_obj4->set_mark(markWord::prototype()); ++ SlidingForwarding::begin(); ++ ++ SlidingForwarding::forward_to(s_obj1, t_obj1); ++ ASSERT_EQ(s_obj1->mark().value(), make_mark(0 /* target_region */, 2 /* offset */)); ++ ASSERT_EQ(SlidingForwarding::forwardee(s_obj1), t_obj1); ++ ++ SlidingForwarding::forward_to(s_obj2, t_obj2); ++ ASSERT_EQ(s_obj2->mark().value(), make_mark(1 /* target_region */, 0 /* offset */)); ++ ASSERT_EQ(SlidingForwarding::forwardee(s_obj2), t_obj2); ++ ++ SlidingForwarding::forward_to(s_obj3, t_obj3); ++ ASSERT_EQ(s_obj3->mark().value(), make_fallback()); ++ ASSERT_EQ(SlidingForwarding::forwardee(s_obj3), t_obj3); ++ ++ SlidingForwarding::forward_to(s_obj4, t_obj4); ++ ASSERT_EQ(s_obj4->mark().value(), make_fallback()); ++ ASSERT_EQ(SlidingForwarding::forwardee(s_obj4), t_obj4); ++ ++ SlidingForwarding::end(); ++} ++ ++#endif // PRODUCT ++#endif // _LP64 +diff --git a/test/hotspot/gtest/oops/test_arrayOop.cpp b/test/hotspot/gtest/oops/test_arrayOop.cpp +index 84063813b..daec71172 100644 +--- a/test/hotspot/gtest/oops/test_arrayOop.cpp ++++ b/test/hotspot/gtest/oops/test_arrayOop.cpp +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it +@@ -27,19 +27,11 @@ + #include "unittest.hpp" + #include "utilities/globalDefinitions.hpp" + +-class arrayOopDescTest { +- public: +- +- static int header_size_in_bytes() { +- return arrayOopDesc::header_size_in_bytes(); +- } +-}; +- + static bool check_max_length_overflow(BasicType type) { + julong length = arrayOopDesc::max_array_length(type); + julong bytes_per_element = type2aelembytes(type); + julong bytes = length * bytes_per_element +- + arrayOopDescTest::header_size_in_bytes(); ++ + arrayOopDesc::base_offset_in_bytes(type); + return (julong) (size_t) bytes == bytes; + } + +@@ -87,3 +79,58 @@ TEST_VM(arrayOopDesc, narrowOop) { + ASSERT_PRED1(check_max_length_overflow, T_NARROWOOP); + } + // T_VOID and T_ADDRESS are not supported by max_array_length() ++ ++TEST_VM(arrayOopDesc, base_offset) { ++#ifdef _LP64 ++ if (UseCompactObjectHeaders) { ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_BOOLEAN), 12); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_BYTE), 12); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_SHORT), 12); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_CHAR), 12); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_INT), 12); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_FLOAT), 12); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_LONG), 16); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_DOUBLE), 16); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_OBJECT), 12); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_ARRAY), 12); ++ } else if (UseCompressedClassPointers) { ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_BOOLEAN), 16); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_BYTE), 16); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_SHORT), 16); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_CHAR), 16); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_INT), 16); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_FLOAT), 16); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_LONG), 16); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_DOUBLE), 16); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_OBJECT), 16); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_ARRAY), 16); ++ } else { ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_BOOLEAN), 20); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_BYTE), 20); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_SHORT), 20); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_CHAR), 20); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_INT), 20); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_FLOAT), 20); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_LONG), 24); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_DOUBLE), 24); ++ if (UseCompressedOops) { ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_OBJECT), 20); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_ARRAY), 20); ++ } else { ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_OBJECT), 24); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_ARRAY), 24); ++ } ++ } ++#else ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_BOOLEAN), 12); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_BYTE), 12); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_SHORT), 12); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_CHAR), 12); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_INT), 12); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_FLOAT), 12); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_LONG), 16); ++ 
EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_DOUBLE), 16); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_OBJECT), 12); ++ EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_ARRAY), 12); ++#endif ++} +diff --git a/test/hotspot/gtest/oops/test_objArrayOop.cpp b/test/hotspot/gtest/oops/test_objArrayOop.cpp +new file mode 100644 +index 000000000..89608f76f +--- /dev/null ++++ b/test/hotspot/gtest/oops/test_objArrayOop.cpp +@@ -0,0 +1,69 @@ ++/* ++ * Copyright Amazon.com Inc. or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ */ ++ ++#include "precompiled.hpp" ++#include "oops/objArrayOop.hpp" ++#include "unittest.hpp" ++#include "utilities/globalDefinitions.hpp" ++ ++TEST_VM(objArrayOop, osize) { ++ static const struct { ++ int objal; bool ccp; bool coops; bool coh; int result; ++ } x[] = { ++// ObjAligInB, UseCCP, UseCoops, UseCOH, object size in heap words ++#ifdef _LP64 ++ { 8, false, false, false, 4 }, // 20 byte header, 8 byte oops ++ { 8, false, true, false, 3 }, // 20 byte header, 4 byte oops ++ { 8, true, false, false, 3 }, // 16 byte header, 8 byte oops ++ { 8, true, true, false, 3 }, // 16 byte header, 4 byte oops ++ { 16, false, false, false, 4 }, // 20 byte header, 8 byte oops, 16-byte align ++ { 16, false, true, false, 4 }, // 20 byte header, 4 byte oops, 16-byte align ++ { 16, true, false, false, 4 }, // 16 byte header, 8 byte oops, 16-byte align ++ { 16, true, true, false, 4 }, // 16 byte header, 4 byte oops, 16-byte align ++ { 256, false, false, false, 32 }, // 20 byte header, 8 byte oops, 256-byte align ++ { 256, false, true, false, 32 }, // 20 byte header, 4 byte oops, 256-byte align ++ { 256, true, false, false, 32 }, // 16 byte header, 8 byte oops, 256-byte align ++ { 256, true, true, false, 32 }, // 16 byte header, 4 byte oops, 256-byte align ++ { 8, false, false, true, 3 }, // 16 byte header, 8 byte oops ++ { 8, false, true, true, 2 }, // 12 byte header, 4 byte oops ++ { 8, true, false, true, 3 }, // 16 byte header, 8 byte oops ++ { 8, true, true, true, 2 }, // 12 byte header, 4 byte oops ++ { 16, false, false, true, 4 }, // 16 byte header, 8 byte oops, 16-byte align ++ { 16, false, true, true, 2 }, // 12 byte header, 4 byte oops, 16-byte align ++ { 16, true, false, true, 4 }, // 16 byte header, 8 byte oops, 16-byte align ++ { 16, true, true, true, 2 }, // 12 byte header, 4 byte oops, 16-byte align ++ { 256, false, false, true, 32 }, // 16 byte header, 8 byte oops, 256-byte align ++ { 256, false, true, true, 32 }, // 12 byte header, 4 byte oops, 256-byte align ++ { 256, true, false, true, 
32 }, // 16 byte header, 8 byte oops, 256-byte align ++ { 256, true, true, true, 32 }, // 12 byte header, 4 byte oops, 256-byte align ++#else ++ { 8, false, false, false, 4 }, // 12 byte header, 4 byte oops, wordsize 4 ++#endif ++ { -1, false, false, false, -1 } ++ }; ++ for (int i = 0; x[i].result != -1; i++) { ++ if (x[i].objal == (int)ObjectAlignmentInBytes && x[i].ccp == UseCompressedClassPointers && x[i].coops == UseCompressedOops && x[i].coh == UseCompactObjectHeaders) { ++ EXPECT_EQ(objArrayOopDesc::object_size(1), (size_t)x[i].result); ++ } ++ } ++} +diff --git a/test/hotspot/gtest/oops/test_typeArrayOop.cpp b/test/hotspot/gtest/oops/test_typeArrayOop.cpp +index 2d9c8cfd9..fc6f4c439 100644 +--- a/test/hotspot/gtest/oops/test_typeArrayOop.cpp ++++ b/test/hotspot/gtest/oops/test_typeArrayOop.cpp +@@ -36,7 +36,11 @@ TEST_VM(typeArrayOopDesc, bool_at_put) { + char* addr = align_up(mem, 16); + + typeArrayOop o = (typeArrayOop) cast_to_oop(addr); +- o->set_klass(Universe::boolArrayKlassObj()); ++ if (UseCompactObjectHeaders) { ++ o->set_mark(Universe::boolArrayKlassObj()->prototype_header()); ++ } else { ++ o->set_klass(Universe::boolArrayKlassObj()); ++ } + o->set_length(10); + + +diff --git a/test/hotspot/gtest/runtime/test_lockStack.cpp b/test/hotspot/gtest/runtime/test_lockStack.cpp +new file mode 100644 +index 000000000..43e8959ed +--- /dev/null ++++ b/test/hotspot/gtest/runtime/test_lockStack.cpp +@@ -0,0 +1,427 @@ ++/* ++ * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ */ ++ ++#include "precompiled.hpp" ++#include "runtime/interfaceSupport.inline.hpp" ++#include "runtime/lockStack.inline.hpp" ++#include "runtime/os.hpp" ++#include "unittest.hpp" ++#include "utilities/globalDefinitions.hpp" ++ ++class LockStackTest : public ::testing::Test { ++public: ++ static void push_raw(LockStack& ls, oop obj) { ++ ls._base[ls.to_index(ls._top)] = obj; ++ ls._top += oopSize; ++ } ++ ++ static void pop_raw(LockStack& ls) { ++ ls._top -= oopSize; ++#ifdef ASSERT ++ ls._base[ls.to_index(ls._top)] = nullptr; ++#endif ++ } ++ ++ static oop at(LockStack& ls, int index) { ++ return ls._base[index]; ++ } ++ ++ static size_t size(LockStack& ls) { ++ return ls.to_index(ls._top); ++ } ++}; ++ ++#define recursive_enter(ls, obj) \ ++ do { \ ++ bool ret = ls.try_recursive_enter(obj); \ ++ EXPECT_TRUE(ret); \ ++ } while (false) ++ ++#define recursive_exit(ls, obj) \ ++ do { \ ++ bool ret = ls.try_recursive_exit(obj); \ ++ EXPECT_TRUE(ret); \ ++ } while (false) ++ ++TEST_VM_F(LockStackTest, is_recursive) { ++ if (LockingMode != LM_LIGHTWEIGHT || !VM_Version::supports_recursive_lightweight_locking()) { ++ return; ++ } ++ ++ JavaThread* THREAD = JavaThread::current(); ++ // the thread should be in vm to use locks ++ ThreadInVMfromNative ThreadInVMfromNative(THREAD); ++ ++ LockStack& ls = THREAD->lock_stack(); ++ ++ EXPECT_TRUE(ls.is_empty()); ++ ++ oop obj0 = Universe::int_mirror(); ++ oop obj1 = Universe::float_mirror(); ++ ++ push_raw(ls, obj0); ++ ++ // 0 ++ EXPECT_FALSE(ls.is_recursive(obj0)); ++ ++ push_raw(ls, obj1); ++ ++ // 0, 1 ++ EXPECT_FALSE(ls.is_recursive(obj0)); ++ EXPECT_FALSE(ls.is_recursive(obj1)); ++ ++ push_raw(ls, obj1); ++ ++ // 0, 1, 1 ++ EXPECT_FALSE(ls.is_recursive(obj0)); ++ EXPECT_TRUE(ls.is_recursive(obj1)); ++ ++ pop_raw(ls); ++ pop_raw(ls); ++ push_raw(ls, obj0); ++ ++ // 0, 0 ++ EXPECT_TRUE(ls.is_recursive(obj0)); ++ ++ push_raw(ls, obj0); ++ ++ // 0, 0, 0 ++ EXPECT_TRUE(ls.is_recursive(obj0)); ++ ++ pop_raw(ls); ++ push_raw(ls, obj1); ++ ++ // 0, 0, 1 ++ EXPECT_TRUE(ls.is_recursive(obj0)); ++ EXPECT_FALSE(ls.is_recursive(obj1)); ++ ++ push_raw(ls, obj1); ++ ++ // 0, 0, 1, 1 ++ EXPECT_TRUE(ls.is_recursive(obj0)); ++ EXPECT_TRUE(ls.is_recursive(obj1)); ++ ++ // Clear stack ++ pop_raw(ls); ++ pop_raw(ls); ++ pop_raw(ls); ++ pop_raw(ls); ++ ++ EXPECT_TRUE(ls.is_empty()); ++} ++ ++TEST_VM_F(LockStackTest, try_recursive_enter) { ++ if (LockingMode != LM_LIGHTWEIGHT || !VM_Version::supports_recursive_lightweight_locking()) { ++ return; ++ } ++ ++ JavaThread* THREAD = JavaThread::current(); ++ // the thread should be in vm to use locks ++ ThreadInVMfromNative ThreadInVMfromNative(THREAD); ++ ++ LockStack& ls = THREAD->lock_stack(); ++ ++ EXPECT_TRUE(ls.is_empty()); ++ ++ oop obj0 = Universe::int_mirror(); ++ oop obj1 = Universe::float_mirror(); ++ ++ ls.push(obj0); ++ ++ // 0 ++ EXPECT_FALSE(ls.is_recursive(obj0)); ++ ++ ls.push(obj1); ++ ++ // 0, 1 ++ EXPECT_FALSE(ls.is_recursive(obj0)); ++ EXPECT_FALSE(ls.is_recursive(obj1)); ++ ++ recursive_enter(ls, obj1); ++ ++ // 0, 1, 1 ++ EXPECT_FALSE(ls.is_recursive(obj0)); ++ EXPECT_TRUE(ls.is_recursive(obj1)); ++ ++ recursive_exit(ls, obj1); ++ pop_raw(ls); ++ recursive_enter(ls, obj0); ++ ++ // 0, 0 ++ EXPECT_TRUE(ls.is_recursive(obj0)); ++ ++ recursive_enter(ls, obj0); ++ ++ // 0, 0, 0 ++ EXPECT_TRUE(ls.is_recursive(obj0)); ++ ++ recursive_exit(ls, obj0); ++ push_raw(ls, obj1); ++ ++ // 0, 0, 1 ++ EXPECT_TRUE(ls.is_recursive(obj0)); ++ EXPECT_FALSE(ls.is_recursive(obj1)); ++ ++ recursive_enter(ls, obj1); ++ ++ // 
0, 0, 1, 1 ++ EXPECT_TRUE(ls.is_recursive(obj0)); ++ EXPECT_TRUE(ls.is_recursive(obj1)); ++ ++ // Clear stack ++ pop_raw(ls); ++ pop_raw(ls); ++ pop_raw(ls); ++ pop_raw(ls); ++ ++ EXPECT_TRUE(ls.is_empty()); ++} ++ ++TEST_VM_F(LockStackTest, contains) { ++ if (LockingMode != LM_LIGHTWEIGHT) { ++ return; ++ } ++ ++ const bool test_recursive = VM_Version::supports_recursive_lightweight_locking(); ++ ++ JavaThread* THREAD = JavaThread::current(); ++ // the thread should be in vm to use locks ++ ThreadInVMfromNative ThreadInVMfromNative(THREAD); ++ ++ LockStack& ls = THREAD->lock_stack(); ++ ++ EXPECT_TRUE(ls.is_empty()); ++ ++ oop obj0 = Universe::int_mirror(); ++ oop obj1 = Universe::float_mirror(); ++ ++ EXPECT_FALSE(ls.contains(obj0)); ++ ++ ls.push(obj0); ++ ++ // 0 ++ EXPECT_TRUE(ls.contains(obj0)); ++ EXPECT_FALSE(ls.contains(obj1)); ++ ++ if (test_recursive) { ++ push_raw(ls, obj0); ++ ++ // 0, 0 ++ EXPECT_TRUE(ls.contains(obj0)); ++ EXPECT_FALSE(ls.contains(obj1)); ++ } ++ ++ push_raw(ls, obj1); ++ ++ // 0, 0, 1 ++ EXPECT_TRUE(ls.contains(obj0)); ++ EXPECT_TRUE(ls.contains(obj1)); ++ ++ if (test_recursive) { ++ push_raw(ls, obj1); ++ ++ // 0, 0, 1, 1 ++ EXPECT_TRUE(ls.contains(obj0)); ++ EXPECT_TRUE(ls.contains(obj1)); ++ } ++ ++ pop_raw(ls); ++ if (test_recursive) { ++ pop_raw(ls); ++ pop_raw(ls); ++ } ++ push_raw(ls, obj1); ++ ++ // 0, 1 ++ EXPECT_TRUE(ls.contains(obj0)); ++ EXPECT_TRUE(ls.contains(obj1)); ++ ++ // Clear stack ++ pop_raw(ls); ++ pop_raw(ls); ++ ++ EXPECT_TRUE(ls.is_empty()); ++} ++ ++TEST_VM_F(LockStackTest, remove) { ++ if (LockingMode != LM_LIGHTWEIGHT) { ++ return; ++ } ++ ++ const bool test_recursive = VM_Version::supports_recursive_lightweight_locking(); ++ ++ JavaThread* THREAD = JavaThread::current(); ++ // the thread should be in vm to use locks ++ ThreadInVMfromNative ThreadInVMfromNative(THREAD); ++ ++ LockStack& ls = THREAD->lock_stack(); ++ ++ EXPECT_TRUE(ls.is_empty()); ++ ++ oop obj0 = Universe::int_mirror(); ++ oop obj1 = Universe::float_mirror(); ++ oop obj2 = Universe::short_mirror(); ++ oop obj3 = Universe::long_mirror(); ++ ++ push_raw(ls, obj0); ++ ++ // 0 ++ { ++ size_t removed = ls.remove(obj0); ++ EXPECT_EQ(removed, 1u); ++ EXPECT_FALSE(ls.contains(obj0)); ++ } ++ ++ if (test_recursive) { ++ push_raw(ls, obj0); ++ push_raw(ls, obj0); ++ ++ // 0, 0 ++ { ++ size_t removed = ls.remove(obj0); ++ EXPECT_EQ(removed, 2u); ++ EXPECT_FALSE(ls.contains(obj0)); ++ } ++ } ++ ++ push_raw(ls, obj0); ++ push_raw(ls, obj1); ++ ++ // 0, 1 ++ { ++ size_t removed = ls.remove(obj0); ++ EXPECT_EQ(removed, 1u); ++ EXPECT_FALSE(ls.contains(obj0)); ++ EXPECT_TRUE(ls.contains(obj1)); ++ ++ ls.remove(obj1); ++ EXPECT_TRUE(ls.is_empty()); ++ } ++ ++ push_raw(ls, obj0); ++ push_raw(ls, obj1); ++ ++ // 0, 1 ++ { ++ size_t removed = ls.remove(obj1); ++ EXPECT_EQ(removed, 1u); ++ EXPECT_FALSE(ls.contains(obj1)); ++ EXPECT_TRUE(ls.contains(obj0)); ++ ++ ls.remove(obj0); ++ EXPECT_TRUE(ls.is_empty()); ++ } ++ ++ if (test_recursive) { ++ push_raw(ls, obj0); ++ push_raw(ls, obj0); ++ push_raw(ls, obj1); ++ ++ // 0, 0, 1 ++ { ++ size_t removed = ls.remove(obj0); ++ EXPECT_EQ(removed, 2u); ++ EXPECT_FALSE(ls.contains(obj0)); ++ EXPECT_TRUE(ls.contains(obj1)); ++ ++ ls.remove(obj1); ++ EXPECT_TRUE(ls.is_empty()); ++ } ++ ++ push_raw(ls, obj0); ++ push_raw(ls, obj1); ++ push_raw(ls, obj1); ++ ++ // 0, 1, 1 ++ { ++ size_t removed = ls.remove(obj1); ++ EXPECT_EQ(removed, 2u); ++ EXPECT_FALSE(ls.contains(obj1)); ++ EXPECT_TRUE(ls.contains(obj0)); ++ ++ ls.remove(obj0); ++ 
EXPECT_TRUE(ls.is_empty()); ++ } ++ ++ push_raw(ls, obj0); ++ push_raw(ls, obj1); ++ push_raw(ls, obj1); ++ push_raw(ls, obj2); ++ push_raw(ls, obj2); ++ push_raw(ls, obj2); ++ push_raw(ls, obj2); ++ push_raw(ls, obj3); ++ ++ // 0, 1, 1, 2, 2, 2, 2, 3 ++ { ++ EXPECT_EQ(size(ls), 8u); ++ ++ size_t removed = ls.remove(obj1); ++ EXPECT_EQ(removed, 2u); ++ ++ EXPECT_TRUE(ls.contains(obj0)); ++ EXPECT_FALSE(ls.contains(obj1)); ++ EXPECT_TRUE(ls.contains(obj2)); ++ EXPECT_TRUE(ls.contains(obj3)); ++ ++ EXPECT_EQ(at(ls, 0), obj0); ++ EXPECT_EQ(at(ls, 1), obj2); ++ EXPECT_EQ(at(ls, 2), obj2); ++ EXPECT_EQ(at(ls, 3), obj2); ++ EXPECT_EQ(at(ls, 4), obj2); ++ EXPECT_EQ(at(ls, 5), obj3); ++ EXPECT_EQ(size(ls), 6u); ++ ++ removed = ls.remove(obj2); ++ EXPECT_EQ(removed, 4u); ++ ++ EXPECT_TRUE(ls.contains(obj0)); ++ EXPECT_FALSE(ls.contains(obj1)); ++ EXPECT_FALSE(ls.contains(obj2)); ++ EXPECT_TRUE(ls.contains(obj3)); ++ ++ EXPECT_EQ(at(ls, 0), obj0); ++ EXPECT_EQ(at(ls, 1), obj3); ++ EXPECT_EQ(size(ls), 2u); ++ ++ removed = ls.remove(obj0); ++ EXPECT_EQ(removed, 1u); ++ ++ EXPECT_FALSE(ls.contains(obj0)); ++ EXPECT_FALSE(ls.contains(obj1)); ++ EXPECT_FALSE(ls.contains(obj2)); ++ EXPECT_TRUE(ls.contains(obj3)); ++ ++ EXPECT_EQ(at(ls, 0), obj3); ++ EXPECT_EQ(size(ls), 1u); ++ ++ removed = ls.remove(obj3); ++ EXPECT_EQ(removed, 1u); ++ ++ EXPECT_TRUE(ls.is_empty()); ++ EXPECT_EQ(size(ls), 0u); ++ } ++ } ++ ++ EXPECT_TRUE(ls.is_empty()); ++} +diff --git a/test/hotspot/jtreg/TEST.groups b/test/hotspot/jtreg/TEST.groups +index 6fb2e2b0b..8568098eb 100644 +--- a/test/hotspot/jtreg/TEST.groups ++++ b/test/hotspot/jtreg/TEST.groups +@@ -149,6 +149,7 @@ serviceability_ttf_virtual = \ + tier1_common = \ + sanity/BasicVMTest.java \ + gtest/GTestWrapper.java \ ++ gtest/LockStackGtests.java \ + gtest/MetaspaceGtests.java \ + gtest/LargePageGtests.java \ + gtest/NMTGtests.java \ +diff --git a/test/hotspot/jtreg/compiler/c2/irTests/TestVectorizationNotRun.java b/test/hotspot/jtreg/compiler/c2/irTests/TestVectorizationNotRun.java +index 5968b7221..28196f125 100644 +--- a/test/hotspot/jtreg/compiler/c2/irTests/TestVectorizationNotRun.java ++++ b/test/hotspot/jtreg/compiler/c2/irTests/TestVectorizationNotRun.java +@@ -51,7 +51,8 @@ public class TestVectorizationNotRun { + static long[] longArray = new long[size]; + + @Test +- @IR(counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }) ++ @IR(counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }, ++ applyIf = {"UseCompactObjectHeaders", "false"}) + public static void test(byte[] dest, long[] src) { + for (int i = 0; i < src.length; i++) { + if ((i < 0) || (8 > sizeBytes - i)) { +diff --git a/test/hotspot/jtreg/compiler/lib/ir_framework/TestFramework.java b/test/hotspot/jtreg/compiler/lib/ir_framework/TestFramework.java +index 8ce751f39..6c9dcbff5 100644 +--- a/test/hotspot/jtreg/compiler/lib/ir_framework/TestFramework.java ++++ b/test/hotspot/jtreg/compiler/lib/ir_framework/TestFramework.java +@@ -141,7 +141,8 @@ public class TestFramework { + "UseZbb", + "UseRVV", + "Xlog", +- "LogCompilation" ++ "LogCompilation", ++ "UseCompactObjectHeaders" + ) + ); + +diff --git a/test/hotspot/jtreg/compiler/loopopts/superword/TestIndependentPacksWithCyclicDependency.java b/test/hotspot/jtreg/compiler/loopopts/superword/TestIndependentPacksWithCyclicDependency.java +index b594b4462..9deebca75 100644 +--- a/test/hotspot/jtreg/compiler/loopopts/superword/TestIndependentPacksWithCyclicDependency.java ++++ 
b/test/hotspot/jtreg/compiler/loopopts/superword/TestIndependentPacksWithCyclicDependency.java +@@ -267,7 +267,8 @@ public class TestIndependentPacksWithCyclicDependency { + + @Test + @IR(counts = {IRNode.ADD_VI, "> 0", IRNode.MUL_VI, "> 0", IRNode.ADD_VF, "> 0"}, +- applyIfCPUFeatureOr = {"sse4.1", "true", "asimd", "true"}) ++ applyIfCPUFeatureOr = {"sse4.1", "true", "asimd", "true"}, ++ applyIf = {"UseCompactObjectHeaders", "false"}) + static void test6(int[] dataIa, int[] dataIb, float[] dataFa, float[] dataFb, + long[] dataLa, long[] dataLb) { + for (int i = 0; i < RANGE; i+=2) { +diff --git a/test/hotspot/jtreg/gc/TestAllocHumongousFragment.java b/test/hotspot/jtreg/gc/TestAllocHumongousFragment.java +index 88b838b4b..45208b700 100644 +--- a/test/hotspot/jtreg/gc/TestAllocHumongousFragment.java ++++ b/test/hotspot/jtreg/gc/TestAllocHumongousFragment.java +@@ -177,6 +177,18 @@ + * TestAllocHumongousFragment + */ + ++ /* ++ * @test id=g1-alt-forwarding ++ * @summary Make sure G1 can recover from humongous allocation fragmentation, with alt GC forwarding ++ * @key randomness ++ * @requires vm.gc.G1 ++ * @library /test/lib ++ * ++ * @run main/othervm -Xlog:gc+region=trace -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g ++ * -XX:VerifyGCType=full -XX:+VerifyDuringGC -XX:+VerifyAfterGC -XX:+UseAltGCForwarding ++ * TestAllocHumongousFragment ++ */ ++ + import java.util.*; + import jdk.test.lib.Utils; + +diff --git a/test/hotspot/jtreg/gc/g1/plab/TestPLABPromotion.java b/test/hotspot/jtreg/gc/g1/plab/TestPLABPromotion.java +index 95740559c..f51850a21 100644 +--- a/test/hotspot/jtreg/gc/g1/plab/TestPLABPromotion.java ++++ b/test/hotspot/jtreg/gc/g1/plab/TestPLABPromotion.java +@@ -32,7 +32,7 @@ + * @modules java.management + * @build jdk.test.whitebox.WhiteBox + * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox +- * @run main/timeout=240 gc.g1.plab.TestPLABPromotion ++ * @run main/othervm/timeout=240 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI gc.g1.plab.TestPLABPromotion + */ + package gc.g1.plab; + +@@ -45,14 +45,18 @@ import gc.g1.plab.lib.LogParser; + import gc.g1.plab.lib.PLABUtils; + import gc.g1.plab.lib.PlabInfo; + ++import jdk.test.lib.Platform; + import jdk.test.lib.process.OutputAnalyzer; + import jdk.test.lib.process.ProcessTools; ++import jdk.test.whitebox.WhiteBox; + + /** + * Test checks PLAB promotion of different size objects. + */ + public class TestPLABPromotion { + ++ private static final boolean COMPACT_HEADERS = Platform.is64bit() && WhiteBox.getWhiteBox().getBooleanVMFlag("UseCompactObjectHeaders"); ++ + // GC ID with survivor PLAB statistics + private final static long GC_ID_SURVIVOR_STATS = 1l; + // GC ID with old PLAB statistics +@@ -72,7 +76,7 @@ public class TestPLABPromotion { + private static final int PLAB_SIZE_HIGH = 65536; + private static final int OBJECT_SIZE_SMALL = 10; + private static final int OBJECT_SIZE_MEDIUM = 100; +- private static final int OBJECT_SIZE_HIGH = 3250; ++ private static final int OBJECT_SIZE_HIGH = COMPACT_HEADERS ? 
3266 : 3250; + private static final int GC_NUM_SMALL = 1; + private static final int GC_NUM_MEDIUM = 3; + private static final int GC_NUM_HIGH = 7; +diff --git a/test/hotspot/jtreg/gc/stress/systemgc/TestSystemGCWithG1.java b/test/hotspot/jtreg/gc/stress/systemgc/TestSystemGCWithG1.java +index 64a090025..eeb6bfac5 100644 +--- a/test/hotspot/jtreg/gc/stress/systemgc/TestSystemGCWithG1.java ++++ b/test/hotspot/jtreg/gc/stress/systemgc/TestSystemGCWithG1.java +@@ -25,7 +25,7 @@ + package gc.stress.systemgc; + + /* +- * @test TestSystemGCWithG1 ++ * @test id=default + * @key stress + * @bug 8190703 + * @library / +@@ -33,6 +33,17 @@ package gc.stress.systemgc; + * @summary Stress the G1 GC full GC by allocating objects of different lifetimes concurrently with System.gc(). + * @run main/othervm/timeout=300 -Xlog:gc*=info -Xmx512m -XX:+UseG1GC gc.stress.systemgc.TestSystemGCWithG1 270 + */ ++ ++/* ++ * @test id=alt-forwarding ++ * @key stress ++ * @bug 8190703 ++ * @library / ++ * @requires vm.gc.G1 ++ * @requires (vm.bits == "64") ++ * @summary Stress the G1 GC full GC by allocating objects of different lifetimes concurrently with System.gc(). ++ * @run main/othervm/timeout=300 -XX:+UnlockExperimentalVMOptions -XX:+UseAltGCForwarding -Xlog:gc*=info -Xmx512m -XX:+UseG1GC gc.stress.systemgc.TestSystemGCWithG1 270 ++ */ + public class TestSystemGCWithG1 { + public static void main(String[] args) throws Exception { + TestSystemGC.main(args); +diff --git a/test/hotspot/jtreg/gc/stress/systemgc/TestSystemGCWithSerial.java b/test/hotspot/jtreg/gc/stress/systemgc/TestSystemGCWithSerial.java +index 1db15b76e..c64459e15 100644 +--- a/test/hotspot/jtreg/gc/stress/systemgc/TestSystemGCWithSerial.java ++++ b/test/hotspot/jtreg/gc/stress/systemgc/TestSystemGCWithSerial.java +@@ -25,7 +25,7 @@ + package gc.stress.systemgc; + + /* +- * @test TestSystemGCWithSerial ++ * @test id=default + * @key stress + * @bug 8190703 + * @library / +@@ -33,6 +33,37 @@ package gc.stress.systemgc; + * @summary Stress the Serial GC full GC by allocating objects of different lifetimes concurrently with System.gc(). + * @run main/othervm/timeout=300 -Xlog:gc*=info -Xmx512m -XX:+UseSerialGC gc.stress.systemgc.TestSystemGCWithSerial 270 + */ ++ ++/* ++ * @test id=alt-forwarding ++ * @key stress ++ * @bug 8190703 ++ * @library / ++ * @requires vm.gc.Serial ++ * @summary Stress the Serial GC full GC by allocating objects of different lifetimes concurrently with System.gc(). ++ * @run main/othervm/timeout=300 -XX:+UnlockExperimentalVMOptions -XX:+UseAltGCForwarding -Xlog:gc*=info -Xmx512m -XX:+UseSerialGC gc.stress.systemgc.TestSystemGCWithSerial 270 ++ */ ++ ++/* ++ * @test id=alt-forwarding-unaligned ++ * @key stress ++ * @bug 8190703 ++ * @library / ++ * @requires vm.gc.Serial ++ * @summary Stress the Serial GC full GC by allocating objects of different lifetimes concurrently with System.gc(). ++ * @run main/othervm/timeout=300 -XX:+UnlockExperimentalVMOptions -XX:+UseAltGCForwarding -Xlog:gc*=info -Xmx700m -XX:+UseSerialGC gc.stress.systemgc.TestSystemGCWithSerial 270 ++ */ ++ ++/* ++ * @test id=alt-forwarding-large-heap ++ * @key stress ++ * @bug 8190703 ++ * @library / ++ * @requires vm.gc.Serial ++ * @requires (vm.bits == "64") & (os.maxMemory >= 6G) ++ * @summary Stress the Serial GC full GC by allocating objects of different lifetimes concurrently with System.gc(). 
++ * @run main/othervm/timeout=300 -XX:+UnlockExperimentalVMOptions -XX:+UseAltGCForwarding -Xlog:gc*=info -Xmx6g -XX:+UseSerialGC gc.stress.systemgc.TestSystemGCWithSerial 270 ++ */ + public class TestSystemGCWithSerial { + public static void main(String[] args) throws Exception { + TestSystemGC.main(args); +diff --git a/test/hotspot/jtreg/gc/stress/systemgc/TestSystemGCWithShenandoah.java b/test/hotspot/jtreg/gc/stress/systemgc/TestSystemGCWithShenandoah.java +index 1b12e22b6..cda514124 100644 +--- a/test/hotspot/jtreg/gc/stress/systemgc/TestSystemGCWithShenandoah.java ++++ b/test/hotspot/jtreg/gc/stress/systemgc/TestSystemGCWithShenandoah.java +@@ -40,6 +40,22 @@ package gc.stress.systemgc; + * -XX:+UseShenandoahGC + * gc.stress.systemgc.TestSystemGCWithShenandoah 270 + */ ++/* ++ * @test id=alt-forwarding ++ * @key stress ++ * @library / ++ * @requires vm.gc.Shenandoah ++ * @summary Stress the Shenandoah GC full GC by allocating objects of different lifetimes concurrently with System.gc(). ++ * ++ * @run main/othervm/timeout=300 -Xlog:gc*=info -Xmx512m -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions ++ * -XX:+UseShenandoahGC -XX:+UseAltGCForwarding ++ * -XX:+ShenandoahVerify ++ * gc.stress.systemgc.TestSystemGCWithShenandoah 270 ++ * ++ * @run main/othervm/timeout=300 -Xlog:gc*=info -Xmx512m -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions ++ * -XX:+UseShenandoahGC ++ * gc.stress.systemgc.TestSystemGCWithShenandoah 270 ++ */ + + /* + * @test id=iu +diff --git a/test/hotspot/jtreg/gtest/ArrayTests.java b/test/hotspot/jtreg/gtest/ArrayTests.java +new file mode 100644 +index 000000000..b1afa4795 +--- /dev/null ++++ b/test/hotspot/jtreg/gtest/ArrayTests.java +@@ -0,0 +1,56 @@ ++/* ++ * Copyright Amazon.com Inc. or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++/* ++ * This tests object array sizes by running gtests with different settings. 
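++ *
++ * A rough sketch of the arithmetic the filtered arrayOop gtest asserts (see
++ * test_arrayOop.cpp above): an int array places its elements right after the
++ * header, so the expected base offset is 8 (mark word) + 4 (length field) = 12
++ * with compact object headers, 16 once a separate 4-byte compressed class
++ * pointer is added, and 20 with a full 8-byte class pointer.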
++ */ ++ ++/* @test id=with-coops-with-ccp ++ * @summary Run object array size tests with compressed oops and compressed class pointers ++ * @library /test/lib ++ * @modules java.base/jdk.internal.misc ++ * java.xml ++ * @run main/native GTestWrapper --gtest_filter=arrayOop -XX:+UseCompressedClassPointers -XX:+UseCompressedOops ++ */ ++/* @test id=with-coops-no-ccp ++ * @summary Run object array size tests with compressed oops and compressed class pointers ++ * @library /test/lib ++ * @modules java.base/jdk.internal.misc ++ * java.xml ++ * @run main/native GTestWrapper --gtest_filter=arrayOop -XX:-UseCompressedClassPointers -XX:+UseCompressedOops ++ */ ++/* @test id=no-coops-with-ccp ++ * @summary Run object array size tests with compressed oops and compressed class pointers ++ * @library /test/lib ++ * @modules java.base/jdk.internal.misc ++ * java.xml ++ * @run main/native GTestWrapper --gtest_filter=arrayOop -XX:+UseCompressedClassPointers -XX:-UseCompressedOops ++ */ ++/* @test id=no-coops-no-ccp ++ * @summary Run object array size tests with compressed oops and compressed class pointers ++ * @library /test/lib ++ * @modules java.base/jdk.internal.misc ++ * java.xml ++ * @run main/native GTestWrapper --gtest_filter=arrayOop -XX:-UseCompressedClassPointers -XX:-UseCompressedOops ++ */ +diff --git a/test/hotspot/jtreg/gtest/LockStackGtests.java b/test/hotspot/jtreg/gtest/LockStackGtests.java +new file mode 100644 +index 000000000..b51cebdbf +--- /dev/null ++++ b/test/hotspot/jtreg/gtest/LockStackGtests.java +@@ -0,0 +1,32 @@ ++/* ++ * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++/* @test ++ * @summary Run LockStack gtests with LockingMode=2 ++ * @library /test/lib ++ * @modules java.base/jdk.internal.misc ++ * java.xml ++ * @requires vm.flagless ++ * @run main/native GTestWrapper --gtest_filter=LockStackTest* -XX:+UnlockExperimentalVMOptions -XX:LockingMode=2 ++ */ +diff --git a/test/hotspot/jtreg/gtest/ObjArrayTests.java b/test/hotspot/jtreg/gtest/ObjArrayTests.java +new file mode 100644 +index 000000000..baae18404 +--- /dev/null ++++ b/test/hotspot/jtreg/gtest/ObjArrayTests.java +@@ -0,0 +1,85 @@ ++/* ++ * Copyright Amazon.com Inc. or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. 
++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++/* ++ * This tests object array sizes by running gtests with different settings. ++ */ ++ ++/* @test id=with-coops-with-ccp ++ * @summary Run object array size tests with compressed oops and compressed class pointers ++ * @library /test/lib ++ * @modules java.base/jdk.internal.misc ++ * java.xml ++ * @run main/native GTestWrapper --gtest_filter=objArrayOop -XX:+UseCompressedClassPointers -XX:+UseCompressedOops ++ */ ++/* @test id=with-coops-no-ccp ++ * @summary Run object array size tests with compressed oops and compressed class pointers ++ * @library /test/lib ++ * @modules java.base/jdk.internal.misc ++ * java.xml ++ * @run main/native GTestWrapper --gtest_filter=objArrayOop -XX:-UseCompressedClassPointers -XX:+UseCompressedOops ++ */ ++/* @test id=no-coops-with-ccp ++ * @summary Run object array size tests with compressed oops and compressed class pointers ++ * @library /test/lib ++ * @modules java.base/jdk.internal.misc ++ * java.xml ++ * @run main/native GTestWrapper --gtest_filter=objArrayOop -XX:+UseCompressedClassPointers -XX:-UseCompressedOops ++ */ ++/* @test id=no-coops-no-ccp ++ * @summary Run object array size tests with compressed oops and compressed class pointers ++ * @library /test/lib ++ * @modules java.base/jdk.internal.misc ++ * java.xml ++ * @run main/native GTestWrapper --gtest_filter=objArrayOop -XX:-UseCompressedClassPointers -XX:-UseCompressedOops ++ */ ++ ++/* @test id=with-coops-with-ccp-large-align ++ * @summary Run object array size tests with compressed oops and compressed class pointers ++ * @library /test/lib ++ * @modules java.base/jdk.internal.misc ++ * java.xml ++ * @run main/native GTestWrapper --gtest_filter=objArrayOop -XX:+UseCompressedClassPointers -XX:+UseCompressedOops -XX:ObjAlignmentInBytes=256 ++ */ ++/* @test id=with-coops-no-ccp-large-align ++ * @summary Run object array size tests with compressed oops and compressed class pointers ++ * @library /test/lib ++ * @modules java.base/jdk.internal.misc ++ * java.xml ++ * @run main/native GTestWrapper --gtest_filter=objArrayOop -XX:-UseCompressedClassPointers -XX:+UseCompressedOops -XX:ObjAlignmentInBytes=256 ++ */ ++/* @test id=no-coops-with-ccp-large-align ++ * @summary Run object array size tests with compressed oops and compressed class pointers ++ * @library /test/lib ++ * @modules java.base/jdk.internal.misc ++ * java.xml ++ * @run main/native GTestWrapper --gtest_filter=objArrayOop -XX:+UseCompressedClassPointers -XX:-UseCompressedOops -XX:ObjAlignmentInBytes=256 ++ */ ++/* @test id=no-coops-no-ccp-large-align ++ * @summary Run object array size tests with compressed oops and compressed class pointers ++ * @library /test/lib ++ * @modules java.base/jdk.internal.misc ++ * java.xml ++ * @run main/native GTestWrapper --gtest_filter=objArrayOop 
-XX:-UseCompressedClassPointers -XX:-UseCompressedOops -XX:ObjAlignmentInBytes=256 ++ */ +diff --git a/test/hotspot/jtreg/runtime/FieldLayout/BaseOffsets.java b/test/hotspot/jtreg/runtime/FieldLayout/BaseOffsets.java +new file mode 100644 +index 000000000..0d62429b2 +--- /dev/null ++++ b/test/hotspot/jtreg/runtime/FieldLayout/BaseOffsets.java +@@ -0,0 +1,130 @@ ++/* ++ * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ */ ++ ++/* ++ * @test id=default ++ * @library /test/lib / ++ * @modules java.base/jdk.internal.misc ++ * java.management ++ * @build jdk.test.whitebox.WhiteBox ++ * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox ++ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI BaseOffsets ++ */ ++/* ++ * @test id=no-coops ++ * @library /test/lib / ++ * @requires vm.bits == "64" ++ * @modules java.base/jdk.internal.misc ++ * java.management ++ * @build jdk.test.whitebox.WhiteBox ++ * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox ++ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:-UseCompressedOops BaseOffsets ++ */ ++/* ++ * @test id=no-ccp ++ * @library /test/lib / ++ * @requires vm.bits == "64" ++ * @modules java.base/jdk.internal.misc ++ * java.management ++ * @build jdk.test.whitebox.WhiteBox ++ * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox ++ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:-UseCompressedClassPointers BaseOffsets ++ */ ++/* ++ * @test id=compact-headers ++ * @library /test/lib / ++ * @requires vm.bits == "64" ++ * @modules java.base/jdk.internal.misc ++ * java.management ++ * @build jdk.test.whitebox.WhiteBox ++ * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox ++ * @run main/othervm -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions -XX:+UseCompactObjectHeaders BaseOffsets ++ */ ++ ++import java.lang.reflect.Field; ++import java.util.Arrays; ++import java.util.Comparator; ++import jdk.internal.misc.Unsafe; ++ ++import jdk.test.lib.Asserts; ++import jdk.test.lib.Platform; ++import jdk.test.whitebox.WhiteBox; ++ ++public class BaseOffsets { ++ ++ static class LIClass { ++ public int i; ++ } ++ ++ public static final WhiteBox WB = WhiteBox.getWhiteBox(); ++ ++ static final long INT_OFFSET; ++ static final int INT_ARRAY_OFFSET; ++ static final int LONG_ARRAY_OFFSET; ++ static { ++ if (!Platform.is64bit()) { ++ INT_OFFSET = 8; ++ INT_ARRAY_OFFSET = 12; ++ LONG_ARRAY_OFFSET = 16; ++ } else if (WB.getBooleanVMFlag("UseCompactObjectHeaders")) { ++ INT_OFFSET = 8; ++ INT_ARRAY_OFFSET = 12; ++ LONG_ARRAY_OFFSET = 16; ++ } else if (WB.getBooleanVMFlag("UseCompressedClassPointers")) { ++ INT_OFFSET = 12; ++ INT_ARRAY_OFFSET = 16; ++ LONG_ARRAY_OFFSET = 16; ++ } else { ++ INT_OFFSET = 16; ++ INT_ARRAY_OFFSET = 20; ++ LONG_ARRAY_OFFSET = 24; ++ } ++ } ++ ++ static public void main(String[] args) { ++ Unsafe unsafe = Unsafe.getUnsafe(); ++ Class c = LIClass.class; ++ Field[] fields = c.getFields(); ++ for (int i = 0; i < fields.length; i++) { ++ long offset = unsafe.objectFieldOffset(fields[i]); ++ if (fields[i].getType() == int.class) { ++ Asserts.assertEquals(offset, INT_OFFSET, "Misplaced int field"); ++ } else { ++ Asserts.fail("Unexpected field type"); ++ } ++ } ++ ++ Asserts.assertEquals(unsafe.arrayBaseOffset(boolean[].class), INT_ARRAY_OFFSET, "Misplaced boolean array base"); ++ Asserts.assertEquals(unsafe.arrayBaseOffset(byte[].class), INT_ARRAY_OFFSET, "Misplaced byte array base"); ++ Asserts.assertEquals(unsafe.arrayBaseOffset(char[].class), INT_ARRAY_OFFSET, "Misplaced char array base"); ++ Asserts.assertEquals(unsafe.arrayBaseOffset(short[].class), INT_ARRAY_OFFSET, "Misplaced short array base"); ++ Asserts.assertEquals(unsafe.arrayBaseOffset(int[].class), INT_ARRAY_OFFSET, "Misplaced int array base"); ++ Asserts.assertEquals(unsafe.arrayBaseOffset(long[].class), LONG_ARRAY_OFFSET, "Misplaced long array base"); ++ Asserts.assertEquals(unsafe.arrayBaseOffset(float[].class), INT_ARRAY_OFFSET, "Misplaced float array base"); ++ Asserts.assertEquals(unsafe.arrayBaseOffset(double[].class), LONG_ARRAY_OFFSET, "Misplaced double array base"); ++ boolean narrowOops = System.getProperty("java.vm.compressedOopsMode") != null || ++ !Platform.is64bit(); ++ int expected_objary_offset = narrowOops ? 
INT_ARRAY_OFFSET : LONG_ARRAY_OFFSET; ++ Asserts.assertEquals(unsafe.arrayBaseOffset(Object[].class), expected_objary_offset, "Misplaced object array base"); ++ } ++} +diff --git a/test/hotspot/jtreg/runtime/FieldLayout/OldLayoutCheck.java b/test/hotspot/jtreg/runtime/FieldLayout/OldLayoutCheck.java +index a68b0a9ef..4de89437c 100644 +--- a/test/hotspot/jtreg/runtime/FieldLayout/OldLayoutCheck.java ++++ b/test/hotspot/jtreg/runtime/FieldLayout/OldLayoutCheck.java +@@ -25,20 +25,24 @@ + * @test + * @bug 8239014 + * @summary -XX:-UseEmptySlotsInSupers sometime fails to reproduce the layout of the old code +- * @library /test/lib ++ * @library /test/lib / + * @modules java.base/jdk.internal.misc + * java.management + * @requires vm.bits == "64" & vm.opt.final.UseCompressedOops == true & vm.gc != "Z" +- * @run main/othervm -XX:+UseCompressedClassPointers -XX:-UseEmptySlotsInSupers OldLayoutCheck ++ * @build jdk.test.whitebox.WhiteBox ++ * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox ++ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UseCompressedClassPointers -XX:-UseEmptySlotsInSupers OldLayoutCheck + */ + + /* + * @test + * @requires vm.bits == "32" +- * @library /test/lib ++ * @library /test/lib / + * @modules java.base/jdk.internal.misc + * java.management +- * @run main/othervm -XX:-UseEmptySlotsInSupers OldLayoutCheck ++ * @build jdk.test.whitebox.WhiteBox ++ * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox ++ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:-UseEmptySlotsInSupers OldLayoutCheck + */ + + import java.lang.reflect.Field; +@@ -48,6 +52,7 @@ import jdk.internal.misc.Unsafe; + + import jdk.test.lib.Asserts; + import jdk.test.lib.Platform; ++import jdk.test.whitebox.WhiteBox; + + public class OldLayoutCheck { + +@@ -56,10 +61,21 @@ public class OldLayoutCheck { + public int i; + } + +- // 32-bit VMs: @0: 8 byte header, @8: long field, @16: int field +- // 64-bit VMs: @0: 12 byte header, @12: int field, @16: long field +- static final long INT_OFFSET = Platform.is64bit() ? 12L : 16L; +- static final long LONG_OFFSET = Platform.is64bit() ? 16L : 8L; ++ public static final WhiteBox WB = WhiteBox.getWhiteBox(); ++ ++ // 32-bit VMs/compact headers: @0: 8 byte header, @8: long field, @16: int field ++ // 64-bit VMs: @0: 12 byte header, @12: int field, @16: long field ++ static final long INT_OFFSET; ++ static final long LONG_OFFSET; ++ static { ++ if (!Platform.is64bit() || WB.getBooleanVMFlag("UseCompactObjectHeaders")) { ++ INT_OFFSET = 16L; ++ LONG_OFFSET = 8L; ++ } else { ++ INT_OFFSET = 12L; ++ LONG_OFFSET = 16L; ++ } ++ } + + static public void main(String[] args) { + Unsafe unsafe = Unsafe.getUnsafe(); +diff --git a/test/hotspot/jtreg/runtime/cds/CdsDifferentCompactObjectHeaders.java b/test/hotspot/jtreg/runtime/cds/CdsDifferentCompactObjectHeaders.java +new file mode 100644 +index 000000000..604bfb678 +--- /dev/null ++++ b/test/hotspot/jtreg/runtime/cds/CdsDifferentCompactObjectHeaders.java +@@ -0,0 +1,66 @@ ++/* ++ * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. 
++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ */ ++ ++/** ++ * @test CdsDifferentCompactObjectHeaders ++ * @summary Testing CDS (class data sharing) using opposite compact object header settings. ++ * Using a different compact object headers setting for each dump/load pair. ++ * This is a negative test; using a compact headers setting for loading that ++ * is different from the one used for creating the CDS file ++ * should fail when loading. ++ * @requires vm.cds ++ * @requires vm.bits == 64 ++ * @library /test/lib ++ * @run driver CdsDifferentCompactObjectHeaders ++ */ ++ ++import jdk.test.lib.cds.CDSTestUtils; ++import jdk.test.lib.process.OutputAnalyzer; ++import jdk.test.lib.Platform; ++ ++public class CdsDifferentCompactObjectHeaders { ++ ++ public static void main(String[] args) throws Exception { ++ createAndLoadSharedArchive(true, false); ++ createAndLoadSharedArchive(false, true); ++ } ++ ++ // Parameters indicate whether compact object headers are enabled when dumping and when loading the archive ++ private static void ++ createAndLoadSharedArchive(boolean createCompactHeaders, boolean loadCompactHeaders) ++ throws Exception { ++ String createCompactHeadersArg = "-XX:" + (createCompactHeaders ? "+" : "-") + "UseCompactObjectHeaders"; ++ String loadCompactHeadersArg = "-XX:" + (loadCompactHeaders ? "+" : "-") + "UseCompactObjectHeaders"; ++ String expectedErrorMsg = ++ String.format( ++ "The shared archive file's UseCompactObjectHeaders setting (%s)" + ++ " does not equal the current UseCompactObjectHeaders setting (%s)", ++ createCompactHeaders ? "enabled" : "disabled", ++ loadCompactHeaders ? "enabled" : "disabled"); ++ ++ CDSTestUtils.createArchiveAndCheck("-XX:+UnlockExperimentalVMOptions", createCompactHeadersArg); ++ ++ OutputAnalyzer out = CDSTestUtils.runWithArchive("-Xlog:cds", "-XX:+UnlockExperimentalVMOptions", loadCompactHeadersArg); ++ CDSTestUtils.checkExecExpectError(out, 1, expectedErrorMsg); ++ } ++} +diff --git a/test/hotspot/jtreg/runtime/cds/appcds/TestZGCWithCDS.java b/test/hotspot/jtreg/runtime/cds/appcds/TestZGCWithCDS.java +index ea51b198f..e44abe7ab 100644 +--- a/test/hotspot/jtreg/runtime/cds/appcds/TestZGCWithCDS.java ++++ b/test/hotspot/jtreg/runtime/cds/appcds/TestZGCWithCDS.java +@@ -56,6 +56,7 @@ public class TestZGCWithCDS { + public final static String ERR_MSG = "The saved state of UseCompressedOops and UseCompressedClassPointers is different from runtime, CDS will be disabled."; + public static void main(String... args) throws Exception { + String zGenerational = args[0]; ++ String compactHeaders = "-XX:" + (zGenerational.equals("-XX:+ZGenerational") ? "+" : "-") + "UseCompactObjectHeaders"; + String helloJar = JarBuilder.build("hello", "Hello"); + System.out.println("0. 
Dump with ZGC"); + OutputAnalyzer out = TestCommon +@@ -63,6 +64,8 @@ public class TestZGCWithCDS { + new String[] {"Hello"}, + "-XX:+UseZGC", + zGenerational, ++ "-XX:+UnlockExperimentalVMOptions", ++ compactHeaders, + "-Xlog:cds"); + out.shouldContain("Dumping shared data to file:"); + out.shouldHaveExitValue(0); +@@ -72,6 +75,8 @@ public class TestZGCWithCDS { + .exec(helloJar, + "-XX:+UseZGC", + zGenerational, ++ "-XX:+UnlockExperimentalVMOptions", ++ compactHeaders, + "-Xlog:cds", + "Hello"); + out.shouldContain(HELLO); +@@ -83,6 +88,8 @@ public class TestZGCWithCDS { + "-XX:-UseZGC", + "-XX:+UseCompressedOops", // in case turned off by vmoptions + "-XX:+UseCompressedClassPointers", // by jtreg ++ "-XX:+UnlockExperimentalVMOptions", ++ compactHeaders, + "-Xlog:cds", + "Hello"); + out.shouldContain(UNABLE_TO_USE_ARCHIVE); +@@ -107,6 +114,8 @@ public class TestZGCWithCDS { + "-XX:+UseSerialGC", + "-XX:-UseCompressedOops", + "-XX:+UseCompressedClassPointers", ++ "-XX:+UnlockExperimentalVMOptions", ++ compactHeaders, + "-Xlog:cds", + "Hello"); + out.shouldContain(HELLO); +@@ -130,6 +139,8 @@ public class TestZGCWithCDS { + "-XX:+UseSerialGC", + "-XX:+UseCompressedOops", + "-XX:+UseCompressedClassPointers", ++ "-XX:+UnlockExperimentalVMOptions", ++ compactHeaders, + "-Xlog:cds", + "Hello"); + out.shouldContain(UNABLE_TO_USE_ARCHIVE); +@@ -143,6 +154,8 @@ public class TestZGCWithCDS { + "-XX:+UseSerialGC", + "-XX:-UseCompressedOops", + "-XX:+UseCompressedClassPointers", ++ "-XX:+UnlockExperimentalVMOptions", ++ compactHeaders, + "-Xlog:cds"); + out.shouldContain("Dumping shared data to file:"); + out.shouldHaveExitValue(0); +@@ -152,6 +165,8 @@ public class TestZGCWithCDS { + .exec(helloJar, + "-XX:+UseZGC", + zGenerational, ++ "-XX:+UnlockExperimentalVMOptions", ++ compactHeaders, + "-Xlog:cds", + "Hello"); + out.shouldContain(HELLO); +diff --git a/test/hotspot/jtreg/runtime/cds/appcds/dynamicArchive/TestAutoCreateSharedArchiveNoDefaultArchive.java b/test/hotspot/jtreg/runtime/cds/appcds/dynamicArchive/TestAutoCreateSharedArchiveNoDefaultArchive.java +index 46e08c7b7..bf2b7a5b8 100644 +--- a/test/hotspot/jtreg/runtime/cds/appcds/dynamicArchive/TestAutoCreateSharedArchiveNoDefaultArchive.java ++++ b/test/hotspot/jtreg/runtime/cds/appcds/dynamicArchive/TestAutoCreateSharedArchiveNoDefaultArchive.java +@@ -144,6 +144,7 @@ public class TestAutoCreateSharedArchiveNoDefaultArchive { + private static void removeDefaultArchives(String java_home_dst, String variant) { + removeDefaultArchive(java_home_dst, variant, ""); + removeDefaultArchive(java_home_dst, variant, "_nocoops"); ++ removeDefaultArchive(java_home_dst, variant, "_coh"); + } + + private static void removeDefaultArchive(String java_home_dst, String variant, String suffix) { +diff --git a/test/hotspot/jtreg/runtime/lockStack/TestLockStackCapacity.java b/test/hotspot/jtreg/runtime/lockStack/TestLockStackCapacity.java +new file mode 100644 +index 000000000..fd8faf1b4 +--- /dev/null ++++ b/test/hotspot/jtreg/runtime/lockStack/TestLockStackCapacity.java +@@ -0,0 +1,108 @@ ++/* ++ * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. 
++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++/* ++ * @test TestLockStackCapacity ++ * @summary Tests the interaction between recursive lightweight locking and ++ * the monitor inflation that occurs when the lock stack capacity is exceeded. ++ * @requires vm.flagless ++ * @library /testlibrary /test/lib ++ * @build jdk.test.whitebox.WhiteBox ++ * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox ++ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xint -XX:LockingMode=2 TestLockStackCapacity ++ */ ++ ++import jdk.test.lib.Asserts; ++import jdk.test.whitebox.WhiteBox; ++import jtreg.SkippedException; ++ ++public class TestLockStackCapacity { ++ static final WhiteBox WB = WhiteBox.getWhiteBox(); ++ static final int LockingMode = WB.getIntVMFlag("LockingMode").intValue(); ++ static final int LM_LIGHTWEIGHT = 2; ++ ++ static class SynchronizedObject { ++ static final SynchronizedObject OUTER = new SynchronizedObject(); ++ static final SynchronizedObject INNER = new SynchronizedObject(); ++ static final int LockStackCapacity = WB.getLockStackCapacity(); ++ ++ synchronized void runInner(int depth) { ++ assertNotInflated(); ++ if (depth == 1) { ++ return; ++ } else { ++ runInner(depth - 1); ++ } ++ assertNotInflated(); ++ } ++ ++ synchronized void runOuter(int depth, SynchronizedObject inner) { ++ assertNotInflated(); ++ if (depth == 1) { ++ inner.runInner(LockStackCapacity); ++ } else { ++ runOuter(depth - 1, inner); ++ } ++ assertInflated(); ++ } ++ ++ public static void runTest() { ++ // The test requires a capacity of at least 2. ++ Asserts.assertGTE(LockStackCapacity, 2); ++ ++ // Just checking ++ OUTER.assertNotInflated(); ++ INNER.assertNotInflated(); ++ ++ synchronized(OUTER) { ++ OUTER.assertNotInflated(); ++ INNER.assertNotInflated(); ++ OUTER.runOuter(LockStackCapacity - 1, INNER); ++ ++ OUTER.assertInflated(); ++ INNER.assertNotInflated(); ++ } ++ } ++ ++ void assertNotInflated() { ++ Asserts.assertFalse(WB.isMonitorInflated(this)); ++ } ++ ++ void assertInflated() { ++ Asserts.assertTrue(WB.isMonitorInflated(this)); ++ } ++ } ++ ++ public static void main(String... 
args) throws Exception { ++ if (LockingMode != LM_LIGHTWEIGHT) { ++ throw new SkippedException("Test only valid for LM_LIGHTWEIGHT"); ++ } ++ ++ if (!WB.supportsRecursiveLightweightLocking()) { ++ throw new SkippedException("Test only valid if LM_LIGHTWEIGHT supports recursion"); ++ } ++ ++ SynchronizedObject.runTest(); ++ } ++} +diff --git a/test/jdk/com/sun/jdi/EATests.java b/test/jdk/com/sun/jdi/EATests.java +index 8f0a8fabd..717cbfff3 100644 +--- a/test/jdk/com/sun/jdi/EATests.java ++++ b/test/jdk/com/sun/jdi/EATests.java +@@ -120,7 +120,46 @@ + * -XX:-DoEscapeAnalysis -XX:-EliminateAllocations -XX:+EliminateLocks -XX:+EliminateNestedLocks + * -XX:+IgnoreUnrecognizedVMOptions -XX:+DeoptimizeObjectsALot + * ++ * @bug 8324881 ++ * @comment Regression test for using the wrong thread when logging during re-locking from deoptimization. ++ * ++ * @comment DiagnoseSyncOnValueBasedClasses=2 will cause logging when locking on \@ValueBased objects. ++ * @run driver EATests ++ * -XX:+UnlockDiagnosticVMOptions ++ * -Xms256m -Xmx256m ++ * -Xbootclasspath/a:. ++ * -XX:CompileCommand=dontinline,*::dontinline_* ++ * -XX:+WhiteBoxAPI ++ * -Xbatch ++ * -XX:+DoEscapeAnalysis -XX:+EliminateAllocations -XX:+EliminateLocks -XX:+EliminateNestedLocks ++ * -XX:+UnlockExperimentalVMOptions -XX:LockingMode=1 ++ * -XX:DiagnoseSyncOnValueBasedClasses=2 ++ * ++ * @comment Re-locking may inflate monitors, which causes monitorinflation trace logging. ++ * @run driver EATests ++ * -XX:+UnlockDiagnosticVMOptions ++ * -Xms256m -Xmx256m ++ * -Xbootclasspath/a:. ++ * -XX:CompileCommand=dontinline,*::dontinline_* ++ * -XX:+WhiteBoxAPI ++ * -Xbatch ++ * -XX:+DoEscapeAnalysis -XX:+EliminateAllocations -XX:+EliminateLocks -XX:+EliminateNestedLocks ++ * -XX:+UnlockExperimentalVMOptions -XX:LockingMode=2 ++ * -Xlog:monitorinflation=trace:file=monitorinflation.log ++ * ++ * @comment Re-lock may race with deflation. ++ * @run driver EATests ++ * -XX:+UnlockDiagnosticVMOptions ++ * -Xms256m -Xmx256m ++ * -Xbootclasspath/a:. 
++ * -XX:CompileCommand=dontinline,*::dontinline_* ++ * -XX:+WhiteBoxAPI ++ * -Xbatch ++ * -XX:+DoEscapeAnalysis -XX:+EliminateAllocations -XX:+EliminateLocks -XX:+EliminateNestedLocks ++ * -XX:+UnlockExperimentalVMOptions -XX:LockingMode=0 ++ * -XX:GuaranteedAsyncDeflationInterval=1000 + */ ++ + /** + * @test + * @bug 8227745 +@@ -254,12 +293,14 @@ class EATestsTarget { + new EARelockingRecursiveTarget() .run(); + new EARelockingNestedInflatedTarget() .run(); + new EARelockingNestedInflated_02Target() .run(); ++ new EARelockingNestedInflated_03Target() .run(); + new EARelockingArgEscapeLWLockedInCalleeFrameTarget() .run(); + new EARelockingArgEscapeLWLockedInCalleeFrame_2Target() .run(); + new EARelockingArgEscapeLWLockedInCalleeFrameNoRecursiveTarget() .run(); + new EAGetOwnedMonitorsTarget() .run(); + new EAEntryCountTarget() .run(); + new EARelockingObjectCurrentlyWaitingOnTarget() .run(); ++ new EARelockingValueBasedTarget() .run(); + + // Test cases that require deoptimization even though neither + // locks nor allocations are eliminated at the point where +@@ -374,12 +415,14 @@ public class EATests extends TestScaffold { + new EARelockingRecursive() .run(this); + new EARelockingNestedInflated() .run(this); + new EARelockingNestedInflated_02() .run(this); ++ new EARelockingNestedInflated_03() .run(this); + new EARelockingArgEscapeLWLockedInCalleeFrame() .run(this); + new EARelockingArgEscapeLWLockedInCalleeFrame_2() .run(this); + new EARelockingArgEscapeLWLockedInCalleeFrameNoRecursive() .run(this); + new EAGetOwnedMonitors() .run(this); + new EAEntryCount() .run(this); + new EARelockingObjectCurrentlyWaitingOn() .run(this); ++ new EARelockingValueBased() .run(this); + + // Test cases that require deoptimization even though neither + // locks nor allocations are eliminated at the point where +@@ -2013,6 +2056,94 @@ class EARelockingNestedInflated_02Target extends EATestCaseBaseTarget { + + ///////////////////////////////////////////////////////////////////////////// + ++/** ++ * Like {@link EARelockingNestedInflated_02} with the difference that the ++ * inflation of the lock happens because of contention. ++ */ ++class EARelockingNestedInflated_03 extends EATestCaseBaseDebugger { ++ ++ public void runTestCase() throws Exception { ++ BreakpointEvent bpe = resumeTo(TARGET_TESTCASE_BASE_NAME, "dontinline_brkpt", "()V"); ++ printStack(bpe.thread()); ++ @SuppressWarnings("unused") ++ ObjectReference o = getLocalRef(bpe.thread().frame(2), XYVAL_NAME, "l1"); ++ } ++} ++ ++class EARelockingNestedInflated_03Target extends EATestCaseBaseTarget { ++ ++ public XYVal lockInflatedByContention; ++ public boolean doLockNow; ++ public EATestCaseBaseTarget testCase; ++ ++ @Override ++ public void setUp() { ++ super.setUp(); ++ testMethodDepth = 2; ++ lockInflatedByContention = new XYVal(1, 1); ++ testCase = this; ++ } ++ ++ @Override ++ public void warmupDone() { ++ super.warmupDone(); ++ // Use new lock. lockInflatedByContention might have been inflated because of recursion. 
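++        // (A fresh object starts out with a neutral, never-locked header, so any
++        // later inflation of lockInflatedByContention can only come from the
++        // contention arranged below.)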
++ lockInflatedByContention = new XYVal(1, 1); ++ // Start thread that tries to enter lockInflatedByContention while the main thread owns it -> inflation ++ TestScaffold.newThread(() -> { ++ while (true) { ++ synchronized (testCase) { ++ try { ++ if (doLockNow) { ++ doLockNow = false; // reset for main thread ++ testCase.notify(); ++ break; ++ } ++ testCase.wait(); ++ } catch (InterruptedException e) { /* ignored */ } ++ } ++ } ++ synchronized (lockInflatedByContention) { // will block and trigger inflation ++ msg(Thread.currentThread().getName() + ": acquired lockInflatedByContention"); ++ } ++ }, testCaseName + ": Lock Contender (test thread)").start(); ++ } ++ ++ public void dontinline_testMethod() { ++ @SuppressWarnings("unused") ++ XYVal xy = new XYVal(1, 1); // scalar replaced ++ XYVal l1 = lockInflatedByContention; // read by debugger ++ synchronized (l1) { ++ testMethod_inlined(l1); ++ } ++ } ++ ++ public void testMethod_inlined(XYVal l2) { ++ synchronized (l2) { // eliminated nested locking ++ dontinline_notifyOtherThread(); ++ dontinline_brkpt(); ++ } ++ } ++ ++ public void dontinline_notifyOtherThread() { ++ if (!warmupDone) { ++ return; ++ } ++ synchronized (testCase) { ++ doLockNow = true; ++ testCase.notify(); ++ // wait for other thread to reset doLockNow again ++ while (doLockNow) { ++ try { ++ testCase.wait(); ++ } catch (InterruptedException e) { /* ignored */ } ++ } ++ } ++ } ++} ++ ++///////////////////////////////////////////////////////////////////////////// ++ + /** + * Checks if an eliminated lock of an ArgEscape object l1 can be relocked if + * l1 is locked in a callee frame. +@@ -2228,6 +2359,32 @@ class EARelockingObjectCurrentlyWaitingOnTarget extends EATestCaseBaseTarget { + } + } + ++ ++///////////////////////////////////////////////////////////////////////////// ++ ++/** ++ * Test relocking eliminated @ValueBased object. ++ */ ++class EARelockingValueBased extends EATestCaseBaseDebugger { ++ ++ public void runTestCase() throws Exception { ++ BreakpointEvent bpe = resumeTo(TARGET_TESTCASE_BASE_NAME, "dontinline_brkpt", "()V"); ++ printStack(bpe.thread()); ++ @SuppressWarnings("unused") ++ ObjectReference o = getLocalRef(bpe.thread().frame(1), Integer.class.getName(), "l1"); ++ } ++} ++ ++class EARelockingValueBasedTarget extends EATestCaseBaseTarget { ++ ++ public void dontinline_testMethod() { ++ Integer l1 = new Integer(255); ++ synchronized (l1) { ++ dontinline_brkpt(); ++ } ++ } ++} ++ + ///////////////////////////////////////////////////////////////////////////// + // + // Test cases that require deoptimization even though neither locks +diff --git a/test/jdk/java/lang/instrument/GetObjectSizeIntrinsicsTest.java b/test/jdk/java/lang/instrument/GetObjectSizeIntrinsicsTest.java +index fc3b1a66d..0435a9bb7 100644 +--- a/test/jdk/java/lang/instrument/GetObjectSizeIntrinsicsTest.java ++++ b/test/jdk/java/lang/instrument/GetObjectSizeIntrinsicsTest.java +@@ -301,6 +301,7 @@ import jdk.test.whitebox.WhiteBox; + + public class GetObjectSizeIntrinsicsTest extends ASimpleInstrumentationTestCase { + ++ private static final boolean COMPACT_HEADERS = Platform.is64bit() && WhiteBox.getWhiteBox().getBooleanVMFlag("UseCompactObjectHeaders"); + static final Boolean COMPRESSED_OOPS = WhiteBox.getWhiteBox().getBooleanVMFlag("UseCompressedOops"); + static final long REF_SIZE = (COMPRESSED_OOPS == null || COMPRESSED_OOPS == true) ? 
4 : 8; + +@@ -313,6 +314,9 @@ public class GetObjectSizeIntrinsicsTest extends ASimpleInstrumentationTestCase + static final int LARGE_INT_ARRAY_SIZE = 1024*1024*1024 + 1024; + static final int LARGE_OBJ_ARRAY_SIZE = (4096/(int)REF_SIZE)*1024*1024 + 1024; + ++ static final boolean CCP = WhiteBox.getWhiteBox().getBooleanVMFlag("UseCompressedClassPointers"); ++ static final int ARRAY_HEADER_SIZE = CCP ? 16 : (Platform.is64bit() ? 20 : 16); ++ + final String mode; + + public GetObjectSizeIntrinsicsTest(String name, String mode) { +@@ -371,15 +375,25 @@ public class GetObjectSizeIntrinsicsTest extends ASimpleInstrumentationTestCase + return (v + a - 1) / a * a; + } + ++ private static long expectedSmallObjSize() { ++ long size; ++ if (!Platform.is64bit() || COMPACT_HEADERS) { ++ size = 8; ++ } else { ++ size = 16; ++ } ++ return roundUp(size, OBJ_ALIGN); ++ } ++ + private void testSize_newObject() { +- long expected = roundUp(Platform.is64bit() ? 16 : 8, OBJ_ALIGN); ++ long expected = expectedSmallObjSize(); + for (int c = 0; c < ITERS; c++) { + assertEquals(expected, fInst.getObjectSize(new Object())); + } + } + + private void testSize_localObject() { +- long expected = roundUp(Platform.is64bit() ? 16 : 8, OBJ_ALIGN); ++ long expected = expectedSmallObjSize(); + Object o = new Object(); + for (int c = 0; c < ITERS; c++) { + assertEquals(expected, fInst.getObjectSize(o)); +@@ -389,14 +403,14 @@ public class GetObjectSizeIntrinsicsTest extends ASimpleInstrumentationTestCase + static Object staticO = new Object(); + + private void testSize_fieldObject() { +- long expected = roundUp(Platform.is64bit() ? 16 : 8, OBJ_ALIGN); ++ long expected = expectedSmallObjSize(); + for (int c = 0; c < ITERS; c++) { + assertEquals(expected, fInst.getObjectSize(staticO)); + } + } + + private void testSize_newSmallIntArray() { +- long expected = roundUp(4L*SMALL_ARRAY_SIZE + 16, OBJ_ALIGN); ++ long expected = roundUp(4L*SMALL_ARRAY_SIZE + ARRAY_HEADER_SIZE, OBJ_ALIGN); + for (int c = 0; c < ITERS; c++) { + assertEquals(expected, fInst.getObjectSize(new int[SMALL_ARRAY_SIZE])); + } +@@ -404,7 +418,7 @@ public class GetObjectSizeIntrinsicsTest extends ASimpleInstrumentationTestCase + + private void testSize_localSmallIntArray() { + int[] arr = new int[SMALL_ARRAY_SIZE]; +- long expected = roundUp(4L*SMALL_ARRAY_SIZE + 16, OBJ_ALIGN); ++ long expected = roundUp(4L*SMALL_ARRAY_SIZE + ARRAY_HEADER_SIZE, OBJ_ALIGN); + for (int c = 0; c < ITERS; c++) { + assertEquals(expected, fInst.getObjectSize(arr)); + } +@@ -413,14 +427,14 @@ public class GetObjectSizeIntrinsicsTest extends ASimpleInstrumentationTestCase + static int[] smallArr = new int[SMALL_ARRAY_SIZE]; + + private void testSize_fieldSmallIntArray() { +- long expected = roundUp(4L*SMALL_ARRAY_SIZE + 16, OBJ_ALIGN); ++ long expected = roundUp(4L*SMALL_ARRAY_SIZE + ARRAY_HEADER_SIZE, OBJ_ALIGN); + for (int c = 0; c < ITERS; c++) { + assertEquals(expected, fInst.getObjectSize(smallArr)); + } + } + + private void testSize_newSmallObjArray() { +- long expected = roundUp(REF_SIZE*SMALL_ARRAY_SIZE + 16, OBJ_ALIGN); ++ long expected = roundUp(REF_SIZE*SMALL_ARRAY_SIZE + ARRAY_HEADER_SIZE, OBJ_ALIGN); + for (int c = 0; c < ITERS; c++) { + assertEquals(expected, fInst.getObjectSize(new Object[SMALL_ARRAY_SIZE])); + } +@@ -428,7 +442,7 @@ public class GetObjectSizeIntrinsicsTest extends ASimpleInstrumentationTestCase + + private void testSize_localSmallObjArray() { + Object[] arr = new Object[SMALL_ARRAY_SIZE]; +- long expected = roundUp(REF_SIZE*SMALL_ARRAY_SIZE + 16, 
OBJ_ALIGN); ++ long expected = roundUp(REF_SIZE*SMALL_ARRAY_SIZE + ARRAY_HEADER_SIZE, OBJ_ALIGN); + for (int c = 0; c < ITERS; c++) { + assertEquals(expected, fInst.getObjectSize(arr)); + } +@@ -437,7 +451,7 @@ public class GetObjectSizeIntrinsicsTest extends ASimpleInstrumentationTestCase + static Object[] smallObjArr = new Object[SMALL_ARRAY_SIZE]; + + private void testSize_fieldSmallObjArray() { +- long expected = roundUp(REF_SIZE*SMALL_ARRAY_SIZE + 16, OBJ_ALIGN); ++ long expected = roundUp(REF_SIZE*SMALL_ARRAY_SIZE + ARRAY_HEADER_SIZE, OBJ_ALIGN); + for (int c = 0; c < ITERS; c++) { + assertEquals(expected, fInst.getObjectSize(smallObjArr)); + } +@@ -445,7 +459,7 @@ public class GetObjectSizeIntrinsicsTest extends ASimpleInstrumentationTestCase + + private void testSize_localLargeIntArray() { + int[] arr = new int[LARGE_INT_ARRAY_SIZE]; +- long expected = roundUp(4L*LARGE_INT_ARRAY_SIZE + 16, OBJ_ALIGN); ++ long expected = roundUp(4L*LARGE_INT_ARRAY_SIZE + ARRAY_HEADER_SIZE, OBJ_ALIGN); + for (int c = 0; c < ITERS; c++) { + assertEquals(expected, fInst.getObjectSize(arr)); + } +@@ -453,7 +467,7 @@ public class GetObjectSizeIntrinsicsTest extends ASimpleInstrumentationTestCase + + private void testSize_localLargeObjArray() { + Object[] arr = new Object[LARGE_OBJ_ARRAY_SIZE]; +- long expected = roundUp(REF_SIZE*LARGE_OBJ_ARRAY_SIZE + 16, OBJ_ALIGN); ++ long expected = roundUp(REF_SIZE*LARGE_OBJ_ARRAY_SIZE + ARRAY_HEADER_SIZE, OBJ_ALIGN); + for (int c = 0; c < ITERS; c++) { + assertEquals(expected, fInst.getObjectSize(arr)); + } +diff --git a/test/jdk/tools/jlink/plugins/CDSPluginTest.java b/test/jdk/tools/jlink/plugins/CDSPluginTest.java +index c57c10f9e..f1910631f 100644 +--- a/test/jdk/tools/jlink/plugins/CDSPluginTest.java ++++ b/test/jdk/tools/jlink/plugins/CDSPluginTest.java +@@ -26,6 +26,7 @@ import java.io.File; + import jdk.test.lib.JDKToolFinder; + import jdk.test.lib.Platform; + import jdk.test.lib.process.*; ++import jdk.test.whitebox.WhiteBox; + + import tests.Helper; + +@@ -44,7 +45,9 @@ import jtreg.SkippedException; + * jdk.jlink/jdk.tools.jimage + * jdk.compiler + * @build tests.* +- * @run main CDSPluginTest ++ * @build jdk.test.whitebox.WhiteBox ++ * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox ++ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:. CDSPluginTest + */ + + public class CDSPluginTest { +@@ -61,9 +64,12 @@ public class CDSPluginTest { + } + + var module = "cds"; ++ boolean COMPACT_HEADERS = ++ Platform.isAArch64() && WhiteBox.getWhiteBox().getBooleanVMFlag("UseCompactObjectHeaders"); + helper.generateDefaultJModule(module); +- var image = helper.generateDefaultImage(new String[] { "--generate-cds-archive" }, +- module) ++ String[] options = COMPACT_HEADERS ? new String[] { "--generate-cds-archive", "--add-options", "-XX:+UnlockExperimentalVMOptions -XX:+UseCompactObjectHeaders" } ++ : new String[] { "--generate-cds-archive" }; ++ var image = helper.generateDefaultImage(options, module) + .assertSuccess(); + + String subDir; +@@ -75,12 +81,15 @@ public class CDSPluginTest { + } + subDir += "server" + sep; + +- if (Platform.isAArch64() || Platform.isX64()) { ++ ++ String suffix = COMPACT_HEADERS ? 
"_coh.jsa" : ".jsa"; ++ ++ if (Platform.isAArch64()) { + helper.checkImage(image, module, null, null, +- new String[] { subDir + "classes.jsa", subDir + "classes_nocoops.jsa" }); ++ new String[] { subDir + "classes" + suffix, subDir + "classes_nocoops" + suffix }); + } else { + helper.checkImage(image, module, null, null, +- new String[] { subDir + "classes.jsa" }); ++ new String[] { subDir + "classes" + suffix }); + } + } + } +diff --git a/test/lib/jdk/test/whitebox/WhiteBox.java b/test/lib/jdk/test/whitebox/WhiteBox.java +index b0e2530f7..9d905b684 100644 +--- a/test/lib/jdk/test/whitebox/WhiteBox.java ++++ b/test/lib/jdk/test/whitebox/WhiteBox.java +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it +@@ -119,6 +119,10 @@ public class WhiteBox { + return isMonitorInflated0(obj); + } + ++ public native int getLockStackCapacity(); ++ ++ public native boolean supportsRecursiveLightweightLocking(); ++ + public native void forceSafepoint(); + + public native void forceClassLoaderStatsSafepoint(); +-- +2.34.1 + diff --git a/huawei-Add-Jprofilecache-feature.patch b/huawei-Add-Jprofilecache-feature.patch new file mode 100644 index 0000000000000000000000000000000000000000..8ffe5e908fc22982ef494d98e6ec6dad9c744408 --- /dev/null +++ b/huawei-Add-Jprofilecache-feature.patch @@ -0,0 +1,6296 @@ +Date: Tue, 25 Nov 2025 19:57:11 +0800 +Subject: [PATCH 5/8] Add Jprofilecache feature + +--- + make/autoconf/jvm-features.m4 | 26 +- + make/hotspot/lib/JvmFeatures.gmk | 6 + + src/hotspot/cpu/aarch64/globals_aarch64.hpp | 44 ++ + src/hotspot/share/ci/ciEnv.cpp | 59 ++ + src/hotspot/share/ci/ciEnv.hpp | 9 + + src/hotspot/share/ci/ciMethod.cpp | 2 + + src/hotspot/share/ci/ciMethodData.cpp | 78 ++- + src/hotspot/share/ci/ciMethodData.hpp | 2 +- + .../share/classfile/classLoaderDataGraph.cpp | 13 + + .../share/classfile/systemDictionary.cpp | 70 +- + src/hotspot/share/code/nmethod.cpp | 10 + + src/hotspot/share/compiler/compileBroker.cpp | 5 + + src/hotspot/share/compiler/compileTask.cpp | 6 + + src/hotspot/share/compiler/compileTask.hpp | 14 + + src/hotspot/share/jprofilecache/hashtable.cpp | 338 ++++++++++ + src/hotspot/share/jprofilecache/hashtable.hpp | 446 +++++++++++++ + .../share/jprofilecache/hashtable.inline.hpp | 116 ++++ + .../share/jprofilecache/jitProfileCache.cpp | 287 +++++++++ + .../share/jprofilecache/jitProfileCache.hpp | 148 +++++ + .../jprofilecache/jitProfileCacheClass.cpp | 112 ++++ + .../jprofilecache/jitProfileCacheClass.hpp | 121 ++++ + .../jitProfileCacheFileParser.cpp | 433 +++++++++++++ + .../jitProfileCacheFileParser.hpp | 100 +++ + .../jprofilecache/jitProfileCacheHolders.cpp | 120 ++++ + .../jprofilecache/jitProfileCacheHolders.hpp | 164 +++++ + .../jprofilecache/jitProfileCacheThread.cpp | 116 ++++ + .../jprofilecache/jitProfileCacheThread.hpp | 54 ++ + .../jprofilecache/jitProfileCacheUtils.cpp | 105 +++ + .../jprofilecache/jitProfileCacheUtils.hpp | 41 ++ + .../jprofilecache/jitProfileClassChain.cpp | 523 +++++++++++++++ + .../jprofilecache/jitProfileClassChain.hpp | 231 +++++++ + .../share/jprofilecache/jitProfileRecord.cpp | 602 ++++++++++++++++++ + .../share/jprofilecache/jitProfileRecord.hpp | 213 +++++++ + .../jprofilecache/symbolRegexMatcher.cpp | 95 +++ + .../jprofilecache/symbolRegexMatcher.hpp | 64 
++ + src/hotspot/share/libadt/dict.cpp | 26 + + src/hotspot/share/libadt/dict.hpp | 1 + + src/hotspot/share/logging/logTag.hpp | 1 + + src/hotspot/share/oops/constantPool.cpp | 124 +++- + src/hotspot/share/oops/constantPool.hpp | 37 ++ + src/hotspot/share/oops/instanceKlass.cpp | 18 + + src/hotspot/share/oops/instanceKlass.hpp | 37 ++ + src/hotspot/share/oops/method.cpp | 13 + + src/hotspot/share/oops/method.hpp | 33 + + src/hotspot/share/oops/methodData.hpp | 3 + + src/hotspot/share/opto/callGenerator.cpp | 6 +- + src/hotspot/share/opto/compile.cpp | 12 + + src/hotspot/share/opto/graphKit.cpp | 16 +- + src/hotspot/share/opto/lcm.cpp | 5 + + src/hotspot/share/runtime/arguments.cpp | 97 +++ + src/hotspot/share/runtime/init.cpp | 24 + + src/hotspot/share/runtime/java.cpp | 10 + + src/hotspot/share/runtime/mutexLocker.cpp | 10 + + src/hotspot/share/runtime/mutexLocker.hpp | 5 + + src/hotspot/share/runtime/thread.cpp | 6 + + src/hotspot/share/runtime/thread.hpp | 14 + + src/hotspot/share/runtime/threads.cpp | 14 + + src/hotspot/share/utilities/ostream.cpp | 27 + + src/hotspot/share/utilities/ostream.hpp | 15 +- + 59 files changed, 5307 insertions(+), 20 deletions(-) + create mode 100644 src/hotspot/share/jprofilecache/hashtable.cpp + create mode 100644 src/hotspot/share/jprofilecache/hashtable.hpp + create mode 100644 src/hotspot/share/jprofilecache/hashtable.inline.hpp + create mode 100644 src/hotspot/share/jprofilecache/jitProfileCache.cpp + create mode 100644 src/hotspot/share/jprofilecache/jitProfileCache.hpp + create mode 100644 src/hotspot/share/jprofilecache/jitProfileCacheClass.cpp + create mode 100644 src/hotspot/share/jprofilecache/jitProfileCacheClass.hpp + create mode 100644 src/hotspot/share/jprofilecache/jitProfileCacheFileParser.cpp + create mode 100644 src/hotspot/share/jprofilecache/jitProfileCacheFileParser.hpp + create mode 100644 src/hotspot/share/jprofilecache/jitProfileCacheHolders.cpp + create mode 100644 src/hotspot/share/jprofilecache/jitProfileCacheHolders.hpp + create mode 100644 src/hotspot/share/jprofilecache/jitProfileCacheThread.cpp + create mode 100644 src/hotspot/share/jprofilecache/jitProfileCacheThread.hpp + create mode 100644 src/hotspot/share/jprofilecache/jitProfileCacheUtils.cpp + create mode 100644 src/hotspot/share/jprofilecache/jitProfileCacheUtils.hpp + create mode 100644 src/hotspot/share/jprofilecache/jitProfileClassChain.cpp + create mode 100644 src/hotspot/share/jprofilecache/jitProfileClassChain.hpp + create mode 100644 src/hotspot/share/jprofilecache/jitProfileRecord.cpp + create mode 100644 src/hotspot/share/jprofilecache/jitProfileRecord.hpp + create mode 100644 src/hotspot/share/jprofilecache/symbolRegexMatcher.cpp + create mode 100644 src/hotspot/share/jprofilecache/symbolRegexMatcher.hpp + +diff --git a/make/autoconf/jvm-features.m4 b/make/autoconf/jvm-features.m4 +index 288019848..2a66809bb 100644 +--- a/make/autoconf/jvm-features.m4 ++++ b/make/autoconf/jvm-features.m4 +@@ -44,7 +44,7 @@ + m4_define(jvm_features_valid, m4_normalize( \ + ifdef([custom_jvm_features_valid], custom_jvm_features_valid) \ + \ +- cds compiler1 compiler2 dtrace epsilongc g1gc jfr jni-check \ ++ cds compiler1 compiler2 dtrace epsilongc g1gc jfr jni-check jprofilecache\ + jvmci jvmti link-time-opt management minimal opt-size parallelgc \ + serialgc services shenandoahgc static-build vm-structs zero zgc \ + )) +@@ -63,6 +63,7 @@ m4_define(jvm_feature_desc_epsilongc, [include the epsilon (no-op) garbage colle + m4_define(jvm_feature_desc_g1gc, [include the G1 garbage 
collector]) + m4_define(jvm_feature_desc_jfr, [enable JDK Flight Recorder (JFR)]) + m4_define(jvm_feature_desc_jni_check, [enable -Xcheck:jni support]) ++m4_define(jvm_feature_desc_jprofilecache, [enable Profile Cache (JPROFILECACHE)]) + m4_define(jvm_feature_desc_jvmci, [enable JVM Compiler Interface (JVMCI)]) + m4_define(jvm_feature_desc_jvmti, [enable Java Virtual Machine Tool Interface (JVM TI)]) + m4_define(jvm_feature_desc_link_time_opt, [enable link time optimization]) +@@ -270,6 +271,22 @@ AC_DEFUN_ONCE([JVM_FEATURES_CHECK_DTRACE], + ]) + ]) + ++############################################################################### ++# Check if the feature 'jprofilecache' is available on this platform. ++# ++AC_DEFUN_ONCE([JVM_FEATURES_CHECK_JPROFILECACHE], ++[ ++ JVM_FEATURES_CHECK_AVAILABILITY(jprofilecache, [ ++ AC_MSG_CHECKING([if platform is supported by JPROFILECACHE]) ++ if test "x$OPENJDK_TARGET_CPU" = "xaarch64"; then ++ AC_MSG_RESULT([yes]) ++ else ++ AC_MSG_RESULT([no, $OPENJDK_TARGET_CPU]) ++ AVAILABLE=false ++ fi ++ ]) ++]) ++ + ############################################################################### + # Check if the feature 'jvmci' is available on this platform. + # +@@ -393,6 +410,7 @@ AC_DEFUN_ONCE([JVM_FEATURES_PREPARE_PLATFORM], + + JVM_FEATURES_CHECK_CDS + JVM_FEATURES_CHECK_DTRACE ++ JVM_FEATURES_CHECK_JPROFILECACHE + JVM_FEATURES_CHECK_JVMCI + JVM_FEATURES_CHECK_SHENANDOAHGC + JVM_FEATURES_CHECK_STATIC_BUILD +@@ -419,7 +437,7 @@ AC_DEFUN([JVM_FEATURES_PREPARE_VARIANT], + JVM_FEATURES_VARIANT_UNAVAILABLE="cds minimal zero" + elif test "x$variant" = "xzero"; then + JVM_FEATURES_VARIANT_UNAVAILABLE="compiler1 compiler2 \ +- jvmci minimal zgc" ++ jprofilecache jvmci minimal zgc" + else + JVM_FEATURES_VARIANT_UNAVAILABLE="minimal zero" + fi +@@ -515,6 +533,10 @@ AC_DEFUN([JVM_FEATURES_VERIFY], + [ + variant=$1 + ++ if JVM_FEATURES_IS_ACTIVE(jprofilecache) && ! (JVM_FEATURES_IS_ACTIVE(compiler2)); then ++ AC_MSG_ERROR([Specified JVM feature 'jprofilecache' requires feature 'compiler2' for variant '$variant']) ++ fi ++ + if JVM_FEATURES_IS_ACTIVE(jvmci) && ! 
(JVM_FEATURES_IS_ACTIVE(compiler1) || \
+      JVM_FEATURES_IS_ACTIVE(compiler2)); then
+    AC_MSG_ERROR([Specified JVM feature 'jvmci' requires feature 'compiler2' or 'compiler1' for variant '$variant'])
+diff --git a/make/hotspot/lib/JvmFeatures.gmk b/make/hotspot/lib/JvmFeatures.gmk
+index f57406dc1..664376b07 100644
+--- a/make/hotspot/lib/JvmFeatures.gmk
++++ b/make/hotspot/lib/JvmFeatures.gmk
+@@ -91,6 +91,12 @@ ifneq ($(call check-jvm-feature, jvmti), true)
+       jvmtiClassFileReconstituter.cpp jvmtiTagMapTable.cpp jvmtiAgent.cpp jvmtiAgentList.cpp
+ endif
+ 
++ifneq ($(call check-jvm-feature, jprofilecache), true)
++  JVM_CFLAGS_FEATURES += -DINCLUDE_JPROFILECACHE=0
++  JVM_EXCLUDES += jprofilecache
++  JVM_EXCLUDE_PATTERNS += jprofilecache/
++endif
++
+ ifneq ($(call check-jvm-feature, jvmci), true)
+   JVM_CFLAGS_FEATURES += -DINCLUDE_JVMCI=0
+   JVM_EXCLUDES += jvmci
+diff --git a/src/hotspot/cpu/aarch64/globals_aarch64.hpp b/src/hotspot/cpu/aarch64/globals_aarch64.hpp
+index 77f72226e..efd29661a 100644
+--- a/src/hotspot/cpu/aarch64/globals_aarch64.hpp
++++ b/src/hotspot/cpu/aarch64/globals_aarch64.hpp
+@@ -115,6 +115,50 @@ define_pd_global(intx, InlineSmallCode, 1000);
+           "Minimum size in bytes when block zeroing will be used")     \
+           range(wordSize, max_jint)                                    \
+   product(bool, TraceTraps, false, "Trace all traps the signal handler")\
++                                                                        \
++  product(bool, ExitVMProfileCacheFlush, false, EXPERIMENTAL,           \
++          "ExitVMProfileCacheFlush")                                    \
++                                                                        \
++  product(bool, JProfilingCacheRecording, false, EXPERIMENTAL,          \
++          "Collect profiling information for JProfilingCache")          \
++                                                                        \
++  product(bool, JProfilingCacheCompileAdvance, false, EXPERIMENTAL,     \
++          "Enable JProfilingCacheCompileAdvance from a log file")       \
++                                                                        \
++  product(ccstr, CompilationProfileCacheExclude, nullptr, EXPERIMENTAL, \
++          "Exclusion list for JProfilingCacheCompileAdvance")           \
++                                                                        \
++  product(bool, UseJProfilingCacheSystemBlackList, true, EXPERIMENTAL,  \
++          "Exclude some system classes from JProfileCache preloading")  \
++                                                                        \
++  product(uintx, JProfilingCacheDelayLoadTime, 1000, EXPERIMENTAL,      \
++          "Sleep time (in milliseconds) before JProfileCache loads "    \
++          "class and method profiles")                                  \
++          range(0, 3600000)                                             \
++                                                                        \
++  develop(bool, CompilationProfileCacheResolveClassEagerly, true,       \
++          "Resolve classes from the constant pool eagerly")             \
++                                                                        \
++  product(ccstr, ProfilingCacheFile, nullptr, EXPERIMENTAL,             \
++          "Log file name for JProfilingCache")                          \
++                                                                        \
++  product(uintx, CompilationProfileCacheAppID, 0, EXPERIMENTAL,         \
++          "Application ID written to the log file for verification")    \
++          range(0, 4294967295)                                          \
++                                                                        \
++  product(ccstr, JProfilingCacheAutoArchiveDir, nullptr, EXPERIMENTAL,  \
++          "Specify the JProfilingCache directory under which the "      \
++          "jprofilecache file will be auto-generated and replayed")     \
++                                                                        \
++  product(int, JProfilingCacheMaxTierLimit, 3, EXPERIMENTAL,            \
++          "If a method's compile level is higher than this option, the "\
++          "method is precompiled at this level instead")                \
++          range(1, 4)                                                   \
++                                                                        \
++  product(bool, JProfilingCacheReplayProfileData, false, EXPERIMENTAL,  \
++          "Load method data from the ProfileData dumped in the "        \
++          "jprofilecache file, if present")                             \
++                                                                        \
+   product(int, SoftwarePrefetchHintDistance, -1,                        \
+           "Use prfm hint with specified distance in compiled code."     \
+           "Value -1 means off.")                                        \
+diff --git a/src/hotspot/share/ci/ciEnv.cpp b/src/hotspot/share/ci/ciEnv.cpp
+index 31ef828b2..5c21b92b6 100644
+--- a/src/hotspot/share/ci/ciEnv.cpp
++++ b/src/hotspot/share/ci/ciEnv.cpp
+@@ -819,6 +819,65 @@ ciField* ciEnv::get_field_by_index(ciInstanceKlass* accessor,
+   GUARDED_VM_ENTRY(return get_field_by_index_impl(accessor, index, bc);)
+ }
+ 
++#ifdef AARCH64
++// ------------------------------------------------------------------
++// ciEnv::is_field_resolved
++//
++// Returns whether this field has been resolved.
++bool ciEnv::is_field_resolved(ciInstanceKlass* accessor_klass,
++                              int constant_pool_index, Bytecodes::Code bc) {
++  GUARDED_VM_ENTRY(
++    ciConstantPoolCache* field_cache = accessor_klass->field_cache();
++    if (field_cache != nullptr) {
++      ciField* field = (ciField*)field_cache->get(constant_pool_index);
++      if (field != nullptr) {
++        return true;
++      }
++    }
++    CompilerThread *current_thread = CompilerThread::current();
++    assert(accessor_klass->get_instanceKlass()->is_linked(), "must be linked before using its constant-pool");
++    constantPoolHandle constant_pool(current_thread, accessor_klass->get_instanceKlass()->constants());
++
++    // Get the field's name, signature, and type.
++    Symbol* name = constant_pool->name_ref_at(constant_pool_index, bc);
++    if (name == nullptr) {
++      return false;
++    }
++    int name_index = constant_pool->name_and_type_ref_index_at(constant_pool_index, bc);
++    int signature_index = constant_pool->signature_ref_index_at(name_index);
++    Symbol* signature = constant_pool->symbol_at(signature_index);
++    if (signature == nullptr) {
++      return false;
++    }
++    return true;
++  )
++}
++
++// ------------------------------------------------------------------
++//
++// Check if all fields needed by this method in the ConstantPool are resolved
++bool ciEnv::are_method_fields_all_resolved(ciMethod* method) {
++  ciInstanceKlass* holder_klass = method->holder();
++  ciBytecodeStream bytecode_stream(method);
++  int start_bci = 0;
++  int end_bci = method->code_size();
++  bytecode_stream.reset_to_bci(start_bci);
++  Bytecodes::Code current_opcode;
++  while ((current_opcode = bytecode_stream.next()) != ciBytecodeStream::EOBC() &&
++         bytecode_stream.cur_bci() < end_bci) {
++    if (current_opcode == Bytecodes::_getfield ||
++        current_opcode == Bytecodes::_getstatic ||
++        current_opcode == Bytecodes::_putfield ||
++        current_opcode == Bytecodes::_putstatic) {
++      if (!is_field_resolved(holder_klass, bytecode_stream.get_index_u2_cpcache(), current_opcode)) {
++        return false;
++      }
++    }
++  }
++  return true;
++}
++#endif
++
+ // ------------------------------------------------------------------
+ // ciEnv::lookup_method
+ //
+diff --git a/src/hotspot/share/ci/ciEnv.hpp b/src/hotspot/share/ci/ciEnv.hpp
+index ef3afa3eb..afc0befb8 100644
+--- a/src/hotspot/share/ci/ciEnv.hpp
++++ b/src/hotspot/share/ci/ciEnv.hpp
+@@ -164,6 +164,10 @@ private:
+                             Symbol* sig,
+                             Bytecodes::Code bc,
+                             constantTag tag);
++#ifdef AARCH64
++  bool is_field_resolved(ciInstanceKlass* accessor_klass,
++                         int constant_pool_index, Bytecodes::Code bc);
++#endif
+ 
+   ciConstant unbox_primitive_value(ciObject* cibox, BasicType expected_bt = T_ILLEGAL);
+   ciConstant get_resolved_constant(const constantPoolHandle& cpool, int obj_index);
+@@ -325,6 +329,11 @@ public:
+   // Reason this compilation is failing, such as "too many basic blocks".
+ const char* failure_reason() const { return _failure_reason.get(); } + ++#ifdef AARCH64 ++ // Check if all fields needed by this method in ConstantPool are resolved ++ bool are_method_fields_all_resolved(ciMethod* method); ++#endif ++ + // Return state of appropriate compatibility + int compilable() { return _compilable; } + +diff --git a/src/hotspot/share/ci/ciMethod.cpp b/src/hotspot/share/ci/ciMethod.cpp +index f111e110b..95cdd8f71 100644 +--- a/src/hotspot/share/ci/ciMethod.cpp ++++ b/src/hotspot/share/ci/ciMethod.cpp +@@ -36,6 +36,7 @@ + #include "ci/ciUtilities.inline.hpp" + #include "compiler/abstractCompiler.hpp" + #include "compiler/compilerDefinitions.inline.hpp" ++#include "compiler/compileTask.hpp" + #include "compiler/methodLiveness.hpp" + #include "interpreter/interpreter.hpp" + #include "interpreter/linkResolver.hpp" +@@ -983,6 +984,7 @@ bool ciMethod::ensure_method_data(const methodHandle& h_m) { + if (is_native() || is_abstract() || h_m()->is_accessor()) { + return true; + } ++ + if (h_m()->method_data() == nullptr) { + Method::build_profiling_method_data(h_m, THREAD); + if (HAS_PENDING_EXCEPTION) { +diff --git a/src/hotspot/share/ci/ciMethodData.cpp b/src/hotspot/share/ci/ciMethodData.cpp +index 9bd73d32f..113efc7cf 100644 +--- a/src/hotspot/share/ci/ciMethodData.cpp ++++ b/src/hotspot/share/ci/ciMethodData.cpp +@@ -33,6 +33,11 @@ + #include "oops/klass.inline.hpp" + #include "runtime/deoptimization.hpp" + #include "utilities/copy.hpp" ++#ifdef AARCH64 ++#include "compiler/compileTask.hpp" ++#include "jprofilecache/jitProfileCacheHolders.hpp" ++#include "jprofilecache/jitProfileRecord.hpp" ++#endif + + // ciMethodData + +@@ -120,7 +125,7 @@ void ciMethodData::prepare_metadata() { + } + } + +-void ciMethodData::load_remaining_extra_data() { ++void ciMethodData::load_remaining_extra_data(AARCH64_ONLY(bool need_load_jprofile)) { + MethodData* mdo = get_MethodData(); + MutexLocker ml(mdo->extra_data_lock()); + // Deferred metadata cleaning due to concurrent class unloading. 
+@@ -155,12 +160,27 @@
+       }
+     case DataLayout::bit_data_tag:
+       break;
+-    case DataLayout::no_tag:
+     case DataLayout::arg_info_data_tag:
+       // An empty slot or ArgInfoData entry marks the end of the trap data
+       {
++#ifdef AARCH64
++        if (need_load_jprofile) {
++          ProfileCacheMethodHold* mh = mdo->method()->jpc_method_holder();
++          if (mh->profile_list()->length() > 0 && mh->profile_list()->first()->is_ArgInfoData()) {
++            ArgInfoData record(mh->profile_list()->first()->data_in());
++            ArgInfoData dst(dp_dst);
++            for (int i = 0; i < record.number_of_args(); i++) {
++              dst.set_arg_modified(i, record.arg_modified(i));
++            }
++          }
++        }
++#endif
+         return; // Need a block to avoid SS compiler bug
+       }
++    case DataLayout::no_tag:
++      {
++        return;
++      }
+     default:
+       fatal("bad tag = %d", tag);
+     }
+@@ -231,11 +251,53 @@ bool ciMethodData::load_data() {
+   ResourceMark rm;
+   ciProfileData* ci_data = first_data();
+   ProfileData* data = mdo->first_data();
+-  while (is_valid(ci_data)) {
+-    ci_data->translate_from(data);
+-    ci_data = next_data(ci_data);
+-    data = mdo->next_data(data);
+-  }
++#ifdef AARCH64
++  int jprofile_index = 0;
++  bool need_load_jprofile = JProfilingCacheCompileAdvance && JProfilingCacheReplayProfileData &&
++                            CURRENT_ENV->task()->is_jprofilecache_compilation() &&
++                            CURRENT_ENV->task()->comp_level() == CompLevel_full_optimization &&
++                            mdo->method()->jpc_method_holder() != nullptr;
++  if (need_load_jprofile) {
++    ProfileCacheMethodHold* mh = mdo->method()->jpc_method_holder();
++
++    if (mh->profile_list()->length() > 0 && mh->profile_list()->first()->is_ArgInfoData()) {
++      jprofile_index++;
++    }
++    while (is_valid(ci_data)) {
++      bool is_translated = false;
++      if (JitProfileRecorder::is_recordable_data(data)) {
++        while (jprofile_index < mh->profile_list()->length()) {
++          BytecodeProfileRecord* jprofile = mh->profile_list()->at(jprofile_index);
++          jprofile_index++;
++          if (data->bci() == jprofile->bci()) {
++            ci_data->translate_from(jprofile->data_in()->data_in());
++            is_translated = true;
++            log_debug(jprofilecache)("Apply ProfileData on bytecode(%d) of method: %s",
++                                     ci_data->bci(), mdo->method()->name_and_sig_as_C_string());
++            break;
++          } else if (data->bci() > jprofile->bci()) {
++            jprofile_index = mh->profile_list()->length();
++            log_warning(jprofilecache)("Class file has changed: method %s does not match the dumped profile",
++                                       mdo->method()->name_and_sig_as_C_string());
++          }
++        }
++      }
++      if (!is_translated) {
++        ci_data->translate_from(data);
++      }
++      ci_data = next_data(ci_data);
++      data = mdo->next_data(data);
++    }
++  } else {
++#endif
++    while (is_valid(ci_data)) {
++      ci_data->translate_from(data);
++      ci_data = next_data(ci_data);
++      data = mdo->next_data(data);
++    }
++#ifdef AARCH64
++  }
++#endif
+   if (mdo->parameters_type_data() != nullptr) {
+     _parameters = data_layout_at(mdo->parameters_type_data_di());
+     ciParametersTypeData* parameters = new ciParametersTypeData(_parameters);
+@@ -244,7 +306,7 @@ bool ciMethodData::load_data() {
+ 
+   assert((DataLayout*) ((address)_data + total_size - parameters_data_size) == args_data_limit(),
+          "sanity - parameter data starts after the argument data of the single ArgInfoData entry");
+-  load_remaining_extra_data();
++  load_remaining_extra_data(AARCH64_ONLY(need_load_jprofile));
+ 
+   // Note: Extra data are all BitData, and do not need translation.
+ _invocation_counter = mdo->invocation_count(); +diff --git a/src/hotspot/share/ci/ciMethodData.hpp b/src/hotspot/share/ci/ciMethodData.hpp +index 005fa2146..92a121e14 100644 +--- a/src/hotspot/share/ci/ciMethodData.hpp ++++ b/src/hotspot/share/ci/ciMethodData.hpp +@@ -456,7 +456,7 @@ private: + ciArgInfoData *arg_info() const; + + void prepare_metadata(); +- void load_remaining_extra_data(); ++ void load_remaining_extra_data(AARCH64_ONLY(bool need_load_jprofile)); + ciProfileData* bci_to_extra_data(int bci, ciMethod* m, bool& two_free_slots); + + void dump_replay_data_type_helper(outputStream* out, int round, int& count, ProfileData* pdata, ByteSize offset, ciKlass* k); +diff --git a/src/hotspot/share/classfile/classLoaderDataGraph.cpp b/src/hotspot/share/classfile/classLoaderDataGraph.cpp +index 204628665..b396db09e 100644 +--- a/src/hotspot/share/classfile/classLoaderDataGraph.cpp ++++ b/src/hotspot/share/classfile/classLoaderDataGraph.cpp +@@ -47,6 +47,9 @@ + #include "utilities/growableArray.hpp" + #include "utilities/macros.hpp" + #include "utilities/ostream.hpp" ++#ifdef AARCH64 ++#include "jprofilecache/jitProfileCache.hpp" ++#endif + + volatile size_t ClassLoaderDataGraph::_num_array_classes = 0; + volatile size_t ClassLoaderDataGraph::_num_instance_classes = 0; +@@ -414,6 +417,16 @@ bool ClassLoaderDataGraph::do_unloading() { + uint loaders_processed = 0; + uint loaders_removed = 0; + ++#ifdef AARCH64 ++ // Unload ProfileCacheClassChain ++ if (JProfilingCacheCompileAdvance) { ++ JitProfileCache* jpc = JitProfileCache::instance(); ++ assert(jpc != nullptr, "JitProfileCache object is null"); ++ ProfileCacheClassChain* chain = jpc->preloader()->chain(); ++ chain->unload_class(); ++ } ++#endif ++ + for (ClassLoaderData* data = _head; data != nullptr; data = data->next()) { + if (data->is_alive()) { + prev = data; +diff --git a/src/hotspot/share/classfile/systemDictionary.cpp b/src/hotspot/share/classfile/systemDictionary.cpp +index 7022bfca7..fadce1099 100644 +--- a/src/hotspot/share/classfile/systemDictionary.cpp ++++ b/src/hotspot/share/classfile/systemDictionary.cpp +@@ -89,6 +89,9 @@ + #if INCLUDE_JFR + #include "jfr/jfr.hpp" + #endif ++#ifdef AARCH64 ++#include "jprofilecache/jitProfileCache.hpp" ++#endif + + class InvokeMethodKey : public StackObj { + private: +@@ -383,6 +386,32 @@ static inline void log_circularity_error(Symbol* name, PlaceholderEntry* probe) + } + } + ++#ifdef AARCH64 ++class SuperClassRecursionTracker : public StackObj { ++public: ++ SuperClassRecursionTracker() { ++ initialize(Thread::current()); ++ } ++ ++ SuperClassRecursionTracker(Thread* thread) { ++ initialize(thread); ++ } ++ ++ ~SuperClassRecursionTracker() { ++ assert(JProfilingCacheCompileAdvance, "wrong usage"); ++ _thread->super_class_depth_dec(); ++ } ++protected: ++ void initialize(Thread* thread) { ++ assert(JProfilingCacheCompileAdvance, "wrong usage"); ++ _thread = thread; ++ _thread->super_class_depth_add(); ++ } ++private: ++ Thread* _thread; ++}; ++#endif ++ + // Must be called for any superclass or superinterface resolution + // during class definition to allow class circularity checking + // superinterface callers: +@@ -469,11 +498,24 @@ InstanceKlass* SystemDictionary::resolve_super_or_fail(Symbol* class_name, + } + + // Resolve the superclass or superinterface, check results on return +- InstanceKlass* superk = +- SystemDictionary::resolve_instance_class_or_null(super_name, +- class_loader, +- protection_domain, +- THREAD); ++ InstanceKlass* superk = nullptr; ++#ifdef AARCH64 ++ if 
(JProfilingCacheCompileAdvance) { ++ SuperClassRecursionTracker superClassRecursionTracker; ++ superk = ++ SystemDictionary::resolve_instance_class_or_null(super_name, ++ class_loader, ++ protection_domain, ++ THREAD); ++ } else ++#endif ++ { ++ superk = ++ SystemDictionary::resolve_instance_class_or_null(super_name, ++ class_loader, ++ protection_domain, ++ THREAD); ++ } + + // Clean up placeholder entry. + { +@@ -722,6 +764,16 @@ InstanceKlass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, + // Make sure we have the right class in the dictionary + DEBUG_ONLY(verify_dictionary_entry(name, loaded_class)); + ++#ifdef AARCH64 ++ if (JProfilingCacheCompileAdvance) { ++ if (loaded_class != nullptr) { ++ JitProfileCache* jprofilecache = JitProfileCache::instance(); ++ assert(jprofilecache != nullptr, "sanity check"); ++ jprofilecache->preloader()->resolve_loaded_klass(loaded_class); ++ } ++ } ++#endif ++ + // Check if the protection domain is present it has the right access + if (protection_domain() != nullptr) { + // Verify protection domain. If it fails an exception is thrown +@@ -914,6 +966,14 @@ InstanceKlass* SystemDictionary::resolve_class_from_stream( + // Make sure we have an entry in the SystemDictionary on success + DEBUG_ONLY(verify_dictionary_entry(h_name, k)); + ++#ifdef AARCH64 ++ if (JProfilingCacheCompileAdvance) { ++ JitProfileCache* jprofilecache = JitProfileCache::instance(); ++ assert(jprofilecache != nullptr, "sanity check"); ++ jprofilecache->preloader()->resolve_loaded_klass(k); ++ } ++#endif ++ + return k; + } + +diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp +index caa065b74..ae5097595 100644 +--- a/src/hotspot/share/code/nmethod.cpp ++++ b/src/hotspot/share/code/nmethod.cpp +@@ -88,6 +88,10 @@ + #if INCLUDE_JBOLT + #include "jbolt/jBoltManager.hpp" + #endif ++#ifdef AARCH64 ++#include "jprofilecache/jitProfileCache.hpp" ++#include "jprofilecache/jitProfileRecord.hpp" ++#endif + + #ifdef DTRACE_ENABLED + +@@ -633,6 +637,12 @@ nmethod* nmethod::new_nmethod(const methodHandle& method, + // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet. + DEBUG_ONLY(nm->verify();) + nm->log_new_nmethod(); ++#ifdef AARCH64 ++ if (JProfilingCacheRecording) { ++ int bci = nm->is_osr_method() ? 
nm->osr_entry_bci() : InvocationEntryBci;
++    JitProfileCache::instance()->recorder()->add_method(nm->method(), bci);
++  }
++#endif
+   }
+   return nm;
+ }
+diff --git a/src/hotspot/share/compiler/compileBroker.cpp b/src/hotspot/share/compiler/compileBroker.cpp
+index 4eb40ee2f..66771cf32 100644
+--- a/src/hotspot/share/compiler/compileBroker.cpp
++++ b/src/hotspot/share/compiler/compileBroker.cpp
+@@ -1639,6 +1639,11 @@ CompileTask* CompileBroker::create_compile_task(CompileQueue* queue,
+   new_task->initialize(compile_id, method, osr_bci, comp_level,
+                        hot_method, hot_count, compile_reason,
+                        blocking);
++#ifdef AARCH64
++  if (compile_reason == CompileTask::CompileReason::Reason_JitProfile) {
++    new_task->mark_jprofilecache_compilation();
++  }
++#endif
+   queue->add(new_task);
+   return new_task;
+ }
+diff --git a/src/hotspot/share/compiler/compileTask.cpp b/src/hotspot/share/compiler/compileTask.cpp
+index 524f11b3d..b26fa297c 100644
+--- a/src/hotspot/share/compiler/compileTask.cpp
++++ b/src/hotspot/share/compiler/compileTask.cpp
+@@ -35,6 +35,9 @@
+ #include "runtime/handles.inline.hpp"
+ #include "runtime/jniHandles.hpp"
+ #include "runtime/mutexLocker.hpp"
++#ifdef AARCH64
++#include "jprofilecache/jitProfileCacheHolders.hpp"
++#endif
+ 
+ CompileTask* CompileTask::_task_free_list = nullptr;
+ 
+@@ -123,6 +126,9 @@ void CompileTask::initialize(int compile_id,
+   _nm_total_size = 0;
+   _failure_reason = nullptr;
+   _failure_reason_on_C_heap = false;
++#ifdef AARCH64
++  _is_jprofilecache_compilation = false; // flag of CompileTask started by JProfileCache
++#endif
+ 
+   if (LogCompilation) {
+     if (hot_method.not_null()) {
+diff --git a/src/hotspot/share/compiler/compileTask.hpp b/src/hotspot/share/compiler/compileTask.hpp
+index aff2df38a..e5cdb6b3b 100644
+--- a/src/hotspot/share/compiler/compileTask.hpp
++++ b/src/hotspot/share/compiler/compileTask.hpp
+@@ -56,6 +56,9 @@ class CompileTask : public CHeapObj<mtCompiler> {
+     Reason_Whitebox,        // Whitebox API
+     Reason_MustBeCompiled,  // Used for -Xcomp or AlwaysCompileLoopMethods (see CompilationPolicy::must_be_compiled())
+     Reason_Bootstrap,       // JVMCI bootstrap
++#ifdef AARCH64
++    Reason_JitProfile,      // JitProfile trigger
++#endif
+ #if INCLUDE_JBOLT
+     Reason_Reorder,         // JBolt reorder
+ #endif
+@@ -72,6 +75,9 @@ class CompileTask : public CHeapObj<mtCompiler> {
+       "whitebox",
+       "must_be_compiled",
+       "bootstrap"
++#ifdef AARCH64
++      , "jitprofile"
++#endif
+ #if INCLUDE_JBOLT
+       , "reorder"
+ #endif
+@@ -112,6 +118,10 @@ class CompileTask : public CHeapObj<mtCompiler> {
+   const char* _failure_reason;
+   // Specifies if _failure_reason is on the C heap.
+   bool _failure_reason_on_C_heap;
++#ifdef AARCH64
++  // compile task triggered by JProfileCache
++  bool _is_jprofilecache_compilation;
++#endif
+ 
+  public:
+   CompileTask() : _failure_reason(nullptr), _failure_reason_on_C_heap(false) {
+@@ -134,6 +144,10 @@ class CompileTask : public CHeapObj<mtCompiler> {
+   bool         is_blocking() const               { return _is_blocking; }
+   bool         is_success() const                { return _is_success; }
+   DirectiveSet* directive() const                { return _directive; }
++#ifdef AARCH64
++  bool is_jprofilecache_compilation() const { return _is_jprofilecache_compilation; }
++  void mark_jprofilecache_compilation()     { _is_jprofilecache_compilation = true; }
++#endif
+   CodeSection::csize_t nm_content_size() { return _nm_content_size; }
+   void set_nm_content_size(CodeSection::csize_t size) { _nm_content_size = size; }
+   CodeSection::csize_t nm_insts_size() { return _nm_insts_size; }
+diff --git a/src/hotspot/share/jprofilecache/hashtable.cpp b/src/hotspot/share/jprofilecache/hashtable.cpp
+new file mode 100644
+index 000000000..205caacf6
+--- /dev/null
++++ b/src/hotspot/share/jprofilecache/hashtable.cpp
+@@ -0,0 +1,338 @@
++/*
++ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
++ * or visit www.oracle.com if you need additional information or have any
++ * questions.
++ *
++ */
++
++#include "precompiled.hpp"
++#include "cds/archiveBuilder.hpp"
++#include "cds/metaspaceShared.hpp"
++#include "classfile/altHashing.hpp"
++#include "jprofilecache/hashtable.hpp"
++#include "jprofilecache/hashtable.inline.hpp"
++#include "classfile/javaClasses.hpp"
++#include "memory/allocation.inline.hpp"
++#include "memory/resourceArea.hpp"
++#include "oops/oop.inline.hpp"
++#include "runtime/safepoint.hpp"
++#include "utilities/dtrace.hpp"
++#include "utilities/numberSeq.hpp"
++#include "utilities/powerOfTwo.hpp"
++#include "utilities/align.hpp"
++
++// This hashtable is implemented as an open hash table with a fixed number of buckets.
++
++template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry_free_list() {
++  BasicHashtableEntry<F>* entry = nullptr;
++  if (_free_list != nullptr) {
++    entry = _free_list;
++    _free_list = _free_list->next();
++  }
++  return entry;
++}
++
++// HashtableEntries are allocated in blocks to reduce the space overhead.
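++// For example, with an _entry_size of 32 bytes and an empty 1024-bucket table,
++// the block sizing below works out to MIN2(512, MAX2(1024 / 2, 0)) == 512
++// entries, i.e. one 32 * 512 == 16384-byte C-heap block (already a power of
++// two), which then serves the next 512 new_entry() calls without new mallocs.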
++template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsigned int hashValue) {
++  BasicHashtableEntry<F>* entry = new_entry_free_list();
++
++  if (entry == nullptr) {
++    if (_first_free_entry + _entry_size >= _end_block) {
++      int block_size = MIN2(512, MAX2((int)_table_size / 2, (int)_number_of_entries));
++      int len = _entry_size * block_size;
++      len = 1 << log2i(len); // round down to power of 2
++      assert(len >= _entry_size, "");
++      _first_free_entry = NEW_C_HEAP_ARRAY2(char, len, F, CURRENT_PC);
++      memset(_first_free_entry, 0, len);
++      _end_block = _first_free_entry + len;
++    }
++    entry = (BasicHashtableEntry<F>*)_first_free_entry;
++    _first_free_entry += _entry_size;
++  }
++
++  assert(_entry_size % HeapWordSize == 0, "");
++  entry->set_hash(hashValue);
++  return entry;
++}
++
++
++template <class T, MEMFLAGS F> HashtableEntry<T, F>* Hashtable<T, F>::new_entry(unsigned int hashValue, T obj) {
++  HashtableEntry<T, F>* entry;
++
++  entry = (HashtableEntry<T, F>*)BasicHashtable<F>::new_entry(hashValue);
++  entry->set_literal(obj);
++  return entry;
++}
++
++// Check to see if the hashtable is unbalanced.  The caller set a flag to
++// rehash at the next safepoint.  If this bucket is 60 times greater than the
++// expected average bucket length, it's an unbalanced hashtable.
++// This is somewhat an arbitrary heuristic but if one bucket gets to
++// rehash_count which is currently 100, there's probably something wrong.
++
++template <class T, MEMFLAGS F> bool RehashableHashtable<T, F>::check_rehash_table(int count) {
++  assert(this->table_size() != 0, "underflow");
++  if (count > (((double)this->number_of_entries()/(double)this->table_size())*rehash_multiple)) {
++    // Set a flag for the next safepoint, which should be at some guaranteed
++    // safepoint interval.
++    return true;
++  }
++  return false;
++}
++
++// Create a new table and using alternate hash code, populate the new table
++// with the existing elements.  This can be used to change the hash code
++// and could in the future change the size of the table.
++
++template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::move_to(RehashableHashtable<T, F>* new_table) {
++
++  // Initialize the global seed for hashing.
++  _seed = AltHashing::compute_seed();
++  assert(seed() != 0, "shouldn't be zero");
++
++  int saved_entry_count = this->number_of_entries();
++
++  // Iterate through the table and create a new entry for the new table
++  for (int i = 0; i < new_table->table_size(); ++i) {
++    for (HashtableEntry<T, F>* p = this->bucket(i); p != nullptr; ) {
++      HashtableEntry<T, F>* next = p->next();
++      T string = p->literal();
++      // Use alternate hashing algorithm on the symbol in the first table
++      unsigned int hashValue = string->new_hash(seed());
++      // Get a new index relative to the new table (can also change size)
++      int index = new_table->hash_to_index(hashValue);
++      p->set_hash(hashValue);
++      // Keep the shared bit in the Hashtable entry to indicate that this entry
++      // can't be deleted.  The shared bit is the LSB in the _next field so
++      // walking the hashtable past these entries requires
++      // BasicHashtableEntry::make_ptr() call.
++      bool keep_shared = p->is_shared();
++      this->unlink_entry(p);
++      new_table->add_entry(index, p);
++      if (keep_shared) {
++        p->set_shared();
++      }
++      p = next;
++    }
++  }
++  // give the new table the free list as well
++  new_table->copy_freelist(this);
++  assert(new_table->number_of_entries() == saved_entry_count, "lost entry on dictionary copy?");
++
++  // Destroy memory used by the buckets in the hashtable.  The memory
++  // for the elements has been used in a new table and is not
++  // destroyed.  The memory reuse will benefit resizing the SystemDictionary
++  // to avoid a memory allocation spike at safepoint.
++  BasicHashtable<F>::free_buckets();
++}
++
++template <MEMFLAGS F> void BasicHashtable<F>::free_buckets() {
++  if (nullptr != _buckets) {
++    // Don't delete the buckets in the shared space.  They aren't
++    // allocated by os::malloc
++    if (!UseSharedSpaces ||
++        !MetaspaceShared::is_in_shared_metaspace(_buckets)) {
++      FREE_C_HEAP_ARRAY(HashtableBucket, _buckets);
++    }
++    _buckets = nullptr;
++  }
++}
++
++template <MEMFLAGS F> void BasicHashtable<F>::BucketUnlinkContext::free_entry(BasicHashtableEntry<F>* entry) {
++  entry->set_next(_removed_head);
++  _removed_head = entry;
++  if (_removed_tail == nullptr) {
++    _removed_tail = entry;
++  }
++  _num_removed++;
++}
++
++template <MEMFLAGS F> void BasicHashtable<F>::bulk_free_entries(BucketUnlinkContext* context) {
++  if (context->_num_removed == 0) {
++    assert(context->_removed_head == nullptr && context->_removed_tail == nullptr,
++           "Zero entries in the unlink context, but elements linked from " PTR_FORMAT " to " PTR_FORMAT,
++           p2i(context->_removed_head), p2i(context->_removed_tail));
++    return;
++  }
++
++  // MT-safe add of the list of BasicHashtableEntries from the context to the free list.
++  BasicHashtableEntry<F>* current = _free_list;
++  while (true) {
++    context->_removed_tail->set_next(current);
++    BasicHashtableEntry<F>* old = Atomic::cmpxchg(&_free_list, current, context->_removed_head);
++    if (old == current) {
++      break;
++    }
++    current = old;
++  }
++  Atomic::add(&_number_of_entries, context->_num_removed);
++}
++
++template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(Symbol *symbol) {
++  return symbol->size() * HeapWordSize;
++}
++
++template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(oop obj) {
++  // NOTE: this would over-count if (pre-JDK8) java_lang_Class::has_offset_field() is true,
++  // and the String.value array is shared by several Strings. However, starting from JDK8,
++  // the String.value array is not shared anymore.
++  assert(obj != nullptr && obj->klass() == vmClasses::String_klass(), "only strings are supported");
++  return (obj->size() + java_lang_String::value(obj)->size()) * HeapWordSize;
++}
++
++// Dump footprint and bucket length statistics
++//
++// Note: if you create a new subclass of Hashtable, you will need to
++// add a new function Hashtable::literal_size(MyNewType lit)
++
++template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::dump_table(outputStream* st, const char *table_name) {
++  NumberSeq summary;
++  int literal_bytes = 0;
++  for (int i = 0; i < this->table_size(); ++i) {
++    int count = 0;
++    for (HashtableEntry<T, F>* e = this->bucket(i);
++         e != nullptr; e = e->next()) {
++      count++;
++      literal_bytes += literal_size(e->literal());
++    }
++    summary.add((double)count);
++  }
++  double num_buckets = summary.num();
++  double num_entries = summary.sum();
++
++  int bucket_bytes = (int)num_buckets * sizeof(HashtableBucket<F>);
++  int entry_bytes  = (int)num_entries * sizeof(HashtableEntry<T, F>);
++  int total_bytes = literal_bytes + bucket_bytes + entry_bytes;
++
++  double bucket_avg  = (num_buckets <= 0) ? 0 : (bucket_bytes / num_buckets);
++  double entry_avg   = (num_entries <= 0) ? 0 : (entry_bytes / num_entries);
++  double literal_avg = (num_entries <= 0) ? 0 : (literal_bytes / num_entries);
++
++  st->print_cr("%s statistics:", table_name);
++  st->print_cr("Number of buckets       : %9d = %9d bytes, avg %7.3f", (int)num_buckets, bucket_bytes,  bucket_avg);
++  st->print_cr("Number of entries       : %9d = %9d bytes, avg %7.3f", (int)num_entries, entry_bytes,   entry_avg);
++  st->print_cr("Number of literals      : %9d = %9d bytes, avg %7.3f", (int)num_entries, literal_bytes, literal_avg);
++  st->print_cr("Total footprint         : %9s = %9d bytes", "", total_bytes);
++  st->print_cr("Average bucket size     : %9.3f", summary.avg());
++  st->print_cr("Variance of bucket size : %9.3f", summary.variance());
++  st->print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
++  st->print_cr("Maximum bucket size     : %9d", (int)summary.maximum());
++}
++
++template <MEMFLAGS F> bool BasicHashtable<F>::resize(int new_size) {
++
++  // Allocate new buckets
++  HashtableBucket<F>* buckets_new = NEW_C_HEAP_ARRAY2_RETURN_NULL(HashtableBucket<F>, new_size, F, CURRENT_PC);
++  if (buckets_new == nullptr) {
++    return false;
++  }
++
++  // Clear the new buckets
++  for (int i = 0; i < new_size; i++) {
++    buckets_new[i].clear();
++  }
++
++  int table_size_old = _table_size;
++  // hash_to_index() uses _table_size, so switch the sizes now
++  _table_size = new_size;
++
++  // Move entries from the old table to a new table
++  for (int index_old = 0; index_old < table_size_old; index_old++) {
++    for (BasicHashtableEntry<F>* p = _buckets[index_old].get_entry(); p != nullptr; ) {
++      BasicHashtableEntry<F>* next = p->next();
++      int index_new = hash_to_index(p->hash());
++
++      p->set_next(buckets_new[index_new].get_entry());
++      buckets_new[index_new].set_entry(p);
++      p = next;
++    }
++  }
++
++  // The old buckets can now be released
++  BasicHashtable::free_buckets();
++
++  // Switch to the new storage
++  _buckets = buckets_new;
++
++  return true;
++}
++
++template <MEMFLAGS F> bool BasicHashtable<F>::maybe_grow(int max_size, int load_factor) {
++  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
++
++  if (table_size() >= max_size) {
++    return false;
++  }
++  if (number_of_entries() / table_size() > load_factor) {
++    resize(MIN2(table_size() * 2, max_size));
++    return true;
++  } else {
++    return false;
++  }
++}
++
++#ifndef PRODUCT
++
++template <class T, MEMFLAGS F> void Hashtable<T, F>::print() {
++  ResourceMark rm;
++
++  for (int i = 0; i < BasicHashtable<F>::table_size(); i++) {
++    HashtableEntry<T, F>* entry = bucket(i);
++    while(entry != nullptr) {
++      tty->print("%d : ", i);
++      entry->literal()->print();
++      tty->cr();
++      entry = entry->next();
++    }
++  }
++}
++
++template <MEMFLAGS F> void BasicHashtable<F>::verify() {
++  int count = 0;
++  for (int i = 0; i < table_size(); i++) {
++    for (BasicHashtableEntry<F>* p = bucket(i); p != nullptr; p = p->next()) {
++      ++count;
++    }
++  }
++  assert(count == number_of_entries(), "number of hashtable entries incorrect");
++}
++
++
++#endif // PRODUCT
++
++
++#ifdef ASSERT
++
++template <MEMFLAGS F> void BasicHashtable<F>::verify_lookup_length(double load) {
++  if ((double)_lookup_length / (double)_lookup_count > load * 2.0) {
++    warning("Performance bug: SystemDictionary lookup_count=%d "
++            "lookup_length=%d average=%lf load=%f",
++            _lookup_count, _lookup_length,
++            (double) _lookup_length / _lookup_count, load);
++  }
++}
++
++#endif
++// Explicitly instantiate these types
++template class BasicHashtable;
++template class HashtableEntry;
++template class HashtableEntry;
++template class Hashtable;
++template class Hashtable;
+\ No newline at end of file
+diff --git a/src/hotspot/share/jprofilecache/hashtable.hpp b/src/hotspot/share/jprofilecache/hashtable.hpp
+new file mode 100644
+index 000000000..ea679324e
+--- /dev/null
++++ b/src/hotspot/share/jprofilecache/hashtable.hpp
+@@ -0,0 +1,446 @@
++/*
++ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
++ * or visit www.oracle.com if you need additional information or have any
++ * questions.
++ *
++ */
++
++#ifndef SHARE_JPROFILECACHE_HASHTABLE_HPP
++#define SHARE_JPROFILECACHE_HASHTABLE_HPP
++
++#include "classfile/classLoaderData.hpp"
++#include "memory/allocation.hpp"
++#include "oops/oop.hpp"
++#include "oops/symbol.hpp"
++#include "runtime/handles.hpp"
++
++// This is a generic hashtable, designed to be used for the symbol
++// and string tables.
++//
++// It is implemented as an open hash table with a fixed number of buckets.
++//
++// %note:
++//  - TableEntries are allocated in blocks to reduce the space overhead.
++
++
++
++template <MEMFLAGS F> class BasicHashtableEntry : public CHeapObj<F> {
++  friend class VMStructs;
++private:
++  unsigned int         _hash;           // 32-bit hash for item
++
++  // Link to next element in the linked list for this bucket.  EXCEPT
++  // bit 0 set indicates that this entry is shared and must not be
++  // unlinked from the table. Bit 0 is set during the dumping of the
++  // archive. Since shared entries are immutable, _next fields in the
++  // shared entries will not change.  New entries will always be
++  // unshared and since pointers are aligned, bit 0 will always remain 0
++  // with no extra effort.
++  BasicHashtableEntry<F>* _next;
++
++  // Windows IA64 compiler requires subclasses to be able to access these
++protected:
++  // Entry objects should not be created, they should be taken from the
++  // free list with BasicHashtable.new_entry().
++  BasicHashtableEntry() { ShouldNotReachHere(); }
++  // Entry objects should not be destroyed.  They should be placed on
++  // the free list instead with BasicHashtable.free_entry().
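++  // (Freed entries are threaded onto the owning table's _free_list and are
++  // handed out again by new_entry_free_list(); their memory is recycled, not
++  // returned to the C heap.)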
++  ~BasicHashtableEntry() { ShouldNotReachHere(); }
++
++public:
++
++  unsigned int hash() const { return _hash; }
++  void set_hash(unsigned int hash) { _hash = hash; }
++  unsigned int* hash_addr() { return &_hash; }
++
++  static BasicHashtableEntry<F>* make_ptr(BasicHashtableEntry<F>* p) {
++    return (BasicHashtableEntry<F>*)((intptr_t)p & -2);
++  }
++
++  BasicHashtableEntry<F>* next() const {
++    return make_ptr(_next);
++  }
++
++  void set_next(BasicHashtableEntry<F>* next) {
++    _next = next;
++  }
++
++  BasicHashtableEntry<F>** next_addr() {
++    return &_next;
++  }
++
++  bool is_shared() const {
++    return ((intptr_t)_next & 1) != 0;
++  }
++
++  void set_shared() {
++    _next = (BasicHashtableEntry<F>*)((intptr_t)_next | 1);
++  }
++};
++
++
++
++template <class T, MEMFLAGS F> class HashtableEntry : public BasicHashtableEntry<F> {
++  friend class VMStructs;
++private:
++  T _literal;   // ref to item in table.
++
++public:
++  // Literal
++  T literal() const { return _literal; }
++  T* literal_addr() { return &_literal; }
++  void set_literal(T s) { _literal = s; }
++
++  HashtableEntry<T, F>* next() const {
++    return (HashtableEntry<T, F>*)BasicHashtableEntry<F>::next();
++  }
++  HashtableEntry<T, F>** next_addr() {
++    return (HashtableEntry<T, F>**)BasicHashtableEntry<F>::next_addr();
++  }
++};
++
++
++
++template <MEMFLAGS F> class HashtableBucket : public CHeapObj<F> {
++  friend class VMStructs;
++private:
++  // Instance variable
++  BasicHashtableEntry<F>* _entry;
++
++public:
++  // Accessing
++  void clear() { _entry = nullptr; }
++
++  // The following methods use order access methods to avoid race
++  // conditions in multiprocessor systems.
++  BasicHashtableEntry<F>* get_entry() const;
++  void set_entry(BasicHashtableEntry<F>* l);
++
++  // The following method is not MT-safe and must be done under lock.
++  BasicHashtableEntry<F>** entry_addr() { return &_entry; }
++};
++
++
++template <MEMFLAGS F> class BasicHashtable : public CHeapObj<F> {
++  friend class VMStructs;
++
++public:
++  BasicHashtable(int table_size, int entry_size);
++  BasicHashtable(int table_size, int entry_size,
++                 HashtableBucket<F>* buckets, int number_of_entries);
++
++  // Bucket handling
++  int hash_to_index(unsigned int full_hash) const {
++    int h = full_hash % _table_size;
++    assert(h >= 0 && h < _table_size, "Illegal hash value");
++    return h;
++  }
++
++private:
++  // Instance variables
++  int _table_size;
++  HashtableBucket<F>* _buckets;
++  BasicHashtableEntry<F>* volatile _free_list;
++  char* _first_free_entry;
++  char* _end_block;
++  int _entry_size;
++  volatile int _number_of_entries;
++
++protected:
++
++#ifdef ASSERT
++  int _lookup_count;
++  int _lookup_length;
++  void verify_lookup_length(double load);
++#endif
++
++  void initialize(int table_size, int entry_size, int number_of_entries);
++
++  // Accessor
++  int entry_size() const { return _entry_size; }
++
++  // The following method is MT-safe and may be used with caution.
++  BasicHashtableEntry<F>* bucket(int i) const;
++
++  // The following method is not MT-safe and must be done under lock.
++  BasicHashtableEntry<F>** bucket_addr(int i) { return _buckets[i].entry_addr(); }
++
++  // Attempt to get an entry from the free list
++  BasicHashtableEntry<F>* new_entry_free_list();
++
++  // Table entry management
++  BasicHashtableEntry<F>* new_entry(unsigned int hashValue);
++
++  // Used when moving the entry to another table
++  // Clean up links, but do not add to free_list
++  void unlink_entry(BasicHashtableEntry<F>* entry) {
++    entry->set_next(nullptr);
++    --_number_of_entries;
++  }
++
++  // Move over freelist and free block for allocation
++  void copy_freelist(BasicHashtable<F>* src) {
++    _free_list = src->_free_list;
++    src->_free_list = nullptr;
++    _first_free_entry = src->_first_free_entry;
++    src->_first_free_entry = nullptr;
++    _end_block = src->_end_block;
++    src->_end_block = nullptr;
++  }
++
++  // Free the buckets in this hashtable
++  void free_buckets();
++
++  // Helper data structure containing context for the bucket entry unlink process,
++  // storing the unlinked buckets in a linked list.
++  // Also avoids the need to pass around these four members as parameters everywhere.
++  struct BucketUnlinkContext {
++    int _num_processed;
++    int _num_removed;
++    // Head and tail pointers for the linked list of removed entries.
++    BasicHashtableEntry<F>* _removed_head;
++    BasicHashtableEntry<F>* _removed_tail;
++
++    BucketUnlinkContext() : _num_processed(0), _num_removed(0), _removed_head(nullptr), _removed_tail(nullptr) {
++    }
++
++    void free_entry(BasicHashtableEntry<F>* entry);
++  };
++  // Adds the bucket entries linked together in the given context to the global
++  // free list. This method is MT-safe with respect to other calls of this method.
++  void bulk_free_entries(BucketUnlinkContext* context);
++public:
++  int table_size() const { return _table_size; }
++  void set_entry(int index, BasicHashtableEntry<F>* entry);
++
++  void add_entry(int index, BasicHashtableEntry<F>* entry);
++
++  void free_entry(BasicHashtableEntry<F>* entry);
++
++  int number_of_entries() { return _number_of_entries; }
++
++  bool resize(int new_size);
++
++  bool maybe_grow(int max_size, int load_factor = 0);
++
++  void verify() PRODUCT_RETURN;
++};
++
++
++template <class T, MEMFLAGS F> class Hashtable : public BasicHashtable<F> {
++  friend class VMStructs;
++
++public:
++  Hashtable(int table_size, int entry_size)
++    : BasicHashtable<F>(table_size, entry_size) { }
++
++  Hashtable(int table_size, int entry_size,
++            HashtableBucket<F>* buckets, int number_of_entries)
++    : BasicHashtable<F>(table_size, entry_size, buckets, number_of_entries) { }
++
++  // Debugging
++  void print() PRODUCT_RETURN;
++
++protected:
++
++  unsigned int compute_hash(Symbol* name) {
++    return (unsigned int) name->identity_hash();
++  }
++
++  int index_for(Symbol* name) {
++    return this->hash_to_index(compute_hash(name));
++  }
++
++  // Table entry management
++  HashtableEntry<T, F>* new_entry(unsigned int hashValue, T obj);
++
++  // The following method is MT-safe and may be used with caution.
++  HashtableEntry<T, F>* bucket(int i) {
++    return (HashtableEntry<T, F>*)BasicHashtable<F>::bucket(i);
++  }
++
++  // The following method is not MT-safe and must be done under lock.
++  HashtableEntry<T, F>** bucket_addr(int i) {
++    return (HashtableEntry<T, F>**)BasicHashtable<F>::bucket_addr(i);
++  }
++
++};
++
++template <class T, MEMFLAGS F> class RehashableHashtable : public Hashtable<T, F> {
++ protected:
++
++  enum {
++    rehash_count = 100,
++    rehash_multiple = 60
++  };
++
++  // Check that the table is unbalanced
++  bool check_rehash_table(int count);
++
++ public:
++  RehashableHashtable(int table_size, int entry_size)
++    : Hashtable<T, F>(table_size, entry_size) { }
++
++  RehashableHashtable(int table_size, int entry_size,
++                      HashtableBucket<F>* buckets, int number_of_entries)
++    : Hashtable<T, F>(table_size, entry_size, buckets, number_of_entries) { }
++
++
++  // Function to move these elements into the new table.
++  void move_to(RehashableHashtable<T, F>* new_table);
++  static bool use_alternate_hashcode();
++  static juint seed();
++
++  static int literal_size(Symbol* symbol);
++  static int literal_size(oop oop);
++
++  // The following two are currently not used, but are needed anyway because some
++  // C++ compilers (MacOS and Solaris) force the instantiation of
++  // Hashtable::dump_table() even though we never call this function
++  // in the VM code.
++  static int literal_size(ConstantPool* cp) { Unimplemented(); return 0; }
++  static int literal_size(Klass* k)         { Unimplemented(); return 0; }
++
++  void dump_table(outputStream* st, const char* table_name);
++
++ private:
++  static juint _seed;
++};
++
++template <class T, MEMFLAGS F> juint RehashableHashtable<T, F>::_seed = 0;
++template <class T, MEMFLAGS F> juint RehashableHashtable<T, F>::seed() { return _seed; }
++template <class T, MEMFLAGS F> bool  RehashableHashtable<T, F>::use_alternate_hashcode() { return _seed != 0; }
++
++// Versions of hashtable where two handles are used to compute the index.
++
++template <class T, MEMFLAGS F> class TwoOopHashtable : public Hashtable<T, F> {
++  friend class VMStructs;
++protected:
++  TwoOopHashtable(int table_size, int entry_size)
++    : Hashtable<T, F>(table_size, entry_size) {}
++
++  TwoOopHashtable(int table_size, int entry_size, HashtableBucket<F>* t,
++                  int number_of_entries)
++    : Hashtable<T, F>(table_size, entry_size, t, number_of_entries) {}
++
++public:
++  unsigned int compute_hash(Symbol* name, ClassLoaderData* loader_data) {
++    unsigned int name_hash = name->identity_hash();
++    // loader is nullptr with CDS
++    assert(loader_data != nullptr || UseSharedSpaces || DumpSharedSpaces,
++           "only allowed with shared spaces");
++    unsigned int loader_hash = loader_data == nullptr ? 0 : loader_data->identity_hash();
++    return name_hash ^ loader_hash;
++  }
++
++  int index_for(Symbol* name, ClassLoaderData* loader_data) {
++    return this->hash_to_index(compute_hash(name, loader_data));
++  }
++};
++
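TwoOopHashtable::compute_hash above XORs the class-name hash with the loader's identity hash, so the same name loaded by different loaders indexes to different buckets. A toy illustration of that combination; the hash values below are made up:

#include <cstdio>

// Hypothetical stand-ins for Symbol::identity_hash() and
// ClassLoaderData::identity_hash(); any stable per-object hash would do.
static unsigned combined_hash(unsigned name_hash, unsigned loader_hash) {
  return name_hash ^ loader_hash;  // same scheme as compute_hash above
}

int main() {
  unsigned name = 0x5EED1234u;                 // one class-name hash
  printf("%u\n", combined_hash(name, 0xAu));   // loader A: one bucket index source
  printf("%u\n", combined_hash(name, 0xBu));   // loader B: a different one
  return 0;
}

++// A subclass of BasicHashtable that allows you to do a simple K -> V mapping
++// without using tons of boilerplate code.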
++template<
++    typename K, typename V, MEMFLAGS F,
++    unsigned (*HASH)  (K const&)          = primitive_hash<K>,
++    bool    (*EQUALS)(K const&, K const&) = primitive_equals<K>
++    >
++class KVHashtable : public BasicHashtable<F> {
++  class KVHashtableEntry : public BasicHashtableEntry<F> {
++  public:
++    K _key;
++    V _value;
++    KVHashtableEntry* next() {
++      return (KVHashtableEntry*)BasicHashtableEntry<F>::next();
++    }
++  };
++
++protected:
++  KVHashtableEntry* bucket(int i) const {
++    return (KVHashtableEntry*)BasicHashtable<F>::bucket(i);
++  }
++
++  KVHashtableEntry* new_entry(unsigned int hashValue, K key, V value) {
++    KVHashtableEntry* entry = (KVHashtableEntry*)BasicHashtable<F>::new_entry(hashValue);
++    entry->_key = key;
++    entry->_value = value;
++    return entry;
++  }
++
++public:
++  KVHashtable(int table_size) : BasicHashtable<F>(table_size, sizeof(KVHashtableEntry)) {}
++
++  V* add(K key, V value) {
++    unsigned int hash = HASH(key);
++    KVHashtableEntry* entry = new_entry(hash, key, value);
++    BasicHashtable<F>::add_entry(BasicHashtable<F>::hash_to_index(hash), entry);
++    return &(entry->_value);
++  }
++
++  V* lookup(K key) const {
++    unsigned int hash = HASH(key);
++    int index = BasicHashtable<F>::hash_to_index(hash);
++    for (KVHashtableEntry* e = bucket(index); e != nullptr; e = e->next()) {
++      if (e->hash() == hash && EQUALS(e->_key, key)) {
++        return &(e->_value);
++      }
++    }
++    return nullptr;
++  }
++
++  // Look up the key.
++  // If an entry for the key exists, leave map unchanged and return a pointer to its value.
++  // If no entry for the key exists, create a new entry from key and value and return a
++  // pointer to the value.
++  // *p_created is true if entry was created, false if entry pre-existed.
++  V* add_if_absent(K key, V value, bool* p_created) {
++    unsigned int hash = HASH(key);
++    int index = BasicHashtable<F>::hash_to_index(hash);
++    for (KVHashtableEntry* e = bucket(index); e != nullptr; e = e->next()) {
++      if (e->hash() == hash && EQUALS(e->_key, key)) {
++        *p_created = false;
++        return &(e->_value);
++      }
++    }
++    KVHashtableEntry* entry = new_entry(hash, key, value);
++    BasicHashtable<F>::add_entry(BasicHashtable<F>::hash_to_index(hash), entry);
++    *p_created = true;
++    return &(entry->_value);
++  }
++
++  int table_size() const {
++    return BasicHashtable<F>::table_size();
++  }
++
++  // ITER contains bool do_entry(K, V const&), which will be
++  // called for each entry in the table. If do_entry() returns false,
++  // the iteration is cancelled.
++  template <class ITER>
++  void iterate(ITER* iter) const {
++    for (int index = 0; index < table_size(); index++) {
++      for (KVHashtableEntry* e = bucket(index); e != nullptr; e = e->next()) {
++        bool cont = iter->do_entry(e->_key, &e->_value);
++        if (!cont) { return; }
++      }
++    }
++  }
++};
++
++#endif // SHARE_JPROFILECACHE_HASHTABLE_HPP
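KVHashtable::add_if_absent above returns a pointer to the value slot and reports through *p_created whether a new entry was made. A self-contained reference for the same contract, with std::unordered_map standing in for the HotSpot table (sketch only, not part of the patch):

#include <cassert>
#include <unordered_map>

// Same contract as KVHashtable::add_if_absent: return a pointer to the value
// slot, report through *p_created whether the entry was newly created.
template <typename K, typename V>
V* add_if_absent(std::unordered_map<K, V>& map, const K& key, const V& value,
                 bool* p_created) {
  auto result = map.try_emplace(key, value);  // no-op if the key already exists
  *p_created = result.second;
  return &result.first->second;
}

int main() {
  std::unordered_map<int, int> m;
  bool created = false;
  int* slot = add_if_absent(m, 1, 42, &created);
  assert(created && *slot == 42);
  add_if_absent(m, 1, 99, &created);  // key 1 pre-existed
  assert(!created && m[1] == 42);     // original value kept
  return 0;
}

+diff --git a/src/hotspot/share/jprofilecache/hashtable.inline.hpp b/src/hotspot/share/jprofilecache/hashtable.inline.hpp
+new file mode 100644
+index 000000000..cc8a50367
+--- /dev/null
++++ b/src/hotspot/share/jprofilecache/hashtable.inline.hpp
+@@ -0,0 +1,116 @@
++/*
++ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.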
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
++ * or visit www.oracle.com if you need additional information or have any
++ * questions.
++ *
++ */
++
++#ifndef SHARE_JPROFILECACHE_HASHTABLE_INLINE_HPP
++#define SHARE_JPROFILECACHE_HASHTABLE_INLINE_HPP
++
++#include "memory/allocation.inline.hpp"
++#include "runtime/atomic.hpp"
++#include "jprofilecache/hashtable.hpp"
++#include "utilities/dtrace.hpp"
++
++// Inline function definitions for hashtable.hpp.
++
++// --------------------------------------------------------------------------
++
++// Initialize a table.
++
++template <MEMFLAGS F> inline BasicHashtable<F>::BasicHashtable(int table_size, int entry_size) {
++  // Called on startup, no locking needed
++  initialize(table_size, entry_size, 0);
++  _buckets = NEW_C_HEAP_ARRAY2(HashtableBucket<F>, table_size, F, CURRENT_PC);
++  for (int index = 0; index < _table_size; index++) {
++    _buckets[index].clear();
++  }
++}
++
++
++template <MEMFLAGS F> inline BasicHashtable<F>::BasicHashtable(int table_size, int entry_size,
++                                                               HashtableBucket<F>* buckets,
++                                                               int number_of_entries) {
++  // Called on startup, no locking needed
++  initialize(table_size, entry_size, number_of_entries);
++  _buckets = buckets;
++}
++
++
++template <MEMFLAGS F> inline void BasicHashtable<F>::initialize(int table_size, int entry_size,
++                                                                int number_of_entries) {
++  // Called on startup, no locking needed
++  _table_size = table_size;
++  _entry_size = entry_size;
++  _free_list = nullptr;
++  _first_free_entry = nullptr;
++  _end_block = nullptr;
++  _number_of_entries = number_of_entries;
++#ifdef ASSERT
++  _lookup_count = 0;
++  _lookup_length = 0;
++#endif
++}
++
++
++// The following method is MT-safe and may be used with caution.
++template <MEMFLAGS F> inline BasicHashtableEntry<F>* BasicHashtable<F>::bucket(int i) const {
++  return _buckets[i].get_entry();
++}
++
++
++template <MEMFLAGS F> inline void HashtableBucket<F>::set_entry(BasicHashtableEntry<F>* l) {
++  // Warning: Preserve store ordering. The SystemDictionary is read
++  //          without locks. The new SystemDictionaryEntry must be
++  //          complete before other threads can be allowed to see it
++  //          via a store to _buckets[index].
++  Atomic::release_store(&_entry, l);
++}
++
++
++template <MEMFLAGS F> inline BasicHashtableEntry<F>* HashtableBucket<F>::get_entry() const {
++  // Warning: Preserve load ordering. The SystemDictionary is read
++  //          without locks. The new SystemDictionaryEntry must be
++  //          complete before other threads can be allowed to see it
++  //          via a store to _buckets[index].
++  return (BasicHashtableEntry<F>*) Atomic::load_acquire(&_entry);
++}
++
++
++template <MEMFLAGS F> inline void BasicHashtable<F>::set_entry(int index, BasicHashtableEntry<F>* entry) {
++  _buckets[index].set_entry(entry);
++}
++
++
++template <MEMFLAGS F> inline void BasicHashtable<F>::add_entry(int index, BasicHashtableEntry<F>* entry) {
++  entry->set_next(bucket(index));
++  _buckets[index].set_entry(entry);
++  ++_number_of_entries;
++}
++
++template <MEMFLAGS F> inline void BasicHashtable<F>::free_entry(BasicHashtableEntry<F>* entry) {
++  entry->set_next(_free_list);
++  _free_list = entry;
++  --_number_of_entries;
++}
++
++#endif // SHARE_JPROFILECACHE_HASHTABLE_INLINE_HPP
++
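The set_entry/get_entry pair above is the classic lock-free publication pattern: the writer fully initializes an entry, then release-stores the bucket head; a reader that load-acquires the head is guaranteed to see the entry's fields. A minimal sketch of the same pairing with std::atomic (Entry and the function names are hypothetical):

#include <atomic>

struct Entry { int payload; Entry* next; };

std::atomic<Entry*> bucket_head{nullptr};

// Writer: initialize the entry completely *before* publishing it.
void publish(Entry* e, int value) {
  e->payload = value;
  e->next = bucket_head.load(std::memory_order_relaxed);
  bucket_head.store(e, std::memory_order_release);  // like Atomic::release_store
}

// Reader: the acquire load makes the writer's earlier stores visible.
int first_payload_or(int fallback) {
  Entry* e = bucket_head.load(std::memory_order_acquire);  // like Atomic::load_acquire
  return e != nullptr ? e->payload : fallback;
}

+diff --git a/src/hotspot/share/jprofilecache/jitProfileCache.cpp b/src/hotspot/share/jprofilecache/jitProfileCache.cpp
+new file mode 100644
+index 000000000..f1a926a27
+--- /dev/null
++++ b/src/hotspot/share/jprofilecache/jitProfileCache.cpp
+@@ -0,0 +1,287 @@
++/*
++ * Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
++ * or visit www.oracle.com if you need additional information or have any
++ * questions.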
++ */ ++ ++#include "precompiled.hpp" ++#include "classfile/classLoaderData.hpp" ++#include "classfile/classLoaderData.inline.hpp" ++#include "classfile/symbolTable.hpp" ++#include "classfile/systemDictionary.hpp" ++#include "compiler/compileBroker.hpp" ++#include "jprofilecache/hashtable.inline.hpp" ++#include "jprofilecache/jitProfileCache.hpp" ++#include "jprofilecache/jitProfileRecord.hpp" ++#include "jprofilecache/jitProfileCacheFileParser.hpp" ++#include "jprofilecache/jitProfileCacheUtils.hpp" ++#include "logging/log.hpp" ++#include "logging/logStream.hpp" ++#include "oops/method.hpp" ++#include "oops/typeArrayKlass.hpp" ++#include "runtime/arguments.hpp" ++#include "compiler/compilationPolicy.hpp" ++#include "runtime/handles.inline.hpp" ++#include "runtime/javaCalls.hpp" ++#include "runtime/mutexLocker.hpp" ++#include "runtime/os.hpp" ++#include "runtime/thread.hpp" ++#include "utilities/stack.hpp" ++#include "utilities/stack.inline.hpp" ++#include "runtime/atomic.hpp" ++#include "libadt/dict.hpp" ++ ++JitProfileCache* JitProfileCache::_jit_profile_cache_instance = nullptr; ++ ++JitProfileCache::JitProfileCache() ++ : profilecacheComplete(false), ++ _jit_profile_cache_state(NOT_INIT), ++ _jit_profile_cache_version(JITPROFILECACHE_VERSION), ++ _dummy_method(nullptr), ++ _jit_profile_cache_recorder(nullptr), ++ _jit_profile_cache_info(nullptr), ++ _excluding_matcher(nullptr) {} ++ ++JitProfileCache::~JitProfileCache() { ++ delete _jit_profile_cache_recorder; ++ delete _jit_profile_cache_info; ++} ++ ++JitProfileCache* JitProfileCache::create_instance() { ++ _jit_profile_cache_instance = new JitProfileCache(); ++ return _jit_profile_cache_instance; ++} ++ ++JitProfileCache::JitProfileCacheState JitProfileCache::init_for_recording() { ++ assert(JProfilingCacheRecording && !JProfilingCacheCompileAdvance, " JitProfileCache JVM option verify failure"); ++ _jit_profile_cache_recorder = new JitProfileRecorder(); ++ _jit_profile_cache_recorder->init(); ++ ++ // check state ++ if (_jit_profile_cache_recorder->is_valid()) { ++ _jit_profile_cache_state = JitProfileCache::IS_OK; ++ } else { ++ _jit_profile_cache_state = JitProfileCache::IS_ERR; ++ } ++ return _jit_profile_cache_state; ++} ++ ++JitProfileCache::JitProfileCacheState JitProfileCache::init_for_profilecache() { ++ assert(!JProfilingCacheRecording && JProfilingCacheCompileAdvance, "JitProfileCache JVM option verify failure"); ++ if (CompilationProfileCacheExclude != nullptr) { ++ _excluding_matcher = new SymbolRegexMatcher(CompilationProfileCacheExclude); ++ } ++ ++ _jit_profile_cache_info = new JitProfileCacheInfo(); ++ _jit_profile_cache_info->set_holder(this); ++ _jit_profile_cache_info->init(); ++ if (_jit_profile_cache_info->is_valid()) { ++ _jit_profile_cache_state = JitProfileCache::IS_OK; ++ } else { ++ _jit_profile_cache_state = JitProfileCache::IS_ERR; ++ } ++ return _jit_profile_cache_state; ++} ++ ++void JitProfileCache::init() { ++ ++ // set log level ++ // set_log_level(); ++ ++ if (JProfilingCacheCompileAdvance) { ++ init_for_profilecache(); ++ } else if(JProfilingCacheRecording) { ++ init_for_recording(); ++ } ++ if ((JProfilingCacheRecording || JProfilingCacheCompileAdvance) && !JitProfileCache::is_valid()) { ++ log_error(jprofilecache)("[JitProfileCache] ERROR: JProfileCache init error."); ++ vm_exit(-1); ++ } ++} ++ ++JitProfileCache::JitProfileCacheState JitProfileCache::flush_recorder() { ++ if(_jit_profile_cache_state == IS_ERR) { ++ return _jit_profile_cache_state; ++ } ++ 
_jit_profile_cache_recorder->flush_record(); ++ if (_jit_profile_cache_recorder->is_valid()) { ++ _jit_profile_cache_state = IS_OK; ++ } else { ++ _jit_profile_cache_state = IS_ERR; ++ } ++ return _jit_profile_cache_state; ++} ++ ++#define PRELOAD_CLASS_HS_SIZE 10240 ++ ++JitProfileCacheInfo::JitProfileCacheInfo() ++ : _jit_profile_cache_dict(nullptr), ++ _profile_cache_chain(nullptr), ++ _method_loaded_count(0), ++ _state(NOT_INIT), ++ _holder(nullptr), ++ _jvm_booted_is_done(false) { ++} ++ ++JitProfileCacheInfo::~JitProfileCacheInfo() { ++ delete _jit_profile_cache_dict; ++ delete _profile_cache_chain; ++} ++ ++void JitProfileCacheInfo::jvm_booted_is_done() { ++ _jvm_booted_is_done = true; ++ ProfileCacheClassChain* chain = this->chain(); ++ assert(chain != nullptr, "ProfileCacheClassChain is nullptr"); ++} ++ ++void JitProfileCacheInfo::notify_precompilation() { ++ ProfileCacheClassChain *chain = this->chain(); ++ assert(chain != nullptr, "ProfileCacheClassChain is nullptr"); ++ chain->try_transition_to_state(ProfileCacheClassChain::PRE_PROFILECACHE); ++ ++ // preload class ++ log_info(jprofilecache)("JProfileCache [INFO]: start preload class from constant pool"); ++ chain->preload_class_in_constantpool(); ++ ++ // precompile cache method ++ log_info(jprofilecache)("JProfileCache [INFO]: start profilecache compilation"); ++ chain->precompilation(); ++ Thread *THREAD = Thread::current(); ++ if (HAS_PENDING_EXCEPTION) { ++ return; ++ } ++ ++ if (!chain->try_transition_to_state(ProfileCacheClassChain::PROFILECACHE_DONE)) { ++ log_error(jprofilecache)("JProfileCache [ERROR]: can not change state to PROFILECACHE_DONE"); ++ } else { ++ log_info(jprofilecache)("JProfileCache [INFO]: profilecache compilation is done"); ++ } ++} ++ ++bool JitProfileCacheInfo::should_preload_class(Symbol* s) { ++ if (UseJProfilingCacheSystemBlackList && ++ JitProfileCacheUtils::is_in_unpreloadable_classes_black_list(s)) { ++ return false; ++ } ++ SymbolRegexMatcher* matcher = holder()->excluding_matcher(); ++ if (matcher != nullptr && matcher->matches(s)) { ++ return false; ++ } ++ int hash = s->identity_hash(); ++ ProfileCacheClassEntry* e = jit_profile_cache_dict()->find_head_entry(hash, s); ++ if (e == nullptr) { ++ return false; ++ } ++ if (!CompilationProfileCacheResolveClassEagerly) { ++ int offset = e->chain_offset(); ++ ProfileCacheClassChain::ProfileCacheClassChainEntry* entry = chain()->at(offset); ++ return entry->is_not_loaded(); ++ } else { ++ return true; ++ } ++} ++ ++bool JitProfileCacheInfo::resolve_loaded_klass(InstanceKlass* k) { ++ if (k == nullptr) { return false; } ++ if (k->is_jprofilecache_recorded()) { ++ return false; ++ } ++ { ++ MutexLocker mu(ProfileCacheClassChain_lock); ++ if (!chain()->can_record_class()) { ++ return false; ++ } ++ } ++ k->set_jprofilecache_recorded(true); ++ chain()->mark_loaded_class(k, jit_profile_cache_dict()->find_entry(k)); ++ return true; ++} ++ ++class RandomFileStreamGuard : StackObj { ++public: ++ RandomFileStreamGuard(randomAccessFileStream* fs) ++ : _file_stream(fs) { ++ } ++ ++ ~RandomFileStreamGuard() { delete _file_stream; } ++ ++ randomAccessFileStream* operator ->() const { return _file_stream; } ++ randomAccessFileStream* operator ()() const { return _file_stream; } ++ ++private: ++ randomAccessFileStream* _file_stream; ++}; ++ ++#define MAX_DEOPT_NUMBER 500 ++ ++void JitProfileCacheInfo::init() { ++ if (JProfilingCacheRecording) { ++ log_error(jprofilecache)("[JitProfileCache] ERROR: you can not set both JProfilingCacheCompileAdvance and 
JProfilingCacheRecording"); ++ _state = IS_ERR; ++ return; ++ } ++ ++ _jit_profile_cache_dict = new JProfileCacheClassDictionary(PRELOAD_CLASS_HS_SIZE); ++ // initialization parameters ++ _method_loaded_count = 0; ++ _state = IS_OK; ++ ++ if (JProfilingCacheAutoArchiveDir != nullptr) { ++ ProfilingCacheFile = JitProfileRecorder::auto_jpcfile_name(); ++ } ++ ++ if (ProfilingCacheFile == nullptr) { ++ _state = IS_ERR; ++ return; ++ } ++ ++ RandomFileStreamGuard fsg(new (mtInternal) randomAccessFileStream( ++ ProfilingCacheFile, "rb+")); ++ JitProfileCacheFileParser parser(fsg(), this); ++ if (!fsg->is_open()) { ++ log_error(jprofilecache)("[JitProfileCache] ERROR : JitProfile doesn't exist"); ++ _state = IS_ERR; ++ return; ++ } ++ parser.set_file_size(fsg->fileSize()); ++ ++ // parse header ++ if (!parser.parse_header()) { ++ _state = IS_ERR; ++ return; ++ } ++ ++ // parse class ++ if (!parser.parse_class()) { ++ _state = IS_ERR; ++ return; ++ } ++ ++ // parse method ++ while (parser.has_next_method_record()) { ++ ProfileCacheMethodHold* holder = parser.parse_method(); ++ if (holder != nullptr) { ++ // count method parse successfully ++ ++_method_loaded_count; ++ } ++ parser.increment_parsed_number_count(); ++ } ++ log_info(jprofilecache)("JProfileCache [INFO]: parsed method number %d successful loaded %" PRIu64, parser.parsed_methods(), _method_loaded_count); ++} +\ No newline at end of file +diff --git a/src/hotspot/share/jprofilecache/jitProfileCache.hpp b/src/hotspot/share/jprofilecache/jitProfileCache.hpp +new file mode 100644 +index 000000000..87e29259e +--- /dev/null ++++ b/src/hotspot/share/jprofilecache/jitProfileCache.hpp +@@ -0,0 +1,148 @@ ++/* ++* Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#ifndef SHARED_VM_JPROFILECACHE_JITPROFILECACHE_HPP ++#define SHARED_VM_JPROFILECACHE_JITPROFILECACHE_HPP ++ ++#include "code/codeBlob.hpp" ++#include "libadt/dict.hpp" ++#include "memory/allocation.hpp" ++#include "jprofilecache/hashtable.hpp" ++#include "jprofilecache/jitProfileClassChain.hpp" ++#include "jprofilecache/jitProfileCacheClass.hpp" ++#include "jprofilecache/symbolRegexMatcher.hpp" ++#include "utilities/linkedlist.hpp" ++#include "utilities/ostream.hpp" ++#include "utilities/globalDefinitions.hpp" ++#include "utilities/growableArray.hpp" ++#include "runtime/timer.hpp" ++#include "runtime/atomic.hpp" ++#include "runtime/jniHandles.hpp" ++#include "runtime/mutexLocker.hpp" ++#include "oops/klass.hpp" ++#include "oops/method.hpp" ++#include "oops/methodData.hpp" ++#include "oops/methodCounters.hpp" ++ ++class JitProfileRecorder; ++class JitProfileCacheInfo; ++ ++ ++#define INVALID_FIRST_INVOKE_INIT_ORDER -1 ++ ++// Manager of the feature, created when vm is started ++class JitProfileCache : public CHeapObj { ++public: ++ enum JitProfileCacheState { ++ NOT_INIT = 0, ++ IS_OK = 1, ++ IS_ERR = 2 ++ }; ++ ++ unsigned int version() { return _jit_profile_cache_version; } ++ bool is_valid() { return _jit_profile_cache_state == JitProfileCache::IS_OK; } ++ ++ void set_dummy_method(Method* m) { _dummy_method = m; } ++ Method* dummy_method() { return _dummy_method; } ++ ++ // init in JVM start ++ void init(); ++ ++ void set_log_level(); ++ ++ static JitProfileCache* create_instance(); ++ static JitProfileCache* instance() { return _jit_profile_cache_instance; } ++ JitProfileRecorder* recorder() { return _jit_profile_cache_recorder; } ++ JitProfileCacheInfo* preloader() { return _jit_profile_cache_info; } ++ // init JProfilingCacheRecording ++ JitProfileCacheState init_for_recording(); ++ // init JProfilingCacheCompileAdvance ++ JitProfileCacheState init_for_profilecache(); ++ ++ SymbolRegexMatcher* excluding_matcher() { return _excluding_matcher; } ++ ++ JitProfileCacheState flush_recorder(); ++ ++ // static Symbol* get_class_loader_name(ClassLoaderData* cls); ++ ++ bool profilecacheComplete; ++ ++protected: ++ JitProfileCache(); ++ virtual ~JitProfileCache(); ++ ++private: ++ JitProfileCacheState _jit_profile_cache_state; ++ unsigned int _jit_profile_cache_version; ++ static JitProfileCache* _jit_profile_cache_instance; ++ Method* _dummy_method; ++ JitProfileRecorder* _jit_profile_cache_recorder; ++ JitProfileCacheInfo* _jit_profile_cache_info; ++ SymbolRegexMatcher* _excluding_matcher; ++}; ++ ++// forward class ++class JitProfileRecorder; ++ ++class JitProfileCacheInfo : public CHeapObj { ++public: ++ enum JitProfileCacheInfoState { ++ NOT_INIT = 0, ++ IS_OK = 1, ++ IS_ERR = 2 ++ }; ++ ++ JitProfileCacheInfo(); ++ virtual ~JitProfileCacheInfo(); ++ ++ bool is_valid() { return _state == IS_OK; } ++ void init(); ++ ++ bool should_preload_class(Symbol* s); ++ ++ JProfileCacheClassDictionary* jit_profile_cache_dict() { return _jit_profile_cache_dict; } ++ uint64_t loaded_count() { return _method_loaded_count; } ++ ++ ProfileCacheClassChain* chain() { return _profile_cache_chain; } ++ void set_chain(ProfileCacheClassChain* chain) { _profile_cache_chain = chain; } ++ ++ JitProfileCache* holder() { return _holder; } ++ void set_holder(JitProfileCache* h) { _holder = h; } ++ ++ bool resolve_loaded_klass(InstanceKlass* klass); ++ ++ void jvm_booted_is_done(); ++ ++ void notify_precompilation(); ++ ++ private: ++ JProfileCacheClassDictionary* _jit_profile_cache_dict; ++ 
ProfileCacheClassChain* _profile_cache_chain; ++ uint64_t _method_loaded_count; ++ JitProfileCacheInfoState _state; ++ JitProfileCache* _holder; ++ bool _jvm_booted_is_done; ++}; ++ ++#endif //SHARED_VM_JPROFILECACHE_JITPROFILECACHE_HPP +\ No newline at end of file +diff --git a/src/hotspot/share/jprofilecache/jitProfileCacheClass.cpp b/src/hotspot/share/jprofilecache/jitProfileCacheClass.cpp +new file mode 100644 +index 000000000..3992d838c +--- /dev/null ++++ b/src/hotspot/share/jprofilecache/jitProfileCacheClass.cpp +@@ -0,0 +1,112 @@ ++/* ++ * Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++ * Copyright (c) 2019 Alibaba Group Holding Limited. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ */ ++ ++#include "classfile/symbolTable.hpp" ++#include "jprofilecache/jitProfileCacheClass.hpp" ++#include "jprofilecache/jitProfileCacheUtils.hpp" ++#include "jprofilecache/jitProfileRecord.hpp" ++#include "oops/typeArrayKlass.hpp" ++#include "runtime/mutexLocker.hpp" ++#include "oops/method.inline.hpp" ++ ++ProfileCacheClassHolder* ProfileCacheClassEntry::find_class_holder(unsigned int size, ++ unsigned int crc32) { ++ for (ProfileCacheClassHolder* p = this->head_holder(); p != nullptr; p = p->next()) { ++ if (p->crc32() == crc32 && p->size() == size) { ++ return p; ++ } ++ } ++ return nullptr; ++} ++ ++#define HEADER_SIZE 36 ++ ++#define JVM_DEFINE_CLASS_PATH "_JVM_DefineClass_" ++ ++JProfileCacheClassDictionary::JProfileCacheClassDictionary(int size) ++ : Hashtable(size, sizeof(ProfileCacheClassEntry)) { ++} ++ ++JProfileCacheClassDictionary::~JProfileCacheClassDictionary() { } ++ ++ProfileCacheClassEntry* JProfileCacheClassDictionary::new_entry(Symbol* symbol) { ++ unsigned int hash = symbol->identity_hash(); ++ ProfileCacheClassEntry* entry = (ProfileCacheClassEntry*)Hashtable:: ++ new_entry(hash, symbol); ++ entry->init(); ++ return entry; ++} ++ ++ProfileCacheClassEntry* JProfileCacheClassDictionary::find_entry(InstanceKlass* k) { ++ Symbol* name = k->name(); ++ Symbol* path = k->source_file_path(); ++ if (path == nullptr) { ++ path = SymbolTable::new_symbol(JVM_DEFINE_CLASS_PATH); ++ } ++ Symbol* loader_name = JitProfileCacheUtils::get_class_loader_name(k->class_loader_data()); ++ int hash = name->identity_hash(); ++ return find_entry(hash, name, loader_name, path); ++} ++ ++ProfileCacheClassEntry* JProfileCacheClassDictionary::find_entry(unsigned int hash_value, ++ Symbol* name, ++ Symbol* loader_name, ++ Symbol* path) { ++ int index = hash_to_index(hash_value); ++ for (ProfileCacheClassEntry* p = 
bucket(index); p != nullptr; p = p->next()) { ++ if (p->literal()->fast_compare(name) == 0 && ++ p->class_loader_name()->fast_compare(loader_name) == 0 && ++ p->class_path()->fast_compare(path) == 0) { ++ return p; ++ } ++ } ++ return nullptr; ++} ++ ++ProfileCacheClassEntry* JProfileCacheClassDictionary::find_head_entry(unsigned int hash_value, ++ Symbol* name) { ++ int index = hash_to_index(hash_value); ++ for (ProfileCacheClassEntry* p = bucket(index); p != nullptr; p = p->next()) { ++ if (p->literal()->fast_compare(name) == 0) { ++ return p; ++ } ++ } ++ return nullptr; ++} ++ ++ProfileCacheClassEntry* JProfileCacheClassDictionary::find_or_create_class_entry(unsigned int hash_value, ++ Symbol* name, ++ Symbol* loader_name, ++ Symbol* path, ++ int index) { ++ ProfileCacheClassEntry* p = find_entry(hash_value, name, loader_name, path); ++ if (p == nullptr) { ++ p = new_entry(name); ++ p->set_chain_offset(index); ++ p->set_class_loader_name(loader_name); ++ p->set_class_path(path); ++ add_entry(hash_to_index(hash_value), p); ++ } ++ return p; ++} +\ No newline at end of file +diff --git a/src/hotspot/share/jprofilecache/jitProfileCacheClass.hpp b/src/hotspot/share/jprofilecache/jitProfileCacheClass.hpp +new file mode 100644 +index 000000000..8af41d6de +--- /dev/null ++++ b/src/hotspot/share/jprofilecache/jitProfileCacheClass.hpp +@@ -0,0 +1,121 @@ ++/* ++ * Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++ * Copyright (c) 2019 Alibaba Group Holding Limited. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#ifndef SHARED_VM_JPROFILECACHE_JITPROFILECACHECLASS_HPP ++#define SHARED_VM_JPROFILECACHE_JITPROFILECACHECLASS_HPP ++ ++#include "jprofilecache/jitProfileCacheHolders.hpp" ++#include "memory/allocation.hpp" ++#include "oops/klass.hpp" ++#include "oops/method.hpp" ++#include "oops/methodData.hpp" ++#include "oops/methodCounters.hpp" ++ ++ ++class ProfileCacheClassEntry : public HashtableEntry { ++ friend class JitProfileCacheInfo; ++public: ++ ProfileCacheClassEntry(ProfileCacheClassHolder* holder) ++ : _head_holder(holder), ++ _chain_offset(-1), ++ _class_loader_name(nullptr), ++ _class_path(nullptr) { ++ ++ } ++ ++ ProfileCacheClassEntry() ++ : _head_holder(nullptr), ++ _chain_offset(-1), ++ _class_loader_name(nullptr), ++ _class_path(nullptr) { ++ } ++ ++ virtual ~ProfileCacheClassEntry() { } ++ ++ void init() { ++ _head_holder = nullptr; ++ _chain_offset = -1; ++ _class_loader_name = nullptr; ++ _class_path = nullptr; ++ } ++ ++ ProfileCacheClassHolder* head_holder() { return _head_holder; } ++ void set_head_holder(ProfileCacheClassHolder* h) { _head_holder = h; } ++ ++ int chain_offset() { return _chain_offset; } ++ void set_chain_offset(int offset) { _chain_offset = offset; } ++ ++ Symbol* class_loader_name() { return _class_loader_name; } ++ void set_class_loader_name(Symbol* s) { _class_loader_name = s; } ++ Symbol* class_path() { return _class_path; } ++ void set_class_path(Symbol* s) { _class_path = s; } ++ ++ ++ ProfileCacheClassEntry* next() { ++ return (ProfileCacheClassEntry*)HashtableEntry::next(); ++ } ++ ++ void add_class_holder(ProfileCacheClassHolder* h) { ++ h->set_next(_head_holder); ++ _head_holder = h; ++ } ++ ++ ProfileCacheClassHolder* find_class_holder(unsigned int size, unsigned int crc32); ++ ++private: ++ ProfileCacheClassHolder* _head_holder; ++ int _chain_offset; ++ Symbol* _class_loader_name; ++ Symbol* _class_path; ++ ++}; ++ ++class JProfileCacheClassDictionary : public Hashtable { ++public: ++ JProfileCacheClassDictionary(int size); ++ virtual ~JProfileCacheClassDictionary(); ++ ++ ProfileCacheClassEntry* find_entry(unsigned int hash_value, Symbol* name, ++ Symbol* loader_name, Symbol* path); ++ ++ ProfileCacheClassEntry* find_head_entry(unsigned int hash_value, Symbol* name); ++ ++ ProfileCacheClassEntry* find_entry(InstanceKlass* k); ++ ++ ProfileCacheClassEntry* bucket(int i) { ++ return (ProfileCacheClassEntry*)Hashtable::bucket(i); ++ } ++ ++ ProfileCacheClassEntry* find_or_create_class_entry(unsigned int hash_value, Symbol* symbol, ++ Symbol* loader_name, Symbol* path, ++ int order); ++ ++private: ++ ++ ProfileCacheClassEntry* new_entry(Symbol* symbol); ++}; ++ ++ ++#endif // SHARE_VM_JPROFILECACHE_JITPROFILECACHECLASS_HPP +\ No newline at end of file +diff --git a/src/hotspot/share/jprofilecache/jitProfileCacheFileParser.cpp b/src/hotspot/share/jprofilecache/jitProfileCacheFileParser.cpp +new file mode 100644 +index 000000000..e59d34fc5 +--- /dev/null ++++ b/src/hotspot/share/jprofilecache/jitProfileCacheFileParser.cpp +@@ -0,0 +1,433 @@ ++/* ++ * Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++ * Copyright (c) 2019 Alibaba Group Holding Limited. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. 
++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#include "runtime/arguments.hpp" ++#include "runtime/javaCalls.hpp" ++#include "runtime/thread.hpp" ++#include "runtime/atomic.hpp" ++#include "classfile/classLoaderData.inline.hpp" ++#include "classfile/symbolTable.hpp" ++#include "classfile/systemDictionary.hpp" ++#include "compiler/compileBroker.hpp" ++#include "jprofilecache/jitProfileCache.hpp" ++#include "jprofilecache/jitProfileCacheClass.hpp" ++#include "jprofilecache/jitProfileCacheFileParser.hpp" ++#include "jprofilecache/jitProfileCacheUtils.hpp" ++#include "jprofilecache/jitProfileClassChain.hpp" ++#include "jprofilecache/jitProfileRecord.hpp" ++#include "libadt/dict.hpp" ++#include "logging/log.hpp" ++#include "logging/logStream.hpp" ++ ++// offset ++#define PROFILECACHE_VERSION_OFFSET 0 ++#define PROFILECACHE_MAGIC_NUMBER_OFFSET 4 ++#define FILE_SIZE_OFFSET 8 ++#define PROFILECACHE_CRC32_OFFSET 12 ++#define APPID_OFFSET 16 ++#define MAX_SYMBOL_LENGTH_OFFSET 20 ++#define RECORD_COUNT_OFFSET 24 ++#define PROFILECACHE_TIME_OFFSET 28 ++ ++#define HEADER_SIZE 36 ++ ++// width section ++#define RECORE_VERSION_WIDTH (PROFILECACHE_MAGIC_NUMBER_OFFSET - PROFILECACHE_VERSION_OFFSET) ++#define RECORE_MAGIC_WIDTH (FILE_SIZE_OFFSET - PROFILECACHE_MAGIC_NUMBER_OFFSET) ++#define FILE_SIZE_WIDTH (PROFILECACHE_CRC32_OFFSET - FILE_SIZE_OFFSET) ++#define RECORE_CRC32_WIDTH (APPID_OFFSET - PROFILECACHE_CRC32_OFFSET) ++#define RECORE_APPID_WIDTH (MAX_SYMBOL_LENGTH_OFFSET - APPID_OFFSET) ++#define RECORE_MAX_SYMBOL_LENGTH_WIDTH (RECORD_COUNT_OFFSET - MAX_SYMBOL_LENGTH_OFFSET) ++#define RECORD_COUNTS_WIDTH (PROFILECACHE_TIME_OFFSET - RECORD_COUNT_OFFSET) ++#define RECORE_TIME_WIDTH (HEADER_SIZE - PROFILECACHE_TIME_OFFSET) ++ ++// value ++#define RECORE_FILE_DEFAULT_NUMBER 0 ++#define RECORE_CRC32_DEFAULT_NUMBER 0 ++ ++#define ARENA_SIZE 128 ++#define READ_U1_INTERVAL 1 ++#define READ_U4_INTERVAL 4 ++#define READ_U8_INTERVAL 8 ++ ++#define JVM_DEFINE_CLASS_PATH "_JVM_DefineClass_" ++ ++JitProfileCacheFileParser::JitProfileCacheFileParser(randomAccessFileStream* fs, JitProfileCacheInfo* holder) ++ : _is_valid(false), ++ _has_parsed_header(false), ++ _file_size(0), ++ _position(0), ++ _parsed_method_count(0), ++ _total_recorder_method(0), ++ _file_stream(fs), ++ _max_symbol_length(0), ++ _parse_str_buf(nullptr), ++ _holder(holder), ++ _arena(new (mtInternal) Arena(mtInternal, ARENA_SIZE)) { ++} ++ ++JitProfileCacheFileParser::~JitProfileCacheFileParser() { ++ delete _arena; ++} ++ ++char parse_int_buf[8]; ++u1 JitProfileCacheFileParser::read_u1() { ++ _file_stream->read(parse_int_buf, 1, 1); ++ _position += READ_U1_INTERVAL; ++ return *(u1*)parse_int_buf; ++} ++ ++u4 JitProfileCacheFileParser::read_u4() { ++ _file_stream->read(parse_int_buf, READ_U4_INTERVAL, 1); ++ _position += READ_U4_INTERVAL; ++ return 
*(u4*)parse_int_buf; ++} ++ ++u8 JitProfileCacheFileParser::read_u8() { ++ _file_stream->read(parse_int_buf, READ_U8_INTERVAL, 1); ++ _position += READ_U8_INTERVAL; ++ return *(u8*)parse_int_buf; ++} ++ ++const char* JitProfileCacheFileParser::read_string() { ++ int current_read_pos = 0; ++ do { ++ _file_stream->read(_parse_str_buf + current_read_pos, 1, 1); ++ current_read_pos++; ++ } while (*(_parse_str_buf + current_read_pos - 1) != '\0' ++ && current_read_pos <= _max_symbol_length + 1); ++ ++ _position += current_read_pos; ++ int actual_string_length = current_read_pos - 1; ++ if (actual_string_length == 0) { ++ log_warning(jprofilecache)("[JitProfileCache] WARNING : Parsed empty symbol at position %d\n", _position); ++ return ""; ++ } else if (actual_string_length > max_symbol_length()) { ++ log_error(jprofilecache)("[JitProfileCache] ERROR : The parsed symbol length exceeds %d\n", max_symbol_length()); ++ return nullptr; ++ } else { ++ char* parsed_string = NEW_RESOURCE_ARRAY(char, actual_string_length + 1); ++ memcpy(parsed_string, _parse_str_buf, actual_string_length + 1); ++ return parsed_string; ++ } ++} ++ ++BytecodeProfileRecord* JitProfileCacheFileParser::read_data_layout() { ++ u4 size = read_u4(); ++ BytecodeProfileRecord* bpr = new BytecodeProfileRecord(size); ++ _file_stream->read(bpr->data(), size, 1); ++ _position += size; ++ return bpr; ++} ++ ++#define MAX_COUNT_VALUE (1024 * 1024 * 128) ++ ++bool JitProfileCacheFileParser::logparse_illegal_check(const char* s, bool ret_value, int end_position) { ++ if (_position > end_position) { ++ log_error(jprofilecache)("[JitProfileCache] ERROR : read out of bound, " ++ "file format error"); ++ return ret_value; ++ } ++ if (s == nullptr) { ++ _position = end_position; ++ log_error(jprofilecache)("[JitProfileCache] ERROR : illegal string in log file"); ++ return ret_value; ++ } ++ return true; ++} ++ ++bool JitProfileCacheFileParser::logparse_illegal_count_check(int cnt, bool ret_value, int end_position) { ++ if (_position > end_position) { ++ log_error(jprofilecache)("[JitProfileCache] ERROR : read out of bound, " ++ "file format error"); ++ return ret_value; ++ } ++ if ((u4)cnt > MAX_COUNT_VALUE) { ++ _position = end_position; ++ log_error(jprofilecache)("[JitProfileCache] ERROR : illegal count (" ++ UINT32_FORMAT ") too big", cnt); ++ return ret_value; ++ } ++ return true; ++} ++ ++bool JitProfileCacheFileParser::should_ignore_this_class(Symbol* symbol) { ++ // deal with spring auto-generated ++ ResourceMark rm; ++ char* name = symbol->as_C_string(); ++ static const char* const CGLIB_SIG = "CGLIB$$"; ++ static const char* const ACCESSER_SUFFIX = "ConstructorAccess"; ++ if (::strstr(name, CGLIB_SIG) != nullptr || ++ ::strstr(name, ACCESSER_SUFFIX) != nullptr || ++ (UseJProfilingCacheSystemBlackList && ++ JitProfileCacheUtils::is_in_unpreloadable_classes_black_list(name))) { ++ log_debug(jprofilecache)("[JitProfileCache] DEBUG : Ignore class %s", name); ++ return true; ++ } ++ JitProfileCache* jprofilecache = info_holder()->holder(); ++ SymbolRegexMatcher* matcher = jprofilecache->excluding_matcher(); ++ if (matcher == nullptr) { ++ return false; ++ } ++ if (matcher->matches(symbol)) { ++ log_debug(jprofilecache)("[JitProfileCache] DEBUG : Ignore class %s", name); ++ return true; ++ } else { ++ return false; ++ } ++} ++ ++#define SYMBOL_TERMINATOR_SPACE 2 ++ ++bool JitProfileCacheFileParser::parse_header() { ++ int begin_position = _position; ++ int end_position = begin_position + HEADER_SIZE; ++ u4 parse_version = read_u4(); ++ 
u4 parse_magic_number = read_u4(); ++ u4 parse_file_size = read_u4(); ++ int parse_crc32_recorded = (int)read_u4(); ++ u4 appid = read_u4(); ++ unsigned int version = JitProfileCache::instance()->version(); ++ ++ if (parse_version != version) { ++ _is_valid = false; ++ log_error(jprofilecache)("[JitProfileCache] ERROR : Version mismatch, expect %d but %d", version, parse_version); ++ return false; ++ } ++ if (parse_magic_number != JPROFILECACHE_MAGIC_NUMBER ++ || (long)parse_file_size != this->file_size()) { ++ _is_valid = false; ++ log_error(jprofilecache)("[JitProfileCache] ERROR : illegal header"); ++ return false; ++ } ++ // valid appid ++ if (CompilationProfileCacheAppID != 0 && CompilationProfileCacheAppID != appid) { ++ _is_valid = false; ++ log_error(jprofilecache)("[JitProfileCache] ERROR : illegal CompilationProfileCacheAppID"); ++ return false; ++ } ++ // valid crc32 ++ int crc32_actual = JitProfileRecorder::compute_crc32(_file_stream); ++ if (parse_crc32_recorded != crc32_actual) { ++ _is_valid = false; ++ log_error(jprofilecache)("[JitProfileCache] ERROR : JitProfile crc32 check failure"); ++ return false; ++ } ++ ++ u4 parse_max_symbol_length = read_u4(); ++ logparse_illegal_count_check(parse_max_symbol_length, false, end_position); ++ _parse_str_buf = (char*)_arena->Amalloc(parse_max_symbol_length + SYMBOL_TERMINATOR_SPACE); ++ _max_symbol_length = (int)parse_max_symbol_length; ++ ++ u4 parse_record_count = read_u4(); ++ logparse_illegal_count_check(parse_record_count, false, end_position); ++ _total_recorder_method = parse_record_count; ++ u4 utc_time = read_u8(); ++ _is_valid = true; ++ return true; ++} ++ ++Symbol* JitProfileCacheFileParser::create_symbol(const char* char_name) { ++ return SymbolTable::new_symbol(char_name, strlen(char_name)); ++} ++ ++bool JitProfileCacheFileParser::parse_class() { ++ ResourceMark rm; ++ int begin_position = _position; ++ u4 section_size = read_u4(); ++ int end_position = begin_position + (int)section_size; ++ u4 parse_cnt = read_u4(); ++ logparse_illegal_count_check(parse_cnt, false, end_position); ++ ++ ProfileCacheClassChain* chain = new ProfileCacheClassChain(parse_cnt); ++ info_holder()->set_chain(chain); ++ ++ for (int i = 0; i < (int)parse_cnt; i++) { ++ const char* parse_name_char = read_string(); ++ logparse_illegal_check(parse_name_char, false, end_position); ++ const char* parse_loader_char = read_string(); ++ logparse_illegal_check(parse_loader_char, false, end_position); ++ const char* parse_path_char = read_string(); ++ logparse_illegal_check(parse_path_char, false, end_position); ++ Symbol* name = create_symbol(parse_name_char); ++ Symbol* loader_name = create_symbol(parse_loader_char); ++ Symbol* path = create_symbol(parse_path_char); ++ loader_name = JitProfileCacheUtils::remove_meaningless_suffix(loader_name); ++ chain->at(i)->set_class_name(name); ++ chain->at(i)->set_class_loader_name(loader_name); ++ chain->at(i)->set_class_path(path); ++ ++ check_class(i, name, loader_name, path, chain); ++ ++ } // end of for loop ++ ++ // check section size ++ if (_position - begin_position != (int)section_size) { ++ log_error(jprofilecache)("[JitProfileCache] ERROR : JitProfile class parse fail"); ++ return false; ++ } ++ return true; ++} ++ ++void JitProfileCacheFileParser::check_class(int i, Symbol* name, Symbol* loader_name, Symbol* path, ProfileCacheClassChain* chain) { ++ // add to preload class dictionary ++ unsigned int hash_value = name->identity_hash(); ++ ProfileCacheClassEntry* e = 
info_holder()->jit_profile_cache_dict()-> ++ find_or_create_class_entry(hash_value, name, loader_name, path, i); ++ // e->chain_offset() < i : means same class symbol already existed in the chain ++ // should_ignore_this_class(name): means this class is in skipped list(build-in or user-defined) ++ // so set entry state is skipped, will be ignored in JitProfileCache ++ if (e->chain_offset() < i || should_ignore_this_class(name)) { ++ chain->at(i)->set_skipped(); ++ } else { ++ Symbol* name_no_suffix = JitProfileCacheUtils::remove_meaningless_suffix(name); ++ if (name_no_suffix->fast_compare(name) != 0) { ++ unsigned int hash_no_suffix = name_no_suffix->identity_hash(); ++ ProfileCacheClassEntry* e_no_suffix = info_holder()->jit_profile_cache_dict()-> ++ find_or_create_class_entry(hash_no_suffix, name_no_suffix, loader_name, path, i); ++ if (e_no_suffix->chain_offset() < i) { ++ chain->at(i)->set_skipped(); ++ } ++ } ++ } ++} ++ ++bool JitProfileCacheFileParser::valid() { ++ if(!_has_parsed_header) { ++ parse_header(); ++ } ++ return _is_valid; ++} ++ ++bool JitProfileCacheFileParser::has_next_method_record() { ++ return _parsed_method_count < _total_recorder_method && _position < _file_size; ++} ++ ++ProfileCacheMethodHold* JitProfileCacheFileParser::parse_method() { ++ ResourceMark rm; ++ _file_stream->seek(_position, SEEK_SET); ++ int begin_position = _position; ++ u4 section_size = read_u4(); ++ int end_position = begin_position + section_size; ++ ++ u4 comp_order = read_u4(); ++ u1 compilation_type = read_u1(); ++ if (compilation_type != 0 && compilation_type != 1) { ++ log_error(jprofilecache)("[JitProfileCache] ERROR : illegal compilation type in JitProfile"); ++ _position = end_position; ++ return nullptr; ++ } ++ // parse method info ++ const char* parse_method_name_char = read_string(); ++ logparse_illegal_check(parse_method_name_char, false, end_position); ++ Symbol* method_name = create_symbol(parse_method_name_char); ++ const char* parse_method_sig_char = read_string(); ++ logparse_illegal_check(parse_method_sig_char, false, end_position); ++ Symbol* method_sig = create_symbol(parse_method_sig_char); ++ u4 parse_first_invoke_init_order = read_u4(); ++ ++ if ((int)parse_first_invoke_init_order == INVALID_FIRST_INVOKE_INIT_ORDER) { ++ parse_first_invoke_init_order = this->info_holder()->chain()->length() - 1; ++ } ++ u4 parse_method_size = read_u4(); ++ u4 parse_method_hash = read_u4(); ++ int32_t parse_bci = (int32_t)read_u4(); ++ if (parse_bci != InvocationEntryBci) { ++ logparse_illegal_count_check(parse_bci, false, end_position); ++ } ++ ++ // parse class info ++ const char* parse_class_name_char = read_string(); ++ logparse_illegal_check(parse_class_name_char, false, end_position); ++ Symbol* class_name = create_symbol(parse_class_name_char); ++ // ignore ++ if (should_ignore_this_class(class_name)) { ++ _position = end_position; ++ return nullptr; ++ } ++ const char* parse_class_loader_char = read_string(); ++ logparse_illegal_check(parse_class_loader_char, false, end_position); ++ Symbol* class_loader = create_symbol(parse_class_loader_char); ++ class_loader = JitProfileCacheUtils::remove_meaningless_suffix(class_loader); ++ const char* path_char = read_string(); ++ logparse_illegal_check(path_char, false, end_position); ++ Symbol* path = create_symbol(path_char); ++ ++ JProfileCacheClassDictionary* dict = this->info_holder()->jit_profile_cache_dict(); ++ unsigned int dict_hash = class_name->identity_hash(); ++ ProfileCacheClassEntry* entry = 
dict->find_head_entry(dict_hash, class_name);
++  if (entry == nullptr) {
++    log_warning(jprofilecache)("[JitProfileCache] WARNING : class %s is missing in method parse", parse_class_name_char);
++    _position = end_position;
++    return nullptr;
++  }
++  u4 parse_class_size = read_u4();
++  u4 parse_class_crc32 = read_u4();
++  u4 parse_class_hash = read_u4();
++
++  // method counters info
++  u4 parse_intp_invocation_count = read_u4();
++  u4 parse_intp_throwout_count = read_u4();
++  u4 parse_invocation_count = read_u4();
++  u4 parse_backedge_count = read_u4();
++  u1 compile_level = read_u1();
++
++  int class_chain_offset = entry->chain_offset();
++  ProfileCacheClassHolder* holder = entry->find_class_holder(parse_class_size, parse_class_crc32);
++  if (holder == nullptr) {
++    holder = new ProfileCacheClassHolder(class_name, class_loader, path, parse_class_size, parse_class_hash, parse_class_crc32);
++    entry->add_class_holder(holder);
++  }
++  ProfileCacheMethodHold* mh = new ProfileCacheMethodHold(method_name, method_sig);
++  mh->set_interpreter_invocation_count(parse_intp_invocation_count);
++  mh->set_interpreter_exception_count(parse_intp_throwout_count);
++  mh->set_invocation_count(parse_invocation_count);
++  mh->set_backage_count(parse_backedge_count);
++
++  mh->set_compile_level(compile_level);
++  mh->set_method_hash(parse_method_hash);
++  mh->set_method_size(parse_method_size);
++
++  parse_profile_data(mh);
++
++  int method_chain_offset = class_chain_offset;
++  mh->set_mounted_offset(method_chain_offset);
++  this->info_holder()->chain()->add_method_at_index(mh, method_chain_offset);
++  holder->add_method(mh);
++
++  log_debug(jprofilecache)("[JitProfileCache] method %s.%s is parsed successfully", parse_class_name_char, parse_method_name_char);
++
++  return mh;
++}
++
++void JitProfileCacheFileParser::parse_profile_data(ProfileCacheMethodHold* mh) {
++  u4 count = read_u4();
++  GrowableArray<BytecodeProfileRecord*>* profile_list = mh->profile_list();
++  while (count > 0) {
++    BytecodeProfileRecord* bpr = read_data_layout();
++    profile_list->append(bpr);
++    count--;
++  }
++}
+\ No newline at end of file
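parse_profile_data above consumes a u4 record count followed by that many size-prefixed blobs (read_data_layout reads a u4 size, then that many raw bytes). The same framing in a self-contained form; Reader and read_records are hypothetical stand-ins, and the file's native byte order is assumed:

#include <cstdint>
#include <cstring>
#include <vector>

// Minimal cursor over an in-memory buffer, mirroring read_u4()/read_data_layout().
struct Reader {
  const uint8_t* p;
  uint32_t u4() { uint32_t v; memcpy(&v, p, sizeof(v)); p += sizeof(v); return v; }
};

// [count:u4] then count x ([size:u4][size raw bytes]), as parse_profile_data reads.
std::vector<std::vector<uint8_t>> read_records(Reader& r) {
  std::vector<std::vector<uint8_t>> out;
  for (uint32_t count = r.u4(); count > 0; count--) {
    uint32_t size = r.u4();
    out.emplace_back(r.p, r.p + size);  // copy the record payload
    r.p += size;
  }
  return out;
}

+diff --git a/src/hotspot/share/jprofilecache/jitProfileCacheFileParser.hpp b/src/hotspot/share/jprofilecache/jitProfileCacheFileParser.hpp
+new file mode 100644
+index 000000000..7c4f8c4dc
+--- /dev/null
++++ b/src/hotspot/share/jprofilecache/jitProfileCacheFileParser.hpp
+@@ -0,0 +1,100 @@
++/*
++ * Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved.
++ * Copyright (c) 2019 Alibaba Group Holding Limited. All rights reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.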
++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ */ ++ ++#ifndef LINUX_AARCH64_NORMAL_SERVER_FASTDEBUG_JITPROFILECACHELOGPARSER_H ++#define LINUX_AARCH64_NORMAL_SERVER_FASTDEBUG_JITPROFILECACHELOGPARSER_H ++ ++#include "memory/allocation.hpp" ++ ++#define INVALID_FIRST_INVOKE_INIT_ORDER -1 ++#define JPROFILECACHE_MAGIC_NUMBER 0xCAB5 ++ ++class JitProfileCacheInfo; ++class ProfileCacheMethodHold; ++class ProfileCacheClassChain; ++class BytecodeProfileRecord; ++ ++// JitProfileCache file parser ++class JitProfileCacheFileParser : CHeapObj { ++public: ++ JitProfileCacheFileParser(randomAccessFileStream* fs, JitProfileCacheInfo* holder); ++ virtual ~JitProfileCacheFileParser(); ++ ++ bool valid(); ++ ++ bool parse_header(); ++ Symbol* create_symbol(const char* char_name); ++ bool parse_class(); ++ ++ void check_class(int i, Symbol* name, Symbol* loader_name, Symbol* path, ProfileCacheClassChain* chain); ++ ++ bool should_ignore_this_class(Symbol* symbol); ++ ++ bool has_next_method_record(); ++ ProfileCacheMethodHold* parse_method(); ++ ++ void increment_parsed_number_count() { _parsed_method_count++; } ++ ++ int parsed_methods() { return _parsed_method_count; } ++ int total_recorder_method() { return _total_recorder_method; } ++ ++ long file_size() { return _file_size; } ++ void set_file_size(long size) { _file_size = size; } ++ ++ int max_symbol_length() { return _max_symbol_length; } ++ ++ JitProfileCacheInfo* info_holder() { return _holder; } ++ void set_info_holder(JitProfileCacheInfo* holder) { _holder = holder; } ++ bool logparse_illegal_check(const char* s, bool ret_value, int end_position); ++ bool logparse_illegal_count_check(int cnt, bool ret_value, int end_position); ++ ++private: ++ // disable default constructor ++ JitProfileCacheFileParser(); ++ ++ void parse_profile_data(ProfileCacheMethodHold* mh); ++ ++ bool _is_valid; ++ bool _has_parsed_header; ++ long _file_size; ++ int _position; ++ int _parsed_method_count; ++ int _total_recorder_method; ++ randomAccessFileStream* _file_stream; ++ ++ int _max_symbol_length; ++ char* _parse_str_buf; ++ ++ JitProfileCacheInfo* _holder; ++ Arena* _arena; ++ ++ u1 read_u1(); ++ u4 read_u4(); ++ u8 read_u8(); ++ const char* read_string(); ++ BytecodeProfileRecord* read_data_layout(); ++}; ++ ++ ++#endif // LINUX_AARCH64_NORMAL_SERVER_FASTDEBUG_JITPROFILECACHELOGPARSER_H +\ No newline at end of file +diff --git a/src/hotspot/share/jprofilecache/jitProfileCacheHolders.cpp b/src/hotspot/share/jprofilecache/jitProfileCacheHolders.cpp +new file mode 100644 +index 000000000..4ed60e8da +--- /dev/null ++++ b/src/hotspot/share/jprofilecache/jitProfileCacheHolders.cpp +@@ -0,0 +1,120 @@ ++/* ++ * Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++ * Copyright (c) 2019 Alibaba Group Holding Limited. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ */ ++ ++#include "jprofilecache/jitProfileCacheHolders.hpp" ++#include "classfile/classLoaderData.hpp" ++#include "classfile/classLoaderData.inline.hpp" ++ ++#define METHOD_LIST_INITIAL_CAPACITY 16 ++ ++ProfileCacheMethodHold::ProfileCacheMethodHold(Symbol* name, Symbol* signature) ++ : _method_name(name), ++ _method_signature(signature), ++ _method_size(0), ++ _method_hash(0), ++ _interpreter_invocation_count(0), ++ _interpreter_exception_count(0), ++ _invocation_count(0), ++ _backage_count(0), ++ _mounted_offset(-1), ++ _owns_profile_list(true), ++ _next(nullptr), ++ _resolved_method(nullptr), ++ _profile_list(new (mtClass) ++ GrowableArray(METHOD_LIST_INITIAL_CAPACITY, mtClass)) { ++} ++ ++ProfileCacheMethodHold::ProfileCacheMethodHold(ProfileCacheMethodHold& rhs) ++ : _method_name(rhs._method_name), ++ _method_signature(rhs._method_signature), ++ _method_size(rhs._method_size), ++ _method_hash(rhs._method_hash), ++ _interpreter_invocation_count(rhs._interpreter_invocation_count), ++ _interpreter_exception_count(rhs._interpreter_exception_count), ++ _invocation_count(rhs._invocation_count), ++ _backage_count(rhs._backage_count), ++ _mounted_offset(rhs._mounted_offset), ++ _owns_profile_list(false), ++ _next(nullptr), ++ _resolved_method(nullptr), ++ _profile_list(rhs._profile_list) { ++} ++ ++ProfileCacheMethodHold::~ProfileCacheMethodHold() { ++ if (_owns_profile_list) { ++ for (int i = 0; i < _profile_list->length(); i++) { ++ delete _profile_list->at(i); ++ } ++ delete _profile_list; ++ } ++} ++ ++bool ProfileCacheMethodHold::is_method_match(Method* method) { ++ if (method_name()->fast_compare(method->name()) == 0 ++ && method_signature()->fast_compare(method->signature()) == 0) { ++ return true; ++ } else { ++ return false; ++ } ++} ++ ++bool ProfileCacheMethodHold::is_alive() const { ++ if (_resolved_method == nullptr || _resolved_method->constMethod() == nullptr || _resolved_method->constants() == nullptr || _resolved_method->constants()->pool_holder() == nullptr) { ++ return false; ++ } ++ ClassLoaderData* data = _resolved_method->method_holder()->class_loader_data(); ++ if (data == nullptr || !data->is_alive()) { ++ return false; ++ } ++ return true; ++} ++ ++ProfileCacheMethodHold* ProfileCacheMethodHold::clone_and_add() { ++ ProfileCacheMethodHold* clone = new ProfileCacheMethodHold(*this); ++ clone->set_next(_next); ++ _next = clone; ++ return clone; ++} ++ ++#define CLASS_METHOD_LIST_INITIAL_CAPACITY 16 ++ ++ProfileCacheClassHolder::ProfileCacheClassHolder(Symbol* name, Symbol* loader_name, ++ Symbol* path, unsigned int size, ++ unsigned int hash, unsigned int crc32) ++ : _class_name(name), ++ _class_loader_name(loader_name), ++ _class_path(path), ++ _class_size(size), ++ _class_hash(hash), ++ _class_crc32(crc32), ++ _class_resolved(false), ++ _class_method_list(new (mtInternal) ++ GrowableArray(CLASS_METHOD_LIST_INITIAL_CAPACITY, mtClass)), ++ _next(nullptr) { ++} ++ ++ ++ProfileCacheClassHolder::~ProfileCacheClassHolder() { ++ delete 
_class_method_list; ++} +\ No newline at end of file +diff --git a/src/hotspot/share/jprofilecache/jitProfileCacheHolders.hpp b/src/hotspot/share/jprofilecache/jitProfileCacheHolders.hpp +new file mode 100644 +index 000000000..d5073a564 +--- /dev/null ++++ b/src/hotspot/share/jprofilecache/jitProfileCacheHolders.hpp +@@ -0,0 +1,164 @@ ++/* ++ * Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++ * Copyright (c) 2019 Alibaba Group Holding Limited. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef SHARED_VM_JPROFILECACHE_JITPROFILECACHEHOLDERS_HPP ++#define SHARED_VM_JPROFILECACHE_JITPROFILECACHEHOLDERS_HPP ++ ++#include "memory/allocation.hpp" ++#include "jprofilecache/hashtable.hpp" ++#include "oops/klass.hpp" ++#include "oops/method.hpp" ++#include "oops/methodData.hpp" ++#include "oops/methodCounters.hpp" ++ ++class BytecodeProfileRecord : public CHeapObj { ++public: ++ BytecodeProfileRecord(int size) : _size_in_bytes(size) { ++ _data = NEW_C_HEAP_ARRAY(char, size, mtInternal); ++ } ++ ~BytecodeProfileRecord() { if (_size_in_bytes > 0) {FREE_C_HEAP_ARRAY(char, _data);}} ++ ++ DataLayout* data_in() const { return (DataLayout*)_data; } ++ char* data() { return _data;} ++ int size_in_bytes() const { return _size_in_bytes; } ++ u2 bci() const { return data_in()->bci(); } ++ ++ bool is_BranchData() const { return data_in()->data_in()->is_BranchData(); } ++ bool is_MultiBranchData() const { return data_in()->data_in()->is_MultiBranchData(); } ++ bool is_ArgInfoData() const { return data_in()->data_in()->is_ArgInfoData(); } ++ ++private: ++ char* _data; ++ int _size_in_bytes; ++}; ++ ++class ProfileCacheMethodHold : public CHeapObj { ++ friend class ProfileCacheClassHolder; ++public: ++ ProfileCacheMethodHold(Symbol* name, Symbol* signature); ++ ProfileCacheMethodHold(ProfileCacheMethodHold& rhs); ++ virtual ~ProfileCacheMethodHold(); ++ ++ Symbol* method_name() const { return _method_name; } ++ Symbol* method_signature() const { return _method_signature; } ++ ++ unsigned int invocation_count() const { return _invocation_count;} ++ int compile_level() const { return _compile_level; } ++ ++ void set_interpreter_invocation_count(unsigned int value) { _interpreter_invocation_count = value; } ++ void set_interpreter_exception_count(unsigned int value) { _interpreter_exception_count = value; } ++ void set_invocation_count(unsigned int value) { _invocation_count = value; } ++ void set_backage_count(unsigned int value) { _backage_count = value; } ++ void set_compile_level(int value) { 
_compile_level = value; } ++ ++ void set_method_hash(unsigned int value) { _method_hash = value; } ++ void set_method_size(unsigned int value) { _method_size = value; } ++ void set_mounted_offset(int value) { _mounted_offset = value; } ++ ++ bool is_method_match(Method* method); ++ ++ ProfileCacheMethodHold* next() const { return _next; } ++ void set_next(ProfileCacheMethodHold* h) { _next = h; } ++ ++ Method* resolved_method() const { return _resolved_method; } ++ void set_resolved_method(Method* m) { _resolved_method = m; } ++ ++ GrowableArray* profile_list() const { return _profile_list; } ++ void set_profile_list(GrowableArray* value) { _profile_list = value; } ++ ++ ProfileCacheMethodHold* clone_and_add(); ++ ++ bool is_alive() const; ++ ++private: ++ Symbol* _method_name; ++ Symbol* _method_signature; ++ ++ unsigned int _method_size; ++ unsigned int _method_hash; ++ ++ unsigned int _interpreter_invocation_count; ++ unsigned int _interpreter_exception_count; ++ unsigned int _invocation_count; ++ unsigned int _backage_count; ++ int _compile_level; ++ ++ int _mounted_offset; ++ ++ bool _owns_profile_list; ++ ++ // A single linked list stores entries with the same initialization order ++ ProfileCacheMethodHold* _next; ++ // The resolved method within the holder's list ++ Method* _resolved_method; ++ // An array of profile information, shared among entries with the same ++ GrowableArray* _profile_list; ++}; ++ ++class ProfileCacheClassHolder : public CHeapObj { ++public: ++ ProfileCacheClassHolder(Symbol* name, Symbol* loader_name, ++ Symbol* path, unsigned int size, ++ unsigned int hash, unsigned int crc32); ++ virtual ~ProfileCacheClassHolder(); ++ ++ void add_method(ProfileCacheMethodHold* mh) { ++ assert(_class_method_list != nullptr, "not initialize"); ++ _class_method_list->append(mh); ++ } ++ ++ unsigned int size() const { return _class_size; } ++ unsigned int hash() const { return _class_hash; } ++ unsigned int crc32() const { return _class_crc32; } ++ unsigned int methods_count() const { return _class_method_list->length(); } ++ Symbol* class_name() const { return _class_name; } ++ Symbol* class_loader_name() const { return _class_loader_name; } ++ Symbol* path() const { return _class_path; } ++ ProfileCacheClassHolder* next() const { return _next; } ++ bool resolved() const { return _class_resolved; } ++ ++ void set_resolved() { _class_resolved = true; } ++ void set_next(ProfileCacheClassHolder* h) { _next = h; } ++ ++ GrowableArray* method_list() const { return _class_method_list; } ++ ++private: ++ Symbol* _class_name; ++ Symbol* _class_loader_name; ++ Symbol* _class_path; ++ ++ unsigned int _class_size; ++ unsigned int _class_hash; ++ unsigned int _class_crc32; ++ unsigned int _class_init_chain_index; ++ ++ bool _class_resolved; ++ ++ GrowableArray* _class_method_list; ++ ++ ProfileCacheClassHolder* _next; ++}; ++ ++#endif // SHARED_VM_JPROFILECACHE_JITPROFILECACHEHOLDERS_HPP +\ No newline at end of file +diff --git a/src/hotspot/share/jprofilecache/jitProfileCacheThread.cpp b/src/hotspot/share/jprofilecache/jitProfileCacheThread.cpp +new file mode 100644 +index 000000000..cda176a6a +--- /dev/null ++++ b/src/hotspot/share/jprofilecache/jitProfileCacheThread.cpp +@@ -0,0 +1,116 @@ ++/* ++* Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
++ * or visit www.oracle.com if you need additional information or have any
++ * questions.
++ */
++
++#include "precompiled.hpp"
++#include "classfile/vmSymbols.hpp"
++#include "code/codeCache.hpp"
++#include "jprofilecache/jitProfileCache.hpp"
++#include "jprofilecache/jitProfileCacheThread.hpp"
++#include "oops/instanceKlass.hpp"
++#include "runtime/handles.inline.hpp"
++#include "runtime/java.hpp"
++#include "runtime/javaCalls.hpp"
++#include "runtime/mutex.hpp"
++#include "runtime/mutexLocker.hpp"
++#include "runtime/orderAccess.hpp"
++#include "runtime/threads.hpp"
++#include "utilities/exceptions.hpp"
++
++JavaThread* JitProfileCacheThread::_jprofilecache_thread = nullptr;
++unsigned int JitProfileCacheThread::_interval_seconds = 0;
++volatile bool JitProfileCacheThread::_is_active = false;
++
++#define MILLISECONDS_PER_SECOND 1000
++
++bool has_error(TRAPS, const char* error) {
++  if (HAS_PENDING_EXCEPTION) {
++    tty->print_cr("%s", error);
++    java_lang_Throwable::print(PENDING_EXCEPTION, tty);
++    tty->cr();
++    CLEAR_PENDING_EXCEPTION;
++    return true;
++  } else {
++    return false;
++  }
++}
++
++void JitProfileCacheThread::run(TRAPS) {
++  InstanceKlass* ik = vmClasses::Thread_klass();
++  assert(ik->is_initialized(), "must be initialized");
++  instanceHandle thread_oop = ik->allocate_instance_handle(CHECK);
++  const char thread_name[] = "delay load class/profilecache";
++  Handle string = java_lang_String::create_from_str(thread_name, CHECK);
++  // Initialize thread_oop to put it into the system threadGroup
++  Handle thread_group(THREAD, Universe::system_thread_group());
++  JavaValue result(T_VOID);
++
++  JavaCalls::call_special(&result, thread_oop,
++                          ik,
++                          vmSymbols::object_initializer_name(),
++                          vmSymbols::threadgroup_string_void_signature(),
++                          thread_group,
++                          string,
++                          THREAD);
++  if (has_error(THREAD, "Exception in VM (JitProfileCacheThread::run): ")) {
++    vm_exit_during_initialization("Cannot create delay load class/profilecache thread.");
++    return;
++  }
++  {
++    MutexLocker mu(Threads_lock);
++    JavaThread* _thread = new JavaThread(&JitProfileCacheThread::load_class_thread_entry);
++    if (_thread == nullptr || _thread->osthread() == nullptr) {
++      vm_exit_during_initialization("Cannot create delay load class/profilecache thread. Out of system resources.");
++    }
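++    // Link the java.lang.Thread oop with the native JavaThread before the
++    // thread is added to the thread list and started.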
++    java_lang_Thread::set_thread(thread_oop(), _thread);
++    // java_lang_Thread::set_daemon(thread_oop());
++    _thread->set_threadOopHandles(thread_oop());
++    Threads::add(_thread);
++    Thread::start(_thread);
++    _is_active = true;
++  }
++
++  {
++    MutexLocker mu(JitProfileCachePrint_lock);
++    _jprofilecache_thread = nullptr;
++  }
++}
++
++void JitProfileCacheThread::load_class_thread_entry(JavaThread* thread, TRAPS) {
++  THREAD->sleep((jlong)interval_seconds() * MILLISECONDS_PER_SECOND); // sleep() takes milliseconds
++  JitProfileCache::instance()->preloader()->notify_precompilation();
++}
++
++void JitProfileCacheThread::launch_with_delay(unsigned int sec, TRAPS) {
++  set_interval_seconds(sec);
++  run(THREAD);
++}
++
++void JitProfileCacheThread::print_jit_profile_cache_thread_info_on(outputStream* st) {
++  MutexLocker mu(JitProfileCachePrint_lock);
++  if (_jprofilecache_thread == nullptr || !is_active()) {
++    return;
++  }
++  st->print("\"%s\" ", _jprofilecache_thread->name());
++  _jprofilecache_thread->print_on(st);
++  st->cr();
++}
+diff --git a/src/hotspot/share/jprofilecache/jitProfileCacheThread.hpp b/src/hotspot/share/jprofilecache/jitProfileCacheThread.hpp
+new file mode 100644
+index 000000000..61c305fa5
+--- /dev/null
++++ b/src/hotspot/share/jprofilecache/jitProfileCacheThread.hpp
+@@ -0,0 +1,54 @@
++/*
++ * Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
++ * or visit www.oracle.com if you need additional information or have any
++ * questions.
++ */ ++ ++#ifndef SHARE_VM_JPROFILECACHE_JITPROFILECACHETHREAD_HPP ++#define SHARE_VM_JPROFILECACHE_JITPROFILECACHETHREAD_HPP ++ ++#include "runtime/nonJavaThread.hpp" ++ ++// Thread to trigger the load of class data and profile info on delay ++class JitProfileCacheThread : public AllStatic { ++public: ++ ++ static unsigned int interval_seconds() { return _interval_seconds; } ++ ++ static void set_interval_seconds(unsigned int sec) { _interval_seconds = sec; } ++ ++ static bool is_active() { return _is_active; } ++ ++ static void launch_with_delay(unsigned int sec, TRAPS); ++ ++ static void load_class_thread_entry(JavaThread* thread, TRAPS); ++ ++ static void print_jit_profile_cache_thread_info_on(outputStream* st); ++ ++private: ++ static void run(TRAPS); ++ ++ static unsigned int _interval_seconds; ++ static volatile bool _is_active; ++ ++ static JavaThread* _jprofilecache_thread; ++}; ++ ++#endif //SHARE_VM_JPROFILECACHE_JITPROFILECACHETHREAD_HPP +\ No newline at end of file +diff --git a/src/hotspot/share/jprofilecache/jitProfileCacheUtils.cpp b/src/hotspot/share/jprofilecache/jitProfileCacheUtils.cpp +new file mode 100644 +index 000000000..83e582413 +--- /dev/null ++++ b/src/hotspot/share/jprofilecache/jitProfileCacheUtils.cpp +@@ -0,0 +1,105 @@ ++/* ++* Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "classfile/symbolTable.hpp" ++#include "compiler/compilationPolicy.hpp" ++#include "compiler/compileBroker.hpp" ++#include "jprofilecache/jitProfileCacheUtils.hpp" ++ ++Symbol* JitProfileCacheUtils::get_class_loader_name(ClassLoaderData* cld) { ++ Handle class_loader(Thread::current(), cld->class_loader()); ++ Symbol* loader_name = nullptr; ++ if (class_loader() != nullptr) { ++ loader_name = JitProfileCacheUtils::remove_meaningless_suffix(class_loader()->klass()->name()); ++ } else { ++ loader_name = SymbolTable::new_symbol("nullptr"); ++ } ++ return loader_name; ++} ++ ++Symbol* JitProfileCacheUtils::remove_meaningless_suffix(Symbol* s) { ++ ResourceMark rm; ++ Symbol* result = s; ++ char* s_char = s->as_C_string(); ++ int len = (int)::strlen(s_char); ++ int i = 0; ++ for (i = 0; i < len - 1; i++) { ++ if (s_char[i] == '$' && s_char[i+1] == '$') { ++ break; ++ } ++ } ++ if (i < len - 1) { ++ i = (i == 0) ? 
1: i; ++ result = SymbolTable::new_symbol(s_char, i); ++ s_char = result->as_C_string(); ++ } ++ len = (int)::strlen(s_char); ++ i = len - 1; ++ for (; i >= 0; i--) { ++ if (s_char[i] >= '0' && s_char[i] <= '9') { ++ continue; ++ } else if (s_char[i] == '$') { ++ continue; ++ } else { ++ break; ++ } ++ } ++ if (i != len - 1){ ++ i = i == -1 ? 0 : i; ++ result = SymbolTable::new_symbol(s_char, i + 1); ++ } ++ return result; ++} ++ ++bool JitProfileCacheUtils::commit_compilation(methodHandle m, int comp_level, int bci, TRAPS) { ++ if (comp_level > JProfilingCacheMaxTierLimit) { ++ comp_level = JProfilingCacheMaxTierLimit; ++ } ++ if (CompilationPolicy::can_be_compiled(m, comp_level)) { ++ CompileBroker::compile_method(m, bci, comp_level, ++ methodHandle(), 1, ++ CompileTask::CompileReason::Reason_JitProfile, THREAD); ++ return true; ++ } ++ return false; ++} ++ ++bool JitProfileCacheUtils::is_in_unpreloadable_classes_black_list(Symbol* s) { ++ ResourceMark rm; ++ const char * str = s->as_C_string(); ++ return is_in_unpreloadable_classes_black_list(str); ++} ++ ++bool JitProfileCacheUtils::is_in_unpreloadable_classes_black_list(const char* str) { ++ static const char* const JFR_PREFIX = "jdk/jfr"; ++ static const int JFR_PREFIX_LEN = strlen(JFR_PREFIX); ++ static const char* const KRB5_PREFIX = "sun/security/krb5"; ++ static const int KRB5_PREFIX_LEN = strlen(KRB5_PREFIX); ++ static const char* const PLATFORMLOGGER_PREFIX = "sun/util/logging/PlatformLogger"; ++ static const int PLATFORMLOGGER_LEN = strlen(PLATFORMLOGGER_PREFIX); ++ return strncmp(str, JFR_PREFIX, JFR_PREFIX_LEN) == 0 || ++ strncmp(str, KRB5_PREFIX, KRB5_PREFIX_LEN) == 0 || ++ strncmp(str, PLATFORMLOGGER_PREFIX, PLATFORMLOGGER_LEN) == 0; ++} +\ No newline at end of file +diff --git a/src/hotspot/share/jprofilecache/jitProfileCacheUtils.hpp b/src/hotspot/share/jprofilecache/jitProfileCacheUtils.hpp +new file mode 100644 +index 000000000..f0094b423 +--- /dev/null ++++ b/src/hotspot/share/jprofilecache/jitProfileCacheUtils.hpp +@@ -0,0 +1,41 @@ ++/* ++ * Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ */ ++ ++#ifndef SHARE_VM_JPROFILECACHE_JITPROFILECACHEUTILS_HPP ++#define SHARE_VM_JPROFILECACHE_JITPROFILECACHEUTILS_HPP ++ ++#include "classfile/classLoaderData.hpp" ++#include "classfile/classLoaderData.inline.hpp" ++ ++class JitProfileCacheUtils : public AllStatic { ++public: ++ static Symbol* get_class_loader_name(ClassLoaderData* cld); ++ static Symbol* remove_meaningless_suffix(Symbol* s); ++ ++ static bool commit_compilation(methodHandle m, int comp_level, int bci, TRAPS); ++ ++ static bool is_in_unpreloadable_classes_black_list(Symbol* s); ++ static bool is_in_unpreloadable_classes_black_list(const char* str); ++}; ++ ++#endif //SHARE_VM_JPROFILECACHE_JITPROFILECACHEUTILS_HPP +\ No newline at end of file +diff --git a/src/hotspot/share/jprofilecache/jitProfileClassChain.cpp b/src/hotspot/share/jprofilecache/jitProfileClassChain.cpp +new file mode 100644 +index 000000000..e6e2fbf73 +--- /dev/null ++++ b/src/hotspot/share/jprofilecache/jitProfileClassChain.cpp +@@ -0,0 +1,523 @@ ++/* ++ * Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++ * Copyright (c) 2019 Alibaba Group Holding Limited. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ */ ++ ++#include "precompiled.hpp" ++#include "classfile/classLoaderData.hpp" ++#include "classfile/classLoaderData.inline.hpp" ++#include "classfile/symbolTable.hpp" ++#include "classfile/systemDictionary.hpp" ++#include "compiler/compilationPolicy.hpp" ++#include "compiler/compileBroker.hpp" ++#include "jprofilecache/jitProfileCacheClass.hpp" ++#include "jprofilecache/jitProfileCacheUtils.hpp" ++#include "jprofilecache/jitProfileClassChain.hpp" ++#include "logging/log.hpp" ++#include "logging/logStream.hpp" ++#include "oops/method.hpp" ++#include "oops/method.inline.hpp" ++#include "oops/typeArrayKlass.hpp" ++#include "runtime/arguments.hpp" ++#include "runtime/deoptimization.hpp" ++#include "runtime/handles.inline.hpp" ++#include "runtime/javaCalls.hpp" ++#include "runtime/mutexLocker.hpp" ++#include "runtime/os.hpp" ++#include "runtime/thread.hpp" ++#include "runtime/atomic.hpp" ++ ++ProfileCacheMethodHold* MethodHolderIterator::next() { ++ ProfileCacheMethodHold* next_holder = _current_method_hold->next(); ++ if (next_holder != nullptr) { ++ _current_method_hold = next_holder; ++ return _current_method_hold; ++ } ++ while (_holder_index > 0) { ++ _holder_index--; ++ ProfileCacheClassChain::ProfileCacheClassChainEntry* entry = _profile_cache_class_chain->at(_holder_index); ++ if (entry->method_holder() != nullptr) { ++ _current_method_hold = entry->method_holder(); ++ return _current_method_hold; ++ } ++ } ++ _current_method_hold = nullptr; ++ return _current_method_hold; ++} ++ ++bool ProfileCacheClassChain::ProfileCacheClassChainEntry::is_all_initialized() { ++ int len = resolved_klasses()->length(); ++ // if resolved klass is empty return false ++ if (len == 0) { ++ return false; ++ } ++ for (int i = 0; i < len; i++) { ++ InstanceKlass* k = resolved_klasses()->at(i); ++ if (k != nullptr && k->is_not_initialized() && !k->is_in_error_state() ) { ++ return false; ++ } ++ } ++ return true; ++} ++ ++bool ProfileCacheClassChain::ProfileCacheClassChainEntry::contains_redefined_class() { ++ int len = resolved_klasses()->length(); ++ for (int i = 0; i < len; i++) { ++ InstanceKlass* k = resolved_klasses()->at(i); ++ if (k != nullptr && k->has_been_redefined()) { ++ ResourceMark rm; ++ log_warning(jprofilecache)("[JitProfileCache] WARNING: ignore redefined class after API" ++ " triggerPrecompilation : %s:%s@%s.", class_name()->as_C_string(), ++ class_loader_name()->as_C_string(), class_path()->as_C_string()); ++ return true; ++ } ++ } ++ return false; ++} ++ ++InstanceKlass* ProfileCacheClassChain::ProfileCacheClassChainEntry::get_first_uninitialized_klass() { ++ int len = resolved_klasses()->length(); ++ for (int i = 0; i < len; i++) { ++ InstanceKlass* k = resolved_klasses()->at(i); ++ if (k != nullptr && k->is_not_initialized()) { ++ return k; ++ } ++ } ++ return nullptr; ++} ++ ++ProfileCacheClassChain::ProfileCacheClassChain(unsigned int size) ++ : _class_chain_inited_index(-1), ++ _loaded_class_index(-1), ++ _length(size), ++ _state(NOT_INITED), ++ _entries(new ProfileCacheClassChainEntry[size]), ++ _init_timestamp(), ++ _last_timestamp(), ++ _deopt_index(-1), ++ _deopt_cur_holder(nullptr), ++ _has_unmarked_compiling_flag(false) { ++ _init_timestamp.update(); ++ _last_timestamp.update(); ++ try_transition_to_state(INITED); ++} ++ ++ProfileCacheClassChain::~ProfileCacheClassChain() { ++ delete[] _entries; ++} ++ ++const char* ProfileCacheClassChain::get_state(ClassChainState state) { ++ switch (state) { ++ case NOT_INITED: ++ return "not init"; ++ case INITED: ++ return "inited"; ++ 
case PRE_PROFILECACHE: ++ return "notify precompile"; ++ case PROFILECACHE_COMPILING: ++ return "precompiling"; ++ case PROFILECACHE_DONE: ++ return "precompile done"; ++ case PROFILECACHE_ERROR_STATE: ++ return "profilecache error state"; ++ } ++ assert(false, "invalid state"); ++ return nullptr; ++} ++ ++bool ProfileCacheClassChain::try_transition_to_state(ClassChainState new_state) { ++ ClassChainState old_state = current_state(); ++ if (old_state == new_state) { ++ log_warning(jprofilecache)("JProfileCache [WARNING]: profilecache state has already been %s Doesn't need transferred to %s", ++ get_state(old_state), get_state(new_state)); ++ return true; ++ } ++ bool can_transfer = false; ++ switch (new_state) { ++ case PROFILECACHE_ERROR_STATE: ++ can_transfer = true; ++ break; ++ default: ++ if (new_state == old_state + 1) { ++ can_transfer = true; ++ } ++ break; ++ } ++ if (can_transfer) { ++ if (Atomic::cmpxchg((jint*)&_state, (jint)old_state, (jint)new_state) == old_state) { ++ return true; ++ } else { ++ log_warning(jprofilecache)("JProfileCache [WARNING]: failed to transfer profilecache state from %s to %s, conflict with other operation", ++ get_state(old_state), get_state(new_state)); ++ return false; ++ } ++ } else { ++ log_warning(jprofilecache)("JProfileCache [WARNING]: can not transfer profilecache state from %s to %s", ++ get_state(old_state), get_state(new_state)); ++ return false; ++ } ++} ++ ++void ProfileCacheClassChain::mark_loaded_class(InstanceKlass* k, ProfileCacheClassEntry* class_entry) { ++ Symbol* class_name = k->name(); ++ unsigned int crc32 = k->crc32(); ++ unsigned int size = k->bytes_size(); ++ ++ if (!can_record_class()) { ++ return; ++ } ++ ++ if (class_entry == nullptr) { ++ return; ++ } ++ int chain_index = class_entry->chain_offset(); ++ ProfileCacheClassHolder* holder = class_entry->find_class_holder(size, crc32); ++ if (holder != nullptr) { ++ if (holder->resolved()) { ++ handle_duplicate_class(k, chain_index); ++ return; ++ } else { ++ resolve_class_methods(k, holder, chain_index); ++ } ++ } else { ++ ResourceMark rm; ++ log_debug(jprofilecache)("[JitProfileCache] DEBUG : class %s is not in profile", ++ k->name()->as_C_string()); ++ } ++ ++ update_class_chain(k, chain_index); ++} ++ ++void ProfileCacheClassChain::handle_duplicate_class(InstanceKlass *k, int chain_index) { ++ Thread *const t = Thread::current(); ++ if (!t->is_super_class_resolution_active()) { ++ assert(k->is_not_initialized(), "Invalid klass state"); ++ assert(t->is_Java_thread(), "Thread type mismatch"); ++ ResourceMark rm; ++ log_warning(jprofilecache)("[JitProfileCache] WARNING : Duplicate load class %s at index %d", ++ k->name()->as_C_string(), chain_index); ++ } ++} ++ ++void ProfileCacheClassChain::resolve_class_methods(InstanceKlass* k, ProfileCacheClassHolder* holder, int chain_index) { ++ MutexLocker mu(ProfileCacheClassChain_lock); ++ int methods = k->methods()->length(); ++ for (int index = 0; index < methods; index++) { ++ Method* m = k->methods()->at(index); ++ resolve_method_info(m, holder); ++ } ++ { ++ ResourceMark rm; ++ log_debug(jprofilecache)("[JitProfileCache] DEBUG : class %s at index %d method_list has been recorded", ++ k->name()->as_C_string(), chain_index); ++ } ++ holder->set_resolved(); ++} ++ ++void ProfileCacheClassChain::update_class_chain(InstanceKlass* k, int chain_index) { ++ MutexLocker mu(ProfileCacheClassChain_lock); ++ assert(chain_index >= 0 && chain_index <= length(), "index out of bound"); ++ assert(loaded_index() >= class_chain_inited_index(), 
"loaded index must larger than inited index"); ++ ProfileCacheClassChainEntry* chain_entry = &_entries[chain_index]; ++ ++ // check class state is skip or init return ++ if (chain_entry->is_skipped()) { ++ ResourceMark rm; ++ char* class_name = k->name()->as_C_string(); ++ int index = chain_index; ++ return; ++ } else if (chain_entry->is_inited()) { ++ return; ++ } ++ // set class reserved ++ chain_entry->resolved_klasses()->append(k); ++ Thread* thread = Thread::current(); ++ chain_entry->method_keep_holders()->append(JNIHandles::make_global(Handle(thread, k->klass_holder()))); ++ ++ chain_entry->set_loaded(); ++ ++ if (chain_index == loaded_index() + 1) { ++ update_loaded_index(chain_index); ++ } ++} ++ ++void ProfileCacheClassChain::add_method_at_index(ProfileCacheMethodHold* mh, int index) { ++ assert(index >= 0 && index < length(), "out of bound"); ++ ProfileCacheClassChainEntry* entry = &_entries[index]; ++ entry->add_method_holder(mh); ++} ++ ++void ProfileCacheClassChain::update_loaded_index(int index) { ++ assert(index >= 0 && index < length(), "out of bound"); ++ while (index < length() && !_entries[index].is_not_loaded()) { ++ index++; ++ } ++ set_loaded_index(index - 1); ++} ++ ++void ProfileCacheClassChain::compile_methodholders_queue(Stack& compile_queue) { ++ while (!compile_queue.is_empty()) { ++ ProfileCacheMethodHold* pmh = compile_queue.pop(); ++ compile_method(pmh); ++ Thread* THREAD = Thread::current(); ++ if (HAS_PENDING_EXCEPTION) { ++ ResourceMark rm; ++ log_warning(jprofilecache)("[JitProfileCache] WARNING: Exceptions happened in compiling %s", ++ pmh->method_name()->as_C_string()); ++ CLEAR_PENDING_EXCEPTION; ++ continue; ++ } ++ } ++} ++ ++void ProfileCacheClassChain::precompilation() { ++ Thread* THREAD = Thread::current(); ++ if (!try_transition_to_state(PROFILECACHE_COMPILING)) { ++ log_warning(jprofilecache)("JProfileCache [WARNING]: The compilation cannot be started in the current state"); ++ return; ++ } ++ ++ bool cancel_precompilation = false; ++ for ( int index = 0; index < length(); index++ ) { ++ if (cancel_precompilation) { ++ break; ++ } ++ InstanceKlass* klass = nullptr; ++ Stack compile_queue; ++ { ++ MutexLocker mu(ProfileCacheClassChain_lock); ++ ProfileCacheClassChainEntry *entry = &_entries[index]; ++ switch(entry->class_state()) { ++ case ProfileCacheClassChainEntry::_not_loaded: ++ // if class not load before skip ++ entry->set_skipped(); ++ { ++ ResourceMark rm; ++ char* class_name = entry->class_name()->as_C_string(); ++ char* class_loader_name = entry->class_loader_name()->as_C_string(); ++ char* class_path = entry->class_path()->as_C_string(); ++ } ++ case ProfileCacheClassChainEntry::_load_skipped: ++ break; ++ case ProfileCacheClassChainEntry::_class_loaded: ++ klass = entry->get_first_uninitialized_klass(); ++ entry->set_inited(); ++ case ProfileCacheClassChainEntry::_class_inited: ++ if (!entry->contains_redefined_class()){ ++ ProfileCacheMethodHold* mh = entry->method_holder(); ++ while (mh != nullptr) { ++ compile_queue.push(mh); ++ mh = mh->next(); ++ } ++ } ++ break; ++ default: ++ { ++ ResourceMark rm; ++ log_error(jprofilecache)("[JitProfileCache] ERROR: class %s has an invalid state %d", ++ entry->class_name()->as_C_string(), ++ entry->class_state()); ++ return; ++ } ++ } ++ } ++ if (klass != nullptr) { ++ assert(THREAD->is_Java_thread(), "sanity check"); ++ klass->initialize((JavaThread *)THREAD); ++ if (HAS_PENDING_EXCEPTION) { ++ Symbol *loader = JitProfileCacheUtils::get_class_loader_name(klass->class_loader_data()); ++ 
++    {
++      MutexLocker mu(ProfileCacheClassChain_lock);
++      refresh_indexes();
++      if (index > class_chain_inited_index()) {
++        cancel_precompilation = true;
++      }
++    }
++
++    // add the collected method holders to the compile queue and precompile them
++    compile_methodholders_queue(compile_queue);
++  }
++}
++
++bool ProfileCacheClassChain::compile_method(ProfileCacheMethodHold* mh) {
++  JavaThread* t = JavaThread::current();
++  methodHandle m(t, mh->resolved_method());
++  if (m() == nullptr || m->compiled_by_jprofilecache() || m->has_compiled_code()) {
++    return false;
++  }
++
++  InstanceKlass* klass = m->constants()->pool_holder();
++
++  // bail out if the holder klass is not initialized yet
++  if (!klass->is_initialized()) {
++    return false;
++  }
++
++  m->set_compiled_by_jprofilecache(true);
++  m->set_jpc_method_holder(mh);
++  int bci = InvocationEntryBci;
++
++  // submit the compilation request
++  bool ret = JitProfileCacheUtils::commit_compilation(m, mh->compile_level(), bci, t);
++  if (ret) {
++    ResourceMark rm;
++    log_info(jprofilecache)("[JitProfileCache] method %s successfully compiled",
++                            m->name_and_sig_as_C_string());
++  }
++  return ret;
++}
++
++void ProfileCacheClassChain::refresh_indexes() {
++  assert_lock_strong(ProfileCacheClassChain_lock);
++  int loaded = loaded_index();
++  int inited = class_chain_inited_index();
++  for (int i = inited + 1; i < length(); i++) {
++    ProfileCacheClassChainEntry* e = &_entries[i];
++    int len = e->resolved_klasses()->length();
++    if (e->is_not_loaded()) {
++      assert(len == 0, "wrong state");
++    }
++    if (e->is_loaded()) {
++      assert(len > 0, "class init chain entry state error");
++      if (e->is_all_initialized()) {
++        e->set_inited();
++      }
++    }
++    if (e->is_loaded() && i == loaded + 1) {
++      loaded = i;
++    } else if (e->is_inited() && i == inited + 1) {
++      loaded = i;
++      inited = i;
++    } else if (e->is_skipped()) {
++      if (i == loaded + 1) {
++        loaded = i;
++      }
++      if (i == inited + 1) {
++        inited = i;
++      }
++    } else {
++      break;
++    }
++  }
++  assert(loaded >= inited, "loaded index must not be less than inited index");
++  set_loaded_index(loaded);
++  set_inited_index(inited);
++}
++
++void ProfileCacheClassChain::unload_class() {
++  assert(SafepointSynchronize::is_at_safepoint(), "must be in safepoint");
++  for (int i = 0; i < length(); i++) {
++    ProfileCacheClassChainEntry* entry = this->at(i);
++    GrowableArray<InstanceKlass*>* array = entry->resolved_klasses();
++    GrowableArray<jobject>* keep_array = entry->method_keep_holders();
++    for (int j = 0; j < array->length(); j++) {
++      InstanceKlass* k = array->at(j);
++      if (k == nullptr) {
++        continue;
++      }
++
++      // skip entries that were never loaded
++      if (entry->is_not_loaded() || entry->is_skipped()) {
++        continue;
++      }
++
++      ClassLoaderData* data = k->class_loader_data();
++      // drop the class if its loader data is gone or no longer alive
++      if (data == nullptr || !data->is_alive()) {
++        // remove the class (and its keep-alive handle) from the chain
++        array->remove_at(j);
++        JNIHandles::destroy_global(keep_array->at(j));
++        keep_array->remove_at(j);
++        j--; // re-examine this slot: remove_at() shifted the next element into it
++      }
++    }
++    for (ProfileCacheMethodHold* holder = entry->method_holder(); holder != nullptr;
++         holder = holder->next()) {
++      // not compiled yet, or already cleared
++      if (holder->resolved_method() == nullptr) {
++        continue;
++      }
++      if (!holder->is_alive()) {
++        // the holder's class has been unloaded;
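++        // clear the stale Method* so nothing dereferences freed metadata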
++ holder->set_resolved_method(nullptr); ++ } ++ } ++ } ++} ++ ++ProfileCacheMethodHold* ProfileCacheClassChain::resolve_method_info(Method* method, ProfileCacheClassHolder* holder) { ++ ProfileCacheMethodHold* mh = nullptr; ++ // find method ++ for (int i = 0; i < holder->method_list()->length(); i++) { ++ ProfileCacheMethodHold* current_mh = holder->method_list()->at(i); ++ if (current_mh->is_method_match(method)) { ++ mh = current_mh; ++ break; ++ } ++ } ++ if (mh == nullptr) { ++ return mh; ++ } else if (mh->resolved_method() == nullptr) { ++ mh->set_resolved_method(method); ++ method->set_jpc_method_holder(mh); ++ return mh; ++ } else { ++ log_info(jprofilecache)("[JitProfileCache] method %s is resolved again", ++ method->name_and_sig_as_C_string()); ++ return mh; ++ } ++} ++ ++void ProfileCacheClassChain::preload_class_in_constantpool() { ++ int index = 0; ++ int klass_index = 0; ++ while (true) { ++ InstanceKlass* current_k = nullptr; ++ { ++ MutexLocker mu(ProfileCacheClassChain_lock); ++ if (index == length()) { ++ break; ++ } ++ ProfileCacheClassChain::ProfileCacheClassChainEntry* e = this->at(index); ++ GrowableArray* array = e->resolved_klasses(); ++ assert(array != nullptr, "should not be nullptr"); ++ if (e->is_skipped() || e->is_not_loaded() || klass_index >= array->length()) { ++ index++; ++ klass_index = 0; ++ continue; ++ } ++ current_k = array->at(klass_index); ++ } ++ ++ if (current_k != nullptr) { ++ current_k->constants()->preload_jprofilecache_classes(JavaThread::current()); ++ log_info(jprofilecache)("[JitProfileCache] class %s is preloaded", ++ current_k->internal_name()); ++ } ++ klass_index++; ++ } ++} +\ No newline at end of file +diff --git a/src/hotspot/share/jprofilecache/jitProfileClassChain.hpp b/src/hotspot/share/jprofilecache/jitProfileClassChain.hpp +new file mode 100644 +index 000000000..dbed77d11 +--- /dev/null ++++ b/src/hotspot/share/jprofilecache/jitProfileClassChain.hpp +@@ -0,0 +1,231 @@ ++/* ++ * Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++ * Copyright (c) 2019 Alibaba Group Holding Limited. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#ifndef SHARED_VM_JPROFILECACHE_JITPROFILECLASSCHAIN_HPP ++#define SHARED_VM_JPROFILECACHE_JITPROFILECLASSCHAIN_HPP ++ ++#include "jprofilecache/jitProfileCacheHolders.hpp" ++#include "runtime/jniHandles.hpp" ++ ++class ProfileCacheClassChain; ++ ++class MethodHolderIterator { ++public: ++ MethodHolderIterator() ++ : _profile_cache_class_chain(nullptr), ++ _current_method_hold(nullptr), ++ _holder_index(-1) { ++ } ++ ++ MethodHolderIterator(ProfileCacheClassChain* chain, ProfileCacheMethodHold* holder, int index) ++ : _profile_cache_class_chain(chain), ++ _current_method_hold(holder), ++ _holder_index(index) { ++ } ++ ++ ~MethodHolderIterator() { } ++ ++ ProfileCacheMethodHold* operator*() { return _current_method_hold; } ++ ++ int index() { return _holder_index; } ++ ++ bool initialized() { return _profile_cache_class_chain != nullptr; } ++ ++ ProfileCacheMethodHold* next(); ++ ++private: ++ ProfileCacheClassChain* _profile_cache_class_chain; ++ ProfileCacheMethodHold* _current_method_hold; ++ int _holder_index; // current holder's position in ProfileCacheClassChain ++}; ++ ++class ProfileCacheClassEntry; ++ ++class ProfileCacheClassChain : public CHeapObj { ++public: ++ class ProfileCacheClassChainEntry : public CHeapObj { ++ public: ++ enum ClassState { ++ _not_loaded = 0, ++ _load_skipped, ++ _class_loaded, ++ _class_inited ++ }; ++ ++ ProfileCacheClassChainEntry() ++ : _class_name(nullptr), ++ _class_loader_name(nullptr), ++ _class_path(nullptr), ++ _class_state(_not_loaded), ++ _method_holder(nullptr), ++ _resolved_klasses(new (mtClass) GrowableArray(1, mtClass)), ++ _method_keep_holders(new (mtClass) GrowableArray(1, mtClass)) { } ++ ++ ProfileCacheClassChainEntry(Symbol* class_name, Symbol* loader_name, Symbol* path) ++ : _class_name(class_name), ++ _class_loader_name(loader_name), ++ _class_path(path), ++ _class_state(_not_loaded), ++ _method_holder(nullptr), ++ _resolved_klasses(new (mtClass) GrowableArray(1, mtClass)), ++ _method_keep_holders(new (mtClass) GrowableArray(1, mtClass)) { } ++ ++ virtual ~ProfileCacheClassChainEntry() { ++ if(!_method_keep_holders->is_empty()) { ++ int len = _method_keep_holders->length(); ++ for (int i = 0; i < len; i++) { ++ JNIHandles::destroy_global(_method_keep_holders->at(i)); ++ } ++ } ++ } ++ ++ Symbol* class_name() const { return _class_name; } ++ Symbol* class_loader_name() const { return _class_loader_name; } ++ Symbol* class_path() const { return _class_path; } ++ void set_class_name(Symbol* name) { _class_name = name; } ++ void set_class_loader_name(Symbol* name) { _class_loader_name = name; } ++ void set_class_path(Symbol* path) { _class_path = path; } ++ ++ GrowableArray* resolved_klasses() ++ { return _resolved_klasses; } ++ ++ GrowableArray* method_keep_holders() ++ { return _method_keep_holders; } ++ ++ // entry state ++ bool is_not_loaded() const { return _class_state == _not_loaded; } ++ bool is_skipped() const { return _class_state == _load_skipped; } ++ bool is_loaded() const { return _class_state == _class_loaded; } ++ bool is_inited() const { return _class_state == _class_inited; } ++ void set_not_loaded() { _class_state = _not_loaded; } ++ void set_skipped() { _class_state = _load_skipped; } ++ void set_loaded() { _class_state = _class_loaded; } ++ void set_inited() { _class_state = _class_inited; } ++ ++ void set_class_state(int state) { _class_state = state;} ++ ++ int class_state() { return _class_state; } ++ ++ void add_method_holder(ProfileCacheMethodHold* h) { ++ h->set_next(_method_holder); ++ 
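++      // prepend: the most recently added holder becomes the list head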
_method_holder = h; ++ } ++ ++ bool is_all_initialized(); ++ ++ bool contains_redefined_class(); ++ ++ InstanceKlass* get_first_uninitialized_klass(); ++ ++ ProfileCacheMethodHold* method_holder() { return _method_holder; } ++ ++ private: ++ ++ Symbol* _class_name; ++ Symbol* _class_loader_name; ++ Symbol* _class_path; ++ int _class_state; ++ ++ ProfileCacheMethodHold* _method_holder; ++ GrowableArray* _resolved_klasses; ++ GrowableArray* _method_keep_holders; ++ }; ++ ++ ProfileCacheClassChain(unsigned int size); ++ virtual ~ProfileCacheClassChain(); ++ ++ enum ClassChainState { ++ NOT_INITED = 0, ++ INITED = 1, ++ PRE_PROFILECACHE = 2, ++ PROFILECACHE_COMPILING = 3, ++ PROFILECACHE_DONE = 4, ++ PROFILECACHE_ERROR_STATE = 5 ++ }; ++ const char* get_state(ClassChainState state); ++ bool try_transition_to_state(ClassChainState new_state); ++ ClassChainState current_state() { return _state; } ++ ++ int class_chain_inited_index() const { return _class_chain_inited_index; } ++ int loaded_index() const { return _loaded_class_index; } ++ int length() const { return _length; } ++ ++ void set_loaded_index(int index) { _loaded_class_index = index; } ++ void set_length(int length) { _length = length; } ++ void set_inited_index(int index) { _class_chain_inited_index = index; } ++ ++ bool can_record_class() { ++ return _state == INITED || _state == PRE_PROFILECACHE || _state == PROFILECACHE_COMPILING; ++ } ++ ++ void mark_loaded_class(InstanceKlass* klass, ProfileCacheClassEntry* class_entry); ++ ++ ProfileCacheClassChainEntry* at(int index) { return &_entries[index]; } ++ ++ void refresh_indexes(); ++ ++ void precompilation(); ++ ++ void unload_class(); ++ ++ // count method ++ void add_method_at_index(ProfileCacheMethodHold* mh, int index); ++ ++ bool compile_method(ProfileCacheMethodHold* mh); ++ ++ void preload_class_in_constantpool(); ++ ++private: ++ int _class_chain_inited_index; ++ int _loaded_class_index; ++ int _length; ++ ++ volatile ClassChainState _state; ++ ++ ProfileCacheClassChainEntry* _entries; ++ ++ TimeStamp _init_timestamp; ++ TimeStamp _last_timestamp; ++ ++ int _deopt_index; ++ ProfileCacheMethodHold* _deopt_cur_holder; ++ ++ bool _has_unmarked_compiling_flag; ++ ++ void handle_duplicate_class(InstanceKlass* k, int chain_index); ++ ++ void resolve_class_methods(InstanceKlass* k, ProfileCacheClassHolder* holder, int chain_index); ++ ++ void update_class_chain(InstanceKlass* ky, int chain_index); ++ ++ void compile_methodholders_queue(Stack& compile_queue); ++ ++ void update_loaded_index(int index); ++ ++ ProfileCacheMethodHold* resolve_method_info(Method* method, ++ ProfileCacheClassHolder* holder); ++}; ++ ++#endif // SHARED_VM_JPROFILECACHE_JITPROFILECLASSCHAIN_HPP +\ No newline at end of file +diff --git a/src/hotspot/share/jprofilecache/jitProfileRecord.cpp b/src/hotspot/share/jprofilecache/jitProfileRecord.cpp +new file mode 100644 +index 000000000..a7b8471fb +--- /dev/null ++++ b/src/hotspot/share/jprofilecache/jitProfileRecord.cpp +@@ -0,0 +1,602 @@ ++/* ++* Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++* Copyright (c) 2019 Alibaba Group Holding Limited. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. 
++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#include "classfile/classLoaderData.hpp" ++#include "classfile/classLoaderData.inline.hpp" ++#include "classfile/classLoader.hpp" ++#include "code/compiledMethod.hpp" ++#include "jprofilecache/jitProfileCacheFileParser.hpp" ++#include "jprofilecache/jitProfileCacheUtils.hpp" ++#include "jprofilecache/jitProfileRecord.hpp" ++#include "libadt/dict.hpp" ++#include "logging/log.hpp" ++#include "logging/logStream.hpp" ++#include "runtime/arguments.hpp" ++#include "runtime/globals.hpp" ++#include "runtime/mutexLocker.hpp" ++ ++// define offset ++#define PROFILECACHE_VERSION_OFFSET 0 ++#define PROFILECACHE_MAGIC_NUMBER_OFFSET 4 ++#define FILE_SIZE_OFFSET 8 ++#define PROFILECACHE_CRC32_OFFSET 12 ++#define APPID_OFFSET 16 ++#define MAX_SYMBOL_LENGTH_OFFSET 20 ++#define RECORD_COUNT_OFFSET 24 ++#define PROFILECACHE_TIME_OFFSET 28 ++ ++#define HEADER_SIZE 36 ++ ++// define width ++#define RECORE_VERSION_WIDTH (PROFILECACHE_MAGIC_NUMBER_OFFSET - PROFILECACHE_VERSION_OFFSET) ++#define RECORE_MAGIC_WIDTH (FILE_SIZE_OFFSET - PROFILECACHE_MAGIC_NUMBER_OFFSET) ++#define FILE_SIZE_WIDTH (PROFILECACHE_CRC32_OFFSET - FILE_SIZE_OFFSET) ++#define RECORE_CRC32_WIDTH (APPID_OFFSET - PROFILECACHE_CRC32_OFFSET) ++#define RECORE_APPID_WIDTH (MAX_SYMBOL_LENGTH_OFFSET - APPID_OFFSET) ++#define RECORE_MAX_SYMBOL_LENGTH_WIDTH (RECORD_COUNT_OFFSET - MAX_SYMBOL_LENGTH_OFFSET) ++#define RECORD_COUNTS_WIDTH (PROFILECACHE_TIME_OFFSET - RECORD_COUNT_OFFSET) ++#define RECORE_TIME_WIDTH (HEADER_SIZE - PROFILECACHE_TIME_OFFSET) ++ ++// default value ++#define RECORE_FILE_DEFAULT_NUMBER 0 ++#define RECORE_CRC32_DEFAULT_NUMBER 0 ++ ++#define JVM_DEFINE_CLASS_PATH "_JVM_DefineClass_" ++ ++// auto jprofile ++#define AUTO_TEMP_JPCFILE_NAME "jprofilecache.profile.tmp" ++#define AUTO_JPCFILE_NAME "jprofilecache.profile" ++ ++const char* JitProfileRecorder::_auto_jpcfile_name = nullptr; ++const char* JitProfileRecorder::_auto_temp_jpcfile_name = nullptr; ++FILE* JitProfileRecorder::_auto_jpcfile_filepointer = nullptr; ++ ++JitProfileRecorder::JitProfileRecorder(): ++ _max_symbol_length(0), ++ _pos(0), ++ _class_init_order_num(-1), ++ _flushed(false), ++ _record_file_name(nullptr), ++ _profilelog(nullptr), ++ _recorder_state(NOT_INIT), ++ _class_init_list(nullptr), ++ _init_list_tail_node(nullptr), ++ _profile_record_dict(nullptr){} ++ ++JitProfileRecorder::~JitProfileRecorder() { ++ if (!ProfilingCacheFile) { ++ os::free((void*)logfile_name()); ++ } ++ delete _class_init_list; ++} ++ ++#define PROFILE_RECORDER_HT_SIZE 10240 ++ ++void JitProfileRecorder::set_logfile_name(const char* name) { ++ _record_file_name = make_log_name(name, nullptr); ++} ++ ++void JitProfileRecorder::set_jpcfile_filepointer(FILE* file) { ++ _auto_jpcfile_filepointer = file; ++} ++ ++const char* 
JitProfileRecorder::auto_jpcfile_name() {
++  if (_auto_jpcfile_name == nullptr) {
++    _auto_jpcfile_name = make_log_name(AUTO_JPCFILE_NAME, JProfilingCacheAutoArchiveDir);
++  }
++  return _auto_jpcfile_name;
++}
++
++const char* JitProfileRecorder::auto_temp_jpcfile_name() {
++  if (_auto_temp_jpcfile_name == nullptr) {
++    _auto_temp_jpcfile_name = make_log_name(AUTO_TEMP_JPCFILE_NAME, JProfilingCacheAutoArchiveDir);
++  }
++  return _auto_temp_jpcfile_name;
++}
++
++#define PROFILECACHE_PID_BUFFER_SIZE 100
++
++void JitProfileRecorder::init() {
++  assert(_recorder_state == NOT_INIT, "JitProfileRecorder state error");
++  if (JProfilingCacheCompileAdvance) {
++    log_error(jprofilecache)("[JitProfileCache] ERROR: JProfilingCacheCompileAdvance and JProfilingCacheRecording cannot be enabled at the same time");
++    _recorder_state = IS_ERR;
++    return;
++  }
++  if (!ProfileInterpreter) {
++    log_error(jprofilecache)("[JitProfileCache] ERROR: ProfileInterpreter must be enabled");
++    _recorder_state = IS_ERR;
++    return;
++  }
++  // class unloading must be disabled
++  if (ClassUnloading) {
++    log_error(jprofilecache)("[JitProfileCache] ERROR: ClassUnloading must be disabled");
++    _recorder_state = IS_ERR;
++    return;
++  }
++
++  if (UseG1GC && ClassUnloadingWithConcurrentMark) {
++    log_error(jprofilecache)("[JitProfileCache] ERROR: with G1 GC, ClassUnloadingWithConcurrentMark must be disabled");
++    _recorder_state = IS_ERR;
++    return;
++  }
++
++  // profile file name
++  if (JProfilingCacheAutoArchiveDir != nullptr) {
++    _record_file_name = auto_temp_jpcfile_name();
++  } else if (ProfilingCacheFile == nullptr) {
++    char* buf = (char*)os::malloc(PROFILECACHE_PID_BUFFER_SIZE, mtInternal);
++    char fmt[] = "jprofilecache_%p.profile";
++    Arguments::copy_expand_pid(fmt, sizeof(fmt), buf, PROFILECACHE_PID_BUFFER_SIZE);
++    _record_file_name = buf;
++  } else {
++    set_logfile_name(ProfilingCacheFile);
++  }
++
++  _class_init_list = new (mtInternal) LinkedListImpl<ClassSymbolEntry>();
++  _profile_record_dict = new JitProfileRecordDictionary(PROFILE_RECORDER_HT_SIZE);
++  _recorder_state = IS_OK;
++
++  log_debug(jprofilecache)("[JitProfileCache] begin collecting, log file is %s", logfile_name());
++}
++
++int JitProfileRecorder::assign_class_init_order(InstanceKlass* klass) {
++  // ignore hidden (anonymous) classes
++  if (klass->is_hidden()) {
++    return -1;
++  }
++  Symbol* record_name = klass->name();
++  Symbol* record_path = klass->source_file_path();
++  Symbol* record_loader_name = JitProfileCacheUtils::get_class_loader_name(klass->class_loader_data());
++  if (record_name == nullptr || record_name->utf8_length() == 0) {
++    return -1;
++  }
++  MutexLocker mu(JitProfileRecorder_lock, Mutex::_no_safepoint_check_flag);
++  if (_init_list_tail_node == nullptr) {
++    _class_init_list->add(ClassSymbolEntry(record_name, record_loader_name, record_path));
++    _init_list_tail_node = _class_init_list->head();
++  } else {
++    _class_init_list->insert_after(ClassSymbolEntry(record_name, record_loader_name, record_path),
++                                   _init_list_tail_node);
++    _init_list_tail_node = _init_list_tail_node->next();
++  }
++  _class_init_order_num++;
++#ifndef PRODUCT
++  klass->set_initialize_order(_class_init_order_num);
++#endif
++  return _class_init_order_num;
++}
++
++void JitProfileRecorder::add_method(Method* method, int method_bci) {
++  MutexLocker mu(JitProfileRecorder_lock, Mutex::_no_safepoint_check_flag);
++  // once flushed, stop accepting new methods
++  if (is_flushed()) {
++    return;
++  }
++  // OSR compilations are not recorded
++  if (method_bci != InvocationEntryBci) {
++    return;
++  }
++  assert(is_valid(),
"JProfileCache state must be OK"); ++ unsigned int hash = compute_hash(method); ++ dict()->add_method(hash, method, method_bci); ++} ++ ++void JitProfileRecorder::update_max_symbol_length(int len) { ++ if (len > _max_symbol_length) { ++ _max_symbol_length = len; ++ } ++} ++ ++JitProfileRecordDictionary::JitProfileRecordDictionary(unsigned int size) ++ : Hashtable(size, sizeof(JitProfileRecorderEntry)), ++ _count(0) { ++ // do nothing ++} ++ ++JitProfileRecordDictionary::~JitProfileRecordDictionary() { ++ free_buckets(); ++} ++ ++JitProfileRecorderEntry* JitProfileRecordDictionary::new_entry(unsigned int hash, Method* method) { ++ JitProfileRecorderEntry* entry = (JitProfileRecorderEntry*)Hashtable::new_entry(hash, method); ++ entry->init(); ++ return entry; ++} ++ ++JitProfileRecorderEntry* JitProfileRecordDictionary::add_method(unsigned int method_hash, Method* method, int bci) { ++ assert_lock_strong(JitProfileRecorder_lock); ++ int target_bucket = hash_to_index(method_hash); ++ JitProfileRecorderEntry* record_entry = find_entry(method_hash, method); ++ if (record_entry != nullptr) { ++ return record_entry; ++ } ++ // add method entry ++ record_entry = new_entry(method_hash, method); ++ record_entry->set_bci(bci); ++ record_entry->set_order(count()); ++ add_entry(target_bucket, record_entry); ++ _count++; ++ ++ log_debug(jprofilecache)("[JitProfileCache] Record method %s", method->name_and_sig_as_C_string()); ++ ++ return record_entry; ++} ++ ++JitProfileRecorderEntry* JitProfileRecordDictionary::find_entry(unsigned int hash, Method* method) { ++ int index = hash_to_index(hash); ++ for (JitProfileRecorderEntry* p = bucket(index); p != nullptr; p = p->next()) { ++ if (p->literal() == method) { ++ return p; ++ } ++ } ++ return nullptr; ++} ++ ++void JitProfileRecordDictionary::free_entry(JitProfileRecorderEntry* entry) { ++ Hashtable::free_entry(entry); ++} ++ ++#define WRITE_U1_INTERVAL 1 ++#define WRITE_U4_INTERVAL 4 ++#define OVERWRITE_U4_INTERVAL 4 ++ ++static char record_buf[12]; ++void JitProfileRecorder::write_u1(u1 value) { ++ *(u1*)record_buf = value; ++ _profilelog->write(record_buf, WRITE_U1_INTERVAL); ++ _pos += WRITE_U1_INTERVAL; ++} ++ ++void JitProfileRecorder::write_u4(u4 value) { ++ *(u4*)record_buf = value; ++ _profilelog->write(record_buf, WRITE_U4_INTERVAL); ++ _pos += WRITE_U4_INTERVAL; ++} ++ ++void JitProfileRecorder::write_data_layout(DataLayout* value) { ++ int size = value->size_in_bytes(); ++ write_u4(size); ++ _profilelog->write((char*)value, size); ++ _pos += size; ++} ++ ++void JitProfileRecorder::overwrite_u4(u4 value, unsigned int offset) { ++ *(u4*)record_buf = value; ++ _profilelog->write(record_buf, OVERWRITE_U4_INTERVAL, offset); ++} ++ ++void JitProfileRecorder::write_string(const char* src, size_t len) { ++ assert(src != nullptr && len != 0, "empty string is not allowed"); ++ _profilelog->write(src, len); ++ _profilelog->write("\0", 1); ++ _pos += len + 1; ++ update_max_symbol_length((int)len); ++} ++ ++#define JVM_DEFINE_CLASS_PATH "_JVM_DefineClass_" ++ ++#define CRC32_BUF_SIZE 1024 ++static char crc32_buf[CRC32_BUF_SIZE]; ++ ++int JitProfileRecorder::compute_crc32(randomAccessFileStream* fileStream) { ++ long old_position = (long)fileStream->tell(); ++ fileStream->seek(HEADER_SIZE, SEEK_SET); ++ int content_size = fileStream->fileSize() - HEADER_SIZE; ++ assert(content_size > 0, "sanity check"); ++ int loops = content_size / CRC32_BUF_SIZE; ++ int partial_chunk_size = content_size % CRC32_BUF_SIZE; ++ int crc = 0; ++ ++ for (int i = 0; i < loops; 
++i) { ++ fileStream->read(crc32_buf, CRC32_BUF_SIZE, 1); ++ crc = ClassLoader::crc32(crc, crc32_buf, CRC32_BUF_SIZE); ++ } ++ if (partial_chunk_size > 0) { ++ fileStream->read(crc32_buf, partial_chunk_size, 1); ++ crc = ClassLoader::crc32(crc, crc32_buf, partial_chunk_size); ++ } ++ fileStream->seek(old_position, SEEK_SET); ++ ++ return crc; ++} ++#undef CRC32_BUF_SIZE ++ ++static char header_buf[HEADER_SIZE]; ++void JitProfileRecorder::write_profilecache_header() { ++ assert(_profilelog->is_open(), ""); ++ ++ size_t offset = 0; ++ ++ *(unsigned int*)header_buf = version(); ++ _pos += RECORE_VERSION_WIDTH; ++ offset += RECORE_VERSION_WIDTH; ++ ++ *(unsigned int*)((char*)header_buf + offset) = JPROFILECACHE_MAGIC_NUMBER; ++ _pos += RECORE_MAGIC_WIDTH; ++ offset += RECORE_MAGIC_WIDTH; ++ ++ *(unsigned int*)((char*)header_buf + offset) = RECORE_FILE_DEFAULT_NUMBER; ++ _pos += RECORE_CRC32_WIDTH; ++ offset += RECORE_CRC32_WIDTH; ++ ++ *(unsigned int*)((char*)header_buf + offset) = RECORE_CRC32_DEFAULT_NUMBER; ++ _pos += RECORE_CRC32_WIDTH; ++ offset += RECORE_CRC32_WIDTH; ++ ++ *(unsigned int*)((char*)header_buf + offset) = CompilationProfileCacheAppID; ++ _pos += RECORE_APPID_WIDTH; ++ offset += RECORE_APPID_WIDTH; ++ ++ *(unsigned int*)((char*)header_buf + offset) = 0; ++ _pos += RECORE_MAX_SYMBOL_LENGTH_WIDTH; ++ offset += RECORE_MAX_SYMBOL_LENGTH_WIDTH; ++ ++ *(unsigned int*)((char*)header_buf + offset) = recorded_count(); ++ _pos += RECORD_COUNTS_WIDTH; ++ offset += RECORD_COUNTS_WIDTH; ++ ++ *(jlong*)((char*)header_buf + offset) = os::javaTimeMillis(); ++ _pos += RECORE_TIME_WIDTH; ++ offset += RECORE_TIME_WIDTH; ++ ++ _profilelog->write(header_buf, offset); ++} ++ ++void JitProfileRecorder::write_inited_class() { ++ assert(_profilelog->is_open(), "log file must be opened"); ++ ResourceMark rm; ++ unsigned int begin_position = _pos; ++ unsigned int size_anchor = begin_position; ++ ++ write_u4((u4)JPROFILECACHE_MAGIC_NUMBER); ++ write_u4((u4)class_init_count()); ++ ++ int cnt = 0; ++ const LinkedListNode* node = class_init_list()->head(); ++ while (node != nullptr) { ++ const ClassSymbolEntry* record_entry = node->peek(); ++ char* record_class_name = record_entry->class_name()->as_C_string(); ++ const char* record_class_loader_name = nullptr; ++ if (record_entry->class_loader_name() == nullptr) { ++ record_class_loader_name = "nullptr"; ++ } else { ++ record_class_loader_name = record_entry->class_loader_name()->as_C_string(); ++ } ++ const char* path = nullptr; ++ if (record_entry->path() == nullptr) { ++ path = JVM_DEFINE_CLASS_PATH; ++ } else { ++ path = record_entry->path()->as_C_string(); ++ } ++ write_string(record_class_name, strlen(record_class_name)); ++ write_string(record_class_loader_name, strlen(record_class_loader_name)); ++ write_string(path, strlen(path)); ++ node = node->next(); ++ cnt++; ++ } ++ assert(cnt == class_init_count(), "error happened in profile info record"); ++ unsigned int end_position = _pos; ++ unsigned int section_size = end_position - begin_position; ++ overwrite_u4(section_size, size_anchor); ++} ++ ++void JitProfileRecorder::write_profilecache_record(Method* method, int bci, int order) { ++ ResourceMark rm; ++ unsigned int begin_position = _pos; ++ unsigned int total_size = 0; ++ ConstMethod* const_method = method->constMethod(); ++ MethodCounters* method_counters = method->method_counters(); ++ InstanceKlass* klass = const_method->constants()->pool_holder(); ++ ++ unsigned int size_anchor = begin_position; ++ write_u4((u4)JPROFILECACHE_MAGIC_NUMBER); 
++ write_u4((u4)order); ++ ++ // record compilation type ++ u1 compilation_type = bci == -1 ? 0 : 1; ++ write_u1(compilation_type); ++ ++ // record method info ++ record_method_info(method, const_method, bci); ++ ++ // record class info ++ record_class_info(klass); ++ ++ // record method counters ++ if (method_counters != nullptr) { ++ write_u4((u4)method->interpreter_invocation_count()); ++ write_u4((u4)method_counters->interpreter_throwout_count()); ++ write_u4((u4)method_counters->invocation_counter()->raw_counter()); ++ write_u4((u4)method_counters->backedge_counter()->raw_counter()); ++ } else { ++ log_warning(jprofilecache)("[JitProfileCache] WARNING: the method counter is nullptr"); ++ write_u4((u4)0); ++ write_u4((u4)0); ++ write_u4((u4)0); ++ write_u4((u4)0); ++ } ++ ++ // write compile level ++ write_u1((u1)method->highest_comp_level()); ++ ++ write_method_profiledata(method->method_data()); ++ ++ unsigned int end_position = _pos; ++ unsigned int section_size = end_position - begin_position; ++ overwrite_u4(section_size, size_anchor); ++} ++ ++bool JitProfileRecorder::is_recordable_data(ProfileData* dp) { ++ return dp->is_BranchData() || dp->is_MultiBranchData(); ++} ++ ++ArgInfoData * JitProfileRecorder::get_ArgInfoData(MethodData* mdo) { ++ DataLayout* dp = mdo->extra_data_base(); ++ DataLayout* end = mdo->args_data_limit(); ++ for (; dp < end; dp = MethodData::next_extra(dp)) { ++ if (dp->tag() == DataLayout::arg_info_data_tag) ++ return new ArgInfoData(dp); ++ } ++ return nullptr; ++} ++ ++void JitProfileRecorder::write_method_profiledata(MethodData* mdo) { ++ if (mdo == nullptr) { ++ write_u4((u4)0); ++ return; ++ } ++ ProfileData* dp_src = mdo->first_data(); ++ int count = 0; ++ for (; mdo->is_valid(dp_src) ;dp_src = mdo->next_data(dp_src)) { ++ if (is_recordable_data(dp_src)) { ++ count++; ++ } ++ } ++ ArgInfoData * arg_info = get_ArgInfoData(mdo); ++ if (arg_info != nullptr) { count++; } ++ write_u4((u4)count); ++ if (arg_info != nullptr) { ++ write_data_layout((DataLayout*)(arg_info->dp())); ++ log_info(jprofilecache)("Record ArgInfoData of method: %s", ++ mdo->method()->name_and_sig_as_C_string()); ++ } ++ for (dp_src = mdo->first_data(); mdo->is_valid(dp_src) ;dp_src = mdo->next_data(dp_src)) { ++ if (is_recordable_data(dp_src)) { ++ DataLayout* dp = (DataLayout*)dp_src->dp(); ++ write_data_layout(dp); ++ log_info(jprofilecache)("Record ProfileData(Tag:%d) on bytecode(%d) of method: %s", ++ dp->tag(), dp_src->bci(), mdo->method()->name_and_sig_as_C_string()); ++ } ++ } ++} ++ ++void JitProfileRecorder::record_class_info(InstanceKlass* klass) { ++ char* record_class_name = klass->name()->as_C_string(); ++ Symbol* record_path_sym = klass->source_file_path(); ++ const char* record_path = nullptr; ++ if (record_path_sym != nullptr) { ++ record_path = record_path_sym->as_C_string(); ++ } else { ++ record_path = JVM_DEFINE_CLASS_PATH; ++ } ++ oop record_class_loader = klass->class_loader(); ++ const char* loader_name = nullptr; ++ if (record_class_loader != nullptr) { ++ loader_name = record_class_loader->klass()->name()->as_C_string(); ++ } else { ++ loader_name = "nullptr"; ++ } ++ write_string(record_class_name, strlen(record_class_name)); ++ write_string(loader_name, strlen(loader_name)); ++ write_string(record_path, strlen(record_path)); ++ write_u4((u4)klass->bytes_size()); ++ write_u4((u4)klass->crc32()); ++ write_u4((u4)0x00); ++} ++ ++void JitProfileRecorder::record_method_info(Method *method, ConstMethod* const_method, int bci) { ++ char* record_method_name = 
method->name()->as_C_string(); ++ write_string(record_method_name, strlen(record_method_name)); ++ char* record_method_sig = method->signature()->as_C_string(); ++ write_string(record_method_sig, strlen(record_method_sig)); ++ // first invoke init order ++ write_u4((u4)method->first_invoke_init_order()); ++ // bytecode size ++ write_u4((u4)const_method->code_size()); ++ ++#ifdef _LP64 ++ int record_method_hash = compute_universal_hash((char *)(const_method->code_base()), const_method->code_size()); ++ write_u4((u4)record_method_hash); ++ write_u4((u4)bci); ++ ++#endif ++} ++ ++void JitProfileRecorder::write_profilecache_footer() { ++} ++ ++void JitProfileRecorder::flush_record() { ++ MutexLocker mu(JitProfileRecorder_lock, Mutex::_no_safepoint_check_flag); ++ if (!is_valid() || is_flushed()) { ++ return; ++ } ++ set_flushed(true); ++ ++ // open randomAccessFileStream ++ if (JProfilingCacheAutoArchiveDir != nullptr) { ++ _profilelog = new (mtInternal) randomAccessFileStream(_auto_jpcfile_filepointer); ++ } else { ++ _profilelog = new (mtInternal) randomAccessFileStream(logfile_name(), "wb+"); ++ } ++ if (_profilelog == nullptr || !_profilelog->is_open()) { ++ log_error(jprofilecache)("[JitProfileCache] ERROR : open log file fail! path is %s", logfile_name()); ++ _recorder_state = IS_ERR; ++ return; ++ } ++ ++ // head section ++ write_profilecache_header(); ++ // write class init section ++ write_inited_class(); ++ // write method profile info ++ for (int index = 0; index < dict()->table_size(); index++) { ++ for (JitProfileRecorderEntry* entry = dict()->bucket(index); ++ entry != nullptr; ++ entry = entry->next()) { ++ write_profilecache_record(entry->literal(), entry->bci(), entry->order()); ++ } ++ } ++ // foot section ++ write_profilecache_footer(); ++ ++ // set file size ++ overwrite_u4((u4)_pos, FILE_SIZE_OFFSET); ++ // set max symbol length ++ overwrite_u4((u4)_max_symbol_length, MAX_SYMBOL_LENGTH_OFFSET); ++ // compute and set file's crc32 ++ int crc32 = JitProfileRecorder::compute_crc32(_profilelog); ++ overwrite_u4((u4)crc32, PROFILECACHE_CRC32_OFFSET); ++ ++ _profilelog->flush(); ++ // Auto jprofile makes a temp file to record. When recording is completed, ++ // temp file needs to rename to real jprofile filename and unlock. ++ if (JProfilingCacheAutoArchiveDir != nullptr) { ++ int res = ::rename(logfile_name(), auto_jpcfile_name()); ++ if (res != 0) { ++ delete _profilelog; ++ _profilelog = nullptr; ++ ::unlink(logfile_name()); ++ log_error(jprofilecache)("[JitProfileCache] Autogenerate jprofilecache file failed to rename!"); ++ return; ++ } ++ } ++ ++ // close fd (also unlock file) ++ delete _profilelog; ++ _profilelog = nullptr; ++ ++ log_info(jprofilecache)("[JitProfileCache] Profile information output completed. File: %s", logfile_name()); ++} +\ No newline at end of file +diff --git a/src/hotspot/share/jprofilecache/jitProfileRecord.hpp b/src/hotspot/share/jprofilecache/jitProfileRecord.hpp +new file mode 100644 +index 000000000..1fc80c5d4 +--- /dev/null ++++ b/src/hotspot/share/jprofilecache/jitProfileRecord.hpp +@@ -0,0 +1,213 @@ ++/* ++ * Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++ * Copyright (c) 2019 Alibaba Group Holding Limited. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. 
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
++ * or visit www.oracle.com if you need additional information or have any
++ * questions.
++ *
++ */
++
++#ifndef SHARED_VM_JPROFILECACHE_JITPROFILERECORD_HPP
++#define SHARED_VM_JPROFILECACHE_JITPROFILERECORD_HPP
++
++#include "jprofilecache/hashtable.hpp"
++#include "oops/method.hpp"
++#include "oops/methodData.hpp"
++#include "utilities/linkedlist.hpp"
++#include "utilities/ostream.hpp"
++
++class JitProfileRecorderEntry : public HashtableEntry<Method*, mtInternal> {
++public:
++  JitProfileRecorderEntry() { }
++  virtual ~JitProfileRecorderEntry() { }
++
++  void init() {
++    _bci = InvocationEntryBci;
++  }
++
++  void set_bci(int bci) { _bci = bci; }
++  int bci() { return _bci; }
++
++  void set_order(int order) { _order = order; }
++  int order() { return _order; }
++
++  JitProfileRecorderEntry* next() {
++    return (JitProfileRecorderEntry*)HashtableEntry<Method*, mtInternal>::next();
++  }
++
++private:
++  int _bci;
++  int _order;
++};
++
++class JitProfileRecordDictionary : public Hashtable<Method*, mtInternal> {
++  friend class VMStructs;
++  friend class JitProfileCache;
++public:
++  JitProfileRecordDictionary(unsigned int size);
++  virtual ~JitProfileRecordDictionary();
++
++  JitProfileRecorderEntry* add_method(unsigned int method_hash, Method* method, int bci);
++
++  JitProfileRecorderEntry* find_entry(unsigned int hash, Method* method);
++
++  void free_entry(JitProfileRecorderEntry* entry);
++
++  unsigned int count() { return _count; }
++
++  void print();
++
++  JitProfileRecorderEntry* bucket(int i) {
++    return (JitProfileRecorderEntry*)Hashtable<Method*, mtInternal>::bucket(i);
++  }
++
++private:
++  unsigned int _count;
++  JitProfileRecorderEntry* new_entry(unsigned int hash, Method* method);
++};
++
++class ClassSymbolEntry {
++public:
++  ClassSymbolEntry(Symbol* class_name, Symbol* class_loader_name, Symbol* path)
++    : _class_name(class_name),
++      _class_loader_name(class_loader_name),
++      _class_path(path) {
++    if (_class_name != nullptr) _class_name->increment_refcount();
++    if (_class_loader_name != nullptr) _class_loader_name->increment_refcount();
++    if (_class_path != nullptr) _class_path->increment_refcount();
++  }
++
++  ClassSymbolEntry()
++    : _class_name(nullptr),
++      _class_loader_name(nullptr),
++      _class_path(nullptr) {
++  }
++
++  ~ClassSymbolEntry() {
++    if (_class_name != nullptr) _class_name->decrement_refcount();
++    if (_class_loader_name != nullptr) _class_loader_name->decrement_refcount();
++    if (_class_path != nullptr) _class_path->decrement_refcount();
++  }
++
++  Symbol* class_name() const { return _class_name; }
++  Symbol* class_loader_name() const { return _class_loader_name; }
++  Symbol* path() const { return _class_path; }
++
++  bool equals(const ClassSymbolEntry& rhs) const {
++    return _class_name == rhs._class_name;
++  }
++
++private:
++  Symbol* _class_name;
++  Symbol* _class_loader_name;
++  Symbol* _class_path;
++};
++
++#define KNUTH_HASH_MULTIPLIER 2654435761UL
++#define ADDR_CHANGE_NUMBER 3
++#define JITPROFILECACHE_VERSION 0x1
++
++class JitProfileRecorder : public CHeapObj<mtInternal> {
++public:
++  enum RecorderState {
++    IS_OK = 0,
++    IS_ERR = 1,
++    NOT_INIT = 2
++  };
++public:
++  JitProfileRecorder();
++  virtual ~JitProfileRecorder();
++
++  void init();
++
++  unsigned int version() { return JITPROFILECACHE_VERSION; }
++
++  int class_init_count() { return _class_init_order_num + 1; }
++
++  address current_init_order_addr() { return (address)&_class_init_order_num; }
++
++  bool is_flushed() { return _flushed; }
++  void set_flushed(bool value) { _flushed = value; }
++
++  const char* logfile_name() { return _record_file_name; }
++
++  unsigned int recorded_count() { return _profile_record_dict->count(); }
++  JitProfileRecordDictionary* dict() { return _profile_record_dict; }
++
++  void set_logfile_name(const char* name);
++
++  bool is_valid() { return _recorder_state == IS_OK; }
++
++  LinkedListImpl<ClassSymbolEntry, AnyObj::C_HEAP, mtInternal>*
++    class_init_list() { return _class_init_list; }
++
++  void add_method(Method* method, int method_bci);
++
++  void flush_record();
++
++  int assign_class_init_order(InstanceKlass* klass);
++
++  unsigned int compute_hash(Method* method) {
++    uint64_t m_addr = (uint64_t)method;
++    return (m_addr >> ADDR_CHANGE_NUMBER) * KNUTH_HASH_MULTIPLIER;  // Knuth multiply hash
++  }
++
++  static int compute_crc32(randomAccessFileStream* fileStream);
++
++  static const char* auto_jpcfile_name();
++  static const char* auto_temp_jpcfile_name();
++  static void set_jpcfile_filepointer(FILE* file);
++  static bool is_recordable_data(ProfileData* dp);
++  static ArgInfoData* get_ArgInfoData(MethodData* mdo);
++
++private:
++  int _max_symbol_length;
++  unsigned int _pos;
++  volatile int _class_init_order_num;
++  volatile bool _flushed;
++  const char* _record_file_name;
++  static const char* _auto_jpcfile_name;
++  static const char* _auto_temp_jpcfile_name;
++  static FILE* _auto_jpcfile_filepointer;
++
++  randomAccessFileStream* _profilelog;
++  RecorderState _recorder_state;
++  LinkedListImpl<ClassSymbolEntry, AnyObj::C_HEAP, mtInternal>* _class_init_list;
++  LinkedListNode<ClassSymbolEntry>* _init_list_tail_node;
++  JitProfileRecordDictionary* _profile_record_dict;
++
++private:
++  void write_u1(u1 value);
++  void write_u4(u4 value);
++  void write_data_layout(DataLayout* value);
++
++  void write_profilecache_header();
++  void write_inited_class();
++  void write_profilecache_record(Method* method, int bci, int order);
++  void record_class_info(InstanceKlass* klass);
++  void record_method_info(Method* method, ConstMethod* const_method, int bci);
++  void write_profilecache_footer();
++  void write_method_profiledata(MethodData* mdo);
++
++  void write_string(const char* src, size_t len);
++  void overwrite_u4(u4 value, unsigned int offset);
++
++  void update_max_symbol_length(int len);
++};
++
++#endif // SHARED_VM_JPROFILECACHE_JITPROFILERECORD_HPP
+\ No newline at end of file
+diff --git a/src/hotspot/share/jprofilecache/symbolRegexMatcher.cpp b/src/hotspot/share/jprofilecache/symbolRegexMatcher.cpp
+new file mode 100644
+index 000000000..2565547e7
+--- /dev/null
++++ b/src/hotspot/share/jprofilecache/symbolRegexMatcher.cpp
+@@ -0,0 +1,95 @@
++/*
++ * Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#include "precompiled.hpp"
++#include "jprofilecache/symbolRegexMatcher.hpp"
++#include "oops/symbol.hpp"
++#include "utilities/globalDefinitions.hpp"
++#include "utilities/growableArray.hpp"
++
++#define SYMBOLREGEXMATCHER_INIT_SIZE 4
++
++template <MEMFLAGS F> SymbolRegexMatcher<F>::SymbolRegexMatcher(const char* regexes)
++  : _patterns(new (F) GrowableArray<SymbolPatternMatcher>(SYMBOLREGEXMATCHER_INIT_SIZE, F)) {
++  assert(regexes != nullptr, "illegal regexes");
++  int input_length = (int)strlen(regexes);
++  int current_pattern_length = 0;
++  char* current_pattern_start = (char*)&regexes[0];
++  for (int i = 0; i < input_length + 1; i++) {
++    if (regexes[i] == ',' || regexes[i] == ';' || i == input_length) {
++      add_regex_pattern(current_pattern_start, current_pattern_length);
++      // reset
++      current_pattern_length = -1;
++      current_pattern_start = (char*)&regexes[i+1];
++    }
++    current_pattern_length++;
++  }
++}
++
++template <MEMFLAGS F> void SymbolRegexMatcher<F>::add_regex_pattern(const char* s, int len) {
++  if (len == 0) {
++    return;
++  }
++  _patterns->push(SymbolPatternMatcher(s, len));
++}
++
++template <MEMFLAGS F> bool SymbolRegexMatcher<F>::matches(Symbol* symbol) {
++  ResourceMark rm;
++  char* s = symbol->as_C_string();
++  return matches(s);
++}
++
++template <MEMFLAGS F> bool SymbolRegexMatcher<F>::matches(const char* s) {
++  int regex_num = _patterns->length();
++  for (int i = 0; i < regex_num; i++) {
++    const char* regex = _patterns->at(i).regex_pattern();
++    int regex_len = _patterns->at(i).length();
++    if (matches_wildcard_pattern(regex, regex_len, s)) {
++      return true;
++    }
++  }
++  return false;
++}
++
++template <MEMFLAGS F> bool SymbolRegexMatcher<F>::matches_wildcard_pattern(const char* wildcard_pattern, int pattern_length, const char* target_string) {
++  int s_len = (int)strlen(target_string);
++  if (s_len < pattern_length - 1) {
++    return false;
++  }
++  for (int i = 0; i < pattern_length; i++) {
++    if (wildcard_pattern[i] == '*') {
++      return true;
++    }
++    if (wildcard_pattern[i] == target_string[i]) {
++      continue;
++    }
++    if ((wildcard_pattern[i] == '.' && target_string[i] == '/')
++        || (wildcard_pattern[i] == '/' && target_string[i] == '.')) {
++      continue;
++    }
++    if (wildcard_pattern[i] != '*' && wildcard_pattern[i] != target_string[i]) {
++      return false;
++    }
++  }
++  return (s_len == pattern_length);
++}
++
++template class SymbolRegexMatcher<mtClass>;
++
+diff --git a/src/hotspot/share/jprofilecache/symbolRegexMatcher.hpp b/src/hotspot/share/jprofilecache/symbolRegexMatcher.hpp
+new file mode 100644
+index 000000000..09381c81c
+--- /dev/null
++++ b/src/hotspot/share/jprofilecache/symbolRegexMatcher.hpp
+@@ -0,0 +1,64 @@
++/*
++ * Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved.
++ * Copyright (c) 2019 Alibaba Group Holding Limited. All rights reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#ifndef SHARED_VM_UTILITIES_SYMBOLREGEXMATCHER_HPP
++#define SHARED_VM_UTILITIES_SYMBOLREGEXMATCHER_HPP
++
++#include "memory/allocation.hpp"
++#include "utilities/growableArray.hpp"
++
++class SymbolPatternMatcher {
++public:
++  SymbolPatternMatcher() { }
++  SymbolPatternMatcher(const char* pattern, int length)
++    : _regex_pattern(pattern),
++      _pattern_length(length) {
++  }
++
++  ~SymbolPatternMatcher() { }
++
++  int length() { return _pattern_length; }
++  void set_length(int value) { _pattern_length = value; }
++  const char* regex_pattern() { return _regex_pattern; }
++  void set_regex_pattern(char* s) { _regex_pattern = s; }
++
++private:
++  const char* _regex_pattern;
++  int _pattern_length;
++};
++
++template <MEMFLAGS F>
++class SymbolRegexMatcher : public CHeapObj<F> {
++public:
++  SymbolRegexMatcher(const char* regexes);
++  GrowableArray<SymbolPatternMatcher>* patterns() { return _patterns; }
++
++  bool matches(Symbol* symbol);
++  bool matches(const char* s);
++
++private:
++  void add_regex_pattern(const char* src, int len);
++  bool matches_wildcard_pattern(const char* wildcard_pattern, int pattern_length, const char* target_string);
++
++  GrowableArray<SymbolPatternMatcher>* _patterns;
++};
++
++#endif // SHARED_VM_UTILITIES_SYMBOLREGEXMATCHER_HPP
+\ No newline at end of file
+diff --git a/src/hotspot/share/libadt/dict.cpp b/src/hotspot/share/libadt/dict.cpp
+index 034312501..fe15a7690 100644
+--- a/src/hotspot/share/libadt/dict.cpp
++++ b/src/hotspot/share/libadt/dict.cpp
+@@ -247,6 +247,32 @@ int hashstr(const void* t) {
+   return (int)((sum + xsum[k]) >> 1); // Hash key, un-modulo'd table size
+ }
+ 
++int compute_universal_hash(const char *input, int len) {
++  char current_char;
++  int32_t k = 0;
++  int32_t sum = 0;
++  int current_position = 0;
++  static short xsum_local[MAXID];
++  static bool initflag = false;      // True after 1st initialization
++
++  if( !initflag ) {
++    xsum_local[0] = (1<<shft[0])+1;
++    for (int i = 1; i < MAXID; i++) {
++      xsum_local[i] = (1<<shft[i])+1+xsum_local[i-1];
++    }
++    initflag = true;
++  }
++
++  while (current_position < len && k < MAXID-1) {
++    current_char = input[current_position];
++    current_char = (current_char<<1)+1;              // Characters are always odd!
++    sum += current_char + (current_char<<shft[k++]); // Universal hash function
++    current_position++;
++  }
++
++  return (int)((sum+xsum_local[k]) >> 1);
++}
++
+ //------------------------------hashptr--------------------------------------
+ // Slimey cheap hash function; no guaranteed performance. Better than the
+ // default for pointers, especially on MS-DOS machines.
+diff --git a/src/hotspot/share/libadt/dict.hpp b/src/hotspot/share/libadt/dict.hpp
+index 031e9a884..1e9f2fe0c 100644
+--- a/src/hotspot/share/libadt/dict.hpp
++++ b/src/hotspot/share/libadt/dict.hpp
+@@ -84,6 +84,7 @@ class Dict : public AnyObj { // Dictionary structure
+ 
+ // Hashing functions
+ int hashstr(const void* s);  // Nice string hash
++int compute_universal_hash(const char *input, int len);  // hash string with given length
+ // Slimey cheap hash function; no guaranteed performance. Better than the
+ // default for pointers, especially on MS-DOS machines.
+ int hashptr(const void* key);
+diff --git a/src/hotspot/share/logging/logTag.hpp b/src/hotspot/share/logging/logTag.hpp
+index 4ea67e3ff..6e656de0c 100644
+--- a/src/hotspot/share/logging/logTag.hpp
++++ b/src/hotspot/share/logging/logTag.hpp
+@@ -102,6 +102,7 @@ class outputStream;
+   LOG_TAG(jfr) \
+   LOG_TAG(jit) \
+   LOG_TAG(jni) \
++  AARCH64_ONLY(LOG_TAG(jprofilecache)) \
+   LOG_TAG(jvmci) \
+   LOG_TAG(jvmti) \
+   LOG_TAG(lambda) \
+diff --git a/src/hotspot/share/oops/constantPool.cpp b/src/hotspot/share/oops/constantPool.cpp
+index e2d865406..5d7d682b6 100644
+--- a/src/hotspot/share/oops/constantPool.cpp
++++ b/src/hotspot/share/oops/constantPool.cpp
+@@ -65,11 +65,23 @@
+ #include "runtime/signature.hpp"
+ #include "runtime/vframe.inline.hpp"
+ #include "utilities/copy.hpp"
++#ifdef AARCH64
++#include "jprofilecache/jitProfileCache.hpp"
++#endif
+ 
+ ConstantPool* ConstantPool::allocate(ClassLoaderData* loader_data, int length, TRAPS) {
+   Array<u1>* tags = MetadataFactory::new_array<u1>(loader_data, length, 0, CHECK_NULL);
+   int size = ConstantPool::size(length);
+-  return new (loader_data, size, MetaspaceObj::ConstantPoolType, THREAD) ConstantPool(tags);
++
++#ifdef AARCH64
++  if (JProfilingCacheCompileAdvance) {
++    Array<u1>* jpc_tags = MetadataFactory::new_array<u1>(loader_data, length, 0, CHECK_NULL);
++    return new (loader_data, size, MetaspaceObj::ConstantPoolType, THREAD) ConstantPool(tags, jpc_tags);
++  } else
++#endif
++  {
++    return new (loader_data, size, MetaspaceObj::ConstantPoolType, THREAD) ConstantPool(tags);
++  }
+ }
+ 
+ void ConstantPool::copy_fields(const ConstantPool* orig) {
+@@ -104,6 +116,9 @@ static bool tag_array_is_zero_initialized(Array<u1>* tags) {
+ 
+ ConstantPool::ConstantPool(Array<u1>* tags) :
+   _tags(tags),
++#ifdef AARCH64
++  _jpc_tags(nullptr),
++#endif
+   _length(tags->length()) {
+ 
+   assert(_tags != nullptr, "invariant");
+@@ -114,6 +129,29 @@ ConstantPool::ConstantPool(Array<u1>* tags) :
+   assert(nullptr == _pool_holder, "invariant");
+ }
+ 
++#ifdef AARCH64
++ConstantPool::ConstantPool(Array<u1>* tags, Array<u1>* jpt_markers):
++  _tags(tags),
++  _jpc_tags(nullptr),
++  _length(tags->length()) {
++
++  assert(JProfilingCacheCompileAdvance, "must be in JProfilingCacheCompileAdvance mode");
++  assert(jpt_markers != nullptr, "invariant");
++  assert(jpt_markers->length() == tags->length(), "invariant");
++  assert(_tags != nullptr, "invariant");
++  assert(tags->length() == _length, "invariant");
++  assert(tag_array_is_zero_initialized(tags), "invariant");
++  assert(0 == flags(), "invariant");
++  assert(0 == version(), "invariant");
++  assert(nullptr == _pool_holder, "invariant");
++
++  for (int i = 0; i < jpt_markers->length(); i++) {
++    jpt_markers->at_put(i, _jwp_has_not_been_traversed);
++  }
++  set_jpc_tags(jpt_markers);
++}
++#endif
++
+ void ConstantPool::deallocate_contents(ClassLoaderData* loader_data) {
+   if (cache() != nullptr) {
+     MetadataFactory::free_metadata(loader_data, cache());
+@@ -2291,6 +2329,90 @@ void ConstantPool::set_on_stack(const bool value) {
+   }
+ }
+ 
++#ifdef AARCH64
++void ConstantPool::preload_jprofilecache_classes(TRAPS) {
++  constantPoolHandle cp(THREAD, this);
++  guarantee(cp->pool_holder() != nullptr, "must be fully loaded");
++  if (THREAD->is_eager_class_loading_active()) {
++    return;
++  }
++  THREAD->set_is_eager_class_loading_active(true);
++  Stack<InstanceKlass*, mtInternal> s;
++  s.push(cp->pool_holder());
++  preload_classes_for_jprofilecache(s, THREAD);
++  THREAD->set_is_eager_class_loading_active(false);
++}
++
++Klass* ConstantPool::resolve_class_at_index(int constant_pool_index, TRAPS) {
++  assert(THREAD->is_Java_thread(), "must be a Java thread");
++  if (CompilationProfileCacheResolveClassEagerly) {
++    Klass* k = klass_at(constant_pool_index, CHECK_NULL);
++    return k;
++  } else {
++    Handle mirror_handle;
++    constantPoolHandle current_pool(THREAD, this);
++    Symbol* name = nullptr;
++    Handle loader;
++    {
++      if (current_pool->tag_at(constant_pool_index).is_unresolved_klass()) {
++        if (current_pool->tag_at(constant_pool_index).is_unresolved_klass_in_error()) {
++          return nullptr;
++        } else {
++          name = current_pool->klass_name_at(constant_pool_index);
++          loader = Handle(THREAD, current_pool->pool_holder()->class_loader());
++        }
++      }
++    }
++    oop protection_domain = current_pool->pool_holder()->protection_domain();
++    Handle protection_domain_handle(THREAD, protection_domain);
++    Klass* loaded_oop = SystemDictionary::resolve_or_fail(name, loader, protection_domain_handle, true, THREAD);
++    return loaded_oop;
++  }
++}
++
++void ConstantPool::preload_classes_for_jprofilecache(Stack<InstanceKlass*, mtInternal>& class_processing_stack,
++                                                     TRAPS) {
++  JitProfileCache* jprofilecache = JitProfileCache::instance();
++  while (!class_processing_stack.is_empty()) {
++    InstanceKlass* ik = class_processing_stack.pop();
++    constantPoolHandle current_constant_pool(THREAD, ik->constants());
++    for (int i = 0; i < current_constant_pool->length(); i++) {
++      bool is_unresolved = false;
++      Symbol* name = nullptr;
++      {
++        if (current_constant_pool->tag_at(i).is_unresolved_klass()) {
++          if (ik->is_shared()) {
++            name = current_constant_pool->klass_name_at(i);
++            is_unresolved = true;
++          } else if (!current_constant_pool->jprofilecache_traversed_at(i)) {
++            name = current_constant_pool->klass_name_at(i);
++            is_unresolved = true;
++            current_constant_pool->jprofilecache_has_traversed_at(i);
++          }
++        }
++      }
++      if (is_unresolved) {
++        if (name != nullptr && !jprofilecache->preloader()->should_preload_class(name)) {
++          continue;
++        }
++        Klass* klass = current_constant_pool->resolve_class_at_index(i, THREAD);
++        if (HAS_PENDING_EXCEPTION) {
++          ResourceMark rm;
++          log_debug(jprofilecache)("[JitProfileCache] WARNING: failed to resolve %s from constant pool",
++                                   name->as_C_string());
++          if (PENDING_EXCEPTION->is_a(vmClasses::LinkageError_klass())) {
++            CLEAR_PENDING_EXCEPTION;
++          }
++        }
++        if (klass != nullptr && klass->is_instance_klass()) {
++          class_processing_stack.push((InstanceKlass*)klass);
++        }
++      }
++    }
++  }
++}
++#endif
++
+ // Printing
+ 
+ void ConstantPool::print_on(outputStream* st) const {
+diff --git a/src/hotspot/share/oops/constantPool.hpp b/src/hotspot/share/oops/constantPool.hpp
+index de92b653f..15de8f71b 100644
+--- a/src/hotspot/share/oops/constantPool.hpp
++++ b/src/hotspot/share/oops/constantPool.hpp
+@@ -38,6 +38,7 @@
+ #include "utilities/bytes.hpp"
+ #include "utilities/constantTag.hpp"
+ #include "utilities/resourceHash.hpp"
++#include "utilities/stack.inline.hpp"
+ 
+ // A ConstantPool is an array containing class constants as described in the
+ // class file.
+@@ -104,6 +105,15 @@ class ConstantPool : public Metadata {
+   // containing this klass, 0 if not specified.
+   u2 _source_file_name_index;
+ 
++#ifdef AARCH64
++  enum {
++    _jwp_has_not_been_traversed = 0,
++    _jwp_has_been_traversed = 1
++  };
++
++  Array<u1>* _jpc_tags;  // the jpc tag array records whether the corresponding entry has been traversed
++#endif
++
+   enum {
+     _has_preresolution = 1,  // Flags
+     _on_stack          = 2,
+@@ -123,6 +133,9 @@ class ConstantPool : public Metadata {
+   } _saved;
+ 
+   void set_tags(Array<u1>* tags) { _tags = tags; }
++#ifdef AARCH64
++  void set_jpc_tags(Array<u1>* tags) { _jpc_tags = tags; }
++#endif
+   void tag_at_put(int which, jbyte t) { tags()->at_put(which, t); }
+   void release_tag_at_put(int which, jbyte t) { tags()->release_at_put(which, t); }
+ 
+@@ -162,6 +175,7 @@ class ConstantPool : public Metadata {
+   }
+ 
+   ConstantPool(Array<u1>* tags);
++  ConstantPool(Array<u1>* tags, Array<u1>* jpt_markers);
+   ConstantPool() { assert(DumpSharedSpaces || UseSharedSpaces, "only for CDS"); }
+  public:
+   static ConstantPool* allocate(ClassLoaderData* loader_data, int length, TRAPS);
+@@ -171,6 +185,20 @@ class ConstantPool : public Metadata {
+   Array<u1>* tags() const { return _tags; }
+   Array<u2>* operands() const { return _operands; }
+ 
++#ifdef AARCH64
++  Array<u1>* jwp_tags() const { return _jpc_tags; }
++
++  bool jprofilecache_traversed_at(int which) {
++    assert(0 < which && which < jwp_tags()->length(), "out of bound");
++    return jwp_tags()->at(which) == _jwp_has_been_traversed;
++  }
++
++  void jprofilecache_has_traversed_at(int which) {
++    assert(which < jwp_tags()->length(), "out of bound");
++    jwp_tags()->at_put(which, _jwp_has_been_traversed);
++  }
++#endif
++
+   bool has_preresolution() const { return (_flags & _has_preresolution) != 0; }
+   void set_has_preresolution() {
+     assert(!is_shared(), "should never be called on shared ConstantPools");
+@@ -931,6 +959,15 @@ class ConstantPool : public Metadata {
+   oop resolved_reference_from_indy(int index) {
+     return resolved_references()->obj_at(cache()->resolved_indy_entry_at(index)->resolved_references_index());
+   }
++
++#ifdef AARCH64
++  void preload_jprofilecache_classes(TRAPS);
++
++  Klass* resolve_class_at_index(int constant_pool_index, TRAPS);
++
++private:
++  void preload_classes_for_jprofilecache(Stack<InstanceKlass*, mtInternal>& class_processing_stack, TRAPS);
++#endif
+ };
+ 
+ #endif // SHARE_OOPS_CONSTANTPOOL_HPP
+diff --git a/src/hotspot/share/oops/instanceKlass.cpp b/src/hotspot/share/oops/instanceKlass.cpp
+index b924af34c..a97a8f6d8 100644
+--- a/src/hotspot/share/oops/instanceKlass.cpp
++++ b/src/hotspot/share/oops/instanceKlass.cpp
+@@ -99,6 +99,10 @@
+ #if INCLUDE_JFR
+ #include "jfr/jfrEvents.hpp"
+ #endif
++#ifdef AARCH64
++#include "jprofilecache/jitProfileCache.hpp"
++#include "jprofilecache/jitProfileRecord.hpp"
++#endif
+ 
+ #ifdef DTRACE_ENABLED
+ 
+@@ -626,6 +630,14 @@ void InstanceKlass::deallocate_contents(ClassLoaderData* loader_data) {
+   }
+   set_default_vtable_indices(nullptr);
+ 
++#ifdef AARCH64
++  if (JProfilingCacheRecording) {
++    if (source_file_path() != nullptr) {
++      source_file_path()->decrement_refcount();
++      set_source_file_path(nullptr);
++    }
++  }
++#endif
+ 
+   // This array is in Klass, but remove it with the InstanceKlass since
+   // this place would be the only caller and it can share memory with transitive
+@@ -1137,6 +1149,12 @@ void InstanceKlass::initialize_impl(TRAPS) {
+                    jt->name(), external_name());
+       }
+     }
++
++#ifdef AARCH64
++    if (JProfilingCacheRecording) {
++      JitProfileCache::instance()->recorder()->assign_class_init_order(this);
++    }
++#endif
+   }
+ 
+   // Step 7
+diff --git a/src/hotspot/share/oops/instanceKlass.hpp b/src/hotspot/share/oops/instanceKlass.hpp
+index 08fc2b49c..83cba66b7 100644
+--- a/src/hotspot/share/oops/instanceKlass.hpp
++++ b/src/hotspot/share/oops/instanceKlass.hpp
+@@ -207,6 +207,23 @@ class InstanceKlass: public Klass {
+   // it is stored in the instanceklass as a null-terminated UTF-8 string
+   const char* _source_debug_extension;
+ 
++#ifdef AARCH64
++  // if not using JProfileCache, default value is 0
++  unsigned int _crc32;
++  // if not using JProfileCache, default value is 0
++  unsigned int _class_bytes_size;
++
++  // JProfilingCacheCompileAdvance eager init support
++  bool _is_jprofilecache_recorded;
++
++  // source file path, e.g. /home/xxx/liba.jar
++  Symbol* _source_file_path;
++
++#ifndef PRODUCT
++  int _initialize_order;
++#endif // PRODUCT
++#endif // AARCH64
++
+   // Number of heapOopSize words used by non-static fields in this klass
+   // (including inherited fields but after header_size()).
+   int _nonstatic_field_size;
+@@ -654,6 +671,26 @@ public:
+   const char* source_debug_extension() const { return _source_debug_extension; }
+   void set_source_debug_extension(const char* array, int length);
+ 
++#ifdef AARCH64
++  // JProfileCache support
++  unsigned int crc32() { return _crc32; }
++  void set_crc32(unsigned int crc32) { _crc32 = crc32; }
++
++  unsigned int bytes_size() { return _class_bytes_size; }
++  void set_bytes_size(unsigned int size) { _class_bytes_size = size; }
++
++  bool is_jprofilecache_recorded() { return _is_jprofilecache_recorded; }
++  void set_jprofilecache_recorded(bool value) { _is_jprofilecache_recorded = value; }
++
++  Symbol* source_file_path() { return _source_file_path; }
++  void set_source_file_path(Symbol* value) { _source_file_path = value; }
++
++#ifndef PRODUCT
++  int initialize_order() { return _initialize_order; }
++  void set_initialize_order(int order) { _initialize_order = order; }
++#endif // PRODUCT
++#endif // AARCH64
++
+   // nonstatic oop-map blocks
+   static int nonstatic_oop_map_size(unsigned int oop_map_count) {
+     return oop_map_count * OopMapBlock::size_in_words();
+diff --git a/src/hotspot/share/oops/method.cpp b/src/hotspot/share/oops/method.cpp
+index 4423cbde8..da9eb8d69 100644
+--- a/src/hotspot/share/oops/method.cpp
++++ b/src/hotspot/share/oops/method.cpp
+@@ -76,6 +76,10 @@
+ #include "utilities/quickSort.hpp"
+ #include "utilities/vmError.hpp"
+ #include "utilities/xmlstream.hpp"
++#ifdef AARCH64
++#include "jprofilecache/jitProfileCacheFileParser.hpp"
++#include "jprofilecache/jitProfileClassChain.hpp"
++#endif
+ 
+ // Implementation of Method
+ 
+@@ -106,6 +110,16 @@ Method::Method(ConstMethod* xconst, AccessFlags access_flags, Symbol* name) {
+   clear_method_counters();
+   set_vtable_index(Method::garbage_vtable_index);
+ 
++#ifdef AARCH64
++  set_first_invoke_init_order(INVALID_FIRST_INVOKE_INIT_ORDER);
++  set_compiled_by_jprofilecache(false);
++  set_jpc_method_holder(nullptr);
++
++#ifndef PRODUCT
++  set_deopted_by_jprofilecache(false);
++#endif // PRODUCT
++#endif // AARCH64
++
+   // Fix and bury in Method*
+   set_interpreter_entry(nullptr);  // sets i2i entry and from_int
+   set_adapter_entry(nullptr);
+diff --git a/src/hotspot/share/oops/method.hpp b/src/hotspot/share/oops/method.hpp
+index af5b9b1a4..bb6d34bb8 100644
+--- a/src/hotspot/share/oops/method.hpp
++++ b/src/hotspot/share/oops/method.hpp
+@@ -66,6 +66,9 @@ class ConstMethod;
+ class InlineTableSizes;
+ class CompiledMethod;
+ class InterpreterOopMap;
++#ifdef AARCH64
++class ProfileCacheMethodHold;
++#endif
+ 
+ class Method : public Metadata {
+   friend class VMStructs;
+@@ -104,6 +107,16 @@ class Method : public Metadata {
+   CompiledMethod* volatile _code;  // Points to the corresponding piece of native code
+   volatile address _from_interpreted_entry;  // Cache of _code ? _adapter->i2c_entry() : _i2i_entry
+ 
++#ifdef AARCH64
++  int _first_invoke_init_order;  // record class initialize order when this method is first invoked
++  bool _compiled_by_jprofilecache;
++
++  ProfileCacheMethodHold* _jpc_method_holder;
++#ifndef PRODUCT
++  bool _deopted_by_jprofilecache;
++#endif // PRODUCT
++#endif // AARCH64
++
+   // Constructor
+   Method(ConstMethod* xconst, AccessFlags access_flags, Symbol* name);
+  public:
+@@ -168,6 +181,26 @@ class Method : public Metadata {
+     return constMethod()->type_annotations();
+   }
+ 
++#ifdef AARCH64
++  int first_invoke_init_order() { return _first_invoke_init_order; }
++  void set_first_invoke_init_order(int value) { _first_invoke_init_order = value; }
++
++  bool compiled_by_jprofilecache() { return _compiled_by_jprofilecache; }
++  void set_compiled_by_jprofilecache(bool value) { _compiled_by_jprofilecache = value; }
++
++  ProfileCacheMethodHold* jpc_method_holder() const { return _jpc_method_holder; }
++  void set_jpc_method_holder(ProfileCacheMethodHold* value) { _jpc_method_holder = value; }
++
++#ifndef PRODUCT
++  bool deopted_by_jprofilecache() { return _deopted_by_jprofilecache; }
++  void set_deopted_by_jprofilecache(bool value) { _deopted_by_jprofilecache = value; }
++#endif
++
++  static ByteSize first_invoke_init_order_offset() {
++    return byte_offset_of(Method, _first_invoke_init_order);
++  }
++#endif // AARCH64
++
+   // Helper routine: get klass name + "." + method name + signature as
+   // C string, for the purpose of providing more useful
+   // fatal error handling. The string is allocated in resource
+diff --git a/src/hotspot/share/oops/methodData.hpp b/src/hotspot/share/oops/methodData.hpp
+index 3384df378..40eacd250 100644
+--- a/src/hotspot/share/oops/methodData.hpp
++++ b/src/hotspot/share/oops/methodData.hpp
+@@ -1481,6 +1481,9 @@ public:
+ class BranchData : public JumpData {
+   friend class VMStructs;
+   friend class JVMCIVMStructs;
++#ifdef AARCH64
++  friend class ciMethodData;
++#endif
+ protected:
+   enum {
+     not_taken_off_set = jump_cell_count,
+diff --git a/src/hotspot/share/opto/callGenerator.cpp b/src/hotspot/share/opto/callGenerator.cpp
+index 0579ce515..3624d849f 100644
+--- a/src/hotspot/share/opto/callGenerator.cpp
++++ b/src/hotspot/share/opto/callGenerator.cpp
+@@ -250,7 +250,11 @@ JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
+   if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
+       ((ImplicitNullCheckThreshold > 0) && caller_md &&
+        (caller_md->trap_count(Deoptimization::Reason_null_check)
+-       >= (uint)ImplicitNullCheckThreshold))) {
++       >= (uint)ImplicitNullCheckThreshold))
++#ifdef AARCH64
++      || (JProfilingCacheCompileAdvance && kit.C->env()->task()->is_jprofilecache_compilation())
++#endif
++      ) {
+     // Make an explicit receiver null_check as part of this call.
+     // Since we share a map with the caller, his JVMS gets adjusted.
+     receiver = kit.null_check_receiver_before_call(method());
+diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp
+index a3300c16c..d90f9699a 100644
+--- a/src/hotspot/share/opto/compile.cpp
++++ b/src/hotspot/share/opto/compile.cpp
+@@ -709,6 +709,18 @@ Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci,
+ 
+   print_compile_messages();
+ 
++#ifdef AARCH64
++  if (JProfilingCacheCompileAdvance) {
++    bool fields_resolved = ci_env->are_method_fields_all_resolved(method());
++    if (!fields_resolved) {
++      stringStream ss;
++      ss.print("Cannot parse method: fields needed by method are not all resolved");
++      record_method_not_compilable(ss.as_string());
++      return;
++    }
++  }
++#endif
++
+   _ilt = InlineTree::build_inline_tree_root();
+ 
+   // Even if NO memory addresses are used, MergeMem nodes must have at least 1 slice
+diff --git a/src/hotspot/share/opto/graphKit.cpp b/src/hotspot/share/opto/graphKit.cpp
+index 8cad76098..327c5f72c 100644
+--- a/src/hotspot/share/opto/graphKit.cpp
++++ b/src/hotspot/share/opto/graphKit.cpp
+@@ -2946,9 +2946,16 @@ bool GraphKit::seems_never_null(Node* obj, ciProfileData* data, bool& speculatin
+   if (speculating) {
+     return true;
+   }
+-  if (data == nullptr)
++  if (data == nullptr) {
++#ifdef AARCH64
++    // Compiled by JitProfile
++    if (JProfilingCacheCompileAdvance && this->C->env()->task()->is_jprofilecache_compilation()) {
++      return false;
++    }
++#endif
+     // Edge case: no mature data. Be optimistic here.
+     return true;
++  }
+   // If the profile has not seen a null, assume it won't happen.
+   assert(java_bc() == Bytecodes::_checkcast ||
+          java_bc() == Bytecodes::_instanceof ||
+@@ -3077,6 +3084,13 @@ Node* GraphKit::maybe_cast_profiled_obj(Node* obj,
+ 
+   // type is null if profiling tells us this object is always null
+   if (type != nullptr) {
++#ifdef AARCH64
++    if (JProfilingCacheCompileAdvance) {
++      if (this->C->env()->task()->is_jprofilecache_compilation()) {
++        return obj;
++      }
++    }
++#endif
+     Deoptimization::DeoptReason class_reason = Deoptimization::Reason_speculate_class_check;
+     Deoptimization::DeoptReason null_reason = Deoptimization::Reason_speculate_null_check;
+ 
+diff --git a/src/hotspot/share/opto/lcm.cpp b/src/hotspot/share/opto/lcm.cpp
+index 1d85029d8..648c10474 100644
+--- a/src/hotspot/share/opto/lcm.cpp
++++ b/src/hotspot/share/opto/lcm.cpp
+@@ -90,6 +90,11 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo
+   // mechanism exists (yet) to set the switches at an os_cpu level
+   if( !ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(0)) return;
+ 
++#ifdef AARCH64
++  // Disable implicit_null_check for jprofilecache compilation to reduce deoptimization
++  if (JProfilingCacheCompileAdvance && this->C->env()->task()->is_jprofilecache_compilation()) return;
++#endif
++
+   // Make sure the ptr-is-null path appears to be uncommon!
+   float f = block->end()->as_MachIf()->_prob;
+   if( proj->Opcode() == Op_IfTrue ) f = 1.0f - f;
+diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp
+index 130730b22..e91cf731c 100644
+--- a/src/hotspot/share/runtime/arguments.cpp
++++ b/src/hotspot/share/runtime/arguments.cpp
+@@ -71,6 +71,10 @@
+ #if INCLUDE_JFR
+ #include "jfr/jfr.hpp"
+ #endif
++#ifdef AARCH64
++#include "jprofilecache/jitProfileRecord.hpp"
++#include <sys/file.h>
++#endif
+ 
+ #include <limits>
+ 
+@@ -3128,6 +3132,99 @@ jint Arguments::finalize_vm_init_args(bool patch_mod_javabase) {
+   if (UseCompactObjectHeaders && !UseCompressedClassPointers) {
+     FLAG_SET_DEFAULT(UseCompressedClassPointers, true);
+   }
++
++  if (JProfilingCacheAutoArchiveDir != nullptr) {
++    if (FLAG_IS_CMDLINE(JProfilingCacheRecording) || FLAG_IS_CMDLINE(JProfilingCacheCompileAdvance)) {
++      warning("Profile cache file will be dumped automatically. No need to set JProfilingCacheRecording/JProfilingCacheCompileAdvance");
++      JProfilingCacheRecording = false;
++      JProfilingCacheCompileAdvance = false;
++    }
++
++    if (FLAG_IS_CMDLINE(ProfilingCacheFile)) {
++      warning("ProfilingCacheFile will be ignored");
++    }
++
++    DIR* dir = os::opendir(JProfilingCacheAutoArchiveDir);
++    if (dir == nullptr) {
++      int err_code = errno;
++      switch (err_code) {
++        case ENOENT:
++          if (::mkdir(JProfilingCacheAutoArchiveDir, 0755) == OS_ERR) {
++            if (errno == EEXIST) break;
++            else {
++              jio_fprintf(defaultStream::error_stream(),
++                          "Failed to create JProfilingCacheAutoArchiveDir directory '%s'\n",
++                          JProfilingCacheAutoArchiveDir);
++              return JNI_ERR;
++            }
++          }
++          break;
++        case EACCES:
++          jio_fprintf(defaultStream::error_stream(),
++                      "Permission denied to open JProfilingCacheAutoArchiveDir directory '%s'\n",
++                      JProfilingCacheAutoArchiveDir);
++          return JNI_ERR;
++        case ENOTDIR:
++          jio_fprintf(defaultStream::error_stream(),
++                      "JProfilingCacheAutoArchiveDir '%s' is not a directory\n",
++                      JProfilingCacheAutoArchiveDir);
++          return JNI_ERR;
++        default:
++          jio_fprintf(defaultStream::error_stream(),
++                      "Couldn't open JProfilingCacheAutoArchiveDir directory '%s'\n",
++                      JProfilingCacheAutoArchiveDir);
++          return JNI_ERR;
++      }
++    } else {
++      os::closedir(dir);
++    }
++
++    const char* jpc_path = JitProfileRecorder::auto_jpcfile_name();
++    const char* jpc_tmp_path = JitProfileRecorder::auto_temp_jpcfile_name();
++    struct stat st;
++    if (os::stat(jpc_tmp_path, &st) == 0) {  // another JVM is recording the jprofile
++      // Test whether the temp file is still valid
++      int jpc_tmp_fd = ::open(jpc_tmp_path, O_RDWR, 0644);
++      if (jpc_tmp_fd != -1) {
++        if (flock(jpc_tmp_fd, LOCK_EX | LOCK_NB) == 0) {
++          ::unlink(jpc_tmp_path);
++          flock(jpc_tmp_fd, LOCK_UN);
++        }
++        ::close(jpc_tmp_fd);
++      }
++    } else {
++      if (os::stat(jpc_path, &st) == 0) {  // jprofilecache file exists, replay profile data
++        JProfilingCacheCompileAdvance = true;
++      } else {
++        int jpc_fd = ::open(jpc_tmp_path, O_RDWR | O_CREAT, 0644);
++        if (jpc_fd == -1) {
++          jio_fprintf(defaultStream::error_stream(),
++                      "Could not open/create jprofile cache file under JProfilingCacheAutoArchiveDir '%s'\n",
++                      JProfilingCacheAutoArchiveDir);
++        } else {
++          if (flock(jpc_fd, LOCK_EX | LOCK_NB) == 0) {  // lock the jprofile file and prepare to generate
++            FILE* jpc_file = ::fdopen(jpc_fd, "wb+");
++            if (jpc_file == nullptr) {
++              jio_fprintf(defaultStream::error_stream(),
++                          "Could not open/create jprofile cache file under JProfilingCacheAutoArchiveDir '%s'\n",
++                          JProfilingCacheAutoArchiveDir);
++            } else {
++              log_info(jprofilecache)("AutoJProfileCache uses Record Mode");
++              JitProfileRecorder::set_jpcfile_filepointer(jpc_file);
++              JProfilingCacheRecording = true;
++              ClassUnloading = false;
++              ExitVMProfileCacheFlush = true;
++              if (NUMANodesRandom != 0) {
++                NUMANodesRandom = 0;
++              }
++            }
++          } else {
++            ::close(jpc_fd);
++          }
++        }
++      }
++    }
++  }
+ #endif
+ 
+ return JNI_OK;
+ }
+ 
+diff --git a/src/hotspot/share/runtime/init.cpp b/src/hotspot/share/runtime/init.cpp
+index c3f75cdcf..8b0f71df7 100644
+--- a/src/hotspot/share/runtime/init.cpp
++++ b/src/hotspot/share/runtime/init.cpp
+@@ -50,6 +50,10 @@
+ #if INCLUDE_JVMCI
+ #include "jvmci/jvmci.hpp"
+ #endif
++#ifdef AARCH64
++#include "jprofilecache/jitProfileCache.hpp"
++#include "runtime/java.hpp"
++#endif
+ 
+ // Initialization done by VM thread in vm_init_globals()
+ void check_ThreadShadow();
+@@ -134,6 +138,16 @@ jint init_globals() {
+   }
+ #endif // LEAK_SANITIZER
+ 
++#ifdef AARCH64
++  if (JProfilingCacheRecording) {
++    JitProfileCache* jpc = JitProfileCache::create_instance();
++    jpc->init();
++    if (!jpc->is_valid()) {
++      vm_exit_during_initialization("[JitProfileCache] ERROR: init failed");
++    }
++  }
++#endif
++
+   AsyncLogWriter::initialize();
+   gc_barrier_stubs_init();  // depends on universe_init, must be before interpreter_init
+   continuations_init();     // must precede continuation stub generation
+@@ -143,6 +157,16 @@ jint init_globals() {
+   InterfaceSupport_init();
+   VMRegImpl::set_regName();  // need this before generate_stubs (for printing oop maps).
+   SharedRuntime::generate_stubs();
++
++#ifdef AARCH64
++  if (JProfilingCacheCompileAdvance) {
++    JitProfileCache* jpc = JitProfileCache::create_instance();
++    jpc->init();
++    if (!jpc->is_valid()) {
++      vm_exit_during_initialization("[JitProfileCache] ERROR: init failed");
++    }
++  }
++#endif
+   return JNI_OK;
+ }
+ 
+diff --git a/src/hotspot/share/runtime/java.cpp b/src/hotspot/share/runtime/java.cpp
+index 52551446c..429292f38 100644
+--- a/src/hotspot/share/runtime/java.cpp
++++ b/src/hotspot/share/runtime/java.cpp
+@@ -100,6 +100,9 @@
+ #if INCLUDE_JBOLT
+ #include "jbolt/jBoltManager.hpp"
+ #endif
++#ifdef AARCH64
++#include "jprofilecache/jitProfileCache.hpp"
++#endif
+ 
+ GrowableArray<Method*>* collected_profiled_methods;
+ 
+@@ -457,6 +460,13 @@ void before_exit(JavaThread* thread, bool halt) {
+ 
+   // Actual shutdown logic begins here.
+ 
++#ifdef AARCH64
++  // flush jprofilecache
++  if (JProfilingCacheRecording && ExitVMProfileCacheFlush) {
++    JitProfileCache::instance()->flush_recorder();
++  }
++#endif
++
+ #if INCLUDE_JVMCI
+   if (EnableJVMCI) {
+     JVMCI::shutdown(thread);
+diff --git a/src/hotspot/share/runtime/mutexLocker.cpp b/src/hotspot/share/runtime/mutexLocker.cpp
+index a8ebba660..f82456317 100644
+--- a/src/hotspot/share/runtime/mutexLocker.cpp
++++ b/src/hotspot/share/runtime/mutexLocker.cpp
+@@ -38,6 +38,11 @@
+ 
+ Mutex*   CompiledMethod_lock = nullptr;
+ Monitor* SystemDictionary_lock = nullptr;
++#ifdef AARCH64
++Mutex*   JitProfileRecorder_lock = nullptr;
++Mutex*   ProfileCacheClassChain_lock = nullptr;
++Mutex*   JitProfileCachePrint_lock = nullptr;
++#endif
+ Mutex*   InvokeMethodTypeTable_lock = nullptr;
+ Monitor* InvokeMethodIntrinsicTable_lock = nullptr;
+ Mutex*   SharedDictionary_lock = nullptr;
+@@ -210,6 +215,11 @@ void mutex_init() {
+   MUTEX_DEFN(tty_lock                        , PaddedMutex  , tty);  // allow to lock in VM
+ 
+   MUTEX_DEFN(STS_lock                        , PaddedMonitor, nosafepoint);
++#ifdef AARCH64
++  MUTEX_DEFN(JitProfileRecorder_lock         , PaddedMutex  , nosafepoint);
++  MUTEX_DEFN(ProfileCacheClassChain_lock     , PaddedMutex  , safepoint);
++  MUTEX_DEFN(JitProfileCachePrint_lock       , PaddedMutex  , safepoint);
++#endif
+ 
+   if (UseG1GC) {
+     MUTEX_DEFN(CGC_lock                      , PaddedMonitor, nosafepoint);
+diff --git a/src/hotspot/share/runtime/mutexLocker.hpp b/src/hotspot/share/runtime/mutexLocker.hpp
+index 6b2411322..33adc7e8d 100644
+--- a/src/hotspot/share/runtime/mutexLocker.hpp
++++ b/src/hotspot/share/runtime/mutexLocker.hpp
+@@ -33,6 +33,11 @@
+ 
+ extern Mutex*   CompiledMethod_lock;       // a lock used to guard a compiled method and OSR queues
+ extern Monitor* SystemDictionary_lock;     // a lock on the system dictionary
++#ifdef AARCH64
++extern Mutex*   JitProfileRecorder_lock;     // a lock on the JProfileCache class JitProfileRecorder
++extern Mutex*   ProfileCacheClassChain_lock; // a lock on the JProfileCache preload class chain
++extern Mutex*   JitProfileCachePrint_lock;   // a lock on the JProfileCache jstack print
++#endif
+ extern Mutex*   InvokeMethodTypeTable_lock;
+ extern Monitor* InvokeMethodIntrinsicTable_lock;
+ extern Mutex*   SharedDictionary_lock;     // a lock on the CDS shared dictionary
+diff --git a/src/hotspot/share/runtime/thread.cpp b/src/hotspot/share/runtime/thread.cpp
+index a576c4886..ac7a344f9 100644
+--- a/src/hotspot/share/runtime/thread.cpp
++++ b/src/hotspot/share/runtime/thread.cpp
+@@ -81,6 +81,9 @@ Thread::Thread() {
+   DEBUG_ONLY(_current_resource_mark = nullptr;)
+   set_handle_area(new (mtThread) HandleArea(nullptr));
+   set_metadata_handles(new (mtClass) GrowableArray<Metadata*>(30, mtClass));
++#ifdef AARCH64
++  set_is_eager_class_loading_active(false);
++#endif
+   set_last_handle_mark(nullptr);
+   DEBUG_ONLY(_missed_ic_stub_refill_verifier = nullptr);
+ 
+@@ -90,6 +93,9 @@ Thread::Thread() {
+   _threads_list_ptr = nullptr;
+   _nested_threads_hazard_ptr_cnt = 0;
+   _rcu_counter = 0;
++#ifdef AARCH64
++  _super_class_resolution_depth = 0;
++#endif
+ 
+   // the handle mark links itself to last_handle_mark
+   new HandleMark(this);
+diff --git a/src/hotspot/share/runtime/thread.hpp b/src/hotspot/share/runtime/thread.hpp
+index fabeb218e..9e09c47c6 100644
+--- a/src/hotspot/share/runtime/thread.hpp
++++ b/src/hotspot/share/runtime/thread.hpp
+@@ -208,6 +208,20 @@ class Thread: public ThreadShadow {
+   DEBUG_ONLY(bool _indirectly_suspendible_thread;)
+   DEBUG_ONLY(bool _indirectly_safepoint_thread;)
+ 
++#ifdef AARCH64
++  // JProfileCache support
++private:
++  int _super_class_resolution_depth;
++  bool _is_eager_class_loading_active;
++
++public:
++  void super_class_depth_add() { _super_class_resolution_depth++; }
++  void super_class_depth_dec() { _super_class_resolution_depth--; }
++  bool is_eager_class_loading_active() { return _is_eager_class_loading_active; }
++  void set_is_eager_class_loading_active(bool value) { _is_eager_class_loading_active = value; }
++  bool is_super_class_resolution_active() const { return _super_class_resolution_depth > 0; }
++#endif
++
+  public:
+   // Determines if a heap allocation failure will be retried
+   // (e.g., by deoptimizing and re-executing in the interpreter).
+diff --git a/src/hotspot/share/runtime/threads.cpp b/src/hotspot/share/runtime/threads.cpp
+index 609248146..e2d60226f 100644
+--- a/src/hotspot/share/runtime/threads.cpp
++++ b/src/hotspot/share/runtime/threads.cpp
+@@ -118,6 +118,10 @@
+ #include "jbolt/jBoltDcmds.hpp"
+ #include "jbolt/jBoltManager.hpp"
+ #endif // INCLUDE_JBOLT
++#ifdef AARCH64
++#include "jprofilecache/jitProfileCache.hpp"
++#include "jprofilecache/jitProfileCacheThread.hpp"
++#endif
+ 
+ // Initialization after module runtime initialization
+ void universe_post_module_init();  // must happen after call_initPhase2
+@@ -824,6 +828,16 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
+   StatSampler::engage();
+   if (CheckJNICalls) JniPeriodicChecker::engage();
+ 
++#ifdef AARCH64
++  if (JProfilingCacheCompileAdvance) {
++    JitProfileCache* jprofilecache = JitProfileCache::instance();
++    assert(jprofilecache != nullptr, "sanity check");
++    jprofilecache->preloader()->jvm_booted_is_done();
++    JitProfileCacheThread::launch_with_delay(JProfilingCacheDelayLoadTime, THREAD);
++    // register_jprofilecache_dcmds();
++  }
++#endif
++
+ #if INCLUDE_RTM_OPT
+   RTMLockingCounters::init();
+ #endif
+diff --git a/src/hotspot/share/utilities/ostream.cpp b/src/hotspot/share/utilities/ostream.cpp
+index 49257795e..fa926157f 100644
+--- a/src/hotspot/share/utilities/ostream.cpp
++++ b/src/hotspot/share/utilities/ostream.cpp
+@@ -626,6 +626,33 @@ void fileStream::flush() {
+   }
+ }
+ 
++randomAccessFileStream::randomAccessFileStream() : fileStream() { }
++
++randomAccessFileStream::randomAccessFileStream(const char* file_name, const char* open_mode)
++  : fileStream(file_name, open_mode) { }
++
++void randomAccessFileStream::write(const char* s, size_t len, long pos) {
++  assert(pos <= fileSize(), "pos check");
++  if (_file != nullptr) {
++    long old_pos = ::ftell(_file);
++    if (old_pos != pos) {
++      int ret = seek(pos, SEEK_SET);
++      assert(ret != -1, "fseek return value check");
++    }
++    size_t count = fwrite(s, 1, len, _file);
++    if (old_pos != pos) {
++      seek(old_pos, SEEK_SET);
++    }
++  }
++}
++
++void randomAccessFileStream::write(const char* s, size_t len) {
++  if (_file != nullptr) {
++    // Make an unused local variable to avoid warning from gcc 4.x compiler.
++    size_t count = fwrite(s, 1, len, _file);
++  }
++}
++
+ fdStream fdStream::_stdout_stream(1);
+ fdStream fdStream::_stderr_stream(2);
+ 
+diff --git a/src/hotspot/share/utilities/ostream.hpp b/src/hotspot/share/utilities/ostream.hpp
+index eaef66f71..96f72e608 100644
+--- a/src/hotspot/share/utilities/ostream.hpp
++++ b/src/hotspot/share/utilities/ostream.hpp
+@@ -239,7 +239,7 @@ class fileStream : public outputStream {
+   fileStream(const char* file_name);
+   fileStream(const char* file_name, const char* opentype);
+   fileStream(FILE* file, bool need_close = false) { _file = file; _need_close = need_close; }
+-  ~fileStream();
++  virtual ~fileStream();
+   bool is_open() const { return _file != nullptr; }
+   virtual void write(const char* c, size_t len);
+   size_t read(void *data, size_t size, size_t count) { return _file != nullptr ? ::fread(data, size, count, _file) : 0; }
+@@ -250,6 +250,19 @@ class fileStream : public outputStream {
+   void flush();
+ };
+ 
++class randomAccessFileStream : public fileStream {
++public:
++  randomAccessFileStream();
++  randomAccessFileStream(const char* file_name, const char* open_mode);
++  explicit randomAccessFileStream(FILE* file) : fileStream(file, true) {}
++  ~randomAccessFileStream() override {}
++  void write(const char* data, size_t length) override;
++
++  int seek(long offset, int position) { return ::fseek(_file, offset, position); }
++  long tell() { return ::ftell(_file); }
++  virtual void write(const char* data, size_t length, long position);
++};
++
+ // unlike fileStream, fdStream does unbuffered I/O by calling
+ // open() and write() directly. It is async-safe, but output
+ // from multiple thread may be mixed together. Used by fatal
+-- 
+2.34.1
+ 
diff --git a/huawei-Add-NUMARandom-feature.patch b/huawei-Add-NUMARandom-feature.patch
new file mode 100644
index 0000000000000000000000000000000000000000..18d94ac7a3fbb253f91f2039180f5ff6499ebf44
--- /dev/null
+++ b/huawei-Add-NUMARandom-feature.patch
@@ -0,0 +1,1442 @@
+Date: Tue, 25 Nov 2025 19:56:31 +0800
+Subject: [PATCH 4/8] Add NUMARandom feature
+
+---
+ make/data/hotspot-symbols/symbols-shared      |   1 +
+ src/hotspot/cpu/aarch64/globals_aarch64.hpp   |  13 +
+ src/hotspot/os/linux/os_linux.cpp             |  80 +++-
+ src/hotspot/os/linux/os_linux.hpp             | 132 ++++++-
+ .../os_cpu/linux_aarch64/os_linux_aarch64.cpp | 368 ++++++++++++++++++
+ src/hotspot/share/prims/jni.cpp               |   6 +
+ src/java.base/share/native/include/jni.h      |   3 +
+ src/java.base/share/native/libjli/java.c      |   2 +
+ src/java.base/share/native/libjli/java.h      |   3 +
+ src/java.base/unix/native/libjli/java_md.c    |   6 +
+ .../docker/TestNUMAErrorHandling.java         | 113 ++++++
+ .../containers/docker/TestNUMANodes.java      | 177 +++++++++
+ .../docker/TestNUMANodesRandom.java           | 203 ++++++++++
+ 13 files changed, 1092 insertions(+), 15 deletions(-)
+ create mode 100644 test/hotspot/jtreg/containers/docker/TestNUMAErrorHandling.java
+ create mode 100644 test/hotspot/jtreg/containers/docker/TestNUMANodes.java
+ create mode 100644 test/hotspot/jtreg/containers/docker/TestNUMANodesRandom.java
+
+diff --git a/make/data/hotspot-symbols/symbols-shared b/make/data/hotspot-symbols/symbols-shared
+index c5b13ef1e..6f8b319ee 100644
+--- a/make/data/hotspot-symbols/symbols-shared
++++ b/make/data/hotspot-symbols/symbols-shared
+@@ -29,6 +29,7 @@ jio_vfprintf
+ jio_vsnprintf
+ JNI_CreateJavaVM
+ JNI_GetCreatedJavaVMs
++JNI_SetCParam
+ JNI_GetDefaultJavaVMInitArgs
+ JVM_IsForeignLinkerSupported
+ JVM_FindClassFromBootLoader
+diff --git a/src/hotspot/cpu/aarch64/globals_aarch64.hpp
b/src/hotspot/cpu/aarch64/globals_aarch64.hpp
+index 2f1e1f4e6..77f72226e 100644
+--- a/src/hotspot/cpu/aarch64/globals_aarch64.hpp
++++ b/src/hotspot/cpu/aarch64/globals_aarch64.hpp
+@@ -129,6 +129,19 @@ define_pd_global(intx, InlineSmallCode, 1000);
+          range(1, 99)                                                  \
+   product(ccstr, UseBranchProtection, "none",                          \
+           "Branch Protection to use: none, standard, pac-ret")         \
++                                                                       \
++  product(bool, LogNUMANodes, false,                                   \
++          "Print NUMANodes")                                           \
++                                                                       \
++  product(ccstr, NUMANodes, NULL,                                      \
++          "This parameter provides the same functionality as "         \
++          "'numactl --all -N <nodes> -m <nodes>'. "                    \
++          "<nodes> can be '0-2', '0,1,2', 'all' and so on.")           \
++                                                                       \
++  product(uintx, NUMANodesRandom, 0,                                   \
++          "Number of continuous nodes to bind, "                       \
++          "with the first node randomly chosen. "                      \
++          "NUMANodesRandom has higher priority than NUMANodes")        \
+ 
+ // end of ARCH_FLAGS
+ 
+diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp
+index aaa7927a9..53982c495 100644
+--- a/src/hotspot/os/linux/os_linux.cpp
++++ b/src/hotspot/os/linux/os_linux.cpp
+@@ -3212,6 +3212,10 @@ bool os::Linux::libnuma_init() {
+                                        libnuma_dlsym(handle, "numa_node_to_cpus")));
+       set_numa_node_to_cpus_v2(CAST_TO_FN_PTR(numa_node_to_cpus_v2_func_t,
+                                               libnuma_v2_dlsym(handle, "numa_node_to_cpus")));
++      set_numa_node_of_cpu(CAST_TO_FN_PTR(numa_node_of_cpu_func_t,
++                                          libnuma_dlsym(handle, "numa_node_of_cpu")));
++      set_numa_num_configured_cpus(CAST_TO_FN_PTR(numa_num_configured_cpus_func_t,
++                                                  libnuma_dlsym(handle, "numa_num_configured_cpus")));
+       set_numa_max_node(CAST_TO_FN_PTR(numa_max_node_func_t,
+                                        libnuma_dlsym(handle, "numa_max_node")));
+       set_numa_num_configured_nodes(CAST_TO_FN_PTR(numa_num_configured_nodes_func_t,
+@@ -3223,7 +3227,7 @@ bool os::Linux::libnuma_init() {
+       set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
+                                                 libnuma_dlsym(handle, "numa_interleave_memory")));
+       set_numa_interleave_memory_v2(CAST_TO_FN_PTR(numa_interleave_memory_v2_func_t,
+-                                              libnuma_v2_dlsym(handle, "numa_interleave_memory")));
++                                                   libnuma_v2_dlsym(handle, "numa_interleave_memory")));
+       set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t,
+                                               libnuma_dlsym(handle, "numa_set_bind_policy")));
+       set_numa_bitmask_isbitset(CAST_TO_FN_PTR(numa_bitmask_isbitset_func_t,
+@@ -3232,12 +3236,38 @@ bool os::Linux::libnuma_init() {
+                                         libnuma_dlsym(handle, "numa_distance")));
+       set_numa_get_membind(CAST_TO_FN_PTR(numa_get_membind_func_t,
+                                           libnuma_v2_dlsym(handle, "numa_get_membind")));
++      set_numa_get_mems_allowed(CAST_TO_FN_PTR(numa_get_mems_allowed_func_t,
++                                               libnuma_dlsym(handle, "numa_get_mems_allowed")));
++      set_numa_allocate_cpumask(CAST_TO_FN_PTR(numa_allocate_cpumask_func_t,
++                                               libnuma_dlsym(handle, "numa_allocate_cpumask")));
++      set_numa_allocate_nodemask(CAST_TO_FN_PTR(numa_allocate_nodemask_func_t,
++                                                libnuma_dlsym(handle, "numa_allocate_nodemask")));
++      set_numa_sched_setaffinity(CAST_TO_FN_PTR(numa_sched_setaffinity_func_t,
++                                                libnuma_dlsym(handle, "numa_sched_setaffinity")));
++      set_numa_bitmask_nbytes(CAST_TO_FN_PTR(numa_bitmask_nbytes_func_t,
++                                             libnuma_dlsym(handle, "numa_bitmask_nbytes")));
++      set_numa_bitmask_setbit(CAST_TO_FN_PTR(numa_bitmask_setbit_func_t,
++                                             libnuma_dlsym(handle, "numa_bitmask_setbit")));
++      set_numa_bitmask_clearall(CAST_TO_FN_PTR(numa_bitmask_clearall_func_t,
++                                               libnuma_dlsym(handle, "numa_bitmask_clearall")));
+       set_numa_get_interleave_mask(CAST_TO_FN_PTR(numa_get_interleave_mask_func_t,
+                                                   libnuma_v2_dlsym(handle, "numa_get_interleave_mask")));
+       
set_numa_move_pages(CAST_TO_FN_PTR(numa_move_pages_func_t, + libnuma_dlsym(handle, "numa_move_pages"))); + set_numa_set_preferred(CAST_TO_FN_PTR(numa_set_preferred_func_t, + libnuma_dlsym(handle, "numa_set_preferred"))); ++ set_numa_parse_nodestring_all(CAST_TO_FN_PTR(numa_parse_nodestring_all_func_t, ++ libnuma_dlsym(handle, "numa_parse_nodestring_all"))); ++ set_numa_run_on_node_mask(CAST_TO_FN_PTR(numa_run_on_node_mask_func_t, ++ libnuma_v2_dlsym(handle, "numa_run_on_node_mask"))); ++ set_numa_bitmask_equal(CAST_TO_FN_PTR(numa_bitmask_equal_func_t, ++ libnuma_v2_dlsym(handle, "numa_bitmask_equal"))); ++ set_numa_set_membind(CAST_TO_FN_PTR(numa_set_membind_func_t, ++ libnuma_v2_dlsym(handle, "numa_set_membind"))); ++ set_numa_bitmask_free(CAST_TO_FN_PTR(numa_bitmask_free_func_t, ++ libnuma_dlsym(handle, "numa_bitmask_free"))); ++ set_numa_get_run_node_mask(CAST_TO_FN_PTR(numa_get_run_node_mask_func_t, ++ libnuma_v2_dlsym(handle, "numa_get_run_node_mask"))); + + if (numa_available() != -1) { + set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes")); +@@ -3245,6 +3275,8 @@ bool os::Linux::libnuma_init() { + set_numa_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_nodes_ptr")); + set_numa_interleave_bitmask(_numa_get_interleave_mask()); + set_numa_membind_bitmask(_numa_get_membind()); ++ set_numa_cpunodebind_bitmask(_numa_get_run_node_mask()); ++ AARCH64_ONLY(chose_numa_nodes();) + // Create an index -> node mapping, since nodes are not always consecutive + _nindex_to_node = new (mtInternal) GrowableArray(0, mtInternal); + rebuild_nindex_to_node_map(); +@@ -3413,6 +3445,8 @@ GrowableArray* os::Linux::_nindex_to_node; + os::Linux::sched_getcpu_func_t os::Linux::_sched_getcpu; + os::Linux::numa_node_to_cpus_func_t os::Linux::_numa_node_to_cpus; + os::Linux::numa_node_to_cpus_v2_func_t os::Linux::_numa_node_to_cpus_v2; ++os::Linux::numa_node_of_cpu_func_t os::Linux::_numa_node_of_cpu; ++os::Linux::numa_num_configured_cpus_func_t os::Linux::_numa_num_configured_cpus; + os::Linux::numa_max_node_func_t os::Linux::_numa_max_node; + os::Linux::numa_num_configured_nodes_func_t os::Linux::_numa_num_configured_nodes; + os::Linux::numa_available_func_t os::Linux::_numa_available; +@@ -3423,15 +3457,29 @@ os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy; + os::Linux::numa_bitmask_isbitset_func_t os::Linux::_numa_bitmask_isbitset; + os::Linux::numa_distance_func_t os::Linux::_numa_distance; + os::Linux::numa_get_membind_func_t os::Linux::_numa_get_membind; ++os::Linux::numa_get_mems_allowed_func_t os::Linux::_numa_get_mems_allowed; ++os::Linux::numa_allocate_cpumask_func_t os::Linux::_numa_allocate_cpumask; ++os::Linux::numa_allocate_nodemask_func_t os::Linux::_numa_allocate_nodemask; ++os::Linux::numa_sched_setaffinity_func_t os::Linux::_numa_sched_setaffinity; ++os::Linux::numa_bitmask_nbytes_func_t os::Linux::_numa_bitmask_nbytes; ++os::Linux::numa_bitmask_setbit_func_t os::Linux::_numa_bitmask_setbit; ++os::Linux::numa_bitmask_clearall_func_t os::Linux::_numa_bitmask_clearall; + os::Linux::numa_get_interleave_mask_func_t os::Linux::_numa_get_interleave_mask; + os::Linux::numa_move_pages_func_t os::Linux::_numa_move_pages; + os::Linux::numa_set_preferred_func_t os::Linux::_numa_set_preferred; + os::Linux::NumaAllocationPolicy os::Linux::_current_numa_policy; ++os::Linux::numa_parse_nodestring_all_func_t os::Linux::_numa_parse_nodestring_all; ++os::Linux::numa_run_on_node_mask_func_t os::Linux::_numa_run_on_node_mask; 
++os::Linux::numa_bitmask_equal_func_t os::Linux::_numa_bitmask_equal; ++os::Linux::numa_set_membind_func_t os::Linux::_numa_set_membind; ++os::Linux::numa_bitmask_free_func_t os::Linux::_numa_bitmask_free; ++os::Linux::numa_get_run_node_mask_func_t os::Linux::_numa_get_run_node_mask; + unsigned long* os::Linux::_numa_all_nodes; + struct bitmask* os::Linux::_numa_all_nodes_ptr; + struct bitmask* os::Linux::_numa_nodes_ptr; + struct bitmask* os::Linux::_numa_interleave_bitmask; + struct bitmask* os::Linux::_numa_membind_bitmask; ++struct bitmask* os::Linux::_numa_cpunodebind_bitmask; + + bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) { + uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE, +@@ -4648,19 +4696,19 @@ void os::Linux::numa_init() { + // bitmask when externally configured to run on all or fewer nodes. + + if (!Linux::libnuma_init()) { +- FLAG_SET_ERGO(UseNUMA, false); +- FLAG_SET_ERGO(UseNUMAInterleaving, false); // Also depends on libnuma. ++ disable_numa("Failed to initialize libnuma", true); + } else { +- if ((Linux::numa_max_node() < 1) || Linux::is_bound_to_single_node()) { +- // If there's only one node (they start from 0) or if the process +- // is bound explicitly to a single node using membind, disable NUMA +- UseNUMA = false; ++ Linux::set_configured_numa_policy(Linux::identify_numa_policy()); ++ if (Linux::numa_max_node() < 1) { ++ disable_numa("Only a single NUMA node is available", false); ++ } else if (Linux::is_bound_to_single_mem_node()) { ++ disable_numa("The process is bound to a single NUMA node", true); ++ } else if (Linux::mem_and_cpu_node_mismatch()) { ++ disable_numa("The process memory and cpu node configuration does not match", true); + } else { + LogTarget(Info,os) log; + LogStream ls(log); + +- Linux::set_configured_numa_policy(Linux::identify_numa_policy()); +- + struct bitmask* bmp = Linux::_numa_membind_bitmask; + const char* numa_mode = "membind"; + +@@ -4803,6 +4851,20 @@ void os::Linux::load_ACC_library() { + BOOL_TO_STR(_jboltMerge_judge != nullptr));) + } + ++void os::Linux::disable_numa(const char* reason, bool warning) { ++ if ((UseNUMA && FLAG_IS_CMDLINE(UseNUMA)) || ++ (UseNUMAInterleaving && FLAG_IS_CMDLINE(UseNUMAInterleaving))) { ++ // Only issue a message if the user explicitly asked for NUMA support ++ if (warning) { ++ log_warning(os)("NUMA support disabled: %s", reason); ++ } else { ++ log_info(os)("NUMA support disabled: %s", reason); ++ } ++ } ++ FLAG_SET_ERGO(UseNUMA, false); ++ FLAG_SET_ERGO(UseNUMAInterleaving, false); ++} ++ + #if defined(IA32) && !defined(ZERO) + /* + * Work-around (execute code at a high address) for broken NX emulation using CS limit, +diff --git a/src/hotspot/os/linux/os_linux.hpp b/src/hotspot/os/linux/os_linux.hpp +index 7c1c51a58..aa6c5280e 100644 +--- a/src/hotspot/os/linux/os_linux.hpp ++++ b/src/hotspot/os/linux/os_linux.hpp +@@ -141,6 +141,7 @@ class os::Linux { + static void load_ACC_library_before_ergo(); + static void libpthread_init(); + static void sched_getcpu_init(); ++ static void chose_numa_nodes(); + static bool libnuma_init(); + static void* libnuma_dlsym(void* handle, const char* name); + // libnuma v2 (libnuma_1.2) symbols +@@ -198,9 +199,12 @@ class os::Linux { + private: + static void numa_init(); + ++ static void disable_numa(const char* reason, bool warning); + typedef int (*sched_getcpu_func_t)(void); + typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen); + typedef int (*numa_node_to_cpus_v2_func_t)(int node, void *mask); ++ 
typedef int (*numa_node_of_cpu_func_t)(int cpu); ++ typedef int (*numa_num_configured_cpus_func_t)(void); + typedef int (*numa_max_node_func_t)(void); + typedef int (*numa_num_configured_nodes_func_t)(void); + typedef int (*numa_available_func_t)(void); +@@ -208,9 +212,22 @@ class os::Linux { + typedef void (*numa_interleave_memory_func_t)(void *start, size_t size, unsigned long *nodemask); + typedef void (*numa_interleave_memory_v2_func_t)(void *start, size_t size, struct bitmask* mask); + typedef struct bitmask* (*numa_get_membind_func_t)(void); ++ typedef struct bitmask* (*numa_get_mems_allowed_func_t)(void); ++ typedef struct bitmask* (*numa_allocate_cpumask_func_t)(void); ++ typedef struct bitmask* (*numa_allocate_nodemask_func_t)(void); ++ typedef int (*numa_sched_setaffinity_func_t)(int pid, struct bitmask* mask); ++ typedef int (*numa_bitmask_nbytes_func_t)(struct bitmask* mask); ++ typedef struct bitmask* (*numa_bitmask_setbit_func_t)(struct bitmask* mask, int len); ++ typedef struct bitmask* (*numa_bitmask_clearall_func_t)(struct bitmask* mask); + typedef struct bitmask* (*numa_get_interleave_mask_func_t)(void); ++ typedef struct bitmask* (*numa_get_run_node_mask_func_t)(void); + typedef long (*numa_move_pages_func_t)(int pid, unsigned long count, void **pages, const int *nodes, int *status, int flags); + typedef void (*numa_set_preferred_func_t)(int node); ++ typedef struct bitmask* (*numa_parse_nodestring_all_func_t)(const char*); ++ typedef int (*numa_run_on_node_mask_func_t)(struct bitmask* mask); ++ typedef void (*numa_set_membind_func_t)(struct bitmask* mask); ++ typedef int (*numa_bitmask_equal_func_t)(struct bitmask* mask, struct bitmask* mask1); ++ typedef void (*numa_bitmask_free_func_t)(struct bitmask* mask); + typedef void (*numa_set_bind_policy_func_t)(int policy); + typedef int (*numa_bitmask_isbitset_func_t)(struct bitmask *bmp, unsigned int n); + typedef int (*numa_distance_func_t)(int node1, int node2); +@@ -248,6 +265,8 @@ class os::Linux { + static sched_getcpu_func_t _sched_getcpu; + static numa_node_to_cpus_func_t _numa_node_to_cpus; + static numa_node_to_cpus_v2_func_t _numa_node_to_cpus_v2; ++ static numa_node_of_cpu_func_t _numa_node_of_cpu; ++ static numa_num_configured_cpus_func_t _numa_num_configured_cpus; + static numa_max_node_func_t _numa_max_node; + static numa_num_configured_nodes_func_t _numa_num_configured_nodes; + static numa_available_func_t _numa_available; +@@ -258,18 +277,34 @@ class os::Linux { + static numa_bitmask_isbitset_func_t _numa_bitmask_isbitset; + static numa_distance_func_t _numa_distance; + static numa_get_membind_func_t _numa_get_membind; ++ static numa_get_run_node_mask_func_t _numa_get_run_node_mask; ++ static numa_get_mems_allowed_func_t _numa_get_mems_allowed; ++ static numa_allocate_cpumask_func_t _numa_allocate_cpumask; ++ static numa_allocate_nodemask_func_t _numa_allocate_nodemask; ++ static numa_sched_setaffinity_func_t _numa_sched_setaffinity; ++ static numa_bitmask_nbytes_func_t _numa_bitmask_nbytes; ++ static numa_bitmask_setbit_func_t _numa_bitmask_setbit; ++ static numa_bitmask_clearall_func_t _numa_bitmask_clearall; + static numa_get_interleave_mask_func_t _numa_get_interleave_mask; + static numa_move_pages_func_t _numa_move_pages; + static numa_set_preferred_func_t _numa_set_preferred; ++ static numa_parse_nodestring_all_func_t _numa_parse_nodestring_all; ++ static numa_run_on_node_mask_func_t _numa_run_on_node_mask; ++ static numa_bitmask_equal_func_t _numa_bitmask_equal; ++ static numa_set_membind_func_t 
_numa_set_membind; ++ static numa_bitmask_free_func_t _numa_bitmask_free; + static unsigned long* _numa_all_nodes; + static struct bitmask* _numa_all_nodes_ptr; + static struct bitmask* _numa_nodes_ptr; + static struct bitmask* _numa_interleave_bitmask; + static struct bitmask* _numa_membind_bitmask; ++ static struct bitmask* _numa_cpunodebind_bitmask; + + static void set_sched_getcpu(sched_getcpu_func_t func) { _sched_getcpu = func; } + static void set_numa_node_to_cpus(numa_node_to_cpus_func_t func) { _numa_node_to_cpus = func; } + static void set_numa_node_to_cpus_v2(numa_node_to_cpus_v2_func_t func) { _numa_node_to_cpus_v2 = func; } ++ static void set_numa_node_of_cpu(numa_node_of_cpu_func_t func) { _numa_node_of_cpu = func; } ++ static void set_numa_num_configured_cpus(numa_num_configured_cpus_func_t func) { _numa_num_configured_cpus = func; } + static void set_numa_max_node(numa_max_node_func_t func) { _numa_max_node = func; } + static void set_numa_num_configured_nodes(numa_num_configured_nodes_func_t func) { _numa_num_configured_nodes = func; } + static void set_numa_available(numa_available_func_t func) { _numa_available = func; } +@@ -280,14 +315,28 @@ class os::Linux { + static void set_numa_bitmask_isbitset(numa_bitmask_isbitset_func_t func) { _numa_bitmask_isbitset = func; } + static void set_numa_distance(numa_distance_func_t func) { _numa_distance = func; } + static void set_numa_get_membind(numa_get_membind_func_t func) { _numa_get_membind = func; } ++ static void set_numa_get_run_node_mask(numa_get_run_node_mask_func_t func) { _numa_get_run_node_mask = func; } ++ static void set_numa_get_mems_allowed(numa_get_mems_allowed_func_t func) { _numa_get_mems_allowed = func; } ++ static void set_numa_allocate_cpumask(numa_allocate_cpumask_func_t func) { _numa_allocate_cpumask = func; } ++ static void set_numa_allocate_nodemask(numa_allocate_nodemask_func_t func) { _numa_allocate_nodemask = func; } ++ static void set_numa_sched_setaffinity(numa_sched_setaffinity_func_t func) { _numa_sched_setaffinity = func; } ++ static void set_numa_bitmask_nbytes(numa_bitmask_nbytes_func_t func) { _numa_bitmask_nbytes = func; } ++ static void set_numa_bitmask_setbit(numa_bitmask_setbit_func_t func) { _numa_bitmask_setbit = func; } ++ static void set_numa_bitmask_clearall(numa_bitmask_clearall_func_t func) { _numa_bitmask_clearall = func; } + static void set_numa_get_interleave_mask(numa_get_interleave_mask_func_t func) { _numa_get_interleave_mask = func; } + static void set_numa_move_pages(numa_move_pages_func_t func) { _numa_move_pages = func; } + static void set_numa_set_preferred(numa_set_preferred_func_t func) { _numa_set_preferred = func; } ++ static void set_numa_parse_nodestring_all(numa_parse_nodestring_all_func_t func) { _numa_parse_nodestring_all = func; } ++ static void set_numa_run_on_node_mask(numa_run_on_node_mask_func_t func) { _numa_run_on_node_mask = func; } ++ static void set_numa_bitmask_equal(numa_bitmask_equal_func_t func) { _numa_bitmask_equal = func; } ++ static void set_numa_set_membind(numa_set_membind_func_t func) { _numa_set_membind = func; } ++ static void set_numa_bitmask_free(numa_bitmask_free_func_t func) { _numa_bitmask_free = func; } + static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; } + static void set_numa_all_nodes_ptr(struct bitmask **ptr) { _numa_all_nodes_ptr = (ptr == nullptr ? nullptr : *ptr); } + static void set_numa_nodes_ptr(struct bitmask **ptr) { _numa_nodes_ptr = (ptr == nullptr ? 
nullptr : *ptr); } + static void set_numa_interleave_bitmask(struct bitmask* ptr) { _numa_interleave_bitmask = ptr ; } + static void set_numa_membind_bitmask(struct bitmask* ptr) { _numa_membind_bitmask = ptr ; } ++ static void set_numa_cpunodebind_bitmask(struct bitmask* ptr) { _numa_cpunodebind_bitmask = ptr ; } + static int sched_getcpu_syscall(void); + + enum NumaAllocationPolicy{ +@@ -393,21 +442,26 @@ class os::Linux { + } + return false; + } +- // Check if bound to only one numa node. +- // Returns true if bound to a single numa node, otherwise returns false. +- static bool is_bound_to_single_node() { ++ // Check if memory is bound to only one numa node. ++ // Returns true if memory is bound to a single numa node, otherwise returns false. ++ static bool is_bound_to_single_mem_node() { + int nodes = 0; + unsigned int node = 0; + unsigned int highest_node_number = 0; + +- if (_numa_membind_bitmask != nullptr && _numa_max_node != nullptr && _numa_bitmask_isbitset != nullptr) { ++ struct bitmask* mem_nodes_bitmask = Linux::_numa_membind_bitmask; ++ if (Linux::is_running_in_interleave_mode()) { ++ mem_nodes_bitmask = Linux::_numa_interleave_bitmask; ++ } ++ ++ if (mem_nodes_bitmask != nullptr && _numa_max_node != nullptr && _numa_bitmask_isbitset != nullptr) { + highest_node_number = _numa_max_node(); + } else { + return false; + } + + for (node = 0; node <= highest_node_number; node++) { +- if (_numa_bitmask_isbitset(_numa_membind_bitmask, node)) { ++ if (_numa_bitmask_isbitset(mem_nodes_bitmask, node)) { + nodes++; + } + } +@@ -418,6 +472,20 @@ class os::Linux { + return false; + } + } ++ // Check if cpu and memory nodes are aligned, returns true if nodes misalign ++ static bool mem_and_cpu_node_mismatch() { ++ NOT_AARCH64(return false;) ++ struct bitmask* mem_nodes_bitmask = Linux::_numa_membind_bitmask; ++ if (Linux::is_running_in_interleave_mode()) { ++ mem_nodes_bitmask = Linux::_numa_interleave_bitmask; ++ } ++ ++ if (mem_nodes_bitmask == nullptr || Linux::_numa_cpunodebind_bitmask == nullptr) { ++ return false; ++ } ++ ++ return !_numa_bitmask_equal(mem_nodes_bitmask, Linux::_numa_cpunodebind_bitmask); ++ } + + static const GrowableArray* numa_nindex_to_node() { + return _nindex_to_node; +@@ -508,7 +576,7 @@ class os::Linux { + } + return _heap_vector_add(val, heap_vector, _inserted); + } +- ++ + static void* heap_vector_get_next(void* heap_vector, void* heap_vector_node, int &_cnt, void** &_items) { + if(_heap_vector_get_next == NULL) { + return NULL; +@@ -579,6 +647,58 @@ class os::Linux { + } + return result; + } ++ ++ static bitmask* numa_parse_nodestring_all(const char* s) { ++ return _numa_parse_nodestring_all != NULL ? _numa_parse_nodestring_all(s) : NULL; ++ } ++ ++ static int numa_num_configured_cpus() { ++ return _numa_num_configured_cpus != NULL ? _numa_num_configured_cpus() : 0; ++ } ++ ++ static bitmask* numa_allocate_cpumask() { ++ return _numa_allocate_cpumask != NULL ? _numa_allocate_cpumask() : NULL; ++ } ++ ++ static bitmask* numa_allocate_nodemask() { ++ return _numa_allocate_nodemask != NULL ? _numa_allocate_nodemask() : NULL; ++ } ++ ++ static int numa_sched_setaffinity(int pid, struct bitmask* mask) { ++ return _numa_sched_setaffinity != NULL ? _numa_sched_setaffinity(pid, mask) : -1; ++ } ++ ++ static int numa_bitmask_nbytes(struct bitmask* mask) { ++ return _numa_bitmask_nbytes != NULL ? _numa_bitmask_nbytes(mask) : 0; ++ } ++ ++ static bitmask* numa_bitmask_setbit(struct bitmask* mask, int len) { ++ return _numa_bitmask_setbit != NULL ? 
_numa_bitmask_setbit(mask, len) : NULL; ++ } ++ ++ static bitmask* numa_bitmask_clearall(struct bitmask* mask) { ++ return _numa_bitmask_clearall != NULL ? _numa_bitmask_clearall(mask) : NULL; ++ } ++ ++ static int numa_run_on_node_mask(bitmask* bitmask) { ++ return _numa_run_on_node_mask != NULL ? _numa_run_on_node_mask(bitmask) : -1; ++ } ++ ++ static int numa_bitmask_equal(bitmask* bitmask, struct bitmask* bitmask1) { ++ return _numa_bitmask_equal != NULL ? _numa_bitmask_equal(bitmask, bitmask1) : 1; ++ } ++ ++ static void numa_set_membind(bitmask* bitmask) { ++ if (_numa_set_membind != NULL) { ++ _numa_set_membind(bitmask); ++ } ++ } ++ ++ static void numa_bitmask_free(bitmask* bitmask) { ++ if (_numa_bitmask_free != NULL) { ++ _numa_bitmask_free(bitmask); ++ } ++ } + }; + + #endif // OS_LINUX_OS_LINUX_HPP +diff --git a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp +index 2be1f63dd..206f4d652 100644 +--- a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp ++++ b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp +@@ -390,6 +390,374 @@ int os::extra_bang_size_in_bytes() { + static inline void atomic_copy64(const volatile void *src, volatile void *dst) { + *(jlong *) dst = *(const jlong *) src; + } ++extern char** argv_for_execvp; ++ ++void os::Linux::chose_numa_nodes() { ++ const char* numa_chosen_env = getenv("_JVM_NUMA_BINDING_DONE"); ++ if (numa_chosen_env != NULL && strcmp(numa_chosen_env, "1") == 0) { ++ if (LogNUMANodes) { ++ warning("NUMA binding already done (detected via environment variable), skipping"); ++ } ++ return; ++ } ++ ++ if (NUMANodes == NULL && NUMANodesRandom == 0) { ++ return; ++ } ++ ++ const int MAX_DISTANCE = 999999; ++ ++ int nodes_num = Linux::numa_max_node() + 1; ++ const int MAXNODE = 100; ++ if (nodes_num <= 0 || nodes_num >= MAXNODE) { ++ warning("Invalid NUMA nodes number: %d", nodes_num); ++ return; ++ } ++ ++ // Parse the NUMANodes ++ bool user_specified_nodes[MAXNODE] = {false}; ++ bool has_user_constraint = false; ++ ++ if (NUMANodes != NULL) { ++ if (LogNUMANodes) { ++ warning("NUMANodes parameter specified: %s", NUMANodes); ++ } ++ ++ // Parse the nodestring ++ bitmask* user_nodes_mask = os::Linux::numa_parse_nodestring_all(NUMANodes); ++ if (user_nodes_mask != NULL) { ++ has_user_constraint = true; ++ for (int i = 0; i < nodes_num; i++) { ++ if (_numa_bitmask_isbitset(user_nodes_mask, i)) { ++ user_specified_nodes[i] = true; ++ if (LogNUMANodes) { ++ warning("User specified node %d is allowed", i); ++ } ++ } ++ } ++ os::Linux::numa_bitmask_free(user_nodes_mask); ++ } else { ++ if (LogNUMANodes) { ++ warning("Failed to parse NUMANodes: %s", NUMANodes); ++ warning("Skip NUMANodes parameter."); ++ } ++ } ++ } ++ ++ // Save the original CPU mask specified by system(numactl) ++ cpu_set_t original_cpu_mask; ++ if (sched_getaffinity(0, sizeof(cpu_set_t), &original_cpu_mask) == -1) { ++ perror("sched_getaffinity"); ++ return; ++ } ++ ++ // Within the limit, count available cpus each NUMA node has ++ int cpu_count_per_node[MAXNODE] = {0}; ++ bool node_has_cpu[MAXNODE] = {false}; ++ ++ int cpus_num = os::Linux::numa_num_configured_cpus(); ++ ++ // Traverse all cpus and count which cpus each Node has ++ for (int i = 0; i < cpus_num; i++) { ++ if (CPU_ISSET(i, &original_cpu_mask)) { ++ int node_id = _numa_node_of_cpu(i); ++ if (node_id == -1) { ++ if (LogNUMANodes) { ++ warning("Failed to get NUMA node for CPU %d", i); ++ } ++ } else if (node_id >= 0 && node_id < MAXNODE) { ++ 
node_has_cpu[node_id] = true; ++ cpu_count_per_node[node_id]++; ++ if (LogNUMANodes) { ++ warning("CPU %d belongs to Node %d", i, node_id); ++ } ++ } ++ } ++ } ++ ++ // Build a list of available nodes ++ int available_nodes[MAXNODE]; ++ int available_nodes_count = 0; ++ for (int i = 0; i < nodes_num; i++) { ++ if (node_has_cpu[i]) { ++ // Set the NUMANodes ++ if (has_user_constraint && !user_specified_nodes[i]) { ++ if (LogNUMANodes) { ++ warning("Node %d: has CPUs but excluded by NUMANodes parameter", i); ++ } ++ continue; ++ } ++ ++ available_nodes[available_nodes_count++] = i; ++ if (LogNUMANodes) { ++ warning("Node %d: available, CPU count = %d", i, cpu_count_per_node[i]); ++ } ++ } else if (has_user_constraint && user_specified_nodes[i]) { ++ // Specified this node, but no available CPU under the system(numactl) limit ++ if (LogNUMANodes) { ++ warning("Node %d: specified in NUMANodes but has no available CPUs in numactl range", i); ++ } ++ } ++ } ++ ++ if (available_nodes_count == 0) { ++ if (LogNUMANodes) { ++ if (has_user_constraint) { ++ warning("No available NUMA nodes found in NUMANodes range: %s", NUMANodes); ++ } else { ++ warning("No available NUMA nodes found"); ++ } ++ } ++ return; ++ } ++ ++ if (LogNUMANodes) { ++ warning("Total available nodes (after NUMANodes filter): %d", available_nodes_count); ++ } ++ ++ // Check the memory binding permissions ++ bitmask * mem_allowed = _numa_get_mems_allowed(); ++ if (!mem_allowed) { ++ if (LogNUMANodes) { ++ warning("Failed to get mems allowed"); ++ } ++ return; ++ } ++ ++ // Determine the number of nodes to be selected ++ int nodes_to_select = NUMANodesRandom; ++ ++ // If NUMANodesRandom is not set or the value is invalid, use all available nodes ++ if (nodes_to_select <= 0 || nodes_to_select > available_nodes_count) { ++ nodes_to_select = available_nodes_count; ++ } ++ ++ if (LogNUMANodes) { ++ if (has_user_constraint) { ++ warning("NUMANodes filter applied: %s, available nodes after filter: %d", ++ NUMANodes, available_nodes_count); ++ } ++ warning("NUMANodesRandom=%lu, will select %d nodes from %d available nodes", ++ NUMANodesRandom, nodes_to_select, available_nodes_count); ++ } ++ ++ // Use PID as the random seed ++ int pid = getpid(); ++ int start_index = pid % available_nodes_count; ++ int start_node = available_nodes[start_index]; ++ ++ // Check whether the starting node can be bound to memory ++ if (!(_numa_bitmask_isbitset(mem_allowed, start_node) && ++ _numa_bitmask_isbitset(_numa_membind_bitmask, start_node))) { ++ // If the starting node is unavailable, find another one ++ start_node = -1; ++ for (int i = 0; i < available_nodes_count; i++) { ++ int node_id = available_nodes[i]; ++ if (_numa_bitmask_isbitset(mem_allowed, node_id) && ++ _numa_bitmask_isbitset(_numa_membind_bitmask, node_id)) { ++ start_node = node_id; ++ break; ++ } ++ } ++ if (start_node == -1) { ++ os::Linux::numa_bitmask_free(mem_allowed); ++ if (LogNUMANodes) { ++ warning("No bindable nodes found!"); ++ } ++ return; ++ } ++ } ++ ++ if (LogNUMANodes) { ++ warning("Start node: %d", start_node); ++ } ++ ++ // Select nodes: Choose the nearest nodes_to_select node based on distance ++ int selected_nodes[MAXNODE]; ++ int selected_count = 0; ++ ++ // First node is the starting node ++ selected_nodes[selected_count++] = start_node; ++ ++ if (nodes_to_select == 1) { ++ if (LogNUMANodes) { ++ warning("Selected node %d", start_node); ++ } ++ } else { ++ // Select by distance ++ bool node_selected[MAXNODE] = {false}; ++ node_selected[start_node] = true; ++ ++ // 
Select the nearest nodes ++ while (selected_count < nodes_to_select) { ++ int nearest_node = -1; ++ int min_total_distance = MAX_DISTANCE; ++ ++ // Calculate the total distance from it to all selected nodes ++ for (int i = 0; i < available_nodes_count; i++) { ++ int candidate = available_nodes[i]; ++ ++ // Skip the selected nodes and the unbound nodes ++ if (node_selected[candidate]) continue; ++ if (!(_numa_bitmask_isbitset(mem_allowed, candidate) && ++ _numa_bitmask_isbitset(_numa_membind_bitmask, candidate))) { ++ continue; ++ } ++ ++ // Calculate the total distance from the candidate to all selected nodes ++ int total_distance = 0; ++ for (int j = 0; j < selected_count; j++) { ++ int selected = selected_nodes[j]; ++ int dist = _numa_distance(candidate, selected); ++ total_distance += dist; ++ if (LogNUMANodes) { ++ warning("Distance from node %d to node %d: %d", candidate, selected, dist); ++ } ++ } ++ ++ // Find the node with the smallest distance ++ if (total_distance < min_total_distance) { ++ min_total_distance = total_distance; ++ nearest_node = candidate; ++ } ++ } ++ ++ // No more available nodes are found, exit ++ if (nearest_node == -1) { ++ if (LogNUMANodes) { ++ warning("No more bindable nodes available, selected %d nodes", selected_count); ++ } ++ break; ++ } ++ ++ // Select the nearest node ++ selected_nodes[selected_count++] = nearest_node; ++ node_selected[nearest_node] = true; ++ ++ if (LogNUMANodes) { ++ warning("Selected node %d (total distance: %d)", nearest_node, min_total_distance); ++ } ++ } ++ } ++ ++ os::Linux::numa_bitmask_free(mem_allowed); ++ ++ if (selected_count == 0) { ++ if (LogNUMANodes) { ++ warning("Cannot find proper nodes to bind!"); ++ } ++ return; ++ } ++ ++ if (LogNUMANodes) { ++ warning("Final selected nodes count: %d", selected_count); ++ } ++ ++ // New CPU mask: only include the CPU on the selected node ++ cpu_set_t new_cpu_mask; ++ CPU_ZERO(&new_cpu_mask); ++ ++ bitmask * node_cpumask = os::Linux::numa_allocate_cpumask(); ++ if (!node_cpumask) { ++ if (LogNUMANodes) { ++ warning("Cannot allocate bitmask for cpus!"); ++ } ++ return; ++ } ++ ++ // Only retain the CPU on the selected node ++ for (int i = 0; i < selected_count; i++) { ++ int node_id = selected_nodes[i]; ++ ++ if (_numa_node_to_cpus_v2(node_id, node_cpumask) != 0) { ++ if (LogNUMANodes) { ++ warning("Failed to get CPUs for node %d", node_id); ++ } ++ continue; ++ } ++ ++ // Key: Only add cpus that meet both of the following conditions: ++ // 1. Belongs to the selected Node ++ // 2. 
Within the original limits of system(numactl)
++    for (int cpu = 0; cpu < cpus_num; cpu++) {
++      if (_numa_bitmask_isbitset(node_cpumask, cpu) &&
++          CPU_ISSET(cpu, &original_cpu_mask)) {
++        CPU_SET(cpu, &new_cpu_mask);
++        if (LogNUMANodes) {
++          warning("Keeping CPU %d from node %d", cpu, node_id);
++        }
++      }
++    }
++  }
++
++  os::Linux::numa_bitmask_free(node_cpumask);
++
++  // Build a printable list of the selected nodes (used in log messages)
++  char buf[256] = {0};
++  int buf_pos = 0;
++
++  for (int i = 0; i < selected_count; i++) {
++    if (i > 0) {
++      buf_pos += snprintf(buf + buf_pos, sizeof(buf) - buf_pos, ",");
++    }
++    buf_pos += snprintf(buf + buf_pos, sizeof(buf) - buf_pos, "%d", selected_nodes[i]);
++  }
++
++  bitmask* mask = numa_allocate_nodemask();
++  numa_bitmask_clearall(mask);
++  for (int i = 0; i < selected_count; i++) {
++    numa_bitmask_setbit(mask, selected_nodes[i]); // Set the bit directly
++  }
++
++  if (os::Linux::numa_bitmask_equal(mask, os::Linux::_numa_membind_bitmask)) {
++    os::Linux::numa_bitmask_free(mask);
++    if (LogNUMANodes) {
++      warning("Mempolicy is not changed, param: %s", buf);
++    }
++    return;
++  }
++
++  // Bind CPU execution and memory allocation to the selected nodes
++  errno = 0;
++  os::Linux::numa_run_on_node_mask(mask);
++  if (errno) {
++    perror("numa_run_on_node_mask");
++  }
++
++  errno = 0;
++  os::Linux::numa_set_membind(mask);
++  int errtmp = errno;
++  os::Linux::numa_bitmask_free(mask);
++  if (errtmp) {
++    perror("numa_set_membind");
++  }
++
++  // Apply the new CPU affinity mask
++  if (sched_setaffinity(0, sizeof(cpu_set_t), &new_cpu_mask) == -1) {
++    perror("sched_setaffinity");
++    if (LogNUMANodes) {
++      warning("Failed to set CPU affinity");
++    }
++    return;
++  }
++
++  if (LogNUMANodes) {
++    warning("Successfully bound to %d node(s): %s", selected_count, buf);
++    warning("Final available CPUs:");
++    for (int cpu = 0; cpu < cpus_num; cpu++) {
++      if (CPU_ISSET(cpu, &new_cpu_mask)) {
++        warning("  CPU %d", cpu);
++      }
++    }
++  }
++
++  setenv("_JVM_NUMA_BINDING_DONE", "1", 1);
++
++  execvp(*argv_for_execvp, argv_for_execvp);
++
++  // execvp only returns on failure; report the error
++  perror("execvp failed");
++}
+ 
+ extern "C" {
+ int SpinPause() {
+diff --git a/src/hotspot/share/prims/jni.cpp b/src/hotspot/share/prims/jni.cpp
+index 0d477d67e..bcf3808bb 100644
+--- a/src/hotspot/share/prims/jni.cpp
++++ b/src/hotspot/share/prims/jni.cpp
+@@ -3523,6 +3523,12 @@ _JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_GetDefaultJavaVMInitArgs(void *args_) {
+ DT_RETURN_MARK_DECL(CreateJavaVM, jint
+                     , HOTSPOT_JNI_CREATEJAVAVM_RETURN(_ret_ref));
+ 
++char** argv_for_execvp;
++
++_JNI_IMPORT_OR_EXPORT_ void JNICALL JNI_SetCParam(char** raw_argv) {
++  argv_for_execvp = raw_argv;
++}
++
+ static jint JNI_CreateJavaVM_inner(JavaVM **vm, void **penv, void *args) {
+   HOTSPOT_JNI_CREATEJAVAVM_ENTRY((void **) vm, penv, args);
+ 
+diff --git a/src/java.base/share/native/include/jni.h b/src/java.base/share/native/include/jni.h
+index c85da1bc6..7dde183f2 100644
+--- a/src/java.base/share/native/include/jni.h
++++ b/src/java.base/share/native/include/jni.h
+@@ -1973,6 +1973,9 @@ JNI_GetDefaultJavaVMInitArgs(void *args);
+ _JNI_IMPORT_OR_EXPORT_ jint JNICALL
+ JNI_CreateJavaVM(JavaVM **pvm, void **penv, void *args);
+ 
++_JNI_IMPORT_OR_EXPORT_ void JNICALL
++JNI_SetCParam(char **raw_argv);
++
+ _JNI_IMPORT_OR_EXPORT_ jint JNICALL
+ JNI_GetCreatedJavaVMs(JavaVM **, jsize, jsize *);
+ 
+diff --git a/src/java.base/share/native/libjli/java.c b/src/java.base/share/native/libjli/java.c
+index cc3af9ce1..ecae286ed 100644
+--- a/src/java.base/share/native/libjli/java.c
++++ 
b/src/java.base/share/native/libjli/java.c +@@ -287,6 +287,7 @@ JLI_Launch(int argc, char ** argv, /* main argc, argv */ + + ifn.CreateJavaVM = 0; + ifn.GetDefaultJavaVMInitArgs = 0; ++ ifn.raw_argv = argv; + + if (JLI_IsTraceLauncher()) { + start = CurrentTimeMicros(); +@@ -1510,6 +1511,7 @@ InitializeJVM(JavaVM **pvm, JNIEnv **penv, InvocationFunctions *ifn) + i, args.options[i].optionString); + } + ++ ifn->SetCParam(ifn->raw_argv); + r = ifn->CreateJavaVM(pvm, (void **)penv, &args); + JLI_MemFree(options); + return r == JNI_OK; +diff --git a/src/java.base/share/native/libjli/java.h b/src/java.base/share/native/libjli/java.h +index f768b58a0..3a583a022 100644 +--- a/src/java.base/share/native/libjli/java.h ++++ b/src/java.base/share/native/libjli/java.h +@@ -77,13 +77,16 @@ + * Pointers to the needed JNI invocation API, initialized by LoadJavaVM. + */ + typedef jint (JNICALL *CreateJavaVM_t)(JavaVM **pvm, void **env, void *args); ++typedef void (JNICALL *SetCParam_t)(char** raw_argv); + typedef jint (JNICALL *GetDefaultJavaVMInitArgs_t)(void *args); + typedef jint (JNICALL *GetCreatedJavaVMs_t)(JavaVM **vmBuf, jsize bufLen, jsize *nVMs); + + typedef struct { + CreateJavaVM_t CreateJavaVM; ++ SetCParam_t SetCParam; + GetDefaultJavaVMInitArgs_t GetDefaultJavaVMInitArgs; + GetCreatedJavaVMs_t GetCreatedJavaVMs; ++ char** raw_argv; + } InvocationFunctions; + + JNIEXPORT int JNICALL +diff --git a/src/java.base/unix/native/libjli/java_md.c b/src/java.base/unix/native/libjli/java_md.c +index d2c04d8c2..abfe49b3b 100644 +--- a/src/java.base/unix/native/libjli/java_md.c ++++ b/src/java.base/unix/native/libjli/java_md.c +@@ -542,6 +542,12 @@ LoadJavaVM(const char *jvmpath, InvocationFunctions *ifn) + return JNI_FALSE; + } + ++ ifn->SetCParam = (SetCParam_t) ++ dlsym(libjvm, "JNI_SetCParam"); ++ if (ifn->SetCParam == NULL) { ++ JLI_ReportErrorMessage(DLL_ERROR2, jvmpath, dlerror()); ++ return JNI_FALSE; ++ } + ifn->GetDefaultJavaVMInitArgs = (GetDefaultJavaVMInitArgs_t) + dlsym(libjvm, "JNI_GetDefaultJavaVMInitArgs"); + if (ifn->GetDefaultJavaVMInitArgs == NULL) { +diff --git a/test/hotspot/jtreg/containers/docker/TestNUMAErrorHandling.java b/test/hotspot/jtreg/containers/docker/TestNUMAErrorHandling.java +new file mode 100644 +index 000000000..9924e8ac4 +--- /dev/null ++++ b/test/hotspot/jtreg/containers/docker/TestNUMAErrorHandling.java +@@ -0,0 +1,113 @@ ++/* ++ * Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ */ ++ ++/** ++ * @test TestNUMAErrorHandling ++ * @summary Test error handling and edge cases ++ * @library /test/lib ++ * @requires os.family == "linux" ++ * @run driver TestNUMAErrorHandling ++ */ ++import jdk.test.lib.process.OutputAnalyzer; ++import jdk.test.lib.process.ProcessTools; ++ ++public class TestNUMAErrorHandling { ++ ++ public static void main(String[] args) throws Exception { ++ // Test Case 1: Without UseNUMA ++ testWithoutUseNUMA(); ++ ++ // Test Case 2: Negative parameter ++ testNegativeParameter(); ++ ++ // Test Case 3: Without NUMANodes & NUMANodesRandom ++ testNoParametersSet(); ++ ++ // Test Case 4: Invalid NUMANodes ++ testInvalidNodeString(); ++ ++ System.out.println("All error handling tests passed!"); ++ } ++ ++ private static void testWithoutUseNUMA() throws Exception { ++ System.out.println("\n=== Test Case 1: Without UseNUMA ==="); ++ ++ OutputAnalyzer output = ProcessTools.executeTestJava( ++ "-XX:NUMANodesRandom=1", ++ "-XX:+LogNUMANodes", ++ "-version" ++ ); ++ ++ output.shouldHaveExitValue(0); ++ output.shouldNotContain("Successfully bound to"); ++ ++ System.out.println("PASS: No binding without UseNUMA flag"); ++ } ++ ++ private static void testNegativeParameter() throws Exception { ++ System.out.println("\n=== Test Case 2: Negative parameter ==="); ++ ++ OutputAnalyzer output = ProcessTools.executeTestJava( ++ "-XX:+UseNUMA", ++ "-XX:NUMANodesRandom=-1", ++ "-XX:+LogNUMANodes", ++ "-version" ++ ); ++ ++ output.shouldNotHaveExitValue(0); ++ output.shouldContain("Improperly specified VM option"); ++ ++ System.out.println("PASS: Negative parameter handled"); ++ } ++ ++ private static void testNoParametersSet() throws Exception { ++ System.out.println("\n=== Test Case 3: No parameters set ==="); ++ ++ OutputAnalyzer output = ProcessTools.executeTestJava( ++ "-XX:+UseNUMA", ++ "-XX:+LogNUMANodes", ++ "-version" ++ ); ++ ++ output.shouldHaveExitValue(0); ++ output.shouldNotContain("will select"); ++ ++ System.out.println("PASS: No binding without parameters"); ++ } ++ ++ private static void testInvalidNodeString() throws Exception { ++ System.out.println("\n=== Test Case 4: Invalid NUMANodes format ==="); ++ ++ OutputAnalyzer output = ProcessTools.executeTestJava( ++ "-XX:+UseNUMA", ++ "-XX:NUMANodes=invalid", ++ "-XX:+LogNUMANodes", ++ "-version" ++ ); ++ ++ output.shouldHaveExitValue(0); ++ output.shouldMatch("Failed to parse NUMANodes| is invalid"); ++ ++ System.out.println("PASS: Invalid format handled gracefully"); ++ } ++} +diff --git a/test/hotspot/jtreg/containers/docker/TestNUMANodes.java b/test/hotspot/jtreg/containers/docker/TestNUMANodes.java +new file mode 100644 +index 000000000..677695ab5 +--- /dev/null ++++ b/test/hotspot/jtreg/containers/docker/TestNUMANodes.java +@@ -0,0 +1,177 @@ ++/* ++ * Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). 
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
++ * or visit www.oracle.com if you need additional information or have any
++ * questions.
++ */
++
++/**
++ * @test TestNUMANodes
++ * @summary Test NUMANodes parameter with different node specifications
++ * @library /test/lib
++ * @requires os.family == "linux"
++ * @run driver TestNUMANodes
++ */
++import jdk.test.lib.process.OutputAnalyzer;
++import jdk.test.lib.process.ProcessTools;
++import java.io.File;
++import java.util.regex.Matcher;
++import java.util.regex.Pattern;
++
++public class TestNUMANodes {
++
++    private static int getNumaNodeCount() throws Exception {
++        // Check /sys/devices/system/node/possible to see whether NUMA is present
++        File nodeFile = new File("/sys/devices/system/node/possible");
++        if (!nodeFile.exists()) {
++            return -1; // NUMA is unavailable
++        }
++
++        ProcessBuilder pb = new ProcessBuilder("numactl", "-H");
++        Process p = pb.start();
++        p.waitFor();
++
++        // Simply check whether numactl is available
++        if (p.exitValue() != 0) {
++            return -1;
++        }
++
++        // Determine the number of nodes from the output of 'numactl -H'
++        String output = new String(p.getInputStream().readAllBytes());
++        Pattern pattern = Pattern.compile("available: (\\d+) nodes");
++        Matcher matcher = pattern.matcher(output);
++        if (matcher.find()) {
++            return Integer.parseInt(matcher.group(1));
++        }
++        return -1;
++    }
++
++    public static void main(String[] args) throws Exception {
++        int nodeCount = getNumaNodeCount();
++        if (nodeCount <= 1) {
++            System.out.println("SKIP: Need multi-node NUMA system");
++            return;
++        }
++
++        // Test Case 1: Single-node "0"
++        testSingleNode();
++
++        // Test Case 2: Node range "0-1"
++        testNodeRange();
++
++        // Test Case 3: Node enumeration "0,2"
++        if (nodeCount >= 3) {
++            testNodeList();
++        }
++
++        // Test Case 4: Mixed format "0-1,3"
++        if (nodeCount >= 4) {
++            testMixedFormat();
++        }
++
++        // Test Case 5: Invalid node range
++        testInvalidNodeRange();
++
++        System.out.println("All NUMANodes tests passed!");
++    }
++
++    private static void testSingleNode() throws Exception {
++        System.out.println("\n=== Test Case 1: NUMANodes=0 ===");
++
++        OutputAnalyzer output = ProcessTools.executeTestJava(
++            "-XX:+UseNUMA",
++            "-XX:NUMANodes=0",
++            "-XX:+LogNUMANodes",
++            "-version"
++        );
++
++        output.shouldHaveExitValue(0);
++        output.shouldContain("NUMANodes parameter specified: 0");
++        output.shouldContain("User specified node 0 is allowed");
++
++        System.out.println("PASS: Single node specification works");
++    }
++
++    private static void testNodeRange() throws Exception {
++        System.out.println("\n=== Test Case 2: NUMANodes=0-1 ===");
++
++        OutputAnalyzer output = ProcessTools.executeTestJava(
++            "-XX:+UseNUMA",
++            "-XX:NUMANodes=0-1",
++            "-XX:+LogNUMANodes",
++            "-version"
++        );
++
++        output.shouldHaveExitValue(0);
++        output.shouldContain("NUMANodes parameter specified: 0-1");
++        output.shouldContain("User specified node 0 is allowed");
++        output.shouldContain("User specified node 1 is allowed");
++
++        System.out.println("PASS: Node range specification works");
++    }
++
++    private static void testNodeList() throws Exception {
++        System.out.println("\n=== Test Case 3: NUMANodes=0,2 ===");
++
++        OutputAnalyzer output = ProcessTools.executeTestJava(
++            "-XX:+UseNUMA",
++            
"-XX:NUMANodes=0,2", ++ "-XX:+LogNUMANodes", ++ "-version" ++ ); ++ ++ output.shouldHaveExitValue(0); ++ output.shouldContain("NUMANodes parameter specified: 0,2"); ++ ++ System.out.println("PASS: Node list specification works"); ++ } ++ ++ private static void testMixedFormat() throws Exception { ++ System.out.println("\n=== Test Case 4: NUMANodes=0-1,3 ==="); ++ ++ OutputAnalyzer output = ProcessTools.executeTestJava( ++ "-XX:+UseNUMA", ++ "-XX:NUMANodes=0-1,3", ++ "-XX:+LogNUMANodes", ++ "-version" ++ ); ++ ++ output.shouldHaveExitValue(0); ++ output.shouldContain("NUMANodes parameter specified: 0-1,3"); ++ ++ System.out.println("PASS: Mixed format specification works"); ++ } ++ ++ private static void testInvalidNodeRange() throws Exception { ++ System.out.println("\n=== Test Case 5: Invalid NUMANodes=99 ==="); ++ ++ OutputAnalyzer output = ProcessTools.executeTestJava( ++ "-XX:+UseNUMA", ++ "-XX:NUMANodes=999", ++ "-XX:+LogNUMANodes", ++ "-version" ++ ); ++ ++ output.shouldHaveExitValue(0); ++ // There should be warnings but no core dump ++ output.shouldMatch("Failed to parse NUMANodes|No available NUMA nodes"); ++ ++ System.out.println("PASS: Invalid node range handled gracefully"); ++ } ++} +diff --git a/test/hotspot/jtreg/containers/docker/TestNUMANodesRandom.java b/test/hotspot/jtreg/containers/docker/TestNUMANodesRandom.java +new file mode 100644 +index 000000000..54ef6130d +--- /dev/null ++++ b/test/hotspot/jtreg/containers/docker/TestNUMANodesRandom.java +@@ -0,0 +1,203 @@ ++/* ++ * Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ */
++
++/**
++ * @test TestNUMANodesRandom
++ * @summary Test NUMANodesRandom parameter with different values
++ * @library /test/lib
++ * @requires os.family == "linux"
++ * @run driver TestNUMANodesRandom
++ */
++import jdk.test.lib.process.OutputAnalyzer;
++import jdk.test.lib.process.ProcessTools;
++import jdk.test.lib.Platform;
++import java.io.File;
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.List;
++import java.util.regex.Matcher;
++import java.util.regex.Pattern;
++
++public class TestNUMANodesRandom {
++
++    private static int getNumaNodeCount() throws Exception {
++        // Check /sys/devices/system/node/possible to see whether NUMA is present
++        File nodeFile = new File("/sys/devices/system/node/possible");
++        if (!nodeFile.exists()) {
++            return -1; // NUMA is unavailable
++        }
++
++        ProcessBuilder pb = new ProcessBuilder("numactl", "-H");
++        Process p = pb.start();
++        p.waitFor();
++
++        // Simply check whether numactl is available
++        if (p.exitValue() != 0) {
++            return -1;
++        }
++
++        // Determine the number of nodes from the output of 'numactl -H'
++        String output = new String(p.getInputStream().readAllBytes());
++        Pattern pattern = Pattern.compile("available: (\\d+) nodes");
++        Matcher matcher = pattern.matcher(output);
++        if (matcher.find()) {
++            return Integer.parseInt(matcher.group(1));
++        }
++        return -1;
++    }
++
++    private static boolean hasLibnuma() {
++        try {
++            ProcessBuilder pb = new ProcessBuilder("ldconfig", "-p");
++            Process p = pb.start();
++            String output = new String(p.getInputStream().readAllBytes());
++            return output.contains("libnuma.so");
++        } catch (Exception e) {
++            return false;
++        }
++    }
++
++    public static void main(String[] args) throws Exception {
++        // Check the preconditions
++        if (!hasLibnuma()) {
++            System.out.println("SKIP: libnuma not available");
++            return;
++        }
++
++        int nodeCount = getNumaNodeCount();
++        if (nodeCount <= 0) {
++            System.out.println("SKIP: NUMA not available or single node system");
++            return;
++        }
++
++        System.out.println("Detected " + nodeCount + " NUMA nodes");
++
++        // Test Case 1: Select one random node
++        testSingleNodeRandom();
++
++        // Test Case 2: Select multiple random nodes
++        if (nodeCount >= 2) {
++            testMultipleNodesRandom(2);
++        }
++
++        // Test Case 3: NUMANodesRandom exceeds the number of nodes
++        testRandomExceedingNodeCount(nodeCount);
++
++        // Test Case 4: NUMANodesRandom=0 (Not enabled)
++        testRandomZero();
++
++        // Test Case 5: Verify the environment variable guard
++        testEnvVarPreventsReexecution();
++
++        System.out.println("All tests passed!");
++    }
++
++    private static void testSingleNodeRandom() throws Exception {
++        System.out.println("\n=== Test Case 1: NUMANodesRandom=1 ===");
++
++        OutputAnalyzer output = ProcessTools.executeTestJava(
++            "-XX:+UseNUMA",
++            "-XX:NUMANodesRandom=1",
++            "-XX:+LogNUMANodes",
++            "-version"
++        );
++
++        output.shouldHaveExitValue(0);
++        output.shouldContain("will select 1 nodes from");
++        output.shouldContain("Successfully bound to 1 node(s)");
++
++        System.out.println("PASS: Single node random selection works");
++    }
++
++    private static void testMultipleNodesRandom(int count) throws Exception {
++        System.out.println("\n=== Test Case 2: NUMANodesRandom=" + count + " ===");
++
++        OutputAnalyzer output = ProcessTools.executeTestJava(
++            "-XX:+UseNUMA",
++            "-XX:NUMANodesRandom=" + count,
++            "-XX:+LogNUMANodes",
++            "-version"
++        );
++
++        output.shouldHaveExitValue(0);
++        output.shouldContain("will select " + count + " nodes from");
++        output.shouldContain("Distance from 
node"); ++ output.shouldContain("Successfully bound to " + count + " node(s)"); ++ ++ System.out.println("PASS: Multiple nodes random selection works"); ++ } ++ ++ private static void testRandomExceedingNodeCount(int nodeCount) throws Exception { ++ System.out.println("\n=== Test Case 3: NUMANodesRandom exceeds node count ==="); ++ ++ int exceedingCount = nodeCount + 5; ++ OutputAnalyzer output = ProcessTools.executeTestJava( ++ "-XX:+UseNUMA", ++ "-XX:NUMANodesRandom=" + exceedingCount, ++ "-XX:+LogNUMANodes", ++ "-version" ++ ); ++ ++ output.shouldHaveExitValue(0); ++ // All available nodes should be used ++ output.shouldContain("will select " + nodeCount + " nodes from " + nodeCount); ++ ++ System.out.println("PASS: Exceeding count handled correctly"); ++ } ++ ++ private static void testRandomZero() throws Exception { ++ System.out.println("\n=== Test Case 4: NUMANodesRandom=0 ==="); ++ ++ OutputAnalyzer output = ProcessTools.executeTestJava( ++ "-XX:+UseNUMA", ++ "-XX:NUMANodesRandom=0", ++ "-XX:+LogNUMANodes", ++ "-version" ++ ); ++ ++ output.shouldHaveExitValue(0); ++ output.shouldNotContain("Successfully bound to"); ++ ++ System.out.println("PASS: NUMANodesRandom=0 disables binding"); ++ } ++ ++ private static void testEnvVarPreventsReexecution() throws Exception { ++ System.out.println("\n=== Test Case 5: Environment variable prevents re-execution ==="); ++ ++ ProcessBuilder pb = ProcessTools.createTestJavaProcessBuilder( ++ "-XX:+UseNUMA", ++ "-XX:NUMANodesRandom=1", ++ "-XX:+LogNUMANodes", ++ "-version" ++ ); ++ ++ pb.environment().put("_JVM_NUMA_BINDING_DONE", "1"); ++ ++ OutputAnalyzer output = new OutputAnalyzer(pb.start()); ++ output.shouldHaveExitValue(0); ++ output.shouldContain("NUMA binding already done"); ++ output.shouldNotContain("Successfully bound to"); ++ ++ System.out.println("PASS: Environment variable prevents re-execution"); ++ } ++} +-- +2.34.1 + diff --git a/huawei-Add-dynamic-max-heap-size.patch b/huawei-Add-dynamic-max-heap-size.patch new file mode 100644 index 0000000000000000000000000000000000000000..01be25161f2876fcedd1f081070c0b2b9730a0de --- /dev/null +++ b/huawei-Add-dynamic-max-heap-size.patch @@ -0,0 +1,3370 @@ +Date: Tue, 25 Nov 2025 19:49:52 +0800 +Subject: [PATCH 2/8] Add dynamic max heap size + +--- + .../cpu/aarch64/vm_version_aarch64.cpp | 25 ++ + .../cpu/aarch64/vm_version_aarch64.hpp | 10 +- + src/hotspot/os/linux/os_linux.cpp | 41 ++- + src/hotspot/os/linux/os_linux.hpp | 70 ++++- + src/hotspot/share/cds/filemap.cpp | 7 +- + src/hotspot/share/classfile/vmSymbols.hpp | 5 + + src/hotspot/share/gc/g1/g1Arguments.cpp | 14 +- + src/hotspot/share/gc/g1/g1CollectedHeap.cpp | 20 +- + src/hotspot/share/gc/g1/g1CollectedHeap.hpp | 23 +- + .../share/gc/g1/g1HeapSizingPolicy.cpp | 34 ++- + src/hotspot/share/gc/g1/g1MemoryPool.hpp | 7 + + .../share/gc/g1/g1MonitoringSupport.cpp | 8 + + .../share/gc/g1/g1MonitoringSupport.hpp | 3 + + src/hotspot/share/gc/g1/g1Policy.cpp | 8 +- + src/hotspot/share/gc/g1/g1VMOperations.cpp | 139 ++++++++++ + src/hotspot/share/gc/g1/g1VMOperations.hpp | 13 + + src/hotspot/share/gc/g1/heapRegionManager.cpp | 31 ++- + src/hotspot/share/gc/g1/heapRegionManager.hpp | 18 +- + .../share/gc/parallel/parallelArguments.cpp | 3 + + .../gc/parallel/parallelScavengeHeap.cpp | 31 ++- + .../gc/parallel/parallelScavengeHeap.hpp | 3 + + .../gc/parallel/psGenerationCounters.cpp | 9 +- + .../share/gc/parallel/psMemoryPool.hpp | 2 +- + src/hotspot/share/gc/parallel/psOldGen.cpp | 8 +- + src/hotspot/share/gc/parallel/psOldGen.hpp | 23 +- + 
.../share/gc/parallel/psVMOperations.cpp | 254 ++++++++++++++++++ + .../share/gc/parallel/psVMOperations.hpp | 8 + + .../share/gc/parallel/psVirtualspace.cpp | 7 +- + .../share/gc/parallel/psVirtualspace.hpp | 21 ++ + src/hotspot/share/gc/parallel/psYoungGen.cpp | 8 +- + src/hotspot/share/gc/parallel/psYoungGen.hpp | 25 +- + src/hotspot/share/gc/shared/collectedHeap.cpp | 1 + + src/hotspot/share/gc/shared/collectedHeap.hpp | 11 + + .../share/gc/shared/dynamicMaxHeap.cpp | 163 +++++++++++ + .../share/gc/shared/dynamicMaxHeap.hpp | 61 +++++ + src/hotspot/share/gc/shared/gcArguments.cpp | 6 + + src/hotspot/share/gc/shared/gcCause.cpp | 3 + + src/hotspot/share/gc/shared/gcCause.hpp | 1 + + src/hotspot/share/gc/shared/gc_globals.hpp | 18 ++ + src/hotspot/share/gc/shared/genArguments.cpp | 6 +- + src/hotspot/share/gc/shared/genArguments.hpp | 13 + + .../share/gc/shared/generationCounters.cpp | 16 +- + .../share/gc/shared/generationCounters.hpp | 5 + + .../share/gc/shared/referencePolicy.cpp | 3 + + src/hotspot/share/memory/universe.cpp | 3 + + src/hotspot/share/memory/universe.hpp | 35 +++ + src/hotspot/share/runtime/arguments.cpp | 13 + + src/hotspot/share/runtime/globals.hpp | 3 + + src/hotspot/share/runtime/os.cpp | 7 + + src/hotspot/share/runtime/os.hpp | 2 + + src/hotspot/share/runtime/threads.cpp | 9 + + src/hotspot/share/runtime/vmOperation.hpp | 3 +- + .../share/services/diagnosticCommand.cpp | 115 ++++++++ + .../share/services/diagnosticCommand.hpp | 53 ++++ + .../share/classes/java/nio/Bits.java | 26 ++ + .../share/classes/jdk/internal/misc/VM.java | 6 + + test/hotspot/jtreg/gc/TestSmallHeap.java | 2 +- + .../jtreg/gc/arguments/TestMaxRAMFlags.java | 2 +- + .../jtreg/gc/dynamicmaxheap/BasicTest.java | 94 +++++++ + .../dynamicmaxheap/DirectMemoryBasicTest.java | 76 ++++++ + .../dynamicmaxheap/LimitDirectMemoryTest.java | 92 +++++++ + .../gc/dynamicmaxheap/MemoryPoolTest.java | 139 ++++++++++ + .../jtreg/gc/dynamicmaxheap/OptionsCheck.java | 73 +++++ + .../gc/dynamicmaxheap/RuntimeMemoryTest.java | 96 +++++++ + .../jtreg/gc/dynamicmaxheap/TestBase.java | 50 ++++ + .../LimitDirectMemoryTestBasic.java | 63 +++++ + .../test_classes/NotActiveDirectMemory.java | 41 +++ + .../test_classes/NotActiveHeap.java | 27 ++ + 68 files changed, 2172 insertions(+), 43 deletions(-) + create mode 100644 src/hotspot/share/gc/shared/dynamicMaxHeap.cpp + create mode 100644 src/hotspot/share/gc/shared/dynamicMaxHeap.hpp + create mode 100644 test/hotspot/jtreg/gc/dynamicmaxheap/BasicTest.java + create mode 100644 test/hotspot/jtreg/gc/dynamicmaxheap/DirectMemoryBasicTest.java + create mode 100644 test/hotspot/jtreg/gc/dynamicmaxheap/LimitDirectMemoryTest.java + create mode 100644 test/hotspot/jtreg/gc/dynamicmaxheap/MemoryPoolTest.java + create mode 100644 test/hotspot/jtreg/gc/dynamicmaxheap/OptionsCheck.java + create mode 100644 test/hotspot/jtreg/gc/dynamicmaxheap/RuntimeMemoryTest.java + create mode 100644 test/hotspot/jtreg/gc/dynamicmaxheap/TestBase.java + create mode 100644 test/hotspot/jtreg/gc/dynamicmaxheap/test_classes/LimitDirectMemoryTestBasic.java + create mode 100644 test/hotspot/jtreg/gc/dynamicmaxheap/test_classes/NotActiveDirectMemory.java + create mode 100644 test/hotspot/jtreg/gc/dynamicmaxheap/test_classes/NotActiveHeap.java + +diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp +index 6c95dc9c8..62da2a0e8 100644 +--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp ++++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp +@@ -68,6 
+68,31 @@ static SpinWait get_spin_wait_desc() { + return SpinWait{}; + } + ++int VM_Version::get_cpu_model() { ++ int cpu_lines = 0; ++ if (FILE *f = fopen("/proc/cpuinfo", "r")) { ++ char buf[128], *p; ++ while (fgets(buf, sizeof (buf), f) != NULL) { ++ if ((p = strchr(buf, ':')) != NULL) { ++ long v = strtol(p+1, NULL, 0); ++ if (strncmp(buf, "CPU implementer", sizeof "CPU implementer" - 1) == 0) { ++ _cpu = v; ++ cpu_lines++; ++ } else if (strncmp(buf, "CPU variant", sizeof "CPU variant" - 1) == 0) { ++ _variant = v; ++ } else if (strncmp(buf, "CPU part", sizeof "CPU part" - 1) == 0) { ++ if (_model != v) _model2 = _model; ++ _model = v; ++ } else if (strncmp(buf, "CPU revision", sizeof "CPU revision" - 1) == 0) { ++ _revision = v; ++ } ++ } ++ } ++ fclose(f); ++ } ++ return cpu_lines; ++} ++ + void VM_Version::initialize() { + _supports_cx8 = true; + _supports_atomic_getset4 = true; +diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp +index aa7d5db44..a274565e5 100644 +--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp ++++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp +@@ -145,13 +145,21 @@ enum Ampere_CPU_Model { + static bool supports_##name() { return (_features & CPU_##id) != 0; }; + CPU_FEATURE_FLAGS(CPU_FEATURE_DETECTION) + #undef CPU_FEATURE_DETECTION +- ++ ++ static int get_cpu_model(); + static int cpu_family() { return _cpu; } + static int cpu_model() { return _model; } + static int cpu_model2() { return _model2; } + static int cpu_variant() { return _variant; } + static int cpu_revision() { return _revision; } + ++ static bool is_hisi_enabled() { ++ if (_cpu == CPU_HISILICON && (_model == 0xd01 || _model == 0xd02)) { ++ return true; ++ } ++ return false; ++ } ++ + static bool model_is(int cpu_model) { + return _model == cpu_model || _model2 == cpu_model; + } +diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp +index cf63e9e76..aaa7927a9 100644 +--- a/src/hotspot/os/linux/os_linux.cpp ++++ b/src/hotspot/os/linux/os_linux.cpp +@@ -3439,6 +3439,10 @@ bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) { + return res != (uintptr_t) MAP_FAILED; + } + ++bool os::pd_free_heap_physical_memory(char *addr, size_t bytes) { ++ return madvise(addr, bytes, MADV_DONTNEED) == 0; ++} ++ + static address get_stack_commited_bottom(address bottom, size_t size) { + address nbot = bottom; + address ntop = bottom + size; +@@ -4706,8 +4710,41 @@ os::Linux::heap_dict_free_t os::Linux::_heap_dict_free; + os::Linux::heap_vector_add_t os::Linux::_heap_vector_add; + os::Linux::heap_vector_get_next_t os::Linux::_heap_vector_get_next; + os::Linux::heap_vector_free_t os::Linux::_heap_vector_free; ++os::Linux::dmh_g1_can_shrink_t os::Linux::_dmh_g1_can_shrink; ++os::Linux::dmh_g1_get_region_limit_t os::Linux::_dmh_g1_get_region_limit; ++os::Linux::dmh_ps_old_gen_can_shrink_t os::Linux::_dmh_ps_old_gen_can_shrink; ++os::Linux::dmh_ps_young_gen_can_shrink_t os::Linux::_dmh_ps_young_gen_can_shrink; ++ ++void os::Linux::load_ACC_library_before_ergo() { ++ _dmh_g1_can_shrink = CAST_TO_FN_PTR(dmh_g1_can_shrink_t, dlsym(RTLD_DEFAULT, "DynamicMaxHeap_G1CanShrink")); ++ _dmh_g1_get_region_limit = CAST_TO_FN_PTR(dmh_g1_get_region_limit_t, dlsym(RTLD_DEFAULT, "DynamicMaxHeap_G1GetRegionLimit")); ++ _dmh_ps_old_gen_can_shrink = CAST_TO_FN_PTR(dmh_ps_old_gen_can_shrink_t, dlsym(RTLD_DEFAULT, "DynamicMaxHeap_PsOldGenCanShrink")); ++ _dmh_ps_young_gen_can_shrink = CAST_TO_FN_PTR(dmh_ps_young_gen_can_shrink_t, 
dlsym(RTLD_DEFAULT, "DynamicMaxHeap_PsYoungGenCanShrink")); ++ ++ char path[JVM_MAXPATHLEN]; ++ char ebuf[1024]; ++ void* handle = NULL; ++ if (os::dll_locate_lib(path, sizeof(path), Arguments::get_dll_dir(), "jvm21_Acc") || ++ os::dll_locate_lib(path, sizeof(path), "/usr/lib64", "jvm21_Acc")) { ++ handle = dlopen(path, RTLD_LAZY); ++ } ++ if (handle != NULL) { ++ if(_dmh_g1_can_shrink == NULL) { ++ _dmh_g1_can_shrink = CAST_TO_FN_PTR(dmh_g1_can_shrink_t, dlsym(handle, "DynamicMaxHeap_G1CanShrink")); ++ } ++ if(_dmh_g1_get_region_limit == NULL) { ++ _dmh_g1_get_region_limit = CAST_TO_FN_PTR(dmh_g1_get_region_limit_t, dlsym(handle, "DynamicMaxHeap_G1GetRegionLimit")); ++ } ++ if(_dmh_ps_old_gen_can_shrink == NULL) { ++ _dmh_ps_old_gen_can_shrink = CAST_TO_FN_PTR(dmh_ps_old_gen_can_shrink_t, dlsym(handle, "DynamicMaxHeap_PsOldGenCanShrink")); ++ } ++ if(_dmh_ps_young_gen_can_shrink == NULL) { ++ _dmh_ps_young_gen_can_shrink = CAST_TO_FN_PTR(dmh_ps_young_gen_can_shrink_t, dlsym(handle, "DynamicMaxHeap_PsYoungGenCanShrink")); ++ } ++ } ++} + +-void os::Linux::load_plugin_library() { ++void os::Linux::load_ACC_library() { + + #if INCLUDE_JBOLT + _jboltLog_precalc = CAST_TO_FN_PTR(jboltLog_precalc_t, dlsym(RTLD_DEFAULT, "JBoltLog_PreCalc")); +@@ -4891,7 +4928,7 @@ jint os::init_2(void) { + init_adjust_stacksize_for_guard_pages(); + #endif + +- Linux::load_plugin_library(); ++ Linux::load_ACC_library(); + + if (UseNUMA || UseNUMAInterleaving) { + Linux::numa_init(); +diff --git a/src/hotspot/os/linux/os_linux.hpp b/src/hotspot/os/linux/os_linux.hpp +index 5c12ca7f0..7c1c51a58 100644 +--- a/src/hotspot/os/linux/os_linux.hpp ++++ b/src/hotspot/os/linux/os_linux.hpp +@@ -137,7 +137,8 @@ class os::Linux { + static const char *libc_version() { return _libc_version; } + static const char *libpthread_version() { return _libpthread_version; } + +- static void load_plugin_library(); ++ static void load_ACC_library(); ++ static void load_ACC_library_before_ergo(); + static void libpthread_init(); + static void sched_getcpu_init(); + static bool libnuma_init(); +@@ -235,6 +236,15 @@ class os::Linux { + static heap_vector_get_next_t _heap_vector_get_next; + static heap_vector_free_t _heap_vector_free; + ++ typedef bool (*dmh_g1_can_shrink_t)(double used_after_gc_d, size_t _new_max_heap, double maximum_used_percentage, size_t max_heap_size); ++ typedef uint (*dmh_g1_get_region_limit_t)(size_t _new_max_heap, size_t region_size); ++ typedef bool (*dmh_ps_old_gen_can_shrink_t)(size_t _new_max_heap, size_t old_used_bytes, double min_heap_free_ration, size_t alignment); ++ typedef bool (*dmh_ps_young_gen_can_shrink_t)(size_t _new_max_heap, size_t committed_size); ++ static dmh_g1_can_shrink_t _dmh_g1_can_shrink; ++ static dmh_g1_get_region_limit_t _dmh_g1_get_region_limit; ++ static dmh_ps_old_gen_can_shrink_t _dmh_ps_old_gen_can_shrink; ++ static dmh_ps_young_gen_can_shrink_t _dmh_ps_young_gen_can_shrink; ++ + static sched_getcpu_func_t _sched_getcpu; + static numa_node_to_cpus_func_t _numa_node_to_cpus; + static numa_node_to_cpus_v2_func_t _numa_node_to_cpus_v2; +@@ -511,6 +521,64 @@ class os::Linux { + _heap_vector_free(heap_vector); + } + } ++ ++ static bool dmh_g1_can_shrink(double used_after_gc_d, ++ size_t _new_max_heap, ++ double maximum_used_percentage, ++ size_t max_heap_size, ++ bool &is_valid, ++ bool just_check = false) { ++ is_valid = false; ++ bool result = false; ++ if (just_check) { ++ is_valid = (_dmh_g1_can_shrink != NULL); ++ } else if (_dmh_g1_can_shrink != NULL) { ++ is_valid = true; ++ 
result = _dmh_g1_can_shrink(used_after_gc_d, _new_max_heap, maximum_used_percentage, max_heap_size); ++ } ++ return result; ++ } ++ ++ static uint dmh_g1_get_region_limit(size_t _new_max_heap, size_t region_size, bool &is_valid, bool just_check = false) { ++ is_valid = false; ++ uint result = 0; ++ if (just_check) { ++ is_valid = (_dmh_g1_get_region_limit != NULL); ++ } else if (_dmh_g1_get_region_limit != NULL) { ++ is_valid = true; ++ result = _dmh_g1_get_region_limit(_new_max_heap, region_size); ++ } ++ return result; ++ } ++ ++ static bool dmh_ps_old_gen_can_shrink(size_t _new_max_heap, ++ size_t old_used_bytes, ++ double min_heap_free_ration, ++ size_t alignment, ++ bool &is_valid, ++ bool just_check = false) { ++ is_valid = false; ++ bool result = false; ++ if (just_check) { ++ is_valid = (_dmh_ps_old_gen_can_shrink != NULL); ++ } else if (_dmh_ps_old_gen_can_shrink != NULL) { ++ is_valid = true; ++ result = _dmh_ps_old_gen_can_shrink(_new_max_heap, old_used_bytes, min_heap_free_ration, alignment); ++ } ++ return result; ++ } ++ ++ static bool dmh_ps_young_gen_can_shrink(size_t _new_max_heap, size_t committed_size, bool &is_valid, bool just_check = false){ ++ is_valid = false; ++ bool result = false; ++ if (just_check) { ++ is_valid = (_dmh_ps_young_gen_can_shrink != NULL); ++ } else if (_dmh_ps_young_gen_can_shrink != NULL) { ++ is_valid = true; ++ result = _dmh_ps_young_gen_can_shrink(_new_max_heap, committed_size); ++ } ++ return result; ++ } + }; + + #endif // OS_LINUX_OS_LINUX_HPP +diff --git a/src/hotspot/share/cds/filemap.cpp b/src/hotspot/share/cds/filemap.cpp +index c091c1a00..2edc83bac 100644 +--- a/src/hotspot/share/cds/filemap.cpp ++++ b/src/hotspot/share/cds/filemap.cpp +@@ -2093,8 +2093,10 @@ bool FileMapInfo::map_heap_region() { + address heap_end = (address)heap_range.end(); + address mapped_heap_region_end = (address)_mapped_heap_memregion.end(); + assert(heap_end >= mapped_heap_region_end, "must be"); +- assert(heap_end - mapped_heap_region_end < (intx)(HeapRegion::GrainBytes), ++ if (!Universe::is_dynamic_max_heap_enable()) { ++ assert(heap_end - mapped_heap_region_end < (intx)(HeapRegion::GrainBytes), + "must be at the top of the heap to avoid fragmentation"); ++ } + #endif + + ArchiveHeapLoader::set_mapped(); +@@ -2113,6 +2115,9 @@ void FileMapInfo::init_heap_region_relocation() { + + address requested_bottom = (address)archive_range.start(); + address heap_end = (address)heap_range.end(); ++ if (Universe::is_dynamic_max_heap_enable()) { ++ heap_end = (address)heap_range.start() + MaxHeapSize; ++ } + assert(is_aligned(heap_end, HeapRegion::GrainBytes), "must be"); + + // We map the archive heap region at the very top of the heap to avoid fragmentation. 
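Note on the os::Linux::dmh_* wrappers earlier in this patch: they share a probe-then-call pattern, where passing just_check=true only reports (via is_valid) whether the ACC entry point was resolved, without invoking it. A minimal caller sketch, illustrative only and not part of the patch (new_max_heap stands in for the requested size):

    bool acc_present = false;
    // Probe only: the size arguments are ignored when just_check is true.
    os::Linux::dmh_g1_get_region_limit(0 /* unused */, 0 /* unused */,
                                       acc_present, true /* just_check */);
    if (acc_present) {
      bool is_valid = false;
      uint limit = os::Linux::dmh_g1_get_region_limit(new_max_heap,
                                                      HeapRegion::GrainBytes,
                                                      is_valid);
      // is_valid is set only when the ACC entry point was actually called.
    }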
+diff --git a/src/hotspot/share/classfile/vmSymbols.hpp b/src/hotspot/share/classfile/vmSymbols.hpp +index 6f8a476e8..77810b3c3 100644 +--- a/src/hotspot/share/classfile/vmSymbols.hpp ++++ b/src/hotspot/share/classfile/vmSymbols.hpp +@@ -788,6 +788,11 @@ + template(url_void_signature, "(Ljava/net/URL;)V") \ + template(url_array_classloader_void_signature, "([Ljava/net/URL;Ljava/lang/ClassLoader;)V") \ + \ ++ /* ElasticMaxDirectMemory */ \ ++ template(java_nio_Bits, "java/nio/Bits") \ ++ template(updateMaxMemory_name, "updateMaxMemory") \ ++ template(updateMaxMemory_signature, "(J)Ljava/lang/String;") \ ++ \ + /* Thread.dump_to_file jcmd */ \ + template(jdk_internal_vm_ThreadDumper, "jdk/internal/vm/ThreadDumper") \ + template(dumpThreads_name, "dumpThreads") \ +diff --git a/src/hotspot/share/gc/g1/g1Arguments.cpp b/src/hotspot/share/gc/g1/g1Arguments.cpp +index 28f850938..8c8912991 100644 +--- a/src/hotspot/share/gc/g1/g1Arguments.cpp ++++ b/src/hotspot/share/gc/g1/g1Arguments.cpp +@@ -53,8 +53,17 @@ void G1Arguments::initialize_alignments() { + // There is a circular dependency here. We base the region size on the heap + // size, but the heap size should be aligned with the region size. To get + // around this we use the unaligned values for the heap. +- HeapRegion::setup_heap_region_size(MaxHeapSize); + ++ if (Universe::is_dynamic_max_heap_enable()) { ++ if (!FLAG_IS_CMDLINE(DynamicMaxHeapSizeLimit) && !FLAG_IS_CMDLINE(ElasticMaxHeapSize)) { ++ guarantee(ElasticMaxHeap, "must be"); ++ FLAG_SET_ERGO(DynamicMaxHeapSizeLimit, MaxHeapSize); ++ } ++ HeapRegion::setup_heap_region_size(DynamicMaxHeapSizeLimit); ++ } else { ++ HeapRegion::setup_heap_region_size(MaxHeapSize); ++ } ++ + SpaceAlignment = HeapRegion::GrainBytes; + HeapAlignment = calculate_heap_alignment(SpaceAlignment); + +@@ -263,6 +272,9 @@ CollectedHeap* G1Arguments::create_heap() { + } + + size_t G1Arguments::heap_reserved_size_bytes() { ++ if (Universe::is_dynamic_max_heap_enable()) { ++ return DynamicMaxHeapSizeLimit; ++ } + return MaxHeapSize; + } + +diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp +index 32d3b0787..d3ab782f5 100644 +--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp ++++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp +@@ -2152,6 +2152,12 @@ size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { + } + + size_t G1CollectedHeap::max_capacity() const { ++ // Dynamic Max Heap ++ if (Universe::is_dynamic_max_heap_enable()) { ++ size_t cur_size = current_max_heap_size(); ++ guarantee(cur_size <= max_regions() * HeapRegion::GrainBytes, "must be"); ++ return cur_size; ++ } + return max_regions() * HeapRegion::GrainBytes; + } + +@@ -2916,7 +2922,7 @@ public: + } + }; + +-void G1CollectedHeap::rebuild_region_sets(bool free_list_only) { ++void G1CollectedHeap::rebuild_region_sets(bool free_list_only, bool is_dynamic_max_heap_shrink) { + assert_at_safepoint_on_vm_thread(); + + if (!free_list_only) { +@@ -2932,7 +2938,10 @@ void G1CollectedHeap::rebuild_region_sets(bool free_list_only) { + if (!free_list_only) { + set_used(cl.total_used()); + } +- assert_used_and_recalculate_used_equal(this); ++ // don't do this assert if is_dynamic_max_heap_shrink ++ if (!is_dynamic_max_heap_shrink) { ++ assert_used_and_recalculate_used_equal(this); ++ } + } + + // Methods for the mutator alloc region +@@ -3170,3 +3179,10 @@ void G1CollectedHeap::finish_codecache_marking_cycle() { + CodeCache::on_gc_marking_cycle_finish(); + CodeCache::arm_all_nmethods(); + } ++ ++bool 
G1CollectedHeap::change_max_heap(size_t new_size) { ++ assert_heap_not_locked(); ++ G1_ChangeMaxHeapOp op(new_size); ++ VMThread::execute(&op); ++ return op.resize_success(); ++} +\ No newline at end of file +diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp +index 6afaeb6e6..ab469293c 100644 +--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp ++++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp +@@ -168,6 +168,8 @@ class G1CollectedHeap : public CollectedHeap { + // Testing classes. + friend class G1CheckRegionAttrTableClosure; + ++ friend class G1_ChangeMaxHeapOp; ++ + private: + G1ServiceThread* _service_thread; + G1ServiceTask* _periodic_gc_task; +@@ -206,7 +208,7 @@ private: + // reflect the contents of the heap. The only exception is the + // humongous set which was not torn down in the first place. If + // free_list_only is true, it will only rebuild the free list. +- void rebuild_region_sets(bool free_list_only); ++ void rebuild_region_sets(bool free_list_only, bool is_dynamic_max_heap_shrink = false); + + // Callback for region mapping changed events. + G1RegionMappingChangedListener _listener; +@@ -1317,6 +1319,25 @@ public: + + // Used to print information about locations in the hs_err file. + bool print_location(outputStream* st, void* addr) const override; ++ ++private: ++ // Dynamic Max Heap ++ // expected DynamicMaxHeap size during full gc (temp value) ++ // 0 means do not adjust ++ // min_gen_size <= _expected_dynamic_max_heap_size <= _reserved size. ++ // will be cleared after DynamicMaxHeap VM operation. ++ size_t _exp_dynamic_max_heap_size; ++public: ++ virtual bool change_max_heap(size_t new_size); ++ size_t exp_dynamic_max_heap_size() const { return _exp_dynamic_max_heap_size; } ++ void set_exp_dynamic_max_heap_size(size_t size) { ++ guarantee(size <= _reserved.byte_size(), "must be"); ++ _exp_dynamic_max_heap_size = size; ++ } ++ void update_gen_max_counter(size_t size) { ++ guarantee(Universe::is_dynamic_max_heap_enable(), "must be"); ++ _monitoring_support->update_max_sizes(size); ++ } + }; + + // Scoped object that performs common pre- and post-gc heap printing operations. +diff --git a/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp b/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp +index 8645b8470..92b95c004 100644 +--- a/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp ++++ b/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp +@@ -197,7 +197,7 @@ size_t G1HeapSizingPolicy::young_collection_expansion_amount() { + return expand_bytes; + } + +-static size_t target_heap_capacity(size_t used_bytes, uintx free_ratio) { ++static size_t target_heap_capacity(size_t used_bytes, uintx free_ratio, size_t max_heap_size) { + const double desired_free_percentage = (double) free_ratio / 100.0; + const double desired_used_percentage = 1.0 - desired_free_percentage; + +@@ -207,7 +207,7 @@ static size_t target_heap_capacity(size_t used_bytes, uintx free_ratio) { + double desired_capacity_d = used_bytes_d / desired_used_percentage; + // Let's make sure that they are both under the max heap size, which + // by default will make it fit into a size_t. +- double desired_capacity_upper_bound = (double) MaxHeapSize; ++ double desired_capacity_upper_bound = (double) max_heap_size; + desired_capacity_d = MIN2(desired_capacity_d, desired_capacity_upper_bound); + // We can now safely turn it into size_t's. + return (size_t) desired_capacity_d; +@@ -226,8 +226,10 @@ size_t G1HeapSizingPolicy::full_collection_resize_amount(bool& expand) { + // results. 
+ _g1h->eden_regions_count() * HeapRegion::GrainBytes; + +- size_t minimum_desired_capacity = target_heap_capacity(used_after_gc, MinHeapFreeRatio); +- size_t maximum_desired_capacity = target_heap_capacity(used_after_gc, MaxHeapFreeRatio); ++ size_t max_heap_size = _g1h->current_max_heap_size(); ++ ++ size_t minimum_desired_capacity = target_heap_capacity(used_after_gc, MinHeapFreeRatio, max_heap_size); ++ size_t maximum_desired_capacity = target_heap_capacity(used_after_gc, MaxHeapFreeRatio, max_heap_size); + + // This assert only makes sense here, before we adjust them + // with respect to the min and max heap size. +@@ -239,7 +241,7 @@ size_t G1HeapSizingPolicy::full_collection_resize_amount(bool& expand) { + // Should not be greater than the heap max size. No need to adjust + // it with respect to the heap min size as it's a lower bound (i.e., + // we'll try to make the capacity larger than it, not smaller). +- minimum_desired_capacity = MIN2(minimum_desired_capacity, MaxHeapSize); ++ minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size); + // Should not be less than the heap min size. No need to adjust it + // with respect to the heap max size as it's an upper bound (i.e., + // we'll try to make the capacity smaller than it, not greater). +@@ -257,7 +259,27 @@ size_t G1HeapSizingPolicy::full_collection_resize_amount(bool& expand) { + expand = true; + return expand_bytes; + // No expansion, now see if we want to shrink +- } else if (capacity_after_gc > maximum_desired_capacity) { ++ } ++ ++ size_t exp_size = _g1h->exp_dynamic_max_heap_size(); ++ if (Universe::is_dynamic_max_heap_enable() && ++ (exp_size > 0) && ++ (exp_size < _g1h->capacity()) && ++ (exp_size >= minimum_desired_capacity) && ++ (exp_size <= maximum_desired_capacity)) { ++ // shrink to exp_dynamic_max_heap_size when ++ // 1. exp_dynamic_max_heap_size smaller than capacity ++ // 2. exp_dynamic_max_heap_size bigger than minimum_desired_capacity ++ size_t shrink_bytes = _g1h->capacity() - exp_size; ++ log_debug(gc, ergo, heap)("Attempt heap shrinking for dynamic max heap(capacity higher than expected dynamic max heap after Full GC)." ++ "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B " ++ "expected_dynamic_max_heap: " SIZE_FORMAT "B ", ++ capacity_after_gc, used_after_gc, exp_size); ++ expand = false; ++ return shrink_bytes; ++ } ++ ++ if (capacity_after_gc > maximum_desired_capacity) { + // Capacity too large, compute shrinking size + size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity; + +diff --git a/src/hotspot/share/gc/g1/g1MemoryPool.hpp b/src/hotspot/share/gc/g1/g1MemoryPool.hpp +index 4d7128b77..02226bbd5 100644 +--- a/src/hotspot/share/gc/g1/g1MemoryPool.hpp ++++ b/src/hotspot/share/gc/g1/g1MemoryPool.hpp +@@ -61,6 +61,13 @@ protected: + size_t init_size, + size_t max_size, + bool support_usage_threshold); ++ size_t max_size() const override { ++ if (Universe::is_dynamic_max_heap_enable()) { ++ G1CollectedHeap* heap = (G1CollectedHeap*)Universe::heap(); ++ return heap->max_capacity(); ++ } ++ return MemoryPool::max_size(); ++ } + }; + + // Memory pool that represents the G1 eden. 
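For intuition on target_heap_capacity in the g1HeapSizingPolicy hunk above: it scales the live bytes by the desired used fraction, then clamps to the (now dynamic) max heap size. A worked example with illustrative numbers:

    // used_bytes = 2 GB, free_ratio = MinHeapFreeRatio = 40
    // desired_used_percentage = 1.0 - 40/100.0 = 0.60
    // desired_capacity = 2 GB / 0.60, about 3.3 GB
    // ...then MIN2-clamped to max_heap_size, which this patch changes from
    // the static MaxHeapSize to the heap's current_max_heap_size().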
+diff --git a/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp b/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp
+index c4f2c1284..07cfe56c6 100644
+--- a/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp
++++ b/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp
+@@ -323,6 +323,14 @@ void G1MonitoringSupport::update_eden_size() {
+ }
+ }
+
++void G1MonitoringSupport::update_max_sizes(size_t size) {
++ if (UsePerfData) {
++ _young_gen_counters->update_max_size(size);
++ _old_gen_counters->update_max_size(size);
++ update_sizes();
++ }
++}
++
+ MemoryUsage G1MonitoringSupport::eden_space_memory_usage(size_t initial_size, size_t max_size) {
+ MutexLocker x(MonitoringSupport_lock, Mutex::_no_safepoint_check_flag);
+
+diff --git a/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp b/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp
+index 769a6fe83..c42cb6d6c 100644
+--- a/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp
++++ b/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp
+@@ -198,6 +198,9 @@ public:
+
+ void update_eden_size();
+
++ // Dynamic Max Heap
++ void update_max_sizes(size_t size);
++
+ // Monitoring support used by
+ // MemoryService
+ // jstat counters
+diff --git a/src/hotspot/share/gc/g1/g1Policy.cpp b/src/hotspot/share/gc/g1/g1Policy.cpp
+index 70b7d796c..705e2f8a6 100644
+--- a/src/hotspot/share/gc/g1/g1Policy.cpp
++++ b/src/hotspot/share/gc/g1/g1Policy.cpp
+@@ -96,8 +96,12 @@ void G1Policy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
+ _collection_set = collection_set;
+
+ assert(Heap_lock->owned_by_self(), "Locking discipline.");
+-
+- _young_gen_sizer.adjust_max_new_size(_g1h->max_regions());
++
++ if (Universe::is_dynamic_max_heap_enable()) {
++ _young_gen_sizer.adjust_max_new_size((uint)(_g1h->current_max_heap_size() / HeapRegion::GrainBytes));
++ } else {
++ _young_gen_sizer.adjust_max_new_size(_g1h->max_regions());
++ }
+
+ _free_regions_at_end_of_collection = _g1h->num_free_regions();
+
+diff --git a/src/hotspot/share/gc/g1/g1VMOperations.cpp b/src/hotspot/share/gc/g1/g1VMOperations.cpp
+index bdee54c8a..c11a9db19 100644
+--- a/src/hotspot/share/gc/g1/g1VMOperations.cpp
++++ b/src/hotspot/share/gc/g1/g1VMOperations.cpp
+@@ -194,3 +194,142 @@ void VM_G1PauseCleanup::work() {
+ G1CollectedHeap* g1h = G1CollectedHeap::heap();
+ g1h->concurrent_mark()->cleanup();
+ }
++
++G1_ChangeMaxHeapOp::G1_ChangeMaxHeapOp(size_t new_max_heap) :
++ VM_ChangeMaxHeapOp(new_max_heap) {
++}
++
++/*
++ * No need to calculate young/old sizes; shrinking adjusts the young
++ * generation automatically. Ensure young_list_length,
++ * _young_list_max_length and _young_list_target_length stay aligned.
++ *
++ * 1. check whether a GC is needed: new_heap_max >= minimum_desired_capacity
++ * 2. perform full GC if necessary
++ * 3. update new limit
++ * 4. validation
++ */
++void G1_ChangeMaxHeapOp::doit() {
++ G1CollectedHeap* heap = (G1CollectedHeap*)Universe::heap();
++ const size_t min_heap_size = MinHeapSize;
++ const size_t max_heap_size = heap->current_max_heap_size();
++ bool is_shrink = _new_max_heap < max_heap_size;
++ bool is_valid = false;
++
++ // step1. calculate maximum_used_percentage for shrink validity check
++ const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
++ const double maximum_used_percentage = 1.0 - minimum_free_percentage;
++
++ // step2.
trigger GC as needed and resize ++ if (is_shrink) { ++ trigger_gc_shrink(_new_max_heap, maximum_used_percentage, max_heap_size, is_valid); ++ if (!is_valid) { ++ // We should not reach here because we have already checked the existence of ++ // the ACC and disabled this feature when the ACC is absent. ++ log_debug(dynamic, heap)("G1_ElasticMaxHeapOp fail for missing ACC"); ++ return; ++ } ++ } ++ ++ log_debug(dynamic, heap)("G1_ElasticMaxHeapOp: current capacity " SIZE_FORMAT "K, new max heap " SIZE_FORMAT "K", ++ heap->capacity() / K, _new_max_heap / K); ++ ++ // step3. check if can update new limit ++ if (heap->capacity() <= _new_max_heap) { ++ uint dynamic_max_heap_len = os::Linux::dmh_g1_get_region_limit(_new_max_heap, HeapRegion::GrainBytes, is_valid); ++ if (!is_valid) { ++ // We should not reach here because we have already checked the existence of ++ // the ACC and disabled this feature when the ACC is absent. ++ log_debug(dynamic, heap)("G1_ElasticMaxHeapOp fail for missing ACC"); ++ return; ++ } ++ heap->set_current_max_heap_size(_new_max_heap); ++ heap->_hrm.set_dynamic_max_heap_length(dynamic_max_heap_len); ++ // G1 young/old share same max size ++ heap->update_gen_max_counter(_new_max_heap); ++ _resize_success = true; ++ log_debug(dynamic, heap)("G1_ElasticMaxHeapOp success"); ++ } else { ++ log_debug(dynamic, heap)("G1_ElasticMaxHeapOp fail"); ++ } ++} ++ ++void G1_ChangeMaxHeapOp::trigger_gc_shrink(size_t _new_max_heap, ++ double maximum_used_percentage, ++ size_t max_heap_size, ++ bool &is_valid){ ++ G1CollectedHeap* heap = (G1CollectedHeap*)Universe::heap(); ++ G1CollectorState* collector_state = heap->collector_state(); ++ bool triggered_full_gc = false; ++ bool can_shrink = os::Linux::dmh_g1_can_shrink((double) heap->used(), _new_max_heap, maximum_used_percentage, max_heap_size, is_valid); ++ if (!is_valid) { ++ return; ++ } ++ if (!can_shrink) { ++ // trigger Young GC ++ collector_state->set_in_young_only_phase(true); ++ collector_state->set_in_young_gc_before_mixed(true); ++ GCCauseSetter gccs(heap, _gc_cause); ++ bool minor_gc_succeeded = heap->do_collection_pause_at_safepoint(); ++ if (minor_gc_succeeded) { ++ log_debug(dynamic, heap)("G1_ElasticMaxHeapOp heap after Young GC"); ++ LogTarget(Debug, dynamic, heap) lt; ++ if (lt.is_enabled()) { ++ LogStream ls(lt); ++ heap->print_on(&ls); ++ } ++ } ++ can_shrink = os::Linux::dmh_g1_can_shrink((double) heap->used(), _new_max_heap, maximum_used_percentage, max_heap_size, is_valid); ++ if (!is_valid) { ++ return; ++ } ++ if (!can_shrink) { ++ // trigger Full GC and adjust everything in resize_if_necessary_after_full_collection ++ heap->set_exp_dynamic_max_heap_size(_new_max_heap); ++ heap->do_full_collection(true); ++ log_debug(dynamic, heap)("G1_ElasticMaxHeapOp heap after Full GC"); ++ LogTarget(Debug, dynamic, heap) lt; ++ if (lt.is_enabled()) { ++ LogStream ls(lt); ++ heap->print_on(&ls); ++ } ++ heap->set_exp_dynamic_max_heap_size(0); ++ triggered_full_gc = true; ++ } ++ } ++ ++ if (!triggered_full_gc) { ++ // there may be two situations when entering this branch: ++ // 1. first check passed, no GC triggered ++ // 2. 
first check failed, triggered Young GC, ++ // second check passed ++ // so the shrink has not been completed and it must be valid to shrink ++ g1_shrink_without_full_gc(_new_max_heap); ++ } ++} ++ ++void G1_ChangeMaxHeapOp::g1_shrink_without_full_gc(size_t _new_max_heap) { ++ G1CollectedHeap* heap = (G1CollectedHeap*)Universe::heap(); ++ size_t capacity_before_shrink = heap->capacity(); ++ // _new_max_heap is large enough, do nothing ++ if (_new_max_heap >= capacity_before_shrink) { ++ return; ++ } ++ // Capacity too large, compute shrinking size and shrink ++ size_t shrink_bytes = capacity_before_shrink - _new_max_heap; ++ heap->_verifier->verify_region_sets_optional(); ++ heap->_hrm.remove_all_free_regions(); ++ heap->shrink_helper(shrink_bytes); ++ heap->rebuild_region_sets(true /* free_list_only */, true /* is_dynamic_max_heap_shrink */); ++ heap->_hrm.verify_optional(); ++ heap->_verifier->verify_region_sets_optional(); ++ heap->_verifier->verify_after_gc(); ++ ++ log_debug(dynamic, heap)("G1_ElasticMaxHeapOp: attempt heap shrinking for dynamic max heap %s " ++ "origin capacity " SIZE_FORMAT "K " ++ "new capacity " SIZE_FORMAT "K " ++ "shrink by " SIZE_FORMAT "K", ++ heap->capacity() <= _new_max_heap ? "success" : "fail", ++ capacity_before_shrink / K, ++ heap->capacity() / K, ++ shrink_bytes / K); ++} +\ No newline at end of file +diff --git a/src/hotspot/share/gc/g1/g1VMOperations.hpp b/src/hotspot/share/gc/g1/g1VMOperations.hpp +index cfca9e21d..943f676b3 100644 +--- a/src/hotspot/share/gc/g1/g1VMOperations.hpp ++++ b/src/hotspot/share/gc/g1/g1VMOperations.hpp +@@ -27,6 +27,7 @@ + + #include "gc/shared/gcId.hpp" + #include "gc/shared/gcVMOperations.hpp" ++#include "gc/shared/dynamicMaxHeap.hpp" + + // VM_operations for the G1 collector. + +@@ -109,4 +110,16 @@ public: + void work() override; + }; + ++// Change Dynamic Max Heap Size ++class G1_ChangeMaxHeapOp : public VM_ChangeMaxHeapOp { ++public: ++ G1_ChangeMaxHeapOp(size_t new_max_heap); ++ virtual void doit(); ++ void trigger_gc_shrink(size_t _new_max_heap, ++ double maximum_used_percentage, ++ size_t max_heap_size, ++ bool &is_valid); ++ void g1_shrink_without_full_gc(size_t _new_max_heap); ++}; ++ + #endif // SHARE_GC_G1_G1VMOPERATIONS_HPP +diff --git a/src/hotspot/share/gc/g1/heapRegionManager.cpp b/src/hotspot/share/gc/g1/heapRegionManager.cpp +index fc445142b..1df5a3960 100644 +--- a/src/hotspot/share/gc/g1/heapRegionManager.cpp ++++ b/src/hotspot/share/gc/g1/heapRegionManager.cpp +@@ -86,6 +86,8 @@ void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage, + + _regions.initialize(heap_storage->reserved(), HeapRegion::GrainBytes); + ++ _dynamic_max_heap_length = (uint)(MaxHeapSize / HeapRegion::GrainBytes); ++ + _committed_map.initialize(reserved_length()); + } + +@@ -313,12 +315,15 @@ uint HeapRegionManager::expand_inactive(uint num_regions) { + + do { + HeapRegionRange regions = _committed_map.next_inactive_range(offset); +- if (regions.length() == 0) { ++ if (regions.length() == 0 || available() == 0) { + // No more unavailable regions. 
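++ // With Dynamic Max Heap, available() is capped by the dynamic limit and
++ // can reach zero before the reserved regions are exhausted.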
+ break; + } + + uint to_expand = MIN2(num_regions - expanded, regions.length()); ++ if (Universe::is_dynamic_max_heap_enable()) { ++ to_expand = MIN2(to_expand, available()); ++ } + reactivate_regions(regions.start(), to_expand); + expanded += to_expand; + offset = regions.end(); +@@ -335,12 +340,15 @@ uint HeapRegionManager::expand_any(uint num_regions, WorkerThreads* pretouch_wor + + do { + HeapRegionRange regions = _committed_map.next_committable_range(offset); +- if (regions.length() == 0) { ++ if (regions.length() == 0 || available() == 0) { + // No more unavailable regions. + break; + } + + uint to_expand = MIN2(num_regions - expanded, regions.length()); ++ if (Universe::is_dynamic_max_heap_enable()) { ++ to_expand = MIN2(to_expand, available()); ++ } + expand(regions.start(), to_expand, pretouch_workers); + expanded += to_expand; + offset = regions.end(); +@@ -352,6 +360,13 @@ uint HeapRegionManager::expand_any(uint num_regions, WorkerThreads* pretouch_wor + uint HeapRegionManager::expand_by(uint num_regions, WorkerThreads* pretouch_workers) { + assert(num_regions > 0, "Must expand at least 1 region"); + ++ if (Universe::is_dynamic_max_heap_enable()) { ++ uint available_regions = available(); ++ guarantee(dynamic_max_heap_length() >= length(), "The current length must not exceed dynamic max heap length"); ++ guarantee(available_regions <= max_length() && available_regions <= dynamic_max_heap_length(), "must be"); ++ num_regions = MIN2(num_regions, available_regions); ++ } ++ + // First "undo" any requests to uncommit memory concurrently by + // reverting such regions to being available. + uint expanded = expand_inactive(num_regions); +@@ -367,6 +382,14 @@ uint HeapRegionManager::expand_by(uint num_regions, WorkerThreads* pretouch_work + + void HeapRegionManager::expand_exact(uint start, uint num_regions, WorkerThreads* pretouch_workers) { + assert(num_regions != 0, "Need to request at least one region"); ++ ++ if (Universe::is_dynamic_max_heap_enable()) { ++ uint available_regions = available(); ++ guarantee(dynamic_max_heap_length() >= length(), "The current length must not exceed dynamic max heap length"); ++ guarantee(available_regions <= max_length() && available_regions <= dynamic_max_heap_length(), "must be"); ++ num_regions = MIN2(num_regions, available_regions); ++ } ++ + uint end = start + num_regions; + + for (uint i = start; i < end; i++) { +@@ -535,7 +558,7 @@ uint HeapRegionManager::find_highest_free(bool* expanded) { + // committed, expand at that index. + for (uint curr = reserved_length(); curr-- > 0;) { + HeapRegion *hr = _regions.get_by_index(curr); +- if (hr == nullptr || !is_available(curr)) { ++ if ((hr == nullptr || !is_available(curr)) && available() >= 1) { + // Found uncommitted and free region, expand to make it available for use. + expand_exact(curr, 1, nullptr); + assert(at(curr)->is_free(), "Region (%u) must be available and free after expand", curr); +@@ -559,7 +582,7 @@ bool HeapRegionManager::allocate_containing_regions(MemRegion range, size_t* com + // Ensure that each G1 region in the range is free, returning false if not. + // Commit those that are not yet available, and keep count. 
+ for (uint curr_index = start_index; curr_index <= last_index; curr_index++) { +- if (!is_available(curr_index)) { ++ if (!is_available(curr_index) && available() >= 1) { + commits++; + expand_exact(curr_index, 1, pretouch_workers); + } +diff --git a/src/hotspot/share/gc/g1/heapRegionManager.hpp b/src/hotspot/share/gc/g1/heapRegionManager.hpp +index ad985e1f8..5f0e5c251 100644 +--- a/src/hotspot/share/gc/g1/heapRegionManager.hpp ++++ b/src/hotspot/share/gc/g1/heapRegionManager.hpp +@@ -84,6 +84,9 @@ class HeapRegionManager: public CHeapObj { + // Internal only. The highest heap region +1 we allocated a HeapRegion instance for. + uint _allocated_heapregions_length; + ++ // The max number of regions controlled by Dynamic Max Heap ++ uint _dynamic_max_heap_length; ++ + HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); } + HeapWord* heap_end() const {return _regions.end_address_mapped(); } + +@@ -230,7 +233,12 @@ public: + } + + // Return the number of regions available (uncommitted) regions. +- uint available() const { return max_length() - length(); } ++ uint available() const { ++ if(Universe::is_dynamic_max_heap_enable()) { ++ return dynamic_max_heap_length() - length(); ++ } ++ return max_length() - length(); ++ } + + // Return the number of regions currently active and available for use. + uint length() const { return _committed_map.num_active(); } +@@ -241,6 +249,14 @@ public: + // Return maximum number of regions that heap can expand to. + uint max_length() const { return reserved_length(); } + ++ // Return the current maximum number of regions in the heap (dynamic max heap). ++ uint dynamic_max_heap_length() const { return (uint)_dynamic_max_heap_length; } ++ ++ void set_dynamic_max_heap_length(uint len) { ++ guarantee(len <= max_length(), "must be"); ++ _dynamic_max_heap_length = len; ++ } ++ + MemoryUsage get_auxiliary_data_memory_usage() const; + + MemRegion reserved() const { return MemRegion(heap_bottom(), heap_end()); } +diff --git a/src/hotspot/share/gc/parallel/parallelArguments.cpp b/src/hotspot/share/gc/parallel/parallelArguments.cpp +index 468dc7bdf..7c42afa45 100644 +--- a/src/hotspot/share/gc/parallel/parallelArguments.cpp ++++ b/src/hotspot/share/gc/parallel/parallelArguments.cpp +@@ -137,6 +137,9 @@ void ParallelArguments::initialize_heap_flags_and_sizes() { + } + + size_t ParallelArguments::heap_reserved_size_bytes() { ++ if (Universe::is_dynamic_max_heap_enable()) { ++ return DynamicMaxHeapSizeLimit; ++ } + return MaxHeapSize; + } + +diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp +index d34474225..c80751e2c 100644 +--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp ++++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp +@@ -62,6 +62,10 @@ PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = nullptr; + PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = nullptr; + + jint ParallelScavengeHeap::initialize() { ++ if (Universe::is_dynamic_max_heap_enable() && !FLAG_IS_CMDLINE(DynamicMaxHeapSizeLimit) && !FLAG_IS_CMDLINE(ElasticMaxHeapSize)) { ++ guarantee(ElasticMaxHeap, "must be"); ++ FLAG_SET_ERGO(DynamicMaxHeapSizeLimit, MaxHeapSize); ++ } + const size_t reserved_heap_size = ParallelArguments::heap_reserved_size_bytes(); + + ReservedHeapSpace heap_rs = Universe::reserve_heap(reserved_heap_size, HeapAlignment); +@@ -70,9 +74,13 @@ jint ParallelScavengeHeap::initialize() { + + initialize_reserved_region(heap_rs); + // Layout the 
reserved space for the generations. +- ReservedSpace old_rs = heap_rs.first_part(MaxOldSize); +- ReservedSpace young_rs = heap_rs.last_part(MaxOldSize); +- assert(young_rs.size() == MaxNewSize, "Didn't reserve all of the heap"); ++ size_t max_old_size = MaxOldSize; ++ if (Universe::is_dynamic_max_heap_enable()) { ++ max_old_size = GenArguments::max_old_size(reserved_heap_size); ++ } ++ ReservedSpace old_rs = heap_rs.first_part(max_old_size); ++ ReservedSpace young_rs = heap_rs.last_part(max_old_size); ++ assert(young_rs.size() == MaxNewSize || Universe::is_dynamic_max_heap_enable(), "Didn't reserve all of the heap"); + + PSCardTable* card_table = new PSCardTable(heap_rs.region()); + card_table->initialize(old_rs.base(), young_rs.base()); +@@ -97,8 +105,8 @@ jint ParallelScavengeHeap::initialize() { + MaxOldSize, + "old", 1); + +- assert(young_gen()->max_gen_size() == young_rs.size(),"Consistency check"); +- assert(old_gen()->max_gen_size() == old_rs.size(), "Consistency check"); ++ assert(young_gen()->max_gen_size() == young_rs.size() || Universe::is_dynamic_max_heap_enable(),"Consistency check"); ++ assert(old_gen()->max_gen_size() == old_rs.size() || Universe::is_dynamic_max_heap_enable(), "Consistency check"); + + double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0; + double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0; +@@ -211,6 +219,12 @@ bool ParallelScavengeHeap::is_maximal_no_gc() const { + + size_t ParallelScavengeHeap::max_capacity() const { + size_t estimated = reserved_region().byte_size(); ++ // Dynamic Max Heap ++ if (Universe::is_dynamic_max_heap_enable()) { ++ // young_gen()->max_size() is also controlled by DynamicMaxHeap ++ guarantee(current_max_heap_size() <= estimated, "must be"); ++ estimated = current_max_heap_size(); ++ } + if (UseAdaptiveSizePolicy) { + estimated -= _size_policy->max_survivor_size(young_gen()->max_gen_size()); + } else { +@@ -888,3 +902,10 @@ void ParallelScavengeHeap::pin_object(JavaThread* thread, oop obj) { + void ParallelScavengeHeap::unpin_object(JavaThread* thread, oop obj) { + GCLocker::unlock_critical(thread); + } ++ ++bool ParallelScavengeHeap::change_max_heap(size_t new_size) { ++ assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); ++ PS_ChangeMaxHeapOp op(new_size); ++ VMThread::execute(&op); ++ return op.resize_success(); ++} +diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp +index e71dc9515..12656d55b 100644 +--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp ++++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp +@@ -276,6 +276,9 @@ class ParallelScavengeHeap : public CollectedHeap { + + void pin_object(JavaThread* thread, oop obj) override; + void unpin_object(JavaThread* thread, oop obj) override; ++ ++ // Dynamic Max Heap ++ virtual bool change_max_heap(size_t new_size); + }; + + // Class that can be used to print information about the +diff --git a/src/hotspot/share/gc/parallel/psGenerationCounters.cpp b/src/hotspot/share/gc/parallel/psGenerationCounters.cpp +index 1b0e8d320..e7650a9af 100644 +--- a/src/hotspot/share/gc/parallel/psGenerationCounters.cpp ++++ b/src/hotspot/share/gc/parallel/psGenerationCounters.cpp +@@ -57,8 +57,15 @@ PSGenerationCounters::PSGenerationCounters(const char* name, + min_capacity, CHECK); + + cname = PerfDataManager::counter_name(_name_space, "maxCapacity"); +- PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes, ++ // Dynamic Max Heap ++ 
if (Universe::is_dynamic_max_heap_enable()) { ++ _max_size = PerfDataManager::create_variable(SUN_GC, cname, ++ PerfData::U_Bytes, max_capacity, CHECK); ++ } else { ++ _max_size = NULL; ++ PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes, + max_capacity, CHECK); ++ } + + cname = PerfDataManager::counter_name(_name_space, "capacity"); + _current_size = PerfDataManager::create_variable(SUN_GC, cname, +diff --git a/src/hotspot/share/gc/parallel/psMemoryPool.hpp b/src/hotspot/share/gc/parallel/psMemoryPool.hpp +index 58f39cdc7..3dfbb84d5 100644 +--- a/src/hotspot/share/gc/parallel/psMemoryPool.hpp ++++ b/src/hotspot/share/gc/parallel/psMemoryPool.hpp +@@ -40,7 +40,7 @@ public: + + MemoryUsage get_memory_usage(); + size_t used_in_bytes() { return _old_gen->used_in_bytes(); } +- size_t max_size() const { return _old_gen->reserved().byte_size(); } ++ size_t max_size() const { return _old_gen->max_gen_size(); } + }; + + class EdenMutableSpacePool : public CollectedMemoryPool { +diff --git a/src/hotspot/share/gc/parallel/psOldGen.cpp b/src/hotspot/share/gc/parallel/psOldGen.cpp +index 8124f018b..18c4285b9 100644 +--- a/src/hotspot/share/gc/parallel/psOldGen.cpp ++++ b/src/hotspot/share/gc/parallel/psOldGen.cpp +@@ -40,7 +40,8 @@ + PSOldGen::PSOldGen(ReservedSpace rs, size_t initial_size, size_t min_size, + size_t max_size, const char* perf_data_name, int level): + _min_gen_size(min_size), +- _max_gen_size(max_size) ++ _max_gen_size(Universe::is_dynamic_max_heap_enable() ? rs.size() : max_size), ++ _cur_max_gen_size(Universe::is_dynamic_max_heap_enable() ? max_size : -1) + { + initialize(rs, initial_size, GenAlignment, perf_data_name, level); + } +@@ -58,6 +59,9 @@ void PSOldGen::initialize_virtual_space(ReservedSpace rs, + size_t alignment) { + + _virtual_space = new PSVirtualSpace(rs, alignment); ++ if (Universe::is_dynamic_max_heap_enable()) { ++ _virtual_space->set_dynamic_max_heap_size(_cur_max_gen_size); ++ } + if (!_virtual_space->expand_by(initial_size)) { + vm_exit_during_initialization("Could not reserve enough space for " + "object heap"); +@@ -66,7 +70,7 @@ void PSOldGen::initialize_virtual_space(ReservedSpace rs, + + void PSOldGen::initialize_work(const char* perf_data_name, int level) { + MemRegion const reserved_mr = reserved(); +- assert(reserved_mr.byte_size() == max_gen_size(), "invariant"); ++ assert(reserved_mr.byte_size() == max_gen_size() || Universe::is_dynamic_max_heap_enable(), "invariant"); + + // Object start stuff: for all reserved memory + start_array()->initialize(reserved_mr); +diff --git a/src/hotspot/share/gc/parallel/psOldGen.hpp b/src/hotspot/share/gc/parallel/psOldGen.hpp +index c26f251f9..02952c48e 100644 +--- a/src/hotspot/share/gc/parallel/psOldGen.hpp ++++ b/src/hotspot/share/gc/parallel/psOldGen.hpp +@@ -48,6 +48,9 @@ class PSOldGen : public CHeapObj { + const size_t _min_gen_size; + const size_t _max_gen_size; + ++ // For Dynamic Max Heap ++ size_t _cur_max_gen_size; ++ + // Block size for parallel iteration + static const size_t IterateBlockSize = 1024 * 1024; + +@@ -108,9 +111,27 @@ class PSOldGen : public CHeapObj { + (HeapWord*)(_virtual_space->high())); + } + +- size_t max_gen_size() const { return _max_gen_size; } ++ size_t max_gen_size() const { ++ if (Universe::is_dynamic_max_heap_enable()) { ++ guarantee(_cur_max_gen_size <= _max_gen_size && _cur_max_gen_size >= _min_gen_size, "must be"); ++ return _cur_max_gen_size; ++ } ++ return _max_gen_size; ++ } + size_t min_gen_size() const { return _min_gen_size; } + ++ // Dynamic Max Heap 
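++ // Moves the effective old gen ceiling; the new limit is also propagated
++ // to the virtual space and, when UsePerfData is on, to the generation's
++ // max-size counter.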
++ void set_cur_max_gen_size(size_t new_size) {
++ guarantee(Universe::is_dynamic_max_heap_enable(), "must be");
++ guarantee(new_size <= _max_gen_size && new_size >= _min_gen_size, "must be");
++ guarantee(_max_gen_size == _virtual_space->reserved_size(), "must be");
++ _cur_max_gen_size = new_size;
++ _virtual_space->set_dynamic_max_heap_size(new_size);
++ if (UsePerfData) {
++ _gen_counters->update_max_size(new_size);
++ }
++ }
++
+ bool is_in(const void* p) const {
+ return _virtual_space->is_in_committed((void *)p);
+ }
+diff --git a/src/hotspot/share/gc/parallel/psVMOperations.cpp b/src/hotspot/share/gc/parallel/psVMOperations.cpp
+index 47eeffb34..846acddbc 100644
+--- a/src/hotspot/share/gc/parallel/psVMOperations.cpp
++++ b/src/hotspot/share/gc/parallel/psVMOperations.cpp
+@@ -27,6 +27,7 @@
+ #include "gc/parallel/psScavenge.hpp"
+ #include "gc/parallel/psVMOperations.hpp"
+ #include "gc/shared/gcLocker.hpp"
++#include "gc/shared/genArguments.hpp"
+ #include "utilities/dtrace.hpp"
+
+ // The following methods are used by the parallel scavenge collector
+@@ -76,3 +77,256 @@ void VM_ParallelGCSystemGC::doit() {
+ _full_gc_succeeded = PSParallelCompact::invoke(false);
+ }
+ }
++
++PS_ChangeMaxHeapOp::PS_ChangeMaxHeapOp(size_t new_max_heap) :
++ VM_ChangeMaxHeapOp(new_max_heap) {
++}
++
++/*
++ * 1. calculate the new young/old gen limit sizes.
++ * 2. trigger Full GC if necessary
++ * 3. check and apply the new limits
++ */
++void PS_ChangeMaxHeapOp::doit() {
++ ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
++ assert(heap->kind() == CollectedHeap::Parallel, "must be a ParallelScavengeHeap");
++
++ // step 1
++ PSOldGen* old_gen = heap->old_gen();
++ PSYoungGen* young_gen = heap->young_gen();
++ size_t cur_heap_limit = heap->current_max_heap_size();
++ size_t cur_old_limit = old_gen->max_gen_size();
++ size_t cur_young_limit = young_gen->max_gen_size();
++ bool is_shrink = _new_max_heap < cur_heap_limit;
++ bool is_valid = false;
++
++ const size_t young_reserved_size = young_gen->reserved().byte_size();
++ const size_t young_min_size = young_gen->min_gen_size();
++ const size_t old_reserved_size = old_gen->reserved().byte_size();
++ const size_t old_min_size = old_gen->min_gen_size();
++
++ guarantee(cur_old_limit + cur_young_limit == cur_heap_limit, "must be");
++
++ // clamp to the young gen size limits
++ size_t new_young_limit = GenArguments::scale_by_NewRatio_aligned(_new_max_heap, GenAlignment);
++ new_young_limit = MIN2(new_young_limit, young_reserved_size);
++ new_young_limit = MAX2(new_young_limit, young_min_size);
++ // align shrink/expand direction
++ if ((is_shrink && (new_young_limit > cur_young_limit)) ||
++ (!is_shrink && (new_young_limit < cur_young_limit))) {
++ new_young_limit = cur_young_limit;
++ }
++ size_t new_old_limit = _new_max_heap - new_young_limit;
++
++ if (new_old_limit > old_reserved_size) {
++ new_old_limit = old_reserved_size;
++ new_young_limit = _new_max_heap - new_old_limit;
++ }
++
++ // keep the new_old_limit aligned with shrink/expand direction
++ if ((is_shrink && (new_old_limit > cur_old_limit)) ||
++ (!is_shrink && (new_old_limit < cur_old_limit))) {
++ new_old_limit = cur_old_limit;
++ new_young_limit = _new_max_heap - new_old_limit;
++ }
++
++ // After the final calculation, check that the limits are legal
++ if ((new_old_limit < old_min_size) ||
++ (new_old_limit > old_reserved_size) ||
++ (new_young_limit < young_min_size) ||
++ (new_young_limit > young_reserved_size)) {
++ log_debug(dynamic, heap)("PS_ElasticMaxHeapOp abort: can
not calculate new legal limit:"
++ " new_old_limit: " SIZE_FORMAT "K, " "old gen min size: " SIZE_FORMAT "K, old gen reserved size: " SIZE_FORMAT "K"
++ " new_young_limit: " SIZE_FORMAT "K, " "young gen min size: " SIZE_FORMAT "K, young gen reserved size: " SIZE_FORMAT "K" ,
++ (new_old_limit / K), (old_min_size / K), (old_reserved_size / K),
++ (new_young_limit / K), (young_min_size / K), (young_reserved_size / K));
++ return;
++ }
++
++ log_debug(dynamic, heap)("PS_ElasticMaxHeapOp plan: "
++ "desired young gen size (" SIZE_FORMAT "K" "->" SIZE_FORMAT "K), "
++ "desired old gen size (" SIZE_FORMAT "K" "->" SIZE_FORMAT "K)",
++ (cur_young_limit / K),
++ (new_young_limit / K),
++ (cur_old_limit / K),
++ (new_old_limit / K));
++ if (is_shrink) {
++ guarantee(new_old_limit <= cur_old_limit && new_young_limit <= cur_young_limit, "must be");
++ } else {
++ guarantee(new_old_limit >= cur_old_limit && new_young_limit >= cur_young_limit, "must be");
++ }
++
++ // step2
++ // Check resize legality
++ if (is_shrink) {
++ // check whether old/young can be resized, trigger full gc as needed
++ bool can_shrink = os::Linux::dmh_ps_old_gen_can_shrink(new_old_limit,
++ heap->old_gen()->used_in_bytes(),
++ MinHeapFreeRatio != 0 ? MinHeapFreeRatio : DynamicMaxHeapShrinkMinFreeRatio,
++ heap->old_gen()->virtual_space()->alignment(),
++ is_valid);
++ if (!is_valid) {
++ log_debug(dynamic, heap)("PS_ElasticMaxHeapOp fail for missing ACC");
++ return;
++ }
++ if (can_shrink) {
++ can_shrink = os::Linux::dmh_ps_young_gen_can_shrink(new_young_limit,
++ heap->young_gen()->virtual_space()->committed_size(),
++ is_valid);
++ if (!is_valid) {
++ log_debug(dynamic, heap)("PS_ElasticMaxHeapOp fail for missing ACC");
++ return;
++ }
++ }
++
++ if (!can_shrink) {
++ GCCauseSetter gccs(heap, _gc_cause);
++ heap->do_full_collection(true);
++ log_debug(dynamic, heap)("PS_ElasticMaxHeapOp heap after Full GC");
++ LogTarget(Debug, dynamic, heap) lt;
++ if (lt.is_enabled()) {
++ LogStream ls(lt);
++ heap->print_on(&ls);
++ }
++ if (young_gen->used_in_bytes() != 0) {
++ log_debug(dynamic, heap)("PS_ElasticMaxHeapOp abort: young is not empty after full gc");
++ return;
++ }
++ }
++
++ can_shrink = os::Linux::dmh_ps_old_gen_can_shrink(new_old_limit,
++ heap->old_gen()->used_in_bytes(),
++ MinHeapFreeRatio != 0 ? MinHeapFreeRatio : DynamicMaxHeapShrinkMinFreeRatio,
++ heap->old_gen()->virtual_space()->alignment(),
++ is_valid);
++ if (!is_valid) {
++ log_debug(dynamic, heap)("PS_ElasticMaxHeapOp fail for missing ACC");
++ return;
++ }
++
++ if (!can_shrink) {
++ log_debug(dynamic, heap)("PS_ElasticMaxHeapOp abort: not enough old free for shrink");
++ return;
++ }
++
++ // step3
++ // shrink generation committed size if needed
++ // 1. old gen
++ // 1 old gen can shrink capacity without full gc
++ // 2 old gen has passed the shrink validity check if execution reaches here
++ // 3 old gen can shrink capacity if needed
++ // 2.
young gen ++ // 1 young gen must shrink capacity after full gc ++ // 2 there may be three situations after the shrink validity check in step 2 ++ // 1) both old gen and young gen have passed the check, ++ // indicating new_young_limit is big enough, ++ // there is no need to shrink capacity ++ // 2) old gen failed the check and triggered full gc ++ // 3) young gen failed the check and triggered full gc ++ ++ if (old_gen->capacity_in_bytes() > new_old_limit) { ++ size_t desired_free = new_old_limit - old_gen->used_in_bytes(); ++ char* old_high = old_gen->virtual_space()->committed_high_addr(); ++ old_gen->resize(desired_free); ++ char* new_old_high = old_gen->virtual_space()->committed_high_addr(); ++ if (old_gen->capacity_in_bytes() > new_old_limit) { ++ log_debug(dynamic, heap)("PS_ElasticMaxHeapOp abort: resize old fail " SIZE_FORMAT "K", ++ old_gen->capacity_in_bytes() / K); ++ return; ++ } ++ log_debug(dynamic, heap)("PS_ElasticMaxHeapOp continue: shrink old success " SIZE_FORMAT "K", ++ old_gen->capacity_in_bytes() / K); ++ if (old_high > new_old_high) { ++ // shrink is caused by dynamic max heap, free physical memory ++ size_t shrink_bytes = old_high - new_old_high; ++ guarantee((shrink_bytes > 0) && (shrink_bytes % os::vm_page_size() == 0), "should be"); ++ bool result = os::free_heap_physical_memory(new_old_high, shrink_bytes); ++ guarantee(result, "free heap physical memory should be successful"); ++ } ++ } ++ ++ if (young_gen->virtual_space()->committed_size() > new_young_limit) { ++ // entering this branch means full gc must have been triggered ++ guarantee(young_gen->eden_space()->is_empty() && ++ young_gen->to_space()->is_empty() && ++ young_gen->from_space()->is_empty(), ++ "must be empty"); ++ ++ char* young_high = young_gen->virtual_space()->committed_high_addr(); ++ if (!young_gen->shrink_after_full_gc(new_young_limit)) { ++ log_debug(dynamic, heap)("PS_ElasticMaxHeapOp abort: shrink young fail"); ++ return; ++ } ++ char* new_young_high = young_gen->virtual_space()->committed_high_addr(); ++ log_debug(dynamic, heap)("PS_ElasticMaxHeapOp continue: shrink young success " SIZE_FORMAT "K", ++ young_gen->virtual_space()->committed_size() / K); ++ if (young_high > new_young_high) { ++ // shrink is caused by dynamic max heap, free physical memory ++ size_t shrink_bytes = young_high - new_young_high; ++ guarantee((shrink_bytes > 0) && (shrink_bytes % os::vm_page_size() == 0), "should be"); ++ bool result = os::free_heap_physical_memory(new_young_high, shrink_bytes); ++ guarantee(result, "free heap physical memory should be successful"); ++ } ++ } ++ } ++ // update the young/old gen limits to prevent further expansion ++ old_gen->set_cur_max_gen_size(new_old_limit); ++ young_gen->set_cur_max_gen_size(new_young_limit); ++ heap->set_current_max_heap_size(_new_max_heap); ++ _resize_success = true; ++ log_debug(dynamic, heap)("PS_ElasticMaxHeapOp success"); ++} ++ ++// Resize for DynamicHeapSize, shrink to new_size ++bool PSYoungGen::shrink_after_full_gc(size_t new_size) { ++ const size_t alignment = virtual_space()->alignment(); ++ ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); ++ size_t orig_size = virtual_space()->committed_size(); ++ guarantee(eden_space()->is_empty() && to_space()->is_empty() && from_space()->is_empty(), "must be empty"); ++ guarantee(new_size % alignment == 0, "must be"); ++ guarantee(new_size < orig_size, "must be"); ++ ++ // shrink virtual space ++ size_t shrink_bytes = virtual_space()->committed_size() - new_size; ++ bool success = 
virtual_space()->shrink_by(shrink_bytes); ++ log_debug(dynamic, heap)("PSYoungGen::shrink_after_full_gc: shrink virtual space %s " ++ "orig committed " SIZE_FORMAT "K " ++ "current committed " SIZE_FORMAT "K " ++ "shrink by " SIZE_FORMAT "K", ++ success ? "success" : "fail", ++ orig_size / K, ++ virtual_space()->committed_size() / K, ++ shrink_bytes / K); ++ ++ if (!success) { ++ return false; ++ } ++ ++ // calculate new eden/survivor size ++ // shrink with the same ratio; let the size policy adjust later ++ size_t current_survivor_ratio = eden_space()->capacity_in_bytes() / from_space()->capacity_in_bytes(); ++ current_survivor_ratio = MAX2(current_survivor_ratio, (size_t)1); ++ size_t new_survivor_size = new_size / (current_survivor_ratio + 2); ++ new_survivor_size = align_down(new_survivor_size, SpaceAlignment); ++ new_survivor_size = MAX2(new_survivor_size, SpaceAlignment); ++ size_t new_eden_size = new_size - 2 * new_survivor_size; ++ ++ guarantee(new_eden_size % SpaceAlignment == 0, "must be"); ++ log_debug(dynamic, heap)("PSYoungGen::shrink_after_full_gc: " ++ "new eden size " SIZE_FORMAT "K " ++ "new survivor size " SIZE_FORMAT "K " ++ "new young gen size " SIZE_FORMAT "K", ++ new_eden_size / K, ++ new_survivor_size / K, ++ new_size / K); ++ ++ // setup new eden/survivor space ++ set_space_boundaries(new_eden_size, new_survivor_size); ++ post_resize(); ++ LogTarget(Debug, dynamic, heap) lt; ++ if (lt.is_enabled()) { ++ LogStream ls(lt); ++ print_on(&ls); ++ } ++ return true; ++} +\ No newline at end of file +diff --git a/src/hotspot/share/gc/parallel/psVMOperations.hpp b/src/hotspot/share/gc/parallel/psVMOperations.hpp +index cc49eb631..30485f44d 100644 +--- a/src/hotspot/share/gc/parallel/psVMOperations.hpp ++++ b/src/hotspot/share/gc/parallel/psVMOperations.hpp +@@ -28,6 +28,7 @@ + #include "gc/parallel/parallelScavengeHeap.hpp" + #include "gc/shared/gcCause.hpp" + #include "gc/shared/gcVMOperations.hpp" ++#include "gc/shared/dynamicMaxHeap.hpp" + + class VM_ParallelGCFailedAllocation : public VM_CollectForAllocation { + public: +@@ -48,4 +49,11 @@ class VM_ParallelGCSystemGC: public VM_GC_Operation { + bool full_gc_succeeded() const { return _full_gc_succeeded; } + }; + ++// For ParallelScavengeHeap ++class PS_ChangeMaxHeapOp : public VM_ChangeMaxHeapOp { ++public: ++ PS_ChangeMaxHeapOp(size_t new_max_heap); ++ virtual void doit(); ++}; ++ + #endif // SHARE_GC_PARALLEL_PSVMOPERATIONS_HPP +diff --git a/src/hotspot/share/gc/parallel/psVirtualspace.cpp b/src/hotspot/share/gc/parallel/psVirtualspace.cpp +index f0ccd9a75..0181ab954 100644 +--- a/src/hotspot/share/gc/parallel/psVirtualspace.cpp ++++ b/src/hotspot/share/gc/parallel/psVirtualspace.cpp +@@ -31,7 +31,8 @@ + // PSVirtualSpace + + PSVirtualSpace::PSVirtualSpace(ReservedSpace rs, size_t alignment) : +- _alignment(alignment) ++ _alignment(alignment), ++ _dynamic_max_heap_size(0) + { + set_reserved(rs); + set_committed(reserved_low_addr(), reserved_low_addr()); +@@ -45,11 +46,13 @@ PSVirtualSpace::PSVirtualSpace(): + _reserved_low_addr(nullptr), + _reserved_high_addr(nullptr), + _committed_low_addr(nullptr), + _committed_high_addr(nullptr), +- _special(false) { ++ _special(false), ++ _dynamic_max_heap_size(0) { + } + + // Deprecated. 
+ void PSVirtualSpace::initialize(ReservedSpace rs) { ++ _dynamic_max_heap_size = 0; + set_reserved(rs); + set_committed(reserved_low_addr(), reserved_low_addr()); + DEBUG_ONLY(verify()); +diff --git a/src/hotspot/share/gc/parallel/psVirtualspace.hpp b/src/hotspot/share/gc/parallel/psVirtualspace.hpp +index e2cb08160..7407ebc7f 100644 +--- a/src/hotspot/share/gc/parallel/psVirtualspace.hpp ++++ b/src/hotspot/share/gc/parallel/psVirtualspace.hpp +@@ -52,6 +52,9 @@ class PSVirtualSpace : public CHeapObj { + // os::commit_memory() or os::uncommit_memory(). + bool _special; + ++ // Dynamic Max Heap ++ size_t _dynamic_max_heap_size; ++ + public: + PSVirtualSpace(ReservedSpace rs, size_t alignment); + +@@ -88,6 +91,17 @@ class PSVirtualSpace : public CHeapObj { + virtual bool expand_by(size_t bytes); + virtual bool shrink_by(size_t bytes); + void release(); ++ // Dynamic Max Heap ++ void set_dynamic_max_heap_size(size_t new_size) { ++ guarantee(new_size <= reserved_size(), "must be"); ++ guarantee(new_size >= committed_size(), "must be"); ++ _dynamic_max_heap_size = new_size; ++ } ++ size_t dynamic_max_heap_size() const { ++ guarantee(_dynamic_max_heap_size <= reserved_size(), "must be"); ++ guarantee(_dynamic_max_heap_size >= committed_size(), "must be"); ++ return _dynamic_max_heap_size; ++ } + + #ifndef PRODUCT + // Debugging +@@ -131,6 +145,9 @@ inline size_t PSVirtualSpace::reserved_size() const { + } + + inline size_t PSVirtualSpace::uncommitted_size() const { ++ if (Universe::is_dynamic_max_heap_enable()) { ++ return dynamic_max_heap_size() - committed_size(); ++ } + return reserved_size() - committed_size(); + } + +@@ -138,6 +155,10 @@ inline void PSVirtualSpace::set_reserved(char* low_addr, char* high_addr, bool s + _reserved_low_addr = low_addr; + _reserved_high_addr = high_addr; + _special = special; ++ if (Universe::is_dynamic_max_heap_enable()) { ++ guarantee(_dynamic_max_heap_size == 0, "resize virtual NYI"); ++ _dynamic_max_heap_size = high_addr - low_addr; ++ } + } + + inline void PSVirtualSpace::set_reserved(ReservedSpace rs) { +diff --git a/src/hotspot/share/gc/parallel/psYoungGen.cpp b/src/hotspot/share/gc/parallel/psYoungGen.cpp +index 12a6da8c2..8b4a32d99 100644 +--- a/src/hotspot/share/gc/parallel/psYoungGen.cpp ++++ b/src/hotspot/share/gc/parallel/psYoungGen.cpp +@@ -42,7 +42,8 @@ PSYoungGen::PSYoungGen(ReservedSpace rs, size_t initial_size, size_t min_size, s + _from_space(nullptr), + _to_space(nullptr), + _min_gen_size(min_size), +- _max_gen_size(max_size), ++ _max_gen_size(Universe::is_dynamic_max_heap_enable() ? rs.size() : max_size), ++ _cur_max_gen_size(Universe::is_dynamic_max_heap_enable() ? 
max_size : -1), + _gen_counters(nullptr), + _eden_counters(nullptr), + _from_counters(nullptr), +@@ -56,6 +57,9 @@ void PSYoungGen::initialize_virtual_space(ReservedSpace rs, + size_t alignment) { + assert(initial_size != 0, "Should have a finite size"); + _virtual_space = new PSVirtualSpace(rs, alignment); ++ if (Universe::is_dynamic_max_heap_enable()) { ++ _virtual_space->set_dynamic_max_heap_size(_cur_max_gen_size); ++ } + if (!virtual_space()->expand_by(initial_size)) { + vm_exit_during_initialization("Could not reserve enough space for object heap"); + } +@@ -70,7 +74,7 @@ void PSYoungGen::initialize_work() { + + _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(), + (HeapWord*)virtual_space()->high_boundary()); +- assert(_reserved.byte_size() == max_gen_size(), "invariant"); ++ assert(_reserved.byte_size() == max_gen_size() || Universe::is_dynamic_max_heap_enable(), "invariant"); + + MemRegion cmr((HeapWord*)virtual_space()->low(), + (HeapWord*)virtual_space()->high()); +diff --git a/src/hotspot/share/gc/parallel/psYoungGen.hpp b/src/hotspot/share/gc/parallel/psYoungGen.hpp +index 58b649428..f927d6f77 100644 +--- a/src/hotspot/share/gc/parallel/psYoungGen.hpp ++++ b/src/hotspot/share/gc/parallel/psYoungGen.hpp +@@ -48,6 +48,9 @@ class PSYoungGen : public CHeapObj { + const size_t _min_gen_size; + const size_t _max_gen_size; + ++ // For Dynamic Max Heap ++ size_t _cur_max_gen_size; ++ + // Performance counters + PSGenerationCounters* _gen_counters; + SpaceCounters* _eden_counters; +@@ -111,6 +114,9 @@ class PSYoungGen : public CHeapObj { + // not allow us to use these values. + void resize(size_t eden_size, size_t survivor_size); + ++ // Resize for DynamicHeapSize, shrink to new_size ++ bool shrink_after_full_gc(size_t new_size); ++ + // Size info + size_t capacity_in_bytes() const; + size_t used_in_bytes() const; +@@ -121,7 +127,24 @@ class PSYoungGen : public CHeapObj { + size_t free_in_words() const; + + size_t min_gen_size() const { return _min_gen_size; } +- size_t max_gen_size() const { return _max_gen_size; } ++ size_t max_gen_size() const { ++ if (Universe::is_dynamic_max_heap_enable()) { ++ guarantee(_cur_max_gen_size <= _max_gen_size && _cur_max_gen_size >= _min_gen_size, "must be"); ++ return _cur_max_gen_size; ++ } ++ return _max_gen_size; ++ } ++ ++ void set_cur_max_gen_size(size_t new_size) { ++ guarantee(Universe::is_dynamic_max_heap_enable(), "must be"); ++ guarantee(new_size <= _max_gen_size && new_size >= _min_gen_size, "must be"); ++ guarantee(_max_gen_size == _reserved.byte_size(), "must be"); ++ _cur_max_gen_size = new_size; ++ _virtual_space->set_dynamic_max_heap_size(new_size); ++ if (UsePerfData) { ++ _gen_counters->update_max_size(new_size); ++ } ++ } + + bool is_maximal_no_gc() const { + return true; // Never expands except at a GC +diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp +index 89b39195e..ac3a7b8d6 100644 +--- a/src/hotspot/share/gc/shared/collectedHeap.cpp ++++ b/src/hotspot/share/gc/shared/collectedHeap.cpp +@@ -243,6 +243,7 @@ CollectedHeap::CollectedHeap() : + _used_at_last_gc(0), + _is_stw_gc_active(false), + _last_whole_heap_examined_time_ns(os::javaTimeNanos()), ++ _current_max_heap_size(MaxHeapSize), + _total_collections(0), + _total_full_collections(0), + _gc_cause(GCCause::_no_gc), +diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp +index e2613c438..bd675760c 100644 +--- 
a/src/hotspot/share/gc/shared/collectedHeap.hpp ++++ b/src/hotspot/share/gc/shared/collectedHeap.hpp +@@ -128,6 +128,8 @@ class CollectedHeap : public CHeapObj { + // time-warp warnings. + jlong _last_whole_heap_examined_time_ns; + ++ size_t _current_max_heap_size; ++ + unsigned int _total_collections; // ... started + unsigned int _total_full_collections; // ... started + NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;) +@@ -529,6 +531,15 @@ class CollectedHeap : public CHeapObj { + void reset_promotion_should_fail(volatile size_t* count); + void reset_promotion_should_fail(); + #endif // #ifndef PRODUCT ++ ++public: ++ // Dynamic Max Heap ++ virtual bool change_max_heap(size_t new_size){ return false; } ++ bool check_new_max_heap_validity(size_t new_size, outputStream* st); ++ size_t current_max_heap_size() const { return _current_max_heap_size; } ++ void set_current_max_heap_size(size_t new_size) { ++ _current_max_heap_size = new_size; ++ } + }; + + // Class to set and reset the GC cause for a CollectedHeap. +diff --git a/src/hotspot/share/gc/shared/dynamicMaxHeap.cpp b/src/hotspot/share/gc/shared/dynamicMaxHeap.cpp +new file mode 100644 +index 000000000..ab9cce2a6 +--- /dev/null ++++ b/src/hotspot/share/gc/shared/dynamicMaxHeap.cpp +@@ -0,0 +1,163 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. ++ * Copyright (C) 2023 THL A29 Limited, a Tencent company. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ */ ++ ++#include "precompiled.hpp" ++#include "dynamicMaxHeap.hpp" ++#include "runtime/globals_extension.hpp" ++#include "os_linux.hpp" ++#include "logging/logConfiguration.hpp" ++ ++size_t DynamicMaxHeapConfig::_initial_max_heap_size = 0; ++ ++VM_ChangeMaxHeapOp::VM_ChangeMaxHeapOp(size_t new_max_heap) : ++ VM_GC_Operation(0, GCCause::_change_max_heap, 0, true) { ++ _new_max_heap = new_max_heap; ++ _resize_success = false; ++} ++ ++bool VM_ChangeMaxHeapOp::skip_operation() const { ++ return false; ++} ++ ++/* ++ * validity check ++ * new current max heap must be: ++ * 1. >= min_heap_byte_size ++ * 2. <= max_heap_byte_size ++ * 3. 
not equal to current_max_heap_size ++ * ++*/ ++bool CollectedHeap::check_new_max_heap_validity(size_t new_size, outputStream* st) { ++ if (new_size > DynamicMaxHeapSizeLimit) { ++ st->print_cr("%s " SIZE_FORMAT "K exceeds maximum limit " SIZE_FORMAT "K", ++ Universe::dynamic_max_heap_dcmd_name(), ++ (new_size / K), ++ (DynamicMaxHeapSizeLimit / K)); ++ return false; ++ } ++ if (new_size < MinHeapSize) { ++ st->print_cr("%s " SIZE_FORMAT "K below minimum limit " SIZE_FORMAT "K", ++ Universe::dynamic_max_heap_dcmd_name(), ++ (new_size / K), ++ (MinHeapSize / K)); ++ return false; ++ } ++ // don't print log if it is init shrink triggered by DynamicMaxHeapSizeLimit ++ if (new_size == current_max_heap_size()) { ++ st->print_cr("%s " SIZE_FORMAT "K same as current max heap size " SIZE_FORMAT "K", ++ Universe::dynamic_max_heap_dcmd_name(), ++ (new_size / K), ++ (current_max_heap_size() / K)); ++ return false; ++ } ++ return true; ++} ++ ++/* ++ common check for Dynamic Max Heap ++ 1. DynamicMaxHeapSizeLimit/ElasticMaxHeapSize should be used together with Xmx ++ 2. Linux AArch64 (HiSilicon) only ++ 3. can only be used with the ACC library installed ++ 4. cannot be combined with fixed new/old gen sizes ++ 5. requires UseAdaptiveSizePolicy, otherwise all sizes are fixed ++ 6. only G1GC/PSGC implemented now ++ 7. the limit must be larger than Xmx ++*/ ++bool DynamicMaxHeapChecker::common_check() { ++ if (!FLAG_IS_CMDLINE(DynamicMaxHeapSizeLimit) && !FLAG_IS_CMDLINE(ElasticMaxHeapSize) && !ElasticMaxHeap) { ++ return false; ++ } ++ if ((FLAG_IS_CMDLINE(DynamicMaxHeapSizeLimit) || FLAG_IS_CMDLINE(ElasticMaxHeapSize)) && !FLAG_IS_CMDLINE(MaxHeapSize)) { ++ warning_and_disable("should be used together with -Xmx/-XX:MaxHeapSize"); ++ return false; ++ } ++#if !defined(LINUX) || !defined(AARCH64) ++ warning_and_disable("can only be assigned on Linux aarch64"); ++ return false; ++#endif ++#ifdef AARCH64 ++ VM_Version::get_cpu_model(); ++ if (!VM_Version::is_hisi_enabled()) { ++ warning_and_disable("can only be assigned on HiSi now"); ++ return false; ++ } ++#endif ++ bool is_valid = false; ++ size_t dummy_param = 0; ++ os::Linux::dmh_g1_get_region_limit(dummy_param, dummy_param, is_valid, true); ++ if (!is_valid) { ++ warning_and_disable("can only be used with ACC installed"); ++ return false; ++ } ++ os::Linux::dmh_g1_can_shrink(dummy_param, dummy_param, dummy_param, dummy_param, is_valid, true); ++ if (!is_valid) { ++ warning_and_disable("can only be used with ACC installed"); ++ return false; ++ } ++ os::Linux::dmh_ps_old_gen_can_shrink(dummy_param, dummy_param, dummy_param, dummy_param, is_valid, true); ++ if (!is_valid) { ++ warning_and_disable("can only be used with ACC installed"); ++ return false; ++ } ++ os::Linux::dmh_ps_young_gen_can_shrink(dummy_param, dummy_param, is_valid, true); ++ if (!is_valid) { ++ warning_and_disable("can only be used with ACC installed"); ++ return false; ++ } ++ if (FLAG_IS_CMDLINE(OldSize) || FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) { ++ warning_and_disable("can not be used with -XX:OldSize/-XX:NewSize/-XX:MaxNewSize"); ++ return false; ++ } ++ if (!UseAdaptiveSizePolicy) { ++ warning_and_disable("should be used with -XX:+UseAdaptiveSizePolicy"); ++ return false; ++ } ++ if (!UseG1GC && !UseParallelGC) { ++ warning_and_disable("should be used with -XX:+UseG1GC/-XX:+UseParallelGC now"); ++ return false; ++ } ++ if ((FLAG_IS_CMDLINE(DynamicMaxHeapSizeLimit) || FLAG_IS_CMDLINE(ElasticMaxHeapSize)) && DynamicMaxHeapSizeLimit <= MaxHeapSize) { ++ warning_and_disable("should be larger than -Xmx/-XX:MaxHeapSize"); ++ return 
false; ++ } ++ return true; ++} ++ ++bool DynamicMaxHeapChecker::check_dynamic_max_heap_size_limit() { ++ if (TraceElasticMaxHeap) { ++ LogConfiguration::configure_stdout(LogLevel::Debug, false, LOG_TAGS(dynamic, heap)); ++ } ++ if (FLAG_IS_CMDLINE(ElasticMaxHeapSize)) { ++ FLAG_SET_ERGO(DynamicMaxHeapSizeLimit, ElasticMaxHeapSize); ++ } ++ if (FLAG_IS_CMDLINE(ElasticMaxHeapShrinkMinFreeRatio)) { ++ FLAG_SET_ERGO(DynamicMaxHeapShrinkMinFreeRatio, ElasticMaxHeapShrinkMinFreeRatio); ++ } ++ return common_check(); ++} ++ ++void DynamicMaxHeapChecker::warning_and_disable(const char *reason) { ++ warning("%s feature is not available, reason: -XX:%s %s; automatically disabled", ++ Universe::dynamic_max_heap_option_name(), ++ Universe::dynamic_max_heap_size_limit_option_name(), ++ reason); ++ FLAG_SET_DEFAULT(DynamicMaxHeapSizeLimit, ScaleForWordSize(DynamicMaxHeapChecker::_default_dynamic_max_heap_size_limit * M)); ++ Universe::set_dynamic_max_heap_enable(false); ++} +\ No newline at end of file +diff --git a/src/hotspot/share/gc/shared/dynamicMaxHeap.hpp b/src/hotspot/share/gc/shared/dynamicMaxHeap.hpp +new file mode 100644 +index 000000000..7cae52af2 +--- /dev/null ++++ b/src/hotspot/share/gc/shared/dynamicMaxHeap.hpp +@@ -0,0 +1,61 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. ++ * Copyright (C) 2023 THL A29 Limited, a Tencent company. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
++ */ ++ ++#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_DYNAMIC_MAX_HEAP_OPERATION_HPP ++#define SHARE_VM_GC_IMPLEMENTATION_SHARED_DYNAMIC_MAX_HEAP_OPERATION_HPP ++ ++#include "utilities/defaultStream.hpp" ++#include "gc/shared/gcVMOperations.hpp" ++ ++class VM_ChangeMaxHeapOp : public VM_GC_Operation { ++public: ++ VM_ChangeMaxHeapOp(size_t new_max_heap); ++ VMOp_Type type() const { ++ return VMOp_DynamicMaxHeap; ++ } ++ bool resize_success() const { ++ return _resize_success; ++ } ++protected: ++ size_t _new_max_heap; ++ bool _resize_success; ++private: ++ bool skip_operation() const; ++}; ++ ++class DynamicMaxHeapChecker : AllStatic { ++public: ++ static bool common_check(); ++ static bool check_dynamic_max_heap_size_limit(); ++ static void warning_and_disable(const char *reason); ++private: ++ static const int _default_dynamic_max_heap_size_limit = 96; ++}; ++ ++class DynamicMaxHeapConfig : AllStatic { ++public: ++ static size_t initial_max_heap_size() { return _initial_max_heap_size; } ++ static void set_initial_max_heap_size(size_t new_size) { ++ _initial_max_heap_size = new_size; ++ } ++private: ++ static size_t _initial_max_heap_size; ++}; ++#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_DYNAMIC_MAX_HEAP_OPERATION_HPP +\ No newline at end of file +diff --git a/src/hotspot/share/gc/shared/gcArguments.cpp b/src/hotspot/share/gc/shared/gcArguments.cpp +index 0040f28f1..524eb1ccc 100644 +--- a/src/hotspot/share/gc/shared/gcArguments.cpp ++++ b/src/hotspot/share/gc/shared/gcArguments.cpp +@@ -155,6 +155,12 @@ void GCArguments::initialize_heap_flags_and_sizes() { + if (!is_aligned(MaxHeapSize, HeapAlignment)) { + FLAG_SET_ERGO(MaxHeapSize, align_up(MaxHeapSize, HeapAlignment)); + } ++ if (Universe::is_dynamic_max_heap_enable() && !is_aligned(DynamicMaxHeapSizeLimit, HeapAlignment)) { ++ size_t _dynamic_max_heap_size_limit = DynamicMaxHeapSizeLimit; ++ FLAG_SET_ERGO(DynamicMaxHeapSizeLimit, align_up(DynamicMaxHeapSizeLimit, HeapAlignment)); ++ log_debug(dynamic, heap)("align the DynamicMaxHeapSizeLimit " SIZE_FORMAT " up to " SIZE_FORMAT " for heap alignment " SIZE_FORMAT , ++ _dynamic_max_heap_size_limit, DynamicMaxHeapSizeLimit, HeapAlignment); ++ } + + if (!FLAG_IS_DEFAULT(InitialHeapSize) && InitialHeapSize > MaxHeapSize) { + FLAG_SET_ERGO(MaxHeapSize, InitialHeapSize); +diff --git a/src/hotspot/share/gc/shared/gcCause.cpp b/src/hotspot/share/gc/shared/gcCause.cpp +index 426154fdc..ba4276e5b 100644 +--- a/src/hotspot/share/gc/shared/gcCause.cpp ++++ b/src/hotspot/share/gc/shared/gcCause.cpp +@@ -66,6 +66,9 @@ const char* GCCause::to_string(GCCause::Cause cause) { + case _allocation_failure: + return "Allocation Failure"; + ++ case _change_max_heap: ++ return "Change Max Heap"; ++ + case _codecache_GC_threshold: + return "CodeCache GC Threshold"; + +diff --git a/src/hotspot/share/gc/shared/gcCause.hpp b/src/hotspot/share/gc/shared/gcCause.hpp +index 152ca787f..2a6c73b81 100644 +--- a/src/hotspot/share/gc/shared/gcCause.hpp ++++ b/src/hotspot/share/gc/shared/gcCause.hpp +@@ -58,6 +58,7 @@ class GCCause : public AllStatic { + _no_gc, + _no_cause_specified, + _allocation_failure, ++ _change_max_heap, + + /* implementation specific */ + +diff --git a/src/hotspot/share/gc/shared/gc_globals.hpp b/src/hotspot/share/gc/shared/gc_globals.hpp +index 8e51632bd..0cdd68b03 100644 +--- a/src/hotspot/share/gc/shared/gc_globals.hpp ++++ b/src/hotspot/share/gc/shared/gc_globals.hpp +@@ -593,6 +593,24 @@ + "Maximum heap size (in bytes)") \ + constraint(MaxHeapSizeConstraintFunc,AfterErgo) \ + \ ++ 
product(size_t, DynamicMaxHeapSizeLimit, ScaleForWordSize(96*M), \ ++ "The upper limit of the dynamic maximum heap size (in bytes)") \ ++ \ ++ product(uintx, DynamicMaxHeapShrinkMinFreeRatio, 40, \ ++ "Minimal ratio of free bytes after dynamic max heap shrink") \ ++ \ ++ product(size_t, ElasticMaxHeapSize, ScaleForWordSize(96*M), \ ++ "Elastic maximum heap size (in bytes)") \ ++ \ ++ product(bool, ElasticMaxHeap, false, \ ++ "Allow changing the max heap size at runtime via jcmd") \ ++ \ ++ product(bool, TraceElasticMaxHeap, false, \ ++ "Trace Elastic Max Heap adjustment logs and failure reasons") \ ++ \ ++ product(uintx, ElasticMaxHeapShrinkMinFreeRatio, 40, \ ++ "Minimal ratio of free bytes after elastic max heap shrink") \ ++ \ + product(size_t, SoftMaxHeapSize, 0, MANAGEABLE, \ + "Soft limit for maximum heap size (in bytes)") \ + constraint(SoftMaxHeapSizeConstraintFunc,AfterMemoryInit) \ +diff --git a/src/hotspot/share/gc/shared/genArguments.cpp b/src/hotspot/share/gc/shared/genArguments.cpp +index 6d569d6f7..9bb91a86f 100644 +--- a/src/hotspot/share/gc/shared/genArguments.cpp ++++ b/src/hotspot/share/gc/shared/genArguments.cpp +@@ -377,7 +377,11 @@ void GenArguments::assert_flags() { + void GenArguments::assert_size_info() { + GCArguments::assert_size_info(); + // GenArguments::initialize_size_info may update the MaxNewSize +- assert(MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young and heap sizes"); ++ if (Universe::is_dynamic_max_heap_enable()) { ++ assert(MaxNewSize < MAX2(MaxHeapSize, DynamicMaxHeapSizeLimit), "Ergonomics decided on incompatible maximum young and heap sizes"); ++ } else { ++ assert(MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young and heap sizes"); ++ } + assert(MinNewSize <= NewSize, "Ergonomics decided on incompatible minimum and initial young gen sizes"); + assert(NewSize <= MaxNewSize, "Ergonomics decided on incompatible initial and maximum young gen sizes"); + assert(MinNewSize % GenAlignment == 0, "_min_young_size alignment"); +diff --git a/src/hotspot/share/gc/shared/genArguments.hpp b/src/hotspot/share/gc/shared/genArguments.hpp +index a4c62ff5a..0d9849f0a 100644 +--- a/src/hotspot/share/gc/shared/genArguments.hpp ++++ b/src/hotspot/share/gc/shared/genArguments.hpp +@@ -36,6 +36,7 @@ extern size_t MaxOldSize; + extern size_t GenAlignment; + + class GenArguments : public GCArguments { ++ friend class PS_ChangeMaxHeapOp; + friend class TestGenCollectorPolicy; // Testing + private: + virtual void initialize_alignments(); +@@ -51,6 +52,18 @@ private: + + protected: + virtual void initialize_heap_flags_and_sizes(); ++public: ++ // Dynamic Max Heap ++ static size_t max_old_size(size_t size) { ++ if (Universe::is_dynamic_max_heap_enable()) { ++ size_t young_limit = scale_by_NewRatio_aligned(size, GenAlignment); ++ young_limit = MAX3(young_limit, MinNewSize, NewSize); ++ size_t old_limit = size - young_limit; ++ guarantee(old_limit >= MinOldSize && old_limit >= OldSize, "must be"); ++ return old_limit; ++ } ++ return MaxOldSize; ++ } + }; + + #endif // SHARE_GC_SHARED_GENARGUMENTS_HPP +diff --git a/src/hotspot/share/gc/shared/generationCounters.cpp b/src/hotspot/share/gc/shared/generationCounters.cpp +index 05460ea88..e4c594444 100644 +--- a/src/hotspot/share/gc/shared/generationCounters.cpp ++++ b/src/hotspot/share/gc/shared/generationCounters.cpp +@@ -52,8 +52,15 @@ void GenerationCounters::initialize(const char* name, int ordinal, int spaces, + min_capacity, CHECK); + + cname = 
PerfDataManager::counter_name(_name_space, "maxCapacity"); +- PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes, +- max_capacity, CHECK); ++ // Dynamic Max Heap ++ if (Universe::is_dynamic_max_heap_enable()) { ++ _max_size = PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes, ++ max_capacity, CHECK); ++ } else { ++ _max_size = nullptr; ++ PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes, ++ max_capacity, CHECK); ++ } + + cname = PerfDataManager::counter_name(_name_space, "capacity"); + _current_size = +@@ -88,3 +95,8 @@ void GenerationCounters::update_all() { + assert(_virtual_space != nullptr, "otherwise, override this method"); + _current_size->set_value(_virtual_space->committed_size()); + } ++ ++void GenerationCounters::update_max_size(size_t size) { ++ guarantee(Universe::is_dynamic_max_heap_enable(), "must be"); ++ _max_size->set_value(size); ++} +diff --git a/src/hotspot/share/gc/shared/generationCounters.hpp b/src/hotspot/share/gc/shared/generationCounters.hpp +index 292ea61d4..e21192c8f 100644 +--- a/src/hotspot/share/gc/shared/generationCounters.hpp ++++ b/src/hotspot/share/gc/shared/generationCounters.hpp +@@ -40,6 +40,8 @@ private: + size_t curr_capacity); + + protected: ++ // Dynamic Max Heap ++ PerfVariable* _max_size; // max size can change when Dynamic Max Heap is on + PerfVariable* _current_size; + VirtualSpace* _virtual_space; + +@@ -72,6 +74,9 @@ private: + + virtual void update_all(); + ++ // Dynamic Max Heap ++ void update_max_size(size_t size); ++ + const char* name_space() const { return _name_space; } + + }; +diff --git a/src/hotspot/share/gc/shared/referencePolicy.cpp b/src/hotspot/share/gc/shared/referencePolicy.cpp +index 22ef4eabb..f58f334a7 100644 +--- a/src/hotspot/share/gc/shared/referencePolicy.cpp ++++ b/src/hotspot/share/gc/shared/referencePolicy.cpp +@@ -64,6 +64,9 @@ LRUMaxHeapPolicy::LRUMaxHeapPolicy() { + // Capture state (of-the-VM) information needed to evaluate the policy + void LRUMaxHeapPolicy::setup() { + size_t max_heap = MaxHeapSize; ++ if (Universe::is_dynamic_max_heap_enable()) { ++ max_heap = Universe::heap()->current_max_heap_size(); ++ } + max_heap -= Universe::heap()->used_at_last_gc(); + max_heap /= M; + +diff --git a/src/hotspot/share/memory/universe.cpp b/src/hotspot/share/memory/universe.cpp +index 8b39ea0a9..4b47cd0bc 100644 +--- a/src/hotspot/share/memory/universe.cpp ++++ b/src/hotspot/share/memory/universe.cpp +@@ -168,6 +168,9 @@ OopStorage* Universe::_vm_global = nullptr; + + CollectedHeap* Universe::_collectedHeap = nullptr; + ++// Dynamic Max Heap ++bool Universe::_enable_dynamic_max_heap = false; ++ + objArrayOop Universe::the_empty_class_array () { + return (objArrayOop)_the_empty_class_array.resolve(); + } +diff --git a/src/hotspot/share/memory/universe.hpp b/src/hotspot/share/memory/universe.hpp +index 8b9c5bd62..3c3062ef0 100644 +--- a/src/hotspot/share/memory/universe.hpp ++++ b/src/hotspot/share/memory/universe.hpp +@@ -29,6 +29,7 @@ + #include "oops/array.hpp" + #include "oops/oopHandle.hpp" + #include "runtime/handles.hpp" ++#include "runtime/globals_extension.hpp" + #include "utilities/growableArray.hpp" + + // Universe is a name space holding known system classes and objects in the VM. 
+@@ -195,6 +196,9 @@ class Universe: AllStatic { + static int _verify_count; // number of verifies done + static long verify_flags; + ++ // Dynamic Max Heap ++ static bool _enable_dynamic_max_heap; ++ + static uintptr_t _verify_oop_mask; + static uintptr_t _verify_oop_bits; + +@@ -400,6 +404,37 @@ class Universe: AllStatic { + + // Compiler support + static int base_vtable_size() { return _base_vtable_size; } ++ ++ // Dynamic Max Heap ++ static const char* dynamic_max_heap_dcmd_name() { ++ if (FLAG_IS_CMDLINE(ElasticMaxHeapSize)) { ++ return "GC.elastic_max_heap"; ++ } ++ if (FLAG_IS_CMDLINE(DynamicMaxHeapSizeLimit)) { ++ return "GC.change_max_heap"; ++ } ++ return "GC.elastic_max_heap"; ++ } ++ static const char* dynamic_max_heap_option_name() { ++ if (FLAG_IS_CMDLINE(ElasticMaxHeapSize)) { ++ return "ElasticMaxHeap"; ++ } ++ if (FLAG_IS_CMDLINE(DynamicMaxHeapSizeLimit)) { ++ return "DynamicMaxHeap"; ++ } ++ return "ElasticMaxHeap"; ++ } ++ static const char* dynamic_max_heap_size_limit_option_name() { ++ if (FLAG_IS_CMDLINE(ElasticMaxHeapSize)) { ++ return "ElasticMaxHeapSize"; ++ } ++ if (FLAG_IS_CMDLINE(DynamicMaxHeapSizeLimit)) { ++ return "DynamicMaxHeapSizeLimit"; ++ } ++ return "+ElasticMaxHeap"; ++ } ++ static bool is_dynamic_max_heap_enable() { return _enable_dynamic_max_heap; } ++ static void set_dynamic_max_heap_enable(bool a) { _enable_dynamic_max_heap = a; } + }; + + #endif // SHARE_MEMORY_UNIVERSE_HPP +diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp +index fb9baaeaf..0056821fe 100644 +--- a/src/hotspot/share/runtime/arguments.cpp ++++ b/src/hotspot/share/runtime/arguments.cpp +@@ -31,6 +31,7 @@ + #include "classfile/stringTable.hpp" + #include "classfile/symbolTable.hpp" + #include "compiler/compilerDefinitions.hpp" ++#include "gc/shared/dynamicMaxHeap.hpp" + #include "gc/shared/gcArguments.hpp" + #include "gc/shared/gcConfig.hpp" + #include "gc/shared/stringdedup/stringDedup.hpp" +@@ -1470,6 +1471,18 @@ void Arguments::set_use_compressed_oops() { + // to use UseCompressedOops are InitialHeapSize and MinHeapSize. + size_t max_heap_size = MAX3(MaxHeapSize, InitialHeapSize, MinHeapSize); + ++ // DynamicMaxHeap ++ // 1. align DynamicMaxHeapSizeLimit ++ // 2. 
use DynamicMaxHeapSizeLimit to check whether compressedOops can be enabled ++ bool dynamic_max_heap_enable = DynamicMaxHeapChecker::check_dynamic_max_heap_size_limit(); ++ if (dynamic_max_heap_enable) { ++ Universe::set_dynamic_max_heap_enable(true); ++ DynamicMaxHeapConfig::set_initial_max_heap_size((size_t)MaxHeapSize); ++ size_t _heap_alignment = GCArguments::compute_heap_alignment(); ++ uintx aligned_max_heap_size_limit = align_up(DynamicMaxHeapSizeLimit, _heap_alignment); ++ FLAG_SET_ERGO(DynamicMaxHeapSizeLimit, aligned_max_heap_size_limit); ++ max_heap_size = MAX2(max_heap_size, DynamicMaxHeapSizeLimit); ++ } + if (max_heap_size <= max_heap_for_compressed_oops()) { + if (FLAG_IS_DEFAULT(UseCompressedOops)) { + FLAG_SET_ERGO(UseCompressedOops, true); +diff --git a/src/hotspot/share/runtime/globals.hpp b/src/hotspot/share/runtime/globals.hpp +index 82a6236c3..55e664a18 100644 +--- a/src/hotspot/share/runtime/globals.hpp ++++ b/src/hotspot/share/runtime/globals.hpp +@@ -2030,6 +2030,9 @@ const int ObjectAlignmentInBytes = 8; + product(bool, StressSecondarySupers, false, DIAGNOSTIC, \ + "Use a terrible hash function in order to generate many collisions.") \ + \ ++ product(bool, ElasticMaxDirectMemory, false, \ ++ "Allow changing the max direct memory size at runtime via jcmd") \ ++ \ + + // end of RUNTIME_FLAGS +diff --git a/src/hotspot/share/runtime/os.cpp b/src/hotspot/share/runtime/os.cpp +index dc0cff5fa..6147ba57f 100644 +--- a/src/hotspot/share/runtime/os.cpp ++++ b/src/hotspot/share/runtime/os.cpp +@@ -463,6 +463,9 @@ static void signal_thread_entry(JavaThread* thread, TRAPS) { + } + + void os::init_before_ergo() { ++#ifdef AARCH64 ++ os::Linux::load_ACC_library_before_ergo(); ++#endif + initialize_initial_active_processor_count(); + // We need to initialize large page support here because ergonomics takes some + // decisions depending on large page support and the calculated large page size. +@@ -1971,6 +1974,10 @@ bool os::uncommit_memory(char* addr, size_t bytes, bool executable) { + return res; + } + ++bool os::free_heap_physical_memory(char *addr, size_t bytes) { ++ return pd_free_heap_physical_memory(addr, bytes); ++} ++ + bool os::release_memory(char* addr, size_t bytes) { + assert_nonempty_range(addr, bytes); + bool res; +diff --git a/src/hotspot/share/runtime/os.hpp b/src/hotspot/share/runtime/os.hpp +index b9c297cb8..833edab01 100644 +--- a/src/hotspot/share/runtime/os.hpp ++++ b/src/hotspot/share/runtime/os.hpp +@@ -205,6 +205,7 @@ class os: AllStatic { + size_t alignment_hint, + bool executable, const char* mesg); + static bool pd_uncommit_memory(char* addr, size_t bytes, bool executable); ++ static bool pd_free_heap_physical_memory(char* addr, size_t bytes); + static bool pd_release_memory(char* addr, size_t bytes); + + static char* pd_attempt_map_memory_to_file_at(char* addr, size_t bytes, int file_desc); +@@ -449,6 +450,7 @@ class os: AllStatic { + size_t alignment_hint, + bool executable, const char* mesg); + static bool uncommit_memory(char* addr, size_t bytes, bool executable = false); ++ static bool free_heap_physical_memory(char* addr, size_t bytes); + static bool release_memory(char* addr, size_t bytes); + + // Does the platform support trimming the native heap? 
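The Linux definition of pd_free_heap_physical_memory is not part of the hunks shown here; it is only declared above. A minimal sketch of what such a hook could look like, assuming madvise(2) with MADV_DONTNEED is the intended mechanism (the actual implementation in this patch series may differ):

bool os::pd_free_heap_physical_memory(char* addr, size_t bytes) {
  // Assumed sketch: return the physical pages backing [addr, addr + bytes)
  // to the kernel while keeping the virtual mapping committed, so a later
  // access faults in fresh zero-filled pages instead of crashing.
  assert(is_aligned(addr, os::vm_page_size()), "addr must be page aligned");
  assert(is_aligned(bytes, os::vm_page_size()), "size must be page aligned");
  return ::madvise(addr, bytes, MADV_DONTNEED) == 0;
}

This matches how the GC callers above use the API: they invoke it only on page-aligned committed ranges uncovered by a shrink, and treat a false return as fatal via guarantee().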
+diff --git a/src/hotspot/share/runtime/threads.cpp b/src/hotspot/share/runtime/threads.cpp +index b0bfc0eb2..609248146 100644 +--- a/src/hotspot/share/runtime/threads.cpp ++++ b/src/hotspot/share/runtime/threads.cpp +@@ -858,6 +858,15 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) { + } + #endif // INCLUDE_JBOLT + ++ // Dynamic Max Heap: reset the current max heap size to MaxHeapSize ++ if (Universe::is_dynamic_max_heap_enable()) { ++ bool success = Universe::heap()->change_max_heap(MaxHeapSize); ++ if (!success) { ++ log_error(dynamic, heap)("VM failed to initialize heap to Xmx " SIZE_FORMAT "K", (MaxHeapSize / K)); ++ vm_exit(1); ++ } ++ } ++ + return JNI_OK; + } + +diff --git a/src/hotspot/share/runtime/vmOperation.hpp b/src/hotspot/share/runtime/vmOperation.hpp +index e889c3590..7ddd72021 100644 +--- a/src/hotspot/share/runtime/vmOperation.hpp ++++ b/src/hotspot/share/runtime/vmOperation.hpp +@@ -117,7 +117,8 @@ + template(GTestStopSafepoint) \ + template(JFROldObject) \ + template(JvmtiPostObjectFree) \ +- template(RendezvousGCThreads) ++ template(RendezvousGCThreads) \ ++ template(DynamicMaxHeap) + + class Thread; + class outputStream; +diff --git a/src/hotspot/share/services/diagnosticCommand.cpp b/src/hotspot/share/services/diagnosticCommand.cpp +index f36f78639..d7b271677 100644 +--- a/src/hotspot/share/services/diagnosticCommand.cpp ++++ b/src/hotspot/share/services/diagnosticCommand.cpp +@@ -34,6 +34,7 @@ + #include "compiler/compileBroker.hpp" + #include "compiler/directivesParser.hpp" + #include "gc/shared/gcVMOperations.hpp" ++#include "gc/shared/gcArguments.hpp" + #include "jvm.h" + #include "memory/metaspace/metaspaceDCmd.hpp" + #include "memory/resourceArea.hpp" +@@ -106,6 +107,9 @@ void DCmd::register_dcmds(){ + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); ++ DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ChangeMaxHeapDCmd>(full_export, true, false)); ++ DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ElasticMaxHeapDCmd>(full_export, true, false)); ++ DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ElasticMaxDirectMemoryDCmd>(full_export, true, false)); + #if INCLUDE_SERVICES + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(DCmd_Source_Internal | DCmd_Source_AttachAPI, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); +@@ -465,6 +469,117 @@ void FinalizerInfoDCmd::execute(DCmdSource source, TRAPS) { + } + } + ++ChangeMaxHeapDCmd::ChangeMaxHeapDCmd(outputStream* output, bool heap) : ++ DCmdWithParser(output, heap), ++ _new_max_heap_size("change_max_heap", "New max size of heap", "MEMORY SIZE", true) { ++ _dcmdparser.add_dcmd_argument(&_new_max_heap_size); ++} ++ ++int ChangeMaxHeapDCmd::num_arguments(ChangeMaxHeapDCmd* dcmd) { ++ if (dcmd != NULL) { ++ DCmdMark mark(dcmd); ++ return dcmd->_dcmdparser.num_arguments(); ++ } else { ++ return 0; ++ } ++} ++ ++int ChangeMaxHeapDCmd::num_arguments() { ++ ResourceMark rm; ++ ChangeMaxHeapDCmd* dcmd = new ChangeMaxHeapDCmd(NULL, false); ++ return ChangeMaxHeapDCmd::num_arguments(dcmd); ++} ++ ++void ChangeMaxHeapDCmd::execute(DCmdSource source, TRAPS) { ++ if (!Universe::is_dynamic_max_heap_enable()) { ++ output()->print_cr("not supported because -XX:DynamicMaxHeapSizeLimit/-XX:ElasticMaxHeapSize was not specified"); ++ return; ++ } ++ ++ jlong input_max_heap_size = _new_max_heap_size.value()._size; ++ jlong 
new_max_heap_size = align_up((size_t)input_max_heap_size, HeapAlignment); ++ if (new_max_heap_size != input_max_heap_size) { ++ output()->print_cr("align the given value " SIZE_FORMAT " up to " SIZE_FORMAT "K for heap alignment " SIZE_FORMAT "K", ++ input_max_heap_size, ++ (new_max_heap_size / K), ++ (HeapAlignment / K)); ++ } ++ ++ bool is_valid = Universe::heap()->check_new_max_heap_validity(new_max_heap_size, output()); ++ if (!is_valid) { ++ output()->print_cr("%s fail", Universe::dynamic_max_heap_dcmd_name()); ++ return; ++ } ++ output()->print_cr("%s (" SIZE_FORMAT "K" "->" SIZE_FORMAT "K)(" SIZE_FORMAT "K)", ++ Universe::dynamic_max_heap_dcmd_name(), ++ (Universe::heap()->current_max_heap_size() / K), ++ (new_max_heap_size / K), ++ (DynamicMaxHeapSizeLimit / K)); ++ ++ bool success = Universe::heap()->change_max_heap(new_max_heap_size); ++ if (success) { ++ output()->print_cr("%s success", Universe::dynamic_max_heap_dcmd_name()); ++ } else { ++ output()->print_cr("%s fail", Universe::dynamic_max_heap_dcmd_name()); ++ } ++} ++ ++ElasticMaxHeapDCmd::ElasticMaxHeapDCmd(outputStream* output, bool heap) : ++ ChangeMaxHeapDCmd(output, heap) { ++} ++ ++int ElasticMaxHeapDCmd::num_arguments() { ++ ResourceMark rm; ++ ElasticMaxHeapDCmd* dcmd = new ElasticMaxHeapDCmd(NULL, false); ++ return ChangeMaxHeapDCmd::num_arguments(dcmd); ++} ++ ++ElasticMaxDirectMemoryDCmd::ElasticMaxDirectMemoryDCmd(outputStream* output, bool heap) : ++ DCmdWithParser(output, heap), ++ _new_max_direct_memory("elastic_max_direct_memory", "New max size of direct memory", "MEMORY SIZE", true) { ++ _dcmdparser.add_dcmd_argument(&_new_max_direct_memory); ++} ++ ++int ElasticMaxDirectMemoryDCmd::num_arguments() { ++ ResourceMark rm; ++ ElasticMaxDirectMemoryDCmd* dcmd = new ElasticMaxDirectMemoryDCmd(NULL, false); ++ if (dcmd != NULL) { ++ DCmdMark mark(dcmd); ++ return dcmd->_dcmdparser.num_arguments(); ++ } else { ++ return 0; ++ } ++} ++ ++void ElasticMaxDirectMemoryDCmd::execute(DCmdSource source, TRAPS) { ++ if (!ElasticMaxDirectMemory) { ++ output()->print_cr("not supported because -XX:+ElasticMaxDirectMemory was not specified"); ++ return; ++ } ++ ++ jlong new_max_direct_memory = _new_max_direct_memory.value()._size; ++ Symbol* klass = vmSymbols::java_nio_Bits(); ++ Klass* k = SystemDictionary::resolve_or_fail(klass, true, CHECK); ++ ++ // invoke the updateMaxMemory method ++ JavaValue result(T_OBJECT); ++ JavaCallArguments args; ++ args.push_long(new_max_direct_memory); ++ JavaCalls::call_static(&result, ++ k, ++ vmSymbols::updateMaxMemory_name(), ++ vmSymbols::updateMaxMemory_signature(), ++ &args, ++ CHECK); ++ oop msg = cast_to_oop(result.get_jobject()); ++ if (msg != NULL) { ++ char* text = java_lang_String::as_utf8_string(msg); ++ if (text != NULL) { ++ output()->print_cr("%s", text); ++ } ++ } ++} ++ + #if INCLUDE_SERVICES // Heap dumping/inspection supported + HeapDumpDCmd::HeapDumpDCmd(outputStream* output, bool heap) : + DCmdWithParser(output, heap), +diff --git a/src/hotspot/share/services/diagnosticCommand.hpp b/src/hotspot/share/services/diagnosticCommand.hpp +index ad018259f..a4a67e120 100644 +--- a/src/hotspot/share/services/diagnosticCommand.hpp ++++ b/src/hotspot/share/services/diagnosticCommand.hpp +@@ -312,6 +312,59 @@ public: + virtual void execute(DCmdSource source, TRAPS); + }; + ++class ChangeMaxHeapDCmd : public DCmdWithParser { ++protected: ++ DCmdArgument<MemorySizeArgument> _new_max_heap_size; ++ static int num_arguments(ChangeMaxHeapDCmd* dcmd); ++public: ++ ChangeMaxHeapDCmd(outputStream* output, 
bool heap); ++ static const char* name() { return "GC.change_max_heap"; } ++ static const char* description() { ++ return "Change the dynamic max heap size at runtime."; ++ } ++ static const char* impact() { ++ return "Medium"; ++ } ++ static const JavaPermission permission() { ++ JavaPermission p = {"java.lang.management.ManagementPermission", ++ "monitor", NULL}; ++ return p; ++ } ++ static int num_arguments(); ++ virtual void execute(DCmdSource source, TRAPS); ++}; ++ ++class ElasticMaxHeapDCmd : public ChangeMaxHeapDCmd { ++public: ++ ElasticMaxHeapDCmd(outputStream* output, bool heap); ++ static const char* name() { return "GC.elastic_max_heap"; } ++ static const char* description() { ++ return "Try to resize the elastic max heap at runtime."; ++ } ++ static int num_arguments(); ++}; ++ ++class ElasticMaxDirectMemoryDCmd : public DCmdWithParser { ++protected: ++ DCmdArgument<MemorySizeArgument> _new_max_direct_memory; ++public: ++ ElasticMaxDirectMemoryDCmd(outputStream* output, bool heap); ++ static const char* name() { return "GC.elastic_max_direct_memory"; } ++ static const char* description() { ++ return "Try to resize the elastic max direct memory at runtime."; ++ } ++ static const char* impact() { ++ return "Medium"; ++ } ++ static const JavaPermission permission() { ++ JavaPermission p = {"java.lang.management.ManagementPermission", ++ "monitor", NULL}; ++ return p; ++ } ++ static int num_arguments(); ++ virtual void execute(DCmdSource source, TRAPS); ++}; ++ + #if INCLUDE_SERVICES // Heap dumping supported + // See also: dump_heap in attachListener.cpp + class HeapDumpDCmd : public DCmdWithParser { +diff --git a/src/java.base/share/classes/java/nio/Bits.java b/src/java.base/share/classes/java/nio/Bits.java +index b11cb4947..9b2b3d23a 100644 +--- a/src/java.base/share/classes/java/nio/Bits.java ++++ b/src/java.base/share/classes/java/nio/Bits.java +@@ -210,6 +210,32 @@ class Bits { // package-private + assert cnt >= 0 && reservedMem >= 0 && totalCap >= 0; + } + ++ static String updateMaxMemory(long newSize) { ++ long reservedMem = RESERVED_MEMORY.get(); ++ StringBuilder sb = new StringBuilder(); ++ if (newSize >= reservedMem) { ++ sb.append("GC.elastic_max_direct_memory ("); ++ sb.append(MAX_MEMORY / 1024).append("K->"); ++ sb.append(newSize / 1024).append("K)"); ++ sb.append("\n"); ++ sb.append("GC.elastic_max_direct_memory success"); ++ // update VM.maxDirectMemory and Bits.MAX_MEMORY ++ // in particular, when shrinking in a multi-threaded scenario, ++ // VM.maxDirectMemory() may be inconsistent with the actual direct memory usage. ++ // the new maxMemory will take effect the next time direct memory is allocated. 
++ VM.setMaxDirectMemory(newSize); ++ MAX_MEMORY = VM.maxDirectMemory(); ++ } else { ++ sb.append("GC.elastic_max_direct_memory "); ++ sb.append(newSize / 1024).append("K below current reserved direct memory "); ++ sb.append(reservedMem / 1024).append("K"); ++ sb.append("\n"); ++ sb.append("GC.elastic_max_direct_memory fail"); ++ } ++ String output = sb.toString(); ++ return output; ++ } ++ + static final BufferPool BUFFER_POOL = new BufferPool() { + @Override + public String getName() { +diff --git a/src/java.base/share/classes/jdk/internal/misc/VM.java b/src/java.base/share/classes/jdk/internal/misc/VM.java +index 3e7351f63..f1a585e42 100644 +--- a/src/java.base/share/classes/jdk/internal/misc/VM.java ++++ b/src/java.base/share/classes/jdk/internal/misc/VM.java +@@ -145,6 +145,12 @@ public class VM { + return directMemory; + } + ++ // ElasticMaxDirectMemory ++ // update max direct memory size ++ public static void setMaxDirectMemory(long size) { ++ directMemory = size; ++ } ++ + // User-controllable flag that determines if direct buffers should be page + // aligned. The "-XX:+PageAlignDirectMemory" option can be used to force + // buffers, allocated by ByteBuffer.allocateDirect, to be page aligned. +diff --git a/test/hotspot/jtreg/gc/TestSmallHeap.java b/test/hotspot/jtreg/gc/TestSmallHeap.java +index fd6c5860f..5d711e38b 100644 +--- a/test/hotspot/jtreg/gc/TestSmallHeap.java ++++ b/test/hotspot/jtreg/gc/TestSmallHeap.java +@@ -104,7 +104,7 @@ public class TestSmallHeap { + analyzer.shouldHaveExitValue(0); + + expectedMaxHeap = Math.max(expectedMaxHeap, minMaxHeap); +- long maxHeapSize = Long.parseLong(analyzer.firstMatch("MaxHeapSize.+=\\s+(\\d+)",1)); ++ long maxHeapSize = Long.parseLong(analyzer.firstMatch("\\s+MaxHeapSize\\s+=\\s+(\\d+)",1)); + long actualHeapSize = Long.parseLong(analyzer.firstMatch(VerifyHeapSize.actualMsg + "(\\d+)",1)); + Asserts.assertEQ(maxHeapSize, expectedMaxHeap); + Asserts.assertLessThanOrEqual(actualHeapSize, maxHeapSize); +diff --git a/test/hotspot/jtreg/gc/arguments/TestMaxRAMFlags.java b/test/hotspot/jtreg/gc/arguments/TestMaxRAMFlags.java +index 13bb36ecc..330e923c2 100644 +--- a/test/hotspot/jtreg/gc/arguments/TestMaxRAMFlags.java ++++ b/test/hotspot/jtreg/gc/arguments/TestMaxRAMFlags.java +@@ -90,7 +90,7 @@ public class TestMaxRAMFlags { + } + + private static String getFlagValue(String flag, String where) { +- Matcher m = Pattern.compile(flag + "\\s+:?=\\s+\\d+").matcher(where); ++ Matcher m = Pattern.compile("\\s+" + flag + "\\s+:?=\\s+\\d+").matcher(where); + if (!m.find()) { + throw new RuntimeException("Could not find value for flag " + flag + " in output string"); + } +diff --git a/test/hotspot/jtreg/gc/dynamicmaxheap/BasicTest.java b/test/hotspot/jtreg/gc/dynamicmaxheap/BasicTest.java +new file mode 100644 +index 000000000..96d73e29f +--- /dev/null ++++ b/test/hotspot/jtreg/gc/dynamicmaxheap/BasicTest.java +@@ -0,0 +1,94 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. ++ * Copyright (C) 2023, 2024 THL A29 Limited, a Tencent company. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. 
++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ */ ++ ++package gc.dynamicmaxheap; ++ ++/* ++ * @test ++ * @summary Test Basic Elastic Max Heap resize ++ * @requires (os.family == "linux") & (os.arch == "aarch64") ++ * @library /test/lib ++ * @build gc.dynamicmaxheap.TestBase ++ * @compile test_classes/NotActiveHeap.java ++ * @run driver gc.dynamicmaxheap.BasicTest ++ */ ++ ++import java.lang.reflect.Field; ++import jdk.test.lib.process.OutputAnalyzer; ++import jdk.test.lib.JDKToolFinder; ++import jdk.test.lib.process.ProcessTools; ++import jdk.test.lib.Asserts; ++ ++public class BasicTest extends TestBase { ++ public static void main(String[] args) throws Exception { ++ test("-XX:+UseParallelGC"); ++ test("-XX:+UseG1GC"); ++ } ++ ++ private static void test(String heap_type_or_process_count) throws Exception { ++ String architecture = System.getProperty("os.arch"); ++ // Xms = 100M - 1B, Xmx = 600M - 1B, ElasticMaxHeapSize = 1G - 1B ++ // unaligned arguments should be fine ++ ProcessBuilder pb = ProcessTools.createLimitedTestJavaProcessBuilder(heap_type_or_process_count, "-XX:+ElasticMaxHeap", "-Xms104857599", "-Xmx629145599", "-XX:ElasticMaxHeapSize=1073741823", "NotActiveHeap"); ++ Process p = pb.start(); ++ try { ++ long pid = p.pid(); ++ System.out.println(pid); ++ ++ // shrink to 500M should be fine for any GC ++ String[] contains1 = { ++ "GC.elastic_max_heap success", ++ "GC.elastic_max_heap (", ++ }; ++ resizeAndCheck(pid, "500M", contains1, null); ++ ++ // expand to 800M should be fine for any GC ++ String[] contains2 = { ++ "GC.elastic_max_heap success", ++ "GC.elastic_max_heap (", ++ }; ++ resizeAndCheck(pid, "800M", contains2, null); ++ ++ // expand to 2G should fail ++ String[] contains3 = { ++ "GC.elastic_max_heap fail", ++ "2097152K exceeds maximum limit", ++ }; ++ resizeAndCheck(pid, "2G", contains3, null); ++ ++ // expand to 1G should be fine ++ String[] contains4 = { ++ "GC.elastic_max_heap success", ++ "GC.elastic_max_heap (", ++ }; ++ resizeAndCheck(pid, "1G", contains4, null); ++ ++ // shrink to 300M should be fine ++ // unaligned arguments should be fine, new_size = 300M - 1B ++ String[] contains5 = { ++ "GC.elastic_max_heap success", ++ "GC.elastic_max_heap (", ++ }; ++ resizeAndCheck(pid, "314572799", contains5, null); ++ } finally { ++ p.destroy(); ++ } ++ } ++} +diff --git a/test/hotspot/jtreg/gc/dynamicmaxheap/DirectMemoryBasicTest.java b/test/hotspot/jtreg/gc/dynamicmaxheap/DirectMemoryBasicTest.java +new file mode 100644 +index 000000000..5c13e8de4 +--- /dev/null ++++ b/test/hotspot/jtreg/gc/dynamicmaxheap/DirectMemoryBasicTest.java +@@ -0,0 +1,76 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. ++ * Copyright (C) 2023, 2024 THL A29 Limited, a Tencent company. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ */ ++ ++/* ++ * @test ++ * @summary Test Basic Elastic Max Direct Memory resize ++ * @requires (os.family == "linux") & (os.arch == "aarch64") ++ * @library /test/lib ++ * @build gc.dynamicmaxheap.TestBase ++ * @compile test_classes/NotActiveDirectMemory.java ++ * @run driver gc.dynamicmaxheap.DirectMemoryBasicTest ++ */ ++ ++package gc.dynamicmaxheap; ++ ++import jdk.test.lib.process.OutputAnalyzer; ++import jdk.test.lib.JDKToolFinder; ++import jdk.test.lib.process.ProcessTools; ++import jdk.test.lib.Asserts; ++ ++public class DirectMemoryBasicTest extends TestBase { ++ public static void main(String[] args) throws Exception { ++ test("-XX:+UseParallelGC"); ++ test("-XX:+UseG1GC"); ++ } ++ ++ private static void test(String heap_type) throws Exception { ++ String architecture = System.getProperty("os.arch"); ++ ProcessBuilder pb = ProcessTools.createLimitedTestJavaProcessBuilder(heap_type, ++ "-XX:+ElasticMaxDirectMemory", ++ "-Xms100M", ++ "-Xmx100M", ++ "-XX:MaxDirectMemorySize=200M", ++ "NotActiveDirectMemory"); ++ Process p = pb.start(); ++ long pid; ++ try { ++ pid = p.pid(); ++ System.out.println(pid); ++ ++ // NotActiveDirectMemory will alloc 100M direct memory ++ // expand to 300M should be fine for any GC ++ String[] contains1 = { ++ "GC.elastic_max_direct_memory (", ++ "GC.elastic_max_direct_memory success" ++ }; ++ resizeAndCheck(pid, "300M", contains1, null, "GC.elastic_max_direct_memory"); ++ ++ // shrink to 50M should fail for any GC, ++ String[] contains2 = { ++ "below current reserved direct memory", ++ "GC.elastic_max_direct_memory fail" ++ }; ++ resizeAndCheck(pid, "50M", contains2, null, "GC.elastic_max_direct_memory"); ++ } finally { ++ p.destroy(); ++ } ++ } ++} +diff --git a/test/hotspot/jtreg/gc/dynamicmaxheap/LimitDirectMemoryTest.java b/test/hotspot/jtreg/gc/dynamicmaxheap/LimitDirectMemoryTest.java +new file mode 100644 +index 000000000..61266a1e0 +--- /dev/null ++++ b/test/hotspot/jtreg/gc/dynamicmaxheap/LimitDirectMemoryTest.java +@@ -0,0 +1,92 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. ++ * Copyright (C) 2023, 2024 THL A29 Limited, a Tencent company. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ */ ++ ++/* ++ * @test ++ * @summary Test max direct memory can take effect after resize ++ * @requires (os.family == "linux") & (os.arch == "aarch64") ++ * @library /test/lib ++ * @build gc.dynamicmaxheap.TestBase ++ * @compile test_classes/LimitDirectMemoryTestBasic.java ++ * @run driver gc.dynamicmaxheap.LimitDirectMemoryTest ++ */ ++ ++package gc.dynamicmaxheap; ++ ++import jdk.test.lib.process.OutputAnalyzer; ++import jdk.test.lib.process.ProcessTools; ++ ++public class LimitDirectMemoryTest extends TestBase { ++ public static void main(String[] args) throws Exception { ++ // Test1 ++ // init max direct memory is 200M ++ // expand to 300M and alloc 300M direct memory should be fine ++ String[] contains1 = { ++ "allocation finish!" ++ }; ++ String[] not_contains1 = { ++ "java.lang.OutOfMemoryError: Cannot reserve " ++ }; ++ Test("-XX:+UseParallelGC", "300M", "300", contains1, not_contains1); ++ Test("-XX:+UseG1GC", "300M", "300", contains1, not_contains1); ++ ++ // Test2 ++ // init max direct memory is 200M ++ // shrink to 50M and alloc 100M direct memory should oom ++ String[] contains2 = { ++ "java.lang.OutOfMemoryError: Cannot reserve " ++ }; ++ String[] not_contains2 = { ++ "allocation finish!" ++ }; ++ Test("-XX:+UseParallelGC", "50M", "100", contains2, not_contains2); ++ Test("-XX:+UseG1GC", "50M", "100", contains2, not_contains2); ++ } ++ ++ private static void Test(String heap_type, String new_size, String alloc_size, String[] contains, String[] not_contains) throws Exception { ++ ProcessBuilder pb = ProcessTools.createLimitedTestJavaProcessBuilder(heap_type, ++ "-Dtest.jdk=" + System.getProperty("test.jdk"), ++ "-XX:+ElasticMaxDirectMemory", ++ "-XX:MaxDirectMemorySize=200M", ++ "-Xms100M", ++ "-Xmx100M", ++ "LimitDirectMemoryTestBasic", ++ new_size, ++ alloc_size); ++ OutputAnalyzer output = new OutputAnalyzer(pb.start()); ++ CheckOutput(output, contains, not_contains); ++ } ++ ++ public static void CheckOutput(OutputAnalyzer output, String[] contains, String[] not_contains) throws Exception { ++ System.out.println(output.getOutput()); ++ if (contains != null) { ++ for (String s : contains) { ++ output.shouldContain(s); ++ } ++ } ++ if (not_contains != null) { ++ for (String s : not_contains) { ++ output.shouldNotContain(s); ++ } ++ } ++ } ++} ++ ++ +\ No newline at end of file +diff --git a/test/hotspot/jtreg/gc/dynamicmaxheap/MemoryPoolTest.java b/test/hotspot/jtreg/gc/dynamicmaxheap/MemoryPoolTest.java +new file mode 100644 +index 000000000..7f854b125 +--- /dev/null ++++ b/test/hotspot/jtreg/gc/dynamicmaxheap/MemoryPoolTest.java +@@ -0,0 +1,139 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. ++ * Copyright (C) 2023, 2024 THL A29 Limited, a Tencent company. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. 
++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ */ ++ ++package gc.dynamicmaxheap; ++ ++import java.lang.management.*; ++import java.util.*; ++import jdk.test.lib.process.OutputAnalyzer; ++import jdk.test.lib.JDKToolFinder; ++import jdk.test.lib.process.ProcessTools; ++import jdk.test.lib.Asserts; ++ ++/** ++ * @test MemoryPoolTest ++ * @summary test MemoryPool MemoryUsage returns correct max size ++ * @requires (os.family == "linux") & (os.arch == "aarch64") ++ * @library /test/lib ++ * @build gc.dynamicmaxheap.TestBase ++ * @run main/othervm -Xms50M -Xmx2G -XX:+ElasticMaxHeap -XX:+UseParallelGC -Xlog:dynamic+heap=debug gc.dynamicmaxheap.MemoryPoolTest ++ * @run main/othervm -Xms50M -Xmx2G -XX:+ElasticMaxHeap -XX:+UseG1GC -Xlog:dynamic+heap=debug gc.dynamicmaxheap.MemoryPoolTest ++ */ ++public class MemoryPoolTest extends TestBase { ++ static Object[] root_array; ++ static final long M = 1024L * 1024L; ++ static long edenMaxSize; ++ static long survivorMaxSize; ++ static long oldGenMaxSize; ++ public static void main(String[] args) throws Exception { ++ String architecture = System.getProperty("os.arch"); ++ long pid = ProcessTools.getProcessId(); ++ /* ++ * Steps: start with 2G heap ++ * 1. start and allocate about 1G object ++ * 2. get MemoryPool and usage as expected ++ * 3. launch jcmd resize to 100M and expect success ++ * 4. 
get MemoryPool and usage as expected
++         */
++        alloc_and_free(1024L * 1024L * 1024L);
++        MemoryMXBean mem = ManagementFactory.getMemoryMXBean();
++        MemoryUsage usage = mem.getHeapMemoryUsage();
++        long max = usage.getMax() / M;
++        long committed = usage.getCommitted() / M;
++        long used = usage.getUsed() / M;
++        System.out.println("After alloc -- Heap Max: " + max +
++                           "M, Committed: " + committed +
++                           "M, Used: " + used + "M");
++        Asserts.assertGT(max, 1024L);
++        Asserts.assertGTE(max, committed);
++        Asserts.assertGTE(committed, used);
++
++        // check eden, survivor and old memory pool after alloc
++        get_memory_info();
++        System.out.println("After alloc -- Eden Max: " + edenMaxSize +
++                           "M, Survivor Max: " + survivorMaxSize +
++                           "M, Old Max: " + oldGenMaxSize + "M");
++        long orig_old = oldGenMaxSize;
++        long orig_eden = edenMaxSize;
++        Asserts.assertGT(edenMaxSize + survivorMaxSize + oldGenMaxSize, 1024L);
++        Asserts.assertGTE(max, oldGenMaxSize);
++        Asserts.assertGTE(oldGenMaxSize, edenMaxSize);
++        Asserts.assertGTE(oldGenMaxSize, survivorMaxSize);
++
++        root_array = null; // release
++
++        // shrink to 100M should be fine for any GC
++        String[] contains1 = {
++            "GC.elastic_max_heap (",
++            "GC.elastic_max_heap success"
++        };
++        resizeAndCheck(pid, "100M", contains1, null);
++        mem = ManagementFactory.getMemoryMXBean();
++        usage = mem.getHeapMemoryUsage();
++        max = usage.getMax() / M;
++        committed = usage.getCommitted() / M;
++        used = usage.getUsed() / M;
++        System.out.println("After resize -- Heap Max: " + max +
++                           "M, Committed: " + committed +
++                           "M, Used: " + used + "M");
++        long target_size = 100L;
++        if (architecture.equals("aarch64")) {
++            target_size = 128L;
++        }
++        Asserts.assertLTE(max, target_size);
++        Asserts.assertGTE(max, committed);
++        Asserts.assertGTE(committed, used);
++
++        // check eden, survivor and old memory pool after resize
++        get_memory_info();
++        System.out.println("After resize -- Eden Max: " + edenMaxSize +
++                           "M, Survivor Max: " + survivorMaxSize +
++                           "M, Old Max: " + oldGenMaxSize + "M");
++        Asserts.assertLT(oldGenMaxSize, orig_old);
++        Asserts.assertLTE(edenMaxSize, orig_eden);
++    }
++
++    static void alloc_and_free(long size) {
++        // suppose compressed
++        // each int array size is 8(MarkOop + len) + 4 * len
++        // each object is 1k int[254]
++        int root_len = (int)(size / 1024L);
++        root_array = new Object[root_len];
++        for (int i = 0; i < root_len; i++) {
++            root_array[i] = new int[254];
++        }
++    }
++
++    static void get_memory_info() {
++        List<MemoryPoolMXBean> memoryPoolMXBeans = ManagementFactory.getMemoryPoolMXBeans();
++        for (MemoryPoolMXBean memoryPoolMXBean : memoryPoolMXBeans) {
++            String name = memoryPoolMXBean.getName();
++            MemoryUsage usage = memoryPoolMXBean.getUsage();
++            long max_size = usage.getMax() / M;
++            if (name.contains("Eden")) {
++                edenMaxSize = max_size;
++            } else if (name.contains("Survivor")) {
++                survivorMaxSize = max_size;
++            } else if (name.contains("Old")) {
++                oldGenMaxSize = max_size;
++            }
++        }
++    }
++}
+diff --git a/test/hotspot/jtreg/gc/dynamicmaxheap/OptionsCheck.java b/test/hotspot/jtreg/gc/dynamicmaxheap/OptionsCheck.java
+new file mode 100644
+index 000000000..13c4120b0
+--- /dev/null
++++ b/test/hotspot/jtreg/gc/dynamicmaxheap/OptionsCheck.java
+@@ -0,0 +1,73 @@
++/*
++ * Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
++ * Copyright (C) 2023 THL A29 Limited, a Tencent company. All rights reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++package gc.dynamicmaxheap;
++
++import jdk.test.lib.process.OutputAnalyzer;
++import jdk.test.lib.JDKToolFinder;
++import jdk.test.lib.process.ProcessTools;
++import jdk.test.lib.Asserts;
++
++/**
++ * @test OptionsCheck
++ * @summary test invalid options combinations with elastic max heap
++ * @requires (os.family == "linux") & (os.arch == "aarch64")
++ * @library /test/lib
++ * @build gc.dynamicmaxheap.TestBase
++ * @run driver gc.dynamicmaxheap.OptionsCheck
++ */
++public class OptionsCheck extends TestBase {
++    public static void main(String[] args) throws Exception {
++        String[] key_output = {
++            "can not be used with",
++        };
++        String[] key_output2 = {
++            "should be used with",
++        };
++        LaunchAndCheck(key_output, null, "-XX:+ElasticMaxHeap", "-Xmn200M", "-version");
++        LaunchAndCheck(key_output, null, "-XX:+ElasticMaxHeap", "-XX:MaxNewSize=300M", "-version");
++        LaunchAndCheck(key_output, null, "-XX:+ElasticMaxHeap", "-XX:OldSize=1G", "-version");
++        LaunchAndCheck(key_output2, null, "-XX:+ElasticMaxHeap", "-XX:-UseAdaptiveSizePolicy", "-version");
++        String[] contains1 = {
++            "-XX:ElasticMaxHeapSize should be used together with -Xmx/-XX:MaxHeapSize"
++        };
++        LaunchAndCheck(contains1, null, "-XX:+ElasticMaxHeap", "-XX:ElasticMaxHeapSize=100M", "-version");
++        String[] contains2 = {
++            "-XX:ElasticMaxHeapSize should be larger than -Xmx/-XX:MaxHeapSize"
++        };
++        LaunchAndCheck(contains2, null, "-XX:+ElasticMaxHeap", "-XX:ElasticMaxHeapSize=1G", "-Xmx2G", "-version");
++    }
++
++    public static void LaunchAndCheck(String[] contains, String[] not_contains, String... command) throws Exception {
++        ProcessBuilder pb = ProcessTools.createLimitedTestJavaProcessBuilder(command);
++        OutputAnalyzer output = new OutputAnalyzer(pb.start());
++        System.out.println(output.getOutput());
++        if (contains != null) {
++            for (String s : contains) {
++                output.shouldContain(s);
++            }
++        }
++        if (not_contains != null) {
++            for (String s : not_contains) {
++                output.shouldNotContain(s);
++            }
++        }
++    }
++}
+diff --git a/test/hotspot/jtreg/gc/dynamicmaxheap/RuntimeMemoryTest.java b/test/hotspot/jtreg/gc/dynamicmaxheap/RuntimeMemoryTest.java
+new file mode 100644
+index 000000000..d4d3b9b71
+--- /dev/null
++++ b/test/hotspot/jtreg/gc/dynamicmaxheap/RuntimeMemoryTest.java
+@@ -0,0 +1,96 @@
++/*
++ * Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
++ * Copyright (C) 2023, 2024 THL A29 Limited, a Tencent company. All rights reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++package gc.dynamicmaxheap;
++
++import jdk.test.lib.process.OutputAnalyzer;
++import jdk.test.lib.JDKToolFinder;
++import jdk.test.lib.process.ProcessTools;
++import jdk.test.lib.Asserts;
++
++/**
++ * @test RuntimeMemoryTest
++ * @summary test java.lang.Runtime max memory and total memory
++ * @requires (os.family == "linux") & (os.arch == "aarch64")
++ * @library /test/lib
++ * @build gc.dynamicmaxheap.TestBase
++ * @run main/othervm -Xms50M -Xmx2G -XX:+ElasticMaxHeap -XX:+UseParallelGC gc.dynamicmaxheap.RuntimeMemoryTest
++ * @run main/othervm -Xms50M -Xmx2G -XX:+ElasticMaxHeap -XX:+UseG1GC gc.dynamicmaxheap.RuntimeMemoryTest
++ */
++public class RuntimeMemoryTest extends TestBase {
++    static Object[] root_array;
++    static final long M = 1024L * 1024L;
++    public static void main(String[] args) throws Exception {
++        String architecture = System.getProperty("os.arch");
++        long pid = ProcessTools.getProcessId();
++        /*
++         * Steps: start with 2G heap
++         * 1. start and allocate about 1G object
++         * 2. get totalMemory/maxMemory/freeMemory as expected
++         * 3. launch jcmd resize to 100M and expect success
++         * 4. get totalMemory/maxMemory/freeMemory as expected
++         */
++        Runtime r = Runtime.getRuntime();
++        // GC rarely happens between these calls, so the values should align with the sizes
++        long max = r.maxMemory() / M;
++        long total = r.totalMemory() / M;
++        long free = r.freeMemory() / M;
++        System.out.println("Before alloc -- Max: " + max + "M, Total: " + total + "M, Free: " + free + "M");
++        alloc_and_free(1024L * 1024L * 1024L);
++
++        max = r.maxMemory() / M;
++        total = r.totalMemory() / M;
++        free = r.freeMemory() / M;
++        root_array = null; // release
++        System.out.println("After alloc -- Max: " + max + "M, Total: " + total + "M, Free: " + free + "M");
++        Asserts.assertGT(max, 1024L);
++        Asserts.assertGTE(max, total);
++        Asserts.assertGT(max, free);
++
++        // shrink to 100M should be fine for any GC
++        String[] contains1 = {
++            "GC.elastic_max_heap (",
++            "GC.elastic_max_heap success"
++        };
++        resizeAndCheck(pid, "100M", contains1, null);
++        max = r.maxMemory() / M;
++        total = r.totalMemory() / M;
++        free = r.freeMemory() / M;
++        System.out.println("After resize -- Max: " + max + "M, Total: " + total + "M, Free: " + free + "M");
++        long target_size = 101L;
++        if (architecture.equals("aarch64")) {
++            target_size = 129L;
++        }
++        Asserts.assertLT(max, target_size);
++        Asserts.assertGTE(max, total);
++        Asserts.assertGT(max, free);
++    }
++
++    static void alloc_and_free(long size) {
++        // suppose compressed
++        // each int array size is 8(MarkOop + len) + 4 * len
++        // each object is 1k int[254]
++        int root_len = (int)(size / 1024L);
++        root_array = new Object[root_len];
++        for (int i = 0; i < root_len; i++) {
++            root_array[i] = new int[254];
++        }
++    }
++}
+diff --git a/test/hotspot/jtreg/gc/dynamicmaxheap/TestBase.java b/test/hotspot/jtreg/gc/dynamicmaxheap/TestBase.java
+new file mode 100644
+index 000000000..42023e9bb
+--- /dev/null
++++
b/test/hotspot/jtreg/gc/dynamicmaxheap/TestBase.java +@@ -0,0 +1,50 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. ++ * Copyright (C) 2023 THL A29 Limited, a Tencent company. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ */ ++ ++package gc.dynamicmaxheap; ++ ++import java.lang.reflect.Field; ++import jdk.test.lib.process.OutputAnalyzer; ++import jdk.test.lib.JDKToolFinder; ++import jdk.test.lib.process.ProcessTools; ++import jdk.test.lib.Asserts; ++ ++public class TestBase { ++ // start jcmd and check output string ++ public static void resizeAndCheck(long pid, String new_size, String[] contains, String[] not_contains) throws Exception { ++ resizeAndCheck(pid, new_size, contains, not_contains, "GC.elastic_max_heap"); ++ } ++ public static void resizeAndCheck(long pid, String new_size, String[] contains, String[] not_contains, String type) throws Exception { ++ ProcessBuilder pb = new ProcessBuilder(); ++ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), Long.toString(pid), type, new_size}); ++ OutputAnalyzer output = new OutputAnalyzer(pb.start()); ++ System.out.println(output.getOutput()); ++ if (contains != null) { ++ for (String s : contains) { ++ output.shouldContain(s); ++ } ++ } ++ if (not_contains != null) { ++ for (String s : not_contains) { ++ output.shouldNotContain(s); ++ } ++ } ++ } ++} +diff --git a/test/hotspot/jtreg/gc/dynamicmaxheap/test_classes/LimitDirectMemoryTestBasic.java b/test/hotspot/jtreg/gc/dynamicmaxheap/test_classes/LimitDirectMemoryTestBasic.java +new file mode 100644 +index 000000000..75b657539 +--- /dev/null ++++ b/test/hotspot/jtreg/gc/dynamicmaxheap/test_classes/LimitDirectMemoryTestBasic.java +@@ -0,0 +1,63 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. ++ * Copyright (C) 2023 THL A29 Limited, a Tencent company. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
++ */ ++ ++import jdk.test.lib.process.ProcessTools; ++import jdk.test.lib.JDKToolFinder; ++import java.io.BufferedReader; ++import java.io.IOException; ++import java.io.InputStreamReader; ++import java.nio.ByteBuffer; ++ ++public class LimitDirectMemoryTestBasic { ++ public static void main(String[] args) throws Exception { ++ long pid = ProcessTools.getProcessId(); ++ String new_size = args[0]; ++ int alloc_size = Integer.parseInt(args[1]); ++ ++ resize(pid, new_size); ++ ++ try { ++ // alloc direct memory ++ int single_alloc_size = 1 * 1024 * 1024; ++ ByteBuffer[] buffers = new ByteBuffer[alloc_size]; ++ for (int i = 0; i < alloc_size; i++) { ++ buffers[i] = ByteBuffer.allocateDirect(single_alloc_size); ++ } ++ } catch (OutOfMemoryError e) { ++ System.out.println(e); ++ throw e; ++ } ++ System.out.println("allocation finish!"); ++ } ++ ++ static void resize(long pid, String new_size) { ++ try { ++ Process process = Runtime.getRuntime().exec(JDKToolFinder.getJDKTool("jcmd") + " " + pid + " GC.elastic_max_direct_memory " + new_size); ++ BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream())); ++ String line; ++ while ((line = reader.readLine()) != null) { ++ System.out.println(line); ++ } ++ reader.close(); ++ } catch (IOException e) { ++ e.printStackTrace(); ++ } ++ } ++} +\ No newline at end of file +diff --git a/test/hotspot/jtreg/gc/dynamicmaxheap/test_classes/NotActiveDirectMemory.java b/test/hotspot/jtreg/gc/dynamicmaxheap/test_classes/NotActiveDirectMemory.java +new file mode 100644 +index 000000000..0cec96c67 +--- /dev/null ++++ b/test/hotspot/jtreg/gc/dynamicmaxheap/test_classes/NotActiveDirectMemory.java +@@ -0,0 +1,41 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. ++ * Copyright (C) 2023 THL A29 Limited, a Tencent company. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
++ */
++
++import java.nio.ByteBuffer;
++
++public class NotActiveDirectMemory {
++    public static void main(String[] args) throws Exception {
++        // alloc 100M direct memory
++        try {
++            int single_alloc_size = 1 * 1024 * 1024;
++            ByteBuffer[] buffers = new ByteBuffer[100];
++            for (int i = 0; i < 100; i++) {
++                buffers[i] = ByteBuffer.allocateDirect(single_alloc_size);
++            }
++        } catch (OutOfMemoryError e) {
++            System.out.println(e);
++            throw e;
++        }
++        System.out.println("allocation finish!");
++        while (true) {
++            Thread.sleep(1000);
++        }
++    }
++}
+diff --git a/test/hotspot/jtreg/gc/dynamicmaxheap/test_classes/NotActiveHeap.java b/test/hotspot/jtreg/gc/dynamicmaxheap/test_classes/NotActiveHeap.java
+new file mode 100644
+index 000000000..e4c29854e
+--- /dev/null
++++ b/test/hotspot/jtreg/gc/dynamicmaxheap/test_classes/NotActiveHeap.java
+@@ -0,0 +1,27 @@
++/*
++ * Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
++ * Copyright (C) 2023 THL A29 Limited, a Tencent company. All rights reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++public class NotActiveHeap {
++    public static void main(String[] args) throws Exception {
++        while (true) {
++            Thread.sleep(1000);
++        }
++    }
++}
+--
+2.34.1
+
diff --git a/huawei-Fix-kae-testcat-and-EATest-testcast-bug-and-fix-cve-.patch b/huawei-Fix-kae-testcat-and-EATest-testcast-bug-and-fix-cve-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..751baa43126395418af9ece7ae2625b20ecf6196
--- /dev/null
+++ b/huawei-Fix-kae-testcat-and-EATest-testcast-bug-and-fix-cve-.patch
@@ -0,0 +1,414 @@
+Date: Tue, 25 Nov 2025 20:00:13 +0800
+Subject: [PATCH 8/8] Fix kae testcase and EATest testcase bug and fix
+ cve-2025-64505/64720/65018
+
+---
+ .../native/libsplashscreen/libpng/pngread.c   |  75 ++++++++++
+ .../native/libsplashscreen/libpng/pngrtran.c  | 114 ++++++++++++------
+ .../native/libsplashscreen/libpng/pngstruct.h |   1 -
+ test/jdk/com/sun/jdi/EATests.java             |   2 +-
+ .../security/openssl/KAEConfTest.java         |  21 +++-
+ 5 files changed, 170 insertions(+), 43 deletions(-)
+
+diff --git a/src/java.desktop/share/native/libsplashscreen/libpng/pngread.c b/src/java.desktop/share/native/libsplashscreen/libpng/pngread.c
+index 8a6381e1b..280fc12d3 100644
+--- a/src/java.desktop/share/native/libsplashscreen/libpng/pngread.c
++++ b/src/java.desktop/share/native/libsplashscreen/libpng/pngread.c
+@@ -3152,6 +3152,54 @@ png_image_read_colormapped(png_voidp argument)
+    }
+ }
+
++/* Row reading for interlaced 16-to-8 bit depth conversion with local buffer.
*/ ++static int ++png_image_read_direct_scaled(png_voidp argument) ++{ ++ png_image_read_control *display = png_voidcast(png_image_read_control*, ++ argument); ++ png_imagep image = display->image; ++ png_structrp png_ptr = image->opaque->png_ptr; ++ png_bytep local_row = png_voidcast(png_bytep, display->local_row); ++ png_bytep first_row = png_voidcast(png_bytep, display->first_row); ++ ptrdiff_t row_bytes = display->row_bytes; ++ int passes; ++ ++ /* Handle interlacing. */ ++ switch (png_ptr->interlaced) ++ { ++ case PNG_INTERLACE_NONE: ++ passes = 1; ++ break; ++ ++ case PNG_INTERLACE_ADAM7: ++ passes = PNG_INTERLACE_ADAM7_PASSES; ++ break; ++ ++ default: ++ png_error(png_ptr, "unknown interlace type"); ++ } ++ ++ /* Read each pass using local_row as intermediate buffer. */ ++ while (--passes >= 0) ++ { ++ png_uint_32 y = image->height; ++ png_bytep output_row = first_row; ++ ++ for (; y > 0; --y) ++ { ++ /* Read into local_row (gets transformed 8-bit data). */ ++ png_read_row(png_ptr, local_row, NULL); ++ ++ /* Copy from local_row to user buffer. */ ++ memcpy(output_row, local_row, (size_t)row_bytes); ++ output_row += row_bytes; ++ } ++ } ++ ++ return 1; ++} ++ + /* Just the row reading part of png_image_read. */ + static int + png_image_read_composite(png_voidp argument) +@@ -3570,6 +3618,7 @@ png_image_read_direct(png_voidp argument) + int linear = (format & PNG_FORMAT_FLAG_LINEAR) != 0; + int do_local_compose = 0; + int do_local_background = 0; /* to avoid double gamma correction bug */ ++ int do_local_scale = 0; /* for interlaced 16-to-8 bit conversion */ + int passes = 0; + + /* Add transforms to ensure the correct output format is produced then check +@@ -3703,8 +3752,16 @@ png_image_read_direct(png_voidp argument) + png_set_expand_16(png_ptr); + + else /* 8-bit output */ ++ { + png_set_scale_16(png_ptr); + ++ /* For interlaced images, use local_row buffer to avoid overflow ++ * in png_combine_row() which writes using IHDR bit-depth. ++ */ ++ if (png_ptr->interlaced != 0) ++ do_local_scale = 1; ++ } ++ + change &= ~PNG_FORMAT_FLAG_LINEAR; + } + +@@ -3980,6 +4037,24 @@ png_image_read_direct(png_voidp argument) + return result; + } + ++ else if (do_local_scale != 0) ++ { ++ /* For interlaced 16-to-8 conversion, use an intermediate row buffer ++ * to avoid buffer overflows in png_combine_row. The local_row is sized ++ * for the transformed (8-bit) output, preventing the overflow that would ++ * occur if png_combine_row wrote 16-bit data directly to the user buffer. ++ */ ++ int result; ++ png_voidp row = png_malloc(png_ptr, png_get_rowbytes(png_ptr, info_ptr)); ++ ++ display->local_row = row; ++ result = png_safe_execute(image, png_image_read_direct_scaled, display); ++ display->local_row = NULL; ++ png_free(png_ptr, row); ++ ++ return result; ++ } ++ + else + { + png_alloc_size_t row_bytes = (png_alloc_size_t)display->row_bytes; +diff --git a/src/java.desktop/share/native/libsplashscreen/libpng/pngrtran.c b/src/java.desktop/share/native/libsplashscreen/libpng/pngrtran.c +index 4f31f8f07..837dc698d 100644 +--- a/src/java.desktop/share/native/libsplashscreen/libpng/pngrtran.c ++++ b/src/java.desktop/share/native/libsplashscreen/libpng/pngrtran.c +@@ -524,9 +524,19 @@ png_set_quantize(png_structrp png_ptr, png_colorp palette, + { + int i; + ++ /* Initialize the array to index colors. ++ * ++ * Ensure quantize_index can fit 256 elements (PNG_MAX_PALETTE_LENGTH) ++ * rather than num_palette elements. 
This is to prevent buffer overflows ++ * caused by malformed PNG files with out-of-range palette indices. ++ * ++ * Be careful to avoid leaking memory. Applications are allowed to call ++ * this function more than once per png_struct. ++ */ ++ png_free(png_ptr, png_ptr->quantize_index); + png_ptr->quantize_index = (png_bytep)png_malloc(png_ptr, +- (png_alloc_size_t)num_palette); +- for (i = 0; i < num_palette; i++) ++ PNG_MAX_PALETTE_LENGTH); ++ for (i = 0; i < PNG_MAX_PALETTE_LENGTH; i++) + png_ptr->quantize_index[i] = (png_byte)i; + } + +@@ -538,15 +548,14 @@ png_set_quantize(png_structrp png_ptr, png_colorp palette, + * Perhaps not the best solution, but good enough. + */ + +- int i; ++ png_bytep quantize_sort; ++ int i, j; + +- /* Initialize an array to sort colors */ +- png_ptr->quantize_sort = (png_bytep)png_malloc(png_ptr, ++ /* Initialize the local array to sort colors. */ ++ quantize_sort = (png_bytep)png_malloc(png_ptr, + (png_alloc_size_t)num_palette); +- +- /* Initialize the quantize_sort array */ + for (i = 0; i < num_palette; i++) +- png_ptr->quantize_sort[i] = (png_byte)i; ++ quantize_sort[i] = (png_byte)i; + + /* Find the least used palette entries by starting a + * bubble sort, and running it until we have sorted +@@ -558,19 +567,18 @@ png_set_quantize(png_structrp png_ptr, png_colorp palette, + for (i = num_palette - 1; i >= maximum_colors; i--) + { + int done; /* To stop early if the list is pre-sorted */ +- int j; + + done = 1; + for (j = 0; j < i; j++) + { +- if (histogram[png_ptr->quantize_sort[j]] +- < histogram[png_ptr->quantize_sort[j + 1]]) ++ if (histogram[quantize_sort[j]] ++ < histogram[quantize_sort[j + 1]]) + { + png_byte t; + +- t = png_ptr->quantize_sort[j]; +- png_ptr->quantize_sort[j] = png_ptr->quantize_sort[j + 1]; +- png_ptr->quantize_sort[j + 1] = t; ++ t = quantize_sort[j]; ++ quantize_sort[j] = quantize_sort[j + 1]; ++ quantize_sort[j + 1] = t; + done = 0; + } + } +@@ -582,18 +590,18 @@ png_set_quantize(png_structrp png_ptr, png_colorp palette, + /* Swap the palette around, and set up a table, if necessary */ + if (full_quantize != 0) + { +- int j = num_palette; ++ j = num_palette; + + /* Put all the useful colors within the max, but don't + * move the others. + */ + for (i = 0; i < maximum_colors; i++) + { +- if ((int)png_ptr->quantize_sort[i] >= maximum_colors) ++ if ((int)quantize_sort[i] >= maximum_colors) + { + do + j--; +- while ((int)png_ptr->quantize_sort[j] >= maximum_colors); ++ while ((int)quantize_sort[j] >= maximum_colors); + + palette[i] = palette[j]; + } +@@ -601,7 +609,7 @@ png_set_quantize(png_structrp png_ptr, png_colorp palette, + } + else + { +- int j = num_palette; ++ j = num_palette; + + /* Move all the used colors inside the max limit, and + * develop a translation table. 
+@@ -609,13 +617,13 @@ png_set_quantize(png_structrp png_ptr, png_colorp palette, + for (i = 0; i < maximum_colors; i++) + { + /* Only move the colors we need to */ +- if ((int)png_ptr->quantize_sort[i] >= maximum_colors) ++ if ((int)quantize_sort[i] >= maximum_colors) + { + png_color tmp_color; + + do + j--; +- while ((int)png_ptr->quantize_sort[j] >= maximum_colors); ++ while ((int)quantize_sort[j] >= maximum_colors); + + tmp_color = palette[j]; + palette[j] = palette[i]; +@@ -653,8 +661,7 @@ png_set_quantize(png_structrp png_ptr, png_colorp palette, + } + } + } +- png_free(png_ptr, png_ptr->quantize_sort); +- png_ptr->quantize_sort = NULL; ++ png_free(png_ptr, quantize_sort); + } + else + { +@@ -1797,19 +1804,51 @@ png_init_read_transformations(png_structrp png_ptr) + } + else /* if (png_ptr->trans_alpha[i] != 0xff) */ + { +- png_byte v, w; +- +- v = png_ptr->gamma_to_1[palette[i].red]; +- png_composite(w, v, png_ptr->trans_alpha[i], back_1.red); +- palette[i].red = png_ptr->gamma_from_1[w]; +- +- v = png_ptr->gamma_to_1[palette[i].green]; +- png_composite(w, v, png_ptr->trans_alpha[i], back_1.green); +- palette[i].green = png_ptr->gamma_from_1[w]; +- +- v = png_ptr->gamma_to_1[palette[i].blue]; +- png_composite(w, v, png_ptr->trans_alpha[i], back_1.blue); +- palette[i].blue = png_ptr->gamma_from_1[w]; ++ if ((png_ptr->flags & PNG_FLAG_OPTIMIZE_ALPHA) != 0) ++ { ++ /* Premultiply only: ++ * component = round((component * alpha) / 255) ++ */ ++ png_uint_32 component; ++ ++ component = png_ptr->gamma_to_1[palette[i].red]; ++ component = ++ (component * png_ptr->trans_alpha[i] + 128) / 255; ++ palette[i].red = png_ptr->gamma_from_1[component]; ++ ++ component = png_ptr->gamma_to_1[palette[i].green]; ++ component = ++ (component * png_ptr->trans_alpha[i] + 128) / 255; ++ palette[i].green = png_ptr->gamma_from_1[component]; ++ ++ component = png_ptr->gamma_to_1[palette[i].blue]; ++ component = ++ (component * png_ptr->trans_alpha[i] + 128) / 255; ++ palette[i].blue = png_ptr->gamma_from_1[component]; ++ } ++ else ++ { ++ /* Composite with background color: ++ * component = ++ * alpha * component + (1 - alpha) * background ++ */ ++ png_byte v, w; ++ ++ v = png_ptr->gamma_to_1[palette[i].red]; ++ png_composite(w, v, ++ png_ptr->trans_alpha[i], back_1.red); ++ palette[i].red = png_ptr->gamma_from_1[w]; ++ ++ v = png_ptr->gamma_to_1[palette[i].green]; ++ png_composite(w, v, ++ png_ptr->trans_alpha[i], back_1.green); ++ palette[i].green = png_ptr->gamma_from_1[w]; ++ ++ v = png_ptr->gamma_to_1[palette[i].blue]; ++ png_composite(w, v, ++ png_ptr->trans_alpha[i], back_1.blue); ++ palette[i].blue = png_ptr->gamma_from_1[w]; ++ } + } + } + else +@@ -5032,13 +5071,8 @@ png_do_read_transformations(png_structrp png_ptr, png_row_infop row_info) + + #ifdef PNG_READ_QUANTIZE_SUPPORTED + if ((png_ptr->transformations & PNG_QUANTIZE) != 0) +- { + png_do_quantize(row_info, png_ptr->row_buf + 1, + png_ptr->palette_lookup, png_ptr->quantize_index); +- +- if (row_info->rowbytes == 0) +- png_error(png_ptr, "png_do_quantize returned rowbytes=0"); +- } + #endif /* READ_QUANTIZE */ + + #ifdef PNG_READ_EXPAND_16_SUPPORTED +diff --git a/src/java.desktop/share/native/libsplashscreen/libpng/pngstruct.h b/src/java.desktop/share/native/libsplashscreen/libpng/pngstruct.h +index d6c446564..82634c352 100644 +--- a/src/java.desktop/share/native/libsplashscreen/libpng/pngstruct.h ++++ b/src/java.desktop/share/native/libsplashscreen/libpng/pngstruct.h +@@ -435,7 +435,6 @@ struct png_struct_def + + #ifdef 
PNG_READ_QUANTIZE_SUPPORTED
+    /* The following three members were added at version 1.0.14 and 1.2.4 */
+-   png_bytep quantize_sort;          /* working sort array */
+    png_bytep index_to_palette;       /* where the original index currently is
+                                         in the palette */
+    png_bytep palette_to_index;       /* which original index points to this
+diff --git a/test/jdk/com/sun/jdi/EATests.java b/test/jdk/com/sun/jdi/EATests.java
+index 717cbfff3..7253603fe 100644
+--- a/test/jdk/com/sun/jdi/EATests.java
++++ b/test/jdk/com/sun/jdi/EATests.java
+@@ -2090,7 +2090,7 @@ class EARelockingNestedInflated_03Target extends EATestCaseBaseTarget {
+         // Use new lock. lockInflatedByContention might have been inflated because of recursion.
+         lockInflatedByContention = new XYVal(1, 1);
+         // Start thread that tries to enter lockInflatedByContention while the main thread owns it -> inflation
+-        TestScaffold.newThread(() -> {
++        DebuggeeWrapper.newThread(() -> {
+             while (true) {
+                 synchronized (testCase) {
+                     try {
+diff --git a/test/jdk/org/openeuler/security/openssl/KAEConfTest.java b/test/jdk/org/openeuler/security/openssl/KAEConfTest.java
+index 4076aa2fc..e35085152 100644
+--- a/test/jdk/org/openeuler/security/openssl/KAEConfTest.java
++++ b/test/jdk/org/openeuler/security/openssl/KAEConfTest.java
+@@ -26,10 +26,16 @@ import org.openeuler.security.openssl.KAEProvider;
+
+ import java.io.File;
+ import java.io.FileWriter;
++import java.io.BufferedReader;
++import java.io.FileInputStream;
++import java.io.InputStreamReader;
++import java.nio.charset.StandardCharsets;
+ import java.io.IOException;
+ import java.nio.file.Files;
+ import java.util.ArrayList;
+ import java.util.List;
++import java.util.regex.Matcher;
++import java.util.regex.Pattern;
+
+ /*
+  * @test
+@@ -48,6 +54,7 @@ public class KAEConfTest {
+
+     private static final String SPECIFY_LOG_PATH = System.getProperty("user.dir") + File.separator + "kae.log";
+     private static final List<File> files = new ArrayList<>();
++    private static final Pattern CONFIG_PATTERN = Pattern.compile("^\\s*(?!#.*kae\\.useOpensslVersion)kae\\.useOpensslVersion\\s*=\\s*(.+?)\\s*(#.*)?$");
+
+     enum Mode {
+         DEFAULT,
+@@ -74,6 +81,16 @@ public class KAEConfTest {
+
+     private static void init(Mode mode) throws IOException {
+         if (Mode.SPECIFY.equals(mode)) {
++            String default_opensslVersion = "0";
++            try (BufferedReader reader = new BufferedReader(new InputStreamReader(new FileInputStream(System.getProperty("kae.conf", DEFAULT_CONF)), StandardCharsets.UTF_8))) {
++                String line;
++                while ((line = reader.readLine()) != null) {
++                    Matcher matcher = CONFIG_PATTERN.matcher(line);
++                    if (matcher.matches()) {
++                        default_opensslVersion = matcher.group(1).trim();
++                    }
++                }
++            }
+             System.setProperty("kae.conf", SPECIFY_CONF);
+             File file = new File(SPECIFY_CONF);
+             if (!file.exists()) {
+@@ -81,7 +98,9 @@
+             }
+             files.add(file);
+             try (FileWriter fileWriter = new FileWriter(file)) {
+-                fileWriter.write("kae.log=true");
++                fileWriter.write("kae.log=true\n");
++                // use the same opensslVersion as the default conf
++                fileWriter.write("kae.useOpensslVersion=" + default_opensslVersion + "\n");
+                 fileWriter.flush();
+             }
+         }
+--
+2.34.1
+
diff --git a/huawei-support-Openssl3.patch b/huawei-support-Openssl3.patch
new file mode 100644
index 0000000000000000000000000000000000000000..dfc1a156cff70fdaaad6d3826b0eac14cc510298
--- /dev/null
+++ b/huawei-support-Openssl3.patch
@@ -0,0 +1,8217 @@
+Date: Wed, 26 Nov 2025 10:51:50 +0800
+Subject: [PATCH 1/8] support Openssl3
+
+---
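Editorial aside, not part of the patch (text between the "---" separator and the diffstat below is ignored when the patch is applied): the KAEConfig.java hunks in this patch replace a computed offset with a hardcoded digestOffset = 3, because the four digest property names (kae.md5, kae.sha256, kae.sha384, kae.sm3) share a single category slot, while sm2 now owns two adjacent provider properties (kae.sm2.cipher and kae.sm2.signature). A self-contained Java sketch of that index arithmetic, using a shortened, hypothetical property list:

    public class KaeOffsetSketch {
        public static void main(String[] args) {
            // Hypothetical provider-property order: four digest names, then one
            // name per category, with sm2 (the last category) split in two.
            String[] providerProps = {
                "kae.md5", "kae.sha256", "kae.sha384", "kae.sm3", // category 0 (digest)
                "kae.aes",                                        // category 1
                "kae.sm2.cipher", "kae.sm2.signature"             // category 2 (sm2)
            };
            int digestOffset = 3; // 4 digest property names share 1 category slot
            int aesCategory = 1;
            int sm2Category = 2;
            System.out.println(providerProps[aesCategory + digestOffset]);     // kae.aes
            System.out.println(providerProps[sm2Category + digestOffset]);     // kae.sm2.cipher
            System.out.println(providerProps[sm2Category + digestOffset + 1]); // kae.sm2.signature
        }
    }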
make/modules/jdk.crypto.kaeprovider/Lib.gmk | 2 +- + src/java.base/share/classes/module-info.java | 1 + + .../classes/sun/security/util/KnownOIDs.java | 3 + + .../openeuler/security/openssl/KAEConfig.java | 42 +- + .../security/openssl/KAECurveDB.java | 729 +++++++++ + .../security/openssl/KAEECDHKeyAgreement.java | 12 +- + .../security/openssl/KAEECKeyFactory.java | 308 ++++ + .../openssl/KAEECKeyPairGenerator.java | 17 +- + .../security/openssl/KAEECParameters.java | 240 +++ + .../security/openssl/KAEECPrivateKeyImpl.java | 214 +++ + .../security/openssl/KAEECPublicKeyImpl.java | 132 ++ + .../security/openssl/KAENamedCurve.java | 100 ++ + .../security/openssl/KAEProvider.java | 63 +- + .../security/openssl/KAESM2Cipher.java | 386 +++++ + .../openssl/KAESM2KeyPairGenerator.java | 108 ++ + .../security/openssl/KAESM2Signature.java | 373 +++++ + .../openeuler/security/openssl/KAEUtils.java | 35 + + .../linux/conf/security/kaeprovider.conf | 23 +- + .../security/openssl/kae_cipher_rsa.c | 69 +- + .../security/openssl/kae_cipher_sm2.c | 370 +++++ + .../openeuler/security/openssl/kae_digest.c | 28 +- + .../security/openssl/kae_exception.c | 18 +- + .../security/openssl/kae_exception.h | 2 + + .../org/openeuler/security/openssl/kae_hmac.c | 25 +- + .../security/openssl/kae_keyagreement_dh.c | 17 +- + .../security/openssl/kae_keyagreement_ecdh.c | 23 +- + .../openssl/kae_keypairgenerator_dh.c | 15 +- + .../openssl/kae_keypairgenerator_ec.c | 95 +- + .../openssl/kae_keypairgenerator_rsa.c | 25 +- + .../openeuler/security/openssl/kae_provider.c | 57 +- + .../security/openssl/kae_signature_rsa.c | 47 +- + .../security/openssl/kae_signature_sm2.c | 288 ++++ + .../security/openssl/kae_symmetric_cipher.c | 78 +- + .../org/openeuler/security/openssl/kae_util.c | 25 +- + .../org/openeuler/security/openssl/kae_util.h | 6 +- + .../security/openssl/openssl1_macro.h | 55 + + .../security/openssl/openssl3_macro.h | 37 + + .../openeuler/security/openssl/ssl_utils.c | 1367 +++++++++++++++++ + .../openeuler/security/openssl/ssl_utils.h | 313 ++++ + .../openeuler/security/openssl/ECDHTest.java | 27 +- + .../openssl/KAEDisabledAlgorithmsTest.java | 2 +- + .../security/openssl/KAEGcmlvLenTest.java | 53 + + .../security/openssl/KAETestHelper.java | 3 +- + .../security/openssl/KAEUseEngineTest.java | 60 +- + .../security/openssl/KaeProviderTest.java | 32 +- + .../openeuler/security/openssl/SM2Test.java | 175 +++ + .../jca/PreferredProviderNegativeTest.java | 2 +- + .../security/openssl/SM2CipherBenchmark.java | 116 ++ + .../openssl/SM2SignatureBenchmark.java | 104 ++ + 49 files changed, 5989 insertions(+), 333 deletions(-) + create mode 100644 src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAECurveDB.java + create mode 100644 src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEECKeyFactory.java + create mode 100644 src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEECParameters.java + create mode 100644 src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEECPrivateKeyImpl.java + create mode 100644 src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEECPublicKeyImpl.java + create mode 100644 src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAENamedCurve.java + create mode 100644 src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAESM2Cipher.java + create mode 100644 
src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAESM2KeyPairGenerator.java
+ create mode 100644 src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAESM2Signature.java
+ create mode 100644 src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_cipher_sm2.c
+ create mode 100644 src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_signature_sm2.c
+ create mode 100644 src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/openssl1_macro.h
+ create mode 100644 src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/openssl3_macro.h
+ create mode 100644 src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/ssl_utils.c
+ create mode 100644 src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/ssl_utils.h
+ create mode 100644 test/jdk/org/openeuler/security/openssl/KAEGcmlvLenTest.java
+ create mode 100644 test/jdk/org/openeuler/security/openssl/SM2Test.java
+ create mode 100644 test/micro/org/openeuler/bench/security/openssl/SM2CipherBenchmark.java
+ create mode 100644 test/micro/org/openeuler/bench/security/openssl/SM2SignatureBenchmark.java
+
+diff --git a/make/modules/jdk.crypto.kaeprovider/Lib.gmk b/make/modules/jdk.crypto.kaeprovider/Lib.gmk
+index 2b01e3ed4..02c24ed2e 100644
+--- a/make/modules/jdk.crypto.kaeprovider/Lib.gmk
++++ b/make/modules/jdk.crypto.kaeprovider/Lib.gmk
+@@ -36,7 +36,7 @@ ifeq ($(ENABLE_KAE), true)
+       CXXFLAGS := $(CXXFLAGS_JDKLIB), \
+       LDFLAGS := $(LDFLAGS_JDKLIB) \
+           $(call SET_SHARED_LIBRARY_ORIGIN), \
+-      LIBS_unix := $(LIBDL) -lssl -lcrypto, \
++      LIBS_unix := $(LIBDL) -lm -pthread, \
+   ))
+
+   TARGETS += $(BUILD_LIBJ2KAE)
+diff --git a/src/java.base/share/classes/module-info.java b/src/java.base/share/classes/module-info.java
+index 8d7441a2e..c2a6b9caf 100644
+--- a/src/java.base/share/classes/module-info.java
++++ b/src/java.base/share/classes/module-info.java
+@@ -335,6 +335,7 @@ module java.base {
+         jdk.crypto.kaeprovider,
+         jdk.naming.dns;
+     exports sun.security.pkcs to
++        jdk.crypto.kaeprovider,
+         jdk.crypto.ec,
+         jdk.jartool;
+     exports sun.security.provider to
+diff --git a/src/java.base/share/classes/sun/security/util/KnownOIDs.java b/src/java.base/share/classes/sun/security/util/KnownOIDs.java
+index 8cd0a03a3..97f599c7b 100644
+--- a/src/java.base/share/classes/sun/security/util/KnownOIDs.java
++++ b/src/java.base/share/classes/sun/security/util/KnownOIDs.java
+@@ -328,6 +328,9 @@ public enum KnownOIDs {
+     brainpoolP384r1("1.3.36.3.3.2.8.1.1.11"),
+     brainpoolP512r1("1.3.36.3.3.2.8.1.1.13"),
+
++    // sm2
++    sm2p256v1("1.2.156.10197.1.301"),
++
+     // Certicom 1.3.132.*
+     sect163k1("1.3.132.0.1", "sect163k1", "NIST K-163"),
+     sect163r1("1.3.132.0.2"),
+diff --git a/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEConfig.java b/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEConfig.java
+index a4eb57770..1b185abd5 100644
+--- a/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEConfig.java
++++ b/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEConfig.java
+@@ -51,7 +51,10 @@ public class KAEConfig {
+         "kae.hmac",
+         "kae.rsa",
+         "kae.dh",
+-        "kae.ec"
++        "kae.ec",
++        // sm2 is divided into two parts (cipher and signature); be careful with the offsets when adding new algorithms
++        "kae.sm2.cipher",
++        "kae.sm2.signature"
+     };
+
+     // these property names indicate whether KAE hardware acceleration is enabled for each algorithm
+@@ -62,9 +65,17 @@
+         "kae.hmac.useKaeEngine",
+         "kae.rsa.useKaeEngine",
+         "kae.dh.useKaeEngine",
+-        "kae.ec.useKaeEngine"
++        "kae.ec.useKaeEngine",
++        "kae.sm2.useKaeEngine"
+     };
+
++    // digestOffset is [kae.md5, kae.sha256, kae.sha384, kae.sm3].length - [kae.digest.useKaeEngine].length
++    private static int digestOffset = 3;
++
++    // digestAlgorithmLen is [kae.md5, kae.sha256, kae.sha384, kae.sm3].length
++    private static int digestAlgorithmLen = 4;
++
++
+     // algorithm names
+     private static final String[] algorithmNames = new String[]{
+         "md5",
+@@ -95,7 +106,8 @@
+         "hmac-sha512",
+         "rsa",
+         "dh",
+-        "ec"
++        "ec",
++        "sm2"
+     };
+
+     // algorithm name and algorithm index mapping
+@@ -201,16 +213,20 @@
+             }
+             useKaeProviderCategoryMap.put(useKaeProviderPropertyNames[i], categoryFlagsForProvider[i]);
+         }
+-        int offset = useKaeProviderPropertyNames.length - useKaeEnginePropertyNames.length;
+-        int digestAlgorithmLen = offset + 1;
+         // digest
+         System.arraycopy(categoryFlagsForProvider, 0, useKaeProviderFlags, 0, digestAlgorithmLen);
+
+         // non-digest
+         for (int i = digestAlgorithmLen; i < useKaeProviderFlags.length; i++) {
+             Integer algorithmCategoryIndex = algorithmNameCategoryMap.get(algorithmNames[i]);
+-            if (categoryFlagsForProvider[algorithmCategoryIndex + offset]) {
+-                useKaeProviderFlags[i] = true;
++            // sm2 special treatment
++            if ("sm2".equals(algorithmNames[i])) {
++                // cipher || signature
++                useKaeProviderFlags[i] = categoryFlagsForProvider[algorithmCategoryIndex + digestOffset] || categoryFlagsForProvider[algorithmCategoryIndex + digestOffset + 1];
++            }
++            else {
++                useKaeProviderFlags[i] = categoryFlagsForProvider[algorithmCategoryIndex + digestOffset];
+             }
+         }
+
+@@ -239,7 +255,8 @@
+         false, // hmac
+         true,  // rsa
+         true,  // dh
+-        false  // ec
++        false, // ec
++        false  // sm2
+     };
+     for (int i = 0; i < useKaeEnginePropertyNames.length; i++) {
+         String configValue = privilegedGetOverridable(useKaeEnginePropertyNames[i]);
+@@ -248,8 +265,9 @@
+         }
+     }
+
+-    // EC algorithm currently does not support KAE hardware acceleration, temporarily use openssl soft calculation.
+-    categoryFlagsForEngine[useKaeEnginePropertyNames.length - 1] = false;
++    // EC and SM2 algorithms currently do not support KAE hardware acceleration, temporarily use openssl soft calculation.
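++    // (Category indices here: 6 = "ec", 7 = "sm2"; see initAlgorithmNameCategoryMap below.)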
++ categoryFlagsForEngine[6] = false; ++ categoryFlagsForEngine[7] = false; + + for (int i = 0; i < useKaeEngineFlags.length; i++) { + Integer algorithmCategoryIndex = algorithmNameCategoryMap.get(algorithmNames[i]); +@@ -301,6 +319,7 @@ public class KAEConfig { + * 4 : rsa + * 5 : dh + * 6 : ec ++ * 7 : sm2 + */ + private static void initAlgorithmNameCategoryMap() { + algorithmNameCategoryMap.put("md5", 0); +@@ -332,6 +351,7 @@ public class KAEConfig { + algorithmNameCategoryMap.put("rsa", 4); + algorithmNameCategoryMap.put("dh", 5); + algorithmNameCategoryMap.put("ec", 6); ++ algorithmNameCategoryMap.put("sm2", 7); + } + + private static void initAlgorithmNameMap() { +@@ -341,7 +361,7 @@ public class KAEConfig { + + private static String[] getDisabledAlgorithms() { + String disabledAlgorithms = privilegedGetOverridable("kae.engine.disabledAlgorithms", +- "sha256,sha384"); ++ "sha256,sha384,sm2"); + return disabledAlgorithms.replaceAll(" ", "").split("\\,"); + } + +diff --git a/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAECurveDB.java b/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAECurveDB.java +new file mode 100644 +index 000000000..519045c78 +--- /dev/null ++++ b/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAECurveDB.java +@@ -0,0 +1,729 @@ ++/* ++ * Copyright (c) 2006, 2021, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2023, Huawei Technologies Co., Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. Oracle designates this ++ * particular file as subject to the "Classpath" exception as provided ++ * by Oracle in the LICENSE file that accompanied this code. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ */ ++package org.openeuler.security.openssl; ++ ++import java.math.BigInteger; ++ ++import java.security.spec.*; ++ ++import java.util.*; ++ ++import sun.security.util.KnownOIDs; ++import sun.security.util.ECUtil; ++ ++/** ++ * Repository for well-known Elliptic Curve parameters. It is used by both ++ * the SunPKCS11 and SunJSSE code. 
++ *
++ * @since 1.6
++ * @author Andreas Sterbenz
++ */
++public class KAECurveDB {
++    private static final int P  = 1; // prime curve
++    private static final int B  = 2; // binary curve
++    private static final int PD = 5; // prime curve, mark as default
++    private static final int BD = 6; // binary curve, mark as default
++
++    private static final Map<String, KAENamedCurve> oidMap =
++        new LinkedHashMap<String, KAENamedCurve>();
++    private static final Map<String, KAENamedCurve> nameMap =
++        new HashMap<String, KAENamedCurve>();
++    private static final Map<Integer, KAENamedCurve> lengthMap =
++        new HashMap<Integer, KAENamedCurve>();
++
++    private static Collection<? extends KAENamedCurve> specCollection;
++
++    // Return a NamedCurve for the specified OID/name or null if unknown.
++    public static KAENamedCurve lookup(String name) {
++        KAENamedCurve spec = oidMap.get(name);
++        if (spec != null) {
++            return spec;
++        }
++
++        return nameMap.get(name.toLowerCase(Locale.ENGLISH));
++    }
++
++    // Return EC parameters for the specified field size. If there are known
++    // NIST recommended parameters for the given length, they are returned.
++    // Otherwise, if there are multiple matches for the given size, an
++    // arbitrary one is returned.
++    // If no parameters are known, the method returns null.
++    // NOTE that this method returns both prime and binary curves.
++    static KAENamedCurve lookup(int length) {
++        return lengthMap.get(length);
++    }
++
++    // Convert the given ECParameterSpec object to a NamedCurve object.
++    // If params does not represent a known named curve, return null.
++    public static KAENamedCurve lookup(ECParameterSpec params) {
++        if ((params instanceof KAENamedCurve) || (params == null)) {
++            return (KAENamedCurve)params;
++        }
++
++        // This code allows SunJSSE to work with 3rd party crypto
++        // providers for ECC and not just SunPKCS11.
++        // This can go away once we decide how to expose curve names in the
++        // public API.
++        // Note that it assumes that the 3rd party provider encodes named
++        // curves using the short form, not explicitly. If it did that, then
++        // the SunJSSE TLS ECC extensions are wrong, which could lead to
++        // interoperability problems.
++        int fieldSize = params.getCurve().getField().getFieldSize();
++        for (KAENamedCurve namedCurve : specCollection) {
++            // ECParameterSpec does not define equals, so check all the
++            // components ourselves.
++ // Quick field size check first ++ if (namedCurve.getCurve().getField().getFieldSize() != fieldSize) { ++ continue; ++ } ++ if (ECUtil.equals(namedCurve, params)) { ++ // everything matches our named curve, return it ++ return namedCurve; ++ } ++ } ++ // no match found ++ return null; ++ } ++ ++ private static BigInteger bi(String s) { ++ return new BigInteger(s, 16); ++ } ++ ++ private static void add(KnownOIDs o, int type, String sfield, ++ String a, String b, String x, String y, String n, int h) { ++ BigInteger p = bi(sfield); ++ ECField field; ++ if ((type == P) || (type == PD)) { ++ field = new ECFieldFp(p); ++ } else if ((type == B) || (type == BD)) { ++ field = new ECFieldF2m(p.bitLength() - 1, p); ++ } else { ++ throw new RuntimeException("Invalid type: " + type); ++ } ++ ++ EllipticCurve curve = new EllipticCurve(field, bi(a), bi(b)); ++ ECPoint g = new ECPoint(bi(x), bi(y)); ++ ++ String oid = o.value(); ++ KAENamedCurve params = new KAENamedCurve(o, curve, g, bi(n), h); ++ if (oidMap.put(oid, params) != null) { ++ throw new RuntimeException("Duplication oid: " + oid); ++ } ++ ++ for (String cn : params.getNameAndAliases()) { ++ if (nameMap.put(cn.toLowerCase(Locale.ENGLISH), ++ params) != null) { ++ throw new RuntimeException("Duplication name: " + cn); ++ } ++ } ++ ++ int len = field.getFieldSize(); ++ if ((type == PD) || (type == BD) || (lengthMap.get(len) == null)) { ++ // add entry if none present for this field size or if ++ // the curve is marked as a default curve. ++ lengthMap.put(len, params); ++ } ++ } ++ ++ static { ++ /* SEC2 prime curves */ ++ add(KnownOIDs.secp112r1, P, ++ "DB7C2ABF62E35E668076BEAD208B", ++ "DB7C2ABF62E35E668076BEAD2088", ++ "659EF8BA043916EEDE8911702B22", ++ "09487239995A5EE76B55F9C2F098", ++ "A89CE5AF8724C0A23E0E0FF77500", ++ "DB7C2ABF62E35E7628DFAC6561C5", ++ 1); ++ ++ add(KnownOIDs.secp112r2, P, ++ "DB7C2ABF62E35E668076BEAD208B", ++ "6127C24C05F38A0AAAF65C0EF02C", ++ "51DEF1815DB5ED74FCC34C85D709", ++ "4BA30AB5E892B4E1649DD0928643", ++ "adcd46f5882e3747def36e956e97", ++ "36DF0AAFD8B8D7597CA10520D04B", ++ 4); ++ ++ add(KnownOIDs.secp128r1, P, ++ "FFFFFFFDFFFFFFFFFFFFFFFFFFFFFFFF", ++ "FFFFFFFDFFFFFFFFFFFFFFFFFFFFFFFC", ++ "E87579C11079F43DD824993C2CEE5ED3", ++ "161FF7528B899B2D0C28607CA52C5B86", ++ "CF5AC8395BAFEB13C02DA292DDED7A83", ++ "FFFFFFFE0000000075A30D1B9038A115", ++ 1); ++ ++ add(KnownOIDs.secp128r2, P, ++ "FFFFFFFDFFFFFFFFFFFFFFFFFFFFFFFF", ++ "D6031998D1B3BBFEBF59CC9BBFF9AEE1", ++ "5EEEFCA380D02919DC2C6558BB6D8A5D", ++ "7B6AA5D85E572983E6FB32A7CDEBC140", ++ "27B6916A894D3AEE7106FE805FC34B44", ++ "3FFFFFFF7FFFFFFFBE0024720613B5A3", ++ 4); ++ ++ add(KnownOIDs.secp160k1, P, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC73", ++ "0000000000000000000000000000000000000000", ++ "0000000000000000000000000000000000000007", ++ "3B4C382CE37AA192A4019E763036F4F5DD4D7EBB", ++ "938CF935318FDCED6BC28286531733C3F03C4FEE", ++ "0100000000000000000001B8FA16DFAB9ACA16B6B3", ++ 1); ++ ++ add(KnownOIDs.secp160r1, P, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF7FFFFFFF", ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF7FFFFFFC", ++ "1C97BEFC54BD7A8B65ACF89F81D4D4ADC565FA45", ++ "4A96B5688EF573284664698968C38BB913CBFC82", ++ "23A628553168947D59DCC912042351377AC5FB32", ++ "0100000000000000000001F4C8F927AED3CA752257", ++ 1); ++ ++ add(KnownOIDs.secp160r2, P, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC73", ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC70", ++ "B4E134D3FB59EB8BAB57274904664D5AF50388BA", ++ "52DCB034293A117E1F4FF11B30F7199D3144CE6D", ++ 
"FEAFFEF2E331F296E071FA0DF9982CFEA7D43F2E", ++ "0100000000000000000000351EE786A818F3A1A16B", ++ 1); ++ ++ add(KnownOIDs.secp192k1, P, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFEE37", ++ "000000000000000000000000000000000000000000000000", ++ "000000000000000000000000000000000000000000000003", ++ "DB4FF10EC057E9AE26B07D0280B7F4341DA5D1B1EAE06C7D", ++ "9B2F2F6D9C5628A7844163D015BE86344082AA88D95E2F9D", ++ "FFFFFFFFFFFFFFFFFFFFFFFE26F2FC170F69466A74DEFD8D", ++ 1); ++ ++ add(KnownOIDs.secp192r1, PD, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFF", ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFC", ++ "64210519E59C80E70FA7E9AB72243049FEB8DEECC146B9B1", ++ "188DA80EB03090F67CBF20EB43A18800F4FF0AFD82FF1012", ++ "07192B95FFC8DA78631011ED6B24CDD573F977A11E794811", ++ "FFFFFFFFFFFFFFFFFFFFFFFF99DEF836146BC9B1B4D22831", ++ 1); ++ ++ add(KnownOIDs.secp224k1, P, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFE56D", ++ "00000000000000000000000000000000000000000000000000000000", ++ "00000000000000000000000000000000000000000000000000000005", ++ "A1455B334DF099DF30FC28A169A467E9E47075A90F7E650EB6B7A45C", ++ "7E089FED7FBA344282CAFBD6F7E319F7C0B0BD59E2CA4BDB556D61A5", ++ "010000000000000000000000000001DCE8D2EC6184CAF0A971769FB1F7", ++ 1); ++ ++ add(KnownOIDs.secp224r1, PD, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF000000000000000000000001", ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFFFFFFFFFE", ++ "B4050A850C04B3ABF54132565044B0B7D7BFD8BA270B39432355FFB4", ++ "B70E0CBD6BB4BF7F321390B94A03C1D356C21122343280D6115C1D21", ++ "BD376388B5F723FB4C22DFE6CD4375A05A07476444D5819985007E34", ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFF16A2E0B8F03E13DD29455C5C2A3D", ++ 1); ++ ++ add(KnownOIDs.secp256k1, P, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", ++ "0000000000000000000000000000000000000000000000000000000000000000", ++ "0000000000000000000000000000000000000000000000000000000000000007", ++ "79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", ++ "483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", ++ 1); ++ ++ add(KnownOIDs.secp256r1, PD, ++ "FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF", ++ "FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFC", ++ "5AC635D8AA3A93E7B3EBBD55769886BC651D06B0CC53B0F63BCE3C3E27D2604B", ++ "6B17D1F2E12C4247F8BCE6E563A440F277037D812DEB33A0F4A13945D898C296", ++ "4FE342E2FE1A7F9B8EE7EB4A7C0F9E162BCE33576B315ECECBB6406837BF51F5", ++ "FFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551", ++ 1); ++ ++ add(KnownOIDs.secp384r1, PD, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFF0000000000000000FFFFFFFF", ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFF0000000000000000FFFFFFFC", ++ "B3312FA7E23EE7E4988E056BE3F82D19181D9C6EFE8141120314088F5013875AC656398D8A2ED19D2A85C8EDD3EC2AEF", ++ "AA87CA22BE8B05378EB1C71EF320AD746E1D3B628BA79B9859F741E082542A385502F25DBF55296C3A545E3872760AB7", ++ "3617DE4A96262C6F5D9E98BF9292DC29F8F41DBD289A147CE9DA3113B5F0B8C00A60B1CE1D7E819D7A431D7C90EA0E5F", ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFC7634D81F4372DDF581A0DB248B0A77AECEC196ACCC52973", ++ 1); ++ ++ add(KnownOIDs.secp521r1, PD, ++ "01FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", ++ 
"01FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFC", ++ "0051953EB9618E1C9A1F929A21A0B68540EEA2DA725B99B315F3B8B489918EF109E156193951EC7E937B1652C0BD3BB1BF073573DF883D2C34F1EF451FD46B503F00", ++ "00C6858E06B70404E9CD9E3ECB662395B4429C648139053FB521F828AF606B4D3DBAA14B5E77EFE75928FE1DC127A2FFA8DE3348B3C1856A429BF97E7E31C2E5BD66", ++ "011839296A789A3BC0045C8A5FB42C7D1BD998F54449579B446817AFBD17273E662C97EE72995EF42640C550B9013FAD0761353C7086A272C24088BE94769FD16650", ++ "01FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFA51868783BF2F966B7FCC0148F709A5D03BB5C9B8899C47AEBB6FB71E91386409", ++ 1); ++ ++ /* ANSI X9.62 prime curves */ ++ add(KnownOIDs.prime192v2, P, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFF", ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFC", ++ "CC22D6DFB95C6B25E49C0D6364A4E5980C393AA21668D953", ++ "EEA2BAE7E1497842F2DE7769CFE9C989C072AD696F48034A", ++ "6574D11D69B6EC7A672BB82A083DF2F2B0847DE970B2DE15", ++ "FFFFFFFFFFFFFFFFFFFFFFFE5FB1A724DC80418648D8DD31", ++ 1); ++ ++ add(KnownOIDs.prime192v3, P, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFF", ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFC", ++ "22123DC2395A05CAA7423DAECCC94760A7D462256BD56916", ++ "7D29778100C65A1DA1783716588DCE2B8B4AEE8E228F1896", ++ "38A90F22637337334B49DCB66A6DC8F9978ACA7648A943B0", ++ "FFFFFFFFFFFFFFFFFFFFFFFF7A62D031C83F4294F640EC13", ++ 1); ++ ++ add(KnownOIDs.prime239v1, P, ++ "7FFFFFFFFFFFFFFFFFFFFFFF7FFFFFFFFFFF8000000000007FFFFFFFFFFF", ++ "7FFFFFFFFFFFFFFFFFFFFFFF7FFFFFFFFFFF8000000000007FFFFFFFFFFC", ++ "6B016C3BDCF18941D0D654921475CA71A9DB2FB27D1D37796185C2942C0A", ++ "0FFA963CDCA8816CCC33B8642BEDF905C3D358573D3F27FBBD3B3CB9AAAF", ++ "7DEBE8E4E90A5DAE6E4054CA530BA04654B36818CE226B39FCCB7B02F1AE", ++ "7FFFFFFFFFFFFFFFFFFFFFFF7FFFFF9E5E9A9F5D9071FBD1522688909D0B", ++ 1); ++ ++ add(KnownOIDs.prime239v2, P, ++ "7FFFFFFFFFFFFFFFFFFFFFFF7FFFFFFFFFFF8000000000007FFFFFFFFFFF", ++ "7FFFFFFFFFFFFFFFFFFFFFFF7FFFFFFFFFFF8000000000007FFFFFFFFFFC", ++ "617FAB6832576CBBFED50D99F0249C3FEE58B94BA0038C7AE84C8C832F2C", ++ "38AF09D98727705120C921BB5E9E26296A3CDCF2F35757A0EAFD87B830E7", ++ "5B0125E4DBEA0EC7206DA0FC01D9B081329FB555DE6EF460237DFF8BE4BA", ++ "7FFFFFFFFFFFFFFFFFFFFFFF800000CFA7E8594377D414C03821BC582063", ++ 1); ++ ++ add(KnownOIDs.prime239v3, P, ++ "7FFFFFFFFFFFFFFFFFFFFFFF7FFFFFFFFFFF8000000000007FFFFFFFFFFF", ++ "7FFFFFFFFFFFFFFFFFFFFFFF7FFFFFFFFFFF8000000000007FFFFFFFFFFC", ++ "255705FA2A306654B1F4CB03D6A750A30C250102D4988717D9BA15AB6D3E", ++ "6768AE8E18BB92CFCF005C949AA2C6D94853D0E660BBF854B1C9505FE95A", ++ "1607E6898F390C06BC1D552BAD226F3B6FCFE48B6E818499AF18E3ED6CF3", ++ "7FFFFFFFFFFFFFFFFFFFFFFF7FFFFF975DEB41B3A6057C3C432146526551", ++ 1); ++ ++ /* SEC2 binary curves */ ++ add(KnownOIDs.sect113r1, B, ++ "020000000000000000000000000201", ++ "003088250CA6E7C7FE649CE85820F7", ++ "00E8BEE4D3E2260744188BE0E9C723", ++ "009D73616F35F4AB1407D73562C10F", ++ "00A52830277958EE84D1315ED31886", ++ "0100000000000000D9CCEC8A39E56F", ++ 2); ++ ++ add(KnownOIDs.sect113r2, B, ++ "020000000000000000000000000201", ++ "00689918DBEC7E5A0DD6DFC0AA55C7", ++ "0095E9A9EC9B297BD4BF36E059184F", ++ "01A57A6A7B26CA5EF52FCDB8164797", ++ "00B3ADC94ED1FE674C06E695BABA1D", ++ "010000000000000108789B2496AF93", ++ 2); ++ ++ add(KnownOIDs.sect131r1, B, ++ "080000000000000000000000000000010D", ++ "07A11B09A76B562144418FF3FF8C2570B8", ++ "0217C05610884B63B9C6C7291678F9D341", ++ 
"0081BAF91FDF9833C40F9C181343638399", ++ "078C6E7EA38C001F73C8134B1B4EF9E150", ++ "0400000000000000023123953A9464B54D", ++ 2); ++ ++ add(KnownOIDs.sect131r2, B, ++ "080000000000000000000000000000010D", ++ "03E5A88919D7CAFCBF415F07C2176573B2", ++ "04B8266A46C55657AC734CE38F018F2192", ++ "0356DCD8F2F95031AD652D23951BB366A8", ++ "0648F06D867940A5366D9E265DE9EB240F", ++ "0400000000000000016954A233049BA98F", ++ 2); ++ ++ add(KnownOIDs.sect163k1, B, ++ "0800000000000000000000000000000000000000C9", ++ "000000000000000000000000000000000000000001", ++ "000000000000000000000000000000000000000001", ++ "02FE13C0537BBC11ACAA07D793DE4E6D5E5C94EEE8", ++ "0289070FB05D38FF58321F2E800536D538CCDAA3D9", ++ "04000000000000000000020108A2E0CC0D99F8A5EF", ++ 2); ++ ++ add(KnownOIDs.sect163r1, B, ++ "0800000000000000000000000000000000000000C9", ++ "07B6882CAAEFA84F9554FF8428BD88E246D2782AE2", ++ "0713612DCDDCB40AAB946BDA29CA91F73AF958AFD9", ++ "0369979697AB43897789566789567F787A7876A654", ++ "00435EDB42EFAFB2989D51FEFCE3C80988F41FF883", ++ "03FFFFFFFFFFFFFFFFFFFF48AAB689C29CA710279B", ++ 2); ++ ++ add(KnownOIDs.sect163r2, BD, ++ "0800000000000000000000000000000000000000C9", ++ "000000000000000000000000000000000000000001", ++ "020A601907B8C953CA1481EB10512F78744A3205FD", ++ "03F0EBA16286A2D57EA0991168D4994637E8343E36", ++ "00D51FBC6C71A0094FA2CDD545B11C5C0C797324F1", ++ "040000000000000000000292FE77E70C12A4234C33", ++ 2); ++ ++ add(KnownOIDs.sect193r1, B, ++ "02000000000000000000000000000000000000000000008001", ++ "0017858FEB7A98975169E171F77B4087DE098AC8A911DF7B01", ++ "00FDFB49BFE6C3A89FACADAA7A1E5BBC7CC1C2E5D831478814", ++ "01F481BC5F0FF84A74AD6CDF6FDEF4BF6179625372D8C0C5E1", ++ "0025E399F2903712CCF3EA9E3A1AD17FB0B3201B6AF7CE1B05", ++ "01000000000000000000000000C7F34A778F443ACC920EBA49", ++ 2); ++ ++ add(KnownOIDs.sect193r2, B, ++ "02000000000000000000000000000000000000000000008001", ++ "0163F35A5137C2CE3EA6ED8667190B0BC43ECD69977702709B", ++ "00C9BB9E8927D4D64C377E2AB2856A5B16E3EFB7F61D4316AE", ++ "00D9B67D192E0367C803F39E1A7E82CA14A651350AAE617E8F", ++ "01CE94335607C304AC29E7DEFBD9CA01F596F927224CDECF6C", ++ "010000000000000000000000015AAB561B005413CCD4EE99D5", ++ 2); ++ ++ add(KnownOIDs.sect233k1, BD, ++ "020000000000000000000000000000000000000004000000000000000001", ++ "000000000000000000000000000000000000000000000000000000000000", ++ "000000000000000000000000000000000000000000000000000000000001", ++ "017232BA853A7E731AF129F22FF4149563A419C26BF50A4C9D6EEFAD6126", ++ "01DB537DECE819B7F70F555A67C427A8CD9BF18AEB9B56E0C11056FAE6A3", ++ "008000000000000000000000000000069D5BB915BCD46EFB1AD5F173ABDF", ++ 4); ++ ++ add(KnownOIDs.sect233r1, B, ++ "020000000000000000000000000000000000000004000000000000000001", ++ "000000000000000000000000000000000000000000000000000000000001", ++ "0066647EDE6C332C7F8C0923BB58213B333B20E9CE4281FE115F7D8F90AD", ++ "00FAC9DFCBAC8313BB2139F1BB755FEF65BC391F8B36F8F8EB7371FD558B", ++ "01006A08A41903350678E58528BEBF8A0BEFF867A7CA36716F7E01F81052", ++ "01000000000000000000000000000013E974E72F8A6922031D2603CFE0D7", ++ 2); ++ ++ add(KnownOIDs.sect239k1, B, ++ "800000000000000000004000000000000000000000000000000000000001", ++ "000000000000000000000000000000000000000000000000000000000000", ++ "000000000000000000000000000000000000000000000000000000000001", ++ "29A0B6A887A983E9730988A68727A8B2D126C44CC2CC7B2A6555193035DC", ++ "76310804F12E549BDB011C103089E73510ACB275FC312A5DC6B76553F0CA", ++ "2000000000000000000000000000005A79FEC67CB6E91F1C1DA800E478A5", ++ 4); ++ ++ add(KnownOIDs.sect283k1, BD, ++ 
"0800000000000000000000000000000000000000000000000000000000000000000010A1", ++ "000000000000000000000000000000000000000000000000000000000000000000000000", ++ "000000000000000000000000000000000000000000000000000000000000000000000001", ++ "0503213F78CA44883F1A3B8162F188E553CD265F23C1567A16876913B0C2AC2458492836", ++ "01CCDA380F1C9E318D90F95D07E5426FE87E45C0E8184698E45962364E34116177DD2259", ++ "01FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE9AE2ED07577265DFF7F94451E061E163C61", ++ 4); ++ ++ add(KnownOIDs.sect283r1, B, ++ "0800000000000000000000000000000000000000000000000000000000000000000010A1", ++ "000000000000000000000000000000000000000000000000000000000000000000000001", ++ "027B680AC8B8596DA5A4AF8A19A0303FCA97FD7645309FA2A581485AF6263E313B79A2F5", ++ "05F939258DB7DD90E1934F8C70B0DFEC2EED25B8557EAC9C80E2E198F8CDBECD86B12053", ++ "03676854FE24141CB98FE6D4B20D02B4516FF702350EDDB0826779C813F0DF45BE8112F4", ++ "03FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEF90399660FC938A90165B042A7CEFADB307", ++ 2); ++ ++ add(KnownOIDs.sect409k1, BD, ++ "02000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000001", ++ "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", ++ "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", ++ "0060F05F658F49C1AD3AB1890F7184210EFD0987E307C84C27ACCFB8F9F67CC2C460189EB5AAAA62EE222EB1B35540CFE9023746", ++ "01E369050B7C4E42ACBA1DACBF04299C3460782F918EA427E6325165E9EA10E3DA5F6C42E9C55215AA9CA27A5863EC48D8E0286B", ++ "007FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE5F83B2D4EA20400EC4557D5ED3E3E7CA5B4B5C83B8E01E5FCF", ++ 4); ++ ++ add(KnownOIDs.sect409r1, B, ++ "02000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000001", ++ "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", ++ "0021A5C2C8EE9FEB5C4B9A753B7B476B7FD6422EF1F3DD674761FA99D6AC27C8A9A197B272822F6CD57A55AA4F50AE317B13545F", ++ "015D4860D088DDB3496B0C6064756260441CDE4AF1771D4DB01FFE5B34E59703DC255A868A1180515603AEAB60794E54BB7996A7", ++ "0061B1CFAB6BE5F32BBFA78324ED106A7636B9C5A7BD198D0158AA4F5488D08F38514F1FDF4B4F40D2181B3681C364BA0273C706", ++ "010000000000000000000000000000000000000000000000000001E2AAD6A612F33307BE5FA47C3C9E052F838164CD37D9A21173", ++ 2); ++ ++ add(KnownOIDs.sect571k1, BD, ++ "080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000425", ++ "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", ++ "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", ++ "026EB7A859923FBC82189631F8103FE4AC9CA2970012D5D46024804801841CA44370958493B205E647DA304DB4CEB08CBBD1BA39494776FB988B47174DCA88C7E2945283A01C8972", ++ "0349DC807F4FBF374F4AEADE3BCA95314DD58CEC9F307A54FFC61EFC006D8A2C9D4979C0AC44AEA74FBEBBB9F772AEDCB620B01A7BA7AF1B320430C8591984F601CD4C143EF1C7A3", ++ "020000000000000000000000000000000000000000000000000000000000000000000000131850E1F19A63E4B391A8DB917F4138B630D84BE5D639381E91DEB45CFE778F637C1001", ++ 4); ++ ++ add(KnownOIDs.sect571r1, B, ++ "080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000425", ++ 
"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", ++ "02F40E7E2221F295DE297117B7F3D62F5C6A97FFCB8CEFF1CD6BA8CE4A9A18AD84FFABBD8EFA59332BE7AD6756A66E294AFD185A78FF12AA520E4DE739BACA0C7FFEFF7F2955727A", ++ "0303001D34B856296C16C0D40D3CD7750A93D1D2955FA80AA5F40FC8DB7B2ABDBDE53950F4C0D293CDD711A35B67FB1499AE60038614F1394ABFA3B4C850D927E1E7769C8EEC2D19", ++ "037BF27342DA639B6DCCFFFEB73D69D78C6C27A6009CBBCA1980F8533921E8A684423E43BAB08A576291AF8F461BB2A8B3531D2F0485C19B16E2F1516E23DD3C1A4827AF1B8AC15B", ++ "03FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE661CE18FF55987308059B186823851EC7DD9CA1161DE93D5174D66E8382E9BB2FE84E47", ++ 2); ++ ++ /* ANSI X9.62 binary curves */ ++ add(KnownOIDs.c2tnb191v1, B, ++ "800000000000000000000000000000000000000000000201", ++ "2866537B676752636A68F56554E12640276B649EF7526267", ++ "2E45EF571F00786F67B0081B9495A3D95462F5DE0AA185EC", ++ "36B3DAF8A23206F9C4F299D7B21A9C369137F2C84AE1AA0D", ++ "765BE73433B3F95E332932E70EA245CA2418EA0EF98018FB", ++ "40000000000000000000000004A20E90C39067C893BBB9A5", ++ 2); ++ ++ add(KnownOIDs.c2tnb191v2, B, ++ "800000000000000000000000000000000000000000000201", ++ "401028774D7777C7B7666D1366EA432071274F89FF01E718", ++ "0620048D28BCBD03B6249C99182B7C8CD19700C362C46A01", ++ "3809B2B7CC1B28CC5A87926AAD83FD28789E81E2C9E3BF10", ++ "17434386626D14F3DBF01760D9213A3E1CF37AEC437D668A", ++ "20000000000000000000000050508CB89F652824E06B8173", ++ 4); ++ ++ add(KnownOIDs.c2tnb191v3, B, ++ "800000000000000000000000000000000000000000000201", ++ "6C01074756099122221056911C77D77E77A777E7E7E77FCB", ++ "71FE1AF926CF847989EFEF8DB459F66394D90F32AD3F15E8", ++ "375D4CE24FDE434489DE8746E71786015009E66E38A926DD", ++ "545A39176196575D985999366E6AD34CE0A77CD7127B06BE", ++ "155555555555555555555555610C0B196812BFB6288A3EA3", ++ 6); ++ ++ add(KnownOIDs.c2tnb239v1, B, ++ "800000000000000000000000000000000000000000000000001000000001", ++ "32010857077C5431123A46B808906756F543423E8D27877578125778AC76", ++ "790408F2EEDAF392B012EDEFB3392F30F4327C0CA3F31FC383C422AA8C16", ++ "57927098FA932E7C0A96D3FD5B706EF7E5F5C156E16B7E7C86038552E91D", ++ "61D8EE5077C33FECF6F1A16B268DE469C3C7744EA9A971649FC7A9616305", ++ "2000000000000000000000000000000F4D42FFE1492A4993F1CAD666E447", ++ 4); ++ ++ add(KnownOIDs.c2tnb239v2, B, ++ "800000000000000000000000000000000000000000000000001000000001", ++ "4230017757A767FAE42398569B746325D45313AF0766266479B75654E65F", ++ "5037EA654196CFF0CD82B2C14A2FCF2E3FF8775285B545722F03EACDB74B", ++ "28F9D04E900069C8DC47A08534FE76D2B900B7D7EF31F5709F200C4CA205", ++ "5667334C45AFF3B5A03BAD9DD75E2C71A99362567D5453F7FA6E227EC833", ++ "1555555555555555555555555555553C6F2885259C31E3FCDF154624522D", ++ 6); ++ ++ add(KnownOIDs.c2tnb239v3, B, ++ "800000000000000000000000000000000000000000000000001000000001", ++ "01238774666A67766D6676F778E676B66999176666E687666D8766C66A9F", ++ "6A941977BA9F6A435199ACFC51067ED587F519C5ECB541B8E44111DE1D40", ++ "70F6E9D04D289C4E89913CE3530BFDE903977D42B146D539BF1BDE4E9C92", ++ "2E5A0EAF6E5E1305B9004DCE5C0ED7FE59A35608F33837C816D80B79F461", ++ "0CCCCCCCCCCCCCCCCCCCCCCCCCCCCCAC4912D2D9DF903EF9888B8A0E4CFF", ++ 0xA); ++ ++ add(KnownOIDs.c2tnb359v1, B, ++ "800000000000000000000000000000000000000000000000000000000000000000000000100000000000000001", ++ "5667676A654B20754F356EA92017D946567C46675556F19556A04616B567D223A5E05656FB549016A96656A557", ++ 
"2472E2D0197C49363F1FE7F5B6DB075D52B6947D135D8CA445805D39BC345626089687742B6329E70680231988", ++ "3C258EF3047767E7EDE0F1FDAA79DAEE3841366A132E163ACED4ED2401DF9C6BDCDE98E8E707C07A2239B1B097", ++ "53D7E08529547048121E9C95F3791DD804963948F34FAE7BF44EA82365DC7868FE57E4AE2DE211305A407104BD", ++ "01AF286BCA1AF286BCA1AF286BCA1AF286BCA1AF286BC9FB8F6B85C556892C20A7EB964FE7719E74F490758D3B", ++ 0x4C); ++ ++ add(KnownOIDs.c2tnb431r1, B, ++ "800000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000001", ++ "1A827EF00DD6FC0E234CAF046C6A5D8A85395B236CC4AD2CF32A0CADBDC9DDF620B0EB9906D0957F6C6FEACD615468DF104DE296CD8F", ++ "10D9B4A3D9047D8B154359ABFB1B7F5485B04CEB868237DDC9DEDA982A679A5A919B626D4E50A8DD731B107A9962381FB5D807BF2618", ++ "120FC05D3C67A99DE161D2F4092622FECA701BE4F50F4758714E8A87BBF2A658EF8C21E7C5EFE965361F6C2999C0C247B0DBD70CE6B7", ++ "20D0AF8903A96F8D5FA2C255745D3C451B302C9346D9B7E485E7BCE41F6B591F3E8F6ADDCBB0BC4C2F947A7DE1A89B625D6A598B3760", ++ "0340340340340340340340340340340340340340340340340340340323C313FAB50589703B5EC68D3587FEC60D161CC149C1AD4A91", ++ 0x2760); ++ ++ /* ANSI X9.62 binary curves from the 1998 standard but forbidden ++ * in the 2005 version of the standard. ++ * We don't register them but leave them here for the time being in ++ * case we need to support them after all. ++ */ ++/* ++ add(KnownOIDs.c2pnb163v1, B, ++ "080000000000000000000000000000000000000107", ++ "072546B5435234A422E0789675F432C89435DE5242", ++ "00C9517D06D5240D3CFF38C74B20B6CD4D6F9DD4D9", ++ "07AF69989546103D79329FCC3D74880F33BBE803CB", ++ "01EC23211B5966ADEA1D3F87F7EA5848AEF0B7CA9F", ++ "0400000000000000000001E60FC8821CC74DAEAFC1", ++ 2); ++ ++ add(KnownOIDs.c2pnb163v2, B, ++ "080000000000000000000000000000000000000107", ++ "0108B39E77C4B108BED981ED0E890E117C511CF072", ++ "0667ACEB38AF4E488C407433FFAE4F1C811638DF20", ++ "0024266E4EB5106D0A964D92C4860E2671DB9B6CC5", ++ "079F684DDF6684C5CD258B3890021B2386DFD19FC5", ++ "03FFFFFFFFFFFFFFFFFFFDF64DE1151ADBB78F10A7", ++ 2); ++ ++ add(KnownOIDs.c2pnb163v3, B, ++ "080000000000000000000000000000000000000107", ++ "07A526C63D3E25A256A007699F5447E32AE456B50E", ++ "03F7061798EB99E238FD6F1BF95B48FEEB4854252B", ++ "02F9F87B7C574D0BDECF8A22E6524775F98CDEBDCB", ++ "05B935590C155E17EA48EB3FF3718B893DF59A05D0", ++ "03FFFFFFFFFFFFFFFFFFFE1AEE140F110AFF961309", ++ 2); ++ ++ add(KnownOIDs.c2pnb176w1, B, ++ "0100000000000000000000000000000000080000000007", ++ "E4E6DB2995065C407D9D39B8D0967B96704BA8E9C90B", ++ "5DDA470ABE6414DE8EC133AE28E9BBD7FCEC0AE0FFF2", ++ "8D16C2866798B600F9F08BB4A8E860F3298CE04A5798", ++ "6FA4539C2DADDDD6BAB5167D61B436E1D92BB16A562C", ++ "00010092537397ECA4F6145799D62B0A19CE06FE26AD", ++ 0xFF6E); ++ ++ add(KnownOIDs.c2pnb208w1, B, ++ "010000000000000000000000000000000800000000000000000007", ++ "0000000000000000000000000000000000000000000000000000", ++ "C8619ED45A62E6212E1160349E2BFA844439FAFC2A3FD1638F9E", ++ "89FDFBE4ABE193DF9559ECF07AC0CE78554E2784EB8C1ED1A57A", ++ "0F55B51A06E78E9AC38A035FF520D8B01781BEB1A6BB08617DE3", ++ "000101BAF95C9723C57B6C21DA2EFF2D5ED588BDD5717E212F9D", ++ 0xFE48); ++ ++ add(KnownOIDs.c2pnb272w1, B, ++ "010000000000000000000000000000000000000000000000000000010000000000000B", ++ "91A091F03B5FBA4AB2CCF49C4EDD220FB028712D42BE752B2C40094DBACDB586FB20", ++ "7167EFC92BB2E3CE7C8AAAFF34E12A9C557003D7C73A6FAF003F99F6CC8482E540F7", ++ "6108BABB2CEEBCF787058A056CBE0CFE622D7723A289E08A07AE13EF0D10D171DD8D", ++ "10C7695716851EEF6BA7F6872E6142FBD241B830FF5EFCACECCAB05E02005DDE9D23", ++ 
"000100FAF51354E0E39E4892DF6E319C72C8161603FA45AA7B998A167B8F1E629521", ++ 0xFF06); ++ ++ add(KnownOIDs.c2pnb304w1, B, ++ "010000000000000000000000000000000000000000000000000000000000000000000000000807", ++ "FD0D693149A118F651E6DCE6802085377E5F882D1B510B44160074C1288078365A0396C8E681", ++ "BDDB97E555A50A908E43B01C798EA5DAA6788F1EA2794EFCF57166B8C14039601E55827340BE", ++ "197B07845E9BE2D96ADB0F5F3C7F2CFFBD7A3EB8B6FEC35C7FD67F26DDF6285A644F740A2614", ++ "E19FBEB76E0DA171517ECF401B50289BF014103288527A9B416A105E80260B549FDC1B92C03B", ++ "000101D556572AABAC800101D556572AABAC8001022D5C91DD173F8FB561DA6899164443051D", ++ 0xFE2E); ++ ++ add(KnownOIDs.c2pnb368w1, B, ++ "0100000000000000000000000000000000000000000000000000000000000000000000002000000000000000000007", ++ "E0D2EE25095206F5E2A4F9ED229F1F256E79A0E2B455970D8D0D865BD94778C576D62F0AB7519CCD2A1A906AE30D", ++ "FC1217D4320A90452C760A58EDCD30C8DD069B3C34453837A34ED50CB54917E1C2112D84D164F444F8F74786046A", ++ "1085E2755381DCCCE3C1557AFA10C2F0C0C2825646C5B34A394CBCFA8BC16B22E7E789E927BE216F02E1FB136A5F", ++ "7B3EB1BDDCBA62D5D8B2059B525797FC73822C59059C623A45FF3843CEE8F87CD1855ADAA81E2A0750B80FDA2310", ++ "00010090512DA9AF72B08349D98A5DD4C7B0532ECA51CE03E2D10F3B7AC579BD87E909AE40A6F131E9CFCE5BD967", ++ 0xFF70); ++*/ ++ ++ /* ++ * Brainpool curves (RFC 5639) ++ * (Twisted curves are not included) ++ */ ++ ++ add(KnownOIDs.brainpoolP160r1, P, ++ "E95E4A5F737059DC60DFC7AD95B3D8139515620F", ++ "340E7BE2A280EB74E2BE61BADA745D97E8F7C300", ++ "1E589A8595423412134FAA2DBDEC95C8D8675E58", ++ "BED5AF16EA3F6A4F62938C4631EB5AF7BDBCDBC3", ++ "1667CB477A1A8EC338F94741669C976316DA6321", ++ "E95E4A5F737059DC60DF5991D45029409E60FC09", ++ 1); ++ ++ add(KnownOIDs.brainpoolP192r1, P, ++ "C302F41D932A36CDA7A3463093D18DB78FCE476DE1A86297", ++ "6A91174076B1E0E19C39C031FE8685C1CAE040E5C69A28EF", ++ "469A28EF7C28CCA3DC721D044F4496BCCA7EF4146FBF25C9", ++ "C0A0647EAAB6A48753B033C56CB0F0900A2F5C4853375FD6", ++ "14B690866ABD5BB88B5F4828C1490002E6773FA2FA299B8F", ++ "C302F41D932A36CDA7A3462F9E9E916B5BE8F1029AC4ACC1", ++ 1); ++ ++ add(KnownOIDs.brainpoolP224r1, P, ++ "D7C134AA264366862A18302575D1D787B09F075797DA89F57EC8C0FF", ++ "68A5E62CA9CE6C1C299803A6C1530B514E182AD8B0042A59CAD29F43", ++ "2580F63CCFE44138870713B1A92369E33E2135D266DBB372386C400B", ++ "0D9029AD2C7E5CF4340823B2A87DC68C9E4CE3174C1E6EFDEE12C07D", ++ "58AA56F772C0726F24C6B89E4ECDAC24354B9E99CAA3F6D3761402CD", ++ "D7C134AA264366862A18302575D0FB98D116BC4B6DDEBCA3A5A7939F", ++ 1); ++ ++ add(KnownOIDs.brainpoolP256r1, P, ++ "A9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377", ++ "7D5A0975FC2C3057EEF67530417AFFE7FB8055C126DC5C6CE94A4B44F330B5D9", ++ "26DC5C6CE94A4B44F330B5D9BBD77CBF958416295CF7E1CE6BCCDC18FF8C07B6", ++ "8BD2AEB9CB7E57CB2C4B482FFC81B7AFB9DE27E1E3BD23C23A4453BD9ACE3262", ++ "547EF835C3DAC4FD97F8461A14611DC9C27745132DED8E545C1D54C72F046997", ++ "A9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7", ++ 1); ++ ++ add(KnownOIDs.brainpoolP320r1, P, ++ "D35E472036BC4FB7E13C785ED201E065F98FCFA6F6F40DEF4F92B9EC7893EC28FCD412B1F1B32E27", ++ "3EE30B568FBAB0F883CCEBD46D3F3BB8A2A73513F5EB79DA66190EB085FFA9F492F375A97D860EB4", ++ "520883949DFDBC42D3AD198640688A6FE13F41349554B49ACC31DCCD884539816F5EB4AC8FB1F1A6", ++ "43BD7E9AFB53D8B85289BCC48EE5BFE6F20137D10A087EB6E7871E2A10A599C710AF8D0D39E20611", ++ "14FDD05545EC1CC8AB4093247F77275E0743FFED117182EAA9C77877AAAC6AC7D35245D1692E8EE1", ++ "D35E472036BC4FB7E13C785ED201E065F98FCFA5B68F12A32D482EC7EE8658E98691555B44C59311", ++ 1); ++ ++ 
add(KnownOIDs.brainpoolP384r1, P, ++ "8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123ACD3A729901D1A71874700133107EC53", ++ "7BC382C63D8C150C3C72080ACE05AFA0C2BEA28E4FB22787139165EFBA91F90F8AA5814A503AD4EB04A8C7DD22CE2826", ++ "04A8C7DD22CE28268B39B55416F0447C2FB77DE107DCD2A62E880EA53EEB62D57CB4390295DBC9943AB78696FA504C11", ++ "1D1C64F068CF45FFA2A63A81B7C13F6B8847A3E77EF14FE3DB7FCAFE0CBD10E8E826E03436D646AAEF87B2E247D4AF1E", ++ "8ABE1D7520F9C2A45CB1EB8E95CFD55262B70B29FEEC5864E19C054FF99129280E4646217791811142820341263C5315", ++ "8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B31F166E6CAC0425A7CF3AB6AF6B7FC3103B883202E9046565", ++ 1); ++ ++ add(KnownOIDs.brainpoolP512r1, P, ++ "AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA703308717D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F3", ++ "7830A3318B603B89E2327145AC234CC594CBDD8D3DF91610A83441CAEA9863BC2DED5D5AA8253AA10A2EF1C98B9AC8B57F1117A72BF2C7B9E7C1AC4D77FC94CA", ++ "3DF91610A83441CAEA9863BC2DED5D5AA8253AA10A2EF1C98B9AC8B57F1117A72BF2C7B9E7C1AC4D77FC94CADC083E67984050B75EBAE5DD2809BD638016F723", ++ "81AEE4BDD82ED9645A21322E9C4C6A9385ED9F70B5D916C1B43B62EEF4D0098EFF3B1F78E2D0D48D50D1687B93B97D5F7C6D5047406A5E688B352209BCB9F822", ++ "7DDE385D566332ECC0EABFA9CF7822FDF209F70024A57B1AA000C55B881F8111B2DCDE494A5F485E5BCA4BD88A2763AED1CA2B2FA8F0540678CD1E0F3AD80892", ++ "AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA70330870553E5C414CA92619418661197FAC10471DB1D381085DDADDB58796829CA90069", ++ 1); ++ ++ /* SM2 prime curves */ ++ add(KnownOIDs.sm2p256v1, P, ++ "FFFFFFFEFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF00000000FFFFFFFFFFFFFFFF", ++ "FFFFFFFEFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF00000000FFFFFFFFFFFFFFFC", ++ "28E9FA9E9D9F5E344D5A9E4BCF6509A7F39789F515AB8F92DDBCBD414D940E93", ++ "32C4AE2C1F1981195F9904466A39C9948FE30BBFF2660BE1715A4589334C74C7", ++ "BC3736A2F4F6779C59BDCEE36B692153D0A9877CC62A474002DF32E52139F0A0", ++ "FFFFFFFEFFFFFFFFFFFFFFFFFFFFFFFF7203DF6B21C6052B53BBF40939D54123", ++ 1); ++ ++ specCollection = Collections.unmodifiableCollection(oidMap.values()); ++ } ++} +\ No newline at end of file +diff --git a/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEECDHKeyAgreement.java b/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEECDHKeyAgreement.java +index 29dc09889..9f701c93e 100644 +--- a/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEECDHKeyAgreement.java ++++ b/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEECDHKeyAgreement.java +@@ -27,6 +27,7 @@ + package org.openeuler.security.openssl; + + import sun.security.ec.ECKeyFactory; ++import sun.security.util.NamedCurve; + + import java.math.BigInteger; + import java.security.InvalidAlgorithmParameterException; +@@ -95,7 +96,16 @@ public class KAEECDHKeyAgreement extends KeyAgreementSpi { + // Bits to bytes. + expectedSecretLen = (keyLenBits + 7) >> 3; + +- curveName = KAEUtils.getCurveBySize(keyLenBits); ++ // Using KAENamedCurve.name can be inaccurate. 
Use the ObjectId instead.
++        if (params instanceof KAENamedCurve) {
++            curveName = KAEUtils.getCurveByAlias(((KAENamedCurve) params).getObjectId());
++        } else if (params instanceof NamedCurve) {
++            curveName = KAEUtils.getCurveByAlias(((NamedCurve) params).getObjectId());
++        } else {
++            KAENamedCurve curve = KAECurveDB.lookup(params);
++            curveName = (curve == null) ? null
++                : KAEUtils.getCurveByAlias(curve.getObjectId());
++        }
++
+         if (curveName == null) {
+             throw new InvalidParameterException("unknown keyLenBits " + keyLenBits);
+         }
+diff --git a/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEECKeyFactory.java b/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEECKeyFactory.java
+new file mode 100644
+index 000000000..c02d868e3
+--- /dev/null
++++ b/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEECKeyFactory.java
+@@ -0,0 +1,308 @@
++/*
++ * Copyright (c) 2006, 2021, Oracle and/or its affiliates. All rights reserved.
++ * Copyright (c) 2023, Huawei Technologies Co., Ltd. All rights reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation. Oracle designates this
++ * particular file as subject to the "Classpath" exception as provided
++ * by Oracle in the LICENSE file that accompanied this code.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
++ * or visit www.oracle.com if you need additional information or have any
++ * questions.
++ */
++
++package org.openeuler.security.openssl;
++
++import java.util.Arrays;
++
++import java.security.*;
++import java.security.interfaces.*;
++import java.security.spec.*;
++
++/**
++ * KeyFactory for EC keys. Keys must be instances of PublicKey or PrivateKey
++ * and getAlgorithm() must return "EC". For such keys, it supports conversion
++ * between the following:
++ *
++ * For public keys:
++ *  . PublicKey with an X.509 encoding
++ *  . ECPublicKey
++ *  . ECPublicKeySpec
++ *  . X509EncodedKeySpec
++ *
++ * For private keys:
++ *  . PrivateKey with a PKCS#8 encoding
++ *  . ECPrivateKey
++ *  . ECPrivateKeySpec
++ *  . PKCS8EncodedKeySpec
++ *
++ * @since 1.6
++ * @author Andreas Sterbenz
++ */
++public final class KAEECKeyFactory extends KeyFactorySpi {
++
++    // Used by translateKey()
++    private static KeyFactory instance;
++
++    private static KeyFactory getInstance() {
++        if (instance == null) {
++            try {
++                instance = KeyFactory.getInstance("EC", "KAEProvider");
++            } catch (NoSuchProviderException e) {
++                throw new RuntimeException(e);
++            } catch (NoSuchAlgorithmException e) {
++                throw new RuntimeException(e);
++            }
++        }
++
++        return instance;
++    }
++
++    public KAEECKeyFactory() {
++        // empty
++    }
++
++    /**
++     * Static method to convert Key into a usable instance of
++     * ECPublicKey or ECPrivateKey.
Check the key and convert it ++ * to a Sun key if necessary. If the key is not an EC key ++ * or cannot be used, throw an InvalidKeyException. ++ * ++ * The difference between this method and engineTranslateKey() is that ++ * we do not convert keys of other providers that are already an ++ * instance of ECPublicKey or ECPrivateKey. ++ * ++ * To be used by future Java ECDSA and ECDH implementations. ++ */ ++ public static ECKey toECKey(Key key) throws InvalidKeyException { ++ if (key instanceof ECKey) { ++ ECKey ecKey = (ECKey)key; ++ checkKey(ecKey); ++ return ecKey; ++ } else { ++ /* ++ * We don't call the engineTranslateKey method directly ++ * because KeyFactory.translateKey adds code to loop through ++ * all key factories. ++ */ ++ return (ECKey)getInstance().translateKey(key); ++ } ++ } ++ ++ /** ++ * Check that the given EC key is valid. ++ */ ++ private static void checkKey(ECKey key) throws InvalidKeyException { ++ // check for subinterfaces, omit additional checks for our keys ++ if (key instanceof ECPublicKey) { ++ if (key instanceof KAEECPublicKeyImpl) { ++ return; ++ } ++ } else if (key instanceof ECPrivateKey) { ++ if (key instanceof KAEECPrivateKeyImpl) { ++ return; ++ } ++ } else { ++ throw new InvalidKeyException("Neither a public nor a private key"); ++ } ++ // ECKey does not extend Key, so we need to do a cast ++ String keyAlg = ((Key)key).getAlgorithm(); ++ if (!keyAlg.equals("EC") && !keyAlg.equals("SM2")) { ++ throw new InvalidKeyException("Not an EC key: " + keyAlg); ++ } ++ // XXX further sanity checks about whether this key uses supported ++ // fields, point formats, etc. would go here ++ } ++ ++ /** ++ * Translate an EC key into a Sun EC key. If conversion is ++ * not possible, throw an InvalidKeyException. ++ * See also JCA doc. ++ */ ++ protected Key engineTranslateKey(Key key) throws InvalidKeyException { ++ if (key == null) { ++ throw new InvalidKeyException("Key must not be null"); ++ } ++ String keyAlg = key.getAlgorithm(); ++ if (!keyAlg.equals("EC") && !keyAlg.equals("SM2")) { ++ throw new InvalidKeyException("Not an EC key: " + keyAlg); ++ } ++ if (key instanceof PublicKey) { ++ return implTranslatePublicKey((PublicKey)key); ++ } else if (key instanceof PrivateKey) { ++ return implTranslatePrivateKey((PrivateKey)key); ++ } else { ++ throw new InvalidKeyException("Neither a public nor a private key"); ++ } ++ } ++ ++ // see JCA doc ++ protected PublicKey engineGeneratePublic(KeySpec keySpec) ++ throws InvalidKeySpecException { ++ try { ++ return implGeneratePublic(keySpec); ++ } catch (InvalidKeySpecException e) { ++ throw e; ++ } catch (GeneralSecurityException e) { ++ throw new InvalidKeySpecException(e); ++ } ++ } ++ ++ // see JCA doc ++ protected PrivateKey engineGeneratePrivate(KeySpec keySpec) ++ throws InvalidKeySpecException { ++ try { ++ return implGeneratePrivate(keySpec); ++ } catch (InvalidKeySpecException e) { ++ throw e; ++ } catch (GeneralSecurityException e) { ++ throw new InvalidKeySpecException(e); ++ } ++ } ++ ++ // internal implementation of translateKey() for public keys. 
See JCA doc ++ private PublicKey implTranslatePublicKey(PublicKey key) ++ throws InvalidKeyException { ++ if (key instanceof ECPublicKey) { ++ if (key instanceof KAEECPublicKeyImpl) { ++ return key; ++ } ++ ECPublicKey ecKey = (ECPublicKey)key; ++ return new KAEECPublicKeyImpl( ++ ecKey.getW(), ++ ecKey.getParams() ++ ); ++ } else if ("X.509".equals(key.getFormat())) { ++ byte[] encoded = key.getEncoded(); ++ return new KAEECPublicKeyImpl(encoded); ++ } else { ++ throw new InvalidKeyException("Public keys must be instance " ++ + "of ECPublicKey or have X.509 encoding"); ++ } ++ } ++ ++ // internal implementation of translateKey() for private keys. See JCA doc ++ private PrivateKey implTranslatePrivateKey(PrivateKey key) ++ throws InvalidKeyException { ++ if (key instanceof ECPrivateKey) { ++ if (key instanceof KAEECPrivateKeyImpl) { ++ return key; ++ } ++ ECPrivateKey ecKey = (ECPrivateKey)key; ++ return new KAEECPrivateKeyImpl( ++ ecKey.getS(), ++ ecKey.getParams() ++ ); ++ } else if ("PKCS#8".equals(key.getFormat())) { ++ byte[] encoded = key.getEncoded(); ++ try { ++ return new KAEECPrivateKeyImpl(encoded); ++ } finally { ++ Arrays.fill(encoded, (byte)0); ++ } ++ } else { ++ throw new InvalidKeyException("Private keys must be instance " ++ + "of ECPrivateKey or have PKCS#8 encoding"); ++ } ++ } ++ ++ // internal implementation of generatePublic. See JCA doc ++ private PublicKey implGeneratePublic(KeySpec keySpec) ++ throws GeneralSecurityException { ++ if (keySpec instanceof X509EncodedKeySpec) { ++ X509EncodedKeySpec x509Spec = (X509EncodedKeySpec)keySpec; ++ return new KAEECPublicKeyImpl(x509Spec.getEncoded()); ++ } else if (keySpec instanceof ECPublicKeySpec) { ++ ECPublicKeySpec ecSpec = (ECPublicKeySpec)keySpec; ++ return new KAEECPublicKeyImpl( ++ ecSpec.getW(), ++ ecSpec.getParams() ++ ); ++ } else { ++ throw new InvalidKeySpecException("Only ECPublicKeySpec " ++ + "and X509EncodedKeySpec supported for EC public keys"); ++ } ++ } ++ ++ // internal implementation of generatePrivate. 
See JCA doc
++    private PrivateKey implGeneratePrivate(KeySpec keySpec)
++            throws GeneralSecurityException {
++        if (keySpec instanceof PKCS8EncodedKeySpec) {
++            PKCS8EncodedKeySpec pkcsSpec = (PKCS8EncodedKeySpec)keySpec;
++            byte[] encoded = pkcsSpec.getEncoded();
++            try {
++                return new KAEECPrivateKeyImpl(encoded);
++            } finally {
++                Arrays.fill(encoded, (byte) 0);
++            }
++        } else if (keySpec instanceof ECPrivateKeySpec) {
++            ECPrivateKeySpec ecSpec = (ECPrivateKeySpec)keySpec;
++            return new KAEECPrivateKeyImpl(ecSpec.getS(), ecSpec.getParams());
++        } else {
++            throw new InvalidKeySpecException("Only ECPrivateKeySpec "
++                + "and PKCS8EncodedKeySpec supported for EC private keys");
++        }
++    }
++
++    protected <T extends KeySpec> T engineGetKeySpec(Key key, Class<T> keySpec)
++            throws InvalidKeySpecException {
++        try {
++            // convert key to one of our keys
++            // this also verifies that the key is a valid EC key and ensures
++            // that the encoding is X.509/PKCS#8 for public/private keys
++            key = engineTranslateKey(key);
++        } catch (InvalidKeyException e) {
++            throw new InvalidKeySpecException(e);
++        }
++        if (key instanceof ECPublicKey) {
++            ECPublicKey ecKey = (ECPublicKey)key;
++            if (keySpec.isAssignableFrom(ECPublicKeySpec.class)) {
++                return keySpec.cast(new ECPublicKeySpec(
++                    ecKey.getW(),
++                    ecKey.getParams()
++                ));
++            } else if (keySpec.isAssignableFrom(X509EncodedKeySpec.class)) {
++                return keySpec.cast(new X509EncodedKeySpec(key.getEncoded()));
++            } else {
++                throw new InvalidKeySpecException
++                    ("KeySpec must be ECPublicKeySpec or "
++                    + "X509EncodedKeySpec for EC public keys");
++            }
++        } else if (key instanceof ECPrivateKey) {
++            if (keySpec.isAssignableFrom(PKCS8EncodedKeySpec.class)) {
++                byte[] encoded = key.getEncoded();
++                try {
++                    return keySpec.cast(new PKCS8EncodedKeySpec(encoded));
++                } finally {
++                    Arrays.fill(encoded, (byte)0);
++                }
++            } else if (keySpec.isAssignableFrom(ECPrivateKeySpec.class)) {
++                ECPrivateKey ecKey = (ECPrivateKey)key;
++                return keySpec.cast(new ECPrivateKeySpec(
++                    ecKey.getS(),
++                    ecKey.getParams()
++                ));
++            } else {
++                throw new InvalidKeySpecException
++                    ("KeySpec must be ECPrivateKeySpec or "
++                    + "PKCS8EncodedKeySpec for EC private keys");
++            }
++        } else {
++            // should not occur, caught in engineTranslateKey()
++            throw new InvalidKeySpecException("Neither public nor private key");
++        }
++    }
++}
+\ No newline at end of file
+diff --git a/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEECKeyPairGenerator.java b/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEECKeyPairGenerator.java
+index 73d8551b1..42da07282 100644
+--- a/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEECKeyPairGenerator.java
++++ b/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEECKeyPairGenerator.java
+@@ -26,9 +26,6 @@
+ 
+ package org.openeuler.security.openssl;
+ 
+-import sun.security.ec.ECPrivateKeyImpl;
+-import sun.security.ec.ECPublicKeyImpl;
+-
+ import java.math.BigInteger;
+ import java.security.InvalidAlgorithmParameterException;
+ import java.security.InvalidParameterException;
+@@ -63,7 +60,7 @@ public class KAEECKeyPairGenerator extends KeyPairGeneratorSpi {
+         this.param = getParamsByCurve(curveName);
+     }
+ 
+-    private ECParameterSpec getParamsByCurve(String curveName) {
++    protected ECParameterSpec getParamsByCurve(String curveName) {
+         byte[][] params = nativeGenerateParam(curveName);
+         if (params == null) {
+             throw new InvalidParameterException("unknown curve " + curveName);
+@@
-125,18 +122,18 @@ public class KAEECKeyPairGenerator extends KeyPairGeneratorSpi { + BigInteger s = new BigInteger(keys[2]); + ECPoint w = new ECPoint(wX, wY); + +- ECPrivateKeyImpl privateKey = null; +- ECPublicKeyImpl publicKey = null; ++ KAEECPrivateKeyImpl privateKey; ++ KAEECPublicKeyImpl publicKey; + try { +- Class pubKeyImpl = Class.forName("sun.security.ec.ECPublicKeyImpl"); ++ Class pubKeyImpl = Class.forName("org.openeuler.security.openssl.KAEECPublicKeyImpl"); + Constructor conPubKeyImpl = pubKeyImpl.getDeclaredConstructor(ECPoint.class, ECParameterSpec.class); + conPubKeyImpl.setAccessible(true); +- publicKey = (ECPublicKeyImpl) conPubKeyImpl.newInstance(w, param); ++ publicKey = (KAEECPublicKeyImpl) conPubKeyImpl.newInstance(w, param); + +- Class priKeyImpl = Class.forName("sun.security.ec.ECPrivateKeyImpl"); ++ Class priKeyImpl = Class.forName("org.openeuler.security.openssl.KAEECPrivateKeyImpl"); + Constructor conPriKeyImpl = priKeyImpl.getDeclaredConstructor(BigInteger.class, ECParameterSpec.class); + conPriKeyImpl.setAccessible(true); +- privateKey = (ECPrivateKeyImpl) conPriKeyImpl.newInstance(s, param); ++ privateKey = (KAEECPrivateKeyImpl) conPriKeyImpl.newInstance(s, param); + } catch (ClassNotFoundException | NoSuchMethodException | InstantiationException | + IllegalAccessException | InvocationTargetException e) { + throw new ProviderException(e); +diff --git a/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEECParameters.java b/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEECParameters.java +new file mode 100644 +index 000000000..948106ef9 +--- /dev/null ++++ b/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEECParameters.java +@@ -0,0 +1,240 @@ ++/* ++ * Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2023, Huawei Technologies Co., Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. Oracle designates this ++ * particular file as subject to the "Classpath" exception as provided ++ * by Oracle in the LICENSE file that accompanied this code. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ */ ++ ++package org.openeuler.security.openssl; ++ ++import sun.security.util.DerValue; ++import sun.security.util.ECKeySizeParameterSpec; ++import sun.security.util.ObjectIdentifier; ++ ++import java.io.IOException; ++ ++import java.security.*; ++import java.security.spec.*; ++ ++/** ++ * This class implements encoding and decoding of Elliptic Curve parameters ++ * as specified in RFC 3279. 
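++ *
++ * For illustration only: assuming the KAEProvider is registered, parameters
++ * would normally be obtained through the standard AlgorithmParameters API,
++ * for example
++ *
++ *     AlgorithmParameters ap =
++ *         AlgorithmParameters.getInstance("EC", "KAEProvider");
++ *     ap.init(new ECGenParameterSpec("secp256r1"));
++ *     ECParameterSpec spec = ap.getParameterSpec(ECParameterSpec.class);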
++ * ++ * However, only named curves are currently supported. ++ * ++ * ASN.1 from RFC 3279 follows. Note that X9.62 (2005) has added some additional ++ * options. ++ * ++ *

++ *    EcpkParameters ::= CHOICE {
++ *      ecParameters  ECParameters,
++ *      namedCurve    OBJECT IDENTIFIER,
++ *      implicitlyCA  NULL }
++ *
++ *    ECParameters ::= SEQUENCE {
++ *       version   ECPVer,          -- version is always 1
++ *       fieldID   FieldID,         -- identifies the finite field over
++ *                                  -- which the curve is defined
++ *       curve     Curve,           -- coefficients a and b of the
++ *                                  -- elliptic curve
++ *       base      ECPoint,         -- specifies the base point P
++ *                                  -- on the elliptic curve
++ *       order     INTEGER,         -- the order n of the base point
++ *       cofactor  INTEGER OPTIONAL -- The integer h = #E(Fq)/n
++ *       }
++ *
++ *    ECPVer ::= INTEGER {ecpVer1(1)}
++ *
++ *    Curve ::= SEQUENCE {
++ *       a         FieldElement,
++ *       b         FieldElement,
++ *       seed      BIT STRING OPTIONAL }
++ *
++ *    FieldElement ::= OCTET STRING
++ *
++ *    ECPoint ::= OCTET STRING
++ * 
++ * ++ * @since 1.6 ++ * @author Andreas Sterbenz ++ */ ++public final class KAEECParameters extends AlgorithmParametersSpi { ++ ++ // used by ECPublicKeyImpl and ECPrivateKeyImpl ++ public static AlgorithmParameters getAlgorithmParameters(ECParameterSpec spec) ++ throws InvalidKeyException { ++ try { ++ AlgorithmParameters params = ++ AlgorithmParameters.getInstance("EC", "KAEProvider"); ++ params.init(spec); ++ return params; ++ } catch (GeneralSecurityException e) { ++ throw new InvalidKeyException("EC parameters error", e); ++ } ++ } ++ ++ /* ++ * The parameters these AlgorithmParameters object represents. ++ * Currently, it is always an instance of NamedCurve. ++ */ ++ private KAENamedCurve namedCurve; ++ ++ // A public constructor is required by AlgorithmParameters class. ++ public KAEECParameters() { ++ // empty ++ } ++ ++ // AlgorithmParameterSpi methods ++ ++ protected void engineInit(AlgorithmParameterSpec paramSpec) ++ throws InvalidParameterSpecException { ++ ++ if (paramSpec == null) { ++ throw new InvalidParameterSpecException ++ ("paramSpec must not be null"); ++ } ++ ++ if (paramSpec instanceof KAENamedCurve) { ++ namedCurve = (KAENamedCurve)paramSpec; ++ return; ++ } ++ ++ if (paramSpec instanceof ECParameterSpec) { ++ namedCurve = KAECurveDB.lookup((ECParameterSpec)paramSpec); ++ } else if (paramSpec instanceof ECGenParameterSpec) { ++ String name = ((ECGenParameterSpec)paramSpec).getName(); ++ namedCurve = KAECurveDB.lookup(name); ++ } else if (paramSpec instanceof ECKeySizeParameterSpec) { ++ int keySize = ((ECKeySizeParameterSpec)paramSpec).getKeySize(); ++ namedCurve = KAECurveDB.lookup(keySize); ++ } else { ++ throw new InvalidParameterSpecException ++ ("Only ECParameterSpec and ECGenParameterSpec supported"); ++ } ++ ++ if (namedCurve == null) { ++ throw new InvalidParameterSpecException( ++ "Not a supported curve: " + paramSpec); ++ } ++ } ++ ++ protected void engineInit(byte[] params) throws IOException { ++ DerValue encodedParams = new DerValue(params); ++ if (encodedParams.tag == DerValue.tag_ObjectId) { ++ ObjectIdentifier oid = encodedParams.getOID(); ++ KAENamedCurve spec = KAECurveDB.lookup(oid.toString()); ++ if (spec == null) { ++ throw new IOException("Unknown named curve: " + oid); ++ } ++ ++ namedCurve = spec; ++ return; ++ } ++ ++ throw new IOException("Only named ECParameters supported"); ++ ++ // The code below is incomplete. ++ // It is left as a starting point for a complete parsing implementation. 
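++        // (If completed, it would decode the explicit ECParameters SEQUENCE
++        // form shown in the class javadoc: version, field, curve
++        // coefficients, base point, order and optional cofactor.)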
++
++/*
++        if (encodedParams.tag != DerValue.tag_Sequence) {
++            throw new IOException("Unsupported EC parameters, tag: " +
++                encodedParams.tag);
++        }
++
++        encodedParams.data.reset();
++
++        DerInputStream in = encodedParams.data;
++
++        int version = in.getInteger();
++        if (version != 1) {
++            throw new IOException("Unsupported EC parameters version: " +
++                version);
++        }
++        ECField field = parseField(in);
++        EllipticCurve curve = parseCurve(in, field);
++        ECPoint point = parsePoint(in, curve);
++
++        BigInteger order = in.getBigInteger();
++        int cofactor = 0;
++
++        if (in.available() != 0) {
++            cofactor = in.getInteger();
++        }
++
++        // XXX HashAlgorithm optional
++
++        if (encodedParams.data.available() != 0) {
++            throw new IOException("encoded params have " +
++                encodedParams.data.available() +
++                " extra bytes");
++        }
++
++        return new ECParameterSpec(curve, point, order, cofactor);
++*/
++    }
++
++    protected void engineInit(byte[] params, String decodingMethod)
++            throws IOException {
++        engineInit(params);
++    }
++
++    protected <T extends AlgorithmParameterSpec> T
++        engineGetParameterSpec(Class<T> spec)
++        throws InvalidParameterSpecException {
++
++        if (spec.isAssignableFrom(ECParameterSpec.class)) {
++            return spec.cast(namedCurve);
++        }
++
++        if (spec.isAssignableFrom(ECGenParameterSpec.class)) {
++            // Ensure the name is the Object ID
++            String name = namedCurve.getObjectId();
++            return spec.cast(new ECGenParameterSpec(name));
++        }
++
++        if (spec.isAssignableFrom(ECKeySizeParameterSpec.class)) {
++            int keySize = namedCurve.getCurve().getField().getFieldSize();
++            return spec.cast(new ECKeySizeParameterSpec(keySize));
++        }
++
++        throw new InvalidParameterSpecException(
++            "Only ECParameterSpec and ECGenParameterSpec supported");
++    }
++
++    protected byte[] engineGetEncoded() throws IOException {
++        return namedCurve.getEncoded();
++    }
++
++    protected byte[] engineGetEncoded(String encodingMethod)
++            throws IOException {
++        return engineGetEncoded();
++    }
++
++    protected String engineToString() {
++        if (namedCurve == null) {
++            return "Not initialized";
++        }
++
++        return namedCurve.toString();
++    }
++}
+diff --git a/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEECPrivateKeyImpl.java b/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEECPrivateKeyImpl.java
+new file mode 100644
+index 000000000..23ea8f90d
+--- /dev/null
++++ b/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEECPrivateKeyImpl.java
+@@ -0,0 +1,214 @@
++/*
++ * Copyright (c) 2006, 2018, Oracle and/or its affiliates. All rights reserved.
++ * Copyright (c) 2023, Huawei Technologies Co., Ltd. All rights reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation. Oracle designates this
++ * particular file as subject to the "Classpath" exception as provided
++ * by Oracle in the LICENSE file that accompanied this code.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ */ ++ ++package org.openeuler.security.openssl; ++ ++import java.util.Arrays; ++ ++import java.io.IOException; ++import java.math.BigInteger; ++ ++import java.security.*; ++import java.security.interfaces.*; ++import java.security.spec.*; ++ ++import sun.security.util.ECUtil; ++import sun.security.util.ArrayUtil; ++import sun.security.util.DerInputStream; ++import sun.security.util.DerOutputStream; ++import sun.security.util.DerValue; ++import sun.security.x509.AlgorithmId; ++import sun.security.pkcs.PKCS8Key; ++ ++/** ++ * Key implementation for EC private keys. ++ *
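++ * A minimal usage sketch, assuming the KAEProvider is registered and
++ * pkcs8Bytes (an illustrative name) holds a DER-encoded PKCS#8 key:
++ *
++ *     KeyFactory kf = KeyFactory.getInstance("EC", "KAEProvider");
++ *     ECPrivateKey priv = (ECPrivateKey) kf.generatePrivate(
++ *         new PKCS8EncodedKeySpec(pkcs8Bytes));
++ *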

++ * ASN.1 syntax for EC private keys from SEC 1 v1.5 (draft): ++ * ++ *

++ * EXPLICIT TAGS
++ *
++ * ECPrivateKey ::= SEQUENCE {
++ *   version INTEGER { ecPrivkeyVer1(1) } (ecPrivkeyVer1),
++ *   privateKey OCTET STRING,
++ *   parameters [0] ECDomainParameters {{ SECGCurveNames }} OPTIONAL,
++ *   publicKey [1] BIT STRING OPTIONAL
++ * }
++ * 
++ * ++ * We currently ignore the optional parameters and publicKey fields. We ++ * require that the parameters are encoded as part of the AlgorithmIdentifier, ++ * not in the private key structure. ++ * ++ * @since 1.6 ++ * @author Andreas Sterbenz ++ */ ++@SuppressWarnings("serial") ++public final class KAEECPrivateKeyImpl extends PKCS8Key implements ECPrivateKey { ++ ++ private static final long serialVersionUID = 88695385615075129L; ++ ++ private BigInteger s; // private value ++ private byte[] arrayS; // private value as a little-endian array ++ private ECParameterSpec params; ++ ++ /** ++ * Construct a key from its encoding. Called by the ECKeyFactory. ++ */ ++ public KAEECPrivateKeyImpl(byte[] encoded) throws InvalidKeyException { ++ super(encoded); ++ parseKeyBits(); ++ } ++ ++ /** ++ * Construct a key from its components. Used by the ++ * KeyFactory. ++ */ ++ public KAEECPrivateKeyImpl(BigInteger s, ECParameterSpec params) ++ throws InvalidKeyException { ++ this.s = s; ++ this.params = params; ++ makeEncoding(s); ++ ++ } ++ ++ KAEECPrivateKeyImpl(byte[] s, ECParameterSpec params) ++ throws InvalidKeyException { ++ this.arrayS = s.clone(); ++ this.params = params; ++ makeEncoding(s); ++ } ++ ++ private void makeEncoding(byte[] s) throws InvalidKeyException { ++ algid = new AlgorithmId ++ (AlgorithmId.EC_oid, KAEECParameters.getAlgorithmParameters(params)); ++ try { ++ DerOutputStream out = new DerOutputStream(); ++ out.putInteger(1); // version 1 ++ byte[] privBytes = s.clone(); ++ ArrayUtil.reverse(privBytes); ++ out.putOctetString(privBytes); ++ Arrays.fill(privBytes, (byte)0); ++ DerValue val = DerValue.wrap(DerValue.tag_Sequence, out); ++ key = val.toByteArray(); ++ val.clear(); ++ } catch (Exception exc) { ++ // should never occur ++ throw new InvalidKeyException(exc); ++ } ++ } ++ ++ private void makeEncoding(BigInteger s) throws InvalidKeyException { ++ algid = new AlgorithmId ++ (AlgorithmId.EC_oid, KAEECParameters.getAlgorithmParameters(params)); ++ try { ++ byte[] sArr = s.toByteArray(); ++ // convert to fixed-length array ++ int numOctets = (params.getOrder().bitLength() + 7) / 8; ++ byte[] sOctets = new byte[numOctets]; ++ int inPos = Math.max(sArr.length - sOctets.length, 0); ++ int outPos = Math.max(sOctets.length - sArr.length, 0); ++ int length = Math.min(sArr.length, sOctets.length); ++ System.arraycopy(sArr, inPos, sOctets, outPos, length); ++ Arrays.fill(sArr, (byte)0); ++ DerOutputStream out = new DerOutputStream(); ++ out.putInteger(1); // version 1 ++ out.putOctetString(sOctets); ++ Arrays.fill(sOctets, (byte)0); ++ DerValue val = DerValue.wrap(DerValue.tag_Sequence, out); ++ key = val.toByteArray(); ++ val.clear(); ++ } catch (Exception exc) { ++ throw new AssertionError("Should not happen", exc); ++ } ++ } ++ ++ // see JCA doc ++ public String getAlgorithm() { ++ return "EC"; ++ } ++ ++ // see JCA doc ++ public BigInteger getS() { ++ if (s == null) { ++ byte[] arrCopy = arrayS.clone(); ++ ArrayUtil.reverse(arrCopy); ++ s = new BigInteger(1, arrCopy); ++ Arrays.fill(arrCopy, (byte)0); ++ } ++ return s; ++ } ++ ++ public byte[] getArrayS() { ++ if (arrayS == null) { ++ arrayS = ECUtil.sArray(getS(), params); ++ } ++ return arrayS.clone(); ++ } ++ ++ // see JCA doc ++ public ECParameterSpec getParams() { ++ return params; ++ } ++ ++ private void parseKeyBits() throws InvalidKeyException { ++ try { ++ DerInputStream in = new DerInputStream(key); ++ DerValue derValue = in.getDerValue(); ++ if (derValue.tag != DerValue.tag_Sequence) { ++ throw new IOException("Not 
a SEQUENCE"); ++ } ++ DerInputStream data = derValue.data; ++ int version = data.getInteger(); ++ if (version != 1) { ++ throw new IOException("Version must be 1"); ++ } ++ byte[] privData = data.getOctetString(); ++ ArrayUtil.reverse(privData); ++ arrayS = privData; ++ while (data.available() != 0) { ++ DerValue value = data.getDerValue(); ++ if (value.isContextSpecific((byte) 0)) { ++ // ignore for now ++ } else if (value.isContextSpecific((byte) 1)) { ++ // ignore for now ++ } else { ++ throw new InvalidKeyException("Unexpected value: " + value); ++ } ++ } ++ AlgorithmParameters algParams = this.algid.getParameters(); ++ if (algParams == null) { ++ throw new InvalidKeyException("EC domain parameters must be " ++ + "encoded in the algorithm identifier"); ++ } ++ params = algParams.getParameterSpec(ECParameterSpec.class); ++ } catch (IOException e) { ++ throw new InvalidKeyException("Invalid EC private key", e); ++ } catch (InvalidParameterSpecException e) { ++ throw new InvalidKeyException("Invalid EC private key", e); ++ } ++ } ++} +\ No newline at end of file +diff --git a/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEECPublicKeyImpl.java b/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEECPublicKeyImpl.java +new file mode 100644 +index 000000000..99e6e7517 +--- /dev/null ++++ b/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEECPublicKeyImpl.java +@@ -0,0 +1,132 @@ ++/* ++ * Copyright (c) 2006, 2023, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2023, Huawei Technologies Co., Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. Oracle designates this ++ * particular file as subject to the "Classpath" exception as provided ++ * by Oracle in the LICENSE file that accompanied this code. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ */ ++ ++package org.openeuler.security.openssl; ++ ++import java.io.IOException; ++ ++import java.security.*; ++import java.security.interfaces.*; ++import java.security.spec.*; ++ ++import sun.security.util.ECUtil; ++ ++import sun.security.x509.*; ++ ++/** ++ * Key implementation for EC public keys. ++ * ++ * @since 1.6 ++ * @author Andreas Sterbenz ++ */ ++@SuppressWarnings("serial") ++public final class KAEECPublicKeyImpl extends X509Key implements ECPublicKey { ++ ++ private static final long serialVersionUID = -2462037275160462289L; ++ ++ private ECPoint w; ++ private ECParameterSpec params; ++ ++ /** ++ * Construct a key from its components. Used by the ++ * ECKeyFactory. 
++ */ ++ @SuppressWarnings("deprecation") ++ public KAEECPublicKeyImpl(ECPoint w, ECParameterSpec params) ++ throws InvalidKeyException { ++ this.w = w; ++ this.params = params; ++ // generate the encoding ++ algid = new AlgorithmId ++ (AlgorithmId.EC_oid, KAEECParameters.getAlgorithmParameters(params)); ++ key = ECUtil.encodePoint(w, params.getCurve()); ++ } ++ ++ /** ++ * Construct a key from its encoding. ++ */ ++ public KAEECPublicKeyImpl(byte[] encoded) throws InvalidKeyException { ++ decode(encoded); ++ } ++ ++ // see JCA doc ++ public String getAlgorithm() { ++ return "EC"; ++ } ++ ++ // see JCA doc ++ public ECPoint getW() { ++ return w; ++ } ++ ++ // see JCA doc ++ public ECParameterSpec getParams() { ++ return params; ++ } ++ ++ // Internal API to get the encoded point. Currently used by SunPKCS11. ++ // This may change/go away depending on what we do with the public API. ++ @SuppressWarnings("deprecation") ++ public byte[] getEncodedPublicValue() { ++ return key.clone(); ++ } ++ ++ /** ++ * Parse the key. Called by X509Key. ++ */ ++ @SuppressWarnings("deprecation") ++ protected void parseKeyBits() throws InvalidKeyException { ++ AlgorithmParameters algParams = this.algid.getParameters(); ++ if (algParams == null) { ++ throw new InvalidKeyException("EC domain parameters must be " + ++ "encoded in the algorithm identifier"); ++ } ++ ++ try { ++ params = algParams.getParameterSpec(ECParameterSpec.class); ++ w = ECUtil.decodePoint(key, params.getCurve()); ++ } catch (IOException e) { ++ throw new InvalidKeyException("Invalid EC key", e); ++ } catch (InvalidParameterSpecException e) { ++ throw new InvalidKeyException("Invalid EC key", e); ++ } ++ } ++ ++ // return a string representation of this key for debugging ++ public String toString() { ++ return "Sun EC public key, " + params.getCurve().getField().getFieldSize() ++ + " bits\n public x coord: " + w.getAffineX() ++ + "\n public y coord: " + w.getAffineY() ++ + "\n parameters: " + params; ++ } ++ ++ private Object writeReplace() throws java.io.ObjectStreamException { ++ return new KeyRep(KeyRep.Type.PUBLIC, ++ getAlgorithm(), ++ getFormat(), ++ getEncoded()); ++ } ++} +\ No newline at end of file +diff --git a/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAENamedCurve.java b/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAENamedCurve.java +new file mode 100644 +index 000000000..e71c1c274 +--- /dev/null ++++ b/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAENamedCurve.java +@@ -0,0 +1,100 @@ ++/* ++ * Copyright (c) 2006, 2020, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2023, Huawei Technologies Co., Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. Oracle designates this ++ * particular file as subject to the "Classpath" exception as provided ++ * by Oracle in the LICENSE file that accompanied this code. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). 
++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ */ ++ ++package org.openeuler.security.openssl; ++ ++import sun.security.util.DerOutputStream; ++import sun.security.util.ObjectIdentifier; ++import sun.security.util.KnownOIDs; ++ ++import java.io.IOException; ++import java.math.BigInteger; ++ ++import java.security.spec.*; ++ ++/** ++ * Contains Elliptic Curve parameters. ++ * ++ * @since 1.6 ++ * @author Andreas Sterbenz ++ */ ++public final class KAENamedCurve extends ECParameterSpec { ++ // friendly names with stdName followed by aliases ++ private final String[] nameAndAliases; ++ ++ // well known OID ++ private final String oid; ++ ++ // encoded form (as NamedCurve identified via OID) ++ private final byte[] encoded; ++ ++ KAENamedCurve(KnownOIDs ko, EllipticCurve curve, ++ ECPoint g, BigInteger n, int h) { ++ super(curve, g, n, h); ++ String[] aliases = ko.aliases(); ++ this.nameAndAliases = new String[aliases.length + 1]; ++ nameAndAliases[0] = ko.stdName(); ++ System.arraycopy(aliases, 0, nameAndAliases, 1, aliases.length); ++ ++ this.oid = ko.value(); ++ ++ DerOutputStream out = new DerOutputStream(); ++ try { ++ out.putOID(ObjectIdentifier.of(ko)); ++ } catch (Exception e) { ++ throw new RuntimeException("Internal error", e); ++ } ++ encoded = out.toByteArray(); ++ } ++ ++ // returns the curve's standard name followed by its aliases ++ public String[] getNameAndAliases() { ++ return nameAndAliases; ++ } ++ ++ public byte[] getEncoded() { ++ return encoded.clone(); ++ } ++ ++ public String getObjectId() { ++ return oid; ++ } ++ ++ public String toString() { ++ StringBuilder sb = new StringBuilder(nameAndAliases[0]); ++ if (nameAndAliases.length > 1) { ++ sb.append(" ["); ++ int j = 1; ++ while (j < nameAndAliases.length - 1) { ++ sb.append(nameAndAliases[j++]); ++ sb.append(','); ++ } ++ sb.append(nameAndAliases[j] + "]"); ++ } ++ sb.append(" (" + oid + ")"); ++ return sb.toString(); ++ } ++} +diff --git a/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEProvider.java b/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEProvider.java +index 2b1cb2449..a7085c829 100644 +--- a/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEProvider.java ++++ b/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEProvider.java +@@ -64,18 +64,29 @@ public class KAEProvider extends Provider { + + // init openssl + private static void initOpenssl() { +- boolean useGlobalMode = useGlobalMode(); + String engineId = getEngineId(); + boolean[] algorithmKaeFlags = KAEConfig.getUseKaeEngineFlags(); + Throwable throwable = null; + try { +- initOpenssl(useGlobalMode, engineId, algorithmKaeFlags); ++ Integer useOpensslVersion = useOpensslVersion(); ++ int v = initOpenssl(useOpensslVersion, engineId, algorithmKaeFlags); ++ if (kaeDebug != null) { ++ kaeDebug.println("Use Openssl " + v); ++ } ++ } catch (ExceptionInInitializerError t) { ++ throwable = (Throwable) t; ++ printKaeLog(engineId, t); ++ throw t; + } catch (Throwable t) { + throwable = t; + if (kaeDebug != null) { + kaeDebug.println("initOpenssl failed : " + throwable.getMessage()); + } + } ++ 
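++ // Whether initialization succeeded or failed, the engine and provider
++ // state is recorded below so that kae.log captures both outcomes. The
++ // requested version comes from useOpensslVersion(): 0 applies the default
++ // policy of preferring OpenSSL 3 and falling back to OpenSSL 1, while an
++ // explicit 1 or 3 pins that major version.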
printKaeLog(engineId, throwable);
++ }
++
++ private static void printKaeLog(String engineId, Throwable throwable) {
+ boolean[] engineFlags = getEngineFlags();
+ boolean[] kaeProviderFlags = KAEConfig.getUseKaeProviderFlags();
+ KAELog.log(engineId, throwable, engineFlags, kaeProviderFlags);
+@@ -86,11 +97,23 @@ public class KAEProvider extends Provider {
+ return KAEConfig.privilegedGetOverridable("kae.engine.id", DEFAULT_ENGINE_ID);
+ }
+
+- // whether to set libcrypto.so to GLOBAL mode, by default libcrypto.so is LOCAL mode
+- private static boolean useGlobalMode() {
++ // which openssl version to prefer; 0 selects the default policy
++ private static int useOpensslVersion() throws ExceptionInInitializerError {
+ String explicitLoad = KAEConfig.privilegedGetOverridable(
+- "kae.libcrypto.useGlobalMode", "false");
+- return Boolean.parseBoolean(explicitLoad);
++ "kae.useOpensslVersion", "0");
++ int version = 0;
++ try {
++ if (explicitLoad.trim().isEmpty()) {
++ throw new ExceptionInInitializerError("initOpenssl failed : kae.useOpensslVersion set to empty value");
++ }
++ version = Integer.parseInt(explicitLoad);
++ if (version != 0 && version != 1 && version != 3) {
++ throw new ExceptionInInitializerError("initOpenssl failed : unknown openssl version " + version);
++ }
++ } catch (NumberFormatException e) {
++ throw new ExceptionInInitializerError("initOpenssl failed : cannot convert " + explicitLoad + " to Integer");
++ }
++ return version;
+ }
+
+ @SuppressWarnings({"deprecation", "this-escape"})
+@@ -127,6 +150,12 @@ public class KAEProvider extends Provider {
+ if (KAEConfig.useKaeProvider("kae.ec")) {
+ putEC();
+ }
++ if (KAEConfig.useKaeProvider("kae.sm2.cipher")) {
++ putSM2Cipher();
++ }
++ if (KAEConfig.useKaeProvider("kae.sm2.signature")) {
++ putSM2Signature();
++ }
+ }
+
+ private void putAES() {
+@@ -316,10 +345,30 @@ public class KAEProvider extends Provider {
+ put("KeyPairGenerator.EC", "org.openeuler.security.openssl.KAEECKeyPairGenerator");
+ put("Alg.Alias.KeyPairGenerator.EllipticCurve", "EC");
+ put("KeyAgreement.ECDH", "org.openeuler.security.openssl.KAEECDHKeyAgreement");
++
++ put("KeyFactory.EC", "org.openeuler.security.openssl.KAEECKeyFactory");
++ put("Alg.Alias.KeyFactory.EllipticCurve", "EC");
++
++ put("AlgorithmParameters.EC", "org.openeuler.security.openssl.KAEECParameters");
++ put("Alg.Alias.AlgorithmParameters.EllipticCurve", "EC");
++ put("Alg.Alias.AlgorithmParameters.1.2.840.10045.2.1", "EC");
++ }
++
++ private void putSM2Cipher() {
++ put("KeyPairGenerator.SM2", "org.openeuler.security.openssl.KAESM2KeyPairGenerator");
++ put("KeyFactory.SM2", "org.openeuler.security.openssl.KAEECKeyFactory");
++ put("AlgorithmParameters.EC", "org.openeuler.security.openssl.KAEECParameters");
++ put("AlgorithmParameters.SM2", "org.openeuler.security.openssl.KAEECParameters");
++ put("Alg.Alias.AlgorithmParameters.1.2.156.10197.1.301", "SM2");
++ put("Cipher.SM2", "org.openeuler.security.openssl.KAESM2Cipher");
++ }
++
++ private void putSM2Signature() {
++ put("Signature.SM3withSM2", "org.openeuler.security.openssl.KAESM2Signature$SM3withSM2");
+ }
+
+ // init openssl
+- static native void initOpenssl(boolean useGlobalMode, String engineId, boolean[] algorithmKaeFlags)
++ static native int initOpenssl(int useOpensslVersion, String engineId, boolean[] algorithmKaeFlags)
+ throws RuntimeException;
+
+ static native boolean[] getEngineFlags();
+diff --git a/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAESM2Cipher.java 
b/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAESM2Cipher.java +new file mode 100644 +index 000000000..2cbd5bb51 +--- /dev/null ++++ b/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAESM2Cipher.java +@@ -0,0 +1,386 @@ ++/* ++ * Copyright (c) 2023, Huawei Technologies Co., Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. Huawei designates this ++ * particular file as subject to the "Classpath" exception as provided ++ * by Huawei in the LICENSE file that accompanied this code. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please visit https://gitee.com/openeuler/bgmprovider if you need additional ++ * information or have any questions. ++ */ ++ ++package org.openeuler.security.openssl; ++ ++import javax.crypto.*; ++import java.io.ByteArrayOutputStream; ++import java.lang.ref.PhantomReference; ++import java.lang.ref.ReferenceQueue; ++import java.security.*; ++import java.security.interfaces.*; ++import java.security.spec.AlgorithmParameterSpec; ++import java.util.Arrays; ++import java.util.Locale; ++import java.util.Set; ++import java.util.concurrent.ConcurrentSkipListSet; ++ ++import static org.openeuler.security.openssl.KAEUtils.asUnsignedByteArray; ++ ++public class KAESM2Cipher extends CipherSpi { ++ // buffer for the data ++ private KAEByteArrayOutputStream byteBuf = new KAEByteArrayOutputStream(); ++ ++ private ECKey ecKey; ++ private int cipherMode = -1; ++ ++ // sm2 key holder ++ private KAESM2KeyHolder sm2KeyHolder; ++ ++ // see JCE spec ++ @Override ++ protected void engineSetMode(String mode) throws NoSuchAlgorithmException { ++ String modeName = mode.toUpperCase(Locale.ROOT); ++ ++ if (!modeName.equals("NONE")) { ++ throw new IllegalArgumentException("can't support mode " + mode); ++ } ++ } ++ ++ // see JCE spec ++ @Override ++ protected void engineSetPadding(String padding) throws NoSuchPaddingException { ++ String paddingName = padding.toUpperCase(Locale.ROOT); ++ ++ if (!paddingName.equals("NOPADDING")) { ++ throw new NoSuchPaddingException("padding not available with KAESM2Cipher"); ++ } ++ } ++ ++ // see JCE spec ++ @Override ++ protected int engineGetBlockSize() { ++ return 0; ++ } ++ ++ // see JCE spec ++ @Override ++ protected int engineGetOutputSize(int inputLen) { ++ throw new UnsupportedOperationException("engineGetOutputSize"); ++ } ++ ++ // see JCE spec ++ @Override ++ protected byte[] engineGetIV() { ++ return null; ++ } ++ ++ // see JCE spec ++ @Override ++ protected AlgorithmParameters engineGetParameters() { ++ return null; ++ } ++ ++ // see JCE spec ++ @Override ++ protected byte[] engineWrap(Key key) ++ throws IllegalBlockSizeException, InvalidKeyException { ++ if (key == null) { ++ throw new InvalidKeyException("Key cannot be null"); ++ } ++ 
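++ // Wrapping a key is plain SM2 encryption of its encoded form; engineUnwrap
++ // below reverses this and rebuilds the key via KAEUtils.ConstructKeys.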
byte[] encoded = key.getEncoded(); ++ if ((encoded == null) || (encoded.length == 0)) { ++ throw new InvalidKeyException("Cannot get an encoding of " + ++ "the key to be wrapped"); ++ } ++ try { ++ return engineDoFinal(encoded, 0, encoded.length); ++ } catch (BadPaddingException e) { ++ throw new InvalidKeyException("Wrapping failed", e); ++ } ++ } ++ ++ // see JCE spec ++ @Override ++ protected Key engineUnwrap(byte[] wrappedKey, String wrappedKeyAlgorithm, int wrappedKeyType) ++ throws InvalidKeyException, NoSuchAlgorithmException { ++ if (wrappedKey == null || wrappedKey.length == 0) { ++ throw new InvalidKeyException("The wrappedKey cannot be null or empty"); ++ } ++ byte[] unWrappedKey; ++ try { ++ unWrappedKey = engineDoFinal(wrappedKey, 0, wrappedKey.length); ++ } catch (IllegalBlockSizeException | BadPaddingException e) { ++ throw new InvalidKeyException("Unwrapping failed", e); ++ } ++ return KAEUtils.ConstructKeys.constructKey(unWrappedKey, wrappedKeyAlgorithm, wrappedKeyType); ++ } ++ ++ // see JCE spec ++ @Override ++ protected void engineInit(int opmode, Key key, SecureRandom random) throws InvalidKeyException { ++ try { ++ engineInit(opmode, key, (AlgorithmParameterSpec) null, random); ++ } catch (InvalidAlgorithmParameterException e) { ++ throw new IllegalArgumentException("cannot handle supplied parameter spec: " + e.getMessage()); ++ } ++ } ++ ++ // see JCE spec ++ @Override ++ protected void engineInit(int opmode, Key key, AlgorithmParameterSpec params, SecureRandom random) throws InvalidKeyException, InvalidAlgorithmParameterException { ++ if (opmode == Cipher.ENCRYPT_MODE || opmode == Cipher.WRAP_MODE) { ++ if (key instanceof KAEECPublicKeyImpl) { ++ this.ecKey = (KAEECPublicKeyImpl) key; ++ } else if (key instanceof ECPublicKey) { ++ this.ecKey = (ECPublicKey) key; ++ } else { ++ throw new InvalidKeyException("must use public EC key for encryption"); ++ } ++ } else if (opmode == Cipher.DECRYPT_MODE || opmode == Cipher.UNWRAP_MODE) { ++ if (key instanceof KAEECPrivateKeyImpl) { ++ this.ecKey = (KAEECPrivateKeyImpl) key; ++ } else if (key instanceof ECPrivateKey) { ++ this.ecKey = (ECPrivateKey) key; ++ } else { ++ throw new InvalidKeyException("must use private EC key for decryption"); ++ } ++ } else { ++ throw new InvalidParameterException("wrong cipher mode, must be ENCRYPT_MODE or WRAP_MODE or DECRYPT_MODE or UNWRAP_MODE"); ++ } ++ ++ try { ++ sm2KeyHolder = new KAESM2KeyHolder(this, ecKey); ++ } catch (InvalidKeyException e) { ++ throw new RuntimeException(e); ++ } ++ this.cipherMode = opmode; ++ this.byteBuf.reset(); ++ } ++ ++ // see JCE spec ++ @Override ++ protected void engineInit(int opmode, Key key, AlgorithmParameters params, SecureRandom random) throws InvalidKeyException, InvalidAlgorithmParameterException { ++ AlgorithmParameterSpec paramSpec = null; ++ if (params != null) { ++ throw new InvalidAlgorithmParameterException("cannot recognise parameters: " + params.getClass().getName()); ++ } ++ engineInit(opmode, key, paramSpec, random); ++ } ++ ++ // see JCE spec ++ @Override ++ protected byte[] engineUpdate(byte[] input, int inputOffset, int inputLen) { ++ byteBuf.write(input, inputOffset, inputLen); ++ return null; ++ } ++ ++ // see JCE spec ++ @Override ++ protected int engineUpdate(byte[] input, int inputOffset, int inputLen, byte[] output, int outputOffset) throws ShortBufferException { ++ engineUpdate(input, inputOffset, inputLen); ++ return 0; ++ } ++ ++ // see JCE spec ++ @Override ++ protected byte[] engineDoFinal(byte[] input, int inputOffset, 
int inputLen)
++ throws IllegalBlockSizeException, BadPaddingException {
++ if (inputLen != 0) {
++ byteBuf.write(input, inputOffset, inputLen);
++ }
++ if (byteBuf.size() == 0) {
++ throw new IllegalBlockSizeException("input buffer too short");
++ }
++
++ if (sm2KeyHolder == null) {
++ try {
++ sm2KeyHolder = new KAESM2KeyHolder(this, ecKey);
++ } catch (InvalidKeyException e) {
++ throw new RuntimeException(e);
++ }
++ }
++
++ long keyAddress = sm2KeyHolder.keyAddress;
++ byte[] out;
++ try {
++ if (cipherMode == Cipher.ENCRYPT_MODE || cipherMode == Cipher.WRAP_MODE) {
++ try {
++ out = nativeSM2Encrypt(keyAddress, byteBuf.toByteArray(), byteBuf.size());
++ } catch (RuntimeException e) {
++ throw new RuntimeException("KAESM2Cipher native encryption failed", e);
++ }
++ } else if (cipherMode == Cipher.DECRYPT_MODE || cipherMode == Cipher.UNWRAP_MODE) {
++ try {
++ out = nativeSM2Decrypt(keyAddress, byteBuf.toByteArray(), byteBuf.size());
++ } catch (RuntimeException e) {
++ throw new RuntimeException("KAESM2Cipher native decryption failed", e);
++ }
++ } else {
++ throw new IllegalStateException("cipher not initialised");
++ }
++ } finally {
++ byteBuf.reset();
++ resetKeyHolder();
++ }
++ return out;
++ }
++
++ // see JCE spec
++ @Override
++ protected int engineDoFinal(byte[] input, int inputOffset, int inputLen, byte[] output, int outputOffset)
++ throws ShortBufferException, IllegalBlockSizeException, BadPaddingException {
++ byte[] buffer = engineDoFinal(input, inputOffset, inputLen);
++ System.arraycopy(buffer, 0, output, outputOffset, buffer.length);
++ return buffer.length;
++ }
++
++ /**
++ * The SM2 openssl key holder; uses a PhantomReference to guard against native memory leaks.
++ */
++ private static class KAESM2KeyHolder extends PhantomReference<KAESM2Cipher>
++ implements Comparable<KAESM2KeyHolder> {
++ private static ReferenceQueue<KAESM2Cipher> referenceQueue = new ReferenceQueue<>();
++ private static Set<KAESM2KeyHolder> referenceList = new ConcurrentSkipListSet<>();
++ private final long keyAddress;
++
++ private static boolean disableKaeDispose = Boolean.getBoolean("kae.disableKaeDispose");
++
++ KAESM2KeyHolder(KAESM2Cipher sm2Cipher, ECKey sm2Key) throws InvalidKeyException {
++ super(sm2Cipher, referenceQueue);
++ this.keyAddress = getKeyAddress(sm2Key);
++ if (!disableKaeDispose) {
++ referenceList.add(this);
++ drainRefQueueBounded();
++ }
++ }
++
++ private static void drainRefQueueBounded() {
++ while (true) {
++ KAESM2KeyHolder next = (KAESM2KeyHolder) referenceQueue.poll();
++ if (next == null) {
++ break;
++ }
++ next.dispose(true);
++ }
++ }
++
++ void dispose(boolean needFree) {
++ if (!disableKaeDispose) {
++ referenceList.remove(this);
++ try {
++ if (needFree) {
++ nativeFreeKey(keyAddress);
++ }
++ } finally {
++ this.clear();
++ }
++ } else {
++ nativeFreeKey(keyAddress);
++ }
++ }
++
++ @Override
++ public int compareTo(KAESM2KeyHolder other) {
++ if (this.keyAddress == other.keyAddress) {
++ return 0;
++ } else {
++ return (this.keyAddress < other.keyAddress) ? -1 : 1;
++ }
++ }
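++ // A minimal sketch of this cleanup pattern in isolation (names are
++ // hypothetical, not part of this provider): the set keeps each
++ // PhantomReference strongly reachable, and new allocations
++ // opportunistically drain the queue of references whose owners have
++ // been collected.
++ //
++ //   class Holder extends PhantomReference<Owner> implements Comparable<Holder> {
++ //       static final ReferenceQueue<Owner> QUEUE = new ReferenceQueue<>();
++ //       static final Set<Holder> LIVE = new ConcurrentSkipListSet<>();
++ //       final long address;
++ //       Holder(Owner owner, long address) {
++ //           super(owner, QUEUE);
++ //           this.address = address;
++ //           LIVE.add(this); // keep the reference itself alive
++ //           for (Holder h; (h = (Holder) QUEUE.poll()) != null; ) {
++ //               LIVE.remove(h);
++ //               nativeFree(h.address); // owner unreachable: free the native side
++ //           }
++ //       }
++ //       public int compareTo(Holder o) { return Long.compare(address, o.address); }
++ //   }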
++
++ private long getKeyAddress(ECKey sm2Key) throws InvalidKeyException {
++ long address;
++ if (sm2Key instanceof ECPrivateKey) { // ECPrivateKeyImpl
++ address = getKeyAddress((ECPrivateKey) sm2Key);
++ } else if (sm2Key instanceof ECPublicKey) { // ECPublicKeyImpl
++ address = getKeyAddress((ECPublicKey) sm2Key);
++ } else {
++ throw new InvalidKeyException("Invalid SM2 key implementation " + sm2Key.getClass());
++ }
++ return address;
++ }
++
++ private long getKeyAddress(ECPrivateKey key) throws InvalidKeyException {
++ checkKey(key);
++ long address;
++ int curveLen = (key.getParams().getCurve().getField().getFieldSize() + 7) / 8;
++ try {
++ address = nativeCreateSM2PrivateKey(asUnsignedByteArray(curveLen, key.getS()), false);
++ return address;
++ } catch (RuntimeException e) {
++ throw new InvalidKeyException(e);
++ }
++ }
++
++ private long getKeyAddress(ECPublicKey key) throws InvalidKeyException {
++ checkKey(key);
++ long address;
++ int curveLen = (key.getParams().getCurve().getField().getFieldSize() + 7) / 8;
++ try {
++ address = nativeCreateSM2PublicKey(
++ asUnsignedByteArray(curveLen, key.getW().getAffineX()),
++ asUnsignedByteArray(curveLen, key.getW().getAffineY())
++ );
++ return address;
++ } catch (RuntimeException e) {
++ throw new InvalidKeyException(e);
++ }
++ }
++
++ private void checkKey(ECPrivateKey key) throws InvalidKeyException {
++ if (key.getS() == null) {
++ throw new InvalidKeyException("Invalid SM2 private key");
++ }
++ }
++
++ private void checkKey(ECPublicKey key) throws InvalidKeyException {
++ if (key.getW() == null || key.getW().getAffineX() == null || key.getW().getAffineY() == null) {
++ throw new InvalidKeyException("Invalid SM2 public key");
++ }
++ }
++ }
++
++ // reset the key holder
++ private void resetKeyHolder() {
++ if (sm2KeyHolder != null) {
++ sm2KeyHolder.dispose(true);
++ sm2KeyHolder = null;
++ }
++ }
++
++ // create KAE sm2 public key
++ protected static native long nativeCreateSM2PublicKey(byte[] x, byte[] y);
++
++ // create KAE sm2 private key
++ protected static native long nativeCreateSM2PrivateKey(byte[] key, boolean sign);
++
++ // free the key
++ protected static native void nativeFreeKey(long keyAddress);
++
++ // Encrypt message using sm2 algorithm
++ protected static native byte[] nativeSM2Encrypt(long keyAddress, byte[] input, int inputLen);
++
++ // Decrypt message using sm2 algorithm
++ protected static native byte[] nativeSM2Decrypt(long keyAddress, byte[] input, int inputLen);
++
++ private static class KAEByteArrayOutputStream extends ByteArrayOutputStream {
++ @Override
++ public synchronized void reset() {
++ // Clear data.
++ Arrays.fill(buf, (byte) 0);
++ super.reset();
++ }
++ }
++}
+\ No newline at end of file
+diff --git a/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAESM2KeyPairGenerator.java b/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAESM2KeyPairGenerator.java
+new file mode 100644
+index 000000000..1bc9365d3
+--- /dev/null
++++ b/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAESM2KeyPairGenerator.java
+@@ -0,0 +1,108 @@
++/*
++ * Copyright (c) 2024, Huawei Technologies Co., Ltd. All rights reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation. 
++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ */ ++ ++package org.openeuler.security.openssl; ++ ++import java.math.BigInteger; ++import java.security.InvalidAlgorithmParameterException; ++import java.security.InvalidKeyException; ++import java.security.InvalidParameterException; ++import java.security.KeyPair; ++import java.security.ProviderException; ++import java.security.SecureRandom; ++import java.security.spec.AlgorithmParameterSpec; ++import java.security.spec.ECFieldFp; ++import java.security.spec.ECGenParameterSpec; ++import java.security.spec.ECParameterSpec; ++import java.security.spec.ECPoint; ++import java.security.spec.EllipticCurve; ++ ++public class KAESM2KeyPairGenerator extends KAEECKeyPairGenerator { ++ private static final String SUPPORTED_CURVE_NAME = "sm2p256v1"; ++ private static final int SUPPORTED_KEY_SIZE = 256; ++ private ECParameterSpec param = null; ++ ++ @Override ++ public void initialize(int keysize, SecureRandom random) { ++ if (keysize != SUPPORTED_KEY_SIZE) { ++ throw new InvalidParameterException("unknown key size " + keysize); ++ } ++ String curveName = KAEUtils.getCurveByAlias(SUPPORTED_CURVE_NAME); ++ param = getParamsByCurve(curveName); ++ } ++ ++ @Override ++ public void initialize(AlgorithmParameterSpec param, SecureRandom random) ++ throws InvalidAlgorithmParameterException { ++ if (param instanceof ECParameterSpec) { ++ this.param = (ECParameterSpec) param; ++ } else if (param instanceof ECGenParameterSpec) { ++ ECGenParameterSpec ecParam = (ECGenParameterSpec)param; ++ if (!SUPPORTED_CURVE_NAME.equals(ecParam.getName())) { ++ throw new InvalidAlgorithmParameterException("Only support sm2p256v1"); ++ } ++ String curveName = KAEUtils.getCurveByAlias(SUPPORTED_CURVE_NAME); ++ this.param = getParamsByCurve(curveName); ++ } else { ++ throw new InvalidAlgorithmParameterException("ECParameterSpec or ECGenParameterSpec for EC"); ++ } ++ } ++ ++ @Override ++ public KeyPair generateKeyPair() { ++ if (param == null) { ++ String curveName = KAEUtils.getCurveByAlias(SUPPORTED_CURVE_NAME); ++ param = getParamsByCurve(curveName); ++ } ++ EllipticCurve curve = param.getCurve(); ++ ECFieldFp field = (ECFieldFp) curve.getField(); ++ BigInteger p = field.getP(); ++ BigInteger a = curve.getA(); ++ BigInteger b = curve.getB(); ++ ECPoint generator = param.getGenerator(); ++ BigInteger x = generator.getAffineX(); ++ BigInteger y = generator.getAffineY(); ++ BigInteger order = param.getOrder(); ++ int cofactor = param.getCofactor(); ++ ++ byte[][] keys = nativeGenerateKeyPair(p.toByteArray(), a.toByteArray(), ++ b.toByteArray(), x.toByteArray(), y.toByteArray(), order.toByteArray(), cofactor); ++ if (keys == null) { ++ throw new RuntimeException("nativeGenerateKeyPair failed"); ++ } ++ BigInteger wX = new BigInteger(keys[0]); ++ BigInteger wY = new BigInteger(keys[1]); ++ BigInteger s 
= new BigInteger(keys[2]);
++ ECPoint w = new ECPoint(wX, wY);
++
++ KAEECPrivateKeyImpl privateKey;
++ KAEECPublicKeyImpl publicKey;
++ try {
++ publicKey = new KAEECPublicKeyImpl(w, param);
++ privateKey = new KAEECPrivateKeyImpl(s, param);
++ } catch (InvalidKeyException e) {
++ throw new ProviderException(e);
++ }
++ return new KeyPair(publicKey, privateKey);
++ }
++}
+\ No newline at end of file
+diff --git a/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAESM2Signature.java b/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAESM2Signature.java
+new file mode 100644
+index 000000000..7e9ad85ce
+--- /dev/null
++++ b/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAESM2Signature.java
+@@ -0,0 +1,373 @@
++/*
++ * Copyright (c) 2024, Huawei Technologies Co., Ltd. All rights reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation. Oracle designates this
++ * particular file as subject to the "Classpath" exception as provided
++ * by Oracle in the LICENSE file that accompanied this code.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
++ * or visit www.oracle.com if you need additional information or have any
++ * questions.
++ */
++
++package org.openeuler.security.openssl;
++
++import java.lang.ref.PhantomReference;
++import java.lang.ref.ReferenceQueue;
++import java.lang.reflect.Field;
++import java.nio.charset.StandardCharsets;
++import java.security.*;
++import java.security.interfaces.ECPrivateKey;
++import java.security.interfaces.ECPublicKey;
++import java.security.spec.AlgorithmParameterSpec;
++import java.util.Set;
++import java.util.concurrent.ConcurrentSkipListSet;
++
++import static org.openeuler.security.openssl.KAEUtils.asUnsignedByteArray;
++
++/**
++ * We only support SM2 signatures with SM3 as the digest algorithm.
++ */
++public abstract class KAESM2Signature extends SignatureSpi {
++ /**
++ * The current mode, signature or signature verification.
++ */
++ enum Mode {
++ SIGNATURE,
++ VERIFY
++ }
++
++ /**
++ * Message digest algorithm name used for signing. Currently, only SM3 is supported.
++ */
++ enum DigestName {
++ SM3("SM3");
++
++ private final String digestName;
++
++ DigestName(String digestName) {
++ this.digestName = digestName;
++ }
++
++ public String getDigestValue() {
++ return digestName;
++ }
++ }
++
++ // message digest algorithm name we use
++ private final DigestName digestName;
++
++ // private key, if initialized for signing
++ private ECPrivateKey privateKey;
++
++ // public key, if initialized for verifying
++ private ECPublicKey publicKey;
++
++ // openssl context, save initialization information and updated messages.
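++ // The context below acts as a template: each sign/verify operation works
++ // on a native clone (ctxHolderCopy, created via nativeClone) so the
++ // initialized template can be reused across operations; the clone is
++ // disposed in resetCtxHolderCopy() after every doFinal.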
++ private SM2SignCtxHolder ctxHolder;
++
++ // openssl context copy, reset after signature or verification
++ private SM2SignCtxHolder ctxHolderCopy;
++
++ // the current mode
++ private Mode mode;
++
++ // initialized or not
++ private boolean initialized = false;
++
++ // default value
++ private String id = "1234567812345678";
++
++ public KAESM2Signature() throws NoSuchAlgorithmException {
++ this(DigestName.SM3.getDigestValue());
++ }
++
++ public KAESM2Signature(String digest) throws NoSuchAlgorithmException {
++ if ("SM3".equals(digest)) {
++ this.digestName = DigestName.SM3;
++ } else {
++ throw new NoSuchAlgorithmException("KAESM2Signature does not support the " + digest + " digest algorithm");
++ }
++ }
++
++ /**
++ * Initializes this signature object with the specified
++ * public key for verification operations.
++ *
++ * @param publicKey the public key of the identity whose signature is
++ * going to be verified.
++ */
++ @Override
++ protected void engineInitVerify(PublicKey publicKey) throws InvalidKeyException {
++ this.publicKey = (ECPublicKey) KAEECKeyFactory.toECKey(publicKey);
++ long keyAddress;
++ try {
++ int curveLen = (this.publicKey.getParams().getCurve().getField().getFieldSize() + 7) / 8;
++ keyAddress = KAESM2Cipher.nativeCreateSM2PublicKey(
++ asUnsignedByteArray(curveLen, this.publicKey.getW().getAffineX()),
++ asUnsignedByteArray(curveLen, this.publicKey.getW().getAffineY()));
++ } catch (RuntimeException e) {
++ throw new RuntimeException("KAESM2Signature nativeCreateSM2PublicKey failed", e);
++ }
++ try {
++ long verifyCtx = nativeInitSM2Ctx(keyAddress, digestName.getDigestValue(), id, Boolean.FALSE);
++ if (verifyCtx == 0) {
++ throw new InvalidKeyException("engineInitVerify verifyCtx is invalid");
++ }
++ this.ctxHolder = new SM2SignCtxHolder(this, verifyCtx);
++ } catch (RuntimeException e) {
++ throw new RuntimeException("KAESM2Signature nativeInitSM2Ctx failed", e);
++ } finally {
++ KAESM2Cipher.nativeFreeKey(keyAddress);
++ }
++ this.mode = Mode.VERIFY;
++ this.initialized = true;
++ }
++
++ /**
++ * Initializes this signature object with the specified
++ * private key for signing operations.
++ *
++ * @param privateKey the private key of the identity whose signature
++ * will be generated.
++ */
++ @Override
++ protected void engineInitSign(PrivateKey privateKey) throws InvalidKeyException {
++ this.privateKey = (ECPrivateKey) KAEECKeyFactory.toECKey(privateKey);
++ long keyAddress;
++ try {
++ int curveLen = (this.privateKey.getParams().getCurve().getField().getFieldSize() + 7) / 8;
++ keyAddress = KAESM2Cipher.nativeCreateSM2PrivateKey(asUnsignedByteArray(curveLen, this.privateKey.getS()), true);
++ } catch (RuntimeException e) {
++ throw new InvalidKeyException("KAESM2Signature nativeCreateSM2PrivateKey failed", e);
++ }
++ try {
++ long signCtx = nativeInitSM2Ctx(keyAddress, digestName.getDigestValue(), id, Boolean.TRUE);
++ if (signCtx == 0) {
++ throw new InvalidKeyException("engineInitSign signCtx is invalid");
++ }
++ this.ctxHolder = new SM2SignCtxHolder(this, signCtx);
++ } catch (RuntimeException e) {
++ throw new RuntimeException("KAESM2Signature nativeInitSM2Ctx failed", e);
++ } finally {
++ KAESM2Cipher.nativeFreeKey(keyAddress);
++ }
++ this.mode = Mode.SIGNATURE;
++ this.initialized = true;
++ }
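++ // Note on curveLen above: (fieldSize + 7) / 8 is ceil(fieldSize / 8), so a
++ // 256-bit curve such as sm2p256v1 always yields 32-byte, zero-padded
++ // big-endian values for the private scalar and the point coordinates.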
++
++ // update the signature with the plaintext data. See JCA doc
++ @Override
++ protected void engineUpdate(byte b) throws SignatureException {
++ byte[] msg = new byte[1];
++ msg[0] = b;
++ engineUpdate(msg, 0, 1);
++ }
++
++ // update the signature with the plaintext data. See JCA doc
++ @Override
++ protected void engineUpdate(byte[] b, int off, int len) throws SignatureException {
++ if (!initialized || ctxHolder == null) {
++ throw new SignatureException("The engine is not initialized");
++ }
++ byte[] msg = new byte[len];
++ System.arraycopy(b, off, msg, 0, len);
++ if (ctxHolderCopy == null) {
++ ctxHolderCopy = createCtxHolder(this, ctxHolder.ctxAddress);
++ }
++ try {
++ if (this.mode == Mode.SIGNATURE) {
++ nativeSM2Update(ctxHolderCopy.ctxAddress, msg, len, Boolean.TRUE);
++ } else {
++ // Mode.VERIFY
++ nativeSM2Update(ctxHolderCopy.ctxAddress, msg, len, Boolean.FALSE);
++ }
++ } catch (RuntimeException e) {
++ throw new RuntimeException("KAESM2Signature nativeSM2Update failed", e);
++ }
++ }
++
++ // see JCE spec
++ @Override
++ protected byte[] engineSign() throws SignatureException {
++ if (!initialized || ctxHolder == null) {
++ throw new SignatureException("The engine is not initialized");
++ }
++ if (ctxHolderCopy == null) {
++ ctxHolderCopy = createCtxHolder(this, ctxHolder.ctxAddress);
++ }
++ byte[] sigBytes;
++ try {
++ sigBytes = nativeSM2SignFinal(ctxHolderCopy.ctxAddress);
++ } catch (SignatureException e) {
++ throw new RuntimeException("KAESM2Signature nativeSM2SignFinal failed", e);
++ } finally {
++ resetCtxHolderCopy();
++ }
++ return sigBytes;
++ }
++
++ // see JCE spec
++ @Override
++ protected boolean engineVerify(byte[] sigBytes) throws SignatureException {
++ if (!initialized || ctxHolder == null) {
++ throw new SignatureException("The engine is not initialized");
++ }
++ if (ctxHolderCopy == null) {
++ ctxHolderCopy = createCtxHolder(this, ctxHolder.ctxAddress);
++ }
++ try {
++ return nativeSM2VerifyFinal(ctxHolderCopy.ctxAddress, sigBytes, sigBytes.length);
++ } catch (SignatureException e) {
++ throw new RuntimeException("KAESM2Signature nativeSM2VerifyFinal failed", e);
++ } finally {
++ resetCtxHolderCopy();
++ }
++ }
++
++ // set parameter, not supported. See JCA doc
++ @Deprecated
++ @Override
++ protected void engineSetParameter(String param, Object value) throws InvalidParameterException {
++ throw new UnsupportedOperationException("setParameter() not supported");
++ }
++
++ @Override
++ protected void engineSetParameter(AlgorithmParameterSpec params)
++ throws InvalidAlgorithmParameterException {
++ if (params == null) {
++ throw new InvalidAlgorithmParameterException("params is null");
++ }
++
++ try {
++ Class<?> clazz = params.getClass();
++ Field field = clazz.getDeclaredField("id");
++ field.setAccessible(true);
++ byte[] idValue = (byte[]) field.get(params);
++ this.id = new String(idValue, StandardCharsets.UTF_8);
++ } catch (IllegalAccessException | NoSuchFieldException e) {
++ throw new InvalidAlgorithmParameterException("Failed to get id field from params");
++ }
++ }
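++ // engineSetParameter reads the "id" field reflectively so any SM2
++ // parameter spec class carrying a byte[] id can be accepted without a
++ // compile-time dependency on a particular spec type. A minimal spec that
++ // would satisfy it (hypothetical, for illustration only):
++ //
++ //   public final class SM2IdParameterSpec implements AlgorithmParameterSpec {
++ //       private final byte[] id;
++ //       public SM2IdParameterSpec(byte[] id) { this.id = id.clone(); }
++ //   }
++ //
++ // The default id "1234567812345678" is the conventional SM2 user ID.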
++
++ // get parameter, not supported. See JCA doc
++ @Deprecated
++ @Override
++ protected Object engineGetParameter(String param) throws InvalidParameterException {
++ throw new UnsupportedOperationException("getParameter() not supported");
++ }
++
++ /**
++ * The SM2 sign openssl md_ctx holder; uses a PhantomReference to guard against native memory leaks.
++ */
++ private static class SM2SignCtxHolder extends PhantomReference<KAESM2Signature>
++ implements Comparable<SM2SignCtxHolder> {
++ private static ReferenceQueue<KAESM2Signature> referenceQueue = new ReferenceQueue<>();
++ private static Set<SM2SignCtxHolder> referenceList = new ConcurrentSkipListSet<>();
++ private final long ctxAddress;
++
++ private static boolean disableKaeDispose = Boolean.getBoolean("kae.disableKaeDispose");
++
++ SM2SignCtxHolder(KAESM2Signature sm2Signature, long ctxAddress) {
++ super(sm2Signature, referenceQueue);
++ this.ctxAddress = ctxAddress;
++ if (!disableKaeDispose) {
++ referenceList.add(this);
++ drainRefQueueBounded();
++ }
++ }
++
++ private static void drainRefQueueBounded() {
++ while (true) {
++ SM2SignCtxHolder next = (SM2SignCtxHolder) referenceQueue.poll();
++ if (next == null) {
++ break;
++ }
++ next.dispose(true);
++ }
++ }
++
++ void dispose(boolean needFree) {
++ if (!disableKaeDispose) {
++ referenceList.remove(this);
++ try {
++ if (needFree) {
++ nativeFreeSM2Ctx(ctxAddress);
++ }
++ } finally {
++ this.clear();
++ }
++ } else {
++ nativeFreeSM2Ctx(ctxAddress);
++ }
++ }
++
++ @Override
++ public int compareTo(SM2SignCtxHolder other) {
++ if (this.ctxAddress == other.ctxAddress) {
++ return 0;
++ } else {
++ return (this.ctxAddress < other.ctxAddress) ? -1 : 1;
++ }
++ }
++ }
++
++ // reset the ctx holder
++ private void resetCtxHolderCopy() {
++ if (ctxHolderCopy != null) {
++ ctxHolderCopy.dispose(true);
++ ctxHolderCopy = null;
++ }
++ }
++
++ private SM2SignCtxHolder createCtxHolder(KAESM2Signature kaesm2Signature, long ctxAddress) {
++ long addr;
++ try {
++ addr = nativeClone(ctxAddress);
++ } catch (RuntimeException e) {
++ throw new RuntimeException("SM2SignCtxHolder nativeClone failed", e);
++ }
++ if (addr == 0) {
++ throw new RuntimeException("SM2SignCtxHolder nativeClone EVP_MD_CTX failed");
++ }
++ return new SM2SignCtxHolder(kaesm2Signature, addr);
++ }
++
++ // clone the sign ctx
++ protected static native long nativeClone(long ctxAddress);
++
++ // free the sign ctx
++ protected static native void nativeFreeSM2Ctx(long ctxAddress);
++
++ // init openssl sm2 signature context
++ protected static native long nativeInitSM2Ctx(long keyAddress, String digestName, String id, boolean isSign);
++
++ // update openssl sm2 signature text
++ protected static native void nativeSM2Update(long ctxAddress, byte[] msg, int msgLen, boolean isSign);
++
++ // sm2 signature do final
++ protected static native byte[] nativeSM2SignFinal(long ctxAddress) throws SignatureException;
++
++ // sm2 verification do final
++ protected static native boolean nativeSM2VerifyFinal(long ctxAddress, byte[] sigBytes, int sigLen) throws SignatureException;
++
++ public static class SM3withSM2
++ extends KAESM2Signature {
++ public SM3withSM2() throws NoSuchAlgorithmException {
++ super(DigestName.SM3.getDigestValue());
++ }
++ }
++}
+\ No newline at end of file
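With the SM2 registrations from KAEProvider in place, the classes above can be driven through the plain JCA API. A minimal usage sketch, assuming the KAE provider has been installed (getInstance without an explicit provider argument will pick whichever installed provider supports these algorithm names):

    import java.nio.charset.StandardCharsets;
    import java.security.KeyPair;
    import java.security.KeyPairGenerator;
    import java.security.Signature;

    public class SM2SignDemo {
        public static void main(String[] args) throws Exception {
            // "SM2" key pair generation is registered by putSM2Cipher()
            KeyPairGenerator kpg = KeyPairGenerator.getInstance("SM2");
            kpg.initialize(256);
            KeyPair kp = kpg.generateKeyPair();

            byte[] msg = "hello".getBytes(StandardCharsets.UTF_8);

            // "SM3withSM2" is registered by putSM2Signature()
            Signature signer = Signature.getInstance("SM3withSM2");
            signer.initSign(kp.getPrivate());
            signer.update(msg);
            byte[] sig = signer.sign();

            Signature verifier = Signature.getInstance("SM3withSM2");
            verifier.initVerify(kp.getPublic());
            verifier.update(msg);
            System.out.println(verifier.verify(sig)); // expected: true
        }
    }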
+diff --git a/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEUtils.java b/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEUtils.java
+index a4a005285..093905e43 100644
+--- a/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEUtils.java
++++ b/src/jdk.crypto.kaeprovider/linux/classes/org/openeuler/security/openssl/KAEUtils.java
+@@ -27,6 +27,7 @@ package org.openeuler.security.openssl;
+ import javax.crypto.Cipher;
+ import javax.crypto.SecretKey;
+ import javax.crypto.spec.SecretKeySpec;
++import java.math.BigInteger;
+ import java.security.*;
+ import java.security.spec.InvalidKeySpecException;
+ import java.security.spec.PKCS8EncodedKeySpec;
+@@ -198,16 +199,50 @@ class KAEUtils {
+ }
+ }
+
++ /**
++ * Returns the passed-in value as an unsigned byte array of the specified length, padded with
++ * leading zeros as necessary.
++ *
++ * @param length the fixed length of the result
++ * @param value the value to be converted.
++ * @return a byte array padded to a fixed length with leading zeros.
++ */
++ protected static byte[] asUnsignedByteArray(int length, BigInteger value) {
++ byte[] bytes = value.toByteArray();
++ if (bytes.length == length) {
++ return bytes;
++ }
++
++ int start = (bytes[0] == 0 && bytes.length != 1) ? 1 : 0;
++ int count = bytes.length - start;
++
++ if (count > length) {
++ throw new IllegalArgumentException("standard length exceeded for value");
++ }
++
++ byte[] tmp = new byte[length];
++ System.arraycopy(bytes, start, tmp, tmp.length - count, count);
++ return tmp;
++ }
++
+ private static void initECDH() {
+ SIZE_TO_CURVE.put(224, "secp224r1");
+ SIZE_TO_CURVE.put(256, "prime256v1");
+ SIZE_TO_CURVE.put(384, "secp384r1");
+ SIZE_TO_CURVE.put(521, "secp521r1");
++
++ CURVE_ALIAS.put("secp224r1", "secp224r1");
++ CURVE_ALIAS.put("prime256v1", "prime256v1");
++ CURVE_ALIAS.put("secp384r1", "secp384r1");
++ CURVE_ALIAS.put("secp521r1", "secp521r1");
++
+ CURVE_ALIAS.put("secp256r1", "prime256v1");
++ CURVE_ALIAS.put("sm2p256v1", "SM2");
+ CURVE_ALIAS.put("1.3.132.0.33", "secp224r1");
+ CURVE_ALIAS.put("1.3.132.0.34", "secp384r1");
+ CURVE_ALIAS.put("1.3.132.0.35", "secp521r1");
+ CURVE_ALIAS.put("1.2.840.10045.3.1.7", "prime256v1");
++ CURVE_ALIAS.put("1.2.156.10197.1.301", "SM2");
+ }
+
+ static String getCurveBySize(int size) {
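Since BigInteger.toByteArray() may prepend a 0x00 sign byte or return fewer bytes than the curve width, the fixed-length conversion added above deserves a worked example. The sketch below re-implements the same logic standalone (class name hypothetical) so it can be run without the provider:

    import java.math.BigInteger;

    public class FixedLengthDemo {
        // same logic as KAEUtils.asUnsignedByteArray
        static byte[] asUnsignedByteArray(int length, BigInteger value) {
            byte[] bytes = value.toByteArray();
            if (bytes.length == length) {
                return bytes;
            }
            // drop a leading 0x00 sign byte, if present
            int start = (bytes[0] == 0 && bytes.length != 1) ? 1 : 0;
            int count = bytes.length - start;
            if (count > length) {
                throw new IllegalArgumentException("standard length exceeded for value");
            }
            // left-pad with zeros to the requested width
            byte[] tmp = new byte[length];
            System.arraycopy(bytes, start, tmp, tmp.length - count, count);
            return tmp;
        }

        public static void main(String[] args) {
            // high bit set: toByteArray() yields 33 bytes (leading 0x00 sign byte)
            BigInteger high = BigInteger.ONE.shiftLeft(255);
            System.out.println(asUnsignedByteArray(32, high).length); // 32

            // small value: left-padded with zeros to the curve width
            byte[] one = asUnsignedByteArray(32, BigInteger.ONE);
            System.out.println(one[0] == 0 && one[31] == 1); // true
        }
    }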
+diff --git a/src/jdk.crypto.kaeprovider/linux/conf/security/kaeprovider.conf b/src/jdk.crypto.kaeprovider/linux/conf/security/kaeprovider.conf
+index 49ff98fd8..40c6c2c10 100644
+--- a/src/jdk.crypto.kaeprovider/linux/conf/security/kaeprovider.conf
++++ b/src/jdk.crypto.kaeprovider/linux/conf/security/kaeprovider.conf
+@@ -18,22 +18,20 @@
+ # kae.rsa=false
+ # kae.dh=false
+ # kae.ec=false
++# kae.sm2.cipher=false
++# kae.sm2.signature=false
+
+ # Configure engine id, the default value is kae.
+ # kae.engine.id=kae
+
+-# Configure whether libcrypto.so uses GLOBAL mode, uses LOCAL mode by default.
+-# If you use uadk_engine, you need to enable this option.
+-# kae.libcrypto.useGlobalMode=false
+-
+ # The following configuration will only take effect when using KAEProvider.
+ # Configure whether to enable KAE hardware acceleration for each category of algorithm.
+ # The configurable value are as follows:
+ # true : enable KAE hardware acceleration by default
+ # false: use openssl soft calculation by default
+ # The digest/sm4/rsa/dh category algorithm enable KAE hardware acceleration by default.
+-# The aes/hmac/ec category algorithm use openssl soft calculation by default.
++# The aes/hmac/ec/sm2 category algorithm use openssl soft calculation by default.
++# The ec/sm2 category algorithm configuration does not take effect temporarily, and it
+ # currently does not support KAE hardware acceleration, temporarily use openssl soft calculation.
+ # kae.digest.useKaeEngine=true
+ # kae.aes.useKaeEngine=false
+@@ -42,15 +40,17 @@
+ # kae.rsa.useKaeEngine=true
+ # kae.dh.useKaeEngine=true
+ # kae.ec.useKaeEngine=false
++# kae.sm2.useKaeEngine=false
+ #
+ # Some engines do not fully support certain categories of algorithms, for example, the digest
+-# algorithm implemented by kae engine only supports md5 and sm3.For more information, please refer to:
++# algorithm implemented by kae engine only supports md5 and sm3, and the asymmetric cipher does not support sm2.
++# For more information, please refer to:
+ # KAE : https://github.com/kunpengcompute/KAE#:~:text=Digest%20algorithm%3A%20SM3/MD5
+ # UADK: https://gitee.com/openeuler/uadk/wikis/%E4%BD%BF%E7%94%A8%E6%96%87%E6%A1%A3/UADK%20quick%20start#11-uadk
+ #
+ # Users can disable unsupported algorithms through the following property configuration.
+ # Disable algorithm to enable KAE hardware acceleration, use openssl soft algorithm instead.
+-# The sha256, sha384 algorithms are disabled by default.
++# The sha256, sha384, sm2 algorithms are disabled by default.
+ # digest : md5,sha256,sha384,sm3
+ # aes : aes-128-ecb,aes-128-cbc,aes-128-ctr,aes-128-gcm,
+ # aes-192-ecb,aes-192-cbc,aes-192-ctr,aes-192-gcm,
+ # rsa : rsa
+ # dh : dh
+ # ec : ec
+-# kae.engine.disabledAlgorithms=sha256,sha384
++# sm2 : sm2
++# kae.engine.disabledAlgorithms=sha256,sha384,sm2
+
+ # SM4 max chunk size of each encryption or decryption.
+ # when input data does not have an accessible byte[].
+ # The default value is 4096, when configuring a non-positive Integer type, use the default value of 4096.
+ # kae.sm4.maxChunkSize=4096
+
++# Which OpenSSL version to prefer. The default value is 0, meaning the default policy (first look for OpenSSL 3; if 3 is not found, fall back to OpenSSL 1).
++# To pin a specific version, set the value to 1 or 3; any other value reports an error.
++# kae.useOpensslVersion=0
++#
+ # Enable engine load log.
+ # kae.log=true + # +diff --git a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_cipher_rsa.c b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_cipher_rsa.c +index 557d3965b..919595392 100644 +--- a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_cipher_rsa.c ++++ b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_cipher_rsa.c +@@ -27,6 +27,7 @@ + #include "kae_log.h" + #include "kae_util.h" + #include "kae_exception.h" ++#include "ssl_utils.h" + #include "org_openeuler_security_openssl_KAERSACipher.h" + + typedef int RSACryptOperation(int, const unsigned char*, unsigned char*, RSA*, int); +@@ -49,7 +50,7 @@ static int RSACryptNotOAEPPadding(JNIEnv* env, jlong keyAddress, jint inLen, jby + EVP_PKEY* pkey = (EVP_PKEY*) keyAddress; + + // rsa = pkey->rsa +- RSA* rsa = EVP_PKEY_get1_RSA(pkey); ++ RSA* rsa = SSL_UTILS_EVP_PKEY_get1_RSA(pkey); + if (rsa == NULL) { + KAE_ThrowFromOpenssl(env, "EVP_PKEY_get1_RSA", KAE_ThrowRuntimeException); + return 0; +@@ -82,7 +83,7 @@ cleanup: + (*env)->ReleaseByteArrayElements(env, in, inBytes, 0); + } + if (rsa != NULL) { +- RSA_free(rsa); ++ SSL_UTILS_RSA_free(rsa); + } + return resultSize; + } +@@ -91,7 +92,7 @@ cleanup: + * set rsa padding + */ + static bool SetRSAPadding(JNIEnv* env, EVP_PKEY_CTX* pkeyCtx, int paddingType) { +- if (EVP_PKEY_CTX_set_rsa_padding(pkeyCtx, paddingType) <= 0) { ++ if (SSL_UTILS_EVP_PKEY_CTX_set_rsa_padding(pkeyCtx, paddingType) <= 0) { + KAE_ThrowFromOpenssl(env, "EVP_PKEY_CTX_set_rsa_padding", KAE_ThrowInvalidAlgorithmParameterException); + return false; + } +@@ -102,12 +103,12 @@ static bool SetRSAPadding(JNIEnv* env, EVP_PKEY_CTX* pkeyCtx, int paddingType) { + * set rsa mgf1 md + */ + static bool SetRSAMgf1Md(JNIEnv* env, EVP_PKEY_CTX* pkeyCtx, const char* mgf1MdAlgoUTF) { +- EVP_MD* mgf1MD = (EVP_MD*)EVP_get_digestbyname(mgf1MdAlgoUTF); ++ EVP_MD* mgf1MD = (EVP_MD*)SSL_UTILS_EVP_get_digestbyname(mgf1MdAlgoUTF); + if (mgf1MD == NULL) { + KAE_ThrowFromOpenssl(env, "EVP_get_digestbyname", KAE_ThrowInvalidAlgorithmParameterException); + return false; + } +- if (EVP_PKEY_CTX_set_rsa_mgf1_md(pkeyCtx, mgf1MD) <= 0) { ++ if (SSL_UTILS_EVP_PKEY_CTX_set_rsa_mgf1_md(pkeyCtx, mgf1MD) <= 0) { + KAE_ThrowFromOpenssl(env, "EVP_PKEY_CTX_set_rsa_mgf1_md", KAE_ThrowInvalidAlgorithmParameterException); + return false; + } +@@ -118,12 +119,12 @@ static bool SetRSAMgf1Md(JNIEnv* env, EVP_PKEY_CTX* pkeyCtx, const char* mgf1MdA + * set rsa oaep md + */ + static bool SetRSAOaepMd(JNIEnv* env, EVP_PKEY_CTX* pkeyCtx, const char* oaepMdAlgoUTF) { +- EVP_MD* oaepMD = (EVP_MD*)EVP_get_digestbyname(oaepMdAlgoUTF); ++ EVP_MD* oaepMD = (EVP_MD*)SSL_UTILS_EVP_get_digestbyname(oaepMdAlgoUTF); + if (oaepMD == NULL) { + KAE_ThrowFromOpenssl(env, "EVP_get_digestbyname", KAE_ThrowInvalidAlgorithmParameterException); + return false; + } +- if (EVP_PKEY_CTX_set_rsa_oaep_md(pkeyCtx, oaepMD) <= 0) { ++ if (SSL_UTILS_EVP_PKEY_CTX_set_rsa_oaep_md(pkeyCtx, oaepMD) <= 0) { + KAE_ThrowFromOpenssl(env, "EVP_PKEY_CTX_set_rsa_oaep_md", KAE_ThrowInvalidAlgorithmParameterException); + return false; + } +@@ -134,7 +135,7 @@ static bool SetRSAOaepMd(JNIEnv* env, EVP_PKEY_CTX* pkeyCtx, const char* oaepMdA + * set rsa oaep label + */ + static bool SetRSAOaepLabel(JNIEnv* env, EVP_PKEY_CTX* pkeyCtx, jbyte* labelBytes, jsize labelSize) { +- if (EVP_PKEY_CTX_set0_rsa_oaep_label(pkeyCtx, labelBytes, labelSize) <= 0) { ++ if 
(SSL_UTILS_EVP_PKEY_CTX_set0_rsa_oaep_label(pkeyCtx, labelBytes, labelSize) <= 0) { + KAE_ThrowFromOpenssl(env, "EVP_PKEY_CTX_set0_rsa_oaep_label", KAE_ThrowInvalidAlgorithmParameterException); + return false; + } +@@ -159,7 +160,7 @@ static void ReleaseRSACryptOAEPResource(JNIEnv* env, EVP_PKEY_CTX* pkeyCtx, + if (inBytes != NULL) { + (*env)->ReleaseByteArrayElements(env, in, inBytes, 0); + } +- EVP_PKEY_CTX_free(pkeyCtx); ++ SSL_UTILS_EVP_PKEY_CTX_free(pkeyCtx); + } + + static int RSACryptOAEPPadding(JNIEnv* env, jlong keyAddress, jint inLen, jbyteArray in, jbyteArray out, +@@ -184,7 +185,7 @@ static int RSACryptOAEPPadding(JNIEnv* env, jlong keyAddress, jint inLen, jbyteA + + // new ctx + // rsa encrypt/decrypt init +- if ((pkeyCtx = EVP_PKEY_CTX_new(pkey, kaeEngine)) == NULL || cryptInitOperation(pkeyCtx) <= 0) { ++ if ((pkeyCtx = SSL_UTILS_EVP_PKEY_CTX_new(pkey, kaeEngine)) == NULL || cryptInitOperation(pkeyCtx) <= 0) { + KAE_ThrowFromOpenssl(env, pkeyCtx == NULL ? "EVP_PKEY_CTX_new" : cryptInitName, KAE_ThrowInvalidKeyException); + goto cleanup; + } +@@ -290,29 +291,30 @@ JNIEXPORT jlong JNICALL Java_org_openeuler_security_openssl_KAERSACipher_nativeC + } + + // new pkey +- pkey = EVP_PKEY_new(); ++ pkey = SSL_UTILS_EVP_PKEY_new(); + if (pkey == NULL) { + KAE_ThrowFromOpenssl(env, "EVP_PKEY_new", KAE_ThrowRuntimeException); + goto cleanup; + } + + // new rsa +- rsa = RSA_new_method(kaeEngine); ++ rsa = SSL_UTILS_RSA_new_method(kaeEngine); + if (rsa == NULL) { + KAE_ThrowFromOpenssl(env, "RSA_new_method", KAE_ThrowRuntimeException); + goto cleanup; + } + + // set rsa private crt key params n,e,d,p,q,dmp1,dmp1,iqmp +- if (RSA_set0_key(rsa, bnN, bnE, bnD) <= 0 || +- RSA_set0_factors(rsa, bnP, bnQ) <= 0 || +- RSA_set0_crt_params(rsa, bnDMP1, bnDMQ1, bnIQMP) <= 0) { ++ if (SSL_UTILS_RSA_set0_key(rsa, bnN, bnE, bnD) <= 0 || ++ SSL_UTILS_RSA_set0_factors(rsa, bnP, bnQ) <= 0 || ++ SSL_UTILS_RSA_set0_crt_params(rsa, bnDMP1, bnDMQ1, bnIQMP) <= 0) { + KAE_ThrowFromOpenssl(env, "RSA set param", KAE_ThrowRuntimeException); + goto cleanup; + } + + // assign rsa to pkey +- int result = EVP_PKEY_assign_RSA(pkey, rsa); ++ //change from macro, "EVP_PKEY_assign_RSA(pkey,rsa)" is same as EVP_PKEY_assign((pkey),EVP_PKEY_RSA, (rsa)) ++ int result = SSL_UTILS_EVP_PKEY_assign_RSA(pkey, rsa); + if (result <= 0) { + KAE_ThrowFromOpenssl(env, "EVP_PKEY_assign_RSA", KAE_ThrowRuntimeException); + goto cleanup; +@@ -320,8 +322,8 @@ JNIEXPORT jlong JNICALL Java_org_openeuler_security_openssl_KAERSACipher_nativeC + return (jlong)pkey; + cleanup: + ReleaseRSAParams(bnN, bnE, bnD, bnP, bnQ, bnDMP1, bnDMQ1, bnIQMP); +- RSA_free(rsa); +- EVP_PKEY_free(pkey); ++ SSL_UTILS_RSA_free(rsa); ++ SSL_UTILS_EVP_PKEY_free(pkey); + return 0; + } + +@@ -353,27 +355,28 @@ JNIEXPORT jlong JNICALL Java_org_openeuler_security_openssl_KAERSACipher_nativeC + } + + // new rsa +- rsa = RSA_new_method(kaeEngine); ++ rsa = SSL_UTILS_RSA_new_method(kaeEngine); + if (rsa == NULL) { + KAE_ThrowFromOpenssl(env, "RSA_new_method", KAE_ThrowRuntimeException); + goto cleanup; + } + + // new EVP_PKEY +- pkey = EVP_PKEY_new(); ++ pkey = SSL_UTILS_EVP_PKEY_new(); + if (pkey == NULL) { + KAE_ThrowFromOpenssl(env, "EVP_PKEY_new", KAE_ThrowRuntimeException); + goto cleanup; + } + + // set rsa public key params n and e +- if (RSA_set0_key(rsa, bnN, bnE, NULL) <= 0) { ++ if (SSL_UTILS_RSA_set0_key(rsa, bnN, bnE, NULL) <= 0) { + KAE_ThrowFromOpenssl(env, "RSA_set0_key", KAE_ThrowRuntimeException); + goto cleanup; + } + + // assign rsa to pkey +- int result 
= EVP_PKEY_assign_RSA(pkey, rsa); ++ //change from macro, "EVP_PKEY_assign_RSA(pkey,rsa)" is same as EVP_PKEY_assign((pkey),EVP_PKEY_RSA, (rsa)) ++ int result = SSL_UTILS_EVP_PKEY_assign_RSA(pkey, rsa); + if (result <= 0) { + KAE_ThrowFromOpenssl(env, "EVP_PKEY_assign_RSA", KAE_ThrowRuntimeException); + goto cleanup; +@@ -382,8 +385,8 @@ JNIEXPORT jlong JNICALL Java_org_openeuler_security_openssl_KAERSACipher_nativeC + cleanup: + KAE_ReleaseBigNumFromByteArray(bnN); + KAE_ReleaseBigNumFromByteArray(bnE); +- RSA_free(rsa); +- EVP_PKEY_free(pkey); ++ SSL_UTILS_RSA_free(rsa); ++ SSL_UTILS_EVP_PKEY_free(pkey); + return 0; + } + +@@ -394,7 +397,7 @@ cleanup: + */ + JNIEXPORT jint JNICALL Java_org_openeuler_security_openssl_KAERSACipher_nativeRSAPrivateEncrypt(JNIEnv* env, + jclass cls, jlong keyAddress, jint inLen, jbyteArray in, jbyteArray out, jint paddingType) { +- return RSACryptNotOAEPPadding(env, keyAddress, inLen, in, out, paddingType, RSA_private_encrypt, ++ return RSACryptNotOAEPPadding(env, keyAddress, inLen, in, out, paddingType, SSL_UTILS_RSA_private_encrypt, + "RSA_private_encrypt"); + } + +@@ -405,7 +408,7 @@ JNIEXPORT jint JNICALL Java_org_openeuler_security_openssl_KAERSACipher_nativeRS + */ + JNIEXPORT jint JNICALL Java_org_openeuler_security_openssl_KAERSACipher_nativeRSAPrivateDecrypt(JNIEnv* env, + jclass cls, jlong keyAddress, jint inLen, jbyteArray in, jbyteArray out, jint paddingType) { +- return RSACryptNotOAEPPadding(env, keyAddress, inLen, in, out, paddingType, RSA_private_decrypt, ++ return RSACryptNotOAEPPadding(env, keyAddress, inLen, in, out, paddingType, SSL_UTILS_RSA_private_decrypt, + "RSA_private_decrypt"); + } + +@@ -416,7 +419,7 @@ JNIEXPORT jint JNICALL Java_org_openeuler_security_openssl_KAERSACipher_nativeRS + */ + JNIEXPORT jint JNICALL Java_org_openeuler_security_openssl_KAERSACipher_nativeRSAPublicEncrypt(JNIEnv* env, + jclass cls, jlong keyAddress, jint inLen, jbyteArray in, jbyteArray out, jint paddingType) { +- return RSACryptNotOAEPPadding(env, keyAddress, inLen, in, out, paddingType, RSA_public_encrypt, ++ return RSACryptNotOAEPPadding(env, keyAddress, inLen, in, out, paddingType, SSL_UTILS_RSA_public_encrypt, + "RSA_public_encrypt"); + } + +@@ -427,7 +430,7 @@ JNIEXPORT jint JNICALL Java_org_openeuler_security_openssl_KAERSACipher_nativeRS + */ + JNIEXPORT jint JNICALL Java_org_openeuler_security_openssl_KAERSACipher_nativeRSAPublicDecrypt(JNIEnv* env, + jclass cls, jlong keyAddress, jint inLen, jbyteArray in, jbyteArray out, jint paddingType) { +- return RSACryptNotOAEPPadding(env, keyAddress, inLen, in, out, paddingType, RSA_public_decrypt, ++ return RSACryptNotOAEPPadding(env, keyAddress, inLen, in, out, paddingType, SSL_UTILS_RSA_public_decrypt, + "RSA_public_decrypt"); + } + +@@ -440,8 +443,8 @@ JNIEXPORT jint JNICALL Java_org_openeuler_security_openssl_KAERSACipher_nativeRS + jclass cls, jlong keyAddress, jint inLen, jbyteArray in, jbyteArray out, + jint paddingType, jstring oaepMdAlgo, jstring mgf1MdAlgo, jbyteArray label) { + return RSACryptOAEPPadding(env, keyAddress, inLen, in, out, paddingType, oaepMdAlgo, mgf1MdAlgo, label, +- EVP_PKEY_encrypt_init, "EVP_PKEY_encrypt_init", +- EVP_PKEY_encrypt, "EVP_PKEY_encrypt"); ++ SSL_UTILS_EVP_PKEY_encrypt_init, "EVP_PKEY_encrypt_init", ++ SSL_UTILS_EVP_PKEY_encrypt, "EVP_PKEY_encrypt"); + } + + /* +@@ -453,8 +456,8 @@ JNIEXPORT jint JNICALL Java_org_openeuler_security_openssl_KAERSACipher_nativeRS + jclass cls, jlong keyAddress, jint inLen, jbyteArray in, jbyteArray out, jint paddingType, + 
jstring oaepMdAlgo, jstring mgf1MdAlgo, jbyteArray label) { + return RSACryptOAEPPadding(env, keyAddress, inLen, in, out, paddingType, oaepMdAlgo, mgf1MdAlgo, label, +- EVP_PKEY_decrypt_init, "EVP_PKEY_decrypt_init", +- EVP_PKEY_decrypt, "EVP_PKEY_decrypt"); ++ SSL_UTILS_EVP_PKEY_decrypt_init, "EVP_PKEY_decrypt_init", ++ SSL_UTILS_EVP_PKEY_decrypt, "EVP_PKEY_decrypt"); + } + + /* +@@ -466,6 +469,6 @@ JNIEXPORT void JNICALL Java_org_openeuler_security_openssl_KAERSACipher_nativeFr + jclass cls, jlong keyAddress) { + EVP_PKEY* pkey = (EVP_PKEY*) keyAddress; + if (pkey != NULL) { +- EVP_PKEY_free(pkey); ++ SSL_UTILS_EVP_PKEY_free(pkey); + } + } +\ No newline at end of file +diff --git a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_cipher_sm2.c b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_cipher_sm2.c +new file mode 100644 +index 000000000..915a88dda +--- /dev/null ++++ b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_cipher_sm2.c +@@ -0,0 +1,370 @@ ++/* ++ * Copyright (c) 2024, Huawei Technologies Co., Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ */ ++ ++#include <string.h> ++#include <stdlib.h> ++#include <stdbool.h> ++#include "kae_util.h" ++#include "kae_log.h" ++#include "kae_exception.h" ++#include "ssl_utils.h" ++#include "org_openeuler_security_openssl_KAESM2Cipher.h" ++ ++static void FreeSM2KeyParam(BIGNUM* bn_x, BIGNUM* bn_y, BIGNUM* bn_key, EC_GROUP* group, EC_POINT* pt) ++{ ++ if (bn_x != NULL) { ++ KAE_ReleaseBigNumFromByteArray(bn_x); ++ } ++ if (bn_y != NULL) { ++ KAE_ReleaseBigNumFromByteArray(bn_y); ++ } ++ if (bn_key != NULL) { ++ KAE_ReleaseBigNumFromByteArray_Clear(bn_key); ++ } ++ if (group != NULL) { ++ SSL_UTILS_EC_GROUP_free(group); ++ } ++ if (pt != NULL) { ++ SSL_UTILS_EC_POINT_free(pt); ++ } ++} ++ ++/* ++ * SM2 encrypt or decrypt, following the steps below ++ */ ++static jbyteArray SM2_Crypt(JNIEnv *env, jlong keyAddress, jbyteArray inArr, jint inLen, bool isEncrypt) { ++ unsigned char* inbytes = NULL; ++ unsigned char* outbytes = NULL; ++ size_t outLen = 0; ++ jbyteArray outArr = NULL; ++ EVP_PKEY* pkey = NULL; ++ EVP_PKEY_CTX* ctx = NULL; ++ ENGINE* kaeEngine = NULL; ++ ++ // init Engine ++ kaeEngine = GetEngineByAlgorithmIndex(SM2_INDEX); ++ KAE_TRACE("SM2_Crypt: kaeEngine => %p", kaeEngine); ++ ++ if ((inbytes = (unsigned char*)malloc(inLen)) == NULL) { ++ KAE_ThrowOOMException(env, "malloc failed"); ++ goto cleanup; ++ } ++ memset(inbytes, 0, inLen); ++ ++ // get inArr bytes ++ (*env)->GetByteArrayRegion(env, inArr, 0, inLen, (jbyte*)inbytes); ++ if (inbytes == NULL) { ++ KAE_ThrowNullPointerException(env, "GetByteArrayRegion failed"); ++ goto cleanup; ++ } ++ ++ pkey = (EVP_PKEY*) keyAddress; ++ ++ // new ctx ++ if ((ctx = SSL_UTILS_EVP_PKEY_CTX_new(pkey, kaeEngine)) == NULL) { ++ KAE_ThrowFromOpenssl(env, "EVP_PKEY_CTX_new", KAE_ThrowInvalidKeyException); ++ goto cleanup; ++ } ++ ++ // sm2 encrypt/decrypt init ++ if (isEncrypt) { ++ // init encrypt ctx ++ if (SSL_UTILS_EVP_PKEY_encrypt_init(ctx) <= 0) { ++ KAE_ThrowFromOpenssl(env, "EVP_PKEY_encrypt_init", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ ++ // calculate outArr length ++ if (SSL_UTILS_EVP_PKEY_encrypt(ctx, NULL, &outLen, inbytes, inLen) <= 0) { ++ KAE_ThrowFromOpenssl(env, "EVP_PKEY_encrypt failed. calculated outArr length", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ } else { ++ // init decrypt ctx ++ if (SSL_UTILS_EVP_PKEY_decrypt_init(ctx) <= 0) { ++ KAE_ThrowFromOpenssl(env, "EVP_PKEY_decrypt_init", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ ++ // calculate outArr length ++ if (SSL_UTILS_EVP_PKEY_decrypt(ctx, NULL, &outLen, inbytes, inLen) <= 0) { ++ KAE_ThrowFromOpenssl(env, "EVP_PKEY_decrypt failed. calculated outArr length", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ } ++ ++ if ((outbytes = (unsigned char*)malloc(outLen)) == NULL) { ++ KAE_ThrowOOMException(env, "malloc failed"); ++ goto cleanup; ++ } ++ memset(outbytes, 0, outLen); ++ ++ if (isEncrypt) { ++ // sm2 encrypt dofinal ++ if (SSL_UTILS_EVP_PKEY_encrypt(ctx, outbytes, &outLen, inbytes, inLen) <= 0) { ++ KAE_ThrowFromOpenssl(env, "EVP_PKEY_encrypt failed. sm2 encrypt dofinal", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ } else { ++ // sm2 decrypt dofinal ++ if (SSL_UTILS_EVP_PKEY_decrypt(ctx, outbytes, &outLen, inbytes, inLen) <= 0) { ++ KAE_ThrowFromOpenssl(env, "EVP_PKEY_decrypt failed. 
sm2 decrypt dofinal", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ } ++ KAE_TRACE("SM2_Crypt: finished"); ++ ++ if ((outArr = (*env)->NewByteArray(env, outLen)) == NULL) { ++ KAE_ThrowNullPointerException(env, "NewByteArray failed"); ++ goto cleanup; ++ } ++ (*env)->SetByteArrayRegion(env, outArr, 0, outLen, (jbyte*)outbytes); ++cleanup: ++ if (inbytes != NULL) { ++ memset(inbytes, 0, inLen); ++ free(inbytes); ++ } ++ if (outbytes != NULL) { ++ memset(outbytes, 0, outLen); ++ free(outbytes); ++ } ++ SSL_UTILS_EVP_PKEY_CTX_free(ctx); ++ return outArr; ++} ++ ++/* ++ * Class: KAESM2Cipher ++ * Method: nativeCreateSM2PublicKey ++ * Signature: ([B[B)J ++ */ ++JNIEXPORT jlong JNICALL Java_org_openeuler_security_openssl_KAESM2Cipher_nativeCreateSM2PublicKey(JNIEnv *env, ++ jclass cls, jbyteArray xArr, jbyteArray yArr) { ++ BIGNUM* bn_x = NULL; ++ BIGNUM* bn_y = NULL; ++ EC_GROUP* group = NULL; ++ EC_POINT* pubkey_pt = NULL; ++ EC_KEY* eckey = NULL; ++ EVP_PKEY* pkey = NULL; ++ ++ // convert to big num ++ if ((bn_x = KAE_GetBigNumFromByteArray(env, xArr)) == NULL || ++ (bn_y = KAE_GetBigNumFromByteArray(env, yArr)) == NULL) { ++ goto cleanup; ++ } ++ ++ // new EC_GROUP by curve_name ++ if ((group = SSL_UTILS_EC_GROUP_new_by_curve_name(NID_sm2)) == NULL) { ++ KAE_ThrowFromOpenssl(env, "EC_GROUP_new_by_curve_name", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ ++ // new EC_POINT ++ if((pubkey_pt = SSL_UTILS_EC_POINT_new(group)) == NULL) { ++ KAE_ThrowFromOpenssl(env, "EC_POINT_new", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ ++ // set the x and y coordinates ++ if(SSL_UTILS_EC_POINT_set_affine_coordinates_GFp(group, pubkey_pt, bn_x, bn_y, NULL) <= 0) { ++ KAE_ThrowFromOpenssl(env, "EC_POINT_set_affine_coordinates_GFp", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ ++ // new EC_KEY ++ if ((eckey = SSL_UTILS_EC_KEY_new_by_curve_name(NID_sm2)) == NULL) { ++ KAE_ThrowFromOpenssl(env, "EC_KEY_new_by_curve_name", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ // set ec_key by publickey_point ++ if (SSL_UTILS_EC_KEY_set_public_key(eckey ,pubkey_pt) <= 0) { ++ KAE_ThrowFromOpenssl(env, "EC_KEY_set_public_key", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ ++ // new EVP_PKEY ++ if ((pkey = SSL_UTILS_EVP_PKEY_new()) == NULL) { ++ KAE_ThrowFromOpenssl(env, "EVP_PKEY_new", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ ++ // set the pkey by the ec_key ++ // Changed from macro, "EVP_PKEY_assign_EC_KEY(pkey,eckey)" is "EVP_PKEY_assign((pkey),EVP_PKEY_EC, (char *)(eckey))" in openssl 1 and 3 ++ if (SSL_UTILS_EVP_PKEY_assign_EC_KEY(pkey , eckey) <= 0) { ++ KAE_ThrowFromOpenssl(env, "EVP_PKEY_assign_EC_KEY", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ ++ // set the alias type of the key ++ // EVP_PKEY_set_alias_type is removed since openssl 3 ++ if (SSL_UTILS_EVP_PKEY_set_alias_type(pkey, EVP_PKEY_SM2) <= 0) { ++ KAE_ThrowFromOpenssl(env, "EVP_PKEY_set_alias_type", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ ++ FreeSM2KeyParam(bn_x, bn_y, NULL, group, pubkey_pt); ++ KAE_TRACE("KAESM2Cipher_nativeCreateSM2PublicKey: finished"); ++ return (jlong)pkey; ++cleanup: ++ FreeSM2KeyParam(bn_x, bn_y, NULL, group, pubkey_pt); ++ if (eckey != NULL) { ++ SSL_UTILS_EC_KEY_free(eckey); ++ } ++ if (pkey != NULL) { ++ SSL_UTILS_EVP_PKEY_free(pkey); ++ } ++ return 0; ++} ++ ++/* ++ * Class: KAESM2Cipher ++ * Method: nativeCreateSM2PrivateKey ++ * Signature: ([B[B)J ++ */ ++JNIEXPORT jlong JNICALL 
Java_org_openeuler_security_openssl_KAESM2Cipher_nativeCreateSM2PrivateKey(JNIEnv *env, ++ jclass cls, jbyteArray keyArr, jboolean sign) { ++ BIGNUM* bn_key = NULL; ++ EC_KEY* eckey = NULL; ++ EVP_PKEY* pkey = NULL; ++ EC_GROUP* group = NULL; ++ EC_POINT* pt = NULL; ++ ++ // convert to big num ++ if ((bn_key = KAE_GetBigNumFromByteArray(env, keyArr)) == NULL) { ++ goto cleanup; ++ } ++ ++ // new EC_KEY ++ if ((eckey = SSL_UTILS_EC_KEY_new_by_curve_name(NID_sm2)) == NULL) { ++ KAE_ThrowFromOpenssl(env, "EC_KEY_new_by_curve_name", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ ++ // set the ec_key by bn_key ++ if ((SSL_UTILS_EC_KEY_set_private_key(eckey ,bn_key)) <= 0) { ++ KAE_ThrowFromOpenssl(env, "EC_KEY_set_private_key", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ ++ // new group by curve_name ++ if ((group = SSL_UTILS_EC_GROUP_new_by_curve_name(NID_sm2)) == NULL) { ++ KAE_ThrowFromOpenssl(env, "EC_GROUP_new_by_curve_name", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ ++ if (sign) { ++ // new EC_POINT ++ if ((pt = SSL_UTILS_EC_POINT_new(group)) == NULL) { ++ KAE_ThrowFromOpenssl(env, "EC_POINT_new", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ ++ // calculation of EC_POINT by EC_POINT_mul functions ++ if (SSL_UTILS_EC_POINT_mul(group, pt, bn_key, NULL, NULL, NULL) <= 0) { ++ KAE_ThrowFromOpenssl(env, "EC_POINT_mul", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ ++ // set ec_key by ec_point ++ if (SSL_UTILS_EC_KEY_set_public_key(eckey ,pt) <= 0) { ++ KAE_ThrowFromOpenssl(env, "EC_KEY_set_public_key", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ } ++ ++ // new EVP_PKEY ++ if ((pkey = SSL_UTILS_EVP_PKEY_new()) == NULL) { ++ KAE_ThrowFromOpenssl(env, "EVP_PKEY_new", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ ++ // set the pkey by the ec_key ++ // Changed from macro, "EVP_PKEY_assign_EC_KEY(pkey,eckey)" is "EVP_PKEY_assign((pkey),EVP_PKEY_EC, (char *)(eckey))" in openssl 1 and 3 ++ if (SSL_UTILS_EVP_PKEY_assign_EC_KEY(pkey , eckey) <= 0) { ++ KAE_ThrowFromOpenssl(env, "EVP_PKEY_assign_EC_KEY", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ ++ // set the alias type of the key ++ // EVP_PKEY_set_alias_type is removed since openssl 3 ++ if (SSL_UTILS_EVP_PKEY_set_alias_type(pkey, EVP_PKEY_SM2) <= 0) { ++ KAE_ThrowFromOpenssl(env, "EVP_PKEY_set_alias_type", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ ++ FreeSM2KeyParam(NULL, NULL, bn_key, group, pt); ++ KAE_TRACE("KAESM2Cipher_nativeCreateSM2PrivateKey: finished"); ++ return (jlong)pkey; ++cleanup: ++ FreeSM2KeyParam(NULL, NULL, bn_key, group, pt); ++ if (eckey != NULL) { ++ SSL_UTILS_EC_KEY_free(eckey); ++ } ++ if (pkey != NULL) { ++ SSL_UTILS_EVP_PKEY_free(pkey); ++ } ++ return 0; ++} ++ ++/* ++ * Class: KAESM2Cipher ++ * Method: nativeFreeKey ++ * Signature: (J)V ++ */ ++JNIEXPORT void JNICALL Java_org_openeuler_security_openssl_KAESM2Cipher_nativeFreeKey(JNIEnv *env, ++ jclass cls, jlong keyAddress) { ++ KAE_TRACE("KAESM2Cipher_nativeFreeKey(keyAddress = %p)", keyAddress); ++ ++ if(keyAddress == 0){ ++ KAE_ThrowInvalidKeyException(env, "nativeFreeKey failed. 
keyAddress is Invalid"); ++ return; ++ } ++ EVP_PKEY* pkey = (EVP_PKEY*) keyAddress; ++ if (pkey != NULL) { ++ SSL_UTILS_EVP_PKEY_free(pkey); ++ } ++ ++ KAE_TRACE("KAESM2Cipher_nativeFreeKey: finished"); ++} ++ ++/* ++ * Class: KAESM2Cipher ++ * Method: nativeSM2Encrypt ++ * Signature: (J[BI)[B ++ */ ++JNIEXPORT jbyteArray JNICALL Java_org_openeuler_security_openssl_KAESM2Cipher_nativeSM2Encrypt(JNIEnv *env, ++ jclass cls, jlong keyAddress, jbyteArray inArr, jint inLen) { ++ KAE_TRACE("KAESM2Cipher_nativeSM2Encrypt(keyAddress = %p, inArr = %p, inLen = %d)", keyAddress, inArr, inLen); ++ return SM2_Crypt(env, keyAddress, inArr, inLen, true); ++} ++ ++/* ++ * Class: KAESM2Cipher ++ * Method: nativeSM2Decrypt ++ * Signature: (J[BI)[B ++ */ ++JNIEXPORT jbyteArray JNICALL Java_org_openeuler_security_openssl_KAESM2Cipher_nativeSM2Decrypt(JNIEnv *env, ++ jclass cls, jlong keyAddress, jbyteArray inArr, jint inLen) { ++ KAE_TRACE("KAESM2Cipher_nativeSM2Decrypt(keyAddress = %p, inArr = %p, inLen = %d)", keyAddress, inArr, inLen); ++ return SM2_Crypt(env, keyAddress, inArr, inLen, false); ++} +\ No newline at end of file +diff --git a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_digest.c b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_digest.c +index 82bf477da..bd9d4f36e 100644 +--- a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_digest.c ++++ b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_digest.c +@@ -27,6 +27,7 @@ + #include "kae_exception.h" + #include "kae_log.h" + #include "kae_util.h" ++#include "ssl_utils.h" + #include "org_openeuler_security_openssl_KAEDigest.h" + + #define DIGEST_STACK_SIZE 1024 +@@ -53,7 +54,7 @@ Java_org_openeuler_security_openssl_KAEDigest_nativeInit(JNIEnv *env, jclass cls + const char* algo_utf = (*env)->GetStringUTFChars(env, algorithmName, 0); + kaeEngine = GetDigestEngineByAlgorithmName(algo_utf); + KAE_TRACE("KAEDigest_nativeInit: kaeEngine => %p", kaeEngine); +- EVP_MD* md = (EVP_MD*) EVP_get_digestbyname(algo_utf); ++ EVP_MD* md = (EVP_MD*) SSL_UTILS_EVP_get_digestbyname(algo_utf); + (*env)->ReleaseStringUTFChars(env, algorithmName, algo_utf); + if (md == NULL) { + KAE_TRACE("%s not supported", algo_utf); +@@ -61,7 +62,8 @@ Java_org_openeuler_security_openssl_KAEDigest_nativeInit(JNIEnv *env, jclass cls + } + KAE_TRACE("KAEDigest_nativeInit: create md => %p", md); + +- ctx = EVP_MD_CTX_create(); ++ // change from macro, "# define EVP_MD_CTX_create() EVP_MD_CTX_new()" in openssl 1 and 3 ++ ctx = SSL_UTILS_EVP_MD_CTX_create(); + if (ctx == NULL) { + KAE_ThrowOOMException(env, "create EVP_MD_CTX fail"); + return 0; +@@ -69,7 +71,7 @@ Java_org_openeuler_security_openssl_KAEDigest_nativeInit(JNIEnv *env, jclass cls + KAE_TRACE("KAEDigest_nativeInit: create ctx => %p", ctx); + + // EVP_DigestInit_ex +- int result_code = EVP_DigestInit_ex(ctx, md, kaeEngine); ++ int result_code = SSL_UTILS_EVP_DigestInit_ex(ctx, md, kaeEngine); + if (result_code == 0) { + KAE_ThrowFromOpenssl(env, "EVP_DigestInit_ex failed", KAE_ThrowRuntimeException); + goto cleanup; +@@ -80,7 +82,8 @@ Java_org_openeuler_security_openssl_KAEDigest_nativeInit(JNIEnv *env, jclass cls + return (jlong) ctx; + + cleanup: +- EVP_MD_CTX_destroy(ctx); ++ // changed from macro, "# define EVP_MD_CTX_destroy(ctx) EVP_MD_CTX_free((ctx))" in openssl 1 and 3 ++ SSL_UTILS_EVP_MD_CTX_destroy(ctx); + return 0; + } + +@@ -105,7 +108,7 @@ 
Java_org_openeuler_security_openssl_KAEDigest_nativeUpdate(JNIEnv *env, jclass c + if (in_size <= DIGEST_STACK_SIZE) { // allocation on the stack + jbyte buffer[DIGEST_STACK_SIZE]; + (*env)->GetByteArrayRegion(env, input, offset, inLen, buffer); +- result_code = EVP_DigestUpdate(ctx, buffer, inLen); ++ result_code = SSL_UTILS_EVP_DigestUpdate(ctx, buffer, inLen); + } else { // data chunk + jint remaining = in_size; + jint buf_size = (remaining >= DIGEST_CHUNK_SIZE) ? DIGEST_CHUNK_SIZE : remaining; +@@ -117,7 +120,7 @@ Java_org_openeuler_security_openssl_KAEDigest_nativeUpdate(JNIEnv *env, jclass c + while (remaining > 0) { + jint chunk_size = (remaining >= buf_size) ? buf_size : remaining; + (*env)->GetByteArrayRegion(env, input, in_offset, chunk_size, buffer); +- result_code = EVP_DigestUpdate(ctx, buffer, chunk_size); ++ result_code = SSL_UTILS_EVP_DigestUpdate(ctx, buffer, chunk_size); + if (!result_code) { + break; + } +@@ -163,7 +166,7 @@ Java_org_openeuler_security_openssl_KAEDigest_nativeDigest(JNIEnv *env, jclass c + } + + // EVP_DigestFinal_ex +- int result_code = EVP_DigestFinal_ex(ctx, md, &bytesWritten); ++ int result_code = SSL_UTILS_EVP_DigestFinal_ex(ctx, md, &bytesWritten); + if (result_code == 0) { + KAE_ThrowFromOpenssl(env, "EVP_DigestFinal_ex failed", KAE_ThrowRuntimeException); + goto cleanup; +@@ -193,14 +196,15 @@ Java_org_openeuler_security_openssl_KAEDigest_nativeClone(JNIEnv *env, jclass cl + return 0; + } + +- EVP_MD_CTX* ctxCopy = EVP_MD_CTX_create(); ++ // change from macro, "# define EVP_MD_CTX_create() EVP_MD_CTX_new()" in openssl 1 and 3 ++ EVP_MD_CTX* ctxCopy = SSL_UTILS_EVP_MD_CTX_create(); + if (ctxCopy == NULL) { + KAE_ThrowOOMException(env, "create EVP_MD_CTX fail"); + return 0; + } + KAE_TRACE("KAEDigest_nativeClone: create ctxCopy => %p", ctxCopy); + +- int result_code = EVP_MD_CTX_copy_ex(ctxCopy, ctx); ++ int result_code = SSL_UTILS_EVP_MD_CTX_copy_ex(ctxCopy, ctx); + if (result_code == 0) { + KAE_ThrowFromOpenssl(env, "EVP_MD_CTX_copy_ex failed", KAE_ThrowRuntimeException); + goto cleanup; +@@ -210,7 +214,8 @@ Java_org_openeuler_security_openssl_KAEDigest_nativeClone(JNIEnv *env, jclass cl + return (jlong) ctxCopy; + + cleanup: +- EVP_MD_CTX_destroy(ctxCopy); ++ // changed from macro, "# define EVP_MD_CTX_destroy(ctx) EVP_MD_CTX_free((ctx))" in openssl 1 and 3 ++ SSL_UTILS_EVP_MD_CTX_destroy(ctxCopy); + return 0; + } + +@@ -225,7 +230,8 @@ Java_org_openeuler_security_openssl_KAEDigest_nativeFree(JNIEnv *env, jclass cls + EVP_MD_CTX* ctx = (EVP_MD_CTX*) ctxAddress; + KAE_TRACE("KAEDigest_nativeFree(ctx = %p)", ctx); + if (ctx != NULL) { +- EVP_MD_CTX_destroy(ctx); ++ // changed from macro, "# define EVP_MD_CTX_destroy(ctx) EVP_MD_CTX_free((ctx))" in openssl 1 and 3 ++ SSL_UTILS_EVP_MD_CTX_destroy(ctx); + } + + KAE_TRACE("KAEDigest_nativeFree: finished"); +diff --git a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_exception.c b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_exception.c +index c0579ebf4..277da3208 100644 +--- a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_exception.c ++++ b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_exception.c +@@ -27,6 +27,7 @@ + #include "kae_log.h" + #include "kae_exception.h" + #include "openssl_ad.h" ++#include "ssl_utils.h" + + void KAE_ThrowByName(JNIEnv* env, const char* name, const char* msg) { + jclass cls = (*env)->FindClass(env, name); +@@ -77,6 +78,10 
@@ void KAE_ThrowBadPaddingException(JNIEnv* env, const char* msg) { + KAE_ThrowByName(env, "javax/crypto/BadPaddingException", msg); + } + ++void KAE_ThrowExceptionInInitializerError(JNIEnv* env, const char* msg) { ++ KAE_ThrowByName(env, "java/lang/ExceptionInInitializerError", msg); ++} ++ + void KAE_ThrowInvalidKeyException(JNIEnv* env, const char* msg) { + KAE_ThrowByName(env, "java/security/InvalidKeyException", msg); + } +@@ -93,7 +98,7 @@ void KAE_ThrowFromOpenssl(JNIEnv* env, const char* msg, void (* defaultException + unsigned long err; + static const int ESTRING_SIZE = 256; + +- err = ERR_get_error_line_data(&file, &line, &data, &flags); ++ err = SSL_UTILS_ERR_get_error_line_data(&file, &line, &data, &flags); + if (err == 0) { + defaultException(env, msg); + return; +@@ -101,10 +106,11 @@ void KAE_ThrowFromOpenssl(JNIEnv* env, const char* msg, void (* defaultException + + if (!(*env)->ExceptionCheck(env)) { + char estring[ESTRING_SIZE]; +- ERR_error_string_n(err, estring, ESTRING_SIZE); +- int lib = ERR_GET_LIB(err); +- int func = ERR_GET_FUNC(err); +- int reason = ERR_GET_REASON(err); ++ SSL_UTILS_ERR_error_string_n(err, estring, ESTRING_SIZE); ++ // Those functions below are macros ++ int lib = SSL_UTILS_ERR_GET_LIB(err); ++ int func = SSL_UTILS_ERR_GET_FUNC(err); ++ int reason = SSL_UTILS_ERR_GET_REASON(err); + KAE_TRACE("OpenSSL error in %s: err=%lx, lib=%x, func=%x, reason=%x, file=%s, line=%d, estring=%s, data=%s", msg, err, + lib, func, reason, file, line, estring, (flags & ERR_TXT_STRING) ? data : "(no data)"); + // Ignore exceptions in RSA_verify_PKCS1_PSS_mgf1 function +@@ -119,7 +125,7 @@ void KAE_ThrowFromOpenssl(JNIEnv* env, const char* msg, void (* defaultException + } + } + +- ERR_clear_error(); ++ SSL_UTILS_ERR_clear_error(); + } + + void KAE_ThrowAEADBadTagException(JNIEnv *env, const char *msg) { +diff --git a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_exception.h b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_exception.h +index f33f993e4..b5b328fd3 100644 +--- a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_exception.h ++++ b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_exception.h +@@ -43,6 +43,8 @@ void KAE_ThrowRuntimeException(JNIEnv* env, const char* msg); + + void KAE_ThrowBadPaddingException(JNIEnv* env, const char* msg); + ++void KAE_ThrowExceptionInInitializerError(JNIEnv* env, const char* msg); ++ + /* Throw InvalidKeyException */ + void KAE_ThrowInvalidKeyException(JNIEnv* env, const char* msg); + +diff --git a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_hmac.c b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_hmac.c +index ad7b86ecd..440219321 100644 +--- a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_hmac.c ++++ b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_hmac.c +@@ -27,6 +27,7 @@ + #include "kae_exception.h" + #include "kae_log.h" + #include "kae_util.h" ++#include "ssl_utils.h" + + static const EVP_MD* EVPGetDigestByName(JNIEnv* env, const char* algo) + { +@@ -38,17 +39,17 @@ static const EVP_MD* EVPGetDigestByName(JNIEnv* env, const char* algo) + static const EVP_MD* sha512 = NULL; + + if (strcasecmp(algo, "md5") == 0) { +- return md5 == NULL ? md5 = EVP_get_digestbyname(algo) : md5; ++ return md5 == NULL ? 
md5 = SSL_UTILS_EVP_get_digestbyname(algo) : md5; + } else if (strcasecmp(algo, "sha1") == 0) { +- return sha1 == NULL ? sha1 = EVP_get_digestbyname(algo) : sha1; ++ return sha1 == NULL ? sha1 = SSL_UTILS_EVP_get_digestbyname(algo) : sha1; + } else if (strcasecmp(algo, "sha224") == 0) { +- return sha224 == NULL ? sha224 = EVP_get_digestbyname(algo) : sha224; ++ return sha224 == NULL ? sha224 = SSL_UTILS_EVP_get_digestbyname(algo) : sha224; + } else if (strcasecmp(algo, "sha256") == 0) { +- return sha256 == NULL ? sha256 = EVP_get_digestbyname(algo) : sha256; ++ return sha256 == NULL ? sha256 = SSL_UTILS_EVP_get_digestbyname(algo) : sha256; + } else if (strcasecmp(algo, "sha384") == 0) { +- return sha384 == NULL ? sha384 = EVP_get_digestbyname(algo) : sha384; ++ return sha384 == NULL ? sha384 = SSL_UTILS_EVP_get_digestbyname(algo) : sha384; + } else if (strcasecmp(algo, "sha512") == 0) { +- return sha512 == NULL ? sha512 = EVP_get_digestbyname(algo) : sha512; ++ return sha512 == NULL ? sha512 = SSL_UTILS_EVP_get_digestbyname(algo) : sha512; + } else { + KAE_ThrowRuntimeException(env, "EVPGetDigestByName error"); + return 0; +@@ -96,14 +97,14 @@ JNIEXPORT jlong JNICALL Java_org_openeuler_security_openssl_KAEHMac_nativeInit + (*env)->GetByteArrayRegion(env, key, 0, key_len, key_buffer); + + // create a hmac context +- ctx = HMAC_CTX_new(); ++ ctx = SSL_UTILS_HMAC_CTX_new(); + if (ctx == NULL) { + KAE_ThrowRuntimeException(env, "Hmac_CTX_new invoked failed"); + goto cleanup; + } + + // init hmac context with sc_key and evp_md +- int result_code = HMAC_Init_ex(ctx, key_buffer, key_len, md, kaeEngine); ++ int result_code = SSL_UTILS_HMAC_Init_ex(ctx, key_buffer, key_len, md, kaeEngine); + if (result_code == 0) { + KAE_ThrowRuntimeException(env, "Hmac_Init_ex invoked failed"); + goto cleanup; +@@ -113,7 +114,7 @@ JNIEXPORT jlong JNICALL Java_org_openeuler_security_openssl_KAEHMac_nativeInit + + cleanup: + free(key_buffer); +- HMAC_CTX_free(ctx); ++ SSL_UTILS_HMAC_CTX_free(ctx); + return 0; + } + +@@ -146,7 +147,7 @@ JNIEXPORT void JNICALL Java_org_openeuler_security_openssl_KAEHMac_nativeUpdate + return; + } + (*env)->GetByteArrayRegion(env, input, in_offset, in_len, buffer); +- if (!HMAC_Update(ctx, (unsigned char*) buffer, in_len)) { ++ if (!SSL_UTILS_HMAC_Update(ctx, (unsigned char*) buffer, in_len)) { + KAE_ThrowRuntimeException(env, "Hmac_Update invoked failed"); + } + free(buffer); +@@ -179,7 +180,7 @@ JNIEXPORT jint JNICALL Java_org_openeuler_security_openssl_KAEHMac_nativeFinal + } + // do final + unsigned int bytesWritten = 0; +- int result_code = HMAC_Final(ctx, (unsigned char*) temp_result, &bytesWritten); ++ int result_code = SSL_UTILS_HMAC_Final(ctx, (unsigned char*) temp_result, &bytesWritten); + if (result_code == 0) { + KAE_ThrowRuntimeException(env, "Hmac_Final invoked failed"); + goto cleanup; +@@ -203,6 +204,6 @@ JNIEXPORT void JNICALL Java_org_openeuler_security_openssl_KAEHMac_nativeFree + (JNIEnv* env, jclass cls, jlong hmac_ctx) { + HMAC_CTX* ctx = (HMAC_CTX*) hmac_ctx; + if (ctx != NULL) { +- HMAC_CTX_free(ctx); ++ SSL_UTILS_HMAC_CTX_free(ctx); + } + } +diff --git a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_keyagreement_dh.c b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_keyagreement_dh.c +index e9dfa4094..f9e50fb85 100644 +--- a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_keyagreement_dh.c ++++ 
b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_keyagreement_dh.c +@@ -30,6 +30,7 @@ + #include "kae_util.h" + #include "kae_exception.h" + #include "kae_log.h" ++#include "ssl_utils.h" + #include "org_openeuler_security_openssl_KAEDHKeyAgreement.h" + + +@@ -63,7 +64,7 @@ JNIEXPORT jbyteArray JNICALL Java_org_openeuler_security_openssl_KAEDHKeyAgreeme + } + memset(secret, 0, pSizeInByte); + +- if ((dh = DH_new_method(kaeEngine)) == NULL) { ++ if ((dh = SSL_UTILS_DH_new_method(kaeEngine)) == NULL) { + KAE_ThrowOOMException(env, "Allocate DH failed in nativeComputeKey."); + goto cleanup; + } +@@ -88,29 +89,29 @@ JNIEXPORT jbyteArray JNICALL Java_org_openeuler_security_openssl_KAEDHKeyAgreeme + goto cleanup; + } + +- if ((computeKeyRetBn = BN_new()) == NULL) { ++ if ((computeKeyRetBn = SSL_UTILS_BN_new()) == NULL) { + KAE_ThrowOOMException(env, "Allocate BN failed."); + goto cleanup; + } + +- if (!DH_set0_pqg(dh, BN_dup(p_bn), NULL, BN_dup(g_bn))) { ++ if (!SSL_UTILS_DH_set0_pqg(dh, SSL_UTILS_BN_dup(p_bn), NULL, SSL_UTILS_BN_dup(g_bn))) { + KAE_ThrowRuntimeException(env, "DH_set0_pqg failed."); + goto cleanup; + } + +- if (!DH_set0_key(dh, NULL, BN_dup(x_bn))) { ++ if (!SSL_UTILS_DH_set0_key(dh, NULL, SSL_UTILS_BN_dup(x_bn))) { + KAE_ThrowRuntimeException(env, "DH_set0_key failed."); + goto cleanup; + } + +- computekeyLength = DH_compute_key(secret, y_bn, dh); ++ computekeyLength = SSL_UTILS_DH_compute_key(secret, y_bn, dh); + + if (computekeyLength <= 0 ) { + KAE_ThrowRuntimeException(env, "DH_compute_key failed."); + goto cleanup; + } + +- BN_bin2bn(secret, computekeyLength, computeKeyRetBn); ++ SSL_UTILS_BN_bin2bn(secret, computekeyLength, computeKeyRetBn); + + retByteArray = KAE_GetByteArrayFromBigNum(env, computeKeyRetBn); + if (retByteArray == NULL) { +@@ -121,7 +122,7 @@ JNIEXPORT jbyteArray JNICALL Java_org_openeuler_security_openssl_KAEDHKeyAgreeme + + cleanup: + if (dh != NULL) +- DH_free(dh); ++ SSL_UTILS_DH_free(dh); + if (y_bn != NULL) + KAE_ReleaseBigNumFromByteArray(y_bn); + if (x_bn != NULL) +@@ -135,7 +136,7 @@ cleanup: + free(secret); + } + if (computeKeyRetBn != NULL) +- BN_free(computeKeyRetBn); ++ SSL_UTILS_BN_free(computeKeyRetBn); + + return retByteArray; + } +\ No newline at end of file +diff --git a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_keyagreement_ecdh.c b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_keyagreement_ecdh.c +index b0877a25e..49b47c3e8 100644 +--- a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_keyagreement_ecdh.c ++++ b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_keyagreement_ecdh.c +@@ -27,6 +27,7 @@ + #include "kae_log.h" + #include "kae_exception.h" + #include "kae_util.h" ++#include "ssl_utils.h" + #include "org_openeuler_security_openssl_KAEECDHKeyAgreement.h" + + static void FreeGenerateSecretParam(BIGNUM* s, BIGNUM* wX, BIGNUM* wY, +@@ -36,13 +37,13 @@ static void FreeGenerateSecretParam(BIGNUM* s, BIGNUM* wX, BIGNUM* wY, + KAE_ReleaseBigNumFromByteArray(wX); + KAE_ReleaseBigNumFromByteArray(wY); + if (pub != NULL) { +- EC_POINT_free(pub); ++ SSL_UTILS_EC_POINT_free(pub); + } + if (eckey != NULL) { +- EC_KEY_free(eckey); ++ SSL_UTILS_EC_KEY_free(eckey); + } + if (group != NULL) { +- EC_GROUP_free(group); ++ SSL_UTILS_EC_GROUP_free(group); + } + if (shareKey != NULL) { + memset(shareKey, 0, shareKeyLen); +@@ -67,13 +68,13 @@ JNIEXPORT jbyteArray 
JNICALL Java_org_openeuler_security_openssl_KAEECDHKeyAgree + jbyteArray javaBytes = NULL; + unsigned char* shareKey = NULL; + const char *curve = (*env)->GetStringUTFChars(env, curveName, 0); +- int nid = OBJ_sn2nid(curve); ++ int nid = SSL_UTILS_OBJ_sn2nid(curve); + (*env)->ReleaseStringUTFChars(env, curveName, curve); + + // Initialization of secret key. + int expectSecretLen = 0; + +- if ((nid == NID_undef) || (group = EC_GROUP_new_by_curve_name(nid)) == NULL) { ++ if ((nid == NID_undef) || (group = SSL_UTILS_EC_GROUP_new_by_curve_name(nid)) == NULL) { + goto cleanup; + } + if ((s = KAE_GetBigNumFromByteArray(env, sArr)) == NULL || (wX = KAE_GetBigNumFromByteArray(env, wXArr)) == NULL +@@ -81,21 +82,21 @@ JNIEXPORT jbyteArray JNICALL Java_org_openeuler_security_openssl_KAEECDHKeyAgree + KAE_ThrowOOMException(env, "failed to allocate BN_new"); + goto cleanup; + } +- if ((eckey = EC_KEY_new()) == NULL || !EC_KEY_set_group(eckey, group)) { ++ if ((eckey = SSL_UTILS_EC_KEY_new()) == NULL || !SSL_UTILS_EC_KEY_set_group(eckey, group)) { + goto cleanup; + } +- if ((pub = EC_POINT_new(group)) == NULL) { ++ if ((pub = SSL_UTILS_EC_POINT_new(group)) == NULL) { + goto cleanup; + } +- if (!EC_POINT_set_affine_coordinates_GFp(group, pub, wX, wY, NULL)) { ++ if (!SSL_UTILS_EC_POINT_set_affine_coordinates_GFp(group, pub, wX, wY, NULL)) { + goto cleanup; + } +- if (!EC_KEY_set_public_key(eckey, pub) || !EC_KEY_set_private_key(eckey, s)) { ++ if (!SSL_UTILS_EC_KEY_set_public_key(eckey, pub) || !SSL_UTILS_EC_KEY_set_private_key(eckey, s)) { + goto cleanup; + } + + // Get the length of secret key, in bytes. +- expectSecretLen = (EC_GROUP_get_degree(group) + 7) / 8; ++ expectSecretLen = (SSL_UTILS_EC_GROUP_get_degree(group) + 7) / 8; + + if ((shareKey = malloc(expectSecretLen)) == NULL) { + KAE_ThrowOOMException(env, "malloc error"); +@@ -104,7 +105,7 @@ JNIEXPORT jbyteArray JNICALL Java_org_openeuler_security_openssl_KAEECDHKeyAgree + memset(shareKey, 0, expectSecretLen); + + // Perform ecdh keyagreement. 
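For reference, the shared-secret sizing above follows from EC_GROUP_get_degree returning the field size in bits; (bits + 7) / 8 rounds that up to whole bytes, which is exactly the buffer ECDH_compute_key fills. A minimal standalone sketch of this agreement step against plain OpenSSL 1.1.x, without the SSL_UTILS indirection (derive_shared_secret is an illustrative name; the key objects are assumed to be populated as in the surrounding JNI code):

#include <openssl/ec.h>

/* Sketch only: derive an ECDH shared secret from our key and the peer's
 * public point. Returns the secret length on success, -1 on failure. */
static int derive_shared_secret(EC_KEY *own_key, const EC_POINT *peer_pub,
                                unsigned char *out, size_t out_cap) {
    const EC_GROUP *grp = EC_KEY_get0_group(own_key);
    int need = (EC_GROUP_get_degree(grp) + 7) / 8;  /* field bits -> bytes */
    if (need <= 0 || (size_t)need > out_cap) {
        return -1;
    }
    /* ECDH_compute_key returns the number of bytes written, or -1 on error. */
    int got = ECDH_compute_key(out, (size_t)need, peer_pub, own_key, NULL);
    return (got == need) ? got : -1;
}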
+- if (ECDH_compute_key(shareKey, expectSecretLen, pub, eckey, NULL) != expectSecretLen) { ++ if (SSL_UTILS_ECDH_compute_key(shareKey, expectSecretLen, pub, eckey, NULL) != expectSecretLen) { + goto cleanup; + } + +diff --git a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_keypairgenerator_dh.c b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_keypairgenerator_dh.c +index 6315cc6ee..0c7faba86 100644 +--- a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_keypairgenerator_dh.c ++++ b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_keypairgenerator_dh.c +@@ -28,6 +28,7 @@ + #include + #include "kae_util.h" + #include "kae_log.h" ++#include "ssl_utils.h" + #include "org_openeuler_security_openssl_KAEDHKeyPairGenerator.h" + #include "kae_exception.h" + +@@ -55,7 +56,7 @@ JNIEXPORT jobjectArray JNICALL Java_org_openeuler_security_openssl_KAEDHKeyPairG + + KAE_TRACE("Java_org_openeuler_security_openssl_KAEDHKeyPairGenerator_nativeGenerateKeyPair start !"); + +- if ((dh = DH_new_method(kaeEngine)) == NULL) { ++ if ((dh = SSL_UTILS_DH_new_method(kaeEngine)) == NULL) { + KAE_ThrowOOMException(env, "Allocate DH failed in nativeGenerateKeyPair!"); + goto cleanup; + } +@@ -70,15 +71,15 @@ JNIEXPORT jobjectArray JNICALL Java_org_openeuler_security_openssl_KAEDHKeyPairG + goto cleanup; + } + +- if (!DH_set0_pqg(dh, BN_dup(p_bn), NULL, BN_dup(g_bn))) { ++ if (!SSL_UTILS_DH_set0_pqg(dh, SSL_UTILS_BN_dup(p_bn), NULL, SSL_UTILS_BN_dup(g_bn))) { + KAE_ThrowRuntimeException(env, "DH_set0_pqg failed in nativeGenerateKeyPair."); + goto cleanup; + } + + // Return value is fixed to 1, nothing to check. +- DH_set_length(dh, lSize); ++ SSL_UTILS_DH_set_length(dh, lSize); + +- if (!DH_generate_key(dh)) { ++ if (!SSL_UTILS_DH_generate_key(dh)) { + KAE_ThrowInvalidAlgorithmParameterException(env, "DH generate key failed in nativeGenerateKeyPair."); + goto cleanup; + } +@@ -94,8 +95,8 @@ JNIEXPORT jobjectArray JNICALL Java_org_openeuler_security_openssl_KAEDHKeyPairG + } + + // Return the ptr of private key in dh. 
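The DH_get0_priv_key/DH_get0_pub_key calls below exist because DH became an opaque type in OpenSSL 1.1; the get0 accessors lend out BIGNUM pointers that remain owned by the DH object and must never be freed by the caller. A short usage sketch with plain OpenSSL 1.1.1 names (print_dh_key_sizes is an illustrative helper, not part of this patch):

#include <stdio.h>
#include <openssl/dh.h>
#include <openssl/bn.h>

/* Sketch only: inspect a generated DH key pair through the get0 accessors.
 * The returned BIGNUMs are borrowed; do not BN_free them. */
static void print_dh_key_sizes(const DH *dh) {
    const BIGNUM *priv = DH_get0_priv_key(dh);
    const BIGNUM *pub = DH_get0_pub_key(dh);
    if (priv != NULL && pub != NULL) {
        printf("priv: %d bits, pub: %d bits\n", BN_num_bits(priv), BN_num_bits(pub));
    }
}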
+- pri_key_bn = DH_get0_priv_key(dh); +- pub_key_bn = DH_get0_pub_key(dh); ++ pri_key_bn = SSL_UTILS_DH_get0_priv_key(dh); ++ pub_key_bn = SSL_UTILS_DH_get0_pub_key(dh); + + pub_key = KAE_GetByteArrayFromBigNum(env, pub_key_bn); + if (pub_key == NULL) { +@@ -116,7 +117,7 @@ JNIEXPORT jobjectArray JNICALL Java_org_openeuler_security_openssl_KAEDHKeyPairG + + cleanup: + if (dh != NULL) +- DH_free(dh); ++ SSL_UTILS_DH_free(dh); + if (p_bn != NULL) + KAE_ReleaseBigNumFromByteArray(p_bn); + if (g_bn != NULL) +diff --git a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_keypairgenerator_ec.c b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_keypairgenerator_ec.c +index 0449f8a26..63928444b 100644 +--- a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_keypairgenerator_ec.c ++++ b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_keypairgenerator_ec.c +@@ -27,6 +27,7 @@ + #include "kae_util.h" + #include "kae_exception.h" + #include "kae_log.h" ++#include "ssl_utils.h" + #include "org_openeuler_security_openssl_KAEECKeyPairGenerator.h" + + #define KAE_EC_PARAM_NUM_SIZE 7 +@@ -54,13 +55,13 @@ static void FreeECDHCurveParam(JNIEnv* env, BIGNUM* p, BIGNUM* a, BIGNUM* b, jby + jbyteArray paramA, jbyteArray paramB) + { + if (p != NULL) { +- BN_free(p); ++ SSL_UTILS_BN_free(p); + } + if (a != NULL) { +- BN_free(a); ++ SSL_UTILS_BN_free(a); + } + if (b != NULL) { +- BN_free(b); ++ SSL_UTILS_BN_free(b); + } + if (paramP != NULL) { + (*env)->DeleteLocalRef(env, paramP); +@@ -82,11 +83,11 @@ static bool SetECDHCurve(JNIEnv* env, EC_GROUP* group, jobjectArray params) + jbyteArray paramP = NULL; + jbyteArray paramA = NULL; + jbyteArray paramB = NULL; +- if ((p = BN_new()) == NULL || (a = BN_new()) == NULL || (b = BN_new()) == NULL) { ++ if ((p = SSL_UTILS_BN_new()) == NULL || (a = SSL_UTILS_BN_new()) == NULL || (b = SSL_UTILS_BN_new()) == NULL) { + KAE_ThrowOOMException(env, "failed to allocate BN_new"); + goto cleanup; + } +- if (!EC_GROUP_get_curve_GFp(group, p, a, b, NULL)) { ++ if (!SSL_UTILS_EC_GROUP_get_curve_GFp(group, p, a, b, NULL)) { + goto cleanup; + } + +@@ -123,15 +124,15 @@ static bool SetECDHPoint(JNIEnv* env, EC_GROUP* group, jobjectArray params) + const EC_POINT* generator = NULL; + jbyteArray paramX = NULL; + jbyteArray paramY = NULL; +- if ((x = BN_new()) == NULL || (y = BN_new()) == NULL) { ++ if ((x = SSL_UTILS_BN_new()) == NULL || (y = SSL_UTILS_BN_new()) == NULL) { + KAE_ThrowOOMException(env, "failed to allocate BN_new"); + goto cleanup; + } +- if ((generator = EC_GROUP_get0_generator(group)) == NULL) { ++ if ((generator = SSL_UTILS_EC_GROUP_get0_generator(group)) == NULL) { + KAE_ThrowOOMException(env, "failed to allocate ec generator"); + goto cleanup; + } +- if (!EC_POINT_get_affine_coordinates_GFp(group, generator, x, y, NULL)) { ++ if (!SSL_UTILS_EC_POINT_get_affine_coordinates_GFp(group, generator, x, y, NULL)) { + KAE_ThrowFromOpenssl(env, "EC_POINT_set_affine_coordinates_GFp", KAE_ThrowRuntimeException); + goto cleanup; + } +@@ -147,18 +148,18 @@ static bool SetECDHPoint(JNIEnv* env, EC_GROUP* group, jobjectArray params) + goto cleanup; + } + (*env)->SetObjectArrayElement(env, params, ecdhY, paramY); +- BN_free(x); +- BN_free(y); ++ SSL_UTILS_BN_free(x); ++ SSL_UTILS_BN_free(y); + (*env)->DeleteLocalRef(env, paramX); + (*env)->DeleteLocalRef(env, paramY); + return true; + + cleanup: + if (x != NULL) { +- BN_free(x); ++ 
SSL_UTILS_BN_free(x); + } + if (y != NULL) { +- BN_free(y); ++ SSL_UTILS_BN_free(y); + } + if (paramX != NULL) { + (*env)->DeleteLocalRef(env, paramX); +@@ -176,10 +177,10 @@ static bool SetECDHOrderAndCofactor(JNIEnv* env, EC_GROUP* group, jobjectArray p + BIGNUM* cofactor = NULL; + jbyteArray paramOrder = NULL; + jbyteArray paramCofactor = NULL; +- if ((order = BN_new()) == NULL || (cofactor = BN_new()) == NULL) { ++ if ((order = SSL_UTILS_BN_new()) == NULL || (cofactor = SSL_UTILS_BN_new()) == NULL) { + goto cleanup; + } +- if (!EC_GROUP_get_order(group, order, NULL)) { ++ if (!SSL_UTILS_EC_GROUP_get_order(group, order, NULL)) { + goto cleanup; + } + +@@ -188,7 +189,7 @@ static bool SetECDHOrderAndCofactor(JNIEnv* env, EC_GROUP* group, jobjectArray p + goto cleanup; + } + (*env)->SetObjectArrayElement(env, params, ecdhOrder, paramOrder); +- if (!EC_GROUP_get_cofactor(group, cofactor, NULL)) { ++ if (!SSL_UTILS_EC_GROUP_get_cofactor(group, cofactor, NULL)) { + goto cleanup; + } + +@@ -197,18 +198,18 @@ static bool SetECDHOrderAndCofactor(JNIEnv* env, EC_GROUP* group, jobjectArray p + goto cleanup; + } + (*env)->SetObjectArrayElement(env, params, ecdhCofactor, paramCofactor); +- BN_free(order); +- BN_free(cofactor); ++ SSL_UTILS_BN_free(order); ++ SSL_UTILS_BN_free(cofactor); + (*env)->DeleteLocalRef(env, paramOrder); + (*env)->DeleteLocalRef(env, paramCofactor); + return true; + + cleanup: + if (order != NULL) { +- BN_free(order); ++ SSL_UTILS_BN_free(order); + } + if (cofactor != NULL) { +- BN_free(cofactor); ++ SSL_UTILS_BN_free(cofactor); + } + if (paramOrder != NULL) { + (*env)->DeleteLocalRef(env, paramOrder); +@@ -223,10 +224,10 @@ static void FreeECDHKeyParam(JNIEnv* env, + BIGNUM* wX, BIGNUM* wY, jbyteArray keyWX, jbyteArray keyWY, jbyteArray keyS) + { + if (wX != NULL) { +- BN_free(wX); ++ SSL_UTILS_BN_free(wX); + } + if (wY != NULL) { +- BN_free(wY); ++ SSL_UTILS_BN_free(wY); + } + if (keyWX != NULL) { + (*env)->DeleteLocalRef(env, keyWX); +@@ -250,16 +251,16 @@ static bool SetECDHKey(JNIEnv* env, const EC_GROUP* group, jobjectArray params, + jbyteArray keyWX = NULL; + jbyteArray keyWY = NULL; + jbyteArray keyS = NULL; +- if ((wX = BN_new()) == NULL || (wY = BN_new()) == NULL) { ++ if ((wX = SSL_UTILS_BN_new()) == NULL || (wY = SSL_UTILS_BN_new()) == NULL) { + KAE_ThrowOOMException(env, "failed to allocate array"); + goto cleanup; + } + +- if ((pub = EC_KEY_get0_public_key(eckey)) == NULL || +- !EC_POINT_get_affine_coordinates_GFp(group, pub, wX, wY, NULL)) { ++ if ((pub = SSL_UTILS_EC_KEY_get0_public_key(eckey)) == NULL || ++ !SSL_UTILS_EC_POINT_get_affine_coordinates_GFp(group, pub, wX, wY, NULL)) { + goto cleanup; + } +- if ((s = EC_KEY_get0_private_key(eckey)) == NULL) { ++ if ((s = SSL_UTILS_EC_KEY_get0_private_key(eckey)) == NULL) { + goto cleanup; + } + +@@ -374,42 +375,42 @@ static EC_GROUP* GetGroupByParam(JNIEnv* env, jbyteArray pArr, jbyteArray aArr, + EC_POINT* generator = NULL; + if ((p = KAE_GetBigNumFromByteArray(env, pArr)) == NULL || (a = KAE_GetBigNumFromByteArray(env, aArr)) == NULL || + (b = KAE_GetBigNumFromByteArray(env, bArr)) == NULL || (x = KAE_GetBigNumFromByteArray(env, xArr)) == NULL || +- (y = KAE_GetBigNumFromByteArray(env, yArr)) == NULL || (cofactor = BN_new()) == NULL || +- (order = KAE_GetBigNumFromByteArray(env, orderArr)) == NULL || !BN_set_word(cofactor, cofactorInt)) { ++ (y = KAE_GetBigNumFromByteArray(env, yArr)) == NULL || (cofactor = SSL_UTILS_BN_new()) == NULL || ++ (order = KAE_GetBigNumFromByteArray(env, orderArr)) == NULL || 
!SSL_UTILS_BN_set_word(cofactor, cofactorInt)) { + goto cleanup; + } + + // Create the curve. +- if ((ctx = BN_CTX_new()) == NULL || (group = EC_GROUP_new_curve_GFp(p, a, b, ctx)) == NULL) { ++ if ((ctx = SSL_UTILS_BN_CTX_new()) == NULL || (group = SSL_UTILS_EC_GROUP_new_curve_GFp(p, a, b, ctx)) == NULL) { + goto cleanup; + } + + // Create the generator and set x, y. +- if ((generator = EC_POINT_new(group)) == NULL || +- !EC_POINT_set_affine_coordinates_GFp(group, generator, x, y, ctx)) { ++ if ((generator = SSL_UTILS_EC_POINT_new(group)) == NULL || ++ !SSL_UTILS_EC_POINT_set_affine_coordinates_GFp(group, generator, x, y, ctx)) { + goto cleanup; + } + + // Set the generator, order and cofactor. +- if (!EC_GROUP_set_generator(group, generator, order, cofactor)) { ++ if (!SSL_UTILS_EC_GROUP_set_generator(group, generator, order, cofactor)) { + goto cleanup; + } + + FreeECDHParam(p, a, b, x, y, order, cofactor); +- EC_POINT_free(generator); +- BN_CTX_free(ctx); ++ SSL_UTILS_EC_POINT_free(generator); ++ SSL_UTILS_BN_CTX_free(ctx); + return group; + + cleanup: + FreeECDHParam(p, a, b, x, y, order, cofactor); + if (group != NULL) { +- EC_GROUP_free(group); ++ SSL_UTILS_EC_GROUP_free(group); + } + if (generator != NULL) { +- EC_POINT_free(generator); ++ SSL_UTILS_EC_POINT_free(generator); + } + if (ctx != NULL) { +- BN_CTX_free(ctx); ++ SSL_UTILS_BN_CTX_free(ctx); + } + return NULL; + } +@@ -428,26 +429,26 @@ JNIEXPORT jobjectArray JNICALL Java_org_openeuler_security_openssl_KAEECKeyPairG + + const char *curve = (*env)->GetStringUTFChars(env, curveName, 0); + KAE_TRACE("KAEECKeyPairGenerator_nativeGenerateParam(curveName = %s)", curve); +- int nid = OBJ_sn2nid(curve); ++ int nid = SSL_UTILS_OBJ_sn2nid(curve); + (*env)->ReleaseStringUTFChars(env, curveName, curve); + if (nid == NID_undef) { + goto cleanup; + } + // Construct a builtin curve. +- if ((group = EC_GROUP_new_by_curve_name(nid)) == NULL) { ++ if ((group = SSL_UTILS_EC_GROUP_new_by_curve_name(nid)) == NULL) { + goto cleanup; + } + ecdhParam = NewECDHParam(env, group); + + if (group != NULL) { +- EC_GROUP_free(group); ++ SSL_UTILS_EC_GROUP_free(group); + } + KAE_TRACE("KAEECKeyPairGenerator_nativeGenerateParam success, ecdhParam = %p", ecdhParam); + return ecdhParam; + + cleanup: + if (group != NULL) { +- EC_GROUP_free(group); ++ SSL_UTILS_EC_GROUP_free(group); + } + if (ecdhParam != NULL) { + (*env)->DeleteLocalRef(env, ecdhParam); +@@ -471,32 +472,32 @@ JNIEXPORT jobjectArray JNICALL Java_org_openeuler_security_openssl_KAEECKeyPairG + if ((group = GetGroupByParam(env, pArr, aArr, bArr, xArr, yArr, orderArr, cofactorInt)) == NULL) { + goto cleanup; + } +- if ((eckey = EC_KEY_new()) == NULL) { ++ if ((eckey = SSL_UTILS_EC_KEY_new()) == NULL) { + goto cleanup; + } +- if (!EC_KEY_set_group(eckey, group)) { ++ if (!SSL_UTILS_EC_KEY_set_group(eckey, group)) { + goto cleanup; + } + // Generates a new public and private key for the supplied eckey object. + // Refer to {@link https://www.openssl.org/docs/man1.1.0/man3/EC_KEY_generate_key.html} for details. 
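As the comment above notes, EC_KEY_generate_key fills in both the private scalar and the matching public point once the key's group is set. A minimal sketch of that generate-then-borrow flow in plain OpenSSL (make_p256_keypair and the curve choice are illustrative, not taken from this patch):

#include <openssl/ec.h>
#include <openssl/obj_mac.h>

/* Sketch only: create a P-256 key pair and borrow its two halves.
 * Returns NULL on failure; the caller frees the result with EC_KEY_free. */
static EC_KEY *make_p256_keypair(void) {
    EC_KEY *eckey = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);
    if (eckey == NULL || !EC_KEY_generate_key(eckey)) {
        EC_KEY_free(eckey);  /* EC_KEY_free(NULL) is a harmless no-op */
        return NULL;
    }
    /* get0 accessors borrow; ownership stays with eckey. */
    const EC_POINT *pub = EC_KEY_get0_public_key(eckey);
    const BIGNUM *priv = EC_KEY_get0_private_key(eckey);
    (void)pub;
    (void)priv;
    return eckey;
}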
+- if (!EC_KEY_generate_key(eckey)) { ++ if (!SSL_UTILS_EC_KEY_generate_key(eckey)) { + goto cleanup; + } + + ecdhKey = NewECDHKey(env, group, eckey); + +- EC_KEY_free(eckey); +- EC_GROUP_free(group); ++ SSL_UTILS_EC_KEY_free(eckey); ++ SSL_UTILS_EC_GROUP_free(group); + + KAE_TRACE("KAEECKeyPairGenerator_nativeGenerateKeyPair success, ecdhKey = %p", ecdhKey); + return ecdhKey; + + cleanup: + if (eckey != NULL) { +- EC_KEY_free(eckey); ++ SSL_UTILS_EC_KEY_free(eckey); + } + if (group != NULL) { +- EC_GROUP_free(group); ++ SSL_UTILS_EC_GROUP_free(group); + } + if (ecdhKey != NULL) { + (*env)->DeleteLocalRef(env, ecdhKey); +diff --git a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_keypairgenerator_rsa.c b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_keypairgenerator_rsa.c +index 84c2ed109..a553d8793 100644 +--- a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_keypairgenerator_rsa.c ++++ b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_keypairgenerator_rsa.c +@@ -26,6 +26,7 @@ + #include "kae_log.h" + #include "kae_util.h" + #include "kae_exception.h" ++#include "ssl_utils.h" + #include "org_openeuler_security_openssl_KAERSAKeyPairGenerator.h" + #define KAE_RSA_PARAM_SIZE 8 + +@@ -46,14 +47,14 @@ static const char* rsaParamNames[] = {"n", "e", "d", "p", "q", "dmp1", "dmq1", " + + // rsa get rsa param function list + static const BIGNUM* (* GetRSAParamFunctionList[])(const RSA*) = { +- RSA_get0_n, +- RSA_get0_e, +- RSA_get0_d, +- RSA_get0_p, +- RSA_get0_q, +- RSA_get0_dmp1, +- RSA_get0_dmq1, +- RSA_get0_iqmp ++ SSL_UTILS_RSA_get0_n, ++ SSL_UTILS_RSA_get0_e, ++ SSL_UTILS_RSA_get0_d, ++ SSL_UTILS_RSA_get0_p, ++ SSL_UTILS_RSA_get0_q, ++ SSL_UTILS_RSA_get0_dmp1, ++ SSL_UTILS_RSA_get0_dmq1, ++ SSL_UTILS_RSA_get0_iqmp + }; + + /* +@@ -67,7 +68,7 @@ static RSA* NewRSA(JNIEnv* env, jint keySize, jbyteArray publicExponent) { + KAE_TRACE("NewRSA: kaeEngine => %p", kaeEngine); + + // new rsa +- RSA* rsa = RSA_new_method(kaeEngine); ++ RSA* rsa = SSL_UTILS_RSA_new_method(kaeEngine); + if (rsa == NULL) { + KAE_ThrowFromOpenssl(env, "RSA_new_method", KAE_ThrowRuntimeException); + return NULL; +@@ -80,10 +81,10 @@ static RSA* NewRSA(JNIEnv* env, jint keySize, jbyteArray publicExponent) { + } + + // generate rsa key +- int result_code = RSA_generate_key_ex(rsa, keySize, exponent, NULL); ++ int result_code = SSL_UTILS_RSA_generate_key_ex(rsa, keySize, exponent, NULL); + KAE_ReleaseBigNumFromByteArray(exponent); + if (result_code <= 0) { +- RSA_free(rsa); ++ SSL_UTILS_RSA_free(rsa); + KAE_ThrowFromOpenssl(env, "RSA_generate_key_ex", KAE_ThrowRuntimeException); + return NULL; + } +@@ -95,7 +96,7 @@ static RSA* NewRSA(JNIEnv* env, jint keySize, jbyteArray publicExponent) { + */ + static void ReleaseRSA(RSA* rsa) { + if (rsa != NULL) { +- RSA_free(rsa); ++ SSL_UTILS_RSA_free(rsa); + } + } + +diff --git a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_provider.c b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_provider.c +index f4f71005a..ea609bd9b 100644 +--- a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_provider.c ++++ b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_provider.c +@@ -27,69 +27,50 @@ + #include + #include "kae_exception.h" + #include "kae_util.h" ++#include "kae_log.h" ++#include "ssl_utils.h" + 
#include "org_openeuler_security_openssl_KAEProvider.h" + +-#define KAE_OPENSSL_LIBRARY "libcrypto.so" +- + /* + * Class: Java_org_openeuler_security_openssl_KAEProvider + * Method: initOpenssl + * Signature: ()V + */ +-JNIEXPORT void JNICALL Java_org_openeuler_security_openssl_KAEProvider_initOpenssl +- (JNIEnv *env, jclass cls, jboolean useGlobalMode, jstring engineId, jbooleanArray algorithmKaeFlags) { +- SSL_load_error_strings(); +- ERR_load_BIO_strings(); +- OpenSSL_add_all_algorithms(); +- +- /* +- * If the same shared object is opened again with dlopen(), the same object handle is returned. +- * The dynamic linker maintains reference counts for object handles. +- * An object that was previously opened with RTLD_LOCAL can be promoted to RTLD_GLOBAL in a subsequent dlopen(). +- * +- * RTLD_GLOBAL +- * The symbols defined by this shared object will be made +- * available for symbol resolution of subsequently loaded +- * shared objects. +- * RTLD_LOCAL +- * This is the converse of RTLD_GLOBAL, and the default if +- * neither flag is specified. Symbols defined in this shared +- * object are not made available to resolve references in +- * subsequently loaded shared objects. +- * For more information see https://man7.org/linux/man-pages/man3/dlopen.3.html. +- */ +- if (useGlobalMode) { +- char msg[1024]; +- void *handle = NULL; +- // Promote the flags of the loaded libcrypto.so library from RTLD_LOCAL to RTLD_GLOBAL +- handle = dlopen(KAE_OPENSSL_LIBRARY, RTLD_LAZY | RTLD_GLOBAL); +- if (handle == NULL) { +- snprintf(msg, sizeof(msg), "Cannot load %s (%s)!", KAE_OPENSSL_LIBRARY, dlerror()); +- KAE_ThrowByName(env, "java/lang/UnsatisfiedLinkError", msg); +- return; +- } +- dlclose(handle); ++JNIEXPORT int JNICALL Java_org_openeuler_security_openssl_KAEProvider_initOpenssl ++ (JNIEnv *env, jclass cls, jint useOpensslVersion, jstring engineId, jbooleanArray algorithmKaeFlags) { ++ // Load openssl functions by dlsym(), according to current libssl.so file version. ++ jboolean init_result = SSL_UTILS_func_ptr_init(env, useOpensslVersion); ++ if (!init_result) { ++ return -1; + } + ++ // Change from macro, SSL_load_error_strings is a macro in openssl 1 and 3. ++ SSL_UTILS_SSL_load_error_strings(); ++ SSL_UTILS_ERR_load_BIO_strings(); ++ // Change from macro, OpenSSL_add_all_algorithms ia a macro, defined by OPENSSL_LOAD_CONF value. 
+ // check if KaeEngine holder is already set + ENGINE* e = GetKaeEngine(); + if (e != NULL) { +- ENGINE_free(e); ++ SSL_UTILS_ENGINE_free(e); + e = NULL; + } + + // determine whether KAE is loaded successfully + const char* id = (*env)->GetStringUTFChars(env, engineId, 0); +- e = ENGINE_by_id(id); ++ e = SSL_UTILS_ENGINE_by_id(id); + (*env)->ReleaseStringUTFChars(env, engineId, id); + if (e == NULL) { + KAE_ThrowFromOpenssl(env, "ENGINE_by_id", KAE_ThrowRuntimeException); +- return; ++ return -1; + } + SetKaeEngine(e); + + // initialize the engine for each algorithm + initEngines(env, algorithmKaeFlags); ++ ++ return get_sslVersion(); + } + + /* +diff --git a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_signature_rsa.c b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_signature_rsa.c +index 496ebc775..6030b7cf7 100644 +--- a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_signature_rsa.c ++++ b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_signature_rsa.c +@@ -27,11 +27,12 @@ + #include "kae_log.h" + #include "kae_util.h" + #include "kae_exception.h" ++#include "ssl_utils.h" + + // get EVP_MD by digestName + static const EVP_MD* getEvpMd(JNIEnv* env, jstring digestName) { + const char* digestNameUtf = (*env)->GetStringUTFChars(env, digestName, 0); +- const EVP_MD* md = (EVP_MD*)EVP_get_digestbyname(digestNameUtf); ++ const EVP_MD* md = (EVP_MD*)SSL_UTILS_EVP_get_digestbyname(digestNameUtf); + (*env)->ReleaseStringUTFChars(env, digestName, digestNameUtf); + if (md == NULL) { + KAE_ThrowSignatureException(env, "Unsupported digest algorithm."); +@@ -49,7 +50,7 @@ static void signRelease(JNIEnv* env, jbyteArray digestValue, jbyte* digestBytes, + free(sigBytes); + } + if (pkeyCtx != NULL) { +- EVP_PKEY_CTX_free(pkeyCtx); ++ SSL_UTILS_EVP_PKEY_CTX_free(pkeyCtx); + } + } + +@@ -63,14 +64,14 @@ static void verifyRelease(JNIEnv* env, jbyteArray digestValue, jbyte* digestByte + (*env)->ReleaseByteArrayElements(env, sigValue, sigBytes, 0); + } + if (pkeyCtx != NULL) { +- EVP_PKEY_CTX_free(pkeyCtx); ++ SSL_UTILS_EVP_PKEY_CTX_free(pkeyCtx); + } + } + + // set rsa PkeyCtx parameters + static bool setRsaPkeyCtxParameters(JNIEnv* env, EVP_PKEY_CTX* pkeyCtx, jint paddingType, jstring digestName) { + // set rsa padding +- if (EVP_PKEY_CTX_set_rsa_padding(pkeyCtx, paddingType) <= 0) { ++ if (SSL_UTILS_EVP_PKEY_CTX_set_rsa_padding(pkeyCtx, paddingType) <= 0) { + KAE_ThrowFromOpenssl(env, "EVP_PKEY_CTX_set_rsa_padding", KAE_ThrowSignatureException); + return false; + } +@@ -81,7 +82,7 @@ static bool setRsaPkeyCtxParameters(JNIEnv* env, EVP_PKEY_CTX* pkeyCtx, jint pad + return false; + } + +- if (EVP_PKEY_CTX_set_signature_md(pkeyCtx, md) <= 0) { ++ if (SSL_UTILS_EVP_PKEY_CTX_set_signature_md(pkeyCtx, md) <= 0) { + KAE_ThrowFromOpenssl(env, "EVP_PKEY_CTX_set_signature_md", KAE_ThrowSignatureException); + return false; + } +@@ -103,13 +104,13 @@ JNIEXPORT jbyteArray JNICALL Java_org_openeuler_security_openssl_KAERSASignature + static ENGINE* kaeEngine = NULL; + kaeEngine = (kaeEngine == NULL) ? 
GetKaeEngine() : kaeEngine; + // new EVP_PKEY_CTX +- if ((pkeyCtx = EVP_PKEY_CTX_new(pkey, kaeEngine)) == NULL) { ++ if ((pkeyCtx = SSL_UTILS_EVP_PKEY_CTX_new(pkey, kaeEngine)) == NULL) { + KAE_ThrowFromOpenssl(env, "EVP_PKEY_new", KAE_ThrowSignatureException); + goto cleanup; + } + + // sign init +- if (EVP_PKEY_sign_init(pkeyCtx) <= 0) { ++ if (SSL_UTILS_EVP_PKEY_sign_init(pkeyCtx) <= 0) { + KAE_ThrowFromOpenssl(env, "EVP_PKEY_sign_init", KAE_ThrowSignatureException); + goto cleanup; + } +@@ -120,7 +121,7 @@ JNIEXPORT jbyteArray JNICALL Java_org_openeuler_security_openssl_KAERSASignature + } + + // sign +- size_t sigLen = (size_t)EVP_PKEY_size(pkey); ++ size_t sigLen = (size_t)SSL_UTILS_EVP_PKEY_size(pkey); + if (sigLen <= 0) { + KAE_ThrowSignatureException(env, "The sigLen size cannot be zero or negative"); + goto cleanup; +@@ -134,7 +135,7 @@ JNIEXPORT jbyteArray JNICALL Java_org_openeuler_security_openssl_KAERSASignature + goto cleanup; + } + size_t digestLen = (size_t)(*env)->GetArrayLength(env, digestValue); +- if (EVP_PKEY_sign(pkeyCtx, (unsigned char*)sigBytes, &sigLen, ++ if (SSL_UTILS_EVP_PKEY_sign(pkeyCtx, (unsigned char*)sigBytes, &sigLen, + (const unsigned char*)digestBytes, digestLen) <= 0) { + KAE_ThrowFromOpenssl(env, "EVP_PKEY_sign", KAE_ThrowSignatureException); + goto cleanup; +@@ -167,13 +168,13 @@ JNIEXPORT jboolean JNICALL Java_org_openeuler_security_openssl_KAERSASignatureNa + static ENGINE* kaeEngine = NULL; + kaeEngine = (kaeEngine == NULL) ? GetKaeEngine() : kaeEngine; + // new EVP_PKEY_CTX +- if ((pkeyCtx = EVP_PKEY_CTX_new(pkey, kaeEngine)) == NULL) { ++ if ((pkeyCtx = SSL_UTILS_EVP_PKEY_CTX_new(pkey, kaeEngine)) == NULL) { + KAE_ThrowFromOpenssl(env, "EVP_PKEY_new", KAE_ThrowSignatureException); + goto cleanup; + } + + // verify init +- if (EVP_PKEY_verify_init(pkeyCtx) <= 0) { ++ if (SSL_UTILS_EVP_PKEY_verify_init(pkeyCtx) <= 0) { + KAE_ThrowFromOpenssl(env, "EVP_PKEY_sign_init", KAE_ThrowSignatureException); + goto cleanup; + } +@@ -194,7 +195,7 @@ JNIEXPORT jboolean JNICALL Java_org_openeuler_security_openssl_KAERSASignatureNa + } + size_t sigLen = (size_t)(*env)->GetArrayLength(env, sigValue); + size_t digestLen = (size_t)(*env)->GetArrayLength(env, digestValue); +- if (EVP_PKEY_verify(pkeyCtx, (const unsigned char*)sigBytes, sigLen, ++ if (SSL_UTILS_EVP_PKEY_verify(pkeyCtx, (const unsigned char*)sigBytes, sigLen, + (const unsigned char*)digestBytes, digestLen) <= 0) { + KAE_ThrowFromOpenssl(env, "EVP_PKEY_verify", KAE_ThrowSignatureException); + goto cleanup; +@@ -210,7 +211,7 @@ cleanup: + static bool setPssPkeyCtxParameters(JNIEnv* env, EVP_PKEY_CTX* pkeyCtx, jint paddingType, jstring digestName, + jstring mgf1DigestName, jint saltLen) { + // set rsa padding +- if (EVP_PKEY_CTX_set_rsa_padding(pkeyCtx, paddingType) <= 0) { ++ if (SSL_UTILS_EVP_PKEY_CTX_set_rsa_padding(pkeyCtx, paddingType) <= 0) { + KAE_ThrowFromOpenssl(env, "EVP_PKEY_CTX_set_rsa_padding", KAE_ThrowSignatureException); + return false; + } +@@ -220,7 +221,7 @@ static bool setPssPkeyCtxParameters(JNIEnv* env, EVP_PKEY_CTX* pkeyCtx, jint pad + if (md == NULL) { + return false; + } +- if (EVP_PKEY_CTX_set_signature_md(pkeyCtx, md) <= 0) { ++ if (SSL_UTILS_EVP_PKEY_CTX_set_signature_md(pkeyCtx, md) <= 0) { + KAE_ThrowFromOpenssl(env, "EVP_PKEY_CTX_set_signature_md", KAE_ThrowSignatureException); + return false; + } +@@ -230,13 +231,13 @@ static bool setPssPkeyCtxParameters(JNIEnv* env, EVP_PKEY_CTX* pkeyCtx, jint pad + if (mgf1Md == NULL) { + return false; + } +- if 
(EVP_PKEY_CTX_set_rsa_mgf1_md(pkeyCtx, mgf1Md) <= 0) { ++ if (SSL_UTILS_EVP_PKEY_CTX_set_rsa_mgf1_md(pkeyCtx, mgf1Md) <= 0) { + KAE_ThrowFromOpenssl(env, "EVP_PKEY_CTX_set_rsa_mgf1_md", KAE_ThrowSignatureException); + return false; + } + + // set salt len +- if (EVP_PKEY_CTX_set_rsa_pss_saltlen(pkeyCtx, saltLen) <= 0) { ++ if (SSL_UTILS_EVP_PKEY_CTX_set_rsa_pss_saltlen(pkeyCtx, saltLen) <= 0) { + KAE_ThrowFromOpenssl(env, "EVP_PKEY_CTX_set_rsa_pss_saltlen", KAE_ThrowSignatureException); + return false; + } +@@ -260,13 +261,13 @@ JNIEXPORT jbyteArray JNICALL Java_org_openeuler_security_openssl_KAERSASignature + KAE_TRACE("KAERSASignatureNative_pssSign: kaeEngine => %p", kaeEngine); + + // new EVP_PKEY_CTX +- if ((pkeyCtx = EVP_PKEY_CTX_new(pkey, kaeEngine)) == NULL) { ++ if ((pkeyCtx = SSL_UTILS_EVP_PKEY_CTX_new(pkey, kaeEngine)) == NULL) { + KAE_ThrowFromOpenssl(env, "EVP_PKEY_new", KAE_ThrowSignatureException); + goto cleanup; + } + + // sign init +- if (EVP_PKEY_sign_init(pkeyCtx) <= 0) { ++ if (SSL_UTILS_EVP_PKEY_sign_init(pkeyCtx) <= 0) { + KAE_ThrowFromOpenssl(env, "EVP_PKEY_sign_init", KAE_ThrowSignatureException); + goto cleanup; + } +@@ -277,7 +278,7 @@ JNIEXPORT jbyteArray JNICALL Java_org_openeuler_security_openssl_KAERSASignature + } + + // sign +- size_t sigLen = (size_t)EVP_PKEY_size(pkey); ++ size_t sigLen = (size_t)SSL_UTILS_EVP_PKEY_size(pkey); + if (sigLen <= 0) { + KAE_ThrowSignatureException(env, "The sigLen size cannot be zero or negative"); + goto cleanup; +@@ -291,7 +292,7 @@ JNIEXPORT jbyteArray JNICALL Java_org_openeuler_security_openssl_KAERSASignature + goto cleanup; + } + size_t digestLen = (size_t)(*env)->GetArrayLength(env, digestValue); +- if (EVP_PKEY_sign(pkeyCtx, (unsigned char*)sigBytes, &sigLen, ++ if (SSL_UTILS_EVP_PKEY_sign(pkeyCtx, (unsigned char*)sigBytes, &sigLen, + (const unsigned char*)digestBytes, digestLen) <= 0) { + KAE_ThrowFromOpenssl(env, "EVP_PKEY_sign", KAE_ThrowSignatureException); + goto cleanup; +@@ -326,13 +327,13 @@ JNIEXPORT jboolean JNICALL Java_org_openeuler_security_openssl_KAERSASignatureNa + KAE_TRACE("KAERSASignatureNative_pssVerify: kaeEngine => %p", kaeEngine); + + // new EVP_PKEY_CTX +- if ((pkeyCtx = EVP_PKEY_CTX_new(pkey, kaeEngine)) == NULL) { ++ if ((pkeyCtx = SSL_UTILS_EVP_PKEY_CTX_new(pkey, kaeEngine)) == NULL) { + KAE_ThrowFromOpenssl(env, "EVP_PKEY_new", KAE_ThrowSignatureException); + goto cleanup; + } + + // verify init +- if (EVP_PKEY_verify_init(pkeyCtx) <= 0) { ++ if (SSL_UTILS_EVP_PKEY_verify_init(pkeyCtx) <= 0) { + KAE_ThrowFromOpenssl(env, "EVP_PKEY_sign_init", KAE_ThrowSignatureException); + goto cleanup; + } +@@ -353,7 +354,7 @@ JNIEXPORT jboolean JNICALL Java_org_openeuler_security_openssl_KAERSASignatureNa + } + size_t sigLen = (size_t)(*env)->GetArrayLength(env, sigValue); + size_t digestLen = (size_t)(*env)->GetArrayLength(env, digestValue); +- if (EVP_PKEY_verify(pkeyCtx, (const unsigned char*)sigBytes, sigLen, ++ if (SSL_UTILS_EVP_PKEY_verify(pkeyCtx, (const unsigned char*)sigBytes, sigLen, + (const unsigned char*)digestBytes, digestLen) <= 0) { + KAE_ThrowFromOpenssl(env, "EVP_PKEY_verify", KAE_ThrowSignatureException); + goto cleanup; +diff --git a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_signature_sm2.c b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_signature_sm2.c +new file mode 100644 +index 000000000..9025911bf +--- /dev/null ++++ 
b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_signature_sm2.c +@@ -0,0 +1,288 @@ ++/* ++ * Copyright (c) 2024, Huawei Technologies Co., Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ */ ++ ++#include "org_openeuler_security_openssl_KAESM2Signature.h" ++#include "kae_util.h" ++#include "kae_log.h" ++#include "kae_exception.h" ++#include "ssl_utils.h" ++#include ++#include ++#include ++#include ++ ++static const EVP_MD* GetEVP_MDByName(JNIEnv *env, const char* algo) ++{ ++ static const EVP_MD* sm3 = NULL; ++ ++ if (strcasecmp(algo, "SM3") == 0) { ++ return sm3 == NULL ? sm3 = SSL_UTILS_EVP_sm3() : sm3; ++ } else { ++ KAE_ThrowRuntimeException(env, "GetEVP_MDByName error"); ++ return NULL; ++ } ++} ++ ++/* ++* Class: org_openeuler_security_openssl_KAESM2Signature ++* Method: nativeClone ++* Signature: (J)J ++*/ ++JNIEXPORT jlong JNICALL ++Java_org_openeuler_security_openssl_KAESM2Signature_nativeClone(JNIEnv *env, jclass cls, jlong ctxAddress) ++{ ++ EVP_MD_CTX* ctx = (EVP_MD_CTX*) ctxAddress; ++ KAE_TRACE("KAESM2Signature_nativeClone: ctx = %p", ctx); ++ if (ctx == NULL) { ++ return 0; ++ } ++ ++ // EVP_MD_CTX_create is macro in openssl 1 and 3 ++ EVP_MD_CTX* ctxCopy = SSL_UTILS_EVP_MD_CTX_create(); ++ ++ if (ctxCopy == NULL) { ++ KAE_ThrowOOMException(env, "create EVP_MD_CTX fail"); ++ return 0; ++ } ++ KAE_TRACE("KAESM2Signature_nativeClone: create ctxCopy => %p", ctxCopy); ++ ++ int result_code = SSL_UTILS_EVP_MD_CTX_copy_ex(ctxCopy, ctx); ++ if (result_code == 0) { ++ KAE_ThrowFromOpenssl(env, "EVP_MD_CTX_copy_ex failed", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ KAE_TRACE("KAESM2Signature_nativeClone EVP_MD_CTX_copy_ex(ctxCopy = %p, ctx = %p) success", ctxCopy, ctx); ++ KAE_TRACE("KAESM2Signature_nativeClone: finished"); ++ return (jlong) ctxCopy; ++ ++cleanup: ++ if (ctxCopy != NULL) { ++ SSL_UTILS_EVP_MD_CTX_free(ctxCopy); ++ } ++ return 0; ++} ++ ++/* ++ * Class: org_openeuler_security_openssl_KAESM2Signature ++ * Method: nativeFreeSM2Ctx ++ * Signature: (J)V ++ */ ++JNIEXPORT void JNICALL Java_org_openeuler_security_openssl_KAESM2Signature_nativeFreeSM2Ctx(JNIEnv *env, ++ jclass cls, jlong ctxAddress) ++{ ++ if(ctxAddress == 0){ ++ KAE_ThrowInvalidKeyException(env, "nativeFreeSM2Ctx failed. 
ctxAddress is Invalid"); ++ } ++ EVP_MD_CTX *md_ctx = (EVP_MD_CTX*) ctxAddress; ++ if (md_ctx != NULL) { ++ SSL_UTILS_EVP_MD_CTX_free(md_ctx); ++ } ++ KAE_TRACE("KAESM2Signature_nativeFreeSM2Ctx: finished"); ++} ++ ++/* ++ * Class: org_openeuler_security_openssl_KAESM2Signature ++ * Method: nativeInitSM2Ctx ++ * Signature: (JLjava/lang/String;Z)J ++ */ ++JNIEXPORT jlong JNICALL Java_org_openeuler_security_openssl_KAESM2Signature_nativeInitSM2Ctx(JNIEnv *env, ++ jclass cls, jlong keyAddress, jstring digestName, jstring id, jboolean isSign) ++{ ++ EVP_MD_CTX* md_ctx = NULL; ++ EVP_PKEY_CTX* pctx = NULL; ++ EVP_PKEY* pkey = NULL; ++ pkey = (EVP_PKEY*) keyAddress; ++ ENGINE* kaeEngine = NULL; ++ ++ // init engine ++ kaeEngine = GetEngineByAlgorithmIndex(SM2_INDEX); ++ KAE_TRACE("KAESM2Signature_nativeInitSM2Ctx: kaeEngine => %p", kaeEngine); ++ ++ const char* algo = (*env)->GetStringUTFChars(env, digestName, 0); ++ const char* sm2_id = (*env)->GetStringUTFChars(env, id, 0); ++ ++ // new pkey_ctx ++ if ((pctx = SSL_UTILS_EVP_PKEY_CTX_new(pkey, kaeEngine)) == NULL) { ++ KAE_ThrowFromOpenssl(env, "EVP_PKEY_CTX_new", KAE_ThrowInvalidKeyException); ++ goto cleanup; ++ } ++ ++ // set default_sm_id in pkey_ctx ++ // EVP_PKEY_CTX_set1_id is macro in openssl 1 ++ if (SSL_UTILS_EVP_PKEY_CTX_set1_id(pctx, sm2_id, strlen(sm2_id)) <= 0) { ++ KAE_ThrowFromOpenssl(env, "EVP_PKEY_CTX_set1_id", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ ++ // new md_ctx ++ if ((md_ctx = SSL_UTILS_EVP_MD_CTX_new()) == NULL) { ++ KAE_ThrowFromOpenssl(env, "EVP_MD_CTX_new", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ ++ // set pkey_ctx in md_ctx ++ SSL_UTILS_EVP_MD_CTX_set_pkey_ctx(md_ctx, pctx); ++ ++ // init md_ctx ++ if(isSign){ ++ if (SSL_UTILS_EVP_DigestSignInit(md_ctx, NULL, GetEVP_MDByName(env, algo), kaeEngine, pkey) <= 0) { ++ KAE_ThrowFromOpenssl(env, "EVP_DigestSignInit", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ }else { ++ if (SSL_UTILS_EVP_DigestVerifyInit(md_ctx, NULL, GetEVP_MDByName(env, algo), kaeEngine, pkey) <= 0) { ++ KAE_ThrowFromOpenssl(env, "EVP_DigestVerifyInit", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ } ++ (*env)->ReleaseStringUTFChars(env, digestName, algo); ++ (*env)->ReleaseStringUTFChars(env, id, sm2_id); ++ return (jlong)md_ctx; ++cleanup: ++ (*env)->ReleaseStringUTFChars(env, digestName, algo); ++ (*env)->ReleaseStringUTFChars(env, id, sm2_id); ++ if (pctx != NULL) { ++ SSL_UTILS_EVP_PKEY_CTX_free(pctx); ++ } ++ if (md_ctx != NULL) { ++ SSL_UTILS_EVP_MD_CTX_free(md_ctx); ++ } ++ return 0; ++} ++ ++/* ++ * Class: org_openeuler_security_openssl_KAESM2Signature ++ * Method: nativeSM2Update ++ * Signature: (J[BIZ)V ++ */ ++JNIEXPORT void JNICALL Java_org_openeuler_security_openssl_KAESM2Signature_nativeSM2Update(JNIEnv *env, ++ jclass cls, jlong ctxAddress, jbyteArray msgArr, jint msgLen, jboolean isSign) ++{ ++ EVP_MD_CTX* md_ctx = NULL; ++ unsigned char* msg = NULL; ++ md_ctx = (EVP_MD_CTX*) ctxAddress; ++ ++ if ((msg = (unsigned char*)malloc(msgLen)) == NULL) { ++ KAE_ThrowOOMException(env, "malloc error"); ++ goto cleanup; ++ } ++ memset(msg, 0, msgLen); ++ ++ (*env)->GetByteArrayRegion(env, msgArr, 0, msgLen, (jbyte*)msg); ++ ++ if(isSign){ ++ if (SSL_UTILS_EVP_DigestSignUpdate(md_ctx, msg, msgLen) <= 0) { ++ KAE_ThrowFromOpenssl(env, "EVP_DigestSignUpdate", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ }else { ++ if (SSL_UTILS_EVP_DigestVerifyUpdate(md_ctx, msg, msgLen) <= 0) { ++ KAE_ThrowFromOpenssl(env, "EVP_DigestVerifyUpdate", 
KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ } ++ KAE_TRACE("KAESM2Signature_nativeSM2Update: finished"); ++cleanup: ++ if (msg != NULL) { ++ memset(msg, 0, msgLen); ++ free(msg); ++ } ++} ++ ++/* ++ * Class: org_openeuler_security_openssl_KAESM2Signature ++ * Method: nativeSM2SignFinal ++ * Signature: (J)[B ++ */ ++JNIEXPORT jbyteArray JNICALL Java_org_openeuler_security_openssl_KAESM2Signature_nativeSM2SignFinal(JNIEnv *env, ++ jclass cls, jlong ctxAddress) ++{ ++ EVP_MD_CTX* md_ctx = NULL; ++ unsigned char* sig = NULL; ++ size_t sig_len = 0; ++ jbyteArray sigByteArray = NULL; ++ md_ctx = (EVP_MD_CTX*) ctxAddress; ++ ++ // determine the size of the signature ++ if (SSL_UTILS_EVP_DigestSignFinal(md_ctx, NULL, &sig_len) <= 0) { ++ KAE_ThrowFromOpenssl(env, "EVP_DigestSignFinal", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ ++ if ((sig = malloc(sig_len)) == NULL) { ++ KAE_ThrowOOMException(env, "malloc error"); ++ goto cleanup; ++ } ++ memset(sig, 0, sig_len); ++ ++ // sign ++ if (SSL_UTILS_EVP_DigestSignFinal(md_ctx, sig, &sig_len) <= 0) { ++ KAE_ThrowFromOpenssl(env, "EVP_DigestSignFinal", KAE_ThrowSignatureException); ++ goto cleanup; ++ } ++ ++ if ((sigByteArray = (*env)->NewByteArray(env, sig_len)) == NULL) { ++ goto cleanup; ++ } ++ (*env)->SetByteArrayRegion(env, sigByteArray, 0, sig_len, (jbyte*)sig); ++ KAE_TRACE("KAESM2Signature_nativeSM2SignFinal: finished"); ++cleanup: ++ if (sig != NULL) { ++ memset(sig, 0, sig_len); ++ free(sig); ++ } ++ return sigByteArray; ++} ++ ++/* ++ * Class: org_openeuler_security_openssl_KAESM2Signature ++ * Method: nativeSM2VerifyFinal ++ * Signature: (J[B)Z ++ */ ++JNIEXPORT jboolean JNICALL Java_org_openeuler_security_openssl_KAESM2Signature_nativeSM2VerifyFinal(JNIEnv *env, ++ jclass cls, jlong ctxAddress, jbyteArray sigBytesArr, jint sigLen) ++{ ++ EVP_MD_CTX* md_ctx = NULL; ++ unsigned char* sigBytes = NULL; ++ jboolean isSuccess = JNI_FALSE; ++ md_ctx = (EVP_MD_CTX*) ctxAddress; ++ ++ if ((sigBytes = (unsigned char*)malloc(sigLen)) == NULL) { ++ KAE_ThrowOOMException(env, "malloc error"); ++ goto cleanup; ++ } ++ (*env)->GetByteArrayRegion(env, sigBytesArr, 0, sigLen, (jbyte*)sigBytes); ++ ++ // verify ++ if (SSL_UTILS_EVP_DigestVerifyFinal(md_ctx, sigBytes, sigLen) <= 0) { ++ KAE_ThrowFromOpenssl(env, "EVP_DigestVerifyFinal", KAE_ThrowSignatureException); ++ goto cleanup; ++ } ++ isSuccess = JNI_TRUE; ++cleanup: ++ if (sigBytes != NULL) { ++ memset(sigBytes, 0, sigLen); ++ free(sigBytes); ++ } ++ return isSuccess; ++} +\ No newline at end of file +diff --git a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_symmetric_cipher.c b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_symmetric_cipher.c +index 67151f53a..54a6fb52e 100644 +--- a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_symmetric_cipher.c ++++ b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_symmetric_cipher.c +@@ -29,6 +29,7 @@ + #include "kae_exception.h" + #include "kae_log.h" + #include "kae_util.h" ++#include "ssl_utils.h" + #include "org_openeuler_security_openssl_KAESymmetricCipherBase.h" + + bool StartsWith(const char* str1, const char* str2) +@@ -61,13 +62,13 @@ static const EVP_CIPHER* EVPGetSm4CipherByName(JNIEnv* env, const char* algo) + static const EVP_CIPHER* sm4Ofb = NULL; + + if (strcasecmp(algo, "sm4-ecb") == 0) { +- return sm4Ecb == NULL ? 
sm4Ecb = EVP_get_cipherbyname(algo) : sm4Ecb; ++ return sm4Ecb == NULL ? sm4Ecb = SSL_UTILS_EVP_get_cipherbyname(algo) : sm4Ecb; + } else if (strcasecmp(algo, "sm4-cbc") == 0) { +- return sm4Cbc == NULL ? sm4Cbc = EVP_get_cipherbyname(algo) : sm4Cbc; ++ return sm4Cbc == NULL ? sm4Cbc = SSL_UTILS_EVP_get_cipherbyname(algo) : sm4Cbc; + } else if (strcasecmp(algo, "sm4-ctr") == 0) { +- return sm4Ctr == NULL ? sm4Ctr = EVP_get_cipherbyname(algo) : sm4Ctr; ++ return sm4Ctr == NULL ? sm4Ctr = SSL_UTILS_EVP_get_cipherbyname(algo) : sm4Ctr; + } else if (strcasecmp(algo, "sm4-ofb") == 0) { +- return sm4Ofb == NULL ? sm4Ofb = EVP_get_cipherbyname(algo) : sm4Ofb; ++ return sm4Ofb == NULL ? sm4Ofb = SSL_UTILS_EVP_get_cipherbyname(algo) : sm4Ofb; + } else { + KAE_ThrowRuntimeException(env, "EVPGetSm4CipherByName error"); + return 0; +@@ -90,29 +91,29 @@ static const EVP_CIPHER* EVPGetAesCipherByName(JNIEnv* env, const char* algo) + static const EVP_CIPHER* aes256Gcm = NULL; + + if (strcasecmp(algo, "aes-128-ecb") == 0) { +- return aes128Ecb == NULL ? aes128Ecb = EVP_get_cipherbyname(algo) : aes128Ecb; ++ return aes128Ecb == NULL ? aes128Ecb = SSL_UTILS_EVP_get_cipherbyname(algo) : aes128Ecb; + } else if (strcasecmp(algo, "aes-128-cbc") == 0) { +- return aes128Cbc == NULL ? aes128Cbc = EVP_get_cipherbyname(algo) : aes128Cbc; ++ return aes128Cbc == NULL ? aes128Cbc = SSL_UTILS_EVP_get_cipherbyname(algo) : aes128Cbc; + } else if (strcasecmp(algo, "aes-128-ctr") == 0) { +- return aes128Ctr == NULL ? aes128Ctr = EVP_get_cipherbyname(algo) : aes128Ctr; ++ return aes128Ctr == NULL ? aes128Ctr = SSL_UTILS_EVP_get_cipherbyname(algo) : aes128Ctr; + } else if (strcasecmp(algo, "aes-128-gcm") == 0) { +- return aes128Gcm == NULL ? aes128Gcm = EVP_get_cipherbyname(algo) : aes128Gcm; ++ return aes128Gcm == NULL ? aes128Gcm = SSL_UTILS_EVP_get_cipherbyname(algo) : aes128Gcm; + } else if (strcasecmp(algo, "aes-192-ecb") == 0) { +- return aes192Ecb == NULL ? aes192Ecb = EVP_get_cipherbyname(algo) : aes192Ecb; ++ return aes192Ecb == NULL ? aes192Ecb = SSL_UTILS_EVP_get_cipherbyname(algo) : aes192Ecb; + } else if (strcasecmp(algo, "aes-192-cbc") == 0) { +- return aes192Cbc == NULL ? aes192Cbc = EVP_get_cipherbyname(algo) : aes192Cbc; ++ return aes192Cbc == NULL ? aes192Cbc = SSL_UTILS_EVP_get_cipherbyname(algo) : aes192Cbc; + } else if (strcasecmp(algo, "aes-192-ctr") == 0) { +- return aes192Ctr == NULL ? aes192Ctr = EVP_get_cipherbyname(algo) : aes192Ctr; ++ return aes192Ctr == NULL ? aes192Ctr = SSL_UTILS_EVP_get_cipherbyname(algo) : aes192Ctr; + } else if (strcasecmp(algo, "aes-192-gcm") == 0) { +- return aes192Gcm == NULL ? aes192Gcm = EVP_get_cipherbyname(algo) : aes192Gcm; ++ return aes192Gcm == NULL ? aes192Gcm = SSL_UTILS_EVP_get_cipherbyname(algo) : aes192Gcm; + } else if (strcasecmp(algo, "aes-256-ecb") == 0) { +- return aes256Ecb == NULL ? aes256Ecb = EVP_get_cipherbyname(algo) : aes256Ecb; ++ return aes256Ecb == NULL ? aes256Ecb = SSL_UTILS_EVP_get_cipherbyname(algo) : aes256Ecb; + } else if (strcasecmp(algo, "aes-256-cbc") == 0) { +- return aes256Cbc == NULL ? aes256Cbc = EVP_get_cipherbyname(algo) : aes256Cbc; ++ return aes256Cbc == NULL ? aes256Cbc = SSL_UTILS_EVP_get_cipherbyname(algo) : aes256Cbc; + } else if (strcasecmp(algo, "aes-256-ctr") == 0) { +- return aes256Ctr == NULL ? aes256Ctr = EVP_get_cipherbyname(algo) : aes256Ctr; ++ return aes256Ctr == NULL ? aes256Ctr = SSL_UTILS_EVP_get_cipherbyname(algo) : aes256Ctr; + } else if (strcasecmp(algo, "aes-256-gcm") == 0) { +- return aes256Gcm == NULL ? 
aes256Gcm = EVP_get_cipherbyname(algo) : aes256Gcm; ++ return aes256Gcm == NULL ? aes256Gcm = SSL_UTILS_EVP_get_cipherbyname(algo) : aes256Gcm; + } else { + KAE_ThrowRuntimeException(env, "EVPGetAesCipherByName error"); + return 0; +@@ -146,6 +147,7 @@ Java_org_openeuler_security_openssl_KAESymmetricCipherBase_nativeInit(JNIEnv* en + const EVP_CIPHER* cipher = NULL; + ENGINE* kaeEngine = NULL; + int keyLength = (*env)->GetArrayLength(env, key); ++ int ivLength = 0; + + const char* algo = (*env)->GetStringUTFChars(env, cipherType, 0); + if (StartsWith("aes", algo)) { +@@ -158,38 +160,54 @@ Java_org_openeuler_security_openssl_KAESymmetricCipherBase_nativeInit(JNIEnv* en + + KAE_TRACE("KAESymmetricCipherBase_nativeInit: kaeEngine => %p", kaeEngine); + +- (*env)->ReleaseStringUTFChars(env, cipherType, algo); + if (cipher == NULL) { + KAE_ThrowOOMException(env, "create EVP_CIPHER fail"); + goto cleanup; + } +- if ((ctx = EVP_CIPHER_CTX_new()) == NULL) { ++ if ((ctx = SSL_UTILS_EVP_CIPHER_CTX_new()) == NULL) { + KAE_ThrowOOMException(env, "create EVP_CIPHER_CTX fail"); + goto cleanup; + } + + if (iv != NULL) { + ivBytes = (*env)->GetByteArrayElements(env, iv, NULL); ++ ivLength = (*env)->GetArrayLength(env, iv); + } + if (key != NULL) { + keyBytes = (*env)->GetByteArrayElements(env, key, NULL); + } + +- if (!EVP_CipherInit_ex(ctx, cipher, kaeEngine, (const unsigned char*)keyBytes, +- (const unsigned char*)ivBytes, encrypt ? 1 : 0)) { ++ if (!SSL_UTILS_EVP_CipherInit_ex(ctx, cipher, kaeEngine, NULL, ++ NULL, encrypt ? 1 : 0)) { + KAE_ThrowFromOpenssl(env, "EVP_CipherInit_ex failed", KAE_ThrowRuntimeException); + goto cleanup; + } + +- EVP_CIPHER_CTX_set_padding(ctx, padding ? 1 : 0); ++ if (strlen(algo) >= 11 && strcasecmp(algo + 8, "gcm") == 0) { ++ /* Set IV length if default 12 bytes (96 bits) is not appropriate */ ++ if (!SSL_UTILS_EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_IVLEN, ivLength, NULL)) { ++ KAE_ThrowFromOpenssl(env, "EVP_CIPHER_CTX_ctrl failed", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ } + ++ if (!SSL_UTILS_EVP_CipherInit_ex(ctx, NULL, kaeEngine, (const unsigned char*)keyBytes, ++ (const unsigned char*)ivBytes, encrypt ? 1 : 0)) { ++ KAE_ThrowFromOpenssl(env, "EVP_CipherInit_ex init key & iv failed", KAE_ThrowRuntimeException); ++ goto cleanup; ++ } ++ ++ SSL_UTILS_EVP_CIPHER_CTX_set_padding(ctx, padding ? 1 : 0); ++ ++ (*env)->ReleaseStringUTFChars(env, cipherType, algo); + FreeMemoryFromInit(env, iv, ivBytes, key, keyBytes, keyLength); + return (jlong)ctx; + + cleanup: + if (ctx != NULL) { +- EVP_CIPHER_CTX_free(ctx); ++ SSL_UTILS_EVP_CIPHER_CTX_free(ctx); + } ++ (*env)->ReleaseStringUTFChars(env, cipherType, algo); + FreeMemoryFromInit(env, iv, ivBytes, key, keyBytes, keyLength); + return 0; + } +@@ -247,13 +265,13 @@ Java_org_openeuler_security_openssl_KAESymmetricCipherBase_nativeUpdate(JNIEnv* + (*env)->GetByteArrayRegion(env, gcmAAD, 0, aadLen, (jbyte*)aad); + + // Specify aad.
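++ // A minimal sketch of the AEAD update ordering this hunk relies on
++ // (standard OpenSSL EVP usage; the buffer names below are illustrative,
++ // not identifiers from this file). Additional authenticated data must be
++ // fed in before any plaintext/ciphertext, by calling EVP_CipherUpdate
++ // with a NULL output pointer so the bytes are authenticated but never
++ // encrypted:
++ //
++ //   EVP_CipherUpdate(ctx, NULL, &outl, aad, aadLen);  /* AAD pass */
++ //   EVP_CipherUpdate(ctx, out, &outl, in, inLen);     /* payload pass */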
+- if (EVP_CipherUpdate(ctx, NULL, &bytesWritten, aad, aadLen) == 0) { ++ if (SSL_UTILS_EVP_CipherUpdate(ctx, NULL, &bytesWritten, aad, aadLen) == 0) { + KAE_ThrowFromOpenssl(env, "EVP_CipherUpdate failed", KAE_ThrowRuntimeException); + goto cleanup; + } + } + +- if (EVP_CipherUpdate(ctx, out, &bytesWritten, in, inLen) == 0) { ++ if (SSL_UTILS_EVP_CipherUpdate(ctx, out, &bytesWritten, in, inLen) == 0) { + KAE_ThrowFromOpenssl(env, "EVP_CipherUpdate failed", KAE_ThrowRuntimeException); + goto cleanup; + } +@@ -291,7 +309,7 @@ Java_org_openeuler_security_openssl_KAESymmetricCipherBase_nativeFinal(JNIEnv* e + } + memset(out, 0, outLen); + int bytesWritten = 0; +- int result_code = EVP_CipherFinal_ex(ctx, out, &bytesWritten); ++ int result_code = SSL_UTILS_EVP_CipherFinal_ex(ctx, out, &bytesWritten); + if (result_code == 0) { + KAE_ThrowFromOpenssl(env, "EVP_CipherFinal_ex failed", KAE_ThrowBadPaddingException); + goto cleanup; +@@ -348,7 +366,7 @@ JNIEXPORT jint JNICALL Java_org_openeuler_security_openssl_KAESymmetricCipherBas + goto cleanup; + } + memset(out, 0, outLen); +- if (EVP_CipherFinal_ex(ctx, out, &bytesWritten) == 0) { ++ if (SSL_UTILS_EVP_CipherFinal_ex(ctx, out, &bytesWritten) == 0) { + KAE_ThrowFromOpenssl(env, "EVP_CipherFinal_ex failed", KAE_ThrowBadPaddingException); + goto cleanup; + } +@@ -356,7 +374,7 @@ JNIEXPORT jint JNICALL Java_org_openeuler_security_openssl_KAESymmetricCipherBas + + // Writes tagLength bytes of the tag value to the buffer. + // Refer to {@link https://www.openssl.org/docs/man1.1.0/man3/EVP_CIPHER_CTX_ctrl.html} for details. +- if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, tagLength, out + bytesWritten) == 0) { ++ if (SSL_UTILS_EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, tagLength, out + bytesWritten) == 0) { + KAE_ThrowFromOpenssl(env, "EVP_CIPHER_CTX_ctrl failed", KAE_ThrowRuntimeException); + goto cleanup; + } +@@ -375,14 +393,14 @@ JNIEXPORT jint JNICALL Java_org_openeuler_security_openssl_KAESymmetricCipherBas + (*env)->GetByteArrayRegion(env, gcmTagArr, 0, tagLength, (jbyte*)gcmTag); + // Sets the expected gcmTag to tagLength bytes from gcmTag. + // Refer to {@link https://www.openssl.org/docs/man1.1.0/man3/EVP_CIPHER_CTX_ctrl.html} for details. 
+- if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, tagLength, gcmTag) == 0) { ++ if (SSL_UTILS_EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, tagLength, gcmTag) == 0) { + KAE_ThrowFromOpenssl(env, "EVP_CTRL_AEAD_SET_TAG failed", KAE_ThrowRuntimeException); + goto cleanup; + } + + (*env)->GetByteArrayRegion(env, outArr, 0, outOfs, (jbyte*)gcmOut); + // Finalise: note get no output for GCM +- if (EVP_CipherFinal_ex(ctx, gcmOut, &bytesWritten) == 0) { ++ if (SSL_UTILS_EVP_CipherFinal_ex(ctx, gcmOut, &bytesWritten) == 0) { + KAE_ThrowFromOpenssl(env, "EVP_CipherFinal_ex failed", KAE_ThrowAEADBadTagException); + goto cleanup; + } +@@ -407,7 +425,7 @@ Java_org_openeuler_security_openssl_KAESymmetricCipherBase_nativeFree(JNIEnv* en + EVP_CIPHER_CTX* ctx = (EVP_CIPHER_CTX*)ctxAddress; + KAE_TRACE("KAESymmetricCipherBase_nativeFree(ctx = %p)", ctx); + if (ctx != NULL) { +- EVP_CIPHER_CTX_free(ctx); ++ SSL_UTILS_EVP_CIPHER_CTX_free(ctx); + ctx = NULL; + } + +diff --git a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_util.c b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_util.c +index 471ae834b..6bdd9b5ad 100644 +--- a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_util.c ++++ b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_util.c +@@ -25,6 +25,7 @@ + #include + #include "kae_util.h" + #include "kae_exception.h" ++#include "ssl_utils.h" + + static ENGINE* kaeEngine = NULL; + +@@ -48,7 +49,7 @@ BIGNUM* KAE_GetBigNumFromByteArray(JNIEnv* env, jbyteArray byteArray) { + return NULL; + } + +- BIGNUM* bn = BN_new(); ++ BIGNUM* bn = SSL_UTILS_BN_new(); + if (bn == NULL) { + KAE_ThrowFromOpenssl(env, "BN_new", KAE_ThrowRuntimeException); + return NULL; +@@ -59,7 +60,7 @@ BIGNUM* KAE_GetBigNumFromByteArray(JNIEnv* env, jbyteArray byteArray) { + KAE_ThrowNullPointerException(env,"GetByteArrayElements failed"); + goto cleanup; + } +- BIGNUM* result = BN_bin2bn((const unsigned char*) bytes, len, bn); ++ BIGNUM* result = SSL_UTILS_BN_bin2bn((const unsigned char*) bytes, len, bn); + (*env)->ReleaseByteArrayElements(env, byteArray, bytes, 0); + if (result == NULL) { + KAE_ThrowFromOpenssl(env, "BN_bin2bn", KAE_ThrowRuntimeException); +@@ -68,13 +69,19 @@ BIGNUM* KAE_GetBigNumFromByteArray(JNIEnv* env, jbyteArray byteArray) { + return bn; + + cleanup: +- BN_free(bn); ++ SSL_UTILS_BN_free(bn); + return NULL; + } + + void KAE_ReleaseBigNumFromByteArray(BIGNUM* bn) { + if (bn != NULL) { +- BN_free(bn); ++ SSL_UTILS_BN_free(bn); ++ } ++} ++ ++void KAE_ReleaseBigNumFromByteArray_Clear(BIGNUM* bn) { ++ if (bn != NULL) { ++ SSL_UTILS_BN_clear_free(bn); + } + } + +@@ -83,7 +90,8 @@ jbyteArray KAE_GetByteArrayFromBigNum(JNIEnv* env, const BIGNUM* bn) { + return NULL; + } + // bn size need plus 1, for example 65535 , BN_num_bytes return 2 +- int bnSize = BN_num_bytes(bn); ++ // Changed from macro, BN_num_bytes(bn) is ((BN_num_bits(bn)+7)/8); ++ int bnSize = SSL_UTILS_BN_num_bytes(bn); + if (bnSize <= 0) { + return NULL; + } +@@ -99,7 +107,7 @@ jbyteArray KAE_GetByteArrayFromBigNum(JNIEnv* env, const BIGNUM* bn) { + return NULL; + } + unsigned char* tmp = (unsigned char*) bytes; +- if (BN_bn2bin(bn, tmp + 1) <= 0) { ++ if (SSL_UTILS_BN_bn2bin(bn, tmp + 1) <= 0) { + KAE_ThrowFromOpenssl(env, "BN_bn2bin", KAE_ThrowRuntimeException); + javaBytes = NULL; + goto cleanup; +@@ -111,7 +119,7 @@ cleanup: + return javaBytes; + } + +-#define ENGINE_LENGTH (EC_INDEX + 1) ++#define 
ENGINE_LENGTH (SM2_INDEX + 1) + static ENGINE* engines[ENGINE_LENGTH] = {NULL}; + static jboolean engineFlags[ENGINE_LENGTH] = {JNI_FALSE}; + static KAEAlgorithm kaeAlgorithms[ENGINE_LENGTH] = { +@@ -143,7 +151,8 @@ static KAEAlgorithm kaeAlgorithms[ENGINE_LENGTH] = { + {HMAC_SHA512_INDEX, "hmac-sha512"}, + {RSA_INDEX, "rsa"}, + {DH_INDEX, "dh"}, +- {EC_INDEX, "ec"} ++ {EC_INDEX, "ec"}, ++ {SM2_INDEX, "sm2"} + }; + + void initEngines(JNIEnv* env, jbooleanArray algorithmKaeFlags) { +diff --git a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_util.h b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_util.h +index 6eb980d62..5df310ba7 100644 +--- a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_util.h ++++ b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/kae_util.h +@@ -56,7 +56,8 @@ typedef enum { + HMAC_SHA512_INDEX, + RSA_INDEX, + DH_INDEX, +- EC_INDEX ++ EC_INDEX, ++ SM2_INDEX + } AlgorithmIndex; + + typedef struct { +@@ -70,6 +71,9 @@ BIGNUM* KAE_GetBigNumFromByteArray(JNIEnv* env, jbyteArray byteArray); + /* release BIGNUM allocat from */ + void KAE_ReleaseBigNumFromByteArray(BIGNUM* bn); + ++/* release BIGNUM allocated from byte array and clear the data */ ++void KAE_ReleaseBigNumFromByteArray_Clear(BIGNUM* bn); ++ + /* BIGNUM convert to jbyteArray */ + jbyteArray KAE_GetByteArrayFromBigNum(JNIEnv* env, const BIGNUM* bn); + +diff --git a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/openssl1_macro.h b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/openssl1_macro.h +new file mode 100644 +index 000000000..2bc3ad3f5 +--- /dev/null ++++ b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/openssl1_macro.h +@@ -0,0 +1,55 @@ ++/* ++ * Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions.
++ */ ++ ++#ifndef OPENSSL1_MACRO_H ++#define OPENSSL1_MACRO_H ++ ++#define SSL1_EVP_PKEY_OP_UNDEFINED 0 ++#define SSL1_EVP_PKEY_OP_PARAMGEN (1 << 1) ++#define SSL1_EVP_PKEY_OP_KEYGEN (1 << 2) ++#define SSL1_EVP_PKEY_OP_SIGN (1 << 3) ++#define SSL1_EVP_PKEY_OP_VERIFY (1 << 4) ++#define SSL1_EVP_PKEY_OP_VERIFYRECOVER (1 << 5) ++#define SSL1_EVP_PKEY_OP_SIGNCTX (1 << 6) ++#define SSL1_EVP_PKEY_OP_VERIFYCTX (1 << 7) ++#define SSL1_EVP_PKEY_OP_ENCRYPT (1 << 8) ++#define SSL1_EVP_PKEY_OP_DECRYPT (1 << 9) ++#define SSL1_EVP_PKEY_OP_DERIVE (1 << 10) ++ ++#define SSL1_EVP_PKEY_ALG_CTRL 0x1000 ++#define SSL1_EVP_PKEY_CTRL_RSA_PADDING (SSL1_EVP_PKEY_ALG_CTRL + 1) ++#define SSL1_EVP_PKEY_OP_TYPE_SIG \ ++ (SSL1_EVP_PKEY_OP_SIGN | SSL1_EVP_PKEY_OP_VERIFY | SSL1_EVP_PKEY_OP_VERIFYRECOVER | SSL1_EVP_PKEY_OP_SIGNCTX | \ ++ SSL1_EVP_PKEY_OP_VERIFYCTX) ++#define SSL1_EVP_PKEY_CTRL_MD 1 ++#define SSL1_EVP_PKEY_OP_TYPE_CRYPT (SSL1_EVP_PKEY_OP_ENCRYPT | SSL1_EVP_PKEY_OP_DECRYPT) ++#define SSL1_EVP_PKEY_CTRL_RSA_MGF1_MD (SSL1_EVP_PKEY_ALG_CTRL + 5) ++#define SSL1_EVP_PKEY_CTRL_RSA_PSS_SALTLEN (SSL1_EVP_PKEY_ALG_CTRL + 2) ++#define SSL1_EVP_PKEY_CTRL_RSA_OAEP_LABEL (SSL1_EVP_PKEY_ALG_CTRL + 10) ++#define SSL1_EVP_PKEY_CTRL_RSA_OAEP_MD (SSL1_EVP_PKEY_ALG_CTRL + 9) ++#define SSL1_EVP_PKEY_CTRL_SET1_ID (SSL1_EVP_PKEY_ALG_CTRL + 11) ++ ++#define SSL1_NID_rsaEncryption 6 ++#define SSL1_EVP_PKEY_RSA SSL1_NID_rsaEncryption ++ ++#endif // OPENSSL1_MACRO_H +\ No newline at end of file +diff --git a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/openssl3_macro.h b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/openssl3_macro.h +new file mode 100644 +index 000000000..b0fe3fa95 +--- /dev/null ++++ b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/openssl3_macro.h +@@ -0,0 +1,37 @@ ++/* ++ * Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ */ ++ ++#ifndef OPENSSL3_MACRO_H ++#define OPENSSL3_MACRO_H ++ ++#define SSL3_ERR_LIB_SYS 2 ++#define SSL3_ERR_LIB_OFFSET 23L ++#define SSL3_ERR_LIB_MASK 0xFF ++#define SSL3_INT_MAX __INT_MAX__ ++#define SSL3_ERR_SYSTEM_FLAG ((unsigned int)SSL3_INT_MAX + 1) ++#define SSL3_ERR_SYSTEM_MASK ((unsigned int)SSL3_INT_MAX) ++#define SSL3_ERR_REASON_MASK 0x7FFFFF ++ ++#define SSL3_ERR_SYSTEM_ERROR(errcode) (((errcode)&SSL3_ERR_SYSTEM_FLAG) != 0) ++ ++#endif // OPENSSL3_MACRO_H +\ No newline at end of file +diff --git a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/ssl_utils.c b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/ssl_utils.c +new file mode 100644 +index 000000000..6cd9c61d1 +--- /dev/null ++++ b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/ssl_utils.c +@@ -0,0 +1,1367 @@ ++/* ++ * Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ */ ++ ++#include ++#include ++#include ++#include "kae_exception.h" ++#include "kae_log.h" ++#include "openssl1_macro.h" ++#include "openssl3_macro.h" ++#include "ssl_utils.h" ++ ++typedef char *(*OpenSSL_version_func_t)(int t); ++typedef RSA *(*RSA_new_method_func_t)(ENGINE *engine); ++typedef int (*RSA_generate_key_ex_func_t)(RSA *rsa, int bits, BIGNUM *e_value, BN_GENCB *cb); ++typedef void (*RSA_free_func_t)(RSA *rsa); ++typedef int (*OPENSSL_init_ssl_func_t)(uint64_t opts, const OPENSSL_INIT_SETTINGS *settings); ++typedef int (*ERR_load_BIO_strings_func_t)(void); ++typedef int (*OPENSSL_init_crypto_func_t)(uint64_t opts, const OPENSSL_INIT_SETTINGS *settings); ++typedef int (*ENGINE_free_func_t)(ENGINE *e); ++typedef ENGINE *(*ENGINE_by_id_func_t)(const char *id); ++typedef EVP_MD *(*EVP_get_digestbyname_func_t)(const char *name); ++typedef void (*EVP_PKEY_CTX_free_func_t)(EVP_PKEY_CTX *ctx); ++typedef int (*EVP_PKEY_CTX_set_rsa_padding_func_t)(EVP_PKEY_CTX *ctx, int pad_mode); ++typedef int (*EVP_PKEY_CTX_set_signature_md_func_t)(EVP_PKEY_CTX *ctx, const EVP_MD *md); ++typedef EVP_PKEY_CTX *(*EVP_PKEY_CTX_new_func_t)(EVP_PKEY *pkey, ENGINE *e); ++typedef int (*EVP_PKEY_sign_init_func_t)(EVP_PKEY_CTX *ctx); ++typedef int (*EVP_PKEY_sign_func_t)( ++ EVP_PKEY_CTX *ctx, unsigned char *sig, size_t *siglen, const unsigned char *tbs, size_t tbslen); ++typedef int (*EVP_PKEY_verify_init_func_t)(EVP_PKEY_CTX *ctx); ++typedef int (*EVP_PKEY_verify_func_t)( ++ EVP_PKEY_CTX *ctx, const unsigned char *sig, size_t siglen, const unsigned char *tbs, size_t tbslen); ++typedef int (*EVP_PKEY_CTX_set_rsa_mgf1_md_func_t)(EVP_PKEY_CTX *ctx, const EVP_MD *md); ++typedef int (*EVP_PKEY_CTX_set_rsa_pss_saltlen_func_t)(EVP_PKEY_CTX *ctx, int len); ++typedef int (*EVP_PKEY_size_func_t)(const EVP_PKEY *pkey); ++typedef EVP_CIPHER *(*EVP_get_cipherbyname_func_t)(const char *name); ++typedef EVP_CIPHER_CTX *(*EVP_CIPHER_CTX_new_func_t)(void); ++typedef int (*EVP_CipherInit_ex_func_t)(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *type, ENGINE *impl, ++ const unsigned char *key, const unsigned char *iv, int enc); ++typedef int (*EVP_CIPHER_CTX_set_padding_func_t)(EVP_CIPHER_CTX *ctx, int pad); ++typedef void (*EVP_CIPHER_CTX_free_func_t)(EVP_CIPHER_CTX *ctx); ++typedef int (*EVP_CipherUpdate_func_t)( ++ EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl, const unsigned char *in, int inl); ++typedef int (*EVP_CipherFinal_ex_func_t)(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl); ++typedef int (*EVP_CIPHER_CTX_ctrl_func_t)(EVP_CIPHER_CTX *ctx, int type, int arg, void *ptr); ++typedef BIGNUM *(*BN_new_func_t)(void); ++typedef BIGNUM *(*BN_bin2bn_func_t)(const unsigned char *s, int len, BIGNUM *ret); ++typedef void (*BN_free_func_t)(BIGNUM *a); ++typedef int (*EVP_PKEY_CTX_set0_rsa_oaep_label_func_t)(EVP_PKEY_CTX *ctx, void *label, int llen); ++typedef int (*EVP_PKEY_CTX_set_rsa_oaep_md_func_t)(EVP_PKEY_CTX *ctx, const EVP_MD *md); ++typedef EVP_PKEY *(*EVP_PKEY_new_func_t)(void); ++typedef int (*RSA_set0_key_func_t)(RSA *r, BIGNUM *n, BIGNUM *e, BIGNUM *d); ++typedef int (*RSA_set0_factors_func_t)(RSA *r, BIGNUM *p, BIGNUM *q); ++typedef int (*RSA_set0_crt_params_func_t)(RSA *r, BIGNUM *dmp1, BIGNUM *dmq1, BIGNUM *iqmp); ++typedef void (*EVP_PKEY_free_func_t)(EVP_PKEY *x); ++typedef int (*RSA_private_encrypt_func_t)( ++ int flen, const unsigned char *from, unsigned char *to, RSA *rsa, int padding); ++typedef int (*RSA_private_decrypt_func_t)( ++ int flen, const unsigned char *from, unsigned char *to, RSA *rsa, int 
padding); ++typedef int (*RSA_public_encrypt_func_t)(int flen, const unsigned char *from, unsigned char *to, RSA *rsa, int padding); ++typedef int (*RSA_public_decrypt_func_t)(int flen, const unsigned char *from, unsigned char *to, RSA *rsa, int padding); ++typedef int (*EVP_PKEY_encrypt_init_func_t)(EVP_PKEY_CTX *ctx); ++typedef int (*EVP_PKEY_encrypt_func_t)( ++ EVP_PKEY_CTX *ctx, unsigned char *out, size_t *outlen, const unsigned char *in, size_t inlen); ++typedef int (*EVP_PKEY_decrypt_init_func_t)(EVP_PKEY_CTX *ctx); ++typedef int (*EVP_PKEY_decrypt_func_t)( ++ EVP_PKEY_CTX *ctx, unsigned char *out, size_t *outlen, const unsigned char *in, size_t inlen); ++typedef EVP_MD_CTX *(*EVP_MD_CTX_new_func_t)(void); ++typedef int (*EVP_DigestInit_ex_func_t)(EVP_MD_CTX *ctx, const EVP_MD *type, ENGINE *impl); ++typedef void (*EVP_MD_CTX_free_func_t)(EVP_MD_CTX *ctx); ++typedef int (*EVP_DigestUpdate_func_t)(EVP_MD_CTX *ctx, const void *data, size_t count); ++typedef int (*EVP_DigestFinal_ex_func_t)(EVP_MD_CTX *ctx, unsigned char *md, unsigned int *size); ++typedef int (*EVP_MD_CTX_copy_ex_func_t)(EVP_MD_CTX *out, const EVP_MD_CTX *in); ++typedef unsigned long (*ERR_get_error_line_data_func_t)(const char **file, int *line, const char **data, int *flags); ++typedef void (*ERR_error_string_n_func_t)(unsigned long e, char *buf, size_t len); ++typedef void (*ERR_clear_error_func_t)(void); ++typedef HMAC_CTX *(*HMAC_CTX_new_func_t)(void); ++typedef int (*HMAC_Init_ex_func_t)(HMAC_CTX *ctx, const void *key, int len, const EVP_MD *md, ENGINE *impl); ++typedef void (*HMAC_CTX_free_func_t)(HMAC_CTX *ctx); ++typedef int (*HMAC_Update_func_t)(HMAC_CTX *ctx, const unsigned char *data, size_t len); ++typedef int (*HMAC_Final_func_t)(HMAC_CTX *ctx, unsigned char *md, unsigned int *len); ++typedef DH *(*DH_new_method_func_t)(ENGINE *engine); ++typedef int (*DH_set0_pqg_func_t)(DH *dh, BIGNUM *p, BIGNUM *q, BIGNUM *g); ++typedef int (*DH_set0_key_func_t)(DH *dh, BIGNUM *pub_key, BIGNUM *priv_key); ++typedef int (*DH_compute_key_func_t)(unsigned char *key, const BIGNUM *pub_key, DH *dh); ++typedef void (*DH_free_func_t)(DH *r); ++typedef void (*EC_POINT_free_func_t)(EC_POINT *point); ++typedef void (*EC_KEY_free_func_t)(EC_KEY *r); ++typedef void (*EC_GROUP_free_func_t)(EC_GROUP *group); ++typedef int (*OBJ_sn2nid_func_t)(const char *s); ++typedef EC_GROUP *(*EC_GROUP_new_by_curve_name_func_t)(int nid); ++typedef EC_KEY *(*EC_KEY_new_func_t)(void); ++typedef int (*EC_KEY_set_group_func_t)(EC_KEY *key, const EC_GROUP *group); ++typedef EC_POINT *(*EC_POINT_new_func_t)(const EC_GROUP *group); ++typedef int (*EC_POINT_set_affine_coordinates_GFp_func_t)( ++ const EC_GROUP *group, EC_POINT *point, const BIGNUM *x, const BIGNUM *y, BN_CTX *ctx); ++typedef int (*EC_KEY_set_public_key_func_t)(EC_KEY *key, const EC_POINT *pub_key); ++typedef int (*EC_KEY_set_private_key_func_t)(EC_KEY *key, const BIGNUM *priv_key); ++typedef int (*EC_GROUP_get_degree_func_t)(const EC_GROUP *group); ++typedef int (*ECDH_compute_key_func_t)(void *out, size_t outlen, const EC_POINT *pub_key, const EC_KEY *eckey, ++ void *(*KDF)(const void *in, size_t inlen, void *out, size_t *outlen)); ++typedef int (*DH_set_length_func_t)(DH *dh, long length); ++typedef int (*DH_generate_key_func_t)(DH *dh); ++typedef BIGNUM *(*DH_get0_priv_key_func_t)(const DH *dh); ++typedef BIGNUM *(*DH_get0_pub_key_func_t)(const DH *dh); ++typedef int (*EC_GROUP_get_curve_GFp_func_t)(const EC_GROUP *group, BIGNUM *p, BIGNUM *a, BIGNUM *b, BN_CTX *ctx); ++typedef 
EC_POINT *(*EC_GROUP_get0_generator_func_t)(const EC_GROUP *group); ++typedef int (*EC_POINT_get_affine_coordinates_GFp_func_t)( ++ const EC_GROUP *group, const EC_POINT *point, BIGNUM *x, BIGNUM *y, BN_CTX *ctx); ++typedef int (*EC_GROUP_get_order_func_t)(const EC_GROUP *group, BIGNUM *order, BN_CTX *ctx); ++typedef int (*EC_GROUP_get_cofactor_func_t)(const EC_GROUP *group, BIGNUM *cofactor, BN_CTX *ctx); ++typedef EC_POINT *(*EC_KEY_get0_public_key_func_t)(const EC_KEY *key); ++typedef BIGNUM *(*EC_KEY_get0_private_key_func_t)(const EC_KEY *key); ++typedef int (*BN_set_word_func_t)(BIGNUM *a, BN_ULONG w); ++typedef EC_GROUP *(*EC_GROUP_new_curve_GFp_func_t)(const BIGNUM *p, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx); ++typedef int (*EC_GROUP_set_generator_func_t)( ++ EC_GROUP *group, const EC_POINT *generator, const BIGNUM *order, const BIGNUM *cofactor); ++typedef void (*BN_CTX_free_func_t)(BN_CTX *ctx); ++typedef int (*EC_KEY_generate_key_func_t)(EC_KEY *eckey); ++typedef RSA *(*EVP_PKEY_get1_RSA_func_t)(EVP_PKEY *pkey); ++typedef BIGNUM *(*BN_dup_func_t)(const BIGNUM *a); ++typedef BN_CTX *(*BN_CTX_new_func_t)(void); ++typedef int (*EVP_PKEY_assign_func_t)(EVP_PKEY *pkey, int type, void *key); ++typedef int (*BN_bn2bin_func_t)(const BIGNUM *a, unsigned char *to); ++typedef const BIGNUM *(*RSA_get0_n_func_t)(const RSA *r); ++typedef const BIGNUM *(*RSA_get0_e_func_t)(const RSA *r); ++typedef const BIGNUM *(*RSA_get0_d_func_t)(const RSA *r); ++typedef const BIGNUM *(*RSA_get0_p_func_t)(const RSA *r); ++typedef const BIGNUM *(*RSA_get0_q_func_t)(const RSA *r); ++typedef const BIGNUM *(*RSA_get0_dmp1_func_t)(const RSA *r); ++typedef const BIGNUM *(*RSA_get0_dmq1_func_t)(const RSA *r); ++typedef const BIGNUM *(*RSA_get0_iqmp_func_t)(const RSA *r); ++typedef int (*EVP_PKEY_get_size_func_t)(const EVP_PKEY *pkey); ++typedef int (*RSA_pkey_ctx_ctrl_func_t)(EVP_PKEY_CTX *ctx, int optype, int cmd, int p1, void *p2); ++typedef int (*EVP_PKEY_CTX_ctrl_func_t)(EVP_PKEY_CTX *ctx, int keytype, int optype, int cmd, int p1, void *p2); ++typedef int (*BN_num_bits_func_t)(const BIGNUM *a); ++typedef int (*ERR_GET_LIB_func_t)(unsigned long errcode); ++typedef int (*ERR_GET_REASON_func_t)(unsigned long errcode); ++typedef void (*BN_clear_free_func_t)(BIGNUM *a); ++typedef EVP_MD *(*EVP_sm3_func_t)(void); ++typedef int (*EVP_PKEY_CTX_set1_id_func_t)(EVP_PKEY_CTX *ctx, const void *id, int len); ++typedef void (*EVP_MD_CTX_set_pkey_ctx_func_t)(EVP_MD_CTX *ctx, EVP_PKEY_CTX *pctx); ++typedef int (*EVP_DigestSignInit_func_t)( ++ EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey); ++typedef int (*EVP_DigestVerifyInit_func_t)( ++ EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey); ++typedef int (*EVP_DigestSignUpdate_func_t)(EVP_MD_CTX *ctx, const void *data, size_t dsize); ++typedef int (*EVP_DigestVerifyUpdate_func_t)(EVP_MD_CTX *ctx, const void *data, size_t dsize); ++typedef int (*EVP_DigestSignFinal_func_t)(EVP_MD_CTX *ctx, unsigned char *sigret, size_t *siglen); ++typedef int (*EVP_DigestVerifyFinal_func_t)(EVP_MD_CTX *ctx, const unsigned char *sig, size_t siglen); ++typedef EC_KEY *(*EC_KEY_new_by_curve_name_func_t)(int nid); ++typedef int (*EC_POINT_mul_func_t)(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, const EC_POINT *point, ++ const BIGNUM *p_scalar, BN_CTX *ctx); ++typedef int (*EVP_PKEY_set_alias_type_func_t)(EVP_PKEY *pkey, int type); ++ ++static void *_lib_handle; ++static int _sslVersion = 0; ++const int V1 = 
1; ++const int V3 = 3; ++const char *OPENSSL_VERSION_1_1 = "OpenSSL 1.1."; ++const char *OPENSSL_VERSION_3_X = "OpenSSL 3."; ++const char *OPENSSL_ENGINES_VERSION_1_1 = "/usr/local/lib/engines-1."; ++const char *OPENSSL_ENGINES_VERSION_3_X = "/usr/local/lib/engines-3."; ++ ++static OpenSSL_version_func_t _OpenSSL_version; ++static RSA_new_method_func_t _RSA_new_method; ++static RSA_generate_key_ex_func_t _RSA_generate_key_ex; ++static RSA_free_func_t _RSA_free; ++static OPENSSL_init_ssl_func_t _OPENSSL_init_ssl; ++static ERR_load_BIO_strings_func_t _ERR_load_BIO_strings; ++static OPENSSL_init_crypto_func_t _OPENSSL_init_crypto; ++static ENGINE_free_func_t _ENGINE_free; ++static ENGINE_by_id_func_t _ENGINE_by_id; ++static EVP_get_digestbyname_func_t _EVP_get_digestbyname; ++static EVP_PKEY_CTX_free_func_t _EVP_PKEY_CTX_free; ++static EVP_PKEY_CTX_set_rsa_padding_func_t _EVP_PKEY_CTX_set_rsa_padding; ++static EVP_PKEY_CTX_set_signature_md_func_t _EVP_PKEY_CTX_set_signature_md; ++static EVP_PKEY_CTX_new_func_t _EVP_PKEY_CTX_new; ++static EVP_PKEY_sign_init_func_t _EVP_PKEY_sign_init; ++static EVP_PKEY_sign_func_t _EVP_PKEY_sign; ++static EVP_PKEY_verify_init_func_t _EVP_PKEY_verify_init; ++static EVP_PKEY_verify_func_t _EVP_PKEY_verify; ++static EVP_PKEY_CTX_set_rsa_mgf1_md_func_t _EVP_PKEY_CTX_set_rsa_mgf1_md; ++static EVP_PKEY_CTX_set_rsa_pss_saltlen_func_t _EVP_PKEY_CTX_set_rsa_pss_saltlen; ++static EVP_PKEY_size_func_t _EVP_PKEY_size; ++static EVP_get_cipherbyname_func_t _EVP_get_cipherbyname; ++static EVP_CIPHER_CTX_new_func_t _EVP_CIPHER_CTX_new; ++static EVP_CipherInit_ex_func_t _EVP_CipherInit_ex; ++static EVP_CIPHER_CTX_set_padding_func_t _EVP_CIPHER_CTX_set_padding; ++static EVP_CIPHER_CTX_free_func_t _EVP_CIPHER_CTX_free; ++static EVP_CipherUpdate_func_t _EVP_CipherUpdate; ++static EVP_CipherFinal_ex_func_t _EVP_CipherFinal_ex; ++static EVP_CIPHER_CTX_ctrl_func_t _EVP_CIPHER_CTX_ctrl; ++static BN_new_func_t _BN_new; ++static BN_bin2bn_func_t _BN_bin2bn; ++static BN_free_func_t _BN_free; ++static EVP_PKEY_CTX_set0_rsa_oaep_label_func_t _EVP_PKEY_CTX_set0_rsa_oaep_label; ++static EVP_PKEY_CTX_set_rsa_oaep_md_func_t _EVP_PKEY_CTX_set_rsa_oaep_md; ++static EVP_PKEY_new_func_t _EVP_PKEY_new; ++static RSA_set0_key_func_t _RSA_set0_key; ++static RSA_set0_factors_func_t _RSA_set0_factors; ++static RSA_set0_crt_params_func_t _RSA_set0_crt_params; ++static EVP_PKEY_free_func_t _EVP_PKEY_free; ++static RSA_private_encrypt_func_t _RSA_private_encrypt; ++static RSA_private_decrypt_func_t _RSA_private_decrypt; ++static RSA_public_encrypt_func_t _RSA_public_encrypt; ++static RSA_public_decrypt_func_t _RSA_public_decrypt; ++static EVP_PKEY_encrypt_init_func_t _EVP_PKEY_encrypt_init; ++static EVP_PKEY_encrypt_func_t _EVP_PKEY_encrypt; ++static EVP_PKEY_decrypt_init_func_t _EVP_PKEY_decrypt_init; ++static EVP_PKEY_decrypt_func_t _EVP_PKEY_decrypt; ++static EVP_MD_CTX_new_func_t _EVP_MD_CTX_new; ++static EVP_DigestInit_ex_func_t _EVP_DigestInit_ex; ++static EVP_MD_CTX_free_func_t _EVP_MD_CTX_free; ++static EVP_DigestUpdate_func_t _EVP_DigestUpdate; ++static EVP_DigestFinal_ex_func_t _EVP_DigestFinal_ex; ++static EVP_MD_CTX_copy_ex_func_t _EVP_MD_CTX_copy_ex; ++static ERR_get_error_line_data_func_t _ERR_get_error_line_data; ++static ERR_error_string_n_func_t _ERR_error_string_n; ++static ERR_clear_error_func_t _ERR_clear_error; ++static HMAC_CTX_new_func_t _HMAC_CTX_new; ++static HMAC_Init_ex_func_t _HMAC_Init_ex; ++static HMAC_CTX_free_func_t _HMAC_CTX_free; ++static HMAC_Update_func_t _HMAC_Update; 
++static HMAC_Final_func_t _HMAC_Final; ++static DH_new_method_func_t _DH_new_method; ++static DH_set0_pqg_func_t _DH_set0_pqg; ++static DH_set0_key_func_t _DH_set0_key; ++static DH_compute_key_func_t _DH_compute_key; ++static DH_free_func_t _DH_free; ++static EC_POINT_free_func_t _EC_POINT_free; ++static EC_KEY_free_func_t _EC_KEY_free; ++static EC_GROUP_free_func_t _EC_GROUP_free; ++static OBJ_sn2nid_func_t _OBJ_sn2nid; ++static EC_GROUP_new_by_curve_name_func_t _EC_GROUP_new_by_curve_name; ++static EC_KEY_new_func_t _EC_KEY_new; ++static EC_KEY_set_group_func_t _EC_KEY_set_group; ++static EC_POINT_new_func_t _EC_POINT_new; ++static EC_POINT_set_affine_coordinates_GFp_func_t _EC_POINT_set_affine_coordinates_GFp; ++static EC_KEY_set_public_key_func_t _EC_KEY_set_public_key; ++static EC_KEY_set_private_key_func_t _EC_KEY_set_private_key; ++static EC_GROUP_get_degree_func_t _EC_GROUP_get_degree; ++static ECDH_compute_key_func_t _ECDH_compute_key; ++static DH_set_length_func_t _DH_set_length; ++static DH_generate_key_func_t _DH_generate_key; ++static DH_get0_priv_key_func_t _DH_get0_priv_key; ++static DH_get0_pub_key_func_t _DH_get0_pub_key; ++static EC_GROUP_get_curve_GFp_func_t _EC_GROUP_get_curve_GFp; ++static EC_GROUP_get0_generator_func_t _EC_GROUP_get0_generator; ++static EC_POINT_get_affine_coordinates_GFp_func_t _EC_POINT_get_affine_coordinates_GFp; ++static EC_GROUP_get_order_func_t _EC_GROUP_get_order; ++static EC_GROUP_get_cofactor_func_t _EC_GROUP_get_cofactor; ++static EC_KEY_get0_public_key_func_t _EC_KEY_get0_public_key; ++static EC_KEY_get0_private_key_func_t _EC_KEY_get0_private_key; ++static BN_set_word_func_t _BN_set_word; ++static EC_GROUP_new_curve_GFp_func_t _EC_GROUP_new_curve_GFp; ++static EC_GROUP_set_generator_func_t _EC_GROUP_set_generator; ++static BN_CTX_free_func_t _BN_CTX_free; ++static EC_KEY_generate_key_func_t _EC_KEY_generate_key; ++static EVP_PKEY_get1_RSA_func_t _EVP_PKEY_get1_RSA; ++static BN_dup_func_t _BN_dup; ++static BN_CTX_new_func_t _BN_CTX_new; ++static EVP_PKEY_assign_func_t _EVP_PKEY_assign; ++static BN_bn2bin_func_t _BN_bn2bin; ++static RSA_get0_n_func_t _RSA_get0_n; ++static RSA_get0_e_func_t _RSA_get0_e; ++static RSA_get0_d_func_t _RSA_get0_d; ++static RSA_get0_p_func_t _RSA_get0_p; ++static RSA_get0_q_func_t _RSA_get0_q; ++static RSA_get0_dmp1_func_t _RSA_get0_dmp1; ++static RSA_get0_dmq1_func_t _RSA_get0_dmq1; ++static RSA_get0_iqmp_func_t _RSA_get0_iqmp; ++static EVP_PKEY_get_size_func_t _EVP_PKEY_get_size; ++static RSA_pkey_ctx_ctrl_func_t _RSA_pkey_ctx_ctrl; ++static EVP_PKEY_CTX_ctrl_func_t _EVP_PKEY_CTX_ctrl; ++static BN_num_bits_func_t _BN_num_bits; ++static ERR_GET_LIB_func_t _ERR_GET_LIB; ++static ERR_GET_REASON_func_t _ERR_GET_REASON; ++static BN_clear_free_func_t _BN_clear_free; ++static EVP_sm3_func_t _EVP_sm3; ++static EVP_PKEY_CTX_set1_id_func_t _EVP_PKEY_CTX_set1_id; ++static EVP_MD_CTX_set_pkey_ctx_func_t _EVP_MD_CTX_set_pkey_ctx; ++static EVP_DigestSignInit_func_t _EVP_DigestSignInit; ++static EVP_DigestVerifyInit_func_t _EVP_DigestVerifyInit; ++static EVP_DigestSignUpdate_func_t _EVP_DigestSignUpdate; ++static EVP_DigestVerifyUpdate_func_t _EVP_DigestVerifyUpdate; ++static EVP_DigestSignFinal_func_t _EVP_DigestSignFinal; ++static EVP_DigestVerifyFinal_func_t _EVP_DigestVerifyFinal; ++static EC_KEY_new_by_curve_name_func_t _EC_KEY_new_by_curve_name; ++static EC_POINT_mul_func_t _EC_POINT_mul; ++static EVP_PKEY_set_alias_type_func_t _EVP_PKEY_set_alias_type; ++ ++const int COMMON_FUNC_START_INDEX = 0; ++const int 
COMMON_FUNC_END_INDEX = 111; ++const int V1_FUNC_START_INDEX = 112; ++const int V1_FUNC_END_INDEX = 113; ++const int V3_FUNC_START_INDEX = 114; ++const int V3_FUNC_END_INDEX = 123; ++ ++const char *SSL_UTILS_OpenSSL_version(int t) ++{ ++ return (*_OpenSSL_version)(t); ++} ++const BIGNUM *SSL_UTILS_RSA_get0_n(const RSA *r) ++{ ++ return (*_RSA_get0_n)(r); ++} ++ ++const BIGNUM *SSL_UTILS_RSA_get0_e(const RSA *r) ++{ ++ return (*_RSA_get0_e)(r); ++} ++ ++const BIGNUM *SSL_UTILS_RSA_get0_d(const RSA *r) ++{ ++ return (*_RSA_get0_d)(r); ++} ++ ++const BIGNUM *SSL_UTILS_RSA_get0_p(const RSA *r) ++{ ++ return (*_RSA_get0_p)(r); ++} ++ ++const BIGNUM *SSL_UTILS_RSA_get0_q(const RSA *r) ++{ ++ return (*_RSA_get0_q)(r); ++} ++ ++const BIGNUM *SSL_UTILS_RSA_get0_dmp1(const RSA *r) ++{ ++ return (*_RSA_get0_dmp1)(r); ++} ++ ++const BIGNUM *SSL_UTILS_RSA_get0_dmq1(const RSA *r) ++{ ++ return (*_RSA_get0_dmq1)(r); ++} ++ ++const BIGNUM *SSL_UTILS_RSA_get0_iqmp(const RSA *r) ++{ ++ return (*_RSA_get0_iqmp)(r); ++} ++ ++RSA *SSL_UTILS_RSA_new_method(ENGINE *engine) ++{ ++ return (*_RSA_new_method)(engine); ++} ++ ++int SSL_UTILS_RSA_generate_key_ex(RSA *rsa, int bits, BIGNUM *e_value, BN_GENCB *cb) ++{ ++ return (*_RSA_generate_key_ex)(rsa, bits, e_value, cb); ++} ++ ++void SSL_UTILS_RSA_free(RSA *rsa) ++{ ++ (*_RSA_free)(rsa); ++} ++ ++int SSL_UTILS_OPENSSL_init_ssl(uint64_t opts, const OPENSSL_INIT_SETTINGS *settings) ++{ ++ return (*_OPENSSL_init_ssl)(opts, settings); ++} ++ ++int SSL_UTILS_ERR_load_BIO_strings() ++{ ++ return (*_ERR_load_BIO_strings)(); ++} ++ ++int SSL_UTILS_OPENSSL_init_crypto(uint64_t opts, const OPENSSL_INIT_SETTINGS *settings) ++{ ++ return (*_OPENSSL_init_crypto)(opts, settings); ++} ++ ++int SSL_UTILS_ENGINE_free(ENGINE *e) ++{ ++ return (*_ENGINE_free)(e); ++} ++ ++ENGINE *SSL_UTILS_ENGINE_by_id(const char *id) ++{ ++ return (*_ENGINE_by_id)(id); ++} ++ ++EVP_MD *SSL_UTILS_EVP_get_digestbyname(const char *name) ++{ ++ return (*_EVP_get_digestbyname)(name); ++} ++ ++void SSL_UTILS_EVP_PKEY_CTX_free(EVP_PKEY_CTX *ctx) ++{ ++ (*_EVP_PKEY_CTX_free)(ctx); ++} ++ ++int SSL_UTILS_EVP_PKEY_CTX_set_rsa_padding(EVP_PKEY_CTX *ctx, int pad_mode) ++{ ++ // EVP_PKEY_CTX_set_rsa_padding is macro in openssl 1 ++ if (get_sslVersion() == V1) { ++ KAE_TRACE("SSL_UTILS_EVP_PKEY_CTX_set_rsa_padding, openssl version is 1"); ++ return (*_RSA_pkey_ctx_ctrl)(ctx, -1, SSL1_EVP_PKEY_CTRL_RSA_PADDING, pad_mode, NULL); ++ } ++ return (*_EVP_PKEY_CTX_set_rsa_padding)(ctx, pad_mode); ++} ++ ++int SSL_UTILS_EVP_PKEY_CTX_set_signature_md(EVP_PKEY_CTX *ctx, const EVP_MD *md) ++{ ++ // EVP_PKEY_CTX_set_signature_md is macro in openssl 1 ++ if (get_sslVersion() == V1) { ++ return (*_EVP_PKEY_CTX_ctrl)(ctx, -1, SSL1_EVP_PKEY_OP_TYPE_SIG, SSL1_EVP_PKEY_CTRL_MD, 0, (void *)(md)); ++ } ++ return (*_EVP_PKEY_CTX_set_signature_md)(ctx, md); ++} ++ ++EVP_PKEY_CTX *SSL_UTILS_EVP_PKEY_CTX_new(EVP_PKEY *pkey, ENGINE *e) ++{ ++ return (*_EVP_PKEY_CTX_new)(pkey, e); ++} ++ ++int SSL_UTILS_EVP_PKEY_sign_init(EVP_PKEY_CTX *ctx) ++{ ++ return (*_EVP_PKEY_sign_init)(ctx); ++} ++ ++int SSL_UTILS_EVP_PKEY_sign( ++ EVP_PKEY_CTX *ctx, unsigned char *sig, size_t *siglen, const unsigned char *tbs, size_t tbslen) ++{ ++ return (*_EVP_PKEY_sign)(ctx, sig, siglen, tbs, tbslen); ++} ++ ++int SSL_UTILS_EVP_PKEY_verify_init(EVP_PKEY_CTX *ctx) ++{ ++ return (*_EVP_PKEY_verify_init)(ctx); ++} ++ ++int SSL_UTILS_EVP_PKEY_verify( ++ EVP_PKEY_CTX *ctx, const unsigned char *sig, size_t siglen, const unsigned char *tbs, size_t tbslen) ++{ ++ 
return (*_EVP_PKEY_verify)(ctx, sig, siglen, tbs, tbslen); ++} ++ ++int SSL_UTILS_EVP_PKEY_CTX_set_rsa_mgf1_md(EVP_PKEY_CTX *ctx, const EVP_MD *md) ++{ ++ // RSA_pkey_ctx_ctrl is macro in openssl 1 ++ if (get_sslVersion() == V1) { ++ return (*_RSA_pkey_ctx_ctrl)(ctx, ++ SSL1_EVP_PKEY_OP_TYPE_SIG | SSL1_EVP_PKEY_OP_TYPE_CRYPT, ++ SSL1_EVP_PKEY_CTRL_RSA_MGF1_MD, ++ 0, ++ (void *)(md)); ++ } ++ return (*_EVP_PKEY_CTX_set_rsa_mgf1_md)(ctx, md); ++} ++ ++int SSL_UTILS_EVP_PKEY_CTX_set_rsa_pss_saltlen(EVP_PKEY_CTX *ctx, int len) ++{ ++ // EVP_PKEY_CTX_set_rsa_pss_saltlen is macro in openssl 1 ++ if (get_sslVersion() == V1) { ++ return (*_RSA_pkey_ctx_ctrl)( ++ ctx, (SSL1_EVP_PKEY_OP_SIGN | SSL1_EVP_PKEY_OP_VERIFY), SSL1_EVP_PKEY_CTRL_RSA_PSS_SALTLEN, len, NULL); ++ } ++ return (*_EVP_PKEY_CTX_set_rsa_pss_saltlen)(ctx, len); ++} ++ ++int SSL_UTILS_EVP_PKEY_size(const EVP_PKEY *pkey) ++{ ++ // EVP_PKEY_size is macro in openssl 3 ++ if (get_sslVersion() == V3) { ++ return (*_EVP_PKEY_get_size)(pkey); ++ } ++ return (*_EVP_PKEY_size)(pkey); ++} ++ ++EVP_CIPHER *SSL_UTILS_EVP_get_cipherbyname(const char *name) ++{ ++ return (*_EVP_get_cipherbyname)(name); ++} ++ ++EVP_CIPHER_CTX *SSL_UTILS_EVP_CIPHER_CTX_new(void) ++{ ++ return (*_EVP_CIPHER_CTX_new)(); ++} ++ ++int SSL_UTILS_EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *type, ENGINE *impl, const unsigned char *key, ++ const unsigned char *iv, int enc) ++{ ++ return (*_EVP_CipherInit_ex)(ctx, type, impl, key, iv, enc); ++} ++ ++int SSL_UTILS_EVP_CIPHER_CTX_set_padding(EVP_CIPHER_CTX *ctx, int pad) ++{ ++ return (*_EVP_CIPHER_CTX_set_padding)(ctx, pad); ++} ++ ++void SSL_UTILS_EVP_CIPHER_CTX_free(EVP_CIPHER_CTX *ctx) ++{ ++ (*_EVP_CIPHER_CTX_free)(ctx); ++} ++ ++int SSL_UTILS_EVP_CipherUpdate(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl, const unsigned char *in, int inl) ++{ ++ return (*_EVP_CipherUpdate)(ctx, out, outl, in, inl); ++} ++ ++int SSL_UTILS_EVP_CipherFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl) ++{ ++ return (*_EVP_CipherFinal_ex)(ctx, out, outl); ++} ++int SSL_UTILS_EVP_CIPHER_CTX_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void *ptr) ++{ ++ return (*_EVP_CIPHER_CTX_ctrl)(ctx, type, arg, ptr); ++} ++BIGNUM *SSL_UTILS_BN_new(void) ++{ ++ return (*_BN_new)(); ++} ++BIGNUM *SSL_UTILS_BN_bin2bn(const unsigned char *s, int len, BIGNUM *ret) ++{ ++ return (*_BN_bin2bn)(s, len, ret); ++} ++void SSL_UTILS_BN_free(BIGNUM *a) ++{ ++ (*_BN_free)(a); ++} ++ ++int SSL_UTILS_EVP_PKEY_CTX_set0_rsa_oaep_label(EVP_PKEY_CTX *ctx, void *label, int llen) ++{ ++ // EVP_PKEY_CTX_set0_rsa_oaep_label is macro in openssl 1 ++ if (get_sslVersion() == V1) { ++ return (*_EVP_PKEY_CTX_ctrl)(ctx, ++ SSL1_EVP_PKEY_RSA, ++ SSL1_EVP_PKEY_OP_TYPE_CRYPT, ++ SSL1_EVP_PKEY_CTRL_RSA_OAEP_LABEL, ++ llen, ++ (void *)(label)); ++ } ++ return (*_EVP_PKEY_CTX_set0_rsa_oaep_label)(ctx, label, llen); ++} ++ ++int SSL_UTILS_EVP_PKEY_CTX_set_rsa_oaep_md(EVP_PKEY_CTX *ctx, const EVP_MD *md) ++{ ++ // EVP_PKEY_CTX_set_rsa_oaep_md is macro in openssl 1 ++ if (get_sslVersion() == V1) { ++ return (*_EVP_PKEY_CTX_ctrl)( ++ ctx, SSL1_EVP_PKEY_RSA, SSL1_EVP_PKEY_OP_TYPE_CRYPT, SSL1_EVP_PKEY_CTRL_RSA_OAEP_MD, 0, (void *)(md)); ++ } ++ return (*_EVP_PKEY_CTX_set_rsa_oaep_md)(ctx, md); ++} ++EVP_PKEY *SSL_UTILS_EVP_PKEY_new(void) ++{ ++ return (*_EVP_PKEY_new)(); ++} ++int SSL_UTILS_RSA_set0_key(RSA *r, BIGNUM *n, BIGNUM *e, BIGNUM *d) ++{ ++ return (*_RSA_set0_key)(r, n, e, d); ++} ++int SSL_UTILS_RSA_set0_factors(RSA *r, BIGNUM *p, BIGNUM *q) ++{ ++ return 
(*_RSA_set0_factors)(r, p, q); ++} ++int SSL_UTILS_RSA_set0_crt_params(RSA *r, BIGNUM *dmp1, BIGNUM *dmq1, BIGNUM *iqmp) ++{ ++ return (*_RSA_set0_crt_params)(r, dmp1, dmq1, iqmp); ++} ++void SSL_UTILS_EVP_PKEY_free(EVP_PKEY *x) ++{ ++ (*_EVP_PKEY_free)(x); ++} ++int SSL_UTILS_RSA_private_encrypt(int flen, const unsigned char *from, unsigned char *to, RSA *rsa, int padding) ++{ ++ return (*_RSA_private_encrypt)(flen, from, to, rsa, padding); ++} ++int SSL_UTILS_RSA_private_decrypt(int flen, const unsigned char *from, unsigned char *to, RSA *rsa, int padding) ++{ ++ return (*_RSA_private_decrypt)(flen, from, to, rsa, padding); ++} ++int SSL_UTILS_RSA_public_encrypt(int flen, const unsigned char *from, unsigned char *to, RSA *rsa, int padding) ++{ ++ return (*_RSA_public_encrypt)(flen, from, to, rsa, padding); ++} ++int SSL_UTILS_RSA_public_decrypt(int flen, const unsigned char *from, unsigned char *to, RSA *rsa, int padding) ++{ ++ return (*_RSA_public_decrypt)(flen, from, to, rsa, padding); ++} ++int SSL_UTILS_EVP_PKEY_encrypt_init(EVP_PKEY_CTX *ctx) ++{ ++ return (*_EVP_PKEY_encrypt_init)(ctx); ++} ++int SSL_UTILS_EVP_PKEY_encrypt( ++ EVP_PKEY_CTX *ctx, unsigned char *out, size_t *outlen, const unsigned char *in, size_t inlen) ++{ ++ return (*_EVP_PKEY_encrypt)(ctx, out, outlen, in, inlen); ++} ++int SSL_UTILS_EVP_PKEY_decrypt_init(EVP_PKEY_CTX *ctx) ++{ ++ return (*_EVP_PKEY_decrypt_init)(ctx); ++} ++int SSL_UTILS_EVP_PKEY_decrypt( ++ EVP_PKEY_CTX *ctx, unsigned char *out, size_t *outlen, const unsigned char *in, size_t inlen) ++{ ++ return (*_EVP_PKEY_decrypt)(ctx, out, outlen, in, inlen); ++} ++EVP_MD_CTX *SSL_UTILS_EVP_MD_CTX_new(void) ++{ ++ return (*_EVP_MD_CTX_new)(); ++} ++int SSL_UTILS_EVP_DigestInit_ex(EVP_MD_CTX *ctx, const EVP_MD *type, ENGINE *impl) ++{ ++ return (*_EVP_DigestInit_ex)(ctx, type, impl); ++} ++void SSL_UTILS_EVP_MD_CTX_free(EVP_MD_CTX *ctx) ++{ ++ return (*_EVP_MD_CTX_free)(ctx); ++} ++int SSL_UTILS_EVP_DigestUpdate(EVP_MD_CTX *ctx, const void *data, size_t count) ++{ ++ return (*_EVP_DigestUpdate)(ctx, data, count); ++} ++int SSL_UTILS_EVP_DigestFinal_ex(EVP_MD_CTX *ctx, unsigned char *md, unsigned int *size) ++{ ++ return (*_EVP_DigestFinal_ex)(ctx, md, size); ++} ++int SSL_UTILS_EVP_MD_CTX_copy_ex(EVP_MD_CTX *out, const EVP_MD_CTX *in) ++{ ++ return (*_EVP_MD_CTX_copy_ex)(out, in); ++} ++unsigned long SSL_UTILS_ERR_get_error_line_data(const char **file, int *line, const char **data, int *flags) ++{ ++ return (*_ERR_get_error_line_data)(file, line, data, flags); ++} ++void SSL_UTILS_ERR_error_string_n(unsigned long e, char *buf, size_t len) ++{ ++ (*_ERR_error_string_n)(e, buf, len); ++} ++void SSL_UTILS_ERR_clear_error(void) ++{ ++ (*_ERR_clear_error)(); ++} ++HMAC_CTX *SSL_UTILS_HMAC_CTX_new(void) ++{ ++ return (*_HMAC_CTX_new)(); ++} ++int SSL_UTILS_HMAC_Init_ex(HMAC_CTX *ctx, const void *key, int len, const EVP_MD *md, ENGINE *impl) ++{ ++ return (*_HMAC_Init_ex)(ctx, key, len, md, impl); ++} ++void SSL_UTILS_HMAC_CTX_free(HMAC_CTX *ctx) ++{ ++ (*_HMAC_CTX_free)(ctx); ++} ++int SSL_UTILS_HMAC_Update(HMAC_CTX *ctx, const unsigned char *data, size_t len) ++{ ++ return (*_HMAC_Update)(ctx, data, len); ++} ++int SSL_UTILS_HMAC_Final(HMAC_CTX *ctx, unsigned char *md, unsigned int *len) ++{ ++ return (*_HMAC_Final)(ctx, md, len); ++} ++DH *SSL_UTILS_DH_new_method(ENGINE *engine) ++{ ++ return (*_DH_new_method)(engine); ++} ++int SSL_UTILS_DH_set0_pqg(DH *dh, BIGNUM *p, BIGNUM *q, BIGNUM *g) ++{ ++ return (*_DH_set0_pqg)(dh, p, q, g); ++} ++int 
SSL_UTILS_DH_set0_key(DH *dh, BIGNUM *pub_key, BIGNUM *priv_key) ++{ ++ return (*_DH_set0_key)(dh, pub_key, priv_key); ++} ++int SSL_UTILS_DH_compute_key(unsigned char *key, const BIGNUM *pub_key, DH *dh) ++{ ++ return (*_DH_compute_key)(key, pub_key, dh); ++} ++void SSL_UTILS_DH_free(DH *r) ++{ ++ (*_DH_free)(r); ++} ++void SSL_UTILS_EC_POINT_free(EC_POINT *point) ++{ ++ (*_EC_POINT_free)(point); ++} ++void SSL_UTILS_EC_KEY_free(EC_KEY *r) ++{ ++ (*_EC_KEY_free)(r); ++} ++void SSL_UTILS_EC_GROUP_free(EC_GROUP *group) ++{ ++ (*_EC_GROUP_free)(group); ++} ++int SSL_UTILS_OBJ_sn2nid(const char *s) ++{ ++ return (*_OBJ_sn2nid)(s); ++} ++EC_GROUP *SSL_UTILS_EC_GROUP_new_by_curve_name(int nid) ++{ ++ return (*_EC_GROUP_new_by_curve_name)(nid); ++} ++EC_KEY *SSL_UTILS_EC_KEY_new(void) ++{ ++ return (*_EC_KEY_new)(); ++} ++int SSL_UTILS_EC_KEY_set_group(EC_KEY *key, const EC_GROUP *group) ++{ ++ return (*_EC_KEY_set_group)(key, group); ++} ++EC_POINT *SSL_UTILS_EC_POINT_new(const EC_GROUP *group) ++{ ++ return (*_EC_POINT_new)(group); ++} ++int SSL_UTILS_EC_POINT_set_affine_coordinates_GFp( ++ const EC_GROUP *group, EC_POINT *point, const BIGNUM *x, const BIGNUM *y, BN_CTX *ctx) ++{ ++ return (*_EC_POINT_set_affine_coordinates_GFp)(group, point, x, y, ctx); ++} ++int SSL_UTILS_EC_KEY_set_public_key(EC_KEY *key, const EC_POINT *pub_key) ++{ ++ return (*_EC_KEY_set_public_key)(key, pub_key); ++} ++int SSL_UTILS_EC_KEY_set_private_key(EC_KEY *key, const BIGNUM *priv_key) ++{ ++ return (*_EC_KEY_set_private_key)(key, priv_key); ++} ++int SSL_UTILS_EC_GROUP_get_degree(const EC_GROUP *group) ++{ ++ return (*_EC_GROUP_get_degree)(group); ++} ++ ++int SSL_UTILS_ECDH_compute_key(void *out, size_t outlen, const EC_POINT *pub_key, const EC_KEY *eckey, ++ void *(*KDF)(const void *in, size_t inlen, void *out, size_t *outlen)) ++{ ++ return (*_ECDH_compute_key)(out, outlen, pub_key, eckey, KDF); ++} ++int SSL_UTILS_DH_set_length(DH *dh, long length) ++{ ++ return (*_DH_set_length)(dh, length); ++} ++int SSL_UTILS_DH_generate_key(DH *dh) ++{ ++ return (*_DH_generate_key)(dh); ++} ++const BIGNUM *SSL_UTILS_DH_get0_priv_key(const DH *dh) ++{ ++ return (*_DH_get0_priv_key)(dh); ++} ++const BIGNUM *SSL_UTILS_DH_get0_pub_key(const DH *dh) ++{ ++ return (*_DH_get0_pub_key)(dh); ++} ++int SSL_UTILS_EC_GROUP_get_curve_GFp(const EC_GROUP *group, BIGNUM *p, BIGNUM *a, BIGNUM *b, BN_CTX *ctx) ++{ ++ return (*_EC_GROUP_get_curve_GFp)(group, p, a, b, ctx); ++} ++const EC_POINT *SSL_UTILS_EC_GROUP_get0_generator(const EC_GROUP *group) ++{ ++ return (*_EC_GROUP_get0_generator)(group); ++} ++int SSL_UTILS_EC_POINT_get_affine_coordinates_GFp( ++ const EC_GROUP *group, const EC_POINT *point, BIGNUM *x, BIGNUM *y, BN_CTX *ctx) ++{ ++ return (*_EC_POINT_get_affine_coordinates_GFp)(group, point, x, y, ctx); ++} ++int SSL_UTILS_EC_GROUP_get_order(const EC_GROUP *group, BIGNUM *order, BN_CTX *ctx) ++{ ++ return (*_EC_GROUP_get_order)(group, order, ctx); ++} ++int SSL_UTILS_EC_GROUP_get_cofactor(const EC_GROUP *group, BIGNUM *cofactor, BN_CTX *ctx) ++{ ++ return (*_EC_GROUP_get_cofactor)(group, cofactor, ctx); ++} ++const EC_POINT *SSL_UTILS_EC_KEY_get0_public_key(const EC_KEY *key) ++{ ++ return (*_EC_KEY_get0_public_key)(key); ++} ++const BIGNUM *SSL_UTILS_EC_KEY_get0_private_key(const EC_KEY *key) ++{ ++ return (*_EC_KEY_get0_private_key)(key); ++} ++int SSL_UTILS_BN_set_word(BIGNUM *a, BN_ULONG w) ++{ ++ return (*_BN_set_word)(a, w); ++} ++EC_GROUP *SSL_UTILS_EC_GROUP_new_curve_GFp(const BIGNUM *p, const BIGNUM *a, const BIGNUM 
*b, BN_CTX *ctx)
++{
++    return (*_EC_GROUP_new_curve_GFp)(p, a, b, ctx);
++}
++int SSL_UTILS_EC_GROUP_set_generator(
++    EC_GROUP *group, const EC_POINT *generator, const BIGNUM *order, const BIGNUM *cofactor)
++{
++    return (*_EC_GROUP_set_generator)(group, generator, order, cofactor);
++}
++void SSL_UTILS_BN_CTX_free(BN_CTX *ctx)
++{
++    (*_BN_CTX_free)(ctx);
++}
++int SSL_UTILS_EC_KEY_generate_key(EC_KEY *eckey)
++{
++    return (*_EC_KEY_generate_key)(eckey);
++}
++RSA *SSL_UTILS_EVP_PKEY_get1_RSA(EVP_PKEY *pkey)
++{
++    return (*_EVP_PKEY_get1_RSA)(pkey);
++}
++BIGNUM *SSL_UTILS_BN_dup(const BIGNUM *a)
++{
++    return (*_BN_dup)(a);
++}
++BN_CTX *SSL_UTILS_BN_CTX_new(void)
++{
++    return (*_BN_CTX_new)();
++}
++int SSL_UTILS_EVP_PKEY_assign(EVP_PKEY *pkey, int type, void *key)
++{
++    return (*_EVP_PKEY_assign)(pkey, type, key);
++}
++int SSL_UTILS_BN_bn2bin(const BIGNUM *a, unsigned char *to)
++{
++    return (*_BN_bn2bin)(a, to);
++}
++int SSL_UTILS_BN_num_bits(const BIGNUM *a)
++{
++    return (*_BN_num_bits)(a);
++}
++
++int SSL_UTILS_ERR_GET_REASON(unsigned long errcode)
++{
++    // ERR_GET_REASON is a macro in openssl 1
++    if (get_sslVersion() == V1) {
++        return (int)((errcode)&0xFFFL);
++    }
++    // ERR_GET_REASON is a static inline function in openssl 3; its implementation is reproduced below.
++    if (SSL3_ERR_SYSTEM_ERROR(errcode))
++        return errcode & SSL3_ERR_SYSTEM_MASK;
++    return errcode & SSL3_ERR_REASON_MASK;
++}
++
++int SSL_UTILS_ERR_GET_FUNC(unsigned long errcode)
++{
++    // ERR_GET_FUNC is a macro in openssl 1 and was removed in openssl 3.
++    return (int)(((errcode) >> 12L) & 0xFFFL);
++}
++
++int SSL_UTILS_ERR_GET_LIB(unsigned long errcode)
++{
++    // ERR_GET_LIB is a macro in openssl 1
++    if (get_sslVersion() == V1) {
++        return (int)(((errcode) >> 24L) & 0x0FFL);
++    }
++    // ERR_GET_LIB is a static inline function in openssl 3; its implementation is reproduced below.
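// Worked example for the two layouts handled here: OpenSSL 1 packs an error
// code as (lib << 24) | (func << 12) | reason, so for
// errcode = (4UL << 24) | (0x65UL << 12) | 0x04 the V1 branches yield
// lib = 4, func = 0x65 and reason = 0x04. OpenSSL 3 instead keeps the reason
// in the low bits, stores the library id above SSL3_ERR_LIB_OFFSET, and flags
// system errors separately; the SSL3_* masks used below are assumed to be
// defined elsewhere in these sources to mirror the OpenSSL 3 headers.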
++ if (SSL3_ERR_SYSTEM_ERROR(errcode)) ++ return SSL3_ERR_LIB_SYS; ++ return (errcode >> SSL3_ERR_LIB_OFFSET) & SSL3_ERR_LIB_MASK; ++} ++ ++void SSL_UTILS_BN_clear_free(BIGNUM *a) ++{ ++ (*_BN_clear_free)(a); ++} ++ ++EVP_MD *SSL_UTILS_EVP_sm3(void) ++{ ++ return (*_EVP_sm3)(); ++} ++ ++int SSL_UTILS_EVP_PKEY_CTX_set1_id(EVP_PKEY_CTX *ctx, const void *id, int len) ++{ ++ // EVP_PKEY_CTX_set1_id is macro in openssl 1 ++ if (get_sslVersion() == V1) { ++ return (*_EVP_PKEY_CTX_ctrl)(ctx, -1, -1, SSL1_EVP_PKEY_CTRL_SET1_ID, (int)len, (void *)(id)); ++ } ++ return (*_EVP_PKEY_CTX_set1_id)(ctx, id, len); ++} ++ ++void SSL_UTILS_EVP_MD_CTX_set_pkey_ctx(EVP_MD_CTX *ctx, EVP_PKEY_CTX *pctx) ++{ ++ (*_EVP_MD_CTX_set_pkey_ctx)(ctx, pctx); ++} ++ ++int SSL_UTILS_EVP_DigestSignInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey) ++{ ++ return (*_EVP_DigestSignInit)(ctx, pctx, type, e, pkey); ++} ++ ++int SSL_UTILS_EVP_DigestVerifyInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey) ++{ ++ return (*_EVP_DigestVerifyInit)(ctx, pctx, type, e, pkey); ++} ++ ++int SSL_UTILS_EVP_DigestSignUpdate(EVP_MD_CTX *ctx, const void *data, size_t dsize) ++{ ++ // EVP_DigestSignUpdate is macro in openssl 1 ++ if (get_sslVersion() == V1) { ++ return (*_EVP_DigestUpdate)(ctx, data, dsize); ++ } ++ return (*_EVP_DigestSignUpdate)(ctx, data, dsize); ++} ++ ++int SSL_UTILS_EVP_DigestVerifyUpdate(EVP_MD_CTX *ctx, const void *data, size_t dsize) ++{ ++ // EVP_DigestVerifyUpdate is macro in openssl 1 ++ if (get_sslVersion() == V1) { ++ return (*_EVP_DigestUpdate)(ctx, data, dsize); ++ } ++ return (*_EVP_DigestVerifyUpdate)(ctx, data, dsize); ++} ++ ++int SSL_UTILS_EVP_DigestSignFinal(EVP_MD_CTX *ctx, unsigned char *sigret, size_t *siglen) ++{ ++ return (*_EVP_DigestSignFinal)(ctx, sigret, siglen); ++} ++ ++int SSL_UTILS_EVP_DigestVerifyFinal(EVP_MD_CTX *ctx, const unsigned char *sig, size_t siglen) ++{ ++ return (*_EVP_DigestVerifyFinal)(ctx, sig, siglen); ++} ++ ++EC_KEY *SSL_UTILS_EC_KEY_new_by_curve_name(int nid) ++{ ++ return (*_EC_KEY_new_by_curve_name)(nid); ++} ++ ++int SSL_UTILS_EC_POINT_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, const EC_POINT *point, ++ const BIGNUM *p_scalar, BN_CTX *ctx) ++{ ++ return (*_EC_POINT_mul)(group, r, g_scalar, point, p_scalar, ctx); ++} ++ ++int SSL_UTILS_EVP_PKEY_set_alias_type(EVP_PKEY *pkey, int type) ++{ ++ // EVP_PKEY_set_alias_type is removed from openssl 3. 
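// Illustrative, non-compiled sketch (guarded by #if 0): the SM3withSM2
// signing flow the digest-sign wrappers above are designed for. mctx, pctx,
// pkey, id and sig are hypothetical caller-side names; on OpenSSL 1 the
// *Update wrappers fall back to EVP_DigestUpdate because the Sign/Verify
// variants are macros there.
#if 0
EVP_MD_CTX *mctx = SSL_UTILS_EVP_MD_CTX_new();
EVP_PKEY_CTX *pctx = SSL_UTILS_EVP_PKEY_CTX_new(pkey, NULL);
SSL_UTILS_EVP_PKEY_CTX_set1_id(pctx, id, id_len);   // SM2 user id
SSL_UTILS_EVP_MD_CTX_set_pkey_ctx(mctx, pctx);
SSL_UTILS_EVP_DigestSignInit(mctx, NULL, SSL_UTILS_EVP_sm3(), NULL, pkey);
SSL_UTILS_EVP_DigestSignUpdate(mctx, data, data_len);
SSL_UTILS_EVP_DigestSignFinal(mctx, sig, &sig_len);
#endif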
++    if (get_sslVersion() == V3) {
++        // https://github.com/openssl/openssl/issues/14379
++        KAE_TRACE("OpenSSL hint: EVP_PKEY_set_alias_type is deprecated on openssl 3.");
++        return 1;
++    }
++    return (*_EVP_PKEY_set_alias_type)(pkey, type);
++}
++
++int SSL_UTILS_EVP_PKEY_assign_RSA(EVP_PKEY *pkey, void *key)
++{
++    // Changed from macro: "EVP_PKEY_assign_RSA(pkey,rsa)" is the same as EVP_PKEY_assign((pkey), EVP_PKEY_RSA, (rsa))
++    return SSL_UTILS_EVP_PKEY_assign((pkey), EVP_PKEY_RSA, (char *)(key));
++}
++
++int SSL_UTILS_EVP_PKEY_assign_EC_KEY(EVP_PKEY *pkey, void *key)
++{
++    // Changed from macro: "EVP_PKEY_assign_EC_KEY(pkey,eckey)" is "EVP_PKEY_assign((pkey),EVP_PKEY_EC, (char
++    // *)(eckey))" in openssl 1 and 3
++    return SSL_UTILS_EVP_PKEY_assign((pkey), EVP_PKEY_EC, (char *)(key));
++}
++
++void SSL_UTILS_EVP_MD_CTX_destroy(EVP_MD_CTX *ctx)
++{
++    // Changed from macro: "# define EVP_MD_CTX_destroy(ctx) EVP_MD_CTX_free((ctx))" in openssl 1 and 3
++    SSL_UTILS_EVP_MD_CTX_free(ctx);
++}
++
++EVP_MD_CTX *SSL_UTILS_EVP_MD_CTX_create(void)
++{
++    return SSL_UTILS_EVP_MD_CTX_new();
++}
++
++int SSL_UTILS_SSL_load_error_strings()
++{
++    // Changed from macro: SSL_load_error_strings is a macro in openssl 1 and 3.
++    return SSL_UTILS_OPENSSL_init_ssl(OPENSSL_INIT_LOAD_SSL_STRINGS | OPENSSL_INIT_LOAD_CRYPTO_STRINGS, NULL);
++}
++
++int SSL_UTILS_OpenSSL_add_all_algorithms()
++{
++// Changed from macro: OpenSSL_add_all_algorithms is a macro whose expansion depends on whether OPENSSL_LOAD_CONF is defined.
++#ifdef OPENSSL_LOAD_CONF
++    return SSL_UTILS_OPENSSL_init_crypto(
++        OPENSSL_INIT_ADD_ALL_CIPHERS | OPENSSL_INIT_ADD_ALL_DIGESTS | OPENSSL_INIT_LOAD_CONFIG, NULL);
++#else
++    return SSL_UTILS_OPENSSL_init_crypto(OPENSSL_INIT_ADD_ALL_CIPHERS | OPENSSL_INIT_ADD_ALL_DIGESTS, NULL);
++#endif
++}
++
++int SSL_UTILS_BN_num_bytes(const BIGNUM *a)
++{
++    // Changed from macro: BN_num_bytes(bn) is ((BN_num_bits(bn)+7)/8);
++    return ((SSL_UTILS_BN_num_bits(a) + 7) / 8);
++}
++
++const char * origin_func_name[] = {
++    // COMMON_FUNC
++    "RSA_new_method",
++    "RSA_generate_key_ex",
++    "RSA_free",
++    "OPENSSL_init_ssl",
++    "ERR_load_BIO_strings",
++    "OPENSSL_init_crypto",
++    "ENGINE_free",
++    "ENGINE_by_id",
++    "EVP_get_digestbyname",
++    "EVP_PKEY_CTX_free",
++    "EVP_PKEY_CTX_new",
++    "EVP_PKEY_sign_init",
++    "EVP_PKEY_sign",
++    "EVP_PKEY_verify_init",
++    "EVP_PKEY_verify",
++    "EVP_get_cipherbyname",
++    "EVP_CIPHER_CTX_new",
++    "EVP_CipherInit_ex",
++    "EVP_CIPHER_CTX_set_padding",
++    "EVP_CIPHER_CTX_free",
++    "EVP_CipherUpdate",
++    "EVP_CipherFinal_ex",
++    "EVP_CIPHER_CTX_ctrl",
++    "BN_new",
++    "BN_bin2bn",
++    "BN_free",
++    "EVP_PKEY_new",
++    "RSA_set0_key",
++    "RSA_set0_factors",
++    "RSA_set0_crt_params",
++    "EVP_PKEY_free",
++    "RSA_private_encrypt",
++    "RSA_private_decrypt",
++    "RSA_public_encrypt",
++    "RSA_public_decrypt",
++    "EVP_PKEY_encrypt_init",
++    "EVP_PKEY_encrypt",
++    "EVP_PKEY_decrypt_init",
++    "EVP_PKEY_decrypt",
++    "EVP_MD_CTX_new",
++    "EVP_DigestInit_ex",
++    "EVP_MD_CTX_free",
++    "EVP_DigestUpdate",
++    "EVP_DigestFinal_ex",
++    "EVP_MD_CTX_copy_ex",
++    "ERR_get_error_line_data",
++    "ERR_error_string_n",
++    "ERR_clear_error",
++    "HMAC_CTX_new",
++    "HMAC_Init_ex",
++    "HMAC_CTX_free",
++    "HMAC_Update",
++    "HMAC_Final",
++    "DH_new_method",
++    "DH_set0_pqg",
++    "DH_set0_key",
++    "DH_compute_key",
++    "DH_free",
++    "EC_POINT_free",
++    "EC_KEY_free",
++    "EC_GROUP_free",
++    "OBJ_sn2nid",
++    "EC_GROUP_new_by_curve_name",
++    "EC_KEY_new",
++    "EC_KEY_set_group",
++    "EC_POINT_new",
++    "EC_POINT_set_affine_coordinates_GFp",
++
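// Note on the tables: origin_func_name and the kae_ssl_func table below are
// parallel arrays; entry i here must name the symbol that SSL_UTILS_func_dl()
// stores into kae_ssl_func[i], and the COMMON/V1/V3 sections must stay in the
// same order in both arrays. The *_START_INDEX/*_END_INDEX bounds consumed by
// SSL_UTILS_func_dl() are assumed to be defined in the matching header.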
"EC_KEY_set_public_key", ++ "EC_KEY_set_private_key", ++ "EC_GROUP_get_degree", ++ "ECDH_compute_key", ++ "DH_set_length", ++ "DH_generate_key", ++ "DH_get0_priv_key", ++ "DH_get0_pub_key", ++ "EC_GROUP_get_curve_GFp", ++ "EC_GROUP_get0_generator", ++ "EC_POINT_get_affine_coordinates_GFp", ++ "EC_GROUP_get_order", ++ "EC_GROUP_get_cofactor", ++ "EC_KEY_get0_public_key", ++ "EC_KEY_get0_private_key", ++ "BN_set_word", ++ "EC_GROUP_new_curve_GFp", ++ "EC_GROUP_set_generator", ++ "BN_CTX_free", ++ "EC_KEY_generate_key", ++ "EVP_PKEY_get1_RSA", ++ "BN_dup", ++ "BN_CTX_new", ++ "EVP_PKEY_assign", ++ "BN_bn2bin", ++ "RSA_get0_n", ++ "RSA_get0_e", ++ "RSA_get0_d", ++ "RSA_get0_p", ++ "RSA_get0_q", ++ "RSA_get0_dmp1", ++ "RSA_get0_dmq1", ++ "RSA_get0_iqmp", ++ "RSA_pkey_ctx_ctrl", ++ "EVP_PKEY_CTX_ctrl", ++ "BN_num_bits", ++ "BN_clear_free", ++ "EVP_sm3", ++ "EVP_MD_CTX_set_pkey_ctx", ++ "EVP_DigestSignInit", ++ "EVP_DigestVerifyInit", ++ "EVP_DigestSignFinal", ++ "EVP_DigestVerifyFinal", ++ "EC_KEY_new_by_curve_name", ++ "EC_POINT_mul", ++ // V1_FUNC ++ "EVP_PKEY_size", ++ "EVP_PKEY_set_alias_type", ++ // V3_FUNC ++ "EVP_PKEY_get_size", ++ "EVP_PKEY_CTX_set0_rsa_oaep_label", ++ "EVP_PKEY_CTX_set_signature_md", ++ "EVP_PKEY_CTX_set_rsa_oaep_md", ++ "EVP_PKEY_CTX_set_rsa_mgf1_md", ++ "EVP_PKEY_CTX_set_rsa_pss_saltlen", ++ "EVP_PKEY_CTX_set_rsa_padding", ++ "EVP_PKEY_CTX_set1_id", ++ "EVP_DigestSignUpdate", ++ "EVP_DigestVerifyUpdate" ++}; ++ ++void ** kae_ssl_func[] = { ++ // COMMON_FUNC ++ (void**)&_RSA_new_method, ++ (void**)&_RSA_generate_key_ex, ++ (void**)&_RSA_free, ++ (void**)&_OPENSSL_init_ssl, ++ (void**)&_ERR_load_BIO_strings, ++ (void**)&_OPENSSL_init_crypto, ++ (void**)&_ENGINE_free, ++ (void**)&_ENGINE_by_id, ++ (void**)&_EVP_get_digestbyname, ++ (void**)&_EVP_PKEY_CTX_free, ++ (void**)&_EVP_PKEY_CTX_new, ++ (void**)&_EVP_PKEY_sign_init, ++ (void**)&_EVP_PKEY_sign, ++ (void**)&_EVP_PKEY_verify_init, ++ (void**)&_EVP_PKEY_verify, ++ (void**)&_EVP_get_cipherbyname, ++ (void**)&_EVP_CIPHER_CTX_new, ++ (void**)&_EVP_CipherInit_ex, ++ (void**)&_EVP_CIPHER_CTX_set_padding, ++ (void**)&_EVP_CIPHER_CTX_free, ++ (void**)&_EVP_CipherUpdate, ++ (void**)&_EVP_CipherFinal_ex, ++ (void**)&_EVP_CIPHER_CTX_ctrl, ++ (void**)&_BN_new, ++ (void**)&_BN_bin2bn, ++ (void**)&_BN_free, ++ (void**)&_EVP_PKEY_new, ++ (void**)&_RSA_set0_key, ++ (void**)&_RSA_set0_factors, ++ (void**)&_RSA_set0_crt_params, ++ (void**)&_EVP_PKEY_free, ++ (void**)&_RSA_private_encrypt, ++ (void**)&_RSA_private_decrypt, ++ (void**)&_RSA_public_encrypt, ++ (void**)&_RSA_public_decrypt, ++ (void**)&_EVP_PKEY_encrypt_init, ++ (void**)&_EVP_PKEY_encrypt, ++ (void**)&_EVP_PKEY_decrypt_init, ++ (void**)&_EVP_PKEY_decrypt, ++ (void**)&_EVP_MD_CTX_new, ++ (void**)&_EVP_DigestInit_ex, ++ (void**)&_EVP_MD_CTX_free, ++ (void**)&_EVP_DigestUpdate, ++ (void**)&_EVP_DigestFinal_ex, ++ (void**)&_EVP_MD_CTX_copy_ex, ++ (void**)&_ERR_get_error_line_data, ++ (void**)&_ERR_error_string_n, ++ (void**)&_ERR_clear_error, ++ (void**)&_HMAC_CTX_new, ++ (void**)&_HMAC_Init_ex, ++ (void**)&_HMAC_CTX_free, ++ (void**)&_HMAC_Update, ++ (void**)&_HMAC_Final, ++ (void**)&_DH_new_method, ++ (void**)&_DH_set0_pqg, ++ (void**)&_DH_set0_key, ++ (void**)&_DH_compute_key, ++ (void**)&_DH_free, ++ (void**)&_EC_POINT_free, ++ (void**)&_EC_KEY_free, ++ (void**)&_EC_GROUP_free, ++ (void**)&_OBJ_sn2nid, ++ (void**)&_EC_GROUP_new_by_curve_name, ++ (void**)&_EC_KEY_new, ++ (void**)&_EC_KEY_set_group, ++ (void**)&_EC_POINT_new, ++ 
(void**)&_EC_POINT_set_affine_coordinates_GFp, ++ (void**)&_EC_KEY_set_public_key, ++ (void**)&_EC_KEY_set_private_key, ++ (void**)&_EC_GROUP_get_degree, ++ (void**)&_ECDH_compute_key, ++ (void**)&_DH_set_length, ++ (void**)&_DH_generate_key, ++ (void**)&_DH_get0_priv_key, ++ (void**)&_DH_get0_pub_key, ++ (void**)&_EC_GROUP_get_curve_GFp, ++ (void**)&_EC_GROUP_get0_generator, ++ (void**)&_EC_POINT_get_affine_coordinates_GFp, ++ (void**)&_EC_GROUP_get_order, ++ (void**)&_EC_GROUP_get_cofactor, ++ (void**)&_EC_KEY_get0_public_key, ++ (void**)&_EC_KEY_get0_private_key, ++ (void**)&_BN_set_word, ++ (void**)&_EC_GROUP_new_curve_GFp, ++ (void**)&_EC_GROUP_set_generator, ++ (void**)&_BN_CTX_free, ++ (void**)&_EC_KEY_generate_key, ++ (void**)&_EVP_PKEY_get1_RSA, ++ (void**)&_BN_dup, ++ (void**)&_BN_CTX_new, ++ (void**)&_EVP_PKEY_assign, ++ (void**)&_BN_bn2bin, ++ (void**)&_RSA_get0_n, ++ (void**)&_RSA_get0_e, ++ (void**)&_RSA_get0_d, ++ (void**)&_RSA_get0_p, ++ (void**)&_RSA_get0_q, ++ (void**)&_RSA_get0_dmp1, ++ (void**)&_RSA_get0_dmq1, ++ (void**)&_RSA_get0_iqmp, ++ (void**)&_RSA_pkey_ctx_ctrl, ++ (void**)&_EVP_PKEY_CTX_ctrl, ++ (void**)&_BN_num_bits, ++ (void**)&_BN_clear_free, ++ (void**)&_EVP_sm3, ++ (void**)&_EVP_MD_CTX_set_pkey_ctx, ++ (void**)&_EVP_DigestSignInit, ++ (void**)&_EVP_DigestVerifyInit, ++ (void**)&_EVP_DigestSignFinal, ++ (void**)&_EVP_DigestVerifyFinal, ++ (void**)&_EC_KEY_new_by_curve_name, ++ (void**)&_EC_POINT_mul, ++ // V1_FUNC ++ (void**)&_EVP_PKEY_size, ++ (void**)&_EVP_PKEY_set_alias_type, ++ // V3_FUNC ++ (void**)&_EVP_PKEY_get_size, ++ (void**)&_EVP_PKEY_CTX_set0_rsa_oaep_label, ++ (void**)&_EVP_PKEY_CTX_set_signature_md, ++ (void**)&_EVP_PKEY_CTX_set_rsa_oaep_md, ++ (void**)&_EVP_PKEY_CTX_set_rsa_mgf1_md, ++ (void**)&_EVP_PKEY_CTX_set_rsa_pss_saltlen, ++ (void**)&_EVP_PKEY_CTX_set_rsa_padding, ++ (void**)&_EVP_PKEY_CTX_set1_id, ++ (void**)&_EVP_DigestSignUpdate, ++ (void**)&_EVP_DigestVerifyUpdate ++}; ++ ++void SSL_UTILS_func_dl(JNIEnv *env) ++{ ++ for(int i = COMMON_FUNC_START_INDEX; i <= COMMON_FUNC_END_INDEX; i++){ ++ *kae_ssl_func[i] = dlsym(_lib_handle, origin_func_name[i]); ++ if (*kae_ssl_func[i] == NULL) { ++ dlclose(_lib_handle); ++ KAE_ThrowExceptionInInitializerError(env, "OpenSSL error while Openssl common function pointer assignment, nullpointer found."); ++ return; ++ } ++ } ++ ++ if (get_sslVersion() == V1) { ++ for(int i = V1_FUNC_START_INDEX; i <= V1_FUNC_END_INDEX; i++){ ++ *kae_ssl_func[i] = dlsym(_lib_handle, origin_func_name[i]); ++ if (*kae_ssl_func[i] == NULL) { ++ dlclose(_lib_handle); ++ KAE_ThrowExceptionInInitializerError(env, "OpenSSL error while Openssl 1 unique function pointer assignment, nullpointer found."); ++ return; ++ } ++ } ++ } ++ ++ if (get_sslVersion() == V3) { ++ for(int i = V3_FUNC_START_INDEX; i <= V3_FUNC_END_INDEX; i++){ ++ *kae_ssl_func[i] = dlsym(_lib_handle, origin_func_name[i]); ++ if (*kae_ssl_func[i] == NULL) { ++ dlclose(_lib_handle); ++ KAE_ThrowExceptionInInitializerError(env, "OpenSSL error while Openssl 3 unique function pointer assignment, nullpointer found."); ++ return; ++ } ++ } ++ } ++} ++ ++jboolean SSL_UTILS_func_ptr_init(JNIEnv *env, jint useOpensslVersion) ++{ ++ jboolean init_result = JNI_TRUE; ++ _lib_handle = open_ssl_lib(env, useOpensslVersion, &init_result); ++ if (!init_result) { ++ return init_result; ++ } ++ SSL_UTILS_func_dl(env); ++ return init_result; ++} ++ ++void *open_ssl_lib(JNIEnv *env, jint useOpensslVersion, jboolean *init_result) ++{ ++ // default priorly use openssl3 ++ 
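// Resolution order implemented below, assuming useOpensslVersion semantics of
// 0 = prefer OpenSSL 3 with fallback and 1 = force OpenSSL 1.1: the default
// case first tries libssl.so.3 and only falls back to libssl.so.1.1 when that
// dlopen() fails, while forcing version 1 skips the OpenSSL 3 attempt
// entirely. RTLD_GLOBAL keeps the resolved symbols visible so an engine
// loaded later (for example the KAE engine) can find them.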
_sslVersion = V3;
++    char *lib_name = "libssl.so.3";
++    if (useOpensslVersion == 1) {
++        _sslVersion = V1;
++        lib_name = "libssl.so.1.1";
++    }
++    void *res = NULL;
++    // Use mode RTLD_NOW | RTLD_GLOBAL; otherwise an openssl 3 environment cannot resolve the KAE engine.
++    res = dlopen(lib_name, RTLD_NOW | RTLD_GLOBAL);
++    if (res == NULL && useOpensslVersion == 0) {
++        _sslVersion = V1;
++        lib_name = "libssl.so.1.1";
++        res = dlopen(lib_name, RTLD_NOW | RTLD_GLOBAL);
++    }
++
++    if (res == NULL) {
++        *init_result = JNI_FALSE;
++        char* prefix = "OpenSSL error while opening openssl lib, no matching libssl found: ";
++        char* msg = (char*)malloc(strlen(prefix) + strlen(lib_name) + 1);
++        strcpy(msg, prefix);
++        strcat(msg, lib_name);
++        KAE_ThrowExceptionInInitializerError(env, msg);
++        return res;
++    }
++
++    // check engine with openssl version
++    check_openSSL_Engine(env, init_result, lib_name);
++    if (!*init_result) {
++        dlclose(res);
++        res = NULL;
++    }
++    return res;
++}
++
++void check_openSSL_Engine(JNIEnv *env, jboolean *init_result, char *lib_name)
++{
++    char *openssl_engines_path = getenv("OPENSSL_ENGINES");
++    // A null openssl_engines_path means the KAE engine is not in use; the check below only applies when the user enables the KAE engine.
++    if (openssl_engines_path != NULL) {
++        if (0 == strncmp("libssl.so.1.1", lib_name, strlen(lib_name)) && 0 != strncmp(openssl_engines_path, OPENSSL_ENGINES_VERSION_1_1, strlen(OPENSSL_ENGINES_VERSION_1_1))) {
++            *init_result = JNI_FALSE;
++            KAE_ThrowExceptionInInitializerError(env, "The version of OPENSSL_ENGINES in the environment is inconsistent with the version of the loaded OpenSSL library(libssl.so.1.1). Please check jdk config kae.useOpensslVersion or OPENSSL_ENGINES");
++            return;
++        }
++        if (0 == strncmp("libssl.so.3", lib_name, strlen(lib_name)) && 0 != strncmp(openssl_engines_path, OPENSSL_ENGINES_VERSION_3_X, strlen(OPENSSL_ENGINES_VERSION_3_X))) {
++            *init_result = JNI_FALSE;
++            KAE_ThrowExceptionInInitializerError(env, "The version of OPENSSL_ENGINES in the environment is inconsistent with the version of the loaded OpenSSL library(libssl.so.3). Please check jdk config kae.useOpensslVersion or OPENSSL_ENGINES");
++            return;
++        }
++    }
++}
++
++int get_sslVersion()
++{
++    return _sslVersion;
++}
+\ No newline at end of file
+diff --git a/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/ssl_utils.h b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/ssl_utils.h
+new file mode 100644
+index 000000000..b49c79cdb
+--- /dev/null
++++ b/src/jdk.crypto.kaeprovider/linux/native/libj2kae/org/openeuler/security/openssl/ssl_utils.h
+@@ -0,0 +1,313 @@
++/*
++ * Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ */ ++ ++#ifndef SSL_UTILS_H ++#define SSL_UTILS_H ++ ++#include ++ ++const BIGNUM *SSL_UTILS_RSA_get0_n(const RSA *r); ++ ++const BIGNUM *SSL_UTILS_RSA_get0_e(const RSA *r); ++ ++const BIGNUM *SSL_UTILS_RSA_get0_d(const RSA *r); ++ ++const BIGNUM *SSL_UTILS_RSA_get0_p(const RSA *r); ++ ++const BIGNUM *SSL_UTILS_RSA_get0_q(const RSA *r); ++ ++const BIGNUM *SSL_UTILS_RSA_get0_dmp1(const RSA *r); ++ ++const BIGNUM *SSL_UTILS_RSA_get0_dmq1(const RSA *r); ++ ++const BIGNUM *SSL_UTILS_RSA_get0_iqmp(const RSA *r); ++ ++RSA *SSL_UTILS_RSA_new_method(ENGINE *engine); ++ ++int SSL_UTILS_RSA_generate_key_ex(RSA *rsa, int bits, BIGNUM *e_value, BN_GENCB *cb); ++ ++void SSL_UTILS_RSA_free(RSA *rsa); ++ ++int SSL_UTILS_ERR_load_BIO_strings(); ++ ++int SSL_UTILS_OpenSSL_add_all_algorithms(); ++ ++int SSL_UTILS_ENGINE_free(ENGINE *e); ++ ++ENGINE *SSL_UTILS_ENGINE_by_id(const char *id); ++ ++EVP_MD *SSL_UTILS_EVP_get_digestbyname(const char *name); ++ ++void SSL_UTILS_EVP_PKEY_CTX_free(EVP_PKEY_CTX *ctx); ++ ++int SSL_UTILS_EVP_PKEY_CTX_set_rsa_padding(EVP_PKEY_CTX *ctx, int pad_mode); ++ ++int SSL_UTILS_EVP_PKEY_CTX_set_signature_md(EVP_PKEY_CTX *ctx, const EVP_MD *md); ++ ++EVP_PKEY_CTX *SSL_UTILS_EVP_PKEY_CTX_new(EVP_PKEY *pkey, ENGINE *e); ++ ++int SSL_UTILS_EVP_PKEY_sign_init(EVP_PKEY_CTX *ctx); ++ ++int SSL_UTILS_EVP_PKEY_sign( ++ EVP_PKEY_CTX *ctx, unsigned char *sig, size_t *siglen, const unsigned char *tbs, size_t tbslen); ++ ++int SSL_UTILS_EVP_PKEY_verify_init(EVP_PKEY_CTX *ctx); ++ ++int SSL_UTILS_EVP_PKEY_verify( ++ EVP_PKEY_CTX *ctx, const unsigned char *sig, size_t siglen, const unsigned char *tbs, size_t tbslen); ++ ++int SSL_UTILS_EVP_PKEY_CTX_set_rsa_mgf1_md(EVP_PKEY_CTX *ctx, const EVP_MD *md); ++ ++int SSL_UTILS_EVP_PKEY_CTX_set_rsa_pss_saltlen(EVP_PKEY_CTX *ctx, int len); ++ ++int SSL_UTILS_EVP_PKEY_size(const EVP_PKEY *pkey); ++ ++EVP_CIPHER *SSL_UTILS_EVP_get_cipherbyname(const char *name); ++ ++EVP_CIPHER_CTX *SSL_UTILS_EVP_CIPHER_CTX_new(void); ++ ++int SSL_UTILS_EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *type, ENGINE *impl, const unsigned char *key, ++ const unsigned char *iv, int enc); ++ ++int SSL_UTILS_EVP_CIPHER_CTX_set_padding(EVP_CIPHER_CTX *ctx, int pad); ++ ++void SSL_UTILS_EVP_CIPHER_CTX_free(EVP_CIPHER_CTX *ctx); ++ ++int SSL_UTILS_EVP_CipherUpdate(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl, const unsigned char *in, int inl); ++ ++int SSL_UTILS_EVP_CipherFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl); ++ ++int SSL_UTILS_EVP_CIPHER_CTX_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void *ptr); ++ ++BIGNUM *SSL_UTILS_BN_new(void); ++ ++BIGNUM *SSL_UTILS_BN_bin2bn(const unsigned char *s, int len, BIGNUM *ret); ++ ++void SSL_UTILS_BN_free(BIGNUM *a); ++ ++int SSL_UTILS_EVP_PKEY_CTX_set0_rsa_oaep_label(EVP_PKEY_CTX *ctx, void *label, int llen); ++ ++int SSL_UTILS_EVP_PKEY_CTX_set_rsa_oaep_md(EVP_PKEY_CTX *ctx, const EVP_MD *md); ++ ++EVP_PKEY *SSL_UTILS_EVP_PKEY_new(void); ++ ++int SSL_UTILS_RSA_set0_key(RSA *r, BIGNUM *n, BIGNUM *e, BIGNUM *d); ++ ++int SSL_UTILS_RSA_set0_factors(RSA *r, BIGNUM *p, BIGNUM *q); ++ ++int SSL_UTILS_RSA_set0_crt_params(RSA *r, BIGNUM *dmp1, BIGNUM *dmq1, BIGNUM *iqmp); ++ ++void SSL_UTILS_EVP_PKEY_free(EVP_PKEY *x); ++ ++int SSL_UTILS_RSA_private_encrypt(int flen, const unsigned char *from, unsigned char *to, RSA *rsa, int padding); ++ 
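// Illustrative, non-compiled sketch (guarded by #if 0): provider sources are
// expected to call these SSL_UTILS_* wrappers instead of OpenSSL symbols
// directly, so one binary can serve whichever libssl major version was
// resolved at runtime. in, in_len and rsa are hypothetical caller-side names;
// RSA_PKCS1_PADDING is the standard OpenSSL constant.
#if 0
unsigned char out[512];
int n = SSL_UTILS_RSA_public_encrypt(in_len, in, out, rsa, RSA_PKCS1_PADDING);
if (n < 0) {
    // inspect the failure via SSL_UTILS_ERR_get_error_line_data()
}
#endif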
++int SSL_UTILS_RSA_private_decrypt(int flen, const unsigned char *from, unsigned char *to, RSA *rsa, int padding); ++ ++int SSL_UTILS_RSA_public_encrypt(int flen, const unsigned char *from, unsigned char *to, RSA *rsa, int padding); ++ ++int SSL_UTILS_RSA_public_decrypt(int flen, const unsigned char *from, unsigned char *to, RSA *rsa, int padding); ++ ++int SSL_UTILS_EVP_PKEY_encrypt_init(EVP_PKEY_CTX *ctx); ++ ++int SSL_UTILS_EVP_PKEY_encrypt( ++ EVP_PKEY_CTX *ctx, unsigned char *out, size_t *outlen, const unsigned char *in, size_t inlen); ++ ++int SSL_UTILS_EVP_PKEY_decrypt_init(EVP_PKEY_CTX *ctx); ++ ++int SSL_UTILS_EVP_PKEY_decrypt( ++ EVP_PKEY_CTX *ctx, unsigned char *out, size_t *outlen, const unsigned char *in, size_t inlen); ++ ++EVP_MD_CTX *SSL_UTILS_EVP_MD_CTX_new(void); ++ ++int SSL_UTILS_EVP_DigestInit_ex(EVP_MD_CTX *ctx, const EVP_MD *type, ENGINE *impl); ++ ++void SSL_UTILS_EVP_MD_CTX_free(EVP_MD_CTX *ctx); ++ ++int SSL_UTILS_EVP_DigestUpdate(EVP_MD_CTX *ctx, const void *data, size_t count); ++ ++int SSL_UTILS_EVP_DigestFinal_ex(EVP_MD_CTX *ctx, unsigned char *md, unsigned int *size); ++ ++int SSL_UTILS_EVP_MD_CTX_copy_ex(EVP_MD_CTX *out, const EVP_MD_CTX *in); ++ ++unsigned long SSL_UTILS_ERR_get_error_line_data(const char **file, int *line, const char **data, int *flags); ++ ++void SSL_UTILS_ERR_error_string_n(unsigned long e, char *buf, size_t len); ++ ++void SSL_UTILS_ERR_clear_error(void); ++ ++HMAC_CTX *SSL_UTILS_HMAC_CTX_new(void); ++ ++int SSL_UTILS_HMAC_Init_ex(HMAC_CTX *ctx, const void *key, int len, const EVP_MD *md, ENGINE *impl); ++ ++void SSL_UTILS_HMAC_CTX_free(HMAC_CTX *ctx); ++ ++int SSL_UTILS_HMAC_Update(HMAC_CTX *ctx, const unsigned char *data, size_t len); ++ ++int SSL_UTILS_HMAC_Final(HMAC_CTX *ctx, unsigned char *md, unsigned int *len); ++ ++DH *SSL_UTILS_DH_new_method(ENGINE *engine); ++ ++int SSL_UTILS_DH_set0_pqg(DH *dh, BIGNUM *p, BIGNUM *q, BIGNUM *g); ++ ++int SSL_UTILS_DH_set0_key(DH *dh, BIGNUM *pub_key, BIGNUM *priv_key); ++ ++int SSL_UTILS_DH_compute_key(unsigned char *key, const BIGNUM *pub_key, DH *dh); ++ ++void SSL_UTILS_DH_free(DH *r); ++ ++void SSL_UTILS_EC_POINT_free(EC_POINT *point); ++ ++void SSL_UTILS_EC_KEY_free(EC_KEY *r); ++ ++void SSL_UTILS_EC_GROUP_free(EC_GROUP *group); ++ ++int SSL_UTILS_OBJ_sn2nid(const char *s); ++ ++EC_GROUP *SSL_UTILS_EC_GROUP_new_by_curve_name(int nid); ++ ++EC_KEY *SSL_UTILS_EC_KEY_new(void); ++ ++int SSL_UTILS_EC_KEY_set_group(EC_KEY *key, const EC_GROUP *group); ++ ++EC_POINT *SSL_UTILS_EC_POINT_new(const EC_GROUP *group); ++ ++int SSL_UTILS_EC_POINT_set_affine_coordinates_GFp( ++ const EC_GROUP *group, EC_POINT *point, const BIGNUM *x, const BIGNUM *y, BN_CTX *ctx); ++ ++int SSL_UTILS_EC_KEY_set_public_key(EC_KEY *key, const EC_POINT *pub_key); ++ ++int SSL_UTILS_EC_KEY_set_private_key(EC_KEY *key, const BIGNUM *priv_key); ++ ++int SSL_UTILS_EC_GROUP_get_degree(const EC_GROUP *group); ++ ++int SSL_UTILS_ECDH_compute_key(void *out, size_t outlen, const EC_POINT *pub_key, const EC_KEY *eckey, ++ void *(*KDF)(const void *in, size_t inlen, void *out, size_t *outlen)); ++ ++int SSL_UTILS_DH_set_length(DH *dh, long length); ++ ++int SSL_UTILS_DH_generate_key(DH *dh); ++ ++const BIGNUM *SSL_UTILS_DH_get0_priv_key(const DH *dh); ++ ++const BIGNUM *SSL_UTILS_DH_get0_pub_key(const DH *dh); ++ ++int SSL_UTILS_EC_GROUP_get_curve_GFp(const EC_GROUP *group, BIGNUM *p, BIGNUM *a, BIGNUM *b, BN_CTX *ctx); ++ ++const EC_POINT *SSL_UTILS_EC_GROUP_get0_generator(const EC_GROUP *group); ++ ++int 
SSL_UTILS_EC_POINT_get_affine_coordinates_GFp( ++ const EC_GROUP *group, const EC_POINT *point, BIGNUM *x, BIGNUM *y, BN_CTX *ctx); ++ ++int SSL_UTILS_EC_GROUP_get_order(const EC_GROUP *group, BIGNUM *order, BN_CTX *ctx); ++ ++int SSL_UTILS_EC_GROUP_get_cofactor(const EC_GROUP *group, BIGNUM *cofactor, BN_CTX *ctx); ++ ++const EC_POINT *SSL_UTILS_EC_KEY_get0_public_key(const EC_KEY *key); ++ ++const BIGNUM *SSL_UTILS_EC_KEY_get0_private_key(const EC_KEY *key); ++ ++int SSL_UTILS_BN_set_word(BIGNUM *a, BN_ULONG w); ++ ++EC_GROUP *SSL_UTILS_EC_GROUP_new_curve_GFp(const BIGNUM *p, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx); ++ ++int SSL_UTILS_EC_GROUP_set_generator( ++ EC_GROUP *group, const EC_POINT *generator, const BIGNUM *order, const BIGNUM *cofactor); ++ ++void SSL_UTILS_BN_CTX_free(BN_CTX *ctx); ++ ++int SSL_UTILS_EC_KEY_generate_key(EC_KEY *eckey); ++ ++RSA *SSL_UTILS_EVP_PKEY_get1_RSA(EVP_PKEY *pkey); ++ ++BIGNUM *SSL_UTILS_BN_dup(const BIGNUM *a); ++ ++BN_CTX *SSL_UTILS_BN_CTX_new(void); ++ ++int SSL_UTILS_EVP_PKEY_assign(EVP_PKEY *pkey, int type, void *key); ++ ++int SSL_UTILS_BN_bn2bin(const BIGNUM *a, unsigned char *to); ++ ++void SSL_UTILS_func_dl(JNIEnv *env); ++ ++jboolean SSL_UTILS_func_ptr_init(JNIEnv *env, jint useOpensslVersion); ++ ++void *open_ssl_lib(JNIEnv *env, jint useOpensslVersion, jboolean *init_result); ++ ++void check_openSSL_Engine(JNIEnv *env, jboolean *init_result, char *lib_name); ++ ++int get_sslVersion(); ++ ++int SSL_UTILS_OPENSSL_init_ssl(uint64_t opts, const OPENSSL_INIT_SETTINGS *settings); ++ ++int SSL_UTILS_OPENSSL_init_crypto(uint64_t opts, const OPENSSL_INIT_SETTINGS *settings); ++ ++int SSL_UTILS_BN_num_bits(const BIGNUM *a); ++ ++int SSL_UTILS_ERR_GET_REASON(unsigned long errcode); ++ ++int SSL_UTILS_ERR_GET_FUNC(unsigned long errcode); ++ ++int SSL_UTILS_ERR_GET_LIB(unsigned long errcode); ++ ++void SSL_UTILS_BN_clear_free(BIGNUM *a); ++ ++EVP_MD *SSL_UTILS_EVP_sm3(void); ++ ++int SSL_UTILS_EVP_PKEY_CTX_set1_id(EVP_PKEY_CTX *ctx, const void *id, int len); ++ ++void SSL_UTILS_EVP_MD_CTX_set_pkey_ctx(EVP_MD_CTX *ctx, EVP_PKEY_CTX *pctx); ++ ++int SSL_UTILS_EVP_DigestSignInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey); ++ ++int SSL_UTILS_EVP_DigestVerifyInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey); ++ ++int SSL_UTILS_EVP_DigestSignUpdate(EVP_MD_CTX *ctx, const void *data, size_t dsize); ++ ++int SSL_UTILS_EVP_DigestVerifyUpdate(EVP_MD_CTX *ctx, const void *data, size_t dsize); ++ ++int SSL_UTILS_EVP_DigestSignFinal(EVP_MD_CTX *ctx, unsigned char *sigret, size_t *siglen); ++ ++int SSL_UTILS_EVP_DigestVerifyFinal(EVP_MD_CTX *ctx, const unsigned char *sig, size_t siglen); ++ ++EC_KEY *SSL_UTILS_EC_KEY_new_by_curve_name(int nid); ++ ++int SSL_UTILS_EC_POINT_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, const EC_POINT *point, ++ const BIGNUM *p_scalar, BN_CTX *ctx); ++ ++int SSL_UTILS_EVP_PKEY_set_alias_type(EVP_PKEY *pkey, int type); ++ ++int SSL_UTILS_EVP_PKEY_assign_RSA(EVP_PKEY *pkey, void *key); ++ ++int SSL_UTILS_EVP_PKEY_assign_EC_KEY(EVP_PKEY *pkey, void *key); ++ ++void SSL_UTILS_EVP_MD_CTX_destroy(EVP_MD_CTX *ctx); ++ ++EVP_MD_CTX *SSL_UTILS_EVP_MD_CTX_create(void); ++ ++int SSL_UTILS_SSL_load_error_strings(); ++ ++int SSL_UTILS_OpenSSL_add_all_algorithms(); ++ ++int SSL_UTILS_BN_num_bytes(const BIGNUM *a); ++ ++#endif // SSL_UTILS_H +\ No newline at end of file +diff --git a/test/jdk/org/openeuler/security/openssl/ECDHTest.java 
b/test/jdk/org/openeuler/security/openssl/ECDHTest.java +index 791fec252..9a229a311 100644 +--- a/test/jdk/org/openeuler/security/openssl/ECDHTest.java ++++ b/test/jdk/org/openeuler/security/openssl/ECDHTest.java +@@ -21,22 +21,18 @@ + * questions. + */ + ++import org.openeuler.security.openssl.KAEECPrivateKeyImpl; ++import org.openeuler.security.openssl.KAEECPublicKeyImpl; + import org.openeuler.security.openssl.KAEProvider; +-import sun.security.ec.ECPrivateKeyImpl; +-import sun.security.ec.ECPublicKeyImpl; + + import javax.crypto.KeyAgreement; + import java.math.BigInteger; +-import java.security.KeyPair; +-import java.security.KeyPairGenerator; +-import java.security.Provider; +-import java.security.Security; ++import java.security.*; + import java.security.spec.ECFieldFp; + import java.security.spec.ECParameterSpec; + import java.security.spec.ECPoint; + import java.security.spec.EllipticCurve; + import java.util.Arrays; +-import java.nio.charset.StandardCharsets; + import java.security.spec.*; + import java.security.KeyFactory; + import java.security.interfaces.ECPrivateKey; +@@ -78,31 +74,30 @@ public class ECDHTest { + + KeyFactory keyFactory = KeyFactory.getInstance("EC"); + ECPrivateKeySpec privateKeySpec = new ECPrivateKeySpec(new BigInteger("20135071615800221517902437867016717688420688735490569283842831828983"), PARAMS); +- ECPrivateKeyImpl ecPrivKey = (ECPrivateKeyImpl)keyFactory.generatePrivate(privateKeySpec); +- ++ ECPrivateKey ecPrivKey = (ECPrivateKey) keyFactory.generatePrivate(privateKeySpec); + ECPoint ecPoint = new ECPoint(new BigInteger("9490267631555585552004372465967099662885480699902812460349461311384"), new BigInteger("1974573604976093871117393045089050409882519645527397292712281520811")); + ECPublicKeySpec publicKeySpec = new ECPublicKeySpec(ecPoint,PARAMS); +- ECPublicKeyImpl ecPublicKey = (ECPublicKeyImpl)keyFactory.generatePublic(publicKeySpec); +- testKeyAgreement(ecPrivKey, ecPublicKey, new byte[]{-88, -65, 43, -84, 26, 43, 46, 106, 20, 39, -76, 30, -71, 72, -102, 120, 108, -92, -86, -14, -96, -42, 93, -40, -43, -25, 15, -62}); ++ ECPublicKey ecPublicKey = (ECPublicKey)keyFactory.generatePublic(publicKeySpec); ++ testKeyAgreement((KAEECPrivateKeyImpl) ecPrivKey, (KAEECPublicKeyImpl) ecPublicKey, new byte[]{-88, -65, 43, -84, 26, 43, 46, 106, 20, 39, -76, 30, -71, 72, -102, 120, 108, -92, -86, -14, -96, -42, 93, -40, -43, -25, 15, -62}); + } + + public static void testKeyPairByParam(ECParameterSpec PARAMS) throws Exception { + keyPairGenerator = KeyPairGenerator.getInstance(algorithm); + keyPairGenerator.initialize(PARAMS); + KeyPair keyPair = keyPairGenerator.generateKeyPair(); +- ECPrivateKeyImpl ecPrivKey = (ECPrivateKeyImpl) keyPair.getPrivate(); +- ECPublicKeyImpl ecPublicKey = (ECPublicKeyImpl) keyPair.getPublic(); ++ PrivateKey ecPriKey = keyPair.getPrivate(); ++ PublicKey ecPublicKey = keyPair.getPublic(); + } + + public static void testKeyPairByKeySize(int keySize) throws Exception { + keyPairGenerator = KeyPairGenerator.getInstance(algorithm); + keyPairGenerator.initialize(keySize); + KeyPair keyPair = keyPairGenerator.generateKeyPair(); +- ECPrivateKeyImpl ecPrivKey = (ECPrivateKeyImpl) keyPair.getPrivate(); +- ECPublicKeyImpl ecPublicKey = (ECPublicKeyImpl) keyPair.getPublic(); ++ PrivateKey ecPriKey = keyPair.getPrivate(); ++ PublicKey ecPublicKey = keyPair.getPublic(); + } + +- public static void testKeyAgreement(ECPrivateKeyImpl ecPrivKey, ECPublicKeyImpl ecPublicKey, byte[] expectRes) throws Exception { ++ public static void 
testKeyAgreement(KAEECPrivateKeyImpl ecPrivKey, KAEECPublicKeyImpl ecPublicKey, byte[] expectRes) throws Exception {
+         KeyAgreement keyAgreement = KeyAgreement.getInstance("ECDH");
+         keyAgreement.init(ecPrivKey);
+         keyAgreement.doPhase(ecPublicKey, true);
+diff --git a/test/jdk/org/openeuler/security/openssl/KAEDisabledAlgorithmsTest.java b/test/jdk/org/openeuler/security/openssl/KAEDisabledAlgorithmsTest.java
+index e8d767f0c..a6839283f 100644
+--- a/test/jdk/org/openeuler/security/openssl/KAEDisabledAlgorithmsTest.java
++++ b/test/jdk/org/openeuler/security/openssl/KAEDisabledAlgorithmsTest.java
+@@ -150,7 +150,7 @@ public class KAEDisabledAlgorithmsTest {
+         }
+
+         // test other algorithms that are not disabled (except ec)
+-        for (int i = 0; i < useKaeEngineFlags.length - 1; i++) {
++        for (int i = 0; i < useKaeEngineFlags.length - 2; i++) {
+             if (!disabledAlgorithmIndexSet.contains(i) && !useKaeEngineFlags[i]) {
+                 throw new RuntimeException(KAETestHelper.getAlgorithmName(i) + " algorithm is not disabled");
+             }
+diff --git a/test/jdk/org/openeuler/security/openssl/KAEGcmIvLenTest.java b/test/jdk/org/openeuler/security/openssl/KAEGcmIvLenTest.java
+new file mode 100644
+index 000000000..935e4f889
+--- /dev/null
++++ b/test/jdk/org/openeuler/security/openssl/KAEGcmIvLenTest.java
+@@ -0,0 +1,53 @@
++import org.openeuler.security.openssl.KAEProvider;
++
++import javax.crypto.Cipher;
++import javax.crypto.spec.GCMParameterSpec;
++import javax.crypto.spec.SecretKeySpec;
++import java.nio.charset.StandardCharsets;
++import java.security.Security;
++import java.util.Arrays;
++
++/**
++ * @test
++ * @summary Basic test for AES/GCM Iv
++ * @modules jdk.crypto.kaeprovider/org.openeuler.security.openssl
++ * @requires os.arch=="aarch64"
++ * @run main KAEGcmIvLenTest
++ */
++public class KAEGcmIvLenTest {
++    private static String plainText = "helloworldhellow"; // 16 bytes for NoPadding
++    private static String shortPlainText = "helloworld"; // 10 bytes for padding
++    private static SecretKeySpec ks = new SecretKeySpec("AESEncryptionKey".getBytes(StandardCharsets.UTF_8), "AES"); // key has 16 bytes
++    private static int[] ivLens = {12, 16};
++    public static void main(String[] args) throws Exception {
++        Security.addProvider(new KAEProvider());
++        for (int ivLen : ivLens) {
++            testGcm(plainText, "AES/GCM/NoPadding", "KAEProvider", "SunJCE", ivLen);
++            testGcm(plainText, "AES/GCM/NoPadding", "SunJCE", "KAEProvider", ivLen);
++            testGcm(shortPlainText, "AES/GCM/NoPadding", "KAEProvider", "SunJCE", ivLen);
++            testGcm(shortPlainText, "AES/GCM/NoPadding", "SunJCE", "KAEProvider", ivLen);
++        }
++
++    }
++
++    private static void testGcm(String plainText, String algo, String encryptProvider, String decryptProvider, int ivLen) throws Exception {
++        Cipher enCipher = Cipher.getInstance(algo, encryptProvider);
++        enCipher.init(Cipher.ENCRYPT_MODE, ks, getIv(ivLen));
++        byte[] cipherText = enCipher.doFinal(plainText.getBytes());
++
++        Cipher deCipher = Cipher.getInstance(algo, decryptProvider);
++        deCipher.init(Cipher.DECRYPT_MODE, ks, getIv(ivLen));
++        byte[] origin = deCipher.doFinal(cipherText);
++
++        if (!Arrays.equals(plainText.getBytes(), origin)) {
++            throw new RuntimeException("gcm decryption failed, algo = " + algo);
++        }
++    }
++
++    private static GCMParameterSpec getIv(int ivLen) {
++        if (ivLen == 16) {
++            return new GCMParameterSpec(128, "abcdefghabcdefgh".getBytes(StandardCharsets.UTF_8));
++        }
++        return new GCMParameterSpec(96, "abcdefghabcd".getBytes(StandardCharsets.UTF_8));
++    }
++}
+\ No newline at end of
file +diff --git a/test/jdk/org/openeuler/security/openssl/KAETestHelper.java b/test/jdk/org/openeuler/security/openssl/KAETestHelper.java +index 59ad91ddc..27a75290a 100644 +--- a/test/jdk/org/openeuler/security/openssl/KAETestHelper.java ++++ b/test/jdk/org/openeuler/security/openssl/KAETestHelper.java +@@ -67,7 +67,8 @@ class KAETestHelper { + "hmac-sha512", + "rsa", + "dh", +- "ec" ++ "ec", ++ "sm2" + }; + private static final Map ALGORITHM_NAME_MAP = new HashMap<>(); + +diff --git a/test/jdk/org/openeuler/security/openssl/KAEUseEngineTest.java b/test/jdk/org/openeuler/security/openssl/KAEUseEngineTest.java +index a5b9b5386..8ebb7b7f6 100644 +--- a/test/jdk/org/openeuler/security/openssl/KAEUseEngineTest.java ++++ b/test/jdk/org/openeuler/security/openssl/KAEUseEngineTest.java +@@ -44,7 +44,8 @@ import java.util.Map; + * @run main/othervm -Dkae.log=true -Dkae.rsa.useKaeEngine=true KAEUseEngineTest + * @run main/othervm -Dkae.log=true -Dkae.dh.useKaeEngine=true KAEUseEngineTest + * @run main/othervm -Dkae.log=true -Dkae.ec.useKaeEngine=true KAEUseEngineTest +- * @run main/othervm -Dkae.log=true -Dall.test=enable -Dkae.digest.useKaeEngine=true -Dkae.aes.useKaeEngine=true -Dkae.sm4.useKaeEngine=true -Dkae.hmac.useKaeEngine=true -Dkae.rsa.useKaeEngine=true -Dkae.dh.useKaeEngine=true -Dkae.ec.useKaeEngine=true KAEUseEngineTest ++ * @run main/othervm -Dkae.log=true -Dkae.sm2.useKaeEngine=true KAEUseEngineTest ++ * @run main/othervm -Dkae.log=true -Dall.test=enable -Dkae.digest.useKaeEngine=true -Dkae.aes.useKaeEngine=true -Dkae.sm4.useKaeEngine=true -Dkae.hmac.useKaeEngine=true -Dkae.rsa.useKaeEngine=true -Dkae.dh.useKaeEngine=true -Dkae.ec.useKaeEngine=true -Dkae.sm2.useKaeEngine=true KAEUseEngineTest + * @run main/othervm -Dkae.log=true -Dkae.digest.useKaeEngine=false KAEUseEngineTest + * @run main/othervm -Dkae.log=true -Dkae.aes.useKaeEngine=true KAEUseEngineTest + * @run main/othervm -Dkae.log=true -Dkae.sm4.useKaeEngine=true KAEUseEngineTest +@@ -52,8 +53,8 @@ import java.util.Map; + * @run main/othervm -Dkae.log=true -Dkae.rsa.useKaeEngine=true KAEUseEngineTest + * @run main/othervm -Dkae.log=true -Dkae.dh.useKaeEngine=true KAEUseEngineTest + * @run main/othervm -Dkae.log=true -Dkae.ec.useKaeEngine=true KAEUseEngineTest +- * @run main/othervm -Dkae.log=true -Dall.test=disable -Dkae.digest.useKaeEngine=false -Dkae.aes.useKaeEngine=false -Dkae.sm4.useKaeEngine=false -Dkae.hmac.useKaeEngine=false -Dkae.rsa.useKaeEngine=false -Dkae.dh.useKaeEngine=false -Dkae.ec.useKaeEngine=false KAEUseEngineTest +- * @run main/othervm -Dkae.log=true -Dall.test=default -Dkae.engine.id=uadk_engine -Dkae.libcrypto.useGlobalMode=true KAEUseEngineTest ++ * @run main/othervm -Dkae.log=true -Dkae.sm2.useKaeEngine=true KAEUseEngineTest ++ * @run main/othervm -Dkae.log=true -Dall.test=disable -Dkae.digest.useKaeEngine=false -Dkae.aes.useKaeEngine=false -Dkae.sm4.useKaeEngine=false -Dkae.hmac.useKaeEngine=false -Dkae.rsa.useKaeEngine=false -Dkae.dh.useKaeEngine=false -Dkae.ec.useKaeEngine=false -Dkae.sm2.useKaeEngine=true KAEUseEngineTest + * @run main/othervm -Dkae.log=true -Dkae.engine.id=uadk_engine -Dkae.libcrypto.useGlobalMode=true -Dkae.digest.useKaeEngine=true KAEUseEngineTest + * @run main/othervm -Dkae.log=true -Dkae.engine.id=uadk_engine -Dkae.libcrypto.useGlobalMode=true -Dkae.aes.useKaeEngine=true KAEUseEngineTest + * @run main/othervm -Dkae.log=true -Dkae.engine.id=uadk_engine -Dkae.libcrypto.useGlobalMode=true -Dkae.sm4.useKaeEngine=true KAEUseEngineTest +@@ -61,7 +62,8 @@ import java.util.Map; + * 
@run main/othervm -Dkae.log=true -Dkae.engine.id=uadk_engine -Dkae.libcrypto.useGlobalMode=true -Dkae.rsa.useKaeEngine=true KAEUseEngineTest + * @run main/othervm -Dkae.log=true -Dkae.engine.id=uadk_engine -Dkae.libcrypto.useGlobalMode=true -Dkae.dh.useKaeEngine=true KAEUseEngineTest + * @run main/othervm -Dkae.log=true -Dkae.engine.id=uadk_engine -Dkae.libcrypto.useGlobalMode=true -Dkae.ec.useKaeEngine=true KAEUseEngineTest +- * @run main/othervm -Dkae.log=true -Dall.test=enable -Dkae.engine.id=uadk_engine -Dkae.libcrypto.useGlobalMode=true -Dkae.digest.useKaeEngine=true -Dkae.aes.useKaeEngine=true -Dkae.sm4.useKaeEngine=true -Dkae.hmac.useKaeEngine=true -Dkae.rsa.useKaeEngine=true -Dkae.dh.useKaeEngine=true -Dkae.ec.useKaeEngine=true KAEUseEngineTest ++ * @run main/othervm -Dkae.log=true -Dkae.engine.id=uadk_engine -Dkae.libcrypto.useGlobalMode=true -Dkae.sm2.useKaeEngine=true KAEUseEngineTest ++ * @run main/othervm -Dkae.log=true -Dall.test=enable -Dkae.engine.id=uadk_engine -Dkae.libcrypto.useGlobalMode=true -Dkae.digest.useKaeEngine=true -Dkae.aes.useKaeEngine=true -Dkae.sm4.useKaeEngine=true -Dkae.hmac.useKaeEngine=true -Dkae.rsa.useKaeEngine=true -Dkae.dh.useKaeEngine=true -Dkae.ec.useKaeEngine=true -Dkae.sm2.useKaeEngine=true KAEUseEngineTest + * @run main/othervm -Dkae.log=true -Dkae.engine.id=uadk_engine -Dkae.libcrypto.useGlobalMode=true -Dkae.digest.useKaeEngine=false KAEUseEngineTest + * @run main/othervm -Dkae.log=true -Dkae.engine.id=uadk_engine -Dkae.libcrypto.useGlobalMode=true -Dkae.aes.useKaeEngine=true KAEUseEngineTest + * @run main/othervm -Dkae.log=true -Dkae.engine.id=uadk_engine -Dkae.libcrypto.useGlobalMode=true -Dkae.sm4.useKaeEngine=true KAEUseEngineTest +@@ -69,94 +71,105 @@ import java.util.Map; + * @run main/othervm -Dkae.log=true -Dkae.engine.id=uadk_engine -Dkae.libcrypto.useGlobalMode=true -Dkae.rsa.useKaeEngine=true KAEUseEngineTest + * @run main/othervm -Dkae.log=true -Dkae.engine.id=uadk_engine -Dkae.libcrypto.useGlobalMode=true -Dkae.dh.useKaeEngine=true KAEUseEngineTest + * @run main/othervm -Dkae.log=true -Dkae.engine.id=uadk_engine -Dkae.libcrypto.useGlobalMode=true -Dkae.ec.useKaeEngine=true KAEUseEngineTest +- * @run main/othervm -Dkae.log=true -Dall.test=disable -Dkae.engine.id=uadk_engine -Dkae.libcrypto.useGlobalMode=true -Dkae.digest.useKaeEngine=false -Dkae.aes.useKaeEngine=false -Dkae.sm4.useKaeEngine=false -Dkae.hmac.useKaeEngine=false -Dkae.rsa.useKaeEngine=false -Dkae.dh.useKaeEngine=false -Dkae.ec.useKaeEngine=false KAEUseEngineTest ++ * @run main/othervm -Dkae.log=true -Dkae.engine.id=uadk_engine -Dkae.libcrypto.useGlobalMode=true -Dkae.sm2.useKaeEngine=true KAEUseEngineTest ++ * @run main/othervm -Dkae.log=true -Dall.test=disable -Dkae.engine.id=uadk_engine -Dkae.libcrypto.useGlobalMode=true -Dkae.digest.useKaeEngine=false -Dkae.aes.useKaeEngine=false -Dkae.sm4.useKaeEngine=false -Dkae.hmac.useKaeEngine=false -Dkae.rsa.useKaeEngine=false -Dkae.dh.useKaeEngine=false -Dkae.ec.useKaeEngine=false -Dkae.sm2.useKaeEngine=false KAEUseEngineTest + */ + public class KAEUseEngineTest { + enum Mode { + DEFAULT(new boolean[]{ + true, false, false, true, false, false, false, false, false, false, + false, false, false, false, false, false, true, true, true, true, +- false, false, false, false, false, false, true, true, false ++ false, false, false, false, false, false, true, true, false, false + }), + DIGEST_ENABLE(new boolean[]{ + true, false, false, true, false, false, false, false, false, false, + false, false, false, false, false, false, true, 
true, true, true, +- false, false, false, false, false, false, true, true, false ++ false, false, false, false, false, false, true, true, false, false + }, 0, true), + AES_ENABLE(new boolean[]{ + true, false, false, true, true, true, true, true, true, true, + true, true, true, true, true, true, true, true, true, true, +- false, false, false, false, false, false, true, true, false ++ false, false, false, false, false, false, true, true, false, false + }, 1, true), + SM4_ENABLE(new boolean[]{ + true, false, false, true, false, false, false, false, false, false, + false, false, false, false, false, false, true, true, true, true, +- false, false, false, false, false, false, true, true, false ++ false, false, false, false, false, false, true, true, false, false + }, 2, true), + HMAC_ENABLE(new boolean[]{ + true, false, false, true, false, false, false, false, false, false, + false, false, false, false, false, false, true, true, true, true, +- true, true, true, true, true, true, true, true, false ++ true, true, true, true, true, true, true, true, false, false + }, 3, true), + RSA_ENABLE(new boolean[]{ + true, false, false, true, false, false, false, false, false, false, + false, false, false, false, false, false, true, true, true, true, +- false, false, false, false, false, false, true, true, false ++ false, false, false, false, false, false, true, true, false, false + }, 4, true), + DH_ENABLE(new boolean[]{ + true, false, false, true, false, false, false, false, false, false, + false, false, false, false, false, false, true, true, true, true, +- false, false, false, false, false, false, true, true, false ++ false, false, false, false, false, false, true, true, false, false + }, 5, true), + EC_ENABLE(new boolean[]{ + true, false, false, true, false, false, false, false, false, false, + false, false, false, false, false, false, true, true, true, true, +- false, false, false, false, false, false, true, true, false ++ false, false, false, false, false, false, true, true, false, false + }, 6, true), ++ SM2_ENABLE(new boolean[]{ ++ true, false, false, true, false, false, false, false, false, false, ++ false, false, false, false, false, false, true, true, true, true, ++ false, false, false, false, false, false, true, true, false, false ++ }, 7, true), + ALL_ENABLE(new boolean[]{ + true, false, false, true, true, true, true, true, true, true, + true, true, true, true, true, true, true, true, true, true, +- true, true, true, true, true, true, true, true, false ++ true, true, true, true, true, true, true, true, false, false + }, true), + DIGEST_DISABLE(new boolean[]{ + false, false, false, false, false, false, false, false, false, false, + false, false, false, false, false, false, true, true, true, true, +- false, false, false, false, false, false, true, true, false ++ false, false, false, false, false, false, true, true, false, false + }, 0, false), + AES_DISABLE(new boolean[]{ + true, false, false, true, false, false, false, false, false, false, + false, false, false, false, false, false, true, true, true, true, +- false, false, false, false, false, false, true, true, false ++ false, false, false, false, false, false, true, true, false, false + }, 1, false), + SM4_DISABLE(new boolean[]{ + true, false, false, true, false, false, false, false, false, false, + false, false, false, false, false, false, false, false, false, false, +- false, false, false, false, false, false, true, true, false ++ false, false, false, false, false, false, true, true, false, false + }, 2, false), + HMAC_DISABLE(new boolean[]{ 
+ true, false, false, true, false, false, false, false, false, false, + false, false, false, false, false, false, true, true, true, true, +- false, false, false, false, false, false, true, true, false ++ false, false, false, false, false, false, true, true, false, false + }, 3, false), + RSA_DISABLE(new boolean[]{ + true, false, false, true, false, false, false, false, false, false, + false, false, false, false, false, false, true, true, true, true, +- false, false, false, false, false, false, false, true, false ++ false, false, false, false, false, false, false, true, false, false + }, 4, false), + DH_DISABLE(new boolean[]{ + true, false, false, true, false, false, false, false, false, false, + false, false, false, false, false, false, true, true, true, true, +- false, false, false, false, false, false, true, false, false ++ false, false, false, false, false, false, true, false, false, false + }, 5, false), + EC_DISABLE(new boolean[]{ + true, false, false, true, false, false, false, false, false, false, + false, false, false, false, false, false, true, true, true, true, +- false, false, false, false, false, false, true, true, false ++ false, false, false, false, false, false, true, true, false, false + }, 6, false), ++ SM2_DISABLE(new boolean[]{ ++ true, false, false, true, false, false, false, false, false, false, ++ false, false, false, false, false, false, true, true, true, true, ++ false, false, false, false, false, false, true, true, false, false ++ }, 7, false), + ALL_DISABLE(new boolean[]{ + false, false, false, false, false, false, false, false, false, false, + false, false, false, false, false, false, false, false, false, false, +- false, false, false, false, false, false, false, false, false ++ false, false, false, false, false, false, false, false, false, false + }, false); + private final boolean[] expectedResult; + private final Integer propertyNameIndex; +@@ -208,7 +221,8 @@ public class KAEUseEngineTest { + "kae.hmac.useKaeEngine", + "kae.rsa.useKaeEngine", + "kae.dh.useKaeEngine", +- "kae.ec.useKaeEngine" ++ "kae.ec.useKaeEngine", ++ "kae.sm2.useKaeEngine" + }; + + private static final List files = new ArrayList<>(); +diff --git a/test/jdk/org/openeuler/security/openssl/KaeProviderTest.java b/test/jdk/org/openeuler/security/openssl/KaeProviderTest.java +index 0f4425b6d..f4fb07d50 100644 +--- a/test/jdk/org/openeuler/security/openssl/KaeProviderTest.java ++++ b/test/jdk/org/openeuler/security/openssl/KaeProviderTest.java +@@ -26,10 +26,7 @@ import org.openeuler.security.openssl.KAEProvider; + import javax.crypto.Cipher; + import javax.crypto.Mac; + import javax.crypto.NoSuchPaddingException; +-import java.security.KeyPairGenerator; +-import java.security.MessageDigest; +-import java.security.NoSuchAlgorithmException; +-import java.security.Security; ++import java.security.*; + + /** + * @test +@@ -54,7 +51,9 @@ public class KaeProviderTest { + "kae.hmac", + "kae.rsa", + "kae.dh", +- "kae.ec" ++ "kae.ec", ++ "kae.sm2.cipher", ++ "kae.sm2.signature" + }; + + private static final String KAE = "KAEProvider"; +@@ -86,6 +85,7 @@ public class KaeProviderTest { + testRsa(); + testDh(); + testEc(); ++ testSM2(); + } + + public static void testMd5() throws NoSuchAlgorithmException { +@@ -151,6 +151,28 @@ public class KaeProviderTest { + judge("kae.ec",keyPairGenerator.getProvider().getName()); + } + ++ public static void testSM2() throws NoSuchAlgorithmException, NoSuchPaddingException { ++ try { ++ KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("SM2"); ++ 
judge("kae.sm2.cipher",keyPairGenerator.getProvider().getName()); ++ Cipher cipher = Cipher.getInstance("SM2"); ++ judge("kae.sm2.cipher",cipher.getProvider().getName()); ++ ++ } catch (NoSuchAlgorithmException e) { ++ if(Boolean.parseBoolean(System.getProperty("kae.sm2.cipher"))){ ++ throw e; ++ } ++ } ++ try { ++ Signature signature = Signature.getInstance("SM3WithSM2"); ++ judge("kae.sm2.signature",signature.getProvider().getName()); ++ } catch (NoSuchAlgorithmException e) { ++ if(Boolean.parseBoolean(System.getProperty("kae.sm2.signature"))){ ++ throw e; ++ } ++ } ++ } ++ + private static void judge(String algorithm , String providerName){ + String value = System.getProperty(algorithm); + if (value == null) { +diff --git a/test/jdk/org/openeuler/security/openssl/SM2Test.java b/test/jdk/org/openeuler/security/openssl/SM2Test.java +new file mode 100644 +index 000000000..d7b4a5d56 +--- /dev/null ++++ b/test/jdk/org/openeuler/security/openssl/SM2Test.java +@@ -0,0 +1,175 @@ ++/* ++ * Copyright (c) 2024, Huawei Technologies Co., Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ */ ++ ++import org.openeuler.security.openssl.KAEProvider; ++ ++import javax.crypto.Cipher; ++import java.security.*; ++import java.security.spec.PKCS8EncodedKeySpec; ++import java.security.spec.X509EncodedKeySpec; ++import java.util.Arrays; ++ ++/** ++ * @test ++ * @summary Basic test for SM2 ++ * @modules jdk.crypto.kaeprovider/org.openeuler.security.openssl ++ * @requires os.arch=="aarch64" ++ * @run main SM2Test ++ */ ++ ++public class SM2Test { ++ private static final byte[] INFO = "SM2 test".getBytes(); ++ private static final byte[] PUBLIC_KEY_BYTES = new byte[]{ ++ 48, 89, 48, 19, 6, 7, 42, -122, 72, -50, 61, 2, 1, 6, 8, 42, ++ -127, 28, -49, 85, 1, -126, 45, 3, 66, 0, 4, 10, -36, -22, -20, 17, ++ 26, 86, -114, -52, -78, 79, -22, 116, -47, -70, -33, 112, 32, -18, 92, -45, ++ -58, 20, 36, -5, 55, 68, -95, -57, -121, 10, 33, -76, 54, 24, -119, -104, ++ 61, -24, -113, 46, -57, 36, -78, -37, -95, -113, -52, -88, -5, 22, -67, 101, ++ 94, 37, 2, -58, 55, -35, 15, -21, 31, -49, -80 ++ }; ++ private static final byte[] PRIVATE_KEY_BYTES = new byte[]{ ++ 48, -127, -109, 2, 1, 0, 48, 19, 6, 7, 42, -122, 72, -50, 61, 2, ++ 1, 6, 8, 42, -127, 28, -49, 85, 1, -126, 45, 4, 121, 48, 119, 2, ++ 1, 1, 4, 32, -104, 71, 54, -41, 24, 66, 82, -45, 114, -113, -121, -105, ++ -35, 35, 9, 49, -8, 119, 44, 118, 80, -20, 47, -38, -69, -47, 121, -8, ++ -73, -33, 4, 54, -96, 10, 6, 8, 42, -127, 28, -49, 85, 1, -126, 45, ++ -95, 68, 3, 66, 0, 4, 10, -36, -22, -20, 17, 26, 86, -114, -52, -78, ++ 79, -22, 116, -47, -70, -33, 112, 32, -18, 92, -45, -58, 20, 36, -5, 55, ++ 68, -95, -57, -121, 10, 33, -76, 54, 24, -119, -104, 61, -24, -113, 46, -57, ++ 36, -78, -37, -95, -113, -52, -88, -5, 22, -67, 101, 94, 37, 2, -58, 55, ++ -35, 15, -21, 31, -49, -80 ++ }; ++ ++ private static final byte[] ENCRYPTED_BYTES = new byte[]{ ++ 48, 113, 2, 33, 0, -91, 51, 29, -122, -26, 120, 43, 27, 115, -57, -98, ++ -124, 114, -30, -83, 69, -69, -38, -54, -38, 127, 90, -89, -40, 114, -9, 99, ++ 111, 121, 55, -81, 109, 2, 32, 6, -103, 108, -59, -11, -108, -7, 116, 34, ++ -8, -29, 58, -43, -109, -121, -66, -62, -82, 92, 117, 100, -28, 63, -103, -32, ++ -81, 10, 4, -46, 114, 49, 34, 4, 32, 18, 66, 110, 22, -3, -101, -122, ++ 46, 21, 25, 29, 35, -82, -119, 38, -10, -19, -30, 69, -100, -118, -105, 116, ++ -105, -65, -110, -24, -42, -17, 84, -66, 82, 4, 8, 7, 14, 4, 64, 95, 31, 87, 93 ++ }; ++ ++ private static PrivateKey privateKey; ++ ++ private static PublicKey publicKey; ++ ++ public static void main(String[] args) throws Exception { ++ init(); ++ testDecryptByPrivateKey(); ++ testEncryptByPublicKey(); ++ testEncryptByPrivateKey(); ++ testSignature(); ++ testWrapAndUnwrap(); ++ } ++ ++ /** ++ * Init private key and public key ++ */ ++ public static void init() throws Exception { ++ Security.insertProviderAt(new KAEProvider(), 1); ++ KeyFactory keyFactory = KeyFactory.getInstance("SM2"); ++ publicKey = keyFactory.generatePublic(new X509EncodedKeySpec(PUBLIC_KEY_BYTES)); ++ privateKey = keyFactory.generatePrivate(new PKCS8EncodedKeySpec(PRIVATE_KEY_BYTES)); ++ } ++ ++ /** ++ * Test private key decryption ++ */ ++ public static void testDecryptByPrivateKey() throws Exception { ++ byte[] decryptBytes = decrypt(privateKey, ENCRYPTED_BYTES); ++ if(!Arrays.equals(INFO, decryptBytes)) { ++ throw new RuntimeException("testDecryptByPrivateKey failed"); ++ } ++ } ++ ++ /** ++ * Test public key encryption and private key decryption ++ */ ++ public static void testEncryptByPublicKey() throws Exception { ++ byte[] encryptBytes = encrypt(publicKey, 
++        byte[] decryptBytes = decrypt(privateKey, encryptBytes);
++        if (!Arrays.equals(INFO, decryptBytes)) {
++            throw new RuntimeException("testEncryptByPublicKey failed");
++        }
++    }
++
++    /**
++     * Test that encrypting with the private key is rejected
++     */
++    public static void testEncryptByPrivateKey() throws Exception {
++        try {
++            encrypt(privateKey, INFO);
++            throw new RuntimeException("testEncryptByPrivateKey failed");
++        } catch (InvalidKeyException e) {
++            // Expected: SM2 encryption requires the public key
++        }
++    }
++
++    public static void testSignature() throws Exception {
++
++        Signature sign = Signature.getInstance("SM3withSM2");
++        sign.initSign(privateKey);
++        sign.update(INFO);
++        byte[] signInfo = sign.sign();
++
++        sign.initVerify(publicKey);
++        sign.update(INFO);
++        if (!sign.verify(signInfo)) {
++            throw new RuntimeException("sm2 testSignature failed.");
++        }
++    }
++
++    public static void testWrapAndUnwrap() throws Exception {
++        KeyPair keyPair = generateKeyPair();
++        KeyPair wrapKeyPair = generateKeyPair();
++        Cipher cipher = Cipher.getInstance("SM2");
++        cipher.init(Cipher.WRAP_MODE, keyPair.getPublic());
++        byte[] wrappedKeyBytes = cipher.wrap(wrapKeyPair.getPublic());
++        cipher.init(Cipher.UNWRAP_MODE, keyPair.getPrivate());
++        Key unWrappedKey = cipher.unwrap(wrappedKeyBytes, "SM2", Cipher.PUBLIC_KEY);
++        if (!Arrays.equals(wrapKeyPair.getPublic().getEncoded(), unWrappedKey.getEncoded())) {
++            throw new RuntimeException("testWrapAndUnwrap failed");
++        }
++    }
++
++    private static KeyPair generateKeyPair() throws Exception {
++        KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("SM2");
++        return keyPairGenerator.generateKeyPair();
++    }
++
++    private static byte[] doCrypt(int opmode, Key key, byte[] input) throws Exception {
++        Cipher cipher = Cipher.getInstance("SM2");
++        cipher.init(opmode, key);
++        cipher.update(input);
++        return cipher.doFinal();
++    }
++
++    private static byte[] encrypt(Key key, byte[] input) throws Exception {
++        return doCrypt(Cipher.ENCRYPT_MODE, key, input);
++    }
++
++    private static byte[] decrypt(Key key, byte[] input) throws Exception {
++        return doCrypt(Cipher.DECRYPT_MODE, key, input);
++    }
++}
+\ No newline at end of file
+diff --git a/test/jdk/sun/security/jca/PreferredProviderNegativeTest.java b/test/jdk/sun/security/jca/PreferredProviderNegativeTest.java
+index 52cfbab4e..3b5544ac2 100644
+--- a/test/jdk/sun/security/jca/PreferredProviderNegativeTest.java
++++ b/test/jdk/sun/security/jca/PreferredProviderNegativeTest.java
+@@ -108,7 +108,7 @@ public class PreferredProviderNegativeTest {
+         String expected;
+         String value = args[1];
+ 
+-        if (Security.getProperty("security.provider.1").equals("KAEProvider")) {
++        if (Security.getProperty("security.provider.1").equals("org.openeuler.security.openssl.KAEProvider")) {
+             expected = "KAEProvider";
+         } else {
+             expected = "SunJCE";
+diff --git a/test/micro/org/openeuler/bench/security/openssl/SM2CipherBenchmark.java b/test/micro/org/openeuler/bench/security/openssl/SM2CipherBenchmark.java
+new file mode 100644
+index 000000000..0171504ed
+--- /dev/null
++++ b/test/micro/org/openeuler/bench/security/openssl/SM2CipherBenchmark.java
+@@ -0,0 +1,116 @@
++/*
++ * Copyright (c) 2024, Huawei Technologies Co., Ltd. All rights reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
++ * or visit www.oracle.com if you need additional information or have any
++ * questions.
++ */
++package org.openeuler.bench.security.openssl;
++
++import org.openeuler.security.openssl.KAEProvider;
++import org.openjdk.jmh.annotations.*;
++
++import java.security.*;
++import java.util.Random;
++import java.util.concurrent.TimeUnit;
++
++import javax.crypto.BadPaddingException;
++import javax.crypto.Cipher;
++import javax.crypto.IllegalBlockSizeException;
++import javax.crypto.NoSuchPaddingException;
++
++/**
++ * SM2 Cipher Benchmark
++ */
++@BenchmarkMode(Mode.Throughput)
++@OutputTimeUnit(TimeUnit.SECONDS)
++@Warmup(iterations = 3, time = 3, timeUnit = TimeUnit.SECONDS)
++@Measurement(iterations = 8, time = 2, timeUnit = TimeUnit.SECONDS)
++@Fork(jvmArgsPrepend = {"-Xms100G", "-Xmx100G", "-XX:+AlwaysPreTouch"}, value = 5)
++@Threads(1)
++@State(Scope.Thread)
++public class SM2CipherBenchmark {
++    public static final int SET_SIZE = 128;
++    byte[][] data;
++    int index = 0;
++
++    @Param({"SM2"})
++    private String algorithm;
++
++    @Param({"" + 1024, "" + 10 * 1024, "" + 100 * 1024, "" + 1024 * 1024})
++    private int dataSize;
++
++    @Param({"KAEProvider"})
++    private String provider;
++
++    public Provider prov = null;
++
++    private KeyPair keyPair;
++
++    private byte[][] encryptedData;
++    private Cipher encryptCipher;
++    private Cipher decryptCipher;
++
++    @Setup
++    public void setup() throws NoSuchAlgorithmException, NoSuchPaddingException, InvalidKeyException,
++            IllegalBlockSizeException, BadPaddingException {
++        Security.addProvider(new KAEProvider());
++        prov = Security.getProvider(provider);
++
++        KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("SM2");
++        keyPair = keyPairGenerator.generateKeyPair();
++
++        encryptCipher = (prov == null) ? Cipher.getInstance(algorithm) : Cipher.getInstance(algorithm, prov);
++        encryptCipher.init(Cipher.ENCRYPT_MODE, keyPair.getPublic());
++        decryptCipher = (prov == null) ? Cipher.getInstance(algorithm) : Cipher.getInstance(algorithm, prov);
++        decryptCipher.init(Cipher.DECRYPT_MODE, keyPair.getPrivate());
++
++        data = fillRandom(new byte[SET_SIZE][dataSize]);
++        encryptedData = fillEncrypted(data, encryptCipher);
++    }
++
++    @Benchmark
++    public byte[] encrypt() throws IllegalBlockSizeException, BadPaddingException {
++        byte[] d = data[index];
++        index = (index + 1) % SET_SIZE;
++        return encryptCipher.doFinal(d);
++    }
++
++    @Benchmark
++    public byte[] decrypt() throws IllegalBlockSizeException, BadPaddingException {
++        byte[] e = encryptedData[index];
++        index = (index + 1) % SET_SIZE;
++        return decryptCipher.doFinal(e);
++    }
++    public static byte[][] fillRandom(byte[][] data) {
++        Random rnd = new Random();
++        for (byte[] d : data) {
++            rnd.nextBytes(d);
++        }
++        return data;
++    }
++
++    public static byte[][] fillEncrypted(byte[][] data, Cipher encryptCipher)
++            throws IllegalBlockSizeException, BadPaddingException {
++        byte[][] encryptedData = new byte[data.length][];
++        for (int i = 0; i < encryptedData.length; i++) {
++            encryptedData[i] = encryptCipher.doFinal(data[i]);
++        }
++        return encryptedData;
++    }
++}
+\ No newline at end of file
+diff --git a/test/micro/org/openeuler/bench/security/openssl/SM2SignatureBenchmark.java b/test/micro/org/openeuler/bench/security/openssl/SM2SignatureBenchmark.java
+new file mode 100644
+index 000000000..3fef9e505
+--- /dev/null
++++ b/test/micro/org/openeuler/bench/security/openssl/SM2SignatureBenchmark.java
+@@ -0,0 +1,104 @@
++/*
++ * Copyright (c) 2024, Huawei Technologies Co., Ltd. All rights reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
++ * or visit www.oracle.com if you need additional information or have any
++ * questions.
++ */
++package org.openeuler.bench.security.openssl;
++
++import org.openeuler.security.openssl.KAEProvider;
++import org.openjdk.jmh.annotations.*;
++
++import java.security.*;
++import java.util.concurrent.TimeUnit;
++
++/**
++ * SM2 Signature Benchmark
++ */
++@BenchmarkMode(Mode.Throughput)
++@OutputTimeUnit(TimeUnit.SECONDS)
++@Warmup(iterations = 3, time = 3, timeUnit = TimeUnit.SECONDS)
++@Measurement(iterations = 8, time = 2, timeUnit = TimeUnit.SECONDS)
++@Fork(jvmArgsPrepend = {"-Xms100G", "-Xmx100G", "-XX:+AlwaysPreTouch"}, value = 5)
++@Threads(1)
++@State(Scope.Thread)
++public class SM2SignatureBenchmark {
++    public static final int SET_SIZE = 128;
++    byte[][] data;
++    int index = 0;
++
++    @Param({"SM3withSM2"})
++    private String algorithm;
++
++    @Param({"" + 1024, "" + 10 * 1024, "" + 100 * 1024, "" + 256 * 1024, "" + 1024 * 1024, "" + 10 * 1024 * 1024})
++    private int dataSize;
++
++    @Param({"KAEProvider"})
++    private String provider;
++
++    public Provider prov = null;
++
++    private KeyPair keyPair;
++
++    private byte[][] sigData;
++
++    @Setup
++    public void setup() throws Exception {
++        Security.addProvider(new KAEProvider());
++        prov = Security.getProvider(provider);
++
++        KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("SM2");
++        keyPair = keyPairGenerator.generateKeyPair();
++
++        data = new byte[SET_SIZE][dataSize];
++        sigData = getSigBytes(data);
++    }
++
++    private byte[][] getSigBytes(byte[][] data) throws Exception {
++        byte[][] sigBytes = new byte[data.length][];
++        Signature signature = prov != null ? Signature.getInstance(algorithm, prov) :
++                Signature.getInstance(algorithm);
++        signature.initSign(keyPair.getPrivate());
++        for (int i = 0; i < sigBytes.length; i++) {
++            signature.update(data[i]);
++            sigBytes[i] = signature.sign();
++        }
++        return sigBytes;
++    }
++
++    @Benchmark
++    public void sign() throws NoSuchAlgorithmException, InvalidKeyException, SignatureException {
++        Signature signature = prov != null ? Signature.getInstance(algorithm, prov) :
++                Signature.getInstance(algorithm);
++        signature.initSign(keyPair.getPrivate());
++        signature.update(data[index]);
++        signature.sign();
++        index = (index + 1) % SET_SIZE;
++    }
++
++    @Benchmark
++    public void verify() throws NoSuchAlgorithmException, InvalidKeyException, SignatureException {
++        Signature signature = prov != null ? Signature.getInstance(algorithm, prov) :
++                Signature.getInstance(algorithm);
++        signature.initVerify(keyPair.getPublic());
++        signature.update(data[index]);
++        signature.verify(sigData[index]);
++        index = (index + 1) % SET_SIZE;
++    }
++}
+\ No newline at end of file
+--
+2.34.1
+
diff --git a/openjdk-21.spec b/openjdk-21.spec
index 7c36f8e8b09c509d3d574833cc1c3002ff5fcc03..29a9b049805ddfc4fab9cee0e537ff4d56fe3beb 100644
--- a/openjdk-21.spec
+++ b/openjdk-21.spec
@@ -905,7 +905,7 @@
 Name: java-21-%{origin}
 Version: %{newjavaver}.%{buildver}
 # This package needs `.rolling` as part of Release so as to not conflict on install with
 # java-X-openjdk. I.e. when latest rolling release is also an LTS release packaged as
-Release: 5
+Release: 6
 # java-1.5.0-ibm from jpackage.org set Epoch to 1 for unknown reasons
 # and this change was brought into RHEL-4. java-1.5.0-ibm packages
@@ -1040,6 +1040,14 @@
 Patch95: huawei-remove-provides-in-kaeprovider-module-info.patch
 #21.0.9
 Patch96: huawei-posix_spawn-clock_gettime-clock_getres-__cxa_thread_.patch
+Patch97: huawei-support-Openssl3.patch
+Patch98: huawei-Add-dynamic-max-heap-size.patch
+Patch99: huawei-Add-Compact-Object-Headers-feature-for-AArch64.patch
+Patch100: huawei-Add-NUMARandom-feature.patch
+Patch101: huawei-Add-Jprofilecache-feature.patch
+Patch102: Backport-JDK-8269870-PS-Membar-in-PSPromotionManager-copy_unmarke.patch
+Patch103: huawei-AbortVMOnException-option-support-matching-multiple-.patch
+Patch104: huawei-Fix-kae-testcat-and-EATest-testcast-bug-and-fix-cve-.patch
 
 ############################################
 #
@@ -1360,6 +1368,14 @@
 pushd %{top_level_dir_name}
 %patch94 -p1
 %patch95 -p1
 %patch96 -p1
+%patch97 -p1
+%patch98 -p1
+%patch99 -p1
+%patch100 -p1
+%patch101 -p1
+%patch102 -p1
+%patch103 -p1
+%patch104 -p1
 popd # openjdk
 %endif
@@ -1952,6 +1968,16 @@ cjc.mainProgram(args) -- the returns from copy_jdk_configs.lua should not affect
 
 %changelog
+* Tue Nov 25 2025 Benhsuai5D - 1:21.0.9.10-6
+- add Backport-JDK-8269870-PS-Membar-in-PSPromotionManager-copy_unmarke.patch
+- add huawei-AbortVMOnException-option-support-matching-multiple-.patch
+- add huawei-Add-Compact-Object-Headers-feature-for-AArch64.patch
+- add huawei-Add-Jprofilecache-feature.patch
+- add huawei-Add-NUMARandom-feature.patch
+- add huawei-Add-dynamic-max-heap-size.patch
+- add huawei-Fix-kae-testcat-and-EATest-testcast-bug-and-fix-cve-.patch
+- add huawei-support-Openssl3.patch
+
 * Thu Nov 20 2025 zhangshihui - 1:21.0.9.10-5
 - RISC-V add Zicond