From ce101796daf66dcc356f0918c06d40f1931ca0f7 Mon Sep 17 00:00:00 2001 From: zhuhongbo Date: Wed, 13 Aug 2025 09:59:33 +0800 Subject: [PATCH] add arch loongarch64 --- 0001-add-sw_64-support.patch | 56155 ++++++++++++++++ 0001-fix-loongarch64-build-error.patch | 67 + 0001-fix-sw_64-build-error.patch | 1081 + 0001-port-chromium_qt-to-loongarch64.patch | 1712 + 0002-fix-third_party-for-loongarch64.patch | 37 + 0003-port-breakpad-for-loongarch64.patch | 1059 + ...t-ffmpeg-to-loongarch64-for-chromium.patch | 956 + ...oongarch64-for-chromium-add-la64-rel.patch | 8174 +++ ...y-for-loongarch64-add-files-for-la64.patch | 6055 ++ 0007-port-icu-for-loongarch64.patch | 25 + 0008-port-lss-for-loongarch64.patch | 295 + 0009-port-pdfium-for-loongarch64.patch | 45 + 0010-port-swiftshader-for-loongarch64.patch | 318 + 0011-port-webrtc-for-loongarch64.patch | 274 + 0012-port-v8-for-loongarch64.patch | 45399 +++++++++++++ ...gine-can-be-compiled-for-loongarch64.patch | 193 + 0014-fix-compile-errors-for-mips64el.patch | 41 + ...piler-internal-error-for-loongarch64.patch | 25 + 0016-fix-compile-error-for-loongarch64.patch | 54 + mipsel-code-range-size.patch | 19 + mipsel-link-atomic.patch | 26 + mipsel-linux-5.patch | 43 + mipsel-no-dav1d.patch | 17 + mipsel-ptrace-include.patch | 20 + qt5-qtwebengine.spec | 92 +- run-unbundling-script.patch | 21 + sandbox-time64-syscalls.patch | 89 + system-icu-utf.patch | 561 + system-lcms2.patch | 81 + system-nspr-prtime.patch | 51 + verbose-gn-bootstrap.patch | 16 + 31 files changed, 122994 insertions(+), 7 deletions(-) create mode 100644 0001-add-sw_64-support.patch create mode 100644 0001-fix-loongarch64-build-error.patch create mode 100644 0001-fix-sw_64-build-error.patch create mode 100644 0001-port-chromium_qt-to-loongarch64.patch create mode 100644 0002-fix-third_party-for-loongarch64.patch create mode 100644 0003-port-breakpad-for-loongarch64.patch create mode 100644 0004-port-ffmpeg-to-loongarch64-for-chromium.patch create mode 100644 0005-port-ffmpeg-to-loongarch64-for-chromium-add-la64-rel.patch create mode 100644 0006-fix-third_party-for-loongarch64-add-files-for-la64.patch create mode 100644 0007-port-icu-for-loongarch64.patch create mode 100644 0008-port-lss-for-loongarch64.patch create mode 100644 0009-port-pdfium-for-loongarch64.patch create mode 100644 0010-port-swiftshader-for-loongarch64.patch create mode 100644 0011-port-webrtc-for-loongarch64.patch create mode 100644 0012-port-v8-for-loongarch64.patch create mode 100644 0013-make-qtwebengine-can-be-compiled-for-loongarch64.patch create mode 100644 0014-fix-compile-errors-for-mips64el.patch create mode 100644 0015-fix-compiler-internal-error-for-loongarch64.patch create mode 100644 0016-fix-compile-error-for-loongarch64.patch create mode 100644 mipsel-code-range-size.patch create mode 100644 mipsel-link-atomic.patch create mode 100644 mipsel-linux-5.patch create mode 100644 mipsel-no-dav1d.patch create mode 100644 mipsel-ptrace-include.patch create mode 100644 run-unbundling-script.patch create mode 100644 sandbox-time64-syscalls.patch create mode 100644 system-icu-utf.patch create mode 100644 system-lcms2.patch create mode 100644 system-nspr-prtime.patch create mode 100644 verbose-gn-bootstrap.patch diff --git a/0001-add-sw_64-support.patch b/0001-add-sw_64-support.patch new file mode 100644 index 0000000..9be6047 --- /dev/null +++ b/0001-add-sw_64-support.patch @@ -0,0 +1,56155 @@ +From 06a8da33b6b7788fa064ee8db728f6f319605305 Mon Sep 17 00:00:00 2001 +From: root +Date: Wed, 5 Jun 2024 14:10:24 +0800 
+Subject: [PATCH] add sw_64 support + +--- + configure.pri | 1 + + mkspecs/features/functions.prf | 1 + + .../page_allocator_constants.h | 2 + + .../partition_alloc_constants.h | 2 + + .../chromium/base/process/launch_posix.cc | 2 +- + .../double-conversion/utils.h | 2 +- + src/3rdparty/chromium/build/build_config.h | 5 + + src/3rdparty/chromium/build/config/sw.gni | 38 + + src/3rdparty/chromium/media/media_options.gni | 2 +- + src/3rdparty/chromium/sandbox/features.gni | 2 +- + .../linux/bpf_dsl/linux_syscall_ranges.h | 5 + + .../sandbox/linux/bpf_dsl/seccomp_macros.h | 60 + + .../syscall_parameters_restrictions.cc | 4 +- + .../linux/seccomp-bpf-helpers/syscall_sets.cc | 12 +- + .../sandbox/linux/seccomp-bpf/syscall.cc | 24 +- + .../sandbox/linux/seccomp-bpf/syscall.h | 2 +- + .../sandbox/linux/services/credentials.cc | 2 +- + .../linux/system_headers/linux_seccomp.h | 7 + + .../linux/system_headers/linux_signal.h | 24 + + .../linux/system_headers/linux_syscalls.h | 4 + + .../linux/system_headers/linux_ucontext.h | 2 + + .../system_headers/sw_64_linux_syscalls.h | 32 + + .../system_headers/sw_64_linux_ucontext.h | 36 + + .../sandbox/linux/bpf_cdm_policy_linux.cc | 2 +- + .../sandbox/linux/bpf_gpu_policy_linux.cc | 4 +- + .../sandbox/linux/bpf_utility_policy_linux.cc | 2 +- + .../linux/sandbox_seccomp_bpf_linux.cc | 6 +- + src/3rdparty/chromium/skia/BUILD.gn | 6 + + .../absl/debugging/internal/examine_stack.cc | 2 + + .../chromium/third_party/angle/gni/angle.gni | 2 +- + .../blink/renderer/platform/heap/asm/BUILD.gn | 6 +- + .../platform/heap/asm/SaveRegisters_sw64.S | 43 + + .../boringssl/src/include/openssl/base.h | 3 + + .../dump_writer_common/raw_context_cpu.h | 2 + + .../linux/dump_writer_common/thread_info.cc | 27 + + .../linux/dump_writer_common/thread_info.h | 2 +- + .../dump_writer_common/ucontext_reader.cc | 12 + + .../client/linux/handler/exception_handler.cc | 12 +- + .../client/linux/handler/exception_handler.h | 2 +- + .../microdump_writer/microdump_writer.cc | 8 +- + .../minidump_writer/linux_core_dumper.cc | 7 + + .../linux/minidump_writer/linux_dumper.h | 2 +- + .../minidump_writer/linux_ptrace_dumper.cc | 3 + + .../linux/minidump_writer/minidump_writer.cc | 10 +- + .../linux/minidump_writer/minidump_writer.h | 2 +- + .../src/common/linux/memory_mapped_file.cc | 2 +- + .../breakpad/src/common/memory_allocator.h | 6 + + .../common/minidump_cpu_sw64.h | 165 + + .../google_breakpad/common/minidump_format.h | 1 + + .../minidump/minidump_misc_info_writer.cc | 2 + + .../crashpad/snapshot/capture_memory.cc | 4 + + .../crashpad/snapshot/cpu_architecture.h | 4 +- + .../crashpad/crashpad/snapshot/cpu_context.h | 24 + + .../snapshot/linux/cpu_context_linux.h | 38 + + .../linux/exception_snapshot_linux.cc | 52 + + .../snapshot/linux/exception_snapshot_linux.h | 2 + + .../snapshot/linux/process_reader_linux.cc | 2 + + .../crashpad/snapshot/linux/signal_context.h | 103 + + .../snapshot/linux/system_snapshot_linux.cc | 11 + + .../snapshot/linux/thread_snapshot_linux.cc | 11 + + .../snapshot/linux/thread_snapshot_linux.h | 2 + + .../crashpad/crashpad/util/linux/ptracer.cc | 33 + + .../crashpad/util/linux/thread_info.h | 17 +- + .../third_party/ffmpeg/ffmpeg_options.gni | 3 + + .../icu/source/i18n/double-conversion-utils.h | 2 +- + .../chromium/third_party/libvpx/BUILD.gn | 3 + + .../source/config/linux/sw_64/vp8_rtcd.h | 357 + + .../source/config/linux/sw_64/vp9_rtcd.h | 275 + + .../source/config/linux/sw_64/vpx_config.asm | 98 + + .../source/config/linux/sw_64/vpx_config.c | 10 + + 
.../source/config/linux/sw_64/vpx_config.h | 107 + + .../source/config/linux/sw_64/vpx_dsp_rtcd.h | 3868 +++++++ + .../config/linux/sw_64/vpx_scale_rtcd.h | 96 + + .../third_party/lss/linux_syscall_support.h | 204 +- + .../modules/desktop_capture/differ_block.cc | 2 +- + .../third_party/webrtc/rtc_base/system/arch.h | 4 + + src/3rdparty/chromium/v8/BUILD.gn | 37 + + .../chromium/v8/src/base/build_config.h | 14 +- + src/3rdparty/chromium/v8/src/base/cpu.cc | 4 +- + .../v8/src/base/platform/platform-posix.cc | 5 + + .../builtins-sharedarraybuffer-gen.cc | 4 +- + .../chromium/v8/src/builtins/builtins.cc | 2 +- + .../v8/src/builtins/sw64/builtins-sw64.cc | 3222 ++++++ + .../chromium/v8/src/codegen/assembler-arch.h | 2 + + .../chromium/v8/src/codegen/assembler-inl.h | 2 + + .../v8/src/codegen/assembler-sw64-inl.h | 329 + + .../chromium/v8/src/codegen/constants-arch.h | 2 + + .../chromium/v8/src/codegen/cpu-features.h | 4 + + .../v8/src/codegen/external-reference.cc | 98 +- + .../v8/src/codegen/external-reference.h | 8 + + .../v8/src/codegen/interface-descriptors.cc | 2 +- + .../chromium/v8/src/codegen/macro-assembler.h | 3 + + .../chromium/v8/src/codegen/register-arch.h | 2 + + .../v8/src/codegen/register-configuration.cc | 2 + + .../chromium/v8/src/codegen/reloc-info.cc | 3 +- + .../chromium/v8/src/codegen/sw64/OWNERS | 3 + + .../v8/src/codegen/sw64/assembler-sw64-inl.h | 329 + + .../v8/src/codegen/sw64/assembler-sw64.cc | 4379 ++++++++ + .../v8/src/codegen/sw64/assembler-sw64.h | 1636 +++ + .../v8/src/codegen/sw64/constants-sw64.cc | 160 + + .../v8/src/codegen/sw64/constants-sw64.h | 2756 +++++ + .../chromium/v8/src/codegen/sw64/cpu-sw64.cc | 40 + + .../sw64/interface-descriptors-sw64.cc | 351 + + .../src/codegen/sw64/macro-assembler-sw64.cc | 5089 ++++++++++ + .../src/codegen/sw64/macro-assembler-sw64.h | 1321 +++ + .../v8/src/codegen/sw64/register-sw64.h | 406 + + src/3rdparty/chromium/v8/src/common/globals.h | 3 + + .../src/compiler/backend/instruction-codes.h | 2 + + .../compiler/backend/instruction-selector.cc | 4 +- + .../v8/src/compiler/backend/sw64/OWNERS | 3 + + .../backend/sw64/code-generator-sw64.cc | 4209 ++++++++ + .../backend/sw64/instruction-codes-sw64.h | 427 + + .../sw64/instruction-scheduler-sw64.cc | 1558 +++ + .../backend/sw64/instruction-selector-sw64.cc | 3291 ++++++ + .../sw64/unwinding-info-writer-sw64.cc | 108 + + .../backend/sw64/unwinding-info-writer-sw64.h | 73 + + .../compiler/backend/unwinding-info-writer.h | 2 + + .../chromium/v8/src/compiler/c-linkage.cc | 10 + + .../chromium/v8/src/debug/debug-evaluate.cc | 2 + + .../chromium/v8/src/debug/sw64/debug-sw64.cc | 57 + + .../src/deoptimizer/sw64/deoptimizer-sw64.cc | 250 + + .../chromium/v8/src/diagnostics/gdb-jit.cc | 2 + + .../chromium/v8/src/diagnostics/perf-jit.h | 3 + + .../v8/src/diagnostics/sw64/disasm-sw64.cc | 3439 +++++++ + .../v8/src/execution/frame-constants.h | 2 + + .../chromium/v8/src/execution/simulator.h | 2 + + .../execution/sw64/frame-constants-sw64.cc | 32 + + .../src/execution/sw64/frame-constants-sw64.h | 78 + + .../v8/src/execution/sw64/simulator-sw64.cc | 8980 +++++++++++++++++ + .../v8/src/execution/sw64/simulator-sw64.h | 620 ++ + .../chromium/v8/src/flags/flag-definitions.h | 2 +- + .../src/interpreter/interpreter-assembler.cc | 2 +- + .../chromium/v8/src/libsampler/sampler.cc | 4 + + src/3rdparty/chromium/v8/src/logging/log.cc | 2 + + src/3rdparty/chromium/v8/src/objects/code.h | 2 + + .../chromium/v8/src/profiler/tick-sample.cc | 6 + + .../src/regexp/regexp-macro-assembler-arch.h | 2 + 
+ .../v8/src/regexp/regexp-macro-assembler.h | 1 + + src/3rdparty/chromium/v8/src/regexp/regexp.cc | 3 + + .../chromium/v8/src/regexp/sw64/OWNERS | 3 + + .../sw64/regexp-macro-assembler-sw64.cc | 1370 +++ + .../regexp/sw64/regexp-macro-assembler-sw64.h | 224 + + .../v8/src/runtime/runtime-atomics.cc | 2 +- + .../chromium/v8/src/runtime/runtime-utils.h | 13 + + .../chromium/v8/src/snapshot/deserializer.h | 2 +- + .../wasm/baseline/liftoff-assembler-defs.h | 29 + + .../v8/src/wasm/baseline/liftoff-assembler.h | 2 + + .../baseline/sw64/liftoff-assembler-sw64.h | 2951 ++++++ + .../v8/src/wasm/jump-table-assembler.cc | 35 + + .../v8/src/wasm/jump-table-assembler.h | 5 + + .../chromium/v8/src/wasm/wasm-debug.cc | 4 + + .../chromium/v8/src/wasm/wasm-linkage.h | 10 + + src/3rdparty/gn/tools/gn/args.cc | 3 + + src/3rdparty/gn/util/build_config.h | 5 + + 155 files changed, 55592 insertions(+), 64 deletions(-) + create mode 100644 "\\" + create mode 100644 src/3rdparty/chromium/build/config/sw.gni + create mode 100644 src/3rdparty/chromium/sandbox/linux/system_headers/sw_64_linux_syscalls.h + create mode 100644 src/3rdparty/chromium/sandbox/linux/system_headers/sw_64_linux_ucontext.h + create mode 100644 src/3rdparty/chromium/third_party/blink/renderer/platform/heap/asm/SaveRegisters_sw64.S + create mode 100644 src/3rdparty/chromium/third_party/breakpad/breakpad/src/google_breakpad/common/minidump_cpu_sw64.h + create mode 100644 src/3rdparty/chromium/third_party/libvpx/source/config/linux/sw_64/vp8_rtcd.h + create mode 100644 src/3rdparty/chromium/third_party/libvpx/source/config/linux/sw_64/vp9_rtcd.h + create mode 100644 src/3rdparty/chromium/third_party/libvpx/source/config/linux/sw_64/vpx_config.asm + create mode 100644 src/3rdparty/chromium/third_party/libvpx/source/config/linux/sw_64/vpx_config.c + create mode 100644 src/3rdparty/chromium/third_party/libvpx/source/config/linux/sw_64/vpx_config.h + create mode 100644 src/3rdparty/chromium/third_party/libvpx/source/config/linux/sw_64/vpx_dsp_rtcd.h + create mode 100644 src/3rdparty/chromium/third_party/libvpx/source/config/linux/sw_64/vpx_scale_rtcd.h + create mode 100755 src/3rdparty/chromium/v8/src/builtins/sw64/builtins-sw64.cc + create mode 100755 src/3rdparty/chromium/v8/src/codegen/assembler-sw64-inl.h + create mode 100755 src/3rdparty/chromium/v8/src/codegen/sw64/OWNERS + create mode 100755 src/3rdparty/chromium/v8/src/codegen/sw64/assembler-sw64-inl.h + create mode 100755 src/3rdparty/chromium/v8/src/codegen/sw64/assembler-sw64.cc + create mode 100755 src/3rdparty/chromium/v8/src/codegen/sw64/assembler-sw64.h + create mode 100755 src/3rdparty/chromium/v8/src/codegen/sw64/constants-sw64.cc + create mode 100755 src/3rdparty/chromium/v8/src/codegen/sw64/constants-sw64.h + create mode 100755 src/3rdparty/chromium/v8/src/codegen/sw64/cpu-sw64.cc + create mode 100755 src/3rdparty/chromium/v8/src/codegen/sw64/interface-descriptors-sw64.cc + create mode 100755 src/3rdparty/chromium/v8/src/codegen/sw64/macro-assembler-sw64.cc + create mode 100755 src/3rdparty/chromium/v8/src/codegen/sw64/macro-assembler-sw64.h + create mode 100755 src/3rdparty/chromium/v8/src/codegen/sw64/register-sw64.h + create mode 100755 src/3rdparty/chromium/v8/src/compiler/backend/sw64/OWNERS + create mode 100755 src/3rdparty/chromium/v8/src/compiler/backend/sw64/code-generator-sw64.cc + create mode 100755 src/3rdparty/chromium/v8/src/compiler/backend/sw64/instruction-codes-sw64.h + create mode 100755 
src/3rdparty/chromium/v8/src/compiler/backend/sw64/instruction-scheduler-sw64.cc + create mode 100755 src/3rdparty/chromium/v8/src/compiler/backend/sw64/instruction-selector-sw64.cc + create mode 100755 src/3rdparty/chromium/v8/src/compiler/backend/sw64/unwinding-info-writer-sw64.cc + create mode 100755 src/3rdparty/chromium/v8/src/compiler/backend/sw64/unwinding-info-writer-sw64.h + create mode 100755 src/3rdparty/chromium/v8/src/debug/sw64/debug-sw64.cc + create mode 100644 src/3rdparty/chromium/v8/src/deoptimizer/sw64/deoptimizer-sw64.cc + create mode 100755 src/3rdparty/chromium/v8/src/diagnostics/sw64/disasm-sw64.cc + create mode 100755 src/3rdparty/chromium/v8/src/execution/sw64/frame-constants-sw64.cc + create mode 100755 src/3rdparty/chromium/v8/src/execution/sw64/frame-constants-sw64.h + create mode 100755 src/3rdparty/chromium/v8/src/execution/sw64/simulator-sw64.cc + create mode 100755 src/3rdparty/chromium/v8/src/execution/sw64/simulator-sw64.h + create mode 100755 src/3rdparty/chromium/v8/src/regexp/sw64/OWNERS + create mode 100755 src/3rdparty/chromium/v8/src/regexp/sw64/regexp-macro-assembler-sw64.cc + create mode 100755 src/3rdparty/chromium/v8/src/regexp/sw64/regexp-macro-assembler-sw64.h + create mode 100755 src/3rdparty/chromium/v8/src/wasm/baseline/sw64/liftoff-assembler-sw64.h + +diff --git a/configure.pri b/configure.pri +index d3ba9b147..a6b4b3b41 100644 +--- a/configure.pri ++++ b/configure.pri +@@ -143,6 +143,7 @@ defineTest(qtConfTest_detectArch) { + contains(QT_ARCH, "i386")|contains(QT_ARCH, "x86_64"): return(true) + contains(QT_ARCH, "arm")|contains(QT_ARCH, "arm64"): return(true) + contains(QT_ARCH, "mips"): return(true) ++ contains(QT_ARCH, "sw_64"): return(true) + qtLog("Architecture not supported.") + return(false) + } +diff --git a/mkspecs/features/functions.prf b/mkspecs/features/functions.prf +index d3ceb4c5e..edb798175 100644 +--- a/mkspecs/features/functions.prf ++++ b/mkspecs/features/functions.prf +@@ -106,6 +106,7 @@ defineReplace(gnArch) { + contains(qtArch, "arm64"): return(arm64) + contains(qtArch, "mips"): return(mipsel) + contains(qtArch, "mips64"): return(mips64el) ++ contains(qtArch, "sw_64"): return(sw_64) + return(unknown) + } + +diff --git a/src/3rdparty/chromium/base/allocator/partition_allocator/page_allocator_constants.h b/src/3rdparty/chromium/base/allocator/partition_allocator/page_allocator_constants.h +index 54158270e..a4efaa394 100644 +--- a/src/3rdparty/chromium/base/allocator/partition_allocator/page_allocator_constants.h ++++ b/src/3rdparty/chromium/base/allocator/partition_allocator/page_allocator_constants.h +@@ -14,6 +14,8 @@ namespace base { + static constexpr size_t kPageAllocationGranularityShift = 16; // 64KB + #elif defined(_MIPS_ARCH_LOONGSON) + static constexpr size_t kPageAllocationGranularityShift = 14; // 16KB ++#elif defined(__sw_64__) ++static constexpr size_t kPageAllocationGranularityShift = 13; //8kb + #else + static constexpr size_t kPageAllocationGranularityShift = 12; // 4KB + #endif +diff --git a/src/3rdparty/chromium/base/allocator/partition_allocator/partition_alloc_constants.h b/src/3rdparty/chromium/base/allocator/partition_allocator/partition_alloc_constants.h +index b6efa1f2b..03a651ee7 100644 +--- a/src/3rdparty/chromium/base/allocator/partition_allocator/partition_alloc_constants.h ++++ b/src/3rdparty/chromium/base/allocator/partition_allocator/partition_alloc_constants.h +@@ -41,6 +41,8 @@ static const size_t kPartitionPageShift = 18; // 256KB + static const size_t kPartitionPageShift = 16; // 64 
KiB + #elif defined(ARCH_CPU_PPC64) + static const size_t kPartitionPageShift = 18; // 256 KiB ++#elif defined(__sw_64__) ++static const size_t kPartitionPageShift = 15; // 16 KiB + #else + static const size_t kPartitionPageShift = 14; // 16 KiB + #endif +diff --git a/src/3rdparty/chromium/base/process/launch_posix.cc b/src/3rdparty/chromium/base/process/launch_posix.cc +index 9b7573fdc..e8b1d5571 100644 +--- a/src/3rdparty/chromium/base/process/launch_posix.cc ++++ b/src/3rdparty/chromium/base/process/launch_posix.cc +@@ -703,7 +703,7 @@ NOINLINE pid_t CloneAndLongjmpInChild(unsigned long flags, + alignas(16) char stack_buf[PTHREAD_STACK_MIN]; + #if defined(ARCH_CPU_X86_FAMILY) || defined(ARCH_CPU_ARM_FAMILY) || \ + defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_S390_FAMILY) || \ +- defined(ARCH_CPU_PPC64_FAMILY) ++ defined(ARCH_CPU_PPC64_FAMILY) || defined(ARCH_CPU_SW64) + // The stack grows downward. + void* stack = stack_buf + sizeof(stack_buf); + #else +diff --git a/src/3rdparty/chromium/base/third_party/double_conversion/double-conversion/utils.h b/src/3rdparty/chromium/base/third_party/double_conversion/double-conversion/utils.h +index 471c3da84..58c31f1b8 100644 +--- a/src/3rdparty/chromium/base/third_party/double_conversion/double-conversion/utils.h ++++ b/src/3rdparty/chromium/base/third_party/double_conversion/double-conversion/utils.h +@@ -99,7 +99,7 @@ int main(int argc, char** argv) { + #if defined(_M_X64) || defined(__x86_64__) || \ + defined(__ARMEL__) || defined(__avr32__) || defined(_M_ARM) || defined(_M_ARM64) || \ + defined(__hppa__) || defined(__ia64__) || \ +- defined(__mips__) || \ ++ defined(__mips__) || defined(__sw_64__) || \ + defined(__nios2__) || \ + defined(__powerpc__) || defined(__ppc__) || defined(__ppc64__) || \ + defined(_POWER) || defined(_ARCH_PPC) || defined(_ARCH_PPC64) || \ +diff --git a/src/3rdparty/chromium/build/build_config.h b/src/3rdparty/chromium/build/build_config.h +index d3cdd2db4..09687bb0f 100644 +--- a/src/3rdparty/chromium/build/build_config.h ++++ b/src/3rdparty/chromium/build/build_config.h +@@ -166,6 +166,11 @@ + #define ARCH_CPU_32_BITS 1 + #define ARCH_CPU_BIG_ENDIAN 1 + #endif ++#elif defined(__sw_64__) ++#define ARCH_CPU_SW64_FAMILY 1 ++#define ARCH_CPU_SW64 1 ++#define ARCH_CPU_64_BITS 1 ++#define ARCH_CPU_LITTLE_ENDIAN 1 + #else + #error Please add support for your architecture in build/build_config.h + #endif +diff --git a/src/3rdparty/chromium/build/config/sw.gni b/src/3rdparty/chromium/build/config/sw.gni +new file mode 100644 +index 000000000..2dcf19e52 +--- /dev/null ++++ b/src/3rdparty/chromium/build/config/sw.gni +@@ -0,0 +1,38 @@ ++# Copyright 2015 The Chromium Authors. All rights reserved. ++# Use of this source code is governed by a BSD-style license that ++# found in the LICENSE file. ++ ++import("//build/config/v8_target_cpu.gni") ++ ++# NOT SURE, TODO at BUILD.gn ++if (current_cpu == "sw_64") { ++ declare_args() { ++ # MIPS arch variant. Possible values are: ++ # "r1" ++ # "r2" ++ # "r6" ++ # "loongson3" ++ sw_arch_variant = "r1" ++ ++ # MIPS DSP ASE revision. Possible values are: ++ # 0: unavailable ++ # 1: revision 1 ++ # 2: revision 2 ++ sw_dsp_rev = 0 ++ ++ # MIPS SIMD Arch compilation flag. ++ sw_use_msa = false ++ ++ # MIPS floating-point ABI. Possible values are: ++ # "hard": sets the GCC -mhard-float option. ++ # "soft": sets the GCC -msoft-float option. ++ sw_float_abi = "hard" ++ ++ # MIPS32 floating-point register width. Possible values are: ++ # "fp32": sets the GCC -mfp32 option. 
++ # "fp64": sets the GCC -mfp64 option. ++ # "fpxx": sets the GCC -mfpxx option. ++ sw_fpu_mode = "fp32" ++ } ++} ++ +diff --git a/src/3rdparty/chromium/media/media_options.gni b/src/3rdparty/chromium/media/media_options.gni +index 011bd47ca..756749b43 100644 +--- a/src/3rdparty/chromium/media/media_options.gni ++++ b/src/3rdparty/chromium/media/media_options.gni +@@ -93,7 +93,7 @@ declare_args() { + # are combined and we could override more logging than expected. + enable_logging_override = !use_jumbo_build && is_chromecast + +- enable_dav1d_decoder = !is_android && !is_ios ++ enable_dav1d_decoder = !is_android && !is_ios && target_cpu != "sw_64" + + # Enable browser managed persistent metadata storage for EME persistent + # session and persistent usage record session. +diff --git a/src/3rdparty/chromium/sandbox/features.gni b/src/3rdparty/chromium/sandbox/features.gni +index 09280d35f..90cbefa36 100644 +--- a/src/3rdparty/chromium/sandbox/features.gni ++++ b/src/3rdparty/chromium/sandbox/features.gni +@@ -11,6 +11,6 @@ import("//build/config/nacl/config.gni") + use_seccomp_bpf = (is_linux || is_android) && + (current_cpu == "x86" || current_cpu == "x64" || + current_cpu == "arm" || current_cpu == "arm64" || +- current_cpu == "mipsel" || current_cpu == "mips64el") ++ current_cpu == "mipsel" || current_cpu == "mips64el" || current_cpu == "sw_64" ) + + use_seccomp_bpf = use_seccomp_bpf || is_nacl_nonsfi +diff --git a/src/3rdparty/chromium/sandbox/linux/bpf_dsl/linux_syscall_ranges.h b/src/3rdparty/chromium/sandbox/linux/bpf_dsl/linux_syscall_ranges.h +index 313511f22..9978fe567 100644 +--- a/src/3rdparty/chromium/sandbox/linux/bpf_dsl/linux_syscall_ranges.h ++++ b/src/3rdparty/chromium/sandbox/linux/bpf_dsl/linux_syscall_ranges.h +@@ -56,6 +56,11 @@ + #define MAX_PUBLIC_SYSCALL __NR_syscalls + #define MAX_SYSCALL MAX_PUBLIC_SYSCALL + ++#elif defined(__sw_64__) ++#define MIN_SYSCALL 0u ++#define MAX_PUBLIC_SYSCALL 514u ++#define MAX_SYSCALL MAX_PUBLIC_SYSCALL ++ + #else + #error "Unsupported architecture" + #endif +diff --git a/src/3rdparty/chromium/sandbox/linux/bpf_dsl/seccomp_macros.h b/src/3rdparty/chromium/sandbox/linux/bpf_dsl/seccomp_macros.h +index 1a407b952..5dda20088 100644 +--- a/src/3rdparty/chromium/sandbox/linux/bpf_dsl/seccomp_macros.h ++++ b/src/3rdparty/chromium/sandbox/linux/bpf_dsl/seccomp_macros.h +@@ -245,6 +245,66 @@ struct regs_struct { + #define SECCOMP_PT_PARM3(_regs) (_regs).REG_a2 + #define SECCOMP_PT_PARM4(_regs) (_regs).REG_a3 + ++#elif defined(__sw_64__) ++#define SECCOMP_ARCH AUDIT_ARCH_SW64 ++#define SYSCALL_EIGHT_ARGS ++// NOT SURE ++// See in the Linux kernel. ++#define SECCOMP_REG(_ctx, _reg) ((_ctx)->uc_mcontext.sc_regs[_reg]) ++// Based on MIPS n64 ABI syscall convention. ++// On MIPS, when an indirect syscall is being made (syscall(__NR_foo)), ++// the real identifier (__NR_foo) is not in v0, but in a0. 
++#define SECCOMP_RESULT(_ctx) SECCOMP_REG(_ctx, 2) ++#define SECCOMP_SYSCALL(_ctx) SECCOMP_REG(_ctx, 2) ++#define SECCOMP_IP(_ctx) (_ctx)->uc_mcontext.sc_pc ++#define SECCOMP_PARM1(_ctx) SECCOMP_REG(_ctx, 4) ++#define SECCOMP_PARM2(_ctx) SECCOMP_REG(_ctx, 5) ++#define SECCOMP_PARM3(_ctx) SECCOMP_REG(_ctx, 6) ++#define SECCOMP_PARM4(_ctx) SECCOMP_REG(_ctx, 7) ++#define SECCOMP_PARM5(_ctx) SECCOMP_REG(_ctx, 8) ++#define SECCOMP_PARM6(_ctx) SECCOMP_REG(_ctx, 9) ++#define SECCOMP_PARM7(_ctx) SECCOMP_REG(_ctx, 10) ++#define SECCOMP_PARM8(_ctx) SECCOMP_REG(_ctx, 11) ++#define SECCOMP_NR_IDX (offsetof(struct arch_seccomp_data, nr)) ++#define SECCOMP_ARCH_IDX (offsetof(struct arch_seccomp_data, arch)) ++#define SECCOMP_IP_MSB_IDX (offsetof(struct arch_seccomp_data, \ ++ instruction_pointer) + 4) ++#define SECCOMP_IP_LSB_IDX (offsetof(struct arch_seccomp_data, \ ++ instruction_pointer) + 0) ++#define SECCOMP_ARG_MSB_IDX(nr) (offsetof(struct arch_seccomp_data, args) + \ ++ 8*(nr) + 4) ++#define SECCOMP_ARG_LSB_IDX(nr) (offsetof(struct arch_seccomp_data, args) + \ ++ 8*(nr) + 0) ++ ++// On MIPS we don't have structures like user_regs or user_regs_struct in ++// sys/user.h that we could use, so we just define regs_struct directly. ++struct regs_struct { ++ unsigned long long regs[32]; ++}; ++ ++#define REG_a7 regs[11] ++#define REG_a6 regs[10] ++#define REG_a5 regs[9] ++#define REG_a4 regs[8] ++#define REG_a3 regs[7] ++#define REG_a2 regs[6] ++#define REG_a1 regs[5] ++#define REG_a0 regs[4] ++#define REG_v1 regs[3] ++#define REG_v0 regs[2] ++ ++#define SECCOMP_PT_RESULT(_regs) (_regs).REG_v0 ++#define SECCOMP_PT_SYSCALL(_regs) (_regs).REG_v0 ++#define SECCOMP_PT_PARM1(_regs) (_regs).REG_a0 ++#define SECCOMP_PT_PARM2(_regs) (_regs).REG_a1 ++#define SECCOMP_PT_PARM3(_regs) (_regs).REG_a2 ++#define SECCOMP_PT_PARM4(_regs) (_regs).REG_a3 ++#define SECCOMP_PT_PARM5(_regs) (_regs).REG_a4 ++#define SECCOMP_PT_PARM6(_regs) (_regs).REG_a5 ++#define SECCOMP_PT_PARM7(_regs) (_regs).REG_a6 ++#define SECCOMP_PT_PARM8(_regs) (_regs).REG_a7 ++ ++ + #elif defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_64_BITS) + #define SECCOMP_ARCH AUDIT_ARCH_MIPSEL64 + #define SYSCALL_EIGHT_ARGS +diff --git a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc +index 5e0131ac4..38d36075d 100644 +--- a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc ++++ b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc +@@ -36,7 +36,7 @@ + #include + #include + #if defined(OS_LINUX) && !defined(OS_CHROMEOS) && !defined(__arm__) && \ +- !defined(__aarch64__) && !defined(PTRACE_GET_THREAD_AREA) ++ !defined(__aarch64__) && !defined(PTRACE_GET_THREAD_AREA) && !defined(__sw_64__) + // Also include asm/ptrace-abi.h since ptrace.h in older libc (for instance + // the one in Ubuntu 16.04 LTS) is missing PTRACE_GET_THREAD_AREA. 
+ // asm/ptrace-abi.h doesn't exist on arm32 and PTRACE_GET_THREAD_AREA isn't +@@ -418,7 +418,7 @@ ResultExpr RestrictPrlimitToGetrlimit(pid_t target_pid) { + ResultExpr RestrictPtrace() { + const Arg request(0); + return Switch(request).CASES(( +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__sw_64__) + PTRACE_GETREGS, + PTRACE_GETFPREGS, + #if defined(TRACE_GET_THREAD_AREA) +diff --git a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc +index d9d18822f..cae75fe12 100644 +--- a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc ++++ b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc +@@ -87,7 +87,9 @@ bool SyscallSets::IsFileSystem(int sysno) { + #if defined(__i386__) || defined(__arm__) + case __NR_chown32: + #endif ++#if !defined(__sw_64__) + case __NR_creat: ++#endif + case __NR_futimesat: // Should be called utimesat ? + case __NR_lchown: + case __NR_link: +@@ -236,16 +238,18 @@ bool SyscallSets::IsDeniedFileSystemAccessViaFd(int sysno) { + bool SyscallSets::IsGetSimpleId(int sysno) { + switch (sysno) { + case __NR_capget: ++#if !defined(__sw_64__) + case __NR_getegid: + case __NR_geteuid: + case __NR_getgid: ++ case __NR_getuid: ++ case __NR_getppid: ++#endif + case __NR_getgroups: + case __NR_getpid: +- case __NR_getppid: + case __NR_getresgid: + case __NR_getsid: + case __NR_gettid: +- case __NR_getuid: + case __NR_getresuid: + #if defined(__i386__) || defined(__arm__) + case __NR_getegid32: +@@ -630,7 +634,7 @@ bool SyscallSets::IsSeccomp(int sysno) { + bool SyscallSets::IsAllowedBasicScheduler(int sysno) { + switch (sysno) { + case __NR_sched_yield: +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__sw_64__) + case __NR_pause: + #endif + case __NR_nanosleep: +@@ -701,7 +705,9 @@ bool SyscallSets::IsFsControl(int sysno) { + (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) + case __NR_umount: + #endif ++#if !defined(__sw_64__) + case __NR_umount2: ++#endif + return true; + default: + return false; +diff --git a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf/syscall.cc b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf/syscall.cc +index 34edabd2b..aadbc88de 100644 +--- a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf/syscall.cc ++++ b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf/syscall.cc +@@ -16,7 +16,7 @@ namespace sandbox { + namespace { + + #if defined(ARCH_CPU_X86_FAMILY) || defined(ARCH_CPU_ARM_FAMILY) || \ +- defined(ARCH_CPU_MIPS_FAMILY) ++ defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_SW64_FAMILY) + // Number that's not currently used by any Linux kernel ABIs. + const int kInvalidSyscallNumber = 0x351d3; + #else +@@ -310,6 +310,10 @@ asm(// We need to be able to tell the kernel exactly where we made a + "2:ret\n" + ".cfi_endproc\n" + ".size SyscallAsm, .-SyscallAsm\n" ++#elif defined(__sw_64__) ++ ".text\n" ++ ".option pic2\n" ++ "ret\n" + #endif + ); // asm + +@@ -317,7 +321,7 @@ asm(// We need to be able to tell the kernel exactly where we made a + extern "C" { + intptr_t SyscallAsm(intptr_t nr, const intptr_t args[6]); + } +-#elif defined(__mips__) ++#elif defined(__mips__) || defined(__sw_64__) + extern "C" { + intptr_t SyscallAsm(intptr_t nr, const intptr_t args[8]); + } +@@ -351,7 +355,7 @@ intptr_t Syscall::Call(int nr, + + // TODO(nedeljko): Enable use of more than six parameters on architectures + // where that makes sense. 
+-#if defined(__mips__) ++#if defined(__mips__) || defined(__sw_64__) + const intptr_t args[8] = {p0, p1, p2, p3, p4, p5, p6, p7}; + #else + DCHECK_EQ(p6, 0) << " Support for syscalls with more than six arguments not " +@@ -405,7 +409,7 @@ intptr_t Syscall::Call(int nr, + ); + ret = inout; + } +-#elif defined(__mips__) ++#elif defined(__mips__) || defined(__sw_64__) + intptr_t err_status; + intptr_t ret = Syscall::SandboxSyscallRaw(nr, args, &err_status); + +@@ -434,7 +438,7 @@ intptr_t Syscall::Call(int nr, + } + + void Syscall::PutValueInUcontext(intptr_t ret_val, ucontext_t* ctx) { +-#if defined(__mips__) ++#if defined(__mips__) || defined(__sw_64__) + // Mips ABI states that on error a3 CPU register has non zero value and if + // there is no error, it should be zero. + if (ret_val <= -1 && ret_val >= -4095) { +@@ -476,6 +480,16 @@ intptr_t Syscall::SandboxSyscallRaw(int nr, + + return ret; + } ++ ++#elif defined(__sw_64__) ++// NOT SURE TODO ++intptr_t Syscall::SandboxSyscallRaw(int nr, ++ const intptr_t* args, ++ intptr_t* err_ret) { ++ register intptr_t ret = nr; ++ return ret; ++} ++ + #endif // defined(__mips__) + + } // namespace sandbox +diff --git a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf/syscall.h b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf/syscall.h +index 3b02a6723..99d4eef4a 100644 +--- a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf/syscall.h ++++ b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf/syscall.h +@@ -143,7 +143,7 @@ class SANDBOX_EXPORT Syscall { + intptr_t p6, + intptr_t p7); + +-#if defined(__mips__) ++#if defined(__mips__) || defined(__sw_64__) + // This function basically does on MIPS what SandboxSyscall() is doing on + // other architectures. However, because of specificity of MIPS regarding + // handling syscall errors, SandboxSyscall() is made as a wrapper for this +diff --git a/src/3rdparty/chromium/sandbox/linux/services/credentials.cc b/src/3rdparty/chromium/sandbox/linux/services/credentials.cc +index d7b5d8c44..f74b0300b 100644 +--- a/src/3rdparty/chromium/sandbox/linux/services/credentials.cc ++++ b/src/3rdparty/chromium/sandbox/linux/services/credentials.cc +@@ -81,7 +81,7 @@ bool ChrootToSafeEmptyDir() { + pid_t pid = -1; + alignas(16) char stack_buf[PTHREAD_STACK_MIN]; + #if defined(ARCH_CPU_X86_FAMILY) || defined(ARCH_CPU_ARM_FAMILY) || \ +- defined(ARCH_CPU_MIPS_FAMILY) ++ defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_SW64_FAMILY) + // The stack grows downward. 
+ void* stack = stack_buf + sizeof(stack_buf); + #else +diff --git a/src/3rdparty/chromium/sandbox/linux/system_headers/linux_seccomp.h b/src/3rdparty/chromium/sandbox/linux/system_headers/linux_seccomp.h +index a60fe2ad3..a48e4fa1e 100644 +--- a/src/3rdparty/chromium/sandbox/linux/system_headers/linux_seccomp.h ++++ b/src/3rdparty/chromium/sandbox/linux/system_headers/linux_seccomp.h +@@ -29,6 +29,9 @@ + #ifndef EM_AARCH64 + #define EM_AARCH64 183 + #endif ++#ifndef EM_SW_64 ++#define EM_SW_64 0x9906 ++#endif + + #ifndef __AUDIT_ARCH_64BIT + #define __AUDIT_ARCH_64BIT 0x80000000 +@@ -42,6 +45,10 @@ + #ifndef AUDIT_ARCH_I386 + #define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE) + #endif ++#ifndef AUDIT_ARCH_SW64 ++#define AUDIT_ARCH_SW64 (EM_SW_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) ++#endif ++ + #ifndef AUDIT_ARCH_X86_64 + #define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) + #endif +diff --git a/src/3rdparty/chromium/sandbox/linux/system_headers/linux_signal.h b/src/3rdparty/chromium/sandbox/linux/system_headers/linux_signal.h +index f5a736761..297439cc9 100644 +--- a/src/3rdparty/chromium/sandbox/linux/system_headers/linux_signal.h ++++ b/src/3rdparty/chromium/sandbox/linux/system_headers/linux_signal.h +@@ -60,6 +60,30 @@ + #define LINUX_SA_RESTART 0x10000000 + + #define LINUX_SIG_DFL 0 ++#elif defined(__sw_64__) ++ ++#define LINUX_SIGHUP 1 ++#define LINUX_SIGINT 2 ++#define LINUX_SIGQUIT 3 ++#define LINUX_SIGABRT 6 ++#define LINUX_SIGBUS 10 ++#define LINUX_SIGSEGV 11 ++#define LINUX_SIGSYS 12 ++#define LINUX_SIGPIPE 13 ++#define LINUX_SIGTERM 15 ++#define LINUX_SIGUSR1 30 ++#define LINUX_SIGUSR2 31 ++#define LINUX_SIGCHLD 20 ++ ++#define LINUX_SIG_BLOCK 1 ++#define LINUX_SIG_UNBLOCK 2 ++ ++#define LINUX_SA_SIGINFO 0x00000040 ++#define LINUX_SA_NODEFER 0x00000008 ++#define LINUX_SA_RESTART 0x00000002 ++ ++#define LINUX_SIG_DFL 0 ++ + + #else + #error "Unsupported platform" +diff --git a/src/3rdparty/chromium/sandbox/linux/system_headers/linux_syscalls.h b/src/3rdparty/chromium/sandbox/linux/system_headers/linux_syscalls.h +index 2b78a0cc3..b9db79a98 100644 +--- a/src/3rdparty/chromium/sandbox/linux/system_headers/linux_syscalls.h ++++ b/src/3rdparty/chromium/sandbox/linux/system_headers/linux_syscalls.h +@@ -34,6 +34,10 @@ + #if defined(__aarch64__) + #include "sandbox/linux/system_headers/arm64_linux_syscalls.h" + #endif ++#if defined(__sw_64__) ++#include "sandbox/linux/system_headers/sw_64_linux_syscalls.h" ++#endif ++ + + #endif // SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_SYSCALLS_H_ + +diff --git a/src/3rdparty/chromium/sandbox/linux/system_headers/linux_ucontext.h b/src/3rdparty/chromium/sandbox/linux/system_headers/linux_ucontext.h +index 22ce78027..468f811a3 100644 +--- a/src/3rdparty/chromium/sandbox/linux/system_headers/linux_ucontext.h ++++ b/src/3rdparty/chromium/sandbox/linux/system_headers/linux_ucontext.h +@@ -11,6 +11,8 @@ + #include "sandbox/linux/system_headers/arm_linux_ucontext.h" + #elif defined(__i386__) + #include "sandbox/linux/system_headers/i386_linux_ucontext.h" ++#elif defined(__sw_64__) ++#include "sandbox/linux/system_headers/sw_64_linux_ucontext.h" + #else + #error "No support for your architecture in PNaCl header" + #endif +diff --git a/src/3rdparty/chromium/sandbox/linux/system_headers/sw_64_linux_syscalls.h b/src/3rdparty/chromium/sandbox/linux/system_headers/sw_64_linux_syscalls.h +new file mode 100644 +index 000000000..abb1b1158 +--- /dev/null ++++ b/src/3rdparty/chromium/sandbox/linux/system_headers/sw_64_linux_syscalls.h +@@ -0,0 +1,32 
@@ ++// Copyright 2014 The Chromium Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++// Generated from the Linux kernel's calls.S. ++#ifndef SANDBOX_LINUX_SYSTEM_HEADERS_SW_64_LINUX_SYSCALLS_H_ ++#define SANDBOX_LINUX_SYSTEM_HEADERS_SW_64_LINUX_SYSCALLS_H_ ++ ++#if !defined(__sw_64__) ++#error "Including header on wrong architecture" ++#endif ++ ++// __NR_osf_syscall, is defined in . ++#include ++ ++//#define __NR_creat Unknown ++//#define __NR_getegid Ignore ++//#define __NR_geteuid Ignore ++//#define __NR_getgid Ignore ++ ++#ifndef __NR_getpid ++#define __NR_getpid 0x9e ++#endif ++ ++//#define __NR_getppid Ignore ++//#define __NR_getuid Ignore ++//#define __NR_pause Ignore ++//#define __NR_umount2 Ignore ++ ++#endif ++ ++ +diff --git a/src/3rdparty/chromium/sandbox/linux/system_headers/sw_64_linux_ucontext.h b/src/3rdparty/chromium/sandbox/linux/system_headers/sw_64_linux_ucontext.h +new file mode 100644 +index 000000000..098169781 +--- /dev/null ++++ b/src/3rdparty/chromium/sandbox/linux/system_headers/sw_64_linux_ucontext.h +@@ -0,0 +1,36 @@ ++// Copyright 2014 The Chromium Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#ifndef SANDBOX_LINUX_SYSTEM_HEADERS_SW64_LINUX_UCONTEXT_H_ ++#define SANDBOX_LINUX_SYSTEM_HEADERS_SW64_LINUX_UCONTEXT_H_ ++ ++#include ++ ++// This is mostly copied from breakpad (common/android/include/sys/ucontext.h), ++// except we do use sigset_t for uc_sigmask instead of a custom type. ++#if !defined(__BIONIC_HAVE_UCONTEXT_T) ++// Ensure that 'stack_t' is defined. ++#include ++ ++// We also need greg_t for the sandbox, include it in this header as well. ++typedef unsigned long greg_t; ++ ++typedef struct sigcontext mcontext_t; ++ ++typedef struct ucontext { ++ uint32_t uc_flags; ++ struct ucontext* uc_link; ++ stack_t uc_stack; ++ mcontext_t uc_mcontext; ++ sigset_t uc_sigmask; ++ // Other fields are not used by Google Breakpad. Don't define them. 
++} ucontext_t; ++ ++#else ++#include ++#endif // __BIONIC_HAVE_UCONTEXT_T ++ ++#endif // SANDBOX_LINUX_SYSTEM_HEADERS_SW64_LINUX_UCONTEXT_H_ ++ ++ +diff --git a/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_cdm_policy_linux.cc b/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_cdm_policy_linux.cc +index 9d39e5d5d..3ea8e7d31 100644 +--- a/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_cdm_policy_linux.cc ++++ b/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_cdm_policy_linux.cc +@@ -33,7 +33,7 @@ ResultExpr CdmProcessPolicy::EvaluateSyscall(int sysno) const { + case __NR_ftruncate: + case __NR_fallocate: + #if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined(__sw_64__) + case __NR_getrlimit: + #endif + #if defined(__i386__) || defined(__arm__) +diff --git a/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_gpu_policy_linux.cc b/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_gpu_policy_linux.cc +index 66214334d..677ac79cb 100644 +--- a/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_gpu_policy_linux.cc ++++ b/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_gpu_policy_linux.cc +@@ -54,7 +54,7 @@ ResultExpr GpuProcessPolicy::EvaluateSyscall(int sysno) const { + case __NR_getdents64: + case __NR_ioctl: + return Allow(); +-#if defined(__i386__) || defined(__x86_64__) || defined(__mips__) ++#if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || defined(__sw_64__) + // The Nvidia driver uses flags not in the baseline policy + // (MAP_LOCKED | MAP_EXECUTABLE | MAP_32BIT) + case __NR_mmap: +@@ -75,7 +75,7 @@ ResultExpr GpuProcessPolicy::EvaluateSyscall(int sysno) const { + if (SyscallSets::IsEventFd(sysno)) + return Allow(); + +-#if defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(USE_X11) ++#if defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(USE_X11) && !defined(__sw_64__) + if (SyscallSets::IsSystemVSharedMemory(sysno)) + return Allow(); + #endif +diff --git a/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_utility_policy_linux.cc b/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_utility_policy_linux.cc +index 192081eea..3636bbcd8 100644 +--- a/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_utility_policy_linux.cc ++++ b/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_utility_policy_linux.cc +@@ -34,7 +34,7 @@ ResultExpr UtilityProcessPolicy::EvaluateSyscall(int sysno) const { + case __NR_fdatasync: + case __NR_fsync: + #if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined(__sw_64__) + case __NR_getrlimit: + #endif + #if defined(__i386__) || defined(__arm__) +diff --git a/src/3rdparty/chromium/services/service_manager/sandbox/linux/sandbox_seccomp_bpf_linux.cc b/src/3rdparty/chromium/services/service_manager/sandbox/linux/sandbox_seccomp_bpf_linux.cc +index f5d096b10..31f8262d9 100644 +--- a/src/3rdparty/chromium/services/service_manager/sandbox/linux/sandbox_seccomp_bpf_linux.cc ++++ b/src/3rdparty/chromium/services/service_manager/sandbox/linux/sandbox_seccomp_bpf_linux.cc +@@ -65,7 +65,7 @@ using sandbox::bpf_dsl::ResultExpr; + + // Make sure that seccomp-bpf does not get disabled by mistake. Also make sure + // that we think twice about this when adding a new architecture. 
+-#if !defined(ARCH_CPU_ARM64) && !defined(ARCH_CPU_MIPS64EL) ++#if !defined(ARCH_CPU_ARM64) && !defined(ARCH_CPU_MIPS64EL) && !defined(ARCH_CPU_SW64) + #error "Seccomp-bpf disabled on supported architecture!" + #endif // !defined(ARCH_CPU_ARM64) && !defined(ARCH_CPU_MIPS64EL) + +@@ -127,6 +127,9 @@ std::unique_ptr GetGpuProcessSandbox( + + // Is seccomp BPF globally enabled? + bool SandboxSeccompBPF::IsSeccompBPFDesired() { ++#if defined(__sw_64__) ++ return false; ++#else + #if BUILDFLAG(USE_SECCOMP_BPF) + const base::CommandLine& command_line = + *base::CommandLine::ForCurrentProcess(); +@@ -135,6 +138,7 @@ bool SandboxSeccompBPF::IsSeccompBPFDesired() { + #else + return false; + #endif // USE_SECCOMP_BPF ++#endif + } + + bool SandboxSeccompBPF::SupportsSandbox() { +diff --git a/src/3rdparty/chromium/skia/BUILD.gn b/src/3rdparty/chromium/skia/BUILD.gn +index f5992c505..70a971d3e 100644 +--- a/src/3rdparty/chromium/skia/BUILD.gn ++++ b/src/3rdparty/chromium/skia/BUILD.gn +@@ -21,6 +21,10 @@ if (current_cpu == "arm") { + if (current_cpu == "mipsel" || current_cpu == "mips64el") { + import("//build/config/mips.gni") + } ++if (current_cpu == "sw_64") { ++ import("//build/config/sw.gni") ++} ++ + + skia_support_gpu = !is_ios + skia_support_pdf = !is_ios && enable_basic_printing +@@ -794,6 +798,8 @@ skia_source_set("skia_opts") { + sources = skia_opts.none_sources + } else if (current_cpu == "ppc64") { + sources = skia_opts.none_sources ++ } else if (current_cpu == "sw_64") { ++ sources = skia_opts.none_sources + } else if (current_cpu == "s390x") { + sources = skia_opts.none_sources + } else { +diff --git a/src/3rdparty/chromium/third_party/abseil-cpp/absl/debugging/internal/examine_stack.cc b/src/3rdparty/chromium/third_party/abseil-cpp/absl/debugging/internal/examine_stack.cc +index a3dd893a9..a0018dc2c 100644 +--- a/src/3rdparty/chromium/third_party/abseil-cpp/absl/debugging/internal/examine_stack.cc ++++ b/src/3rdparty/chromium/third_party/abseil-cpp/absl/debugging/internal/examine_stack.cc +@@ -62,6 +62,8 @@ void* GetProgramCounter(void* vuc) { + #elif defined(__x86_64__) + if (16 < ABSL_ARRAYSIZE(context->uc_mcontext.gregs)) + return reinterpret_cast(context->uc_mcontext.gregs[16]); ++#elif defined(__sw_64__) ++ return reinterpret_cast(context->uc_mcontext.sc_pc); + #else + #error "Undefined Architecture." 
+ #endif +diff --git a/src/3rdparty/chromium/third_party/angle/gni/angle.gni b/src/3rdparty/chromium/third_party/angle/gni/angle.gni +index 1c8ad4802..10b506902 100644 +--- a/src/3rdparty/chromium/third_party/angle/gni/angle.gni ++++ b/src/3rdparty/chromium/third_party/angle/gni/angle.gni +@@ -54,7 +54,7 @@ angle_data_dir = "angledata" + declare_args() { + if (current_cpu == "arm64" || current_cpu == "x64" || + current_cpu == "mips64el" || current_cpu == "s390x" || +- current_cpu == "ppc64") { ++ current_cpu == "ppc64" || current_cpu == "sw_64") { + angle_64bit_current_cpu = true + } else if (current_cpu == "arm" || current_cpu == "x86" || + current_cpu == "mipsel" || current_cpu == "s390" || +diff --git a/src/3rdparty/chromium/third_party/blink/renderer/platform/heap/asm/BUILD.gn b/src/3rdparty/chromium/third_party/blink/renderer/platform/heap/asm/BUILD.gn +index fe44daf27..c26723a2d 100644 +--- a/src/3rdparty/chromium/third_party/blink/renderer/platform/heap/asm/BUILD.gn ++++ b/src/3rdparty/chromium/third_party/blink/renderer/platform/heap/asm/BUILD.gn +@@ -38,9 +38,9 @@ if (current_cpu == "x86" || current_cpu == "x64") { + sources = [ "SaveRegisters_mips64.S" ] + } else if (current_cpu == "ppc64") { + sources = [ "SaveRegisters_ppc64.S" ] +- } +- +- if (current_cpu == "arm") { ++ } else if (current_cpu == "sw_64") { ++ sources = [ "SaveRegisters_sw64.S" ] ++ }if (current_cpu == "arm") { + defines = [ "ARM=1" ] + } + } +diff --git a/src/3rdparty/chromium/third_party/blink/renderer/platform/heap/asm/SaveRegisters_sw64.S b/src/3rdparty/chromium/third_party/blink/renderer/platform/heap/asm/SaveRegisters_sw64.S +new file mode 100644 +index 000000000..676eae3c3 +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/blink/renderer/platform/heap/asm/SaveRegisters_sw64.S +@@ -0,0 +1,43 @@ ++// Copyright 2014 The Chromium Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++/* ++ * typedef void (*PushAllRegistersCallback)(ThreadState*, intptr_t*); ++ * extern "C" void PushAllRegisters(ThreadState*, PushAllRegistersCallback) ++ */ ++ ++#include ++ ++.type PushAllRegisters, %function ++.global PushAllRegisters ++.hidden PushAllRegisters ++PushAllRegisters: ++ // Push all callee-saves registers to get them ++ // on the stack for conservative stack scanning. ++ // Reserve space for callee-saved registers and return address. ++ subl sp,64,sp ++ // Save the callee-saved registers and the return address. ++ stl s0,0(sp) ++ stl s1,8(sp) ++ stl s2,16(sp) ++ stl s3,24(sp) ++ stl s4,32(sp) ++ stl s5,40(sp) ++ stl ra,48(sp) ++ // Note: the callee-saved floating point registers do not need to be ++ // copied to the stack, because fp registers never hold heap pointers ++ // and so do not need to be kept visible to the garbage collector. ++ // Pass the two first arguments untouched in a0 and the ++ // stack pointer to the callback. ++ bis $31, a1, t12 ++ bis $31, sp, a1 ++ call ra,(t12),0 ++ // Restore return address, adjust stack and return. ++ // Note: the copied registers do not need to be reloaded here, ++ // because they were preserved by the called routine. 
++ ldl ra,48(sp) ++ addl sp,64,sp ++ ret $31,(ra),0 ++ ++ +diff --git a/src/3rdparty/chromium/third_party/boringssl/src/include/openssl/base.h b/src/3rdparty/chromium/third_party/boringssl/src/include/openssl/base.h +index 8d73f7747..f1a694507 100644 +--- a/src/3rdparty/chromium/third_party/boringssl/src/include/openssl/base.h ++++ b/src/3rdparty/chromium/third_party/boringssl/src/include/openssl/base.h +@@ -114,6 +114,9 @@ extern "C" { + #define OPENSSL_32_BIT + #elif defined(__myriad2__) + #define OPENSSL_32_BIT ++#elif defined(__sw_64__) ++#define OPENSSL_64_BIT ++#define OPENSSL_SW64 + #else + // Note BoringSSL only supports standard 32-bit and 64-bit two's-complement, + // little-endian architectures. Functions will not produce the correct answer +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/raw_context_cpu.h b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/raw_context_cpu.h +index 07d9171a0..2a81a4226 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/raw_context_cpu.h ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/raw_context_cpu.h +@@ -44,6 +44,8 @@ typedef MDRawContextARM RawContextCPU; + typedef MDRawContextARM64_Old RawContextCPU; + #elif defined(__mips__) + typedef MDRawContextMIPS RawContextCPU; ++#elif defined(__sw_64__) ++typedef MDRawContextSW64 RawContextCPU; + #else + #error "This code has not been ported to your platform yet." + #endif +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/thread_info.cc b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/thread_info.cc +index aae1dc13b..c633a27dc 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/thread_info.cc ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/thread_info.cc +@@ -228,6 +228,25 @@ void ThreadInfo::FillCPUContext(RawContextCPU* out) const { + MD_FLOATINGSAVEAREA_ARM64_FPR_COUNT * 16); + } + ++#elif defined(__sw_64__) ++// NOT SURE TODO ++uintptr_t ThreadInfo::GetInstructionPointer() const { ++ return mcontext.sc_pc; ++} ++ ++void ThreadInfo::FillCPUContext(RawContextCPU* out) const { ++ out->context_flags = MD_CONTEXT_SW64_FULL; ++ ++ for (int i = 0; i < MD_CONTEXT_SW64_GPR_COUNT; ++i) ++ out->iregs[i] = mcontext.sc_regs[i]; ++ ++ out->pc = mcontext.sc_pc; ++ ++ for (int i = 0; i < MD_FLOATINGSAVEAREA_SW64_FPR_COUNT; ++i) ++ out->float_save.regs[i] = mcontext.sc_fpregs[i]; ++} ++ ++ + #elif defined(__mips__) + + uintptr_t ThreadInfo::GetInstructionPointer() const { +@@ -279,12 +298,16 @@ void ThreadInfo::GetGeneralPurposeRegisters(void** gp_regs, size_t* size) { + *gp_regs = mcontext.gregs; + if (size) + *size = sizeof(mcontext.gregs); ++#else ++#if defined(__sw_64__) ++ return; + #else + if (gp_regs) + *gp_regs = ®s; + if (size) + *size = sizeof(regs); + #endif ++#endif + } + + void ThreadInfo::GetFloatingPointRegisters(void** fp_regs, size_t* size) { +@@ -294,12 +317,16 @@ void ThreadInfo::GetFloatingPointRegisters(void** fp_regs, size_t* size) { + *fp_regs = &mcontext.fpregs; + if (size) + *size = sizeof(mcontext.fpregs); ++#else ++#if defined(__sw_64__) ++ return; + #else + if (fp_regs) + *fp_regs = &fpregs; + if (size) + *size = sizeof(fpregs); + #endif ++#endif + } + + } // namespace google_breakpad +diff --git 
a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/thread_info.h b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/thread_info.h +index fb216fa6d..bd4692ab3 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/thread_info.h ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/thread_info.h +@@ -68,7 +68,7 @@ struct ThreadInfo { + // Use the structures defined in + struct user_regs_struct regs; + struct user_fpsimd_struct fpregs; +-#elif defined(__mips__) ++#elif defined(__mips__) || defined(__sw_64__) + // Use the structure defined in . + mcontext_t mcontext; + #endif +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/ucontext_reader.cc b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/ucontext_reader.cc +index 6ee6cc1e4..8b0ae1fb0 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/ucontext_reader.cc ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/ucontext_reader.cc +@@ -181,6 +181,18 @@ void UContextReader::FillCPUContext(RawContextCPU *out, const ucontext_t *uc) { + my_memset(&out->float_save.regs, 0, sizeof(out->float_save.regs)); + my_memset(&out->float_save.extra, 0, sizeof(out->float_save.extra)); + } ++#elif defined(__sw_64__) ++ ++uintptr_t UContextReader::GetStackPointer(const ucontext_t* uc) { ++ return uc->uc_mcontext.sc_regs[MD_CONTEXT_SW64_REG_SP]; ++} ++ ++uintptr_t UContextReader::GetInstructionPointer(const ucontext_t* uc) { ++ return uc->uc_mcontext.sc_pc; ++} ++ ++void UContextReader::FillCPUContext(RawContextCPU *out, const ucontext_t *uc) { ++} + + #elif defined(__aarch64__) + +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler.cc b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler.cc +index b895f6d7a..44a5e3222 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler.cc ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler.cc +@@ -86,6 +86,7 @@ + #include + #include + #include ++#include + + #include "common/basictypes.h" + #include "common/linux/linux_libc_support.h" +@@ -104,6 +105,10 @@ + #ifndef PR_SET_PTRACER + #define PR_SET_PTRACER 0x59616d61 + #endif ++#if defined(__sw_64__) ++#define sys_sigaltstack sigaltstack ++#define sys_clone clone ++#endif + + namespace google_breakpad { + +@@ -461,7 +466,7 @@ bool ExceptionHandler::HandleSignal(int /*sig*/, siginfo_t* info, void* uc) { + memcpy(&g_crash_context_.float_state, fp_ptr, + sizeof(g_crash_context_.float_state)); + } +-#elif !defined(__ARM_EABI__) && !defined(__mips__) ++#elif !defined(__ARM_EABI__) && !defined(__mips__) && !defined(__sw_64__) + // FP state is not part of user ABI on ARM Linux. + // In case of MIPS Linux FP state is already part of ucontext_t + // and 'float_state' is not a member of CrashContext. +@@ -701,7 +706,7 @@ bool ExceptionHandler::WriteMinidump() { + } + #endif + +-#if !defined(__ARM_EABI__) && !defined(__aarch64__) && !defined(__mips__) ++#if !defined(__ARM_EABI__) && !defined(__aarch64__) && !defined(__mips__) && !defined(__sw_64__) + // FPU state is not part of ARM EABI ucontext_t. 
+ memcpy(&context.float_state, context.context.uc_mcontext.fpregs, + sizeof(context.float_state)); +@@ -726,6 +731,9 @@ bool ExceptionHandler::WriteMinidump() { + #elif defined(__mips__) + context.siginfo.si_addr = + reinterpret_cast(context.context.uc_mcontext.pc); ++#elif defined(__sw_64__) ++ context.siginfo.si_addr = ++ reinterpret_cast(context.context.uc_mcontext.sc_pc); + #else + #error "This code has not been ported to your platform yet." + #endif +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler.h b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler.h +index f44483ff0..9118368e5 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler.h ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler.h +@@ -192,7 +192,7 @@ class ExceptionHandler { + siginfo_t siginfo; + pid_t tid; // the crashing thread. + ucontext_t context; +-#if !defined(__ARM_EABI__) && !defined(__mips__) ++#if !defined(__ARM_EABI__) && !defined(__mips__) && !defined(__sw_64__) + // #ifdef this out because FP state is not part of user ABI for Linux ARM. + // In case of MIPS Linux FP state is already part of ucontext_t so + // 'float_state' is not required. +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/microdump_writer/microdump_writer.cc b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/microdump_writer/microdump_writer.cc +index fa3c1713a..52f3ffcb5 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/microdump_writer/microdump_writer.cc ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/microdump_writer/microdump_writer.cc +@@ -138,7 +138,7 @@ class MicrodumpWriter { + const MicrodumpExtraInfo& microdump_extra_info, + LinuxDumper* dumper) + : ucontext_(context ? &context->context : NULL), +-#if !defined(__ARM_EABI__) && !defined(__mips__) ++#if !defined(__ARM_EABI__) && !defined(__mips__) && !defined(__sw_64__) && !defined(__sw_64__) + float_state_(context ? 
&context->float_state : NULL), + #endif + dumper_(dumper), +@@ -337,6 +337,8 @@ class MicrodumpWriter { + # else + # error "This mips ABI is currently not supported (n32)" + #endif ++#elif defined(__sw_64__) ++ const char kArch[] = "sw_64"; + #else + #error "This code has not been ported to your platform yet" + #endif +@@ -409,7 +411,7 @@ class MicrodumpWriter { + void DumpCPUState() { + RawContextCPU cpu; + my_memset(&cpu, 0, sizeof(RawContextCPU)); +-#if !defined(__ARM_EABI__) && !defined(__mips__) ++#if !defined(__ARM_EABI__) && !defined(__mips__) && !defined(__sw_64__) + UContextReader::FillCPUContext(&cpu, ucontext_, float_state_); + #else + UContextReader::FillCPUContext(&cpu, ucontext_); +@@ -605,7 +607,7 @@ class MicrodumpWriter { + void* Alloc(unsigned bytes) { return dumper_->allocator()->Alloc(bytes); } + + const ucontext_t* const ucontext_; +-#if !defined(__ARM_EABI__) && !defined(__mips__) ++#if !defined(__ARM_EABI__) && !defined(__mips__) && !defined(__sw_64__) + const google_breakpad::fpstate_t* const float_state_; + #endif + LinuxDumper* dumper_; +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_core_dumper.cc b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_core_dumper.cc +index 415068983..b35052039 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_core_dumper.cc ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_core_dumper.cc +@@ -112,6 +112,10 @@ bool LinuxCoreDumper::GetThreadInfoByIndex(size_t index, ThreadInfo* info) { + #elif defined(__mips__) + stack_pointer = + reinterpret_cast(info->mcontext.gregs[MD_CONTEXT_MIPS_REG_SP]); ++#elif defined(__sw_64__) ++ //NOT SURE ++ stack_pointer = ++ reinterpret_cast(info->mcontext.sc_regs[MD_CONTEXT_SW64_REG_SP]); + #else + #error "This code hasn't been ported to your platform yet." 
+ #endif +@@ -208,6 +212,9 @@ bool LinuxCoreDumper::EnumerateThreads() { + info.mcontext.mdlo = status->pr_reg[EF_LO]; + info.mcontext.mdhi = status->pr_reg[EF_HI]; + info.mcontext.pc = status->pr_reg[EF_CP0_EPC]; ++#elif defined(__sw_64__) ++ for (int i = 0; i <= 31; i++) ++ info.mcontext.sc_regs[i] = status->pr_reg[i]; + #else // __mips__ + memcpy(&info.regs, status->pr_reg, sizeof(info.regs)); + #endif // __mips__ +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_dumper.h b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_dumper.h +index f4a75d906..1c4bece7a 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_dumper.h ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_dumper.h +@@ -62,7 +62,7 @@ namespace google_breakpad { + #if defined(__i386) || defined(__ARM_EABI__) || \ + (defined(__mips__) && _MIPS_SIM == _ABIO32) + typedef Elf32_auxv_t elf_aux_entry; +-#elif defined(__x86_64) || defined(__aarch64__) || \ ++#elif defined(__x86_64) || defined(__aarch64__) || defined(__sw_64__) || \ + (defined(__mips__) && _MIPS_SIM != _ABIO32) + typedef Elf64_auxv_t elf_aux_entry; + #endif +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_ptrace_dumper.cc b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_ptrace_dumper.cc +index e3ddb81a6..3da66154c 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_ptrace_dumper.cc ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_ptrace_dumper.cc +@@ -298,6 +298,9 @@ bool LinuxPtraceDumper::GetThreadInfoByIndex(size_t index, ThreadInfo* info) { + #elif defined(__mips__) + stack_pointer = + reinterpret_cast(info->mcontext.gregs[MD_CONTEXT_MIPS_REG_SP]); ++#elif defined(__sw_64__) ++ stack_pointer = ++ reinterpret_cast(info->mcontext.sc_regs[MD_CONTEXT_SW64_REG_SP]); + #else + #error "This code hasn't been ported to your platform yet." + #endif +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/minidump_writer.cc b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/minidump_writer.cc +index f8cdf2a1c..92efcc0e3 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/minidump_writer.cc ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/minidump_writer.cc +@@ -136,7 +136,7 @@ class MinidumpWriter { + : fd_(minidump_fd), + path_(minidump_path), + ucontext_(context ? &context->context : NULL), +-#if !defined(__ARM_EABI__) && !defined(__mips__) ++#if !defined(__ARM_EABI__) && !defined(__mips__) && !defined(__sw_64__) + float_state_(context ? 
&context->float_state : NULL), + #endif + dumper_(dumper), +@@ -468,7 +468,7 @@ class MinidumpWriter { + if (!cpu.Allocate()) + return false; + my_memset(cpu.get(), 0, sizeof(RawContextCPU)); +-#if !defined(__ARM_EABI__) && !defined(__mips__) ++#if !defined(__ARM_EABI__) && !defined(__mips__) && !defined(__sw_64__) + UContextReader::FillCPUContext(cpu.get(), ucontext_, float_state_); + #else + UContextReader::FillCPUContext(cpu.get(), ucontext_); +@@ -897,7 +897,7 @@ class MinidumpWriter { + dirent->location.rva = 0; + } + +-#if defined(__i386__) || defined(__x86_64__) || defined(__mips__) ++#if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || defined(__sw_64__) + bool WriteCPUInformation(MDRawSystemInfo* sys_info) { + char vendor_id[sizeof(sys_info->cpu.x86_cpu_info.vendor_id) + 1] = {0}; + static const char vendor_id_name[] = "vendor_id"; +@@ -925,6 +925,8 @@ class MinidumpWriter { + # else + # error "This mips ABI is currently not supported (n32)" + #endif ++#elif defined(__sw_64__) ++ MD_CPU_ARCHITECTURE_SW64; + #elif defined(__i386__) + MD_CPU_ARCHITECTURE_X86; + #else +@@ -1333,7 +1335,7 @@ class MinidumpWriter { + const char* path_; // Path to the file where the minidum should be written. + + const ucontext_t* const ucontext_; // also from the signal handler +-#if !defined(__ARM_EABI__) && !defined(__mips__) ++#if !defined(__ARM_EABI__) && !defined(__mips__) && !defined(__sw_64__) + const google_breakpad::fpstate_t* const float_state_; // ditto + #endif + LinuxDumper* dumper_; +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/minidump_writer.h b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/minidump_writer.h +index d1dc33121..aca17ad66 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/minidump_writer.h ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/minidump_writer.h +@@ -47,7 +47,7 @@ class ExceptionHandler; + + #if defined(__aarch64__) + typedef struct fpsimd_context fpstate_t; +-#elif !defined(__ARM_EABI__) && !defined(__mips__) ++#elif !defined(__ARM_EABI__) && !defined(__mips__) && !defined(__sw_64__) + typedef struct _libc_fpstate fpstate_t; + #endif + +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/common/linux/memory_mapped_file.cc b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/common/linux/memory_mapped_file.cc +index 4e938269f..3d902a308 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/common/linux/memory_mapped_file.cc ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/common/linux/memory_mapped_file.cc +@@ -65,7 +65,7 @@ bool MemoryMappedFile::Map(const char* path, size_t offset) { + } + + #if defined(__x86_64__) || defined(__aarch64__) || \ +- (defined(__mips__) && _MIPS_SIM == _ABI64) ++ (defined(__mips__) && _MIPS_SIM == _ABI64) || defined(__sw_64__) + + struct kernel_stat st; + if (sys_fstat(fd, &st) == -1 || st.st_size < 0) { +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/common/memory_allocator.h b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/common/memory_allocator.h +index a3159ea46..7f83cccc6 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/common/memory_allocator.h ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/common/memory_allocator.h +@@ -49,6 +49,12 @@ + #else + #include "third_party/lss/linux_syscall_support.h" + #endif ++#if 
defined(__sw_64__) ++#define sys_mmap mmap ++#define sys_munmap munmap ++#endif ++ ++ + + namespace google_breakpad { + +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/google_breakpad/common/minidump_cpu_sw64.h b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/google_breakpad/common/minidump_cpu_sw64.h +new file mode 100644 +index 000000000..c0f9d181b +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/google_breakpad/common/minidump_cpu_sw64.h +@@ -0,0 +1,165 @@ ++/* Copyright (c) 2013, Google Inc. ++ * All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are ++ * met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above ++ * copyright notice, this list of conditions and the following disclaimer ++ * in the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Google Inc. nor the names of its ++ * contributors may be used to endorse or promote products derived from ++ * this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ ++ ++/* minidump_format.h: A cross-platform reimplementation of minidump-related ++ * portions of DbgHelp.h from the Windows Platform SDK. ++ * ++ * (This is C99 source, please don't corrupt it with C++.) ++ * ++ * This file contains the necessary definitions to read minidump files ++ * produced on SW64. These files may be read on any platform provided ++ * that the alignments of these structures on the processing system are ++ * identical to the alignments of these structures on the producing system. ++ * For this reason, precise-sized types are used. The structures defined ++ * by this file have been laid out to minimize alignment problems by ++ * ensuring that all members are aligned on their natural boundaries. ++ * In some cases, tail-padding may be significant when different ABIs specify ++ * different tail-padding behaviors. To avoid problems when reading or ++ * writing affected structures, MD_*_SIZE macros are provided where needed, ++ * containing the useful size of the structures without padding. ++ * ++ * Structures that are defined by Microsoft to contain a zero-length array ++ * are instead defined here to contain an array with one element, as ++ * zero-length arrays are forbidden by standard C and C++. In these cases, ++ * *_minsize constants are provided to be used in place of sizeof. For a ++ * cleaner interface to these sizes when using C++, see minidump_size.h. 
++ *
++ * These structures are also sufficient to populate minidump files.
++ *
++ * Because precise data type sizes are crucial for this implementation to
++ * function properly and portably, a set of primitive types with known sizes
++ * are used as the basis of each structure defined by this file.
++ *
++ * Author: Chris Dearman
++ */
++
++/*
++ * SW64 support
++ */
++
++#ifndef GOOGLE_BREAKPAD_COMMON_MINIDUMP_CPU_SW64_H__
++#define GOOGLE_BREAKPAD_COMMON_MINIDUMP_CPU_SW64_H__
++
++#define MD_CONTEXT_SW64_GPR_COUNT 32
++#define MD_FLOATINGSAVEAREA_SW64_FPR_COUNT 32
++#define MD_CONTEXT_SW64_DSP_COUNT 0
++
++/*
++ * Note that these structures *do not* map directly to the CONTEXT
++ * structure defined in WinNT.h in the Windows Mobile SDK. That structure
++ * does not accommodate VFPv3, and I'm unsure if it was ever used in the
++ * wild anyway, as Windows CE only seems to produce "cedumps" which
++ * are not exactly minidumps.
++ */
++typedef struct {
++ /* 32 64-bit floating point registers, f0..f31 */
++ uint64_t regs[MD_FLOATINGSAVEAREA_SW64_FPR_COUNT];
++
++ uint32_t fpcsr; /* FPU status register. */
++ uint32_t fir; /* FPU implementation register. */
++} MDFloatingSaveAreaSW64;
++
++typedef struct {
++ /* The next field determines the layout of the structure, and which parts
++ * of it are populated.
++ */
++ uint32_t context_flags;
++ uint32_t _pad0;
++
++ /* 32 64-bit integer registers, r0..r31.
++ * Note the following fixed uses:
++ * r30 is the stack pointer.
++ * r26 is the return address.
++ */
++ uint64_t iregs[MD_CONTEXT_SW64_GPR_COUNT];
++ uint64_t pc;
++
++
++ /* The next field is included with MD_CONTEXT_SW64_FLOATING_POINT. */
++ MDFloatingSaveAreaSW64 float_save;
++
++} MDRawContextSW64;
++
++/* Indices into iregs for registers with a dedicated or conventional
++ * purpose.
++ */
++enum MDSW64RegisterNumbers {
++ MD_CONTEXT_SW64_REG_S0 = 9,
++ MD_CONTEXT_SW64_REG_S1 = 10,
++ MD_CONTEXT_SW64_REG_S2 = 11,
++ MD_CONTEXT_SW64_REG_S3 = 12,
++ MD_CONTEXT_SW64_REG_S4 = 13,
++ MD_CONTEXT_SW64_REG_S5 = 14,
++ MD_CONTEXT_SW64_REG_S6 = 15,
++ MD_CONTEXT_SW64_REG_S7 = 27, //t12
++ MD_CONTEXT_SW64_REG_GP = 29,
++ MD_CONTEXT_SW64_REG_SP = 30,
++ MD_CONTEXT_SW64_REG_FP = 15,
++ MD_CONTEXT_SW64_REG_RA = 26,
++};
++
++/* For (MDRawContextSW64).context_flags. These values indicate the type of
++ * context stored in the structure. */
++/* CONTEXT_SW64 from the Windows CE 5.0 SDK. This value isn't correct
++ * because this bit can be used for flags. Presumably this value was
++ * never actually used in minidumps, but only in "CEDumps" which
++ * are a whole parallel minidump file format for Windows CE.
++ * Therefore, Breakpad defines its own value for SW64 CPUs.
++ */
++#define MD_CONTEXT_SW64 0x00040000
++#define MD_CONTEXT_SW64_INTEGER (MD_CONTEXT_SW64 | 0x00000002)
++#define MD_CONTEXT_SW64_FLOATING_POINT (MD_CONTEXT_SW64 | 0x00000004)
++#define MD_CONTEXT_SW64_DSP (MD_CONTEXT_SW64 | 0x00000008)
++
++#define MD_CONTEXT_SW64_FULL (MD_CONTEXT_SW64_INTEGER | \
++ MD_CONTEXT_SW64_FLOATING_POINT | \
++ MD_CONTEXT_SW64_DSP)
++
++#define MD_CONTEXT_SW64_ALL (MD_CONTEXT_SW64_INTEGER | \
++ MD_CONTEXT_SW64_FLOATING_POINT | \
++ MD_CONTEXT_SW64_DSP)
++
++/**
++ * Breakpad defines for SW6464
++ */
++#define MD_CONTEXT_SW6464 0x00080000
++#define MD_CONTEXT_SW6464_INTEGER (MD_CONTEXT_SW6464 | 0x00000002)
++#define MD_CONTEXT_SW6464_FLOATING_POINT (MD_CONTEXT_SW6464 | 0x00000004)
++#define MD_CONTEXT_SW6464_DSP (MD_CONTEXT_SW6464 | 0x00000008)
++
++#define MD_CONTEXT_SW6464_FULL (MD_CONTEXT_SW6464_INTEGER | \
++ MD_CONTEXT_SW6464_FLOATING_POINT | \
++ MD_CONTEXT_SW6464_DSP)
++
++#define MD_CONTEXT_SW6464_ALL (MD_CONTEXT_SW6464_INTEGER | \
++ MD_CONTEXT_SW6464_FLOATING_POINT | \
++ MD_CONTEXT_SW6464_DSP)
++
++#endif // GOOGLE_BREAKPAD_COMMON_MINIDUMP_CPU_SW64_H__
++
+diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/google_breakpad/common/minidump_format.h b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/google_breakpad/common/minidump_format.h
+index 6eceddbbf..202aec622 100644
+--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/google_breakpad/common/minidump_format.h
++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/google_breakpad/common/minidump_format.h
+@@ -660,6 +660,7 @@ typedef enum {
+ MD_CPU_ARCHITECTURE_PPC64 = 0x8002, /* Breakpad-defined value for PPC64 */
+ MD_CPU_ARCHITECTURE_ARM64_OLD = 0x8003, /* Breakpad-defined value for ARM64 */
+ MD_CPU_ARCHITECTURE_MIPS64 = 0x8004, /* Breakpad-defined value for MIPS64 */
++ MD_CPU_ARCHITECTURE_SW64 = 0x8005, /* Breakpad-defined value for SW64 */
+ MD_CPU_ARCHITECTURE_UNKNOWN = 0xffff /* PROCESSOR_ARCHITECTURE_UNKNOWN */
+ } MDCPUArchitecture;
+
+diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/minidump/minidump_misc_info_writer.cc b/src/3rdparty/chromium/third_party/crashpad/crashpad/minidump/minidump_misc_info_writer.cc
+index a13407605..60db94824 100644
+--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/minidump/minidump_misc_info_writer.cc
++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/minidump/minidump_misc_info_writer.cc
+@@ -126,6 +126,8 @@ std::string MinidumpMiscInfoDebugBuildString() {
+ static constexpr char kCPU[] = "mips";
+ #elif defined(ARCH_CPU_MIPS64EL)
+ static constexpr char kCPU[] = "mips64";
++#elif defined(ARCH_CPU_SW64_FAMILY)
++ static constexpr char kCPU[] = "sw_64";
+ #else
+ #error define kCPU for this CPU
+ #endif
+diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/capture_memory.cc b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/capture_memory.cc
+index a51626ccd..0894ef36a 100644
+--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/capture_memory.cc
++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/capture_memory.cc
+@@ -111,6 +111,10 @@ void CaptureMemory::PointedToByContext(const CPUContext& context,
+ for (size_t i = 0; i < base::size(context.mipsel->regs); ++i) {
+ MaybeCaptureMemoryAround(delegate, context.mipsel->regs[i]);
+ }
++#elif defined(ARCH_CPU_SW64_FAMILY)
++ for (size_t i = 0; i < base::size(context.sw64->regs); ++i) {
++ MaybeCaptureMemoryAround(delegate, context.sw64->regs[i]);
++ }
+ #else
+ #error
Port. + #endif +diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/cpu_architecture.h b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/cpu_architecture.h +index 811a72095..0029e66a3 100644 +--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/cpu_architecture.h ++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/cpu_architecture.h +@@ -43,7 +43,9 @@ enum CPUArchitecture { + kCPUArchitectureMIPSEL, + + //! \brief 64-bit MIPSEL. +- kCPUArchitectureMIPS64EL ++ kCPUArchitectureMIPS64EL, ++ ++ kCPUArchitectureSW64EL + }; + + } // namespace crashpad +diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/cpu_context.h b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/cpu_context.h +index fb23c4679..b812777c8 100644 +--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/cpu_context.h ++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/cpu_context.h +@@ -351,6 +351,29 @@ struct CPUContextMIPS64 { + uint64_t fpcsr; + uint64_t fir; + }; ++//! \brief A context structure carrying SW64 CPU state. ++struct CPUContextSW64 { ++ uint64_t regs[32]; ++ uint64_t mdlo; ++ uint64_t mdhi; ++ uint64_t cp0_epc; ++ uint64_t cp0_badvaddr; ++ uint64_t cp0_status; ++ uint64_t cp0_cause; ++ uint64_t hi[3]; ++ uint64_t lo[3]; ++ uint64_t dsp_control; ++ union { ++ double dregs[32]; ++ struct { ++ float _fp_fregs; ++ uint32_t _fp_pad; ++ } fregs[32]; ++ } fpregs; ++ uint64_t fpcsr; ++ uint64_t fir; ++}; ++ + + //! \brief A context structure capable of carrying the context of any supported + //! CPU architecture. +@@ -382,6 +405,7 @@ struct CPUContext { + CPUContextARM64* arm64; + CPUContextMIPS* mipsel; + CPUContextMIPS64* mips64; ++ CPUContextSW64* sw64; + }; + }; + +diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/cpu_context_linux.h b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/cpu_context_linux.h +index 9f46a4897..8088736de 100644 +--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/cpu_context_linux.h ++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/cpu_context_linux.h +@@ -174,6 +174,44 @@ void InitializeCPUContextMIPS( + + #endif // ARCH_CPU_MIPS_FAMILY || DOXYGEN + ++#if defined(ARCH_CPU_SW64_FAMILY) || DOXYGEN ++ ++//! \brief Initializes a CPUContextMIPS structure from native context ++//! structures on Linux. ++//! ++//! This function has template specializations for MIPSEL and MIPS64EL ++//! architecture contexts, using ContextTraits32 or ContextTraits64 as template ++//! parameter, respectively. ++//! ++//! \param[in] thread_context The native thread context. ++//! \param[in] float_context The native float context. ++//! \param[out] context The CPUContextMIPS structure to initialize. 
++template ++void InitializeCPUContextSW64( ++ const typename Traits::SignalThreadContext& thread_context, ++ const typename Traits::SignalFloatContext& float_context, ++ typename Traits::CPUContext* context) { ++ static_assert(sizeof(context->regs) == sizeof(thread_context.regs), ++ "registers size mismatch"); ++ static_assert(sizeof(context->fpregs) == sizeof(float_context.fpregs), ++ "fp registers size mismatch"); ++ memcpy(&context->regs, &thread_context.regs, sizeof(context->regs)); ++ context->mdlo = thread_context.lo; ++ context->mdhi = thread_context.hi; ++ context->cp0_epc = thread_context.cp0_epc; ++ context->cp0_badvaddr = thread_context.cp0_badvaddr; ++ context->cp0_status = thread_context.cp0_status; ++ context->cp0_cause = thread_context.cp0_cause; ++ ++ memcpy(&context->fpregs, &float_context.fpregs, sizeof(context->fpregs)); ++ context->fpcsr = float_context.fpcsr; ++ context->fir = float_context.fpu_id; ++}; ++ ++#endif // ARCH_CPU_MIPS_FAMILY || DOXYGEN ++ ++ ++ + } // namespace internal + } // namespace crashpad + +diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/exception_snapshot_linux.cc b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/exception_snapshot_linux.cc +index cd40b3b12..788790080 100644 +--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/exception_snapshot_linux.cc ++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/exception_snapshot_linux.cc +@@ -267,6 +267,58 @@ bool ExceptionSnapshotLinux::ReadContext( + } + } while (true); + } ++#elif defined(ARCH_CPU_SW64_FAMILY) ++ ++template ++static bool ReadContext(ProcessReaderLinux* reader, ++ LinuxVMAddress context_address, ++ typename Traits::CPUContext* dest_context) { ++ const ProcessMemory* memory = reader->Memory(); ++ ++ LinuxVMAddress gregs_address = context_address + ++ offsetof(UContext, mcontext) + ++ offsetof(typename Traits::MContext, gregs); ++ ++ typename Traits::SignalThreadContext thread_context; ++ if (!memory->Read(gregs_address, sizeof(thread_context), &thread_context)) { ++ LOG(ERROR) << "Couldn't read gregs"; ++ return false; ++ } ++ ++ LinuxVMAddress fpregs_address = context_address + ++ offsetof(UContext, mcontext) + ++ offsetof(typename Traits::MContext, fpregs); ++ ++ typename Traits::SignalFloatContext fp_context; ++ if (!memory->Read(fpregs_address, sizeof(fp_context), &fp_context)) { ++ LOG(ERROR) << "Couldn't read fpregs"; ++ return false; ++ } ++ ++ InitializeCPUContextSW64(thread_context, fp_context, dest_context); ++ ++ return true; ++} ++ ++template <> ++bool ExceptionSnapshotLinux::ReadContext( ++ ProcessReaderLinux* reader, ++ LinuxVMAddress context_address) { ++ ++ return false; ++} ++ ++template <> ++bool ExceptionSnapshotLinux::ReadContext( ++ ProcessReaderLinux* reader, ++ LinuxVMAddress context_address) { ++ context_.architecture = kCPUArchitectureSW64EL; ++ context_.sw64 = &context_union_.sw64; ++ ++ return internal::ReadContext( ++ reader, context_address, context_.mips64); ++} ++ + + #elif defined(ARCH_CPU_MIPS_FAMILY) + +diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/exception_snapshot_linux.h b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/exception_snapshot_linux.h +index ea0cd2106..74d30a983 100644 +--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/exception_snapshot_linux.h ++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/exception_snapshot_linux.h +@@ -84,6 +84,8 @@ 
class ExceptionSnapshotLinux final : public ExceptionSnapshot { + #elif defined(ARCH_CPU_MIPS_FAMILY) + CPUContextMIPS mipsel; + CPUContextMIPS64 mips64; ++#elif defined(ARCH_CPU_SW64_FAMILY) ++ CPUContextSW64 sw64; + #endif + } context_union_; + CPUContext context_; +diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/process_reader_linux.cc b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/process_reader_linux.cc +index b96abfe74..542277b56 100644 +--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/process_reader_linux.cc ++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/process_reader_linux.cc +@@ -108,6 +108,8 @@ void ProcessReaderLinux::Thread::InitializeStack(ProcessReaderLinux* reader) { + #elif defined(ARCH_CPU_MIPS_FAMILY) + stack_pointer = reader->Is64Bit() ? thread_info.thread_context.t64.regs[29] + : thread_info.thread_context.t32.regs[29]; ++#elif defined(ARCH_CPU_SW64_FAMILY) ++ stack_pointer = thread_info.thread_context.t64.regs[29]; + #else + #error Port. + #endif +diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/signal_context.h b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/signal_context.h +index 110024680..9ce0cbc80 100644 +--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/signal_context.h ++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/signal_context.h +@@ -421,6 +421,109 @@ static_assert(offsetof(UContext, mcontext.fpregs) == + offsetof(ucontext_t, uc_mcontext.fpregs), + "context offset mismatch"); + #endif ++#elif defined(ARCH_CPU_SW64_FAMILY) ++ ++struct MContext32 { ++ uint32_t regmask; ++ uint32_t status; ++ uint64_t pc; ++ uint64_t gregs[32]; ++ struct { ++ float _fp_fregs; ++ unsigned int _fp_pad; ++ } fpregs[32]; ++ uint32_t fp_owned; ++ uint32_t fpc_csr; ++ uint32_t fpc_eir; ++ uint32_t used_math; ++ uint32_t dsp; ++ uint64_t mdhi; ++ uint64_t mdlo; ++ uint32_t hi1; ++ uint32_t lo1; ++ uint32_t hi2; ++ uint32_t lo2; ++ uint32_t hi3; ++ uint32_t lo3; ++}; ++ ++struct MContext64 { ++ uint64_t gregs[32]; ++ double fpregs[32]; ++ uint64_t mdhi; ++ uint64_t hi1; ++ uint64_t hi2; ++ uint64_t hi3; ++ uint64_t mdlo; ++ uint64_t lo1; ++ uint64_t lo2; ++ uint64_t lo3; ++ uint64_t pc; ++ uint32_t fpc_csr; ++ uint32_t used_math; ++ uint32_t dsp; ++ uint32_t __glibc_reserved1; ++}; ++ ++struct SignalThreadContext32 { ++ uint64_t regs[32]; ++ uint32_t lo; ++ uint32_t hi; ++ uint32_t cp0_epc; ++ uint32_t cp0_badvaddr; ++ uint32_t cp0_status; ++ uint32_t cp0_cause; ++ ++ SignalThreadContext32() {} ++ explicit SignalThreadContext32( ++ const struct ThreadContext::t32_t& thread_context) { ++ for (size_t reg = 0; reg < 32; ++reg) { ++ regs[reg] = thread_context.regs[reg]; ++ } ++ lo = thread_context.lo; ++ hi = thread_context.hi; ++ cp0_epc = thread_context.cp0_epc; ++ cp0_badvaddr = thread_context.cp0_badvaddr; ++ cp0_status = thread_context.cp0_status; ++ cp0_cause = thread_context.cp0_cause; ++ } ++}; ++ ++struct ContextTraits32 : public Traits32 { ++ using MContext = MContext32; ++ using SignalThreadContext = SignalThreadContext32; ++ using SignalFloatContext = FloatContext::f32_t; ++ using CPUContext = CPUContextMIPS; ++}; ++ ++struct ContextTraits64 : public Traits64 { ++ using MContext = MContext64; ++ using SignalThreadContext = ThreadContext::t64_t; ++ using SignalFloatContext = FloatContext::f64_t; ++ using CPUContext = CPUContextMIPS64; ++}; ++ ++template ++struct UContext { 
++ typename Traits::ULong flags; ++ typename Traits::Address link; ++ SignalStack stack; ++ typename Traits::ULong_32Only alignment_padding_; ++ typename Traits::MContext mcontext; ++ Sigset sigmask; ++}; ++ ++#if 0//TODO ++static_assert(offsetof(UContext, mcontext) == ++ offsetof(ucontext_t, uc_mcontext), ++ "context offset mismtach"); ++static_assert(offsetof(UContext, mcontext.gregs) == ++ offsetof(ucontext_t, uc_mcontext.gregs), ++ "context offset mismatch"); ++static_assert(offsetof(UContext, mcontext.fpregs) == ++ offsetof(ucontext_t, uc_mcontext.fpregs), ++ "context offset mismatch"); ++#endif + + #else + #error Port. +diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/system_snapshot_linux.cc b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/system_snapshot_linux.cc +index 8564d3d45..d97dddf11 100644 +--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/system_snapshot_linux.cc ++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/system_snapshot_linux.cc +@@ -203,6 +203,8 @@ CPUArchitecture SystemSnapshotLinux::GetCPUArchitecture() const { + #elif defined(ARCH_CPU_MIPS_FAMILY) + return process_reader_->Is64Bit() ? kCPUArchitectureMIPS64EL + : kCPUArchitectureMIPSEL; ++#elif defined(ARCH_CPU_SW64_FAMILY) ++ return kCPUArchitectureSW64EL; + #else + #error port to your architecture + #endif +@@ -218,6 +220,9 @@ uint32_t SystemSnapshotLinux::CPURevision() const { + #elif defined(ARCH_CPU_MIPS_FAMILY) + // Not implementable on MIPS + return 0; ++#elif defined(ARCH_CPU_SW64_FAMILY) ++ // Not implementable on SW64 ++ return 0; + #else + #error port to your architecture + #endif +@@ -238,6 +243,9 @@ std::string SystemSnapshotLinux::CPUVendor() const { + #elif defined(ARCH_CPU_MIPS_FAMILY) + // Not implementable on MIPS + return std::string(); ++#elif defined(ARCH_CPU_SW64_FAMILY) ++ // Not implementable on SW64 ++ return std::string(); + #else + #error port to your architecture + #endif +@@ -371,6 +379,9 @@ bool SystemSnapshotLinux::NXEnabled() const { + #elif defined(ARCH_CPU_MIPS_FAMILY) + // Not implementable on MIPS + return false; ++#elif defined(ARCH_CPU_SW64_FAMILY) ++ // Not implementable on SW64 ++ return false; + #else + #error Port. + #endif // ARCH_CPU_X86_FAMILY +diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/thread_snapshot_linux.cc b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/thread_snapshot_linux.cc +index e3e2bebdd..26745f0a7 100644 +--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/thread_snapshot_linux.cc ++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/thread_snapshot_linux.cc +@@ -186,6 +186,17 @@ bool ThreadSnapshotLinux::Initialize(ProcessReaderLinux* process_reader, + thread.thread_info.float_context.f32, + context_.mipsel); + } ++#elif defined(ARCH_CPU_SW64_FAMILY) ++ if (process_reader->Is64Bit()) { ++ context_.architecture = kCPUArchitectureSW64EL; ++ context_.sw64 = &context_union_.sw64; ++#if 0//TODO ++ InitializeCPUContextSW64( ++ thread.thread_info.thread_context.t64, ++ thread.thread_info.float_context.f64, ++ context_.sw64); ++#endif ++ } + #else + #error Port. 
+ #endif +diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/thread_snapshot_linux.h b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/thread_snapshot_linux.h +index 44cc6f6d9..5a31a2bfa 100644 +--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/thread_snapshot_linux.h ++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/thread_snapshot_linux.h +@@ -68,6 +68,8 @@ class ThreadSnapshotLinux final : public ThreadSnapshot { + #elif defined(ARCH_CPU_MIPS_FAMILY) + CPUContextMIPS mipsel; + CPUContextMIPS64 mips64; ++#elif defined(ARCH_CPU_SW64_FAMILY) ++ CPUContextSW64 sw64; + #else + #error Port. + #endif // ARCH_CPU_X86_FAMILY +diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/util/linux/ptracer.cc b/src/3rdparty/chromium/third_party/crashpad/crashpad/util/linux/ptracer.cc +index 557e0d363..3e9dbef27 100644 +--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/util/linux/ptracer.cc ++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/util/linux/ptracer.cc +@@ -397,6 +397,39 @@ bool GetThreadArea64(pid_t tid, + *address = FromPointerCast(result); + return true; + } ++#elif defined(ARCH_CPU_SW64) ++// NOT SURE TODO ++bool GetGeneralPurposeRegistersLegacy(pid_t tid, ++ ThreadContext* context, ++ bool can_log) { ++ return false; ++} ++ ++bool GetFloatingPointRegisters64(pid_t tid, ++ FloatContext* context, ++ bool can_log) { ++ return false; ++} ++ ++bool GetFloatingPointRegisters32(pid_t tid, ++ FloatContext* context, ++ bool can_log) { ++ return false; ++} ++ ++bool GetThreadArea32(pid_t tid, ++ const ThreadContext& context, ++ LinuxVMAddress* address, ++ bool can_log) { ++ return false; ++} ++ ++bool GetThreadArea64(pid_t tid, ++ const ThreadContext& context, ++ LinuxVMAddress* address, ++ bool can_log) { ++ return false; ++} + + #else + #error Port. +diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/util/linux/thread_info.h b/src/3rdparty/chromium/third_party/crashpad/crashpad/util/linux/thread_info.h +index 5b55c24a7..eff1aec87 100644 +--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/util/linux/thread_info.h ++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/util/linux/thread_info.h +@@ -14,6 +14,10 @@ + + #ifndef CRASHPAD_UTIL_LINUX_THREAD_INFO_H_ + #define CRASHPAD_UTIL_LINUX_THREAD_INFO_H_ ++#if defined(__sw_64__) ++#include ++#endif ++ + + #include + #include +@@ -67,7 +71,7 @@ union ThreadContext { + uint32_t pc; + uint32_t cpsr; + uint32_t orig_r0; +-#elif defined(ARCH_CPU_MIPS_FAMILY) ++#elif defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_SW64_FAMILY) + // Reflects output format of static int gpr32_get(), defined in + // arch/mips/kernel/ptrace.c in kernel source + uint32_t padding0_[6]; +@@ -122,7 +126,7 @@ union ThreadContext { + uint64_t sp; + uint64_t pc; + uint64_t pstate; +-#elif defined(ARCH_CPU_MIPS_FAMILY) ++#elif defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_SW64_FAMILY) + // Reflects output format of static int gpr64_get(), defined in + // arch/mips/kernel/ptrace.c in kernel source + uint64_t regs[32]; +@@ -143,6 +147,9 @@ union ThreadContext { + using NativeThreadContext = user_regs; + #elif defined(ARCH_CPU_MIPS_FAMILY) + // No appropriate NativeThreadsContext type available for MIPS ++#elif defined(ARCH_CPU_SW64_FAMILY) ++// NOT SURE just for build ++ using NativeThreadContext = t64_t; + #else + #error Port. 
+ #endif // ARCH_CPU_X86_FAMILY || ARCH_CPU_ARM64 +@@ -209,7 +216,7 @@ union FloatContext { + + bool have_fpregs; + bool have_vfp; +-#elif defined(ARCH_CPU_MIPS_FAMILY) ++#elif defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_SW64_FAMILY) + // Reflects data format filled by ptrace_getfpregs() in + // arch/mips/kernel/ptrace.c + struct { +@@ -246,7 +253,7 @@ union FloatContext { + uint32_t fpsr; + uint32_t fpcr; + uint8_t padding[8]; +-#elif defined(ARCH_CPU_MIPS_FAMILY) ++#elif defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_SW64_FAMILY) + // Reflects data format filled by ptrace_getfpregs() in + // arch/mips/kernel/ptrace.c + double fpregs[32]; +@@ -280,6 +287,8 @@ union FloatContext { + static_assert(sizeof(f64) == sizeof(user_fpsimd_struct), "Size mismatch"); + #elif defined(ARCH_CPU_MIPS_FAMILY) + // No appropriate floating point context native type for available MIPS. ++#elif defined(ARCH_CPU_SW64) ++ using user_fpsimd_struct = f64_t; + #else + #error Port. + #endif // ARCH_CPU_X86 +diff --git a/src/3rdparty/chromium/third_party/ffmpeg/ffmpeg_options.gni b/src/3rdparty/chromium/third_party/ffmpeg/ffmpeg_options.gni +index 0654f3dda..e9405e633 100644 +--- a/src/3rdparty/chromium/third_party/ffmpeg/ffmpeg_options.gni ++++ b/src/3rdparty/chromium/third_party/ffmpeg/ffmpeg_options.gni +@@ -52,6 +52,9 @@ declare_args() { + + use_system_ffmpeg = false + } ++if (current_cpu == "sw_64") { ++ use_system_ffmpeg = true ++} + + assert(ffmpeg_branding == "Chromium" || + ffmpeg_branding == "Chrome" || +diff --git a/src/3rdparty/chromium/third_party/icu/source/i18n/double-conversion-utils.h b/src/3rdparty/chromium/third_party/icu/source/i18n/double-conversion-utils.h +index c5439ca15..7b73c6f68 100644 +--- a/src/3rdparty/chromium/third_party/icu/source/i18n/double-conversion-utils.h ++++ b/src/3rdparty/chromium/third_party/icu/source/i18n/double-conversion-utils.h +@@ -103,7 +103,7 @@ int main(int argc, char** argv) { + #if defined(_M_X64) || defined(__x86_64__) || \ + defined(__ARMEL__) || defined(__avr32__) || defined(_M_ARM) || defined(_M_ARM64) || \ + defined(__hppa__) || defined(__ia64__) || \ +- defined(__mips__) || \ ++ defined(__mips__) || defined(__sw_64__) || \ + defined(__powerpc__) || defined(__ppc__) || defined(__ppc64__) || \ + defined(_POWER) || defined(_ARCH_PPC) || defined(_ARCH_PPC64) || \ + defined(__sparc__) || defined(__sparc) || defined(__s390__) || \ +diff --git a/src/3rdparty/chromium/third_party/libvpx/BUILD.gn b/src/3rdparty/chromium/third_party/libvpx/BUILD.gn +index 9b92313b4..48e82e9f5 100644 +--- a/src/3rdparty/chromium/third_party/libvpx/BUILD.gn ++++ b/src/3rdparty/chromium/third_party/libvpx/BUILD.gn +@@ -342,6 +342,9 @@ static_library("bundled_libvpx") { + } else { + sources = libvpx_srcs_arm64 + } ++ }else if (current_cpu == "sw_64") { ++ sources = libvpx_srcs_generic ++ #libvpx_srcs_generic + } + + configs -= [ "//build/config/compiler:chromium_code" ] +diff --git a/src/3rdparty/chromium/third_party/libvpx/source/config/linux/sw_64/vp8_rtcd.h b/src/3rdparty/chromium/third_party/libvpx/source/config/linux/sw_64/vp8_rtcd.h +new file mode 100644 +index 000000000..aa475b55f +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/libvpx/source/config/linux/sw_64/vp8_rtcd.h +@@ -0,0 +1,357 @@ ++// This file is generated. Do not edit. 
++#ifndef VP8_RTCD_H_ ++#define VP8_RTCD_H_ ++ ++#ifdef RTCD_C ++#define RTCD_EXTERN ++#else ++#define RTCD_EXTERN extern ++#endif ++ ++/* ++ * VP8 ++ */ ++ ++struct blockd; ++struct macroblockd; ++struct loop_filter_info; ++ ++/* Encoder forward decls */ ++struct block; ++struct macroblock; ++struct variance_vtable; ++union int_mv; ++struct yv12_buffer_config; ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++void vp8_bilinear_predict16x16_c(unsigned char* src_ptr, ++ int src_pixels_per_line, ++ int xoffset, ++ int yoffset, ++ unsigned char* dst_ptr, ++ int dst_pitch); ++#define vp8_bilinear_predict16x16 vp8_bilinear_predict16x16_c ++ ++void vp8_bilinear_predict4x4_c(unsigned char* src_ptr, ++ int src_pixels_per_line, ++ int xoffset, ++ int yoffset, ++ unsigned char* dst_ptr, ++ int dst_pitch); ++#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_c ++ ++void vp8_bilinear_predict8x4_c(unsigned char* src_ptr, ++ int src_pixels_per_line, ++ int xoffset, ++ int yoffset, ++ unsigned char* dst_ptr, ++ int dst_pitch); ++#define vp8_bilinear_predict8x4 vp8_bilinear_predict8x4_c ++ ++void vp8_bilinear_predict8x8_c(unsigned char* src_ptr, ++ int src_pixels_per_line, ++ int xoffset, ++ int yoffset, ++ unsigned char* dst_ptr, ++ int dst_pitch); ++#define vp8_bilinear_predict8x8 vp8_bilinear_predict8x8_c ++ ++void vp8_blend_b_c(unsigned char* y, ++ unsigned char* u, ++ unsigned char* v, ++ int y_1, ++ int u_1, ++ int v_1, ++ int alpha, ++ int stride); ++#define vp8_blend_b vp8_blend_b_c ++ ++void vp8_blend_mb_inner_c(unsigned char* y, ++ unsigned char* u, ++ unsigned char* v, ++ int y_1, ++ int u_1, ++ int v_1, ++ int alpha, ++ int stride); ++#define vp8_blend_mb_inner vp8_blend_mb_inner_c ++ ++void vp8_blend_mb_outer_c(unsigned char* y, ++ unsigned char* u, ++ unsigned char* v, ++ int y_1, ++ int u_1, ++ int v_1, ++ int alpha, ++ int stride); ++#define vp8_blend_mb_outer vp8_blend_mb_outer_c ++ ++int vp8_block_error_c(short* coeff, short* dqcoeff); ++#define vp8_block_error vp8_block_error_c ++ ++void vp8_copy32xn_c(const unsigned char* src_ptr, ++ int src_stride, ++ unsigned char* dst_ptr, ++ int dst_stride, ++ int height); ++#define vp8_copy32xn vp8_copy32xn_c ++ ++void vp8_copy_mem16x16_c(unsigned char* src, ++ int src_stride, ++ unsigned char* dst, ++ int dst_stride); ++#define vp8_copy_mem16x16 vp8_copy_mem16x16_c ++ ++void vp8_copy_mem8x4_c(unsigned char* src, ++ int src_stride, ++ unsigned char* dst, ++ int dst_stride); ++#define vp8_copy_mem8x4 vp8_copy_mem8x4_c ++ ++void vp8_copy_mem8x8_c(unsigned char* src, ++ int src_stride, ++ unsigned char* dst, ++ int dst_stride); ++#define vp8_copy_mem8x8 vp8_copy_mem8x8_c ++ ++void vp8_dc_only_idct_add_c(short input_dc, ++ unsigned char* pred_ptr, ++ int pred_stride, ++ unsigned char* dst_ptr, ++ int dst_stride); ++#define vp8_dc_only_idct_add vp8_dc_only_idct_add_c ++ ++int vp8_denoiser_filter_c(unsigned char* mc_running_avg_y, ++ int mc_avg_y_stride, ++ unsigned char* running_avg_y, ++ int avg_y_stride, ++ unsigned char* sig, ++ int sig_stride, ++ unsigned int motion_magnitude, ++ int increase_denoising); ++#define vp8_denoiser_filter vp8_denoiser_filter_c ++ ++int vp8_denoiser_filter_uv_c(unsigned char* mc_running_avg, ++ int mc_avg_stride, ++ unsigned char* running_avg, ++ int avg_stride, ++ unsigned char* sig, ++ int sig_stride, ++ unsigned int motion_magnitude, ++ int increase_denoising); ++#define vp8_denoiser_filter_uv vp8_denoiser_filter_uv_c ++ ++void vp8_dequant_idct_add_c(short* input, ++ short* dq, ++ unsigned char* dest, ++ int 
stride); ++#define vp8_dequant_idct_add vp8_dequant_idct_add_c ++ ++void vp8_dequant_idct_add_uv_block_c(short* q, ++ short* dq, ++ unsigned char* dst_u, ++ unsigned char* dst_v, ++ int stride, ++ char* eobs); ++#define vp8_dequant_idct_add_uv_block vp8_dequant_idct_add_uv_block_c ++ ++void vp8_dequant_idct_add_y_block_c(short* q, ++ short* dq, ++ unsigned char* dst, ++ int stride, ++ char* eobs); ++#define vp8_dequant_idct_add_y_block vp8_dequant_idct_add_y_block_c ++ ++void vp8_dequantize_b_c(struct blockd*, short* DQC); ++#define vp8_dequantize_b vp8_dequantize_b_c ++ ++int vp8_diamond_search_sad_c(struct macroblock* x, ++ struct block* b, ++ struct blockd* d, ++ union int_mv* ref_mv, ++ union int_mv* best_mv, ++ int search_param, ++ int sad_per_bit, ++ int* num00, ++ struct variance_vtable* fn_ptr, ++ int* mvcost[2], ++ union int_mv* center_mv); ++#define vp8_diamond_search_sad vp8_diamond_search_sad_c ++ ++void vp8_fast_quantize_b_c(struct block*, struct blockd*); ++#define vp8_fast_quantize_b vp8_fast_quantize_b_c ++ ++void vp8_filter_by_weight16x16_c(unsigned char* src, ++ int src_stride, ++ unsigned char* dst, ++ int dst_stride, ++ int src_weight); ++#define vp8_filter_by_weight16x16 vp8_filter_by_weight16x16_c ++ ++void vp8_filter_by_weight4x4_c(unsigned char* src, ++ int src_stride, ++ unsigned char* dst, ++ int dst_stride, ++ int src_weight); ++#define vp8_filter_by_weight4x4 vp8_filter_by_weight4x4_c ++ ++void vp8_filter_by_weight8x8_c(unsigned char* src, ++ int src_stride, ++ unsigned char* dst, ++ int dst_stride, ++ int src_weight); ++#define vp8_filter_by_weight8x8 vp8_filter_by_weight8x8_c ++ ++int vp8_full_search_sad_c(struct macroblock* x, ++ struct block* b, ++ struct blockd* d, ++ union int_mv* ref_mv, ++ int sad_per_bit, ++ int distance, ++ struct variance_vtable* fn_ptr, ++ int* mvcost[2], ++ union int_mv* center_mv); ++#define vp8_full_search_sad vp8_full_search_sad_c ++ ++void vp8_loop_filter_bh_c(unsigned char* y_ptr, ++ unsigned char* u_ptr, ++ unsigned char* v_ptr, ++ int y_stride, ++ int uv_stride, ++ struct loop_filter_info* lfi); ++#define vp8_loop_filter_bh vp8_loop_filter_bh_c ++ ++void vp8_loop_filter_bv_c(unsigned char* y_ptr, ++ unsigned char* u_ptr, ++ unsigned char* v_ptr, ++ int y_stride, ++ int uv_stride, ++ struct loop_filter_info* lfi); ++#define vp8_loop_filter_bv vp8_loop_filter_bv_c ++ ++void vp8_loop_filter_mbh_c(unsigned char* y_ptr, ++ unsigned char* u_ptr, ++ unsigned char* v_ptr, ++ int y_stride, ++ int uv_stride, ++ struct loop_filter_info* lfi); ++#define vp8_loop_filter_mbh vp8_loop_filter_mbh_c ++ ++void vp8_loop_filter_mbv_c(unsigned char* y_ptr, ++ unsigned char* u_ptr, ++ unsigned char* v_ptr, ++ int y_stride, ++ int uv_stride, ++ struct loop_filter_info* lfi); ++#define vp8_loop_filter_mbv vp8_loop_filter_mbv_c ++ ++void vp8_loop_filter_bhs_c(unsigned char* y_ptr, ++ int y_stride, ++ const unsigned char* blimit); ++#define vp8_loop_filter_simple_bh vp8_loop_filter_bhs_c ++ ++void vp8_loop_filter_bvs_c(unsigned char* y_ptr, ++ int y_stride, ++ const unsigned char* blimit); ++#define vp8_loop_filter_simple_bv vp8_loop_filter_bvs_c ++ ++void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y_ptr, ++ int y_stride, ++ const unsigned char* blimit); ++#define vp8_loop_filter_simple_mbh vp8_loop_filter_simple_horizontal_edge_c ++ ++void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y_ptr, ++ int y_stride, ++ const unsigned char* blimit); ++#define vp8_loop_filter_simple_mbv vp8_loop_filter_simple_vertical_edge_c ++ ++int 
vp8_mbblock_error_c(struct macroblock* mb, int dc); ++#define vp8_mbblock_error vp8_mbblock_error_c ++ ++int vp8_mbuverror_c(struct macroblock* mb); ++#define vp8_mbuverror vp8_mbuverror_c ++ ++int vp8_refining_search_sad_c(struct macroblock* x, ++ struct block* b, ++ struct blockd* d, ++ union int_mv* ref_mv, ++ int error_per_bit, ++ int search_range, ++ struct variance_vtable* fn_ptr, ++ int* mvcost[2], ++ union int_mv* center_mv); ++#define vp8_refining_search_sad vp8_refining_search_sad_c ++ ++void vp8_regular_quantize_b_c(struct block*, struct blockd*); ++#define vp8_regular_quantize_b vp8_regular_quantize_b_c ++ ++void vp8_short_fdct4x4_c(short* input, short* output, int pitch); ++#define vp8_short_fdct4x4 vp8_short_fdct4x4_c ++ ++void vp8_short_fdct8x4_c(short* input, short* output, int pitch); ++#define vp8_short_fdct8x4 vp8_short_fdct8x4_c ++ ++void vp8_short_idct4x4llm_c(short* input, ++ unsigned char* pred_ptr, ++ int pred_stride, ++ unsigned char* dst_ptr, ++ int dst_stride); ++#define vp8_short_idct4x4llm vp8_short_idct4x4llm_c ++ ++void vp8_short_inv_walsh4x4_c(short* input, short* mb_dqcoeff); ++#define vp8_short_inv_walsh4x4 vp8_short_inv_walsh4x4_c ++ ++void vp8_short_inv_walsh4x4_1_c(short* input, short* mb_dqcoeff); ++#define vp8_short_inv_walsh4x4_1 vp8_short_inv_walsh4x4_1_c ++ ++void vp8_short_walsh4x4_c(short* input, short* output, int pitch); ++#define vp8_short_walsh4x4 vp8_short_walsh4x4_c ++ ++void vp8_sixtap_predict16x16_c(unsigned char* src_ptr, ++ int src_pixels_per_line, ++ int xoffset, ++ int yoffset, ++ unsigned char* dst_ptr, ++ int dst_pitch); ++#define vp8_sixtap_predict16x16 vp8_sixtap_predict16x16_c ++ ++void vp8_sixtap_predict4x4_c(unsigned char* src_ptr, ++ int src_pixels_per_line, ++ int xoffset, ++ int yoffset, ++ unsigned char* dst_ptr, ++ int dst_pitch); ++#define vp8_sixtap_predict4x4 vp8_sixtap_predict4x4_c ++ ++void vp8_sixtap_predict8x4_c(unsigned char* src_ptr, ++ int src_pixels_per_line, ++ int xoffset, ++ int yoffset, ++ unsigned char* dst_ptr, ++ int dst_pitch); ++#define vp8_sixtap_predict8x4 vp8_sixtap_predict8x4_c ++ ++void vp8_sixtap_predict8x8_c(unsigned char* src_ptr, ++ int src_pixels_per_line, ++ int xoffset, ++ int yoffset, ++ unsigned char* dst_ptr, ++ int dst_pitch); ++#define vp8_sixtap_predict8x8 vp8_sixtap_predict8x8_c ++ ++void vp8_rtcd(void); ++ ++#include "vpx_config.h" ++ ++#ifdef RTCD_C ++static void setup_rtcd_internal(void) {} ++#endif ++ ++#ifdef __cplusplus ++} // extern "C" ++#endif ++ ++#endif +diff --git a/src/3rdparty/chromium/third_party/libvpx/source/config/linux/sw_64/vp9_rtcd.h b/src/3rdparty/chromium/third_party/libvpx/source/config/linux/sw_64/vp9_rtcd.h +new file mode 100644 +index 000000000..009139314 +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/libvpx/source/config/linux/sw_64/vp9_rtcd.h +@@ -0,0 +1,275 @@ ++// This file is generated. Do not edit. 
++#ifndef VP9_RTCD_H_ ++#define VP9_RTCD_H_ ++ ++#ifdef RTCD_C ++#define RTCD_EXTERN ++#else ++#define RTCD_EXTERN extern ++#endif ++ ++/* ++ * VP9 ++ */ ++ ++#include "vp9/common/vp9_common.h" ++#include "vp9/common/vp9_enums.h" ++#include "vp9/common/vp9_filter.h" ++#include "vpx/vpx_integer.h" ++ ++struct macroblockd; ++ ++/* Encoder forward decls */ ++struct macroblock; ++struct vp9_variance_vtable; ++struct search_site_config; ++struct mv; ++union int_mv; ++struct yv12_buffer_config; ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++int64_t vp9_block_error_c(const tran_low_t* coeff, ++ const tran_low_t* dqcoeff, ++ intptr_t block_size, ++ int64_t* ssz); ++#define vp9_block_error vp9_block_error_c ++ ++int64_t vp9_block_error_fp_c(const tran_low_t* coeff, ++ const tran_low_t* dqcoeff, ++ int block_size); ++#define vp9_block_error_fp vp9_block_error_fp_c ++ ++int vp9_denoiser_filter_c(const uint8_t* sig, ++ int sig_stride, ++ const uint8_t* mc_avg, ++ int mc_avg_stride, ++ uint8_t* avg, ++ int avg_stride, ++ int increase_denoising, ++ BLOCK_SIZE bs, ++ int motion_magnitude); ++#define vp9_denoiser_filter vp9_denoiser_filter_c ++ ++int vp9_diamond_search_sad_c(const struct macroblock* x, ++ const struct search_site_config* cfg, ++ struct mv* ref_mv, ++ struct mv* best_mv, ++ int search_param, ++ int sad_per_bit, ++ int* num00, ++ const struct vp9_variance_vtable* fn_ptr, ++ const struct mv* center_mv); ++#define vp9_diamond_search_sad vp9_diamond_search_sad_c ++ ++void vp9_fht16x16_c(const int16_t* input, ++ tran_low_t* output, ++ int stride, ++ int tx_type); ++#define vp9_fht16x16 vp9_fht16x16_c ++ ++void vp9_fht4x4_c(const int16_t* input, ++ tran_low_t* output, ++ int stride, ++ int tx_type); ++#define vp9_fht4x4 vp9_fht4x4_c ++ ++void vp9_fht8x8_c(const int16_t* input, ++ tran_low_t* output, ++ int stride, ++ int tx_type); ++#define vp9_fht8x8 vp9_fht8x8_c ++ ++void vp9_filter_by_weight16x16_c(const uint8_t* src, ++ int src_stride, ++ uint8_t* dst, ++ int dst_stride, ++ int src_weight); ++#define vp9_filter_by_weight16x16 vp9_filter_by_weight16x16_c ++ ++void vp9_filter_by_weight8x8_c(const uint8_t* src, ++ int src_stride, ++ uint8_t* dst, ++ int dst_stride, ++ int src_weight); ++#define vp9_filter_by_weight8x8 vp9_filter_by_weight8x8_c ++ ++void vp9_fwht4x4_c(const int16_t* input, tran_low_t* output, int stride); ++#define vp9_fwht4x4 vp9_fwht4x4_c ++ ++int64_t vp9_highbd_block_error_c(const tran_low_t* coeff, ++ const tran_low_t* dqcoeff, ++ intptr_t block_size, ++ int64_t* ssz, ++ int bd); ++#define vp9_highbd_block_error vp9_highbd_block_error_c ++ ++void vp9_highbd_fht16x16_c(const int16_t* input, ++ tran_low_t* output, ++ int stride, ++ int tx_type); ++#define vp9_highbd_fht16x16 vp9_highbd_fht16x16_c ++ ++void vp9_highbd_fht4x4_c(const int16_t* input, ++ tran_low_t* output, ++ int stride, ++ int tx_type); ++#define vp9_highbd_fht4x4 vp9_highbd_fht4x4_c ++ ++void vp9_highbd_fht8x8_c(const int16_t* input, ++ tran_low_t* output, ++ int stride, ++ int tx_type); ++#define vp9_highbd_fht8x8 vp9_highbd_fht8x8_c ++ ++void vp9_highbd_fwht4x4_c(const int16_t* input, tran_low_t* output, int stride); ++#define vp9_highbd_fwht4x4 vp9_highbd_fwht4x4_c ++ ++void vp9_highbd_iht16x16_256_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int tx_type, ++ int bd); ++#define vp9_highbd_iht16x16_256_add vp9_highbd_iht16x16_256_add_c ++ ++void vp9_highbd_iht4x4_16_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int tx_type, ++ int bd); ++#define 
vp9_highbd_iht4x4_16_add vp9_highbd_iht4x4_16_add_c ++ ++void vp9_highbd_iht8x8_64_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int tx_type, ++ int bd); ++#define vp9_highbd_iht8x8_64_add vp9_highbd_iht8x8_64_add_c ++ ++void vp9_highbd_mbpost_proc_across_ip_c(uint16_t* src, ++ int pitch, ++ int rows, ++ int cols, ++ int flimit); ++#define vp9_highbd_mbpost_proc_across_ip vp9_highbd_mbpost_proc_across_ip_c ++ ++void vp9_highbd_mbpost_proc_down_c(uint16_t* dst, ++ int pitch, ++ int rows, ++ int cols, ++ int flimit); ++#define vp9_highbd_mbpost_proc_down vp9_highbd_mbpost_proc_down_c ++ ++void vp9_highbd_post_proc_down_and_across_c(const uint16_t* src_ptr, ++ uint16_t* dst_ptr, ++ int src_pixels_per_line, ++ int dst_pixels_per_line, ++ int rows, ++ int cols, ++ int flimit); ++#define vp9_highbd_post_proc_down_and_across \ ++ vp9_highbd_post_proc_down_and_across_c ++ ++void vp9_highbd_quantize_fp_c(const tran_low_t* coeff_ptr, ++ intptr_t n_coeffs, ++ int skip_block, ++ const int16_t* round_ptr, ++ const int16_t* quant_ptr, ++ tran_low_t* qcoeff_ptr, ++ tran_low_t* dqcoeff_ptr, ++ const int16_t* dequant_ptr, ++ uint16_t* eob_ptr, ++ const int16_t* scan, ++ const int16_t* iscan); ++#define vp9_highbd_quantize_fp vp9_highbd_quantize_fp_c ++ ++void vp9_highbd_quantize_fp_32x32_c(const tran_low_t* coeff_ptr, ++ intptr_t n_coeffs, ++ int skip_block, ++ const int16_t* round_ptr, ++ const int16_t* quant_ptr, ++ tran_low_t* qcoeff_ptr, ++ tran_low_t* dqcoeff_ptr, ++ const int16_t* dequant_ptr, ++ uint16_t* eob_ptr, ++ const int16_t* scan, ++ const int16_t* iscan); ++#define vp9_highbd_quantize_fp_32x32 vp9_highbd_quantize_fp_32x32_c ++ ++void vp9_highbd_temporal_filter_apply_c(const uint8_t* frame1, ++ unsigned int stride, ++ const uint8_t* frame2, ++ unsigned int block_width, ++ unsigned int block_height, ++ int strength, ++ int* blk_fw, ++ int use_32x32, ++ uint32_t* accumulator, ++ uint16_t* count); ++#define vp9_highbd_temporal_filter_apply vp9_highbd_temporal_filter_apply_c ++ ++void vp9_iht16x16_256_add_c(const tran_low_t* input, ++ uint8_t* dest, ++ int stride, ++ int tx_type); ++#define vp9_iht16x16_256_add vp9_iht16x16_256_add_c ++ ++void vp9_iht4x4_16_add_c(const tran_low_t* input, ++ uint8_t* dest, ++ int stride, ++ int tx_type); ++#define vp9_iht4x4_16_add vp9_iht4x4_16_add_c ++ ++void vp9_iht8x8_64_add_c(const tran_low_t* input, ++ uint8_t* dest, ++ int stride, ++ int tx_type); ++#define vp9_iht8x8_64_add vp9_iht8x8_64_add_c ++ ++void vp9_quantize_fp_c(const tran_low_t* coeff_ptr, ++ intptr_t n_coeffs, ++ int skip_block, ++ const int16_t* round_ptr, ++ const int16_t* quant_ptr, ++ tran_low_t* qcoeff_ptr, ++ tran_low_t* dqcoeff_ptr, ++ const int16_t* dequant_ptr, ++ uint16_t* eob_ptr, ++ const int16_t* scan, ++ const int16_t* iscan); ++#define vp9_quantize_fp vp9_quantize_fp_c ++ ++void vp9_quantize_fp_32x32_c(const tran_low_t* coeff_ptr, ++ intptr_t n_coeffs, ++ int skip_block, ++ const int16_t* round_ptr, ++ const int16_t* quant_ptr, ++ tran_low_t* qcoeff_ptr, ++ tran_low_t* dqcoeff_ptr, ++ const int16_t* dequant_ptr, ++ uint16_t* eob_ptr, ++ const int16_t* scan, ++ const int16_t* iscan); ++#define vp9_quantize_fp_32x32 vp9_quantize_fp_32x32_c ++ ++void vp9_scale_and_extend_frame_c(const struct yv12_buffer_config* src, ++ struct yv12_buffer_config* dst, ++ INTERP_FILTER filter_type, ++ int phase_scaler); ++#define vp9_scale_and_extend_frame vp9_scale_and_extend_frame_c ++ ++void vp9_rtcd(void); ++ ++#include "vpx_config.h" ++ ++#ifdef RTCD_C ++static void 
setup_rtcd_internal(void) {} ++#endif ++ ++#ifdef __cplusplus ++} // extern "C" ++#endif ++ ++#endif +diff --git a/src/3rdparty/chromium/third_party/libvpx/source/config/linux/sw_64/vpx_config.asm b/src/3rdparty/chromium/third_party/libvpx/source/config/linux/sw_64/vpx_config.asm +new file mode 100644 +index 000000000..00712e52b +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/libvpx/source/config/linux/sw_64/vpx_config.asm +@@ -0,0 +1,98 @@ ++@ This file was created from a .asm file ++@ using the ads2gas.pl script. ++ .syntax unified ++.equ VPX_ARCH_ARM , 0 ++.equ ARCH_ARM , 0 ++.equ VPX_ARCH_MIPS , 0 ++.equ ARCH_MIPS , 0 ++.equ VPX_ARCH_X86 , 0 ++.equ ARCH_X86 , 0 ++.equ VPX_ARCH_X86_64 , 0 ++.equ ARCH_X86_64 , 0 ++.equ VPX_ARCH_PPC , 0 ++.equ ARCH_PPC , 0 ++.equ HAVE_NEON , 0 ++.equ HAVE_NEON_ASM , 0 ++.equ HAVE_MIPS32 , 0 ++.equ HAVE_DSPR2 , 0 ++.equ HAVE_MSA , 0 ++.equ HAVE_MIPS64 , 0 ++.equ HAVE_MMX , 0 ++.equ HAVE_SSE , 0 ++.equ HAVE_SSE2 , 0 ++.equ HAVE_SSE3 , 0 ++.equ HAVE_SSSE3 , 0 ++.equ HAVE_SSE4_1 , 0 ++.equ HAVE_AVX , 0 ++.equ HAVE_AVX2 , 0 ++.equ HAVE_AVX512 , 0 ++.equ HAVE_VSX , 0 ++.equ HAVE_MMI , 0 ++.equ HAVE_VPX_PORTS , 1 ++.equ HAVE_PTHREAD_H , 1 ++.equ HAVE_UNISTD_H , 0 ++.equ CONFIG_DEPENDENCY_TRACKING , 1 ++.equ CONFIG_EXTERNAL_BUILD , 1 ++.equ CONFIG_INSTALL_DOCS , 0 ++.equ CONFIG_INSTALL_BINS , 1 ++.equ CONFIG_INSTALL_LIBS , 1 ++.equ CONFIG_INSTALL_SRCS , 0 ++.equ CONFIG_DEBUG , 0 ++.equ CONFIG_GPROF , 0 ++.equ CONFIG_GCOV , 0 ++.equ CONFIG_RVCT , 0 ++.equ CONFIG_GCC , 1 ++.equ CONFIG_MSVS , 0 ++.equ CONFIG_PIC , 0 ++.equ CONFIG_BIG_ENDIAN , 0 ++.equ CONFIG_CODEC_SRCS , 0 ++.equ CONFIG_DEBUG_LIBS , 0 ++.equ CONFIG_DEQUANT_TOKENS , 0 ++.equ CONFIG_DC_RECON , 0 ++.equ CONFIG_RUNTIME_CPU_DETECT , 0 ++.equ CONFIG_POSTPROC , 1 ++.equ CONFIG_VP9_POSTPROC , 1 ++.equ CONFIG_MULTITHREAD , 1 ++.equ CONFIG_INTERNAL_STATS , 0 ++.equ CONFIG_VP8_ENCODER , 1 ++.equ CONFIG_VP8_DECODER , 1 ++.equ CONFIG_VP9_ENCODER , 1 ++.equ CONFIG_VP9_DECODER , 1 ++.equ CONFIG_VP8 , 1 ++.equ CONFIG_VP9 , 1 ++.equ CONFIG_ENCODERS , 1 ++.equ CONFIG_DECODERS , 1 ++.equ CONFIG_STATIC_MSVCRT , 0 ++.equ CONFIG_SPATIAL_RESAMPLING , 1 ++.equ CONFIG_REALTIME_ONLY , 1 ++.equ CONFIG_ONTHEFLY_BITPACKING , 0 ++.equ CONFIG_ERROR_CONCEALMENT , 0 ++.equ CONFIG_SHARED , 0 ++.equ CONFIG_STATIC , 1 ++.equ CONFIG_SMALL , 0 ++.equ CONFIG_POSTPROC_VISUALIZER , 0 ++.equ CONFIG_OS_SUPPORT , 1 ++.equ CONFIG_UNIT_TESTS , 1 ++.equ CONFIG_WEBM_IO , 1 ++.equ CONFIG_LIBYUV , 0 ++.equ CONFIG_DECODE_PERF_TESTS , 0 ++.equ CONFIG_ENCODE_PERF_TESTS , 0 ++.equ CONFIG_MULTI_RES_ENCODING , 1 ++.equ CONFIG_TEMPORAL_DENOISING , 1 ++.equ CONFIG_VP9_TEMPORAL_DENOISING , 1 ++.equ CONFIG_CONSISTENT_RECODE , 0 ++.equ CONFIG_COEFFICIENT_RANGE_CHECKING , 0 ++.equ CONFIG_VP9_HIGHBITDEPTH , 1 ++.equ CONFIG_BETTER_HW_COMPATIBILITY , 0 ++.equ CONFIG_EXPERIMENTAL , 0 ++.equ CONFIG_SIZE_LIMIT , 1 ++.equ CONFIG_ALWAYS_ADJUST_BPM , 0 ++.equ CONFIG_BITSTREAM_DEBUG , 0 ++.equ CONFIG_MISMATCH_DEBUG , 0 ++.equ CONFIG_FP_MB_STATS , 0 ++.equ CONFIG_EMULATE_HARDWARE , 0 ++.equ CONFIG_NON_GREEDY_MV , 0 ++.equ CONFIG_RATE_CTRL , 0 ++.equ DECODE_WIDTH_LIMIT , 16384 ++.equ DECODE_HEIGHT_LIMIT , 16384 ++ .section .note.GNU-stack,"",%progbits +diff --git a/src/3rdparty/chromium/third_party/libvpx/source/config/linux/sw_64/vpx_config.c b/src/3rdparty/chromium/third_party/libvpx/source/config/linux/sw_64/vpx_config.c +new file mode 100644 +index 000000000..8aad25ff1 +--- /dev/null ++++ 
b/src/3rdparty/chromium/third_party/libvpx/source/config/linux/sw_64/vpx_config.c +@@ -0,0 +1,10 @@ ++/* Copyright (c) 2011 The WebM project authors. All Rights Reserved. */ ++/* */ ++/* Use of this source code is governed by a BSD-style license */ ++/* that can be found in the LICENSE file in the root of the source */ ++/* tree. An additional intellectual property rights grant can be found */ ++/* in the file PATENTS. All contributing project authors may */ ++/* be found in the AUTHORS file in the root of the source tree. */ ++#include "vpx/vpx_codec.h" ++static const char* const cfg = "--target=generic-gnu --enable-vp9-highbitdepth --enable-external-build --enable-postproc --enable-multi-res-encoding --enable-temporal-denoising --enable-vp9-temporal-denoising --enable-vp9-postproc --size-limit=16384x16384 --enable-realtime-only --disable-install-docs --disable-libyuv"; ++const char *vpx_codec_build_config(void) {return cfg;} +diff --git a/src/3rdparty/chromium/third_party/libvpx/source/config/linux/sw_64/vpx_config.h b/src/3rdparty/chromium/third_party/libvpx/source/config/linux/sw_64/vpx_config.h +new file mode 100644 +index 000000000..fddb76bd2 +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/libvpx/source/config/linux/sw_64/vpx_config.h +@@ -0,0 +1,107 @@ ++/* Copyright (c) 2011 The WebM project authors. All Rights Reserved. */ ++/* */ ++/* Use of this source code is governed by a BSD-style license */ ++/* that can be found in the LICENSE file in the root of the source */ ++/* tree. An additional intellectual property rights grant can be found */ ++/* in the file PATENTS. All contributing project authors may */ ++/* be found in the AUTHORS file in the root of the source tree. */ ++/* This file automatically generated by configure. Do not edit! 
*/ ++#ifndef VPX_CONFIG_H ++#define VPX_CONFIG_H ++#define RESTRICT ++#define INLINE inline ++#define VPX_ARCH_ARM 0 ++#define ARCH_ARM 0 ++#define VPX_ARCH_MIPS 0 ++#define ARCH_MIPS 0 ++#define VPX_ARCH_X86 0 ++#define ARCH_X86 0 ++#define VPX_ARCH_X86_64 0 ++#define ARCH_X86_64 0 ++#define VPX_ARCH_PPC 0 ++#define ARCH_PPC 0 ++#define HAVE_NEON 0 ++#define HAVE_NEON_ASM 0 ++#define HAVE_MIPS32 0 ++#define HAVE_DSPR2 0 ++#define HAVE_MSA 0 ++#define HAVE_MIPS64 0 ++#define HAVE_MMX 0 ++#define HAVE_SSE 0 ++#define HAVE_SSE2 0 ++#define HAVE_SSE3 0 ++#define HAVE_SSSE3 0 ++#define HAVE_SSE4_1 0 ++#define HAVE_AVX 0 ++#define HAVE_AVX2 0 ++#define HAVE_AVX512 0 ++#define HAVE_VSX 0 ++#define HAVE_MMI 0 ++#define HAVE_VPX_PORTS 1 ++#define HAVE_PTHREAD_H 1 ++#define HAVE_UNISTD_H 0 ++#define CONFIG_DEPENDENCY_TRACKING 1 ++#define CONFIG_EXTERNAL_BUILD 1 ++#define CONFIG_INSTALL_DOCS 0 ++#define CONFIG_INSTALL_BINS 1 ++#define CONFIG_INSTALL_LIBS 1 ++#define CONFIG_INSTALL_SRCS 0 ++#define CONFIG_DEBUG 0 ++#define CONFIG_GPROF 0 ++#define CONFIG_GCOV 0 ++#define CONFIG_RVCT 0 ++#define CONFIG_GCC 1 ++#define CONFIG_MSVS 0 ++#define CONFIG_PIC 0 ++#define CONFIG_BIG_ENDIAN 0 ++#define CONFIG_CODEC_SRCS 0 ++#define CONFIG_DEBUG_LIBS 0 ++#define CONFIG_DEQUANT_TOKENS 0 ++#define CONFIG_DC_RECON 0 ++#define CONFIG_RUNTIME_CPU_DETECT 0 ++#define CONFIG_POSTPROC 1 ++#define CONFIG_VP9_POSTPROC 1 ++#define CONFIG_MULTITHREAD 1 ++#define CONFIG_INTERNAL_STATS 0 ++#define CONFIG_VP8_ENCODER 1 ++#define CONFIG_VP8_DECODER 1 ++#define CONFIG_VP9_ENCODER 1 ++#define CONFIG_VP9_DECODER 1 ++#define CONFIG_VP8 1 ++#define CONFIG_VP9 1 ++#define CONFIG_ENCODERS 1 ++#define CONFIG_DECODERS 1 ++#define CONFIG_STATIC_MSVCRT 0 ++#define CONFIG_SPATIAL_RESAMPLING 1 ++#define CONFIG_REALTIME_ONLY 1 ++#define CONFIG_ONTHEFLY_BITPACKING 0 ++#define CONFIG_ERROR_CONCEALMENT 0 ++#define CONFIG_SHARED 0 ++#define CONFIG_STATIC 1 ++#define CONFIG_SMALL 0 ++#define CONFIG_POSTPROC_VISUALIZER 0 ++#define CONFIG_OS_SUPPORT 1 ++#define CONFIG_UNIT_TESTS 1 ++#define CONFIG_WEBM_IO 1 ++#define CONFIG_LIBYUV 0 ++#define CONFIG_DECODE_PERF_TESTS 0 ++#define CONFIG_ENCODE_PERF_TESTS 0 ++#define CONFIG_MULTI_RES_ENCODING 1 ++#define CONFIG_TEMPORAL_DENOISING 1 ++#define CONFIG_VP9_TEMPORAL_DENOISING 1 ++#define CONFIG_CONSISTENT_RECODE 0 ++#define CONFIG_COEFFICIENT_RANGE_CHECKING 0 ++#define CONFIG_VP9_HIGHBITDEPTH 1 ++#define CONFIG_BETTER_HW_COMPATIBILITY 0 ++#define CONFIG_EXPERIMENTAL 0 ++#define CONFIG_SIZE_LIMIT 1 ++#define CONFIG_ALWAYS_ADJUST_BPM 0 ++#define CONFIG_BITSTREAM_DEBUG 0 ++#define CONFIG_MISMATCH_DEBUG 0 ++#define CONFIG_FP_MB_STATS 0 ++#define CONFIG_EMULATE_HARDWARE 0 ++#define CONFIG_NON_GREEDY_MV 0 ++#define CONFIG_RATE_CTRL 0 ++#define DECODE_WIDTH_LIMIT 16384 ++#define DECODE_HEIGHT_LIMIT 16384 ++#endif /* VPX_CONFIG_H */ +diff --git a/src/3rdparty/chromium/third_party/libvpx/source/config/linux/sw_64/vpx_dsp_rtcd.h b/src/3rdparty/chromium/third_party/libvpx/source/config/linux/sw_64/vpx_dsp_rtcd.h +new file mode 100644 +index 000000000..8ba4d8805 +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/libvpx/source/config/linux/sw_64/vpx_dsp_rtcd.h +@@ -0,0 +1,3868 @@ ++// This file is generated. Do not edit. 
++#ifndef VPX_DSP_RTCD_H_ ++#define VPX_DSP_RTCD_H_ ++ ++#ifdef RTCD_C ++#define RTCD_EXTERN ++#else ++#define RTCD_EXTERN extern ++#endif ++ ++/* ++ * DSP ++ */ ++ ++#include "vpx/vpx_integer.h" ++#include "vpx_dsp/vpx_dsp_common.h" ++#include "vpx_dsp/vpx_filter.h" ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++unsigned int vpx_avg_4x4_c(const uint8_t*, int p); ++#define vpx_avg_4x4 vpx_avg_4x4_c ++ ++unsigned int vpx_avg_8x8_c(const uint8_t*, int p); ++#define vpx_avg_8x8 vpx_avg_8x8_c ++ ++void vpx_comp_avg_pred_c(uint8_t* comp_pred, ++ const uint8_t* pred, ++ int width, ++ int height, ++ const uint8_t* ref, ++ int ref_stride); ++#define vpx_comp_avg_pred vpx_comp_avg_pred_c ++ ++void vpx_convolve8_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_convolve8 vpx_convolve8_c ++ ++void vpx_convolve8_avg_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_convolve8_avg vpx_convolve8_avg_c ++ ++void vpx_convolve8_avg_horiz_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_convolve8_avg_horiz vpx_convolve8_avg_horiz_c ++ ++void vpx_convolve8_avg_vert_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_convolve8_avg_vert vpx_convolve8_avg_vert_c ++ ++void vpx_convolve8_horiz_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_convolve8_horiz vpx_convolve8_horiz_c ++ ++void vpx_convolve8_vert_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_convolve8_vert vpx_convolve8_vert_c ++ ++void vpx_convolve_avg_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_convolve_avg vpx_convolve_avg_c ++ ++void vpx_convolve_copy_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_convolve_copy vpx_convolve_copy_c ++ ++void vpx_d117_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d117_predictor_16x16 vpx_d117_predictor_16x16_c ++ ++void vpx_d117_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d117_predictor_32x32 vpx_d117_predictor_32x32_c ++ ++void vpx_d117_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d117_predictor_4x4 vpx_d117_predictor_4x4_c ++ ++void 
vpx_d117_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d117_predictor_8x8 vpx_d117_predictor_8x8_c ++ ++void vpx_d135_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d135_predictor_16x16 vpx_d135_predictor_16x16_c ++ ++void vpx_d135_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d135_predictor_32x32 vpx_d135_predictor_32x32_c ++ ++void vpx_d135_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d135_predictor_4x4 vpx_d135_predictor_4x4_c ++ ++void vpx_d135_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d135_predictor_8x8 vpx_d135_predictor_8x8_c ++ ++void vpx_d153_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d153_predictor_16x16 vpx_d153_predictor_16x16_c ++ ++void vpx_d153_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d153_predictor_32x32 vpx_d153_predictor_32x32_c ++ ++void vpx_d153_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d153_predictor_4x4 vpx_d153_predictor_4x4_c ++ ++void vpx_d153_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d153_predictor_8x8 vpx_d153_predictor_8x8_c ++ ++void vpx_d207_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d207_predictor_16x16 vpx_d207_predictor_16x16_c ++ ++void vpx_d207_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d207_predictor_32x32 vpx_d207_predictor_32x32_c ++ ++void vpx_d207_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d207_predictor_4x4 vpx_d207_predictor_4x4_c ++ ++void vpx_d207_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d207_predictor_8x8 vpx_d207_predictor_8x8_c ++ ++void vpx_d45_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d45_predictor_16x16 vpx_d45_predictor_16x16_c ++ ++void vpx_d45_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d45_predictor_32x32 vpx_d45_predictor_32x32_c ++ ++void vpx_d45_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d45_predictor_4x4 vpx_d45_predictor_4x4_c ++ ++void vpx_d45_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d45_predictor_8x8 vpx_d45_predictor_8x8_c ++ ++void vpx_d45e_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d45e_predictor_4x4 vpx_d45e_predictor_4x4_c ++ ++void vpx_d63_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d63_predictor_16x16 vpx_d63_predictor_16x16_c ++ ++void vpx_d63_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define 
vpx_d63_predictor_32x32 vpx_d63_predictor_32x32_c ++ ++void vpx_d63_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d63_predictor_4x4 vpx_d63_predictor_4x4_c ++ ++void vpx_d63_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d63_predictor_8x8 vpx_d63_predictor_8x8_c ++ ++void vpx_d63e_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d63e_predictor_4x4 vpx_d63e_predictor_4x4_c ++ ++void vpx_dc_128_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_128_predictor_16x16 vpx_dc_128_predictor_16x16_c ++ ++void vpx_dc_128_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_128_predictor_32x32 vpx_dc_128_predictor_32x32_c ++ ++void vpx_dc_128_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_128_predictor_4x4 vpx_dc_128_predictor_4x4_c ++ ++void vpx_dc_128_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_128_predictor_8x8 vpx_dc_128_predictor_8x8_c ++ ++void vpx_dc_left_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_left_predictor_16x16 vpx_dc_left_predictor_16x16_c ++ ++void vpx_dc_left_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_left_predictor_32x32 vpx_dc_left_predictor_32x32_c ++ ++void vpx_dc_left_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_left_predictor_4x4 vpx_dc_left_predictor_4x4_c ++ ++void vpx_dc_left_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_left_predictor_8x8 vpx_dc_left_predictor_8x8_c ++ ++void vpx_dc_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_predictor_16x16 vpx_dc_predictor_16x16_c ++ ++void vpx_dc_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_predictor_32x32 vpx_dc_predictor_32x32_c ++ ++void vpx_dc_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_predictor_4x4 vpx_dc_predictor_4x4_c ++ ++void vpx_dc_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_predictor_8x8 vpx_dc_predictor_8x8_c ++ ++void vpx_dc_top_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_top_predictor_16x16 vpx_dc_top_predictor_16x16_c ++ ++void vpx_dc_top_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_top_predictor_32x32 vpx_dc_top_predictor_32x32_c ++ ++void vpx_dc_top_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_top_predictor_4x4 vpx_dc_top_predictor_4x4_c ++ ++void vpx_dc_top_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_top_predictor_8x8 vpx_dc_top_predictor_8x8_c ++ ++void 
vpx_fdct16x16_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_fdct16x16 vpx_fdct16x16_c ++ ++void vpx_fdct16x16_1_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_fdct16x16_1 vpx_fdct16x16_1_c ++ ++void vpx_fdct32x32_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_fdct32x32 vpx_fdct32x32_c ++ ++void vpx_fdct32x32_1_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_fdct32x32_1 vpx_fdct32x32_1_c ++ ++void vpx_fdct32x32_rd_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_fdct32x32_rd vpx_fdct32x32_rd_c ++ ++void vpx_fdct4x4_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_fdct4x4 vpx_fdct4x4_c ++ ++void vpx_fdct4x4_1_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_fdct4x4_1 vpx_fdct4x4_1_c ++ ++void vpx_fdct8x8_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_fdct8x8 vpx_fdct8x8_c ++ ++void vpx_fdct8x8_1_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_fdct8x8_1 vpx_fdct8x8_1_c ++ ++void vpx_get16x16var_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse, ++ int* sum); ++#define vpx_get16x16var vpx_get16x16var_c ++ ++unsigned int vpx_get4x4sse_cs_c(const unsigned char* src_ptr, ++ int src_stride, ++ const unsigned char* ref_ptr, ++ int ref_stride); ++#define vpx_get4x4sse_cs vpx_get4x4sse_cs_c ++ ++void vpx_get8x8var_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse, ++ int* sum); ++#define vpx_get8x8var vpx_get8x8var_c ++ ++unsigned int vpx_get_mb_ss_c(const int16_t*); ++#define vpx_get_mb_ss vpx_get_mb_ss_c ++ ++void vpx_h_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_h_predictor_16x16 vpx_h_predictor_16x16_c ++ ++void vpx_h_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_h_predictor_32x32 vpx_h_predictor_32x32_c ++ ++void vpx_h_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_h_predictor_4x4 vpx_h_predictor_4x4_c ++ ++void vpx_h_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_h_predictor_8x8 vpx_h_predictor_8x8_c ++ ++void vpx_hadamard_16x16_c(const int16_t* src_diff, ++ ptrdiff_t src_stride, ++ tran_low_t* coeff); ++#define vpx_hadamard_16x16 vpx_hadamard_16x16_c ++ ++void vpx_hadamard_32x32_c(const int16_t* src_diff, ++ ptrdiff_t src_stride, ++ tran_low_t* coeff); ++#define vpx_hadamard_32x32 vpx_hadamard_32x32_c ++ ++void vpx_hadamard_8x8_c(const int16_t* src_diff, ++ ptrdiff_t src_stride, ++ tran_low_t* coeff); ++#define vpx_hadamard_8x8 vpx_hadamard_8x8_c ++ ++void vpx_he_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_he_predictor_4x4 vpx_he_predictor_4x4_c ++ ++void vpx_highbd_10_get16x16var_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse, ++ int* sum); ++#define vpx_highbd_10_get16x16var vpx_highbd_10_get16x16var_c ++ ++void vpx_highbd_10_get8x8var_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse, ++ int* sum); ++#define vpx_highbd_10_get8x8var vpx_highbd_10_get8x8var_c ++ ++unsigned int 
vpx_highbd_10_mse16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_mse16x16 vpx_highbd_10_mse16x16_c ++ ++unsigned int vpx_highbd_10_mse16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_mse16x8 vpx_highbd_10_mse16x8_c ++ ++unsigned int vpx_highbd_10_mse8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_mse8x16 vpx_highbd_10_mse8x16_c ++ ++unsigned int vpx_highbd_10_mse8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_mse8x8 vpx_highbd_10_mse8x8_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance16x16 \ ++ vpx_highbd_10_sub_pixel_avg_variance16x16_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance16x32 \ ++ vpx_highbd_10_sub_pixel_avg_variance16x32_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance16x8 \ ++ vpx_highbd_10_sub_pixel_avg_variance16x8_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance32x16 \ ++ vpx_highbd_10_sub_pixel_avg_variance32x16_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance32x32 \ ++ vpx_highbd_10_sub_pixel_avg_variance32x32_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance32x64 \ ++ vpx_highbd_10_sub_pixel_avg_variance32x64_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance4x4 \ ++ vpx_highbd_10_sub_pixel_avg_variance4x4_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance4x8 \ ++ vpx_highbd_10_sub_pixel_avg_variance4x8_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_c( ++ const 
uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance64x32 \ ++ vpx_highbd_10_sub_pixel_avg_variance64x32_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance64x64 \ ++ vpx_highbd_10_sub_pixel_avg_variance64x64_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance8x16 \ ++ vpx_highbd_10_sub_pixel_avg_variance8x16_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance8x4 \ ++ vpx_highbd_10_sub_pixel_avg_variance8x4_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance8x8 \ ++ vpx_highbd_10_sub_pixel_avg_variance8x8_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance16x16 \ ++ vpx_highbd_10_sub_pixel_variance16x16_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance16x32 \ ++ vpx_highbd_10_sub_pixel_variance16x32_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance16x8 \ ++ vpx_highbd_10_sub_pixel_variance16x8_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance32x16 \ ++ vpx_highbd_10_sub_pixel_variance32x16_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance32x32 \ ++ vpx_highbd_10_sub_pixel_variance32x32_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance32x64 \ ++ vpx_highbd_10_sub_pixel_variance32x64_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define 
vpx_highbd_10_sub_pixel_variance4x4 \ ++ vpx_highbd_10_sub_pixel_variance4x4_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance4x8 \ ++ vpx_highbd_10_sub_pixel_variance4x8_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance64x32 \ ++ vpx_highbd_10_sub_pixel_variance64x32_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance64x64 \ ++ vpx_highbd_10_sub_pixel_variance64x64_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance8x16 \ ++ vpx_highbd_10_sub_pixel_variance8x16_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance8x4 \ ++ vpx_highbd_10_sub_pixel_variance8x4_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance8x8 \ ++ vpx_highbd_10_sub_pixel_variance8x8_c ++ ++unsigned int vpx_highbd_10_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance16x16 vpx_highbd_10_variance16x16_c ++ ++unsigned int vpx_highbd_10_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance16x32 vpx_highbd_10_variance16x32_c ++ ++unsigned int vpx_highbd_10_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance16x8 vpx_highbd_10_variance16x8_c ++ ++unsigned int vpx_highbd_10_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance32x16 vpx_highbd_10_variance32x16_c ++ ++unsigned int vpx_highbd_10_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance32x32 vpx_highbd_10_variance32x32_c ++ ++unsigned int vpx_highbd_10_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance32x64 vpx_highbd_10_variance32x64_c ++ ++unsigned int vpx_highbd_10_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance4x4 vpx_highbd_10_variance4x4_c ++ ++unsigned int vpx_highbd_10_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define 
vpx_highbd_10_variance4x8 vpx_highbd_10_variance4x8_c ++ ++unsigned int vpx_highbd_10_variance64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance64x32 vpx_highbd_10_variance64x32_c ++ ++unsigned int vpx_highbd_10_variance64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance64x64 vpx_highbd_10_variance64x64_c ++ ++unsigned int vpx_highbd_10_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance8x16 vpx_highbd_10_variance8x16_c ++ ++unsigned int vpx_highbd_10_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance8x4 vpx_highbd_10_variance8x4_c ++ ++unsigned int vpx_highbd_10_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance8x8 vpx_highbd_10_variance8x8_c ++ ++void vpx_highbd_12_get16x16var_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse, ++ int* sum); ++#define vpx_highbd_12_get16x16var vpx_highbd_12_get16x16var_c ++ ++void vpx_highbd_12_get8x8var_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse, ++ int* sum); ++#define vpx_highbd_12_get8x8var vpx_highbd_12_get8x8var_c ++ ++unsigned int vpx_highbd_12_mse16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_mse16x16 vpx_highbd_12_mse16x16_c ++ ++unsigned int vpx_highbd_12_mse16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_mse16x8 vpx_highbd_12_mse16x8_c ++ ++unsigned int vpx_highbd_12_mse8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_mse8x16 vpx_highbd_12_mse8x16_c ++ ++unsigned int vpx_highbd_12_mse8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_mse8x8 vpx_highbd_12_mse8x8_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance16x16 \ ++ vpx_highbd_12_sub_pixel_avg_variance16x16_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance16x32 \ ++ vpx_highbd_12_sub_pixel_avg_variance16x32_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance16x8 \ ++ vpx_highbd_12_sub_pixel_avg_variance16x8_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_c( ++ const uint8_t* src_ptr, 
++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance32x16 \ ++ vpx_highbd_12_sub_pixel_avg_variance32x16_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance32x32 \ ++ vpx_highbd_12_sub_pixel_avg_variance32x32_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance32x64 \ ++ vpx_highbd_12_sub_pixel_avg_variance32x64_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance4x4 \ ++ vpx_highbd_12_sub_pixel_avg_variance4x4_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance4x8 \ ++ vpx_highbd_12_sub_pixel_avg_variance4x8_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance64x32 \ ++ vpx_highbd_12_sub_pixel_avg_variance64x32_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance64x64 \ ++ vpx_highbd_12_sub_pixel_avg_variance64x64_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance8x16 \ ++ vpx_highbd_12_sub_pixel_avg_variance8x16_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance8x4 \ ++ vpx_highbd_12_sub_pixel_avg_variance8x4_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance8x8 \ ++ vpx_highbd_12_sub_pixel_avg_variance8x8_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance16x16 \ ++ vpx_highbd_12_sub_pixel_variance16x16_c ++ ++uint32_t 
vpx_highbd_12_sub_pixel_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance16x32 \ ++ vpx_highbd_12_sub_pixel_variance16x32_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance16x8 \ ++ vpx_highbd_12_sub_pixel_variance16x8_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance32x16 \ ++ vpx_highbd_12_sub_pixel_variance32x16_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance32x32 \ ++ vpx_highbd_12_sub_pixel_variance32x32_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance32x64 \ ++ vpx_highbd_12_sub_pixel_variance32x64_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance4x4 \ ++ vpx_highbd_12_sub_pixel_variance4x4_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance4x8 \ ++ vpx_highbd_12_sub_pixel_variance4x8_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance64x32 \ ++ vpx_highbd_12_sub_pixel_variance64x32_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance64x64 \ ++ vpx_highbd_12_sub_pixel_variance64x64_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance8x16 \ ++ vpx_highbd_12_sub_pixel_variance8x16_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance8x4 \ ++ vpx_highbd_12_sub_pixel_variance8x4_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance8x8 \ ++ vpx_highbd_12_sub_pixel_variance8x8_c ++ ++unsigned int vpx_highbd_12_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int 
ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance16x16 vpx_highbd_12_variance16x16_c ++ ++unsigned int vpx_highbd_12_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance16x32 vpx_highbd_12_variance16x32_c ++ ++unsigned int vpx_highbd_12_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance16x8 vpx_highbd_12_variance16x8_c ++ ++unsigned int vpx_highbd_12_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance32x16 vpx_highbd_12_variance32x16_c ++ ++unsigned int vpx_highbd_12_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance32x32 vpx_highbd_12_variance32x32_c ++ ++unsigned int vpx_highbd_12_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance32x64 vpx_highbd_12_variance32x64_c ++ ++unsigned int vpx_highbd_12_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance4x4 vpx_highbd_12_variance4x4_c ++ ++unsigned int vpx_highbd_12_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance4x8 vpx_highbd_12_variance4x8_c ++ ++unsigned int vpx_highbd_12_variance64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance64x32 vpx_highbd_12_variance64x32_c ++ ++unsigned int vpx_highbd_12_variance64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance64x64 vpx_highbd_12_variance64x64_c ++ ++unsigned int vpx_highbd_12_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance8x16 vpx_highbd_12_variance8x16_c ++ ++unsigned int vpx_highbd_12_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance8x4 vpx_highbd_12_variance8x4_c ++ ++unsigned int vpx_highbd_12_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance8x8 vpx_highbd_12_variance8x8_c ++ ++void vpx_highbd_8_get16x16var_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse, ++ int* sum); ++#define vpx_highbd_8_get16x16var vpx_highbd_8_get16x16var_c ++ ++void vpx_highbd_8_get8x8var_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse, ++ int* sum); ++#define vpx_highbd_8_get8x8var vpx_highbd_8_get8x8var_c ++ ++unsigned int vpx_highbd_8_mse16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_mse16x16 vpx_highbd_8_mse16x16_c ++ ++unsigned int vpx_highbd_8_mse16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const 
uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_mse16x8 vpx_highbd_8_mse16x8_c ++ ++unsigned int vpx_highbd_8_mse8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_mse8x16 vpx_highbd_8_mse8x16_c ++ ++unsigned int vpx_highbd_8_mse8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_mse8x8 vpx_highbd_8_mse8x8_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance16x16 \ ++ vpx_highbd_8_sub_pixel_avg_variance16x16_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance16x32 \ ++ vpx_highbd_8_sub_pixel_avg_variance16x32_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance16x8 \ ++ vpx_highbd_8_sub_pixel_avg_variance16x8_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance32x16 \ ++ vpx_highbd_8_sub_pixel_avg_variance32x16_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance32x32 \ ++ vpx_highbd_8_sub_pixel_avg_variance32x32_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance32x64 \ ++ vpx_highbd_8_sub_pixel_avg_variance32x64_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance4x4 \ ++ vpx_highbd_8_sub_pixel_avg_variance4x4_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance4x8 \ ++ vpx_highbd_8_sub_pixel_avg_variance4x8_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance64x32 \ ++ vpx_highbd_8_sub_pixel_avg_variance64x32_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr, ++ 
int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance64x64 \ ++ vpx_highbd_8_sub_pixel_avg_variance64x64_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance8x16 \ ++ vpx_highbd_8_sub_pixel_avg_variance8x16_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance8x4 \ ++ vpx_highbd_8_sub_pixel_avg_variance8x4_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance8x8 \ ++ vpx_highbd_8_sub_pixel_avg_variance8x8_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance16x16 \ ++ vpx_highbd_8_sub_pixel_variance16x16_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance16x32 \ ++ vpx_highbd_8_sub_pixel_variance16x32_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance16x8 \ ++ vpx_highbd_8_sub_pixel_variance16x8_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance32x16 \ ++ vpx_highbd_8_sub_pixel_variance32x16_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance32x32 \ ++ vpx_highbd_8_sub_pixel_variance32x32_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance32x64 \ ++ vpx_highbd_8_sub_pixel_variance32x64_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance4x4 vpx_highbd_8_sub_pixel_variance4x4_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance4x8 vpx_highbd_8_sub_pixel_variance4x8_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance64x32_c(const uint8_t* 
src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance64x32 \ ++ vpx_highbd_8_sub_pixel_variance64x32_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance64x64 \ ++ vpx_highbd_8_sub_pixel_variance64x64_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance8x16 \ ++ vpx_highbd_8_sub_pixel_variance8x16_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance8x4 vpx_highbd_8_sub_pixel_variance8x4_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance8x8 vpx_highbd_8_sub_pixel_variance8x8_c ++ ++unsigned int vpx_highbd_8_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance16x16 vpx_highbd_8_variance16x16_c ++ ++unsigned int vpx_highbd_8_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance16x32 vpx_highbd_8_variance16x32_c ++ ++unsigned int vpx_highbd_8_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance16x8 vpx_highbd_8_variance16x8_c ++ ++unsigned int vpx_highbd_8_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance32x16 vpx_highbd_8_variance32x16_c ++ ++unsigned int vpx_highbd_8_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance32x32 vpx_highbd_8_variance32x32_c ++ ++unsigned int vpx_highbd_8_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance32x64 vpx_highbd_8_variance32x64_c ++ ++unsigned int vpx_highbd_8_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance4x4 vpx_highbd_8_variance4x4_c ++ ++unsigned int vpx_highbd_8_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance4x8 vpx_highbd_8_variance4x8_c ++ ++unsigned int vpx_highbd_8_variance64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance64x32 vpx_highbd_8_variance64x32_c ++ ++unsigned int vpx_highbd_8_variance64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance64x64 
vpx_highbd_8_variance64x64_c ++ ++unsigned int vpx_highbd_8_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance8x16 vpx_highbd_8_variance8x16_c ++ ++unsigned int vpx_highbd_8_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance8x4 vpx_highbd_8_variance8x4_c ++ ++unsigned int vpx_highbd_8_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance8x8 vpx_highbd_8_variance8x8_c ++ ++unsigned int vpx_highbd_avg_4x4_c(const uint8_t* s8, int p); ++#define vpx_highbd_avg_4x4 vpx_highbd_avg_4x4_c ++ ++unsigned int vpx_highbd_avg_8x8_c(const uint8_t* s8, int p); ++#define vpx_highbd_avg_8x8 vpx_highbd_avg_8x8_c ++ ++void vpx_highbd_comp_avg_pred_c(uint16_t* comp_pred, ++ const uint16_t* pred, ++ int width, ++ int height, ++ const uint16_t* ref, ++ int ref_stride); ++#define vpx_highbd_comp_avg_pred vpx_highbd_comp_avg_pred_c ++ ++void vpx_highbd_convolve8_c(const uint16_t* src, ++ ptrdiff_t src_stride, ++ uint16_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h, ++ int bd); ++#define vpx_highbd_convolve8 vpx_highbd_convolve8_c ++ ++void vpx_highbd_convolve8_avg_c(const uint16_t* src, ++ ptrdiff_t src_stride, ++ uint16_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h, ++ int bd); ++#define vpx_highbd_convolve8_avg vpx_highbd_convolve8_avg_c ++ ++void vpx_highbd_convolve8_avg_horiz_c(const uint16_t* src, ++ ptrdiff_t src_stride, ++ uint16_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h, ++ int bd); ++#define vpx_highbd_convolve8_avg_horiz vpx_highbd_convolve8_avg_horiz_c ++ ++void vpx_highbd_convolve8_avg_vert_c(const uint16_t* src, ++ ptrdiff_t src_stride, ++ uint16_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h, ++ int bd); ++#define vpx_highbd_convolve8_avg_vert vpx_highbd_convolve8_avg_vert_c ++ ++void vpx_highbd_convolve8_horiz_c(const uint16_t* src, ++ ptrdiff_t src_stride, ++ uint16_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h, ++ int bd); ++#define vpx_highbd_convolve8_horiz vpx_highbd_convolve8_horiz_c ++ ++void vpx_highbd_convolve8_vert_c(const uint16_t* src, ++ ptrdiff_t src_stride, ++ uint16_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h, ++ int bd); ++#define vpx_highbd_convolve8_vert vpx_highbd_convolve8_vert_c ++ ++void vpx_highbd_convolve_avg_c(const uint16_t* src, ++ ptrdiff_t src_stride, ++ uint16_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h, ++ int bd); ++#define vpx_highbd_convolve_avg vpx_highbd_convolve_avg_c ++ ++void vpx_highbd_convolve_copy_c(const uint16_t* src, ++ ptrdiff_t src_stride, ++ uint16_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int 
x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h, ++ int bd); ++#define vpx_highbd_convolve_copy vpx_highbd_convolve_copy_c ++ ++void vpx_highbd_d117_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d117_predictor_16x16 vpx_highbd_d117_predictor_16x16_c ++ ++void vpx_highbd_d117_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d117_predictor_32x32 vpx_highbd_d117_predictor_32x32_c ++ ++void vpx_highbd_d117_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d117_predictor_4x4 vpx_highbd_d117_predictor_4x4_c ++ ++void vpx_highbd_d117_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d117_predictor_8x8 vpx_highbd_d117_predictor_8x8_c ++ ++void vpx_highbd_d135_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d135_predictor_16x16 vpx_highbd_d135_predictor_16x16_c ++ ++void vpx_highbd_d135_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d135_predictor_32x32 vpx_highbd_d135_predictor_32x32_c ++ ++void vpx_highbd_d135_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d135_predictor_4x4 vpx_highbd_d135_predictor_4x4_c ++ ++void vpx_highbd_d135_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d135_predictor_8x8 vpx_highbd_d135_predictor_8x8_c ++ ++void vpx_highbd_d153_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d153_predictor_16x16 vpx_highbd_d153_predictor_16x16_c ++ ++void vpx_highbd_d153_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d153_predictor_32x32 vpx_highbd_d153_predictor_32x32_c ++ ++void vpx_highbd_d153_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d153_predictor_4x4 vpx_highbd_d153_predictor_4x4_c ++ ++void vpx_highbd_d153_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d153_predictor_8x8 vpx_highbd_d153_predictor_8x8_c ++ ++void vpx_highbd_d207_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d207_predictor_16x16 vpx_highbd_d207_predictor_16x16_c ++ ++void vpx_highbd_d207_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d207_predictor_32x32 vpx_highbd_d207_predictor_32x32_c ++ ++void vpx_highbd_d207_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d207_predictor_4x4 vpx_highbd_d207_predictor_4x4_c ++ ++void vpx_highbd_d207_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define 
vpx_highbd_d207_predictor_8x8 vpx_highbd_d207_predictor_8x8_c ++ ++void vpx_highbd_d45_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d45_predictor_16x16 vpx_highbd_d45_predictor_16x16_c ++ ++void vpx_highbd_d45_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d45_predictor_32x32 vpx_highbd_d45_predictor_32x32_c ++ ++void vpx_highbd_d45_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d45_predictor_4x4 vpx_highbd_d45_predictor_4x4_c ++ ++void vpx_highbd_d45_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d45_predictor_8x8 vpx_highbd_d45_predictor_8x8_c ++ ++void vpx_highbd_d63_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d63_predictor_16x16 vpx_highbd_d63_predictor_16x16_c ++ ++void vpx_highbd_d63_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d63_predictor_32x32 vpx_highbd_d63_predictor_32x32_c ++ ++void vpx_highbd_d63_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d63_predictor_4x4 vpx_highbd_d63_predictor_4x4_c ++ ++void vpx_highbd_d63_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d63_predictor_8x8 vpx_highbd_d63_predictor_8x8_c ++ ++void vpx_highbd_dc_128_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_128_predictor_16x16 vpx_highbd_dc_128_predictor_16x16_c ++ ++void vpx_highbd_dc_128_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_128_predictor_32x32 vpx_highbd_dc_128_predictor_32x32_c ++ ++void vpx_highbd_dc_128_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_128_predictor_4x4 vpx_highbd_dc_128_predictor_4x4_c ++ ++void vpx_highbd_dc_128_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_128_predictor_8x8 vpx_highbd_dc_128_predictor_8x8_c ++ ++void vpx_highbd_dc_left_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_left_predictor_16x16 vpx_highbd_dc_left_predictor_16x16_c ++ ++void vpx_highbd_dc_left_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_left_predictor_32x32 vpx_highbd_dc_left_predictor_32x32_c ++ ++void vpx_highbd_dc_left_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_left_predictor_4x4 vpx_highbd_dc_left_predictor_4x4_c ++ ++void vpx_highbd_dc_left_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_left_predictor_8x8 vpx_highbd_dc_left_predictor_8x8_c 
++ ++void vpx_highbd_dc_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_predictor_16x16 vpx_highbd_dc_predictor_16x16_c ++ ++void vpx_highbd_dc_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_predictor_32x32 vpx_highbd_dc_predictor_32x32_c ++ ++void vpx_highbd_dc_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_predictor_4x4 vpx_highbd_dc_predictor_4x4_c ++ ++void vpx_highbd_dc_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_predictor_8x8 vpx_highbd_dc_predictor_8x8_c ++ ++void vpx_highbd_dc_top_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_top_predictor_16x16 vpx_highbd_dc_top_predictor_16x16_c ++ ++void vpx_highbd_dc_top_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_top_predictor_32x32 vpx_highbd_dc_top_predictor_32x32_c ++ ++void vpx_highbd_dc_top_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_top_predictor_4x4 vpx_highbd_dc_top_predictor_4x4_c ++ ++void vpx_highbd_dc_top_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_top_predictor_8x8 vpx_highbd_dc_top_predictor_8x8_c ++ ++void vpx_highbd_fdct16x16_c(const int16_t* input, ++ tran_low_t* output, ++ int stride); ++#define vpx_highbd_fdct16x16 vpx_highbd_fdct16x16_c ++ ++void vpx_highbd_fdct16x16_1_c(const int16_t* input, ++ tran_low_t* output, ++ int stride); ++#define vpx_highbd_fdct16x16_1 vpx_highbd_fdct16x16_1_c ++ ++void vpx_highbd_fdct32x32_c(const int16_t* input, ++ tran_low_t* output, ++ int stride); ++#define vpx_highbd_fdct32x32 vpx_highbd_fdct32x32_c ++ ++void vpx_highbd_fdct32x32_1_c(const int16_t* input, ++ tran_low_t* output, ++ int stride); ++#define vpx_highbd_fdct32x32_1 vpx_highbd_fdct32x32_1_c ++ ++void vpx_highbd_fdct32x32_rd_c(const int16_t* input, ++ tran_low_t* output, ++ int stride); ++#define vpx_highbd_fdct32x32_rd vpx_highbd_fdct32x32_rd_c ++ ++void vpx_highbd_fdct4x4_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_highbd_fdct4x4 vpx_highbd_fdct4x4_c ++ ++void vpx_highbd_fdct8x8_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_highbd_fdct8x8 vpx_highbd_fdct8x8_c ++ ++void vpx_highbd_fdct8x8_1_c(const int16_t* input, ++ tran_low_t* output, ++ int stride); ++#define vpx_highbd_fdct8x8_1 vpx_highbd_fdct8x8_1_c ++ ++void vpx_highbd_h_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_h_predictor_16x16 vpx_highbd_h_predictor_16x16_c ++ ++void vpx_highbd_h_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_h_predictor_32x32 vpx_highbd_h_predictor_32x32_c ++ ++void vpx_highbd_h_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_h_predictor_4x4 vpx_highbd_h_predictor_4x4_c ++ ++void 
vpx_highbd_h_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_h_predictor_8x8 vpx_highbd_h_predictor_8x8_c ++ ++void vpx_highbd_hadamard_16x16_c(const int16_t* src_diff, ++ ptrdiff_t src_stride, ++ tran_low_t* coeff); ++#define vpx_highbd_hadamard_16x16 vpx_highbd_hadamard_16x16_c ++ ++void vpx_highbd_hadamard_32x32_c(const int16_t* src_diff, ++ ptrdiff_t src_stride, ++ tran_low_t* coeff); ++#define vpx_highbd_hadamard_32x32 vpx_highbd_hadamard_32x32_c ++ ++void vpx_highbd_hadamard_8x8_c(const int16_t* src_diff, ++ ptrdiff_t src_stride, ++ tran_low_t* coeff); ++#define vpx_highbd_hadamard_8x8 vpx_highbd_hadamard_8x8_c ++ ++void vpx_highbd_idct16x16_10_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct16x16_10_add vpx_highbd_idct16x16_10_add_c ++ ++void vpx_highbd_idct16x16_1_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct16x16_1_add vpx_highbd_idct16x16_1_add_c ++ ++void vpx_highbd_idct16x16_256_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct16x16_256_add vpx_highbd_idct16x16_256_add_c ++ ++void vpx_highbd_idct16x16_38_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct16x16_38_add vpx_highbd_idct16x16_38_add_c ++ ++void vpx_highbd_idct32x32_1024_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct32x32_1024_add vpx_highbd_idct32x32_1024_add_c ++ ++void vpx_highbd_idct32x32_135_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct32x32_135_add vpx_highbd_idct32x32_135_add_c ++ ++void vpx_highbd_idct32x32_1_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct32x32_1_add vpx_highbd_idct32x32_1_add_c ++ ++void vpx_highbd_idct32x32_34_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct32x32_34_add vpx_highbd_idct32x32_34_add_c ++ ++void vpx_highbd_idct4x4_16_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct4x4_16_add vpx_highbd_idct4x4_16_add_c ++ ++void vpx_highbd_idct4x4_1_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct4x4_1_add vpx_highbd_idct4x4_1_add_c ++ ++void vpx_highbd_idct8x8_12_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct8x8_12_add vpx_highbd_idct8x8_12_add_c ++ ++void vpx_highbd_idct8x8_1_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct8x8_1_add vpx_highbd_idct8x8_1_add_c ++ ++void vpx_highbd_idct8x8_64_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct8x8_64_add vpx_highbd_idct8x8_64_add_c ++ ++void vpx_highbd_iwht4x4_16_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_iwht4x4_16_add vpx_highbd_iwht4x4_16_add_c ++ ++void vpx_highbd_iwht4x4_1_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_iwht4x4_1_add vpx_highbd_iwht4x4_1_add_c ++ ++void vpx_highbd_lpf_horizontal_16_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh, ++ int bd); ++#define vpx_highbd_lpf_horizontal_16 
vpx_highbd_lpf_horizontal_16_c ++ ++void vpx_highbd_lpf_horizontal_16_dual_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh, ++ int bd); ++#define vpx_highbd_lpf_horizontal_16_dual vpx_highbd_lpf_horizontal_16_dual_c ++ ++void vpx_highbd_lpf_horizontal_4_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh, ++ int bd); ++#define vpx_highbd_lpf_horizontal_4 vpx_highbd_lpf_horizontal_4_c ++ ++void vpx_highbd_lpf_horizontal_4_dual_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit0, ++ const uint8_t* limit0, ++ const uint8_t* thresh0, ++ const uint8_t* blimit1, ++ const uint8_t* limit1, ++ const uint8_t* thresh1, ++ int bd); ++#define vpx_highbd_lpf_horizontal_4_dual vpx_highbd_lpf_horizontal_4_dual_c ++ ++void vpx_highbd_lpf_horizontal_8_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh, ++ int bd); ++#define vpx_highbd_lpf_horizontal_8 vpx_highbd_lpf_horizontal_8_c ++ ++void vpx_highbd_lpf_horizontal_8_dual_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit0, ++ const uint8_t* limit0, ++ const uint8_t* thresh0, ++ const uint8_t* blimit1, ++ const uint8_t* limit1, ++ const uint8_t* thresh1, ++ int bd); ++#define vpx_highbd_lpf_horizontal_8_dual vpx_highbd_lpf_horizontal_8_dual_c ++ ++void vpx_highbd_lpf_vertical_16_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh, ++ int bd); ++#define vpx_highbd_lpf_vertical_16 vpx_highbd_lpf_vertical_16_c ++ ++void vpx_highbd_lpf_vertical_16_dual_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh, ++ int bd); ++#define vpx_highbd_lpf_vertical_16_dual vpx_highbd_lpf_vertical_16_dual_c ++ ++void vpx_highbd_lpf_vertical_4_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh, ++ int bd); ++#define vpx_highbd_lpf_vertical_4 vpx_highbd_lpf_vertical_4_c ++ ++void vpx_highbd_lpf_vertical_4_dual_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit0, ++ const uint8_t* limit0, ++ const uint8_t* thresh0, ++ const uint8_t* blimit1, ++ const uint8_t* limit1, ++ const uint8_t* thresh1, ++ int bd); ++#define vpx_highbd_lpf_vertical_4_dual vpx_highbd_lpf_vertical_4_dual_c ++ ++void vpx_highbd_lpf_vertical_8_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh, ++ int bd); ++#define vpx_highbd_lpf_vertical_8 vpx_highbd_lpf_vertical_8_c ++ ++void vpx_highbd_lpf_vertical_8_dual_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit0, ++ const uint8_t* limit0, ++ const uint8_t* thresh0, ++ const uint8_t* blimit1, ++ const uint8_t* limit1, ++ const uint8_t* thresh1, ++ int bd); ++#define vpx_highbd_lpf_vertical_8_dual vpx_highbd_lpf_vertical_8_dual_c ++ ++void vpx_highbd_minmax_8x8_c(const uint8_t* s8, ++ int p, ++ const uint8_t* d8, ++ int dp, ++ int* min, ++ int* max); ++#define vpx_highbd_minmax_8x8 vpx_highbd_minmax_8x8_c ++ ++void vpx_highbd_quantize_b_c(const tran_low_t* coeff_ptr, ++ intptr_t n_coeffs, ++ int skip_block, ++ const int16_t* zbin_ptr, ++ const int16_t* round_ptr, ++ const int16_t* quant_ptr, ++ const int16_t* quant_shift_ptr, ++ tran_low_t* qcoeff_ptr, ++ tran_low_t* dqcoeff_ptr, ++ const int16_t* dequant_ptr, ++ uint16_t* eob_ptr, ++ const int16_t* scan, ++ const int16_t* iscan); ++#define vpx_highbd_quantize_b vpx_highbd_quantize_b_c ++ ++void vpx_highbd_quantize_b_32x32_c(const 
tran_low_t* coeff_ptr, ++ intptr_t n_coeffs, ++ int skip_block, ++ const int16_t* zbin_ptr, ++ const int16_t* round_ptr, ++ const int16_t* quant_ptr, ++ const int16_t* quant_shift_ptr, ++ tran_low_t* qcoeff_ptr, ++ tran_low_t* dqcoeff_ptr, ++ const int16_t* dequant_ptr, ++ uint16_t* eob_ptr, ++ const int16_t* scan, ++ const int16_t* iscan); ++#define vpx_highbd_quantize_b_32x32 vpx_highbd_quantize_b_32x32_c ++ ++unsigned int vpx_highbd_sad16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad16x16 vpx_highbd_sad16x16_c ++ ++unsigned int vpx_highbd_sad16x16_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad16x16_avg vpx_highbd_sad16x16_avg_c ++ ++void vpx_highbd_sad16x16x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_highbd_sad16x16x4d vpx_highbd_sad16x16x4d_c ++ ++unsigned int vpx_highbd_sad16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad16x32 vpx_highbd_sad16x32_c ++ ++unsigned int vpx_highbd_sad16x32_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad16x32_avg vpx_highbd_sad16x32_avg_c ++ ++void vpx_highbd_sad16x32x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_highbd_sad16x32x4d vpx_highbd_sad16x32x4d_c ++ ++unsigned int vpx_highbd_sad16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad16x8 vpx_highbd_sad16x8_c ++ ++unsigned int vpx_highbd_sad16x8_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad16x8_avg vpx_highbd_sad16x8_avg_c ++ ++void vpx_highbd_sad16x8x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_highbd_sad16x8x4d vpx_highbd_sad16x8x4d_c ++ ++unsigned int vpx_highbd_sad32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad32x16 vpx_highbd_sad32x16_c ++ ++unsigned int vpx_highbd_sad32x16_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad32x16_avg vpx_highbd_sad32x16_avg_c ++ ++void vpx_highbd_sad32x16x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_highbd_sad32x16x4d vpx_highbd_sad32x16x4d_c ++ ++unsigned int vpx_highbd_sad32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad32x32 vpx_highbd_sad32x32_c ++ ++unsigned int vpx_highbd_sad32x32_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad32x32_avg vpx_highbd_sad32x32_avg_c ++ ++void vpx_highbd_sad32x32x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_highbd_sad32x32x4d vpx_highbd_sad32x32x4d_c ++ ++unsigned int 
vpx_highbd_sad32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad32x64 vpx_highbd_sad32x64_c ++ ++unsigned int vpx_highbd_sad32x64_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad32x64_avg vpx_highbd_sad32x64_avg_c ++ ++void vpx_highbd_sad32x64x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_highbd_sad32x64x4d vpx_highbd_sad32x64x4d_c ++ ++unsigned int vpx_highbd_sad4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad4x4 vpx_highbd_sad4x4_c ++ ++unsigned int vpx_highbd_sad4x4_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad4x4_avg vpx_highbd_sad4x4_avg_c ++ ++void vpx_highbd_sad4x4x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_highbd_sad4x4x4d vpx_highbd_sad4x4x4d_c ++ ++unsigned int vpx_highbd_sad4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad4x8 vpx_highbd_sad4x8_c ++ ++unsigned int vpx_highbd_sad4x8_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad4x8_avg vpx_highbd_sad4x8_avg_c ++ ++void vpx_highbd_sad4x8x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_highbd_sad4x8x4d vpx_highbd_sad4x8x4d_c ++ ++unsigned int vpx_highbd_sad64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad64x32 vpx_highbd_sad64x32_c ++ ++unsigned int vpx_highbd_sad64x32_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad64x32_avg vpx_highbd_sad64x32_avg_c ++ ++void vpx_highbd_sad64x32x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_highbd_sad64x32x4d vpx_highbd_sad64x32x4d_c ++ ++unsigned int vpx_highbd_sad64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad64x64 vpx_highbd_sad64x64_c ++ ++unsigned int vpx_highbd_sad64x64_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad64x64_avg vpx_highbd_sad64x64_avg_c ++ ++void vpx_highbd_sad64x64x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_highbd_sad64x64x4d vpx_highbd_sad64x64x4d_c ++ ++unsigned int vpx_highbd_sad8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad8x16 vpx_highbd_sad8x16_c ++ ++unsigned int vpx_highbd_sad8x16_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad8x16_avg vpx_highbd_sad8x16_avg_c ++ ++void vpx_highbd_sad8x16x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const 
uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_highbd_sad8x16x4d vpx_highbd_sad8x16x4d_c ++ ++unsigned int vpx_highbd_sad8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad8x4 vpx_highbd_sad8x4_c ++ ++unsigned int vpx_highbd_sad8x4_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad8x4_avg vpx_highbd_sad8x4_avg_c ++ ++void vpx_highbd_sad8x4x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_highbd_sad8x4x4d vpx_highbd_sad8x4x4d_c ++ ++unsigned int vpx_highbd_sad8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad8x8 vpx_highbd_sad8x8_c ++ ++unsigned int vpx_highbd_sad8x8_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad8x8_avg vpx_highbd_sad8x8_avg_c ++ ++void vpx_highbd_sad8x8x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_highbd_sad8x8x4d vpx_highbd_sad8x8x4d_c ++ ++int vpx_highbd_satd_c(const tran_low_t* coeff, int length); ++#define vpx_highbd_satd vpx_highbd_satd_c ++ ++void vpx_highbd_subtract_block_c(int rows, ++ int cols, ++ int16_t* diff_ptr, ++ ptrdiff_t diff_stride, ++ const uint8_t* src8_ptr, ++ ptrdiff_t src_stride, ++ const uint8_t* pred8_ptr, ++ ptrdiff_t pred_stride, ++ int bd); ++#define vpx_highbd_subtract_block vpx_highbd_subtract_block_c ++ ++void vpx_highbd_tm_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_tm_predictor_16x16 vpx_highbd_tm_predictor_16x16_c ++ ++void vpx_highbd_tm_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_tm_predictor_32x32 vpx_highbd_tm_predictor_32x32_c ++ ++void vpx_highbd_tm_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_tm_predictor_4x4 vpx_highbd_tm_predictor_4x4_c ++ ++void vpx_highbd_tm_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_tm_predictor_8x8 vpx_highbd_tm_predictor_8x8_c ++ ++void vpx_highbd_v_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_v_predictor_16x16 vpx_highbd_v_predictor_16x16_c ++ ++void vpx_highbd_v_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_v_predictor_32x32 vpx_highbd_v_predictor_32x32_c ++ ++void vpx_highbd_v_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_v_predictor_4x4 vpx_highbd_v_predictor_4x4_c ++ ++void vpx_highbd_v_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_v_predictor_8x8 vpx_highbd_v_predictor_8x8_c ++ ++void vpx_idct16x16_10_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct16x16_10_add vpx_idct16x16_10_add_c ++ 
++void vpx_idct16x16_1_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct16x16_1_add vpx_idct16x16_1_add_c ++ ++void vpx_idct16x16_256_add_c(const tran_low_t* input, ++ uint8_t* dest, ++ int stride); ++#define vpx_idct16x16_256_add vpx_idct16x16_256_add_c ++ ++void vpx_idct16x16_38_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct16x16_38_add vpx_idct16x16_38_add_c ++ ++void vpx_idct32x32_1024_add_c(const tran_low_t* input, ++ uint8_t* dest, ++ int stride); ++#define vpx_idct32x32_1024_add vpx_idct32x32_1024_add_c ++ ++void vpx_idct32x32_135_add_c(const tran_low_t* input, ++ uint8_t* dest, ++ int stride); ++#define vpx_idct32x32_135_add vpx_idct32x32_135_add_c ++ ++void vpx_idct32x32_1_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct32x32_1_add vpx_idct32x32_1_add_c ++ ++void vpx_idct32x32_34_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct32x32_34_add vpx_idct32x32_34_add_c ++ ++void vpx_idct4x4_16_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct4x4_16_add vpx_idct4x4_16_add_c ++ ++void vpx_idct4x4_1_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct4x4_1_add vpx_idct4x4_1_add_c ++ ++void vpx_idct8x8_12_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct8x8_12_add vpx_idct8x8_12_add_c ++ ++void vpx_idct8x8_1_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct8x8_1_add vpx_idct8x8_1_add_c ++ ++void vpx_idct8x8_64_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct8x8_64_add vpx_idct8x8_64_add_c ++ ++int16_t vpx_int_pro_col_c(const uint8_t* ref, const int width); ++#define vpx_int_pro_col vpx_int_pro_col_c ++ ++void vpx_int_pro_row_c(int16_t* hbuf, ++ const uint8_t* ref, ++ const int ref_stride, ++ const int height); ++#define vpx_int_pro_row vpx_int_pro_row_c ++ ++void vpx_iwht4x4_16_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_iwht4x4_16_add vpx_iwht4x4_16_add_c ++ ++void vpx_iwht4x4_1_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_iwht4x4_1_add vpx_iwht4x4_1_add_c ++ ++void vpx_lpf_horizontal_16_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh); ++#define vpx_lpf_horizontal_16 vpx_lpf_horizontal_16_c ++ ++void vpx_lpf_horizontal_16_dual_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh); ++#define vpx_lpf_horizontal_16_dual vpx_lpf_horizontal_16_dual_c ++ ++void vpx_lpf_horizontal_4_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh); ++#define vpx_lpf_horizontal_4 vpx_lpf_horizontal_4_c ++ ++void vpx_lpf_horizontal_4_dual_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit0, ++ const uint8_t* limit0, ++ const uint8_t* thresh0, ++ const uint8_t* blimit1, ++ const uint8_t* limit1, ++ const uint8_t* thresh1); ++#define vpx_lpf_horizontal_4_dual vpx_lpf_horizontal_4_dual_c ++ ++void vpx_lpf_horizontal_8_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh); ++#define vpx_lpf_horizontal_8 vpx_lpf_horizontal_8_c ++ ++void vpx_lpf_horizontal_8_dual_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit0, ++ const uint8_t* limit0, ++ const uint8_t* thresh0, ++ const uint8_t* blimit1, ++ const uint8_t* limit1, ++ const uint8_t* thresh1); ++#define vpx_lpf_horizontal_8_dual 
vpx_lpf_horizontal_8_dual_c ++ ++void vpx_lpf_vertical_16_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh); ++#define vpx_lpf_vertical_16 vpx_lpf_vertical_16_c ++ ++void vpx_lpf_vertical_16_dual_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh); ++#define vpx_lpf_vertical_16_dual vpx_lpf_vertical_16_dual_c ++ ++void vpx_lpf_vertical_4_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh); ++#define vpx_lpf_vertical_4 vpx_lpf_vertical_4_c ++ ++void vpx_lpf_vertical_4_dual_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit0, ++ const uint8_t* limit0, ++ const uint8_t* thresh0, ++ const uint8_t* blimit1, ++ const uint8_t* limit1, ++ const uint8_t* thresh1); ++#define vpx_lpf_vertical_4_dual vpx_lpf_vertical_4_dual_c ++ ++void vpx_lpf_vertical_8_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh); ++#define vpx_lpf_vertical_8 vpx_lpf_vertical_8_c ++ ++void vpx_lpf_vertical_8_dual_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit0, ++ const uint8_t* limit0, ++ const uint8_t* thresh0, ++ const uint8_t* blimit1, ++ const uint8_t* limit1, ++ const uint8_t* thresh1); ++#define vpx_lpf_vertical_8_dual vpx_lpf_vertical_8_dual_c ++ ++void vpx_mbpost_proc_across_ip_c(unsigned char* src, ++ int pitch, ++ int rows, ++ int cols, ++ int flimit); ++#define vpx_mbpost_proc_across_ip vpx_mbpost_proc_across_ip_c ++ ++void vpx_mbpost_proc_down_c(unsigned char* dst, ++ int pitch, ++ int rows, ++ int cols, ++ int flimit); ++#define vpx_mbpost_proc_down vpx_mbpost_proc_down_c ++ ++void vpx_minmax_8x8_c(const uint8_t* s, ++ int p, ++ const uint8_t* d, ++ int dp, ++ int* min, ++ int* max); ++#define vpx_minmax_8x8 vpx_minmax_8x8_c ++ ++unsigned int vpx_mse16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_mse16x16 vpx_mse16x16_c ++ ++unsigned int vpx_mse16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_mse16x8 vpx_mse16x8_c ++ ++unsigned int vpx_mse8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_mse8x16 vpx_mse8x16_c ++ ++unsigned int vpx_mse8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_mse8x8 vpx_mse8x8_c ++ ++void vpx_plane_add_noise_c(uint8_t* start, ++ const int8_t* noise, ++ int blackclamp, ++ int whiteclamp, ++ int width, ++ int height, ++ int pitch); ++#define vpx_plane_add_noise vpx_plane_add_noise_c ++ ++void vpx_post_proc_down_and_across_mb_row_c(unsigned char* src, ++ unsigned char* dst, ++ int src_pitch, ++ int dst_pitch, ++ int cols, ++ unsigned char* flimits, ++ int size); ++#define vpx_post_proc_down_and_across_mb_row \ ++ vpx_post_proc_down_and_across_mb_row_c ++ ++void vpx_quantize_b_c(const tran_low_t* coeff_ptr, ++ intptr_t n_coeffs, ++ int skip_block, ++ const int16_t* zbin_ptr, ++ const int16_t* round_ptr, ++ const int16_t* quant_ptr, ++ const int16_t* quant_shift_ptr, ++ tran_low_t* qcoeff_ptr, ++ tran_low_t* dqcoeff_ptr, ++ const int16_t* dequant_ptr, ++ uint16_t* eob_ptr, ++ const int16_t* scan, ++ const int16_t* iscan); ++#define vpx_quantize_b vpx_quantize_b_c ++ ++void vpx_quantize_b_32x32_c(const tran_low_t* coeff_ptr, ++ intptr_t n_coeffs, 
++ int skip_block, ++ const int16_t* zbin_ptr, ++ const int16_t* round_ptr, ++ const int16_t* quant_ptr, ++ const int16_t* quant_shift_ptr, ++ tran_low_t* qcoeff_ptr, ++ tran_low_t* dqcoeff_ptr, ++ const int16_t* dequant_ptr, ++ uint16_t* eob_ptr, ++ const int16_t* scan, ++ const int16_t* iscan); ++#define vpx_quantize_b_32x32 vpx_quantize_b_32x32_c ++ ++unsigned int vpx_sad16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad16x16 vpx_sad16x16_c ++ ++unsigned int vpx_sad16x16_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad16x16_avg vpx_sad16x16_avg_c ++ ++void vpx_sad16x16x3_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad16x16x3 vpx_sad16x16x3_c ++ ++void vpx_sad16x16x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad16x16x4d vpx_sad16x16x4d_c ++ ++void vpx_sad16x16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad16x16x8 vpx_sad16x16x8_c ++ ++unsigned int vpx_sad16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad16x32 vpx_sad16x32_c ++ ++unsigned int vpx_sad16x32_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad16x32_avg vpx_sad16x32_avg_c ++ ++void vpx_sad16x32x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad16x32x4d vpx_sad16x32x4d_c ++ ++unsigned int vpx_sad16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad16x8 vpx_sad16x8_c ++ ++unsigned int vpx_sad16x8_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad16x8_avg vpx_sad16x8_avg_c ++ ++void vpx_sad16x8x3_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad16x8x3 vpx_sad16x8x3_c ++ ++void vpx_sad16x8x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad16x8x4d vpx_sad16x8x4d_c ++ ++void vpx_sad16x8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad16x8x8 vpx_sad16x8x8_c ++ ++unsigned int vpx_sad32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad32x16 vpx_sad32x16_c ++ ++unsigned int vpx_sad32x16_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad32x16_avg vpx_sad32x16_avg_c ++ ++void vpx_sad32x16x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad32x16x4d vpx_sad32x16x4d_c ++ ++unsigned int vpx_sad32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad32x32 vpx_sad32x32_c ++ ++unsigned int vpx_sad32x32_avg_c(const uint8_t* src_ptr, ++ int src_stride, 
++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad32x32_avg vpx_sad32x32_avg_c ++ ++void vpx_sad32x32x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad32x32x4d vpx_sad32x32x4d_c ++ ++void vpx_sad32x32x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad32x32x8 vpx_sad32x32x8_c ++ ++unsigned int vpx_sad32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad32x64 vpx_sad32x64_c ++ ++unsigned int vpx_sad32x64_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad32x64_avg vpx_sad32x64_avg_c ++ ++void vpx_sad32x64x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad32x64x4d vpx_sad32x64x4d_c ++ ++unsigned int vpx_sad4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad4x4 vpx_sad4x4_c ++ ++unsigned int vpx_sad4x4_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad4x4_avg vpx_sad4x4_avg_c ++ ++void vpx_sad4x4x3_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad4x4x3 vpx_sad4x4x3_c ++ ++void vpx_sad4x4x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad4x4x4d vpx_sad4x4x4d_c ++ ++void vpx_sad4x4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad4x4x8 vpx_sad4x4x8_c ++ ++unsigned int vpx_sad4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad4x8 vpx_sad4x8_c ++ ++unsigned int vpx_sad4x8_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad4x8_avg vpx_sad4x8_avg_c ++ ++void vpx_sad4x8x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad4x8x4d vpx_sad4x8x4d_c ++ ++unsigned int vpx_sad64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad64x32 vpx_sad64x32_c ++ ++unsigned int vpx_sad64x32_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad64x32_avg vpx_sad64x32_avg_c ++ ++void vpx_sad64x32x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad64x32x4d vpx_sad64x32x4d_c ++ ++unsigned int vpx_sad64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad64x64 vpx_sad64x64_c ++ ++unsigned int vpx_sad64x64_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad64x64_avg vpx_sad64x64_avg_c ++ ++void vpx_sad64x64x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int 
ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad64x64x4d vpx_sad64x64x4d_c ++ ++unsigned int vpx_sad8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad8x16 vpx_sad8x16_c ++ ++unsigned int vpx_sad8x16_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad8x16_avg vpx_sad8x16_avg_c ++ ++void vpx_sad8x16x3_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad8x16x3 vpx_sad8x16x3_c ++ ++void vpx_sad8x16x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad8x16x4d vpx_sad8x16x4d_c ++ ++void vpx_sad8x16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad8x16x8 vpx_sad8x16x8_c ++ ++unsigned int vpx_sad8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad8x4 vpx_sad8x4_c ++ ++unsigned int vpx_sad8x4_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad8x4_avg vpx_sad8x4_avg_c ++ ++void vpx_sad8x4x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad8x4x4d vpx_sad8x4x4d_c ++ ++unsigned int vpx_sad8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad8x8 vpx_sad8x8_c ++ ++unsigned int vpx_sad8x8_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad8x8_avg vpx_sad8x8_avg_c ++ ++void vpx_sad8x8x3_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad8x8x3 vpx_sad8x8x3_c ++ ++void vpx_sad8x8x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad8x8x4d vpx_sad8x8x4d_c ++ ++void vpx_sad8x8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad8x8x8 vpx_sad8x8x8_c ++ ++int vpx_satd_c(const tran_low_t* coeff, int length); ++#define vpx_satd vpx_satd_c ++ ++void vpx_scaled_2d_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_scaled_2d vpx_scaled_2d_c ++ ++void vpx_scaled_avg_2d_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_scaled_avg_2d vpx_scaled_avg_2d_c ++ ++void vpx_scaled_avg_horiz_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_scaled_avg_horiz vpx_scaled_avg_horiz_c ++ ++void vpx_scaled_avg_vert_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int 
y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_scaled_avg_vert vpx_scaled_avg_vert_c ++ ++void vpx_scaled_horiz_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_scaled_horiz vpx_scaled_horiz_c ++ ++void vpx_scaled_vert_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_scaled_vert vpx_scaled_vert_c ++ ++uint32_t vpx_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance16x16 vpx_sub_pixel_avg_variance16x16_c ++ ++uint32_t vpx_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance16x32 vpx_sub_pixel_avg_variance16x32_c ++ ++uint32_t vpx_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance16x8 vpx_sub_pixel_avg_variance16x8_c ++ ++uint32_t vpx_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance32x16 vpx_sub_pixel_avg_variance32x16_c ++ ++uint32_t vpx_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance32x32 vpx_sub_pixel_avg_variance32x32_c ++ ++uint32_t vpx_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance32x64 vpx_sub_pixel_avg_variance32x64_c ++ ++uint32_t vpx_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance4x4 vpx_sub_pixel_avg_variance4x4_c ++ ++uint32_t vpx_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance4x8 vpx_sub_pixel_avg_variance4x8_c ++ ++uint32_t vpx_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance64x32 vpx_sub_pixel_avg_variance64x32_c ++ ++uint32_t vpx_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define 
vpx_sub_pixel_avg_variance64x64 vpx_sub_pixel_avg_variance64x64_c ++ ++uint32_t vpx_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance8x16 vpx_sub_pixel_avg_variance8x16_c ++ ++uint32_t vpx_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance8x4 vpx_sub_pixel_avg_variance8x4_c ++ ++uint32_t vpx_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance8x8 vpx_sub_pixel_avg_variance8x8_c ++ ++uint32_t vpx_sub_pixel_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance16x16 vpx_sub_pixel_variance16x16_c ++ ++uint32_t vpx_sub_pixel_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance16x32 vpx_sub_pixel_variance16x32_c ++ ++uint32_t vpx_sub_pixel_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance16x8 vpx_sub_pixel_variance16x8_c ++ ++uint32_t vpx_sub_pixel_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance32x16 vpx_sub_pixel_variance32x16_c ++ ++uint32_t vpx_sub_pixel_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance32x32 vpx_sub_pixel_variance32x32_c ++ ++uint32_t vpx_sub_pixel_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance32x64 vpx_sub_pixel_variance32x64_c ++ ++uint32_t vpx_sub_pixel_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance4x4 vpx_sub_pixel_variance4x4_c ++ ++uint32_t vpx_sub_pixel_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance4x8 vpx_sub_pixel_variance4x8_c ++ ++uint32_t vpx_sub_pixel_variance64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance64x32 vpx_sub_pixel_variance64x32_c ++ ++uint32_t vpx_sub_pixel_variance64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance64x64 vpx_sub_pixel_variance64x64_c ++ ++uint32_t vpx_sub_pixel_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, 
++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance8x16 vpx_sub_pixel_variance8x16_c ++ ++uint32_t vpx_sub_pixel_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance8x4 vpx_sub_pixel_variance8x4_c ++ ++uint32_t vpx_sub_pixel_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance8x8 vpx_sub_pixel_variance8x8_c ++ ++void vpx_subtract_block_c(int rows, ++ int cols, ++ int16_t* diff_ptr, ++ ptrdiff_t diff_stride, ++ const uint8_t* src_ptr, ++ ptrdiff_t src_stride, ++ const uint8_t* pred_ptr, ++ ptrdiff_t pred_stride); ++#define vpx_subtract_block vpx_subtract_block_c ++ ++uint64_t vpx_sum_squares_2d_i16_c(const int16_t* src, int stride, int size); ++#define vpx_sum_squares_2d_i16 vpx_sum_squares_2d_i16_c ++ ++void vpx_tm_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_tm_predictor_16x16 vpx_tm_predictor_16x16_c ++ ++void vpx_tm_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_tm_predictor_32x32 vpx_tm_predictor_32x32_c ++ ++void vpx_tm_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_tm_predictor_4x4 vpx_tm_predictor_4x4_c ++ ++void vpx_tm_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_tm_predictor_8x8 vpx_tm_predictor_8x8_c ++ ++void vpx_v_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_v_predictor_16x16 vpx_v_predictor_16x16_c ++ ++void vpx_v_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_v_predictor_32x32 vpx_v_predictor_32x32_c ++ ++void vpx_v_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_v_predictor_4x4 vpx_v_predictor_4x4_c ++ ++void vpx_v_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_v_predictor_8x8 vpx_v_predictor_8x8_c ++ ++unsigned int vpx_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance16x16 vpx_variance16x16_c ++ ++unsigned int vpx_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance16x32 vpx_variance16x32_c ++ ++unsigned int vpx_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance16x8 vpx_variance16x8_c ++ ++unsigned int vpx_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance32x16 vpx_variance32x16_c ++ ++unsigned int vpx_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance32x32 vpx_variance32x32_c ++ ++unsigned int vpx_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int 
ref_stride, ++ unsigned int* sse); ++#define vpx_variance32x64 vpx_variance32x64_c ++ ++unsigned int vpx_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance4x4 vpx_variance4x4_c ++ ++unsigned int vpx_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance4x8 vpx_variance4x8_c ++ ++unsigned int vpx_variance64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance64x32 vpx_variance64x32_c ++ ++unsigned int vpx_variance64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance64x64 vpx_variance64x64_c ++ ++unsigned int vpx_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance8x16 vpx_variance8x16_c ++ ++unsigned int vpx_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance8x4 vpx_variance8x4_c ++ ++unsigned int vpx_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance8x8 vpx_variance8x8_c ++ ++void vpx_ve_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_ve_predictor_4x4 vpx_ve_predictor_4x4_c ++ ++int vpx_vector_var_c(const int16_t* ref, const int16_t* src, const int bwl); ++#define vpx_vector_var vpx_vector_var_c ++ ++void vpx_dsp_rtcd(void); ++ ++#include "vpx_config.h" ++ ++#ifdef RTCD_C ++static void setup_rtcd_internal(void) {} ++#endif ++ ++#ifdef __cplusplus ++} // extern "C" ++#endif ++ ++#endif +diff --git a/src/3rdparty/chromium/third_party/libvpx/source/config/linux/sw_64/vpx_scale_rtcd.h b/src/3rdparty/chromium/third_party/libvpx/source/config/linux/sw_64/vpx_scale_rtcd.h +new file mode 100644 +index 000000000..c5196db4d +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/libvpx/source/config/linux/sw_64/vpx_scale_rtcd.h +@@ -0,0 +1,96 @@ ++// This file is generated. Do not edit. 
++#ifndef VPX_SCALE_RTCD_H_ ++#define VPX_SCALE_RTCD_H_ ++ ++#ifdef RTCD_C ++#define RTCD_EXTERN ++#else ++#define RTCD_EXTERN extern ++#endif ++ ++struct yv12_buffer_config; ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++void vp8_horizontal_line_2_1_scale_c(const unsigned char* source, ++ unsigned int source_width, ++ unsigned char* dest, ++ unsigned int dest_width); ++#define vp8_horizontal_line_2_1_scale vp8_horizontal_line_2_1_scale_c ++ ++void vp8_horizontal_line_5_3_scale_c(const unsigned char* source, ++ unsigned int source_width, ++ unsigned char* dest, ++ unsigned int dest_width); ++#define vp8_horizontal_line_5_3_scale vp8_horizontal_line_5_3_scale_c ++ ++void vp8_horizontal_line_5_4_scale_c(const unsigned char* source, ++ unsigned int source_width, ++ unsigned char* dest, ++ unsigned int dest_width); ++#define vp8_horizontal_line_5_4_scale vp8_horizontal_line_5_4_scale_c ++ ++void vp8_vertical_band_2_1_scale_c(unsigned char* source, ++ unsigned int src_pitch, ++ unsigned char* dest, ++ unsigned int dest_pitch, ++ unsigned int dest_width); ++#define vp8_vertical_band_2_1_scale vp8_vertical_band_2_1_scale_c ++ ++void vp8_vertical_band_2_1_scale_i_c(unsigned char* source, ++ unsigned int src_pitch, ++ unsigned char* dest, ++ unsigned int dest_pitch, ++ unsigned int dest_width); ++#define vp8_vertical_band_2_1_scale_i vp8_vertical_band_2_1_scale_i_c ++ ++void vp8_vertical_band_5_3_scale_c(unsigned char* source, ++ unsigned int src_pitch, ++ unsigned char* dest, ++ unsigned int dest_pitch, ++ unsigned int dest_width); ++#define vp8_vertical_band_5_3_scale vp8_vertical_band_5_3_scale_c ++ ++void vp8_vertical_band_5_4_scale_c(unsigned char* source, ++ unsigned int src_pitch, ++ unsigned char* dest, ++ unsigned int dest_pitch, ++ unsigned int dest_width); ++#define vp8_vertical_band_5_4_scale vp8_vertical_band_5_4_scale_c ++ ++void vp8_yv12_copy_frame_c(const struct yv12_buffer_config* src_ybc, ++ struct yv12_buffer_config* dst_ybc); ++#define vp8_yv12_copy_frame vp8_yv12_copy_frame_c ++ ++void vp8_yv12_extend_frame_borders_c(struct yv12_buffer_config* ybf); ++#define vp8_yv12_extend_frame_borders vp8_yv12_extend_frame_borders_c ++ ++void vpx_extend_frame_borders_c(struct yv12_buffer_config* ybf); ++#define vpx_extend_frame_borders vpx_extend_frame_borders_c ++ ++void vpx_extend_frame_inner_borders_c(struct yv12_buffer_config* ybf); ++#define vpx_extend_frame_inner_borders vpx_extend_frame_inner_borders_c ++ ++void vpx_yv12_copy_frame_c(const struct yv12_buffer_config* src_ybc, ++ struct yv12_buffer_config* dst_ybc); ++#define vpx_yv12_copy_frame vpx_yv12_copy_frame_c ++ ++void vpx_yv12_copy_y_c(const struct yv12_buffer_config* src_ybc, ++ struct yv12_buffer_config* dst_ybc); ++#define vpx_yv12_copy_y vpx_yv12_copy_y_c ++ ++void vpx_scale_rtcd(void); ++ ++#include "vpx_config.h" ++ ++#ifdef RTCD_C ++static void setup_rtcd_internal(void) {} ++#endif ++ ++#ifdef __cplusplus ++} // extern "C" ++#endif ++ ++#endif +diff --git a/src/3rdparty/chromium/third_party/lss/linux_syscall_support.h b/src/3rdparty/chromium/third_party/lss/linux_syscall_support.h +index d2baee9d2..320fcb3f1 100644 +--- a/src/3rdparty/chromium/third_party/lss/linux_syscall_support.h ++++ b/src/3rdparty/chromium/third_party/lss/linux_syscall_support.h +@@ -88,7 +88,7 @@ + */ + #if (defined(__i386__) || defined(__x86_64__) || defined(__ARM_ARCH_3__) || \ + defined(__mips__) || defined(__PPC__) || defined(__ARM_EABI__) || \ +- defined(__aarch64__) || defined(__s390__)) \ ++ defined(__aarch64__) || defined(__s390__) 
|| defined(__sw_64__)) \ + && (defined(__linux) || defined(__ANDROID__)) + + #ifndef SYS_CPLUSPLUS +@@ -377,6 +377,26 @@ struct kernel_stat64 { + unsigned __pad2; + unsigned long long st_blocks; + }; ++ ++#elif defined(__sw_64__) ++struct kernel_stat { ++ unsigned int st_dev; ++ unsigned int st_ino; ++ unsigned int st_mode; ++ unsigned int st_nlink; ++ unsigned int st_uid; ++ unsigned int st_gid; ++ unsigned int st_rdev; ++ long int st_size; ++ unsigned long int st_atime_; ++ unsigned long int st_mtime_; ++ unsigned long int st_ctime_; ++ unsigned int st_blksize; ++ int st_blocks; ++ unsigned int st_flags; ++ unsigned int st_gen; ++}; ++ + #elif defined __PPC__ + struct kernel_stat64 { + unsigned long long st_dev; +@@ -820,6 +840,42 @@ struct kernel_statfs { + #ifndef FUTEX_TRYLOCK_PI_PRIVATE + #define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG) + #endif ++#if defined(__sw_64__) ++#ifndef __NR_fork ++#define __NR_fork 2 ++#endif ++#ifndef __NR_rt_sigaction ++#define __NR_rt_sigaction 352 ++#define __NR_rt_sigprocmask 353 ++#endif ++#ifndef __NR_stat64 ++#define __NR_stat64 425 ++#endif ++#ifndef __NR_fstat64 ++#define __NR_fstat64 427 ++#endif ++#ifndef __NR_socket ++#define __NR_socket 97 ++#endif ++#ifndef __NR_getdents64 ++#define __NR_getdents64 377 ++#endif ++#ifndef __NR_gettid ++#define __NR_gettid 378 ++#endif ++#ifndef __NR_getxpid ++#define __NR_getxpid 178 ++#endif ++#ifndef __NR_getpid ++#define __NR_getpid __NR_getxpid ++#endif ++#ifndef __NR_futex ++#define __NR_futex 394 ++#endif ++#ifndef __NR_openat ++#define __NR_openat 450 ++#endif ++#endif + + + #if defined(__x86_64__) +@@ -1872,7 +1928,7 @@ struct kernel_statfs { + } \ + return (type) (res); \ + } while (0) +- #elif defined(__mips__) ++ #elif defined(__mips__) || defined(__sw_64__) + /* On MIPS, failing system calls return -1, and set errno in a + * separate CPU register. + */ +@@ -2813,6 +2869,140 @@ struct kernel_statfs { + } + LSS_RETURN(int, __res); + } ++ ++#elif defined(__sw_64__) ++ #undef LSS_REG ++ #define LSS_REG(r,a) register unsigned long __r##r __asm__("$"#r) = \ ++ (unsigned long)(a) ++ #undef LSS_BODY ++ #undef LSS_SYSCALL_CLOBBERS ++ #define LSS_SYSCALL_CLOBBERS "$30", "memory" ++ #define LSS_BODY(type,name,r19,...) 
\ ++ register unsigned long __v0 __asm__("$0") = __NR_##name; \ ++ __asm__ __volatile__ ("sys_call 0x83\n" \ ++ : "=r"(__v0), r19 (__r19) \ ++ : "0"(__v0), ##__VA_ARGS__ \ ++ : LSS_SYSCALL_CLOBBERS); \ ++ LSS_RETURN(type, __v0, __r19) ++ #undef _syscall0 ++ #define _syscall0(type, name) \ ++ type LSS_NAME(name)(void) { \ ++ register unsigned long __r19 __asm__("$19"); \ ++ LSS_BODY(type, name, "=r"); \ ++ } ++ #undef _syscall1 ++ #define _syscall1(type, name, type1, arg1) \ ++ type LSS_NAME(name)(type1 arg1) { \ ++ register unsigned long __r19 __asm__("$19"); \ ++ LSS_REG(16, arg1); LSS_BODY(type, name, "=r", "r"(__r16)); \ ++ } ++ #undef _syscall2 ++ #define _syscall2(type, name, type1, arg1, type2, arg2) \ ++ type LSS_NAME(name)(type1 arg1, type2 arg2) { \ ++ register unsigned long __r19 __asm__("$19"); \ ++ LSS_REG(16, arg1); LSS_REG(17, arg2); \ ++ LSS_BODY(type, name, "=r", "r"(__r16), "r"(__r17)); \ ++ } ++ #undef _syscall3 ++ #define _syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \ ++ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3) { \ ++ register unsigned long __r19 __asm__("$19"); \ ++ LSS_REG(16, arg1); LSS_REG(17, arg2); LSS_REG(18, arg3); \ ++ LSS_BODY(type, name, "=r", "r"(__r16), "r"(__r17), "r"(__r18)); \ ++ } ++ #undef _syscall4 ++ #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ ++ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \ ++ LSS_REG(16, arg1); LSS_REG(17, arg2); LSS_REG(18, arg3); \ ++ LSS_REG(19, arg4); \ ++ LSS_BODY(type, name, "+r", "r"(__r16), "r"(__r17), "r"(__r18)); \ ++ } ++ #undef _syscall5 ++ #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ ++ type5,arg5) \ ++ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ ++ type5 arg5) { \ ++ LSS_REG(16, arg1); LSS_REG(17, arg2); LSS_REG(18, arg3); \ ++ LSS_REG(19, arg4); LSS_REG(20, arg5); \ ++ LSS_BODY(type, name, "+r", "r"(__r16), "r"(__r17), "r"(__r18), \ ++ "r"(__r20)); \ ++ } ++ #undef _syscall6 ++ #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ ++ type5,arg5,type6,arg6) \ ++ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ ++ type5 arg5,type6 arg6) { \ ++ LSS_REG(16, arg1); LSS_REG(17, arg2); LSS_REG(18, arg3); \ ++ LSS_REG(19, arg4); LSS_REG(20, arg5); LSS_REG(21, arg6); \ ++ LSS_BODY(type, name, "+r", "r"(__r16), "r"(__r17), "r"(__r18), \ ++ "r"(__r20), "r"(__r21)); \ ++ } ++ LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack, ++ int flags, void *arg, int *parent_tidptr, ++ void *newtls, int *child_tidptr) { ++ register unsigned long __v0 __asm__("$0") = -EINVAL; ++ register unsigned long __r19 __asm__("$19") = (unsigned long)newtls; ++ { ++ register int __flags __asm__("$16") = flags; ++ register void *__stack __asm__("$17") = child_stack; ++ register void *__ptid __asm__("$18") = parent_tidptr; ++ register int *__ctid __asm__("$20") = child_tidptr; ++ register int (*__func)(void *) __asm__ ("$1") = fn; ++ register void *__args __asm__("$2") = arg; ++ __asm__ __volatile__( ++ "ldi $30,-16($30)\n" ++ /* if (fn == NULL || child_stack == NULL) ++ * return -EINVAL; ++ */ ++ "beq $1,1f\n" //t0=fn ++ "beq $17,1f\n" ++ ++ /* Push "arg" and "fn" onto the stack that will be ++ * used by the child. 
++ */ ++ "ldi $17, -32($17)\n" ++ "stl $1 ,0($17)\n" //fn ++ "stl $2 ,8($17)\n" //arg ++ ++ /* $7 = syscall_clone($4 = flags, ++ * $5 = child_stack, ++ * $6 = parent_tidptr, ++ * $7 = newtls, ++ * $8 = child_tidptr) ++ */ ++ "ldi $0,312($31)\n" ++ "sys_call 0x83\n" ++ ++ /* if ($7 != 0) ++ * return $2; ++ */ ++ "bne $19,1f\n" ++ "bne $0,1f\n" ++ ++ /* In the child, now. Call "fn(arg)". ++ */ ++ "ldl $27,0($30)\n" //NOTE: in sunway must use r27, or fn will not be called ++ "ldl $16,8($30)\n" ++ "ldi $30, 32($30)\n" ++ "call $26, ($27)\n" ++ ++ /* Call _exit($2) ++ */ ++ "bis $31,$0,$16\n" ++ "ldi $0,1($31)\n" ++ "sys_call 0x83\n" ++ ++ "1:\n" ++ "ldi $30, 16($30)\n" ++ : "+r" (__v0), "+r" (__r19) ++ : "i"(__NR_clone), "i"(__NR_exit), "r"(fn), ++ "r"(__stack), "r"(__flags), "r"(arg), ++ "r"(__ptid), "r"(__ctid) ++ : "$30", "memory"); ++ } ++ LSS_RETURN(int, __v0, __r19); ++ } ++ + #elif defined(__mips__) + #undef LSS_REG + #define LSS_REG(r,a) register unsigned long __r##r __asm__("$"#r) = \ +@@ -3383,6 +3573,10 @@ struct kernel_statfs { + // fork is polyfilled below when not available. + LSS_INLINE _syscall0(pid_t, fork) + #endif ++ #if defined(__sw_64__) ++ LSS_INLINE _syscall2(int, fstat64, int, f, ++ struct stat*, b) ++ #endif + LSS_INLINE _syscall2(int, fstat, int, f, + struct kernel_stat*, b) + LSS_INLINE _syscall2(int, fstatfs, int, f, +@@ -3403,13 +3597,17 @@ struct kernel_statfs { + struct kernel_dirent*, d, int, c) + LSS_INLINE _syscall3(int, getdents64, int, f, + struct kernel_dirent64*, d, int, c) ++#if !defined(__sw_64__) + LSS_INLINE _syscall0(gid_t, getegid) + LSS_INLINE _syscall0(uid_t, geteuid) ++#endif + #if defined(__NR_getpgrp) + LSS_INLINE _syscall0(pid_t, getpgrp) + #endif + LSS_INLINE _syscall0(pid_t, getpid) ++#if !defined(__sw_64__) + LSS_INLINE _syscall0(pid_t, getppid) ++#endif + LSS_INLINE _syscall2(int, getpriority, int, a, + int, b) + LSS_INLINE _syscall3(int, getresgid, gid_t *, r, +@@ -3566,7 +3764,7 @@ struct kernel_statfs { + unsigned *, node, void *, unused) + #endif + #if defined(__x86_64__) || \ +- (defined(__mips__) && _MIPS_SIM != _MIPS_SIM_ABI32) ++ (defined(__mips__) && _MIPS_SIM != _MIPS_SIM_ABI32) || defined(__sw_64__) + LSS_INLINE _syscall3(int, recvmsg, int, s, + struct kernel_msghdr*, m, int, f) + LSS_INLINE _syscall3(int, sendmsg, int, s, +diff --git a/src/3rdparty/chromium/third_party/webrtc/modules/desktop_capture/differ_block.cc b/src/3rdparty/chromium/third_party/webrtc/modules/desktop_capture/differ_block.cc +index dd9ab457e..e45fd94d0 100644 +--- a/src/3rdparty/chromium/third_party/webrtc/modules/desktop_capture/differ_block.cc ++++ b/src/3rdparty/chromium/third_party/webrtc/modules/desktop_capture/differ_block.cc +@@ -30,7 +30,7 @@ bool VectorDifference(const uint8_t* image1, const uint8_t* image2) { + static bool (*diff_proc)(const uint8_t*, const uint8_t*) = nullptr; + + if (!diff_proc) { +-#if defined(WEBRTC_ARCH_ARM_FAMILY) || defined(WEBRTC_ARCH_MIPS_FAMILY) ++#if defined(WEBRTC_ARCH_ARM_FAMILY) || defined(WEBRTC_ARCH_MIPS_FAMILY) || defined(WEBRTC_ARCH_SW64_FAMILY) + // For ARM and MIPS processors, always use C version. + // TODO(hclam): Implement a NEON version. 
+ diff_proc = &VectorDifference_C; +diff --git a/src/3rdparty/chromium/third_party/webrtc/rtc_base/system/arch.h b/src/3rdparty/chromium/third_party/webrtc/rtc_base/system/arch.h +index ed216e660..e4b2d8aef 100644 +--- a/src/3rdparty/chromium/third_party/webrtc/rtc_base/system/arch.h ++++ b/src/3rdparty/chromium/third_party/webrtc/rtc_base/system/arch.h +@@ -50,6 +50,10 @@ + #elif defined(__EMSCRIPTEN__) + #define WEBRTC_ARCH_32_BITS + #define WEBRTC_ARCH_LITTLE_ENDIAN ++#elif defined(__sw_64__) ++#define WEBRTC_ARCH_SW64_FAMILY ++#define WEBRTC_ARCH_64_BITS ++#define WEBRTC_ARCH_LITTLE_ENDIAN + #else + #error Please add support for your architecture in rtc_base/system/arch.h + #endif +diff --git a/src/3rdparty/chromium/v8/BUILD.gn b/src/3rdparty/chromium/v8/BUILD.gn +index ab20142de..b185cac63 100644 +--- a/src/3rdparty/chromium/v8/BUILD.gn ++++ b/src/3rdparty/chromium/v8/BUILD.gn +@@ -586,6 +586,9 @@ config("toolchain") { + } + } + } ++ if (v8_current_cpu == "sw_64") { ++ defines += [ "V8_TARGET_ARCH_SW64", "SW64" ] ++ } + + # Mips64el/mipsel simulators. + if (target_is_simulator && +@@ -1675,6 +1678,11 @@ v8_source_set("v8_initializers") { + ### gcmole(arch:x64) ### + "src/builtins/x64/builtins-x64.cc", + ] ++ } else if (v8_current_cpu == "sw_64") { ++ sources += [ ++ ### gcmole(arch:sw64) ### ++ "src/builtins/sw64/builtins-sw64.cc", ++ ] + } else if (v8_current_cpu == "arm") { + sources += [ + ### gcmole(arch:arm) ### +@@ -3308,6 +3316,35 @@ v8_source_set("v8_base_without_compiler") { + # to be excluded, see the comments inside. + "src/codegen/arm64/instructions-arm64-constants.cc", + ] ++ } else if (v8_current_cpu == "sw_64") { ++ sources += [ ### gcmole(arch:sw_64) ### ++ "src/codegen/sw64/assembler-sw64-inl.h", ++ "src/codegen/sw64/assembler-sw64.cc", ++ "src/codegen/sw64/assembler-sw64.h", ++ "src/codegen/sw64/constants-sw64.cc", ++ "src/codegen/sw64/constants-sw64.h", ++ "src/codegen/sw64/cpu-sw64.cc", ++ "src/codegen/sw64/interface-descriptors-sw64.cc", ++ "src/codegen/sw64/macro-assembler-sw64.cc", ++ "src/codegen/sw64/macro-assembler-sw64.h", ++ "src/codegen/sw64/register-sw64.h", ++ "src/compiler/backend/sw64/code-generator-sw64.cc", ++ "src/compiler/backend/sw64/instruction-codes-sw64.h", ++ "src/compiler/backend/sw64/instruction-scheduler-sw64.cc", ++ "src/compiler/backend/sw64/instruction-selector-sw64.cc", ++ "src/compiler/backend/sw64/unwinding-info-writer-sw64.cc", ++ "src/compiler/backend/sw64/unwinding-info-writer-sw64.h", ++ "src/debug/sw64/debug-sw64.cc", ++ "src/deoptimizer/sw64/deoptimizer-sw64.cc", ++ "src/diagnostics/sw64/disasm-sw64.cc", ++ "src/execution/sw64/frame-constants-sw64.cc", ++ "src/execution/sw64/frame-constants-sw64.h", ++ "src/execution/sw64/simulator-sw64.cc", ++ "src/execution/sw64/simulator-sw64.h", ++ "src/regexp/sw64/regexp-macro-assembler-sw64.cc", ++ "src/regexp/sw64/regexp-macro-assembler-sw64.h", ++ "src/wasm/baseline/sw64/liftoff-assembler-sw64.h", ++ ] + } else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") { + sources += [ ### gcmole(arch:mipsel) ### + "src/codegen/mips/assembler-mips-inl.h", +diff --git a/src/3rdparty/chromium/v8/src/base/build_config.h b/src/3rdparty/chromium/v8/src/base/build_config.h +index 8d142c456..425b7966d 100644 +--- a/src/3rdparty/chromium/v8/src/base/build_config.h ++++ b/src/3rdparty/chromium/v8/src/base/build_config.h +@@ -46,6 +46,9 @@ + #else + #define V8_HOST_ARCH_32_BIT 1 + #endif ++#elif defined(__sw_64__) ++#define V8_HOST_ARCH_SW64 1 ++#define V8_HOST_ARCH_64_BIT 1 + #else + #error 
"Host architecture was not detected as supported by v8" + #endif +@@ -77,7 +80,7 @@ + // environment as presented by the compiler. + #if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && \ + !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64 && \ +- !V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 ++ !V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_SW64 + #if defined(_M_X64) || defined(__x86_64__) + #define V8_TARGET_ARCH_X64 1 + #elif defined(_M_IX86) || defined(__i386__) +@@ -94,6 +97,8 @@ + #define V8_TARGET_ARCH_PPC64 1 + #elif defined(_ARCH_PPC) + #define V8_TARGET_ARCH_PPC 1 ++#elif defined(__sw_64__) ++#define V8_TARGET_ARCH_SW64 1 + #else + #error Target architecture was not detected as supported by v8 + #endif +@@ -128,6 +133,8 @@ + #else + #define V8_TARGET_ARCH_32_BIT 1 + #endif ++#elif V8_TARGET_ARCH_SW64 ++#define V8_TARGET_ARCH_64_BIT 1 + #else + #error Unknown target architecture pointer size + #endif +@@ -156,6 +163,9 @@ + #if (V8_TARGET_ARCH_MIPS64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_MIPS64)) + #error Target architecture mips64 is only supported on mips64 and x64 host + #endif ++#if (V8_TARGET_ARCH_SW64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_SW64)) ++#error Target architecture sw64 is only supported on sw64 and x64 host ++#endif + + // Determine architecture endianness. + #if V8_TARGET_ARCH_IA32 +@@ -190,6 +200,8 @@ + #else + #define V8_TARGET_BIG_ENDIAN 1 + #endif ++#elif V8_TARGET_ARCH_SW64 ++#define V8_TARGET_LITTLE_ENDIAN 1 + #else + #error Unknown target architecture endianness + #endif +diff --git a/src/3rdparty/chromium/v8/src/base/cpu.cc b/src/3rdparty/chromium/v8/src/base/cpu.cc +index f1c48fa13..1295cd4d0 100644 +--- a/src/3rdparty/chromium/v8/src/base/cpu.cc ++++ b/src/3rdparty/chromium/v8/src/base/cpu.cc +@@ -75,7 +75,7 @@ static V8_INLINE void __cpuid(int cpu_info[4], int info_type) { + + #endif // !V8_LIBC_MSVCRT + +-#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64 ++#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64 || V8_HOST_ARCH_SW64 + + #if V8_OS_LINUX + +@@ -585,7 +585,7 @@ CPU::CPU() + + #endif // V8_OS_LINUX + +-#elif V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64 ++#elif V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64 || V8_HOST_ARCH_SW64 + + // Simple detection of FPU at runtime for Linux. + // It is based on /proc/cpuinfo, which reveals hardware configuration +diff --git a/src/3rdparty/chromium/v8/src/base/platform/platform-posix.cc b/src/3rdparty/chromium/v8/src/base/platform/platform-posix.cc +index 1e600c789..7464ef5fb 100644 +--- a/src/3rdparty/chromium/v8/src/base/platform/platform-posix.cc ++++ b/src/3rdparty/chromium/v8/src/base/platform/platform-posix.cc +@@ -297,6 +297,9 @@ void* OS::GetRandomMmapAddr() { + // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel chance + // to fulfill request. + raw_addr &= uint64_t{0xFFFFFF0000}; ++#elif V8_TARGET_ARCH_SW64 ++ raw_addr &= uint64_t{0x07FF0000}; ++ raw_addr += 0x20000000000; + #else + raw_addr &= 0x3FFFF000; + +@@ -479,6 +482,8 @@ void OS::DebugBreak() { + #elif V8_HOST_ARCH_S390 + // Software breakpoint instruction is 0x0001 + asm volatile(".word 0x0001"); ++#elif V8_HOST_ARCH_SW64 ++ asm("sys_call 0x80"); + #else + #error Unsupported host architecture. 
+ #endif +diff --git a/src/3rdparty/chromium/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/src/3rdparty/chromium/v8/src/builtins/builtins-sharedarraybuffer-gen.cc +index 3049b01d2..aadabbb5c 100644 +--- a/src/3rdparty/chromium/v8/src/builtins/builtins-sharedarraybuffer-gen.cc ++++ b/src/3rdparty/chromium/v8/src/builtins/builtins-sharedarraybuffer-gen.cc +@@ -408,7 +408,7 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) { + TNode index_word = ValidateAtomicAccess(array, index, context); + + #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \ +- V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X ++ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_SW64 + TNode index_number = ChangeUintPtrToTagged(index_word); + Return(CallRuntime(Runtime::kAtomicsCompareExchange, context, array, + index_number, old_value, new_value)); +@@ -543,7 +543,7 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon( + TNode index_word = ValidateAtomicAccess(array, index, context); + + #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \ +- V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X ++ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_SW64 + TNode index_number = ChangeUintPtrToTagged(index_word); + Return(CallRuntime(runtime_function, context, array, index_number, value)); + #else +diff --git a/src/3rdparty/chromium/v8/src/builtins/builtins.cc b/src/3rdparty/chromium/v8/src/builtins/builtins.cc +index 34f7ddc18..4f44967bd 100644 +--- a/src/3rdparty/chromium/v8/src/builtins/builtins.cc ++++ b/src/3rdparty/chromium/v8/src/builtins/builtins.cc +@@ -466,7 +466,7 @@ bool Builtins::CodeObjectIsExecutable(int builtin_index) { + case Builtins::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit: + return true; + default: +-#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 ++#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_SW64 + // TODO(Loongson): Move non-JS linkage builtins code objects into RO_SPACE + // caused MIPS platform to crash, and we need some time to handle it. Now + // disable this change temporarily on MIPS platform. +diff --git a/src/3rdparty/chromium/v8/src/builtins/sw64/builtins-sw64.cc b/src/3rdparty/chromium/v8/src/builtins/sw64/builtins-sw64.cc +new file mode 100755 +index 000000000..c9565819f +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/builtins/sw64/builtins-sw64.cc +@@ -0,0 +1,3222 @@ ++// Copyright 2012 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#if V8_TARGET_ARCH_SW64 ++ ++#include "src/api/api-arguments.h" ++#include "src/codegen/code-factory.h" ++#include "src/debug/debug.h" ++#include "src/deoptimizer/deoptimizer.h" ++#include "src/execution/frame-constants.h" ++#include "src/execution/frames.h" ++#include "src/logging/counters.h" ++// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop. 
++#include "src/codegen/macro-assembler-inl.h" ++#include "src/codegen/sw64/constants-sw64.h" ++#include "src/codegen/register-configuration.h" ++#include "src/heap/heap-inl.h" ++#include "src/objects/cell.h" ++#include "src/objects/foreign.h" ++#include "src/objects/heap-number.h" ++#include "src/objects/js-generator.h" ++#include "src/objects/objects-inl.h" ++#include "src/objects/smi.h" ++#include "src/runtime/runtime.h" ++#include "src/wasm/wasm-linkage.h" ++#include "src/wasm/wasm-objects.h" ++ ++namespace v8 { ++namespace internal { ++ ++#define __ ACCESS_MASM(masm) ++ ++void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) { ++ __ li(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address)); ++ __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame), ++ RelocInfo::CODE_TARGET); ++} ++ ++static void GenerateTailCallToReturnedCode(MacroAssembler* masm, ++ Runtime::FunctionId function_id) { ++ // ----------- S t a t e ------------- ++ // -- a0 : actual argument count ++ // -- a1 : target function (preserved for callee) ++ // -- a3 : new target (preserved for callee) ++ // ----------------------------------- ++ { ++ FrameScope scope(masm, StackFrame::INTERNAL); ++ // Push a copy of the target function, the new target and the actual ++ // argument count. ++ // Push function as parameter to the runtime call. ++ __ SmiTag(kJavaScriptCallArgCountRegister); ++ __ Push(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister, ++ kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister); ++ ++ __ CallRuntime(function_id, 1); ++ // Restore target function, new target and actual argument count. ++ __ Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister, ++ kJavaScriptCallArgCountRegister); ++ __ SmiUntag(kJavaScriptCallArgCountRegister); ++ } ++ ++ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); ++ __ Addl(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag)); ++ __ Jump(a2); ++} ++ ++namespace { ++ ++enum StackLimitKind { kInterruptStackLimit, kRealStackLimit }; ++ ++void LoadStackLimit(MacroAssembler* masm, Register destination, ++ StackLimitKind kind) { ++ DCHECK(masm->root_array_available()); ++ Isolate* isolate = masm->isolate(); ++ ExternalReference limit = ++ kind == StackLimitKind::kRealStackLimit ++ ? ExternalReference::address_of_real_jslimit(isolate) ++ : ExternalReference::address_of_jslimit(isolate); ++ DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit)); ++ ++ intptr_t offset = ++ TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit); ++ CHECK(is_int32(offset)); ++ __ Ldl(destination, MemOperand(kRootRegister, static_cast(offset))); ++} ++ ++void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0 : number of arguments ++ // -- a1 : constructor function ++ // -- a3 : new target ++ // -- cp : context ++ // -- ra : return address ++ // -- sp[...]: constructor arguments ++ // ----------------------------------- ++ ++ // Enter a construct frame. ++ { ++ FrameScope scope(masm, StackFrame::CONSTRUCT); ++ ++ // Preserve the incoming parameters on the stack. ++ __ SmiTag(a0); ++ __ Push(cp, a0); ++ __ SmiUntag(a0); ++ ++ // Set up pointer to last argument (skip receiver). ++ __ Addl( ++ t2, fp, ++ Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize)); ++ // Copy arguments and receiver to the expression stack. 
++ __ PushArray(t2, a0, t3, t0); ++ // The receiver for the builtin/api call. ++ __ PushRoot(RootIndex::kTheHoleValue); ++ ++ // Call the function. ++ // a0: number of arguments (untagged) ++ // a1: constructor function ++ // a3: new target ++ __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION); ++ ++ // Restore context from the frame. ++ __ Ldl(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); ++ // Restore smi-tagged arguments count from the frame. ++ __ Ldl(t3, MemOperand(fp, ConstructFrameConstants::kLengthOffset)); ++ // Leave construct frame. ++ } ++ ++ // Remove caller arguments from the stack and return. ++ __ SmiScale(t3, t3, kPointerSizeLog2); ++ __ Addl(sp, sp, t3); ++ __ Addl(sp, sp, kPointerSize); ++ __ Ret(); ++} ++ ++static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args, ++ Register scratch1, Register scratch2, ++ Label* stack_overflow) { ++ // Check the stack for overflow. We are not trying to catch ++ // interruptions (e.g. debug break and preemption) here, so the "real stack ++ // limit" is checked. ++ LoadStackLimit(masm, scratch1, StackLimitKind::kRealStackLimit); ++ // Make scratch1 the space we have left. The stack might already be overflowed ++ // here which will cause scratch1 to become negative. ++ __ subl(sp, scratch1, scratch1); ++ // Check if the arguments will overflow the stack. ++ __ slll(num_args, kPointerSizeLog2, scratch2); ++ // Signed comparison. ++ __ Branch(stack_overflow, le, scratch1, Operand(scratch2)); ++} ++ ++} // namespace ++ ++// The construct stub for ES5 constructor functions and ES6 class constructors. ++void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0: number of arguments (untagged) ++ // -- a1: constructor function ++ // -- a3: new target ++ // -- cp: context ++ // -- ra: return address ++ // -- sp[...]: constructor arguments ++ // ----------------------------------- ++ ++ // Enter a construct frame. ++ { ++ FrameScope scope(masm, StackFrame::CONSTRUCT); ++ Label post_instantiation_deopt_entry, not_create_implicit_receiver; ++ ++ // Preserve the incoming parameters on the stack. ++ __ SmiTag(a0); ++ __ Push(cp, a0, a1); ++ __ PushRoot(RootIndex::kUndefinedValue); ++ __ Push(a3); ++ ++ // ----------- S t a t e ------------- ++ // -- sp[0*kPointerSize]: new target ++ // -- sp[1*kPointerSize]: padding ++ // -- a1 and sp[2*kPointerSize]: constructor function ++ // -- sp[3*kPointerSize]: number of arguments (tagged) ++ // -- sp[4*kPointerSize]: context ++ // ----------------------------------- ++ ++ __ Ldl(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); ++ __ Ldwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset)); ++ __ DecodeField(t2); ++ __ JumpIfIsInRange(t2, kDefaultDerivedConstructor, kDerivedConstructor, ++ ¬_create_implicit_receiver); ++ ++ // If not derived class constructor: Allocate the new receiver object. 
++ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, ++ t2, t3); ++ __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), ++ RelocInfo::CODE_TARGET); ++ __ Branch(&post_instantiation_deopt_entry); ++ ++ // Else: use TheHoleValue as receiver for constructor call ++ __ bind(¬_create_implicit_receiver); ++ __ LoadRoot(v0, RootIndex::kTheHoleValue); ++ ++ // ----------- S t a t e ------------- ++ // -- v0: receiver ++ // -- Slot 4 / sp[0*kPointerSize]: new target ++ // -- Slot 3 / sp[1*kPointerSize]: padding ++ // -- Slot 2 / sp[2*kPointerSize]: constructor function ++ // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged) ++ // -- Slot 0 / sp[4*kPointerSize]: context ++ // ----------------------------------- ++ // Deoptimizer enters here. ++ masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset( ++ masm->pc_offset()); ++ __ bind(&post_instantiation_deopt_entry); ++ ++ // Restore new target. ++ __ Pop(a3); ++ ++ // Push the allocated receiver to the stack. ++ __ Push(v0); ++ ++ // We need two copies because we may have to return the original one ++ // and the calling conventions dictate that the called function pops the ++ // receiver. The second copy is pushed after the arguments, we saved in t9 ++ // since v0 will store the return value of callRuntime. ++ __ mov(t9, v0); ++ ++ // Set up pointer to last argument. ++ __ Addl(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset + ++ kSystemPointerSize)); ++ ++ // ----------- S t a t e ------------- ++ // -- r3: new target ++ // -- sp[0*kPointerSize]: implicit receiver ++ // -- sp[1*kPointerSize]: implicit receiver ++ // -- sp[2*kPointerSize]: padding ++ // -- sp[3*kPointerSize]: constructor function ++ // -- sp[4*kPointerSize]: number of arguments (tagged) ++ // -- sp[5*kPointerSize]: context ++ // ----------------------------------- ++ ++ // Restore constructor function and argument count. ++ __ Ldl(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset)); ++ __ Ldl(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset)); ++ __ SmiUntag(a0); ++ ++ Label enough_stack_space, stack_overflow; ++ Generate_StackOverflowCheck(masm, a0, t0, t1, &stack_overflow); ++ __ Branch(&enough_stack_space); ++ ++ __ bind(&stack_overflow); ++ // Restore the context from the frame. ++ __ Ldl(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); ++ __ CallRuntime(Runtime::kThrowStackOverflow); ++ // Unreachable code. ++ __ sys_call(0x80); ++ ++ __ bind(&enough_stack_space); ++ ++ // Copy arguments and receiver to the expression stack. ++ __ PushArray(t2, a0, t0, t1); ++ // We need two copies because we may have to return the original one ++ // and the calling conventions dictate that the called function pops the ++ // receiver. The second copy is pushed after the arguments, ++ __ Push(t9); ++ ++ // Call the function. ++ __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION); ++ ++ // ----------- S t a t e ------------- ++ // -- v0: constructor result ++ // -- sp[0*kPointerSize]: implicit receiver ++ // -- sp[1*kPointerSize]: padding ++ // -- sp[2*kPointerSize]: constructor function ++ // -- sp[3*kPointerSize]: number of arguments ++ // -- sp[4*kPointerSize]: context ++ // ----------------------------------- ++ ++ // Store offset of return address for deoptimizer. ++ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset( ++ masm->pc_offset()); ++ ++ // Restore the context from the frame. 
++ __ Ldl(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); ++ ++ // If the result is an object (in the ECMA sense), we should get rid ++ // of the receiver and use the result; see ECMA-262 section 13.2.2-7 ++ // on page 74. ++ Label use_receiver, do_throw, leave_frame; ++ ++ // If the result is undefined, we jump out to using the implicit receiver. ++ __ JumpIfRoot(v0, RootIndex::kUndefinedValue, &use_receiver); ++ ++ // Otherwise we do a smi check and fall through to check if the return value ++ // is a valid receiver. ++ ++ // If the result is a smi, it is *not* an object in the ECMA sense. ++ __ JumpIfSmi(v0, &use_receiver); ++ ++ // If the type of the result (stored in its map) is less than ++ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense. ++ __ GetObjectType(v0, t2, t2); ++ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); ++ __ Branch(&leave_frame, greater_equal, t2, Operand(FIRST_JS_RECEIVER_TYPE)); ++ __ Branch(&use_receiver); ++ ++ __ bind(&do_throw); ++ __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject); ++ ++ // Throw away the result of the constructor invocation and use the ++ // on-stack receiver as the result. ++ __ bind(&use_receiver); ++ __ Ldl(v0, MemOperand(sp, 0 * kPointerSize)); ++ __ JumpIfRoot(v0, RootIndex::kTheHoleValue, &do_throw); ++ ++ __ bind(&leave_frame); ++ // Restore smi-tagged arguments count from the frame. ++ __ Ldl(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset)); ++ // Leave construct frame. ++ } ++ // Remove caller arguments from the stack and return. ++ __ SmiScale(a4, a1, kPointerSizeLog2); ++ __ Addl(sp, sp, a4); ++ __ Addl(sp, sp, kPointerSize); ++ __ Ret(); ++} ++ ++void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) { ++ Generate_JSBuiltinsConstructStubHelper(masm); ++} ++ ++static void GetSharedFunctionInfoBytecode(MacroAssembler* masm, ++ Register sfi_data, ++ Register scratch1) { ++ Label done; ++ ++ __ GetObjectType(sfi_data, scratch1, scratch1); ++ __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE)); ++ __ Ldl(sfi_data, ++ FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset)); ++ ++ __ bind(&done); ++} ++ ++// static ++void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- v0 : the value to pass to the generator ++ // -- a1 : the JSGeneratorObject to resume ++ // -- ra : return address ++ // ----------------------------------- ++ __ AssertGeneratorObject(a1); ++ ++ // Store input value into generator object. ++ __ Stl(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset)); ++ __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3, ++ kRAHasNotBeenSaved, kDontSaveFPRegs); ++ ++ // Load suspended function and context. ++ __ Ldl(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); ++ __ Ldl(cp, FieldMemOperand(a4, JSFunction::kContextOffset)); ++ ++ // Flood function if we are stepping. ++ Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator; ++ Label stepping_prepared; ++ ExternalReference debug_hook = ++ ExternalReference::debug_hook_on_function_call_address(masm->isolate()); ++ __ li(a5, debug_hook); ++ __ Ldb(a5, MemOperand(a5)); ++ __ Branch(&prepare_step_in_if_stepping, ne, a5, Operand(zero_reg)); ++ ++ // Flood function if we need to continue stepping in the suspended generator. 
++ ExternalReference debug_suspended_generator = ++ ExternalReference::debug_suspended_generator_address(masm->isolate()); ++ __ li(a5, debug_suspended_generator); ++ __ Ldl(a5, MemOperand(a5)); ++ __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(a5)); ++ __ bind(&stepping_prepared); ++ ++ // Check the stack for overflow. We are not trying to catch interruptions ++ // (i.e. debug break and preemption) here, so check the "real stack limit". ++ Label stack_overflow; ++ LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit); ++ __ Branch(&stack_overflow, lo, sp, Operand(kScratchReg)); ++ ++ // ----------- S t a t e ------------- ++ // -- a1 : the JSGeneratorObject to resume ++ // -- a4 : generator function ++ // -- cp : generator context ++ // -- ra : return address ++ // -- sp[0] : generator receiver ++ // ----------------------------------- ++ ++ // Push holes for arguments to generator function. Since the parser forced ++ // context allocation for any variables in generators, the actual argument ++ // values have already been copied into the context and these dummy values ++ // will never be used. ++ __ Ldl(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset)); ++ __ Ldhu(a3, ++ FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset)); ++ __ Ldl(t1, ++ FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset)); ++ { ++ Label done_loop, loop; ++ __ bind(&loop); ++ __ Subl(a3, a3, Operand(1)); ++ __ Branch(&done_loop, lt, a3, Operand(zero_reg)); ++ __ s8addl(a3, t1, kScratchReg); DCHECK_EQ(kPointerSizeLog2, 3); ++ __ Ldl(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize)); ++ __ Push(kScratchReg); ++ __ Branch(&loop); ++ __ bind(&done_loop); ++ // Push receiver. ++ __ Ldl(kScratchReg, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset)); ++ __ Push(kScratchReg); ++ } ++ ++ // Underlying function needs to have bytecode available. ++ if (FLAG_debug_code) { ++ __ Ldl(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset)); ++ __ Ldl(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset)); ++ GetSharedFunctionInfoBytecode(masm, a3, a0); ++ __ GetObjectType(a3, a3, a3); ++ __ Assert(eq, AbortReason::kMissingBytecodeArray, a3, ++ Operand(BYTECODE_ARRAY_TYPE)); ++ } ++ ++ // Resume (Ignition/TurboFan) generator object. ++ { ++ __ Ldl(a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset)); ++ __ Ldhu(a0, FieldMemOperand( ++ a0, SharedFunctionInfo::kFormalParameterCountOffset)); ++ // We abuse new.target both to indicate that this is a resume call and to ++ // pass in the generator object. In ordinary calls, new.target is always ++ // undefined because generator functions are non-constructable. ++ __ Move(a3, a1); ++ __ Move(a1, a4); ++ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); ++ __ Ldl(a2, FieldMemOperand(a1, JSFunction::kCodeOffset)); ++ __ Addl(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag)); ++ __ Jump(a2); ++ } ++ ++ __ bind(&prepare_step_in_if_stepping); ++ { ++ FrameScope scope(masm, StackFrame::INTERNAL); ++ __ Push(a1, a4); ++ // Push hole as receiver since we do not use it for stepping. 
++ __ PushRoot(RootIndex::kTheHoleValue); ++ __ CallRuntime(Runtime::kDebugOnFunctionCall); ++ __ Pop(a1); ++ } ++ __ Ldl(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); ++ __ Branch(&stepping_prepared); ++ ++ __ bind(&prepare_step_in_suspended_generator); ++ { ++ FrameScope scope(masm, StackFrame::INTERNAL); ++ __ Push(a1); ++ __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator); ++ __ Pop(a1); ++ } ++ __ Ldl(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); ++ __ Branch(&stepping_prepared); ++ ++ __ bind(&stack_overflow); ++ { ++ FrameScope scope(masm, StackFrame::INTERNAL); ++ __ CallRuntime(Runtime::kThrowStackOverflow); ++ __ sys_call(0x80); // This should be unreachable. ++ } ++} ++ ++void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) { ++ FrameScope scope(masm, StackFrame::INTERNAL); ++ __ Push(a1); ++ __ CallRuntime(Runtime::kThrowConstructedNonConstructable); ++} ++ ++// Clobbers scratch1 and scratch2; preserves all other registers. ++static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc, ++ Register scratch1, Register scratch2) { ++ // Check the stack for overflow. We are not trying to catch ++ // interruptions (e.g. debug break and preemption) here, so the "real stack ++ // limit" is checked. ++ Label okay; ++ LoadStackLimit(masm, scratch1, StackLimitKind::kRealStackLimit); ++ // Make a2 the space we have left. The stack might already be overflowed ++ // here which will cause r2 to become negative. ++ __ Subl(scratch1, sp, scratch1); ++ // Check if the arguments will overflow the stack. ++ __ slll(argc, kPointerSizeLog2,scratch2); ++ __ Branch(&okay, gt, scratch1, Operand(scratch2)); // Signed comparison. ++ ++ // Out of stack space. ++ __ CallRuntime(Runtime::kThrowStackOverflow); ++ ++ __ bind(&okay); ++} ++ ++namespace { ++ ++// Called with the native C calling convention. The corresponding function ++// signature is either: ++// ++// using JSEntryFunction = GeneratedCode; ++// or ++// using JSEntryFunction = GeneratedCode; ++void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, ++ Builtins::Name entry_trampoline) { ++ Label invoke, handler_entry, exit; ++ ++ { ++ NoRootArrayScope no_root_array(masm); ++ ++ // TODO(plind): unify the ABI description here. ++ // Registers: ++ // either ++ // a0: root register value ++ // a1: entry address ++ // a2: function ++ // a3: receiver ++ // a4: argc ++ // a5: argv ++ // or ++ // a0: root register value ++ // a1: microtask_queue ++ // ++ // Stack: ++ // 0 arg slots on sw64 ++ ++ // Save callee saved registers on the stack. ++ __ MultiPush(kCalleeSaved | ra.bit()); ++ ++ // Save callee-saved FPU registers. ++ __ MultiPushFPU(kCalleeSavedFPU); ++ // Set up the reserved register for 0.0. ++ //__ Move(kDoubleRegZero, 0.0); ++ ++ // Initialize the root register. ++ // C calling convention. The first argument is passed in a0. ++ __ mov(kRootRegister, a0); ++ } ++ ++ // a1: entry address ++ // a2: function ++ // a3: receiver ++ // a4: argc ++ // a5: argv ++ ++ // We build an EntryFrame. ++ __ li(s1, Operand(-1)); // Push a bad frame pointer to fail if it is used. ++ __ li(s2, Operand(StackFrame::TypeToMarker(type))); ++ __ li(s3, Operand(StackFrame::TypeToMarker(type))); ++ ExternalReference c_entry_fp = ExternalReference::Create( ++ IsolateAddressId::kCEntryFPAddress, masm->isolate()); ++ __ li(t8, c_entry_fp); ++ __ Ldl(t8, MemOperand(t8)); ++ __ Push(s1, s2, s3, t8); ++ // Set up frame pointer for the frame to be pushed. 
++ __ addl(sp, -EntryFrameConstants::kCallerFPOffset, fp); // negtive imm ++ ++ // Registers: ++ // either ++ // a1: entry address ++ // a2: function ++ // a3: receiver ++ // a4: argc ++ // a5: argv ++ // or ++ // a1: microtask_queue ++ // ++ // Stack: ++ // caller fp | ++ // function slot | entry frame ++ // context slot | ++ // bad fp (0xFF...F) | ++ // callee saved registers + ra ++ // [ O32: 4 args slots] ++ // args ++ ++ // If this is the outermost JS call, set js_entry_sp value. ++ Label non_outermost_js; ++ ExternalReference js_entry_sp = ExternalReference::Create( ++ IsolateAddressId::kJSEntrySPAddress, masm->isolate()); ++ __ li(s1, js_entry_sp); ++ __ Ldl(s2, MemOperand(s1)); ++ __ Branch(&non_outermost_js, ne, s2, Operand(zero_reg)); ++ __ Stl(fp, MemOperand(s1)); ++ __ li(s3, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME)); ++ Label cont; ++ __ br(&cont); ++ //__ nop(); // Branch delay slot nop. ++ __ bind(&non_outermost_js); ++ __ li(s3, Operand(StackFrame::INNER_JSENTRY_FRAME)); ++ __ bind(&cont); ++ __ push(s3); ++ ++ // Jump to a faked try block that does the invoke, with a faked catch ++ // block that sets the pending exception. ++ __ jmp(&invoke); ++ __ bind(&handler_entry); ++ ++ // Store the current pc as the handler offset. It's used later to create the ++ // handler table. ++ masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos()); ++ ++ // Caught exception: Store result (exception) in the pending exception ++ // field in the JSEnv and return a failure sentinel. Coming in here the ++ // fp will be invalid because the PushStackHandler below sets it to 0 to ++ // signal the existence of the JSEntry frame. ++ __ li(s1, ExternalReference::Create( ++ IsolateAddressId::kPendingExceptionAddress, masm->isolate())); ++ __ Stl(v0, MemOperand(s1)); // We come back from 'invoke'. result is in v0. ++ __ LoadRoot(v0, RootIndex::kException); ++ __ br(&exit); // b exposes branch delay slot. ++ //__ nop(); // Branch delay slot nop. ++ ++ // Invoke: Link this frame into the handler chain. ++ __ bind(&invoke); ++ __ PushStackHandler(); ++ // If an exception not caught by another handler occurs, this handler ++ // returns control to the code after the bal(&invoke) above, which ++ // restores all kCalleeSaved registers (including cp and fp) to their ++ // saved values before returning a failure to C. ++ // ++ // Registers: ++ // either ++ // a0: root register value ++ // a1: entry address ++ // a2: function ++ // a3: receiver ++ // a4: argc ++ // a5: argv ++ // or ++ // a0: root register value ++ // a1: microtask_queue ++ // ++ // Stack: ++ // handler frame ++ // entry frame ++ // callee saved registers + ra ++ // [ O32: 4 args slots] ++ // args ++ // ++ // Invoke the function by calling through JS entry trampoline builtin and ++ // pop the faked function when we return. ++ ++ Handle trampoline_code = ++ masm->isolate()->builtins()->builtin_handle(entry_trampoline); ++ __ Call(trampoline_code, RelocInfo::CODE_TARGET); ++ ++ // Unlink this frame from the handler chain. ++ __ PopStackHandler(); ++ ++ __ bind(&exit); // v0 holds result ++ // Check if the current stack frame is marked as the outermost JS frame. ++ Label non_outermost_js_2; ++ __ pop(a5); ++ __ Branch(&non_outermost_js_2, ne, a5, ++ Operand(StackFrame::OUTERMOST_JSENTRY_FRAME)); ++ __ li(a5, js_entry_sp); ++ __ Stl(zero_reg, MemOperand(a5)); ++ __ bind(&non_outermost_js_2); ++ ++ // Restore the top frame descriptors from the stack. 
++ __ pop(a5); ++ __ li(a4, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, ++ masm->isolate())); ++ __ Stl(a5, MemOperand(a4)); ++ ++ // Reset the stack to the callee saved registers. ++ __ addl(sp, -EntryFrameConstants::kCallerFPOffset, sp); // negative imm ++ ++ // Restore callee-saved fpu registers. ++ __ MultiPopFPU(kCalleeSavedFPU); ++ ++ // Restore callee saved registers from the stack. ++ __ MultiPop(kCalleeSaved | ra.bit()); ++ // Return. ++ __ Jump(ra); ++} ++ ++} // namespace ++ ++void Builtins::Generate_JSEntry(MacroAssembler* masm) { ++ Generate_JSEntryVariant(masm, StackFrame::ENTRY, ++ Builtins::kJSEntryTrampoline); ++} ++ ++void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) { ++ Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY, ++ Builtins::kJSConstructEntryTrampoline); ++} ++ ++void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) { ++ Generate_JSEntryVariant(masm, StackFrame::ENTRY, ++ Builtins::kRunMicrotasksTrampoline); ++} ++ ++static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, ++ bool is_construct) { ++ // ----------- S t a t e ------------- ++ // -- a1: new.target ++ // -- a2: function ++ // -- a3: receiver_pointer ++ // -- a4: argc ++ // -- a5: argv ++ // ----------------------------------- ++ ++ // Enter an internal frame. ++ { ++ FrameScope scope(masm, StackFrame::INTERNAL); ++ ++ // Setup the context (we need to use the caller context from the isolate). ++ ExternalReference context_address = ExternalReference::Create( ++ IsolateAddressId::kContextAddress, masm->isolate()); ++ __ li(cp, context_address); ++ __ Ldl(cp, MemOperand(cp)); ++ ++ // Push the function onto the stack. ++ __ Push(a2); ++ ++ // Check if we have enough stack space to push all arguments. ++ __ addl(a4, 1, t9); ++ Generate_CheckStackOverflow(masm, t9, a0, s2); ++ ++ // Copy arguments to the stack in a loop. ++ // a4: argc ++ // a5: argv, i.e. points to first arg ++ Label loop, entry; ++ __ s8addl(a4, a5, s1); DCHECK_EQ(kPointerSizeLog2, 3); ++ __ br(&entry); ++ // s1 points past last arg. ++ __ bind(&loop); ++ __ subl(s1, kPointerSize, s1); ++ __ Ldl(s2, MemOperand(s1)); // Read next parameter. ++ __ Ldl(s2, MemOperand(s2)); // Dereference handle. ++ __ push(s2); // Push parameter. ++ __ bind(&entry); ++ __ Branch(&loop, ne, a5, Operand(s1)); ++ ++ // Push the receive. ++ __ Push(a3); ++ ++ // a0: argc ++ // a1: function ++ // a3: new.target ++ __ mov(a3, a1); ++ __ mov(a1, a2); ++ __ mov(a0, a4); ++ ++ // Initialize all JavaScript callee-saved registers, since they will be seen ++ // by the garbage collector as part of handlers. ++ __ LoadRoot(a4, RootIndex::kUndefinedValue); ++ __ mov(a5, a4); ++ __ mov(s1, a4); ++ __ mov(s2, a4); ++ __ mov(s3, a4); //OK. ++ // s4 holds the root address. Do not clobber. ++ // s5 is cp. Do not init. ++ ++ // Invoke the code. ++ Handle builtin = is_construct ++ ? BUILTIN_CODE(masm->isolate(), Construct) ++ : masm->isolate()->builtins()->Call(); ++ __ Call(builtin, RelocInfo::CODE_TARGET); ++ ++ // Leave internal frame. 
++ } ++ __ Jump(ra); ++} ++ ++void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) { ++ Generate_JSEntryTrampolineHelper(masm, false); ++} ++ ++void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) { ++ Generate_JSEntryTrampolineHelper(masm, true); ++} ++ ++void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) { ++ // a1: microtask_queue ++ __ mov(RunMicrotasksDescriptor::MicrotaskQueueRegister(), a1); ++ __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET); ++} ++ ++static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm, ++ Register optimized_code, ++ Register closure, ++ Register scratch1, ++ Register scratch2) { ++ // Store code entry in the closure. ++ __ Stl(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset)); ++ __ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below. ++ __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2, ++ kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, ++ OMIT_SMI_CHECK); ++} ++ ++static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) { ++ Register args_count = scratch; ++ ++ // Get the arguments + receiver count. ++ __ Ldl(args_count, ++ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); ++ __ Ldw(t0, FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset)); ++ ++ // Leave the frame (also dropping the register file). ++ __ LeaveFrame(StackFrame::INTERPRETED); ++ ++ // Drop receiver + arguments. ++ __ Addl(sp, sp, args_count); ++} ++ ++// Tail-call |function_id| if |smi_entry| == |marker| ++static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm, ++ Register smi_entry, ++ OptimizationMarker marker, ++ Runtime::FunctionId function_id) { ++ Label no_match; ++ __ Branch(&no_match, ne, smi_entry, Operand(Smi::FromEnum(marker))); ++ GenerateTailCallToReturnedCode(masm, function_id); ++ __ bind(&no_match); ++} ++ ++static void TailCallOptimizedCodeSlot(MacroAssembler* masm, ++ Register optimized_code_entry, ++ Register scratch1, Register scratch2) { ++ // ----------- S t a t e ------------- ++ // -- a0 : actual argument count ++ // -- a3 : new target (preserved for callee if needed, and caller) ++ // -- a1 : target function (preserved for callee if needed, and caller) ++ // ----------------------------------- ++ DCHECK(!AreAliased(optimized_code_entry, a1, a3, scratch1, scratch2)); ++ ++ Register closure = a1; ++ ++ // Check if the optimized code is marked for deopt. If it is, call the ++ // runtime to clear it. ++ Label found_deoptimized_code; ++ __ Ldl(a5, ++ FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset)); ++ __ Ldw(a5, FieldMemOperand(a5, CodeDataContainer::kKindSpecificFlagsOffset)); ++ __ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit)); ++ __ Branch(&found_deoptimized_code, ne, a5, Operand(zero_reg)); ++ ++ // Optimized code is good, get it into the closure and link the closure into ++ // the optimized functions list, then tail call the optimized code. ++ // The feedback vector is no longer used, so re-use it as a scratch ++ // register. ++ ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, ++ scratch1, scratch2); ++ ++ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); ++ __ Addl(a2, optimized_code_entry, ++ Operand(Code::kHeaderSize - kHeapObjectTag)); ++ __ Jump(a2); ++ ++ // Optimized code slot contains deoptimized code, evict it and re-enter the ++ // closure's code. 
++ __ bind(&found_deoptimized_code);
++ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
++ }
++
++static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
++ Register optimization_marker) {
++ // ----------- S t a t e -------------
++ // -- a0 : actual argument count
++ // -- a3 : new target (preserved for callee if needed, and caller)
++ // -- a1 : target function (preserved for callee if needed, and caller)
++ // -- feedback vector (preserved for caller if needed)
++ // -- optimization_marker : a Smi containing a non-zero optimization marker.
++ // -----------------------------------
++ DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker));
++
++ // TODO(v8:8394): The logging of first execution will break if
++ // feedback vectors are not allocated. We need to find a different way of
++ // logging these events if required.
++ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
++ OptimizationMarker::kLogFirstExecution,
++ Runtime::kFunctionFirstExecution);
++ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
++ OptimizationMarker::kCompileOptimized,
++ Runtime::kCompileOptimized_NotConcurrent);
++ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
++ OptimizationMarker::kCompileOptimizedConcurrent,
++ Runtime::kCompileOptimized_Concurrent);
++
++ // Otherwise, the marker is InOptimizationQueue, so fall through hoping
++ // that an interrupt will eventually update the slot with optimized code.
++ if (FLAG_debug_code) {
++ __ Assert(eq, AbortReason::kExpectedOptimizationSentinel,
++ optimization_marker,
++ Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
++ }
++}
++
++// Advance the current bytecode offset. This simulates what all bytecode
++// handlers do upon completion of the underlying operation. Will bail out to a
++// label if the bytecode (without prefix) is a return bytecode. Will not advance
++// the bytecode offset if the current bytecode is a JumpLoop, instead just
++// re-executing the JumpLoop to jump to the correct bytecode.
++static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
++ Register bytecode_array,
++ Register bytecode_offset,
++ Register bytecode, Register scratch1,
++ Register scratch2, Register scratch3,
++ Label* if_return) {
++ Register bytecode_size_table = scratch1;
++
++ // The bytecode offset value will be increased by one in wide and extra wide
++ // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
++ // will restore the original bytecode. In order to simplify the code, we have
++ // a backup of it.
++ Register original_bytecode_offset = scratch3;
++ DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode,
++ bytecode_size_table, original_bytecode_offset));
++ __ Move(original_bytecode_offset, bytecode_offset);
++ __ li(bytecode_size_table, ExternalReference::bytecode_size_table_address());
++
++ // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
++ Label process_bytecode, extra_wide;
++ STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
++ STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
++ STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
++ STATIC_ASSERT(3 ==
++ static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
++ __ Branch(&process_bytecode, hi, bytecode, Operand(3));
++ __ And(scratch2, bytecode, Operand(1));
++ __ Branch(&extra_wide, ne, scratch2, Operand(zero_reg));
++
++ // Load the next bytecode and update table to the wide scaled table.
++ __ Addl(bytecode_offset, bytecode_offset, Operand(1));
++ __ Addl(scratch2, bytecode_array, bytecode_offset);
++ __ Ldbu(bytecode, MemOperand(scratch2));
++ __ Addl(bytecode_size_table, bytecode_size_table,
++ Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
++ __ jmp(&process_bytecode);
++
++ __ bind(&extra_wide);
++ // Load the next bytecode and update table to the extra wide scaled table.
++ __ Addl(bytecode_offset, bytecode_offset, Operand(1));
++ __ Addl(scratch2, bytecode_array, bytecode_offset);
++ __ Ldbu(bytecode, MemOperand(scratch2));
++ __ Addl(bytecode_size_table, bytecode_size_table,
++ Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
++
++ __ bind(&process_bytecode);
++
++// Bailout to the return label if this is a return bytecode.
++#define JUMP_IF_EQUAL(NAME) \
++ __ Branch(if_return, eq, bytecode, \
++ Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
++ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
++#undef JUMP_IF_EQUAL
++
++ // If this is a JumpLoop, re-execute it to perform the jump to the beginning
++ // of the loop.
++ Label end, not_jump_loop;
++ __ Branch(&not_jump_loop, ne, bytecode,
++ Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
++ // We need to restore the original bytecode_offset since we might have
++ // increased it to skip the wide / extra-wide prefix bytecode.
++ __ Move(bytecode_offset, original_bytecode_offset);
++ __ jmp(&end);
++
++ __ bind(&not_jump_loop);
++ // Otherwise, load the size of the current bytecode and advance the offset.
++ __ s4addl(bytecode, bytecode_size_table, scratch2);
++ __ Ldw(scratch2, MemOperand(scratch2));
++ __ Addl(bytecode_offset, bytecode_offset, scratch2);
++
++ __ bind(&end);
++}
++
++// Generate code for entering a JS function with the interpreter.
++// On entry to the function the receiver and arguments have been pushed on the
++// stack left to right.
++//
++// The live registers are:
++// o a0 : actual argument count (not including the receiver)
++// o a1: the JS function object being called.
++// o a3: the incoming new target or generator object
++// o cp: our context
++// o fp: the caller's frame pointer
++// o sp: stack pointer
++// o ra: return address
++//
++// The function builds an interpreter frame. See InterpreterFrameConstants in
++// frames.h for its layout.
++void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
++ Register closure = a1;
++ Register feedback_vector = a2;
++
++ // Get the bytecode array from the function object and load it into
++ // kInterpreterBytecodeArrayRegister.
++ __ Ldl(kScratchReg,
++ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
++ __ Ldl(kInterpreterBytecodeArrayRegister,
++ FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset));
++ GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister,
++ kScratchReg);
++
++ // The bytecode array could have been flushed from the shared function info,
++ // if so, call into CompileLazy.
++ Label compile_lazy;
++ __ GetObjectType(kInterpreterBytecodeArrayRegister, kScratchReg, kScratchReg);
++ __ Branch(&compile_lazy, ne, kScratchReg, Operand(BYTECODE_ARRAY_TYPE));
++
++ // Load the feedback vector from the closure.
++ __ Ldl(feedback_vector,
++ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
++ __ Ldl(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
++
++ Label push_stack_frame;
++ // Check if feedback vector is valid. If valid, check for optimized code
++ // and update invocation count. Otherwise, setup the stack frame.
++ __ Ldl(a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
++ __ Ldhu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
++ __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE));
++
++ // Read off the optimized code slot in the feedback vector, and if there
++ // is optimized code or an optimization marker, call that instead.
++ Register optimized_code_entry = a4;
++ __ Ldl(optimized_code_entry,
++ FieldMemOperand(feedback_vector,
++ FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
++
++ // Check if the optimized code slot is not empty.
++ Label optimized_code_slot_not_empty;
++
++ __ Branch(&optimized_code_slot_not_empty, ne, optimized_code_entry,
++ Operand(Smi::FromEnum(OptimizationMarker::kNone)));
++
++ Label not_optimized;
++ __ bind(&not_optimized);
++
++ // Increment invocation count for the function.
++ __ Ldw(a4, FieldMemOperand(feedback_vector,
++ FeedbackVector::kInvocationCountOffset));
++ __ Addw(a4, a4, Operand(1));
++ __ Stw(a4, FieldMemOperand(feedback_vector,
++ FeedbackVector::kInvocationCountOffset));
++
++ // Open a frame scope to indicate that there is a frame on the stack. The
++ // MANUAL indicates that the scope shouldn't actually generate code to set up
++ // the frame (that is done below).
++ __ bind(&push_stack_frame);
++ FrameScope frame_scope(masm, StackFrame::MANUAL);
++ __ PushStandardFrame(closure);
++
++ // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
++ // 8-bit fields next to each other, so we could just optimize by writing a
++ // 16-bit. These static asserts guard our assumption is valid.
++ STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
++ BytecodeArray::kOsrNestingLevelOffset + kCharSize);
++ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
++ __ Sth(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
++ BytecodeArray::kOsrNestingLevelOffset));
++
++ // Load initial bytecode offset.
++ __ li(kInterpreterBytecodeOffsetRegister,
++ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
++
++ // Push bytecode array and Smi tagged bytecode array offset.
++ __ SmiTag(a4, kInterpreterBytecodeOffsetRegister);
++ __ Push(kInterpreterBytecodeArrayRegister, a4);
++
++ // Allocate the local and temporary register file on the stack.
++ Label stack_overflow;
++ {
++ // Load frame size (word) from the BytecodeArray object.
++ __ Ldw(a4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
++ BytecodeArray::kFrameSizeOffset));
++
++ // Do a stack check to ensure we don't go over the limit.
++ __ Subl(a5, sp, Operand(a4));
++ LoadStackLimit(masm, a2, StackLimitKind::kRealStackLimit);
++ __ Branch(&stack_overflow, lo, a5, Operand(a2));
++
++ // If ok, push undefined as the initial value for all register file entries.
++ Label loop_header;
++ Label loop_check;
++ __ LoadRoot(a5, RootIndex::kUndefinedValue);
++ __ Branch(&loop_check);
++ __ bind(&loop_header);
++ // TODO(rmcilroy): Consider doing more than one push per loop iteration.
++ __ push(a5);
++ // Continue loop if not done.
++ __ bind(&loop_check);
++ __ Subl(a4, a4, Operand(kPointerSize));
++ __ Branch(&loop_header, ge, a4, Operand(zero_reg));
++ }
++
++ // If the bytecode array has a valid incoming new target or generator object
++ // register, initialize it with incoming value which was passed in r3.
++ Label no_incoming_new_target_or_generator_register; ++ __ Ldw(a5, FieldMemOperand( ++ kInterpreterBytecodeArrayRegister, ++ BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset)); ++ __ Branch(&no_incoming_new_target_or_generator_register, eq, a5, ++ Operand(zero_reg)); ++ __ s8addl(a5, fp, a5); DCHECK_EQ(kPointerSizeLog2, 3); ++ __ Stl(a3, MemOperand(a5)); ++ __ bind(&no_incoming_new_target_or_generator_register); ++ ++ // Perform interrupt stack check. ++ // TODO(solanes): Merge with the real stack limit check above. ++ Label stack_check_interrupt, after_stack_check_interrupt; ++ LoadStackLimit(masm, a5, StackLimitKind::kInterruptStackLimit); ++ __ Branch(&stack_check_interrupt, lo, sp, Operand(a5)); ++ __ bind(&after_stack_check_interrupt); ++ ++ // Load accumulator as undefined. ++ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); ++ ++ // Load the dispatch table into a register and dispatch to the bytecode ++ // handler at the current bytecode offset. ++ Label do_dispatch; ++ __ bind(&do_dispatch); ++ __ li(kInterpreterDispatchTableRegister, ++ ExternalReference::interpreter_dispatch_table_address(masm->isolate())); ++ __ Addl(a0, kInterpreterBytecodeArrayRegister, ++ kInterpreterBytecodeOffsetRegister); ++ __ Ldbu(t10, MemOperand(a0)); ++ __ s8addl(t10, kInterpreterDispatchTableRegister, kScratchReg); DCHECK_EQ(kPointerSizeLog2, 3); ++ __ Ldl(kJavaScriptCallCodeStartRegister, MemOperand(kScratchReg)); ++ __ Call(kJavaScriptCallCodeStartRegister); ++ masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset()); ++ ++ // Any returns to the entry trampoline are either due to the return bytecode ++ // or the interpreter tail calling a builtin and then a dispatch. ++ ++ // Get bytecode array and bytecode offset from the stack frame. ++ __ Ldl(kInterpreterBytecodeArrayRegister, ++ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); ++ __ Ldl(kInterpreterBytecodeOffsetRegister, ++ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); ++ __ SmiUntag(kInterpreterBytecodeOffsetRegister); ++ ++ // Either return, or advance to the next bytecode and dispatch. ++ Label do_return; ++ __ Addl(a1, kInterpreterBytecodeArrayRegister, ++ kInterpreterBytecodeOffsetRegister); ++ __ Ldbu(a1, MemOperand(a1)); ++ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister, ++ kInterpreterBytecodeOffsetRegister, a1, a2, a3, ++ a4, &do_return); ++ __ jmp(&do_dispatch); ++ ++ __ bind(&do_return); ++ // The return value is in v0. ++ LeaveInterpreterFrame(masm, t0); ++ __ Jump(ra); ++ ++ __ bind(&stack_check_interrupt); ++ // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset ++ // for the call to the StackGuard. ++ __ li(kInterpreterBytecodeOffsetRegister, ++ Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag + ++ kFunctionEntryBytecodeOffset))); ++ __ Stl(kInterpreterBytecodeOffsetRegister, ++ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); ++ __ CallRuntime(Runtime::kStackGuard); ++ ++ // After the call, restore the bytecode array, bytecode offset and accumulator ++ // registers again. Also, restore the bytecode offset in the stack to its ++ // previous value. 
++ __ Ldl(kInterpreterBytecodeArrayRegister,
++ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
++ __ li(kInterpreterBytecodeOffsetRegister,
++ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
++ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
++
++ __ SmiTag(a5, kInterpreterBytecodeOffsetRegister);
++ __ Stl(a5, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
++
++ __ jmp(&after_stack_check_interrupt);
++
++ __ bind(&optimized_code_slot_not_empty);
++ Label maybe_has_optimized_code;
++ // Check if optimized code marker is actually a weak reference to the
++ // optimized code as opposed to an optimization marker.
++ __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
++ MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
++ // Fall through if there's no runnable optimized code.
++ __ jmp(&not_optimized);
++
++ __ bind(&maybe_has_optimized_code);
++ // Load code entry from the weak reference, if it was cleared, resume
++ // execution of unoptimized code.
++ __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
++ TailCallOptimizedCodeSlot(masm, optimized_code_entry, t3, a5);
++
++ __ bind(&compile_lazy);
++ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
++ // Unreachable code.
++ __ sys_call(0x80);
++
++ __ bind(&stack_overflow);
++ __ CallRuntime(Runtime::kThrowStackOverflow);
++ // Unreachable code.
++ __ sys_call(0x80);
++}
++
++static void Generate_InterpreterPushArgs(MacroAssembler* masm,
++ Register num_args,
++ Register start_address,
++ Register scratch,
++ Register scratch2) {
++ // Find the address of the last argument.
++ __ Subl(scratch, num_args, Operand(1));
++ __ slll(scratch, kPointerSizeLog2, scratch);
++ __ Subl(start_address, start_address, scratch);
++
++ // Push the arguments.
++ __ PushArray(start_address, num_args, scratch, scratch2,
++ TurboAssembler::PushArrayOrder::kReverse);
++}
++
++// static
++void Builtins::Generate_InterpreterPushArgsThenCallImpl(
++ MacroAssembler* masm, ConvertReceiverMode receiver_mode,
++ InterpreterPushArgsMode mode) {
++ DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
++ // ----------- S t a t e -------------
++ // -- a0 : the number of arguments (not including the receiver)
++ // -- a2 : the address of the first argument to be pushed. Subsequent
++ // arguments should be consecutive above this, in the same order as
++ // they are to be pushed onto the stack.
++ // -- a1 : the target to call (can be any Object).
++ // -----------------------------------
++ Label stack_overflow;
++ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
++ // The spread argument should not be pushed.
++ __ Subl(a0, a0, Operand(1));
++ }
++
++ __ Addl(a3, a0, Operand(1)); // Add one for receiver.
++
++ Generate_StackOverflowCheck(masm, a3, a4, t0, &stack_overflow);
++
++ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
++ // Don't copy receiver.
++ __ mov(a3, a0);
++ }
++
++ // This function modifies a2, t0 and a4.
++ Generate_InterpreterPushArgs(masm, a3, a2, a4, t0);
++
++ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
++ __ PushRoot(RootIndex::kUndefinedValue);
++ }
++
++ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
++ // Pass the spread in the register a2.
++ // a2 already points to the penultimate argument, the spread
++ // is below that.
++ __ Ldl(a2, MemOperand(a2, -kSystemPointerSize));
++ }
++
++ // Call the target.
++ if (mode == InterpreterPushArgsMode::kWithFinalSpread) { ++ __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread), ++ RelocInfo::CODE_TARGET); ++ } else { ++ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny), ++ RelocInfo::CODE_TARGET); ++ } ++ ++ __ bind(&stack_overflow); ++ { ++ __ TailCallRuntime(Runtime::kThrowStackOverflow); ++ // Unreachable code. ++ __ sys_call(0x80); ++ } ++} ++ ++// static ++void Builtins::Generate_InterpreterPushArgsThenConstructImpl( ++ MacroAssembler* masm, InterpreterPushArgsMode mode) { ++ // ----------- S t a t e ------------- ++ // -- a0 : argument count (not including receiver) ++ // -- a3 : new target ++ // -- a1 : constructor to call ++ // -- a2 : allocation site feedback if available, undefined otherwise. ++ // -- a4 : address of the first argument ++ // ----------------------------------- ++ Label stack_overflow; ++ __ addl(a0, 1, t9); ++ Generate_StackOverflowCheck(masm, t9, a5, t0, &stack_overflow); ++ ++ if (mode == InterpreterPushArgsMode::kWithFinalSpread) { ++ // The spread argument should not be pushed. ++ __ Subl(a0, a0, Operand(1)); ++ } ++ ++ // Push the arguments, This function modifies t0, a4 and a5. ++ Generate_InterpreterPushArgs(masm, a0, a4, a5, t0); ++ ++ // Push a slot for the receiver. ++ __ push(zero_reg); ++ ++ if (mode == InterpreterPushArgsMode::kWithFinalSpread) { ++ // Pass the spread in the register a2. ++ // a4 already points to the penultimate argument, the spread ++ // lies in the next interpreter register. ++ __ Ldl(a2, MemOperand(a4, -kSystemPointerSize)); ++ } else { ++ __ AssertUndefinedOrAllocationSite(a2, t0); ++ } ++ ++ if (mode == InterpreterPushArgsMode::kArrayFunction) { ++ __ AssertFunction(a1); ++ ++ // Tail call to the function-specific construct stub (still in the caller ++ // context at this point). ++ __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl), ++ RelocInfo::CODE_TARGET); ++ } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) { ++ // Call the constructor with a0, a1, and a3 unmodified. ++ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread), ++ RelocInfo::CODE_TARGET); ++ } else { ++ DCHECK_EQ(InterpreterPushArgsMode::kOther, mode); ++ // Call the constructor with a0, a1, and a3 unmodified. ++ __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET); ++ } ++ ++ __ bind(&stack_overflow); ++ { ++ __ TailCallRuntime(Runtime::kThrowStackOverflow); ++ // Unreachable code. ++ __ sys_call(0x80); ++ } ++} ++ ++static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { ++ // Set the return address to the correct point in the interpreter entry ++ // trampoline. ++ Label builtin_trampoline, trampoline_loaded; ++ Smi interpreter_entry_return_pc_offset( ++ masm->isolate()->heap()->interpreter_entry_return_pc_offset()); ++ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero()); ++ ++ // If the SFI function_data is an InterpreterData, the function will have a ++ // custom copy of the interpreter entry trampoline for profiling. If so, ++ // get the custom trampoline, otherwise grab the entry address of the global ++ // trampoline. 
++ __ Ldl(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); ++ __ Ldl(t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset)); ++ __ Ldl(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset)); ++ __ GetObjectType(t0, kInterpreterDispatchTableRegister, ++ kInterpreterDispatchTableRegister); ++ __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister, ++ Operand(INTERPRETER_DATA_TYPE)); ++ ++ __ Ldl(t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset)); ++ __ Addl(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag)); ++ __ Branch(&trampoline_loaded); ++ ++ __ bind(&builtin_trampoline); ++ __ li(t0, ExternalReference:: ++ address_of_interpreter_entry_trampoline_instruction_start( ++ masm->isolate())); ++ __ Ldl(t0, MemOperand(t0)); ++ ++ __ bind(&trampoline_loaded); ++ __ Addl(ra, t0, Operand(interpreter_entry_return_pc_offset.value())); ++ ++ // Initialize the dispatch table register. ++ __ li(kInterpreterDispatchTableRegister, ++ ExternalReference::interpreter_dispatch_table_address(masm->isolate())); ++ ++ // Get the bytecode array pointer from the frame. ++ __ Ldl(kInterpreterBytecodeArrayRegister, ++ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); ++ ++ if (FLAG_debug_code) { ++ // Check function data field is actually a BytecodeArray object. ++ __ SmiTst(kInterpreterBytecodeArrayRegister, kScratchReg); ++ __ Assert(ne, ++ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, ++ kScratchReg, Operand(zero_reg)); ++ __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1); ++ __ Assert(eq, ++ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, ++ a1, Operand(BYTECODE_ARRAY_TYPE)); ++ } ++ ++ // Get the target bytecode offset from the frame. ++ __ SmiUntag(kInterpreterBytecodeOffsetRegister, ++ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); ++ ++ if (FLAG_debug_code) { ++ Label okay; ++ __ Branch(&okay, ge, kInterpreterBytecodeOffsetRegister, ++ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); ++ // Unreachable code. ++ __ sys_call(0x80); ++ __ bind(&okay); ++ } ++ ++ // Dispatch to the target bytecode. ++ __ Addl(a1, kInterpreterBytecodeArrayRegister, ++ kInterpreterBytecodeOffsetRegister); ++ __ Ldbu(t10, MemOperand(a1)); ++ __ s8addl(t10, kInterpreterDispatchTableRegister, a1); DCHECK_EQ(kPointerSizeLog2, 3); ++ __ Ldl(kJavaScriptCallCodeStartRegister, MemOperand(a1)); ++ __ Jump(kJavaScriptCallCodeStartRegister); ++} ++ ++void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { ++ // Advance the current bytecode offset stored within the given interpreter ++ // stack frame. This simulates what all bytecode handlers do upon completion ++ // of the underlying operation. ++ __ Ldl(kInterpreterBytecodeArrayRegister, ++ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); ++ __ Ldl(kInterpreterBytecodeOffsetRegister, ++ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); ++ __ SmiUntag(kInterpreterBytecodeOffsetRegister); ++ ++ Label enter_bytecode, function_entry_bytecode; ++ __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister, ++ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag + ++ kFunctionEntryBytecodeOffset)); ++ ++ // Load the current bytecode. ++ __ Addl(a1, kInterpreterBytecodeArrayRegister, ++ kInterpreterBytecodeOffsetRegister); ++ __ Ldbu(a1, MemOperand(a1)); ++ ++ // Advance to the next bytecode. 
++ Label if_return; ++ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister, ++ kInterpreterBytecodeOffsetRegister, a1, a2, a3, ++ a4, &if_return); ++ ++ __ bind(&enter_bytecode); ++ // Convert new bytecode offset to a Smi and save in the stackframe. ++ __ SmiTag(a2, kInterpreterBytecodeOffsetRegister); ++ __ Stl(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); ++ ++ Generate_InterpreterEnterBytecode(masm); ++ ++ __ bind(&function_entry_bytecode); ++ // If the code deoptimizes during the implicit function entry stack interrupt ++ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is ++ // not a valid bytecode offset. Detect this case and advance to the first ++ // actual bytecode. ++ __ li(kInterpreterBytecodeOffsetRegister, ++ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); ++ __ Branch(&enter_bytecode); ++ ++ // We should never take the if_return path. ++ __ bind(&if_return); ++ __ Abort(AbortReason::kInvalidBytecodeAdvance); ++} ++ ++void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) { ++ Generate_InterpreterEnterBytecode(masm); ++} ++ ++namespace { ++void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, ++ bool java_script_builtin, ++ bool with_result) { ++ const RegisterConfiguration* config(RegisterConfiguration::Default()); ++ int allocatable_register_count = config->num_allocatable_general_registers(); ++ Register scratch = t3; ++ if (with_result) { ++ if (java_script_builtin) { ++ __ mov(scratch, v0); ++ } else { ++ // Overwrite the hole inserted by the deoptimizer with the return value from ++ // the LAZY deopt point. ++ __ Stl(v0, ++ MemOperand( ++ sp, config->num_allocatable_general_registers() * kPointerSize + ++ BuiltinContinuationFrameConstants::kFixedFrameSize)); ++ } ++ } ++ for (int i = allocatable_register_count - 1; i >= 0; --i) { ++ int code = config->GetAllocatableGeneralCode(i); ++ __ Pop(Register::from_code(code)); ++ if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) { ++ __ SmiUntag(Register::from_code(code)); ++ } ++ } ++ ++ if (with_result && java_script_builtin) { ++ // Overwrite the hole inserted by the deoptimizer with the return value from ++ // the LAZY deopt point. t0 contains the arguments count, the return value ++ // from LAZY is always the last argument. ++ __ Addl(a0, a0, ++ Operand(BuiltinContinuationFrameConstants::kFixedSlotCount)); ++ __ Dlsa(t0, sp, a0, kSystemPointerSizeLog2); ++ __ Stl(scratch, MemOperand(t0)); ++ // Recover arguments count. ++ __ Subl(a0, a0, ++ Operand(BuiltinContinuationFrameConstants::kFixedSlotCount)); ++ } ++ ++ __ Ldl(fp, MemOperand( ++ sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); ++ // Load builtin index (stored as a Smi) and use it to get the builtin start ++ // address from the builtins table. 
++ __ Pop(t0); ++ __ Addl(sp, sp, ++ Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); ++ __ Pop(ra); ++ __ LoadEntryFromBuiltinIndex(t0); ++ __ Jump(t0); ++} ++} // namespace ++ ++void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) { ++ Generate_ContinueToBuiltinHelper(masm, false, false); ++} ++ ++void Builtins::Generate_ContinueToCodeStubBuiltinWithResult( ++ MacroAssembler* masm) { ++ Generate_ContinueToBuiltinHelper(masm, false, true); ++} ++ ++void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) { ++ Generate_ContinueToBuiltinHelper(masm, true, false); ++} ++ ++void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult( ++ MacroAssembler* masm) { ++ Generate_ContinueToBuiltinHelper(masm, true, true); ++} ++ ++void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { ++ { ++ FrameScope scope(masm, StackFrame::INTERNAL); ++ __ CallRuntime(Runtime::kNotifyDeoptimized); ++ } ++ ++ DCHECK_EQ(kInterpreterAccumulatorRegister.code(), v0.code()); ++ __ Ldl(v0, MemOperand(sp, 0 * kPointerSize)); ++ // Safe to fill delay slot Addw will emit one instruction. ++ __ Addl(sp, sp, Operand(1 * kPointerSize)); // Remove state. ++ __ Ret(); ++} ++ ++void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { ++ { ++ FrameScope scope(masm, StackFrame::INTERNAL); ++ __ CallRuntime(Runtime::kCompileForOnStackReplacement); ++ } ++ ++ // If the code object is null, just return to the caller. ++ __ Ret(eq, v0, Operand(Smi::zero())); ++ ++ // Drop the handler frame that is be sitting on top of the actual ++ // JavaScript frame. This is the case then OSR is triggered from bytecode. ++ __ LeaveFrame(StackFrame::STUB); ++ ++ // Load deoptimization data from the code object. ++ // = [#deoptimization_data_offset] ++ __ Ldl(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag)); ++ ++ // Load the OSR entrypoint offset from the deoptimization data. ++ // = [#header_size + #osr_pc_offset] ++ __ SmiUntag(a1, MemOperand(a1, FixedArray::OffsetOfElementAt( ++ DeoptimizationData::kOsrPcOffsetIndex) - ++ kHeapObjectTag)); ++ ++ // Compute the target address = code_obj + header_size + osr_offset ++ // = + #header_size + ++ __ Addl(v0, v0, a1); ++ __ Addl(ra, v0, Operand(Code::kHeaderSize - kHeapObjectTag)); ++ ++ // And "return" to the OSR entry point of the function. ++ __ Ret(); ++} ++ ++// static ++void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0 : argc ++ // -- sp[0] : argArray ++ // -- sp[4] : thisArg ++ // -- sp[8] : receiver ++ // ----------------------------------- ++ ++ Register argc = a0; ++ Register arg_array = a2; ++ Register receiver = a1; ++ Register this_arg = a5; ++ Register undefined_value = a3; ++ Register scratch = a4; ++ ++ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue); ++ ++ // 1. Load receiver into a1, argArray into a2 (if present), remove all ++ // arguments from the stack (including the receiver), and push thisArg (if ++ // present) instead. ++ { ++ // Claim (2 - argc) dummy arguments form the stack, to put the stack in a ++ // consistent state for a simple pop operation. 
++ ++ __ mov(scratch, argc); ++ __ Ldl(this_arg, MemOperand(sp, kPointerSize)); ++ __ Ldl(arg_array, MemOperand(sp, 2 * kPointerSize)); ++ __ Seleq(arg_array, undefined_value, scratch); // if argc == 0 ++ __ Seleq(this_arg, undefined_value, scratch); // if argc == 0 ++ __ Subl(scratch, scratch, Operand(1)); ++ __ Seleq(arg_array, undefined_value, scratch); // if argc == 1 ++ __ Ldl(receiver, MemOperand(sp)); ++ __ Dlsa(sp, sp, argc, kSystemPointerSizeLog2); ++ __ Stl(this_arg, MemOperand(sp)); ++ } ++ ++ // ----------- S t a t e ------------- ++ // -- a2 : argArray ++ // -- a1 : receiver ++ // -- a3 : undefined root value ++ // -- sp[0] : thisArg ++ // ----------------------------------- ++ ++ // 2. We don't need to check explicitly for callable receiver here, ++ // since that's the first thing the Call/CallWithArrayLike builtins ++ // will do. ++ ++ // 3. Tail call with no arguments if argArray is null or undefined. ++ Label no_arguments; ++ __ JumpIfRoot(arg_array, RootIndex::kNullValue, &no_arguments); ++ __ Branch(&no_arguments, eq, arg_array, Operand(undefined_value)); ++ ++ // 4a. Apply the receiver to the given argArray. ++ __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike), ++ RelocInfo::CODE_TARGET); ++ ++ // 4b. The argArray is either null or undefined, so we tail call without any ++ // arguments to the receiver. ++ __ bind(&no_arguments); ++ { ++ __ mov(a0, zero_reg); ++ DCHECK(receiver == a1); ++ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET); ++ } ++} ++ ++// static ++void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { ++ // 1. Get the callable to call (passed as receiver) from the stack. ++ { ++ __ Pop(a1); ++ } ++ ++ // 2. Make sure we have at least one argument. ++ // a0: actual number of arguments ++ { ++ Label done; ++ __ Branch(&done, ne, a0, Operand(zero_reg)); ++ __ PushRoot(RootIndex::kUndefinedValue); ++ __ Addl(a0, a0, Operand(1)); ++ __ bind(&done); ++ } ++ ++ // 3. Adjust the actual number of arguments. ++ __ subl(a0, 1, a0); ++ ++ // 4. Call the callable. ++ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET); ++} ++ ++void Builtins::Generate_ReflectApply(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0 : argc ++ // -- sp[0] : argumentsList (if argc ==3) ++ // -- sp[4] : thisArgument (if argc >=2) ++ // -- sp[8] : target (if argc >=1) ++ // -- sp[12] : receiver ++ // ----------------------------------- ++ ++ Register argc = a0; ++ Register arguments_list = a2; ++ Register target = a1; ++ Register this_argument = a5; ++ Register undefined_value = a3; ++ Register scratch = a4; ++ ++ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue); ++ ++ // 1. Load target into a1 (if present), argumentsList into a2 (if present), ++ // remove all arguments from the stack (including the receiver), and push ++ // thisArgument (if present) instead. ++ { ++ // Claim (3 - argc) dummy arguments form the stack, to put the stack in a ++ // consistent state for a simple pop operation. 
++ ++ __ mov(scratch, argc); ++ __ Ldl(target, MemOperand(sp, kPointerSize)); ++ __ Ldl(this_argument, MemOperand(sp, 2 * kPointerSize)); ++ __ Ldl(arguments_list, MemOperand(sp, 3 * kPointerSize)); ++ __ Seleq(arguments_list, undefined_value, scratch); // if argc == 0 ++ __ Seleq(this_argument, undefined_value, scratch); // if argc == 0 ++ __ Seleq(target, undefined_value, scratch); // if argc == 0 ++ __ Subl(scratch, scratch, Operand(1)); ++ __ Seleq(arguments_list, undefined_value, scratch); // if argc == 1 ++ __ Seleq(this_argument, undefined_value, scratch); // if argc == 1 ++ __ Subl(scratch, scratch, Operand(1)); ++ __ Seleq(arguments_list, undefined_value, scratch); // if argc == 2 ++ ++ __ Dlsa(sp, sp, argc, kSystemPointerSizeLog2); ++ __ Stl(this_argument, MemOperand(sp, 0)); // Overwrite receiver ++ } ++ ++ // ----------- S t a t e ------------- ++ // -- a2 : argumentsList ++ // -- a1 : target ++ // -- a3 : undefined root value ++ // -- sp[0] : thisArgument ++ // ----------------------------------- ++ ++ // 2. We don't need to check explicitly for callable target here, ++ // since that's the first thing the Call/CallWithArrayLike builtins ++ // will do. ++ ++ // 3. Apply the target to the given argumentsList. ++ __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike), ++ RelocInfo::CODE_TARGET); ++} ++ ++void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0 : argc ++ // -- sp[0] : new.target (optional) (dummy value if argc <= 2) ++ // -- sp[4] : argumentsList (dummy value if argc <= 1) ++ // -- sp[8] : target (dummy value if argc == 0) ++ // -- sp[12] : receiver ++ // ----------------------------------- ++ // NOTE: The order of args in the stack are reversed if V8_REVERSE_JSARGS ++ ++ Register argc = a0; ++ Register arguments_list = a2; ++ Register target = a1; ++ Register new_target = a3; ++ Register undefined_value = a4; ++ Register scratch = a5; ++ ++ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue); ++ ++ // 1. Load target into a1 (if present), argumentsList into a2 (if present), ++ // new.target into a3 (if present, otherwise use target), remove all ++ // arguments from the stack (including the receiver), and push thisArgument ++ // (if present) instead. ++ { ++ // Claim (3 - argc) dummy arguments form the stack, to put the stack in a ++ // consistent state for a simple pop operation. ++ ++ __ mov(scratch, argc); ++ __ Ldl(target, MemOperand(sp, kPointerSize)); ++ __ Ldl(arguments_list, MemOperand(sp, 2 * kPointerSize)); ++ __ Ldl(new_target, MemOperand(sp, 3 * kPointerSize)); ++ __ Seleq(arguments_list, undefined_value, scratch); // if argc == 0 ++ __ Seleq(new_target, undefined_value, scratch); // if argc == 0 ++ __ Seleq(target, undefined_value, scratch); // if argc == 0 ++ __ Subl(scratch, scratch, Operand(1)); ++ __ Seleq(arguments_list, undefined_value, scratch); // if argc == 1 ++ __ Seleq(new_target, target, scratch); // if argc == 1 ++ __ Subl(scratch, scratch, Operand(1)); ++ __ Seleq(new_target, target, scratch); // if argc == 2 ++ ++ __ Dlsa(sp, sp, argc, kSystemPointerSizeLog2); ++ __ Stl(undefined_value, MemOperand(sp, 0)); // Overwrite receiver ++ } ++ ++ // ----------- S t a t e ------------- ++ // -- a2 : argumentsList ++ // -- a1 : target ++ // -- a3 : new.target ++ // -- sp[0] : receiver (undefined) ++ // ----------------------------------- ++ ++ // 2. 
We don't need to check explicitly for constructor target here,
++ // since that's the first thing the Construct/ConstructWithArrayLike
++ // builtins will do.
++
++ // 3. We don't need to check explicitly for constructor new.target here,
++ // since that's the second thing the Construct/ConstructWithArrayLike
++ // builtins will do.
++
++ // 4. Construct the target with the given new.target and argumentsList.
++ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
++ RelocInfo::CODE_TARGET);
++}
++
++static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
++ __ SmiTag(a0);
++ __ li(a4, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
++ __ MultiPush(a0.bit() | a1.bit() | a4.bit() | fp.bit() | ra.bit());
++ __ Push(Smi::zero()); // Padding.
++ __ Addl(fp, sp,
++ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
++}
++
++static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
++ // ----------- S t a t e -------------
++ // -- v0 : result being passed through
++ // -----------------------------------
++ // Get the number of arguments passed (as a smi), tear down the frame and
++ // then tear down the parameters.
++ __ Ldl(a1, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
++ __ mov(sp, fp);
++ __ MultiPop(fp.bit() | ra.bit());
++ __ SmiScale(a4, a1, kPointerSizeLog2);
++ __ Addl(sp, sp, a4);
++ // Adjust for the receiver.
++ __ Addl(sp, sp, Operand(kPointerSize));
++}
++
++// static
++void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
++ Handle<Code> code) {
++ // ----------- S t a t e -------------
++ // -- a1 : target
++ // -- a0 : number of parameters on the stack (not including the receiver)
++ // -- a2 : arguments list (a FixedArray)
++ // -- a4 : len (number of elements to push from args)
++ // -- a3 : new.target (for [[Construct]])
++ // -----------------------------------
++ if (masm->emit_debug_code()) {
++ // Allow a2 to be a FixedArray, or a FixedDoubleArray if a4 == 0.
++ Label ok, fail;
++ __ AssertNotSmi(a2);
++ __ GetObjectType(a2, t8, t8);
++ __ Branch(&ok, eq, t8, Operand(FIXED_ARRAY_TYPE));
++ __ Branch(&fail, ne, t8, Operand(FIXED_DOUBLE_ARRAY_TYPE));
++ __ Branch(&ok, eq, a4, Operand(zero_reg));
++ // Fall through.
++ __ bind(&fail);
++ __ Abort(AbortReason::kOperandIsNotAFixedArray);
++
++ __ bind(&ok);
++ }
++
++ Register args = a2;
++ Register len = a4;
++
++ // Check for stack overflow.
++ Label stack_overflow;
++ Generate_StackOverflowCheck(masm, len, kScratchReg, a5, &stack_overflow);
++
++ // Move the arguments already in the stack,
++ // including the receiver and the return address.
++ {
++ Label copy;
++ Register src = t9, dest = t10;
++ __ mov(src, sp);
++ __ slll(a4, kSystemPointerSizeLog2, t0);
++ __ Subl(sp, sp, Operand(t0));
++ // Update stack pointer.
++ __ mov(dest, sp);
++ __ Addl(t0, a0, Operand(zero_reg));
++
++ __ bind(&copy);
++ __ Ldl(t1, MemOperand(src, 0));
++ __ Stl(t1, MemOperand(dest, 0));
++ __ Subl(t0, t0, Operand(1));
++ __ Addl(src, src, Operand(kSystemPointerSize));
++ __ Addl(dest, dest, Operand(kSystemPointerSize));
++ __ Branch(&copy, ge, t0, Operand(zero_reg));
++ }
++
++ // Push arguments onto the stack (thisArgument is already on the stack).
++ {
++ Label done, push, loop;
++ Register src = t9;
++ Register scratch = len;
++
++ __ Addl(src, args, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
++ __ Addl(a0, a0, len); // The 'len' argument for Call() or Construct().
++ __ Branch(&done, eq, len, Operand(zero_reg));
++ __ slll(len, kPointerSizeLog2, scratch);
++ __ Subl(scratch, sp, Operand(scratch));
++ __ LoadRoot(t1, RootIndex::kTheHoleValue);
++ __ bind(&loop);
++ __ Ldl(a5, MemOperand(src));
++ __ addl(src, kPointerSize, src);
++ __ Branch(&push, ne, a5, Operand(t1));
++ __ LoadRoot(a5, RootIndex::kUndefinedValue);
++ __ bind(&push);
++ __ Stl(a5, MemOperand(t10, 0));
++ __ Addl(t10, t10, Operand(kSystemPointerSize));
++ __ Addl(scratch, scratch, Operand(kSystemPointerSize));
++ __ Branch(&loop, ne, scratch, Operand(sp));
++ __ bind(&done);
++ }
++
++ // Tail-call to the actual Call or Construct builtin.
++ __ Jump(code, RelocInfo::CODE_TARGET);
++
++ __ bind(&stack_overflow);
++ __ TailCallRuntime(Runtime::kThrowStackOverflow);
++}
++
++// static
++void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
++ CallOrConstructMode mode,
++ Handle<Code> code) {
++ // ----------- S t a t e -------------
++ // -- a0 : the number of arguments (not including the receiver)
++ // -- a3 : the new.target (for [[Construct]] calls)
++ // -- a1 : the target to call (can be any Object)
++ // -- a2 : start index (to support rest parameters)
++ // -----------------------------------
++
++ // Check if new.target has a [[Construct]] internal method.
++ if (mode == CallOrConstructMode::kConstruct) {
++ Label new_target_constructor, new_target_not_constructor;
++ __ JumpIfSmi(a3, &new_target_not_constructor);
++ __ Ldl(t1, FieldMemOperand(a3, HeapObject::kMapOffset));
++ __ Ldbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
++ __ And(t1, t1, Operand(Map::Bits1::IsConstructorBit::kMask));
++ __ Branch(&new_target_constructor, ne, t1, Operand(zero_reg));
++ __ bind(&new_target_not_constructor);
++ {
++ FrameScope scope(masm, StackFrame::MANUAL);
++ __ EnterFrame(StackFrame::INTERNAL);
++ __ Push(a3);
++ __ CallRuntime(Runtime::kThrowNotConstructor);
++ }
++ __ bind(&new_target_constructor);
++ }
++
++ // Check if we have an arguments adaptor frame below the function frame.
++ Label arguments_adaptor, arguments_done;
++ __ Ldl(t9, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
++ __ Ldl(t10, MemOperand(t9, CommonFrameConstants::kContextOrFrameTypeOffset));
++ __ Branch(&arguments_adaptor, eq, t10,
++ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
++ {
++ __ Ldl(t10, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
++ __ Ldl(t10, FieldMemOperand(t10, JSFunction::kSharedFunctionInfoOffset));
++ __ Ldhu(t10, FieldMemOperand(
++ t10, SharedFunctionInfo::kFormalParameterCountOffset));
++ __ mov(t9, fp);
++ }
++ __ Branch(&arguments_done);
++ __ bind(&arguments_adaptor);
++ {
++ // Just get the length from the ArgumentsAdaptorFrame.
++ __ SmiUntag(t10,
++ MemOperand(t9, ArgumentsAdaptorFrameConstants::kLengthOffset));
++ }
++ __ bind(&arguments_done);
++
++ Label stack_done, stack_overflow;
++ __ Subw(t10, t10, a2);
++ __ Branch(&stack_done, le, t10, Operand(zero_reg));
++ {
++ // Check for stack overflow.
++ Generate_StackOverflowCheck(masm, t10, a4, a5, &stack_overflow);
++
++ // Forward the arguments from the caller frame.
++
++ // Point to the first argument to copy (skipping the receiver).
++ __ Addl(t9, t9,
++ Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
++ kSystemPointerSize));
++ __ Dlsa(t9, t9, a2, kSystemPointerSizeLog2);
++
++ // Move the arguments already in the stack,
++ // including the receiver and the return address.
++ {
++ Label copy;
++ Register src = t0, dest = a2;
++ __ mov(src, sp);
++ // Update stack pointer.
++ __ slll(t10, kSystemPointerSizeLog2, t1);
++ __ Subl(sp, sp, Operand(t1));
++ __ mov(dest, sp);
++ __ Addl(t2, a0, Operand(zero_reg));
++
++ __ bind(&copy);
++ __ Ldl(t1, MemOperand(src, 0));
++ __ Stl(t1, MemOperand(dest, 0));
++ __ Subl(t2, t2, Operand(1));
++ __ Addl(src, src, Operand(kSystemPointerSize));
++ __ Addl(dest, dest, Operand(kSystemPointerSize));
++ __ Branch(&copy, ge, t2, Operand(zero_reg));
++ }
++
++ // Copy arguments from the caller frame.
++ // TODO(victorgomes): Consider using forward order as potentially more cache
++ // friendly.
++ {
++ Label loop;
++ __ Addl(a0, a0, t10);
++ __ bind(&loop);
++ {
++ __ Subw(t10, t10, Operand(1));
++ __ Dlsa(t0, t9, t10, kPointerSizeLog2);
++ __ Ldl(kScratchReg, MemOperand(t0));
++ __ Dlsa(t0, a2, t10, kPointerSizeLog2);
++ __ Stl(kScratchReg, MemOperand(t0));
++ __ Branch(&loop, ne, t10, Operand(zero_reg));
++ }
++ }
++ }
++ __ Branch(&stack_done);
++ __ bind(&stack_overflow);
++ __ TailCallRuntime(Runtime::kThrowStackOverflow);
++ __ bind(&stack_done);
++
++ // Tail-call to the {code} handler.
++ __ Jump(code, RelocInfo::CODE_TARGET);
++}
++
++// static
++void Builtins::Generate_CallFunction(MacroAssembler* masm,
++ ConvertReceiverMode mode) {
++ // ----------- S t a t e -------------
++ // -- a0 : the number of arguments (not including the receiver)
++ // -- a1 : the function to call (checked to be a JSFunction)
++ // -----------------------------------
++ __ AssertFunction(a1);
++
++ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
++ // Check that function is not a "classConstructor".
++ Label class_constructor;
++ __ Ldl(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
++ __ Ldwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
++ __ And(kScratchReg, a3,
++ Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
++ __ Branch(&class_constructor, ne, kScratchReg, Operand(zero_reg));
++
++ // Enter the context of the function; ToObject has to run in the function
++ // context, and we also need to take the global proxy from the function
++ // context in case of conversion.
++ __ Ldl(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
++ // We need to convert the receiver for non-native sloppy mode functions.
++ Label done_convert;
++ __ Ldwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
++ __ And(kScratchReg, a3,
++ Operand(SharedFunctionInfo::IsNativeBit::kMask |
++ SharedFunctionInfo::IsStrictBit::kMask));
++ __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg));
++ {
++ // ----------- S t a t e -------------
++ // -- a0 : the number of arguments (not including the receiver)
++ // -- a1 : the function to call (checked to be a JSFunction)
++ // -- a2 : the shared function info.
++ // -- cp : the function context.
++ // -----------------------------------
++
++ if (mode == ConvertReceiverMode::kNullOrUndefined) {
++ // Patch receiver to global proxy.
++ __ LoadGlobalProxy(a3); ++ } else { ++ Label convert_to_object, convert_receiver; ++ __ LoadReceiver(a3, a0); ++ __ JumpIfSmi(a3, &convert_to_object); ++ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); ++ __ GetObjectType(a3, a4, a4); ++ __ Branch(&done_convert, hs, a4, Operand(FIRST_JS_RECEIVER_TYPE)); ++ if (mode != ConvertReceiverMode::kNotNullOrUndefined) { ++ Label convert_global_proxy; ++ __ JumpIfRoot(a3, RootIndex::kUndefinedValue, &convert_global_proxy); ++ __ JumpIfNotRoot(a3, RootIndex::kNullValue, &convert_to_object); ++ __ bind(&convert_global_proxy); ++ { ++ // Patch receiver to global proxy. ++ __ LoadGlobalProxy(a3); ++ } ++ __ Branch(&convert_receiver); ++ } ++ __ bind(&convert_to_object); ++ { ++ // Convert receiver using ToObject. ++ // TODO(bmeurer): Inline the allocation here to avoid building the frame ++ // in the fast case? (fall back to AllocateInNewSpace?) ++ FrameScope scope(masm, StackFrame::INTERNAL); ++ __ SmiTag(a0); ++ __ Push(a0, a1); ++ __ mov(a0, a3); ++ __ Push(cp); ++ __ Call(BUILTIN_CODE(masm->isolate(), ToObject), ++ RelocInfo::CODE_TARGET); ++ __ Pop(cp); ++ __ mov(a3, v0); ++ __ Pop(a0, a1); ++ __ SmiUntag(a0); ++ } ++ __ Ldl(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); ++ __ bind(&convert_receiver); ++ } ++ __ StoreReceiver(a3, a0, kScratchReg); ++ } ++ __ bind(&done_convert); ++ ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a1 : the function to call (checked to be a JSFunction) ++ // -- a2 : the shared function info. ++ // -- cp : the function context. ++ // ----------------------------------- ++ ++ __ Ldhu(a2, ++ FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset)); ++ __ InvokeFunctionCode(a1, no_reg, a2, a0, JUMP_FUNCTION); ++ ++ // The function is a "classConstructor", need to raise an exception. ++ __ bind(&class_constructor); ++ { ++ FrameScope frame(masm, StackFrame::INTERNAL); ++ __ Push(a1); ++ __ CallRuntime(Runtime::kThrowConstructorNonCallableError); ++ } ++} ++ ++// static ++void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a1 : the function to call (checked to be a JSBoundFunction) ++ // ----------------------------------- ++ __ AssertBoundFunction(a1); ++ ++ // Patch the receiver to [[BoundThis]]. ++ { ++ __ Ldl(t0, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset)); ++ __ StoreReceiver(t0, a0, kScratchReg); ++ } ++ ++ // Load [[BoundArguments]] into a2 and length of that into a4. ++ __ Ldl(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset)); ++ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset)); ++ ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a1 : the function to call (checked to be a JSBoundFunction) ++ // -- a2 : the [[BoundArguments]] (implemented as FixedArray) ++ // -- a4 : the number of [[BoundArguments]] ++ // ----------------------------------- ++ ++ // Reserve stack space for the [[BoundArguments]]. ++ { ++ Label done; ++ __ slll(a4, kPointerSizeLog2, a5); ++ __ Subl(t0, sp, Operand(a5)); ++ // Check the stack for overflow. We are not trying to catch interruptions ++ // (i.e. debug break and preemption) here, so check the "real stack limit". 
++ LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit); ++ __ Branch(&done, hs, t0, Operand(kScratchReg)); ++ { ++ FrameScope scope(masm, StackFrame::MANUAL); ++ __ EnterFrame(StackFrame::INTERNAL); ++ __ CallRuntime(Runtime::kThrowStackOverflow); ++ } ++ __ bind(&done); ++ } ++ ++ // Pop receiver. ++ __ Pop(t0); ++ ++ // Push [[BoundArguments]]. ++ { ++ Label loop, done_loop; ++ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset)); ++ __ Addl(a0, a0, Operand(a4)); ++ __ Addl(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); ++ __ bind(&loop); ++ __ Subl(a4, a4, Operand(1)); ++ __ Branch(&done_loop, lt, a4, Operand(zero_reg)); ++ __ s8addl(a4, a2, a5); DCHECK_EQ(kPointerSizeLog2, 3); ++ __ Ldl(kScratchReg, MemOperand(a5)); ++ __ Push(kScratchReg); ++ __ Branch(&loop); ++ __ bind(&done_loop); ++ } ++ ++ // Push receiver. ++ __ Push(t0); ++ ++ // Call the [[BoundTargetFunction]] via the Call builtin. ++ __ Ldl(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset)); ++ __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny), ++ RelocInfo::CODE_TARGET); ++} ++ ++// static ++void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a1 : the target to call (can be any Object). ++ // ----------------------------------- ++ ++ Label non_callable, non_smi; ++ __ JumpIfSmi(a1, &non_callable); ++ __ bind(&non_smi); ++ __ GetObjectType(a1, t1, t2); ++ __ Jump(masm->isolate()->builtins()->CallFunction(mode), ++ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE)); ++ __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction), ++ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE)); ++ ++ // Check if target has a [[Call]] internal method. ++ __ Ldbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset)); ++ __ And(t1, t1, Operand(Map::Bits1::IsCallableBit::kMask)); ++ __ Branch(&non_callable, eq, t1, Operand(zero_reg)); ++ ++ __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), ++ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_PROXY_TYPE)); ++ ++ // 2. Call to something else, which might have a [[Call]] internal method (if ++ // not we raise an exception). ++ // Overwrite the original receiver with the (original) target. ++ __ StoreReceiver(a1, a0, kScratchReg); ++ // Let the "call_as_function_delegate" take care of the rest. ++ __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1); ++ __ Jump(masm->isolate()->builtins()->CallFunction( ++ ConvertReceiverMode::kNotNullOrUndefined), ++ RelocInfo::CODE_TARGET); ++ ++ // 3. Call to something that is not callable. ++ __ bind(&non_callable); ++ { ++ FrameScope scope(masm, StackFrame::INTERNAL); ++ __ Push(a1); ++ __ CallRuntime(Runtime::kThrowCalledNonCallable); ++ } ++} ++ ++void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a1 : the constructor to call (checked to be a JSFunction) ++ // -- a3 : the new target (checked to be a constructor) ++ // ----------------------------------- ++ __ AssertConstructor(a1); ++ __ AssertFunction(a1); ++ ++ // Calling convention for function specific ConstructStubs require ++ // a2 to contain either an AllocationSite or undefined. ++ __ LoadRoot(a2, RootIndex::kUndefinedValue); ++ ++ Label call_generic_stub; ++ ++ // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric. 
++ __ Ldl(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); ++ __ Ldwu(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset)); ++ __ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask)); ++ __ Branch(&call_generic_stub, eq, a4, Operand(zero_reg)); ++ ++ __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub), ++ RelocInfo::CODE_TARGET); ++ ++ __ bind(&call_generic_stub); ++ __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric), ++ RelocInfo::CODE_TARGET); ++} ++ ++// static ++void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a1 : the function to call (checked to be a JSBoundFunction) ++ // -- a3 : the new target (checked to be a constructor) ++ // ----------------------------------- ++ __ AssertConstructor(a1); ++ __ AssertBoundFunction(a1); ++ ++ // Load [[BoundArguments]] into a2 and length of that into a4. ++ __ Ldl(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset)); ++ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset)); ++ ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a1 : the function to call (checked to be a JSBoundFunction) ++ // -- a2 : the [[BoundArguments]] (implemented as FixedArray) ++ // -- a3 : the new target (checked to be a constructor) ++ // -- a4 : the number of [[BoundArguments]] ++ // ----------------------------------- ++ ++ // Reserve stack space for the [[BoundArguments]]. ++ { ++ Label done; ++ __ slll(a4, kPointerSizeLog2, a5); ++ __ Subl(t0, sp, Operand(a5)); ++ // Check the stack for overflow. We are not trying to catch interruptions ++ // (i.e. debug break and preemption) here, so check the "real stack limit". ++ LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit); ++ __ Branch(&done, hs, t0, Operand(kScratchReg)); ++ { ++ FrameScope scope(masm, StackFrame::MANUAL); ++ __ EnterFrame(StackFrame::INTERNAL); ++ __ CallRuntime(Runtime::kThrowStackOverflow); ++ } ++ __ bind(&done); ++ } ++ ++ // Pop receiver. ++ __ Pop(t0); ++ ++ // Push [[BoundArguments]]. ++ { ++ Label loop, done_loop; ++ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset)); ++ __ Addl(a0, a0, Operand(a4)); ++ __ Addl(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); ++ __ bind(&loop); ++ __ Subl(a4, a4, Operand(1)); ++ __ Branch(&done_loop, lt, a4, Operand(zero_reg)); ++ __ s8addl(a4, a2, a5); DCHECK_EQ(kPointerSizeLog2, 3); ++ __ Ldl(kScratchReg, MemOperand(a5)); ++ __ Push(kScratchReg); ++ __ Branch(&loop); ++ __ bind(&done_loop); ++ } ++ ++ // Push receiver. ++ __ Push(t0); ++ ++ // Patch new.target to [[BoundTargetFunction]] if new.target equals target. ++ { ++ Label skip_load; ++ __ Branch(&skip_load, ne, a1, Operand(a3)); ++ __ Ldl(a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset)); ++ __ bind(&skip_load); ++ } ++ ++ // Construct the [[BoundTargetFunction]] via the Construct builtin. 
++ __ Ldl(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset)); ++ __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET); ++} ++ ++// static ++void Builtins::Generate_Construct(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a1 : the constructor to call (can be any Object) ++ // -- a3 : the new target (either the same as the constructor or ++ // the JSFunction on which new was invoked initially) ++ // ----------------------------------- ++ ++ // Check if target is a Smi. ++ Label non_constructor, non_proxy; ++ __ JumpIfSmi(a1, &non_constructor); ++ ++ // Check if target has a [[Construct]] internal method. ++ __ Ldl(t1, FieldMemOperand(a1, HeapObject::kMapOffset)); ++ __ Ldbu(t3, FieldMemOperand(t1, Map::kBitFieldOffset)); ++ __ And(t3, t3, Operand(Map::Bits1::IsConstructorBit::kMask)); ++ __ Branch(&non_constructor, eq, t3, Operand(zero_reg)); ++ ++ // Dispatch based on instance type. ++ __ Ldhu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset)); ++ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction), ++ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE)); ++ ++ // Only dispatch to bound functions after checking whether they are ++ // constructors. ++ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction), ++ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE)); ++ ++ // Only dispatch to proxies after checking whether they are constructors. ++ __ Branch(&non_proxy, ne, t2, Operand(JS_PROXY_TYPE)); ++ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy), ++ RelocInfo::CODE_TARGET); ++ ++ // Called Construct on an exotic Object with a [[Construct]] internal method. ++ __ bind(&non_proxy); ++ { ++ // Overwrite the original receiver with the (original) target. ++ __ StoreReceiver(a1, a0, kScratchReg); ++ // Let the "call_as_constructor_delegate" take care of the rest. ++ __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1); ++ __ Jump(masm->isolate()->builtins()->CallFunction(), ++ RelocInfo::CODE_TARGET); ++ } ++ ++ // Called Construct on an Object that doesn't have a [[Construct]] internal ++ // method. ++ __ bind(&non_constructor); ++ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable), ++ RelocInfo::CODE_TARGET); ++} ++ ++void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { ++ // State setup as expected by MacroAssembler::InvokePrologue. ++ // ----------- S t a t e ------------- ++ // -- a0: actual arguments count ++ // -- a1: function (passed through to callee) ++ // -- a2: expected arguments count ++ // -- a3: new target (passed through to callee) ++ // ----------------------------------- ++ ++ Label invoke, dont_adapt_arguments, stack_overflow; ++ ++ Label enough, too_few; ++ __ Branch(&dont_adapt_arguments, eq, a2, ++ Operand(kDontAdaptArgumentsSentinel)); ++ // We use Uless as the number of argument should always be greater than 0. ++ __ Branch(&too_few, Uless, a0, Operand(a2)); ++ ++ { // Enough parameters: actual >= expected. ++ // a0: actual number of arguments as a smi ++ // a1: function ++ // a2: expected number of arguments ++ // a3: new target (passed through to callee) ++ __ bind(&enough); ++ EnterArgumentsAdaptorFrame(masm); ++ Generate_StackOverflowCheck(masm, a2, a5, kScratchReg, &stack_overflow); ++ ++ // Calculate copy start address into a0 and copy end address into a4. 
++ __ slll(a2, kPointerSizeLog2, a0);
++ __ Addl(a0, fp, a0);
++ // Adjust for return address and receiver.
++ __ Addl(a0, a0, Operand(2 * kPointerSize));
++ // Compute copy end address.
++ __ slll(a2, kPointerSizeLog2, a4);
++ __ Subl(a4, a0, Operand(a4));
++
++ // Copy the arguments (including the receiver) to the new stack frame.
++ // a0: copy start address
++ // a1: function
++ // a2: expected number of arguments
++ // a3: new target (passed through to callee)
++ // a4: copy end address
++
++ Label copy;
++ __ addl(a0, kPointerSize, a0); // In delay slot.
++ __ bind(&copy);
++ __ subl(a0, kPointerSize, a0); // In delay slot.
++ __ Ldl(a5, MemOperand(a0));
++ __ push(a5);
++ __ Branch(&copy, ne, a0, Operand(a4));
++
++ __ jmp(&invoke);
++ }
++
++ { // Too few parameters: Actual < expected.
++ __ bind(&too_few);
++ EnterArgumentsAdaptorFrame(masm);
++ Generate_StackOverflowCheck(masm, a2, a5, kScratchReg, &stack_overflow);
++
++ // Fill the remaining expected arguments with undefined.
++ __ LoadRoot(t0, RootIndex::kUndefinedValue);
++ __ SmiUntag(t1, a0);
++ __ Subl(t2, a2, Operand(t1));
++ __ slll(t2, kSystemPointerSizeLog2, a4);
++ __ Subl(a4, fp, a4);
++ // Adjust for frame.
++ __ Subl(a4, a4,
++ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
++ kSystemPointerSize));
++
++ Label fill;
++ __ bind(&fill);
++ __ push(t0);
++ __ Branch(&fill, ne, sp, Operand(a4));
++
++ // Calculate copy start address into r0 and copy end address is fp.
++ __ SmiScale(a0, a0, kPointerSizeLog2);
++ __ Addl(a0, fp, a0);
++
++ // Copy the arguments (including the receiver) to the new stack frame.
++ Label copy;
++ __ Addl(a0, a0, Operand(kSystemPointerSize)); // In delay slot.
++ __ bind(&copy);
++ __ Subl(a0, a0, Operand(kSystemPointerSize)); // In delay slot.
++
++ // Adjust load for return address and receiver.
++ __ Ldl(t0, MemOperand(a0, 2 * kSystemPointerSize));
++ __ push(t0);
++
++ __ Branch(&copy, ne, a0, Operand(fp));
++ }
++
++ // Call the entry point.
++ __ bind(&invoke);
++ __ mov(a0, a2);
++ // a0 : expected number of arguments
++ // a1 : function (passed through to callee)
++ // a3: new target (passed through to callee)
++ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
++ __ Ldl(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
++ __ Addl(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
++ __ Call(a2);
++
++ // Store offset of return address for deoptimizer.
++ masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
++
++ // Exit frame and return.
++ LeaveArgumentsAdaptorFrame(masm);
++ __ Ret();
++
++ // -------------------------------------------
++ // Don't adapt arguments.
++ // -------------------------------------------
++ __ bind(&dont_adapt_arguments);
++ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
++ __ Ldl(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
++ __ Addl(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
++ __ Jump(a2);
++
++ __ bind(&stack_overflow);
++ {
++ FrameScope frame(masm, StackFrame::MANUAL);
++ __ CallRuntime(Runtime::kThrowStackOverflow);
++ __ sys_call(0x80);
++ }
++}
++
++void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
++ // The function index was put in t0 by the jump table trampoline.
++ // Convert to Smi for the runtime call
++ __ SmiTag(kWasmCompileLazyFuncIndexRegister);
++ {
++ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
++ FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
++
++ // Save all parameter registers (see wasm-linkage.cc).
They might be ++ // overwritten in the runtime call below. We don't have any callee-saved ++ // registers in wasm, so no need to store anything else. ++ constexpr RegList gp_regs = ++ Register::ListOf(a0, a1, a2, a3, a4, a5); ++ constexpr RegList fp_regs = ++ DoubleRegister::ListOf(f16, f17, f18, f19, f20, f21); ++ constexpr int16_t num_to_push = base::bits::CountPopulation(gp_regs) + ++ base::bits::CountPopulation(fp_regs); ++ // The number of regs to be pushed before kWasmInstanceRegister should be ++ // equal to kNumberOfSavedAllParamRegs. ++ STATIC_ASSERT(num_to_push == ++ WasmCompileLazyFrameConstants::kNumberOfSavedAllParamRegs); ++ __ MultiPush(gp_regs); ++ __ MultiPushFPU(fp_regs); ++ ++ // Pass instance and function index as an explicit arguments to the runtime ++ // function. ++ __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister); ++ // Initialize the JavaScript context with 0. CEntry will use it to ++ // set the current context on the isolate. ++ __ Move(kContextRegister, Smi::zero()); ++ __ CallRuntime(Runtime::kWasmCompileLazy, 2); ++ ++ // Restore registers. ++ __ MultiPopFPU(fp_regs); ++ __ MultiPop(gp_regs); ++ } ++ // Finally, jump to the entrypoint. ++ __ Jump(v0); ++} ++ ++void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) { ++ HardAbortScope hard_abort(masm); // Avoid calls to Abort. ++ { ++ FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK); ++ ++ // Save all parameter registers. They might hold live values, we restore ++ // them after the runtime call. ++ __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs); ++ __ MultiPushFPU(WasmDebugBreakFrameConstants::kPushedFpRegs); ++ ++ // Initialize the JavaScript context with 0. CEntry will use it to ++ // set the current context on the isolate. ++ __ Move(cp, Smi::zero()); ++ __ CallRuntime(Runtime::kWasmDebugBreak, 0); ++ ++ // Restore registers. ++ __ MultiPopFPU(WasmDebugBreakFrameConstants::kPushedFpRegs); ++ __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs); ++ } ++ __ Ret(); ++} ++ ++void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, ++ SaveFPRegsMode save_doubles, ArgvMode argv_mode, ++ bool builtin_exit_frame) { ++ // Called from JavaScript; parameters are on stack as if calling JS function ++ // a0: number of arguments including receiver ++ // a1: pointer to builtin function ++ // fp: frame pointer (restored after C call) ++ // sp: stack pointer (restored as callee's sp after C call) ++ // cp: current context (C callee-saved) ++ // ++ // If argv_mode == kArgvInRegister: ++ // a2: pointer to the first argument ++ ++ if (argv_mode == kArgvInRegister) { ++ // Move argv into the correct register. ++ __ mov(s1, a2); ++ } else { ++ // Compute the argv pointer in a callee-saved register. ++ __ s8addl(a0, sp, s1); DCHECK_EQ(kPointerSizeLog2, 3); ++ __ Subl(s1, s1, kPointerSize); ++ } ++ ++ // Enter the exit frame that transitions from JavaScript to C++. ++ FrameScope scope(masm, StackFrame::MANUAL); ++ __ EnterExitFrame( ++ save_doubles == kSaveFPRegs, 0, ++ builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT); ++ ++ // s0: number of arguments including receiver (C callee-saved) ++ // s1: pointer to first argument (C callee-saved) ++ // s2: pointer to builtin function (C callee-saved) ++ ++ // Prepare arguments for C routine. ++ // a0 = argc ++ __ mov(s0, a0); ++ __ mov(s2, a1); ++ ++ // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We ++ // also need to reserve the 4 argument slots on the stack. 
++ ++ __ AssertStackIsAligned(); ++ ++ // a0 = argc, a1 = argv, a2 = isolate ++ __ li(a2, ExternalReference::isolate_address(masm->isolate())); ++ __ mov(a1, s1); ++ ++ __ StoreReturnAddressAndCall(s2); ++ ++ // Result returned in v0 or v1:v0 (a5:v0) - do not destroy these registers! ++ ++ // Check result for exception sentinel. ++ Label exception_returned; ++ __ LoadRoot(a4, RootIndex::kException); ++ __ Branch(&exception_returned, eq, a4, Operand(v0)); ++ ++ // Check that there is no pending exception, otherwise we ++ // should have returned the exception sentinel. ++ if (FLAG_debug_code) { ++ Label okay; ++ ExternalReference pending_exception_address = ExternalReference::Create( ++ IsolateAddressId::kPendingExceptionAddress, masm->isolate()); ++ __ li(a2, pending_exception_address); ++ __ Ldl(a2, MemOperand(a2)); ++ __ LoadRoot(a4, RootIndex::kTheHoleValue); ++ // Cannot use check here as it attempts to generate call into runtime. ++ __ Branch(&okay, eq, a4, Operand(a2)); ++ __ halt();//stop("Unexpected pending exception"); ++ __ bind(&okay); ++ } ++ ++ // Exit C frame and return. ++ // v0:v1(v0:a5): result ++ // sp: stack pointer ++ // fp: frame pointer ++ Register argc = argv_mode == kArgvInRegister ++ // We don't want to pop arguments so set argc to no_reg. ++ ? no_reg ++ // s0: still holds argc (callee-saved). ++ : s0; ++ __ LeaveExitFrame(save_doubles == kSaveFPRegs, argc, EMIT_RETURN); ++ ++ // Handling of exception. ++ __ bind(&exception_returned); ++ ++ ExternalReference pending_handler_context_address = ExternalReference::Create( ++ IsolateAddressId::kPendingHandlerContextAddress, masm->isolate()); ++ ExternalReference pending_handler_entrypoint_address = ++ ExternalReference::Create( ++ IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate()); ++ ExternalReference pending_handler_fp_address = ExternalReference::Create( ++ IsolateAddressId::kPendingHandlerFPAddress, masm->isolate()); ++ ExternalReference pending_handler_sp_address = ExternalReference::Create( ++ IsolateAddressId::kPendingHandlerSPAddress, masm->isolate()); ++ ++ // Ask the runtime for help to determine the handler. This will set v0 to ++ // contain the current pending exception, don't clobber it. ++ ExternalReference find_handler = ++ ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler); ++ { ++ FrameScope scope(masm, StackFrame::MANUAL); ++ __ PrepareCallCFunction(3, 0, a0); ++ __ mov(a0, zero_reg); ++ __ mov(a1, zero_reg); ++ __ li(a2, ExternalReference::isolate_address(masm->isolate())); ++ __ CallCFunction(find_handler, 3); ++ } ++ ++ // Retrieve the handler context, SP and FP. ++ __ li(cp, pending_handler_context_address); ++ __ Ldl(cp, MemOperand(cp)); ++ __ li(sp, pending_handler_sp_address); ++ __ Ldl(sp, MemOperand(sp)); ++ __ li(fp, pending_handler_fp_address); ++ __ Ldl(fp, MemOperand(fp)); ++ ++ // If the handler is a JS frame, restore the context to the frame. Note that ++ // the context will be set to (cp == 0) for non-JS frames. ++ Label zero; ++ __ Branch(&zero, eq, cp, Operand(zero_reg)); ++ __ Stl(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); ++ __ bind(&zero); ++ ++ // Reset the masking register. This is done independent of the underlying ++ // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work ++ // with both configurations. It is safe to always do this, because the ++ // underlying register is caller-saved and can be arbitrarily clobbered. 
++ __ ResetSpeculationPoisonRegister(); ++ ++ // Compute the handler entry address and jump to it. ++ __ li(t12, pending_handler_entrypoint_address); ++ __ Ldl(t12, MemOperand(t12)); ++ __ Jump(t12); ++} ++ ++void Builtins::Generate_DoubleToI(MacroAssembler* masm) { ++ Label done; ++ Register result_reg = t0; ++ ++ Register scratch = GetRegisterThatIsNotOneOf(result_reg); ++ Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch); ++ Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2); ++ ++ // Account for saved regs. ++ const int kArgumentOffset = 4 * kPointerSize; ++ ++ __ Push(result_reg, scratch, scratch2, scratch3); ++ ++#if 0 ++ DoubleRegister double_scratch = kScratchDoubleReg; ++ FPURegister fp_scratch = f16; ++ FPURegister fp_scratch2 = f17; ++ __ MultiPushFPU(fp_scratch.bit() | fp_scratch2.bit()); //SW64 ++ kArgumentOffset += 2 * kPointerSize; ++ ++ // Load double input. ++ __ Fldd(double_scratch, MemOperand(sp, kArgumentOffset)); ++ ++ // Clear cumulative exception flags and save the FCSR. ++ // SW64 neednot clear FPCR in 20150513. ++ __ rfpcr(fp_scratch2); ++ //in order to have same effection, we should do four steps in sw: ++ //1) set fpcr = 0 ++ //2) Rounding: sw(10), round-to-even ++ //3) set trap bit: sw(62~61,51~49), exception controlled by fpcr but not trap ++ //4) set exception mode: sw(00) setfpec1 ++ __ li(scratch, sFCSRControlMask | sFCSRRound1Mask); //1), 2), 3) ++ __ ifmovd(scratch, fp_scratch); ++ __ wfpcr(fp_scratch); ++ __ setfpec1();//4) ++ ++ // Try a conversion to a signed integer. ++ __ fcvtdl_z(double_scratch, fp_scratch); ++ __ fcvtlw(fp_scratch, double_scratch); ++ // Move the converted value into the result register. ++ __ fimovs(double_scratch, scratch3); ++ ++ // Retrieve and restore the FCSR. ++ __ rfpcr(fp_scratch); ++ __ wfpcr(fp_scratch2); ++ __ setfpec1(); ++ __ fimovd(fp_scratch, scratch); ++ ++ // Check for overflow and NaNs. ++ __ li(scratch2, sFCSROverflowFlagMask | sFCSRUnderflowFlagMask | ++ sFCSRInvalidOpFlagMask); ++ __ And(scratch, scratch, Operand(scratch2)); ++ ++ __ MultiPopFPU(fp_scratch.bit() | fp_scratch2.bit()); //SW64 ++ kArgumentOffset -= 2 * kPointerSize; ++ ++ // If we had no exceptions then set result_reg and we are done. ++ Label error; ++ __ Branch(&error, ne, scratch, Operand(zero_reg)); ++ __ Move(result_reg, scratch3); ++ __ Branch(&done); ++ __ bind(&error); ++#endif ++ ++ // Load the double value and perform a manual truncation. ++ Register input_high = scratch2; ++ Register input_low = scratch3; ++ ++ __ Ldw(input_low, MemOperand(sp, kArgumentOffset + Register::kMantissaOffset)); ++ __ Ldw(input_high, ++ MemOperand(sp, kArgumentOffset + Register::kExponentOffset)); ++ ++ Label normal_exponent; ++ // Extract the biased exponent in result. ++ __ Ext(result_reg, input_high, HeapNumber::kExponentShift, ++ HeapNumber::kExponentBits); ++ ++ // Check for Infinity and NaNs, which should return 0. ++ __ Subw(scratch, result_reg, HeapNumber::kExponentMask); ++ __ Seleq(result_reg, zero_reg, scratch); ++ __ Branch(&done, eq, scratch, Operand(zero_reg)); ++ ++ // Express exponent as delta to (number of mantissa bits + 31). ++ __ Subw(result_reg, result_reg, ++ Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31)); ++ ++ // If the delta is strictly positive, all bits would be shifted away, ++ // which means that we can return 0. 
++  __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
++  __ mov(result_reg, zero_reg);
++  __ Branch(&done);
++
++  __ bind(&normal_exponent);
++  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
++  // Calculate shift.
++  __ Addw(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
++
++  // Save the sign.
++  Register sign = result_reg;
++  result_reg = no_reg;
++  __ And(sign, input_high, Operand(HeapNumber::kSignMask));
++
++  // On ARM shifts > 31 bits are valid and will result in zero. On SW64 we need
++  // to check for this specific case.
++  Label high_shift_needed, high_shift_done;
++  __ Branch(&high_shift_needed, lt, scratch, Operand(32));
++  __ mov(input_high, zero_reg);
++  __ Branch(&high_shift_done);
++  __ bind(&high_shift_needed);
++
++  // Set the implicit 1 before the mantissa part in input_high.
++  __ Or(input_high, input_high,
++        Operand(1 << HeapNumber::kMantissaBitsInTopWord));
++  // Shift the mantissa bits to the correct position.
++  // We don't need to clear non-mantissa bits as they will be shifted away.
++  // If they weren't, it would mean that the answer is in the 32bit range.
++  __ Sllw(input_high, input_high, scratch);
++
++  __ bind(&high_shift_done);
++
++  // Replace the shifted bits with bits from the lower mantissa word.
++  Label pos_shift, shift_done;
++  __ li(kScratchReg, 32);
++  __ Subw(scratch, kScratchReg, scratch);
++  __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
++
++  // Negate scratch.
++  __ Subw(scratch, zero_reg, scratch);
++  __ Sllw(input_low, input_low, scratch);
++  __ Branch(&shift_done);
++
++  __ bind(&pos_shift);
++  __ Srlw(input_low, input_low, scratch);
++
++  __ bind(&shift_done);
++  __ Or(input_high, input_high, Operand(input_low));
++  // Restore sign if necessary.
++  __ mov(scratch, sign);
++  result_reg = sign;
++  sign = no_reg;
++  __ Subw(result_reg, zero_reg, input_high);
++  __ Seleq(result_reg, input_high, scratch);
++
++  __ bind(&done);
++
++  __ Stl(result_reg, MemOperand(sp, kArgumentOffset));
++  __ Pop(result_reg, scratch, scratch2, scratch3);
++  __ Ret();
++}
++
++void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
++  // TODO(v8:10701): Implement for this platform.
++  __ Trap();
++}
++
++namespace {
++
++int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
++  int64_t offset = (ref0.address() - ref1.address());
++  DCHECK(static_cast<int>(offset) == offset);
++  return static_cast<int>(offset);
++}
++
++// Calls an API function. Allocates HandleScope, extracts returned value
++// from handle and propagates exceptions. Restores context. stack_space
++// - space to be unwound on exit (includes the call JS arguments space and
++// the additional space allocated for the fast call). 
++void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, ++ ExternalReference thunk_ref, int stack_space, ++ MemOperand* stack_space_operand, ++ MemOperand return_value_operand) { ++ Isolate* isolate = masm->isolate(); ++ ExternalReference next_address = ++ ExternalReference::handle_scope_next_address(isolate); ++ const int kNextOffset = 0; ++ const int kLimitOffset = AddressOffset( ++ ExternalReference::handle_scope_limit_address(isolate), next_address); ++ const int kLevelOffset = AddressOffset( ++ ExternalReference::handle_scope_level_address(isolate), next_address); ++ ++ DCHECK(function_address == a1 || function_address == a2); ++ ++ Label profiler_enabled, end_profiler_check; ++ __ li(t12, ExternalReference::is_profiling_address(isolate)); ++ __ Ldb(t12, MemOperand(t12, 0)); ++ __ Branch(&profiler_enabled, ne, t12, Operand(zero_reg)); ++ __ li(t12, ExternalReference::address_of_runtime_stats_flag()); ++ __ Ldw(t12, MemOperand(t12, 0)); ++ __ Branch(&profiler_enabled, ne, t12, Operand(zero_reg)); ++ { ++ // Call the api function directly. ++ __ mov(t12, function_address); ++ __ Branch(&end_profiler_check); ++ } ++ ++ __ bind(&profiler_enabled); ++ { ++ // Additional parameter is the address of the actual callback. ++ __ li(t12, thunk_ref); ++ } ++ __ bind(&end_profiler_check); ++ ++ // Allocate HandleScope in callee-save registers. ++ __ li(s3, next_address); ++ __ Ldl(s0, MemOperand(s3, kNextOffset)); ++ __ Ldl(s1, MemOperand(s3, kLimitOffset)); ++ __ Ldw(s2, MemOperand(s3, kLevelOffset)); ++ __ Addw(s2, s2, Operand(1)); ++ __ Stw(s2, MemOperand(s3, kLevelOffset)); ++ ++ __ StoreReturnAddressAndCall(t12); ++ ++ Label promote_scheduled_exception; ++ Label delete_allocated_handles; ++ Label leave_exit_frame; ++ Label return_value_loaded; ++ ++ // Load value from ReturnValue. ++ __ Ldl(v0, return_value_operand); ++ __ bind(&return_value_loaded); ++ ++ // No more valid handles (the result handle was the last one). Restore ++ // previous handle scope. ++ __ Stl(s0, MemOperand(s3, kNextOffset)); ++ if (__ emit_debug_code()) { ++ __ Ldw(a1, MemOperand(s3, kLevelOffset)); ++ __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1, ++ Operand(s2)); ++ } ++ __ Subw(s2, s2, Operand(1)); ++ __ Stw(s2, MemOperand(s3, kLevelOffset)); ++ __ Ldl(kScratchReg, MemOperand(s3, kLimitOffset)); ++ __ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg)); ++ ++ // Leave the API exit frame. ++ __ bind(&leave_exit_frame); ++ ++ if (stack_space_operand == nullptr) { ++ DCHECK_NE(stack_space, 0); ++ __ li(s0, Operand(stack_space)); ++ } else { ++ DCHECK_EQ(stack_space, 0); ++ STATIC_ASSERT(kCArgSlotCount == 0); ++ __ Ldl(s0, *stack_space_operand); ++ } ++ ++ static constexpr bool kDontSaveDoubles = false; ++ static constexpr bool kRegisterContainsSlotCount = false; ++ __ LeaveExitFrame(kDontSaveDoubles, s0, NO_EMIT_RETURN, ++ kRegisterContainsSlotCount); ++ ++ // Check if the function scheduled an exception. ++ __ LoadRoot(a4, RootIndex::kTheHoleValue); ++ __ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate)); ++ __ Ldl(a5, MemOperand(kScratchReg)); ++ __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5)); ++ ++ __ Ret(); ++ ++ // Re-throw by promoting a scheduled exception. ++ __ bind(&promote_scheduled_exception); ++ __ TailCallRuntime(Runtime::kPromoteScheduledException); ++ ++ // HandleScope limit has changed. Delete allocated extensions. 
++ __ bind(&delete_allocated_handles); ++ __ Stl(s1, MemOperand(s3, kLimitOffset)); ++ __ mov(s0, v0); ++ __ mov(a0, v0); ++ __ PrepareCallCFunction(1, s1); ++ __ li(a0, ExternalReference::isolate_address(isolate)); ++ __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1); ++ __ mov(v0, s0); ++ __ jmp(&leave_exit_frame); ++} ++ ++} // namespace ++ ++void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- cp : context ++ // -- a1 : api function address ++ // -- a2 : arguments count (not including the receiver) ++ // -- a3 : call data ++ // -- a0 : holder ++ // -- ++ // -- sp[0] : last argument ++ // -- ... ++ // -- sp[(argc - 1) * 8] : first argument ++ // -- sp[(argc + 0) * 8] : receiver ++ // ----------------------------------- ++ ++ Register api_function_address = a1; ++ Register argc = a2; ++ Register call_data = a3; ++ Register holder = a0; ++ Register scratch = t0; ++ Register base = t1; // For addressing MemOperands on the stack. ++ ++ DCHECK(!AreAliased(api_function_address, argc, call_data, ++ holder, scratch, base)); ++ ++ using FCA = FunctionCallbackArguments; ++ ++ STATIC_ASSERT(FCA::kArgsLength == 6); ++ STATIC_ASSERT(FCA::kNewTargetIndex == 5); ++ STATIC_ASSERT(FCA::kDataIndex == 4); ++ STATIC_ASSERT(FCA::kReturnValueOffset == 3); ++ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2); ++ STATIC_ASSERT(FCA::kIsolateIndex == 1); ++ STATIC_ASSERT(FCA::kHolderIndex == 0); ++ ++ // Set up FunctionCallbackInfo's implicit_args on the stack as follows: ++ // ++ // Target state: ++ // sp[0 * kPointerSize]: kHolder ++ // sp[1 * kPointerSize]: kIsolate ++ // sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue) ++ // sp[3 * kPointerSize]: undefined (kReturnValue) ++ // sp[4 * kPointerSize]: kData ++ // sp[5 * kPointerSize]: undefined (kNewTarget) ++ ++ // Set up the base register for addressing through MemOperands. It will point ++ // at the receiver (located at sp + argc * kPointerSize). ++ __ s8addl(argc, sp, base); DCHECK_EQ(kPointerSizeLog2, 3); ++ ++ // Reserve space on the stack. ++ __ Subl(sp, sp, Operand(FCA::kArgsLength * kPointerSize)); ++ ++ // kHolder. ++ __ Stl(holder, MemOperand(sp, 0 * kPointerSize)); ++ ++ // kIsolate. ++ __ li(scratch, ExternalReference::isolate_address(masm->isolate())); ++ __ Stl(scratch, MemOperand(sp, 1 * kPointerSize)); ++ ++ // kReturnValueDefaultValue and kReturnValue. ++ __ LoadRoot(scratch, RootIndex::kUndefinedValue); ++ __ Stl(scratch, MemOperand(sp, 2 * kPointerSize)); ++ __ Stl(scratch, MemOperand(sp, 3 * kPointerSize)); ++ ++ // kData. ++ __ Stl(call_data, MemOperand(sp, 4 * kPointerSize)); ++ ++ // kNewTarget. ++ __ Stl(scratch, MemOperand(sp, 5 * kPointerSize)); ++ ++ // Keep a pointer to kHolder (= implicit_args) in a scratch register. ++ // We use it below to set up the FunctionCallbackInfo object. ++ __ mov(scratch, sp); ++ ++ // Allocate the v8::Arguments structure in the arguments' space since ++ // it's not controlled by GC. ++ static constexpr int kApiStackSpace = 4; ++ static constexpr bool kDontSaveDoubles = false; ++ FrameScope frame_scope(masm, StackFrame::MANUAL); ++ __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace); ++ ++ // EnterExitFrame may align the sp. ++ ++ // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above). ++ // Arguments are after the return address (pushed by EnterExitFrame()). 
++ __ Stl(scratch, MemOperand(sp, 1 * kPointerSize)); ++ ++ // FunctionCallbackInfo::values_ (points at the first varargs argument passed ++ // on the stack). ++ __ Addl(scratch, scratch, ++ Operand((FCA::kArgsLength + 1) * kSystemPointerSize)); ++ ++ __ Stl(scratch, MemOperand(sp, 2 * kPointerSize)); ++ ++ // FunctionCallbackInfo::length_. ++ // Stored as int field, 32-bit integers within struct on stack always left ++ // justified by n64 ABI. ++ __ Stw(argc, MemOperand(sp, 3 * kPointerSize)); ++ ++ // We also store the number of bytes to drop from the stack after returning ++ // from the API function here. ++ // Note: Unlike on other architectures, this stores the number of slots to ++ // drop, not the number of bytes. ++ __ Addl(scratch, argc, Operand(FCA::kArgsLength + 1 /* receiver */)); ++ __ Stl(scratch, MemOperand(sp, 4 * kPointerSize)); ++ ++ // v8::InvocationCallback's argument. ++ DCHECK(!AreAliased(api_function_address, scratch, a0)); ++ __ Addl(a0, sp, Operand(1 * kPointerSize)); ++ ++ ExternalReference thunk_ref = ExternalReference::invoke_function_callback(); ++ ++ // There are two stack slots above the arguments we constructed on the stack. ++ // TODO(jgruber): Document what these arguments are. ++ static constexpr int kStackSlotsAboveFCA = 2; ++ MemOperand return_value_operand( ++ fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize); ++ ++ static constexpr int kUseStackSpaceOperand = 0; ++ MemOperand stack_space_operand(sp, 4 * kPointerSize); ++ ++ AllowExternalCallThatCantCauseGC scope(masm); ++ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, ++ kUseStackSpaceOperand, &stack_space_operand, ++ return_value_operand); ++} ++ ++void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { ++ // Build v8::PropertyCallbackInfo::args_ array on the stack and push property ++ // name below the exit frame to make GC aware of them. ++ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0); ++ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1); ++ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2); ++ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3); ++ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4); ++ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5); ++ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6); ++ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7); ++ ++ Register receiver = ApiGetterDescriptor::ReceiverRegister(); ++ Register holder = ApiGetterDescriptor::HolderRegister(); ++ Register callback = ApiGetterDescriptor::CallbackRegister(); ++ Register scratch = a4; ++ DCHECK(!AreAliased(receiver, holder, callback, scratch)); ++ ++ Register api_function_address = a2; ++ ++ // Here and below +1 is for name() pushed after the args_ array. 
++ using PCA = PropertyCallbackArguments; ++ __ Subl(sp, sp, (PCA::kArgsLength + 1) * kPointerSize); ++ __ Stl(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize)); ++ __ Ldl(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset)); ++ __ Stl(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize)); ++ __ LoadRoot(scratch, RootIndex::kUndefinedValue); ++ __ Stl(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize)); ++ __ Stl(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) * ++ kPointerSize)); ++ __ li(scratch, ExternalReference::isolate_address(masm->isolate())); ++ __ Stl(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize)); ++ __ Stl(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize)); ++ // should_throw_on_error -> false ++ DCHECK_EQ(0, Smi::zero().ptr()); ++ __ Stl(zero_reg, ++ MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize)); ++ __ Ldl(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset)); ++ __ Stl(scratch, MemOperand(sp, 0 * kPointerSize)); ++ ++ // v8::PropertyCallbackInfo::args_ array and name handle. ++ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1; ++ ++ // Load address of v8::PropertyAccessorInfo::args_ array and name handle. ++ __ mov(a0, sp); // a0 = Handle ++ __ Addl(a1, a0, Operand(1 * kPointerSize)); // a1 = v8::PCI::args_ ++ ++ const int kApiStackSpace = 1; ++ FrameScope frame_scope(masm, StackFrame::MANUAL); ++ __ EnterExitFrame(false, kApiStackSpace); ++ ++ // Create v8::PropertyCallbackInfo object on the stack and initialize ++ // it's args_ field. ++ __ Stl(a1, MemOperand(sp, 1 * kPointerSize)); ++ __ Addl(a1, sp, Operand(1 * kPointerSize)); ++ // a1 = v8::PropertyCallbackInfo& ++ ++ ExternalReference thunk_ref = ++ ExternalReference::invoke_accessor_getter_callback(); ++ ++ __ Ldl(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset)); ++ __ Ldl(api_function_address, ++ FieldMemOperand(scratch, Foreign::kForeignAddressOffset)); ++ ++ // +3 is to skip prolog, return address and name handle. ++ MemOperand return_value_operand( ++ fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize); ++ MemOperand* const kUseStackSpaceConstant = nullptr; ++ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, ++ kStackUnwindSpace, kUseStackSpaceConstant, ++ return_value_operand); ++} ++ ++void Builtins::Generate_DirectCEntry(MacroAssembler* masm) { ++ // The sole purpose of DirectCEntry is for movable callers (e.g. any general ++ // purpose Code object) to be able to call into C functions that may trigger ++ // GC and thus move the caller. ++ // ++ // DirectCEntry places the return address on the stack (updated by the GC), ++ // making the call GC safe. The irregexp backend relies on this. ++ ++ // Make place for arguments to fit C calling convention. Callers use ++ // EnterExitFrame/LeaveExitFrame so they handle stack restoring and we don't ++ // have to do that here. Any caller must drop kCArgsSlotsSize stack space ++ // after the call. ++ __ subl(sp, kCArgsSlotsSize, sp); ++ ++ __ Stl(ra, MemOperand(sp, kCArgsSlotsSize)); // Store the return address. ++ __ Call(t12); // Call the C++ function. ++ // set fpec 1 while return from C++. ++ __ setfpec1(); ++ __ Ldl(t12, MemOperand(sp, kCArgsSlotsSize)); // Return to calling code. ++ ++ if (FLAG_debug_code && FLAG_enable_slow_asserts) { ++ // In case of an error the return address may point to a memory area ++ // filled with kZapValue by the GC. 
Dereference the address and check for ++ // this. ++ __ Uldl(a4, MemOperand(t12)); ++ __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, a4, ++ Operand(reinterpret_cast(kZapValue))); ++ } ++ ++ __ Jump(t12); ++} ++ ++#undef __ ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_TARGET_ARCH_SW64 +diff --git a/src/3rdparty/chromium/v8/src/codegen/assembler-arch.h b/src/3rdparty/chromium/v8/src/codegen/assembler-arch.h +index d56b37250..9aed27934 100644 +--- a/src/3rdparty/chromium/v8/src/codegen/assembler-arch.h ++++ b/src/3rdparty/chromium/v8/src/codegen/assembler-arch.h +@@ -23,6 +23,8 @@ + #include "src/codegen/mips64/assembler-mips64.h" + #elif V8_TARGET_ARCH_S390 + #include "src/codegen/s390/assembler-s390.h" ++#elif V8_TARGET_ARCH_SW64 ++#include "src/codegen/sw64/assembler-sw64.h" + #else + #error Unknown architecture. + #endif +diff --git a/src/3rdparty/chromium/v8/src/codegen/assembler-inl.h b/src/3rdparty/chromium/v8/src/codegen/assembler-inl.h +index 8c81315d5..59a49ecf9 100644 +--- a/src/3rdparty/chromium/v8/src/codegen/assembler-inl.h ++++ b/src/3rdparty/chromium/v8/src/codegen/assembler-inl.h +@@ -23,6 +23,8 @@ + #include "src/codegen/mips64/assembler-mips64-inl.h" + #elif V8_TARGET_ARCH_S390 + #include "src/codegen/s390/assembler-s390-inl.h" ++#elif V8_TARGET_ARCH_SW64 ++#include "src/codegen/sw64/assembler-sw64-inl.h" + #else + #error Unknown architecture. + #endif +diff --git a/src/3rdparty/chromium/v8/src/codegen/assembler-sw64-inl.h b/src/3rdparty/chromium/v8/src/codegen/assembler-sw64-inl.h +new file mode 100755 +index 000000000..22811ec31 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/codegen/assembler-sw64-inl.h +@@ -0,0 +1,329 @@ ++ ++// Copyright (c) 1994-2006 Sun Microsystems Inc. ++// All Rights Reserved. ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// - Redistributions of source code must retain the above copyright notice, ++// this list of conditions and the following disclaimer. ++// ++// - Redistribution in binary form must reproduce the above copyright ++// notice, this list of conditions and the following disclaimer in the ++// documentation and/or other materials provided with the distribution. ++// ++// - Neither the name of Sun Microsystems or the names of contributors may ++// be used to endorse or promote products derived from this software without ++// specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS ++// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, ++// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ++// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR ++// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ++// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ++// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ++// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ++// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ++// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++// The original source code covered by the above license above has been ++// modified significantly by Google Inc. ++// Copyright 2012 the V8 project authors. All rights reserved. 
++ ++#ifndef V8_CODEGEN_SW64_ASSEMBLER_SW64_INL_H_ ++#define V8_CODEGEN_SW64_ASSEMBLER_SW64_INL_H_ ++ ++#include "src/codegen/sw64/assembler-sw64.h" ++ ++#include "src/codegen/assembler.h" ++#include "src/debug/debug.h" ++#include "src/objects/objects-inl.h" ++ ++namespace v8 { ++namespace internal { ++ ++bool CpuFeatures::SupportsOptimizer() { return IsSupported(FPU); } ++ ++bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(SW64_SIMD); } ++ ++// ----------------------------------------------------------------------------- ++// Operand and MemOperand. ++ ++bool Operand::is_reg() const { ++ return rm_.is_valid(); ++} ++ ++int64_t Operand::immediate() const { ++ DCHECK(!is_reg()); ++ DCHECK(!IsHeapObjectRequest()); ++ return value_.immediate; ++} ++ ++// ----------------------------------------------------------------------------- ++// RelocInfo. ++ ++void RelocInfo::apply(intptr_t delta) { ++ if (IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)) { ++ // Absolute code pointer inside code object moves with the code object. ++ Assembler::RelocateInternalReference(rmode_, pc_, delta); ++ } ++} ++ ++ ++Address RelocInfo::target_address() { ++ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_)); ++ return Assembler::target_address_at(pc_, constant_pool_); ++} ++ ++Address RelocInfo::target_address_address() { ++ DCHECK(HasTargetAddressAddress()); ++ // Read the address of the word containing the target_address in an ++ // instruction stream. ++ // The only architecture-independent user of this function is the serializer. ++ // The serializer uses it to find out how many raw bytes of instruction to ++ // output before the next target. ++ // For an instruction like LUI/ORI where the target bits are mixed into the ++ // instruction bits, the size of the target will be zero, indicating that the ++ // serializer should not step forward in memory after a target is resolved ++ // and written. In this case the target_address_address function should ++ // return the end of the instructions to be patched, allowing the ++ // deserializer to deserialize the instructions as raw bytes and put them in ++ // place, ready to be patched with the target. After jump optimization, ++ // that is the address of the instruction that follows J/JAL/JR/JALR ++ // instruction. ++ return pc_ + Assembler::kInstructionsFor64BitConstant * kInstrSize; ++} ++ ++Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); } ++ ++int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; } ++ ++void Assembler::deserialization_set_special_target_at( ++ Address instruction_payload, Code code, Address target) { ++ set_target_address_at(instruction_payload, ++ !code.is_null() ? code.constant_pool() : kNullAddress, ++ target); ++} ++ ++int Assembler::deserialization_special_target_size( ++ Address instruction_payload) { ++ return kSpecialTargetSize; ++} ++ ++void Assembler::set_target_internal_reference_encoded_at(Address pc, ++ Address target) { ++ // Encoded internal references are j/jal instructions. ++ Instr instr = Assembler::instr_at(pc + 0 * kInstrSize); ++ ++ uint64_t imm28 = target & static_cast(kImm28Mask); ++ ++ instr &= ~kImm26Mask; ++ uint64_t imm26 = imm28 >> 2; ++ DCHECK(is_uint26(imm26)); ++ ++ instr_at_put(pc, instr | (imm26 & kImm26Mask)); ++ // Currently used only by deserializer, and all code will be flushed ++ // after complete deserialization, no need to flush on each reference. 
++}
++
++void Assembler::deserialization_set_target_internal_reference_at(
++    Address pc, Address target, RelocInfo::Mode mode) {
++  if (mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
++    UNREACHABLE();  // DCHECK(IsJ(instr_at(pc)));
++    set_target_internal_reference_encoded_at(pc, target);
++  } else {
++    DCHECK(mode == RelocInfo::INTERNAL_REFERENCE);
++    Memory<Address>(pc) = target;
++  }
++}
++
++HeapObject RelocInfo::target_object() {
++  DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
++  return HeapObject::cast(
++      Object(Assembler::target_address_at(pc_, constant_pool_)));
++}
++
++HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
++  return target_object();
++}
++
++Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
++  DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
++  return Handle<HeapObject>(reinterpret_cast<Address*>(
++      Assembler::target_address_at(pc_, constant_pool_)));
++}
++
++void RelocInfo::set_target_object(Heap* heap, HeapObject target,
++                                  WriteBarrierMode write_barrier_mode,
++                                  ICacheFlushMode icache_flush_mode) {
++  DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
++  Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
++                                   icache_flush_mode);
++  if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
++      !FLAG_disable_write_barriers) {
++    WriteBarrierForCode(host(), this, target);
++  }
++}
++
++
++Address RelocInfo::target_external_reference() {
++  DCHECK(rmode_ == EXTERNAL_REFERENCE);
++  return Assembler::target_address_at(pc_, constant_pool_);
++}
++
++void RelocInfo::set_target_external_reference(
++    Address target, ICacheFlushMode icache_flush_mode) {
++  DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
++  Assembler::set_target_address_at(pc_, constant_pool_, target,
++                                   icache_flush_mode);
++}
++
++Address RelocInfo::target_internal_reference() {
++  if (rmode_ == INTERNAL_REFERENCE) {
++    return Memory<Address>(pc_);
++  } else {
++    // Encoded internal references are j/jal instructions.
++    DCHECK(rmode_ == INTERNAL_REFERENCE_ENCODED);
++    Instr instr = Assembler::instr_at(pc_ + 0 * kInstrSize);
++    instr &= kImm26Mask;
++    uint64_t imm28 = instr << 2;
++    uint64_t segment = pc_ & ~static_cast<uint64_t>(kImm28Mask);
++    return static_cast<Address>(segment | imm28);
++  }
++}
++
++
++Address RelocInfo::target_internal_reference_address() {
++  DCHECK(rmode_ == INTERNAL_REFERENCE || rmode_ == INTERNAL_REFERENCE_ENCODED);
++  return pc_;
++}
++
++Address RelocInfo::target_runtime_entry(Assembler* origin) {
++  DCHECK(IsRuntimeEntry(rmode_));
++  return target_address();
++}
++
++void RelocInfo::set_target_runtime_entry(Address target,
++                                         WriteBarrierMode write_barrier_mode,
++                                         ICacheFlushMode icache_flush_mode) {
++  DCHECK(IsRuntimeEntry(rmode_));
++  if (target_address() != target)
++    set_target_address(target, write_barrier_mode, icache_flush_mode);
++}
++
++Address RelocInfo::target_off_heap_target() {
++  DCHECK(IsOffHeapTarget(rmode_));
++  return Assembler::target_address_at(pc_, constant_pool_);
++}
++
++void RelocInfo::WipeOut() {
++  DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
++         IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
++         IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_) ||
++         IsOffHeapTarget(rmode_));
++  if (IsInternalReference(rmode_)) {
++    Memory<Address>
(pc_) = kNullAddress; ++ } else if (IsInternalReferenceEncoded(rmode_)) { ++ Assembler::set_target_internal_reference_encoded_at(pc_, kNullAddress); ++ } else { ++ Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress); ++ } ++} ++ ++// ----------------------------------------------------------------------------- ++// Assembler. ++ ++ ++void Assembler::CheckBuffer() { ++ if (buffer_space() <= kGap) { ++ GrowBuffer(); ++ } ++} ++ ++ ++void Assembler::CheckForEmitInForbiddenSlot() { ++ if (!is_buffer_growth_blocked()) { ++ CheckBuffer(); ++ } ++ if (IsPrevInstrCompactBranch()) { ++ // Nop instruction to precede a CTI in forbidden slot: ++ Instr nop = op_ldi | (zero_reg.code() << sRaShift) | (zero_reg.code() << sRbShift); ++ *reinterpret_cast(pc_) = nop; ++ pc_ += kInstrSize; ++ ++ ClearCompactBranchState(); ++ } ++} ++ ++ ++void Assembler::EmitHelper(Instr x, CompactBranchType is_compact_branch) { ++ if (IsPrevInstrCompactBranch()) { ++ if (Instruction::IsForbiddenAfterBranchInstr(x)) { ++ // Nop instruction to precede a CTI in forbidden slot: ++ Instr nop = op_ldi | (zero_reg.code() << sRaShift) | (zero_reg.code() << sRbShift); ++ *reinterpret_cast(pc_) = nop; ++ pc_ += kInstrSize; ++ } ++ ClearCompactBranchState(); ++ } ++ *reinterpret_cast(pc_) = x; ++ pc_ += kInstrSize; ++ if (is_compact_branch == CompactBranchType::COMPACT_BRANCH) { ++ EmittedCompactBranchInstruction(); ++ } ++ CheckTrampolinePoolQuick(); ++} ++ ++template <> ++inline void Assembler::EmitHelper(uint8_t x); ++ ++template ++void Assembler::EmitHelper(T x) { ++ *reinterpret_cast(pc_) = x; ++ pc_ += sizeof(x); ++ CheckTrampolinePoolQuick(); ++} ++ ++template <> ++void Assembler::EmitHelper(uint8_t x) { ++ *reinterpret_cast(pc_) = x; ++ pc_ += sizeof(x); ++ if (reinterpret_cast(pc_) % kInstrSize == 0) { ++ CheckTrampolinePoolQuick(); ++ } ++} ++ ++#ifdef ZHJDEL ++void Assembler::emit(Instr x, CompactBranchType is_compact_branch) { ++ UNREACHABLE(); // This should never be reached on sw64. ++} ++ ++ ++void Assembler::emit(uint64_t data) { ++ UNREACHABLE(); // This should never be reached on sw64. ++} ++#endif ++ ++#ifdef SW64 ++void Assembler::emitSW(Instr x) { ++ if (!is_buffer_growth_blocked()) { ++ CheckBuffer(); ++ } ++ EmitHelper(x); ++} ++ ++void Assembler::emitSW(uint64_t data) { ++ CheckForEmitInForbiddenSlot(); ++ EmitHelper(data); ++} ++#endif ++ ++EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); } ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_SW64_ASSEMBLER_SW64_INL_H_ +diff --git a/src/3rdparty/chromium/v8/src/codegen/constants-arch.h b/src/3rdparty/chromium/v8/src/codegen/constants-arch.h +index 7a222c960..74b11c074 100644 +--- a/src/3rdparty/chromium/v8/src/codegen/constants-arch.h ++++ b/src/3rdparty/chromium/v8/src/codegen/constants-arch.h +@@ -21,6 +21,8 @@ + #include "src/codegen/s390/constants-s390.h" // NOLINT + #elif V8_TARGET_ARCH_X64 + #include "src/codegen/x64/constants-x64.h" // NOLINT ++#elif V8_TARGET_ARCH_SW64 ++#include "src/codegen/sw64/constants-sw64.h" // NOLINT + #else + #error Unsupported target architecture. 
+ #endif +diff --git a/src/3rdparty/chromium/v8/src/codegen/cpu-features.h b/src/3rdparty/chromium/v8/src/codegen/cpu-features.h +index 14c94ebae..c5ce2a43e 100644 +--- a/src/3rdparty/chromium/v8/src/codegen/cpu-features.h ++++ b/src/3rdparty/chromium/v8/src/codegen/cpu-features.h +@@ -46,6 +46,10 @@ enum CpuFeature { + MIPSr2, + MIPSr6, + MIPS_SIMD, // MSA instructions ++#elif V8_TARGET_ARCH_SW64 ++ FPU, ++ FP64FPU, ++ SW64_SIMD, // SSA instructions + + #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 + FPU, +diff --git a/src/3rdparty/chromium/v8/src/codegen/external-reference.cc b/src/3rdparty/chromium/v8/src/codegen/external-reference.cc +index 7a42e4046..752af0314 100644 +--- a/src/3rdparty/chromium/v8/src/codegen/external-reference.cc ++++ b/src/3rdparty/chromium/v8/src/codegen/external-reference.cc +@@ -474,6 +474,8 @@ ExternalReference ExternalReference::invoke_accessor_getter_callback() { + #define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState + #elif V8_TARGET_ARCH_S390 + #define re_stack_check_func RegExpMacroAssemblerS390::CheckStackGuardState ++#elif V8_TARGET_ARCH_SW64 ++#define re_stack_check_func RegExpMacroAssemblerSW64::CheckStackGuardState + #else + UNREACHABLE(); + #endif +@@ -900,7 +902,101 @@ static int EnterMicrotaskContextWrapper(HandleScopeImplementer* hsi, + return 0; + } + +-FUNCTION_REFERENCE(call_enter_context_function, EnterMicrotaskContextWrapper) ++FUNCTION_REFERENCE(call_enter_context_function, EnterMicrotaskContextWrapper); ++ ++// ===================================================================== ++// add start for SW64. ++ ++#define SW_CONST64(x) (x ## LL) ++ ++const int sw_min_int = (int)1 << (sizeof(int)*8-1); // 0x80000000 == smallest int ++const long sw_min_long = SW_CONST64(0x8000000000000000); ++ ++static int sw_div(int x, int y) { ++ if (x == sw_min_int && y == SW_CONST64(-1)) { ++ return x; ++ } else { ++#if defined(USE_SIMULATOR) ++ if (0 == y) return 0; ++#endif ++ ++ return x / y; ++ } ++} ++ ++static int sw_divu(uint32_t x, uint32_t y) { ++ return (int)(x / y); ++} ++ ++static long sw_ddiv(long x, long y) { ++ if (x == sw_min_long && y == SW_CONST64(-1)) { ++ return x; ++ } else { ++ return x / y; ++ } ++} ++ ++static long sw_ddivu(uint64_t x, uint64_t y) { ++ return (long)(x / y); ++} ++ ++static int sw_mod(int x, int y) { ++ if (x == sw_min_int && y == SW_CONST64(-1)) { ++ return 0; ++ } else { ++ return x % y; ++ } ++} ++ ++static int sw_modu(uint32_t x, uint32_t y) { ++ return (int)(x % y); ++} ++ ++static long sw_dmod(long x, long y) { ++ if (x == sw_min_long && y == SW_CONST64(-1)) { ++ return 0; ++ } else { ++ return x % y; ++ } ++} ++ ++static long sw_dmodu(uint64_t x, uint64_t y) { ++ return (long)(x % y); ++} ++ ++ExternalReference ExternalReference::math_sw_div_function() { ++ return ExternalReference(Redirect(FUNCTION_ADDR(sw_div))); ++} ++ ++ExternalReference ExternalReference::math_sw_divu_function() { ++ return ExternalReference(Redirect(FUNCTION_ADDR(sw_divu))); ++} ++ ++ExternalReference ExternalReference::math_sw_ddiv_function() { ++ return ExternalReference(Redirect(FUNCTION_ADDR(sw_ddiv))); ++} ++ ++ExternalReference ExternalReference::math_sw_ddivu_function() { ++ return ExternalReference(Redirect(FUNCTION_ADDR(sw_ddivu))); ++} ++ ++ExternalReference ExternalReference::math_sw_mod_function() { ++ return ExternalReference(Redirect(FUNCTION_ADDR(sw_mod))); ++} ++ ++ExternalReference ExternalReference::math_sw_modu_function() { ++ return ExternalReference(Redirect(FUNCTION_ADDR(sw_modu))); ++} ++ 
++ExternalReference ExternalReference::math_sw_dmod_function() { ++ return ExternalReference(Redirect(FUNCTION_ADDR(sw_dmod))); ++} ++ ++ExternalReference ExternalReference::math_sw_dmodu_function() { ++ return ExternalReference(Redirect(FUNCTION_ADDR(sw_dmodu))); ++} ++// add end for SW64. ++// ====================================================================== + + bool operator==(ExternalReference lhs, ExternalReference rhs) { + return lhs.address() == rhs.address(); +diff --git a/src/3rdparty/chromium/v8/src/codegen/external-reference.h b/src/3rdparty/chromium/v8/src/codegen/external-reference.h +index 2c5c8348f..d4b63dedd 100644 +--- a/src/3rdparty/chromium/v8/src/codegen/external-reference.h ++++ b/src/3rdparty/chromium/v8/src/codegen/external-reference.h +@@ -114,6 +114,14 @@ class StatsCounter; + V(f64_acos_wrapper_function, "f64_acos_wrapper") \ + V(f64_asin_wrapper_function, "f64_asin_wrapper") \ + V(f64_mod_wrapper_function, "f64_mod_wrapper") \ ++ V(math_sw_div_function, "sw_div") \ ++ V(math_sw_divu_function, "sw_divu") \ ++ V(math_sw_ddiv_function, "sw_ddiv") \ ++ V(math_sw_ddivu_function, "sw_ddivu") \ ++ V(math_sw_mod_function, "sw_mod") \ ++ V(math_sw_modu_function, "sw_modu") \ ++ V(math_sw_dmod_function, "sw_dmod") \ ++ V(math_sw_dmodu_function, "sw_dmodu") \ + V(get_date_field_function, "JSDate::GetField") \ + V(get_or_create_hash_raw, "get_or_create_hash_raw") \ + V(ieee754_acos_function, "base::ieee754::acos") \ +diff --git a/src/3rdparty/chromium/v8/src/codegen/interface-descriptors.cc b/src/3rdparty/chromium/v8/src/codegen/interface-descriptors.cc +index 42b45c0f3..7f8b8f0c7 100644 +--- a/src/3rdparty/chromium/v8/src/codegen/interface-descriptors.cc ++++ b/src/3rdparty/chromium/v8/src/codegen/interface-descriptors.cc +@@ -412,7 +412,7 @@ void WasmAtomicNotifyDescriptor::InitializePlatformSpecific( + DefaultInitializePlatformSpecific(data, kParameterCount); + } + +-#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64) ++#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64) && !defined(V8_TARGET_ARCH_SW64) + void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + DefaultInitializePlatformSpecific(data, kParameterCount); +diff --git a/src/3rdparty/chromium/v8/src/codegen/macro-assembler.h b/src/3rdparty/chromium/v8/src/codegen/macro-assembler.h +index 01175e585..ba78e35fa 100644 +--- a/src/3rdparty/chromium/v8/src/codegen/macro-assembler.h ++++ b/src/3rdparty/chromium/v8/src/codegen/macro-assembler.h +@@ -52,6 +52,9 @@ enum AllocationFlags { + #elif V8_TARGET_ARCH_S390 + #include "src/codegen/s390/constants-s390.h" + #include "src/codegen/s390/macro-assembler-s390.h" ++#elif V8_TARGET_ARCH_SW64 ++#include "src/codegen/sw64/constants-sw64.h" ++#include "src/codegen/sw64/macro-assembler-sw64.h" + #else + #error Unsupported target architecture. + #endif +diff --git a/src/3rdparty/chromium/v8/src/codegen/register-arch.h b/src/3rdparty/chromium/v8/src/codegen/register-arch.h +index 21a723301..82aa7c34a 100644 +--- a/src/3rdparty/chromium/v8/src/codegen/register-arch.h ++++ b/src/3rdparty/chromium/v8/src/codegen/register-arch.h +@@ -24,6 +24,8 @@ + #include "src/codegen/mips64/register-mips64.h" + #elif V8_TARGET_ARCH_S390 + #include "src/codegen/s390/register-s390.h" ++#elif V8_TARGET_ARCH_SW64 ++#include "src/codegen/sw64/register-sw64.h" + #else + #error Unknown architecture. 
+ #endif +diff --git a/src/3rdparty/chromium/v8/src/codegen/register-configuration.cc b/src/3rdparty/chromium/v8/src/codegen/register-configuration.cc +index 5752b4633..f7033a18f 100644 +--- a/src/3rdparty/chromium/v8/src/codegen/register-configuration.cc ++++ b/src/3rdparty/chromium/v8/src/codegen/register-configuration.cc +@@ -64,6 +64,8 @@ static int get_num_allocatable_double_registers() { + kMaxAllocatableDoubleRegisterCount; + #elif V8_TARGET_ARCH_S390 + kMaxAllocatableDoubleRegisterCount; ++#elif V8_TARGET_ARCH_SW64 ++ kMaxAllocatableDoubleRegisterCount; + #else + #error Unsupported target architecture. + #endif +diff --git a/src/3rdparty/chromium/v8/src/codegen/reloc-info.cc b/src/3rdparty/chromium/v8/src/codegen/reloc-info.cc +index 9f0797893..63a5d7e8c 100644 +--- a/src/3rdparty/chromium/v8/src/codegen/reloc-info.cc ++++ b/src/3rdparty/chromium/v8/src/codegen/reloc-info.cc +@@ -329,7 +329,8 @@ bool RelocInfo::OffHeapTargetIsCodedSpecially() { + return false; + #elif defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_MIPS) || \ + defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC) || \ +- defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_S390) ++ defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_SW64) ++ + return true; + #endif + } +diff --git a/src/3rdparty/chromium/v8/src/codegen/sw64/OWNERS b/src/3rdparty/chromium/v8/src/codegen/sw64/OWNERS +new file mode 100755 +index 000000000..42582e993 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/codegen/sw64/OWNERS +@@ -0,0 +1,3 @@ ++ivica.bogosavljevic@sw64.com ++Miran.Karic@sw64.com ++sreten.kovacevic@sw64.com +diff --git a/src/3rdparty/chromium/v8/src/codegen/sw64/assembler-sw64-inl.h b/src/3rdparty/chromium/v8/src/codegen/sw64/assembler-sw64-inl.h +new file mode 100755 +index 000000000..22811ec31 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/codegen/sw64/assembler-sw64-inl.h +@@ -0,0 +1,329 @@ ++ ++// Copyright (c) 1994-2006 Sun Microsystems Inc. ++// All Rights Reserved. ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// - Redistributions of source code must retain the above copyright notice, ++// this list of conditions and the following disclaimer. ++// ++// - Redistribution in binary form must reproduce the above copyright ++// notice, this list of conditions and the following disclaimer in the ++// documentation and/or other materials provided with the distribution. ++// ++// - Neither the name of Sun Microsystems or the names of contributors may ++// be used to endorse or promote products derived from this software without ++// specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS ++// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, ++// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ++// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR ++// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ++// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ++// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ++// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ++// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ++// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++// The original source code covered by the above license above has been ++// modified significantly by Google Inc. ++// Copyright 2012 the V8 project authors. All rights reserved. ++ ++#ifndef V8_CODEGEN_SW64_ASSEMBLER_SW64_INL_H_ ++#define V8_CODEGEN_SW64_ASSEMBLER_SW64_INL_H_ ++ ++#include "src/codegen/sw64/assembler-sw64.h" ++ ++#include "src/codegen/assembler.h" ++#include "src/debug/debug.h" ++#include "src/objects/objects-inl.h" ++ ++namespace v8 { ++namespace internal { ++ ++bool CpuFeatures::SupportsOptimizer() { return IsSupported(FPU); } ++ ++bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(SW64_SIMD); } ++ ++// ----------------------------------------------------------------------------- ++// Operand and MemOperand. ++ ++bool Operand::is_reg() const { ++ return rm_.is_valid(); ++} ++ ++int64_t Operand::immediate() const { ++ DCHECK(!is_reg()); ++ DCHECK(!IsHeapObjectRequest()); ++ return value_.immediate; ++} ++ ++// ----------------------------------------------------------------------------- ++// RelocInfo. ++ ++void RelocInfo::apply(intptr_t delta) { ++ if (IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)) { ++ // Absolute code pointer inside code object moves with the code object. ++ Assembler::RelocateInternalReference(rmode_, pc_, delta); ++ } ++} ++ ++ ++Address RelocInfo::target_address() { ++ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_)); ++ return Assembler::target_address_at(pc_, constant_pool_); ++} ++ ++Address RelocInfo::target_address_address() { ++ DCHECK(HasTargetAddressAddress()); ++ // Read the address of the word containing the target_address in an ++ // instruction stream. ++ // The only architecture-independent user of this function is the serializer. ++ // The serializer uses it to find out how many raw bytes of instruction to ++ // output before the next target. ++ // For an instruction like LUI/ORI where the target bits are mixed into the ++ // instruction bits, the size of the target will be zero, indicating that the ++ // serializer should not step forward in memory after a target is resolved ++ // and written. In this case the target_address_address function should ++ // return the end of the instructions to be patched, allowing the ++ // deserializer to deserialize the instructions as raw bytes and put them in ++ // place, ready to be patched with the target. After jump optimization, ++ // that is the address of the instruction that follows J/JAL/JR/JALR ++ // instruction. ++ return pc_ + Assembler::kInstructionsFor64BitConstant * kInstrSize; ++} ++ ++Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); } ++ ++int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; } ++ ++void Assembler::deserialization_set_special_target_at( ++ Address instruction_payload, Code code, Address target) { ++ set_target_address_at(instruction_payload, ++ !code.is_null() ? 
code.constant_pool() : kNullAddress,
++                        target);
++}
++
++int Assembler::deserialization_special_target_size(
++    Address instruction_payload) {
++  return kSpecialTargetSize;
++}
++
++void Assembler::set_target_internal_reference_encoded_at(Address pc,
++                                                         Address target) {
++  // Encoded internal references are j/jal instructions.
++  Instr instr = Assembler::instr_at(pc + 0 * kInstrSize);
++
++  uint64_t imm28 = target & static_cast<uint64_t>(kImm28Mask);
++
++  instr &= ~kImm26Mask;
++  uint64_t imm26 = imm28 >> 2;
++  DCHECK(is_uint26(imm26));
++
++  instr_at_put(pc, instr | (imm26 & kImm26Mask));
++  // Currently used only by deserializer, and all code will be flushed
++  // after complete deserialization, no need to flush on each reference.
++}
++
++void Assembler::deserialization_set_target_internal_reference_at(
++    Address pc, Address target, RelocInfo::Mode mode) {
++  if (mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
++    UNREACHABLE();  // DCHECK(IsJ(instr_at(pc)));
++    set_target_internal_reference_encoded_at(pc, target);
++  } else {
++    DCHECK(mode == RelocInfo::INTERNAL_REFERENCE);
++    Memory<Address>(pc) = target;
++  }
++}
++
++HeapObject RelocInfo::target_object() {
++  DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
++  return HeapObject::cast(
++      Object(Assembler::target_address_at(pc_, constant_pool_)));
++}
++
++HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
++  return target_object();
++}
++
++Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
++  DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
++  return Handle<HeapObject>(reinterpret_cast<Address*>(
++      Assembler::target_address_at(pc_, constant_pool_)));
++}
++
++void RelocInfo::set_target_object(Heap* heap, HeapObject target,
++                                  WriteBarrierMode write_barrier_mode,
++                                  ICacheFlushMode icache_flush_mode) {
++  DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
++  Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
++                                   icache_flush_mode);
++  if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
++      !FLAG_disable_write_barriers) {
++    WriteBarrierForCode(host(), this, target);
++  }
++}
++
++
++Address RelocInfo::target_external_reference() {
++  DCHECK(rmode_ == EXTERNAL_REFERENCE);
++  return Assembler::target_address_at(pc_, constant_pool_);
++}
++
++void RelocInfo::set_target_external_reference(
++    Address target, ICacheFlushMode icache_flush_mode) {
++  DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
++  Assembler::set_target_address_at(pc_, constant_pool_, target,
++                                   icache_flush_mode);
++}
++
++Address RelocInfo::target_internal_reference() {
++  if (rmode_ == INTERNAL_REFERENCE) {
++    return Memory<Address>(pc_);
++  } else {
++    // Encoded internal references are j/jal instructions.
++    DCHECK(rmode_ == INTERNAL_REFERENCE_ENCODED);
++    Instr instr = Assembler::instr_at(pc_ + 0 * kInstrSize);
++    instr &= kImm26Mask;
++    uint64_t imm28 = instr << 2;
++    uint64_t segment = pc_ & ~static_cast<uint64_t>(kImm28Mask);
++    return static_cast<Address>(segment | imm28);
++  }
++}
++
++
++Address RelocInfo::target_internal_reference_address() {
++  DCHECK(rmode_ == INTERNAL_REFERENCE || rmode_ == INTERNAL_REFERENCE_ENCODED);
++  return pc_;
++}
++
++Address RelocInfo::target_runtime_entry(Assembler* origin) {
++  DCHECK(IsRuntimeEntry(rmode_));
++  return target_address();
++}
++
++void RelocInfo::set_target_runtime_entry(Address target,
++                                         WriteBarrierMode write_barrier_mode,
++                                         ICacheFlushMode icache_flush_mode) {
++  DCHECK(IsRuntimeEntry(rmode_));
++  if (target_address() != target)
++    set_target_address(target, write_barrier_mode, icache_flush_mode);
++}
++
++Address RelocInfo::target_off_heap_target() {
++  DCHECK(IsOffHeapTarget(rmode_));
++  return Assembler::target_address_at(pc_, constant_pool_);
++}
++
++void RelocInfo::WipeOut() {
++  DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
++         IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
++         IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_) ||
++         IsOffHeapTarget(rmode_));
++  if (IsInternalReference(rmode_)) {
++    Memory<Address>
(pc_) = kNullAddress; ++ } else if (IsInternalReferenceEncoded(rmode_)) { ++ Assembler::set_target_internal_reference_encoded_at(pc_, kNullAddress); ++ } else { ++ Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress); ++ } ++} ++ ++// ----------------------------------------------------------------------------- ++// Assembler. ++ ++ ++void Assembler::CheckBuffer() { ++ if (buffer_space() <= kGap) { ++ GrowBuffer(); ++ } ++} ++ ++ ++void Assembler::CheckForEmitInForbiddenSlot() { ++ if (!is_buffer_growth_blocked()) { ++ CheckBuffer(); ++ } ++ if (IsPrevInstrCompactBranch()) { ++ // Nop instruction to precede a CTI in forbidden slot: ++ Instr nop = op_ldi | (zero_reg.code() << sRaShift) | (zero_reg.code() << sRbShift); ++ *reinterpret_cast(pc_) = nop; ++ pc_ += kInstrSize; ++ ++ ClearCompactBranchState(); ++ } ++} ++ ++ ++void Assembler::EmitHelper(Instr x, CompactBranchType is_compact_branch) { ++ if (IsPrevInstrCompactBranch()) { ++ if (Instruction::IsForbiddenAfterBranchInstr(x)) { ++ // Nop instruction to precede a CTI in forbidden slot: ++ Instr nop = op_ldi | (zero_reg.code() << sRaShift) | (zero_reg.code() << sRbShift); ++ *reinterpret_cast(pc_) = nop; ++ pc_ += kInstrSize; ++ } ++ ClearCompactBranchState(); ++ } ++ *reinterpret_cast(pc_) = x; ++ pc_ += kInstrSize; ++ if (is_compact_branch == CompactBranchType::COMPACT_BRANCH) { ++ EmittedCompactBranchInstruction(); ++ } ++ CheckTrampolinePoolQuick(); ++} ++ ++template <> ++inline void Assembler::EmitHelper(uint8_t x); ++ ++template ++void Assembler::EmitHelper(T x) { ++ *reinterpret_cast(pc_) = x; ++ pc_ += sizeof(x); ++ CheckTrampolinePoolQuick(); ++} ++ ++template <> ++void Assembler::EmitHelper(uint8_t x) { ++ *reinterpret_cast(pc_) = x; ++ pc_ += sizeof(x); ++ if (reinterpret_cast(pc_) % kInstrSize == 0) { ++ CheckTrampolinePoolQuick(); ++ } ++} ++ ++#ifdef ZHJDEL ++void Assembler::emit(Instr x, CompactBranchType is_compact_branch) { ++ UNREACHABLE(); // This should never be reached on sw64. ++} ++ ++ ++void Assembler::emit(uint64_t data) { ++ UNREACHABLE(); // This should never be reached on sw64. ++} ++#endif ++ ++#ifdef SW64 ++void Assembler::emitSW(Instr x) { ++ if (!is_buffer_growth_blocked()) { ++ CheckBuffer(); ++ } ++ EmitHelper(x); ++} ++ ++void Assembler::emitSW(uint64_t data) { ++ CheckForEmitInForbiddenSlot(); ++ EmitHelper(data); ++} ++#endif ++ ++EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); } ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_SW64_ASSEMBLER_SW64_INL_H_ +diff --git a/src/3rdparty/chromium/v8/src/codegen/sw64/assembler-sw64.cc b/src/3rdparty/chromium/v8/src/codegen/sw64/assembler-sw64.cc +new file mode 100755 +index 000000000..9f7609602 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/codegen/sw64/assembler-sw64.cc +@@ -0,0 +1,4379 @@ ++// Copyright (c) 1994-2006 Sun Microsystems Inc. ++// All Rights Reserved. ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// - Redistributions of source code must retain the above copyright notice, ++// this list of conditions and the following disclaimer. ++// ++// - Redistribution in binary form must reproduce the above copyright ++// notice, this list of conditions and the following disclaimer in the ++// documentation and/or other materials provided with the distribution. 
++// ++// - Neither the name of Sun Microsystems or the names of contributors may ++// be used to endorse or promote products derived from this software without ++// specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS ++// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, ++// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ++// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR ++// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ++// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ++// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ++// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ++// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ++// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++// The original source code covered by the above license above has been ++// modified significantly by Google Inc. ++// Copyright 2012 the V8 project authors. All rights reserved. ++ ++#include "src/codegen/sw64/assembler-sw64.h" ++ ++#if V8_TARGET_ARCH_SW64 ++ ++#include "src/base/cpu.h" ++#include "src/codegen/sw64/assembler-sw64-inl.h" ++#include "src/codegen/safepoint-table.h" ++#include "src/codegen/string-constants.h" ++#include "src/deoptimizer/deoptimizer.h" ++#include "src/objects/heap-number-inl.h" ++ ++namespace v8 { ++namespace internal { ++ ++ ++// Get the CPU features enabled by the build. For cross compilation the ++// preprocessor symbols CAN_USE_FPU_INSTRUCTIONS ++// can be defined to enable FPU instructions when building the ++// snapshot. ++static unsigned CpuFeaturesImpliedByCompiler() { ++ unsigned answer = 0; ++ ++ // If the compiler is allowed to use FPU then we can use FPU too in our code ++ // generation even when generating snapshots. This won't work for cross ++ // compilation. ++ answer |= 1u << FPU; ++ ++ return answer; ++} ++ ++ ++void CpuFeatures::ProbeImpl(bool cross_compile) { ++ supported_ |= CpuFeaturesImpliedByCompiler(); ++ ++ // Only use statically determined features for cross compile (snapshot). ++ if (cross_compile) return; ++ ++ // Probe for additional features at runtime. 
++ base::CPU cpu; ++ if (cpu.has_fpu()) supported_ |= 1u << FPU; ++ if (cpu.has_msa()) supported_ |= 1u << SW64_SIMD; ++} ++ ++ ++void CpuFeatures::PrintTarget() { } ++void CpuFeatures::PrintFeatures() { } ++ ++ ++int ToNumber(Register reg) { ++ DCHECK(reg.is_valid()); ++ const int kNumbers[] = { ++ // (SW64) ++ 0, // v0 ++ 1, // t0 ++ 2, // t1 ++ 3, // t2 ++ 4, // t3 ++ 5, // t4 ++ 6, // t5 ++ 7, // t6 ++ 8, // t7 ++ 9, // s0 ++ 10, // s1 ++ 11, // s2 ++ 12, // s3 ++ 13, // s4 ++ 14, // s5 ++ 15, // fp ++ 16, // a0 ++ 17, // a1 ++ 18, // a2 ++ 19, // a3 ++ 20, // a4 ++ 21, // a5 ++ 22, // t8 ++ 23, // t9 ++ 24, // t10 ++ 25, // t11 ++ 26, // ra ++ 27, // t12 ++ 28, // at ++ 29, // gp ++ 30, // sp ++ 31 // zero_reg ++ }; ++ return kNumbers[reg.code()]; ++} ++ ++ ++Register ToRegister(int num) { ++ DCHECK(num >= 0 && num < kNumRegisters); ++ const Register kRegisters[] = { ++ v0, ++ t0, t1, t2, t3, t4, t5, t6, t7, ++ s0, s1, s2, s3, s4, s5, fp, ++ a0, a1, a2, a3, a4, a5, ++ t8, t9, t10, t11, ++ ra, ++ t12, ++ at, ++ gp, ++ sp, ++ zero_reg ++ }; ++ return kRegisters[num]; ++} ++ ++ ++// ----------------------------------------------------------------------------- ++// Implementation of RelocInfo. ++ ++const int RelocInfo::kApplyMask = ++ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) | ++ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED); ++ ++bool RelocInfo::IsCodedSpecially() { ++ // The deserializer needs to know whether a pointer is specially coded. Being ++ // specially coded on SW64 means that it is a lui/ori instruction, and that is ++ // always the case inside code objects. ++ return true; ++} ++ ++ ++bool RelocInfo::IsInConstantPool() { ++ return false; ++} ++ ++uint32_t RelocInfo::wasm_call_tag() const { ++ DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL); ++ return static_cast( ++ Assembler::target_address_at(pc_, constant_pool_)); ++} ++ ++// ----------------------------------------------------------------------------- ++// Implementation of Operand and MemOperand. ++// See assembler-sw64-inl.h for inlined constructors. 
++
++Operand::Operand(Handle<HeapObject> handle)
++    : rm_(no_reg), rmode_(RelocInfo::FULL_EMBEDDED_OBJECT) {
++  value_.immediate = static_cast<intptr_t>(handle.address());
++}
++
++Operand Operand::EmbeddedNumber(double value) {
++  int32_t smi;
++  if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
++  Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
++  result.is_heap_object_request_ = true;
++  result.value_.heap_object_request = HeapObjectRequest(value);
++  return result;
++}
++
++Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
++  Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
++  result.is_heap_object_request_ = true;
++  result.value_.heap_object_request = HeapObjectRequest(str);
++  return result;
++}
++
++MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
++  offset_ = offset;
++}
++
++
++MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
++                       OffsetAddend offset_addend)
++    : Operand(rm) {
++  offset_ = unit * multiplier + offset_addend;
++}
++
++void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
++  DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
++  for (auto& request : heap_object_requests_) {
++    Handle<HeapObject> object;
++    switch (request.kind()) {
++      case HeapObjectRequest::kHeapNumber:
++        object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
++            request.heap_number());
++        break;
++      case HeapObjectRequest::kStringConstant:
++        const StringConstantBase* str = request.string();
++        CHECK_NOT_NULL(str);
++        object = str->AllocateStringConstant(isolate);
++        break;
++    }
++    Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
++    set_target_value_at(pc, reinterpret_cast<uint64_t>(object.location()));
++  }
++}
++
++Assembler::Assembler(const AssemblerOptions& options,
++                     std::unique_ptr<AssemblerBuffer> buffer)
++    : AssemblerBase(options, std::move(buffer)),
++      scratch_register_list_(at.bit()) {
++  if (CpuFeatures::IsSupported(SW64_SIMD)) {
++    EnableCpuFeature(SW64_SIMD);
++  }
++  reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
++
++  last_trampoline_pool_end_ = 0;
++  no_trampoline_pool_before_ = 0;
++  trampoline_pool_blocked_nesting_ = 0;
++  // We leave space (16 * kTrampolineSlotsSize)
++  // for BlockTrampolinePoolScope buffer.
++  next_buffer_check_ = FLAG_force_long_branches
++      ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
++  internal_trampoline_exception_ = false;
++  last_bound_pos_ = 0;
++
++  trampoline_emitted_ = FLAG_force_long_branches;
++  unbound_labels_count_ = 0;
++  block_buffer_growth_ = false;
++}
++
++void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
++                        SafepointTableBuilder* safepoint_table_builder,
++                        int handler_table_offset) {
++  EmitForbiddenSlotInstruction();
++
++  int code_comments_size = WriteCodeComments();
++
++  DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.
++
++  AllocateAndInstallRequestedHeapObjects(isolate);
++
++  // Set up code descriptor.
++  // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
++  // this point to make CodeDesc initialization less fiddly.
++
++  static constexpr int kConstantPoolSize = 0;
++  const int instruction_size = pc_offset();
++  const int code_comments_offset = instruction_size - code_comments_size;
++  const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
++  const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
++                                        ? constant_pool_offset
++                                        : handler_table_offset;
++  const int safepoint_table_offset =
++      (safepoint_table_builder == kNoSafepointTable)
++          ? handler_table_offset2
++          : safepoint_table_builder->GetCodeOffset();
++  const int reloc_info_offset =
++      static_cast<int>(reloc_info_writer.pos() - buffer_->start());
++  CodeDesc::Initialize(desc, this, safepoint_table_offset,
++                       handler_table_offset2, constant_pool_offset,
++                       code_comments_offset, reloc_info_offset);
++}
++
++void Assembler::Align(int m) {
++  DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
++  EmitForbiddenSlotInstruction();
++  while ((pc_offset() & (m - 1)) != 0) {
++    nop();
++  }
++}
++
++
++void Assembler::CodeTargetAlign() {
++  // No advantage to aligning branch/call targets to more than
++  // single instruction, that I am aware of.
++ Align(4); ++} ++ ++ ++uint32_t Assembler::GetSwRa(Instr instr) { ++ return (instr & sRaFieldMask) >> sRaShift; ++} ++ ++ ++uint32_t Assembler::GetSwRb(Instr instr) { ++ return (instr & sRbFieldMask) >> sRbShift; ++} ++ ++ ++uint32_t Assembler::GetSwRc(Instr instr) { ++ return (instr & sRcFieldMask) >> sRcShift; ++} ++ ++ ++uint32_t Assembler::GetLabelConst(Instr instr) { ++ return instr & ~kImm16Mask; ++} ++ ++ ++#ifdef SW64 //20181123 ++ ++#define OP(x) (((x) & 0x3F) << 26) ++#define OPR(oo,ff) (OP(oo) | (((ff) & 0xFF) << 5)) ++ ++int32_t Assembler::GetSwOpcodeField(Instr instr) { ++ return instr & OP(-1); ++} ++ ++ ++int32_t Assembler::GetSwOpcodeAndFunctionField(Instr instr) { ++ return instr & OPR(-1, -1); ++} ++ ++#undef OP ++#undef OPR ++ ++uint32_t Assembler::GetSwImmediate8(Instr instr) { ++ return (instr & sImm8Mask) >> sImm8Shift; ++} ++ ++ ++uint32_t Assembler::GetSwImmediate16(Instr instr) { ++ return (instr & sImm16Mask) >> sImm16Shift; ++} ++#endif ++ ++ ++// Labels refer to positions in the (to be) generated code. ++// There are bound, linked, and unused labels. ++// ++// Bound labels refer to known positions in the already ++// generated code. pos() is the position the label refers to. ++// ++// Linked labels refer to unknown positions in the code ++// to be generated; pos() is the position of the last ++// instruction using the label. ++ ++// The link chain is terminated by a value in the instruction of -1, ++// which is an otherwise illegal value (branch -1 is inf loop). ++// The instruction 16-bit offset field addresses 32-bit words, but in ++// code is conv to an 18-bit value addressing bytes, hence the -4 value. ++ ++const int kEndOfChain = -4; ++// Determines the end of the Jump chain (a subset of the label link chain). ++const int kEndOfJumpChain = 0; ++ ++ ++bool Assembler::IsLdih(Instr instr) { ++ int32_t opcode = GetSwOpcodeField(instr); ++ ++ return opcode == op_ldih; ++} ++ ++bool Assembler::IsLdi(Instr instr) { ++ int32_t opcode = GetSwOpcodeField(instr); ++ ++ return opcode == op_ldi; ++} ++ ++ ++bool Assembler::IsBranch(Instr instr) { ++ int32_t opcode = GetSwOpcodeField(instr); ++ return opcode == op_br || opcode == op_bsr || //; unconditional branch ++ opcode == op_beq || opcode == op_bne || ++ opcode == op_blt || opcode == op_ble || ++ opcode == op_bgt || opcode == op_bge || ++ opcode == op_blbc || opcode == op_blbs || ++ opcode == op_fbeq || opcode == op_fbne || ++ opcode == op_fblt || opcode == op_fble || ++ opcode == op_fbgt || opcode == op_fbge; ++} ++ ++ ++bool Assembler::IsEmittedConstant(Instr instr) { ++ uint32_t label_constant = GetLabelConst(instr); ++ return label_constant == 0; // Emitted label const in reg-exp engine. 
++}
++
++
++bool Assembler::IsBeq(Instr instr) {
++  return GetSwOpcodeField(instr) == op_beq;
++}
++
++
++bool Assembler::IsBne(Instr instr) {
++  return GetSwOpcodeField(instr) == op_bne;
++}
++
++
++bool Assembler::IsAddImmediate(Instr instr) {
++  int32_t opcode = GetSwOpcodeAndFunctionField(instr);
++  return opcode == op_addw_l || opcode == op_addl_l;
++}
++
++
++bool Assembler::IsAndImmediate(Instr instr) {
++  return GetSwOpcodeAndFunctionField(instr) == op_and_l;
++}
++
++
++int Assembler::target_at(int pos, bool is_internal) {
++  if (is_internal) {
++    int64_t* p = reinterpret_cast<int64_t*>(buffer_start_ + pos);
++    int64_t address = *p;
++    if (address == kEndOfJumpChain) {
++      return kEndOfChain;
++    } else {
++      int64_t instr_address = reinterpret_cast<int64_t>(p);
++      DCHECK(instr_address - address < INT_MAX);
++      int delta = static_cast<int>(instr_address - address);
++      DCHECK(pos > delta);
++      return pos - delta;
++    }
++  }
++  Instr instr = instr_at(pos);
++  if ((instr & ~sImm21Mask) == 0) {
++    // Emitted label constant, not part of a branch.
++    if (instr == 0) {
++      return kEndOfChain;
++    } else {
++      int32_t imm23 = ((instr & static_cast<int32_t>(sImm21Mask)) << 11) >> 9;
++      return (imm23 + pos);
++    }
++  }
++  // Check we have a branch or jump instruction.
++  DCHECK(IsBranch(instr) || IsLdi(instr));
++  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
++  // the compiler uses arithmetic shifts for signed integers.
++  if (IsBranch(instr)) {
++    int32_t imm23 = ((instr & static_cast<int32_t>(sImm21Mask)) << 11) >> 9;
++
++    if (imm23 == kEndOfChain) {
++      // EndOfChain sentinel is returned directly, not relative to pc or pos.
++      return kEndOfChain;
++    } else {
++      return pos + kBranchPCOffset + imm23;
++    }
++  } else if (IsLdi(instr)) {
++    Instr instr0_ldi = instr_at(pos + 0 * kInstrSize);
++    Instr instr2_ldih = instr_at(pos + 2 * kInstrSize);
++    Instr instr3_ldi = instr_at(pos + 3 * kInstrSize);
++    DCHECK(IsLdi(instr0_ldi));
++    DCHECK(IsLdih(instr2_ldih));
++    DCHECK(IsLdi(instr3_ldi));
++
++    // TODO(plind) create named constants for shift values.
++    int64_t imm = static_cast<int64_t>(instr0_ldi << 16) << 16;
++    imm += static_cast<int64_t>(instr2_ldih << 16);
++    imm += static_cast<int64_t>(instr3_ldi << 16) >> 16;
++
++    if (imm == kEndOfJumpChain) {
++      // EndOfChain sentinel is returned directly, not relative to pc or pos.
++      return kEndOfChain;
++    } else {
++      uint64_t instr_address = reinterpret_cast<uint64_t>(buffer_start_ + pos);
++      int64_t delta = instr_address - imm;
++      DCHECK(pos > delta);
++      return (int)(pos - delta);
++    }
++  } else {
++    UNIMPLEMENTED_SW64();
++    return -1;
++  }
++}
++
++
++void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
++  if (is_internal) {
++    uint64_t imm = reinterpret_cast<uint64_t>(buffer_start_) + target_pos;
++    *reinterpret_cast<uint64_t*>(buffer_start_ + pos) = imm;
++    return;
++  }
++  Instr instr = instr_at(pos);
++  if ((instr & ~sImm21Mask) == 0) {
++    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
++    // Emitted label constant, not part of a branch.
++    // Make label relative to Code pointer of generated Code object.
++ instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag)); ++ return; ++ } ++ ++ DCHECK(IsBranch(instr) || IsLdi(instr)); ++ if (IsBranch(instr)) { ++ int32_t imm23 = target_pos - (pos + kBranchPCOffset); ++ DCHECK((imm23 & 3) == 0); ++ ++ int32_t imm21 = imm23 >> 2; ++ instr &= ~sImm21Mask; ++ DCHECK(is_int21(imm21)); ++ ++ instr_at_put(pos, instr | (imm21 & sImm21Mask)); ++ } else if (IsLdi(instr)) { ++ Instr instr0_ldi = instr_at(pos + 0 * kInstrSize); ++ Instr instr2_ldih = instr_at(pos + 2 * kInstrSize); ++ Instr instr3_ldi = instr_at(pos + 3 * kInstrSize); ++ DCHECK(IsLdi(instr0_ldi)); ++ DCHECK(IsLdih(instr2_ldih)); ++ DCHECK(IsLdi(instr3_ldi)); ++ ++ int64_t imm = reinterpret_cast(buffer_start_) + target_pos; ++ DCHECK((imm & 3) == 0); ++ ++ instr0_ldi &= ~kImm16Mask; ++ instr2_ldih &= ~kImm16Mask; ++ instr3_ldi &= ~kImm16Mask; ++ ++ int32_t lsb32 = (int32_t) (imm); ++ int32_t msb32 = (int32_t) ((imm - lsb32) >> 32); ++ instr_at_put(pos + 0 * kInstrSize, ++ instr0_ldi | ((int16_t)(msb32 & 0xffff) & 0xffff)); ++ instr_at_put(pos + 2 * kInstrSize, ++ instr2_ldih | (((lsb32-(int16_t)lsb32)>>16) & 0xffff)); ++ instr_at_put(pos + 3 * kInstrSize, ++ instr3_ldi | ((int16_t)(lsb32 & 0xffff) & 0xffff)); ++ } else { ++ UNIMPLEMENTED_SW64(); ++ } ++} ++ ++void Assembler::print(const Label* L) { ++ if (L->is_unused()) { ++ PrintF("unused label\n"); ++ } else if (L->is_bound()) { ++ PrintF("bound label to %d\n", L->pos()); ++ } else if (L->is_linked()) { ++ Label l; ++ l.link_to(L->pos()); ++ PrintF("unbound label"); ++ while (l.is_linked()) { ++ PrintF("@ %d ", l.pos()); ++ Instr instr = instr_at(l.pos()); ++ if ((instr & ~kImm16Mask) == 0) { ++ PrintF("value\n"); ++ } else { ++ PrintF("%d\n", instr); ++ } ++ next(&l, is_internal_reference(&l)); ++ } ++ } else { ++ PrintF("label in inconsistent state (pos = %d)\n", L->pos_); ++ } ++} ++ ++ ++void Assembler::bind_to(Label* L, int pos) { ++ DCHECK(0 <= pos && pos <= pc_offset()); // Must have valid binding position. ++ int trampoline_pos = kInvalidSlotPos; ++ bool is_internal = false; ++ if (L->is_linked() && !trampoline_emitted_) { ++ unbound_labels_count_--; ++ if (!is_internal_reference(L)) { ++ next_buffer_check_ += kTrampolineSlotsSize; ++ } ++ } ++ ++ while (L->is_linked()) { ++ int fixup_pos = L->pos(); ++ int dist = pos - fixup_pos; ++ is_internal = is_internal_reference(L); ++ next(L, is_internal); // Call next before overwriting link with target at ++ // fixup_pos. ++ Instr instr = instr_at(fixup_pos); ++ if (is_internal) { ++ target_at_put(fixup_pos, pos, is_internal); ++ } else { ++ if (IsBranch(instr)) { ++ int branch_offset = BranchOffset(instr); ++ if (dist > branch_offset) { ++ if (trampoline_pos == kInvalidSlotPos) { ++ trampoline_pos = get_trampoline_entry(fixup_pos); ++ CHECK_NE(trampoline_pos, kInvalidSlotPos); ++ } ++ CHECK((trampoline_pos - fixup_pos) <= branch_offset); ++ target_at_put(fixup_pos, trampoline_pos, false); ++ fixup_pos = trampoline_pos; ++ } ++ target_at_put(fixup_pos, pos, false); ++ } else { ++ DCHECK(IsLdi(instr) || IsEmittedConstant(instr)); ++ target_at_put(fixup_pos, pos, false); ++ } ++ } ++ } ++ L->bind_to(pos); ++ ++ // Keep track of the last bound label so we don't eliminate any instructions ++ // before a bound label. ++ if (pos > last_bound_pos_) ++ last_bound_pos_ = pos; ++} ++ ++ ++void Assembler::bind(Label* L) { ++ DCHECK(!L->is_bound()); // Label can only be bound once. 
++  bind_to(L, pc_offset());
++}
++
++
++void Assembler::next(Label* L, bool is_internal) {
++  DCHECK(L->is_linked());
++  int link = target_at(L->pos(), is_internal);
++  if (link == kEndOfChain) {
++    L->Unuse();
++  } else {
++    DCHECK_GE(link, 0);
++    L->link_to(link);
++  }
++}
++
++
++bool Assembler::is_near(Label* L) {
++  DCHECK(L->is_bound());
++  return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
++}
++
++
++bool Assembler::is_near(Label* L, OffsetSize bits) {
++  if (L == nullptr || !L->is_bound()) return true;
++  return ((pc_offset() - L->pos()) <
++          (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize);
++}
++
++
++bool Assembler::is_near_branch(Label* L) {
++  DCHECK(L->is_bound());
++  return kArchVariant == kSw64r3 ? is_near_r3(L) : is_near_pre_r3(L);
++}
++
++
++int Assembler::BranchOffset(Instr instr) {
++  int bits = OffsetSize::kOffset21;
++
++  return (1 << (bits + 2 - 1)) - 1;
++}
++
++
++// We have to use a temporary register for things that can be relocated even
++// if they can be encoded in the SW64's 16 bits of immediate-offset instruction
++// space. There is no guarantee that the relocated location can be similarly
++// encoded.
++bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
++  return !RelocInfo::IsNone(rmode);
++}
++
++
++// Returns the next free trampoline entry.
++int32_t Assembler::get_trampoline_entry(int32_t pos) {
++  int32_t trampoline_entry = kInvalidSlotPos;
++  if (!internal_trampoline_exception_) {
++    if (trampoline_.start() > pos) {
++      trampoline_entry = trampoline_.take_slot();
++    }
++
++    if (kInvalidSlotPos == trampoline_entry) {
++      internal_trampoline_exception_ = true;
++    }
++  }
++  return trampoline_entry;
++}
++
++
++uint64_t Assembler::jump_address(Label* L) {
++  int64_t target_pos;
++  if (L->is_bound()) {
++    target_pos = L->pos();
++  } else {
++    if (L->is_linked()) {
++      target_pos = L->pos();  // L's link.
++      L->link_to(pc_offset());
++    } else {
++      L->link_to(pc_offset());
++      return kEndOfJumpChain;
++    }
++  }
++  uint64_t imm = reinterpret_cast<uint64_t>(buffer_start_) + target_pos;
++  DCHECK_EQ(imm & 3, 0);
++
++  return imm;
++}
++
++uint64_t Assembler::jump_offset(Label* L) {
++  int64_t target_pos;
++  int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;
++
++  if (L->is_bound()) {
++    target_pos = L->pos();
++  } else {
++    if (L->is_linked()) {
++      target_pos = L->pos();  // L's link.
++      L->link_to(pc_offset() + pad);
++    } else {
++      L->link_to(pc_offset() + pad);
++      return kEndOfJumpChain;
++    }
++  }
++  int64_t imm = target_pos - (pc_offset() + pad);
++  DCHECK_EQ(imm & 3, 0);
++
++  return static_cast<uint64_t>(imm);
++}
++
++uint64_t Assembler::branch_long_offset(Label* L) {
++  int64_t target_pos;
++
++  if (L->is_bound()) {
++    target_pos = L->pos();
++  } else {
++    if (L->is_linked()) {
++      target_pos = L->pos();  // L's link.
++      L->link_to(pc_offset());
++    } else {
++      L->link_to(pc_offset());
++      return kEndOfJumpChain;
++    }
++  }
++  int64_t offset = target_pos - (pc_offset() + kInstrSize);
++  DCHECK_EQ(offset & 3, 0);
++
++  return static_cast<uint64_t>(offset);
++}
++
++int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
++  int32_t target_pos;
++  int32_t pad = IsPrevInstrCompactBranch() ?
kInstrSize : 0; ++ ++ if (L->is_bound()) { ++ target_pos = L->pos(); ++ } else { ++ if (L->is_linked()) { ++ target_pos = L->pos(); ++ L->link_to(pc_offset() + pad); ++ } else { ++ L->link_to(pc_offset() + pad); ++ if (!trampoline_emitted_) { ++ unbound_labels_count_++; ++ next_buffer_check_ -= kTrampolineSlotsSize; ++ } ++ return kEndOfChain; ++ } ++ } ++ ++ int32_t offset = target_pos - (pc_offset() + kBranchPCOffset + pad); ++ DCHECK(is_intn(offset, bits + 2)); ++ DCHECK_EQ(offset & 3, 0); ++ ++ return offset; ++} ++ ++ ++void Assembler::label_at_put(Label* L, int at_offset) { ++ int target_pos; ++ if (L->is_bound()) { ++ target_pos = L->pos(); ++ instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag)); ++ } else { ++ if (L->is_linked()) { ++ target_pos = L->pos(); // L's link. ++ int32_t imm18 = target_pos - at_offset; ++ DCHECK_EQ(imm18 & 3, 0); ++ int32_t imm16 = imm18 >> 2; ++ DCHECK(is_int16(imm16)); ++ instr_at_put(at_offset, (imm16 & kImm16Mask)); ++ } else { ++ target_pos = kEndOfChain; ++ instr_at_put(at_offset, 0); ++ if (!trampoline_emitted_) { ++ unbound_labels_count_++; ++ next_buffer_check_ -= kTrampolineSlotsSize; ++ } ++ } ++ L->link_to(at_offset); ++ } ++} ++ ++ ++//------- Branch and jump instructions -------- ++ ++void Assembler::br(int offset) { ++ br(zero_reg, offset); ++} ++ ++ ++void Assembler::bsr(int offset) { ++ bsr(ra, offset); ++} ++ ++ ++// ------------Memory-instructions------------- ++ ++void Assembler::AdjustBaseAndOffset(MemOperand* src, ++ OffsetAccessType access_type, ++ int second_access_add_to_offset) { ++ // This method is used to adjust the base register and offset pair ++ // for a load/store when the offset doesn't fit into int16_t. ++ // It is assumed that 'base + offset' is sufficiently aligned for memory ++ // operands that are machine word in size or smaller. For doubleword-sized ++ // operands it's assumed that 'base' is a multiple of 8, while 'offset' ++ // may be a multiple of 4 (e.g. 4-byte-aligned long and double arguments ++ // and spilled variables on the stack accessed relative to the stack ++ // pointer register). ++ // We preserve the "alignment" of 'offset' by adjusting it by a multiple of 8. ++ ++ bool doubleword_aligned = (src->offset() & (kDoubleSize - 1)) == 0; ++ bool two_accesses = static_cast(access_type) || !doubleword_aligned; ++ DCHECK_LE(second_access_add_to_offset, 7); // Must be <= 7. ++ ++ // is_int16 must be passed a signed value, hence the static cast below. ++ if (is_int16(src->offset()) && ++ (!two_accesses || is_int16(static_cast( ++ src->offset() + second_access_add_to_offset)))) { ++ // Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified ++ // value) fits into int16_t. ++ return; ++ } ++ ++ DCHECK(src->rm() != ++ at); // Must not overwrite the register 'base' while loading 'offset'. ++ ++#ifdef DEBUG ++ // Remember the "(mis)alignment" of 'offset', it will be checked at the end. ++ uint32_t misalignment = src->offset() & (kDoubleSize - 1); ++#endif ++ ++ // Do not load the whole 32-bit 'offset' if it can be represented as ++ // a sum of two 16-bit signed offsets. This can save an instruction or two. ++ // To simplify matters, only do this for a symmetric range of offsets from ++ // about -64KB to about +64KB, allowing further addition of 4 when accessing ++ // 64-bit variables with two 32-bit accesses. ++ constexpr int32_t kMinOffsetForSimpleAdjustment = ++ 0x7FF8; // Max int16_t that's a multiple of 8. 
++ constexpr int32_t kMaxOffsetForSimpleAdjustment = ++ 2 * kMinOffsetForSimpleAdjustment; ++ ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ if (0 <= src->offset() && src->offset() <= kMaxOffsetForSimpleAdjustment) { ++ ldi(scratch, kMinOffsetForSimpleAdjustment, src->rm()); ++ src->offset_ -= kMinOffsetForSimpleAdjustment; ++ } else if (-kMaxOffsetForSimpleAdjustment <= src->offset() && ++ src->offset() < 0) { ++ ldi(scratch, -kMinOffsetForSimpleAdjustment, src->rm()); ++ src->offset_ += kMinOffsetForSimpleAdjustment; ++ } else { ++ // Do not load the whole 32-bit 'offset' if it can be represented as ++ // a sum of three 16-bit signed offsets. This can save an instruction. ++ // To simplify matters, only do this for a symmetric range of offsets from ++ // about -96KB to about +96KB, allowing further addition of 4 when accessing ++ // 64-bit variables with two 32-bit accesses. ++ constexpr int32_t kMinOffsetForMediumAdjustment = ++ 2 * kMinOffsetForSimpleAdjustment; ++ constexpr int32_t kMaxOffsetForMediumAdjustment = ++ 3 * kMinOffsetForSimpleAdjustment; ++ if (0 <= src->offset() && src->offset() <= kMaxOffsetForMediumAdjustment) { ++ ldi(scratch, kMinOffsetForMediumAdjustment / 2, src->rm()); ++ ldi(scratch, kMinOffsetForMediumAdjustment / 2, scratch); ++ src->offset_ -= kMinOffsetForMediumAdjustment; ++ } else if (-kMaxOffsetForMediumAdjustment <= src->offset() && ++ src->offset() < 0) { ++ ldi(scratch, -kMinOffsetForMediumAdjustment / 2, src->rm()); ++ ldi(scratch, -kMinOffsetForMediumAdjustment / 2, scratch); ++ src->offset_ += kMinOffsetForMediumAdjustment; ++ } else { ++ // Now that all shorter options have been exhausted, load the full 32-bit ++ // offset. ++ int32_t loaded_offset = RoundDown(src->offset(), kDoubleSize); ++ int16_t lo_offset = static_cast(loaded_offset); ++ int16_t hi_offset = (loaded_offset-(int16_t)loaded_offset) >> 16; ++ if ( ((int32_t)hi_offset == -32768) && ((int32_t)lo_offset < 0) ) { ++ // range from 0x7FFF8000 to 0x7FFFFFFF ++ ldih(scratch, 0x4000, zero_reg); ++ ldih(scratch, 0x4000, scratch); ++ if (lo_offset != 0 ) ++ ldi(scratch, lo_offset, scratch); ++ } else { ++ ldih(scratch, hi_offset, zero_reg); ++ if (lo_offset != 0 ) ++ ldi(scratch, lo_offset, scratch); ++ } ++ addl(scratch, src->rm(),scratch); ++ src->offset_ -= loaded_offset; ++ } ++ } ++ src->rm_ = scratch; ++ ++ DCHECK(is_int16(src->offset())); ++ if (two_accesses) { ++ DCHECK(is_int16( ++ static_cast(src->offset() + second_access_add_to_offset))); ++ } ++ DCHECK(misalignment == (src->offset() & (kDoubleSize - 1))); ++} ++ ++ ++void Assembler::fmovd(FPURegister fs, FPURegister fd) { ++ fcpys(fs, fs, fd); ++} ++ ++ ++void Assembler::fmovs(FPURegister fs, FPURegister fd) { ++ fcpys(fs, fs, fd); ++} ++ ++ ++void Assembler::fnegs(FPURegister fs, FPURegister fd) { ++ fcpysn(fs, fs, fd); ++} ++ ++ ++void Assembler::fnegd(FPURegister fs, FPURegister fd) { ++ fcpysn(fs, fs, fd); ++} ++ ++// Conversions. 
++void Assembler::fcvtsw(FPURegister fs, FPURegister fd) { ++ DCHECK(fs != kScratchDoubleReg1 && fd != kScratchDoubleReg1); ++ DCHECK(fs != kScratchDoubleReg2 && fd != kScratchDoubleReg2); ++ fcvtsd(fs, kScratchDoubleReg2); ++ fcvtdl(kScratchDoubleReg2, kScratchDoubleReg1); ++ fcvtlw(kScratchDoubleReg1, fd); ++} ++ ++ ++void Assembler::fcvtdw(FPURegister fs, FPURegister fd) { ++ DCHECK(fs != kScratchDoubleReg1 && fd != kScratchDoubleReg1); ++ fcvtdl(fs, kScratchDoubleReg1); ++ fcvtlw(kScratchDoubleReg1, fd); ++} ++ ++ ++void Assembler::ftruncsw(FPURegister fs, FPURegister fd) { ++ DCHECK(fs != kScratchDoubleReg1 && fd != kScratchDoubleReg1); ++ DCHECK(fs != kScratchDoubleReg2 && fd != kScratchDoubleReg2); ++ fcvtsd(fs, kScratchDoubleReg1); ++ fcvtdl_z(kScratchDoubleReg1, kScratchDoubleReg2); ++ fcvtlw(kScratchDoubleReg2, fd); ++} ++ ++ ++void Assembler::ftruncdw(FPURegister fs, FPURegister fd) { ++ DCHECK(fs != kScratchDoubleReg1 && fd != kScratchDoubleReg1); ++ fcvtdl_z(fs, kScratchDoubleReg1); ++ fcvtlw(kScratchDoubleReg1, fd); ++} ++ ++ ++void Assembler::froundsw(FPURegister fs, FPURegister fd) { ++ DCHECK(fs != kScratchDoubleReg1 && fd != kScratchDoubleReg1); ++ DCHECK(fs != kScratchDoubleReg2 && fd != kScratchDoubleReg2); ++ fcvtsd(fs, kScratchDoubleReg1); ++ fcvtdl_g(kScratchDoubleReg1, kScratchDoubleReg2); ++ fcvtlw(kScratchDoubleReg2, fd); ++} ++ ++ ++void Assembler::frounddw(FPURegister fs, FPURegister fd) { ++ DCHECK(fs != kScratchDoubleReg1 && fd != kScratchDoubleReg1); ++ fcvtdl_g(fs, kScratchDoubleReg1); ++ fcvtlw(kScratchDoubleReg1, fd); ++} ++ ++ ++void Assembler::ffloorsw(FPURegister fs, FPURegister fd) { ++ DCHECK(fs != kScratchDoubleReg1 && fd != kScratchDoubleReg1); ++ DCHECK(fs != kScratchDoubleReg2 && fd != kScratchDoubleReg2); ++ fcvtsd(fs, kScratchDoubleReg2); ++ fcvtdl_n(kScratchDoubleReg2, kScratchDoubleReg1); ++ fcvtlw(kScratchDoubleReg1, fd); ++} ++ ++ ++void Assembler::ffloordw(FPURegister fs, FPURegister fd) { ++ DCHECK(fs != kScratchDoubleReg1 && fd != kScratchDoubleReg1); ++ fcvtdl_n(fs, kScratchDoubleReg1); ++ fcvtlw(kScratchDoubleReg1, fd); ++} ++ ++ ++void Assembler::fceilsw(FPURegister fs, FPURegister fd) { ++ DCHECK(fs != kScratchDoubleReg1 && fd != kScratchDoubleReg1); ++ DCHECK(fs != kScratchDoubleReg2 && fd != kScratchDoubleReg2); ++ fcvtsd(fs, kScratchDoubleReg2); ++ fcvtdl_p(kScratchDoubleReg2, kScratchDoubleReg1); ++ fcvtlw(kScratchDoubleReg1, fd); ++} ++ ++ ++void Assembler::fceildw(FPURegister fs, FPURegister fd) { ++ DCHECK(fs != kScratchDoubleReg1 && fd != kScratchDoubleReg1); ++ fcvtdl_p(fs, kScratchDoubleReg1); ++ fcvtlw(kScratchDoubleReg1, fd); ++} ++ ++ ++void Assembler::fcvtsl(FPURegister fs, FPURegister fd) { ++ DCHECK(fs != kScratchDoubleReg1 && fd != kScratchDoubleReg1); ++ fcvtsd(fs, kScratchDoubleReg1); ++ fcvtdl(kScratchDoubleReg1, fd); ++} ++ ++ ++void Assembler::ftruncsl(FPURegister fs, FPURegister fd) { ++ DCHECK(fs != kScratchDoubleReg1 && fd != kScratchDoubleReg1); ++ fcvtsd(fs, kScratchDoubleReg1); ++ fcvtdl_z(kScratchDoubleReg1, fd); ++} ++ ++ ++void Assembler::ftruncdl(FPURegister fs, FPURegister fd) { ++ DCHECK(fs != kScratchDoubleReg1 && fd != kScratchDoubleReg1); ++ if (fs == fd) { ++ fmovd(fs, kScratchDoubleReg1); ++ fcvtdl_z(kScratchDoubleReg1, fd); ++ }else{ ++ fcvtdl_z(fs, fd); ++ } ++} ++ ++ ++void Assembler::froundsl(FPURegister fs, FPURegister fd) { ++ DCHECK(fs != kScratchDoubleReg1 && fd != kScratchDoubleReg1); ++ fcvtsd(fs, kScratchDoubleReg1); ++ fcvtdl_g(kScratchDoubleReg1, fd); ++} ++ ++ ++void 
Assembler::frounddl(FPURegister fs, FPURegister fd) { ++ DCHECK(fs != kScratchDoubleReg1 && fd != kScratchDoubleReg1); ++ if (fs == fd) { ++ fmovd(fs, kScratchDoubleReg1); ++ fcvtdl_g(kScratchDoubleReg1, fd); ++ } else { ++ fcvtdl_g(fs, fd); ++ } ++} ++ ++ ++void Assembler::ffloorsl(FPURegister fs, FPURegister fd) { ++ DCHECK(fs != kScratchDoubleReg1 && fd != kScratchDoubleReg1); ++ fcvtsd(fs, kScratchDoubleReg1); ++ fcvtdl_n(kScratchDoubleReg1, fd); ++} ++ ++ ++void Assembler::ffloordl(FPURegister fs, FPURegister fd) { ++ DCHECK(fs != kScratchDoubleReg1 && fd != kScratchDoubleReg1); ++ if (fs == fd) { ++ fmovd(fs, kScratchDoubleReg1); ++ fcvtdl_n(kScratchDoubleReg1, fd); ++ } else { ++ fcvtdl_n(fs, fd); ++ } ++} ++ ++ ++void Assembler::fceilsl(FPURegister fs, FPURegister fd) { ++ DCHECK(fs != kScratchDoubleReg1 && fd != kScratchDoubleReg1); ++ fcvtsd(fs, kScratchDoubleReg1); ++ fcvtdl_p(kScratchDoubleReg1, fd); ++} ++ ++ ++void Assembler::fceildl(FPURegister fs, FPURegister fd) { ++ DCHECK(fs != kScratchDoubleReg1 && fd != kScratchDoubleReg1); ++ if (fs == fd) { ++ fmovd(fs, kScratchDoubleReg1); ++ fcvtdl_p(kScratchDoubleReg1, fd); ++ } else { ++ fcvtdl_p(fs, fd); ++ } ++} ++ ++ ++void Assembler::fcvtws(FPURegister fs, FPURegister fd) { ++ DCHECK(fs != kScratchDoubleReg1 && fd != kScratchDoubleReg1); ++ fcvtwl(fs, kScratchDoubleReg1); ++ fcvtls(kScratchDoubleReg1, fd); ++} ++ ++ ++void Assembler::fcvtls_(FPURegister fs, FPURegister fd) { ++ DCHECK(fs != kScratchDoubleReg1 && fd != kScratchDoubleReg1); ++ if (fs == fd){ ++ fmovd(fs, kScratchDoubleReg1); ++ fcvtls(kScratchDoubleReg1, fd); ++ }else{ ++ fcvtls(fs, fd); ++ } ++} ++ ++ ++void Assembler::fcvtds_(FPURegister fs, FPURegister fd) { ++ DCHECK(fs != kScratchDoubleReg1 && fd != kScratchDoubleReg1); ++ if (fs == fd){ ++ fmovd(fs, kScratchDoubleReg1); ++ fcvtds(kScratchDoubleReg1, fd); ++ }else{ ++ fcvtds(fs, fd); ++ } ++} ++ ++ ++void Assembler::fcvtwd(FPURegister fs, FPURegister fd) { ++ DCHECK(fs != kScratchDoubleReg1 && fd != kScratchDoubleReg1); ++ fcvtwl(fs, kScratchDoubleReg1); ++ fcvtld(kScratchDoubleReg1, fd); ++} ++ ++ ++void Assembler::fcvtld_(FPURegister fs, FPURegister fd) { ++ DCHECK(fs != kScratchDoubleReg1 && fd != kScratchDoubleReg1); ++ if (fs == fd){ ++ fmovd(fs, kScratchDoubleReg1); ++ fcvtld(kScratchDoubleReg1, fd); ++ }else{ ++ fcvtld(fs, fd); ++ } ++} ++ ++ ++void Assembler::fcvtsd_(FPURegister fs, FPURegister fd) { ++ DCHECK(fs != kScratchDoubleReg1 && fd != kScratchDoubleReg1); ++ if (fs == fd){ ++ fmovs(fs, kScratchDoubleReg1); ++ fcvtsd(kScratchDoubleReg1, fd); ++ }else{ ++ fcvtsd(fs, fd); ++ } ++} ++ ++ ++// Conditions for >= SW64r3. ++void Assembler::cmp(FPUCondition cond, SecondaryField fmt, ++ FPURegister fd, FPURegister fs, FPURegister ft) { ++#ifdef SW64 ++ //TODO: SecondaryField is useless! 
++ DCHECK_EQ(fd, kDoubleCompareReg); ++ switch(cond) { ++ case EQ: ++ fcmpeq(fs, ft, fd); ++ break; ++ case OLT: ++ fcmplt(fs, ft, fd); ++ break; ++ case OLE: ++ fcmple(fs, ft, fd); ++ break; ++ case UN: ++ fcmpun(fs, ft, fd); ++ break; ++ default: ++ UNREACHABLE(); ++ }; ++#endif ++} ++ ++ ++#ifdef SW64 //20180904 define SW-character assembler ++ ++void Assembler::GenInstrB_SW(Opcode_ops_bra opcode, ++ Register Ra, ++ int32_t disp) { ++ DCHECK(Ra.is_valid() && is_int21(disp)); ++ Instr instr = opcode | (Ra.code() << sRaShift) | (disp & sImm21Mask); ++ emitSW(instr); ++} ++ ++ ++void Assembler::GenInstrFB_SW(Opcode_ops_bra opcode, ++ FloatRegister fa, ++ int32_t disp) { ++ DCHECK(fa.is_valid() && is_int21(disp)); ++ Instr instr = opcode | (fa.code() << sRaShift) | (disp & sImm21Mask); ++ emitSW(instr); ++} ++ ++ ++void Assembler::GenInstrM_SW(Opcode_ops_mem opcode, ++ Register Ra, ++ int16_t disp, ++ Register Rb) { ++ DCHECK(Ra.is_valid() && Rb.is_valid() && is_int16(disp)); ++ Instr instr = opcode | (Ra.code() << sRaShift) | (Rb.code() << sRbShift) | ++ (disp & sImm16Mask); ++ emitSW(instr); ++} ++ ++ ++void Assembler::GenInstrFM_SW(Opcode_ops_mem opcode, ++ FloatRegister fa, ++ int16_t disp, ++ Register Rb) { ++ DCHECK(fa.is_valid() && Rb.is_valid() && is_int16(disp)); ++ Instr instr = opcode | (fa.code() << sRaShift) | (Rb.code() << sRbShift) | ++ (disp & sImm16Mask); ++ emitSW(instr); ++} ++ ++ ++void Assembler::GenInstrMWithFun_SW(Opcode_ops_atmem opcode, ++ Register Ra, ++ int16_t disp, ++ Register Rb) { ++ DCHECK(Ra.is_valid() && Rb.is_valid() && is_int12(disp)); ++ Instr instr = opcode | (Ra.code() << sRaShift) | (Rb.code() << sRbShift) | ++ (disp & sImm12Mask); ++ emitSW(instr); ++} ++ ++ ++void Assembler::GenInstrR_SW(Opcode_ops_opr opcode, ++ Register Ra, ++ Register Rb, ++ Register Rc) { ++ DCHECK(Ra.is_valid() && Rb.is_valid() && Rc.is_valid()); ++ Instr instr = opcode | (Ra.code() << sRaShift) | (Rb.code() << sRbShift) | ++ (Rc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++ ++void Assembler::GenInstrI_SW(Opcode_ops_oprl opcode, ++ Register Ra, ++ int16_t imm, ++ Register Rc) { ++ DCHECK(Ra.is_valid() && is_uint8(imm) && Rc.is_valid()); ++ Instr instr = opcode | (Ra.code() << sRaShift) | ( (imm << sImm8Shift) & sImm8Mask) | ++ (Rc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++ ++// Float-point ALU instructions. 
++void Assembler::GenInstrFR_SW(Opcode_ops_fp opcode, ++ FloatRegister fa, ++ FloatRegister fb, ++ FloatRegister fc) { ++ DCHECK(fa.is_valid() && fb.is_valid() && fc.is_valid()); ++ Instr instr = opcode | (fa.code() << sRaShift) | (fb.code() << sRbShift) | ++ (fc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++void Assembler::GenInstrFR_SW(Opcode_ops_fp opcode, ++ FloatRegister fb, ++ FloatRegister fc) { ++ DCHECK(fb.is_valid() && fc.is_valid()); ++ Instr instr = opcode | (fb.code() << sRbShift) | ++ (fc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++//20180914 ++void Assembler::GenInstrFR_SW(Opcode_ops_fpl opcode, ++ FloatRegister fa, ++ int16_t imm, ++ FloatRegister fc) { ++ DCHECK(fa.is_valid() && is_uint8(imm) && fc.is_valid()); ++ Instr instr = opcode | (fa.code() << sRaShift) | ((imm << sImm8Shift) & sImm8Mask) | ++ (fc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++void Assembler::GenInstrFR_SW(Opcode_ops_fpl opcode, ++ FloatRegister fa, ++ FloatRegister fb, ++ int16_t fmalit, ++ FloatRegister fc) { ++ DCHECK(fa.is_valid() && fb.is_valid() && is_uint5(fmalit) && fc.is_valid()); ++ Instr instr = opcode | (fa.code() << sRaShift) | (fb.code() << sRbShift) | ++ ((fmalit << sImm5Shift) & sImm5Mask) | (fc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++void Assembler::GenInstrFMA_SW(Opcode_ops_fmal opcode, ++ FloatRegister fa, ++ FloatRegister fb, ++ int16_t fmalit, ++ FloatRegister fc) { ++ DCHECK(fa.is_valid() && fb.is_valid() && is_uint5(fmalit) && fc.is_valid()); ++ Instr instr = opcode | (fa.code() << sRaShift) | (fb.code() << sRbShift) | ++ ((fmalit << sImm5Shift) & sImm5Mask) | (fc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++void Assembler::GenInstrFMA_SW(Opcode_ops_fmal opcode, ++ FloatRegister fa, ++ int16_t fmalit, ++ FloatRegister fc) { ++ DCHECK(fa.is_valid() && is_uint5(fmalit) && fc.is_valid()); ++ Instr instr = opcode | (fa.code() << sRaShift) | ++ ((fmalit << sImm5Shift) & sImm5Mask) | (fc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++void Assembler::GenInstrFMA_SW(Opcode_ops_fma opcode, ++ FloatRegister fa, ++ FloatRegister fb, ++ FloatRegister fc) { ++ DCHECK(fa.is_valid() && fb.is_valid() && fc.is_valid()); ++ Instr instr = opcode | (fa.code() << sRaShift) | (fb.code() << sRbShift) | ++ (fc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++void Assembler::GenInstrSIMD_SW(Opcode_ops_atmem opcode, ++ FloatRegister fa, ++ int16_t atmdisp, ++ Register Rb) { ++ DCHECK(fa.is_valid() && is_uint11(atmdisp) && Rb.is_valid()); ++ Instr instr = opcode | (fa.code() << sRaShift) | ++ ((atmdisp << sImm11Shift) & sImm11Mask) | (Rb.code() << sRbShift); ++ emitSW(instr); ++} ++ ++// FMA + FSEL** instructions. ++void Assembler::GenInstrFMA_SW(Opcode_ops_fma opcode, ++ FloatRegister fa, ++ FloatRegister fb, ++ FloatRegister f3, ++ FloatRegister fc) { ++ DCHECK(fa.is_valid() && fb.is_valid() && f3.is_valid() && fc.is_valid()); ++ Instr instr = opcode | (fa.code() << sRaShift) | (fb.code() << sRbShift) | ++ (f3.code() << sR3Shift) | (fc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++ ++// SEL** instructions. ++void Assembler::GenInstrSelR_SW(Opcode_ops_sel opcode, ++ Register Ra, ++ Register Rb, ++ Register R3, ++ Register Rc) { ++ DCHECK(Ra.is_valid() && Rb.is_valid() && R3.is_valid() && Rc.is_valid()); ++ Instr instr = opcode | (Ra.code() << sRaShift) | (Rb.code() << sRbShift) | ++ (R3.code() << sR3Shift) | (Rc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++ ++// SEL**_l instructions. 
++void Assembler::GenInstrSelI_SW(Opcode_ops_sel_l opcode, ++ Register Ra, ++ int32_t imm, ++ Register R3, ++ Register Rc) { ++ DCHECK(Ra.is_valid() && is_int8(imm) && R3.is_valid() && Rc.is_valid()); ++ Instr instr = opcode | (Ra.code() << sRaShift) | ((imm << sImm8Shift) & sImm8Mask) | ++ (R3.code() << sR3Shift) | (Rc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++ ++// All SW64 instructions ++ ++void Assembler::sys_call_b(int palfn) { ++ DCHECK(is_int26(palfn)); ++ Instr instr = op_sys_call | palfn; ++ emitSW(instr); ++} ++ ++ ++void Assembler::sys_call(int palfn) { ++ DCHECK(is_int26(palfn)); ++ Instr instr = op_sys_call | ( palfn & (( 1 << 26 ) - 1)); ++ emitSW(instr); ++} ++ ++ ++void Assembler::call(Register Ra, Register Rb, int jmphint) { ++ // call ra, (rb), jmphint; ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ GenInstrM_SW(op_call, Ra, jmphint, Rb); ++} ++ ++ ++void Assembler::ret(Register Ra, Register Rb, int rethint) { ++ // ret ra, (rb), rethint; ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ GenInstrM_SW(op_ret, Ra, rethint, Rb); ++} ++ ++ ++void Assembler::jmp(Register Ra, Register Rb, int jmphint) { ++ // jmp ra, (rb), jmphint; ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ GenInstrM_SW(op_jmp, Ra, jmphint, Rb); ++} ++ ++ ++void Assembler::br(Register Ra, int bdisp) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ GenInstrB_SW(op_br, Ra, bdisp); ++} ++ ++ ++void Assembler::bsr(Register Ra, int bdisp) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ GenInstrB_SW(op_bsr, Ra, bdisp); ++} ++ ++ ++void Assembler::memb(void) { ++ Instr instr = op_memb; ++ emitSW(instr); ++} ++ ++ ++void Assembler::imemb(void) { ++ Instr instr = op_imemb; ++ emitSW(instr); ++} ++ ++void Assembler::wmemb(void) { ++ DCHECK(kArchVariant == kSw64r3); ++ Instr instr = op_wmemb; ++ emitSW(instr); ++} ++ ++ ++void Assembler::rtc(Register Ra, Register Rb) { ++ DCHECK(Ra.is_valid() && Rb.is_valid()); ++ Instr instr = op_rtc | Ra.code() << sRaShift | Rb.code() << sRbShift; ++ emitSW(instr); ++} ++ ++ ++void Assembler::rcid(Register Ra) { ++ DCHECK(Ra.is_valid()); ++ Instr instr = op_rcid | Ra.code() << sRaShift; ++ emitSW(instr); ++} ++ ++ ++void Assembler::halt(void) { ++ Instr instr = op_halt; ++ emitSW(instr); ++} ++ ++ ++void Assembler::rd_f(Register Ra) { ++ DCHECK(Ra.is_valid()); ++ Instr instr = op_rd_f | Ra.code() << sRaShift; ++ emitSW(instr); ++} ++ ++ ++void Assembler::wr_f(Register Ra) { ++ DCHECK(Ra.is_valid()); ++ Instr instr = op_wr_f | Ra.code() << sRaShift; ++ emitSW(instr); ++} ++ ++ ++void Assembler::rtid(Register Ra) { ++ DCHECK(Ra.is_valid()); ++ Instr instr = op_rtid | Ra.code() << sRaShift; ++ emitSW(instr); ++} ++ ++ ++void Assembler::csrrs(Register Ra, int rpiindex) { ++ DCHECK(kArchVariant == kSw64r3); ++ DCHECK(Ra.is_valid() && is_uint8(rpiindex)); ++ Instr instr = op_csrrs | (Ra.code() << sRaShift) | (rpiindex & sRpiMask); ++ emitSW(instr); ++} ++ ++ ++void Assembler::csrrc(Register Ra, int rpiindex) { ++ DCHECK(kArchVariant == kSw64r3); ++ DCHECK(Ra.is_valid() && is_uint8(rpiindex)); ++ Instr instr = op_csrrc | (Ra.code() << sRaShift) | (rpiindex & sRpiMask); ++ emitSW(instr); ++} ++ ++ ++void Assembler::csrr(Register Ra, int rpiindex) { ++ DCHECK(Ra.is_valid() && is_uint8(rpiindex)); ++ Instr instr = op_csrr | (Ra.code() << sRaShift) | (rpiindex & sRpiMask); ++ emitSW(instr); ++} ++ ++ ++void Assembler::csrw(Register Ra, int rpiindex) { ++ DCHECK(Ra.is_valid() && is_uint8(rpiindex)); ++ Instr instr = op_csrw | 
(Ra.code() << sRaShift) | (rpiindex & sRpiMask); ++ emitSW(instr); ++} ++ ++ ++void Assembler::pri_ret(Register Ra) { ++ DCHECK(Ra.is_valid()); ++ Instr instr = op_pri_ret | Ra.code() << sRaShift; ++ emitSW(instr); ++} ++ ++ ++void Assembler::lldw(Register Ra, int atmdisp, Register Rb) { ++ GenInstrMWithFun_SW(op_lldw, Ra, atmdisp, Rb); ++} ++ ++ ++void Assembler::lldl(Register Ra, int atmdisp, Register Rb) { ++ GenInstrMWithFun_SW(op_lldl, Ra, atmdisp, Rb); ++} ++ ++ ++void Assembler::ldw_inc(Register Ra, int atmdisp, Register Rb) { ++ GenInstrMWithFun_SW(op_ldw_inc, Ra, atmdisp, Rb); ++} ++ ++ ++void Assembler::ldl_inc(Register Ra, int atmdisp, Register Rb) { ++ GenInstrMWithFun_SW(op_ldl_inc, Ra, atmdisp, Rb); ++} ++ ++ ++void Assembler::ldw_dec(Register Ra, int atmdisp, Register Rb) { ++ GenInstrMWithFun_SW(op_ldw_dec, Ra, atmdisp, Rb); ++} ++ ++ ++void Assembler::ldl_dec(Register Ra, int atmdisp, Register Rb) { ++ GenInstrMWithFun_SW(op_ldl_dec, Ra, atmdisp, Rb); ++} ++ ++ ++void Assembler::ldw_set(Register Ra, int atmdisp, Register Rb) { ++ GenInstrMWithFun_SW(op_ldw_set, Ra, atmdisp, Rb); ++} ++ ++ ++void Assembler::ldl_set(Register Ra, int atmdisp, Register Rb) { ++ GenInstrMWithFun_SW(op_ldl_set, Ra, atmdisp, Rb); ++} ++ ++ ++void Assembler::lstw(Register Ra, int atmdisp, Register Rb) { ++ GenInstrMWithFun_SW(op_lstw, Ra, atmdisp, Rb); ++} ++ ++ ++void Assembler::lstl(Register Ra, int atmdisp, Register Rb) { ++ GenInstrMWithFun_SW(op_lstl, Ra, atmdisp, Rb); ++} ++ ++ ++void Assembler::ldw_nc(Register Ra, int atmdisp, Register Rb) { ++ DCHECK(Ra.is_valid() && is_uint11(atmdisp) && Rb.is_valid()); ++ Instr instr = op_ldw_nc | (Ra.code() << sRaShift) ++ | ((atmdisp << sImm11Shift) & sImm11Mask) | (Rb.code() << sRbShift); ++ emitSW(instr); ++} ++ ++ ++void Assembler::ldl_nc(Register Ra, int atmdisp, Register Rb) { ++ DCHECK(Ra.is_valid() && is_uint11(atmdisp) && Rb.is_valid()); ++ Instr instr = op_ldl_nc | (Ra.code() << sRaShift) ++ | ((atmdisp << sImm11Shift) & sImm11Mask) | (Rb.code() << sRbShift); ++ emitSW(instr); ++} ++ ++ ++void Assembler::ldd_nc(Register Ra, int atmdisp, Register Rb) { ++ DCHECK(Ra.is_valid() && is_uint11(atmdisp) && Rb.is_valid()); ++ Instr instr = op_ldd_nc | (Ra.code() << sRaShift) ++ | ((atmdisp << sImm11Shift) & sImm11Mask) | (Rb.code() << sRbShift); ++ emitSW(instr); ++} ++ ++ ++void Assembler::stw_nc(Register Ra, int atmdisp, Register Rb) { ++ DCHECK(Ra.is_valid() && is_uint11(atmdisp) && Rb.is_valid()); ++ Instr instr = op_stw_nc | (Ra.code() << sRaShift) | ++ ((atmdisp << sImm11Shift) & sImm11Mask) | (Rb.code() << sRbShift); ++ emitSW(instr); ++} ++ ++ ++void Assembler::stl_nc(Register Ra, int atmdisp, Register Rb) { ++ DCHECK(Ra.is_valid() && is_uint11(atmdisp) && Rb.is_valid()); ++ Instr instr = op_stl_nc | (Ra.code() << sRaShift) | ++ ((atmdisp << sImm11Shift) & sImm11Mask) | (Rb.code() << sRbShift); ++ emitSW(instr); ++} ++ ++ ++void Assembler::std_nc(Register Ra, int atmdisp, Register Rb) { ++ DCHECK(Ra.is_valid() && is_uint11(atmdisp) && Rb.is_valid()); ++ Instr instr = op_std_nc | (Ra.code() << sRaShift) | ++ ((atmdisp << sImm11Shift) & sImm11Mask) | (Rb.code() << sRbShift); ++ emitSW(instr); ++} ++ ++ ++void Assembler::ldwe(FloatRegister fa, int mdisp, Register Rb) { ++ GenInstrFM_SW(op_ldwe, fa, mdisp, Rb); ++} ++ ++ ++void Assembler::ldse(FloatRegister fa, int mdisp, Register Rb) { ++ GenInstrFM_SW(op_ldse, fa, mdisp, Rb); ++} ++ ++ ++void Assembler::ldde(FloatRegister fa, int mdisp, Register Rb) { ++ GenInstrFM_SW(op_ldde, fa, mdisp, Rb); 
++} ++ ++ ++void Assembler::vlds(FloatRegister fa, int mdisp, Register Rb) { ++ GenInstrFM_SW(op_vlds, fa, mdisp, Rb); ++} ++ ++ ++void Assembler::vldd(FloatRegister fa, int mdisp, Register Rb) { ++ GenInstrFM_SW(op_vldd, fa, mdisp, Rb); ++} ++ ++ ++void Assembler::vsts(FloatRegister fa, int mdisp, Register Rb) { ++ GenInstrFM_SW(op_vsts, fa, mdisp, Rb); ++} ++ ++ ++void Assembler::vstd(FloatRegister fa, int mdisp, Register Rb) { ++ GenInstrFM_SW(op_vstd, fa, mdisp, Rb); ++} ++ ++ ++void Assembler::addw(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_addw, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::addw(Register Ra, int imm, Register Rc) { ++ GenInstrI_SW(op_addw_l, Ra, imm, Rc); ++} ++ ++ ++void Assembler::subw(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_subw, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::subw(Register Ra, int imm, Register Rc) { ++ GenInstrI_SW(op_subw_l, Ra, imm, Rc); ++} ++ ++ ++void Assembler::s4addw(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_s4addw, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::s4addw(Register Ra, int imm, Register Rc) { ++ GenInstrI_SW(op_s4addw_l, Ra, imm, Rc); ++} ++ ++ ++void Assembler::s4subw(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_s4subw, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::s4subw(Register Ra, int imm, Register Rc) { ++ GenInstrI_SW(op_s4subw_l, Ra, imm, Rc); ++} ++ ++ ++void Assembler::s8addw(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_s8addw, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::s8addw(Register Ra, int imm, Register Rc) { ++ GenInstrI_SW(op_s8addw_l, Ra, imm, Rc); ++} ++ ++ ++void Assembler::s8subw(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_s8subw, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::s8subw(Register Ra, int imm, Register Rc) { ++ GenInstrI_SW(op_s8subw_l, Ra, imm, Rc); ++} ++ ++ ++void Assembler::addl(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_addl, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::addl(Register Ra, int imm, Register Rc) { ++ GenInstrI_SW(op_addl_l, Ra, imm, Rc); ++} ++ ++ ++void Assembler::subl(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_subl, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::subl(Register Ra, int imm, Register Rc) { ++ GenInstrI_SW(op_subl_l, Ra, imm, Rc); ++} ++ ++ ++void Assembler::s4addl(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_s4addl, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::s4addl(Register Ra, int imm, Register Rc) { ++ GenInstrI_SW(op_s4addl_l, Ra, imm, Rc); ++} ++ ++ ++void Assembler::s4subl(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_s4subl, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::s4subl(Register Ra, int imm, Register Rc) { ++ GenInstrI_SW(op_s4subl_l, Ra, imm, Rc); ++} ++ ++ ++void Assembler::s8addl(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_s8addl, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::s8addl(Register Ra, int imm, Register Rc) { ++ GenInstrI_SW(op_s8addl_l, Ra, imm, Rc); ++} ++ ++ ++void Assembler::s8subl(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_s8subl, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::s8subl(Register Ra, int imm, Register Rc) { ++ GenInstrI_SW(op_s8subl_l, Ra, imm, Rc); ++} ++ ++ ++void Assembler::mulw(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_mulw, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::mulw(Register Ra, int imm, Register Rc) { ++ GenInstrI_SW(op_mulw_l, Ra, imm, Rc); ++} ++ ++ ++void Assembler::divw(Register Ra, Register Rb, Register Rc) { ++ DCHECK(kArchVariant == kSw64r3); ++ 
GenInstrR_SW(op_divw, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::udivw(Register Ra, Register Rb, Register Rc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrR_SW(op_udivw, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::remw(Register Ra, Register Rb, Register Rc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrR_SW(op_remw, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::uremw(Register Ra, Register Rb, Register Rc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrR_SW(op_uremw, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::mull(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_mull, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::mull(Register Ra, int imm, Register Rc) { ++ GenInstrI_SW(op_mull_l, Ra, imm, Rc); ++} ++ ++ ++void Assembler::umulh(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_umulh, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::umulh(Register Ra, int imm, Register Rc) { ++ GenInstrI_SW(op_umulh_l, Ra, imm, Rc); ++} ++ ++ ++void Assembler::divl(Register Ra, Register Rb, Register Rc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrR_SW(op_divl, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::udivl(Register Ra, Register Rb, Register Rc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrR_SW(op_udivl, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::reml(Register Ra, Register Rb, Register Rc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrR_SW(op_reml, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::ureml(Register Ra, Register Rb, Register Rc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrR_SW(op_ureml, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::addpi(int apint, Register Rc) { ++ DCHECK(kArchVariant == kSw64r3); ++ DCHECK(is_uint13(apint) && Rc.is_valid()); ++ Instr instr = op_addpi | ((apint << sImm13Shift) & sImm13Mask) | ++ (Rc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++ ++void Assembler::addpis(int apint, Register Rc) { ++ DCHECK(kArchVariant == kSw64r3); ++ DCHECK(is_uint13(apint) && Rc.is_valid()); ++ Instr instr = op_addpis | ( (apint << sImm13Shift) & sImm13Mask) | ++ (Rc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++ ++void Assembler::cmpeq(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_cmpeq, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::cmpeq(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_cmpeq_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::cmplt(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_cmplt, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::cmplt(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_cmplt_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::cmple(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_cmple, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::cmple(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_cmple_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::cmpult(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_cmpult, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::cmpult(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_cmpult_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::cmpule(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_cmpule, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::cmpule(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_cmpule_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::sbt(Register Ra, Register Rb, Register Rc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrR_SW(op_sbt, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::sbt(Register Ra, int lit, Register Rc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrI_SW(op_sbt_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::cbt(Register Ra, Register Rb, Register Rc) { ++ 
DCHECK(kArchVariant == kSw64r3); ++ GenInstrR_SW(op_cbt, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::cbt(Register Ra, int lit, Register Rc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrI_SW(op_cbt_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::and_ins(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_and, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::and_ins(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_and_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::bic(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_bic, Ra, Rb, Rc); ++} ++ ++void Assembler::bic(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_bic_l, Ra, lit, Rc); ++} ++ ++void Assembler::andnot(Register Ra, Register Rb, Register Rc) { ++ bic(Ra, Rb, Rc ); ++} ++ ++void Assembler::andnot(Register Ra, int lit, Register Rc) { ++ bic(Ra, lit, Rc ); ++} ++ ++void Assembler::bis(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_bis, Ra, Rb, Rc); ++} ++ ++void Assembler::bis(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_bis_l, Ra, lit, Rc); ++} ++ ++void Assembler::or_ins(Register Ra, Register Rb, Register Rc) { ++ //GenInstrR_SW(op_bis, Ra, Rb, Rc); ++ bis(Ra, Rb, Rc); ++} ++ ++void Assembler::or_ins(Register Ra, int lit, Register Rc) { ++ //GenInstrI_SW(op_bis_l, Ra, lit, Rc); ++ bis(Ra, lit, Rc); ++} ++ ++void Assembler::ornot(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_ornot, Ra, Rb, Rc); ++} ++ ++void Assembler::ornot(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_ornot_l, Ra, lit, Rc); ++} ++ ++void Assembler::xor_ins(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_xor, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::xor_ins(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_xor_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::eqv(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_eqv, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::eqv(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_eqv_l, Ra, lit, Rc); ++} ++ ++ ++// 0x10.40-0x10.47 INS[0-7]B ++void Assembler::inslb(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_inslb, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::inslb(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_inslb_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::inslh(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_inslh, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::inslh(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_inslh_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::inslw(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_inslw, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::inslw(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_inslw_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::insll(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_insll, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::insll(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_insll_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::inshb(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_inshb, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::inshb(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_inshb_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::inshh(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_inshh, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::inshh(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_inshh_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::inshw(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_inshw, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::inshw(Register Ra, int lit, 
Register Rc) { ++ GenInstrI_SW(op_inshw_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::inshl(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_inshl, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::inshl(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_inshl_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::slll(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_slll, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::slll(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_slll_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::srll(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_srll, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::srll(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_srll_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::sral(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_sral, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::sral(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_sral_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::roll(Register Ra, Register Rb, Register Rc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrR_SW(op_roll, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::roll(Register Ra, int lit, Register Rc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrI_SW(op_roll_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::sllw(Register Ra, Register Rb, Register Rc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrR_SW(op_sllw, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::sllw(Register Ra, int lit, Register Rc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrI_SW(op_sllw_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::srlw(Register Ra, Register Rb, Register Rc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrR_SW(op_srlw, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::srlw(Register Ra, int lit, Register Rc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrI_SW(op_srlw_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::sraw(Register Ra, Register Rb, Register Rc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrR_SW(op_sraw, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::sraw(Register Ra, int lit, Register Rc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrI_SW(op_sraw_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::rolw(Register Ra, Register Rb, Register Rc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrR_SW(op_rolw, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::rolw(Register Ra, int lit, Register Rc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrI_SW(op_rolw_l, Ra, lit, Rc); ++} ++ ++ ++// 0x10.50-0x10.57 EXT[0-7]B ++void Assembler::extlb(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_extlb, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::extlb(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_extlb_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::extlh(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_extlh, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::extlh(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_extlh_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::extlw(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_extlw, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::extlw(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_extlw_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::extll(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_extll, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::extll(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_extll_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::exthb(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_exthb, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::exthb(Register Ra, int lit, Register Rc) { ++ 
GenInstrI_SW(op_exthb_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::exthh(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_exthh, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::exthh(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_exthh_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::exthw(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_exthw, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::exthw(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_exthw_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::exthl(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_exthl, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::exthl(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_exthl_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::ctpop(Register Rb, Register Rc) { ++ DCHECK(Rb.is_valid() && Rc.is_valid()); ++ Instr instr = op_ctpop | Rb.code() << sRbShift | Rc.code() << sRcShift; ++ emitSW(instr); ++} ++ ++ ++void Assembler::ctlz(Register Rb, Register Rc) { ++ DCHECK(Rb.is_valid() && Rc.is_valid()); ++ Instr instr = op_ctlz | Rb.code() << sRbShift | Rc.code() << sRcShift; ++ emitSW(instr); ++} ++ ++ ++void Assembler::cttz(Register Rb, Register Rc) { ++ DCHECK(Rb.is_valid() && Rc.is_valid()); ++ Instr instr = op_cttz | Rb.code() << sRbShift | Rc.code() << sRcShift; ++ emitSW(instr); ++} ++ ++ ++void Assembler::revbh(Register Rb, Register Rc) { ++ DCHECK(kArchVariant == kSw64r3); ++ DCHECK(Rb.is_valid() && Rc.is_valid()); ++ Instr instr = op_revbh | Rb.code() << sRbShift | Rc.code() << sRcShift; ++ emitSW(instr); ++} ++ ++ ++void Assembler::revbw(Register Rb, Register Rc) { ++ DCHECK(kArchVariant == kSw64r3); ++ DCHECK(Rb.is_valid() && Rc.is_valid()); ++ Instr instr = op_revbw | Rb.code() << sRbShift | Rc.code() << sRcShift; ++ emitSW(instr); ++} ++ ++ ++void Assembler::revbl(Register Rb, Register Rc) { ++ DCHECK(kArchVariant == kSw64r3); ++ DCHECK(Rb.is_valid() && Rc.is_valid()); ++ Instr instr = op_revbl | Rb.code() << sRbShift | Rc.code() << sRcShift; ++ emitSW(instr); ++} ++ ++ ++void Assembler::casw(Register Ra, Register Rb, Register Rc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrR_SW(op_casw, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::casl(Register Ra, Register Rb, Register Rc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrR_SW(op_casl, Ra, Rb, Rc); ++} ++ ++ ++// 0x10.60-0x10.67 MASK[0-7]B ++void Assembler::masklb(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_masklb, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::masklb(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_masklb_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::masklh(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_masklh, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::masklh(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_masklh_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::masklw(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_masklw, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::masklw(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_masklw_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::maskll(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_maskll, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::maskll(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_maskll_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::maskhb(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_maskhb, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::maskhb(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_maskhb_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::maskhh(Register Ra, Register 
Rb, Register Rc) { ++ GenInstrR_SW(op_maskhh, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::maskhh(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_maskhh_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::maskhw(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_maskhw, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::maskhw(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_maskhw_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::maskhl(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_maskhl, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::maskhl(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_maskhl_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::zap(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_zap, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::zap(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_zap_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::zapnot(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_zapnot, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::zapnot(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_zapnot_l, Ra, lit, Rc); ++} ++ ++ ++void Assembler::sextb(Register Rb, Register Rc) { ++ DCHECK(Rb.is_valid() && Rc.is_valid()); ++ Instr instr = op_sextb | (Rb.code() << sRbShift) | ++ (Rc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++ ++void Assembler::sextb(int lit, Register Rc) { ++ DCHECK(is_uint8(lit) && Rc.is_valid()); ++ Instr instr = op_sextb_l | (lit & sImm8Mask) | ++ (Rc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++ ++void Assembler::sexth(Register Rb, Register Rc) { ++ DCHECK(Rb.is_valid() && Rc.is_valid()); ++ Instr instr = op_sexth | (Rb.code() << sRbShift) | ++ (Rc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++ ++void Assembler::sexth(int lit, Register Rc) { ++ DCHECK(is_uint8(lit) && Rc.is_valid()); ++ Instr instr = op_sexth_l | (lit & sImm8Mask) | ++ (Rc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++ ++//0x10.6c CMPGEB ++void Assembler::cmpgeb(Register Ra, Register Rb, Register Rc) { ++ GenInstrR_SW(op_cmpgeb, Ra, Rb, Rc); ++} ++ ++ ++void Assembler::cmpgeb(Register Ra, int lit, Register Rc) { ++ GenInstrI_SW(op_cmpgeb_l, Ra, lit, Rc); ++} ++ ++ ++//void Assembler::ftois(FloatRegister fa, Register Rc ) ++//void Assembler::ftoid(FloatRegister fa, Register Rc ) ++void Assembler::fimovs(FloatRegister fa, Register Rc) { ++ DCHECK(fa.is_valid() && Rc.is_valid()); ++ Instr instr = op_fimovs | (fa.code() << sRaShift) | ++ (Rc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++ ++void Assembler::fimovd(FloatRegister fa, Register Rc) { ++ DCHECK(fa.is_valid() && Rc.is_valid() ); ++ Instr instr = op_fimovd | (fa.code() << sRaShift) | ++ (Rc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++ ++void Assembler::seleq(Register Ra, Register Rb, Register R3, Register Rc) { ++ GenInstrSelR_SW(op_seleq, Ra, Rb, R3, Rc); ++} ++ ++ ++void Assembler::seleq(Register Ra, int lit, Register R3, Register Rc) { ++ GenInstrSelI_SW(op_seleq_l, Ra, lit, R3, Rc); ++} ++ ++ ++void Assembler::selge(Register Ra, Register Rb, Register R3, Register Rc) { ++ GenInstrSelR_SW(op_selge, Ra, Rb, R3, Rc); ++} ++ ++ ++void Assembler::selge(Register Ra, int lit, Register R3, Register Rc) { ++ GenInstrSelI_SW(op_selge_l, Ra, lit, R3, Rc); ++} ++ ++ ++void Assembler::selgt(Register Ra, Register Rb, Register R3, Register Rc) { ++ GenInstrSelR_SW(op_selgt, Ra, Rb, R3, Rc); ++} ++ ++ ++void Assembler::selgt(Register Ra, int lit, Register R3, Register Rc) { ++ GenInstrSelI_SW(op_selgt_l, Ra, lit, R3, Rc); ++} ++ ++ ++void Assembler::selle(Register Ra, Register Rb, Register R3, 
Register Rc) { ++ GenInstrSelR_SW(op_selle, Ra, Rb, R3, Rc); ++} ++ ++ ++void Assembler::selle(Register Ra, int lit, Register R3, Register Rc) { ++ GenInstrSelI_SW(op_selle_l, Ra, lit, R3, Rc); ++} ++ ++ ++void Assembler::sellt(Register Ra, Register Rb, Register R3, Register Rc) { ++ GenInstrSelR_SW(op_sellt, Ra, Rb, R3, Rc); ++} ++ ++ ++void Assembler::sellt(Register Ra, int lit, Register R3, Register Rc) { ++ GenInstrSelI_SW(op_sellt_l, Ra, lit, R3, Rc); ++} ++ ++ ++void Assembler::selne(Register Ra, Register Rb, Register R3, Register Rc) { ++ GenInstrSelR_SW(op_selne, Ra, Rb, R3, Rc); ++} ++ ++ ++void Assembler::selne(Register Ra, int lit, Register R3, Register Rc) { ++ GenInstrSelI_SW(op_selne_l, Ra, lit, R3, Rc); ++} ++ ++ ++void Assembler::sellbc(Register Ra, Register Rb, Register R3, Register Rc) { ++ GenInstrSelR_SW(op_sellbc, Ra, Rb, R3, Rc); ++} ++ ++ ++void Assembler::sellbc(Register Ra, int lit, Register R3, Register Rc) { ++ GenInstrSelI_SW(op_sellbc_l, Ra, lit, R3, Rc); ++} ++ ++ ++void Assembler::sellbs(Register Ra, Register Rb, Register R3, Register Rc) { ++ GenInstrSelR_SW(op_sellbs, Ra, Rb, R3, Rc); ++} ++ ++ ++void Assembler::sellbs(Register Ra, int lit, Register R3, Register Rc) { ++ GenInstrSelI_SW(op_sellbs_l, Ra, lit, R3, Rc); ++} ++ ++ ++void Assembler::vlog(int vlog, FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ UNREACHABLE(); ++} ++ ++void Assembler::f_exclude_same_src_fc(Opcode_ops_fp opcode, FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ if (fa == fc || fb == fc) { ++ DCHECK(fa != kScratchDoubleReg && fb != kScratchDoubleReg); ++ GenInstrFR_SW(opcode, fa, fb, kScratchDoubleReg); ++ fmov(kScratchDoubleReg, fc); ++ } else { ++ GenInstrFR_SW(opcode, fa, fb, fc); ++ } ++} ++ ++void Assembler::f_exclude_same_src_fc(Opcode_ops_fp opcode, FloatRegister fb, FloatRegister fc) { ++ if (fb == fc) { ++ DCHECK(fb != kScratchDoubleReg); ++ GenInstrFR_SW(opcode, fb, kScratchDoubleReg); ++ fmov(kScratchDoubleReg, fc); ++ } else { ++ GenInstrFR_SW(opcode, fb, fc); ++ } ++} ++ ++void Assembler::vbisw(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_vbisw, fa, fb, fc); ++} ++ ++void Assembler::vxorw(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_vxorw, fa, fb, fc); ++} ++ ++void Assembler::vandw(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_vandw, fa, fb, fc); ++} ++ ++void Assembler::veqvw(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_veqvw, fa, fb, fc); ++} ++ ++void Assembler::vornotw(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_vornotw, fa, fb, fc); ++} ++ ++void Assembler::vbicw(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_vbicw, fa, fb, fc); ++} ++ ++void Assembler::fadds(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_fadds, fa, fb, fc); ++} ++ ++void Assembler::faddd(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_faddd, fa, fb, fc); ++} ++ ++void Assembler::fsubs(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_fsubs, fa, fb, fc); ++} ++ ++void Assembler::fsubd(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_fsubd, fa, fb, fc); ++} ++ ++void Assembler::fmuls(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_fmuls, fa, fb, fc); ++} ++ 
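++// The scalar FP arithmetic emitters above and below route through
++// f_exclude_same_src_fc(): when the destination register aliases one of the
++// sources, the result is produced in kScratchDoubleReg first and then copied to
++// the requested destination with fmov(). A minimal standalone sketch of that
++// aliasing guard (illustrative names, not the V8 types):
++//
++//   template <typename Reg, typename EmitFn, typename MoveFn>
++//   void emit_no_dst_alias(Reg a, Reg b, Reg dst, Reg scratch,
++//                          EmitFn emit, MoveFn move) {
++//     if (a == dst || b == dst) {
++//       emit(a, b, scratch);   // compute into the scratch register first
++//       move(scratch, dst);    // then copy scratch -> dst
++//     } else {
++//       emit(a, b, dst);       // no aliasing, emit directly
++//     }
++//   }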
++void Assembler::fmuld(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_fmuld, fa, fb, fc); ++} ++ ++void Assembler::fdivs(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_fdivs, fa, fb, fc); ++} ++ ++void Assembler::fdivd(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_fdivd, fa, fb, fc); ++} ++ ++void Assembler::fsqrts(FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_fsqrts, fb, fc); ++} ++ ++void Assembler::fsqrtd(FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_fsqrtd, fb, fc); ++} ++ ++ ++void Assembler::fcmpeq(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_fcmpeq, fa, fb, fc); ++} ++ ++ ++void Assembler::fcmple(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_fcmple, fa, fb, fc); ++} ++ ++ ++void Assembler::fcmplt(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_fcmplt, fa, fb, fc); ++} ++ ++ ++void Assembler::fcmpun(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_fcmpun, fa, fb, fc); ++} ++ ++ ++void Assembler::fcvtsd(FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_fcvtsd, fb, fc); ++} ++ ++ ++void Assembler::fcvtds(FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_fcvtds, fb, fc); ++} ++ ++ ++void Assembler::fcvtdl_g(FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_fcvtdl_g, fb, fc); ++} ++ ++ ++void Assembler::fcvtdl_p(FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_fcvtdl_p, fb, fc); ++} ++ ++ ++void Assembler::fcvtdl_z(FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_fcvtdl_z, fb, fc); ++} ++ ++ ++void Assembler::fcvtdl_n(FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_fcvtdl_n, fb, fc); ++} ++ ++ ++void Assembler::fcvtdl(FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_fcvtdl, fb, fc); ++} ++ ++ ++void Assembler::fcvtwl(FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_fcvtwl, fb, fc); ++} ++ ++ ++void Assembler::fcvtlw(FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_fcvtlw, fb, fc); ++} ++ ++ ++void Assembler::fcvtls(FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_fcvtls, fb, fc); ++} ++ ++ ++void Assembler::fcvtld(FloatRegister fb, FloatRegister fc) { ++ f_exclude_same_src_fc(op_fcvtld, fb, fc); ++} ++ ++ ++void Assembler::fcpys(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ GenInstrFR_SW(op_fcpys, fa, fb, fc); ++} ++ ++ ++void Assembler::fcpyse(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ GenInstrFR_SW(op_fcpyse, fa, fb, fc); ++} ++ ++ ++void Assembler::fcpysn(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ GenInstrFR_SW(op_fcpysn, fa, fb, fc); ++} ++ ++ ++void Assembler::ifmovs(Register Ra, FloatRegister fc) { ++ DCHECK(Ra.is_valid() && fc.is_valid()); ++ Instr instr = op_ifmovs | (Ra.code() << sRaShift) | ++ (fc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++ ++void Assembler::ifmovd(Register Ra, FloatRegister fc) { ++ DCHECK(Ra.is_valid() && fc.is_valid()); ++ Instr instr = op_ifmovd | (Ra.code() << sRaShift) | ++ (fc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++ ++void Assembler::rfpcr(FloatRegister fa) { ++ DCHECK(fa.is_valid()); ++ Instr instr = op_rfpcr | (fa.code() << sRaShift); ++ emitSW(instr); ++} ++ ++ ++void Assembler::wfpcr(FloatRegister fa) { ++ DCHECK(fa.is_valid()); ++ 
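++  // rfpcr() above and wfpcr() here appear, judging by their names, to read and
++  // write the floating-point control register through an FP register operand.
++  // A typical round-trip, sketched with this file's own emitters (register
++  // choice illustrative, semantics assumed rather than stated in this patch):
++  //
++  //   rfpcr(kScratchDoubleReg);   // FPCR -> scratch FP register
++  //   // ... inspect/modify the saved value, e.g. via fimovd()/ifmovd() ...
++  //   wfpcr(kScratchDoubleReg);   // scratch FP register -> FPCR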
Instr instr = op_wfpcr | (fa.code() << sRaShift); ++ emitSW(instr); ++} ++ ++ ++void Assembler::setfpec0() { ++// Instr instr = op_setfpec0; ++// emitSW(instr); ++} ++ ++ ++void Assembler::setfpec1() { ++// Instr instr = op_setfpec1; ++// emitSW(instr); ++} ++ ++ ++void Assembler::setfpec2() { ++// Instr instr = op_setfpec2; ++// emitSW(instr); ++} ++ ++ ++void Assembler::setfpec3() { ++// Instr instr = op_setfpec3; ++// emitSW(instr); ++} ++ ++ ++void Assembler::frecs(FloatRegister fa, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ DCHECK(fa.is_valid() && fc.is_valid()); ++ Instr instr = op_frecs | (fa.code() << sRaShift) | (fc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++ ++void Assembler::frecd(FloatRegister fa, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ DCHECK(fa.is_valid() && fc.is_valid()); ++ Instr instr = op_frecd | (fa.code() << sRaShift) | (fc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++ ++void Assembler::fris(FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ f_exclude_same_src_fc(op_fris, fb, fc); ++} ++ ++ ++void Assembler::fris_g(FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ f_exclude_same_src_fc(op_fris_g, fb, fc); ++} ++ ++ ++void Assembler::fris_p(FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ f_exclude_same_src_fc(op_fris_p, fb, fc); ++} ++ ++ ++void Assembler::fris_z(FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ f_exclude_same_src_fc(op_fris_z, fb, fc); ++} ++ ++ ++void Assembler::fris_n(FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ f_exclude_same_src_fc(op_fris_n, fb, fc); ++} ++ ++ ++void Assembler::frid(FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ f_exclude_same_src_fc(op_frid, fb, fc); ++} ++ ++ ++void Assembler::frid_g(FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ f_exclude_same_src_fc(op_frid_g, fb, fc); ++} ++ ++ ++void Assembler::frid_p(FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ f_exclude_same_src_fc(op_frid_p, fb, fc); ++} ++ ++ ++void Assembler::frid_z(FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ f_exclude_same_src_fc(op_frid_z, fb, fc); ++} ++ ++ ++void Assembler::frid_n(FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ f_exclude_same_src_fc(op_frid_n, fb, fc); ++} ++ ++ ++void Assembler::fmas(FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_fmas, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::fmad(FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_fmad, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::fmss(FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_fmss, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::fmsd(FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_fmsd, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::fnmas(FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_fnmas, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::fnmad(FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_fnmad, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::fnmss(FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_fnmss, fa, fb, f3, fc); ++} ++ ++ ++void 
Assembler::fnmsd(FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_fnmsd, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::fseleq(FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_fseleq, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::fselne(FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_fselne, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::fsellt(FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_fsellt, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::fselle(FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_fselle, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::fselgt(FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_fselgt, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::fselge(FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_fselge, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::vaddw(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ GenInstrFR_SW(op_vaddw, fa, fb, fc); ++} ++ ++ ++void Assembler::vaddw(FloatRegister fa, int lit, FloatRegister fc) { ++ GenInstrFR_SW(op_vaddw_l, fa, lit, fc); ++} ++ ++ ++void Assembler::vsubw(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ GenInstrFR_SW(op_vsubw, fa, fb, fc); ++} ++ ++ ++void Assembler::vsubw(FloatRegister fa, int lit, FloatRegister fc) { ++ GenInstrFR_SW(op_vsubw_l, fa, lit, fc); ++} ++ ++ ++void Assembler::vcmpgew(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ GenInstrFR_SW(op_vcmpgew, fa, fb, fc); ++} ++ ++ ++void Assembler::vcmpgew(FloatRegister fa, int lit, FloatRegister fc) { ++ GenInstrFR_SW(op_vcmpgew_l, fa, lit, fc); ++} ++ ++ ++void Assembler::vcmpeqw(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ GenInstrFR_SW(op_vcmpeqw, fa, fb, fc); ++} ++ ++ ++void Assembler::vcmpeqw(FloatRegister fa, int lit, FloatRegister fc) { ++ GenInstrFR_SW(op_vcmpeqw_l, fa, lit, fc); ++} ++ ++ ++void Assembler::vcmplew(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ GenInstrFR_SW(op_vcmplew, fa, fb, fc); ++} ++ ++ ++void Assembler::vcmplew(FloatRegister fa, int lit, FloatRegister fc) { ++ GenInstrFR_SW(op_vcmplew_l, fa, lit, fc); ++} ++ ++ ++void Assembler::vcmpltw(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ GenInstrFR_SW(op_vcmpltw, fa, fb, fc); ++} ++ ++ ++void Assembler::vcmpltw(FloatRegister fa, int lit, FloatRegister fc) { ++ GenInstrFR_SW(op_vcmpltw_l, fa, lit, fc); ++} ++ ++ ++void Assembler::vcmpulew(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ GenInstrFR_SW(op_vcmpulew, fa, fb, fc); ++} ++ ++ ++void Assembler::vcmpulew(FloatRegister fa, int lit, FloatRegister fc) { ++ GenInstrFR_SW(op_vcmpulew_l, fa, lit, fc); ++} ++ ++ ++void Assembler::vcmpultw(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ GenInstrFR_SW(op_vcmpultw, fa, fb, fc); ++} ++ ++ ++void Assembler::vcmpultw(FloatRegister fa, int lit, FloatRegister fc) { ++ GenInstrFR_SW(op_vcmpultw_l, fa, lit, fc); ++} ++ ++ ++void Assembler::vsllw(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ GenInstrFR_SW(op_vsllw, fa, fb, fc); ++} ++ ++ ++void Assembler::vsllw(FloatRegister fa, int lit, FloatRegister fc) { ++ GenInstrFR_SW(op_vsllw_l, fa, lit, fc); ++} ++ ++ ++void Assembler::vsrlw(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ GenInstrFR_SW(op_vsrlw, fa, fb, fc); ++} ++ ++ ++void 
Assembler::vsrlw(FloatRegister fa, int lit, FloatRegister fc) { ++ GenInstrFR_SW(op_vsrlw_l, fa, lit, fc); ++} ++ ++ ++void Assembler::vsraw(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ GenInstrFR_SW(op_vsraw, fa, fb, fc); ++} ++ ++ ++void Assembler::vsraw(FloatRegister fa, int lit, FloatRegister fc) { ++ GenInstrFR_SW(op_vsraw_l, fa, lit, fc); ++} ++ ++ ++void Assembler::vrolw(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ GenInstrFR_SW(op_vrolw, fa, fb, fc); ++} ++ ++ ++void Assembler::vrolw(FloatRegister fa, int lit, FloatRegister fc) { ++ GenInstrFR_SW(op_vrolw_l, fa, lit, fc); ++} ++ ++ ++void Assembler::sllow(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ GenInstrFR_SW(op_sllow, fa, fb, fc); ++} ++ ++ ++void Assembler::sllow(FloatRegister fa, int lit, FloatRegister fc) { ++ GenInstrFR_SW(op_sllow_l, fa, lit, fc); ++} ++ ++ ++void Assembler::srlow(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ GenInstrFR_SW(op_srlow, fa, fb, fc); ++} ++ ++ ++void Assembler::srlow(FloatRegister fa, int lit, FloatRegister fc) { ++ GenInstrFR_SW(op_srlow_l, fa, lit, fc); ++} ++ ++ ++void Assembler::vaddl(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ GenInstrFR_SW(op_vaddl, fa, fb, fc); ++} ++ ++ ++void Assembler::vaddl(FloatRegister fa, int lit, FloatRegister fc) { ++ GenInstrFR_SW(op_vaddl_l, fa, lit, fc); ++} ++ ++ ++void Assembler::vsubl(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ GenInstrFR_SW(op_vsubl, fa, fb, fc); ++} ++ ++ ++void Assembler::vsubl(FloatRegister fa, int lit, FloatRegister fc) { ++ GenInstrFR_SW(op_vsubl_l, fa, lit, fc); ++} ++ ++ ++void Assembler::vsllb(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFR_SW(op_vsllb, fa, fb, fc); ++} ++ ++ ++void Assembler::vsllb(FloatRegister fa, int lit, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFR_SW(op_vsllb_l, fa, lit, fc); ++} ++ ++ ++void Assembler::vsrlb(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFR_SW(op_vsrlb, fa, fb, fc); ++} ++ ++ ++void Assembler::vsrlb(FloatRegister fa, int lit, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFR_SW(op_vsrlb_l, fa, lit, fc); ++} ++ ++ ++void Assembler::vsrab(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFR_SW(op_vsrab, fa, fb, fc); ++} ++ ++ ++void Assembler::vsrab(FloatRegister fa, int lit, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFR_SW(op_vsrab_l, fa, lit, fc); ++} ++ ++ ++void Assembler::vrolb(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFR_SW(op_vrolb, fa, fb, fc); ++} ++ ++ ++void Assembler::vrolb(FloatRegister fa, int lit, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFR_SW(op_vrolb_l, fa, lit, fc); ++} ++ ++ ++void Assembler::vsllh(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFR_SW(op_vsllh, fa, fb, fc); ++} ++ ++ ++void Assembler::vsllh(FloatRegister fa, int lit, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFR_SW(op_vsllh_l, fa, lit, fc); ++} ++ ++ ++void Assembler::vsrlh(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFR_SW(op_vsrlh, fa, fb, fc); ++} ++ ++ ++void Assembler::vsrlh(FloatRegister fa, int lit, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ 
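++  // Note on the vector emitters in this block: the mnemonic suffix encodes the
++  // element width (b/h/w/l for byte, half-word, word, long), and the byte,
++  // half-word and long-element variants are guarded by DCHECK(kArchVariant ==
++  // kSw64r3) while the word-element forms are not. A scalar model of what a
++  // per-lane logical shift computes, as a sketch with an assumed lane count N
++  // and amount < 32 (the actual vector width is not spelled out in this file):
++  //
++  //   template <size_t N>
++  //   void vsrlw_model(uint32_t (&lanes)[N], unsigned amount) {
++  //     for (size_t i = 0; i < N; ++i) lanes[i] >>= amount;  // each 32-bit lane
++  //   }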
GenInstrFR_SW(op_vsrlh_l, fa, lit, fc); ++} ++ ++ ++void Assembler::vsrah(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFR_SW(op_vsrah, fa, fb, fc); ++} ++ ++ ++void Assembler::vsrah(FloatRegister fa, int lit, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFR_SW(op_vsrah_l, fa, lit, fc); ++} ++ ++ ++void Assembler::vrolh(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFR_SW(op_vrolh, fa, fb, fc); ++} ++ ++ ++void Assembler::vrolh(FloatRegister fa, int lit, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFR_SW(op_vrolh_l, fa, lit, fc); ++} ++ ++ ++void Assembler::ctpopow(FloatRegister fa, FloatRegister fc) { ++ DCHECK(fa.is_valid() && fc.is_valid()); ++ Instr instr = op_ctpopow | (fa.code() << sRaShift) | (fc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++ ++void Assembler::ctlzow (FloatRegister fa, FloatRegister fc) { ++ DCHECK(fa.is_valid() && fc.is_valid()); ++ Instr instr = op_ctlzow | (fa.code() << sRaShift) | (fc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++ ++void Assembler::vslll(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFR_SW(op_vslll, fa, fb, fc); ++} ++ ++ ++void Assembler::vslll(FloatRegister fa, int lit, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFR_SW(op_vslll_l, fa, lit, fc); ++} ++ ++ ++void Assembler::vsrll(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFR_SW(op_vsrll, fa, fb, fc); ++} ++ ++ ++void Assembler::vsrll(FloatRegister fa, int lit, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFR_SW(op_vsrll_l, fa, lit, fc); ++} ++ ++ ++void Assembler::vsral(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFR_SW(op_vsral, fa, fb, fc); ++} ++ ++ ++void Assembler::vsral(FloatRegister fa, int lit, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFR_SW(op_vsral_l, fa, lit, fc); ++} ++ ++ ++void Assembler::vroll(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFR_SW(op_vroll, fa, fb, fc); ++} ++ ++ ++void Assembler::vroll(FloatRegister fa, int lit, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFR_SW(op_vroll_l, fa, lit, fc); ++} ++ ++ ++void Assembler::vmaxb(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFR_SW(op_vmaxb, fa, fb, fc); ++} ++ ++ ++void Assembler::vminb(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFR_SW(op_vminb, fa, fb, fc); ++} ++ ++ ++void Assembler::vmas (FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_vmas, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::vmad (FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_vmad, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::vmss (FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_vmss, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::vmsd (FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_vmsd, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::vnmas(FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_vnmas, fa, fb, f3, fc); ++} ++ ++ ++void 
Assembler::vnmad(FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_vnmad, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::vnmss(FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_vnmss, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::vnmsd(FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_vnmsd, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::vfseleq(FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_vfseleq, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::vfsellt(FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_vfsellt, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::vfselle(FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_vfselle, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::vseleqw(FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_vseleqw, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::vseleqw(FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc) { ++ GenInstrFMA_SW(op_vseleqw_l, fa, fb, fmalit, fc); ++} ++ ++ ++void Assembler::vsellbcw(FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_vsellbcw, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::vsellbcw(FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc) { ++ GenInstrFMA_SW(op_vsellbcw_l, fa, fb, fmalit, fc); ++} ++ ++ ++void Assembler::vselltw(FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_vselltw, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::vselltw(FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc) { ++ GenInstrFMA_SW(op_vselltw_l, fa, fb, fmalit, fc); ++} ++ ++ ++void Assembler::vsellew(FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_vsellew, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::vsellew(FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc) { ++ GenInstrFMA_SW(op_vsellew_l, fa, fb, fmalit, fc); ++} ++ ++ ++void Assembler::vinsw(FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc) { ++ GenInstrFMA_SW(op_vinsw_l, fa, fb, fmalit, fc); ++} ++ ++ ++void Assembler::vinsf(FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc) { ++ GenInstrFMA_SW(op_vinsf_l, fa, fb, fmalit, fc); ++} ++ ++ ++void Assembler::vextw(FloatRegister fa, int fmalit, FloatRegister fc) { ++ GenInstrFMA_SW(op_vextw_l, fa, fmalit, fc); ++} ++ ++ ++void Assembler::vextf(FloatRegister fa, int fmalit, FloatRegister fc) { ++ GenInstrFMA_SW(op_vextf_l, fa, fmalit, fc); ++} ++ ++ ++void Assembler::vcpyw(FloatRegister fa, FloatRegister fc) { ++ DCHECK(fa.is_valid() && fc.is_valid()); ++ Instr instr = op_vcpyw | (fa.code() << sRaShift) | (fc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++ ++void Assembler::vcpyf(FloatRegister fa, FloatRegister fc) { ++ DCHECK(fa.is_valid() && fc.is_valid()); ++ Instr instr = op_vcpyf | (fa.code() << sRaShift) | (fc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++ ++void Assembler::vconw(FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_vconw, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::vshfw(FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_vshfw, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::vcons(FloatRegister fa, FloatRegister fb, FloatRegister 
f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_vcons, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::vcond(FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc) { ++ GenInstrFMA_SW(op_vcond, fa, fb, f3, fc); ++} ++ ++ ++void Assembler::vinsb(FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFMA_SW(op_vinsb_l, fa, fb, fmalit, fc); ++} ++ ++ ++void Assembler::vinsh(FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFMA_SW(op_vinsh_l, fa, fb, fmalit, fc); ++} ++ ++ ++void Assembler::vinsectlh(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFMA_SW(op_vinsectlh, fa, fb, fc); ++} ++ ++ ++void Assembler::vinsectlw(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFMA_SW(op_vinsectlw, fa, fb, fc); ++} ++ ++ ++void Assembler::vinsectll(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFMA_SW(op_vinsectll, fa, fb, fc); ++} ++ ++ ++void Assembler::vinsectlb(FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFMA_SW(op_vinsectlb, fa, fb, fc); ++} ++ ++ ++void Assembler::vshfq(FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFMA_SW(op_vshfq_l, fa, fb, fmalit, fc); ++} ++ ++ ++void Assembler::vshfqb (FloatRegister fa, FloatRegister fb, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFMA_SW(op_vshfqb, fa, fb, fc); ++} ++ ++ ++void Assembler::vcpyb(FloatRegister fa, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ DCHECK(fa.is_valid() && fc.is_valid()); ++ Instr instr = op_vcpyb | (fa.code() << sRaShift) | (fc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++ ++void Assembler::vcpyh(FloatRegister fa, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ DCHECK(fa.is_valid() && fc.is_valid()); ++ Instr instr = op_vcpyh | (fa.code() << sRaShift) | (fc.code() << sRcShift); ++ emitSW(instr); ++} ++ ++ ++void Assembler::vsm3r(FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFMA_SW(op_vsm3r_l, fa, fb, fmalit, fc); ++} ++ ++ ++void Assembler::vfcvtsh(FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFR_SW(op_vfcvtsh_l, fa, fb, fmalit, fc); ++} ++ ++ ++void Assembler::vfcvths(FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrFR_SW(op_vfcvths_l, fa, fb, fmalit, fc); ++} ++ ++ ++void Assembler::vldw_u(FloatRegister fa, int atmdisp, Register Rb) { ++ GenInstrSIMD_SW (op_vldw_u, fa, atmdisp, Rb ); ++} ++ ++ ++void Assembler::vstw_u(FloatRegister fa, int atmdisp, Register Rb) { ++ GenInstrSIMD_SW (op_vstw_u, fa, atmdisp, Rb ); ++} ++ ++ ++void Assembler::vlds_u(FloatRegister fa, int atmdisp, Register Rb) { ++ GenInstrSIMD_SW (op_vlds_u, fa, atmdisp, Rb ); ++} ++ ++ ++void Assembler::vsts_u(FloatRegister fa, int atmdisp, Register Rb) { ++ GenInstrSIMD_SW (op_vsts_u, fa, atmdisp, Rb ); ++} ++ ++ ++void Assembler::vldd_u(FloatRegister fa, int atmdisp, Register Rb) { ++ GenInstrSIMD_SW (op_vldd_u, fa, atmdisp, Rb ); ++} ++ ++ ++void Assembler::vstd_u(FloatRegister fa, int atmdisp, Register Rb) { ++ GenInstrSIMD_SW (op_vstd_u, fa, atmdisp, Rb ); ++} ++ ++ ++void 
Assembler::vstw_ul(FloatRegister fa, int atmdisp, Register Rb) { ++ GenInstrSIMD_SW (op_vstw_ul, fa, atmdisp, Rb ); ++} ++ ++ ++void Assembler::vstw_uh(FloatRegister fa, int atmdisp, Register Rb) { ++ GenInstrSIMD_SW (op_vstw_uh, fa, atmdisp, Rb ); ++} ++ ++ ++void Assembler::vsts_ul(FloatRegister fa, int atmdisp, Register Rb) { ++ GenInstrSIMD_SW (op_vsts_ul, fa, atmdisp, Rb ); ++} ++ ++ ++void Assembler::vsts_uh(FloatRegister fa, int atmdisp, Register Rb) { ++ GenInstrSIMD_SW (op_vsts_uh, fa, atmdisp, Rb ); ++} ++ ++ ++void Assembler::vstd_ul(FloatRegister fa, int atmdisp, Register Rb) { ++ GenInstrSIMD_SW (op_vstd_ul, fa, atmdisp, Rb ); ++} ++ ++ ++void Assembler::vstd_uh(FloatRegister fa, int atmdisp, Register Rb) { ++ GenInstrSIMD_SW (op_vstd_uh, fa, atmdisp, Rb ); ++} ++ ++ ++void Assembler::lbr(int palfn) { ++ DCHECK(kArchVariant == kSw64r3); ++ DCHECK(is_int26(palfn)); ++ Instr instr = op_sys_call | ( palfn & (( 1 << 26 ) - 1)); ++ emitSW(instr); ++} ++ ++ ++void Assembler::ldbu_a(Register Ra, int atmdisp, Register Rb) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrMWithFun_SW(op_ldbu_a, Ra, atmdisp, Rb); ++} ++ ++ ++void Assembler::ldhu_a(Register Ra, int atmdisp, Register Rb) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrMWithFun_SW(op_ldhu_a, Ra, atmdisp, Rb); ++} ++ ++ ++void Assembler::ldw_a(Register Ra, int atmdisp, Register Rb) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrMWithFun_SW(op_ldw_a, Ra, atmdisp, Rb); ++} ++ ++ ++void Assembler::ldl_a(Register Ra, int atmdisp, Register Rb) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrMWithFun_SW(op_ldl_a, Ra, atmdisp, Rb); ++} ++ ++ ++void Assembler::stb_a(Register Ra, int atmdisp, Register Rb) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrMWithFun_SW(op_stb_a, Ra, atmdisp, Rb); ++} ++ ++ ++void Assembler::sth_a(Register Ra, int atmdisp, Register Rb) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrMWithFun_SW(op_sth_a, Ra, atmdisp, Rb); ++} ++ ++ ++void Assembler::stw_a(Register Ra, int atmdisp, Register Rb) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrMWithFun_SW(op_stw_a, Ra, atmdisp, Rb); ++} ++ ++ ++void Assembler::stl_a(Register Ra, int atmdisp, Register Rb) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrMWithFun_SW(op_stl_a, Ra, atmdisp, Rb); ++} ++ ++ ++void Assembler::flds_a(FloatRegister fa, int atmdisp, Register Rb) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrSIMD_SW (op_flds_a, fa, atmdisp, Rb ); ++} ++ ++ ++void Assembler::fldd_a(FloatRegister fa, int atmdisp, Register Rb) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrSIMD_SW (op_fldd_a, fa, atmdisp, Rb ); ++} ++ ++ ++void Assembler::fsts_a(FloatRegister fa, int atmdisp, Register Rb) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrSIMD_SW (op_fsts_a, fa, atmdisp, Rb ); ++} ++ ++ ++void Assembler::fstd_a(FloatRegister fa, int atmdisp, Register Rb) { ++ DCHECK(kArchVariant == kSw64r3); ++ GenInstrSIMD_SW (op_fstd_a, fa, atmdisp, Rb ); ++} ++ ++ ++void Assembler::dpfhr(int th, int atmdisp, Register Rb) { ++ DCHECK(kArchVariant == kSw64r3); ++ DCHECK( is_uint5(th) && is_uint11(atmdisp) && Rb.is_valid()); ++ Instr instr = op_dpfhr | ((th << sRaShift) & sRaFieldMask) | ((atmdisp << sImm11Shift) & sImm11Mask) ++ | (Rb.code() << sRbShift); ++ emitSW(instr); ++} ++ ++ ++void Assembler::dpfhw(int th, int atmdisp, Register Rb) { ++ DCHECK(kArchVariant == kSw64r3); ++ DCHECK( is_uint5(th) && is_uint11(atmdisp) && Rb.is_valid()); ++ Instr instr = op_dpfhw | ((th << sRaShift) & sRaFieldMask) | ((atmdisp << sImm11Shift) & sImm11Mask) ++ | (Rb.code() << 
sRbShift); ++ emitSW(instr); ++} ++ ++ ++//0x1A.00-0x1c.E SIMD instructions. ++ ++ ++void Assembler::ldbu(Register Ra, int mdisp, Register Rb) { ++ GenInstrM_SW(op_ldbu, Ra, mdisp, Rb); ++} ++ ++ ++void Assembler::ldhu(Register Ra, int mdisp, Register Rb) { ++ GenInstrM_SW(op_ldhu, Ra, mdisp, Rb); ++} ++ ++ ++void Assembler::ldw(Register Ra, int mdisp, Register Rb) { ++ GenInstrM_SW(op_ldw, Ra, mdisp, Rb); ++} ++ ++ ++void Assembler::ldl(Register Ra, int mdisp, Register Rb) { ++ GenInstrM_SW(op_ldl, Ra, mdisp, Rb); ++} ++ ++ ++void Assembler::ldl_u(Register Ra, int mdisp, Register Rb) { ++ GenInstrM_SW(op_ldl_u, Ra, mdisp, Rb); ++} ++ ++ ++void Assembler::pri_ld(Register Ra, int ev6hwdisp, Register Rb) { ++ UNREACHABLE(); ++} ++ ++ ++void Assembler::flds(FloatRegister fa, int mdisp, Register Rb) { ++ GenInstrFM_SW(op_flds, fa, mdisp, Rb); ++} ++ ++ ++void Assembler::fldd(FloatRegister fa, int mdisp, Register Rb) { ++ GenInstrFM_SW(op_fldd, fa, mdisp, Rb); ++} ++ ++ ++void Assembler::stb(Register Ra, int mdisp, Register Rb) { ++ GenInstrM_SW(op_stb, Ra, mdisp, Rb); ++} ++ ++ ++void Assembler::sth(Register Ra, int mdisp, Register Rb) { ++ GenInstrM_SW(op_sth, Ra, mdisp, Rb); ++} ++ ++ ++void Assembler::stw(Register Ra, int mdisp, Register Rb) { ++ GenInstrM_SW(op_stw, Ra, mdisp, Rb); ++} ++ ++ ++void Assembler::stl(Register Ra, int mdisp, Register Rb) { ++ GenInstrM_SW(op_stl, Ra, mdisp, Rb); ++} ++ ++ ++void Assembler::stl_u(Register Ra, int mdisp, Register Rb) { ++ GenInstrM_SW(op_stl_u, Ra, mdisp, Rb); ++} ++ ++ ++void Assembler::pri_st(Register Ra, int ev6hwdisp, Register Rb) { ++ UNREACHABLE(); ++} ++ ++ ++void Assembler::fsts(FloatRegister fa, int mdisp, Register Rb) { ++ GenInstrFM_SW(op_fsts, fa, mdisp, Rb); ++} ++ ++ ++void Assembler::fstd(FloatRegister fa, int mdisp, Register Rb) { ++ GenInstrFM_SW(op_fstd, fa, mdisp, Rb); ++} ++ ++ ++void Assembler::beq(Register Ra, int bdisp) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ GenInstrB_SW(op_beq, Ra, bdisp); ++} ++ ++ ++void Assembler::bne(Register Ra, int bdisp) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ GenInstrB_SW(op_bne, Ra, bdisp); ++} ++ ++ ++void Assembler::blt(Register Ra, int bdisp) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ GenInstrB_SW(op_blt, Ra, bdisp); ++} ++ ++ ++void Assembler::ble(Register Ra, int bdisp) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ GenInstrB_SW(op_ble, Ra, bdisp); ++} ++ ++ ++void Assembler::bgt(Register Ra, int bdisp) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ GenInstrB_SW(op_bgt, Ra, bdisp); ++} ++ ++ ++void Assembler::bge(Register Ra, int bdisp) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ GenInstrB_SW(op_bge, Ra, bdisp); ++} ++ ++ ++void Assembler::blbc(Register Ra, int bdisp) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ GenInstrB_SW(op_blbc, Ra, bdisp); ++} ++ ++ ++void Assembler::blbs(Register Ra, int bdisp) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ GenInstrB_SW(op_blbs, Ra, bdisp); ++} ++ ++ ++void Assembler::fbeq(FloatRegister fa, int bdisp) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ GenInstrFB_SW(op_fbeq, fa, bdisp); ++} ++ ++ ++void Assembler::fbne(FloatRegister fa, int bdisp) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ GenInstrFB_SW(op_fbne, fa, bdisp); ++} ++ ++ ++void Assembler::fblt(FloatRegister fa, int bdisp) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ GenInstrFB_SW(op_fblt, fa, bdisp); 
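++  // Every branch emitter here opens a BlockTrampolinePoolScope first, so that
++  // CheckTrampolinePool() (further down in this file) cannot splice a trampoline
++  // pool into the middle of the sequence being emitted. The scope is essentially
++  // an RAII nesting counter; a generic sketch of the idea (illustrative names,
++  // not the V8 classes):
++  //
++  //   struct PoolBlocker {
++  //     explicit PoolBlocker(int* depth) : depth_(depth) { ++*depth_; }
++  //     ~PoolBlocker() { --*depth_; }
++  //     int* depth_;
++  //   };
++  //   // while *depth_ > 0, pool emission is postponed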
++} ++ ++ ++void Assembler::fble(FloatRegister fa, int bdisp) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ GenInstrFB_SW(op_fble, fa, bdisp); ++} ++ ++ ++void Assembler::fbgt(FloatRegister fa, int bdisp) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ GenInstrFB_SW(op_fbgt, fa, bdisp); ++} ++ ++ ++void Assembler::fbge(FloatRegister fa, int bdisp) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ GenInstrFB_SW(op_fbge, fa, bdisp); ++} ++ ++ ++void Assembler::ldi(Register Ra, int mdisp, Register Rb) { ++ GenInstrM_SW(op_ldi, Ra, mdisp, Rb); ++} ++ ++ ++void Assembler::ldih(Register Ra, int mdisp, Register Rb) { ++ GenInstrM_SW(op_ldih, Ra, mdisp, Rb); ++} ++ ++ ++// cache control instruction ++void Assembler::s_fillcs(int mdisp, Register Rb) { ++ ldw(zero_reg, mdisp, Rb); ++} ++ ++ ++void Assembler::s_fillde(int mdisp, Register Rb) { ++ ldl(zero_reg, mdisp, Rb); ++} ++ ++ ++void Assembler::fillde(int mdisp, Register Rb) { ++ flds(f31, mdisp, Rb); ++} ++ ++ ++void Assembler::fillde_e(int mdisp, Register Rb) { ++ fldd(f31, mdisp, Rb); ++} ++ ++ ++void Assembler::fillcs(int mdisp, Register Rb) { ++ ldwe(f31, mdisp, Rb); ++} ++ ++ ++void Assembler::fillcs_e(int mdisp, Register Rb) { ++ ldde(f31, mdisp, Rb); ++} ++ ++ ++void Assembler::e_fillcs(int mdisp, Register Rb) { ++ ldse(f31, mdisp, Rb); ++} ++ ++ ++void Assembler::e_fillde(int mdisp, Register Rb) { ++ vlds(f31/*V31*/, mdisp, Rb); ++} ++ ++ ++void Assembler::flushd(int mdisp, Register Rb) { ++ ldbu(zero_reg, mdisp, Rb); ++} ++ ++ ++void Assembler::evictdl(int mdisp, Register Rb) { ++ ldl_u(zero_reg, mdisp, Rb); ++} ++ ++ ++void Assembler::evictdg(int mdisp, Register Rb) { ++ ldhu(zero_reg, mdisp, Rb); ++} ++ ++void Assembler::ldb(Register Ra, const MemOperand& rs) { // sw add ++ ldbu(Ra, rs); ++ sextb(Ra, Ra); ++} ++ ++ ++// Helper for base-reg + offset, when offset is larger than int16. ++void Assembler::SwLoadRegPlusOffsetToAt(const MemOperand& src) { ++ DCHECK(src.rm() != at); ++ DCHECK(is_int32(src.offset_)); ++ ++ int16_t lo_offset = static_cast(src.offset_); ++ int16_t hi_offset = (src.offset_-(int16_t)src.offset_) >> 16; ++ if ( ((int32_t)hi_offset == -32768) && ((int32_t)lo_offset < 0) ) { ++ // range from 0x7FFF8000 to 0x7FFFFFFF ++ ldih(at, 0x4000, zero_reg); ++ ldih(at, 0x4000, at); ++ if (lo_offset != 0 ) ++ ldi(at, lo_offset, at); ++ } else { ++ ldih(at, hi_offset, zero_reg); ++ if (lo_offset != 0 ) ++ ldi(at, lo_offset, at); ++ } ++ addl(src.rm(), at, at); // Add base register. ++} ++ ++ ++void Assembler::ldbu(Register Ra, const MemOperand& rs) { ++ if (is_int16(rs.offset_)) { ++ GenInstrM_SW(op_ldbu, Ra, rs.offset_, rs.rm()); ++ } else { // Offset > 16 bits, use multiple instructions to load. ++ SwLoadRegPlusOffsetToAt(rs); ++ GenInstrM_SW(op_ldbu, Ra, 0, at); ++ } ++} ++ ++ ++void Assembler::ldh(Register Ra, const MemOperand& rs) { // sw add ++ ldhu(Ra, rs); ++ sexth(Ra, Ra); ++} ++ ++ ++void Assembler::ldhu(Register Ra, const MemOperand& rs) { ++ if (is_int16(rs.offset_)) { ++ GenInstrM_SW(op_ldhu, Ra, rs.offset_, rs.rm()); ++ } else { // Offset > 16 bits, use multiple instructions to load. ++ SwLoadRegPlusOffsetToAt(rs); ++ GenInstrM_SW(op_ldhu, Ra, 0, at); ++ } ++} ++ ++ ++void Assembler::ldw(Register Ra, const MemOperand& rs) { ++ if (is_int16(rs.offset_)) { ++ GenInstrM_SW(op_ldw, Ra, rs.offset_, rs.rm()); ++ } else { // Offset > 16 bits, use multiple instructions to load. 
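++    // SwLoadRegPlusOffsetToAt() above splits the 32-bit offset into two signed
++    // 16-bit halves such that offset == (hi << 16) + lo; subtracting the
++    // sign-extended low half makes hi absorb the borrow when lo is negative.
++    // Offsets in 0x7FFF8000..0x7FFFFFFF are the one range where hi would have
++    // to be +0x8000 (not representable as int16), which is why that case is
++    // built from two ldih(0x4000) steps instead. A self-contained check of the
++    // identity (plain C++, not part of the emitter):
++    //
++    //   int32_t offset = 0x00018000;
++    //   int16_t lo = static_cast<int16_t>(offset);              // -0x8000
++    //   int16_t hi = static_cast<int16_t>((offset - lo) >> 16);  // 2
++    //   assert(offset == (static_cast<int32_t>(hi) << 16) + lo);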
++ SwLoadRegPlusOffsetToAt(rs); ++ GenInstrM_SW(op_ldw, Ra, 0, at); // Equiv to ldw(rd, MemOperand(at, 0)); ++ } ++} ++ ++ ++void Assembler::ldwu(Register Ra, const MemOperand& rs) { // sw add ++ ldw(Ra, rs); ++ zapnot(Ra, 0xf, Ra); ++} ++ ++ ++void Assembler::ldl(Register Ra, const MemOperand& rs) { ++ if (is_int16(rs.offset_)) { ++ GenInstrM_SW(op_ldl, Ra, rs.offset_, rs.rm()); ++ } else { // Offset > 16 bits, use multiple instructions to load. ++ SwLoadRegPlusOffsetToAt(rs); ++ GenInstrM_SW(op_ldl, Ra, 0, at); ++ } ++} ++ ++ ++void Assembler::flds(FloatRegister fa, const MemOperand& rs) { ++ if (is_int16(rs.offset_)) { ++ GenInstrFM_SW(op_flds, fa, rs.offset_, rs.rm()); ++ } else { // Offset > 16 bits, use multiple instructions to load. ++ SwLoadRegPlusOffsetToAt(rs); ++ GenInstrFM_SW(op_flds, fa, 0, at); ++ } ++} ++ ++ ++void Assembler::fldd(FloatRegister fa, const MemOperand& rs) { ++ if (is_int16(rs.offset_)) { ++ GenInstrFM_SW(op_fldd, fa, rs.offset_, rs.rm()); ++ } else { // Offset > 16 bits, use multiple instructions to load. ++ SwLoadRegPlusOffsetToAt(rs); ++ GenInstrFM_SW(op_fldd, fa, 0, at); ++ } ++} ++ ++ ++ ++void Assembler::stb(Register Ra, const MemOperand& rs) { ++ if (is_int16(rs.offset_)) { ++ GenInstrM_SW(op_stb, Ra, rs.offset_, rs.rm()); ++ } else { // Offset > 16 bits, use multiple instructions to load. ++ SwLoadRegPlusOffsetToAt(rs); ++ GenInstrM_SW(op_stb, Ra, 0, at); ++ } ++} ++ ++ ++void Assembler::sth(Register Ra, const MemOperand& rs) { ++ if (is_int16(rs.offset_)) { ++ GenInstrM_SW(op_sth, Ra, rs.offset_, rs.rm()); ++ } else { // Offset > 16 bits, use multiple instructions to load. ++ SwLoadRegPlusOffsetToAt(rs); ++ GenInstrM_SW(op_sth, Ra, 0, at); ++ } ++} ++ ++ ++void Assembler::stw(Register Ra, const MemOperand& rs) { ++ if (is_int16(rs.offset_)) { ++ GenInstrM_SW(op_stw, Ra, rs.offset_, rs.rm()); ++ } else { // Offset > 16 bits, use multiple instructions to load. ++ SwLoadRegPlusOffsetToAt(rs); ++ GenInstrM_SW(op_stw, Ra, 0, at); ++ } ++} ++ ++ ++void Assembler::stl(Register Ra, const MemOperand& rs) { ++ if (is_int16(rs.offset_)) { ++ GenInstrM_SW(op_stl, Ra, rs.offset_, rs.rm()); ++ } else { // Offset > 16 bits, use multiple instructions to load. ++ SwLoadRegPlusOffsetToAt(rs); ++ GenInstrM_SW(op_stl, Ra, 0, at); ++ } ++} ++ ++ ++void Assembler::fsts(FloatRegister fa, const MemOperand& rs) { ++ if (is_int16(rs.offset_)) { ++ GenInstrFM_SW(op_fsts, fa, rs.offset_, rs.rm()); ++ } else { // Offset > 16 bits, use multiple instructions to load. ++ SwLoadRegPlusOffsetToAt(rs); ++ GenInstrFM_SW(op_fsts, fa, 0, at); ++ } ++} ++ ++ ++void Assembler::fstd(FloatRegister fa, const MemOperand& rs) { ++ if (is_int16(rs.offset_)) { ++ GenInstrFM_SW(op_fstd, fa, rs.offset_, rs.rm()); ++ } else { // Offset > 16 bits, use multiple instructions to load. ++ SwLoadRegPlusOffsetToAt(rs); ++ GenInstrFM_SW(op_fstd, fa, 0, at); ++ } ++} ++#endif ++ ++ ++int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc, ++ intptr_t pc_delta) { ++ if (RelocInfo::IsInternalReference(rmode)) { ++ int64_t* p = reinterpret_cast(pc); ++ if (*p == kEndOfJumpChain) { ++ return 0; // Number of instructions patched. ++ } ++ *p += pc_delta; ++ return 2; // Number of instructions patched. 
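++    // An internal reference is stored as a raw 64-bit pointer in the
++    // instruction stream, so relocating it is just adding pc_delta to that
++    // value; the "2" reflects that the 8-byte pointer occupies the space of
++    // two instruction slots (assuming the 32-bit fixed-width Instr used
++    // throughout this file):
++    //
++    //   static_assert(sizeof(int64_t) / sizeof(uint32_t) == 2,
++    //                 "one embedded pointer spans two instruction slots");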
++ } ++ Instr instr = instr_at(pc); ++ DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode)); ++#ifdef SW64 ++ // br in BranchLong ++ DCHECK(IsBranch(instr)); ++ return 0; ++#else ++ if (IsLui(instr)) { ++ Instr instr_lui = instr_at(pc + 0 * kInstrSize); ++ Instr instr_ori = instr_at(pc + 1 * kInstrSize); ++ Instr instr_ori2 = instr_at(pc + 3 * kInstrSize); ++ DCHECK(IsOri(instr_ori)); ++ DCHECK(IsOri(instr_ori2)); ++ // TODO(plind): symbolic names for the shifts. ++ int64_t imm = (instr_lui & static_cast(kImm16Mask)) << 48; ++ imm |= (instr_ori & static_cast(kImm16Mask)) << 32; ++ imm |= (instr_ori2 & static_cast(kImm16Mask)) << 16; ++ // Sign extend address. ++ imm >>= 16; ++ ++ if (imm == kEndOfJumpChain) { ++ return 0; // Number of instructions patched. ++ } ++ imm += pc_delta; ++ DCHECK_EQ(imm & 3, 0); ++ ++ instr_lui &= ~kImm16Mask; ++ instr_ori &= ~kImm16Mask; ++ instr_ori2 &= ~kImm16Mask; ++ ++ instr_at_put(pc + 0 * kInstrSize, ++ instr_lui | ((imm >> 32) & kImm16Mask)); ++ instr_at_put(pc + 1 * kInstrSize, ++ instr_ori | (imm >> 16 & kImm16Mask)); ++ instr_at_put(pc + 3 * kInstrSize, ++ instr_ori2 | (imm & kImm16Mask)); ++ return 4; // Number of instructions patched. ++ } else if (IsJ(instr) || IsJal(instr)) { ++ // Regular j/jal relocation. ++ uint32_t imm28 = (instr & static_cast(kImm26Mask)) << 2; ++ imm28 += pc_delta; ++ imm28 &= kImm28Mask; ++ instr &= ~kImm26Mask; ++ DCHECK_EQ(imm28 & 3, 0); ++ uint32_t imm26 = static_cast(imm28 >> 2); ++ instr_at_put(pc, instr | (imm26 & kImm26Mask)); ++ return 1; // Number of instructions patched. ++ } else { ++ DCHECK(((instr & kJumpRawMask) == kJRawMark) || ++ ((instr & kJumpRawMask) == kJalRawMark)); ++ // Unbox raw offset and emit j/jal. ++ int32_t imm28 = (instr & static_cast(kImm26Mask)) << 2; ++ // Sign extend 28-bit offset to 32-bit. ++ imm28 = (imm28 << 4) >> 4; ++ uint64_t target = ++ static_cast(imm28) + reinterpret_cast(pc); ++ target &= kImm28Mask; ++ DCHECK_EQ(imm28 & 3, 0); ++ uint32_t imm26 = static_cast(target >> 2); ++ // Check markings whether to emit j or jal. ++ uint32_t unbox = (instr & kJRawMark) ? J : JAL; ++ instr_at_put(pc, unbox | (imm26 & kImm26Mask)); ++ return 1; // Number of instructions patched. ++ } ++#endif ++} ++ ++ ++void Assembler::GrowBuffer() { ++ // Compute new buffer size. ++ int old_size = buffer_->size(); ++ int new_size = std::min(2 * old_size, old_size + 1 * MB); ++ ++ // Some internal data structures overflow for very large buffers, ++ // they must ensure that kMaximalBufferSize is not too large. ++ if (new_size > kMaximalBufferSize) { ++ V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer"); ++ } ++ ++ // Set up new buffer. ++ std::unique_ptr new_buffer = buffer_->Grow(new_size); ++ DCHECK_EQ(new_size, new_buffer->size()); ++ byte* new_start = new_buffer->start(); ++ ++ // Copy the data. ++ intptr_t pc_delta = new_start - buffer_start_; ++ intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size); ++ size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos(); ++ MemMove(new_start, buffer_start_, pc_offset()); ++ MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(), ++ reloc_size); ++ ++ // Switch buffers. ++ buffer_ = std::move(new_buffer); ++ buffer_start_ = new_start; ++ pc_ += pc_delta; ++ reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, ++ reloc_info_writer.last_pc() + pc_delta); ++ ++ // Relocate runtime entries. 
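++  // Growing moves the whole instruction buffer, so any INTERNAL_REFERENCE
++  // recorded in it (an absolute pointer back into the old buffer, see
++  // dd(Label*) below) goes stale and is rebased by pc_delta in the walk that
++  // follows. A minimal sketch of the rebasing step itself:
++  //
++  //   uint64_t rebase(uint64_t recorded_address, intptr_t pc_delta) {
++  //     return recorded_address + static_cast<uint64_t>(pc_delta);
++  //   }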
++  Vector<byte> instructions{buffer_start_, pc_offset()};
++  Vector<const byte> reloc_info{reloc_info_writer.pos(), reloc_size};
++  for (RelocIterator it(instructions, reloc_info, 0); !it.done(); it.next()) {
++    RelocInfo::Mode rmode = it.rinfo()->rmode();
++    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
++      RelocateInternalReference(rmode, it.rinfo()->pc(), pc_delta);
++    }
++  }
++  DCHECK(!overflow());
++}
++
++
++void Assembler::db(uint8_t data) {
++  CheckForEmitInForbiddenSlot();
++  *reinterpret_cast<uint8_t*>(pc_) = data;
++  pc_ += sizeof(uint8_t);
++}
++
++void Assembler::dd(uint32_t data) {
++  CheckForEmitInForbiddenSlot();
++  *reinterpret_cast<uint32_t*>(pc_) = data;
++  pc_ += sizeof(uint32_t);
++}
++
++void Assembler::dq(uint64_t data) {
++  CheckForEmitInForbiddenSlot();
++  *reinterpret_cast<uint64_t*>(pc_) = data;
++  pc_ += sizeof(uint64_t);
++}
++
++void Assembler::dd(Label* label) {
++  uint64_t data;
++  CheckForEmitInForbiddenSlot();
++  if (label->is_bound()) {
++    data = reinterpret_cast<uint64_t>(buffer_start_ + label->pos());
++  } else {
++    data = jump_address(label);
++    unbound_labels_count_++;
++    internal_reference_positions_.insert(label->pos());
++  }
++  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
++  EmitHelper(data);
++}
++
++
++void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
++  if (!ShouldRecordRelocInfo(rmode)) return;
++  // We do not try to reuse pool constants.
++  RelocInfo rinfo(reinterpret_cast<Address>
(pc_), rmode, data, Code()); ++ DCHECK_GE(buffer_space(), kMaxRelocSize); // Too late to grow buffer here. ++ reloc_info_writer.Write(&rinfo); ++} ++ ++ ++void Assembler::BlockTrampolinePoolFor(int instructions) { ++ CheckTrampolinePoolQuick(instructions); ++ BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize); ++} ++ ++ ++void Assembler::CheckTrampolinePool() { ++ // Some small sequences of instructions must not be broken up by the ++ // insertion of a trampoline pool; such sequences are protected by setting ++ // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_, ++ // which are both checked here. Also, recursive calls to CheckTrampolinePool ++ // are blocked by trampoline_pool_blocked_nesting_. ++ if ((trampoline_pool_blocked_nesting_ > 0) || ++ (pc_offset() < no_trampoline_pool_before_)) { ++ // Emission is currently blocked; make sure we try again as soon as ++ // possible. ++ if (trampoline_pool_blocked_nesting_ > 0) { ++ next_buffer_check_ = pc_offset() + kInstrSize; ++ } else { ++ next_buffer_check_ = no_trampoline_pool_before_; ++ } ++ return; ++ } ++ ++ DCHECK(!trampoline_emitted_); ++ DCHECK_GE(unbound_labels_count_, 0); ++ if (unbound_labels_count_ > 0) { ++ // First we emit jump (2 instructions), then we emit trampoline pool. ++ { BlockTrampolinePoolScope block_trampoline_pool(this); ++ Label after_pool; ++ br(&after_pool); ++ nop(); ++ ++ int pool_start = pc_offset(); ++ for (int i = 0; i < unbound_labels_count_; i++) { ++ { BlockGrowBufferScope block_buf_growth(this); ++ // Buffer growth (and relocation) must be blocked for internal ++ // references until associated instructions are emitted and available ++ // to be patched. ++ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED); ++ br(&after_pool); ++ } ++ nop(); ++ } ++ // If unbound_labels_count_ is big enough, label after_pool will ++ // need a trampoline too, so we must create the trampoline before ++ // the bind operation to make sure function 'bind' can get this ++ // information. ++ trampoline_ = Trampoline(pool_start, unbound_labels_count_); ++ bind(&after_pool); ++ ++ trampoline_emitted_ = true; ++ // As we are only going to emit trampoline once, we need to prevent any ++ // further emission. ++ next_buffer_check_ = kMaxInt; ++ } ++ } else { ++ // Number of branches to unbound label at this point is zero, so we can ++ // move next buffer check to maximum. ++ next_buffer_check_ = pc_offset() + ++ kMaxBranchOffset - kTrampolineSlotsSize * 16; ++ } ++ return; ++} ++ ++ ++Address Assembler::target_address_at(Address pc) { ++ Instr instr0 = instr_at(pc); ++ #ifdef DEBUG ++ Instr instr1 = instr_at(pc + kInstrSize); ++ #endif ++ Instr instr2 = instr_at(pc + 2 * kInstrSize); ++ Instr instr3 = instr_at(pc + 3 * kInstrSize); ++ ++ DCHECK(GetSwOpcodeField(instr0) == op_ldi); ++ DCHECK(GetSwOpcodeAndFunctionField(instr1) == op_slll_l); ++ DCHECK(GetSwOpcodeField(instr2) == op_ldih); ++ DCHECK(GetSwOpcodeField(instr3) == op_ldi); ++ ++ // Interpret 4 instructions generated by set ++ uintptr_t addr; ++ addr = (instr0 << 16) >> 16; ++ addr = addr << 32; ++ addr += (instr2 << 16) + ((instr3 << 16) >> 16); ++ ++ return static_cast
<Address>(addr);
++
++  // We should never get here, force a bad address if we do.
++  UNREACHABLE();
++}
++
++// On Sw64, a target address is stored in a 4-instruction sequence:
++//  0: lui(rd, (j.imm64_ >> 32) & kImm16Mask);
++//  1: ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
++//  2: dsll(rd, rd, 16);
++//  3: ori(rd, rd, j.imm32_ & kImm16Mask);
++//
++// Patching the address must replace all the lui & ori instructions,
++// and flush the i-cache.
++//
++// There is an optimization below, which emits a nop when the address
++// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
++// and possibly removed.
++void Assembler::set_target_value_at(Address pc, uint64_t target,
++                                    ICacheFlushMode icache_flush_mode) {
++  // There is an optimization where only 4 instructions are used to load address
++  // in code on MIP64 because only 48-bits of address is effectively used.
++  // It relies on fact the upper [63:48] bits are not used for virtual address
++  // translation and they have to be set according to value of bit 47 in order
++  // get canonical address.
++  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
++  uint64_t itarget = reinterpret_cast<uint64_t>(target);
++
++#ifdef DEBUG
++  // Check we have the result from a li macro-instruction, using instr pair.
++  Instr instr0 = instr_at(pc);
++  Instr instr1 = instr_at(pc + kInstrSize);
++  Instr instr2 = instr_at(pc + 2 * kInstrSize);
++  Instr instr3 = instr_at(pc + 3 * kInstrSize);
++  CHECK(GetSwOpcodeField(instr0) == op_ldi);
++  CHECK(GetSwOpcodeAndFunctionField(instr1) == op_slll_l);
++  CHECK(GetSwOpcodeField(instr2) == op_ldih);
++  CHECK(GetSwOpcodeField(instr3) == op_ldi);
++#endif
++
++  // Must use 4 instructions to insure patchable code.
++  int32_t lsb32 = (int32_t) (itarget);
++  int32_t msb32 = (int32_t) ((itarget - lsb32) >> 32);
++
++  // Maybe value to "|" is negative, so need set it to 16-bits.
++  *(p+0) = ( *(p+0) & 0xffff0000) | ((int16_t)(msb32 & 0xffff) & 0xffff);
++  *(p+2) = ( *(p+2) & 0xffff0000) | (((lsb32-(int16_t)lsb32)>>16) & 0xffff);
++  *(p+3) = ( *(p+3) & 0xffff0000) | ((int16_t)(lsb32 & 0xffff) & 0xffff);
++
++  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
++    FlushInstructionCache(pc, 4 * kInstrSize);
++  }
++}
++
++UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
++    : available_(assembler->GetScratchRegisterList()),
++      old_available_(*available_) {}
++
++UseScratchRegisterScope::~UseScratchRegisterScope() {
++  *available_ = old_available_;
++}
++
++Register UseScratchRegisterScope::Acquire() {
++  DCHECK_NOT_NULL(available_);
++  DCHECK_NE(*available_, 0);
++  int index = static_cast<int>(base::bits::CountTrailingZeros32(*available_));
++  *available_ &= ~(1UL << index);
++
++  return Register::from_code(index);
++}
++
++bool UseScratchRegisterScope::hasAvailable() const { return *available_ != 0; }
++
++}  // namespace internal
++}  // namespace v8
++
++#endif  // V8_TARGET_ARCH_SW64
+diff --git a/src/3rdparty/chromium/v8/src/codegen/sw64/assembler-sw64.h b/src/3rdparty/chromium/v8/src/codegen/sw64/assembler-sw64.h
+new file mode 100755
+index 000000000..02e6bdce5
+--- /dev/null
++++ b/src/3rdparty/chromium/v8/src/codegen/sw64/assembler-sw64.h
+@@ -0,0 +1,1636 @@
++// Copyright (c) 1994-2006 Sun Microsystems Inc.
++// All Rights Reserved.
++//
++// Redistribution and use in source and binary forms, with or without
++// modification, are permitted provided that the following conditions are
++// met:
++//
++// - Redistributions of source code must retain the above copyright notice,
++// this list of conditions and the following disclaimer.
++//
++// - Redistribution in binary form must reproduce the above copyright
++// notice, this list of conditions and the following disclaimer in the
++// documentation and/or other materials provided with the distribution.
++//
++// - Neither the name of Sun Microsystems or the names of contributors may
++// be used to endorse or promote products derived from this software without
++// specific prior written permission.
++//
++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
++// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
++// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
++// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
++// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
++// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
++// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
++// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
++// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
++// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++
++// The original source code covered by the above license above has been
++// modified significantly by Google Inc.
++// Copyright 2012 the V8 project authors. All rights reserved.
++
++#ifndef V8_CODEGEN_SW64_ASSEMBLER_SW64_H_
++#define V8_CODEGEN_SW64_ASSEMBLER_SW64_H_
++
++#include <stdio.h>
++#include <memory>
++#include <set>
++
++#include "src/codegen/assembler.h"
++#include "src/codegen/external-reference.h"
++#include "src/codegen/label.h"
++#include "src/codegen/sw64/constants-sw64.h"
++#include "src/codegen/sw64/register-sw64.h"
++#include "src/objects/contexts.h"
++#include "src/objects/smi.h"
++
++namespace v8 {
++namespace internal {
++
++class SafepointTableBuilder;
++
++// -----------------------------------------------------------------------------
++// Machine instruction Operands.
++constexpr int kSmiShift = kSmiTagSize + kSmiShiftSize;
++constexpr uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
++// Class Operand represents a shifter operand in data processing instructions.
++class Operand {
++ public:
++  // Immediate.
++  V8_INLINE explicit Operand(int64_t immediate,
++                             RelocInfo::Mode rmode = RelocInfo::NONE)
++      : rm_(no_reg), rmode_(rmode) {
++    value_.immediate = immediate;
++  }
++  V8_INLINE static Operand Zero() { return Operand(static_cast<int64_t>(0)); }
++  V8_INLINE explicit Operand(const ExternalReference& f)
++      : rm_(no_reg), rmode_(RelocInfo::EXTERNAL_REFERENCE) {
++    value_.immediate = static_cast<int64_t>(f.address());
++  }
++  V8_INLINE explicit Operand(const char* s);
++  explicit Operand(Handle<HeapObject> handle);
++  V8_INLINE explicit Operand(Smi value) : rm_(no_reg), rmode_(RelocInfo::NONE) {
++    value_.immediate = static_cast<intptr_t>(value.ptr());
++  }
++
++  static Operand EmbeddedNumber(double number);  // Smi or HeapNumber.
++  static Operand EmbeddedStringConstant(const StringConstantBase* str);
++
++  // Register.
++  V8_INLINE explicit Operand(Register rm) : rm_(rm) {}
++
++  // Return true if this is a register operand.
++ V8_INLINE bool is_reg() const; ++ ++ inline int64_t immediate() const; ++ ++ bool IsImmediate() const { return !rm_.is_valid(); } ++ ++ HeapObjectRequest heap_object_request() const { ++ DCHECK(IsHeapObjectRequest()); ++ return value_.heap_object_request; ++ } ++ ++ bool IsHeapObjectRequest() const { ++ DCHECK_IMPLIES(is_heap_object_request_, IsImmediate()); ++ DCHECK_IMPLIES(is_heap_object_request_, ++ rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT || ++ rmode_ == RelocInfo::CODE_TARGET); ++ return is_heap_object_request_; ++ } ++ ++ Register rm() const { return rm_; } ++ ++ RelocInfo::Mode rmode() const { return rmode_; } ++ ++ private: ++ Register rm_; ++ union Value { ++ Value() {} ++ HeapObjectRequest heap_object_request; // if is_heap_object_request_ ++ int64_t immediate; // otherwise ++ } value_; // valid if rm_ == no_reg ++ bool is_heap_object_request_ = false; ++ RelocInfo::Mode rmode_; ++ ++ friend class Assembler; ++ friend class MacroAssembler; ++}; ++ ++ ++// On SW64 we have only one addressing mode with base_reg + offset. ++// Class MemOperand represents a memory operand in load and store instructions. ++class V8_EXPORT_PRIVATE MemOperand : public Operand { ++ public: ++ // Immediate value attached to offset. ++ enum OffsetAddend { ++ offset_minus_one = -1, ++ offset_zero = 0 ++ }; ++ ++ explicit MemOperand(Register rn, int32_t offset = 0); ++ explicit MemOperand(Register rn, int32_t unit, int32_t multiplier, ++ OffsetAddend offset_addend = offset_zero); ++ int32_t offset() const { return offset_; } ++ ++ bool OffsetIsInt16Encodable() const { ++ return is_int16(offset_); ++ } ++ ++ private: ++ int32_t offset_; ++ ++ friend class Assembler; ++}; ++ ++ ++class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { ++ public: ++ // Create an assembler. Instructions and relocation information are emitted ++ // into a buffer, with the instructions starting from the beginning and the ++ // relocation information starting from the end of the buffer. See CodeDesc ++ // for a detailed comment on the layout (globals.h). ++ // ++ // If the provided buffer is nullptr, the assembler allocates and grows its ++ // own buffer. Otherwise it takes ownership of the provided buffer. ++ explicit Assembler(const AssemblerOptions&, ++ std::unique_ptr = {}); ++ ++ virtual ~Assembler() { } ++ ++ // GetCode emits any pending (non-emitted) code and fills the descriptor desc. ++ static constexpr int kNoHandlerTable = 0; ++ static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr; ++ void GetCode(Isolate* isolate, CodeDesc* desc, ++ SafepointTableBuilder* safepoint_table_builder, ++ int handler_table_offset); ++ ++ // Convenience wrapper for code without safepoint or handler tables. ++ void GetCode(Isolate* isolate, CodeDesc* desc) { ++ GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable); ++ } ++ ++ // Unused on this architecture. ++ void MaybeEmitOutOfLineConstantPool() {} ++ ++ // Sw64 uses BlockTrampolinePool to prevent generating trampoline inside a ++ // continuous instruction block. For Call instruction, it prevents generating ++ // trampoline between jalr and delay slot instruction. In the destructor of ++ // BlockTrampolinePool, it must check if it needs to generate trampoline ++ // immediately, if it does not do this, the branch range will go beyond the ++ // max branch offset, that means the pc_offset after call CheckTrampolinePool ++ // may be not the Call instruction's location. So we use last_call_pc here for ++ // safepoint record. 
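++  // In other words, the value returned below is last_call_pc_ relative to the
++  // buffer start -- the offset just past the most recently recorded call -- so
++  // a safepoint taken right after a call still refers to the call site even if
++  // CheckTrampolinePool has emitted a trampoline pool behind it in the
++  // meantime.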
++ int pc_offset_for_safepoint() { ++//SKTODO ++#if 0 ++#ifdef DEBUG ++ Instr instr1 = ++ instr_at(static_cast(last_call_pc_ - buffer_start_ - kInstrSize)); ++ Instr instr2 = instr_at( ++ static_cast(last_call_pc_ - buffer_start_ - kInstrSize * 2)); ++ if (GetOpcodeField(instr1) != SPECIAL) { // instr1 == jialc. ++ DCHECK(GetOpcodeField(instr1) == POP76 && ++ GetRs(instr1) == 0); ++ } else { ++ if (GetFunctionField(instr1) == SLL) { // instr1 == nop, instr2 == jalr. ++ DCHECK(GetOpcodeField(instr2) == SPECIAL && ++ GetFunctionField(instr2) == JALR); ++ } else { // instr1 == jalr. ++ DCHECK(GetFunctionField(instr1) == JALR); ++ } ++ } ++#endif ++#endif ++ return static_cast(last_call_pc_ - buffer_start_); ++ } ++ ++ // Label operations & relative jumps (PPUM Appendix D). ++ // ++ // Takes a branch opcode (cc) and a label (L) and generates ++ // either a backward branch or a forward branch and links it ++ // to the label fixup chain. Usage: ++ // ++ // Label L; // unbound label ++ // j(cc, &L); // forward branch to unbound label ++ // bind(&L); // bind label to the current pc ++ // j(cc, &L); // backward branch to bound label ++ // bind(&L); // illegal: a label may be bound only once ++ // ++ // Note: The same Label can be used for forward and backward branches ++ // but it may be bound only once. ++ void bind(Label* L); // Binds an unbound label L to current code position. ++ ++ enum OffsetSize : int { kOffset26 = 26, kOffset21 = 21, kOffset16 = 16 }; ++ ++ // Determines if Label is bound and near enough so that branch instruction ++ // can be used to reach it, instead of jump instruction. ++ bool is_near(Label* L); ++ bool is_near(Label* L, OffsetSize bits); ++ bool is_near_branch(Label* L); ++ inline bool is_near_pre_r3(Label* L) { ++ DCHECK(!(kArchVariant == kSw64r3)); ++ return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize; ++ } ++ inline bool is_near_r3(Label* L) { ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ return pc_offset() - L->pos() < kMaxCompactBranchOffset - 4 * kInstrSize; ++ } ++ ++ int BranchOffset(Instr instr); ++ ++ // Returns the branch offset to the given label from the current code ++ // position. Links the label to the current position if it is still unbound. ++ // Manages the jump elimination optimization if the second parameter is true. ++ int32_t branch_offset_helper(Label* L, OffsetSize bits); ++ inline int32_t branch_offset(Label* L) { ++ return branch_offset_helper(L, OffsetSize::kOffset21); // change kOffset21 as default. ++ } ++ inline int32_t branch_offset21(Label* L) { ++ return branch_offset_helper(L, OffsetSize::kOffset21); ++ } ++ inline int32_t branch_offset26(Label* L) { ++ return branch_offset_helper(L, OffsetSize::kOffset26); ++ } ++ inline int32_t shifted_branch_offset(Label* L) { ++ return branch_offset(L) >> 2; ++ } ++ inline int32_t shifted_branch_offset21(Label* L) { ++ return branch_offset21(L) >> 2; ++ } ++ inline int32_t shifted_branch_offset26(Label* L) { ++ return branch_offset26(L) >> 2; ++ } ++ uint64_t jump_address(Label* L); ++ uint64_t jump_offset(Label* L); ++ uint64_t branch_long_offset(Label* L); ++ ++ // Puts a labels target address at the given position. ++ // The high 8 bits are set to zero. ++ void label_at_put(Label* L, int at_offset); ++ ++ // Read/Modify the code target address in the branch/call instruction at pc. ++ // The isolate argument is unused (and may be nullptr) when skipping flushing. 
++ static Address target_address_at(Address pc); ++ V8_INLINE static void set_target_address_at( ++ Address pc, Address target, ++ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) { ++ set_target_value_at(pc, target, icache_flush_mode); ++ } ++ // On SW64 there is no Constant Pool so we skip that parameter. ++ V8_INLINE static Address target_address_at(Address pc, ++ Address constant_pool) { ++ return target_address_at(pc); ++ } ++ V8_INLINE static void set_target_address_at( ++ Address pc, Address constant_pool, Address target, ++ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) { ++ set_target_address_at(pc, target, icache_flush_mode); ++ } ++ ++ static void set_target_value_at( ++ Address pc, uint64_t target, ++ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED); ++ ++ static void JumpLabelToJumpRegister(Address pc); ++ ++ // This sets the branch destination (which gets loaded at the call address). ++ // This is for calls and branches within generated code. The serializer ++ // has already deserialized the lui/ori instructions etc. ++ inline static void deserialization_set_special_target_at( ++ Address instruction_payload, Code code, Address target); ++ ++ // Get the size of the special target encoded at 'instruction_payload'. ++ inline static int deserialization_special_target_size( ++ Address instruction_payload); ++ ++ // This sets the internal reference at the pc. ++ inline static void deserialization_set_target_internal_reference_at( ++ Address pc, Address target, ++ RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE); ++ ++ // Difference between address of current opcode and target address offset. ++ static constexpr int kBranchPCOffset = kInstrSize; ++ ++ // Difference between address of current opcode and target address offset, ++ // when we are generatinga sequence of instructions for long relative PC ++ // branches ++ //static constexpr int kLongBranchPCOffset = 3 * kInstrSize; ++ ++ // Adjust ra register in branch delay slot of bal instruction so to skip ++ // instructions not needed after optimization of PIC in ++ // TurboAssembler::BranchAndLink method. ++ ++ //static constexpr int kOptimizedBranchAndLinkLongReturnOffset = 4 * kInstrSize; ++ ++ // Here we are patching the address in the LUI/ORI instruction pair. ++ // These values are used in the serialization process and must be zero for ++ // SW64 platform, as Code, Embedded Object or External-reference pointers ++ // are split across two consecutive instructions and don't exist separately ++ // in the code, so the serializer should not step forwards in memory after ++ // a target is resolved and written. ++ static constexpr int kSpecialTargetSize = 0; ++ ++ // Number of consecutive instructions used to store 32bit/64bit constant. ++ // This constant was used in RelocInfo::target_address_address() function ++ // to tell serializer address of the instruction that follows ++ // LUI/ORI instruction pair. ++ static constexpr int kInstructionsFor64BitConstant = 4; ++ ++ // Difference between address of current opcode and value read from pc ++ // register. 
++ static constexpr int kPcLoadDelta = 4; ++ ++ // Max offset for instructions with 21-bit offset field ++ static constexpr int kMaxBranchOffset = (1 << (23 - 1)) - 1; ++ ++ // Max offset for compact branch instructions with 26-bit offset field ++ static constexpr int kMaxCompactBranchOffset = (1 << (23 - 1)) - 1; ++ ++ static constexpr int kTrampolineSlotsSize = 2 * kInstrSize; ++ ++ RegList* GetScratchRegisterList() { return &scratch_register_list_; } ++ ++ // --------------------------------------------------------------------------- ++ // Code generation. ++ ++ // Insert the smallest number of nop instructions ++ // possible to align the pc offset to a multiple ++ // of m. m must be a power of 2 (>= 4). ++ void Align(int m); ++ // Insert the smallest number of zero bytes possible to align the pc offset ++ // to a mulitple of m. m must be a power of 2 (>= 2). ++ void DataAlign(int m); ++ // Aligns code to something that's optimal for a jump target for the platform. ++ void CodeTargetAlign(); ++ ++ // Different nop operations are used by the code generator to detect certain ++ // states of the generated code. ++ enum NopMarkerTypes { ++ NON_MARKING_NOP = 0, ++ DEBUG_BREAK_NOP, ++ // IC markers. ++ PROPERTY_ACCESS_INLINED, ++ PROPERTY_ACCESS_INLINED_CONTEXT, ++ PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE, ++ // Helper values. ++ LAST_CODE_MARKER, ++ FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED, ++ }; ++ ++ // Type == 0 is the default non-marking nop. For sw64 this is a ++ // sll(zero_reg, zero_reg, 0). We use rt_reg == at for non-zero ++ // marking, to avoid conflict with ssnop and ehb instructions. ++ void nop(unsigned int type = 0) { ++ DCHECK_EQ(type, 0); ++ ldi(zero_reg, 0, zero_reg); ++ } ++ ++ ++ // --------Branch-and-jump-instructions---------- ++ // We don't use likely variant of instructions. ++ void br(int32_t offset); ++ inline void br(Label* L) { br(shifted_branch_offset(L)); } ++ void bsr(int32_t offset); ++ inline void bsr(Label* L) { bsr(shifted_branch_offset(L)); } ++ ++ void fmovd(FPURegister fs, FPURegister fd); ++ void fmovs(FPURegister fs, FPURegister fd); ++ void fnegs(FPURegister fs, FPURegister fd); ++ void fnegd(FPURegister fs, FPURegister fd); ++ ++ // Conversion. ++ void fcvtsw(FPURegister fs, FPURegister fd); ++ void fcvtdw(FPURegister fs, FPURegister fd); ++ void ftruncsw(FPURegister fs, FPURegister fd); ++ void ftruncdw(FPURegister fs, FPURegister fd); ++ void froundsw(FPURegister fs, FPURegister fd); ++ void frounddw(FPURegister fs, FPURegister fd); ++ void ffloorsw(FPURegister fs, FPURegister fd); ++ void ffloordw(FPURegister fs, FPURegister fd); ++ void fceilsw(FPURegister fs, FPURegister fd); ++ void fceildw(FPURegister fs, FPURegister fd); ++ ++ ++ void fcvtsl(FPURegister fs, FPURegister fd); ++ void ftruncsl(FPURegister fs, FPURegister fd); ++ void ftruncdl(FPURegister fs, FPURegister fd); ++ void froundsl(FPURegister fs, FPURegister fd); ++ void frounddl(FPURegister fs, FPURegister fd); ++ void ffloorsl(FPURegister fs, FPURegister fd); ++ void ffloordl(FPURegister fs, FPURegister fd); ++ void fceilsl(FPURegister fs, FPURegister fd); ++ void fceildl(FPURegister fs, FPURegister fd); ++ ++ void fcvtws(FPURegister fs, FPURegister fd); ++ void fcvtls_(FPURegister fs, FPURegister fd); ++ void fcvtds_(FPURegister fs, FPURegister fd); ++ ++ void fcvtwd(FPURegister fs, FPURegister fd); ++ void fcvtld_(FPURegister fs, FPURegister fd); ++ void fcvtsd_(FPURegister fs, FPURegister fd); ++ ++ // Conditions and branches for SW64r3. 
++ void cmp(FPUCondition cond, SecondaryField fmt, ++ FPURegister fd, FPURegister ft, FPURegister fs); ++ ++ void sld_b(MSARegister wd, MSARegister ws, Register rt); ++ void sld_h(MSARegister wd, MSARegister ws, Register rt); ++ void sld_w(MSARegister wd, MSARegister ws, Register rt); ++ void sld_d(MSARegister wd, MSARegister ws, Register rt); ++ void splat_b(MSARegister wd, MSARegister ws, Register rt); ++ void splat_h(MSARegister wd, MSARegister ws, Register rt); ++ void splat_w(MSARegister wd, MSARegister ws, Register rt); ++ void splat_d(MSARegister wd, MSARegister ws, Register rt); ++ ++#ifdef SW64 ++ void sys_call_b( int palfn ); ++ void sys_call ( int palfn ); ++ ++ void call ( Register Ra, Register Rb, int jmphint ); ++ void ret ( Register Ra, Register Rb, int rethint ); ++ void jmp ( Register Ra, Register Rb, int jmphint ); ++ ++ void br ( Register Ra, int bdisp ); ++ void bsr ( Register Ra, int bdisp ); ++ ++ void memb ( void ); ++ void imemb ( void ); ++ void wmemb ( void ); //SW6B ++// void rtc ( Register Ra ); ++ void rtc ( Register Ra, Register Rb ); ++ void rcid ( Register Ra); ++ void halt ( void); ++ ++ void rd_f ( Register Ra ); //SW2F ++ void wr_f ( Register Ra ); //SW2F ++ ++ void rtid ( Register Ra); ++ void csrrs ( Register Ra, int rpiindex ); //SW6B ++ void csrrc ( Register Ra, int rpiindex ); //SW6B ++ void csrr ( Register Ra, int rpiindex ); ++ void csrw ( Register Ra, int rpiindex ); ++// void pri_rcsr ( Register Ra, int rpiindex ); ++// void pri_wcsr ( Register Ra, int rpiindex ); ++ void pri_ret ( Register Ra ); ++// void pri_ret_b ( Register Ra ); ++ ++ void lldw ( Register Ra, int atmdisp, Register Rb ); ++ void lldl ( Register Ra, int atmdisp, Register Rb ); ++ void ldw_inc ( Register Ra, int atmdisp, Register Rb ); //SW2F ++ void ldl_inc ( Register Ra, int atmdisp, Register Rb ); //SW2F ++ void ldw_dec ( Register Ra, int atmdisp, Register Rb ); //SW2F ++ void ldl_dec ( Register Ra, int atmdisp, Register Rb ); //SW2F ++ void ldw_set ( Register Ra, int atmdisp, Register Rb ); //SW2F ++ void ldl_set ( Register Ra, int atmdisp, Register Rb ); //SW2F ++ void lstw ( Register Ra, int atmdisp, Register Rb ); ++ void lstl ( Register Ra, int atmdisp, Register Rb ); ++ void ldw_nc ( Register Ra, int atmdisp, Register Rb ); ++ void ldl_nc ( Register Ra, int atmdisp, Register Rb ); ++ void ldd_nc ( Register Ra, int atmdisp, Register Rb ); ++ void stw_nc ( Register Ra, int atmdisp, Register Rb ); ++ void stl_nc ( Register Ra, int atmdisp, Register Rb ); ++ void std_nc ( Register Ra, int atmdisp, Register Rb ); ++ ++ //TODO: 0x8.a-0x8.f *_NC instructions ++ ++ // --------Load/Store-instructions----------------- ++ void ldwe ( FloatRegister fa, int mdisp, Register Rb ); ++ void ldse ( FloatRegister fa, int mdisp, Register Rb ); ++ void ldde ( FloatRegister fa, int mdisp, Register Rb ); ++ void vlds ( FloatRegister fa, int mdisp, Register Rb ); ++ void vldd ( FloatRegister fa, int mdisp, Register Rb ); ++ void vsts ( FloatRegister fa, int mdisp, Register Rb ); ++ void vstd ( FloatRegister fa, int mdisp, Register Rb ); ++ ++ // --------ALU-instructions----------------- ++ void addw ( Register Ra, Register Rb, Register Rc ); ++ void addw ( Register Ra, int lit, Register Rc ); ++ void subw ( Register Ra, Register Rb, Register Rc ); ++ void subw ( Register Ra, int lit, Register Rc ); ++ void s4addw ( Register Ra, Register Rb, Register Rc ); ++ void s4addw ( Register Ra, int lit, Register Rc ); ++ void s4subw ( Register Ra, Register Rb, Register Rc ); ++ void s4subw ( Register 
Ra, int lit, Register Rc ); ++ void s8addw ( Register Ra, Register Rb, Register Rc ); ++ void s8addw ( Register Ra, int lit, Register Rc ); ++ void s8subw ( Register Ra, Register Rb, Register Rc ); ++ void s8subw ( Register Ra, int lit, Register Rc ); ++ void addl ( Register Ra, Register Rb, Register Rc ); ++ void addl ( Register Ra, int lit, Register Rc ); ++ void subl ( Register Ra, Register Rb, Register Rc ); ++ void subl ( Register Ra, int lit, Register Rc ); ++ void s4addl ( Register Ra, Register Rb, Register Rc ); ++ void s4addl ( Register Ra, int lit, Register Rc ); ++ void s4subl ( Register Ra, Register Rb, Register Rc ); ++ void s4subl ( Register Ra, int lit, Register Rc ); ++ void s8addl ( Register Ra, Register Rb, Register Rc ); ++ void s8addl ( Register Ra, int lit, Register Rc ); ++ void s8subl ( Register Ra, Register Rb, Register Rc ); ++ void s8subl ( Register Ra, int lit, Register Rc ); ++ void mulw ( Register Ra, Register Rb, Register Rc ); ++ void mulw ( Register Ra, int lit, Register Rc ); ++ void divw ( Register Ra, Register Rb, Register Rc ); //SW6B ++ void udivw ( Register Ra, Register Rb, Register Rc ); //SW6B ++ void remw ( Register Ra, Register Rb, Register Rc ); //SW6B ++ void uremw ( Register Ra, Register Rb, Register Rc ); //SW6B ++ void mull ( Register Ra, Register Rb, Register Rc ); ++ void mull ( Register Ra, int lit, Register Rc ); ++ void umulh ( Register Ra, Register Rb, Register Rc ); ++ void umulh ( Register Ra, int lit, Register Rc ); ++ void divl ( Register Ra, Register Rb, Register Rc ); //SW6B ++ void udivl ( Register Ra, Register Rb, Register Rc ); //SW6B ++ void reml ( Register Ra, Register Rb, Register Rc ); //SW6B ++ void ureml ( Register Ra, Register Rb, Register Rc ); //SW6B ++ void addpi ( int apint, Register Rc ); //SW6B ++ void addpis ( int apint, Register Rc ); //SW6B ++ ++ void cmpeq ( Register Ra, Register Rb, Register Rc ); ++ void cmpeq ( Register Ra, int lit, Register Rc ); ++ void cmplt ( Register Ra, Register Rb, Register Rc ); ++ void cmplt ( Register Ra, int lit, Register Rc ); ++ void cmple ( Register Ra, Register Rb, Register Rc ); ++ void cmple ( Register Ra, int lit, Register Rc ); ++ void cmpult ( Register Ra, Register Rb, Register Rc ); ++ void cmpult ( Register Ra, int lit, Register Rc ); ++ void cmpule ( Register Ra, Register Rb, Register Rc ); ++ void cmpule ( Register Ra, int lit, Register Rc ); ++ void sbt ( Register Ra, Register Rb, Register Rc ); //SW6B ++ void sbt ( Register Ra, int lit, Register Rc ); //SW6B ++ void cbt ( Register Ra, Register Rb, Register Rc ); //SW6B ++ void cbt ( Register Ra, int lit, Register Rc ); //SW6B ++ ++ void and_ins ( Register Ra, Register Rb, Register Rc ); ++ void and_ins ( Register Ra, int lit, Register Rc ); ++ void bic ( Register Ra, Register Rb, Register Rc ); ++ void andnot ( Register Ra, Register Rb, Register Rc ); // bic ++ void bic ( Register Ra, int lit, Register Rc ); ++ void andnot ( Register Ra, int lit, Register Rc ); // bic ++ void bis ( Register Ra, Register Rb, Register Rc ); ++ void or_ins ( Register Ra, Register Rb, Register Rc ); ++ void bis ( Register Ra, int lit, Register Rc ); ++ void or_ins ( Register Ra, int lit, Register Rc ); ++ void ornot ( Register Ra, Register Rb, Register Rc ); ++ void ornot ( Register Ra, int lit, Register Rc ); ++ void xor_ins ( Register Ra, Register Rb, Register Rc ); ++ void xor_ins ( Register Ra, int lit, Register Rc ); ++ void eqv ( Register Ra, Register Rb, Register Rc ); ++ void eqv ( Register Ra, int lit, Register Rc ); ++ ++ // 
0x10.40-0x10.47 INS[0-7]B ++ void inslb ( Register Ra, Register Rb, Register Rc ); ++ void inslb ( Register Ra, int lit, Register Rc ); ++ void inslh ( Register Ra, Register Rb, Register Rc ); ++ void inslh ( Register Ra, int lit, Register Rc ); ++ void inslw ( Register Ra, Register Rb, Register Rc ); ++ void inslw ( Register Ra, int lit, Register Rc ); ++ void insll ( Register Ra, Register Rb, Register Rc ); ++ void insll ( Register Ra, int lit, Register Rc ); ++ void inshb ( Register Ra, Register Rb, Register Rc ); ++ void inshb ( Register Ra, int lit, Register Rc ); ++ void inshh ( Register Ra, Register Rb, Register Rc ); ++ void inshh ( Register Ra, int lit, Register Rc ); ++ void inshw ( Register Ra, Register Rb, Register Rc ); ++ void inshw ( Register Ra, int lit, Register Rc ); ++ void inshl ( Register Ra, Register Rb, Register Rc ); ++ void inshl ( Register Ra, int lit, Register Rc ); ++ ++ void slll ( Register Ra, Register Rb, Register Rc ); ++ void slll ( Register Ra, int lit, Register Rc ); ++ void srll ( Register Ra, Register Rb, Register Rc ); ++ void srll ( Register Ra, int lit, Register Rc ); ++ void sral ( Register Ra, Register Rb, Register Rc ); ++ void sral ( Register Ra, int lit, Register Rc ); ++ void roll ( Register Ra, Register Rb, Register Rc ); //SW6B ++ void roll ( Register Ra, int lit, Register Rc ); //SW6B ++ void sllw ( Register Ra, Register Rb, Register Rc ); //SW6B ++ void sllw ( Register Ra, int lit, Register Rc ); //SW6B ++ void srlw ( Register Ra, Register Rb, Register Rc ); //SW6B ++ void srlw ( Register Ra, int lit, Register Rc ); //SW6B ++ void sraw ( Register Ra, Register Rb, Register Rc ); //SW6B ++ void sraw ( Register Ra, int lit, Register Rc ); //SW6B ++ void rolw ( Register Ra, Register Rb, Register Rc ); //SW6B ++ void rolw ( Register Ra, int lit, Register Rc ); //SW6B ++ ++ // 0x10.50-0x10.57 EXT[0-7]B ++ void extlb ( Register Ra, Register Rb, Register Rc ); ++ void extlb ( Register Ra, int lit, Register Rc ); ++ void extlh ( Register Ra, Register Rb, Register Rc ); ++ void extlh ( Register Ra, int lit, Register Rc ); ++ void extlw ( Register Ra, Register Rb, Register Rc ); ++ void extlw ( Register Ra, int lit, Register Rc ); ++ void extll ( Register Ra, Register Rb, Register Rc ); ++ void extll ( Register Ra, int lit, Register Rc ); ++ void exthb ( Register Ra, Register Rb, Register Rc ); ++ void exthb ( Register Ra, int lit, Register Rc ); ++ void exthh ( Register Ra, Register Rb, Register Rc ); ++ void exthh ( Register Ra, int lit, Register Rc ); ++ void exthw ( Register Ra, Register Rb, Register Rc ); ++ void exthw ( Register Ra, int lit, Register Rc ); ++ void exthl ( Register Ra, Register Rb, Register Rc ); ++ void exthl ( Register Ra, int lit, Register Rc ); ++ ++ void ctpop ( Register Rb, Register Rc ); ++ void ctlz ( Register Rb, Register Rc ); ++ void cttz ( Register Rb, Register Rc ); ++ void revbh ( Register Rb, Register Rc ); //SW6B ++ void revbw ( Register Rb, Register Rc ); //SW6B ++ void revbl ( Register Rb, Register Rc ); //SW6B ++ void casw ( Register Ra, Register Rb, Register Rc ); //SW6B ++ void casl ( Register Ra, Register Rb, Register Rc ); //SW6B ++ ++ // 0x10.60-0x10.67 MASK[0-7]B ++ void masklb ( Register Ra, Register Rb, Register Rc ); ++ void masklb ( Register Ra, int lit, Register Rc ); ++ void masklh ( Register Ra, Register Rb, Register Rc ); ++ void masklh ( Register Ra, int lit, Register Rc ); ++ void masklw ( Register Ra, Register Rb, Register Rc ); ++ void masklw ( Register Ra, int lit, Register Rc ); ++ void 
maskll ( Register Ra, Register Rb, Register Rc ); ++ void maskll ( Register Ra, int lit, Register Rc ); ++ void maskhb ( Register Ra, Register Rb, Register Rc ); ++ void maskhb ( Register Ra, int lit, Register Rc ); ++ void maskhh ( Register Ra, Register Rb, Register Rc ); ++ void maskhh ( Register Ra, int lit, Register Rc ); ++ void maskhw ( Register Ra, Register Rb, Register Rc ); ++ void maskhw ( Register Ra, int lit, Register Rc ); ++ void maskhl ( Register Ra, Register Rb, Register Rc ); ++ void maskhl ( Register Ra, int lit, Register Rc ); ++ ++ void zap ( Register Ra, Register Rb, Register Rc ); ++ void zap ( Register Ra, int lit, Register Rc ); ++ void zapnot ( Register Ra, Register Rb, Register Rc ); ++ void zapnot ( Register Ra, int lit, Register Rc ); ++ void sextb ( Register Rb, Register Rc); ++ void sextb ( int lit, Register Rc ); ++ void sexth ( Register Rb, Register Rc ); ++ void sexth ( int lit, Register Rc ); ++ //0x10.6c CMPGEB ++ void cmpgeb ( Register Ra, Register Rb, Register Rc ); ++ void cmpgeb ( Register Ra, int lit, Register Rc ); ++ ++ void fimovs ( FloatRegister fa, Register Rc ); ++ void fimovd ( FloatRegister fa, Register Rc ); ++ ++ void seleq ( Register Ra, Register Rb,Register R3, Register Rc ); ++ void seleq ( Register Ra, int lit, Register R3,Register Rc ); ++ void selge ( Register Ra, Register Rb,Register R3, Register Rc ); ++ void selge ( Register Ra, int lit, Register R3,Register Rc ); ++ void selgt ( Register Ra, Register Rb,Register R3, Register Rc ); ++ void selgt ( Register Ra, int lit, Register R3,Register Rc ); ++ void selle ( Register Ra, Register Rb,Register R3, Register Rc ); ++ void selle ( Register Ra, int lit, Register R3,Register Rc ); ++ void sellt ( Register Ra, Register Rb,Register R3, Register Rc ); ++ void sellt ( Register Ra, int lit, Register R3,Register Rc ); ++ void selne ( Register Ra, Register Rb,Register R3, Register Rc ); ++ void selne ( Register Ra, int lit, Register R3,Register Rc ); ++ void sellbc ( Register Ra, Register Rb,Register R3, Register Rc ); ++ void sellbc ( Register Ra, int lit, Register R3,Register Rc ); ++ void sellbs ( Register Ra, Register Rb,Register R3, Register Rc ); ++ void sellbs ( Register Ra, int lit, Register R3,Register Rc ); ++ ++ void vlog ( int vlog, FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc); ++ ++ void f_exclude_same_src_fc(Opcode_ops_fp opcode, FloatRegister fa, FloatRegister fb, FloatRegister fc); ++ void f_exclude_same_src_fc(Opcode_ops_fp opcode, FloatRegister fb, FloatRegister fc); ++ void vbisw ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vxorw ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vandw ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void veqvw ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vornotw ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vbicw ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void fadds ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void faddd ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void fsubs ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void fsubd ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void fmuls ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void fmuld ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void fdivs ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void fdivd ( FloatRegister fa, FloatRegister fb, 
FloatRegister fc ); ++ void fsqrts ( FloatRegister fb, FloatRegister fc ); ++ void fsqrtd ( FloatRegister fb, FloatRegister fc ); ++ void fcmpeq ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void fcmple ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void fcmplt ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void fcmpun ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void fcvtsd ( FloatRegister fb, FloatRegister fc ); ++ void fcvtds ( FloatRegister fb, FloatRegister fc ); ++ void fcvtdl_g ( FloatRegister fb, FloatRegister fc ); ++ void fcvtdl_p ( FloatRegister fb, FloatRegister fc ); ++ void fcvtdl_z ( FloatRegister fb, FloatRegister fc ); ++ void fcvtdl_n ( FloatRegister fb, FloatRegister fc ); ++ void fcvtdl ( FloatRegister fb, FloatRegister fc ); ++ void fcvtwl ( FloatRegister fb, FloatRegister fc ); ++ void fcvtlw ( FloatRegister fb, FloatRegister fc ); ++ void fcvtls ( FloatRegister fb, FloatRegister fc ); ++ void fcvtld ( FloatRegister fb, FloatRegister fc ); ++ void fcpys ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void fcpyse ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void fcpysn ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void ifmovs ( Register Ra, FloatRegister fc ); ++ void ifmovd ( Register Ra, FloatRegister fc ); ++ void rfpcr ( FloatRegister fa); ++ void wfpcr ( FloatRegister fa); ++ void setfpec0 (); ++ void setfpec1 (); ++ void setfpec2 (); ++ void setfpec3 (); ++ ++ void frecs ( FloatRegister fa, FloatRegister fc ); //SW6B ++ void frecd ( FloatRegister fa, FloatRegister fc ); //SW6B ++ void fris ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void fris_g ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void fris_p ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void fris_z ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void fris_n ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void frid ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void frid_g ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void frid_p ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void frid_z ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void frid_n ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void fmas ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void fmad ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void fmss ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void fmsd ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void fnmas ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void fnmad ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void fnmss ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void fnmsd ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ ++ void fseleq ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void fselne ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void fsellt ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void fselle ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void fselgt ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void fselge ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ ++ void vaddw ( FloatRegister fa, 
FloatRegister fb, FloatRegister fc ); ++ void vaddw ( FloatRegister fa, int lit, FloatRegister fc ); ++ void vsubw ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vsubw ( FloatRegister fa, int lit, FloatRegister fc ); ++ void vcmpgew ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vcmpgew ( FloatRegister fa, int lit, FloatRegister fc ); ++ void vcmpeqw ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vcmpeqw ( FloatRegister fa, int lit, FloatRegister fc ); ++ void vcmplew ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vcmplew ( FloatRegister fa, int lit, FloatRegister fc ); ++ void vcmpltw ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vcmpltw ( FloatRegister fa, int lit, FloatRegister fc ); ++ void vcmpulew ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vcmpulew ( FloatRegister fa, int lit, FloatRegister fc ); ++ void vcmpultw ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vcmpultw ( FloatRegister fa, int lit, FloatRegister fc ); ++ void vsllw ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vsllw ( FloatRegister fa, int lit, FloatRegister fc ); ++ void vsrlw ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vsrlw ( FloatRegister fa, int lit, FloatRegister fc ); ++ void vsraw ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vsraw ( FloatRegister fa, int lit, FloatRegister fc ); ++ void vrolw ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vrolw ( FloatRegister fa, int lit, FloatRegister fc ); ++ void sllow ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void sllow ( FloatRegister fa, int lit, FloatRegister fc ); ++ void srlow ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void srlow ( FloatRegister fa, int lit, FloatRegister fc ); ++ void vaddl ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vaddl ( FloatRegister fa, int lit, FloatRegister fc ); ++ void vsubl ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vsubl ( FloatRegister fa, int lit, FloatRegister fc ); ++ void vsllb ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vsllb ( FloatRegister fa, int lit, FloatRegister fc ); //SW6B ++ void vsrlb ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vsrlb ( FloatRegister fa, int lit, FloatRegister fc ); //SW6B ++ void vsrab ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vsrab ( FloatRegister fa, int lit, FloatRegister fc ); //SW6B ++ void vrolb ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vrolb ( FloatRegister fa, int lit, FloatRegister fc ); //SW6B ++ void vsllh ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vsllh ( FloatRegister fa, int lit, FloatRegister fc ); //SW6B ++ void vsrlh ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vsrlh ( FloatRegister fa, int lit, FloatRegister fc ); //SW6B ++ void vsrah ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vsrah ( FloatRegister fa, int lit, FloatRegister fc ); //SW6B ++ void vrolh ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vrolh ( FloatRegister fa, int lit, FloatRegister fc ); //SW6B ++ void ctpopow ( FloatRegister fa, FloatRegister fc ); ++ void ctlzow ( FloatRegister fa, FloatRegister fc ); ++ void vslll ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vslll ( 
FloatRegister fa, int lit, FloatRegister fc ); //SW6B ++ void vsrll ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vsrll ( FloatRegister fa, int lit, FloatRegister fc ); //SW6B ++ void vsral ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vsral ( FloatRegister fa, int lit, FloatRegister fc ); //SW6B ++ void vroll ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vroll ( FloatRegister fa, int lit, FloatRegister fc ); //SW6B ++ void vmaxb ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vminb ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ ++ // some unimplemented SIMD ++ void vucaddw ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vucaddw ( FloatRegister fa, int lit, FloatRegister fc ); ++ void vucsubw ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vucsubw ( FloatRegister fa, int lit, FloatRegister fc ); ++ void vucaddh ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vucaddh ( FloatRegister fa, int lit, FloatRegister fc ); ++ void vucsubh ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vucsubh ( FloatRegister fa, int lit, FloatRegister fc ); ++ void vucaddb ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vucaddb ( FloatRegister fa, int lit, FloatRegister fc ); ++ void vucsubb ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vucsubb ( FloatRegister fa, int lit, FloatRegister fc ); ++ void sraow ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void sraow ( FloatRegister fa, int lit, FloatRegister fc ); //SW6B ++ void vsumw ( FloatRegister fa, FloatRegister fc ); //SW6B ++ void vsuml ( FloatRegister fa, FloatRegister fc ); //SW6B ++ void vcmpueqb ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vcmpueqb ( FloatRegister fa, int lit, FloatRegister fc ); //SW6B ++ void vcmpugtb ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vcmpugtb ( FloatRegister fa, int lit, FloatRegister fc ); //SW6B ++ void vmaxh ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vminh ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vmaxw ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vminw ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vmaxl ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vminl ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vumaxb ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vuminb ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vumaxh ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vuminh ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vumaxw ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vuminw ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vumaxl ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vuminl ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ ++ void vsm3msw ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vsm4key ( FloatRegister fa, int lit, FloatRegister fc ); //SW6B ++ void vsm4r ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vbinvw ( FloatRegister fb, FloatRegister fc ); //SW6B ++ ++ void vadds ( FloatRegister fa, 
FloatRegister fb, FloatRegister fc ); ++ void vaddd ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vsubs ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vsubd ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vmuls ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vmuld ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vdivs ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vdivd ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vsqrts ( FloatRegister fb, FloatRegister fc ); ++ void vsqrtd ( FloatRegister fb, FloatRegister fc ); ++ void vfcmpeq ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vfcmple ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vfcmplt ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vfcmpun ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vcpys ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vfmov ( FloatRegister fa, FloatRegister fc ); ++ void vcpyse ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vcpysn ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); ++ void vsums ( FloatRegister fa, FloatRegister fc ); //SW6B ++ void vsumd ( FloatRegister fa, FloatRegister fc ); //SW6B ++ void vfrecs ( FloatRegister fa, FloatRegister fc ); //SW6B ++ void vfrecd ( FloatRegister fa, FloatRegister fc ); //SW6B ++ void vfcvtsd ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void vfcvtds ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void vfcvtls ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void vfcvtld ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void vfcvtdl ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void vfcvtdl_g ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void vfcvtdl_p ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void vfcvtdl_z ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void vfcvtdl_n ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void vfris ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void vfris_g ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void vfris_p ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void vfris_z ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void vfris_n ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void vfrid ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void vfrid_g ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void vfrid_p ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void vfrid_z ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void vfrid_n ( FloatRegister fb, FloatRegister fc ); //SW6B ++ void vmaxs ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vmins ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vmaxd ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vmind ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ // end of unimplemented SIMD ++ ++ void vmas ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void vmad ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void vmss ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void vmsd ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void vnmas ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void vnmad ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); 
++ void vnmss ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void vnmsd ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void vfseleq ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void vfsellt ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void vfselle ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void vseleqw ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void vseleqw ( FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc ); ++ void vsellbcw ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void vsellbcw ( FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc ); ++ void vselltw ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void vselltw ( FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc ); ++ void vsellew ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void vsellew ( FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc ); ++ void vinsw ( FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc ); ++ void vinsf ( FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc ); ++ void vextw ( FloatRegister fa, int fmalit, FloatRegister fc); ++ void vextf ( FloatRegister fa, int fmalit, FloatRegister fc); ++ void vcpyw ( FloatRegister fa, FloatRegister fc); ++ void vcpyf ( FloatRegister fa, FloatRegister fc); ++ void vconw ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void vshfw ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void vcons ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void vcond ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ void vinsb ( FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc ); //SW6B ++ void vinsh ( FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc ); //SW6B ++ void vinsectlh ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vinsectlw ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vinsectll ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vinsectlb ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vshfq ( FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc ); //SW6B ++ void vshfqb ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW6B ++ void vcpyb ( FloatRegister fa, FloatRegister fc ); //SW6B ++ void vcpyh ( FloatRegister fa, FloatRegister fc ); //SW6B ++ void vsm3r ( FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc ); //SW6B ++ void vfcvtsh ( FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc ); //SW6B ++ void vfcvths ( FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc ); //SW6B ++ ++ void vldw_u ( FloatRegister fa, int atmdisp, Register Rb ); ++ void vstw_u ( FloatRegister fa, int atmdisp, Register Rb ); ++ void vlds_u ( FloatRegister fa, int atmdisp, Register Rb ); ++ void vsts_u ( FloatRegister fa, int atmdisp, Register Rb ); ++ void vldd_u ( FloatRegister fa, int atmdisp, Register Rb ); ++ void vstd_u ( FloatRegister fa, int atmdisp, Register Rb ); ++ void vstw_ul ( FloatRegister fa, int atmdisp, Register Rb ); ++ void vstw_uh ( FloatRegister fa, int atmdisp, Register Rb ); ++ void vsts_ul ( 
FloatRegister fa, int atmdisp, Register Rb ); ++ void vsts_uh ( FloatRegister fa, int atmdisp, Register Rb ); ++ void vstd_ul ( FloatRegister fa, int atmdisp, Register Rb ); ++ void vstd_uh ( FloatRegister fa, int atmdisp, Register Rb ); ++ void lbr ( int palfn ); //SW6B ++ void ldbu_a ( Register Ra, int atmdisp, Register Rb ); //SW6B ++ void ldhu_a ( Register Ra, int atmdisp, Register Rb ); //SW6B ++ void ldw_a ( Register Ra, int atmdisp, Register Rb ); //SW6B ++ void ldl_a ( Register Ra, int atmdisp, Register Rb ); //SW6B ++ void stb_a ( Register Ra, int atmdisp, Register Rb ); //SW6B ++ void sth_a ( Register Ra, int atmdisp, Register Rb ); //SW6B ++ void stw_a ( Register Ra, int atmdisp, Register Rb ); //SW6B ++ void stl_a ( Register Ra, int atmdisp, Register Rb ); //SW6B ++ void flds_a ( FloatRegister fa, int atmdisp, Register Rb ); //SW6B ++ void fldd_a ( FloatRegister fa, int atmdisp, Register Rb ); //SW6B ++ void fsts_a ( FloatRegister fa, int atmdisp, Register Rb ); //SW6B ++ void fstd_a ( FloatRegister fa, int atmdisp, Register Rb ); //SW6B ++ void dpfhr ( int th, int atmdisp, Register Rb ); //SW6B ++ void dpfhw ( int th, int atmdisp, Register Rb ); //SW6B ++ ++ //TODO: 0x1A.00-0x1c.E SIMD instructions. ++ ++ ++ void ldbu ( Register Ra, int mdisp, Register Rb ); ++ void ldhu ( Register Ra, int mdisp, Register Rb ); ++ void ldw ( Register Ra, int mdisp, Register Rb ); ++ void ldl ( Register Ra, int mdisp, Register Rb ); ++ void ldl_u ( Register Ra, int mdisp, Register Rb ); ++ void pri_ld ( Register Ra, int ev6hwdisp, Register Rb ); ++ void flds ( FloatRegister fa, int mdisp, Register Rb ); ++ void fldd ( FloatRegister fa, int mdisp, Register Rb ); ++ ++ void stb ( Register Ra, int mdisp, Register Rb ); ++ void sth ( Register Ra, int mdisp, Register Rb ); ++ void stw ( Register Ra, int mdisp, Register Rb ); ++ void stl ( Register Ra, int mdisp, Register Rb ); ++ void stl_u ( Register Ra, int mdisp, Register Rb ); ++ void pri_st ( Register Ra, int ev6hwdisp, Register Rb ); ++ void fsts ( FloatRegister fa, int mdisp, Register Rb ); ++ void fstd ( FloatRegister fa, int mdisp, Register Rb ); ++ ++ // Branch Instructions. 
++ void beq ( Register Ra, int bdisp ); ++ void bne ( Register Ra, int bdisp ); ++ void blt ( Register Ra, int bdisp ); ++ void ble ( Register Ra, int bdisp ); ++ void bgt ( Register Ra, int bdisp ); ++ void bge ( Register Ra, int bdisp ); ++ void blbc ( Register Ra, int bdisp ); ++ void blbs ( Register Ra, int bdisp ); ++ void fbeq ( FloatRegister fa, int bdisp ); ++ void fbne ( FloatRegister fa, int bdisp ); ++ void fblt ( FloatRegister fa, int bdisp ); ++ void fble ( FloatRegister fa, int bdisp ); ++ void fbgt ( FloatRegister fa, int bdisp ); ++ void fbge ( FloatRegister fa, int bdisp ); ++ void ldi ( Register Ra, int mdisp, Register Rb ); ++ void ldih ( Register Ra, int mdisp, Register Rb ); ++ ++ // cache control instruction ++ void s_fillcs ( int mdisp, Register Rb ); ++ void s_fillde ( int mdisp, Register Rb ); ++ void fillde ( int mdisp, Register Rb ); ++ void fillde_e ( int mdisp, Register Rb ); ++ void fillcs ( int mdisp, Register Rb ); ++ void fillcs_e ( int mdisp, Register Rb ); ++ void e_fillcs ( int mdisp, Register Rb ); ++ void e_fillde ( int mdisp, Register Rb ); ++ void flushd ( int mdisp, Register Rb ); ++ void evictdl ( int mdisp, Register Rb ); ++ void evictdg ( int mdisp, Register Rb ); ++ ++#ifdef SW64 ++ // ------------Memory-instructions------------- ++ void ldb ( Register Ra, const MemOperand& rs ); // sw add ++ void ldbu ( Register Ra, const MemOperand& rs ); ++ void ldh ( Register Ra, const MemOperand& rs ); // sw add ++ void ldhu ( Register Ra, const MemOperand& rs ); ++ void ldw ( Register Ra, const MemOperand& rs ); ++ void ldwu ( Register Ra, const MemOperand& rs ); // sw add ++ void ldl ( Register Ra, const MemOperand& rs ); ++ void flds ( FloatRegister fa, const MemOperand& rs ); ++ void fldd ( FloatRegister fa, const MemOperand& rs ); ++ ++ void stb ( Register Ra, const MemOperand& rs ); ++ void sth ( Register Ra, const MemOperand& rs ); ++ void stw ( Register Ra, const MemOperand& rs ); ++ void stl ( Register Ra, const MemOperand& rs ); ++ void fsts ( FloatRegister fa, const MemOperand& rs ); ++ void fstd ( FloatRegister fa, const MemOperand& rs ); ++ ++ void beq(Register Ra, Label* L) { ++ beq(Ra, branch_offset(L) >> 2); ++ } ++ void bne(Register Ra, Label* L) { ++ bne(Ra, branch_offset(L) >> 2); ++ } ++ void blt(Register Ra, Label* L) { ++ blt(Ra, branch_offset(L) >> 2); ++ } ++ void ble(Register Ra, Label* L) { ++ ble(Ra, branch_offset(L) >> 2); ++ } ++ void bgt(Register Ra, Label* L) { ++ bgt(Ra, branch_offset(L) >> 2); ++ } ++ void bge(Register Ra, Label* L) { ++ bge(Ra, branch_offset(L) >> 2); ++ } ++ ++ void fbr(Label* L) { ++ fbeq(f31, branch_offset(L) >> 2); ++ } ++ void fbeq(FloatRegister fa, Label* L) { ++ fbeq(fa, branch_offset(L) >> 2); ++ } ++ void fbne(FloatRegister fa, Label* L) { ++ fbne(fa, branch_offset(L) >> 2); ++ } ++ void fblt(FloatRegister fa, Label* L) { ++ fblt(fa, branch_offset(L) >> 2); ++ } ++ void fble(FloatRegister fa, Label* L) { ++ fble(fa, branch_offset(L) >> 2); ++ } ++ void fbgt(FloatRegister fa, Label* L) { ++ fbgt(fa, branch_offset(L) >> 2); ++ } ++ void fbge(FloatRegister fa, Label* L) { ++ fbge(fa, branch_offset(L) >> 2); ++ } ++ void fmov(FPURegister rs, FPURegister rd) { ++ fcpys(rs, rs, rd); ++ } ++#endif ++#endif ++ ++ // Check the code size generated from label to here. ++ int SizeOfCodeGeneratedSince(Label* label) { ++ return pc_offset() - label->pos(); ++ } ++ ++ // Check the number of instructions generated from label to here. 
++ int InstructionsGeneratedSince(Label* label) { ++ return SizeOfCodeGeneratedSince(label) / kInstrSize; ++ } ++ ++ // Class for scoping postponing the trampoline pool generation. ++ class BlockTrampolinePoolScope { ++ public: ++ explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) { ++ assem_->StartBlockTrampolinePool(); ++ } ++ ~BlockTrampolinePoolScope() { ++ assem_->EndBlockTrampolinePool(); ++ } ++ ++ private: ++ Assembler* assem_; ++ ++ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope); ++ }; ++ ++ // Class for postponing the assembly buffer growth. Typically used for ++ // sequences of instructions that must be emitted as a unit, before ++ // buffer growth (and relocation) can occur. ++ // This blocking scope is not nestable. ++ class BlockGrowBufferScope { ++ public: ++ explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) { ++ assem_->StartBlockGrowBuffer(); ++ } ++ ~BlockGrowBufferScope() { ++ assem_->EndBlockGrowBuffer(); ++ } ++ ++ private: ++ Assembler* assem_; ++ ++ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope); ++ }; ++ ++ // Record a deoptimization reason that can be used by a log or cpu profiler. ++ // Use --trace-deopt to enable. ++ void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position, ++ int id); ++ ++ static int RelocateInternalReference(RelocInfo::Mode rmode, Address pc, ++ intptr_t pc_delta); ++ ++ // Writes a single byte or word of data in the code stream. Used for ++ // inline tables, e.g., jump-tables. ++ void db(uint8_t data); ++ void dd(uint32_t data); ++ void dq(uint64_t data); ++ void dp(uintptr_t data) { dq(data); } ++ void dd(Label* label); ++ ++ // Postpone the generation of the trampoline pool for the specified number of ++ // instructions. ++ void BlockTrampolinePoolFor(int instructions); ++ ++ // Check if there is less than kGap bytes available in the buffer. ++ // If this is the case, we need to grow the buffer before emitting ++ // an instruction or relocation information. ++ inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; } ++ ++ // Get the number of bytes available in the buffer. ++ inline intptr_t available_space() const { ++ return reloc_info_writer.pos() - pc_; ++ } ++ ++ // Read/patch instructions. ++ static Instr instr_at(Address pc) { return *reinterpret_cast(pc); } ++ static void instr_at_put(Address pc, Instr instr) { ++ *reinterpret_cast(pc) = instr; ++ } ++ Instr instr_at(int pos) { ++ return *reinterpret_cast(buffer_start_ + pos); ++ } ++ void instr_at_put(int pos, Instr instr) { ++ *reinterpret_cast(buffer_start_ + pos) = instr; ++ } ++ ++ // Check if an instruction is a branch of some kind. 
++#ifdef SW64 ++ static bool IsLdih(Instr instr); ++ static bool IsLdi(Instr instr); ++#endif ++ static bool IsBranch(Instr instr); ++ ++ static bool IsBeq(Instr instr); ++ static bool IsBne(Instr instr); ++ ++ static uint32_t GetLabelConst(Instr instr); ++ ++#ifdef SW64 ++ static uint32_t GetSwRa(Instr instr); ++ static uint32_t GetSwRb(Instr instr); ++ static uint32_t GetSwRc(Instr instr); ++ ++ static int32_t GetSwOpcodeField(Instr instr); ++ static int32_t GetSwOpcodeAndFunctionField(Instr instr); ++ static uint32_t GetSwImmediate8(Instr instr); ++ static uint32_t GetSwImmediate16(Instr instr); ++#endif ++ ++ static bool IsAddImmediate(Instr instr); ++ ++ static bool IsAndImmediate(Instr instr); ++ static bool IsEmittedConstant(Instr instr); ++ ++ void CheckTrampolinePool(); ++ ++ bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; } ++ static bool IsCompactBranchSupported() { return kArchVariant == kSw64r3; } ++ ++ inline int UnboundLabelsCount() { return unbound_labels_count_; } ++ ++ protected: ++ // Readable constants for base and offset adjustment helper, these indicate if ++ // aside from offset, another value like offset + 4 should fit into int16. ++ enum class OffsetAccessType : bool { ++ SINGLE_ACCESS = false, ++ TWO_ACCESSES = true ++ }; ++ ++ // Helper function for memory load/store using base register and offset. ++ void AdjustBaseAndOffset( ++ MemOperand* src, ++ OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS, ++ int second_access_add_to_offset = 4); ++ ++ inline static void set_target_internal_reference_encoded_at(Address pc, ++ Address target); ++ ++ int64_t buffer_space() const { return reloc_info_writer.pos() - pc_; } ++ ++ // Decode branch instruction at pos and return branch target pos. ++ int target_at(int pos, bool is_internal); ++ ++ // Patch branch instruction at pos to branch to given branch target pos. ++ void target_at_put(int pos, int target_pos, bool is_internal); ++ ++ // Say if we need to relocate with this mode. ++ bool MustUseReg(RelocInfo::Mode rmode); ++ ++ // Record reloc info for current pc_. ++ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0); ++ ++ // Block the emission of the trampoline pool before pc_offset. ++ void BlockTrampolinePoolBefore(int pc_offset) { ++ if (no_trampoline_pool_before_ < pc_offset) ++ no_trampoline_pool_before_ = pc_offset; ++ } ++ ++ void StartBlockTrampolinePool() { ++ trampoline_pool_blocked_nesting_++; ++ } ++ ++ void EndBlockTrampolinePool() { ++ trampoline_pool_blocked_nesting_--; ++ if (trampoline_pool_blocked_nesting_ == 0) { ++ CheckTrampolinePoolQuick(1); ++ } ++ } ++ ++ bool is_trampoline_pool_blocked() const { ++ return trampoline_pool_blocked_nesting_ > 0; ++ } ++ ++ bool has_exception() const { ++ return internal_trampoline_exception_; ++ } ++ ++ bool is_trampoline_emitted() const { ++ return trampoline_emitted_; ++ } ++ ++ // Temporarily block automatic assembly buffer growth. 
++ void StartBlockGrowBuffer() { ++ DCHECK(!block_buffer_growth_); ++ block_buffer_growth_ = true; ++ } ++ ++ void EndBlockGrowBuffer() { ++ DCHECK(block_buffer_growth_); ++ block_buffer_growth_ = false; ++ } ++ ++ bool is_buffer_growth_blocked() const { ++ return block_buffer_growth_; ++ } ++ ++ void EmitForbiddenSlotInstruction() { ++ if (IsPrevInstrCompactBranch()) { ++ nop(); ++ } ++ } ++ ++ void CheckTrampolinePoolQuick(int extra_instructions = 0) { ++ if (pc_offset() >= next_buffer_check_ - extra_instructions * kInstrSize) { ++ CheckTrampolinePool(); ++ } ++ } ++ ++ void set_last_call_pc_(byte* pc) { last_call_pc_ = pc; } ++ ++ private: ++ // Avoid overflows for displacements etc. ++ static const int kMaximalBufferSize = 512 * MB; ++ ++ // Buffer size and constant pool distance are checked together at regular ++ // intervals of kBufferCheckInterval emitted bytes. ++ static constexpr int kBufferCheckInterval = 1 * KB / 2; ++ ++ // Code generation. ++ // The relocation writer's position is at least kGap bytes below the end of ++ // the generated instructions. This is so that multi-instruction sequences do ++ // not have to check for overflow. The same is true for writes of large ++ // relocation info entries. ++ static constexpr int kGap = 64; ++ STATIC_ASSERT(AssemblerBase::kMinimalBufferSize >= 2 * kGap); ++ ++ // Repeated checking whether the trampoline pool should be emitted is rather ++ // expensive. By default we only check again once a number of instructions ++ // has been generated. ++ static constexpr int kCheckConstIntervalInst = 32; ++ static constexpr int kCheckConstInterval = ++ kCheckConstIntervalInst * kInstrSize; ++ ++ int next_buffer_check_; // pc offset of next buffer check. ++ ++ // Emission of the trampoline pool may be blocked in some code sequences. ++ int trampoline_pool_blocked_nesting_; // Block emission if this is not zero. ++ int no_trampoline_pool_before_; // Block emission before this pc offset. ++ ++ // Keep track of the last emitted pool to guarantee a maximal distance. ++ int last_trampoline_pool_end_; // pc offset of the end of the last pool. ++ ++ // Automatic growth of the assembly buffer may be blocked for some sequences. ++ bool block_buffer_growth_; // Block growth when true. ++ ++ // Relocation information generation. ++ // Each relocation is encoded as a variable size value. ++ static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize; ++ RelocInfoWriter reloc_info_writer; ++ ++ // The bound position, before this we cannot do instruction elimination. ++ int last_bound_pos_; ++ ++ // Readable constants for compact branch handling in emit() ++ enum class CompactBranchType : bool { NO = false, COMPACT_BRANCH = true }; ++ ++ // Code emission. ++ inline void CheckBuffer(); ++ void GrowBuffer(); ++ inline void emit(Instr x, ++ CompactBranchType is_compact_branch = CompactBranchType::NO); ++ inline void emit(uint64_t x); ++ inline void CheckForEmitInForbiddenSlot(); ++ template ++ inline void EmitHelper(T x); ++ inline void EmitHelper(Instr x, CompactBranchType is_compact_branch); ++ ++#ifdef SW64 ++ inline void emitSW(Instr x); ++ inline void emitSW(uint64_t x); ++#endif ++ ++ // Instruction generation. ++ // We have 3 different kind of encoding layout on SW64. ++ // However due to many different types of objects encoded in the same fields ++ // we have quite a few aliases for each mode. ++ // Using the same structure to refer to Register and FPURegister would spare a ++ // few aliases, but mixing both does not look clean to me. 
++ // Anyway we could surely implement this differently. ++ ++ inline bool is_valid_msa_df_m(SecondaryField bit_df, uint32_t m) { ++ switch (bit_df) { ++ case BIT_DF_b: ++ return is_uint3(m); ++ case BIT_DF_h: ++ return is_uint4(m); ++ case BIT_DF_w: ++ return is_uint5(m); ++ case BIT_DF_d: ++ return is_uint6(m); ++ default: ++ return false; ++ } ++ } ++ ++ inline bool is_valid_msa_df_n(SecondaryField elm_df, uint32_t n) { ++ switch (elm_df) { ++ case ELM_DF_B: ++ return is_uint4(n); ++ case ELM_DF_H: ++ return is_uint3(n); ++ case ELM_DF_W: ++ return is_uint2(n); ++ case ELM_DF_D: ++ return is_uint1(n); ++ default: ++ return false; ++ } ++ } ++ ++#ifdef SW64 ++ void GenInstrB_SW(Opcode_ops_bra opcode, ++ Register Ra, ++ int32_t disp); ++ ++ void GenInstrFB_SW(Opcode_ops_bra opcode, ++ FloatRegister fa, ++ int32_t disp); ++ ++ void GenInstrM_SW(Opcode_ops_mem opcode, ++ Register Ra, ++ int16_t disp, ++ Register Rb); ++ ++ void GenInstrFM_SW(Opcode_ops_mem opcode, ++ FloatRegister fa, ++ int16_t disp, ++ Register Rb); ++ ++ void GenInstrMWithFun_SW(Opcode_ops_atmem opcode, ++ Register Ra, ++ int16_t disp, ++ Register Rb); ++ ++ void GenInstrR_SW(Opcode_ops_opr opcode, ++ Register Ra, ++ Register Rb, ++ Register Rc); ++ ++ void GenInstrI_SW(Opcode_ops_oprl opcode, ++ Register Ra, ++ int16_t imm, ++ Register Rc); ++ ++ void GenInstrFR_SW(Opcode_ops_fp opcode, ++ FloatRegister fa, ++ FloatRegister fb, ++ FloatRegister fc); ++ ++ void GenInstrFR_SW(Opcode_ops_fp opcode, ++ FloatRegister fb, ++ FloatRegister fc); ++ ++ void GenInstrFR_SW(Opcode_ops_fpl opcode, ++ FloatRegister fa, ++ int16_t imm, ++ FloatRegister fc); ++ ++ void GenInstrFR_SW(Opcode_ops_fpl opcode, ++ FloatRegister fa, ++ FloatRegister fb, ++ int16_t fmalit, ++ FloatRegister fc); ++ ++ void GenInstrFMA_SW(Opcode_ops_fmal opcode, ++ FloatRegister fa, ++ FloatRegister fb, ++ int16_t fmalit, ++ FloatRegister fc); ++ ++ void GenInstrFMA_SW(Opcode_ops_fmal opcode, ++ FloatRegister fa, ++ int16_t fmalit, ++ FloatRegister fc); ++ ++ void GenInstrFMA_SW(Opcode_ops_fma opcode, ++ FloatRegister fa, ++ FloatRegister fb, ++ FloatRegister f3, ++ FloatRegister fc); ++ ++ void GenInstrFMA_SW(Opcode_ops_fma opcode, ++ FloatRegister fa, ++ FloatRegister fb, ++ FloatRegister fc); ++ ++ void GenInstrSIMD_SW(Opcode_ops_atmem opcode, ++ FloatRegister fa, ++ int16_t atmdisp, ++ Register Rb ); ++ ++ void GenInstrSelR_SW(Opcode_ops_sel opcode, ++ Register Ra, ++ Register Rb, ++ Register R3, ++ Register Rc); ++ ++ void GenInstrSelI_SW(Opcode_ops_sel_l opcode, ++ Register Ra, ++ int32_t imm, ++ Register R3, ++ Register Rc); ++ ++ // Helpers. ++ void SwLoadRegPlusOffsetToAt(const MemOperand& src); ++#endif ++ ++ // Labels. ++ void print(const Label* L); ++ void bind_to(Label* L, int pos); ++ void next(Label* L, bool is_internal); ++ ++ // One trampoline consists of: ++ // - space for trampoline slots, ++ // - space for labels. ++ // ++ // Space for trampoline slots is equal to slot_count * 2 * kInstrSize. ++ // Space for trampoline slots precedes space for labels. Each label is of one ++ // instruction size, so total amount for labels is equal to ++ // label_count * kInstrSize. 
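++  // Worked example (illustrative numbers only): with kInstrSize == 4, a
++  // trampoline sized for 16 slots and 8 labels reserves 16 * 2 * 4 = 128 bytes
++  // of slot space followed by 8 * 4 = 32 bytes of label space.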
++ class Trampoline { ++ public: ++ Trampoline() { ++ start_ = 0; ++ next_slot_ = 0; ++ free_slot_count_ = 0; ++ end_ = 0; ++ } ++ Trampoline(int start, int slot_count) { ++ start_ = start; ++ next_slot_ = start; ++ free_slot_count_ = slot_count; ++ end_ = start + slot_count * kTrampolineSlotsSize; ++ } ++ int start() { ++ return start_; ++ } ++ int end() { ++ return end_; ++ } ++ int take_slot() { ++ int trampoline_slot = kInvalidSlotPos; ++ if (free_slot_count_ <= 0) { ++ // We have run out of space on trampolines. ++ // Make sure we fail in debug mode, so we become aware of each case ++ // when this happens. ++ DCHECK(0); ++ // Internal exception will be caught. ++ } else { ++ trampoline_slot = next_slot_; ++ free_slot_count_--; ++ next_slot_ += kTrampolineSlotsSize; ++ } ++ return trampoline_slot; ++ } ++ ++ private: ++ int start_; ++ int end_; ++ int next_slot_; ++ int free_slot_count_; ++ }; ++ ++ int32_t get_trampoline_entry(int32_t pos); ++ int unbound_labels_count_; ++ // After trampoline is emitted, long branches are used in generated code for ++ // the forward branches whose target offsets could be beyond reach of branch ++ // instruction. We use this information to trigger different mode of ++ // branch instruction generation, where we use jump instructions rather ++ // than regular branch instructions. ++ bool trampoline_emitted_; ++ static constexpr int kInvalidSlotPos = -1; ++ ++ // Internal reference positions, required for unbounded internal reference ++ // labels. ++ std::set internal_reference_positions_; ++ bool is_internal_reference(Label* L) { ++ return internal_reference_positions_.find(L->pos()) != ++ internal_reference_positions_.end(); ++ } ++ ++ void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; } ++ void ClearCompactBranchState() { prev_instr_compact_branch_ = false; } ++ bool prev_instr_compact_branch_ = false; ++ ++ Trampoline trampoline_; ++ bool internal_trampoline_exception_; ++ ++ // Keep track of the last Call's position to ensure that safepoint can get the ++ // correct information even if there is a trampoline immediately after the ++ // Call. ++ byte* last_call_pc_; ++ ++ RegList scratch_register_list_; ++ ++ private: ++ void AllocateAndInstallRequestedHeapObjects(Isolate* isolate); ++ ++ int WriteCodeComments(); ++ ++ friend class RegExpMacroAssemblerSW64; ++ friend class RelocInfo; ++ friend class BlockTrampolinePoolScope; ++ friend class EnsureSpace; ++}; ++ ++ ++class EnsureSpace { ++ public: ++ explicit inline EnsureSpace(Assembler* assembler); ++}; ++ ++class V8_EXPORT_PRIVATE UseScratchRegisterScope { ++ public: ++ explicit UseScratchRegisterScope(Assembler* assembler); ++ ~UseScratchRegisterScope(); ++ ++ Register Acquire(); ++ bool hasAvailable() const; ++ ++ private: ++ RegList* available_; ++ RegList old_available_; ++}; ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_CODEGEN_SW64_ASSEMBLER_SW64_H_ +diff --git a/src/3rdparty/chromium/v8/src/codegen/sw64/constants-sw64.cc b/src/3rdparty/chromium/v8/src/codegen/sw64/constants-sw64.cc +new file mode 100755 +index 000000000..76c5be76f +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/codegen/sw64/constants-sw64.cc +@@ -0,0 +1,160 @@ ++// Copyright 2011 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. 
++ ++#if V8_TARGET_ARCH_SW64 ++ ++#include "src/codegen/sw64/constants-sw64.h" ++ ++namespace v8 { ++namespace internal { ++ ++ ++// ----------------------------------------------------------------------------- ++// Registers. ++ ++ ++// These register names are defined in a way to match the native disassembler ++// formatting. See for example the command "objdump -d ". ++const char* Registers::names_[kNumRegisters] = { ++ "v0", ++ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", ++ "s0", "s1", "s2", "s3", "s4", "s5", "fp", ++ "a0", "a1", "a2", "a3", "a4", "a5", ++ "t8", "t9", "t10", "t11", ++ "ra", ++ "t12", ++ "at", ++ "gp", ++ "sp", ++ "zero_reg" ++}; ++ ++ ++// List of alias names which can be used when referring to SW64 registers. ++const Registers::RegisterAlias Registers::aliases_[] = { ++ {14, "cp"}, ++ {15, "s6"}, ++ {15, "s6_fp"}, ++ {31, "zero_reg"}, ++ {kInvalidRegister, nullptr}}; ++ ++const char* Registers::Name(int reg) { ++ const char* result; ++ if ((0 <= reg) && (reg < kNumRegisters)) { ++ result = names_[reg]; ++ } else { ++ result = "noreg"; ++ } ++ return result; ++} ++ ++ ++int Registers::Number(const char* name) { ++ // Look through the canonical names. ++ for (int i = 0; i < kNumRegisters; i++) { ++ if (strcmp(names_[i], name) == 0) { ++ return i; ++ } ++ } ++ ++ // Look through the alias names. ++ int i = 0; ++ while (aliases_[i].reg != kInvalidRegister) { ++ if (strcmp(aliases_[i].name, name) == 0) { ++ return aliases_[i].reg; ++ } ++ i++; ++ } ++ ++ // No register with the reguested name found. ++ return kInvalidRegister; ++} ++ ++ ++const char* FPURegisters::names_[kNumFPURegisters] = { ++ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", ++ "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", ++ "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31" ++}; ++ ++ ++// List of alias names which can be used when referring to SW64 registers. ++const FPURegisters::RegisterAlias FPURegisters::aliases_[] = { ++ {kInvalidRegister, nullptr}}; ++ ++const char* FPURegisters::Name(int creg) { ++ const char* result; ++ if ((0 <= creg) && (creg < kNumFPURegisters)) { ++ result = names_[creg]; ++ } else { ++ result = "nocreg"; ++ } ++ return result; ++} ++ ++ ++int FPURegisters::Number(const char* name) { ++ // Look through the canonical names. ++ for (int i = 0; i < kNumFPURegisters; i++) { ++ if (strcmp(names_[i], name) == 0) { ++ return i; ++ } ++ } ++ ++ // Look through the alias names. ++ int i = 0; ++ while (aliases_[i].creg != kInvalidRegister) { ++ if (strcmp(aliases_[i].name, name) == 0) { ++ return aliases_[i].creg; ++ } ++ i++; ++ } ++ ++ // No Cregister with the reguested name found. ++ return kInvalidFPURegister; ++} ++ ++const char* MSARegisters::names_[kNumMSARegisters] = { ++ "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", ++ "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", ++ "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"}; ++ ++const MSARegisters::RegisterAlias MSARegisters::aliases_[] = { ++ {kInvalidRegister, nullptr}}; ++ ++const char* MSARegisters::Name(int creg) { ++ const char* result; ++ if ((0 <= creg) && (creg < kNumMSARegisters)) { ++ result = names_[creg]; ++ } else { ++ result = "nocreg"; ++ } ++ return result; ++} ++ ++int MSARegisters::Number(const char* name) { ++ // Look through the canonical names. 
++ for (int i = 0; i < kNumMSARegisters; i++) { ++ if (strcmp(names_[i], name) == 0) { ++ return i; ++ } ++ } ++ ++ // Look through the alias names. ++ int i = 0; ++ while (aliases_[i].creg != kInvalidRegister) { ++ if (strcmp(aliases_[i].name, name) == 0) { ++ return aliases_[i].creg; ++ } ++ i++; ++ } ++ ++ // No Cregister with the reguested name found. ++ return kInvalidMSARegister; ++} ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_TARGET_ARCH_SW64 +diff --git a/src/3rdparty/chromium/v8/src/codegen/sw64/constants-sw64.h b/src/3rdparty/chromium/v8/src/codegen/sw64/constants-sw64.h +new file mode 100755 +index 000000000..7bfe7ea95 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/codegen/sw64/constants-sw64.h +@@ -0,0 +1,2756 @@ ++// Copyright 2012 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#ifndef V8_CODEGEN_SW64_CONSTANTS_SW64_H_ ++#define V8_CODEGEN_SW64_CONSTANTS_SW64_H_ ++ ++#include "src/base/logging.h" ++#include "src/base/macros.h" ++#include "src/common/globals.h" ++ ++// UNIMPLEMENTED_ macro for SW64. ++#ifdef DEBUG ++#define UNIMPLEMENTED_SW64() \ ++ v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \ ++ __FILE__, __LINE__, __func__) ++#else ++#define UNIMPLEMENTED_SW64() ++#endif ++ ++#define UNSUPPORTED_SW64() v8::internal::PrintF("Unsupported instruction.\n") ++ ++enum ArchVariants { ++ kSw64r2, ++ kSw64r3 ++}; ++ ++ ++#ifdef _SW64_ARCH_SW64R2 ++ static const ArchVariants kArchVariant = kSw64r2; ++#elif _SW64_ARCH_SW64R3 ++ static const ArchVariants kArchVariant = kSw64r3; ++#else ++ static const ArchVariants kArchVariant = kSw64r2; ++#endif ++ ++ ++ enum Endianness { kLittle, kBig }; ++ ++#if defined(V8_TARGET_LITTLE_ENDIAN) ++ static const Endianness kArchEndian = kLittle; ++#elif defined(V8_TARGET_BIG_ENDIAN) ++ static const Endianness kArchEndian = kBig; ++#else ++#error Unknown endianness ++#endif ++ ++// TODO(plind): consider renaming these ... ++#if(defined(__sw64_hard_float) && __sw64_hard_float != 0) ++// Use floating-point coprocessor instructions. This flag is raised when ++// -mhard-float is passed to the compiler. ++const bool IsSw64SoftFloatABI = false; ++#elif(defined(__sw64_soft_float) && __sw64_soft_float != 0) ++// This flag is raised when -msoft-float is passed to the compiler. ++// Although FPU is a base requirement for v8, soft-float ABI is used ++// on soft-float systems with FPU kernel emulation. ++const bool IsSw64SoftFloatABI = true; ++#else ++const bool IsSw64SoftFloatABI = true; ++#endif ++ ++#if defined(V8_TARGET_LITTLE_ENDIAN) ++const uint32_t kSwLwrOffset = 0; ++const uint32_t kSwLwlOffset = 3; ++const uint32_t kSwSwrOffset = 0; ++const uint32_t kSwSwlOffset = 3; ++const uint32_t kSwLdrOffset = 0; ++const uint32_t kSwLdlOffset = 7; ++const uint32_t kSwSdrOffset = 0; ++const uint32_t kSwSdlOffset = 7; ++#else ++#error Unknown endianness ++#endif ++ ++#if defined(V8_TARGET_LITTLE_ENDIAN) ++const uint32_t kLeastSignificantByteInInt32Offset = 0; ++const uint32_t kLessSignificantWordInDoublewordOffset = 0; ++#else ++#error Unknown endianness ++#endif ++ ++#ifndef __STDC_FORMAT_MACROS ++#define __STDC_FORMAT_MACROS ++#endif ++#include ++ ++ ++// Defines constants and accessor classes to assemble, disassemble ++// ++ ++namespace v8 { ++namespace internal { ++ ++// TODO(sigurds): Change this value once we use relative jumps. 
++constexpr size_t kMaxPCRelativeCodeRangeInMB = 0; ++ ++// ----------------------------------------------------------------------------- ++// Registers and FPURegisters. ++ ++// Number of general purpose registers. ++const int kNumRegisters = 32; ++const int kInvalidRegister = -1; ++ ++// Number of registers with HI, LO, and pc. ++const int kNumSimuRegisters = 35; ++ ++// In the simulator, the PC register is simulated as the 34th register. ++const int kPCRegister = 34; ++ ++// Number coprocessor registers. ++const int kNumFPURegisters = 32; ++const int kInvalidFPURegister = -1; ++ ++// Number of MSA registers ++const int kNumMSARegisters = 32; ++const int kInvalidMSARegister = -1; ++ ++const int kInvalidMSAControlRegister = -1; ++const int kMSAIRRegister = 0; ++const int kMSACSRRegister = 1; ++const int kMSARegSize = 128; ++const int kMSALanesByte = kMSARegSize / 8; ++const int kMSALanesHalf = kMSARegSize / 16; ++const int kMSALanesWord = kMSARegSize / 32; ++const int kMSALanesDword = kMSARegSize / 64; ++ ++// FPU (coprocessor 1) control registers. Currently only FCSR is implemented. ++const int kFCSRRegister = 31; ++const int kInvalidFPUControlRegister = -1; ++const uint32_t kFPUInvalidResult = static_cast(1u << 31) - 1; ++const int32_t kFPUInvalidResultNegative = static_cast(1u << 31); ++const uint64_t kFPU64InvalidResult = ++ static_cast(static_cast(1) << 63) - 1; ++const int64_t kFPU64InvalidResultNegative = ++ static_cast(static_cast(1) << 63); ++ ++// FCSR constants. ++const uint32_t kFCSRInexactFlagBit = 2; ++const uint32_t kFCSRUnderflowFlagBit = 3; ++const uint32_t kFCSROverflowFlagBit = 4; ++const uint32_t kFCSRDivideByZeroFlagBit = 5; ++const uint32_t kFCSRInvalidOpFlagBit = 6; ++const uint32_t kFCSRNaN2008FlagBit = 18; ++ ++const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit; ++const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit; ++const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit; ++const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit; ++const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit; ++const uint32_t kFCSRNaN2008FlagMask = 1 << kFCSRNaN2008FlagBit; ++ ++const uint32_t kFCSRFlagMask = ++ kFCSRInexactFlagMask | ++ kFCSRUnderflowFlagMask | ++ kFCSROverflowFlagMask | ++ kFCSRDivideByZeroFlagMask | ++ kFCSRInvalidOpFlagMask; ++ ++const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask; ++ ++#ifdef SW64 ++// SW64 FCSR constants. 
++const uint64_t sFCSROverflowIntegerFlagBit = 57; ++const uint64_t sFCSRInexactFlagBit = 56; ++const uint64_t sFCSRUnderflowFlagBit = 55; ++const uint64_t sFCSROverflowFlagBit = 54; ++const uint64_t sFCSRDivideByZeroFlagBit = 53; ++const uint64_t sFCSRInvalidOpFlagBit = 52; ++ ++const uint64_t sFCSRInexactControlBit = 62; ++const uint64_t sFCSRUnderflowControlBit = 61; ++const uint64_t sFCSROverflowControlBit = 51; ++const uint64_t sFCSRDivideByZeroControlBit = 50; ++const uint64_t sFCSRInvalidOpControlBit = 49; ++ ++const uint64_t sFCSRRound0Bit = 58; ++const uint64_t sFCSRRound1Bit = 59; ++ ++const uint64_t sFCSRControlMask = (0x1UL << sFCSRInexactControlBit) | ++ (0x1UL << sFCSRUnderflowControlBit) | ++ (0x1UL << sFCSROverflowControlBit) | ++ (0x1UL << sFCSRDivideByZeroControlBit)| ++ (0x1UL << sFCSRInvalidOpControlBit); ++ ++const uint64_t sFCSRRound0Mask = 0x1UL << sFCSRRound0Bit; ++const uint64_t sFCSRRound1Mask = 0x1UL << sFCSRRound1Bit; ++ ++const uint64_t sFCSROverflowIntegerFlagMask = 0x1UL << sFCSROverflowIntegerFlagBit; ++const uint64_t sFCSRInexactFlagMask = 0x1UL << sFCSRInexactFlagBit; ++const uint64_t sFCSRUnderflowFlagMask = 0x1UL << sFCSRUnderflowFlagBit; ++const uint64_t sFCSROverflowFlagMask = 0x1UL << sFCSROverflowFlagBit; ++const uint64_t sFCSRDivideByZeroFlagMask = 0x1UL << sFCSRDivideByZeroFlagBit; ++const uint64_t sFCSRInvalidOpFlagMask = 0x1UL << sFCSRInvalidOpFlagBit; ++ ++const uint64_t sFCSRFlagMask = ++ sFCSROverflowIntegerFlagMask | ++ sFCSRInexactFlagMask | ++ sFCSRUnderflowFlagMask | ++ sFCSROverflowFlagMask | ++ sFCSRDivideByZeroFlagMask | ++ sFCSRInvalidOpFlagMask; ++ ++const uint64_t sFCSRExceptionFlagMask = sFCSRFlagMask ^ sFCSRInexactFlagMask; ++#endif ++ ++// 'pref' instruction hints ++const int32_t kPrefHintLoad = 0; ++const int32_t kPrefHintStore = 1; ++const int32_t kPrefHintLoadStreamed = 4; ++const int32_t kPrefHintStoreStreamed = 5; ++const int32_t kPrefHintLoadRetained = 6; ++const int32_t kPrefHintStoreRetained = 7; ++const int32_t kPrefHintWritebackInvalidate = 25; ++const int32_t kPrefHintPrepareForStore = 30; ++ ++// Actual value of root register is offset from the root array's start ++// to take advantage of negative displacement values. ++// TODO(sigurds): Choose best value. ++constexpr int kRootRegisterBias = 256; ++ ++// Helper functions for converting between register numbers and names. ++class Registers { ++ public: ++ // Return the name of the register. ++ static const char* Name(int reg); ++ ++ // Lookup the register number for the name provided. ++ static int Number(const char* name); ++ ++ struct RegisterAlias { ++ int reg; ++ const char* name; ++ }; ++ ++ static const int64_t kMaxValue = 0x7fffffffffffffffl; ++ static const int64_t kMinValue = 0x8000000000000000l; ++ ++ private: ++ static const char* names_[kNumRegisters]; ++ static const RegisterAlias aliases_[]; ++}; ++ ++// Helper functions for converting between register numbers and names. ++class FPURegisters { ++ public: ++ // Return the name of the register. ++ static const char* Name(int reg); ++ ++ // Lookup the register number for the name provided. ++ static int Number(const char* name); ++ ++ struct RegisterAlias { ++ int creg; ++ const char* name; ++ }; ++ ++ private: ++ static const char* names_[kNumFPURegisters]; ++ static const RegisterAlias aliases_[]; ++}; ++ ++// Helper functions for converting between register numbers and names. ++class MSARegisters { ++ public: ++ // Return the name of the register. 
++ static const char* Name(int reg); ++ ++ // Lookup the register number for the name provided. ++ static int Number(const char* name); ++ ++ struct RegisterAlias { ++ int creg; ++ const char* name; ++ }; ++ ++ private: ++ static const char* names_[kNumMSARegisters]; ++ static const RegisterAlias aliases_[]; ++}; ++ ++// ----------------------------------------------------------------------------- ++// Instructions encoding constants. ++ ++// On SW64 all instructions are 32 bits. ++using Instr = int32_t; ++ ++// Special Software Interrupt codes when used in the presence of the SW64 ++// simulator. ++enum SoftwareInterruptCodes { ++ // Transition to C code. ++ call_rt_redirected = 0xfffff ++}; ++ ++// On SW64 Simulator breakpoints can have different codes: ++// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints, ++// the simulator will run through them and print the registers. ++// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop() ++// instructions (see Assembler::stop()). ++// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the ++// debugger. ++const uint32_t kMaxWatchpointCode = 31; ++const uint32_t kMaxStopCode = 127; ++STATIC_ASSERT(kMaxWatchpointCode < kMaxStopCode); ++ ++ ++// ----- Fields offset and length. ++const int kOpcodeShift = 26; ++const int kOpcodeBits = 6; ++const int kRsShift = 21; ++const int kRsBits = 5; ++const int kRtShift = 16; ++const int kRtBits = 5; ++const int kRdShift = 11; ++const int kRdBits = 5; ++const int kSaShift = 6; ++const int kSaBits = 5; ++const int kLsaSaBits = 2; ++const int kFunctionShift = 0; ++const int kFunctionBits = 6; ++const int kLuiShift = 16; ++const int kBp2Shift = 6; ++const int kBp2Bits = 2; ++const int kBp3Shift = 6; ++const int kBp3Bits = 3; ++const int kBaseShift = 21; ++const int kBaseBits = 5; ++const int kBit6Shift = 6; ++const int kBit6Bits = 1; ++ ++const int kImm9Shift = 7; ++const int kImm9Bits = 9; ++const int kImm16Shift = 0; ++const int kImm16Bits = 16; ++const int kImm18Shift = 0; ++const int kImm18Bits = 18; ++const int kImm19Shift = 0; ++const int kImm19Bits = 19; ++const int kImm21Shift = 0; ++const int kImm21Bits = 21; ++const int kImm26Shift = 0; ++const int kImm26Bits = 26; ++const int kImm28Shift = 0; ++const int kImm28Bits = 28; ++const int kImm32Shift = 0; ++const int kImm32Bits = 32; ++const int kMsaImm8Shift = 16; ++const int kMsaImm8Bits = 8; ++const int kMsaImm5Shift = 16; ++const int kMsaImm5Bits = 5; ++const int kMsaImm10Shift = 11; ++const int kMsaImm10Bits = 10; ++const int kMsaImmMI10Shift = 16; ++const int kMsaImmMI10Bits = 10; ++ ++// In branches and jumps immediate fields point to words, not bytes, ++// and are therefore shifted by 2. ++const int kImmFieldShift = 2; ++ ++const int kFrBits = 5; ++const int kFrShift = 21; ++const int kFsShift = 11; ++const int kFsBits = 5; ++const int kFtShift = 16; ++const int kFtBits = 5; ++const int kFdShift = 6; ++const int kFdBits = 5; ++const int kFCccShift = 8; ++const int kFCccBits = 3; ++const int kFBccShift = 18; ++const int kFBccBits = 3; ++const int kFBtrueShift = 16; ++const int kFBtrueBits = 1; ++const int kWtBits = 5; ++const int kWtShift = 16; ++const int kWsBits = 5; ++const int kWsShift = 11; ++const int kWdBits = 5; ++const int kWdShift = 6; ++ ++// ----- Miscellaneous useful masks. ++// Instruction bit masks. 
++const int kOpcodeMask = ((1 << kOpcodeBits) - 1) << kOpcodeShift; ++const int kImm9Mask = ((1 << kImm9Bits) - 1) << kImm9Shift; ++const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift; ++const int kImm18Mask = ((1 << kImm18Bits) - 1) << kImm18Shift; ++const int kImm19Mask = ((1 << kImm19Bits) - 1) << kImm19Shift; ++const int kImm21Mask = ((1 << kImm21Bits) - 1) << kImm21Shift; ++const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift; ++const int kImm28Mask = ((1 << kImm28Bits) - 1) << kImm28Shift; ++const int kImm5Mask = ((1 << 5) - 1); ++const int kImm8Mask = ((1 << 8) - 1); ++const int kImm10Mask = ((1 << 10) - 1); ++const int kMsaI5I10Mask = ((7U << 23) | ((1 << 6) - 1)); ++const int kMsaI8Mask = ((3U << 24) | ((1 << 6) - 1)); ++const int kMsaI5Mask = ((7U << 23) | ((1 << 6) - 1)); ++const int kMsaMI10Mask = (15U << 2); ++const int kMsaBITMask = ((7U << 23) | ((1 << 6) - 1)); ++const int kMsaELMMask = (15U << 22); ++const int kMsaLongerELMMask = kMsaELMMask | (63U << 16); ++const int kMsa3RMask = ((7U << 23) | ((1 << 6) - 1)); ++const int kMsa3RFMask = ((15U << 22) | ((1 << 6) - 1)); ++const int kMsaVECMask = (23U << 21); ++const int kMsa2RMask = (7U << 18); ++const int kMsa2RFMask = (15U << 17); ++const int kRsFieldMask = ((1 << kRsBits) - 1) << kRsShift; ++const int kRtFieldMask = ((1 << kRtBits) - 1) << kRtShift; ++const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift; ++const int kSaFieldMask = ((1 << kSaBits) - 1) << kSaShift; ++const int kFunctionFieldMask = ((1 << kFunctionBits) - 1) << kFunctionShift; ++// Misc masks. ++const int kHiMaskOf32 = 0xffff << 16; // Only to be used with 32-bit values ++const int kLoMaskOf32 = 0xffff; ++const int kSignMaskOf32 = 0x80000000; // Only to be used with 32-bit values ++const int kJumpAddrMask = (1 << (kImm26Bits + kImmFieldShift)) - 1; ++const int64_t kTop16MaskOf64 = (int64_t)0xffff << 48; ++const int64_t kHigher16MaskOf64 = (int64_t)0xffff << 32; ++const int64_t kUpper16MaskOf64 = (int64_t)0xffff << 16; ++const int32_t kJalRawMark = 0x00000000; ++const int32_t kJRawMark = 0xf0000000; ++const int32_t kJumpRawMask = 0xf0000000; ++ ++#ifdef SW64 //20180904 ,use the opcode form in hotspot ++ ++// ----- Fields offset and length for sw64 ++const int sOpcodeShift = 26; //31-26 ++const int sOpcodeBits = 6; ++const int sRaShift = 21; //25-21 ++const int sRaBits = 5; ++const int sRbShift = 16; //20-16 ++const int sRbBits = 5; ++const int sFunctionShift = 5; //12- 5 ++const int sFunctionBits = 8; ++const int sRcShift = 0; // 4- 0 ++const int sRcBits = 5; ++const int sR3Shift = 5; // 9- 5 ++const int sR3Bits = 5; ++const int sRdShift = 0; //jzy 20150317 ++const int sRdBits = 5; ++ ++// ----- 21-bits disp(20-0) for SYS_CALL ++const int sImm21Shift = 0; ++const int sImm21Bits = 21; ++ ++// ----- 16-bits disp(15-0) for M ++const int sImm16Shift = 0; ++const int sImm16Bits = 16; ++ ++// ----- 12-bits disp(11-0) for MWithFun ++const int sImm12Shift = 0; ++const int sImm12Bits = 12; ++ ++// ----- 8-bits(20-13) imm for ALU_I & complex interger ALU ++const int sImm8Shift = 13; ++const int sImm8Bits = 8; ++ ++// ----- 5-bits(9-5) imm for complex-float ALU ++const int sImm5Shift = 5; ++const int sImm5Bits = 5; ++ ++//----- 13-bits(25-13) imm for ALU ++const int sImm13Shift = 13; ++const int sImm13Bits = 13; ++ ++//----- 11-bits disp(10-0) for SIMD ++const int sImm11Shift = 0; ++const int sImm11Bits = 11; ++ ++//----- 8-bits disp(7-0) for CSR ++const int RpiShift = 0; ++const int RpiBits = 8; ++ ++// Instruction bit masks. 
++const int sOpcodeMask = ((1 << sOpcodeBits) - 1) << sOpcodeShift; ++const int sFunctionMask = ((1 << sFunctionBits) - 1) << sFunctionShift; ++const int sImm5Mask = ((1 << sImm5Bits) - 1) << sImm5Shift; ++const int sImm8Mask = ((1 << sImm8Bits) - 1) << sImm8Shift; ++const int sImm12Mask = ((1 << sImm12Bits) - 1) << sImm12Shift; ++const int sImm16Mask = ((1 << sImm16Bits) - 1) << sImm16Shift; ++const int sImm21Mask = ((1 << sImm21Bits) - 1) << sImm21Shift; ++const int sRaFieldMask = ((1 << sRaBits) - 1) << sRaShift; ++const int sRbFieldMask = ((1 << sRbBits) - 1) << sRbShift; ++const int sRcFieldMask = ((1 << sRcBits) - 1) << sRcShift; ++const int sR3FieldMask = ((1 << sR3Bits) - 1) << sR3Shift; ++const int sRdFieldMask = ((1 << sRdBits) - 1) << sRdShift; ++const int sImm13Mask = ((1 << sImm13Bits) - 1) << sImm13Shift; ++const int sImm11Mask = ((1 << sImm11Bits) - 1) << sImm11Shift; ++const int sRpiMask = ((1 << RpiBits) - 1) << RpiShift; ++ ++#define OP(x) (((x) & 0x3F) << 26) ++#define PCD(oo) (OP(oo)) ++#define OPMEM(oo) (OP(oo)) ++#define BRA(oo) (OP(oo)) ++ ++#define OFP(oo,ff) (OP(oo) | (((ff) & 0xFF) << 5)) ++#define FMA(oo,ff) (OP(oo) | (((ff) & 0x3F) << 10)) ++#define MFC(oo,ff) (OP(oo) | ((ff) & 0xFFFF)) ++#define MBR(oo,h) (OP(oo) | (((h) & 3) << 14)) ++#define OPR(oo,ff) (OP(oo) | (((ff) & 0xFF) << 5)) ++#define OPRL(oo,ff) (OP(oo) | (((ff) & 0xFF) << 5)) ++#define TOPR(oo,ff) (OP(oo) | (((ff) & 0x07) << 10)) ++#define TOPRL(oo,ff) (OP(oo) | (((ff) & 0x07) << 10)) ++ ++#define ATMEM(oo,h) (OP(oo) | (((h) & 0xF) << 12)) ++#define PRIRET(oo,h) (OP(oo) | (((h) & 0x1) << 20)) ++#define SPCD(oo,ff) (OP(oo) | ((ff) & 0x3FFFFFF)) ++#define EV6HWMEM(oo,ff) (OP(oo) | (((ff) & 0xF) << 12)) ++#define CSR(oo,ff) (OP(oo) | (((ff) & 0xFF) << 8)) ++ ++#define LOGX(oo,ff) (OP(oo) | (((ff) & 0x3F) << 10)) ++#define PSE_LOGX(oo,ff) (OP(oo) | (((ff) & 0x3F) << 10) | (((ff) >> 0x6) << 26 ) | 0x3E0 ) ++ ++enum OpcodeSW : uint32_t { ++}; ++ ++ ++enum Opcode_ops_mem { ++ op_call = OPMEM(0x01), ++ op_ret = OPMEM(0x02), ++ op_jmp = OPMEM(0x03), ++ op_ldwe = OPMEM(0x09), op_fillcs = op_ldwe, ++ op_ldse = OPMEM(0x0A), op_e_fillcs = op_ldse, ++ op_ldde = OPMEM(0x0B), op_fillcs_e = op_ldde, ++ op_vlds = OPMEM(0x0C), op_e_fillde = op_vlds, ++ op_vldd = OPMEM(0x0D), ++ op_vsts = OPMEM(0x0E), ++ op_vstd = OPMEM(0x0F), ++ op_ldbu = OPMEM(0x20), op_flushd = op_ldbu, ++ op_ldhu = OPMEM(0x21), op_evictdg = op_ldhu, ++ op_ldw = OPMEM(0x22), op_s_fillcs = op_ldw, ++ op_ldl = OPMEM(0x23), op_s_fillde = op_ldl, ++ op_ldl_u = OPMEM(0x24), op_evictdl = op_ldl_u, ++ op_flds = OPMEM(0x26), op_fillde = op_flds, ++ op_fldd = OPMEM(0x27), op_fillde_e = op_fldd, ++ op_stb = OPMEM(0x28), ++ op_sth = OPMEM(0x29), ++ op_stw = OPMEM(0x2A), ++ op_stl = OPMEM(0x2B), ++ op_stl_u = OPMEM(0x2C), ++ op_fsts = OPMEM(0x2E), ++ op_fstd = OPMEM(0x2F), ++ op_ldi = OPMEM(0x3E), ++ op_ldih = OPMEM(0x3F) ++// unop = OPMEM(0x3F) | (30 << 16), ++}; ++ ++ ++enum Opcode_ops_atmem { ++ op_lldw = ATMEM(0x08, 0x0), ++ op_lldl = ATMEM(0x08, 0x1), ++ op_ldw_inc = ATMEM(0x08, 0x2), //SW2F ++ op_ldl_inc = ATMEM(0x08, 0x3), //SW2F ++ op_ldw_dec = ATMEM(0x08, 0x4), //SW2F ++ op_ldl_dec = ATMEM(0x08, 0x5), //SW2F ++ op_ldw_set = ATMEM(0x08, 0x6), //SW2F ++ op_ldl_set = ATMEM(0x08, 0x7), //SW2F ++ op_lstw = ATMEM(0x08, 0x8), ++ op_lstl = ATMEM(0x08, 0x9), ++ op_ldw_nc = ATMEM(0x08, 0xA), ++ op_ldl_nc = ATMEM(0x08, 0xB), ++ op_ldd_nc = ATMEM(0x08, 0xC), ++ op_stw_nc = ATMEM(0x08, 0xD), ++ op_stl_nc = ATMEM(0x08, 0xE), ++ op_std_nc = ATMEM(0x08, 0xF), ++ 
op_vldw_u = ATMEM(0x1C, 0x0), ++ op_vstw_u = ATMEM(0x1C, 0x1), ++ op_vlds_u = ATMEM(0x1C, 0x2), ++ op_vsts_u = ATMEM(0x1C, 0x3), ++ op_vldd_u = ATMEM(0x1C, 0x4), ++ op_vstd_u = ATMEM(0x1C, 0x5), ++ op_vstw_ul = ATMEM(0x1C, 0x8), ++ op_vstw_uh = ATMEM(0x1C, 0x9), ++ op_vsts_ul = ATMEM(0x1C, 0xA), ++ op_vsts_uh = ATMEM(0x1C, 0xB), ++ op_vstd_ul = ATMEM(0x1C, 0xC), ++ op_vstd_uh = ATMEM(0x1C, 0xD), ++ op_vldd_nc = ATMEM(0x1C, 0xE), ++ op_vstd_nc = ATMEM(0x1C, 0xF), ++ op_ldbu_a = ATMEM(0x1E, 0x0), //SW6B ++ op_ldhu_a = ATMEM(0x1E, 0x1), //SW6B ++ op_ldw_a = ATMEM(0x1E, 0x2), //SW6B ++ op_ldl_a = ATMEM(0x1E, 0x3), //SW6B ++ op_flds_a = ATMEM(0x1E, 0x4), //SW6B ++ op_fldd_a = ATMEM(0x1E, 0x5), //SW6B ++ op_stb_a = ATMEM(0x1E, 0x6), //SW6B ++ op_sth_a = ATMEM(0x1E, 0x7), //SW6B ++ op_stw_a = ATMEM(0x1E, 0x8), //SW6B ++ op_stl_a = ATMEM(0x1E, 0x9), //SW6B ++ op_fsts_a = ATMEM(0x1E, 0xA), //SW6B ++ op_fstd_a = ATMEM(0x1E, 0xB) //SW6B ++}; ++ ++enum Opcode_ops_ev6hwmem { ++ op_pri_ld = EV6HWMEM(0x25, 0x0), ++ op_pri_st = EV6HWMEM(0x2D, 0x0), ++}; ++ ++enum Opcode_ops_opr { ++ op_addw = OPR(0x10, 0x00), ++ op_subw = OPR(0x10, 0x01), ++ op_s4addw = OPR(0x10, 0x02), ++ op_s4subw = OPR(0x10, 0x03), ++ op_s8addw = OPR(0x10, 0x04), ++ op_s8subw = OPR(0x10, 0x05), ++ op_addl = OPR(0x10, 0x08), ++ op_subl = OPR(0x10, 0x09), ++ op_s4addl = OPR(0x10, 0x0A), ++ op_s4subl = OPR(0x10, 0x0B), ++ op_s8addl = OPR(0x10, 0x0C), ++ op_s8subl = OPR(0x10, 0x0D), ++ op_mulw = OPR(0x10, 0x10), ++ op_divw = OPR(0x10, 0x11), //SW6B ++ op_udivw = OPR(0x10, 0x12), //SW6B ++ op_remw = OPR(0x10, 0x13), //SW6B ++ op_uremw = OPR(0x10, 0x14), //SW6B ++ op_mull = OPR(0x10, 0x18), ++ op_umulh = OPR(0x10, 0x19), ++ op_divl = OPR(0x10, 0x1A), //SW6B ++ op_udivl = OPR(0x10, 0x1B), //SW6B ++ op_reml = OPR(0x10, 0x1C), //SW6B ++ op_ureml = OPR(0x10, 0x1D), //SW6B ++ op_addpi = OPR(0x10, 0x1E), //SW6B ++ op_addpis = OPR(0x10, 0x1F), //SW6B ++ op_cmpeq = OPR(0x10, 0x28), ++ op_cmplt = OPR(0x10, 0x29), ++ op_cmple = OPR(0x10, 0x2A), ++ op_cmpult = OPR(0x10, 0x2B), ++ op_cmpule = OPR(0x10, 0x2C), ++ op_sbt = OPR(0x10, 0x2D), //SW6B ++ op_cbt = OPR(0x10, 0x2E), //SW6B ++ op_and = OPR(0x10, 0x38), ++ op_bic = OPR(0x10, 0x39), ++ op_bis = OPR(0x10, 0x3A), ++ op_ornot = OPR(0x10, 0x3B), ++ op_xor = OPR(0x10, 0x3C), ++ op_eqv = OPR(0x10, 0x3D), ++ op_inslb = OPR(0x10, 0x40), //0x10.40~0x10.47 ++ op_inslh = OPR(0x10, 0x41), ++ op_inslw = OPR(0x10, 0x42), ++ op_insll = OPR(0x10, 0x43), ++ op_inshb = OPR(0x10, 0x44), ++ op_inshh = OPR(0x10, 0x45), ++ op_inshw = OPR(0x10, 0x46), ++ op_inshl = OPR(0x10, 0x47), ++ op_slll = OPR(0x10, 0x48), ++ op_srll = OPR(0x10, 0x49), ++ op_sral = OPR(0x10, 0x4A), ++ op_roll = OPR(0x10, 0x4B), //SW6B ++ op_sllw = OPR(0x10, 0x4C), //SW6B ++ op_srlw = OPR(0x10, 0x4D), //SW6B ++ op_sraw = OPR(0x10, 0x4E), //SW6B ++ op_rolw = OPR(0x10, 0x4F), //SW6B ++ op_extlb = OPR(0x10, 0x50), //0x10.50~0x10.57 ++ op_extlh = OPR(0x10, 0x51), ++ op_extlw = OPR(0x10, 0x52), ++ op_extll = OPR(0x10, 0x53), ++ op_exthb = OPR(0x10, 0x54), ++ op_exthh = OPR(0x10, 0x55), ++ op_exthw = OPR(0x10, 0x56), ++ op_exthl = OPR(0x10, 0x57), ++ op_ctpop = OPR(0x10, 0x58), ++ op_ctlz = OPR(0x10, 0x59), ++ op_cttz = OPR(0x10, 0x5A), ++ op_revbh = OPR(0x10, 0x5B), //SW6B ++ op_revbw = OPR(0x10, 0x5C), //SW6B ++ op_revbl = OPR(0x10, 0x5D), //SW6B ++ op_casw = OPR(0x10, 0x5E), //SW6B ++ op_casl = OPR(0x10, 0x5F), //SW6B ++ op_masklb = OPR(0x10, 0x60), //0x10.60~0x10.67 ++ op_masklh = OPR(0x10, 0x61), ++ op_masklw = OPR(0x10, 0x62), ++ op_maskll = OPR(0x10, 
0x63), ++ op_maskhb = OPR(0x10, 0x64), ++ op_maskhh = OPR(0x10, 0x65), ++ op_maskhw = OPR(0x10, 0x66), ++ op_maskhl = OPR(0x10, 0x67), ++ op_zap = OPR(0x10, 0x68), ++ op_zapnot = OPR(0x10, 0x69), ++ op_sextb = OPR(0x10, 0x6A), ++ op_sexth = OPR(0x10, 0x6B), ++ op_cmpgeb = OPR(0x10, 0x6C), //0x10.6C ++ op_fimovs = OPR(0x10, 0x70), ++ op_fimovd = OPR(0x10, 0x78), ++}; ++ ++enum Opcode_ops_sel { ++ op_seleq = TOPR(0x11, 0x0), ++ op_selge = TOPR(0x11, 0x1), ++ op_selgt = TOPR(0x11, 0x2), ++ op_selle = TOPR(0x11, 0x3), ++ op_sellt = TOPR(0x11, 0x4), ++ op_selne = TOPR(0x11, 0x5), ++ op_sellbc = TOPR(0x11, 0x6), ++ op_sellbs = TOPR(0x11, 0x7) ++}; ++ ++enum Opcode_ops_oprl { ++ op_addw_l = OPRL(0x12, 0x00), ++ op_subw_l = OPRL(0x12, 0x01), ++ op_s4addw_l = OPRL(0x12, 0x02), ++ op_s4subw_l = OPRL(0x12, 0x03), ++ op_s8addw_l = OPRL(0x12, 0x04), ++ op_s8subw_l = OPRL(0x12, 0x05), ++ op_addl_l = OPRL(0x12, 0x08), ++ op_subl_l = OPRL(0x12, 0x09), ++ op_s4addl_l = OPRL(0x12, 0x0A), ++ op_s4subl_l = OPRL(0x12, 0x0B), ++ op_s8addl_l = OPRL(0x12, 0x0C), ++ op_s8subl_l = OPRL(0x12, 0x0D), ++ op_mulw_l = OPRL(0x12, 0x10), ++ op_mull_l = OPRL(0x12, 0x18), ++ op_umulh_l = OPRL(0x12, 0x19), ++ op_cmpeq_l = OPRL(0x12, 0x28), ++ op_cmplt_l = OPRL(0x12, 0x29), ++ op_cmple_l = OPRL(0x12, 0x2A), ++ op_cmpult_l = OPRL(0x12, 0x2B), ++ op_cmpule_l = OPRL(0x12, 0x2C), ++ op_sbt_l = OPRL(0x12, 0x2D), //SW6B ++ op_cbt_l = OPRL(0x12, 0x2E), //SW6B ++ op_and_l = OPRL(0x12, 0x38), ++ op_bic_l = OPRL(0x12, 0x39), ++ op_bis_l = OPRL(0x12, 0x3A), ++ op_ornot_l = OPRL(0x12, 0x3B), ++ op_xor_l = OPRL(0x12, 0x3C), ++ op_eqv_l = OPRL(0x12, 0x3D), ++ op_inslb_l = OPRL(0x12, 0x40), //0x12.40~0x12.47 ++ op_inslh_l = OPRL(0x12, 0x41), ++ op_inslw_l = OPRL(0x12, 0x42), ++ op_insll_l = OPRL(0x12, 0x43), ++ op_inshb_l = OPRL(0x12, 0x44), ++ op_inshh_l = OPRL(0x12, 0x45), ++ op_inshw_l = OPRL(0x12, 0x46), ++ op_inshl_l = OPRL(0x12, 0x47), ++ op_slll_l = OPRL(0x12, 0x48), ++ op_srll_l = OPRL(0x12, 0x49), ++ op_sral_l = OPRL(0x12, 0x4A), ++ op_roll_l = OPRL(0x12, 0x4B), //SW6B ++ op_sllw_l = OPRL(0x12, 0x4C), ++ op_srlw_l = OPRL(0x12, 0x4D), ++ op_sraw_l = OPRL(0x12, 0x4E), ++ op_rolw_l = OPRL(0x12, 0x4F), //SW6B ++ op_extlb_l = OPRL(0x12, 0x50), //0x12.50~0x12.57 ++ op_extlh_l = OPRL(0x12, 0x51), ++ op_extlw_l = OPRL(0x12, 0x52), ++ op_extll_l = OPRL(0x12, 0x53), ++ op_exthb_l = OPRL(0x12, 0x54), ++ op_exthh_l = OPRL(0x12, 0x55), ++ op_exthw_l = OPRL(0x12, 0x56), ++ op_exthl_l = OPRL(0x12, 0x57), ++ op_masklb_l = OPRL(0x12, 0x60), //0x12.60~0x12.67 ++ op_masklh_l = OPRL(0x12, 0x61), ++ op_masklw_l = OPRL(0x12, 0x62), ++ op_maskll_l = OPRL(0x12, 0x63), ++ op_maskhb_l = OPRL(0x12, 0x64), ++ op_maskhh_l = OPRL(0x12, 0x65), ++ op_maskhw_l = OPRL(0x12, 0x66), ++ op_maskhl_l = OPRL(0x12, 0x67), ++ op_zap_l = OPRL(0x12, 0x68), ++ op_zapnot_l = OPRL(0x12, 0x69), ++ op_sextb_l = OPRL(0x12, 0x6A), ++ op_sexth_l = OPRL(0x12, 0x6B), ++ op_cmpgeb_l = OPRL(0x12, 0x6C), //0x12.6C ++}; ++ ++enum Opcode_ops_sel_l { ++ op_seleq_l = TOPRL(0x13, 0x0), ++ op_selge_l = TOPRL(0x13, 0x1), ++ op_selgt_l = TOPRL(0x13, 0x2), ++ op_selle_l = TOPRL(0x13, 0x3), ++ op_sellt_l = TOPRL(0x13, 0x4), ++ op_selne_l = TOPRL(0x13, 0x5), ++ op_sellbc_l = TOPRL(0x13, 0x6), ++ op_sellbs_l = TOPRL(0x13, 0x7) ++}; ++ ++enum Opcode_ops_bra { ++ op_br = BRA(0x04), ++ op_bsr = BRA(0x05), ++ op_beq = BRA(0x30), ++ op_bne = BRA(0x31), ++ op_blt = BRA(0x32), ++ op_ble = BRA(0x33), ++ op_bgt = BRA(0x34), ++ op_bge = BRA(0x35), ++ op_blbc = BRA(0x36), ++ op_blbs = BRA(0x37), ++ op_fbeq = 
BRA(0x38), ++ op_fbne = BRA(0x39), ++ op_fblt = BRA(0x3A), ++ op_fble = BRA(0x3B), ++ op_fbgt = BRA(0x3C), ++ op_fbge = BRA(0x3D) ++}; ++ ++enum Opcode_ops_fp { ++ op_fadds = OFP(0x18, 0x00), ++ op_faddd = OFP(0x18, 0x01), ++ op_fsubs = OFP(0x18, 0x02), ++ op_fsubd = OFP(0x18, 0x03), ++ op_fmuls = OFP(0x18, 0x04), ++ op_fmuld = OFP(0x18, 0x05), ++ op_fdivs = OFP(0x18, 0x06), ++ op_fdivd = OFP(0x18, 0x07), ++ op_fsqrts = OFP(0x18, 0x08), ++ op_fsqrtd = OFP(0x18, 0x09), ++ op_fcmpeq = OFP(0x18, 0x10), ++ op_fcmple = OFP(0x18, 0x11), ++ op_fcmplt = OFP(0x18, 0x12), ++ op_fcmpun = OFP(0x18, 0x13), ++ op_fcvtsd = OFP(0x18, 0x20), ++ op_fcvtds = OFP(0x18, 0x21), ++ op_fcvtdl_g = OFP(0x18, 0x22), //lx_fcvtdl ++ op_fcvtdl_p = OFP(0x18, 0x23), ++ op_fcvtdl_z = OFP(0x18, 0x24), ++ op_fcvtdl_n = OFP(0x18, 0x25), //lx_fcvtdl ++ op_fcvtdl = OFP(0x18, 0x27), ++ op_fcvtwl = OFP(0x18, 0x28), ++ op_fcvtlw = OFP(0x18, 0x29), ++ op_fcvtls = OFP(0x18, 0x2D), ++ op_fcvtld = OFP(0x18, 0x2F), ++ op_fcpys = OFP(0x18, 0x30), ++ op_fcpyse = OFP(0x18, 0x31), ++ op_fcpysn = OFP(0x18, 0x32), ++ op_ifmovs = OFP(0x18, 0x40), ++ op_ifmovd = OFP(0x18, 0x41), ++ op_rfpcr = OFP(0x18, 0x50), ++ op_wfpcr = OFP(0x18, 0x51), ++ op_setfpec0 = OFP(0x18, 0x54), ++ op_setfpec1 = OFP(0x18, 0x55), ++ op_setfpec2 = OFP(0x18, 0x56), ++ op_setfpec3 = OFP(0x18, 0x57), ++ op_frecs = OFP(0x18, 0x58), //SW6B ++ op_frecd = OFP(0x18, 0x59), //SW6B ++ op_fris = OFP(0x18, 0x5A), //SW6B ++ op_fris_g = OFP(0x18, 0x5B), //SW6B ++ op_fris_p = OFP(0x18, 0x5C), //SW6B ++ op_fris_z = OFP(0x18, 0x5D), //SW6B ++ op_fris_n = OFP(0x18, 0x5F), //SW6B ++ op_frid = OFP(0x18, 0x60), //SW6B ++ op_frid_g = OFP(0x18, 0x61), //SW6B ++ op_frid_p = OFP(0x18, 0x62), //SW6B ++ op_frid_z = OFP(0x18, 0x63), //SW6B ++ op_frid_n = OFP(0x18, 0x64), //SW6B ++ op_vaddw = OFP(0x1A, 0x00), ++ op_vsubw = OFP(0x1A, 0x01), ++ op_vcmpgew = OFP(0x1A, 0x02), ++ op_vcmpeqw = OFP(0x1A, 0x03), ++ op_vcmplew = OFP(0x1A, 0x04), ++ op_vcmpltw = OFP(0x1A, 0x05), ++ op_vcmpulew = OFP(0x1A, 0x06), ++ op_vcmpultw = OFP(0x1A, 0x07), ++ op_vsllw = OFP(0x1A, 0x08), ++ op_vsrlw = OFP(0x1A, 0x09), ++ op_vsraw = OFP(0x1A, 0x0A), ++ op_vrolw = OFP(0x1A, 0x0B), ++ op_sllow = OFP(0x1A, 0x0C), ++ op_srlow = OFP(0x1A, 0x0D), ++ op_vaddl = OFP(0x1A, 0x0E), ++ op_vsubl = OFP(0x1A, 0x0F), ++ op_vsllb = OFP(0x1A, 0x10), //SW6B ++ op_vsrlb = OFP(0x1A, 0x11), //SW6B ++ op_vsrab = OFP(0x1A, 0x12), //SW6B ++ op_vrolb = OFP(0x1A, 0x13), //SW6B ++ op_vsllh = OFP(0x1A, 0x14), //SW6B ++ op_vsrlh = OFP(0x1A, 0x15), //SW6B ++ op_vsrah = OFP(0x1A, 0x16), //SW6B ++ op_vrolh = OFP(0x1A, 0x17), //SW6B ++ op_ctpopow = OFP(0x1A, 0x18), ++ op_ctlzow = OFP(0x1A, 0x19), ++ op_vslll = OFP(0x1A, 0x1A), //SW6B ++ op_vsrll = OFP(0x1A, 0x1B), //SW6B ++ op_vsral = OFP(0x1A, 0x1C), //SW6B ++ op_vroll = OFP(0x1A, 0x1D), //SW6B ++ op_vmaxb = OFP(0x1A, 0x1E), //SW6B ++ op_vminb = OFP(0x1A, 0x1F), //SW6B ++ op_vucaddw = OFP(0x1A, 0x40), ++ op_vucsubw = OFP(0x1A, 0x41), ++ op_vucaddh = OFP(0x1A, 0x42), ++ op_vucsubh = OFP(0x1A, 0x43), ++ op_vucaddb = OFP(0x1A, 0x44), ++ op_vucsubb = OFP(0x1A, 0x45), ++ op_sraow = OFP(0x1A, 0x46), //SW6B ++ op_vsumw = OFP(0x1A, 0x47), //SW6B ++ op_vsuml = OFP(0x1A, 0x48), //SW6B ++ op_vcmpueqb = OFP(0x1A, 0x4B), //SW6B ++ op_vcmpugtb = OFP(0x1A, 0x4C), //SW6B ++ op_vmaxh = OFP(0x1A, 0x50), //SW6B ++ op_vminh = OFP(0x1A, 0x51), //SW6B ++ op_vmaxw = OFP(0x1A, 0x52), //SW6B ++ op_vminw = OFP(0x1A, 0x53), //SW6B ++ op_vmaxl = OFP(0x1A, 0x54), //SW6B ++ op_vminl = OFP(0x1A, 0x55), //SW6B ++ op_vumaxb = 
OFP(0x1A, 0x56), //SW6B ++ op_vuminb = OFP(0x1A, 0x57), //SW6B ++ op_vumaxh = OFP(0x1A, 0x58), //SW6B ++ op_vuminh = OFP(0x1A, 0x59), //SW6B ++ op_vumaxw = OFP(0x1A, 0x5A), //SW6B ++ op_vuminw = OFP(0x1A, 0x5B), //SW6B ++ op_vumaxl = OFP(0x1A, 0x5C), //SW6B ++ op_vuminl = OFP(0x1A, 0x5D), //SW6B ++ op_vsm3msw = OFP(0x1A, 0x67), //SW6B, ENCRYPT ++ op_vsm4r = OFP(0x1A, 0x69), //SW6B, ENCRYPT ++ op_vbinvw = OFP(0x1A, 0x6A), //SW6B, ENCRYPT ++ op_vadds = OFP(0x1A, 0x80), ++ op_vaddd = OFP(0x1A, 0x81), ++ op_vsubs = OFP(0x1A, 0x82), ++ op_vsubd = OFP(0x1A, 0x83), ++ op_vmuls = OFP(0x1A, 0x84), ++ op_vmuld = OFP(0x1A, 0x85), ++ op_vdivs = OFP(0x1A, 0x86), ++ op_vdivd = OFP(0x1A, 0x87), ++ op_vsqrts = OFP(0x1A, 0x88), ++ op_vsqrtd = OFP(0x1A, 0x89), ++ op_vfcmpeq = OFP(0x1A, 0x8C), ++ op_vfcmple = OFP(0x1A, 0x8D), ++ op_vfcmplt = OFP(0x1A, 0x8E), ++ op_vfcmpun = OFP(0x1A, 0x8F), ++ op_vcpys = OFP(0x1A, 0x90), ++ op_vcpyse = OFP(0x1A, 0x91), ++ op_vcpysn = OFP(0x1A, 0x92), ++ op_vsums = OFP(0x1A, 0x93), //SW6B ++ op_vsumd = OFP(0x1A, 0x94), //SW6B ++ op_vfcvtsd = OFP(0x1A, 0x95), //SW6B ++ op_vfcvtds = OFP(0x1A, 0x96), //SW6B ++ op_vfcvtls = OFP(0x1A, 0x99), //SW6B ++ op_vfcvtld = OFP(0x1A, 0x9A), //SW6B ++ op_vfcvtdl = OFP(0x1A, 0x9B), //SW6B ++ op_vfcvtdl_g = OFP(0x1A, 0x9C), //SW6B ++ op_vfcvtdl_p = OFP(0x1A, 0x9D), //SW6B ++ op_vfcvtdl_z = OFP(0x1A, 0x9E), //SW6B ++ op_vfcvtdl_n = OFP(0x1A, 0x9F), //SW6B ++ op_vfris = OFP(0x1A, 0xA0), //SW6B ++ op_vfris_g = OFP(0x1A, 0xA1), //SW6B ++ op_vfris_p = OFP(0x1A, 0xA2), //SW6B ++ op_vfris_z = OFP(0x1A, 0xA3), //SW6B ++ op_vfris_n = OFP(0x1A, 0xA4), //SW6B ++ op_vfrid = OFP(0x1A, 0xA5), //SW6B ++ op_vfrid_g = OFP(0x1A, 0xA6), //SW6B ++ op_vfrid_p = OFP(0x1A, 0xA7), //SW6B ++ op_vfrid_z = OFP(0x1A, 0xA8), //SW6B ++ op_vfrid_n = OFP(0x1A, 0xA9), //SW6B ++ op_vfrecs = OFP(0x1A, 0xAA), //SW6B ++ op_vfrecd = OFP(0x1A, 0xAB), //SW6B ++ op_vmaxs = OFP(0x1A, 0xAC), //SW6B ++ op_vmins = OFP(0x1A, 0xAD), //SW6B ++ op_vmaxd = OFP(0x1A, 0xAE), //SW6B ++ op_vmind = OFP(0x1A, 0xAF), //SW6B ++ ++ op_vbisw = PSE_LOGX(0x14, 0x30), ++ op_vxorw = PSE_LOGX(0x14, 0x3c), ++ op_vandw = PSE_LOGX(0x14, 0xc0), ++ op_veqvw = PSE_LOGX(0x14, 0xc3), ++ op_vornotw = PSE_LOGX(0x14, 0xf3), ++ op_vbicw = PSE_LOGX(0x14, 0xfc) ++}; ++ ++enum Opcode_ops_fpl { ++ op_vaddw_l = OFP(0x1A, 0x20), ++ op_vsubw_l = OFP(0x1A, 0x21), ++ op_vcmpgew_l = OFP(0x1A, 0x22), ++ op_vcmpeqw_l = OFP(0x1A, 0x23), ++ op_vcmplew_l = OFP(0x1A, 0x24), ++ op_vcmpltw_l = OFP(0x1A, 0x25), ++ op_vcmpulew_l = OFP(0x1A, 0x26), ++ op_vcmpultw_l = OFP(0x1A, 0x27), ++ op_vsllw_l = OFP(0x1A, 0x28), ++ op_vsrlw_l = OFP(0x1A, 0x29), ++ op_vsraw_l = OFP(0x1A, 0x2A), ++ op_vrolw_l = OFP(0x1A, 0x2B), ++ op_sllow_l = OFP(0x1A, 0x2C), ++ op_srlow_l = OFP(0x1A, 0x2D), ++ op_vaddl_l = OFP(0x1A, 0x2E), ++ op_vsubl_l = OFP(0x1A, 0x2F), ++ op_vsllb_l = OFP(0x1A, 0x30), //SW6B ++ op_vsrlb_l = OFP(0x1A, 0x31), //SW6B ++ op_vsrab_l = OFP(0x1A, 0x32), //SW6B ++ op_vrolb_l = OFP(0x1A, 0x33), //SW6B ++ op_vsllh_l = OFP(0x1A, 0x34), //SW6B ++ op_vsrlh_l = OFP(0x1A, 0x35), //SW6B ++ op_vsrah_l = OFP(0x1A, 0x36), //SW6B ++ op_vrolh_l = OFP(0x1A, 0x37), //SW6B ++ op_vslll_l = OFP(0x1A, 0x3A), //SW6B ++ op_vsrll_l = OFP(0x1A, 0x3B), //SW6B ++ op_vsral_l = OFP(0x1A, 0x3C), //SW6B ++ op_vroll_l = OFP(0x1A, 0x3D), //SW6B ++ op_vucaddw_l = OFP(0x1A, 0x60), ++ op_vucsubw_l = OFP(0x1A, 0x61), ++ op_vucaddh_l = OFP(0x1A, 0x62), ++ op_vucsubh_l = OFP(0x1A, 0x63), ++ op_vucaddb_l = OFP(0x1A, 0x64), ++ op_vucsubb_l = OFP(0x1A, 0x65), ++ op_sraow_l = 
OFP(0x1A, 0x66), //SW6B ++ op_vsm4key_l = OFP(0x1A, 0x68), //SW6B, ENCRYPT ++ op_vcmpueqb_l = OFP(0x1A, 0x6B), //SW6B ++ op_vcmpugtb_l = OFP(0x1A, 0x6C), //SW6B ++ op_vfcvtsh_l = OFP(0x1B, 0x35), //SW6B ++ op_vfcvths_l = OFP(0x1B, 0x36) //SW6B ++}; ++ ++enum Opcode_ops_fma { ++ op_fmas = FMA(0x19, 0x00), ++ op_fmad = FMA(0x19, 0x01), ++ op_fmss = FMA(0x19, 0x02), ++ op_fmsd = FMA(0x19, 0x03), ++ op_fnmas = FMA(0x19, 0x04), ++ op_fnmad = FMA(0x19, 0x05), ++ op_fnmss = FMA(0x19, 0x06), ++ op_fnmsd = FMA(0x19, 0x07), ++ op_fseleq = FMA(0x19, 0x10), ++ op_fselne = FMA(0x19, 0x11), ++ op_fsellt = FMA(0x19, 0x12), ++ op_fselle = FMA(0x19, 0x13), ++ op_fselgt = FMA(0x19, 0x14), ++ op_fselge = FMA(0x19, 0x15), ++ op_vmas = FMA(0x1B, 0x00), ++ op_vmad = FMA(0x1B, 0x01), ++ op_vmss = FMA(0x1B, 0x02), ++ op_vmsd = FMA(0x1B, 0x03), ++ op_vnmas = FMA(0x1B, 0x04), ++ op_vnmad = FMA(0x1B, 0x05), ++ op_vnmss = FMA(0x1B, 0x06), ++ op_vnmsd = FMA(0x1B, 0x07), ++ op_vfseleq = FMA(0x1B, 0x10), ++ op_vfsellt = FMA(0x1B, 0x12), ++ op_vfselle = FMA(0x1B, 0x13), ++ op_vseleqw = FMA(0x1B, 0x18), ++ op_vsellbcw = FMA(0x1B, 0x19), ++ op_vselltw = FMA(0x1B, 0x1A), ++ op_vsellew = FMA(0x1B, 0x1B), ++ op_vcpyw = FMA(0x1B, 0x24), ++ op_vcpyf = FMA(0x1B, 0x25), ++ op_vconw = FMA(0x1B, 0x26), ++ op_vshfw = FMA(0x1B, 0x27), ++ op_vcons = FMA(0x1B, 0x28), ++ op_vcond = FMA(0x1B, 0x29), ++ op_vinsectlh = FMA(0x1B, 0x2C), //SW6B ++ op_vinsectlw = FMA(0x1B, 0x2D), //SW6B ++ op_vinsectll = FMA(0x1B, 0x2E), //SW6B ++ op_vinsectlb = FMA(0x1B, 0x2F), //SW6B ++ op_vshfqb = FMA(0x1B, 0x31), //SW6B ++ op_vcpyh = FMA(0x1B, 0x32), //SW6B ++ op_vcpyb = FMA(0x1B, 0x33) //SW6B ++}; ++ ++enum Opcode_ops_fmal { ++ op_vinsw_l = FMA(0x1B, 0x20), ++ op_vinsf_l = FMA(0x1B, 0x21), ++ op_vextw_l = FMA(0x1B, 0x22), ++ op_vextf_l = FMA(0x1B, 0x23), ++ op_vinsb_l = FMA(0x1B, 0x2A), //SW6B ++ op_vinsh_l = FMA(0x1B, 0x2B), //SW6B ++ op_vshfq_l = FMA(0x1B, 0x30), //SW6B ++ op_vsm3r_l = FMA(0x1B, 0x34), //SW6B, ENCRYPT ++ op_vseleqw_l = FMA(0x1B, 0x38), ++ op_vsellbcw_l = FMA(0x1B, 0x39), ++ op_vselltw_l = FMA(0x1B, 0x3A), ++ op_vsellew_l = FMA(0x1B, 0x3B) ++}; ++ ++enum Opcode_ops_extra { ++ op_sys_call = PCD(0x00), ++ op_memb = MFC(0x06, 0x0000), ++ op_imemb = MFC(0x06, 0x0001), //SW6B ++ op_wmemb = MFC(0x06, 0x0002), //SW6B ++ op_rtc = MFC(0x06, 0x0020), ++ op_rcid = MFC(0x06, 0x0040), ++ op_halt = MFC(0x06, 0x0080), ++ op_rd_f = MFC(0x06, 0x1000), //SW2F ++ op_wr_f = MFC(0x06, 0x1020), //SW2F ++ op_rtid = MFC(0x06, 0x1040), ++ op_csrrs = CSR(0x06, 0xFC), //SW6B ++ op_csrrc = CSR(0x06, 0xFD), //SW6B ++ op_csrr = CSR(0x06, 0xFE), ++ op_csrw = CSR(0x06, 0xFF), ++ op_pri_ret = PRIRET(0x07, 0x0), ++ op_vlog = LOGX(0x14, 0x00), ++ op_lbr = PCD(0x1D), //SW6B ++ op_dpfhr = ATMEM(0x1E, 0xE), //SW6B ++ op_dpfhw = ATMEM(0x1E, 0xF), //SW6B ++}; ++ ++enum Opcode_ops_simulator { ++ op_trap = PCD(0x1F) ++}; ++ ++enum TrapCode : uint32_t { ++ BREAK = 0, ++ REDIRECT = 1 ++}; ++ ++#endif ++ ++// ----- XXXX64 Opcodes and Function Fields. ++// We use this presentation to stay close to the table representation in ++// SW32 Architecture For Programmers, Volume II: The SW32 Instruction Set. 
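++// For reference: the 6-bit opcode field sits in bits 31..26 (kOpcodeShift is
++// 26), so, for example, BEQ below encodes as ((0 << 3) + 4) << 26, i.e.
++// 0x10000000.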
++enum Opcode : uint32_t { ++ SPECIAL = 0U << kOpcodeShift, ++ REGIMM = 1U << kOpcodeShift, ++ ++ J = ((0U << 3) + 2) << kOpcodeShift, ++ JAL = ((0U << 3) + 3) << kOpcodeShift, ++ BEQ = ((0U << 3) + 4) << kOpcodeShift, ++ BNE = ((0U << 3) + 5) << kOpcodeShift, ++ BLEZ = ((0U << 3) + 6) << kOpcodeShift, ++ BGTZ = ((0U << 3) + 7) << kOpcodeShift, ++ ++ ADDI = ((1U << 3) + 0) << kOpcodeShift, ++ ADDIU = ((1U << 3) + 1) << kOpcodeShift, ++ SLTI = ((1U << 3) + 2) << kOpcodeShift, ++ SLTIU = ((1U << 3) + 3) << kOpcodeShift, ++ ANDI = ((1U << 3) + 4) << kOpcodeShift, ++ ORI = ((1U << 3) + 5) << kOpcodeShift, ++ XORI = ((1U << 3) + 6) << kOpcodeShift, ++ LUI = ((1U << 3) + 7) << kOpcodeShift, // LUI/AUI family. ++ DAUI = ((3U << 3) + 5) << kOpcodeShift, ++ ++ BEQC = ((2U << 3) + 0) << kOpcodeShift, ++ COP1 = ((2U << 3) + 1) << kOpcodeShift, // Coprocessor 1 class. ++ BEQL = ((2U << 3) + 4) << kOpcodeShift, ++ BNEL = ((2U << 3) + 5) << kOpcodeShift, ++ BLEZL = ((2U << 3) + 6) << kOpcodeShift, ++ BGTZL = ((2U << 3) + 7) << kOpcodeShift, ++ ++ DADDI = ((3U << 3) + 0) << kOpcodeShift, // This is also BNEC. ++// DADDIU = ((3U << 3) + 1) << kOpcodeShift, ++ LDL = ((3U << 3) + 2) << kOpcodeShift, ++ LDR = ((3U << 3) + 3) << kOpcodeShift, ++ SPECIAL2 = ((3U << 3) + 4) << kOpcodeShift, ++ MSA = ((3U << 3) + 6) << kOpcodeShift, ++ SPECIAL3 = ((3U << 3) + 7) << kOpcodeShift, ++ ++ LB = ((4U << 3) + 0) << kOpcodeShift, ++ LH = ((4U << 3) + 1) << kOpcodeShift, ++ LWL = ((4U << 3) + 2) << kOpcodeShift, ++ LW = ((4U << 3) + 3) << kOpcodeShift, ++ LBU = ((4U << 3) + 4) << kOpcodeShift, ++ LHU = ((4U << 3) + 5) << kOpcodeShift, ++ LWR = ((4U << 3) + 6) << kOpcodeShift, ++ LWU = ((4U << 3) + 7) << kOpcodeShift, ++ ++ SB = ((5U << 3) + 0) << kOpcodeShift, ++ SH = ((5U << 3) + 1) << kOpcodeShift, ++ SWL = ((5U << 3) + 2) << kOpcodeShift, ++ SW = ((5U << 3) + 3) << kOpcodeShift, ++ SDL = ((5U << 3) + 4) << kOpcodeShift, ++ SDR = ((5U << 3) + 5) << kOpcodeShift, ++ SWR = ((5U << 3) + 6) << kOpcodeShift, ++ ++ LL = ((6U << 3) + 0) << kOpcodeShift, ++ LWC1 = ((6U << 3) + 1) << kOpcodeShift, ++ BC = ((6U << 3) + 2) << kOpcodeShift, ++ LLD = ((6U << 3) + 4) << kOpcodeShift, ++ LDC1 = ((6U << 3) + 5) << kOpcodeShift, ++ POP66 = ((6U << 3) + 6) << kOpcodeShift, ++ LD = ((6U << 3) + 7) << kOpcodeShift, ++ ++ PREF = ((6U << 3) + 3) << kOpcodeShift, ++ ++ SC = ((7U << 3) + 0) << kOpcodeShift, ++ SWC1 = ((7U << 3) + 1) << kOpcodeShift, ++ BALC = ((7U << 3) + 2) << kOpcodeShift, ++ PCREL = ((7U << 3) + 3) << kOpcodeShift, ++ SCD = ((7U << 3) + 4) << kOpcodeShift, ++ SDC1 = ((7U << 3) + 5) << kOpcodeShift, ++ POP76 = ((7U << 3) + 6) << kOpcodeShift, ++ SD = ((7U << 3) + 7) << kOpcodeShift, ++ ++ COP1X = ((1U << 4) + 3) << kOpcodeShift, ++ ++ // New r6 instruction. ++ POP06 = BLEZ, // bgeuc/bleuc, blezalc, bgezalc ++ POP07 = BGTZ, // bltuc/bgtuc, bgtzalc, bltzalc ++ POP10 = ADDI, // beqzalc, bovc, beqc ++ POP26 = BLEZL, // bgezc, blezc, bgec/blec ++ POP27 = BGTZL, // bgtzc, bltzc, bltc/bgtc ++ POP30 = DADDI, // bnezalc, bnvc, bnec ++}; ++ ++enum SecondaryField : uint32_t { ++ // SPECIAL Encoding of Function Field. 
++ SLL = ((0U << 3) + 0), ++ MOVCI = ((0U << 3) + 1), ++ SRL = ((0U << 3) + 2), ++ SRA = ((0U << 3) + 3), ++ SLLV = ((0U << 3) + 4), ++ LSA = ((0U << 3) + 5), ++ SRLV = ((0U << 3) + 6), ++ SRAV = ((0U << 3) + 7), ++ ++ JR = ((1U << 3) + 0), ++ JALR = ((1U << 3) + 1), ++ //MOVZ = ((1U << 3) + 2), ++ MOVN = ((1U << 3) + 3), ++ //BREAK = ((1U << 3) + 5), ++ SYNC = ((1U << 3) + 7), ++ ++ MFHI = ((2U << 3) + 0), ++ CLZ_R6 = ((2U << 3) + 0), ++ CLO_R6 = ((2U << 3) + 1), ++ MFLO = ((2U << 3) + 2), ++ DCLZ_R6 = ((2U << 3) + 2), ++ DCLO_R6 = ((2U << 3) + 3), ++// DSLLV = ((2U << 3) + 4), ++ DLSA = ((2U << 3) + 5), ++// DSRLV = ((2U << 3) + 6), ++// DSRAV = ((2U << 3) + 7), ++ ++ MULT = ((3U << 3) + 0), ++ MULTU = ((3U << 3) + 1), ++// DIV = ((3U << 3) + 2), ++// DIVU = ((3U << 3) + 3), ++ DMULT = ((3U << 3) + 4), ++ DMULTU = ((3U << 3) + 5), ++// DDIV = ((3U << 3) + 6), ++// DDIVU = ((3U << 3) + 7), ++ ++ ADD = ((4U << 3) + 0), ++// ADDU = ((4U << 3) + 1), ++ SUB = ((4U << 3) + 2), ++// SUBU = ((4U << 3) + 3), ++ AND = ((4U << 3) + 4), ++ OR = ((4U << 3) + 5), ++ XOR = ((4U << 3) + 6), ++ NOR = ((4U << 3) + 7), ++ ++ SLT = ((5U << 3) + 2), ++ SLTU = ((5U << 3) + 3), ++ DADD = ((5U << 3) + 4), ++// DADDU = ((5U << 3) + 5), ++ DSUB = ((5U << 3) + 6), ++// DSUBU = ((5U << 3) + 7), ++ ++ TGE = ((6U << 3) + 0), ++ TGEU = ((6U << 3) + 1), ++ TLT = ((6U << 3) + 2), ++ TLTU = ((6U << 3) + 3), ++ TEQ = ((6U << 3) + 4), ++ SELEQZ_S = ((6U << 3) + 5), ++ TNE = ((6U << 3) + 6), ++ SELNEZ_S = ((6U << 3) + 7), ++ ++// DSLL = ((7U << 3) + 0), ++// DSRL = ((7U << 3) + 2), ++// DSRA = ((7U << 3) + 3), ++// DSLL32 = ((7U << 3) + 4), ++// DSRL32 = ((7U << 3) + 6), ++// DSRA32 = ((7U << 3) + 7), ++ ++ // Multiply integers in r6. ++ MUL_MUH = ((3U << 3) + 0), // MUL, MUH. ++ MUL_MUH_U = ((3U << 3) + 1), // MUL_U, MUH_U. ++ D_MUL_MUH = ((7U << 2) + 0), // DMUL, DMUH. ++ D_MUL_MUH_U = ((7U << 2) + 1), // DMUL_U, DMUH_U. ++ RINT = ((3U << 3) + 2), ++ ++ MUL_OP = ((0U << 3) + 2), ++ MUH_OP = ((0U << 3) + 3), ++ DIV_OP = ((0U << 3) + 2), ++ MOD_OP = ((0U << 3) + 3), ++ ++ DIV_MOD = ((3U << 3) + 2), ++ DIV_MOD_U = ((3U << 3) + 3), ++ D_DIV_MOD = ((3U << 3) + 6), ++ D_DIV_MOD_U = ((3U << 3) + 7), ++ ++ // drotr in special4? ++ ++ // SPECIAL2 Encoding of Function Field. ++ MUL = ((0U << 3) + 2), ++ CLZ = ((4U << 3) + 0), ++ CLO = ((4U << 3) + 1), ++ DCLZ = ((4U << 3) + 4), ++ DCLO = ((4U << 3) + 5), ++ ++ // SPECIAL3 Encoding of Function Field. ++ EXT = ((0U << 3) + 0), ++ DEXTM = ((0U << 3) + 1), ++ DEXTU = ((0U << 3) + 2), ++ DEXT = ((0U << 3) + 3), ++ INS = ((0U << 3) + 4), ++ DINSM = ((0U << 3) + 5), ++ DINSU = ((0U << 3) + 6), ++ DINS = ((0U << 3) + 7), ++ ++ BSHFL = ((4U << 3) + 0), ++ DBSHFL = ((4U << 3) + 4), ++ SC_R6 = ((4U << 3) + 6), ++ SCD_R6 = ((4U << 3) + 7), ++ LL_R6 = ((6U << 3) + 6), ++ LLD_R6 = ((6U << 3) + 7), ++ ++ // SPECIAL3 Encoding of sa Field. ++ BITSWAP = ((0U << 3) + 0), ++ ALIGN = ((0U << 3) + 2), ++ WSBH = ((0U << 3) + 2), ++ SEB = ((2U << 3) + 0), ++ SEH = ((3U << 3) + 0), ++ ++ DBITSWAP = ((0U << 3) + 0), ++ DALIGN = ((0U << 3) + 1), ++ DBITSWAP_SA = ((0U << 3) + 0) << kSaShift, ++ DSBH = ((0U << 3) + 2), ++ DSHD = ((0U << 3) + 5), ++ ++ // REGIMM encoding of rt Field. ++ BLTZ = ((0U << 3) + 0) << 16, ++ BGEZ = ((0U << 3) + 1) << 16, ++ BLTZAL = ((2U << 3) + 0) << 16, ++ BGEZAL = ((2U << 3) + 1) << 16, ++ BGEZALL = ((2U << 3) + 3) << 16, ++ DAHI = ((0U << 3) + 6) << 16, ++ DATI = ((3U << 3) + 6) << 16, ++ ++ // COP1 Encoding of rs Field. 
++ MFC1 = ((0U << 3) + 0) << 21, ++ DMFC1 = ((0U << 3) + 1) << 21, ++ CFC1 = ((0U << 3) + 2) << 21, ++ MFHC1 = ((0U << 3) + 3) << 21, ++ MTC1 = ((0U << 3) + 4) << 21, ++ DMTC1 = ((0U << 3) + 5) << 21, ++ CTC1 = ((0U << 3) + 6) << 21, ++ MTHC1 = ((0U << 3) + 7) << 21, ++ BC1 = ((1U << 3) + 0) << 21, ++ S = ((2U << 3) + 0) << 21, ++ D = ((2U << 3) + 1) << 21, ++ W = ((2U << 3) + 4) << 21, ++ L = ((2U << 3) + 5) << 21, ++ PS = ((2U << 3) + 6) << 21, ++ // COP1 Encoding of Function Field When rs=S. ++ ++ ADD_S = ((0U << 3) + 0), ++ SUB_S = ((0U << 3) + 1), ++ MUL_S = ((0U << 3) + 2), ++ DIV_S = ((0U << 3) + 3), ++ ABS_S = ((0U << 3) + 5), ++ SQRT_S = ((0U << 3) + 4), ++ MOV_S = ((0U << 3) + 6), ++ NEG_S = ((0U << 3) + 7), ++ ROUND_L_S = ((1U << 3) + 0), ++ TRUNC_L_S = ((1U << 3) + 1), ++ CEIL_L_S = ((1U << 3) + 2), ++ FLOOR_L_S = ((1U << 3) + 3), ++ ROUND_W_S = ((1U << 3) + 4), ++ TRUNC_W_S = ((1U << 3) + 5), ++ CEIL_W_S = ((1U << 3) + 6), ++ FLOOR_W_S = ((1U << 3) + 7), ++ RECIP_S = ((2U << 3) + 5), ++ RSQRT_S = ((2U << 3) + 6), ++ MADDF_S = ((3U << 3) + 0), ++ MSUBF_S = ((3U << 3) + 1), ++ CLASS_S = ((3U << 3) + 3), ++ CVT_D_S = ((4U << 3) + 1), ++ CVT_W_S = ((4U << 3) + 4), ++ CVT_L_S = ((4U << 3) + 5), ++ CVT_PS_S = ((4U << 3) + 6), ++ // COP1 Encoding of Function Field When rs=D. ++ ADD_D = ((0U << 3) + 0), ++ SUB_D = ((0U << 3) + 1), ++ MUL_D = ((0U << 3) + 2), ++ DIV_D = ((0U << 3) + 3), ++ SQRT_D = ((0U << 3) + 4), ++ ABS_D = ((0U << 3) + 5), ++ MOV_D = ((0U << 3) + 6), ++ NEG_D = ((0U << 3) + 7), ++ ROUND_L_D = ((1U << 3) + 0), ++ TRUNC_L_D = ((1U << 3) + 1), ++ CEIL_L_D = ((1U << 3) + 2), ++ FLOOR_L_D = ((1U << 3) + 3), ++ ROUND_W_D = ((1U << 3) + 4), ++ TRUNC_W_D = ((1U << 3) + 5), ++ CEIL_W_D = ((1U << 3) + 6), ++ FLOOR_W_D = ((1U << 3) + 7), ++ RECIP_D = ((2U << 3) + 5), ++ RSQRT_D = ((2U << 3) + 6), ++ MADDF_D = ((3U << 3) + 0), ++ MSUBF_D = ((3U << 3) + 1), ++ CLASS_D = ((3U << 3) + 3), ++ MIN = ((3U << 3) + 4), ++ MINA = ((3U << 3) + 5), ++ MAX = ((3U << 3) + 6), ++ MAXA = ((3U << 3) + 7), ++ CVT_S_D = ((4U << 3) + 0), ++ CVT_W_D = ((4U << 3) + 4), ++ CVT_L_D = ((4U << 3) + 5), ++ C_F_D = ((6U << 3) + 0), ++ C_UN_D = ((6U << 3) + 1), ++ C_EQ_D = ((6U << 3) + 2), ++ C_UEQ_D = ((6U << 3) + 3), ++ C_OLT_D = ((6U << 3) + 4), ++ C_ULT_D = ((6U << 3) + 5), ++ C_OLE_D = ((6U << 3) + 6), ++ C_ULE_D = ((6U << 3) + 7), ++ ++ // COP1 Encoding of Function Field When rs=W or L. ++ CVT_S_W = ((4U << 3) + 0), ++ CVT_D_W = ((4U << 3) + 1), ++ CVT_S_L = ((4U << 3) + 0), ++ CVT_D_L = ((4U << 3) + 1), ++ BC1EQZ = ((2U << 2) + 1) << 21, ++ BC1NEZ = ((3U << 2) + 1) << 21, ++ // COP1 CMP positive predicates Bit 5..4 = 00. ++ CMP_AF = ((0U << 3) + 0), ++ CMP_UN = ((0U << 3) + 1), ++ CMP_EQ = ((0U << 3) + 2), ++ CMP_UEQ = ((0U << 3) + 3), ++ CMP_LT = ((0U << 3) + 4), ++ CMP_ULT = ((0U << 3) + 5), ++ CMP_LE = ((0U << 3) + 6), ++ CMP_ULE = ((0U << 3) + 7), ++ CMP_SAF = ((1U << 3) + 0), ++ CMP_SUN = ((1U << 3) + 1), ++ CMP_SEQ = ((1U << 3) + 2), ++ CMP_SUEQ = ((1U << 3) + 3), ++ CMP_SSLT = ((1U << 3) + 4), ++ CMP_SSULT = ((1U << 3) + 5), ++ CMP_SLE = ((1U << 3) + 6), ++ CMP_SULE = ((1U << 3) + 7), ++ // COP1 CMP negative predicates Bit 5..4 = 01. ++ CMP_AT = ((2U << 3) + 0), // Reserved, not implemented. ++ CMP_OR = ((2U << 3) + 1), ++ CMP_UNE = ((2U << 3) + 2), ++ CMP_NE = ((2U << 3) + 3), ++ CMP_UGE = ((2U << 3) + 4), // Reserved, not implemented. ++ CMP_OGE = ((2U << 3) + 5), // Reserved, not implemented. ++ CMP_UGT = ((2U << 3) + 6), // Reserved, not implemented. 
++ CMP_OGT = ((2U << 3) + 7), // Reserved, not implemented. ++ CMP_SAT = ((3U << 3) + 0), // Reserved, not implemented. ++ CMP_SOR = ((3U << 3) + 1), ++ CMP_SUNE = ((3U << 3) + 2), ++ CMP_SNE = ((3U << 3) + 3), ++ CMP_SUGE = ((3U << 3) + 4), // Reserved, not implemented. ++ CMP_SOGE = ((3U << 3) + 5), // Reserved, not implemented. ++ CMP_SUGT = ((3U << 3) + 6), // Reserved, not implemented. ++ CMP_SOGT = ((3U << 3) + 7), // Reserved, not implemented. ++ ++ SEL = ((2U << 3) + 0), ++ MOVF = ((2U << 3) + 1), // Function field for MOVT.fmt and MOVF.fmt ++ MOVZ_C = ((2U << 3) + 2), // COP1 on FPR registers. ++ MOVN_C = ((2U << 3) + 3), // COP1 on FPR registers. ++ SELEQZ_C = ((2U << 3) + 4), // COP1 on FPR registers. ++ SELNEZ_C = ((2U << 3) + 7), // COP1 on FPR registers. ++ ++ // COP1 Encoding of Function Field When rs=PS. ++ ++ // COP1X Encoding of Function Field. ++ MADD_S = ((4U << 3) + 0), ++ MADD_D = ((4U << 3) + 1), ++ MSUB_S = ((5U << 3) + 0), ++ MSUB_D = ((5U << 3) + 1), ++ ++ // PCREL Encoding of rt Field. ++ ADDIUPC = ((0U << 2) + 0), ++ LWPC = ((0U << 2) + 1), ++ LWUPC = ((0U << 2) + 2), ++ LDPC = ((0U << 3) + 6), ++ // reserved ((1U << 3) + 6), ++ AUIPC = ((3U << 3) + 6), ++ ALUIPC = ((3U << 3) + 7), ++ ++ // POP66 Encoding of rs Field. ++ JIC = ((0U << 5) + 0), ++ ++ // POP76 Encoding of rs Field. ++ JIALC = ((0U << 5) + 0), ++ ++ // COP1 Encoding of rs Field for MSA Branch Instructions ++ BZ_V = (((1U << 3) + 3) << kRsShift), ++ BNZ_V = (((1U << 3) + 7) << kRsShift), ++ BZ_B = (((3U << 3) + 0) << kRsShift), ++ BZ_H = (((3U << 3) + 1) << kRsShift), ++ BZ_W = (((3U << 3) + 2) << kRsShift), ++ BZ_D = (((3U << 3) + 3) << kRsShift), ++ BNZ_B = (((3U << 3) + 4) << kRsShift), ++ BNZ_H = (((3U << 3) + 5) << kRsShift), ++ BNZ_W = (((3U << 3) + 6) << kRsShift), ++ BNZ_D = (((3U << 3) + 7) << kRsShift), ++ ++ // MSA: Operation Field for MI10 Instruction Formats ++ MSA_LD = (8U << 2), ++ MSA_ST = (9U << 2), ++ LD_B = ((8U << 2) + 0), ++ LD_H = ((8U << 2) + 1), ++ LD_W = ((8U << 2) + 2), ++ LD_D = ((8U << 2) + 3), ++ ST_B = ((9U << 2) + 0), ++ ST_H = ((9U << 2) + 1), ++ ST_W = ((9U << 2) + 2), ++ ST_D = ((9U << 2) + 3), ++ ++ // MSA: Operation Field for I5 Instruction Format ++ ADDVI = ((0U << 23) + 6), ++ SUBVI = ((1U << 23) + 6), ++ MAXI_S = ((2U << 23) + 6), ++ MAXI_U = ((3U << 23) + 6), ++ MINI_S = ((4U << 23) + 6), ++ MINI_U = ((5U << 23) + 6), ++ CEQI = ((0U << 23) + 7), ++ CLTI_S = ((2U << 23) + 7), ++ CLTI_U = ((3U << 23) + 7), ++ CLEI_S = ((4U << 23) + 7), ++ CLEI_U = ((5U << 23) + 7), ++ LDI = ((6U << 23) + 7), // I10 instruction format ++ I5_DF_b = (0U << 21), ++ I5_DF_h = (1U << 21), ++ I5_DF_w = (2U << 21), ++ I5_DF_d = (3U << 21), ++ ++ // MSA: Operation Field for I8 Instruction Format ++ ANDI_B = ((0U << 24) + 0), ++ ORI_B = ((1U << 24) + 0), ++ NORI_B = ((2U << 24) + 0), ++ XORI_B = ((3U << 24) + 0), ++ BMNZI_B = ((0U << 24) + 1), ++ BMZI_B = ((1U << 24) + 1), ++ BSELI_B = ((2U << 24) + 1), ++ SHF_B = ((0U << 24) + 2), ++ SHF_H = ((1U << 24) + 2), ++ SHF_W = ((2U << 24) + 2), ++ ++ MSA_VEC_2R_2RF_MINOR = ((3U << 3) + 6), ++ ++ // MSA: Operation Field for VEC Instruction Formats ++ AND_V = (((0U << 2) + 0) << 21), ++ OR_V = (((0U << 2) + 1) << 21), ++ NOR_V = (((0U << 2) + 2) << 21), ++ XOR_V = (((0U << 2) + 3) << 21), ++ BMNZ_V = (((1U << 2) + 0) << 21), ++ BMZ_V = (((1U << 2) + 1) << 21), ++ BSEL_V = (((1U << 2) + 2) << 21), ++ ++ // MSA: Operation Field for 2R Instruction Formats ++ MSA_2R_FORMAT = (((6U << 2) + 0) << 21), ++ FILL = (0U << 18), ++ PCNT = (1U << 18), ++ NLOC 
= (2U << 18), ++ NLZC = (3U << 18), ++ MSA_2R_DF_b = (0U << 16), ++ MSA_2R_DF_h = (1U << 16), ++ MSA_2R_DF_w = (2U << 16), ++ MSA_2R_DF_d = (3U << 16), ++ ++ // MSA: Operation Field for 2RF Instruction Formats ++ MSA_2RF_FORMAT = (((6U << 2) + 1) << 21), ++ FCLASS = (0U << 17), ++ FTRUNC_S = (1U << 17), ++ FTRUNC_U = (2U << 17), ++ FSQRT = (3U << 17), ++ FRSQRT = (4U << 17), ++ FRCP = (5U << 17), ++ FRINT = (6U << 17), ++ FLOG2 = (7U << 17), ++ FEXUPL = (8U << 17), ++ FEXUPR = (9U << 17), ++ FFQL = (10U << 17), ++ FFQR = (11U << 17), ++ FTINT_S = (12U << 17), ++ FTINT_U = (13U << 17), ++ FFINT_S = (14U << 17), ++ FFINT_U = (15U << 17), ++ MSA_2RF_DF_w = (0U << 16), ++ MSA_2RF_DF_d = (1U << 16), ++ ++ // MSA: Operation Field for 3R Instruction Format ++ SLL_MSA = ((0U << 23) + 13), ++ SRA_MSA = ((1U << 23) + 13), ++ SRL_MSA = ((2U << 23) + 13), ++ BCLR = ((3U << 23) + 13), ++ BSET = ((4U << 23) + 13), ++ BNEG = ((5U << 23) + 13), ++ BINSL = ((6U << 23) + 13), ++ BINSR = ((7U << 23) + 13), ++ ADDV = ((0U << 23) + 14), ++ SUBV = ((1U << 23) + 14), ++ MAX_S = ((2U << 23) + 14), ++ MAX_U = ((3U << 23) + 14), ++ MIN_S = ((4U << 23) + 14), ++ MIN_U = ((5U << 23) + 14), ++ MAX_A = ((6U << 23) + 14), ++ MIN_A = ((7U << 23) + 14), ++ CEQ = ((0U << 23) + 15), ++ CLT_S = ((2U << 23) + 15), ++ CLT_U = ((3U << 23) + 15), ++ CLE_S = ((4U << 23) + 15), ++ CLE_U = ((5U << 23) + 15), ++ ADD_A = ((0U << 23) + 16), ++ ADDS_A = ((1U << 23) + 16), ++ ADDS_S = ((2U << 23) + 16), ++ ADDS_U = ((3U << 23) + 16), ++ AVE_S = ((4U << 23) + 16), ++ AVE_U = ((5U << 23) + 16), ++ AVER_S = ((6U << 23) + 16), ++ AVER_U = ((7U << 23) + 16), ++ SUBS_S = ((0U << 23) + 17), ++ SUBS_U = ((1U << 23) + 17), ++ SUBSUS_U = ((2U << 23) + 17), ++ SUBSUU_S = ((3U << 23) + 17), ++ ASUB_S = ((4U << 23) + 17), ++ ASUB_U = ((5U << 23) + 17), ++ MULV = ((0U << 23) + 18), ++ MADDV = ((1U << 23) + 18), ++ MSUBV = ((2U << 23) + 18), ++ DIV_S_MSA = ((4U << 23) + 18), ++ DIV_U = ((5U << 23) + 18), ++ MOD_S = ((6U << 23) + 18), ++ MOD_U = ((7U << 23) + 18), ++ DOTP_S = ((0U << 23) + 19), ++ DOTP_U = ((1U << 23) + 19), ++ DPADD_S = ((2U << 23) + 19), ++ DPADD_U = ((3U << 23) + 19), ++ DPSUB_S = ((4U << 23) + 19), ++ DPSUB_U = ((5U << 23) + 19), ++ SLD = ((0U << 23) + 20), ++ SPLAT = ((1U << 23) + 20), ++ PCKEV = ((2U << 23) + 20), ++ PCKOD = ((3U << 23) + 20), ++ ILVL = ((4U << 23) + 20), ++ ILVR = ((5U << 23) + 20), ++ ILVEV = ((6U << 23) + 20), ++ ILVOD = ((7U << 23) + 20), ++ VSHF = ((0U << 23) + 21), ++ SRAR = ((1U << 23) + 21), ++ SRLR = ((2U << 23) + 21), ++ HADD_S = ((4U << 23) + 21), ++ HADD_U = ((5U << 23) + 21), ++ HSUB_S = ((6U << 23) + 21), ++ HSUB_U = ((7U << 23) + 21), ++ MSA_3R_DF_b = (0U << 21), ++ MSA_3R_DF_h = (1U << 21), ++ MSA_3R_DF_w = (2U << 21), ++ MSA_3R_DF_d = (3U << 21), ++ ++ // MSA: Operation Field for 3RF Instruction Format ++ FCAF = ((0U << 22) + 26), ++ FCUN = ((1U << 22) + 26), ++ FCEQ = ((2U << 22) + 26), ++ FCUEQ = ((3U << 22) + 26), ++ FCLT = ((4U << 22) + 26), ++ FCULT = ((5U << 22) + 26), ++ FCLE = ((6U << 22) + 26), ++ FCULE = ((7U << 22) + 26), ++ FSAF = ((8U << 22) + 26), ++ FSUN = ((9U << 22) + 26), ++ FSEQ = ((10U << 22) + 26), ++ FSUEQ = ((11U << 22) + 26), ++ FSLT = ((12U << 22) + 26), ++ FSULT = ((13U << 22) + 26), ++ FSLE = ((14U << 22) + 26), ++ FSULE = ((15U << 22) + 26), ++ FADD = ((0U << 22) + 27), ++ FSUB = ((1U << 22) + 27), ++ FMUL = ((2U << 22) + 27), ++ FDIV = ((3U << 22) + 27), ++ FMADD = ((4U << 22) + 27), ++ FMSUB = ((5U << 22) + 27), ++ FEXP2 = ((7U << 22) + 27), ++ FEXDO = ((8U << 22) + 
27), ++ FTQ = ((10U << 22) + 27), ++ FMIN = ((12U << 22) + 27), ++ FMIN_A = ((13U << 22) + 27), ++ FMAX = ((14U << 22) + 27), ++ FMAX_A = ((15U << 22) + 27), ++ FCOR = ((1U << 22) + 28), ++ FCUNE = ((2U << 22) + 28), ++ FCNE = ((3U << 22) + 28), ++ MUL_Q = ((4U << 22) + 28), ++ MADD_Q = ((5U << 22) + 28), ++ MSUB_Q = ((6U << 22) + 28), ++ FSOR = ((9U << 22) + 28), ++ FSUNE = ((10U << 22) + 28), ++ FSNE = ((11U << 22) + 28), ++ MULR_Q = ((12U << 22) + 28), ++ MADDR_Q = ((13U << 22) + 28), ++ MSUBR_Q = ((14U << 22) + 28), ++ ++ // MSA: Operation Field for ELM Instruction Format ++ MSA_ELM_MINOR = ((3U << 3) + 1), ++ SLDI = (0U << 22), ++ CTCMSA = ((0U << 22) | (62U << 16)), ++ SPLATI = (1U << 22), ++ CFCMSA = ((1U << 22) | (62U << 16)), ++ COPY_S = (2U << 22), ++ MOVE_V = ((2U << 22) | (62U << 16)), ++ COPY_U = (3U << 22), ++ INSERT = (4U << 22), ++ INSVE = (5U << 22), ++ ELM_DF_B = ((0U << 4) << 16), ++ ELM_DF_H = ((4U << 3) << 16), ++ ELM_DF_W = ((12U << 2) << 16), ++ ELM_DF_D = ((28U << 1) << 16), ++ ++ // MSA: Operation Field for BIT Instruction Format ++ SLLI = ((0U << 23) + 9), ++ SRAI = ((1U << 23) + 9), ++ SRLI = ((2U << 23) + 9), ++ BCLRI = ((3U << 23) + 9), ++ BSETI = ((4U << 23) + 9), ++ BNEGI = ((5U << 23) + 9), ++ BINSLI = ((6U << 23) + 9), ++ BINSRI = ((7U << 23) + 9), ++ SAT_S = ((0U << 23) + 10), ++ SAT_U = ((1U << 23) + 10), ++ SRARI = ((2U << 23) + 10), ++ SRLRI = ((3U << 23) + 10), ++ BIT_DF_b = ((14U << 3) << 16), ++ BIT_DF_h = ((6U << 4) << 16), ++ BIT_DF_w = ((2U << 5) << 16), ++ BIT_DF_d = ((0U << 6) << 16), ++ ++ nullptrSF = 0U ++}; ++ ++enum MSAMinorOpcode : uint32_t { ++ kMsaMinorUndefined = 0, ++ kMsaMinorI8, ++ kMsaMinorI5, ++ kMsaMinorI10, ++ kMsaMinorBIT, ++ kMsaMinor3R, ++ kMsaMinor3RF, ++ kMsaMinorELM, ++ kMsaMinorVEC, ++ kMsaMinor2R, ++ kMsaMinor2RF, ++ kMsaMinorMI10 ++}; ++ ++// ----- Emulated conditions. ++// On XXXX64 we use this enum to abstract from conditional branch instructions. ++// The 'U' prefix is used to specify unsigned comparisons. ++// Opposite conditions must be paired as odd/even numbers ++// because 'NegateCondition' function flips LSB to negate condition. ++enum Condition { ++ // Any value < 0 is considered no_condition. ++ kNoCondition = -1, ++ overflow = 0, ++ no_overflow = 1, ++ Uless = 2, ++ Ugreater_equal = 3, ++ Uless_equal = 4, ++ Ugreater = 5, ++ equal = 6, ++ not_equal = 7, // Unordered or Not Equal. ++ negative = 8, ++ positive = 9, ++ parity_even = 10, ++ parity_odd = 11, ++ less = 12, ++ greater_equal = 13, ++ less_equal = 14, ++ greater = 15, ++ ueq = 16, // Unordered or Equal. ++ ogl = 17, // Ordered and Not Equal. ++ cc_always = 18, ++ ++ // Aliases. ++ carry = Uless, ++ not_carry = Ugreater_equal, ++ zero = equal, ++ eq = equal, ++ not_zero = not_equal, ++ ne = not_equal, ++ nz = not_equal, ++ sign = negative, ++ not_sign = positive, ++ mi = negative, ++ pl = positive, ++ hi = Ugreater, ++ ls = Uless_equal, ++ ge = greater_equal, ++ lt = less, ++ gt = greater, ++ le = less_equal, ++ hs = Ugreater_equal, ++ lo = Uless, ++ al = cc_always, ++ ult = Uless, ++ uge = Ugreater_equal, ++ ule = Uless_equal, ++ ugt = Ugreater, ++ cc_default = kNoCondition ++}; ++ ++ ++// Returns the equivalent of !cc. ++// Negation of the default kNoCondition (-1) results in a non-default ++// no_condition value (-2). As long as tests for no_condition check ++// for condition < 0, this will work as expected. 
++inline Condition NegateCondition(Condition cc) { ++ DCHECK(cc != cc_always); ++ return static_cast(cc ^ 1); ++} ++ ++ ++inline Condition NegateFpuCondition(Condition cc) { ++ DCHECK(cc != cc_always); ++ switch (cc) { ++ case ult: ++ return ge; ++ case ugt: ++ return le; ++ case uge: ++ return lt; ++ case ule: ++ return gt; ++ case lt: ++ return uge; ++ case gt: ++ return ule; ++ case ge: ++ return ult; ++ case le: ++ return ugt; ++ case eq: ++ return ne; ++ case ne: ++ return eq; ++ case ueq: ++ return ogl; ++ case ogl: ++ return ueq; ++ default: ++ return cc; ++ } ++} ++ ++enum MSABranchCondition { ++ all_not_zero = 0, // Branch If All Elements Are Not Zero ++ one_elem_not_zero, // Branch If At Least One Element of Any Format Is Not ++ // Zero ++ one_elem_zero, // Branch If At Least One Element Is Zero ++ all_zero // Branch If All Elements of Any Format Are Zero ++}; ++ ++inline MSABranchCondition NegateMSABranchCondition(MSABranchCondition cond) { ++ switch (cond) { ++ case all_not_zero: ++ return one_elem_zero; ++ case one_elem_not_zero: ++ return all_zero; ++ case one_elem_zero: ++ return all_not_zero; ++ case all_zero: ++ return one_elem_not_zero; ++ default: ++ return cond; ++ } ++} ++ ++enum MSABranchDF { ++ MSA_BRANCH_B = 0, ++ MSA_BRANCH_H, ++ MSA_BRANCH_W, ++ MSA_BRANCH_D, ++ MSA_BRANCH_V ++}; ++ ++ ++// ----- Coprocessor conditions. ++enum FPUCondition { ++ kNoFPUCondition = -1, ++ ++ F = 0x00, // False. ++ UN = 0x01, // Unordered. ++ EQ = 0x02, // Equal. ++ UEQ = 0x03, // Unordered or Equal. ++ OLT = 0x04, // Ordered or Less Than, on Sw64 release < 3. ++ LT = 0x04, // Ordered or Less Than, on Sw64 release >= 3. ++ ULT = 0x05, // Unordered or Less Than. ++ OLE = 0x06, // Ordered or Less Than or Equal, on Sw64 release < 3. ++ LE = 0x06, // Ordered or Less Than or Equal, on Sw64 release >= 3. ++ ULE = 0x07, // Unordered or Less Than or Equal. ++ ++ // Following constants are available on Sw64 release >= 3 only. ++ ORD = 0x11, // Ordered, on Sw64 release >= 3. ++ UNE = 0x12, // Not equal, on Sw64 release >= 3. ++ NE = 0x13, // Ordered Greater Than or Less Than. on Sw64 >= 3 only. ++}; ++ ++ ++// FPU rounding modes. ++enum FPURoundingMode { ++ RN = 0 << 0, // Round to Nearest. ++ RZ = 1 << 0, // Round towards zero. ++ RP = 2 << 0, // Round towards Plus Infinity. ++ RM = 3 << 0, // Round towards Minus Infinity. ++ ++ // Aliases. ++ kRoundToNearest = RN, ++ kRoundToZero = RZ, ++ kRoundToPlusInf = RP, ++ kRoundToMinusInf = RM, ++ ++ mode_round = RN, ++ mode_ceil = RP, ++ mode_floor = RM, ++ mode_trunc = RZ ++}; ++ ++const uint32_t kFPURoundingModeMask = 3 << 0; ++ ++enum CheckForInexactConversion { ++ kCheckForInexactConversion, ++ kDontCheckForInexactConversion ++}; ++ ++enum class MaxMinKind : int { kMin = 0, kMax = 1 }; ++ ++// ----------------------------------------------------------------------------- ++// Hints. ++ ++// Branch hints are not used on the XXXX64. They are defined so that they can ++// appear in shared function signatures, but will be ignored in XXXX64 ++// implementations. ++enum Hint { ++ no_hint = 0 ++}; ++ ++ ++inline Hint NegateHint(Hint hint) { ++ return no_hint; ++} ++ ++ ++// ----------------------------------------------------------------------------- ++// Specific instructions, constants, and masks. ++// These constants are declared in assembler-sw64.cc, as they use named ++// registers and other constants. ++ ++// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r) ++// operations as post-increment of sp. 
++//extern const Instr kPopInstruction; ++// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp. ++//extern const Instr kPushInstruction; ++// Stw(r, MemOperand(sp, 0)) ++//extern const Instr kPushRegPattern; ++// Ldw(r, MemOperand(sp, 0)) ++//extern const Instr kPopRegPattern; ++//extern const Instr kLwRegFpOffsetPattern; ++//extern const Instr kSwRegFpOffsetPattern; ++//extern const Instr kLwRegFpNegOffsetPattern; ++//extern const Instr kSwRegFpNegOffsetPattern; ++// A mask for the Rt register for push, pop, lw, sw instructions. ++extern const Instr kRtMask; ++extern const Instr kLwSwInstrTypeMask; ++extern const Instr kLwSwInstrArgumentMask; ++extern const Instr kLwSwOffsetMask; ++ ++// simulator custom instruction ++const Instr rtCallRedirInstr = op_trap | REDIRECT; ++// A nop instruction. (Encoding of sll 0 0 0). ++const Instr nopInstr = 0; ++ ++static constexpr uint64_t OpcodeToBitNumber(Opcode opcode) { ++ return 1ULL << (static_cast(opcode) >> kOpcodeShift); ++} ++ ++constexpr uint8_t kInstrSize = 4; ++constexpr uint8_t kInstrSizeLog2 = 2; ++ ++class InstructionBase { ++ public: ++ enum { ++ // On XXXX64 PC cannot actually be directly accessed. We behave as if PC was ++ // always the value of the current instruction being executed. ++ kPCReadOffset = 0 ++ }; ++ ++ // Instruction type. ++ enum Type { kRegisterType, kImmediateType, kJumpType, ++#ifdef SW64 //jzy 20150213 ++ kSwSyscallType, ++ kSwTransferanceType, ++ kSwStorageType, ++ kSwSimpleCalculationType, ++ kSwCompositeCalculationType, ++ kSwExtendType, ++ kSwSimulatorTrap, ++#endif ++ kUnsupported = -1 }; ++ ++ // Get the raw instruction bits. ++ inline Instr InstructionBits() const { ++ return *reinterpret_cast(this); ++ } ++ ++ // Set the raw instruction bits to value. ++ inline void SetInstructionBits(Instr value) { ++ *reinterpret_cast(this) = value; ++ } ++ ++ // Read one particular bit out of the instruction bits. ++ inline int Bit(int nr) const { ++ return (InstructionBits() >> nr) & 1; ++ } ++ ++ // Read a bit field out of the instruction bits. 
++ inline int Bits(int hi, int lo) const { ++ return (InstructionBits() >> lo) & ((2U << (hi - lo)) - 1); ++ } ++ ++ static constexpr uint64_t kOpcodeImmediateTypeMask = ++ OpcodeToBitNumber(REGIMM) | OpcodeToBitNumber(BEQ) | ++ OpcodeToBitNumber(BNE) | OpcodeToBitNumber(BLEZ) | ++ OpcodeToBitNumber(BGTZ) | OpcodeToBitNumber(ADDI) | ++ OpcodeToBitNumber(DADDI) | OpcodeToBitNumber(ADDIU) | ++ /*OpcodeToBitNumber(DADDIU) |*/ OpcodeToBitNumber(SLTI) | ++ OpcodeToBitNumber(SLTIU) | OpcodeToBitNumber(ANDI) | ++ OpcodeToBitNumber(ORI) | OpcodeToBitNumber(XORI) | ++ OpcodeToBitNumber(LUI) | OpcodeToBitNumber(BEQL) | ++ OpcodeToBitNumber(BNEL) | OpcodeToBitNumber(BLEZL) | ++ OpcodeToBitNumber(BGTZL) | OpcodeToBitNumber(POP66) | ++ OpcodeToBitNumber(POP76) | OpcodeToBitNumber(LB) | OpcodeToBitNumber(LH) | ++ OpcodeToBitNumber(LWL) | OpcodeToBitNumber(LW) | OpcodeToBitNumber(LWU) | ++ OpcodeToBitNumber(LD) | OpcodeToBitNumber(LBU) | OpcodeToBitNumber(LHU) | ++ OpcodeToBitNumber(LDL) | OpcodeToBitNumber(LDR) | OpcodeToBitNumber(LWR) | ++ OpcodeToBitNumber(SDL) | OpcodeToBitNumber(SB) | OpcodeToBitNumber(SH) | ++ OpcodeToBitNumber(SWL) | OpcodeToBitNumber(SW) | OpcodeToBitNumber(SD) | ++ OpcodeToBitNumber(SWR) | OpcodeToBitNumber(SDR) | ++ OpcodeToBitNumber(LWC1) | OpcodeToBitNumber(LDC1) | ++ OpcodeToBitNumber(SWC1) | OpcodeToBitNumber(SDC1) | ++ OpcodeToBitNumber(PCREL) | OpcodeToBitNumber(DAUI) | ++ OpcodeToBitNumber(BC) | OpcodeToBitNumber(BALC); ++ ++#define FunctionFieldToBitNumber(function) (1ULL << function) ++ ++ // On r6, DCLZ_R6 aliases to existing MFLO. ++ static const uint64_t kFunctionFieldRegisterTypeMask = ++ FunctionFieldToBitNumber(JR) | FunctionFieldToBitNumber(JALR) | ++ FunctionFieldToBitNumber(BREAK) | FunctionFieldToBitNumber(SLL) | ++ /* FunctionFieldToBitNumber(DSLL) | FunctionFieldToBitNumber(DSLL32) |*/ ++ FunctionFieldToBitNumber(SRL) |/* FunctionFieldToBitNumber(DSRL) | ++ FunctionFieldToBitNumber(DSRL32) |*/ FunctionFieldToBitNumber(SRA) | ++ /* FunctionFieldToBitNumber(DSRA) | FunctionFieldToBitNumber(DSRA32) |*/ ++ FunctionFieldToBitNumber(SLLV) |/* FunctionFieldToBitNumber(DSLLV) |*/ ++ FunctionFieldToBitNumber(SRLV) |/* FunctionFieldToBitNumber(DSRLV) |*/ ++ FunctionFieldToBitNumber(SRAV) |/* FunctionFieldToBitNumber(DSRAV) |*/ ++ FunctionFieldToBitNumber(LSA) | FunctionFieldToBitNumber(DLSA) | ++ FunctionFieldToBitNumber(MFHI) | FunctionFieldToBitNumber(MFLO) | ++ FunctionFieldToBitNumber(MULT) | FunctionFieldToBitNumber(DMULT) | ++ FunctionFieldToBitNumber(MULTU) | FunctionFieldToBitNumber(DMULTU) | ++ /* FunctionFieldToBitNumber(DIV) | FunctionFieldToBitNumber(DDIV) | ++ FunctionFieldToBitNumber(DIVU) | FunctionFieldToBitNumber(DDIVU) |*/ ++ FunctionFieldToBitNumber(ADD) | FunctionFieldToBitNumber(DADD) | ++ /* FunctionFieldToBitNumber(ADDU) | FunctionFieldToBitNumber(DADDU) |*/ ++ FunctionFieldToBitNumber(SUB) | FunctionFieldToBitNumber(DSUB) | ++ /* FunctionFieldToBitNumber(SUBU) | FunctionFieldToBitNumber(DSUBU) |*/ ++ FunctionFieldToBitNumber(AND) | FunctionFieldToBitNumber(OR) | ++ FunctionFieldToBitNumber(XOR) | FunctionFieldToBitNumber(NOR) | ++ FunctionFieldToBitNumber(SLT) | FunctionFieldToBitNumber(SLTU) | ++ FunctionFieldToBitNumber(TGE) | FunctionFieldToBitNumber(TGEU) | ++ FunctionFieldToBitNumber(TLT) | FunctionFieldToBitNumber(TLTU) | ++ FunctionFieldToBitNumber(TEQ) | FunctionFieldToBitNumber(TNE) | ++ /* FunctionFieldToBitNumber(MOVZ) |*/ FunctionFieldToBitNumber(MOVN) | ++ FunctionFieldToBitNumber(MOVCI) | FunctionFieldToBitNumber(SELEQZ_S) | ++ 
FunctionFieldToBitNumber(SELNEZ_S) | FunctionFieldToBitNumber(SYNC); ++ ++ ++ // Accessors for the different named fields used in the XXXX64 encoding. ++ inline Opcode OpcodeValue() const { ++ return static_cast( ++ Bits(kOpcodeShift + kOpcodeBits - 1, kOpcodeShift)); ++ } ++ ++ inline int FunctionFieldRaw() const { ++ return InstructionBits() & kFunctionFieldMask; ++ } ++ ++ // Return the fields at their original place in the instruction encoding. ++ inline Opcode OpcodeFieldRaw() const { ++ return static_cast(InstructionBits() & kOpcodeMask); ++ } ++ ++ inline int OpcodeFieldValue() const { ++ return static_cast(InstructionBits() & kOpcodeMask); ++ } ++ ++ // Safe to call within InstructionType(). ++ inline int RsFieldRawNoAssert() const { ++ return InstructionBits() & kRsFieldMask; ++ } ++ ++ inline int SaFieldRaw() const { return InstructionBits() & kSaFieldMask; } ++ ++ // Get the encoding type of the instruction. ++ inline Type InstructionType() const; ++ ++ inline MSAMinorOpcode MSAMinorOpcodeField() const { ++ int op = this->FunctionFieldRaw(); ++ switch (op) { ++ case 0: ++ case 1: ++ case 2: ++ return kMsaMinorI8; ++ case 6: ++ return kMsaMinorI5; ++ case 7: ++ return (((this->InstructionBits() & kMsaI5I10Mask) == LDI) ++ ? kMsaMinorI10 ++ : kMsaMinorI5); ++ case 9: ++ case 10: ++ return kMsaMinorBIT; ++ case 13: ++ case 14: ++ case 15: ++ case 16: ++ case 17: ++ case 18: ++ case 19: ++ case 20: ++ case 21: ++ return kMsaMinor3R; ++ case 25: ++ return kMsaMinorELM; ++ case 26: ++ case 27: ++ case 28: ++ return kMsaMinor3RF; ++ case 30: ++ switch (this->RsFieldRawNoAssert()) { ++ case MSA_2R_FORMAT: ++ return kMsaMinor2R; ++ case MSA_2RF_FORMAT: ++ return kMsaMinor2RF; ++ default: ++ return kMsaMinorVEC; ++ } ++ break; ++ case 32: ++ case 33: ++ case 34: ++ case 35: ++ case 36: ++ case 37: ++ case 38: ++ case 39: ++ return kMsaMinorMI10; ++ default: ++ return kMsaMinorUndefined; ++ } ++ } ++ ++ protected: ++ InstructionBase() {} ++}; ++ ++template ++class InstructionGetters : public T { ++ public: ++ inline int RsValue() const { ++ return this->Bits(kRsShift + kRsBits - 1, kRsShift); ++ } ++ ++ inline int RtValue() const { ++ return this->Bits(kRtShift + kRtBits - 1, kRtShift); ++ } ++ ++ inline int RdValue() const { ++ return this->Bits(kRdShift + kRdBits - 1, kRdShift); ++ } ++ ++ inline int BaseValue() const { ++ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); ++ return this->Bits(kBaseShift + kBaseBits - 1, kBaseShift); ++ } ++ ++ inline int SaValue() const { ++ DCHECK_EQ(this->InstructionType(), InstructionBase::kRegisterType); ++ return this->Bits(kSaShift + kSaBits - 1, kSaShift); ++ } ++ ++ inline int LsaSaValue() const { ++ DCHECK_EQ(this->InstructionType(), InstructionBase::kRegisterType); ++ return this->Bits(kSaShift + kLsaSaBits - 1, kSaShift); ++ } ++ ++ inline int FunctionValue() const { ++ DCHECK(this->InstructionType() == InstructionBase::kRegisterType || ++ this->InstructionType() == InstructionBase::kImmediateType); ++ return this->Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift); ++ } ++ ++ inline int FdValue() const { ++ return this->Bits(kFdShift + kFdBits - 1, kFdShift); ++ } ++ ++ inline int FsValue() const { ++ return this->Bits(kFsShift + kFsBits - 1, kFsShift); ++ } ++ ++ inline int FtValue() const { ++ return this->Bits(kFtShift + kFtBits - 1, kFtShift); ++ } ++ ++ inline int FrValue() const { ++ return this->Bits(kFrShift + kFrBits - 1, kFrShift); ++ } ++ ++ inline int WdValue() const { ++ return this->Bits(kWdShift + 
kWdBits - 1, kWdShift); ++ } ++ ++ inline int WsValue() const { ++ return this->Bits(kWsShift + kWsBits - 1, kWsShift); ++ } ++ ++ inline int WtValue() const { ++ return this->Bits(kWtShift + kWtBits - 1, kWtShift); ++ } ++ ++ inline int Bp2Value() const { ++ DCHECK_EQ(this->InstructionType(), InstructionBase::kRegisterType); ++ return this->Bits(kBp2Shift + kBp2Bits - 1, kBp2Shift); ++ } ++ ++ inline int Bp3Value() const { ++ DCHECK_EQ(this->InstructionType(), InstructionBase::kRegisterType); ++ return this->Bits(kBp3Shift + kBp3Bits - 1, kBp3Shift); ++ } ++ ++ // Float Compare condition code instruction bits. ++ inline int FCccValue() const { ++ return this->Bits(kFCccShift + kFCccBits - 1, kFCccShift); ++ } ++ ++ // Float Branch condition code instruction bits. ++ inline int FBccValue() const { ++ return this->Bits(kFBccShift + kFBccBits - 1, kFBccShift); ++ } ++ ++ // Float Branch true/false instruction bit. ++ inline int FBtrueValue() const { ++ return this->Bits(kFBtrueShift + kFBtrueBits - 1, kFBtrueShift); ++ } ++ ++ // Return the fields at their original place in the instruction encoding. ++ inline Opcode OpcodeFieldRaw() const { ++ return static_cast(this->InstructionBits() & kOpcodeMask); ++ } ++ ++ inline int OpcodeFieldValue() const { ++ return static_cast(this->InstructionBits() & kOpcodeMask); ++ } ++ ++ inline int RsFieldRaw() const { ++ DCHECK(this->InstructionType() == InstructionBase::kRegisterType || ++ this->InstructionType() == InstructionBase::kImmediateType); ++ return this->InstructionBits() & kRsFieldMask; ++ } ++ ++ // Same as above function, but safe to call within InstructionType(). ++ inline int RsFieldRawNoAssert() const { ++ return this->InstructionBits() & kRsFieldMask; ++ } ++ ++ inline int RtFieldRaw() const { ++ DCHECK(this->InstructionType() == InstructionBase::kRegisterType || ++ this->InstructionType() == InstructionBase::kImmediateType); ++ return this->InstructionBits() & kRtFieldMask; ++ } ++ ++ inline int RdFieldRaw() const { ++ DCHECK_EQ(this->InstructionType(), InstructionBase::kRegisterType); ++ return this->InstructionBits() & kRdFieldMask; ++ } ++ ++ inline int SaFieldRaw() const { ++ return this->InstructionBits() & kSaFieldMask; ++ } ++ ++ inline int FunctionFieldRaw() const { ++ return this->InstructionBits() & kFunctionFieldMask; ++ } ++ ++#ifdef SW64 //jzy 20150213 ++ inline int SwGetMask(int hi, int lo) const { ++ int mask = 2 << (hi-lo); ++ mask -= 1; ++ return mask << lo; ++ } ++ ++ //the position of instruction function_code in SW is different in different type of instructions ++ inline int SwFunctionFieldRaw(int hi, int lo) const { ++ return this->InstructionBits() & SwGetMask(hi, lo); ++ } ++ inline int SwFunctionFieldValue(int hi, int lo) const { ++ return this->InstructionBits() & SwGetMask(hi, lo); ++ } ++ ++ inline int SwImmOrDispFieldRaw(int hi, int lo) const { ++ return this->InstructionBits() & SwGetMask(hi, lo); ++ } ++ inline int SwImmOrDispFieldValue(int hi, int lo) const { ++ int shift_len = 32 - (hi-lo+1); ++ return (((this->InstructionBits() & SwGetMask(hi, lo)) >> lo) << shift_len) >> shift_len; ++ } ++ ++ inline int SwRaFieldRaw() const { ++ return this->InstructionBits() & sRaFieldMask; //SwGetMask(25,21); ++ } ++ inline int SwRbFieldRaw() const { ++ return this->InstructionBits() & sRbFieldMask; //SwGetMask(20,16); ++ } ++ inline int SwRcFieldRaw(int hi, int lo) const { ++ return this->InstructionBits() & SwGetMask(hi,lo); ++ } ++ inline int SwRdFieldRaw() const { ++ return this->InstructionBits() & sRdFieldMask; 
//SwGetMask(4,0); ++ } ++ ++ inline int SwFaFieldRaw() const { ++ return this->InstructionBits() & sRaFieldMask; //SwGetMask(25,21); ++ } ++ inline int SwFbFieldRaw() const { ++ return this->InstructionBits() & sRbFieldMask; //SwGetMask(20,16); ++ } ++ inline int SwFcFieldRaw(int hi, int lo) const { ++ return this->InstructionBits() & SwGetMask(hi,lo); ++ } ++ inline int SwFdFieldRaw() const { ++ return this->InstructionBits() & sRdFieldMask; //SwGetMask(4,0); ++ } ++ ++ inline int SwRaValue() const { ++ return this->Bits(sRaShift+sRaBits-1, sRaShift); //Bits(25,21); ++ } ++ inline int SwRbValue() const { ++ return this->Bits(sRbShift+sRbBits-1, sRbShift); //Bits(20,16); ++ } ++ //the position of rc register in SW is different in different type of instructions ++ inline int SwRcValue(int hi, int lo) const { ++ return this->Bits(hi, lo); ++ } ++ inline int SwRdValue() const { ++ return this->Bits(sRdShift+sRdBits-1, sRdShift); //Bits(4,0); ++ } ++ inline int SwFaValue() const { ++ return this->Bits(sRaShift+sRaBits-1, sRaShift); //Bits(25,21); ++ } ++ inline int SwFbValue() const { ++ return this->Bits(sRbShift+sRbBits-1, sRbShift); //Bits(20,16); ++ } ++ //the position of rc register in SW is different in different type of instructions ++ inline int SwFcValue(int hi, int lo) const { ++ return this->Bits(hi, lo); ++ } ++ inline int SwFdValue() const { ++ return this->Bits(sRdShift+sRdBits-1, sRdShift); //Bits(4,0); ++ } ++#endif ++ ++ // Get the secondary field according to the opcode. ++ inline int SecondaryValue() const { ++ Opcode op = this->OpcodeFieldRaw(); ++ switch (op) { ++ case SPECIAL: ++ case SPECIAL2: ++ return FunctionValue(); ++ case COP1: ++ return RsValue(); ++ case REGIMM: ++ return RtValue(); ++ default: ++ return nullptrSF; ++ } ++ } ++ ++ inline int32_t ImmValue(int bits) const { ++ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); ++ return this->Bits(bits - 1, 0); ++ } ++ ++ inline int32_t Imm9Value() const { ++ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); ++ return this->Bits(kImm9Shift + kImm9Bits - 1, kImm9Shift); ++ } ++ ++ inline int32_t Imm16Value() const { ++ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); ++ return this->Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift); ++ } ++ ++ inline int32_t Imm18Value() const { ++ return this->Bits(kImm18Shift + kImm18Bits - 1, kImm18Shift); ++ } ++ ++ inline int32_t Imm19Value() const { ++ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); ++ return this->Bits(kImm19Shift + kImm19Bits - 1, kImm19Shift); ++ } ++ ++ inline int32_t Imm21Value() const { ++ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); ++ return this->Bits(kImm21Shift + kImm21Bits - 1, kImm21Shift); ++ } ++ ++ inline int32_t Imm26Value() const { ++ DCHECK((this->InstructionType() == InstructionBase::kJumpType) || ++ (this->InstructionType() == InstructionBase::kImmediateType)); ++ return this->Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift); ++ } ++ ++ inline int32_t MsaImm8Value() const { ++ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); ++ return this->Bits(kMsaImm8Shift + kMsaImm8Bits - 1, kMsaImm8Shift); ++ } ++ ++ inline int32_t MsaImm5Value() const { ++ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); ++ return this->Bits(kMsaImm5Shift + kMsaImm5Bits - 1, kMsaImm5Shift); ++ } ++ ++ inline int32_t MsaImm10Value() const { ++ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); ++ return 
this->Bits(kMsaImm10Shift + kMsaImm10Bits - 1, kMsaImm10Shift); ++ } ++ ++ inline int32_t MsaImmMI10Value() const { ++ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); ++ return this->Bits(kMsaImmMI10Shift + kMsaImmMI10Bits - 1, kMsaImmMI10Shift); ++ } ++ ++ inline int32_t MsaBitDf() const { ++ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); ++ int32_t df_m = this->Bits(22, 16); ++ if (((df_m >> 6) & 1U) == 0) { ++ return 3; ++ } else if (((df_m >> 5) & 3U) == 2) { ++ return 2; ++ } else if (((df_m >> 4) & 7U) == 6) { ++ return 1; ++ } else if (((df_m >> 3) & 15U) == 14) { ++ return 0; ++ } else { ++ return -1; ++ } ++ } ++ ++ inline int32_t MsaBitMValue() const { ++ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); ++ return this->Bits(16 + this->MsaBitDf() + 3, 16); ++ } ++ ++ inline int32_t MsaElmDf() const { ++ DCHECK(this->InstructionType() == InstructionBase::kRegisterType || ++ this->InstructionType() == InstructionBase::kImmediateType); ++ int32_t df_n = this->Bits(21, 16); ++ if (((df_n >> 4) & 3U) == 0) { ++ return 0; ++ } else if (((df_n >> 3) & 7U) == 4) { ++ return 1; ++ } else if (((df_n >> 2) & 15U) == 12) { ++ return 2; ++ } else if (((df_n >> 1) & 31U) == 28) { ++ return 3; ++ } else { ++ return -1; ++ } ++ } ++ ++ inline int32_t MsaElmNValue() const { ++ DCHECK(this->InstructionType() == InstructionBase::kRegisterType || ++ this->InstructionType() == InstructionBase::kImmediateType); ++ return this->Bits(16 + 4 - this->MsaElmDf(), 16); ++ } ++ ++ static bool IsForbiddenAfterBranchInstr(Instr instr); ++ ++ // Say if the instruction should not be used in a branch delay slot or ++ // immediately after a compact branch. ++ inline bool IsForbiddenAfterBranch() const { ++ return IsForbiddenAfterBranchInstr(this->InstructionBits()); ++ } ++ ++ inline bool IsForbiddenInBranchDelay() const { ++ return IsForbiddenAfterBranch(); ++ } ++ ++ // Say if the instruction 'links'. e.g. jal, bal. ++ bool IsLinkingInstruction() const; ++ // Say if the instruction is a break or a trap. ++ bool IsTrap() const; ++ ++ inline bool IsMSABranchInstr() const { ++ if (this->OpcodeFieldRaw() == COP1) { ++ switch (this->RsFieldRaw()) { ++ case BZ_V: ++ case BZ_B: ++ case BZ_H: ++ case BZ_W: ++ case BZ_D: ++ case BNZ_V: ++ case BNZ_B: ++ case BNZ_H: ++ case BNZ_W: ++ case BNZ_D: ++ return true; ++ default: ++ return false; ++ } ++ } ++ return false; ++ } ++ ++ inline bool IsMSAInstr() const { ++ if (this->IsMSABranchInstr() || (this->OpcodeFieldRaw() == MSA)) ++ return true; ++ return false; ++ } ++}; ++ ++class Instruction : public InstructionGetters { ++ public: ++ // Instructions are read of out a code stream. The only way to get a ++ // reference to an instruction is to convert a pointer. There is no way ++ // to allocate or create instances of class Instruction. ++ // Use the At(pc) function to create references to Instruction. ++ static Instruction* At(byte* pc) { ++ return reinterpret_cast(pc); ++ } ++ ++ private: ++ // We need to prevent the creation of instances of class Instruction. ++ DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction); ++}; ++ ++ ++// ----------------------------------------------------------------------------- ++// XXXX64 assembly various constants. ++ ++// C/C++ argument slots size. ++const int kCArgSlotCount = 0; ++ ++// TODO(plind): below should be based on kPointerSize ++// TODO(plind): find all usages and remove the needless instructions for n64. 
++const int kCArgsSlotsSize = kCArgSlotCount * kInstrSize * 2; ++ ++const int kInvalidStackOffset = -1; ++const int kBranchReturnOffset = 1 * kInstrSize; ++ ++static const int kNegOffset = 0x00008000; ++ ++InstructionBase::Type InstructionBase::InstructionType() const { ++ switch (OpcodeFieldValue()) { ++ //cjq 20150317: add instruction 'sys_call' to do ++ case op_sys_call: ++ return kSwSyscallType; ++ ++ case op_call: ++ case op_ret: ++ case op_jmp: ++ case op_ldwe: ++ case op_ldse: ++ case op_ldde: ++ case op_vlds: ++ case op_vldd: ++ case op_vsts: ++ case op_vstd: ++ case op_ldbu: ++ case op_ldhu: ++ case op_ldw: ++ case op_ldl: ++ case op_ldl_u: ++ case op_flds: ++ case op_fldd: ++ case op_stb: ++ case op_sth: ++ case op_stw: ++ case op_stl: ++ case op_stl_u: ++ case op_fsts: ++ case op_fstd: ++ case op_ldi: ++ case op_ldih: ++ return kSwStorageType; ++ case OP(0x08): ++ return kSwStorageType; ++ case OP(0x06): ++ return kSwStorageType;//jzy 20150213:TODO ++ ++ case op_br: ++ case op_bsr: ++ case op_beq: ++ case op_bne: ++ case op_blt: ++ case op_ble: ++ case op_bgt: ++ case op_bge: ++ case op_blbc: ++ case op_blbs: ++ case op_fbeq: ++ case op_fbne: ++ case op_fblt: ++ case op_fble: ++ case op_fbgt: ++ case op_fbge: ++ return kSwTransferanceType;//ld 20150319 ++ ++ case OP(0x10): ++ case OP(0x12): ++ case OP(0x18): ++ return kSwSimpleCalculationType; ++ ++ case OP(0x11): ++ case OP(0x13): ++ case OP(0x19): ++ return kSwCompositeCalculationType; ++ ++ case op_trap: ++ return kSwSimulatorTrap; ++ ++ default: ++ return kImmediateType; ++ } ++ ++ return kUnsupported; ++} ++#undef OpcodeToBitNumber ++#undef FunctionFieldToBitNumber ++ ++// ----------------------------------------------------------------------------- ++// Instructions. ++ ++template ++bool InstructionGetters
<T>
::IsLinkingInstruction() const { ++ switch (OpcodeFieldRaw()) { ++ case JAL: ++ return true; ++ case POP76: ++ if (RsFieldRawNoAssert() == JIALC) ++ return true; // JIALC ++ else ++ return false; // BNEZC ++ case REGIMM: ++ switch (RtFieldRaw()) { ++ case BGEZAL: ++ case BLTZAL: ++ return true; ++ default: ++ return false; ++ } ++ case SPECIAL: ++ switch (FunctionFieldRaw()) { ++ case JALR: ++ return true; ++ default: ++ return false; ++ } ++ default: ++ return false; ++ } ++} ++ ++template <class T> ++bool InstructionGetters
<T>
::IsTrap() const { ++ return OpcodeFieldValue() == op_trap; ++} ++ ++// static ++template ++bool InstructionGetters::IsForbiddenAfterBranchInstr(Instr instr) { ++ Opcode opcode = static_cast(instr & kOpcodeMask); ++ switch (opcode) { ++ case J: ++ case JAL: ++ case BEQ: ++ case BNE: ++ case BLEZ: // POP06 bgeuc/bleuc, blezalc, bgezalc ++ case BGTZ: // POP07 bltuc/bgtuc, bgtzalc, bltzalc ++ case BEQL: ++ case BNEL: ++ case BLEZL: // POP26 bgezc, blezc, bgec/blec ++ case BGTZL: // POP27 bgtzc, bltzc, bltc/bgtc ++ case BC: ++ case BALC: ++ case POP10: // beqzalc, bovc, beqc ++ case POP30: // bnezalc, bnvc, bnec ++ case POP66: // beqzc, jic ++ case POP76: // bnezc, jialc ++ return true; ++ case REGIMM: ++ switch (instr & kRtFieldMask) { ++ case BLTZ: ++ case BGEZ: ++ case BLTZAL: ++ case BGEZAL: ++ return true; ++ default: ++ return false; ++ } ++ break; ++ case SPECIAL: ++ switch (instr & kFunctionFieldMask) { ++ case JR: ++ case JALR: ++ return true; ++ default: ++ return false; ++ } ++ break; ++ case COP1: ++ switch (instr & kRsFieldMask) { ++ case BC1: ++ case BC1EQZ: ++ case BC1NEZ: ++ case BZ_V: ++ case BZ_B: ++ case BZ_H: ++ case BZ_W: ++ case BZ_D: ++ case BNZ_V: ++ case BNZ_B: ++ case BNZ_H: ++ case BNZ_W: ++ case BNZ_D: ++ return true; ++ break; ++ default: ++ return false; ++ } ++ break; ++ default: ++ return false; ++ } ++} ++ ++#ifdef SW64 ++ ++#undef OP ++#undef PCD ++#undef OPMEM ++#undef BRA ++ ++#undef OFP ++#undef FMA ++#undef MFC ++#undef MBR ++#undef OPR ++#undef OPRL ++#undef TOPR ++#undef TOPRL ++ ++#undef ATMEM ++#undef PRIRET ++#undef SPCD ++#undef EV6HWMEM ++#undef CSR ++ ++#undef LOGX ++#undef PSE_LOGX ++ ++#endif ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_CODEGEN_SW64_CONSTANTS_SW64_H_ +diff --git a/src/3rdparty/chromium/v8/src/codegen/sw64/cpu-sw64.cc b/src/3rdparty/chromium/v8/src/codegen/sw64/cpu-sw64.cc +new file mode 100755 +index 000000000..bfd8138a8 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/codegen/sw64/cpu-sw64.cc +@@ -0,0 +1,40 @@ ++// Copyright 2012 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++// CPU specific code for arm independent of OS goes here. ++ ++#include ++#include ++ ++#if V8_TARGET_ARCH_SW64 ++ ++#include "src/codegen/assembler.h" ++#include "src/codegen/macro-assembler.h" ++ ++#include "src/execution/simulator.h" // For cache flushing. ++#include "src/codegen/cpu-features.h" ++ ++namespace v8 { ++namespace internal { ++ ++ ++void CpuFeatures::FlushICache(void* start, size_t size) { ++ // Nothing to do, flushing no instructions. ++ if (size == 0) { ++ return; ++ } ++ ++#if defined(__sw_64__) ++ asm volatile("ldi $0, 266($31)\n" ++ "sys_call 0x83\n"); ++#else ++#error "Target Architecture are not supported in CpuFeatures::FlushICache" ++#endif ++ ++} ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_TARGET_ARCH_SW64 +diff --git a/src/3rdparty/chromium/v8/src/codegen/sw64/interface-descriptors-sw64.cc b/src/3rdparty/chromium/v8/src/codegen/sw64/interface-descriptors-sw64.cc +new file mode 100755 +index 000000000..8edc3a06f +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/codegen/sw64/interface-descriptors-sw64.cc +@@ -0,0 +1,351 @@ ++// Copyright 2012 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. 
++ ++#if V8_TARGET_ARCH_SW64 ++ ++#include "src/codegen/interface-descriptors.h" ++ ++#include "src/execution/frames.h" ++ ++namespace v8 { ++namespace internal { ++ ++const Register CallInterfaceDescriptor::ContextRegister() { return cp; } ++ ++void CallInterfaceDescriptor::DefaultInitializePlatformSpecific( ++ CallInterfaceDescriptorData* data, int register_parameter_count) { ++ const Register default_stub_registers[] = {a0, a1, a2, a3, a4}; ++ CHECK_LE(static_cast(register_parameter_count), ++ arraysize(default_stub_registers)); ++ data->InitializePlatformSpecific(register_parameter_count, ++ default_stub_registers); ++} ++ ++void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ const Register default_stub_registers[] = {a0, a1, a2, a3}; ++ CHECK_EQ(static_cast(kParameterCount), ++ arraysize(default_stub_registers)); ++ data->InitializePlatformSpecific(kParameterCount, default_stub_registers); ++} ++ ++void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ const Register default_stub_registers[] = {a0, a1, a2, a3, a4}; ++ CHECK_EQ(static_cast(kParameterCount - kStackArgumentsCount), ++ arraysize(default_stub_registers)); ++ data->InitializePlatformSpecific(kParameterCount - kStackArgumentsCount, ++ default_stub_registers); ++} ++ ++void RecordWriteDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0}; ++ ++ data->RestrictAllocatableRegisters(default_stub_registers, ++ arraysize(default_stub_registers)); ++ ++ CHECK_LE(static_cast(kParameterCount), ++ arraysize(default_stub_registers)); ++ data->InitializePlatformSpecific(kParameterCount, default_stub_registers); ++} ++ ++void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0}; ++ ++ data->RestrictAllocatableRegisters(default_stub_registers, ++ arraysize(default_stub_registers)); ++ ++ CHECK_LE(static_cast(kParameterCount), ++ arraysize(default_stub_registers)); ++ data->InitializePlatformSpecific(kParameterCount, default_stub_registers); ++} ++ ++const Register LoadDescriptor::ReceiverRegister() { return a1; } ++const Register LoadDescriptor::NameRegister() { return a2; } ++const Register LoadDescriptor::SlotRegister() { return a0; } ++ ++const Register LoadWithVectorDescriptor::VectorRegister() { return a3; } ++ ++const Register ++LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() { ++ return a4; ++} ++ ++const Register StoreDescriptor::ReceiverRegister() { return a1; } ++const Register StoreDescriptor::NameRegister() { return a2; } ++const Register StoreDescriptor::ValueRegister() { return a0; } ++const Register StoreDescriptor::SlotRegister() { return a4; } ++ ++const Register StoreWithVectorDescriptor::VectorRegister() { return a3; } ++ ++const Register StoreTransitionDescriptor::SlotRegister() { return a4; } ++const Register StoreTransitionDescriptor::VectorRegister() { return a3; } ++const Register StoreTransitionDescriptor::MapRegister() { return a5; } ++ ++const Register ApiGetterDescriptor::HolderRegister() { return a0; } ++const Register ApiGetterDescriptor::CallbackRegister() { return a3; } ++ ++const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; } ++const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; } ++ ++ ++// static ++const Register 
TypeConversionDescriptor::ArgumentRegister() { return a0; } ++ ++void TypeofDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ Register registers[] = {a3}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void CallTrampolineDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a1: target ++ // a0: number of arguments ++ Register registers[] = {a1, a0}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void CallVarargsDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a0 : number of arguments (on the stack, not including receiver) ++ // a1 : the target to call ++ // a4 : arguments list length (untagged) ++ // a2 : arguments list (FixedArray) ++ Register registers[] = {a1, a0, a4, a2}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void CallForwardVarargsDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a1: the target to call ++ // a0: number of arguments ++ // a2: start index (to support rest parameters) ++ Register registers[] = {a1, a0, a2}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void CallFunctionTemplateDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a1 : function template info ++ // a0 : number of arguments (on the stack, not including receiver) ++ Register registers[] = {a1, a0}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void CallWithSpreadDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a0 : number of arguments (on the stack, not including receiver) ++ // a1 : the target to call ++ // a2 : the object to spread ++ Register registers[] = {a1, a0, a2}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void CallWithArrayLikeDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a1 : the target to call ++ // a2 : the arguments list ++ Register registers[] = {a1, a2}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void ConstructVarargsDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a0 : number of arguments (on the stack, not including receiver) ++ // a1 : the target to call ++ // a3 : the new target ++ // a4 : arguments list length (untagged) ++ // a2 : arguments list (FixedArray) ++ Register registers[] = {a1, a3, a0, a4, a2}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void ConstructForwardVarargsDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a1: the target to call ++ // a3: new target ++ // a0: number of arguments ++ // a2: start index (to support rest parameters) ++ Register registers[] = {a1, a3, a0, a2}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void ConstructWithSpreadDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a0 : number of arguments (on the stack, not including receiver) ++ // a1 : the target to call ++ // a3 : the new target ++ // a2 : the object to spread ++ Register registers[] = {a1, a3, a0, a2}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a1 : the target to call ++ // a3 : the new 
target ++ // a2 : the arguments list ++ Register registers[] = {a1, a3, a2}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void ConstructStubDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a1: target ++ // a3: new target ++ // a0: number of arguments ++ // a2: allocation site or undefined ++ Register registers[] = {a1, a3, a0, a2}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void AbortDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ Register registers[] = {a0}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void CompareDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ Register registers[] = {a1, a0}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++ ++void BinaryOpDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ Register registers[] = {a1, a0}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void ArgumentsAdaptorDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ Register registers[] = { ++ a1, // JSFunction ++ a3, // the new target ++ a0, // actual number of arguments ++ a2, // expected number of arguments ++ }; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void ApiCallbackDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ Register registers[] = { ++ a1, // kApiFunctionAddress ++ a2, // kArgc ++ a3, // kCallData ++ a0, // kHolder ++ }; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void InterpreterDispatchDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ Register registers[] = { ++ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister, ++ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ Register registers[] = { ++ a0, // argument count (not including receiver) ++ a2, // address of first argument ++ a1 // the target callable to be call ++ }; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ Register registers[] = { ++ a0, // argument count (not including receiver) ++ a4, // address of the first argument ++ a1, // constructor to call ++ a3, // new target ++ a2, // allocation site feedback if available, undefined otherwise ++ }; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void ResumeGeneratorDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ Register registers[] = { ++ v0, // the value to pass to the generator ++ a1 // the JSGeneratorObject to resume ++ }; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void FrameDropperTrampolineDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ Register registers[] = { ++ a1, // loaded new FP ++ }; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void RunMicrotasksEntryDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ Register registers[] = 
{a0, a1}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // TODO(v8:8888): Implement on this platform. ++ DefaultInitializePlatformSpecific(data, 4); ++} ++ ++void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // TODO(v8:8888): Implement on this platform. ++ DefaultInitializePlatformSpecific(data, 4); ++} ++ ++void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // TODO(v8:8888): Implement on this platform. ++ DefaultInitializePlatformSpecific(data, 4); ++} ++ ++void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // TODO(v8:8888): Implement on this platform. ++ DefaultInitializePlatformSpecific(data, 4); ++} ++ ++void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // TODO(v8:8888): Implement on this platform. ++ DefaultInitializePlatformSpecific(data, 4); ++} ++ ++void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // TODO(v8:8888): Implement on this platform. ++ DefaultInitializePlatformSpecific(data, 4); ++} ++ ++void Compare_WithFeedbackDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // TODO(v8:8888): Implement on this platform. ++ DefaultInitializePlatformSpecific(data, 4); ++} ++ ++void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // TODO(v8:8888): Implement on this platform. ++ DefaultInitializePlatformSpecific(data, 3); ++} ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_TARGET_ARCH_SW64 +diff --git a/src/3rdparty/chromium/v8/src/codegen/sw64/macro-assembler-sw64.cc b/src/3rdparty/chromium/v8/src/codegen/sw64/macro-assembler-sw64.cc +new file mode 100755 +index 000000000..a678af140 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/codegen/sw64/macro-assembler-sw64.cc +@@ -0,0 +1,5089 @@ ++// Copyright 2012 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#include <limits.h> // For LONG_MIN, LONG_MAX.
++ ++#if V8_TARGET_ARCH_SW64 ++ ++#include "src/base/bits.h" ++#include "src/base/division-by-constant.h" ++#include "src/codegen/assembler-inl.h" ++#include "src/codegen/callable.h" ++#include "src/codegen/code-factory.h" ++#include "src/codegen/external-reference-table.h" ++#include "src/codegen/macro-assembler.h" ++#include "src/codegen/register-configuration.h" ++#include "src/debug/debug.h" ++#include "src/execution/frames-inl.h" ++#include "src/heap/memory-chunk.h" ++#include "src/init/bootstrapper.h" ++#include "src/logging/counters.h" ++#include "src/objects/heap-number.h" ++#include "src/runtime/runtime.h" ++#include "src/snapshot/embedded/embedded-data.h" ++#include "src/snapshot/snapshot.h" ++#include "src/wasm/wasm-code-manager.h" ++ ++namespace v8 { ++namespace internal { ++ ++static inline bool IsZero(const Operand& rt) { ++ if (rt.is_reg()) { ++ return rt.rm() == zero_reg; ++ } else { ++ return rt.immediate() == 0; ++ } ++} ++ ++int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, ++ Register exclusion1, ++ Register exclusion2, ++ Register exclusion3) const { ++ int bytes = 0; ++ RegList exclusions = 0; ++ if (exclusion1 != no_reg) { ++ exclusions |= exclusion1.bit(); ++ if (exclusion2 != no_reg) { ++ exclusions |= exclusion2.bit(); ++ if (exclusion3 != no_reg) { ++ exclusions |= exclusion3.bit(); ++ } ++ } ++ } ++ ++ RegList list = kJSCallerSaved & ~exclusions; ++ bytes += NumRegs(list) * kPointerSize; ++ ++ if (fp_mode == kSaveFPRegs) { ++ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize; ++ } ++ ++ return bytes; ++} ++ ++int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, ++ Register exclusion2, Register exclusion3) { ++ int bytes = 0; ++ RegList exclusions = 0; ++ if (exclusion1 != no_reg) { ++ exclusions |= exclusion1.bit(); ++ if (exclusion2 != no_reg) { ++ exclusions |= exclusion2.bit(); ++ if (exclusion3 != no_reg) { ++ exclusions |= exclusion3.bit(); ++ } ++ } ++ } ++ ++ RegList list = kJSCallerSaved & ~exclusions; ++ MultiPush(list); ++ bytes += NumRegs(list) * kPointerSize; ++ ++ if (fp_mode == kSaveFPRegs) { ++ MultiPushFPU(kCallerSavedFPU); ++ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize; ++ } ++ ++ return bytes; ++} ++ ++int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, ++ Register exclusion2, Register exclusion3) { ++ int bytes = 0; ++ if (fp_mode == kSaveFPRegs) { ++ MultiPopFPU(kCallerSavedFPU); ++ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize; ++ } ++ ++ RegList exclusions = 0; ++ if (exclusion1 != no_reg) { ++ exclusions |= exclusion1.bit(); ++ if (exclusion2 != no_reg) { ++ exclusions |= exclusion2.bit(); ++ if (exclusion3 != no_reg) { ++ exclusions |= exclusion3.bit(); ++ } ++ } ++ } ++ ++ RegList list = kJSCallerSaved & ~exclusions; ++ MultiPop(list); ++ bytes += NumRegs(list) * kPointerSize; ++ ++ return bytes; ++} ++ ++void TurboAssembler::LoadRoot(Register destination, RootIndex index) {SCOPEMARK_NAME(TurboAssembler::LoadRoot, this); ++ Ldl(destination, MemOperand(s4, RootRegisterOffsetForRootIndex(index))); ++} ++ ++void TurboAssembler::LoadRoot(Register destination, RootIndex index, ++ Condition cond, Register src1, ++ const Operand& src2) {SCOPEMARK_NAME(TurboAssembler::LoadRoot, this); ++ Branch(2, NegateCondition(cond), src1, src2); ++ Ldl(destination, MemOperand(s4, RootRegisterOffsetForRootIndex(index))); ++} ++ ++ ++void TurboAssembler::PushCommonFrame(Register marker_reg) {SCOPEMARK_NAME(TurboAssembler::PushCommonFrame, this); ++ if (marker_reg.is_valid()) 
{ ++ Push(ra, fp, marker_reg); ++ Addl(fp, sp, Operand(kPointerSize)); ++ } else { ++ Push(ra, fp); ++ mov(fp, sp); ++ } ++} ++ ++void TurboAssembler::PushStandardFrame(Register function_reg) {SCOPEMARK_NAME(TurboAssembler::PushStandardFrame, this); ++ int offset = -StandardFrameConstants::kContextOffset; ++ if (function_reg.is_valid()) { ++ Push(ra, fp, cp, function_reg, kJavaScriptCallArgCountRegister); ++ offset += 2 * kPointerSize; ++ } else { ++ Push(ra, fp, cp, kJavaScriptCallArgCountRegister); ++ offset += kPointerSize; ++ } ++ Addl(fp, sp, Operand(offset)); ++} ++ ++int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { ++ // The registers are pushed starting with the highest encoding, ++ // which means that lowest encodings are closest to the stack pointer. ++ return kSafepointRegisterStackIndexMap[reg_code]; ++} ++ ++ ++// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved) ++// The register 'object' contains a heap object pointer. The heap object ++// tag is shifted away. ++void MacroAssembler::RecordWriteField(Register object, int offset, ++ Register value, Register dst, ++ RAStatus ra_status, ++ SaveFPRegsMode save_fp, ++ RememberedSetAction remembered_set_action, ++ SmiCheck smi_check) {SCOPEMARK_NAME(MacroAssembler::RecordWriteField, this); ++ DCHECK(!AreAliased(value, dst, t11, object)); ++ // First, check if a write barrier is even needed. The tests below ++ // catch stores of Smis. ++ Label done; ++ ++ // Skip barrier if writing a smi. ++ if (smi_check == INLINE_SMI_CHECK) { ++ JumpIfSmi(value, &done); ++ } ++ ++ // Although the object register is tagged, the offset is relative to the start ++ // of the object, so so offset must be a multiple of kPointerSize. ++ DCHECK(IsAligned(offset, kPointerSize)); ++ ++ Addl(dst, object, Operand(offset - kHeapObjectTag)); ++ if (emit_debug_code()) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Label ok; ++ And(t11, dst, Operand(kPointerSize - 1)); ++ Branch(&ok, eq, t11, Operand(zero_reg)); ++ halt();//stop("Unaligned cell in write barrier"); ++ bind(&ok); ++ } ++ ++ RecordWrite(object, dst, value, ra_status, save_fp, remembered_set_action, ++ OMIT_SMI_CHECK); ++ ++ bind(&done); ++ ++ // Clobber clobbered input registers when running with the debug-code flag ++ // turned on to provoke errors. 
++ if (emit_debug_code()) { ++ li(value, Operand(bit_cast(kZapValue + 4))); ++ li(dst, Operand(bit_cast(kZapValue + 8))); ++ } ++} ++ ++void TurboAssembler::SaveRegisters(RegList registers) {SCOPEMARK_NAME(TurboAssembler::SaveRegisters, this); ++ DCHECK_GT(NumRegs(registers), 0); ++ RegList regs = 0; ++ for (int i = 0; i < Register::kNumRegisters; ++i) { ++ if ((registers >> i) & 1u) { ++ regs |= Register::from_code(i).bit(); ++ } ++ } ++ MultiPush(regs); ++} ++ ++void TurboAssembler::RestoreRegisters(RegList registers) {SCOPEMARK_NAME(TurboAssembler::RestoreRegisters, this); ++ DCHECK_GT(NumRegs(registers), 0); ++ RegList regs = 0; ++ for (int i = 0; i < Register::kNumRegisters; ++i) { ++ if ((registers >> i) & 1u) { ++ regs |= Register::from_code(i).bit(); ++ } ++ } ++ MultiPop(regs); ++} ++ ++//SKTODO ++void TurboAssembler::CallEphemeronKeyBarrier(Register object, Register address, ++ SaveFPRegsMode fp_mode) { ++ EphemeronKeyBarrierDescriptor descriptor; ++ RegList registers = descriptor.allocatable_registers(); ++ ++ SaveRegisters(registers); ++ ++ Register object_parameter( ++ descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kObject)); ++ Register slot_parameter(descriptor.GetRegisterParameter( ++ EphemeronKeyBarrierDescriptor::kSlotAddress)); ++ Register fp_mode_parameter( ++ descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kFPMode)); ++ ++ Push(object); ++ Push(address); ++ ++ Pop(slot_parameter); ++ Pop(object_parameter); ++ ++ Move(fp_mode_parameter, Smi::FromEnum(fp_mode)); ++ Call(isolate()->builtins()->builtin_handle(Builtins::kEphemeronKeyBarrier), ++ RelocInfo::CODE_TARGET); ++ RestoreRegisters(registers); ++} ++ ++void TurboAssembler::CallRecordWriteStub( ++ Register object, Register address, ++ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {SCOPEMARK_NAME(TurboAssembler::CallRecordWriteStub, this); ++ CallRecordWriteStub( ++ object, address, remembered_set_action, fp_mode, ++ isolate()->builtins()->builtin_handle(Builtins::kRecordWrite), ++ kNullAddress); ++} ++ ++void TurboAssembler::CallRecordWriteStub( ++ Register object, Register address, ++ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode, ++ Address wasm_target) {SCOPEMARK_NAME(TurboAssembler::CallRecordWriteStub, this); ++ CallRecordWriteStub(object, address, remembered_set_action, fp_mode, ++ Handle::null(), wasm_target); ++} ++ ++void TurboAssembler::CallRecordWriteStub( ++ Register object, Register address, ++ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode, ++ Handle code_target, Address wasm_target) { ++ DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress); ++ // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode, ++ // i.e. always emit remember set and save FP registers in RecordWriteStub. If ++ // large performance regression is observed, we should use these values to ++ // avoid unnecessary work. 
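++  // The incoming object/address values are routed into the descriptor's
++  // parameter registers through the stack (Push/Pop below), which stays
++  // correct even when the source and destination registers overlap.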
++ ++ RecordWriteDescriptor descriptor; ++ RegList registers = descriptor.allocatable_registers(); ++ ++ SaveRegisters(registers); ++ Register object_parameter( ++ descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject)); ++ Register slot_parameter( ++ descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot)); ++ Register remembered_set_parameter( ++ descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet)); ++ Register fp_mode_parameter( ++ descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode)); ++ ++ Push(object); ++ Push(address); ++ ++ Pop(slot_parameter); ++ Pop(object_parameter); ++ ++ Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action)); ++ Move(fp_mode_parameter, Smi::FromEnum(fp_mode)); ++ if (code_target.is_null()) { ++ Call(wasm_target, RelocInfo::WASM_STUB_CALL); ++ } else { ++ Call(code_target, RelocInfo::CODE_TARGET); ++ } ++ ++ RestoreRegisters(registers); ++} ++ ++// Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved) ++// The register 'object' contains a heap object pointer. The heap object ++// tag is shifted away. ++void MacroAssembler::RecordWrite(Register object, Register address, ++ Register value, RAStatus ra_status, ++ SaveFPRegsMode fp_mode, ++ RememberedSetAction remembered_set_action, ++ SmiCheck smi_check) {SCOPEMARK_NAME(MacroAssembler::RecordWrite, this); ++ DCHECK(!AreAliased(object, address, value, t11)); ++ DCHECK(!AreAliased(object, address, value, t12)); ++ ++ if (emit_debug_code()) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ Ldl(scratch, MemOperand(address)); ++ Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, scratch, ++ Operand(value)); ++ } ++ ++ if ((remembered_set_action == OMIT_REMEMBERED_SET && ++ !FLAG_incremental_marking) || ++ FLAG_disable_write_barriers) { ++ return; ++ } ++ ++ // First, check if a write barrier is even needed. The tests below ++ // catch stores of smis and stores into the young generation. ++ Label done; ++ ++ if (smi_check == INLINE_SMI_CHECK) { ++ DCHECK_EQ(0, kSmiTag); ++ JumpIfSmi(value, &done); ++ } ++ ++ CheckPageFlag(value, ++ value, // Used as scratch. ++ MemoryChunk::kPointersToHereAreInterestingMask, eq, &done); ++ CheckPageFlag(object, ++ value, // Used as scratch. ++ MemoryChunk::kPointersFromHereAreInterestingMask, ++ eq, ++ &done); ++ ++ // Record the actual write. ++ if (ra_status == kRAHasNotBeenSaved) { ++ push(ra); ++ } ++ CallRecordWriteStub(object, address, remembered_set_action, fp_mode); ++ if (ra_status == kRAHasNotBeenSaved) { ++ pop(ra); ++ } ++ ++ bind(&done); ++ ++ // Clobber clobbered registers when running with the debug-code flag ++ // turned on to provoke errors. ++ if (emit_debug_code()) { ++ li(address, Operand(bit_cast(kZapValue + 12))); ++ li(value, Operand(bit_cast(kZapValue + 16))); ++ } ++} ++ ++// --------------------------------------------------------------------------- ++// Instruction macros. ++ ++void TurboAssembler::Addw(Register rd, Register rs, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::Addw, this); ++ if (rt.is_reg()) { ++ addw(rs, rt.rm(), rd); ++ } else { ++ if (is_uint8(rt.immediate()) && !MustUseReg(rt.rmode())) { ++ addw(rs, (int)rt.immediate(), rd); ++ } else if (is_uint8(-rt.immediate()) && !MustUseReg(rt.rmode())){ ++ subw(rs, (int)-rt.immediate(), rd); ++ } else { ++ // li handles the relocation. 
++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rs != scratch); ++ li(scratch, rt); ++ addw(rs, scratch, rd); ++ } ++ } ++} ++ ++void TurboAssembler::Addl(Register rd, Register rs, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::Addl, this); ++ if (rt.is_reg()) { ++ addl(rs, rt.rm(), rd); ++ } else { ++ if (is_uint8(rt.immediate()) && !MustUseReg(rt.rmode())) { ++ addl(rs, (int)rt.immediate(), rd); ++ } else if (is_uint8(-rt.immediate()) && !MustUseReg(rt.rmode())){ ++ subl(rs, (int)-rt.immediate(), rd); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rs != scratch); ++ li(scratch, rt); ++ addl(rs, scratch, rd); ++ } ++ } ++} ++ ++void TurboAssembler::Subw(Register rd, Register rs, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::Subw, this); ++ if (rt.is_reg()) { ++ subw(rs, rt.rm(), rd); ++ } else { ++ if (is_uint8(-rt.immediate()) && !MustUseReg(rt.rmode())) { ++ addw(rs, (int)-rt.immediate(), rd); ++ } else if (is_uint8(rt.immediate()) && !MustUseReg(rt.rmode())){ ++ subw(rs, (int)rt.immediate(), rd); ++ } else { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rs != scratch); ++ li(scratch, rt); ++ subw(rs, scratch, rd); ++ } ++ } ++} ++ ++void TurboAssembler::Subl(Register rd, Register rs, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::Subl, this); ++ if (rt.is_reg()) { ++ subl(rs, rt.rm(), rd); ++ } else if (is_uint8(-rt.immediate()) && !MustUseReg(rt.rmode())) { ++ addl(rs, (int)-rt.immediate(), rd); ++ } else if (is_uint8(rt.immediate()) && !MustUseReg(rt.rmode())) { ++ subl(rs, (int)rt.immediate(), rd); ++ } else { ++ DCHECK(rs != at); ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ li(scratch, rt); ++ subl(rs, scratch, rd); ++ } ++} ++ ++void TurboAssembler::Mulw(Register rd, Register rs, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::Mulw, this); ++ if (rt.is_reg()) { ++ mulw(rs, rt.rm(), rd); ++ } else { ++ if (is_uint8(rt.immediate()) && !MustUseReg(rt.rmode()) ){ ++ mulw(rs, (int)rt.immediate(), rd); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rs != scratch); ++ li(scratch, rt); ++ mulw(rs, scratch, rd); ++ } ++ } ++} ++ ++void TurboAssembler::Mulwh(Register rd, Register rs, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::Mulwh, this); ++ if (rt.is_reg()) { ++ mull(rs, rt.rm(), rd); ++ } else { ++ if (is_uint8(rt.immediate()) && !MustUseReg(rt.rmode())) { ++ mull(rs, (int)rt.immediate(), rd); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rs != scratch); ++ li(scratch, rt); ++ mull(rs, scratch, rd); ++ } ++ } ++ sral(rd, 32, rd); ++} ++ ++void TurboAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::Mulhu, this); ++ if (rt.is_reg()) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rs != scratch); ++ DCHECK(rt.rm() != scratch); ++ zapnot(rs, 0xf, scratch); ++ zapnot(rt.rm(), 0xf, rd); ++ mull(scratch, rd, rd); ++ } else { ++ if (is_uint8(rt.immediate()) && !MustUseReg(rt.rmode())) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rs != scratch); ++ zapnot(rs, 0xf, scratch); ++ mull(scratch, (int)rt.immediate(), rd); ++ } else { ++ // li handles the relocation. 
++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rs != scratch); ++ li(scratch, rt); ++ zapnot(scratch, 0xf, scratch); ++ zapnot(rs, 0xf, rd); ++ mull(rd, scratch, rd); ++ } ++ } ++ sral(rd, 32, rd); ++} ++ ++void TurboAssembler::Mull(Register rd, Register rs, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::Mull, this); ++ if (rt.is_reg()) { ++ mull(rs, rt.rm(), rd); ++ } else { ++ if (is_uint8(rt.immediate()) && !MustUseReg(rt.rmode())) { ++ mull(rs, (int)rt.immediate(), rd); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rs != scratch); ++ li(scratch, rt); ++ mull(rs, scratch, rd); ++ } ++ } ++} ++ ++void TurboAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::Dmulh, this); ++ if (rt.is_reg()) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rs != scratch); ++ DCHECK(rt.rm() != scratch); ++ umulh(rs, rt.rm(), rd); ++ srll(rs, 63, scratch); ++ mull(scratch, rt.rm(), scratch); ++ subl(rd, scratch, rd); ++ srll(rt.rm(), 63, scratch); ++ mull(scratch, rs, scratch); ++ subl(rd, scratch, rd); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rs != scratch); ++ DCHECK(a5 != scratch); ++ li(scratch, rt); ++ umulh(rs, scratch, rd); ++ srll(rs, 63, a5); ++ mull(a5, scratch, a5); ++ subl(rd, a5, rd); ++ srll(scratch, 63, a5); ++ mull(a5, rs, a5); ++ subl(rd, a5, rd); ++ } ++} ++ ++void TurboAssembler::Divw(Register rd, Register rs, const Operand& rt) { ++ FPURegister fsrc1 = f22; ++ FPURegister fsrc2 = f23; ++ FPURegister fdest = f24; ++ ++ if (rt.is_reg()) { ++ ifmovd(rs, fsrc1); ++ ifmovd(rt.rm(), fsrc2); ++ fcvtld(fsrc1, fsrc1); ++ fcvtld(fsrc2, fsrc2); ++ fdivd(fsrc1, fsrc2, fdest); ++ fcvtdl_z(fdest, fdest); ++ fimovd(fdest, rd); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rs != scratch); ++ li(scratch, rt); ++ ifmovd(rs, fsrc1); ++ ifmovd(scratch, fsrc2); ++ fcvtld(fsrc1, fsrc1); ++ fcvtld(fsrc2, fsrc2); ++ fdivd(fsrc1, fsrc2, fdest); ++ fcvtdl_z(fdest, fdest); ++ fimovd(fdest, rd); ++ } ++} ++ ++void TurboAssembler::Divwu(Register rd, Register rs, const Operand& rt) { ++ FPURegister fsrc1 = f22; ++ FPURegister fsrc2 = f23; ++ FPURegister fdest = f24; ++ zapnot(rs, 0xf, rs); ++ ++ if (rt.is_reg()) { ++ zapnot(rt.rm(), 0xf, rt.rm()); ++ ifmovd(rs, fsrc1); ++ ifmovd(rt.rm(), fsrc2); ++ fcvtld(fsrc1, fsrc1); ++ fcvtld(fsrc2, fsrc2); ++ fdivd(fsrc1, fsrc2, fdest); ++ fcvtdl_z(fdest, fdest); ++ fimovd(fdest, rd); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rs != scratch); ++ li(scratch, rt); ++ zapnot(scratch, 0xf, scratch); ++ ifmovd(rs, fsrc1); ++ ifmovd(scratch, fsrc2); ++ fcvtld(fsrc1, fsrc1); ++ fcvtld(fsrc2, fsrc2); ++ fdivd(fsrc1, fsrc2, fdest); ++ fcvtdl_z(fdest, fdest); ++ fimovd(fdest, rd); ++ } ++} ++ ++void TurboAssembler::Divl(Register rd, Register rs, const Operand& rt) { ++ FPURegister fsrc1 = f22; ++ FPURegister fsrc2 = f23; ++ FPURegister fdest = f24; ++ ++ if (rt.is_reg()) { ++ ifmovd(rs, fsrc1); ++ ifmovd(rt.rm(), fsrc2); ++ fcvtld(fsrc1, fsrc1); ++ fcvtld(fsrc2, fsrc2); ++ fdivd(fsrc1, fsrc2, fdest); ++ fcvtdl_z(fdest, fdest); ++ fimovd(fdest, rd); ++ } else { ++ // li handles the relocation. 
++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rs != scratch); ++ li(scratch, rt); ++ ifmovd(rs, fsrc1); ++ ifmovd(scratch, fsrc2); ++ fcvtld(fsrc1, fsrc1); ++ fcvtld(fsrc2, fsrc2); ++ fdivd(fsrc1, fsrc2, fdest); ++ fcvtdl_z(fdest, fdest); ++ fimovd(fdest, rd); ++ } ++} ++ ++void TurboAssembler::Divlu(Register rd, Register rs, const Operand& rt) { ++ FPURegister fsrc1 = f22; ++ FPURegister fsrc2 = f23; ++ FPURegister fdest = f24; ++ ++ if (rt.is_reg()) { ++ ifmovd(rs, fsrc1); ++ ifmovd(rt.rm(), fsrc2); ++ fcvtld(fsrc1, fsrc1); ++ fcvtld(fsrc2, fsrc2); ++ fdivd(fsrc1, fsrc2, fdest); ++ fcvtdl_z(fdest, fdest); ++ fimovd(fdest, rd); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rs != scratch); ++ li(scratch, rt); ++ ifmovd(rs, fsrc1); ++ ifmovd(scratch, fsrc2); ++ fcvtld(fsrc1, fsrc1); ++ fcvtld(fsrc2, fsrc2); ++ fdivd(fsrc1, fsrc2, fdest); ++ fcvtdl_z(fdest, fdest); ++ fimovd(fdest, rd); ++ } ++} ++ ++void TurboAssembler::Modw(Register rd, Register rs, const Operand& rt) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(scratch != rs); ++ ++ FPURegister fsrc1 = f22; ++ FPURegister fsrc2 = f23; ++ FPURegister fdest = f24; ++ ++ if (rt.is_reg()) { ++ ifmovd(rs, fsrc1); ++ ifmovd(rt.rm(), fsrc2); ++ fcvtld(fsrc1, fsrc1); ++ fcvtld(fsrc2, fsrc2); ++ fdivd(fsrc1, fsrc2, fdest); ++ fcvtdl_z(fdest, fdest); ++ fimovd(fdest, scratch); ++ mulw(scratch, rt.rm(), scratch); ++ subw(rs, scratch, rd); ++ } else { ++ Register scratch2 = t12; ++ DCHECK(scratch2 != rs); ++ // li handles the relocation. ++ li(scratch, rt); ++ ifmovd(rs, fsrc1); ++ ifmovd(scratch, fsrc2); ++ fcvtld(fsrc1, fsrc1); ++ fcvtld(fsrc2, fsrc2); ++ fdivd(fsrc1, fsrc2, fdest); ++ fcvtdl_z(fdest, fdest); ++ fimovd(fdest, scratch2); ++ mulw(scratch, scratch2, scratch); ++ subw(rs, scratch, rd); ++ } ++} ++ ++void TurboAssembler::Modwu(Register rd, Register rs, const Operand& rt) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(scratch != rs); ++ zapnot(rs, 0xf, rs); ++ ++ FPURegister fsrc1 = f22; ++ FPURegister fsrc2 = f23; ++ FPURegister fdest = f24; ++ ++ if (rt.is_reg()) { ++ zapnot(rt.rm(), 0xf, rt.rm()); ++ ifmovd(rs, fsrc1); ++ ifmovd(rt.rm(), fsrc2); ++ fcvtld(fsrc1, fsrc1); ++ fcvtld(fsrc2, fsrc2); ++ fdivd(fsrc1, fsrc2, fdest); ++ fcvtdl_z(fdest, fdest); ++ fimovd(fdest, scratch); ++ mulw(scratch, rt.rm(), scratch); ++ subw(rs, scratch, rd); ++ } else { ++ Register scratch2 = t12; ++ DCHECK(scratch2 != rs); ++ // li handles the relocation. 
++ li(scratch, rt); ++ zapnot(scratch, 0xf, scratch); ++ ifmovd(rs, fsrc1); ++ ifmovd(scratch, fsrc2); ++ fcvtld(fsrc1, fsrc1); ++ fcvtld(fsrc2, fsrc2); ++ fdivd(fsrc1, fsrc2, fdest); ++ fcvtdl_z(fdest, fdest); ++ fimovd(fdest, scratch2); ++ mulw(scratch, scratch2, scratch); ++ subw(rs, scratch, rd); ++ } ++} ++ ++void TurboAssembler::Modl(Register rd, Register rs, const Operand& rt) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(scratch != rs); ++ ++ FPURegister fsrc1 = f22; ++ FPURegister fsrc2 = f23; ++ FPURegister fdest = f24; ++ ++ if (rt.is_reg()) { ++ ifmovd(rs, fsrc1); ++ ifmovd(rt.rm(), fsrc2); ++ fcvtld(fsrc1, fsrc1); ++ fcvtld(fsrc2, fsrc2); ++ fdivd(fsrc1, fsrc2, fdest); ++ fcvtdl_z(fdest, fdest); ++ fimovd(fdest, scratch); ++ mull(scratch, rt.rm(), scratch); ++ subl(rs, scratch, rd); ++ } else { ++ Register scratch2 = t12; ++ DCHECK(scratch2 != rs); ++ // li handles the relocation. ++ li(scratch, rt); ++ ifmovd(rs, fsrc1); ++ ifmovd(scratch, fsrc2); ++ fcvtld(fsrc1, fsrc1); ++ fcvtld(fsrc2, fsrc2); ++ fdivd(fsrc1, fsrc2, fdest); ++ fcvtdl_z(fdest, fdest); ++ fimovd(fdest, scratch2); ++ mull(scratch, scratch2, scratch); ++ subl(rs, scratch, rd); ++ } ++} ++ ++void TurboAssembler::Modlu(Register rd, Register rs, const Operand& rt) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(scratch != rs); ++ ++ FPURegister fsrc1 = f22; ++ FPURegister fsrc2 = f23; ++ FPURegister fdest = f24; ++ ++ if (rt.is_reg()) { ++ ifmovd(rs, fsrc1); ++ ifmovd(rt.rm(), fsrc2); ++ fcvtld(fsrc1, fsrc1); ++ fcvtld(fsrc2, fsrc2); ++ fdivd(fsrc1, fsrc2, fdest); ++ fcvtdl_z(fdest, fdest); ++ fimovd(fdest, scratch); ++ mull(scratch, rt.rm(), scratch); ++ subl(rs, scratch, rd); ++ } else { ++ Register scratch2 = t12; ++ DCHECK(scratch2 != rs); ++ // li handles the relocation. 
++ li(scratch, rt); ++ ifmovd(rs, fsrc1); ++ ifmovd(scratch, fsrc2); ++ fcvtld(fsrc1, fsrc1); ++ fcvtld(fsrc2, fsrc2); ++ fdivd(fsrc1, fsrc2, fdest); ++ fcvtdl_z(fdest, fdest); ++ fimovd(fdest, scratch2); ++ mull(scratch, scratch2, scratch); ++ subl(rs, scratch, rd); ++ } ++} ++ ++void TurboAssembler::Dmodu(Register rd, Register rs, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::Dmodu, this); ++ UNREACHABLE(); ++} ++ ++void TurboAssembler::Abs_sw(FPURegister fd, FPURegister fs) {SCOPEMARK_NAME(TurboAssembler::Abs, this); ++ fcpys(f31,fs,fd); ++} ++ ++void TurboAssembler::Sllw(Register rd, Register rs, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::Sllw, this); ++ if (rt.is_reg()) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rs != scratch); ++ and_ins(rt.rm(), 0x1f, scratch); ++ slll(rs, scratch, rd); ++ addw(rd, 0x0, rd); ++ } else { ++ slll(rs, ((int)rt.immediate()) & 0x1f, rd); ++ addw(rd, 0x0, rd); ++ } ++} ++ ++void TurboAssembler::Srlw(Register rd, Register rs, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::Srlw, this); ++ if (rt.is_reg()) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rs != scratch); ++ and_ins(rt.rm(), 0x1f, scratch); ++ zapnot(rs, 0xf, rd); ++ srll(rd, scratch, rd); ++ addw(rd, 0x0, rd); ++ } else { ++ zapnot(rs, 0xf, rd); ++ srll(rd, ((int)rt.immediate()) & 0x1f, rd); ++ addw(rd, 0x0, rd); ++ } ++} ++ ++void TurboAssembler::Sraw(Register rd, Register rs, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::Sraw, this); ++ if (rt.is_reg()) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rs != scratch); ++ and_ins(rt.rm(), 0x1f, scratch); ++ addw(rs, 0x0, rd); ++ sral(rd, scratch, rd); ++ } else { ++ addw(rs, 0x0, rd); ++ sral(rd, ((int)rt.immediate()) & 0x1f, rd); ++ } ++} ++ ++void TurboAssembler::And(Register rd, Register rs, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::And, this); ++ if (rt.is_reg()) { ++ and_ins(rs, rt.rm(), rd); ++ } else { ++ if (is_uint8(rt.immediate()) && !MustUseReg(rt.rmode())) {//20181121 is_uint16 ++ and_ins(rs, (int)rt.immediate(), rd); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rs != scratch); ++ li(scratch, rt); ++ and_ins(rs, scratch, rd); ++ } ++ } ++} ++ ++void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::Or, this); ++ if (rt.is_reg()) { ++ or_ins(rs, rt.rm(), rd); ++ } else { ++ if (is_uint8(rt.immediate()) && !MustUseReg(rt.rmode())) {//20181121 is_uint16 ++ or_ins(rs, (int)rt.immediate(), rd); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rs != scratch); ++ li(scratch, rt); ++ or_ins(rs, scratch, rd); ++ } ++ } ++} ++ ++void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::Xor, this); ++ if (rt.is_reg()) { ++ xor_ins(rs, rt.rm(), rd); ++ } else { ++ if (is_uint8(rt.immediate()) && !MustUseReg(rt.rmode())) { ++ xor_ins(rs, (int)rt.immediate(), rd); ++ } else { ++ // li handles the relocation. 
++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rs != scratch); ++ li(scratch, rt); ++ xor_ins(rs, scratch, rd); ++ } ++ } ++} ++ ++void TurboAssembler::Nor(Register rd, Register rs, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::Nor, this); ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ if (rt.is_reg()) { ++ bis(rs, rt.rm(), scratch); ++ ornot(zero_reg, scratch, rd); ++ } else { ++ if (is_uint8(rt.immediate()) && !MustUseReg(rt.rmode())) { ++ bis(rs, (int)rt.immediate(), rd); ++ ornot(zero_reg, rd, rd); ++ } else { ++ // li handles the relocation. ++ DCHECK(rs != scratch); ++ li(scratch, rt); ++ bis(rs, scratch, scratch); ++ ornot(zero_reg, scratch, rd); ++ } ++ } ++} ++ ++void TurboAssembler::Neg(Register rs, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::Neg, this); ++ subl(zero_reg, rt.rm(),rs); ++} ++ ++void TurboAssembler::Cmplt(Register rd, Register rs, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::Cmplt, this); ++ if (rt.is_reg()) { ++ cmplt(rs, rt.rm(), rd); ++ } else { ++ if (is_uint8(rt.immediate()) && !MustUseReg(rt.rmode())) { ++ cmplt(rs, (int)rt.immediate(), rd); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register scratch = temps.hasAvailable() ? temps.Acquire() : t11; ++ DCHECK(rs != scratch); ++ li(scratch, rt); ++ cmplt(rs, scratch, rd); ++ } ++ } ++} ++ ++void TurboAssembler::Cmpult(Register rd, Register rs, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::Cmpult, this); ++ if (rt.is_reg()) { ++ cmpult(rs, rt.rm(), rd); ++ } else { ++ if (is_uint8(rt.immediate()) && !MustUseReg(rt.rmode())) { ++ cmpult(rs, (int)rt.immediate(), rd); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register scratch = temps.hasAvailable() ? temps.Acquire() : t11; ++ DCHECK(rs != scratch); ++ li(scratch, rt); ++ cmpult(rs, scratch, rd); ++ } ++ } ++} ++ ++void TurboAssembler::Cmple(Register rd, Register rs, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::Cmple, this); ++ if (rt.is_reg()) { ++ cmple(rs, rt.rm(), rd); ++ } else { ++ if (is_uint8(rt.immediate()) && !MustUseReg(rt.rmode())) { ++ cmple(rs, (int)rt.immediate(), rd); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register scratch = temps.hasAvailable() ? temps.Acquire() : t11; ++ DCHECK(rs != scratch); ++ li(scratch, rt); ++ cmple(rs, scratch, rd); ++ } ++ } ++} ++ ++void TurboAssembler::Cmpule(Register rd, Register rs, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::Cmpule, this); ++ if (rt.is_reg()) { ++ cmpule(rs, rt.rm(), rd); ++ } else { ++ if (is_uint8(rt.immediate()) && !MustUseReg(rt.rmode())) { ++ cmpule(rs, (int)rt.immediate(), rd); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.hasAvailable() ? temps.Acquire() : t11; ++ DCHECK(rs != scratch); ++ li(scratch, rt); ++ cmpule(rs, scratch, rd); ++ } ++ } ++} ++ ++void TurboAssembler::Cmpge(Register rd, Register rs, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::Cmpge, this); ++ if (rt.is_reg()) { ++ cmple(rt.rm(), rs, rd); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.hasAvailable() ? 
temps.Acquire() : t11; ++ DCHECK(rs != scratch); ++ li(scratch, rt); ++ cmple(scratch, rs, rd); ++ } ++ } ++ ++void TurboAssembler::Cmpuge(Register rd, Register rs, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::Cmpuge, this); ++ if (rt.is_reg()) { ++ cmpule(rt.rm(), rs, rd); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.hasAvailable() ? temps.Acquire() : t11; ++ DCHECK(rs != scratch); ++ li(scratch, rt); ++ cmpule(scratch, rs, rd); ++ } ++ } ++ ++void TurboAssembler::Cmpgt(Register rd, Register rs, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::Cmpgt, this); ++ if (rt.is_reg()) { ++ cmplt(rt.rm(), rs, rd); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.hasAvailable() ? temps.Acquire() : t11; ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ DCHECK(rs != scratch); ++ li(scratch, rt); ++ cmplt(scratch, rs, rd); ++ } ++} ++ ++void TurboAssembler::Cmpugt(Register rd, Register rs, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::Cmpugt, this); ++ if (rt.is_reg()) { ++ cmpult(rt.rm(), rs, rd); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.hasAvailable() ? temps.Acquire() : t11; ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ DCHECK(rs != scratch); ++ li(scratch, rt); ++ cmpult(scratch, rs, rd); ++ } ++} ++ ++void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::Ror, this); ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.hasAvailable() ? temps.Acquire() : t11; ++ DCHECK(rs != scratch); ++ DCHECK(rd != scratch); ++ if (rt.is_reg()) { ++ Register scratch1 = (scratch == t11) ? a5 : t11; ++ Register scratch2 = t12; ++ Register scratch3 = gp; // avoid to get at in srlv and sllv ++ and_ins(rt.rm(), 0x1f, scratch1); ++ ldi(scratch2, 32, zero_reg); ++ subw(scratch2, scratch1, scratch2); ++ { ++ // srlv(scratch1, rs, scratch1); // srlw(rs, scratch1, scratch1); ++ and_ins(scratch1, 0x1f, scratch3); ++ zapnot(rs, 0xf, scratch1); ++ srll(scratch1, scratch3, scratch1); ++ addw(scratch1, 0, scratch1); ++ } ++ { ++ // sllv(rd, rs, scratch2); // sllw(rs, scratch2, rd); ++ and_ins(scratch2, 0x1f, scratch3); ++ slll(rs, scratch3, rd); ++ addw(rd, 0, rd); ++ } ++ bis(scratch1, rd, rd); ++ addw(rd, 0, rd); ++ } else { ++ int64_t ror_value = rt.immediate() % 32; ++ if (ror_value < 0) { ++ ror_value += 32; ++ } ++ Srlw(scratch, rs, ror_value); // srlw(rs, ror_value, scratch); ++ Sllw(rd, rs, 32-ror_value); // sllw(rs, 32-ror_value, rd); ++ bis(scratch, rd, rd); ++ addw(rd, 0, rd); ++ } ++} ++ ++void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::Dror, this); ++#ifdef SW64 ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.hasAvailable() ? temps.Acquire() : t11; ++ DCHECK(rs != scratch); ++ DCHECK(rd != scratch); ++ if (rt.is_reg()) { ++ Register scratch1 = (scratch == t11) ? 
a5 : t11; ++ Register scratch2 = t12; ++ srll(rs, rt.rm(), scratch1); ++ ldi(scratch2, 64, zero_reg); ++ subl(scratch2, rt.rm(), scratch2); ++ slll(rs, scratch2, rd); ++ bis(scratch1, rd, rd); ++ } else { ++ int64_t dror_value = rt.immediate() % 64; ++ if (dror_value < 0) dror_value += 64; ++ srll(rs, (int)dror_value, scratch); ++ slll(rs, (int)(64-dror_value), rd); ++ bis(scratch, rd, rd); ++ } ++#else ++ if (rt.is_reg()) { ++ roll(rs, (64-(rt.rm()).code()),rd); ++ } else { ++ int64_t dror_value = rt.immediate() % 64; ++ if (dror_value < 0) dror_value += 64; ++ if (dror_value <= 31) { ++ roll(rs, (64-dror_value),rd); ++ } else { ++ roll(rs, dror_value, rd); ++ } ++ } ++#endif ++} ++ ++ ++void TurboAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa, ++ Register scratch) {SCOPEMARK_NAME(TurboAssembler::Lsa, this); ++ DCHECK(sa >= 1 && sa <= 31); ++#ifdef SW64 ++ if (sa == 3) { ++ s8addw(rs, rt, rd); //rd = rs * 8 + rt ++ } else if (sa == 2) { ++ s4addw(rs, rt, rd); //rd = rs * 4 + rt ++ } else { ++ Register tmp = rd == rt ? scratch : rd; ++ DCHECK(tmp != rt); ++ Sllw(tmp, rs, sa); //not sw's sllw ++ addw(rt, tmp, rd); ++ } ++#else ++ //if (kArchVariant == kSw64r3 && sa <= 4) { ++ // lsa(rd, rt, rs, sa - 1); ++ //} else { ++ Register tmp = rd == rt ? scratch : rd; ++ DCHECK(tmp != rt); ++ Sllw(tmp, rs, sa); ++ Addw(rd, rt, tmp); ++ //} ++#endif ++} ++ ++void TurboAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa, ++ Register scratch) {SCOPEMARK_NAME(TurboAssembler::Dlsa, this); ++ DCHECK(sa >= 1 && sa <= 63); ++#ifdef SW64 ++ if (sa == 3) { ++ s8addl(rs, rt, rd); //rd = rs * 8 + rt ++ } else if (sa == 2) { ++ s4addl(rs, rt, rd); //rd = rs * 4 + rt ++ } else { ++ Register tmp = rd == rt ? scratch : rd; ++ DCHECK(tmp != rt); ++ slll(rs, sa, tmp); ++ Addl(rd, rt, tmp); ++ } ++#else ++ Register tmp = rd == rt ? 
scratch : rd; ++ DCHECK(tmp != rt); ++ slll(rs, sa, tmp); ++ Addl(rd, rt, tmp); ++#endif ++// } ++} ++ ++// ------------Pseudo-instructions------------- ++ ++// Change endianness ++void TurboAssembler::ByteSwapSigned(Register dest, Register src, ++ int operand_size) {SCOPEMARK_NAME(TurboAssembler::ByteSwapSigned, this); ++ DCHECK(operand_size == 2 || operand_size == 4 || operand_size == 8); ++ DCHECK(kArchVariant == kSw64r3 || kArchVariant == kSw64r2); ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(src != scratch); ++ if (operand_size == 2) { ++ //HGFEDCBA ==> 000000AB ++ zapnot(src, 0x3, dest); ++ srll(dest, 8, scratch); ++ slll(dest, 24, dest); ++ addw(dest, 0, dest); ++ sral(dest, 16, dest); ++ or_ins(dest, scratch, dest); ++ } else if (operand_size == 4) { ++ //HGFEDCBA ==> 0000ABCD ++ zapnot(src, 0xf, dest); ++ srll(dest, 8, scratch); //0DCB ++ slll(dest, 24, dest); //A000 ++ bis(dest, scratch, dest); //ADCB ++ srll(scratch, 16, scratch); //000D ++ xor_ins(dest, scratch, scratch); ++ and_ins(scratch, 0xff, scratch); //000B^D ++ xor_ins(dest, scratch, dest); //ADCD ++ slll(scratch, 16, scratch); //0B^D00 ++ xor_ins(dest, scratch, dest); //0000ABCD ++ addw(dest, 0, dest); ++ } else { ++ // 87654321 ==> 12345678 ++ srll(src, 8, scratch); //08765432 ++ slll(src, 56, dest); //10000000 ++ bis(dest, scratch, dest); //18765432 ++ // 8 <==> 2 ++ srll(scratch, 48, scratch); //00000008 ++ xor_ins(dest, scratch, scratch); ++ and_ins(scratch, 0xff, scratch); //00000002^8 ++ xor_ins(dest, scratch, dest); //18765438 ++ slll(scratch, 48, scratch); //02^8000000 ++ xor_ins(dest, scratch, dest); //12765438 ++ // 7 <==> 3 ++ srll(dest, 32, scratch); //00001276 ++ xor_ins(dest, scratch, scratch); ++ zapnot (scratch, 0x2, scratch); //0000003^70 ++ xor_ins(dest, scratch, dest); //12765478 ++ slll(scratch, 32, scratch); //03^7000000 ++ xor_ins(dest, scratch, dest); //12365478 ++ // 6 <==> 4 ++ srll(dest, 16, scratch); //00123654 ++ xor_ins(dest, scratch, scratch); ++ zapnot (scratch, 0x4, scratch); //000004^600 ++ xor_ins(dest, scratch, dest); //12365678 ++ slll(scratch, 16, scratch); //0004^60000 ++ xor_ins(dest, scratch, dest); //12345678 ++ } ++} ++ ++void TurboAssembler::ByteSwapUnsigned(Register dest, Register src, ++ int operand_size) {SCOPEMARK_NAME(TurboAssembler::ByteSwapUnsigned, this); ++ DCHECK(operand_size == 2 || operand_size == 4); ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(src != scratch); ++ if (operand_size == 2) { ++ zapnot(src, 0x3, dest); ++ srll(dest, 8, scratch); ++ slll(dest, 8, dest); ++ bis(dest, scratch, dest); ++// slll(dest, 48, dest); ++ zapnot(dest, 0x3, dest); ++ } else { ++ zapnot(src, 0xf, dest); ++ srll(dest, 8, scratch); //0DCB ++ slll(dest, 24, dest); //A000 ++ bis(dest, scratch, dest); //ADCB ++ srll(scratch, 16, scratch); //000D ++ xor_ins(dest, scratch, scratch); ++ and_ins(scratch, 0xff, scratch); //000B^D ++ xor_ins(dest, scratch, dest); //ADCD ++ slll(scratch, 16, scratch); //0B^D000 ++ xor_ins(dest, scratch, dest); //0000ABCD ++// slll(dest, 32, dest); //ABCD0000 ++ ++ } ++} ++ ++void TurboAssembler::Uldw(Register rd, const MemOperand& rs) {SCOPEMARK_NAME(TurboAssembler::Uldw, this); ++ DCHECK(rd != at); ++ DCHECK(rs.rm() != at); ++ Ldw(rd, rs); ++} ++ ++void TurboAssembler::Uldwu(Register rd, const MemOperand& rs) {SCOPEMARK_NAME(TurboAssembler::Uldwu, this); ++ if (kArchVariant == kSw64r3) { ++ Ldwu(rd, rs); ++ } else { ++ DCHECK_EQ(kArchVariant, kSw64r2); ++ Uldw(rd, rs); ++ 
zapnot(rd, 0xf, rd); //ZHJ Dext(rd, rd, 0, 32); ++ } ++} ++ ++void TurboAssembler::Ustw(Register rd, const MemOperand& rs) {SCOPEMARK_NAME(TurboAssembler::Ustw, this); ++ DCHECK(rd != at); ++ DCHECK(rs.rm() != at); ++ DCHECK(rd != rs.rm()); ++ Stw(rd, rs); ++} ++ ++void TurboAssembler::Uldh(Register rd, const MemOperand& rs) {SCOPEMARK_NAME(TurboAssembler::Uldh, this); ++ DCHECK(rd != at); ++ DCHECK(rs.rm() != at); ++ DCHECK_EQ(kArchVariant, kSw64r2); ++ MemOperand source = rs; ++ // Adjust offset for two accesses and check if offset + 1 fits into int16_t. ++ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1); ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ if (source.rm() == scratch) { ++#if defined(V8_TARGET_LITTLE_ENDIAN) ++ Ldb(rd, MemOperand(source.rm(), source.offset() + 1)); ++ Ldbu(scratch, source); ++#endif ++ } else { ++#if defined(V8_TARGET_LITTLE_ENDIAN) ++ Ldbu(scratch, source); ++ Ldb(rd, MemOperand(source.rm(), source.offset() + 1)); ++#endif ++ } ++ slll(rd, 8, rd); ++ or_ins(rd, scratch, rd); ++} ++ ++void TurboAssembler::Uldhu(Register rd, const MemOperand& rs) {SCOPEMARK_NAME(TurboAssembler::Uldhu, this); ++ DCHECK(rd != at); ++ DCHECK(rs.rm() != at); ++ DCHECK_EQ(kArchVariant, kSw64r2); ++ MemOperand source = rs; ++ // Adjust offset for two accesses and check if offset + 1 fits into int16_t. ++ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1); ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ if (source.rm() == scratch) { ++#if defined(V8_TARGET_LITTLE_ENDIAN) ++ Ldbu(rd, MemOperand(source.rm(), source.offset() + 1)); ++ Ldbu(scratch, source); ++#endif ++ } else { ++#if defined(V8_TARGET_LITTLE_ENDIAN) ++ Ldbu(scratch, source); ++ Ldbu(rd, MemOperand(source.rm(), source.offset() + 1)); ++#endif ++ } ++ slll(rd, 8, rd); ++ or_ins(rd, scratch, rd); ++} ++ ++void TurboAssembler::Usth(Register rd, const MemOperand& rs, Register scratch) {SCOPEMARK_NAME(TurboAssembler::Usth, this); ++ DCHECK(rd != at); ++ DCHECK(rs.rm() != at); ++ DCHECK(rs.rm() != scratch); ++ DCHECK(scratch != at); ++ DCHECK_EQ(kArchVariant, kSw64r2); ++ MemOperand source = rs; ++ // Adjust offset for two accesses and check if offset + 1 fits into int16_t. 
++ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1); ++ ++ if (scratch != rd) { ++ mov(scratch, rd); ++ } ++ ++#if defined(V8_TARGET_LITTLE_ENDIAN) ++ Stb(scratch, source); ++ Srlw(scratch, scratch, 8); ++ Stb(scratch, MemOperand(source.rm(), source.offset() + 1)); ++#endif ++} ++ ++void TurboAssembler::Uldl(Register rd, const MemOperand& rs) {SCOPEMARK_NAME(TurboAssembler::Uldl, this); ++ DCHECK(rd != at); ++ DCHECK(rs.rm() != at); ++ Ldl(rd, rs); ++} ++ ++ ++void TurboAssembler::Ustl(Register rd, const MemOperand& rs) {SCOPEMARK_NAME(TurboAssembler::Ustl, this); ++ DCHECK(rd != at); ++ DCHECK(rs.rm() != at); ++ Stl(rd, rs); ++} ++ ++ ++void TurboAssembler::Uflds(FPURegister fd, const MemOperand& rs, ++ Register scratch) {SCOPEMARK_NAME(TurboAssembler::Uflds, this); ++ Flds(fd, rs); ++} ++ ++void TurboAssembler::Ufsts(FPURegister fd, const MemOperand& rs, ++ Register scratch) {SCOPEMARK_NAME(TurboAssembler::Ufsts, this); ++ Fsts(fd, rs); ++} ++ ++void TurboAssembler::Ufldd(FPURegister fd, const MemOperand& rs, ++ Register scratch) {SCOPEMARK_NAME(TurboAssembler::Ufldd, this); ++ DCHECK(scratch != at); ++ Fldd(fd, rs); ++} ++ ++void TurboAssembler::Ufstd(FPURegister fd, const MemOperand& rs, ++ Register scratch) {SCOPEMARK_NAME(TurboAssembler::Ufstd, this); ++ DCHECK(scratch != at); ++ Fstd(fd, rs); ++} ++ ++void TurboAssembler::Ldb(Register rd, const MemOperand& rs) {SCOPEMARK_NAME(TurboAssembler::Ldb, this); ++ ldbu(rd, rs); ++ sextb(rd, rd); ++} ++ ++void TurboAssembler::Ldbu(Register rd, const MemOperand& rs) {SCOPEMARK_NAME(TurboAssembler::Ldbu, this); ++ ldbu(rd, rs); ++} ++ ++void TurboAssembler::Stb(Register rd, const MemOperand& rs) {SCOPEMARK_NAME(TurboAssembler::Stb, this); ++ stb(rd, rs); ++} ++ ++void TurboAssembler::Ldh(Register rd, const MemOperand& rs) {SCOPEMARK_NAME(TurboAssembler::Ldh, this); ++ ldhu(rd, rs); ++ sexth(rd, rd); ++} ++ ++void TurboAssembler::Ldhu(Register rd, const MemOperand& rs) {SCOPEMARK_NAME(TurboAssembler::Ldhu, this); ++ ldhu(rd, rs); ++} ++ ++void TurboAssembler::Sth(Register rd, const MemOperand& rs) {SCOPEMARK_NAME(TurboAssembler::Sth, this); ++ sth(rd, rs); ++} ++ ++void TurboAssembler::Ldw(Register rd, const MemOperand& rs) {SCOPEMARK_NAME(TurboAssembler::Ldw, this); ++ ldw(rd, rs); ++} ++ ++void TurboAssembler::Ldwu(Register rd, const MemOperand& rs) {SCOPEMARK_NAME(TurboAssembler::Ldwu, this); ++ ldw(rd, rs); ++ zapnot(rd, 0xf, rd); ++} ++ ++void TurboAssembler::Stw(Register rd, const MemOperand& rs) {SCOPEMARK_NAME(TurboAssembler::Stw, this); ++ stw(rd, rs); ++} ++ ++void TurboAssembler::Ldl(Register rd, const MemOperand& rs) {SCOPEMARK_NAME(TurboAssembler::Ldl, this); ++ ldl(rd, rs); ++} ++ ++void TurboAssembler::Stl(Register rd, const MemOperand& rs) {SCOPEMARK_NAME(TurboAssembler::Stl, this); ++ stl(rd, rs); ++} ++ ++void TurboAssembler::Flds(FPURegister fd, const MemOperand& src) {SCOPEMARK_NAME(TurboAssembler::Flds, this); ++ flds(fd, src); ++} ++ ++void TurboAssembler::Fsts(FPURegister fs, const MemOperand& src) {SCOPEMARK_NAME(TurboAssembler::Fsts, this); ++ fsts(fs, src); ++} ++ ++void TurboAssembler::Fldd(FPURegister fd, const MemOperand& src) {SCOPEMARK_NAME(TurboAssembler::Fldd, this); ++ fldd(fd, src); ++} ++ ++void TurboAssembler::Fstd(FPURegister fs, const MemOperand& src) {SCOPEMARK_NAME(TurboAssembler::Fstd, this); ++ fstd(fs, src); ++} ++ ++void TurboAssembler::li(Register dst, Handle value, LiFlags mode) {SCOPEMARK_NAME(TurboAssembler::li, this); ++ if (root_array_available_ && 
options().isolate_independent_code) { ++ IndirectLoadConstant(dst, value); ++ return; ++ } ++ li(dst, Operand(value), mode); ++} ++ ++void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) {SCOPEMARK_NAME(TurboAssembler::li, this); ++ if (root_array_available_ && options().isolate_independent_code) { ++ IndirectLoadExternalReference(dst, value); ++ return; ++ } ++ li(dst, Operand(value), mode); ++} ++ ++void TurboAssembler::li(Register dst, const StringConstantBase* string, ++ LiFlags mode) {SCOPEMARK_NAME(TurboAssembler::li, this); ++ li(dst, Operand::EmbeddedStringConstant(string), mode); ++} ++ ++static inline int InstrCountForLiLower32Bit(int64_t value) { ++ int32_t lsb32 = static_cast(value); ++ int16_t lsb_h = (lsb32-static_cast(lsb32)) >> 16; ++ int16_t lsb_l = static_cast(lsb32); ++ ++ if (is_int16(lsb32)) { ++ return 1; ++ } else { ++ if ( (int32_t)(lsb_h) == -32768 && (int32_t)(lsb_l) < 0 ) { ++ // range from 0x7FFF8000 to 0x7FFFFFFF ++ return lsb_l ? 3 : 2; ++ } else { ++ return lsb_l ? 2 : 1; ++ } ++ } ++} ++ ++void TurboAssembler::LiLower32BitHelper(Register rd, Operand j) {SCOPEMARK_NAME(TurboAssembler::LiLower32BitHelper, this); ++ int32_t lsb32 = static_cast(j.immediate()); ++ ++ if (is_int16(lsb32)) { ++ ldi(rd, lsb32, zero_reg); ++ } else { ++ int16_t lsb_h = (lsb32-static_cast(lsb32)) >> 16; ++ int16_t lsb_l = static_cast(lsb32); ++ if ( (int32_t)(lsb_h) == -32768 && (int32_t)(lsb_l) < 0 ) { ++ // range from 0x7FFF8000 to 0x7FFFFFFF ++ ldih(rd, 0x4000, zero_reg); ++ ldih(rd, 0x4000, rd); ++ if (lsb_l) ++ ldi(rd, lsb_l, rd); ++ } else { ++ ldih(rd, lsb_h, zero_reg); ++ if (lsb_l) { ++ ldi(rd, lsb_l, rd); ++ } ++ } ++ } ++} ++ ++ ++int TurboAssembler::InstrCountForLi64Bit(int64_t value) { ++ int32_t lo = static_cast(value); ++ int32_t hi = static_cast((value - lo) >> 32); ++ int16_t lo_h16 = (lo - static_cast(lo))>>16; ++ int16_t lo_l16 = static_cast(lo); ++ int16_t hi_l16 = static_cast(hi); ++ ++ if (is_int32(value)) { ++ return InstrCountForLiLower32Bit(value); ++ } else { ++ int count = 1; // slll 32 ++ if ( is_int16(hi) ) { ++ count += 1; ++ } else { ++ count += hi_l16 ? 2 : 1; ++ } ++ if ( lo != 0 ) { ++ if ( ((int32_t)lo_h16 == -32768) && ((int32_t)lo_l16 < 0)) { ++ // range from 0x7FFF8000 to 0x7FFFFFFF ++ count += lo_l16 ? 3 : 2; ++ } else { ++ count += lo_l16 ? 2 : 1; ++ } ++ } ++ return count; ++ } ++ ++ UNREACHABLE(); ++ return INT_MAX; ++} ++ ++// All changes to if...else conditions here must be added to ++// InstrCountForLi64Bit as well. ++void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {SCOPEMARK_NAME(TurboAssembler::li_optimized, this); ++ DCHECK(!j.is_reg()); ++ DCHECK(!MustUseReg(j.rmode())); ++ DCHECK(mode == OPTIMIZE_SIZE); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ int64_t value = j.immediate(); ++ int32_t lo = static_cast(value); ++ int32_t hi = static_cast((value - lo) >> 32); ++ int16_t lo_h16 = (lo - static_cast(lo))>>16; ++ int16_t lo_l16 = static_cast(lo); ++ int16_t hi_h16 = (hi - static_cast(hi))>>16; ++ int16_t hi_l16 = static_cast(hi); ++ // Normal load of an immediate value which does not need Relocation Info. 
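++  // 64-bit immediates are built by loading the high 32 bits (ldi or
++  // ldih+ldi), shifting them up with slll(32), then adding the low 32 bits
++  // with ldih/ldi. Low halves in 0x7FFF8000..0x7FFFFFFF need two ldih(0x4000)
++  // steps, since ldih sign-extends its 16-bit displacement and +0x8000 cannot
++  // be added in a single instruction.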
++ if (is_int32(value)) { ++ LiLower32BitHelper(rd, j); ++ } else { ++ if ( is_int16(hi) ) { ++ ldi(rd, hi, zero_reg); ++ } else { ++ ldih(rd, hi_h16, zero_reg); ++ if (hi_l16 != 0) ++ ldi(rd, hi_l16, rd); ++ } ++ slll(rd, 32, rd); ++ if ( lo != 0 ) { ++ if ( ((int32_t)lo_h16 == -32768) && ((int32_t)lo_l16 < 0)) { ++ // range from 0x7FFF8000 to 0x7FFFFFFF ++ ldih(rd, 0x4000, rd); ++ ldih(rd, 0x4000, rd); ++ if (lo_l16 != 0) ++ ldi(rd, lo_l16, rd); ++ } else { ++ ldih(rd, lo_h16, rd); ++ if (lo_l16 != 0) ++ ldi(rd, lo_l16, rd); ++ } ++ } ++ } ++} ++ ++void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {SCOPEMARK_NAME(TurboAssembler::li, this); ++ DCHECK(!j.is_reg()); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) { ++ int li_count = InstrCountForLi64Bit(j.immediate()); ++ int li_neg_count = InstrCountForLi64Bit(-j.immediate()); ++ int li_not_count = InstrCountForLi64Bit(~j.immediate()); ++ // Loading -MIN_INT64 could cause problems, but loading MIN_INT64 takes only ++ // two instructions so no need to check for this. ++ if (li_neg_count <= li_not_count && li_neg_count < li_count - 1) { ++ DCHECK(j.immediate() != std::numeric_limits::min()); ++ li_optimized(rd, Operand(-j.immediate()), mode); ++ Subl(rd, zero_reg, rd); ++ } else if (li_neg_count > li_not_count && li_not_count < li_count - 1) { ++ DCHECK(j.immediate() != std::numeric_limits::min()); ++ li_optimized(rd, Operand(~j.immediate()), mode); ++ ornot(zero_reg, rd, rd); // nor(rd, rd, rd); ++ } else { ++ li_optimized(rd, j, mode); ++ } ++ } else if (MustUseReg(j.rmode())) { ++ int64_t immediate; ++ if (j.IsHeapObjectRequest()) { ++ RequestHeapObject(j.heap_object_request()); ++ immediate = 0; ++ } else { ++ immediate = j.immediate(); ++ } ++ ++ RecordRelocInfo(j.rmode(), immediate); ++ ++ int32_t lsb32 = static_cast (immediate); ++ int32_t msb32 = static_cast ((immediate - lsb32) >> 32); ++ int16_t msb_l = static_cast(msb32); ++ int16_t lsb_h = (lsb32-static_cast(lsb32)) >> 16; ++ int16_t lsb_l = static_cast(lsb32); ++ ++ // lsb32's range should not be from 0x7FFF8000 to 0x7FFFFFFF. ++ DCHECK( !( (lsb32>0x7FFF8000)&&(lsb32<0x7FFFFFFF) ) ); ++ ldi(rd, msb_l, zero_reg); ++ slll(rd, 32, rd); ++ ldih(rd, lsb_h, rd); ++ ldi(rd, lsb_l, rd); ++ ++ } else if (mode == ADDRESS_LOAD) { ++ // We always need the same number of instructions as we may need to patch ++ // this code to load another value which may need all 4 instructions. ++ int32_t lsb32 = static_cast (j.immediate()); ++ int32_t msb32 = static_cast ((j.immediate() - lsb32) >> 32); ++ ++ if (lsb32 < 0 && (j.immediate()>>32) > 0) { ++ return li_optimized(rd, j, OPTIMIZE_SIZE); ++ } ++ ++ int16_t msb_l = static_cast(msb32); ++ int16_t lsb_h = (lsb32-static_cast(lsb32)) >> 16; ++ int16_t lsb_l = static_cast(lsb32); ++ ++ // lsb32's range should not be from 0x7FFF8000 to 0x7FFFFFFF. ++ DCHECK( !( (lsb32>0x7FFF8000)&&(lsb32<0x7FFFFFFF) ) ); ++ ldi(rd, msb_l, zero_reg); ++ slll(rd, 32, rd); ++ ldih(rd, lsb_h, rd); ++ ldi(rd, lsb_l, rd); ++ ++ } else { // mode == CONSTANT_SIZE - always emit the same instruction ++ // sequence. ++ // CONSTANT_SIZE, must 5 instructions. 
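++  // The fixed ldih/ldi/slll/ldih/ldi sequence keeps the emitted size
++  // constant so the loaded value can be patched in place later.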
++ int64_t imm = j.immediate(); ++ int32_t lsb32 = static_cast (imm); ++ int32_t msb32 = static_cast ((imm - lsb32) >> 32); ++ int16_t msb_h = (msb32-static_cast(msb32)) >> 16; ++ int16_t msb_l = static_cast(msb32); ++ int16_t lsb_h = (lsb32-static_cast(lsb32)) >> 16; ++ int16_t lsb_l = static_cast(lsb32); ++ ++ ldih(rd, msb_h, zero_reg); ++ ldi(rd, msb_l, rd); ++ slll(rd, 32, rd); ++ ldih(rd, lsb_h, rd); ++ ldi(rd, lsb_l, rd); ++ ++ } ++} ++ ++void TurboAssembler::MultiPush(RegList regs) {SCOPEMARK_NAME(TurboAssembler::MultiPush, this); ++ int16_t num_to_push = base::bits::CountPopulation(regs); ++ int16_t stack_offset = num_to_push * kPointerSize; ++ ++ Subl(sp, sp, Operand(stack_offset)); ++ if ((regs & (1 << ra.code())) != 0) { ++ stack_offset -= kPointerSize; ++ stl(ToRegister(ra.code()), MemOperand(sp, stack_offset)); ++ } ++ if ((regs & (1 << fp.code())) != 0) { ++ stack_offset -= kPointerSize; ++ stl(ToRegister(fp.code()), MemOperand(sp, stack_offset)); ++ } ++ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { ++ if ((regs & (1 << i)) != 0 && (i != ra.code()) && (i != fp.code())) { ++ stack_offset -= kPointerSize; ++ stl(ToRegister(i), MemOperand(sp, stack_offset)); ++ } ++ } ++} ++ ++ ++void TurboAssembler::MultiPop(RegList regs) {SCOPEMARK_NAME(TurboAssembler::MultiPop, this); ++ int16_t stack_offset = 0; ++ ++ for (int16_t i = 0; i < kNumRegisters; i++) { ++ if ((regs & (1 << i)) != 0 && (i != ra.code()) && (i != fp.code())) { ++ ldl(ToRegister(i), MemOperand(sp, stack_offset)); ++ stack_offset += kPointerSize; ++ } ++ } ++ if ((regs & (1 << fp.code())) != 0) { ++ ldl(ToRegister(fp.code()), MemOperand(sp, stack_offset)); ++ stack_offset += kPointerSize; ++ } ++ if ((regs & (1 << ra.code())) != 0) { ++ ldl(ToRegister(ra.code()), MemOperand(sp, stack_offset)); ++ stack_offset += kPointerSize; ++ } ++ addl(sp, stack_offset, sp); ++} ++ ++ ++void TurboAssembler::MultiPushFPU(RegList regs) {SCOPEMARK_NAME(TurboAssembler::MultiPushFPU, this); ++ int16_t num_to_push = base::bits::CountPopulation(regs); ++ int16_t stack_offset = num_to_push * kDoubleSize; ++ ++ Subl(sp, sp, Operand(stack_offset)); ++ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { ++ if ((regs & (1 << i)) != 0) { ++ stack_offset -= kDoubleSize; ++ fstd(FPURegister::from_code(i), MemOperand(sp, stack_offset)); ++ } ++ } ++} ++ ++ ++void TurboAssembler::MultiPopFPU(RegList regs) {SCOPEMARK_NAME(TurboAssembler::MultiPopFPU, this); ++ int16_t stack_offset = 0; ++ ++ for (int16_t i = 0; i < kNumRegisters; i++) { ++ if ((regs & (1 << i)) != 0) { ++ fldd(FPURegister::from_code(i), MemOperand(sp, stack_offset)); ++ stack_offset += kDoubleSize; ++ } ++ } ++ addl(sp, stack_offset, sp); ++} ++ ++ ++void TurboAssembler::Ext(Register rt, Register rs, uint16_t pos, ++ uint16_t size) {SCOPEMARK_NAME(TurboAssembler::Ext, this); ++ DCHECK_LT(pos, 32); ++ DCHECK_LT(pos + size, 33); ++#if SW64 ++ // Ext is word-sign-extend. 
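++  // pos == 0 with byte/halfword/word sizes reduces to a single zapnot or
++  // addw; every other case shifts right by pos, masks with (1 << size) - 1
++  // and sign-extends the low 32 bits with addw.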
++ if (pos == 0) { ++ if (size == 8 ) { ++ zapnot(rs, 0x1, rt); ++ } else if (size == 16 ) { ++ zapnot(rs, 0x3, rt); ++ } else if (size == 32 ) { ++ addw(rs, 0, rt); ++ } else { ++ long bitmask = (0x1L << size) - 1; ++ And(rt, rs, (int)bitmask); ++ addw(rt, 0, rt); ++ } ++ } else { ++ long bitmask = (0x1L << size) - 1; ++ srll(rs, pos, rt); ++ And(rt, rt, (int)bitmask); ++ addw(rt, 0, rt); ++ } ++#else ++ long bitsize = (0x1L << size) - 1; ++ srll(rs, pos, rt); ++ And(rt, rt, bitsize); ++#endif ++} ++ ++void TurboAssembler::Dext(Register rt, Register rs, uint16_t pos, ++ uint16_t size) {SCOPEMARK_NAME(TurboAssembler::Dext, this); ++DCHECK(pos < 64 && 0 < size && size <= 64 && 0 < pos + size && ++ pos + size <= 64); ++#ifdef SW64 ++// dext is zero-extend ++ if (pos != 0) { ++ srll(rs, pos, rt); ++ ++ switch (size) { ++ case 8: ++ zapnot(rt, 0x1, rt); ++ break; ++ case 16: ++ zapnot(rt, 0x3, rt); ++ break; ++ case 32: ++ zapnot(rt, 0xf, rt); ++ break; ++ default: { ++ DCHECK(size < 64); ++ long bitmask = (0x1L << size) - 1; ++ And(rt, rt, (int)bitmask); ++ } ++ } ++ } else { ++ switch (size) { ++ case 8: ++ zapnot(rs, 0x1, rt); ++ break; ++ case 16: ++ zapnot(rs, 0x3, rt); ++ break; ++ case 32: ++ zapnot(rs, 0xf, rt); ++ break; ++ case 64: // the result of 0x1L<<64 is 1. ++ mov(rt, rs); ++ break; ++ default: { ++ long bitmask = (0x1L << size) - 1; ++ And(rt, rs, (int)bitmask); ++ } ++ } ++ } ++#else ++ if ( (pos == 0) && (size == 32)) { ++ zapnot(rs, 0xf, rt); ++ } else { ++ long bitsize = (0x1L << size) - 1; ++ srll(rs, pos, rt); ++ And(rt, rt, bitsize); ++ } ++#endif ++} ++ ++void TurboAssembler::Ins(Register rt, Register rs, uint16_t pos, ++ uint16_t size) {SCOPEMARK_NAME(TurboAssembler::Ins, this); ++ DCHECK_LT(pos, 32); ++ DCHECK_LE(pos + size, 32); ++ DCHECK_NE(size, 0); ++ DCHECK(rs != t11 && rt != t11); ++ DCHECK(rt != at); ++ ++ long bitsize = (0x1L << size) - 1; ++ li(t11, bitsize); ++ and_ins(rs, t11, at); ++ slll(at, pos, at); ++ slll(t11, pos, t11); ++ bic(rt, t11, rt); ++ bis(rt, at, rt); ++ addw(rt, 0, rt); ++} ++ ++void TurboAssembler::Dins(Register rt, Register rs, uint16_t pos, ++ uint16_t size) {SCOPEMARK_NAME(TurboAssembler::Dins, this); ++ DCHECK_LT(pos, 64); ++ DCHECK_LE(pos + size, 64); ++ DCHECK_NE(size, 0); ++ DCHECK(rs != t11 && rt != t11); ++ DCHECK(rt != at); ++ ++ long bitsize = (size == 64) ? 
-1 : (0x1L << size) - 1; ++ li(t11, bitsize); ++ and_ins(rs, t11, at); ++ slll(at, pos, at); ++ slll(t11, pos, t11); ++ bic(rt, t11, rt); ++ bis(rt, at, rt); ++} ++ ++void TurboAssembler::ExtractBits(Register dest, Register source, Register pos, ++ int size, bool sign_extend) {SCOPEMARK_NAME(TurboAssembler::ExtractBits, this); ++#ifdef SW64 ++ sral(source, pos, dest); ++ if (sign_extend) { ++ switch (size) { ++ case 8: ++ sextb(dest, dest); ++ break; ++ case 16: ++ sexth(dest, dest); ++ break; ++ case 32: ++ // sign-extend word ++ addw(dest, 0, dest); // Sllw(dest, dest, 0); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } else { ++ switch (size) { ++ case 8: ++ zapnot(dest, 0x1, dest); ++ break; ++ case 16: ++ zapnot(dest, 0x3, dest); ++ break; ++ case 32: ++ zapnot(dest, 0xf, dest); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } ++#endif ++} ++ ++void TurboAssembler::InsertBits(Register dest, Register source, Register pos, ++ int size) {SCOPEMARK_NAME(TurboAssembler::InsertBits, this); ++#ifdef SW64 ++ DCHECK(source != t12 && dest != t12); ++ DCHECK(source != t11 && dest != t11); ++ ++ long sizemask = (0x1L << size) -1; ++ li(t11, sizemask); ++ and_ins(source, t11, t12); ++ slll(t12, pos, t12); // (source 0..size-1) << pos ++ slll(t11, pos, t11); ++ bic(dest, t11, dest); ++ bis(dest, t12, dest); ++#endif ++} ++ ++void TurboAssembler::Fnegs(FPURegister fd, FPURegister fs) {SCOPEMARK_NAME(TurboAssembler::Fnegs, this); ++#ifdef SW64 ++ fnegs(fs, fd); ++#endif ++} ++ ++void TurboAssembler::Fnegd(FPURegister fd, FPURegister fs) {SCOPEMARK_NAME(TurboAssembler::Fnegd, this); ++#ifdef SW64 ++ fnegd(fs, fd); ++#endif ++} ++ ++void TurboAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {SCOPEMARK_NAME(TurboAssembler::Cvt_d_uw, this); ++ // Move the data from fs to t11. ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ fimovs(fs,t11); ++ Cvt_d_uw(fd, t11); ++} ++ ++void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs) {SCOPEMARK_NAME(TurboAssembler::Cvt_d_uw, this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ ++ // Convert rs to a FP value in fd. ++ DCHECK(rs != t12); ++ DCHECK(rs != at); ++ ++ // Zero extend int32 in rs. ++ zapnot(rs, 0xf, t12); //ZHJ Dext(t12, rs, 0, 32); ++ ifmovd(t12, fd); ++ fcvtld_(fd, fd); ++} ++ ++void TurboAssembler::Cvt_d_ul(FPURegister fd, FPURegister fs) {SCOPEMARK_NAME(TurboAssembler::Cvt_d_ul, this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ // Move the data from fs to t11. ++ fimovd(fs,t11); ++ Cvt_d_ul(fd, t11); ++} ++ ++void TurboAssembler::Cvt_d_ul(FPURegister fd, Register rs) {SCOPEMARK_NAME(TurboAssembler::Cvt_d_ul, this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ // Convert rs to a FP value in fd. ++ ++ DCHECK(rs != t12); ++ DCHECK(rs != at); ++ ++ Label msb_clear, conversion_done; ++ ++ Branch(&msb_clear, ge, rs, Operand(zero_reg)); ++ ++ // Rs >= 2^63 ++ and_ins(rs, 1,t12); ++ srll(rs, 1 ,rs); ++ or_ins(t12, rs, t12); ++ ifmovd(t12, fd); ++ fcvtld_(fd, fd); ++ faddd(fd, fd, fd); // In delay slot. ++ Branch(&conversion_done); ++ ++ bind(&msb_clear); ++ // Rs < 2^63, we can do simple conversion. ++ ifmovd(rs, fd); ++ fcvtld_(fd, fd); ++ ++ bind(&conversion_done); ++} ++ ++void TurboAssembler::Cvt_s_uw(FPURegister fd, FPURegister fs) {SCOPEMARK_NAME(TurboAssembler::Cvt_s_uw, this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ // Move the data from fs to t11. 
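++ // The uint32 is only a bit pattern held in fs, so it is moved to a GPR and ++ // converted through the signed 64-bit path of the Register overload below.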
++ fimovs(fs,t11); ++ Cvt_s_uw(fd, t11); ++} ++ ++void TurboAssembler::Cvt_s_uw(FPURegister fd, Register rs) {SCOPEMARK_NAME(TurboAssembler::Cvt_s_uw, this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ // Convert rs to a FP value in fd. ++ DCHECK(rs != t12); ++ DCHECK(rs != at); ++ ++ // Zero extend int32 in rs. ++ zapnot(rs, 0xf, t12); //ZHJ Dext(t12, rs, 0, 32); ++ ifmovd(t12, fd); ++ fcvtls_(fd, fd); ++} ++ ++void TurboAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) {SCOPEMARK_NAME(TurboAssembler::Cvt_s_ul, this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ // Move the data from fs to t11. ++ fimovd(fs,t11); ++ Cvt_s_ul(fd, t11); ++} ++ ++void TurboAssembler::Cvt_s_ul(FPURegister fd, Register rs) {SCOPEMARK_NAME(TurboAssembler::Cvt_s_ul, this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ // Convert rs to a FP value in fd. ++ ++ DCHECK(rs != t12); ++ DCHECK(rs != at); ++ ++ Label positive, conversion_done; ++ ++ Branch(&positive, ge, rs, Operand(zero_reg)); ++ ++ // Rs >= 2^31. ++ and_ins(rs, 1,t12); ++ srll(rs, 1, rs); ++ or_ins(t12, rs, t12); ++ ifmovd(t12, fd); ++ fcvtls_(fd, fd); ++ fadds(fd, fd, fd); // In delay slot. ++ Branch(&conversion_done); ++ ++ bind(&positive); ++ // Rs < 2^31, we can do simple conversion. ++ ifmovd(rs, fd); ++ fcvtls_(fd, fd); ++ ++ bind(&conversion_done); ++} ++ ++ ++void MacroAssembler::Round_l_d(FPURegister fd, FPURegister fs) {SCOPEMARK_NAME(MacroAssembler::Round_l_d, this); ++ fcvtdl_g(fs, fd); // rounding to nearest ++} ++ ++ ++void MacroAssembler::Floor_l_d(FPURegister fd, FPURegister fs) {SCOPEMARK_NAME(MacroAssembler::Floor_l_d, this); ++ fcvtdl_n(fs, fd); // rounding down ++} ++ ++ ++void MacroAssembler::Ceil_l_d(FPURegister fd, FPURegister fs) {SCOPEMARK_NAME(MacroAssembler::Ceil_l_d, this); ++ fcvtdl_p(fs, fd); // rounding up ++} ++ ++ ++void MacroAssembler::Trunc_l_d(FPURegister fd, FPURegister fs) {SCOPEMARK_NAME(MacroAssembler::Trunc_l_d, this); ++ fcvtdl_z(fs, fd); // rounding toward zero ++} ++ ++ ++void MacroAssembler::Trunc_l_ud(FPURegister fd, ++ FPURegister fs, ++ FPURegister scratch) {SCOPEMARK_NAME(MacroAssembler::Trunc_l_ud, this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ // Load to GPR. ++ fimovd(fs,t11); ++ // Reset sign bit. 
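++ // Clearing bit 63 of the bit pattern gives |fs|, so the 64-bit truncation ++ // below operates on the magnitude only.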
++ { ++ UseScratchRegisterScope temps(this); ++ Register scratch1 = temps.Acquire(); ++ li(scratch1, 0x7FFFFFFFFFFFFFFF); ++ and_ins(t11, scratch1, t11); ++ } ++ ifmovd(t11, fs); ++ ftruncdl(fs, fd); ++} ++ ++void TurboAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs, ++ FPURegister scratch) {SCOPEMARK_NAME(TurboAssembler::Trunc_uw_d, this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Trunc_uw_d(t11, fs, scratch); ++ ifmovs(t11, fd); ++} ++ ++void TurboAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs, ++ FPURegister scratch) {SCOPEMARK_NAME(TurboAssembler::Trunc_uw_s, this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Trunc_uw_s(t11, fs, scratch); ++ ifmovs(t11, fd); ++} ++ ++void TurboAssembler::Trunc_ul_d(FPURegister fd, FPURegister fs, ++ FPURegister scratch, Register result) {SCOPEMARK_NAME(TurboAssembler::Trunc_ul_d, this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Trunc_ul_d(t11, fs, scratch, result); ++ ifmovd(t11, fd); ++} ++ ++void TurboAssembler::Trunc_ul_s(FPURegister fd, FPURegister fs, ++ FPURegister scratch, Register result) {SCOPEMARK_NAME(TurboAssembler::Trunc_ul_s, this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Trunc_ul_s(t11, fs, scratch, result); ++ ifmovd(t11, fd); ++} ++ ++ ++void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {SCOPEMARK_NAME(MacroAssembler::Trunc_w_d, this); ++ ftruncdw(fs, fd); ++} ++ ++ ++void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {SCOPEMARK_NAME(MacroAssembler::Round_w_d, this); ++ frounddw(fs, fd); ++} ++ ++ ++void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {SCOPEMARK_NAME(MacroAssembler::Floor_w_d, this); ++ ffloordw(fs, fd); ++} ++ ++ ++void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {SCOPEMARK_NAME(MacroAssembler::Ceil_w_d, this); ++ fceildw(fs, fd); ++} ++ ++void TurboAssembler::Trunc_uw_d(Register rd, FPURegister fs, ++ FPURegister scratch) {SCOPEMARK_NAME(TurboAssembler::Trunc_uw_d, this); ++ DCHECK(fs != scratch); ++ DCHECK(rd != at); ++ ++ { ++ // Load 2^31 into scratch as its float representation. ++ UseScratchRegisterScope temps(this); ++ Register scratch1 = temps.Acquire(); ++ li(scratch1, 0x41E00000); ++ slll(scratch1, 32, scratch1); ++ ifmovd(scratch1, scratch); ++ } ++ // Test if scratch > fd. ++ // If fd < 2^31 we can convert it normally. ++ Label simple_convert; ++ CompareF64(OLT, fs, scratch); ++ BranchTrueShortF(&simple_convert); ++ ++ // First we subtract 2^31 from fd, then trunc it to rs ++ // and add 2^31 to rs. ++ fsubd(fs, scratch, kDoubleCompareReg); ++ ftruncdw(kDoubleCompareReg, scratch); ++ fimovs(scratch,rd); ++ Or(rd, rd, 1 << 31); ++ ++ Label done; ++ Branch(&done); ++ // Simple conversion. ++ bind(&simple_convert); ++ fcvtdl_z(fs, kScratchDoubleReg1); ++ fcvtlw(kScratchDoubleReg1, scratch); ++ fimovs(scratch, rd); ++ ++ bind(&done); ++} ++ ++void TurboAssembler::Trunc_uw_s(Register rd, FPURegister fs, ++ FPURegister scratch) {SCOPEMARK_NAME(TurboAssembler::Trunc_uw_s, this); ++ DCHECK(fs != scratch); ++ DCHECK(rd != at); ++ ++ { ++ // Load 2^31 into scratch as its float representation. ++ UseScratchRegisterScope temps(this); ++ Register scratch1 = temps.Acquire(); ++ li(scratch1, 0x4F000000); ++ ifmovs(scratch1, scratch); ++ } ++ // Test if scratch > fs. ++ // If fs < 2^31 we can convert it normally. ++ Label simple_convert; ++ CompareF32(OLT, fs, scratch); ++ BranchTrueShortF(&simple_convert); ++ ++ // First we subtract 2^31 from fs, then trunc it to rd ++ // and add 2^31 to rd. 
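++ // e.g. fs = 3e9f: 3e9 - 2^31 = 852516352 fits in int32, and OR-ing bit 31 ++ // back into the truncated result restores 3000000000.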
++ fsubs(fs, scratch, kDoubleCompareReg); // sub_s(scratch, fs, scratch); ++ ftruncsw(kDoubleCompareReg, scratch); // trunc_w_s(scratch, scratch); ++ fimovs(scratch,rd); ++ Or(rd, rd, 1 << 31); ++ ++ Label done; ++ Branch(&done); ++ // Simple conversion. ++ bind(&simple_convert); ++ ftruncsw(fs, scratch); ++ fimovs(scratch,rd); ++ ++ bind(&done); ++} ++ ++void TurboAssembler::Trunc_ul_d(Register rd, FPURegister fs, ++ FPURegister scratch, Register result) {SCOPEMARK_NAME(TurboAssembler::Trunc_ul_d, this); ++ DCHECK(fs != scratch); ++ DCHECK(result.is_valid() ? !AreAliased(rd, result, at) : !AreAliased(rd, at)); ++ ++ Label simple_convert, done, fail; ++ if (result.is_valid()) { ++ mov(result, zero_reg); ++ Move(scratch, -1.0); ++ // If fd =< -1 or unordered, then the conversion fails. ++ CompareF64(OLE, fs, scratch); ++ BranchTrueShortF(&fail); ++ CompareIsNanF64(fs, scratch); ++ BranchTrueShortF(&fail); ++ // if fd >= (double)UINT64_MAX, then the conversion fails. ++ ldih(rd, 0x43f0, zero_reg); ++ slll(rd, 32, rd); ++ ifmovd(rd, scratch); ++ CompareF64(OLT, fs, scratch); ++ BranchFalseShortF(&fail); ++ } ++ ++ // Load 2^63 into scratch as its double representation. ++ li(at, 0x43E0000000000000); ++ ifmovd(at, scratch); ++ ++ // Test if scratch > fs. ++ // If fs < 2^63 we can convert it normally. ++ CompareF64(OLT, fs, scratch); ++ BranchTrueShortF(&simple_convert); ++ ++ // First we subtract 2^63 from fs, then trunc it to rd ++ // and add 2^63 to rd. ++ fsubd(fs, scratch, kDoubleCompareReg); // sub_d(scratch, fs, scratch); ++ fcvtdl_z(kDoubleCompareReg, scratch); // trunc_l_d(scratch, scratch); ++ fimovd(scratch,rd); ++ Or(rd, rd, Operand(1UL << 63)); ++ Branch(&done); ++ ++ // Simple conversion. ++ bind(&simple_convert); ++ ftruncdl(fs, scratch); ++ fimovd(scratch,rd); ++ ++ bind(&done); ++ if (result.is_valid()) { ++ // Conversion is failed if the result is negative. ++ { ++ UseScratchRegisterScope temps(this); ++ Register scratch1 = temps.Acquire(); ++ Addw(scratch1, zero_reg, Operand(-1)); ++ srll(scratch1, 1, scratch1); // Load 2^62. ++ fimovd(scratch,result); ++ xor_ins(result, scratch1, result); ++ } ++ Cmplt(result, zero_reg, result); ++ } ++ ++ bind(&fail); ++} ++ ++void TurboAssembler::Trunc_ul_s(Register rd, FPURegister fs, ++ FPURegister scratch, Register result) {SCOPEMARK_NAME(TurboAssembler::Trunc_ul_s, this); ++ DCHECK(fs != scratch); ++ DCHECK(result.is_valid() ? !AreAliased(rd, result, at) : !AreAliased(rd, at)); ++ ++ Label simple_convert, done, fail; ++ FPURegister fscratch2 = kScratchDoubleReg2; ++ if (result.is_valid()) { ++ mov(result, zero_reg); ++ Move(scratch, -1.0f); ++ // If fd =< -1 or unordered, then the conversion fails. ++ CompareF32(OLE, fs, scratch); ++ BranchTrueShortF(&fail); ++ CompareIsNanF32(fs, scratch); ++ BranchTrueShortF(&fail); ++ // if fd >= (float)UINT64_MAX, then the conversion fails. ++ ldih(rd, 0x5F80, zero_reg); ++ ifmovs(rd, scratch); ++ CompareF32(OLT, fs, scratch); ++ BranchFalseShortF(&fail); ++ } ++ ++ { ++ // Load 2^63 into scratch as its float representation. ++ UseScratchRegisterScope temps(this); ++ Register scratch1 = temps.Acquire(); ++ li(scratch1, 0x5F000000); ++ ifmovs(scratch1, scratch); ++ } ++ ++ // Test if scratch > fs. ++ // If fs < 2^63 we can convert it normally. ++ CompareF32(OLT, fs, scratch); ++ BranchTrueShortF(&simple_convert); ++ ++ // First we subtract 2^63 from fs, then trunc it to rd ++ // and add 2^63 to rd. 
++ fsubs(fs, scratch, fscratch2); ++ ftruncsl(fscratch2, scratch); ++ fimovd(scratch,rd); ++ Or(rd, rd, Operand(1UL << 63)); ++ Branch(&done); ++ ++ // Simple conversion. ++ bind(&simple_convert); ++ ftruncsl(fs, scratch); ++ fimovd(scratch,rd); ++ ++ bind(&done); ++ if (result.is_valid()) { ++ // Conversion is failed if the result is negative or unordered. ++ { ++ UseScratchRegisterScope temps(this); ++ Register scratch1 = temps.Acquire(); ++ Addw(scratch1, zero_reg, Operand(-1)); ++ srll(scratch1, 1 ,scratch1); // Load 2^62. ++ fimovd(scratch,result); ++ xor_ins(result, scratch1, result); ++ } ++ Cmplt(result, zero_reg, result); ++ } ++ ++ bind(&fail); ++} ++ ++template ++void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src, ++ FPURoundingMode mode, RoundFunc round) {SCOPEMARK_NAME(TurboAssembler::RoundDouble, this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register scratch = t11; ++ Label done; ++ fimovd(src, scratch); ++ srll(scratch, 32, scratch); ++ srll(scratch, HeapNumber::kExponentShift, at); ++ li(gp, (0x1L<ffloordl(src,dst); ++ }); ++} ++ ++void TurboAssembler::Ceil_d_d(FPURegister dst, FPURegister src) {SCOPEMARK_NAME(TurboAssembler::Ceil_d_d, this); ++ RoundDouble(dst, src, mode_ceil, ++ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { ++ tasm->fceildl(src, dst); ++ }); ++} ++ ++void TurboAssembler::Trunc_d_d(FPURegister dst, FPURegister src) {SCOPEMARK_NAME(TurboAssembler::Trunc_d_d, this); ++ RoundDouble(dst, src, mode_trunc, ++ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { ++ tasm->ftruncdl(src,dst); ++ }); ++} ++ ++void TurboAssembler::Round_d_d(FPURegister dst, FPURegister src) {SCOPEMARK_NAME(TurboAssembler::Round_d_d, this); ++ RoundDouble(dst, src, mode_round, ++ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { ++ tasm->frounddl(src,dst); ++ }); ++} ++ ++template ++void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src, ++ FPURoundingMode mode, RoundFunc round) {SCOPEMARK_NAME(TurboAssembler::RoundFloat, this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register scratch = t11; ++ Register scratch2 = t8; ++ int32_t kFloat32ExponentBias = 127; ++ int32_t kFloat32MantissaBits = 23; ++// int32_t kFloat32ExponentBits = 8; ++ Label done; ++ fimovd(src, scratch); ++ srll(scratch, 32, scratch); ++ srll(scratch, 20, scratch2);//sign + exponent, 12 bits ++ and_ins(scratch2, 0x7F, at);//low 7 exponent bits ++ addw(at, 0, at); ++ srll(scratch2, 3, gp); ++ and_ins(gp, 0x80, gp); ++ addw(gp, 0, gp); ++ or_ins(at, gp, at); ++ addw(at, 0, at); ++ fmovs(src, dst); ++ li(gp, kFloat32ExponentBias + kFloat32MantissaBits); ++ cmpult(at, gp, at); ++ beq(at, &done); ++ round(this, dst, src); ++ fimovs(dst, at); ++ fcvtws(dst, dst); ++ bne(at, &done); ++ srll(scratch, 31, at); ++ slll(at, 31 + 32, at); ++ ifmovd(at, dst); ++ bind(&done); ++} ++ ++void TurboAssembler::Floor_s_s(FPURegister dst, FPURegister src) {SCOPEMARK_NAME(TurboAssembler::Floor_s_s, this); ++ RoundFloat(dst, src, mode_floor, ++ [](Assembler* tasm, FPURegister dst, FPURegister src) { ++ tasm->ffloorsw(src, dst); ++ }); ++} ++ ++void TurboAssembler::Ceil_s_s(FPURegister dst, FPURegister src) {SCOPEMARK_NAME(TurboAssembler::Ceil_s_s, this); ++ RoundFloat(dst, src, mode_ceil, ++ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { ++ tasm->fceilsw(src, dst); ++ }); ++} ++ ++void TurboAssembler::Trunc_s_s(FPURegister dst, FPURegister src) {SCOPEMARK_NAME(TurboAssembler::Trunc_s_s, this); ++ RoundFloat(dst, src, mode_trunc, 
++ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { ++ tasm->ftruncsw(src, dst); ++ }); ++} ++ ++void TurboAssembler::Round_s_s(FPURegister dst, FPURegister src) {SCOPEMARK_NAME(TurboAssembler::Round_s_s, this); ++ RoundFloat(dst, src, mode_round, ++ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { ++ tasm->froundsw(src, dst); ++ }); ++} ++//SKTODO ++#if 0 ++void TurboAssembler::MSARoundW(MSARegister dst, MSARegister src, ++ FPURoundingMode mode) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register scratch = t8; ++ Register scratch2 = at; ++ cfcmsa(scratch, MSACSR); ++ if (mode == kRoundToNearest) { ++ scratch2 = zero_reg; ++ } else { ++ li(scratch2, Operand(mode)); ++ } ++ ctcmsa(MSACSR, scratch2); ++ frint_w(dst, src); ++ ctcmsa(MSACSR, scratch); ++} ++ ++void TurboAssembler::MSARoundD(MSARegister dst, MSARegister src, ++ FPURoundingMode mode) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register scratch = t8; ++ Register scratch2 = at; ++ cfcmsa(scratch, MSACSR); ++ if (mode == kRoundToNearest) { ++ scratch2 = zero_reg; ++ } else { ++ li(scratch2, Operand(mode)); ++ } ++ ctcmsa(MSACSR, scratch2); ++ frint_d(dst, src); ++ ctcmsa(MSACSR, scratch); ++} ++#endif ++ ++void MacroAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, ++ FPURegister ft, FPURegister scratch) {SCOPEMARK_NAME(MacroAssembler::Madd_s, this); ++ DCHECK(fr != scratch && fs != scratch && ft != scratch); ++ fmuls(fs, ft, scratch); ++ fadds(fr, scratch, fd); ++} ++ ++void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, ++ FPURegister ft, FPURegister scratch) {SCOPEMARK_NAME(MacroAssembler::Madd_d, this); ++ DCHECK(fr != scratch && fs != scratch && ft != scratch); ++ fmuld(fs, ft, scratch); ++ faddd(fr, scratch, fd); ++} ++ ++void MacroAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs, ++ FPURegister ft, FPURegister scratch) {SCOPEMARK_NAME(MacroAssembler::Msub_s, this); ++ DCHECK(fr != scratch && fs != scratch && ft != scratch); ++ fmuls(fs, ft, scratch); ++ fsubs(scratch, fr, fd); ++} ++ ++void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, ++ FPURegister ft, FPURegister scratch) {SCOPEMARK_NAME(MacroAssembler::Msub_d, this); ++ DCHECK(fr != scratch && fs != scratch && ft != scratch); ++ fmuld(fs, ft, scratch); ++ fsubd(scratch, fr, fd); ++} ++ ++void TurboAssembler::CompareF(SecondaryField sizeField, FPUCondition cc, ++ FPURegister cmp1, FPURegister cmp2) {SCOPEMARK_NAME(TurboAssembler::CompareF, this); ++ sizeField = sizeField == D ? L : W; ++ DCHECK(cmp1 != kDoubleCompareReg && cmp2 != kDoubleCompareReg); ++ cmp(cc, sizeField, kDoubleCompareReg, cmp1, cmp2); ++} ++ ++void TurboAssembler::CompareIsNanF(SecondaryField sizeField, FPURegister cmp1, ++ FPURegister cmp2) {SCOPEMARK_NAME(TurboAssembler::CompareIsNanF, this); ++ CompareF(sizeField, UN, cmp1, cmp2); ++} ++ ++void TurboAssembler::BranchTrueShortF(Label* target, BranchDelaySlot bd) {SCOPEMARK_NAME(TurboAssembler::BranchTrueShortF, this); ++ fbne(kDoubleCompareReg, target); ++} ++ ++void TurboAssembler::BranchFalseShortF(Label* target, BranchDelaySlot bd) {SCOPEMARK_NAME(TurboAssembler::BranchFalseShortF, this); ++ fbeq(kDoubleCompareReg, target); ++} ++ ++void TurboAssembler::BranchTrueF(Label* target, BranchDelaySlot bd) {SCOPEMARK_NAME(TurboAssembler::BranchTrueF, this); ++ bool long_branch = ++ target->is_bound() ? 
!is_near(target) : is_trampoline_emitted(); ++ if (long_branch) { ++ Label skip; ++ BranchFalseShortF(&skip); ++ BranchLong(target, bd); ++ bind(&skip); ++ } else { ++ BranchTrueShortF(target, bd); ++ } ++} ++ ++void TurboAssembler::BranchFalseF(Label* target, BranchDelaySlot bd) {SCOPEMARK_NAME(TurboAssembler::BranchFalseF, this); ++ bool long_branch = ++ target->is_bound() ? !is_near(target) : is_trampoline_emitted(); ++ if (long_branch) { ++ Label skip; ++ BranchTrueShortF(&skip); ++ BranchLong(target, bd); ++ bind(&skip); ++ } else { ++ BranchFalseShortF(target, bd); ++ } ++} ++ ++void TurboAssembler::BranchMSA(Label* target, MSABranchDF df, ++ MSABranchCondition cond, MSARegister wt, ++ BranchDelaySlot bd) {SCOPEMARK_NAME(TurboAssembler::BranchMSA, this); ++ UNREACHABLE(); ++} ++ ++void TurboAssembler::BranchShortMSA(MSABranchDF df, Label* target, ++ MSABranchCondition cond, MSARegister wt, ++ BranchDelaySlot bd) {SCOPEMARK_NAME(TurboAssembler::BranchShortMSA, this); ++ UNREACHABLE(); ++} ++ ++void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) {SCOPEMARK_NAME(TurboAssembler::FmoveLow, this); ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ Register scratch1 = t8; ++ DCHECK(src_low != scratch); ++#ifdef SW64 ++ fimovd(dst, scratch); ++ srll(scratch, 32, scratch); ++ slll(scratch, 32, scratch); ++ zapnot(src_low, 0xf, scratch1); ++ or_ins(scratch, scratch1, scratch); ++ ifmovd(scratch, dst); ++#endif ++} ++ ++void TurboAssembler::Move(FPURegister dst, uint32_t src) {SCOPEMARK_NAME(TurboAssembler::Move, this); ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ li(scratch, Operand(static_cast<int32_t>(src))); ++ ifmovs(scratch, dst); ++} ++ ++void TurboAssembler::Move(FPURegister dst, uint64_t src) {SCOPEMARK_NAME(TurboAssembler::Move, this); ++ // Handle special values first. 
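++ // +0.0 and -0.0 are matched by bit pattern and built from kDoubleRegZero, ++ // which avoids materializing a 64-bit immediate.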
++ if (src == bit_cast<uint64_t>(0.0) /*&& has_double_zero_reg_set_*/) { ++ fmovd(kDoubleRegZero,dst); ++ } else if (src == bit_cast<uint64_t>(-0.0) /*&& has_double_zero_reg_set_*/) { ++ Fnegd(dst, kDoubleRegZero); ++ } else { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ li(scratch, Operand(bit_cast<int64_t>(src))); ++ ifmovd(scratch, dst); ++ } ++} ++ ++void TurboAssembler::Seleq(Register rd, Register rs, Register rt) {SCOPEMARK_NAME(TurboAssembler::Seleq, this); ++ seleq(rt, rs, rd, rd); ++} ++ ++void TurboAssembler::Selne(Register rd, Register rs, Register rt) {SCOPEMARK_NAME(TurboAssembler::Selne, this); ++ selne(rt, rs, rd, rd); ++} ++ ++void TurboAssembler::LoadZeroOnCondition(Register rd, Register rs, ++ const Operand& rt, Condition cond) {SCOPEMARK_NAME(TurboAssembler::LoadZeroOnCondition, this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ switch (cond) { ++ case cc_always: ++ mov(rd, zero_reg); ++ break; ++ case eq: ++ if (rs == zero_reg) { ++ if (rt.is_reg()) { ++ LoadZeroIfConditionZero(rd, rt.rm()); ++ } else { ++ if (rt.immediate() == 0) { ++ mov(rd, zero_reg); ++ } else { ++ nop(); ++ } ++ } ++ } else if (IsZero(rt)) { ++ LoadZeroIfConditionZero(rd, rs); ++ } else { ++ Subl(t12, rs, rt); ++ LoadZeroIfConditionZero(rd, t12); ++ } ++ break; ++ case ne: ++ if (rs == zero_reg) { ++ if (rt.is_reg()) { ++ LoadZeroIfConditionNotZero(rd, rt.rm()); ++ } else { ++ if (rt.immediate() != 0) { ++ mov(rd, zero_reg); ++ } else { ++ nop(); ++ } ++ } ++ } else if (IsZero(rt)) { ++ LoadZeroIfConditionNotZero(rd, rs); ++ } else { ++ Subl(t12, rs, rt); ++ LoadZeroIfConditionNotZero(rd, t12); ++ } ++ break; ++ ++ // Signed comparison. ++ case greater: ++ Cmpgt(t12, rs, rt); ++ LoadZeroIfConditionNotZero(rd, t12); ++ break; ++ case greater_equal: ++ Cmpge(t12, rs, rt); ++ LoadZeroIfConditionNotZero(rd, t12); ++ // rs >= rt ++ break; ++ case less: ++ Cmplt(t12, rs, rt); ++ LoadZeroIfConditionNotZero(rd, t12); ++ // rs < rt ++ break; ++ case less_equal: ++ Cmple(t12, rs, rt); ++ LoadZeroIfConditionNotZero(rd, t12); ++ // rs <= rt ++ break; ++ ++ // Unsigned comparison. 
++ case Ugreater: ++ Cmpugt(t12, rs, rt); ++ LoadZeroIfConditionNotZero(rd, t12); ++ // rs > rt ++ break; ++ ++ case Ugreater_equal: ++ Cmpuge(t12, rs, rt); ++ LoadZeroIfConditionNotZero(rd, t12); ++ // rs >= rt ++ break; ++ case Uless: ++ Cmpult(t12, rs, rt); ++ LoadZeroIfConditionNotZero(rd, t12); ++ // rs < rt ++ break; ++ case Uless_equal: ++ Cmpule(t12, rs, rt); ++ LoadZeroIfConditionNotZero(rd, t12); ++ // rs <= rt ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void TurboAssembler::LoadZeroIfConditionNotZero(Register dest, ++ Register condition) {SCOPEMARK_NAME(TurboAssembler::LoadZeroIfConditionNotZero, this); ++ Selne(dest, zero_reg, condition); ++} ++ ++void TurboAssembler::LoadZeroIfConditionZero(Register dest, ++ Register condition) {SCOPEMARK_NAME(TurboAssembler::LoadZeroIfConditionZero, this); ++ Seleq(dest, zero_reg, condition); ++} ++ ++void TurboAssembler::LoadZeroIfFPUCondition(Register dest) {SCOPEMARK_NAME(TurboAssembler::LoadZeroIfFPUCondition, this); ++#ifdef SW64 ++ fimovd(kDoubleCompareReg, kScratchReg); ++ LoadZeroIfConditionNotZero(dest, kScratchReg); ++#endif ++} ++ ++void TurboAssembler::LoadZeroIfNotFPUCondition(Register dest) {SCOPEMARK_NAME(TurboAssembler::LoadZeroIfNotFPUCondition, this); ++#ifdef SW64 ++ fimovd(kDoubleCompareReg, kScratchReg); ++ LoadZeroIfConditionZero(dest, kScratchReg); ++#endif ++} ++ ++ ++void TurboAssembler::Clz(Register rd, Register rs) {SCOPEMARK_NAME(TurboAssembler::Clz, this); ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ addw(rs, 0, scratch); // sign extend ++ sellt(scratch, zero_reg, rd, rd); // (int)rs < 0 => rd = 0; ++ blt(scratch, 3); ++ ctlz(scratch, rd); // rs>0 => rd=rd-32; ++ ldi(scratch, 32, zero_reg); ++ subl(rd, scratch, rd); ++} ++ ++void TurboAssembler::Dclz(Register rd, Register rs) { ctlz(rs, rd); } ++ ++void TurboAssembler::Ctz(Register rd, Register rs) {SCOPEMARK_NAME(TurboAssembler::Ctz, this); ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ cttz(rs, rd); ++ ldi(scratch, 32, zero_reg); ++ subl(rd, scratch, scratch); ++ selge(scratch, 32, rd, rd); ++} ++ ++void TurboAssembler::Dctz(Register rd, Register rs) {SCOPEMARK_NAME(TurboAssembler::Dctz, this); ++ cttz(rs, rd); ++} ++ ++void TurboAssembler::Popcnt(Register rd, Register rs) {SCOPEMARK_NAME(TurboAssembler::Popcnt, this); ++ zapnot(rs, 0xf, rd); ++ ctpop(rd, rd); ++} ++ ++void TurboAssembler::Dpopcnt(Register rd, Register rs) {SCOPEMARK_NAME(TurboAssembler::Dpopcnt, this); ++ ctpop(rs, rd); ++} ++ ++void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode, ++ Register result, ++ DoubleRegister double_input, ++ Register scratch, ++ DoubleRegister double_scratch, ++ Register except_flag, ++ CheckForInexactConversion check_inexact) {SCOPEMARK_NAME(MacroAssembler::EmitFPUTruncate, this); ++ DCHECK(result != scratch); ++ DCHECK(double_input != double_scratch); ++ DCHECK(except_flag != scratch); ++ ++ Label done; ++ ++ // Clear the except flag (0 = no exception) ++ mov(except_flag, zero_reg); ++ ++ // Test for values that can be exactly represented as a signed 32-bit integer. ++ fcvtdl(double_input, double_scratch); ++ fcvtlw(double_scratch, kScratchDoubleReg1); ++ fimovs(kScratchDoubleReg1, result); ++ ++ fcvtwd(double_scratch, double_scratch); ++ CompareF64(EQ, double_input, double_scratch); ++ BranchTrueShortF(&done); ++ ++ int64_t except_mask = sFCSRFlagMask; // int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions. 
++ ++ if (check_inexact == kDontCheckForInexactConversion) { ++ // Ignore inexact exceptions. ++ except_mask &= ~sFCSRInexactFlagMask; // except_mask &= ~kFCSRInexactFlagMask; ++ } ++ ++ // Save FCSR. ++ rfpcr(kScratchDoubleReg); ++ // Disable FPU exceptions. ++ // SW64 neednot clear FPCR 20150513. ++ //in order to have same effection , we should do four steps in sw: ++ //1) set fpcr = 0 ++ //2) Rounding: sw(10), round-to-even ++ //3) set trap bit: sw(62~61,51~49), exception controlled by fpcr but not trap ++ //4) set exception mode: sw(00) setfpec0 ++ li(scratch, sFCSRControlMask | sFCSRRound1Mask); //1), 2), 3) ++ ifmovd(scratch, double_scratch); ++ wfpcr(double_scratch); ++ setfpec1();//4) ++ ++ // Do operation based on rounding mode. ++ switch (rounding_mode) { ++ case kRoundToNearest: ++ fcvtdl_g(double_input, double_scratch); ++ break; ++ case kRoundToZero: ++ fcvtdl_z(double_input, double_scratch); ++ break; ++ case kRoundToPlusInf: ++ fcvtdl_p(double_input, double_scratch); ++ break; ++ case kRoundToMinusInf: ++ fcvtdl_n(double_input, double_scratch); ++ break; ++ } // End of switch-statement. ++ ++ // Move the converted value into the result register. ++ fcvtlw(double_scratch, kScratchDoubleReg1); ++ fimovs(kScratchDoubleReg1, result); ++ ++ // Retrieve FCSR. ++ rfpcr(double_scratch); ++ fimovd(double_scratch, except_flag); ++ // Restore FCSR. ++ wfpcr(kScratchDoubleReg); ++ setfpec1(); ++ ++ // Check for fpu exceptions. ++ And(except_flag, except_flag, Operand(except_mask)); ++ ++ bind(&done); ++} ++ ++//SKTODO ++void TurboAssembler::TryInlineTruncateDoubleToI(Register result, ++ DoubleRegister double_input, ++ Label* done) {SCOPEMARK_NAME(TurboAssembler::TryInlineTruncateDoubleToI, this); ++ DoubleRegister single_scratch = kScratchDoubleReg; ++ UseScratchRegisterScope temps(this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register scratch = temps.Acquire(); ++ Register scratch2 = t12; ++ ++ DoubleRegister fp_scratch = f20; ++ DoubleRegister fp_scratch2 = f21; ++ DCHECK(fp_scratch != double_input && fp_scratch2 != double_input && single_scratch != double_input); ++ DCHECK(scratch != result && scratch2 != result); ++ ++ MultiPushFPU(fp_scratch.bit() | fp_scratch2.bit()); ++ ++ // Clear cumulative exception flags and save the FCSR. ++ // SW64 FPCR, equal to DoubleToIStub::Generate ++ //in order to have same effection, we should do four steps in sw: ++ //1) set fpcr = 0 ++ //2) Rounding: sw(10), round-to-even ++ //3) set trap bit: sw(62~61,51~49), exception controlled by fpcr but not trap ++ //4) set exception mode: sw(00) setfpec0 ++ rfpcr(fp_scratch2); ++ li(scratch, sFCSRControlMask | sFCSRRound1Mask); //1), 2), 3) ++ ifmovd(scratch, fp_scratch); ++ wfpcr(fp_scratch); ++ setfpec1();//4) ++ ++ // Try a conversion to a signed integer. ++ fcvtdl_z(double_input, single_scratch); ++ fcvtlw(single_scratch, fp_scratch); ++ fimovs(fp_scratch, result); ++ ++ // Retrieve and restore the FCSR. ++ rfpcr(fp_scratch); ++ wfpcr(fp_scratch2); ++ setfpec1(); ++ fimovd(fp_scratch, scratch); ++ ++ MultiPopFPU(fp_scratch.bit() | fp_scratch2.bit()); ++ ++ // Check for overflow and NaNs. ++ li(scratch2, sFCSROverflowFlagMask | sFCSRUnderflowFlagMask | ++ sFCSRInvalidOpFlagMask); ++ And(scratch, scratch, Operand(scratch2)); ++ ++ // If we had no exceptions we are done. 
++ Branch(done, eq, scratch, Operand(zero_reg)); ++} ++ ++void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, ++ Register result, ++ DoubleRegister double_input, ++ StubCallMode stub_mode) {SCOPEMARK_NAME(TurboAssembler::TruncateDoubleToI, this); ++ Label done; ++ ++ TryInlineTruncateDoubleToI(result, double_input, &done); ++ ++ // If we fell through then inline version didn't succeed - call stub instead. ++ push(ra); ++ Subl(sp, sp, Operand(kDoubleSize)); // Put input on stack. ++ Fstd(double_input, MemOperand(sp, 0)); ++ ++ if (stub_mode == StubCallMode::kCallWasmRuntimeStub) { ++ Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL); ++ } else { ++ Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET); ++ } ++ Ldl(result, MemOperand(sp, 0)); ++ ++ Addl(sp, sp, Operand(kDoubleSize)); ++ pop(ra); ++ ++ bind(&done); ++} ++ ++// Emulated condtional branches do not emit a nop in the branch delay slot. ++// ++// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct. ++#define BRANCH_ARGS_CHECK(cond, rs, rt) \ ++ DCHECK((cond == cc_always && rs == zero_reg && rt.rm() == zero_reg) || \ ++ (cond != cc_always && (rs != zero_reg || rt.rm() != zero_reg))) ++ ++void TurboAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {SCOPEMARK_NAME(TurboAssembler::Branch, this); ++ DCHECK_EQ(kArchVariant, kSw64r3 ? is_int26(offset) : is_int21(offset)); ++ BranchShort(offset, bdslot); ++} ++ ++void TurboAssembler::Branch(int32_t offset, Condition cond, Register rs, ++ const Operand& rt, BranchDelaySlot bdslot) {SCOPEMARK_NAME(TurboAssembler::Branch, this); ++ bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot); ++ DCHECK(is_near); ++ USE(is_near); ++} ++ ++void TurboAssembler::Branch(Label* L, BranchDelaySlot bdslot) {SCOPEMARK_NAME(TurboAssembler::Branch, this); ++ if (L->is_bound()) { ++ if (is_near_branch(L)) { ++ BranchShort(L, bdslot); ++ } else { ++ BranchLong(L, bdslot); ++ } ++ } else { ++ if (is_trampoline_emitted()) { ++ BranchLong(L, bdslot); ++ } else { ++ BranchShort(L, bdslot); ++ } ++ } ++} ++ ++void TurboAssembler::Branch(Label* L, Condition cond, Register rs, ++ const Operand& rt, BranchDelaySlot bdslot) {SCOPEMARK_NAME(TurboAssembler::Branch, this); ++ if (L->is_bound()) { ++ if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) { ++ if (cond != cc_always) { ++ Label skip; ++ Condition neg_cond = NegateCondition(cond); ++ BranchShort(&skip, neg_cond, rs, rt); ++ BranchLong(L, bdslot); ++ bind(&skip); ++ } else { ++ BranchLong(L, bdslot); ++ } ++ } ++ } else { ++ if (is_trampoline_emitted()) { ++ if (cond != cc_always) { ++ Label skip; ++ Condition neg_cond = NegateCondition(cond); ++ BranchShort(&skip, neg_cond, rs, rt); ++ BranchLong(L, bdslot); ++ bind(&skip); ++ } else { ++ BranchLong(L, bdslot); ++ } ++ } else { ++ BranchShort(L, cond, rs, rt, bdslot); ++ } ++ } ++} ++ ++void TurboAssembler::Branch(Label* L, Condition cond, Register rs, ++ RootIndex index, BranchDelaySlot bdslot) {SCOPEMARK_NAME(TurboAssembler::Branch, this); ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ LoadRoot(scratch, index); ++ Branch(L, cond, rs, Operand(scratch), bdslot); ++} ++ ++void TurboAssembler::BranchShortHelper(int32_t offset, Label* L, ++ BranchDelaySlot bdslot) {SCOPEMARK_NAME(TurboAssembler::BranchShortHelper, this); ++ DCHECK(L == nullptr || offset == 0); ++ offset = GetOffset(offset, L, OffsetSize::kOffset21); ++ br(offset); ++} ++ ++void TurboAssembler::BranchShort(int32_t offset, BranchDelaySlot 
bdslot) {SCOPEMARK_NAME(TurboAssembler::BranchShort, this); ++ DCHECK(is_int21(offset)); ++ BranchShortHelper(offset, nullptr, bdslot); ++} ++ ++void TurboAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {SCOPEMARK_NAME(TurboAssembler::BranchShort, this); ++ BranchShortHelper(0, L, bdslot); ++} ++ ++ ++int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {SCOPEMARK_NAME(TurboAssembler::GetOffset, this); ++ if (L) { ++ offset = branch_offset_helper(L, bits) >> 2; ++ } else { ++ DCHECK(is_intn(offset, bits)); ++ } ++ return offset; ++} ++ ++Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt, ++ Register scratch) {SCOPEMARK_NAME(TurboAssembler::GetRtAsRegisterHelper, this); ++ Register r2 = no_reg; ++ if (rt.is_reg()) { ++ r2 = rt.rm(); ++ } else { ++ r2 = scratch; ++ li(r2, rt); ++ } ++ ++ return r2; ++} ++ ++bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, ++ OffsetSize bits) {SCOPEMARK_NAME(TurboAssembler::CalculateOffset, this); ++ if (!is_near(L, bits)) return false; ++ *offset = GetOffset(*offset, L, bits); ++ return true; ++} ++ ++bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, ++ Register* scratch, const Operand& rt) {SCOPEMARK_NAME(TurboAssembler::CalculateOffset, this); ++ if (!is_near(L, bits)) return false; ++ *scratch = GetRtAsRegisterHelper(rt, *scratch); ++ *offset = GetOffset(*offset, L, bits); ++ return true; ++} ++ ++bool TurboAssembler::BranchShortHelper(int32_t offset, Label* L, Condition cond, ++ Register rs, const Operand& rt, ++ BranchDelaySlot bdslot) {SCOPEMARK_NAME(TurboAssembler::BranchShortHelper, this); ++ DCHECK(L == nullptr || offset == 0); ++ if (!is_near(L, OffsetSize::kOffset21)) return false; ++ ++ UseScratchRegisterScope temps(this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register scratch = temps.hasAvailable() ? temps.Acquire() : t11; ++ Register scratch2 = gp; ++ int32_t offset32; ++ ++ // Be careful to always use shifted_branch_offset only just before the ++ // branch instruction, as the location will be remember for patching the ++ // target. ++ { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ switch (cond) { ++ case cc_always: ++ offset32 = GetOffset(offset, L, OffsetSize::kOffset21); ++ br(offset32); // b(offset32); ++ break; ++ case eq: ++ if (IsZero(rt)) { ++ offset32 = GetOffset(offset, L, OffsetSize::kOffset21); ++ beq(rs, offset32); // beq(rs, zero_reg, offset32); ++ } else { ++ // We don't want any other register but scratch clobbered. ++ scratch = GetRtAsRegisterHelper(rt, scratch); ++ DCHECK(rs!=scratch2 && scratch != scratch2); ++ cmpeq(rs, scratch, scratch2); ++ offset32 = GetOffset(offset, L, OffsetSize::kOffset21); ++ bne(scratch2, offset32); ++ } ++ break; ++ case ne: ++ if (IsZero(rt)) { ++ offset32 = GetOffset(offset, L, OffsetSize::kOffset21); ++ bne(rs, offset32); // bne(rs, zero_reg, offset32); ++ } else { ++ // We don't want any other register but scratch clobbered. ++ scratch = GetRtAsRegisterHelper(rt, scratch); ++ DCHECK(rs!=scratch2 && scratch != scratch2); ++ cmpeq(rs, scratch, scratch2); ++ offset32 = GetOffset(offset, L, OffsetSize::kOffset21); ++ beq(scratch2, offset32); ++ } ++ break; ++ ++ // Signed comparison. 
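++ // SW64 branches only test a single register against zero, so two-register ++ // compares go through cmpeq/Cmplt first and then branch on the 0/1 result.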
++ case greater: ++ if (IsZero(rt)) { ++ offset32 = GetOffset(offset, L, OffsetSize::kOffset21); ++ bgt(rs, offset32); // bgtz(rs, offset32); ++ } else { ++ Cmplt(scratch, GetRtAsRegisterHelper(rt, scratch), rs); ++ offset32 = GetOffset(offset, L, OffsetSize::kOffset21); ++ bne(scratch, offset32); // bne(scratch, zero_reg, offset32); ++ } ++ break; ++ case greater_equal: ++ if (IsZero(rt)) { ++ offset32 = GetOffset(offset, L, OffsetSize::kOffset21); ++ bge(rs, offset32); // bgez(rs, offset32); ++ } else { ++ Cmplt(scratch, rs, rt); ++ offset32 = GetOffset(offset, L, OffsetSize::kOffset21); ++ beq(scratch, offset32); // beq(scratch, zero_reg, offset32); ++ } ++ break; ++ case less: ++ if (IsZero(rt)) { ++ offset32 = GetOffset(offset, L, OffsetSize::kOffset21); ++ blt(rs, offset32); // bltz(rs, offset32); ++ } else { ++ Cmplt(scratch, rs, rt); ++ offset32 = GetOffset(offset, L, OffsetSize::kOffset21); ++ bne(scratch, offset32); // bne(scratch, zero_reg, offset32); ++ } ++ break; ++ case less_equal: ++ if (IsZero(rt)) { ++ offset32 = GetOffset(offset, L, OffsetSize::kOffset21); ++ ble(rs, offset32); // blez(rs, offset32); ++ } else { ++ Cmplt(scratch, GetRtAsRegisterHelper(rt, scratch), rs); ++ offset32 = GetOffset(offset, L, OffsetSize::kOffset21); ++ beq(scratch, offset32); // beq(scratch, zero_reg, offset32); ++ } ++ break; ++ ++ // Unsigned comparison. ++ case Ugreater: ++ if (IsZero(rt)) { ++ offset32 = GetOffset(offset, L, OffsetSize::kOffset21); ++ bne(rs, offset32); // bne(rs, zero_reg, offset32); ++ } else { ++ Cmpult(scratch, GetRtAsRegisterHelper(rt, scratch), rs); ++ offset32 = GetOffset(offset, L, OffsetSize::kOffset21); ++ bne(scratch, offset32); // bne(scratch, zero_reg, offset32); ++ } ++ break; ++ case Ugreater_equal: ++ if (IsZero(rt)) { ++ offset32 = GetOffset(offset, L, OffsetSize::kOffset21); ++ br(offset32); // b(offset32); ++ } else { ++ Cmpult(scratch, rs, rt); ++ offset32 = GetOffset(offset, L, OffsetSize::kOffset21); ++ beq(scratch, offset32); // beq(scratch, zero_reg, offset32); ++ } ++ break; ++ case Uless: ++ if (IsZero(rt)) { ++ return true; // No code needs to be emitted. 
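++ // (An unsigned value is never below zero, so this branch is never taken.)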
++ } else { ++ Cmpult(scratch, rs, rt); ++ offset32 = GetOffset(offset, L, OffsetSize::kOffset21); ++ bne(scratch, offset32); // bne(scratch, zero_reg, offset32); ++ } ++ break; ++ case Uless_equal: ++ if (IsZero(rt)) { ++ offset32 = GetOffset(offset, L, OffsetSize::kOffset21); ++ beq(rs, offset32); // beq(rs, zero_reg, offset32); ++ } else { ++ Cmpult(scratch, GetRtAsRegisterHelper(rt, scratch), rs); ++ offset32 = GetOffset(offset, L, OffsetSize::kOffset21); ++ beq(scratch, offset32); // beq(scratch, zero_reg, offset32); ++ } ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } ++ ++ return true; ++} ++ ++bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond, ++ Register rs, const Operand& rt, ++ BranchDelaySlot bdslot) {SCOPEMARK_NAME(TurboAssembler::BranchShortCheck, this); ++ BRANCH_ARGS_CHECK(cond, rs, rt); ++ ++ if (!L) { ++ DCHECK(is_int21(offset)); ++ return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot); ++ } else { ++ DCHECK_EQ(offset, 0); ++ return BranchShortHelper(0, L, cond, rs, rt, bdslot); ++ } ++ return false; ++} ++ ++void TurboAssembler::BranchShort(int32_t offset, Condition cond, Register rs, ++ const Operand& rt, BranchDelaySlot bdslot) {SCOPEMARK_NAME(TurboAssembler::BranchShort, this); ++ BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot); ++} ++ ++void TurboAssembler::BranchShort(Label* L, Condition cond, Register rs, ++ const Operand& rt, BranchDelaySlot bdslot) {SCOPEMARK_NAME(TurboAssembler::BranchShort, this); ++ BranchShortCheck(0, L, cond, rs, rt, bdslot); ++} ++ ++void TurboAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {SCOPEMARK_NAME(TurboAssembler::BranchAndLink, this); ++ BranchAndLinkShort(offset, bdslot); ++} ++ ++void TurboAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs, ++ const Operand& rt, BranchDelaySlot bdslot) {SCOPEMARK_NAME(TurboAssembler::BranchAndLink, this); ++ bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot); ++ DCHECK(is_near); ++ USE(is_near); ++} ++ ++void TurboAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {SCOPEMARK_NAME(TurboAssembler::BranchAndLink, this); ++ if (L->is_bound()) { ++ if (is_near_branch(L)) { ++ BranchAndLinkShort(L, bdslot); ++ } else { ++ BranchAndLinkLong(L, bdslot); ++ } ++ } else { ++ if (is_trampoline_emitted()) { ++ BranchAndLinkLong(L, bdslot); ++ } else { ++ BranchAndLinkShort(L, bdslot); ++ } ++ } ++} ++ ++void TurboAssembler::BranchAndLink(Label* L, Condition cond, Register rs, ++ const Operand& rt, BranchDelaySlot bdslot) {SCOPEMARK_NAME(TurboAssembler::BranchAndLink, this); ++ if (L->is_bound()) { ++ if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) { ++ Label skip; ++ Condition neg_cond = NegateCondition(cond); ++ BranchShort(&skip, neg_cond, rs, rt); ++ BranchAndLinkLong(L, bdslot); ++ bind(&skip); ++ } ++ } else { ++ if (is_trampoline_emitted()) { ++ Label skip; ++ Condition neg_cond = NegateCondition(cond); ++ BranchShort(&skip, neg_cond, rs, rt); ++ BranchAndLinkLong(L, bdslot); ++ bind(&skip); ++ } else { ++ BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot); ++ } ++ } ++} ++ ++void TurboAssembler::BranchAndLinkShortHelper(int32_t offset, Label* L, ++ BranchDelaySlot bdslot) {SCOPEMARK_NAME(TurboAssembler::BranchAndLinkShortHelper, this); ++ DCHECK(L == nullptr || offset == 0); ++ offset = GetOffset(offset, L, OffsetSize::kOffset21); ++ br(ra, offset); // bal(offset); ++} ++ ++void TurboAssembler::BranchAndLinkShort(int32_t offset, ++ BranchDelaySlot bdslot) 
{SCOPEMARK_NAME(TurboAssembler::BranchAndLinkShort, this); ++ DCHECK(is_int21(offset)); ++ BranchAndLinkShortHelper(offset, nullptr, bdslot); ++} ++ ++void TurboAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {SCOPEMARK_NAME(TurboAssembler::BranchAndLinkShort, this); ++ BranchAndLinkShortHelper(0, L, bdslot); ++} ++ ++// Pre r6 we need to use a bgezal or bltzal, but they can't be used directly ++// with the cmplt instructions. We could use sub or add instead but we would miss ++// overflow cases, so we keep cmplt and add an intermediate third instruction. ++bool TurboAssembler::BranchAndLinkShortHelper(int32_t offset, Label* L, ++ Condition cond, Register rs, ++ const Operand& rt, ++ BranchDelaySlot bdslot) {SCOPEMARK_NAME(TurboAssembler::BranchAndLinkShortHelper, this); ++ DCHECK(L == nullptr || offset == 0); ++ if (!is_near(L, OffsetSize::kOffset21)) return false; ++ ++ Register scratch = t11; ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ ++ switch (cond) { ++ case cc_always: ++ offset = GetOffset(offset, L, OffsetSize::kOffset21); ++ br(ra, offset); // bal(offset); ++ break; ++ case eq: ++ cmpeq(rs, GetRtAsRegisterHelper(rt, scratch), scratch); beq(scratch, 1); // bne(rs, GetRtAsRegisterHelper(rt, scratch), 2); ++ offset = GetOffset(offset, L, OffsetSize::kOffset21); ++ br(ra, offset); // bal(offset); ++ break; ++ case ne: ++ cmpeq(rs, GetRtAsRegisterHelper(rt, scratch), scratch); bne(scratch, 1); // beq(rs, GetRtAsRegisterHelper(rt, scratch), 2); ++ offset = GetOffset(offset, L, OffsetSize::kOffset21); ++ br(ra, offset); // bal(offset); ++ break; ++ ++ // Signed comparison. ++ case greater: ++ Cmplt(scratch, GetRtAsRegisterHelper(rt, scratch), rs); ++ Addw(scratch, scratch, Operand(-1)); ++ blt(scratch, 1); bsr(L); // bgezal(scratch, offset); ++ break; ++ case greater_equal: ++ Cmplt(scratch, rs, rt); ++ Addw(scratch, scratch, Operand(-1)); ++ bge(scratch, 1); bsr(L); // bltzal(scratch, offset); ++ break; ++ case less: ++ Cmplt(scratch, rs, rt); ++ Addw(scratch, scratch, Operand(-1)); ++ offset = GetOffset(offset, L, OffsetSize::kOffset21); ++ blt(scratch, 1); br(ra, offset); // bgezal(scratch, offset); ++ break; ++ case less_equal: ++ Cmplt(scratch, GetRtAsRegisterHelper(rt, scratch), rs); ++ Addw(scratch, scratch, Operand(-1)); ++ bge(scratch, 1); bsr(L); // bltzal(scratch, offset); ++ break; ++ ++ // Unsigned comparison. 
++ case Ugreater: ++ Cmpult(scratch, GetRtAsRegisterHelper(rt, scratch), rs); ++ Addw(scratch, scratch, Operand(-1)); ++ blt(scratch, 1); bsr(L); // bgezal(scratch, offset); ++ break; ++ case Ugreater_equal: ++ Cmpult(scratch, rs, rt); ++ Addw(scratch, scratch, Operand(-1)); ++ bge(scratch, 1); bsr(L); // bltzal(scratch, offset); ++ break; ++ case Uless: ++ Cmpult(scratch, rs, rt); ++ Addw(scratch, scratch, Operand(-1)); ++ blt(scratch, 1); bsr(L); // bgezal(scratch, offset); ++ break; ++ case Uless_equal: ++ Cmpult(scratch, GetRtAsRegisterHelper(rt, scratch), rs); ++ Addw(scratch, scratch, Operand(-1)); ++ bge(scratch, 1); bsr(L); // bltzal(scratch, offset); ++ break; ++ ++ default: ++ UNREACHABLE(); ++ } ++ ++ return true; ++} ++ ++bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L, ++ Condition cond, Register rs, ++ const Operand& rt, ++ BranchDelaySlot bdslot) {SCOPEMARK_NAME(TurboAssembler::BranchAndLinkShortCheck, this); ++ BRANCH_ARGS_CHECK(cond, rs, rt); ++ ++ if (!L) { ++ DCHECK(is_int21(offset)); ++ return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot); ++ } else { ++ DCHECK_EQ(offset, 0); ++ return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot); ++ } ++ return false; ++} ++ ++void TurboAssembler::LoadFromConstantsTable(Register destination, ++ int constant_index) {SCOPEMARK_NAME(TurboAssembler::LoadFromConstantsTable, this); ++ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable)); ++ LoadRoot(destination, RootIndex::kBuiltinsConstantsTable); ++ Ldl(destination, ++ FieldMemOperand(destination, ++ FixedArray::kHeaderSize + constant_index * kPointerSize)); ++} ++ ++void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {SCOPEMARK_NAME(TurboAssembler::LoadRootRelative, this); ++ Ldl(destination, MemOperand(kRootRegister, offset)); ++} ++ ++void TurboAssembler::LoadRootRegisterOffset(Register destination, ++ intptr_t offset) {SCOPEMARK_NAME(TurboAssembler::LoadRootRegisterOffset, this); ++ if (offset == 0) { ++ Move(destination, kRootRegister); ++ } else { ++ Addl(destination, kRootRegister, Operand(offset)); ++ } ++} ++ ++void TurboAssembler::Jump(Register target, Condition cond, Register rs, ++ const Operand& rt, BranchDelaySlot bd) {SCOPEMARK_NAME(TurboAssembler::Jump, this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ DCHECK_EQ(bd, PROTECT); ++ if (cond == cc_always) { ++ Assembler::jmp(zero_reg, target, 0); ++ } else { ++ BRANCH_ARGS_CHECK(cond, rs, rt); ++ Branch(1, NegateCondition(cond), rs, rt); ++ Assembler::jmp(zero_reg, target, 0); ++ } ++} ++ ++void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, ++ Condition cond, Register rs, const Operand& rt, ++ BranchDelaySlot bd) {SCOPEMARK_NAME(TurboAssembler::Jump, this); ++ Label skip; ++ DCHECK_EQ(bd, PROTECT); ++ { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ li(t12, Operand(target, rmode)); ++ if (cond != cc_always) { ++ Branch(&skip, NegateCondition(cond), rs, rt); ++ } ++ Jump(t12, al, zero_reg, Operand(zero_reg), bd); ++ bind(&skip); ++ } ++} ++ ++void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond, ++ Register rs, const Operand& rt, BranchDelaySlot bd) {SCOPEMARK_NAME(TurboAssembler::Jump, this); ++ DCHECK(!RelocInfo::IsCodeTarget(rmode)); ++ Jump(static_cast<intptr_t>(target), rmode, cond, rs, rt, bd); ++} ++ ++void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, ++ Condition cond, Register rs, const Operand& rt, ++ BranchDelaySlot bd) 
{SCOPEMARK_NAME(TurboAssembler::Jump, this); ++ DCHECK(RelocInfo::IsCodeTarget(rmode)); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ if (root_array_available_ && options().isolate_independent_code) { ++ IndirectLoadConstant(t12, code); ++ Addl(t12, t12, Operand(Code::kHeaderSize - kHeapObjectTag)); ++ Jump(t12, cond, rs, rt, bd); ++ return; ++ } else if (options().inline_offheap_trampolines) { ++ int builtin_index = Builtins::kNoBuiltinId; ++ if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) && ++ Builtins::IsIsolateIndependent(builtin_index)) { ++ // Inline the trampoline. ++ RecordCommentForOffHeapTrampoline(builtin_index); ++ CHECK_NE(builtin_index, Builtins::kNoBuiltinId); ++ EmbeddedData d = EmbeddedData::FromBlob(); ++ Address entry = d.InstructionStartOfBuiltin(builtin_index); ++ li(t12, Operand(entry, RelocInfo::OFF_HEAP_TARGET)); ++ Jump(t12, cond, rs, rt, bd); ++ return; ++ } ++ } ++ Jump(static_cast<intptr_t>(code.address()), rmode, cond, rs, rt, bd); ++} ++ ++void TurboAssembler::Jump(const ExternalReference& reference) { ++ li(t12, reference); ++ Jump(t12); ++} ++ ++ ++// Note: To call gcc-compiled C code on sw64, you must call through t12. ++void TurboAssembler::Call(Register target, Condition cond, Register rs, ++ const Operand& rt, BranchDelaySlot bd) {SCOPEMARK_NAME(TurboAssembler::Call, this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ DCHECK(bd == PROTECT); ++ if (cond == cc_always) { ++ call(ra, target, 0); ++ } else { ++ BRANCH_ARGS_CHECK(cond, rs, rt); ++ Branch(1, NegateCondition(cond), rs, rt); ++ call(ra, target, 0); ++ } ++ set_last_call_pc_(pc_); ++} ++ ++void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit, ++ unsigned higher_limit, ++ Label* on_in_range) { ++ if (lower_limit != 0) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ Subl(scratch, value, Operand(lower_limit)); ++ Branch(on_in_range, ls, scratch, Operand(higher_limit - lower_limit)); ++ } else { ++ Branch(on_in_range, ls, value, Operand(higher_limit - lower_limit)); ++ } ++} ++ ++void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, ++ Register rs, const Operand& rt, BranchDelaySlot bd) {SCOPEMARK_NAME(TurboAssembler::Call, this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ li(t12, Operand(static_cast<int64_t>(target), rmode), ADDRESS_LOAD); ++ Call(t12, cond, rs, rt, bd); ++} ++ ++void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode, ++ Condition cond, Register rs, const Operand& rt, ++ BranchDelaySlot bd) {SCOPEMARK_NAME(TurboAssembler::Call, this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ if (root_array_available_ && options().isolate_independent_code) { ++ IndirectLoadConstant(t12, code); ++ Addl(t12, t12, Operand(Code::kHeaderSize - kHeapObjectTag)); ++ Call(t12, cond, rs, rt, bd); ++ return; ++ } else if (options().inline_offheap_trampolines) { ++ int builtin_index = Builtins::kNoBuiltinId; ++ if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) && ++ Builtins::IsIsolateIndependent(builtin_index)) { ++ // Inline the trampoline. 
++ RecordCommentForOffHeapTrampoline(builtin_index); ++ CHECK_NE(builtin_index, Builtins::kNoBuiltinId); ++ EmbeddedData d = EmbeddedData::FromBlob(); ++ Address entry = d.InstructionStartOfBuiltin(builtin_index); ++ li(t12, Operand(entry, RelocInfo::OFF_HEAP_TARGET)); ++ Call(t12, cond, rs, rt, bd); ++ return; ++ } ++ } ++ DCHECK(RelocInfo::IsCodeTarget(rmode)); ++ DCHECK(code->IsExecutable()); ++ Call(code.address(), rmode, cond, rs, rt, bd); ++} ++ ++void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { ++ STATIC_ASSERT(kSystemPointerSize == 8); ++ STATIC_ASSERT(kSmiTagSize == 1); ++ STATIC_ASSERT(kSmiTag == 0); ++ ++ // The builtin_index register contains the builtin index as a Smi. ++ SmiUntag(builtin_index, builtin_index); ++ Dlsa(builtin_index, kRootRegister, builtin_index, kSystemPointerSizeLog2); ++ Ldl(builtin_index, ++ MemOperand(builtin_index, IsolateData::builtin_entry_table_offset())); ++} ++ ++void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { ++ LoadEntryFromBuiltinIndex(builtin_index); ++ Call(builtin_index); ++} ++ ++void TurboAssembler::PatchAndJump(Address target) { ++ if (kArchVariant != kSw64r3) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ mov(scratch, ra); ++ Align(8); ++ br(ra, 0); // jump to ld ++ ldl(t12, MemOperand(ra, kInstrSize * 3)); // ra == pc_ ++ mov(ra, scratch); ++ Assembler::jmp(zero_reg, t12, 0); ++ DCHECK_EQ(reinterpret_cast<uint64_t>(pc_) % 8, 0); ++ *reinterpret_cast<uint64_t*>(pc_) = target; // pc_ should be align. ++ pc_ += sizeof(uint64_t); ++ } else { ++ // TODO(sw64 r3): Implement. ++ UNIMPLEMENTED(); ++ } ++} ++ ++void TurboAssembler::StoreReturnAddressAndCall(Register target) {SCOPEMARK_NAME(TurboAssembler::StoreReturnAddressAndCall, this); ++ // This generates the final instruction sequence for calls to C functions ++ // once an exit frame has been constructed. ++ // ++ // Note that this assumes the caller code (i.e. the Code object currently ++ // being generated) is immovable or that the callee function cannot trigger ++ // GC, since the callee function will return to it. ++ ++ // Compute the return address in lr to return to after the jump below. The pc ++ // is already at '+ 8' from the current instruction; but return is after three ++ // instructions, so add another 4 to pc to get the return address. 
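++ // Here br with a zero displacement just captures the address of the next ++ // instruction in kScratchReg; ra is then set to that address plus ++ // (kNumInstructionsToJump + 1) * kInstrSize, i.e. just past the call below.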
++ ++Assembler::BlockTrampolinePoolScope block_trampoline_pool(this); ++ static constexpr int kNumInstructionsToJump = 2; ++ Label find_ra; ++ ++ Move(t12, target); ++ br(kScratchReg, 0); ++ addl(kScratchReg, (kNumInstructionsToJump + 1) * kInstrSize, ra); ++ bind(&find_ra); ++ stl(ra, MemOperand(sp, 0)); ++ call(ra, t12, 0); ++ DCHECK_EQ(kNumInstructionsToJump, InstructionsGeneratedSince(&find_ra)); ++ setfpec1(); ++} ++ ++void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt, ++ BranchDelaySlot bd) {SCOPEMARK_NAME(TurboAssembler::Ret, this); ++ Jump(ra, cond, rs, rt, bd); ++} ++ ++void TurboAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {SCOPEMARK_NAME(TurboAssembler::BranchLong, this); ++ // use jmp only when Label is bound and target is beyond br's limit ++ if (L->is_bound() && !is_near_pre_r3(L)) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ int64_t imm64; ++ imm64 = branch_long_offset(L); ++ DCHECK(is_int32(imm64)); ++ br(t8, 0); ++ li(t12, Operand(imm64)); ++ addl(t8, t12, t12); ++ Assembler::jmp(zero_reg, t12, 0); ++ } else { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ { ++ BlockGrowBufferScope block_buf_growth(this); ++ // Buffer growth (and relocation) must be blocked for internal references ++ // until associated instructions are emitted and available to be patched. ++ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED); ++ br(L); //ZHJ j(L); ++ } ++ } ++} ++ ++void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {SCOPEMARK_NAME(TurboAssembler::BranchAndLinkLong, this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ { ++ BlockGrowBufferScope block_buf_growth(this); ++ // Buffer growth (and relocation) must be blocked for internal references ++ // until associated instructions are emitted and available to be patched. ++ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED); ++ bsr(L); // jal(L); ++ } ++} ++ ++void TurboAssembler::DropAndRet(int drop) {SCOPEMARK_NAME(TurboAssembler::DropAndRet, this); ++ if(is_uint8(drop * kPointerSize)){ ++ addl(sp, drop * kPointerSize, sp); ++ }else{ ++ DCHECK(is_int16(drop * kPointerSize)); ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ ldi(scratch, drop * kPointerSize, zero_reg); ++ addl(sp, scratch, sp); ++ } ++ Ret(); ++} ++ ++void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1, ++ const Operand& r2) {SCOPEMARK_NAME(TurboAssembler::DropAndRet, this); ++ // Both Drop and Ret need to be conditional. 
++ Label skip; ++ if (cond != cc_always) { ++ Branch(&skip, NegateCondition(cond), r1, r2); ++ } ++ ++ Drop(drop); ++ Ret(); ++ ++ if (cond != cc_always) { ++ bind(&skip); ++ } ++} ++ ++void TurboAssembler::Drop(int count, Condition cond, Register reg, ++ const Operand& op) {SCOPEMARK_NAME(TurboAssembler::Drop, this); ++ if (count <= 0) { ++ return; ++ } ++ ++ Label skip; ++ ++ if (cond != al) { ++ Branch(&skip, NegateCondition(cond), reg, op); ++ } ++ ++ Addl(sp, sp, Operand(count * kPointerSize)); ++ ++ if (cond != al) { ++ bind(&skip); ++ } ++} ++ ++ ++ ++void MacroAssembler::Swap(Register reg1, ++ Register reg2, ++ Register scratch) {SCOPEMARK_NAME(MacroAssembler::Swap, this); ++ if (scratch == no_reg) { ++ Xor(reg1, reg1, Operand(reg2)); ++ Xor(reg2, reg2, Operand(reg1)); ++ Xor(reg1, reg1, Operand(reg2)); ++ } else { ++ mov(scratch, reg1); ++ mov(reg1, reg2); ++ mov(reg2, scratch); ++ } ++} ++ ++void TurboAssembler::Call(Label* target) { SCOPEMARK_NAME(TurboAssembler::Call, this); ++ BranchAndLink(target); } ++ ++void TurboAssembler::LoadAddress(Register dst, Label* target) { ++ uint64_t address = jump_address(target); ++ li(dst, address); ++} ++ ++void TurboAssembler::Push(Smi smi) {SCOPEMARK_NAME(TurboAssembler::Push, this); ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ li(scratch, Operand(smi)); ++ push(scratch); ++} ++ ++void TurboAssembler::Push(Handle handle) {SCOPEMARK_NAME(TurboAssembler::Push, this); ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ li(scratch, Operand(handle)); ++ push(scratch); ++} ++ ++//SKTODO ++void TurboAssembler::PushArray(Register array, Register size, Register scratch, ++ Register scratch2, PushArrayOrder order) { ++ DCHECK(!AreAliased(array, size, scratch, scratch2)); ++ Label loop, entry; ++ if (order == PushArrayOrder::kReverse) { ++ mov(scratch, zero_reg); ++ jmp(&entry); ++ bind(&loop); ++ Dlsa(scratch2, array, scratch, kPointerSizeLog2); ++ Ldl(scratch2, MemOperand(scratch2)); ++ push(scratch2); ++ Addl(scratch, scratch, Operand(1)); ++ bind(&entry); ++ Branch(&loop, less, scratch, Operand(size)); ++ } else { ++ mov(scratch, size); ++ jmp(&entry); ++ bind(&loop); ++ Dlsa(scratch2, array, scratch, kPointerSizeLog2); ++ Ldl(scratch2, MemOperand(scratch2)); ++ push(scratch2); ++ bind(&entry); ++ Addl(scratch, scratch, Operand(-1)); ++ Branch(&loop, greater_equal, scratch, Operand(zero_reg)); ++ } ++} ++ ++void MacroAssembler::MaybeDropFrames() {SCOPEMARK_NAME(MacroAssembler::MaybeDropFrames, this); ++ // Check whether we need to drop frames to restart a function on the stack. ++ li(a1, ExternalReference::debug_restart_fp_address(isolate())); ++ Ldl(a1, MemOperand(a1)); ++ Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET, ++ ne, a1, Operand(zero_reg)); ++} ++ ++// --------------------------------------------------------------------------- ++// Exception handling. ++ ++void MacroAssembler::PushStackHandler() {SCOPEMARK_NAME(MacroAssembler::PushStackHandler, this); ++ // Adjust this code if not the case. ++ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize); ++ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); ++ ++ Push(Smi::zero()); // Padding. ++ ++ // Link the current handler as the next handler. ++ li(t9, ++ ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate())); ++ Ldl(t1, MemOperand(t9)); ++ push(t1); ++ ++ // Set this new handler as the current one. 
++ Stl(sp, MemOperand(t9)); ++} ++ ++ ++void MacroAssembler::PopStackHandler() {SCOPEMARK_NAME(MacroAssembler::PopStackHandler, this); ++ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); ++ pop(a1); ++ Addl(sp, sp, Operand(static_cast(StackHandlerConstants::kSize - ++ kPointerSize))); ++// UseScratchRegisterScope temps(this); ++// Register scratch = temps.Acquire(); ++// li(scratch, ++ li(t9, ++ ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate())); ++ Stl(a1, MemOperand(t9)); ++} ++ ++void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst, ++ const DoubleRegister src) {SCOPEMARK_NAME(TurboAssembler::FPUCanonicalizeNaN, this); ++ fsubd(src, kDoubleRegZero, dst); ++} ++ ++void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {SCOPEMARK_NAME(TurboAssembler::MovFromFloatResult, this); ++ Move(dst, f0); // Reg f0 is o32 ABI FP return value. ++} ++ ++void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {SCOPEMARK_NAME(TurboAssembler::MovFromFloatParameter, this); ++ Move(dst, f16); // Reg f16 is sw64 ABI FP first argument value. ++} ++ ++void TurboAssembler::MovToFloatParameter(DoubleRegister src) {SCOPEMARK_NAME(TurboAssembler::MovToFloatParameter, this); ++ Move(f16, src); ++} ++ ++void TurboAssembler::MovToFloatResult(DoubleRegister src) {SCOPEMARK_NAME(TurboAssembler::MovToFloatResult, this); ++ Move(f0, src); ++} ++ ++void TurboAssembler::MovToFloatParameters(DoubleRegister fsrc0, ++ DoubleRegister fsrc1) {SCOPEMARK_NAME(TurboAssembler::MovToFloatParameters, this); ++ const DoubleRegister fparg2 = f17; ++ if (fsrc1 == f16) { ++ DCHECK(fsrc0 != fparg2); ++ Move(fparg2, fsrc1); ++ Move(f16, fsrc0); ++ } else { ++ Move(f16, fsrc0); ++ Move(fparg2, fsrc1); ++ } ++} ++ ++#ifdef SW64 ++void TurboAssembler::MovFromGeneralResult(const Register dst) {SCOPEMARK_NAME(TurboAssembler::MovFromGeneralResult, this); ++ Move(dst, v0); ++} ++ ++void TurboAssembler::MovFromGeneralParameter(const Register dst) {SCOPEMARK_NAME(TurboAssembler::MovFromGeneralParameter, this); ++ Move(dst, a0); ++} ++ ++void TurboAssembler::MovToGeneralParameter(Register src) {SCOPEMARK_NAME(TurboAssembler::MovToGeneralParameter, this); ++ Move(a0, src); ++} ++ ++void TurboAssembler::MovToGeneralResult(Register src) {SCOPEMARK_NAME(TurboAssembler::MovToGeneralResult, this); ++ Move(a0, src); ++} ++ ++void TurboAssembler::MovToGeneralParameters(Register src0, ++ Register src1) {SCOPEMARK_NAME(TurboAssembler::MovToGeneralParameters, this); ++ if (src1 == a0) { ++ if (src0 != a1) { ++ Move(a1, src1); // src1 = a0 ++ Move(a0, src0); ++ } else { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ Move(scratch, src1); // at = src1(a0) ++ Move(a0, src0); // a0 = src0(a1) ++ Move(a1, scratch); // a1 = at ++ } ++ } else { ++ Move(a0, src0); ++ Move(a1, src1); ++ } ++} ++#endif ++ ++// ----------------------------------------------------------------------------- ++// JavaScript invokes. ++ ++void TurboAssembler::PrepareForTailCall(Register callee_args_count, ++ Register caller_args_count, ++ Register scratch0, Register scratch1) { ++ // Calculate the end of destination area where we will put the arguments ++ // after we drop current frame. We add kPointerSize to count the receiver ++ // argument which is not included into formal parameters count. 
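++  //
++  // Concretely (kPointerSizeLog2 == 3 on SW64):
++  //   dst_reg = fp + (caller_args_count << 3)
++  //             + StandardFrameConstants::kCallerSPOffset + kPointerSize,
++  // i.e. the end of the destination area, with the extra kPointerSize
++  // covering the receiver slot mentioned above.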
++ Register dst_reg = scratch0; ++ s8addl(caller_args_count, fp, dst_reg); DCHECK_EQ(kPointerSizeLog2, 3); ++ Addl(dst_reg, dst_reg, ++ Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize)); ++ ++ Register src_reg = caller_args_count; ++ // Calculate the end of source area. +kPointerSize is for the receiver. ++ Dlsa(src_reg, sp, callee_args_count, kPointerSizeLog2); ++ Addl(src_reg, src_reg, Operand(kPointerSize)); ++ ++ if (FLAG_debug_code) { ++ Check(lo, AbortReason::kStackAccessBelowStackPointer, src_reg, ++ Operand(dst_reg)); ++ } ++ ++ // Restore caller's frame pointer and return address now as they will be ++ // overwritten by the copying loop. ++ Ldl(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset)); ++ Ldl(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); ++ ++ // Now copy callee arguments to the caller frame going backwards to avoid ++ // callee arguments corruption (source and destination areas could overlap). ++ ++ // Both src_reg and dst_reg are pointing to the word after the one to copy, ++ // so they must be pre-decremented in the loop. ++ Register tmp_reg = scratch1; ++ Label loop, entry; ++ Branch(&entry); ++ bind(&loop); ++ Subl(src_reg, src_reg, Operand(kPointerSize)); ++ Subl(dst_reg, dst_reg, Operand(kPointerSize)); ++ Ldl(tmp_reg, MemOperand(src_reg)); ++ Stl(tmp_reg, MemOperand(dst_reg)); ++ bind(&entry); ++ Branch(&loop, ne, sp, Operand(src_reg)); ++ ++ // Leave current frame. ++ mov(sp, dst_reg); ++} ++ ++void MacroAssembler::InvokePrologue(Register expected_parameter_count, ++ Register actual_parameter_count, ++ Label* done, InvokeFlag flag) { ++ Label regular_invoke; ++ ++ // a0: actual arguments count ++ // a1: function (passed through to callee) ++ // a2: expected arguments count ++ ++ DCHECK_EQ(actual_parameter_count, a0); ++ DCHECK_EQ(expected_parameter_count, a2); ++ ++ Branch(®ular_invoke, eq, expected_parameter_count, ++ Operand(actual_parameter_count)); ++ ++ Handle adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline); ++ if (flag == CALL_FUNCTION) { ++ Call(adaptor); ++ Branch(done); ++ } else { ++ Jump(adaptor, RelocInfo::CODE_TARGET); ++ } ++ ++ bind(®ular_invoke); ++ } ++ ++void MacroAssembler::CheckDebugHook(Register fun, Register new_target, ++ Register expected_parameter_count, ++ Register actual_parameter_count) { ++ Label skip_hook; ++ ++ li(t0, ExternalReference::debug_hook_on_function_call_address(isolate())); ++ Ldb(t0, MemOperand(t0)); ++ Branch(&skip_hook, eq, t0, Operand(zero_reg)); ++ ++ { ++ // Load receiver to pass it later to DebugOnFunctionCall hook. ++ LoadReceiver(t0, actual_parameter_count); ++ ++ FrameScope frame(this, ++ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL); ++ SmiTag(expected_parameter_count); ++ Push(expected_parameter_count); ++ ++ SmiTag(actual_parameter_count); ++ Push(actual_parameter_count); ++ ++ if (new_target.is_valid()) { ++ Push(new_target); ++ } ++ Push(fun); ++ Push(fun); ++ Push(t0); ++ CallRuntime(Runtime::kDebugOnFunctionCall); ++ Pop(fun); ++ if (new_target.is_valid()) { ++ Pop(new_target); ++ } ++ ++ Pop(actual_parameter_count); ++ SmiUntag(actual_parameter_count); ++ ++ Pop(expected_parameter_count); ++ SmiUntag(expected_parameter_count); ++ } ++ bind(&skip_hook); ++} ++ ++void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, ++ Register expected_parameter_count, ++ Register actual_parameter_count, ++ InvokeFlag flag) { ++ // You can't call a function without a valid frame. 
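++  // Register contract on entry (checked by the DCHECKs below and relied on
++  // by InvokePrologue): a1 holds the function, a3 the optional new.target,
++  // a0 the actual and a2 the expected argument count.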
++ DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame()); ++ DCHECK_EQ(function, a1); ++ DCHECK_IMPLIES(new_target.is_valid(), new_target == a3); ++ ++ // On function call, call into the debugger if necessary. ++ CheckDebugHook(function, new_target, expected_parameter_count, ++ actual_parameter_count); ++ ++ // Clear the new.target register if not given. ++ if (!new_target.is_valid()) { ++ LoadRoot(a3, RootIndex::kUndefinedValue); ++ } ++ ++ Label done; ++ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag); ++ // We call indirectly through the code field in the function to ++ // allow recompilation to take effect without changing any of the ++ // call sites. ++ Register code = kJavaScriptCallCodeStartRegister; ++ Ldl(code, FieldMemOperand(function, JSFunction::kCodeOffset)); ++ if (flag == CALL_FUNCTION) { ++ Addl(code, code, Operand(Code::kHeaderSize - kHeapObjectTag)); ++ Call(code); ++ } else { ++ DCHECK(flag == JUMP_FUNCTION); ++ Addl(code, code, Operand(Code::kHeaderSize - kHeapObjectTag)); ++ Jump(code); ++ } ++ // Continue here if InvokePrologue does handle the invocation due to ++ // mismatched parameter counts. ++ bind(&done); ++ } ++ ++void MacroAssembler::InvokeFunctionWithNewTarget( ++ Register function, Register new_target, Register actual_parameter_count, ++ InvokeFlag flag) { ++ // You can't call a function without a valid frame. ++ DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame()); ++ ++ // Contract with called JS functions requires that function is passed in a1. ++ DCHECK_EQ(function, a1); ++ Register expected_parameter_count = a2; ++ Register temp_reg = t0; ++ Ldl(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); ++ Ldl(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); ++ // The argument count is stored as int32_t on 64-bit platforms. ++ // TODO(plind): Smi on 32-bit platforms. ++ Ldhu(expected_parameter_count, ++ FieldMemOperand(temp_reg, ++ SharedFunctionInfo::kFormalParameterCountOffset)); ++ ++ InvokeFunctionCode(a1, new_target, expected_parameter_count, ++ actual_parameter_count, flag); ++} ++ ++void MacroAssembler::InvokeFunction(Register function, ++ Register expected_parameter_count, ++ Register actual_parameter_count, ++ InvokeFlag flag) { ++ // You can't call a function without a valid frame. ++ DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame()); ++ ++ // Contract with called JS functions requires that function is passed in a1. ++ DCHECK_EQ(function, a1); ++ ++ // Get the function and setup the context. ++ Ldl(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); ++ ++ InvokeFunctionCode(a1, no_reg, expected_parameter_count, ++ actual_parameter_count, flag); ++} ++ ++ ++// --------------------------------------------------------------------------- ++// Support functions. ++ ++void MacroAssembler::GetObjectType(Register object, ++ Register map, ++ Register type_reg) {SCOPEMARK_NAME(MacroAssembler::GetObjectType, this); ++ LoadMap(map, object); ++ Ldhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); ++} ++ ++ ++// ----------------------------------------------------------------------------- ++// Runtime calls. 
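++
++// Overflow detection used by the helpers below: for a signed addition
++// r = a + b, overflow occurred iff (r ^ a) & (r ^ b) has its sign bit set
++// (the operands share a sign that the result does not), and for a signed
++// subtraction r = a - b iff (a ^ r) & (a ^ b) has its sign bit set. The
++// helpers leave that value in |overflow| for the caller to test.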
++ ++void TurboAssembler::DaddOverflow(Register dst, Register left, ++ const Operand& right, Register overflow) {SCOPEMARK_NAME(TurboAssembler::DaddOverflow, this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register right_reg = no_reg; ++ Register scratch = t11; ++ if (!right.is_reg()) { ++ li(at, Operand(right)); ++ right_reg = at; ++ } else { ++ right_reg = right.rm(); ++ } ++ ++ DCHECK(left != scratch && right_reg != scratch && dst != scratch && ++ overflow != scratch); ++ DCHECK(overflow != left && overflow != right_reg); ++ ++ if (dst == left || dst == right_reg) { ++ addl(left, right_reg,scratch); ++ xor_ins(scratch, left, overflow); ++ xor_ins(scratch, right_reg, at); ++ and_ins(overflow, at, overflow); ++ mov(dst, scratch); ++ } else { ++ addl(left, right_reg,dst); ++ xor_ins(dst, left, overflow); ++ xor_ins(dst, right_reg, at); ++ and_ins(overflow, at, overflow); ++ ++ } ++} ++ ++void TurboAssembler::DsubOverflow(Register dst, Register left, ++ const Operand& right, Register overflow) {SCOPEMARK_NAME(TurboAssembler::DsubOverflow, this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register right_reg = no_reg; ++ Register scratch = t11; ++ if (!right.is_reg()) { ++ li(at, Operand(right)); ++ right_reg = at; ++ } else { ++ right_reg = right.rm(); ++ } ++ ++ DCHECK(left != scratch && right_reg != scratch && dst != scratch && ++ overflow != scratch); ++ DCHECK(overflow != left && overflow != right_reg); ++ ++ if (dst == left || dst == right_reg) { ++ subl(left, right_reg,scratch); ++ xor_ins(left, scratch, overflow); ++ xor_ins(left, right_reg, at); ++ and_ins(overflow, at, overflow); ++ mov(dst, scratch); ++ } else { ++ subl(left, right_reg,dst); ++ xor_ins(left, dst, overflow); ++ xor_ins(left, right_reg, at); ++ and_ins(overflow, at, overflow); ++ } ++} ++ ++void TurboAssembler::MulOverflow(Register dst, Register left, ++ const Operand& right, Register overflow) {SCOPEMARK_NAME(TurboAssembler::MulOverflow, this); ++ Register right_reg = no_reg; ++ Register scratch = t11; ++ if (!right.is_reg()) { ++ li(at, Operand(right)); ++ right_reg = at; ++ } else { ++ right_reg = right.rm(); ++ } ++ ++ DCHECK(left != scratch && right_reg != scratch && dst != scratch && ++ overflow != scratch); ++ DCHECK(overflow != left && overflow != right_reg); ++ ++ if (dst == left || dst == right_reg) { ++ Mulw(scratch, left, right_reg); ++ Mulwh(overflow, left, right_reg); ++ mov(dst, scratch); ++ } else { ++ Mulw(dst, left, right_reg); ++ Mulwh(overflow, left, right_reg); ++ } ++ ++ sral(dst, 32 ,scratch); ++ xor_ins(overflow, scratch, overflow); ++} ++ ++void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, ++ SaveFPRegsMode save_doubles) {SCOPEMARK_NAME(MacroAssembler::CallRuntime, this); ++ // All parameters are on the stack. v0 has the return value after call. ++ ++ // If the expected number of arguments of the runtime function is ++ // constant, we check that the actual number of arguments match the ++ // expectation. ++ CHECK(f->nargs < 0 || f->nargs == num_arguments); ++ ++ // TODO(1236192): Most runtime routines don't need the number of ++ // arguments passed in because it is constant. At some point we ++ // should remove this need and make the runtime routine entry code ++ // smarter. 
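++  // The code below follows the usual CEntry sequence: PrepareCEntryArgs and
++  // PrepareCEntryFunction stage the argument count and the runtime entry
++  // point, and the CEntry stub obtained from CodeFactory then dispatches
++  // into the runtime.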
++ PrepareCEntryArgs(num_arguments); ++ PrepareCEntryFunction(ExternalReference::Create(f)); ++ Handle code = ++ CodeFactory::CEntry(isolate(), f->result_size, save_doubles); ++ Call(code, RelocInfo::CODE_TARGET); ++} ++ ++void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {SCOPEMARK_NAME(MacroAssembler::TailCallRuntime, this); ++ const Runtime::Function* function = Runtime::FunctionForId(fid); ++ DCHECK_EQ(1, function->result_size); ++ if (function->nargs >= 0) { ++ PrepareCEntryArgs(function->nargs); ++ } ++ JumpToExternalReference(ExternalReference::Create(fid)); ++} ++ ++void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin, ++ BranchDelaySlot bd, ++ bool builtin_exit_frame) {SCOPEMARK_NAME(MacroAssembler::JumpToExternalReference, this); ++ PrepareCEntryFunction(builtin); ++ Handle code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs, ++ kArgvOnStack, builtin_exit_frame); ++ Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), bd); ++} ++ ++void MacroAssembler::JumpToInstructionStream(Address entry) {SCOPEMARK_NAME(MacroAssembler::JumpToExternalReference, this); ++ li(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET)); ++ Jump(kOffHeapTrampolineRegister); ++} ++ ++void MacroAssembler::LoadWeakValue(Register out, Register in, ++ Label* target_if_cleared) {SCOPEMARK_NAME(MacroAssembler::LoadWeakValue, this); ++ Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObjectLower32)); ++ ++ And(out, in, Operand(~kWeakHeapObjectMask)); ++} ++ ++void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, ++ Register scratch1, Register scratch2) {SCOPEMARK_NAME(MacroAssembler::IncrementCounter, this); ++ DCHECK_GT(value, 0); ++ if (FLAG_native_code_counters && counter->Enabled()) { ++ li(scratch2, ExternalReference::Create(counter)); ++ Ldw(scratch1, MemOperand(scratch2)); ++ Addw(scratch1, scratch1, Operand(value)); ++ Stw(scratch1, MemOperand(scratch2)); ++ } ++} ++ ++ ++void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, ++ Register scratch1, Register scratch2) {SCOPEMARK_NAME(MacroAssembler::DecrementCounter, this); ++ DCHECK_GT(value, 0); ++ if (FLAG_native_code_counters && counter->Enabled()) { ++ li(scratch2, ExternalReference::Create(counter)); ++ Ldw(scratch1, MemOperand(scratch2)); ++ Subw(scratch1, scratch1, Operand(value)); ++ Stw(scratch1, MemOperand(scratch2)); ++ } ++} ++ ++ ++// ----------------------------------------------------------------------------- ++// Debugging. ++ ++void TurboAssembler::Trap() { halt(); } ++void TurboAssembler::DebugBreak() { halt(); } ++ ++void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs, ++ Operand rt) {SCOPEMARK_NAME(TurboAssembler::Assert, this); ++ if (emit_debug_code()) ++ Check(cc, reason, rs, rt); ++} ++ ++void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs, ++ Operand rt) {SCOPEMARK_NAME(TurboAssembler::Check, this); ++ Label L; ++ Branch(&L, cc, rs, rt); ++ Abort(reason); ++ // Will not return here. ++ bind(&L); ++} ++ ++void TurboAssembler::Abort(AbortReason reason) {SCOPEMARK_NAME(TurboAssembler::Abort, this); ++ Label abort_start; ++ bind(&abort_start); ++#ifdef DEBUG ++ const char* msg = GetAbortReason(reason); ++ RecordComment("Abort message: "); ++ RecordComment(msg); ++#endif ++ ++ // Avoid emitting call to builtin if requested. ++ if (trap_on_abort()) { ++ halt();//stop(msg); ++ return; ++ } ++ ++ if (should_abort_hard()) { ++ // We don't care if we constructed a frame. 
Just pretend we did. ++ FrameScope assume_frame(this, StackFrame::NONE); ++ PrepareCallCFunction(0, a0); ++ li(a0, Operand(static_cast(reason))); ++ CallCFunction(ExternalReference::abort_with_reason(), 1); ++ return; ++ } ++ ++ Move(a0, Smi::FromInt(static_cast(reason))); ++ ++ // Disable stub call restrictions to always allow calls to abort. ++ if (!has_frame()) { ++ // We don't actually want to generate a pile of code for this, so just ++ // claim there is a stack frame, without generating one. ++ FrameScope scope(this, StackFrame::NONE); ++ Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET); ++ } else { ++ Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET); ++ } ++ // Will not return here. ++ if (is_trampoline_pool_blocked()) { ++ // If the calling code cares about the exact number of ++ // instructions generated, we insert padding here to keep the size ++ // of the Abort macro constant. ++ // Currently in debug mode with debug_code enabled the number of ++ // generated instructions is 10, so we use this as a maximum value. ++ static const int kExpectedAbortInstructions = 10; ++ int abort_instructions = InstructionsGeneratedSince(&abort_start); ++ DCHECK_LE(abort_instructions, kExpectedAbortInstructions); ++ while (abort_instructions++ < kExpectedAbortInstructions) { ++ nop(); ++ } ++ } ++} ++ ++void MacroAssembler::LoadMap(Register destination, Register object) { ++ Ldl(destination, FieldMemOperand(object, HeapObject::kMapOffset)); ++} ++ ++void MacroAssembler::LoadNativeContextSlot(int index, Register dst) { ++ LoadMap(dst, cp); ++ Ldl(dst, ++ FieldMemOperand(dst, Map::kConstructorOrBackPointerOrNativeContextOffset)); ++ Ldl(dst, MemOperand(dst, Context::SlotOffset(index))); ++} ++ ++void TurboAssembler::StubPrologue(StackFrame::Type type) {SCOPEMARK_NAME(TurboAssembler::StubPrologue, this); ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ li(scratch, Operand(StackFrame::TypeToMarker(type))); ++ PushCommonFrame(scratch); ++} ++ ++void TurboAssembler::Prologue() {SCOPEMARK_NAME(TurboAssembler::Prologue, this); ++ PushStandardFrame(a1); } ++ ++void TurboAssembler::EnterFrame(StackFrame::Type type) {SCOPEMARK_NAME(TurboAssembler::EnterFrame, this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ int stack_offset = -3 * kPointerSize; ++ const int fp_offset = 1 * kPointerSize; ++ subl(sp, (-stack_offset), sp); //stack_offset is negtive ++ stack_offset = -stack_offset - kPointerSize; ++ Stl(ra, MemOperand(sp, stack_offset)); ++ stack_offset -= kPointerSize; ++ Stl(fp, MemOperand(sp, stack_offset)); ++ stack_offset -= kPointerSize; ++ li(t12, Operand(StackFrame::TypeToMarker(type))); ++ Stl(t12, MemOperand(sp, stack_offset)); ++ // Adjust FP to point to saved FP. ++ DCHECK_EQ(stack_offset, 0); ++ Addl(fp, sp, Operand(fp_offset)); ++} ++ ++void TurboAssembler::LeaveFrame(StackFrame::Type type) {SCOPEMARK_NAME(TurboAssembler::LeaveFrame, this); ++ addl(fp, 2 * kPointerSize, sp); ++ Ldl(ra, MemOperand(fp, 1 * kPointerSize)); ++ Ldl(fp, MemOperand(fp, 0 * kPointerSize)); ++} ++ ++void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, ++ StackFrame::Type frame_type) {SCOPEMARK_NAME(MacroAssembler::EnterExitFrame, this); ++ DCHECK(frame_type == StackFrame::EXIT || ++ frame_type == StackFrame::BUILTIN_EXIT); ++ ++ // Set up the frame structure on the stack. 
++ STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement); ++ STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset); ++ STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset); ++ ++ // This is how the stack will look: ++ // fp + 2 (==kCallerSPDisplacement) - old stack's end ++ // [fp + 1 (==kCallerPCOffset)] - saved old ra ++ // [fp + 0 (==kCallerFPOffset)] - saved old fp ++ // [fp - 1 StackFrame::EXIT Smi ++ // [fp - 2 (==kSPOffset)] - sp of the called function ++ // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the ++ // new stack (will contain saved ra) ++ ++ // Save registers and reserve room for saved entry sp and code object. ++ subl(sp, 2 * kPointerSize + ExitFrameConstants::kFixedFrameSizeFromFp, sp); ++ Stl(ra, MemOperand(sp, 3 * kPointerSize)); ++ Stl(fp, MemOperand(sp, 2 * kPointerSize)); ++ { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ li(scratch, Operand(StackFrame::TypeToMarker(frame_type))); ++ Stl(scratch, MemOperand(sp, 1 * kPointerSize)); ++ } ++ // Set up new frame pointer. ++ addl(sp, ExitFrameConstants::kFixedFrameSizeFromFp, fp); ++ ++ if (emit_debug_code()) { ++ Stl(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset)); ++ } ++ ++ { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ // Save the frame pointer and the context in top. ++ li(t11, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, ++ isolate())); ++ Stl(fp, MemOperand(t11)); ++ li(t11, ++ ExternalReference::Create(IsolateAddressId::kContextAddress, isolate())); ++ Stl(cp, MemOperand(t11)); ++ } ++ ++ const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); ++ if (save_doubles) { ++ // The stack is already aligned to 0 modulo 8 for stores with sdc1. ++ int kNumOfSavedRegisters = FPURegister::kNumRegisters; ++ int space = kNumOfSavedRegisters * kDoubleSize; ++ Subl(sp, sp, Operand(space)); ++ // Remember: we only need to save every 2nd double FPU value. ++ for (int i = 0; i < kNumOfSavedRegisters; i++) { ++ FPURegister reg = FPURegister::from_code(i); ++ Fstd(reg, MemOperand(sp, i * kDoubleSize)); ++ } ++ } ++ ++ // Reserve place for the return address, stack space and an optional slot ++ // (used by DirectCEntry to hold the return value if a struct is ++ // returned) and align the frame preparing for calling the runtime function. ++ DCHECK_GE(stack_space, 0); ++ Subl(sp, sp, Operand((stack_space + 2) * kPointerSize)); ++ if (frame_alignment > 0) { ++ DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); ++ And(sp, sp, Operand(-frame_alignment)); // Align stack. ++ } ++ ++ // Set the exit frame sp value to point just before the return address ++ // location. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ addl(sp, kPointerSize, scratch); ++ Stl(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset)); ++} ++ ++void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count, ++ bool do_return, ++ bool argument_count_is_length) {SCOPEMARK_NAME(MacroAssembler::LeaveExitFrame, this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ // Optionally restore all double registers. ++ if (save_doubles) { ++ // Remember: we only need to restore every 2nd double FPU value. 
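++    // (On SW64 the loop below simply reloads all FPURegister::kNumRegisters
++    // values, one kDoubleSize slot each, matching what EnterExitFrame saved.)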
++ int kNumOfSavedRegisters = FPURegister::kNumRegisters; ++ Subl(t11, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp + ++ kNumOfSavedRegisters * kDoubleSize)); ++ for (int i = 0; i < kNumOfSavedRegisters; i++) { ++ FPURegister reg = FPURegister::from_code(i); ++ Fldd(reg, MemOperand(t11, i * kDoubleSize)); ++ } ++ } ++ ++ // Clear top frame. ++ li(t11, ++ ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate())); ++ Stl(zero_reg, MemOperand(t11)); ++ ++ // Restore current context from top and clear it in debug mode. ++ li(t11, ++ ExternalReference::Create(IsolateAddressId::kContextAddress, isolate())); ++ Ldl(cp, MemOperand(t11)); ++ ++#ifdef DEBUG ++ li(t11, ++ ExternalReference::Create(IsolateAddressId::kContextAddress, isolate())); ++ Stl(a3, MemOperand(t11)); ++#endif ++ ++ // Pop the arguments, restore registers, and return. ++ mov(sp, fp); // Respect ABI stack constraint. ++ Ldl(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset)); ++ Ldl(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset)); ++ ++ if (argument_count.is_valid()) { ++ if (argument_count_is_length) { ++ addl(sp, argument_count,sp); ++ } else { ++ s8addl(argument_count, sp, sp); DCHECK_EQ(kPointerSizeLog2, 3); ++ } ++ } ++ ++ addl(sp, 2 * kPointerSize, sp); ++ if (do_return) { ++ // If returning, the instruction in the delay slot will be the addw below. ++ Ret(); ++ } ++} ++ ++int TurboAssembler::ActivationFrameAlignment() { ++#if V8_HOST_ARCH_SW64 ++ // Running on the real platform. Use the alignment as mandated by the local ++ // environment. ++ // Note: This will break if we ever start generating snapshots on one Sw64 ++ // platform for another Sw64 platform with a different alignment. ++ return base::OS::ActivationFrameAlignment(); ++#endif // V8_HOST_ARCH_SW64 ++} ++ ++ ++void MacroAssembler::AssertStackIsAligned() {SCOPEMARK_NAME(MacroAssembler::AssertStackIsAligned, this); ++ if (emit_debug_code()) { ++ const int frame_alignment = ActivationFrameAlignment(); ++ const int frame_alignment_mask = frame_alignment - 1; ++ ++ if (frame_alignment > kPointerSize) { ++ Label alignment_as_expected; ++ DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); ++ { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ and_ins(sp, frame_alignment_mask,scratch); ++ Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg)); ++ } ++ // Don't use Check here, as it will call Runtime_Abort re-entering here. 
++ halt();//stop("Unexpected stack alignment"); ++ bind(&alignment_as_expected); ++ } ++ } ++} ++ ++void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {SCOPEMARK_NAME(TurboAssembler::SmiUntag, this); ++ if (SmiValuesAre32Bits()) { ++ Ldw(dst, MemOperand(src.rm(), SmiWordOffset(src.offset()))); ++ } else { ++ DCHECK(SmiValuesAre31Bits()); ++ Ldw(dst, src); ++ SmiUntag(dst); ++ } ++} ++ ++void TurboAssembler::JumpIfSmi(Register value, Label* smi_label, ++ Register scratch, BranchDelaySlot bd) {SCOPEMARK_NAME(TurboAssembler::JumpIfSmi, this); ++ DCHECK_EQ(0, kSmiTag); ++ and_ins(value, kSmiTagMask,scratch); ++ Branch(bd, smi_label, eq, scratch, Operand(zero_reg)); ++} ++ ++void MacroAssembler::JumpIfNotSmi(Register value, ++ Label* not_smi_label, ++ Register scratch, ++ BranchDelaySlot bd) {SCOPEMARK_NAME(MacroAssembler::JumpIfNotSmi, this); ++ DCHECK_EQ(0, kSmiTag); ++ and_ins(value, kSmiTagMask,scratch); ++ Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg)); ++} ++ ++void MacroAssembler::AssertNotSmi(Register object) {SCOPEMARK_NAME(MacroAssembler::AssertNotSmi, this); ++ if (emit_debug_code()) { ++ STATIC_ASSERT(kSmiTag == 0); ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ and_ins(object, kSmiTagMask,scratch); ++ Check(ne, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg)); ++ } ++} ++ ++ ++void MacroAssembler::AssertSmi(Register object) {SCOPEMARK_NAME(MacroAssembler::AssertSmi, this); ++ if (emit_debug_code()) { ++ STATIC_ASSERT(kSmiTag == 0); ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ and_ins(object, kSmiTagMask,scratch); ++ Check(eq, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg)); ++ } ++} ++ ++void MacroAssembler::AssertConstructor(Register object) {SCOPEMARK_NAME(MacroAssembler::AssertConstructor, this); ++ if (emit_debug_code()) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ STATIC_ASSERT(kSmiTag == 0); ++ SmiTst(object, t11); ++ Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, t11, ++ Operand(zero_reg)); ++ LoadMap(t11, object); ++ Ldbu(t11, FieldMemOperand(t11, Map::kBitFieldOffset)); ++ And(t11, t11, Operand(Map::Bits1::IsConstructorBit::kMask)); ++ Check(ne, AbortReason::kOperandIsNotAConstructor, t11, Operand(zero_reg)); ++ } ++} ++ ++void MacroAssembler::AssertFunction(Register object) {SCOPEMARK_NAME(MacroAssembler::AssertFunction, this); ++ if (emit_debug_code()) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ STATIC_ASSERT(kSmiTag == 0); ++ SmiTst(object, t11); ++ Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t11, ++ Operand(zero_reg)); ++ GetObjectType(object, t11, t11); ++ Check(eq, AbortReason::kOperandIsNotAFunction, t11, ++ Operand(JS_FUNCTION_TYPE)); ++ } ++} ++ ++ ++void MacroAssembler::AssertBoundFunction(Register object) {SCOPEMARK_NAME(MacroAssembler::AssertBoundFunction, this); ++ if (emit_debug_code()) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ STATIC_ASSERT(kSmiTag == 0); ++ SmiTst(object, t11); ++ Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, t11, ++ Operand(zero_reg)); ++ GetObjectType(object, t11, t11); ++ Check(eq, AbortReason::kOperandIsNotABoundFunction, t11, ++ Operand(JS_BOUND_FUNCTION_TYPE)); ++ } ++} ++ ++void MacroAssembler::AssertGeneratorObject(Register object) {SCOPEMARK_NAME(MacroAssembler::AssertGeneratorObject, this); ++ if (!emit_debug_code()) return; ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ STATIC_ASSERT(kSmiTag == 0); ++ SmiTst(object, 
t11); ++ Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, t11, ++ Operand(zero_reg)); ++ ++ GetObjectType(object, t11, t11); ++ ++ Label done; ++ ++ // Check if JSGeneratorObject ++ Branch(&done, eq, t11, Operand(JS_GENERATOR_OBJECT_TYPE)); ++ ++ // Check if JSAsyncFunctionObject (See MacroAssembler::CompareInstanceType) ++ Branch(&done, eq, t11, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE)); ++ ++ // Check if JSAsyncGeneratorObject ++ Branch(&done, eq, t11, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE)); ++ ++ Abort(AbortReason::kOperandIsNotAGeneratorObject); ++ ++ bind(&done); ++} ++ ++void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, ++ Register scratch) {SCOPEMARK_NAME(MacroAssembler::AssertUndefinedOrAllocationSite, this); ++ if (emit_debug_code()) { ++ Label done_checking; ++ AssertNotSmi(object); ++ LoadRoot(scratch, RootIndex::kUndefinedValue); ++ Branch(&done_checking, eq, object, Operand(scratch)); ++ GetObjectType(object, scratch, scratch); ++ Assert(eq, AbortReason::kExpectedUndefinedOrCell, scratch, ++ Operand(ALLOCATION_SITE_TYPE)); ++ bind(&done_checking); ++ } ++} ++ ++ ++void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1, ++ FPURegister src2, Label* out_of_line) {SCOPEMARK_NAME(TurboAssembler::Float32Max, this); ++ if (src1 == src2) { ++ Move_s(dst, src1); ++ return; ++ } ++ ++ // Check if one of operands is NaN. ++ CompareIsNanF32(src1, src2); ++ BranchTrueF(out_of_line); ++ ++ if (kArchVariant >= kSw64r3) { ++ UNREACHABLE(); ++ } else { ++ Label return_left, return_right, done; ++ ++ CompareF32(OLT, src1, src2); ++ BranchTrueShortF(&return_right); ++ CompareF32(OLT, src2, src1); ++ BranchTrueShortF(&return_left); ++ ++ // Operands are equal, but check for +/-0. ++ { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ fimovs(src1,t11); ++ slll(t11, 32, t11); ++ Branch(&return_left, eq, t11, Operand(zero_reg)); ++ Branch(&return_right); ++ } ++ ++ bind(&return_right); ++ if (src2 != dst) { ++ Move_s(dst, src2); ++ } ++ Branch(&done); ++ ++ bind(&return_left); ++ if (src1 != dst) { ++ Move_s(dst, src1); ++ } ++ ++ bind(&done); ++ } ++} ++ ++void TurboAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1, ++ FPURegister src2) {SCOPEMARK_NAME(TurboAssembler::Float32MaxOutOfLine, this); ++ fadds(src1, src2, dst); ++} ++ ++void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1, ++ FPURegister src2, Label* out_of_line) {SCOPEMARK_NAME(TurboAssembler::Float32Min, this); ++ if (src1 == src2) { ++ Move_s(dst, src1); ++ return; ++ } ++ ++ // Check if one of operands is NaN. ++ CompareIsNanF32(src1, src2); ++ BranchTrueF(out_of_line); ++ ++ if (kArchVariant >= kSw64r3) { ++ UNREACHABLE(); ++ } else { ++ Label return_left, return_right, done; ++ ++ CompareF32(OLT, src1, src2); ++ BranchTrueShortF(&return_left); ++ CompareF32(OLT, src2, src1); ++ BranchTrueShortF(&return_right); ++ ++ // Left equals right => check for -0. 
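++    // Only the sign bit distinguishes -0 from +0 here: the float's bit
++    // pattern is moved into t11 and shifted left by 32, so the result is
++    // zero exactly for +0.0f, in which case the right operand is returned.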
++ { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ fimovs(src1,t11); ++ slll(t11, 32, t11); ++ Branch(&return_right, eq, t11, Operand(zero_reg)); ++ Branch(&return_left); ++ } ++ ++ bind(&return_right); ++ if (src2 != dst) { ++ Move_s(dst, src2); ++ } ++ Branch(&done); ++ ++ bind(&return_left); ++ if (src1 != dst) { ++ Move_s(dst, src1); ++ } ++ ++ bind(&done); ++ } ++} ++ ++void TurboAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1, ++ FPURegister src2) {SCOPEMARK_NAME(TurboAssembler::Float32MinOutOfLine, this); ++ fadds(src1, src2, dst); ++} ++ ++void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1, ++ FPURegister src2, Label* out_of_line) {SCOPEMARK_NAME(TurboAssembler::Float64Max, this); ++ if (src1 == src2) { ++ Move_d(dst, src1); ++ return; ++ } ++ ++ // Check if one of operands is NaN. ++ CompareIsNanF64(src1, src2); ++ BranchTrueF(out_of_line); ++ ++ if (kArchVariant >= kSw64r3) { ++ UNREACHABLE(); ++ } else { ++ Label return_left, return_right, done; ++ ++ CompareF64(OLT, src1, src2); ++ BranchTrueShortF(&return_right); ++ CompareF64(OLT, src2, src1); ++ BranchTrueShortF(&return_left); ++ ++ // Left equals right => check for -0. ++ { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ fimovd(src1,t11); ++ Branch(&return_left, eq, t11, Operand(zero_reg)); ++ Branch(&return_right); ++ } ++ ++ bind(&return_right); ++ if (src2 != dst) { ++ Move_d(dst, src2); ++ } ++ Branch(&done); ++ ++ bind(&return_left); ++ if (src1 != dst) { ++ Move_d(dst, src1); ++ } ++ ++ bind(&done); ++ } ++} ++ ++void TurboAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1, ++ FPURegister src2) {SCOPEMARK_NAME(TurboAssembler::Float64MaxOutOfLine, this); ++ faddd(src1, src2, dst); ++} ++ ++void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1, ++ FPURegister src2, Label* out_of_line) {SCOPEMARK_NAME(TurboAssembler::Float64Min, this); ++ if (src1 == src2) { ++ Move_d(dst, src1); ++ return; ++ } ++ ++ // Check if one of operands is NaN. ++ CompareIsNanF64(src1, src2); ++ BranchTrueF(out_of_line); ++ ++ if (kArchVariant >= kSw64r3) { ++ UNREACHABLE(); ++ } else { ++ Label return_left, return_right, done; ++ ++ CompareF64(OLT, src1, src2); ++ BranchTrueShortF(&return_left); ++ CompareF64(OLT, src2, src1); ++ BranchTrueShortF(&return_right); ++ ++ // Left equals right => check for -0. ++ { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ fimovd(src1,t11); ++ Branch(&return_right, eq, t11, Operand(zero_reg)); ++ Branch(&return_left); ++ } ++ ++ bind(&return_right); ++ if (src2 != dst) { ++ Move_d(dst, src2); ++ } ++ Branch(&done); ++ ++ bind(&return_left); ++ if (src1 != dst) { ++ Move_d(dst, src1); ++ } ++ ++ bind(&done); ++ } ++} ++ ++void TurboAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1, ++ FPURegister src2) {SCOPEMARK_NAME(TurboAssembler::Float64MinOutOfLine, this); ++ faddd(src1, src2, dst); ++} ++ ++static const int kRegisterPassedArguments = 6; ++ ++int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments, ++ int num_double_arguments) { ++ int stack_passed_words = 0; ++ num_reg_arguments += 2 * num_double_arguments; ++ ++ // O32: Up to four simple arguments are passed in registers a0..a3. ++ // N64: Up to eight simple arguments are passed in registers a0..a5. 
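++  // SW64: kRegisterPassedArguments (6) integer arguments travel in registers;
++  // each double argument was counted as two register slots above, and any
++  // excess spills to the stack on top of the kCArgSlotCount reserved slots.
++  // For example, 3 integer and 2 double arguments give num_reg_arguments == 7,
++  // i.e. one stack word plus kCArgSlotCount.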
++ if (num_reg_arguments > kRegisterPassedArguments) { ++ stack_passed_words += num_reg_arguments - kRegisterPassedArguments; ++ } ++ stack_passed_words += kCArgSlotCount; ++ return stack_passed_words; ++} ++ ++void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, ++ int num_double_arguments, ++ Register scratch) {SCOPEMARK_NAME(TurboAssembler::PrepareCallCFunction, this); ++ int frame_alignment = ActivationFrameAlignment(); ++ ++ // n64: Up to eight simple arguments in a0..a3, a4..a5, No argument slots. ++ // O32: Up to four simple arguments are passed in registers a0..a3. ++ // Those four arguments must have reserved argument slots on the stack for ++ // sw64, even though those argument slots are not normally used. ++ // Both ABIs: Remaining arguments are pushed on the stack, above (higher ++ // address than) the (O32) argument slots. (arg slot calculation handled by ++ // CalculateStackPassedWords()). ++ int stack_passed_arguments = CalculateStackPassedWords( ++ num_reg_arguments, num_double_arguments); ++ if (frame_alignment > kPointerSize) { ++ // Make stack end at alignment and make room for num_arguments - 4 words ++ // and the original value of sp. ++ mov(scratch, sp); ++ Subl(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); ++ DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); ++ And(sp, sp, Operand(-frame_alignment)); ++ Stl(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); ++ } else { ++ Subl(sp, sp, Operand(stack_passed_arguments * kPointerSize)); ++ } ++} ++ ++void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, ++ Register scratch) {SCOPEMARK_NAME(TurboAssembler::PrepareCallCFunction, this); ++ PrepareCallCFunction(num_reg_arguments, 0, scratch); ++} ++ ++void TurboAssembler::CallCFunction(ExternalReference function, ++ int num_reg_arguments, ++ int num_double_arguments) {SCOPEMARK_NAME(TurboAssembler::CallCFunction, this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ li(t12, function); ++ CallCFunctionHelper(t12, num_reg_arguments, num_double_arguments); ++} ++ ++void TurboAssembler::CallCFunction(Register function, int num_reg_arguments, ++ int num_double_arguments) {SCOPEMARK_NAME(TurboAssembler::CallCFunction, this); ++ CallCFunctionHelper(function, num_reg_arguments, num_double_arguments); ++} ++ ++void TurboAssembler::CallCFunction(ExternalReference function, ++ int num_arguments) {SCOPEMARK_NAME(TurboAssembler::CallCFunction, this); ++ CallCFunction(function, num_arguments, 0); ++} ++ ++void TurboAssembler::CallCFunction(Register function, int num_arguments) {SCOPEMARK_NAME(TurboAssembler::CallCFunction, this); ++ CallCFunction(function, num_arguments, 0); ++} ++ ++void TurboAssembler::CallCFunctionHelper(Register function, ++ int num_reg_arguments, ++ int num_double_arguments) {SCOPEMARK_NAME(TurboAssembler::CallCFunctionHelper, this); ++ DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters); ++ DCHECK(has_frame()); ++ // Make sure that the stack is aligned before calling a C function unless ++ // running in the simulator. The simulator has its own alignment check which ++ // provides more information. ++ // The argument stots are presumed to have been set up by ++ // PrepareCallCFunction. The C function must be called via t12, for sw64 ABI. 
++ ++#if V8_HOST_ARCH_SW64 ++ if (emit_debug_code()) { ++ int frame_alignment = ActivationFrameAlignment(); ++ int frame_alignment_mask = frame_alignment - 1; ++ if (frame_alignment > kPointerSize) { ++ DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); ++ Label alignment_as_expected; ++ { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ And(scratch, sp, Operand(frame_alignment_mask)); ++ Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg)); ++ } ++ // Don't use Check here, as it will call Runtime_Abort possibly ++ // re-entering here. ++ halt();//stop("Unexpected alignment in CallCFunction"); ++ bind(&alignment_as_expected); ++ } ++ } ++#endif // V8_HOST_ARCH_SW64 ++ ++ // Just call directly. The function called cannot cause a GC, or ++ // allow preemption, so the return address in the link register ++ // stays correct. ++ { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ if (function != t12) { ++ mov(t12, function); ++ function = t12; ++ } ++ ++ // Save the frame pointer and PC so that the stack layout remains iterable, ++ // even without an ExitFrame which normally exists between JS and C frames. ++ // 't' registers are caller-saved so this is safe as a scratch register. ++ Register pc_scratch = t1; ++ Register scratch = t2; ++ DCHECK(!AreAliased(pc_scratch, scratch, function)); ++ ++#ifdef SW64 //ZHJ20210817 ++ br(pc_scratch, 0); ++#else ++ mov(scratch, ra); ++ mov(pc_scratch, ra); ++ mov(ra, scratch); ++#endif ++ ++ // See x64 code for reasoning about how to address the isolate data fields. ++ if (root_array_available()) { ++ Stl(pc_scratch, MemOperand(kRootRegister, ++ IsolateData::fast_c_call_caller_pc_offset())); ++ Stl(fp, MemOperand(kRootRegister, ++ IsolateData::fast_c_call_caller_fp_offset())); ++ } else { ++ DCHECK_NOT_NULL(isolate()); ++ li(scratch, ExternalReference::fast_c_call_caller_pc_address(isolate())); ++ Stl(pc_scratch, MemOperand(scratch)); ++ li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate())); ++ Stl(fp, MemOperand(scratch)); ++ } ++ ++ Call(function); ++ ++ // We don't unset the PC; the FP is the source of truth. 
++ if (root_array_available()) { ++ Stl(zero_reg, MemOperand(kRootRegister, ++ IsolateData::fast_c_call_caller_fp_offset())); ++ } else { ++ DCHECK_NOT_NULL(isolate()); ++ li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate())); ++ Stl(zero_reg, MemOperand(scratch)); ++ } ++ } ++ ++ int stack_passed_arguments = CalculateStackPassedWords( ++ num_reg_arguments, num_double_arguments); ++ ++ if (ActivationFrameAlignment() > kPointerSize) { ++ Ldl(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); ++ } else { ++ Addl(sp, sp, Operand(stack_passed_arguments * kPointerSize)); ++ } ++} ++ ++ ++#undef BRANCH_ARGS_CHECK ++ ++void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask, ++ Condition cc, Label* condition_met) {SCOPEMARK_NAME(TurboAssembler::CheckPageFlag, this); ++And(scratch, object, Operand(~kPageAlignmentMask)); ++ Ldl(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset)); ++ And(scratch, scratch, Operand(mask)); ++ Branch(condition_met, cc, scratch, Operand(zero_reg)); ++} ++ ++ ++Register GetRegisterThatIsNotOneOf(Register reg1, ++ Register reg2, ++ Register reg3, ++ Register reg4, ++ Register reg5, ++ Register reg6) { ++ RegList regs = 0; ++ if (reg1.is_valid()) regs |= reg1.bit(); ++ if (reg2.is_valid()) regs |= reg2.bit(); ++ if (reg3.is_valid()) regs |= reg3.bit(); ++ if (reg4.is_valid()) regs |= reg4.bit(); ++ if (reg5.is_valid()) regs |= reg5.bit(); ++ if (reg6.is_valid()) regs |= reg6.bit(); ++ ++ const RegisterConfiguration* config = RegisterConfiguration::Default(); ++ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) { ++ int code = config->GetAllocatableGeneralCode(i); ++ Register candidate = Register::from_code(code); ++ if (regs & candidate.bit()) continue; ++ return candidate; ++ } ++ UNREACHABLE(); ++} ++ ++void TurboAssembler::ComputeCodeStartAddress(Register dst) {SCOPEMARK_NAME(TurboAssembler::ComputeCodeStartAddress, this); ++ // This push on ra and the pop below together ensure that we restore the ++ // register ra, which is needed while computing the code start address. ++ push(ra); ++ ++ // The bal instruction puts the address of the current instruction into ++ // the return address (ra) register, which we can use later on. ++ br(ra, 0); ++ int pc = pc_offset(); ++ li(dst, Operand(pc)); ++ Subl(dst, ra, dst); ++ ++ pop(ra); // Restore ra ++} ++ ++void TurboAssembler::ResetSpeculationPoisonRegister() {SCOPEMARK_NAME(TurboAssembler::ResetSpeculationPoisonRegister, this); ++ li(kSpeculationPoisonRegister, -1); ++} ++ ++void TurboAssembler::CallForDeoptimization(Address target, int deopt_id, ++ Label* exit, DeoptimizeKind kind) { ++ USE(exit, kind); ++ NoRootArrayScope no_root_array(this); ++ ++ // Save the deopt id in kRootRegister (we don't need the roots array from now ++ // on). ++ DCHECK_LE(deopt_id, 0xFFFF); ++ li(kRootRegister, deopt_id); ++ Call(target, RelocInfo::RUNTIME_ENTRY); ++} ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_TARGET_ARCH_SW64 +diff --git a/src/3rdparty/chromium/v8/src/codegen/sw64/macro-assembler-sw64.h b/src/3rdparty/chromium/v8/src/codegen/sw64/macro-assembler-sw64.h +new file mode 100755 +index 000000000..1abfdf29b +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/codegen/sw64/macro-assembler-sw64.h +@@ -0,0 +1,1321 @@ ++// Copyright 2012 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. 
++// ++#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H ++#error This header must be included via macro-assembler.h ++#endif ++ ++#ifndef V8_CODEGEN_SW64_MACRO_ASSEMBLER_SW64_H_ ++#define V8_CODEGEN_SW64_MACRO_ASSEMBLER_SW64_H_ ++ ++#ifdef PRODUCT ++#define SCOPEMARK /* nothing */ ++#define SCOPEMARK_NAME(name, tasm) /* nothing */ ++#else ++#define SCOPEMARK \ ++char line[200]; sprintf(line,"%s:%d",__FILE__, __LINE__);\ ++ScopeMark scopeMark(_tasm, line); ++ ++#define SCOPEMARK_NAME(name, tasm) \ ++char line[200]; sprintf(line,"%s:%d",__FILE__, __LINE__);\ ++ScopeMark scopeMark(tasm, line, #name); ++ ++#endif ++ ++#include "src/codegen/assembler.h" ++#include "src/codegen/sw64/assembler-sw64.h" ++#include "src/common/globals.h" ++ ++namespace v8 { ++namespace internal { ++ ++// Forward declarations. ++enum class AbortReason : uint8_t; ++ ++// Reserved Register Usage Summary. ++// ++// Registers t11, t12, and at are reserved for use by the MacroAssembler. ++// ++// The programmer should know that the MacroAssembler may clobber these three, ++// but won't touch other registers except in special cases. ++// ++// Per the SW64 ABI, register t12 must be used for indirect function call ++// via 'jalr t12' or 'jr t12' instructions. This is relied upon by gcc when ++// trying to update gp register for position-independent-code. Whenever ++// SW64 generated code calls C code, it must be via t12 register. ++ ++ ++// Flags used for LeaveExitFrame function. ++enum LeaveExitFrameMode { ++ EMIT_RETURN = true, ++ NO_EMIT_RETURN = false ++}; ++ ++// Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls. ++enum BranchDelaySlot { ++ USE_DELAY_SLOT, ++ PROTECT ++}; ++ ++// Flags used for the li macro-assembler function. ++enum LiFlags { ++ // If the constant value can be represented in just 16 bits, then ++ // optimize the li to use a single instruction, rather than lui/ori/dsll ++ // sequence. A number of other optimizations that emits less than ++ // maximum number of instructions exists. ++ OPTIMIZE_SIZE = 0, ++ // Always use 6 instructions (lui/ori/dsll sequence) for release 2 or 4 ++ // instructions for release 6 (lui/ori/dahi/dati), even if the constant ++ // could be loaded with just one, so that this value is patchable later. ++ CONSTANT_SIZE = 1, ++ // For address loads only 4 instruction are required. Used to mark ++ // constant load that will be used as address without relocation ++ // information. It ensures predictable code size, so specific sites ++ // in code are patchable. ++ ADDRESS_LOAD = 2 ++}; ++ ++enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; ++enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; ++enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved }; ++ ++Register GetRegisterThatIsNotOneOf(Register reg1, ++ Register reg2 = no_reg, ++ Register reg3 = no_reg, ++ Register reg4 = no_reg, ++ Register reg5 = no_reg, ++ Register reg6 = no_reg); ++ ++// ----------------------------------------------------------------------------- ++// Static helper functions. ++ ++#if defined(V8_TARGET_LITTLE_ENDIAN) ++#define SmiWordOffset(offset) (offset + kPointerSize / 2) ++#else ++#define SmiWordOffset(offset) offset ++#endif ++ ++// Generate a MemOperand for loading a field from an object. ++inline MemOperand FieldMemOperand(Register object, int offset) { ++ return MemOperand(object, offset - kHeapObjectTag); ++} ++ ++// Generate a MemOperand for storing arguments 7..N on the stack ++// when calling CallCFunction(). ++// TODO(plind): Currently ONLY used for O32. 
Should be fixed for ++// n64, and used in RegExp code, and other places ++// with more than 8 arguments. ++inline MemOperand CFunctionArgumentOperand(int index) { ++ DCHECK_GT(index, kCArgSlotCount); ++ // Argument 5 takes the slot just past the four Arg-slots. ++ int offset = (index - 5) * kPointerSize + kCArgsSlotsSize; ++ return MemOperand(sp, offset); ++} ++ ++class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { ++ public: ++ using TurboAssemblerBase::TurboAssemblerBase; ++ ++ // Activation support. ++ void EnterFrame(StackFrame::Type type); ++ void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) { ++ // Out-of-line constant pool not implemented on sw64. ++ UNREACHABLE(); ++ } ++ void LeaveFrame(StackFrame::Type type); ++ ++ // Generates function and stub prologue code. ++ void StubPrologue(StackFrame::Type type); ++ void Prologue(); ++ ++ void InitializeRootRegister() { ++ ExternalReference isolate_root = ExternalReference::isolate_root(isolate()); ++ li(kRootRegister, Operand(isolate_root)); ++ } ++ ++ // Jump unconditionally to given label. ++ // We NEED a nop in the branch delay slot, as it used by v8, for example in ++ // CodeGenerator::ProcessDeferred(). ++ // Currently the branch delay slot is filled by the MacroAssembler. ++ // Use rather b(Label) for code generation. ++ void jmp(Label* L) { Branch(L); } ++ ++ // ------------------------------------------------------------------------- ++ // Debugging. ++ ++ void Trap() override; ++ void DebugBreak() override; ++ ++ // Calls Abort(msg) if the condition cc is not satisfied. ++ // Use --debug_code to enable. ++ void Assert(Condition cc, AbortReason reason, Register rs, Operand rt); ++ ++ // Like Assert(), but always enabled. ++ void Check(Condition cc, AbortReason reason, Register rs, Operand rt); ++ ++ // Print a message to stdout and abort execution. ++ void Abort(AbortReason msg); ++ ++ // Arguments macros. ++#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2 ++#define COND_ARGS cond, r1, r2 ++ ++ // Cases when relocation is not needed. 
++#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \ ++ void Name(target_type target, BranchDelaySlot bd = PROTECT); \ ++ inline void Name(BranchDelaySlot bd, target_type target) { \ ++ Name(target, bd); \ ++ } \ ++ void Name(target_type target, \ ++ COND_TYPED_ARGS, \ ++ BranchDelaySlot bd = PROTECT); \ ++ inline void Name(BranchDelaySlot bd, \ ++ target_type target, \ ++ COND_TYPED_ARGS) { \ ++ Name(target, COND_ARGS, bd); \ ++ } ++ ++#define DECLARE_BRANCH_PROTOTYPES(Name) \ ++ DECLARE_NORELOC_PROTOTYPE(Name, Label*) \ ++ DECLARE_NORELOC_PROTOTYPE(Name, int32_t) ++ ++ DECLARE_BRANCH_PROTOTYPES(Branch) ++ DECLARE_BRANCH_PROTOTYPES(BranchAndLink) ++ DECLARE_BRANCH_PROTOTYPES(BranchShort) ++ ++#undef DECLARE_BRANCH_PROTOTYPES ++#undef COND_TYPED_ARGS ++#undef COND_ARGS ++ ++ // Floating point branches ++ void CompareF32(FPUCondition cc, FPURegister cmp1, FPURegister cmp2) { ++ CompareF(S, cc, cmp1, cmp2); ++ } ++ ++ void CompareIsNanF32(FPURegister cmp1, FPURegister cmp2) { ++ CompareIsNanF(S, cmp1, cmp2); ++ } ++ ++ void CompareF64(FPUCondition cc, FPURegister cmp1, FPURegister cmp2) { ++ CompareF(D, cc, cmp1, cmp2); ++ } ++ ++ void CompareIsNanF64(FPURegister cmp1, FPURegister cmp2) { ++ CompareIsNanF(D, cmp1, cmp2); ++ } ++ ++ void BranchTrueShortF(Label* target, BranchDelaySlot bd = PROTECT); ++ void BranchFalseShortF(Label* target, BranchDelaySlot bd = PROTECT); ++ ++ void BranchTrueF(Label* target, BranchDelaySlot bd = PROTECT); ++ void BranchFalseF(Label* target, BranchDelaySlot bd = PROTECT); ++ ++ // MSA branches ++ void BranchMSA(Label* target, MSABranchDF df, MSABranchCondition cond, ++ MSARegister wt, BranchDelaySlot bd = PROTECT); ++ ++ void Branch(Label* L, Condition cond, Register rs, RootIndex index, ++ BranchDelaySlot bdslot = PROTECT); ++ ++ static int InstrCountForLi64Bit(int64_t value); ++ inline void LiLower32BitHelper(Register rd, Operand j); ++ void li_optimized(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE); ++ // Load int32 in the rd register. ++ void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE); ++ inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) { ++ li(rd, Operand(j), mode); ++ } ++ void li(Register dst, Handle value, LiFlags mode = OPTIMIZE_SIZE); ++ void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE); ++ void li(Register dst, const StringConstantBase* string, ++ LiFlags mode = OPTIMIZE_SIZE); ++ ++ void LoadFromConstantsTable(Register destination, ++ int constant_index) override; ++ void LoadRootRegisterOffset(Register destination, intptr_t offset) override; ++ void LoadRootRelative(Register destination, int32_t offset) override; ++ ++// Jump, Call, and Ret pseudo instructions implementing inter-working. ++#define COND_ARGS Condition cond = al, Register rs = zero_reg, \ ++ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT ++ ++ void Jump(Register target, COND_ARGS); ++ void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS); ++ void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS); ++ // Deffer from li, this method save target to the memory, and then load ++ // it to register use ld, it can be used in wasm jump table for concurrent ++ // patching. 
++ void PatchAndJump(Address target); ++ void Jump(Handle code, RelocInfo::Mode rmode, COND_ARGS); ++ void Jump(const ExternalReference& reference) override; ++ void Call(Register target, COND_ARGS); ++ void Call(Address target, RelocInfo::Mode rmode, COND_ARGS); ++ void Call(Handle code, ++ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, ++ COND_ARGS); ++ void Call(Label* target); ++ void LoadAddress(Register dst, Label* target); ++ ++ // Load the builtin given by the Smi in |builtin_index| into the same ++ // register. ++ void LoadEntryFromBuiltinIndex(Register builtin_index); ++ void CallBuiltinByIndex(Register builtin_index) override; ++ ++ void LoadCodeObjectEntry(Register destination, ++ Register code_object) override { ++ // TODO(sw64): Implement. ++ UNIMPLEMENTED(); ++ } ++ void CallCodeObject(Register code_object) override { ++ // TODO(sw64): Implement. ++ UNIMPLEMENTED(); ++ } ++ void JumpCodeObject(Register code_object) override { ++ // TODO(sw64): Implement. ++ UNIMPLEMENTED(); ++ } ++ ++ // Generates an instruction sequence s.t. the return address points to the ++ // instruction following the call. ++ // The return address on the stack is used by frame iteration. ++ void StoreReturnAddressAndCall(Register target); ++ ++ void CallForDeoptimization(Address target, int deopt_id, Label* exit, ++ DeoptimizeKind kind); ++ ++ void Ret(COND_ARGS); ++ inline void Ret(BranchDelaySlot bd, Condition cond = al, ++ Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) { ++ Ret(cond, rs, rt, bd); ++ } ++ ++ // Emit code to discard a non-negative number of pointer-sized elements ++ // from the stack, clobbering only the sp register. ++ void Drop(int count, ++ Condition cond = cc_always, ++ Register reg = no_reg, ++ const Operand& op = Operand(no_reg)); ++ ++ // Trivial case of DropAndRet that utilizes the delay slot and only emits ++ // 2 instructions. ++ void DropAndRet(int drop); ++ ++ void DropAndRet(int drop, ++ Condition cond, ++ Register reg, ++ const Operand& op); ++ ++ void Ldl(Register rd, const MemOperand& rs); ++ void Stl(Register rd, const MemOperand& rs); ++ ++ void push(Register src) { ++ Addl(sp, sp, Operand(-kPointerSize)); ++ Stl(src, MemOperand(sp, 0)); ++ } ++ void Push(Register src) { push(src); } ++ void Push(Handle handle); ++ void Push(Smi smi); ++ ++ // Push two registers. Pushes leftmost register first (to highest address). ++ void Push(Register src1, Register src2) { ++ Subl(sp, sp, Operand(2 * kPointerSize)); ++ Stl(src1, MemOperand(sp, 1 * kPointerSize)); ++ Stl(src2, MemOperand(sp, 0 * kPointerSize)); ++ } ++ ++ // Push three registers. Pushes leftmost register first (to highest address). ++ void Push(Register src1, Register src2, Register src3) { ++ Subl(sp, sp, Operand(3 * kPointerSize)); ++ Stl(src1, MemOperand(sp, 2 * kPointerSize)); ++ Stl(src2, MemOperand(sp, 1 * kPointerSize)); ++ Stl(src3, MemOperand(sp, 0 * kPointerSize)); ++ } ++ ++ // Push four registers. Pushes leftmost register first (to highest address). ++ void Push(Register src1, Register src2, Register src3, Register src4) { ++ Subl(sp, sp, Operand(4 * kPointerSize)); ++ Stl(src1, MemOperand(sp, 3 * kPointerSize)); ++ Stl(src2, MemOperand(sp, 2 * kPointerSize)); ++ Stl(src3, MemOperand(sp, 1 * kPointerSize)); ++ Stl(src4, MemOperand(sp, 0 * kPointerSize)); ++ } ++ ++ // Push five registers. Pushes leftmost register first (to highest address). 
++ void Push(Register src1, Register src2, Register src3, Register src4, ++ Register src5) { ++ Subl(sp, sp, Operand(5 * kPointerSize)); ++ Stl(src1, MemOperand(sp, 4 * kPointerSize)); ++ Stl(src2, MemOperand(sp, 3 * kPointerSize)); ++ Stl(src3, MemOperand(sp, 2 * kPointerSize)); ++ Stl(src4, MemOperand(sp, 1 * kPointerSize)); ++ Stl(src5, MemOperand(sp, 0 * kPointerSize)); ++ } ++ ++ void Push(Register src, Condition cond, Register tst1, Register tst2) { ++ // Since we don't have conditional execution we use a Branch. ++ Branch(3, cond, tst1, Operand(tst2)); ++ Subl(sp, sp, Operand(kPointerSize)); ++ Stl(src, MemOperand(sp, 0)); ++ } ++ ++ enum PushArrayOrder { kNormal, kReverse }; ++ void PushArray(Register array, Register size, Register scratch, ++ Register scratch2, PushArrayOrder order = kNormal); ++ ++ void SaveRegisters(RegList registers); ++ void RestoreRegisters(RegList registers); ++ ++ void CallRecordWriteStub(Register object, Register address, ++ RememberedSetAction remembered_set_action, ++ SaveFPRegsMode fp_mode); ++ void CallRecordWriteStub(Register object, Register address, ++ RememberedSetAction remembered_set_action, ++ SaveFPRegsMode fp_mode, Address wasm_target); ++ void CallEphemeronKeyBarrier(Register object, Register address, ++ SaveFPRegsMode fp_mode); ++ ++ // Push multiple registers on the stack. ++ // Registers are saved in numerical order, with higher numbered registers ++ // saved in higher memory addresses. ++ void MultiPush(RegList regs); ++ void MultiPushFPU(RegList regs); ++ ++ // Calculate how much stack space (in bytes) are required to store caller ++ // registers excluding those specified in the arguments. ++ int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, ++ Register exclusion1 = no_reg, ++ Register exclusion2 = no_reg, ++ Register exclusion3 = no_reg) const; ++ ++ // Push caller saved registers on the stack, and return the number of bytes ++ // stack pointer is adjusted. ++ int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg, ++ Register exclusion2 = no_reg, ++ Register exclusion3 = no_reg); ++ // Restore caller saved registers from the stack, and return the number of ++ // bytes stack pointer is adjusted. ++ int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg, ++ Register exclusion2 = no_reg, ++ Register exclusion3 = no_reg); ++ ++ void pop(Register dst) { ++ Ldl(dst, MemOperand(sp, 0)); ++ Addl(sp, sp, Operand(kPointerSize)); ++ } ++ void Pop(Register dst) { pop(dst); } ++ ++ // Pop two registers. Pops rightmost register first (from lower address). ++ void Pop(Register src1, Register src2) { ++ DCHECK(src1 != src2); ++ Ldl(src2, MemOperand(sp, 0 * kPointerSize)); ++ Ldl(src1, MemOperand(sp, 1 * kPointerSize)); ++ Addl(sp, sp, 2 * kPointerSize); ++ } ++ ++ // Pop three registers. Pops rightmost register first (from lower address). ++ void Pop(Register src1, Register src2, Register src3) { ++ Ldl(src3, MemOperand(sp, 0 * kPointerSize)); ++ Ldl(src2, MemOperand(sp, 1 * kPointerSize)); ++ Ldl(src1, MemOperand(sp, 2 * kPointerSize)); ++ Addl(sp, sp, 3 * kPointerSize); ++ } ++ ++ // Pop four registers. Pops rightmost register first (from lower address). 
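The multi-register Push overloads above, and the matching Pop overloads that follow, fix one convention: the stack pointer is adjusted once, the leftmost register is stored at the highest address, and Pop restores in mirror order. A toy model of that layout in plain C++, purely illustrative and not V8 code:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t stack[8] = {0};
      int sp = 8;                        // slot index; the stack grows towards 0
      uint64_t src1 = 1, src2 = 2, src3 = 3;

      // Push(src1, src2, src3): adjust sp once, store leftmost at the highest address.
      sp -= 3;
      stack[sp + 2] = src1;
      stack[sp + 1] = src2;
      stack[sp + 0] = src3;              // rightmost ends up at MemOperand(sp, 0)

      // Pop(dst1, dst2, dst3): rightmost register is read from the lowest address.
      uint64_t dst3 = stack[sp + 0];
      uint64_t dst2 = stack[sp + 1];
      uint64_t dst1 = stack[sp + 2];
      sp += 3;

      assert(dst1 == src1 && dst2 == src2 && dst3 == src3 && sp == 8);
      return 0;
    }

Adjusting sp once per group keeps the instruction count down and leaves the stack pointer valid at every intermediate store.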
++ void Pop(Register src1, Register src2, Register src3, Register src4) { ++ Ldl(src4, MemOperand(sp, 0 * kPointerSize)); ++ Ldl(src3, MemOperand(sp, 1 * kPointerSize)); ++ Ldl(src2, MemOperand(sp, 2 * kPointerSize)); ++ Ldl(src1, MemOperand(sp, 3 * kPointerSize)); ++ Addl(sp, sp, 4 * kPointerSize); ++ } ++ ++ void Pop(uint32_t count = 1) { Addl(sp, sp, Operand(count * kPointerSize)); } ++ ++ // Pops multiple values from the stack and load them in the ++ // registers specified in regs. Pop order is the opposite as in MultiPush. ++ void MultiPop(RegList regs); ++ void MultiPopFPU(RegList regs); ++ ++#define DEFINE_INSTRUCTION(instr) \ ++ void instr(Register rd, Register rs, const Operand& rt); \ ++ void instr(Register rd, Register rs, Register rt) { \ ++ instr(rd, rs, Operand(rt)); \ ++ } \ ++ void instr(Register rs, Register rt, int32_t j) { instr(rs, rt, Operand(j)); } ++ ++#define DEFINE_INSTRUCTION2(instr) \ ++ void instr(Register rs, const Operand& rt); \ ++ void instr(Register rs, Register rt) { instr(rs, Operand(rt)); } \ ++ void instr(Register rs, int32_t j) { instr(rs, Operand(j)); } ++ ++ DEFINE_INSTRUCTION(Addw) ++ DEFINE_INSTRUCTION(Addl) ++ DEFINE_INSTRUCTION(Divw) ++ DEFINE_INSTRUCTION(Divwu) ++ DEFINE_INSTRUCTION(Divl) ++ DEFINE_INSTRUCTION(Divlu) ++ DEFINE_INSTRUCTION(Modw) ++ DEFINE_INSTRUCTION(Modwu) ++ DEFINE_INSTRUCTION(Modl) ++ DEFINE_INSTRUCTION(Modlu) ++ DEFINE_INSTRUCTION(Subw) ++ DEFINE_INSTRUCTION(Subl) ++ DEFINE_INSTRUCTION(Dmodu) ++ DEFINE_INSTRUCTION(Mulw) ++ DEFINE_INSTRUCTION(Mulwh) ++ DEFINE_INSTRUCTION(Mulhu) ++ DEFINE_INSTRUCTION(Mull) ++ DEFINE_INSTRUCTION(Dmulh) ++ ++ DEFINE_INSTRUCTION(Sllw) ++ DEFINE_INSTRUCTION(Srlw) ++ DEFINE_INSTRUCTION(Sraw) ++ ++ DEFINE_INSTRUCTION(And) ++ DEFINE_INSTRUCTION(Or) ++ DEFINE_INSTRUCTION(Xor) ++ DEFINE_INSTRUCTION(Nor) ++ DEFINE_INSTRUCTION2(Neg) ++ ++ DEFINE_INSTRUCTION(Cmplt) ++ DEFINE_INSTRUCTION(Cmpult) ++ DEFINE_INSTRUCTION(Cmple) ++ DEFINE_INSTRUCTION(Cmpule) ++ DEFINE_INSTRUCTION(Cmpgt) ++ DEFINE_INSTRUCTION(Cmpugt) ++ DEFINE_INSTRUCTION(Cmpge) ++ DEFINE_INSTRUCTION(Cmpuge) ++ ++ DEFINE_INSTRUCTION(Ror) ++ DEFINE_INSTRUCTION(Dror) ++ ++#undef DEFINE_INSTRUCTION ++#undef DEFINE_INSTRUCTION2 ++#undef DEFINE_INSTRUCTION3 ++ ++ void SmiUntag(Register dst, const MemOperand& src); ++ void SmiUntag(Register dst, Register src) { ++ if (SmiValuesAre32Bits()) { ++ sral(src, kSmiShift ,dst); ++ } else { ++ DCHECK(SmiValuesAre31Bits()); ++ Sraw(dst, src, kSmiShift); ++ } ++ } ++ ++ void SmiUntag(Register reg) { SmiUntag(reg, reg); } ++ ++ // Removes current frame and its arguments from the stack preserving ++ // the arguments and a return address pushed to the stack for the next call. ++ // Both |callee_args_count| and |caller_args_count_reg| do not include ++ // receiver. |callee_args_count| is not modified, |caller_args_count_reg| ++ // is trashed. ++ void PrepareForTailCall(Register callee_args_count, ++ Register caller_args_count, Register scratch0, ++ Register scratch1); ++ ++ int CalculateStackPassedWords(int num_reg_arguments, ++ int num_double_arguments); ++ ++ // Before calling a C-function from generated code, align arguments on stack ++ // and add space for the four sw64 argument slots. ++ // After aligning the frame, non-register arguments must be stored on the ++ // stack, after the argument-slots using helper: CFunctionArgumentOperand(). ++ // The argument count assumes all arguments are word sized. ++ // Some compilers/platforms require the stack to be aligned when calling ++ // C++ code. 
++ // Needs a scratch register to do some arithmetic. This register will be ++ // trashed. ++ void PrepareCallCFunction(int num_reg_arguments, int num_double_registers, ++ Register scratch); ++ void PrepareCallCFunction(int num_reg_arguments, Register scratch); ++ ++ // Calls a C function and cleans up the space for arguments allocated ++ // by PrepareCallCFunction. The called function is not allowed to trigger a ++ // garbage collection, since that might move the code and invalidate the ++ // return address (unless this is somehow accounted for by the called ++ // function). ++ void CallCFunction(ExternalReference function, int num_arguments); ++ void CallCFunction(Register function, int num_arguments); ++ void CallCFunction(ExternalReference function, int num_reg_arguments, ++ int num_double_arguments); ++ void CallCFunction(Register function, int num_reg_arguments, ++ int num_double_arguments); ++ void MovFromFloatResult(DoubleRegister fdst); ++ void MovFromFloatParameter(DoubleRegister fdst); ++ ++ // There are two ways of passing double arguments on SW64, depending on ++ // whether soft or hard floating point ABI is used. These functions ++ // abstract parameter passing for the three different ways we call ++ // C functions from generated code. ++ void MovToFloatParameter(DoubleRegister fsrc); ++ void MovToFloatParameters(DoubleRegister fsrc0, DoubleRegister fsrc1); ++ void MovToFloatResult(DoubleRegister fsrc); ++ ++#ifdef SW64 ++ void MovFromGeneralResult(Register dst); ++ void MovFromGeneralParameter(Register dst); ++ void MovToGeneralParameter(Register src); ++ void MovToGeneralParameters(Register src0, Register src1); ++ void MovToGeneralResult(Register src); ++#endif ++ ++ // See comments at the beginning of Builtins::Generate_CEntry. ++ inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); } ++ inline void PrepareCEntryFunction(const ExternalReference& ref) { ++ li(a1, ref); ++ } ++ ++ void CheckPageFlag(Register object, Register scratch, int mask, Condition cc, ++ Label* condition_met); ++#undef COND_ARGS ++ ++ // Performs a truncating conversion of a floating point number as used by ++ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. ++ // Exits with 'result' holding the answer. ++ void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result, ++ DoubleRegister double_input, StubCallMode stub_mode); ++ ++ // Conditional move. ++ void Seleq(Register rd, Register rs, Register rt); ++ void Selne(Register rd, Register rs, Register rt); ++ ++ void LoadZeroIfFPUCondition(Register dest); ++ void LoadZeroIfNotFPUCondition(Register dest); ++ ++ void LoadZeroIfConditionNotZero(Register dest, Register condition); ++ void LoadZeroIfConditionZero(Register dest, Register condition); ++ void LoadZeroOnCondition(Register rd, Register rs, const Operand& rt, ++ Condition cond); ++ ++ void Clz(Register rd, Register rs); ++ void Dclz(Register rd, Register rs); ++ void Ctz(Register rd, Register rs); ++ void Dctz(Register rd, Register rs); ++ void Popcnt(Register rd, Register rs); ++ void Dpopcnt(Register rd, Register rs); ++ ++ // SW64 R2 instruction macro. 
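The Ext/Dext/Ins/Dins helpers declared just below extract or insert a bit-field of `size` bits starting at bit `pos`, in the spirit of the MIPS ext/ins operations this port mirrors. A portable sketch of the same semantics, for illustration only; the macro-assembler emits SW64 instruction sequences instead:

    #include <cassert>
    #include <cstdint>

    // Extract a field of `size` bits starting at bit `pos`.
    uint64_t ExtractBits(uint64_t src, unsigned pos, unsigned size) {
      uint64_t mask = (size == 64) ? ~0ull : ((1ull << size) - 1);
      return (src >> pos) & mask;
    }

    // Insert the low `size` bits of `src` into `dst` at bit `pos`.
    uint64_t InsertBits(uint64_t dst, uint64_t src, unsigned pos, unsigned size) {
      uint64_t mask = (size == 64) ? ~0ull : ((1ull << size) - 1);
      return (dst & ~(mask << pos)) | ((src & mask) << pos);
    }

    int main() {
      assert(ExtractBits(0xABCD, 4, 8) == 0xBC);
      assert(InsertBits(0xFF00, 0xA, 4, 4) == 0xFFA0);
      return 0;
    }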
++ void Ext(Register rt, Register rs, uint16_t pos, uint16_t size); ++ void Dext(Register rt, Register rs, uint16_t pos, uint16_t size); ++ void Ins(Register rt, Register rs, uint16_t pos, uint16_t size); ++ void Dins(Register rt, Register rs, uint16_t pos, uint16_t size); ++ void ExtractBits(Register dest, Register source, Register pos, int size, ++ bool sign_extend = false); ++ void InsertBits(Register dest, Register source, Register pos, int size); ++ void Fnegs(FPURegister fd, FPURegister fs); ++ void Fnegd(FPURegister fd, FPURegister fs); ++ ++ // SW64 R6 instruction macros. ++ void Bovc(Register rt, Register rs, Label* L); ++ void Bnvc(Register rt, Register rs, Label* L); ++ ++ // Convert single to unsigned word. ++ void Trunc_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch); ++ void Trunc_uw_s(Register rd, FPURegister fs, FPURegister scratch); ++ ++ // Change endianness ++ void ByteSwapSigned(Register dest, Register src, int operand_size); ++ void ByteSwapUnsigned(Register dest, Register src, int operand_size); ++ ++ void Uldh(Register rd, const MemOperand& rs); ++ void Uldhu(Register rd, const MemOperand& rs); ++ void Usth(Register rd, const MemOperand& rs, Register scratch); ++ ++ void Uldw(Register rd, const MemOperand& rs); ++ void Uldwu(Register rd, const MemOperand& rs); ++ void Ustw(Register rd, const MemOperand& rs); ++ ++ void Uldl(Register rd, const MemOperand& rs); ++ void Ustl(Register rd, const MemOperand& rs); ++ ++ void Uflds(FPURegister fd, const MemOperand& rs, Register scratch); ++ void Ufsts(FPURegister fd, const MemOperand& rs, Register scratch); ++ ++ void Ufldd(FPURegister fd, const MemOperand& rs, Register scratch); ++ void Ufstd(FPURegister fd, const MemOperand& rs, Register scratch); ++ ++ void Ldb(Register rd, const MemOperand& rs); ++ void Ldbu(Register rd, const MemOperand& rs); ++ void Stb(Register rd, const MemOperand& rs); ++ ++ void Ldh(Register rd, const MemOperand& rs); ++ void Ldhu(Register rd, const MemOperand& rs); ++ void Sth(Register rd, const MemOperand& rs); ++ ++ void Ldw(Register rd, const MemOperand& rs); ++ void Ldwu(Register rd, const MemOperand& rs); ++ void Stw(Register rd, const MemOperand& rs); ++ ++ void Flds(FPURegister fd, const MemOperand& src); ++ void Fsts(FPURegister fs, const MemOperand& dst); ++ ++ void Fldd(FPURegister fd, const MemOperand& src); ++ void Fstd(FPURegister fs, const MemOperand& dst); ++ ++ void Ll(Register rd, const MemOperand& rs); ++ void Sc(Register rd, const MemOperand& rs); ++ ++ void Lld(Register rd, const MemOperand& rs); ++ void Scd(Register rd, const MemOperand& rs); ++ void Abs_sw(FPURegister fd, FPURegister fs); ++ ++ // Perform a floating-point min or max operation with the ++ // (IEEE-754-compatible) semantics of SW32's Release 6 MIN.fmt/MAX.fmt. ++ // Some cases, typically NaNs or +/-0.0, are expected to be rare and are ++ // handled in out-of-line code. The specific behaviour depends on supported ++ // instructions. ++ // ++ // These functions assume (and assert) that src1!=src2. It is permitted ++ // for the result to alias either input register. ++ void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2, ++ Label* out_of_line); ++ void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2, ++ Label* out_of_line); ++ void Float64Max(FPURegister dst, FPURegister src1, FPURegister src2, ++ Label* out_of_line); ++ void Float64Min(FPURegister dst, FPURegister src1, FPURegister src2, ++ Label* out_of_line); ++ ++ // Generate out-of-line cases for the macros above. 
++ void Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2); ++ void Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2); ++ void Float64MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2); ++ void Float64MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2); ++ ++ bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; } ++ ++ void mov(Register rd, Register rt) { if(rt != rd) bis(zero_reg, rt, rd); } ++ ++ inline void Move(Register dst, Handle handle) { li(dst, handle); } ++ inline void Move(Register dst, Smi smi) { li(dst, Operand(smi)); } ++ ++ inline void Move(Register dst, Register src) { ++ if (dst != src) { ++ mov(dst, src); ++ } ++ } ++ ++ inline void Move(FPURegister dst, FPURegister src) { Move_d(dst, src); } ++ ++ inline void Move(Register dst_low, Register dst_high, FPURegister src) { ++ fimovd(src, dst_low); ++ sral(dst_low, 32, dst_high); ++ addw(dst_low, 0, dst_low); ++ } ++ ++ inline void Move(Register dst, FPURegister src) { fimovd(src,dst); } ++ ++ inline void Move(FPURegister dst, Register src) { ifmovd(src, dst); } ++ ++ inline void FmoveHigh(Register dst_high, FPURegister src) { ++ fimovd(src, dst_high); ++ sral(dst_high, 32, dst_high); ++ } ++ ++ inline void FmoveHigh(FPURegister dst, Register src_high) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ Register scratch1 = t8; ++ DCHECK(src_high != scratch && src_high != scratch1); ++ fimovd(dst, scratch); ++ zapnot(scratch, 0xf, scratch); ++ slll(src_high, 32, scratch1); ++ or_ins(scratch, scratch1, scratch); ++ ifmovd(scratch, dst); ++ } ++ ++ inline void FmoveLow(Register dst_low, FPURegister src) { ++ fimovd(src, dst_low); ++ addw(dst_low, 0, dst_low); ++ } ++ ++ void FmoveLow(FPURegister dst, Register src_low); ++ ++ inline void Move(FPURegister dst, Register src_low, Register src_high) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(src_high != scratch && src_low != scratch); ++ slll(src_high, 32, scratch); ++ or_ins(scratch, src_low, scratch); ++ ifmovd(scratch, dst); ++ } ++ ++ inline void Move_d(FPURegister dst, FPURegister src) { ++ if (dst != src) { ++ fmovd(src, dst); ++ } ++ } ++ ++ inline void Move_s(FPURegister dst, FPURegister src) { ++ if (dst != src) { ++ fmovs(src, dst); ++ } ++ } ++ ++ void Move(FPURegister dst, float imm) { Move(dst, bit_cast(imm)); } ++ void Move(FPURegister dst, double imm) { Move(dst, bit_cast(imm)); } ++ void Move(FPURegister dst, uint32_t src); ++ void Move(FPURegister dst, uint64_t src); ++ ++ // DaddOverflow sets overflow register to a negative value if ++ // overflow occured, otherwise it is zero or positive ++ void DaddOverflow(Register dst, Register left, const Operand& right, ++ Register overflow); ++ // DsubOverflow sets overflow register to a negative value if ++ // overflow occured, otherwise it is zero or positive ++ void DsubOverflow(Register dst, Register left, const Operand& right, ++ Register overflow); ++ // MulOverflow sets overflow register to zero if no overflow occured ++ void MulOverflow(Register dst, Register left, const Operand& right, ++ Register overflow); ++ ++// Number of instructions needed for calculation of switch table entry address ++ static const int kSwitchTablePrologueSize = 5; // 4 + 1 (ALIGN may 1 nop) ++ ++ // GetLabelFunction must be lambda '[](size_t index) -> Label*' or a ++ // functor/function with 'Label *func(size_t index)' declaration. 
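GenerateSwitchTable, declared next and defined near the end of this header, emits a jump table: one 8-byte code address per case, indexed by the dispatch register. As a rough analogy in ordinary C++ (function pointers standing in for the emitted labels; this is an illustration, not the generated code):

    #include <cstdio>

    void Case0() { std::puts("case 0"); }
    void Case1() { std::puts("case 1"); }
    void Case2() { std::puts("case 2"); }

    int main() {
      // One entry per case, indexed by the dispatch value, like the dd(label)
      // entries GenerateSwitchTable emits after the address computation.
      void (*table[])() = {Case0, Case1, Case2};
      unsigned index = 2;
      if (index < sizeof(table) / sizeof(table[0])) table[index]();
      return 0;
    }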
++ template ++ void GenerateSwitchTable(Register index, size_t case_count, ++ Func GetLabelFunction); ++ ++ // Load an object from the root table. ++ void LoadRoot(Register destination, RootIndex index) override; ++ void LoadRoot(Register destination, RootIndex index, Condition cond, ++ Register src1, const Operand& src2); ++ ++ // If the value is a NaN, canonicalize the value else, do nothing. ++ void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src); ++ ++ // --------------------------------------------------------------------------- ++ // FPU macros. These do not handle special cases like NaN or +- inf. ++ ++ // Convert unsigned word to double. ++ void Cvt_d_uw(FPURegister fd, FPURegister fs); ++ void Cvt_d_uw(FPURegister fd, Register rs); ++ ++ // Convert unsigned long to double. ++ void Cvt_d_ul(FPURegister fd, FPURegister fs); ++ void Cvt_d_ul(FPURegister fd, Register rs); ++ ++ // Convert unsigned word to float. ++ void Cvt_s_uw(FPURegister fd, FPURegister fs); ++ void Cvt_s_uw(FPURegister fd, Register rs); ++ ++ // Convert unsigned long to float. ++ void Cvt_s_ul(FPURegister fd, FPURegister fs); ++ void Cvt_s_ul(FPURegister fd, Register rs); ++ ++ // Convert double to unsigned word. ++ void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch); ++ void Trunc_uw_d(Register rd, FPURegister fs, FPURegister scratch); ++ ++ // Convert double to unsigned long. ++ void Trunc_ul_d(FPURegister fd, FPURegister fs, FPURegister scratch, ++ Register result = no_reg); ++ void Trunc_ul_d(Register rd, FPURegister fs, FPURegister scratch, ++ Register result = no_reg); ++ ++ // Convert single to unsigned long. ++ void Trunc_ul_s(FPURegister fd, FPURegister fs, FPURegister scratch, ++ Register result = no_reg); ++ void Trunc_ul_s(Register rd, FPURegister fs, FPURegister scratch, ++ Register result = no_reg); ++ ++ // Round double functions ++ void Trunc_d_d(FPURegister fd, FPURegister fs); ++ void Round_d_d(FPURegister fd, FPURegister fs); ++ void Floor_d_d(FPURegister fd, FPURegister fs); ++ void Ceil_d_d(FPURegister fd, FPURegister fs); ++ ++ // Round float functions ++ void Trunc_s_s(FPURegister fd, FPURegister fs); ++ void Round_s_s(FPURegister fd, FPURegister fs); ++ void Floor_s_s(FPURegister fd, FPURegister fs); ++ void Ceil_s_s(FPURegister fd, FPURegister fs); ++ ++ void MSARoundW(MSARegister dst, MSARegister src, FPURoundingMode mode); ++ void MSARoundD(MSARegister dst, MSARegister src, FPURoundingMode mode); ++ ++ // Jump the register contains a smi. ++ void JumpIfSmi(Register value, Label* smi_label, Register scratch = at, ++ BranchDelaySlot bd = PROTECT); ++ ++ void JumpIfEqual(Register a, int32_t b, Label* dest) { ++ li(kScratchReg, Operand(b)); ++ Branch(dest, eq, a, Operand(kScratchReg)); ++ } ++ ++ void JumpIfLessThan(Register a, int32_t b, Label* dest) { ++ li(kScratchReg, Operand(b)); ++ Branch(dest, lt, a, Operand(kScratchReg)); ++ } ++ ++ // Push a standard frame, consisting of ra, fp, context and JS function. ++ void PushStandardFrame(Register function_reg); ++ ++ // Get the actual activation frame alignment for target environment. ++ static int ActivationFrameAlignment(); ++ ++ // Load Scaled Address instructions. Parameter sa (shift argument) must be ++ // between [1, 31] (inclusive). On pre-r6 architectures the scratch register ++ // may be clobbered. 
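Lsa/Dlsa, declared next, form a scaled-index address, i.e. base + (index << sa), which is the usual way to address element `index` of a table of (1 << sa)-byte entries. A trivial worked example of that arithmetic (the addresses are made up):

    #include <cassert>
    #include <cstdint>

    // Element `index` of a table of (1 << sa)-byte entries based at `base`.
    uint64_t ScaledAddress(uint64_t base, uint64_t index, unsigned sa) {
      return base + (index << sa);
    }

    int main() {
      const uint64_t base = 0x1000;
      assert(ScaledAddress(base, 0, 3) == 0x1000);  // element 0 of 8-byte slots
      assert(ScaledAddress(base, 5, 3) == 0x1028);  // element 5: 0x1000 + 5 * 8
      return 0;
    }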
++ void Lsa(Register rd, Register rs, Register rt, uint8_t sa, ++ Register scratch = at); ++ void Dlsa(Register rd, Register rs, Register rt, uint8_t sa, ++ Register scratch = at); ++ ++ // Compute the start of the generated instruction stream from the current PC. ++ // This is an alternative to embedding the {CodeObject} handle as a reference. ++ void ComputeCodeStartAddress(Register dst); ++ ++ void ResetSpeculationPoisonRegister(); ++ ++ // Control-flow integrity: ++ ++ // Define a function entrypoint. This doesn't emit any code for this ++ // architecture, as control-flow integrity is not supported for it. ++ void CodeEntry() {} ++ // Define an exception handler. ++ void ExceptionHandler() {} ++ // Define an exception handler and bind a label. ++ void BindExceptionHandler(Label* label) { bind(label); } ++ ++ protected: ++ inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch); ++ inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits); ++ ++ //20181127 Macro assembler to emit code. ++ MacroAssembler* masm() const { return masm_; } ++ ++ private: ++ bool has_double_zero_reg_set_ = false; ++ ++ // Performs a truncating conversion of a floating point number as used by ++ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it ++ // succeeds, otherwise falls through if result is saturated. On return ++ // 'result' either holds answer, or is clobbered on fall through. ++ void TryInlineTruncateDoubleToI(Register result, DoubleRegister input, ++ Label* done); ++ ++ void CompareF(SecondaryField sizeField, FPUCondition cc, FPURegister cmp1, ++ FPURegister cmp2); ++ ++ void CompareIsNanF(SecondaryField sizeField, FPURegister cmp1, ++ FPURegister cmp2); ++ ++ void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond, ++ MSARegister wt, BranchDelaySlot bd = PROTECT); ++ ++ void CallCFunctionHelper(Register function, int num_reg_arguments, ++ int num_double_arguments); ++ ++ // TODO(sw64) Reorder parameters so out parameters come last. 
++ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits); ++ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, ++ Register* scratch, const Operand& rt); ++ ++ void BranchShortHelperR6(int32_t offset, Label* L); ++ void BranchShortHelper(int32_t offset, Label* L, BranchDelaySlot bdslot); ++ bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond, ++ Register rs, const Operand& rt); ++ bool BranchShortHelper(int32_t offset, Label* L, Condition cond, Register rs, ++ const Operand& rt, BranchDelaySlot bdslot); ++ bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs, ++ const Operand& rt, BranchDelaySlot bdslot); ++ ++ void BranchAndLinkShortHelperR6(int32_t offset, Label* L); ++ void BranchAndLinkShortHelper(int32_t offset, Label* L, ++ BranchDelaySlot bdslot); ++ void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT); ++ void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT); ++ bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond, ++ Register rs, const Operand& rt); ++ bool BranchAndLinkShortHelper(int32_t offset, Label* L, Condition cond, ++ Register rs, const Operand& rt, ++ BranchDelaySlot bdslot); ++ bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond, ++ Register rs, const Operand& rt, ++ BranchDelaySlot bdslot); ++ void BranchLong(Label* L, BranchDelaySlot bdslot); ++ void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot); ++ ++ template ++ void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode, ++ RoundFunc round); ++ ++ template ++ void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode, ++ RoundFunc round); ++ ++ // Push a fixed frame, consisting of ra, fp. ++ void PushCommonFrame(Register marker_reg = no_reg); ++ ++ void CallRecordWriteStub(Register object, Register address, ++ RememberedSetAction remembered_set_action, ++ SaveFPRegsMode fp_mode, Handle code_target, ++ Address wasm_target); ++ ++ //20181127 ++ MacroAssembler* masm_; ++}; ++ ++class ScopeMark { ++ private: ++ TurboAssembler* _tasm; ++ char _begin[50]; ++ char _end[300]; ++ ++ public: ++ ++ ScopeMark(TurboAssembler* tasm, const char* position, const char* comment) : _tasm(tasm) { ++ ::sprintf(_begin, "{ %s", comment); ++ ::sprintf(_end, "} %s", position); ++ ++ _tasm->RecordComment(_begin); ++ } ++ ++ ~ScopeMark() { ++ _tasm->RecordComment(_end); ++ } ++}; ++ ++// MacroAssembler implements a collection of frequently used macros. ++class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { ++ public: ++ using TurboAssembler::TurboAssembler; ++ ++ // It assumes that the arguments are located below the stack pointer. ++ // argc is the number of arguments not including the receiver. ++ // TODO(victorgomes): Remove this function once we stick with the reversed ++ // arguments order. ++ void LoadReceiver(Register dest, Register argc) { ++ Ldl(dest, MemOperand(sp, 0)); ++ } ++ ++ void StoreReceiver(Register rec, Register argc, Register scratch) { ++ Stl(rec, MemOperand(sp, 0)); ++ } ++ ++ bool IsNear(Label* L, Condition cond, int rs_reg); ++ ++ // Swap two registers. If the scratch register is omitted then a slightly ++ // less efficient form using xor instead of mov is emitted. 
++ void Swap(Register reg1, Register reg2, Register scratch = no_reg); ++ ++ void PushRoot(RootIndex index) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ LoadRoot(scratch, index); ++ Push(scratch); ++ } ++ ++ // Compare the object in a register to a value and jump if they are equal. ++ void JumpIfRoot(Register with, RootIndex index, Label* if_equal) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ LoadRoot(scratch, index); ++ Branch(if_equal, eq, with, Operand(scratch)); ++ } ++ ++ // Compare the object in a register to a value and jump if they are not equal. ++ void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ LoadRoot(scratch, index); ++ Branch(if_not_equal, ne, with, Operand(scratch)); ++ } ++ ++ // Checks if value is in range [lower_limit, higher_limit] using a single ++ // comparison. ++ void JumpIfIsInRange(Register value, unsigned lower_limit, ++ unsigned higher_limit, Label* on_in_range); ++ ++ // --------------------------------------------------------------------------- ++ // GC Support ++ ++ // Notify the garbage collector that we wrote a pointer into an object. ++ // |object| is the object being stored into, |value| is the object being ++ // stored. value and scratch registers are clobbered by the operation. ++ // The offset is the offset from the start of the object, not the offset from ++ // the tagged HeapObject pointer. For use with FieldOperand(reg, off). ++ void RecordWriteField( ++ Register object, int offset, Register value, Register scratch, ++ RAStatus ra_status, SaveFPRegsMode save_fp, ++ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, ++ SmiCheck smi_check = INLINE_SMI_CHECK); ++ ++ // For a given |object| notify the garbage collector that the slot |address| ++ // has been written. |value| is the object being stored. The value and ++ // address registers are clobbered by the operation. ++ void RecordWrite( ++ Register object, Register address, Register value, RAStatus ra_status, ++ SaveFPRegsMode save_fp, ++ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, ++ SmiCheck smi_check = INLINE_SMI_CHECK); ++ ++ ++ // Convert double to unsigned long. ++ void Trunc_l_ud(FPURegister fd, FPURegister fs, FPURegister scratch); ++ ++ void Trunc_l_d(FPURegister fd, FPURegister fs); ++ void Round_l_d(FPURegister fd, FPURegister fs); ++ void Floor_l_d(FPURegister fd, FPURegister fs); ++ void Ceil_l_d(FPURegister fd, FPURegister fs); ++ ++ void Trunc_w_d(FPURegister fd, FPURegister fs); ++ void Round_w_d(FPURegister fd, FPURegister fs); ++ void Floor_w_d(FPURegister fd, FPURegister fs); ++ void Ceil_w_d(FPURegister fd, FPURegister fs); ++ ++ void Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft, ++ FPURegister scratch); ++ void Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft, ++ FPURegister scratch); ++ void Msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft, ++ FPURegister scratch); ++ void Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft, ++ FPURegister scratch); ++ ++ void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond, ++ MSARegister wt, BranchDelaySlot bd = PROTECT); ++ ++ // Truncates a double using a specific rounding mode, and writes the value ++ // to the result register. ++ // The except_flag will contain any exceptions caused by the instruction. 
++ // If check_inexact is kDontCheckForInexactConversion, then the inexact ++ // exception is masked. ++ void EmitFPUTruncate( ++ FPURoundingMode rounding_mode, Register result, ++ DoubleRegister double_input, Register scratch, ++ DoubleRegister double_scratch, Register except_flag, ++ CheckForInexactConversion check_inexact = kDontCheckForInexactConversion); ++ ++ // Enter exit frame. ++ // argc - argument count to be dropped by LeaveExitFrame. ++ // save_doubles - saves FPU registers on stack, currently disabled. ++ // stack_space - extra stack space. ++ void EnterExitFrame(bool save_doubles, int stack_space = 0, ++ StackFrame::Type frame_type = StackFrame::EXIT); ++ ++ // Leave the current exit frame. ++ void LeaveExitFrame(bool save_doubles, Register arg_count, ++ bool do_return = NO_EMIT_RETURN, ++ bool argument_count_is_length = false); ++ ++ void LoadMap(Register destination, Register object); ++ ++ // Make sure the stack is aligned. Only emits code in debug mode. ++ void AssertStackIsAligned(); ++ ++ // Load the global proxy from the current context. ++ void LoadGlobalProxy(Register dst) { ++ LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst); ++ } ++ ++ void LoadNativeContextSlot(int index, Register dst); ++ ++ // Load the initial map from the global function. The registers ++ // function and map can be the same, function is then overwritten. ++ void LoadGlobalFunctionInitialMap(Register function, ++ Register map, ++ Register scratch); ++ ++ // ------------------------------------------------------------------------- ++ // JavaScript invokes. ++ ++ // Invoke the JavaScript function code by either calling or jumping. ++ void InvokeFunctionCode(Register function, Register new_target, ++ Register expected_parameter_count, ++ Register actual_parameter_count, InvokeFlag flag); ++ ++ // On function call, call into the debugger if necessary. ++ void CheckDebugHook(Register fun, Register new_target, ++ Register expected_parameter_count, ++ Register actual_parameter_count); ++ ++ // Invoke the JavaScript function in the given register. Changes the ++ // current context to the context in the function before invoking. ++ void InvokeFunctionWithNewTarget(Register function, Register new_target, ++ Register actual_parameter_count, ++ InvokeFlag flag); ++ void InvokeFunction(Register function, Register expected_parameter_count, ++ Register actual_parameter_count, InvokeFlag flag); ++ ++ // Frame restart support. ++ void MaybeDropFrames(); ++ ++ // Exception handling. ++ ++ // Push a new stack handler and link into stack handler chain. ++ void PushStackHandler(); ++ ++ // Unlink the stack handler on top of the stack from the stack handler chain. ++ // Must preserve the result register. ++ void PopStackHandler(); ++ ++ // ------------------------------------------------------------------------- ++ // Support functions. ++ ++ void GetObjectType(Register function, ++ Register map, ++ Register type_reg); ++ ++ // ------------------------------------------------------------------------- ++ // Runtime calls. ++ ++ // Call a runtime routine. ++ void CallRuntime(const Runtime::Function* f, int num_arguments, ++ SaveFPRegsMode save_doubles = kDontSaveFPRegs); ++ ++ // Convenience function: Same as above, but takes the fid instead. 
++ void CallRuntime(Runtime::FunctionId fid, ++ SaveFPRegsMode save_doubles = kDontSaveFPRegs) { ++ const Runtime::Function* function = Runtime::FunctionForId(fid); ++ CallRuntime(function, function->nargs, save_doubles); ++ } ++ ++ // Convenience function: Same as above, but takes the fid instead. ++ void CallRuntime(Runtime::FunctionId fid, int num_arguments, ++ SaveFPRegsMode save_doubles = kDontSaveFPRegs) { ++ CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles); ++ } ++ ++ // Convenience function: tail call a runtime routine (jump). ++ void TailCallRuntime(Runtime::FunctionId fid); ++ ++ // Jump to the builtin routine. ++ void JumpToExternalReference(const ExternalReference& builtin, ++ BranchDelaySlot bd = PROTECT, ++ bool builtin_exit_frame = false); ++ ++ // Generates a trampoline to jump to the off-heap instruction stream. ++ void JumpToInstructionStream(Address entry); ++ ++ // --------------------------------------------------------------------------- ++ // In-place weak references. ++ void LoadWeakValue(Register out, Register in, Label* target_if_cleared); ++ ++ // ------------------------------------------------------------------------- ++ // StatsCounter support. ++ ++ void IncrementCounter(StatsCounter* counter, int value, ++ Register scratch1, Register scratch2); ++ void DecrementCounter(StatsCounter* counter, int value, ++ Register scratch1, Register scratch2); ++ ++ // ------------------------------------------------------------------------- ++ // Smi utilities. ++ ++ void SmiTag(Register dst, Register src) { ++ STATIC_ASSERT(kSmiTag == 0); ++ if (SmiValuesAre32Bits()) { ++ slll(src, 32 ,dst); ++ } else { ++ DCHECK(SmiValuesAre31Bits()); ++ Addw(dst, src, src); ++ } ++ } ++ ++ void SmiTag(Register reg) { ++ SmiTag(reg, reg); ++ } ++ ++ // Left-shifted from int32 equivalent of Smi. ++ void SmiScale(Register dst, Register src, int scale) { ++ if (SmiValuesAre32Bits()) { ++ // The int portion is upper 32-bits of 64-bit word. ++ sral(src, kSmiShift - scale, dst); ++ } else { ++ DCHECK(SmiValuesAre31Bits()); ++ DCHECK_GE(scale, kSmiTagSize); ++ Sllw(dst, src, scale - kSmiTagSize); ++ } ++ } ++ ++ // Test if the register contains a smi. ++ inline void SmiTst(Register value, Register scratch) { ++ And(scratch, value, Operand(kSmiTagMask)); ++ } ++ ++ // Jump if the register contains a non-smi. ++ void JumpIfNotSmi(Register value, ++ Label* not_smi_label, ++ Register scratch = at, ++ BranchDelaySlot bd = PROTECT); ++ ++ // Abort execution if argument is a smi, enabled via --debug-code. ++ void AssertNotSmi(Register object); ++ void AssertSmi(Register object); ++ ++ // Abort execution if argument is not a Constructor, enabled via --debug-code. ++ void AssertConstructor(Register object); ++ ++ // Abort execution if argument is not a JSFunction, enabled via --debug-code. ++ void AssertFunction(Register object); ++ ++ // Abort execution if argument is not a JSBoundFunction, ++ // enabled via --debug-code. ++ void AssertBoundFunction(Register object); ++ ++ // Abort execution if argument is not a JSGeneratorObject (or subclass), ++ // enabled via --debug-code. ++ void AssertGeneratorObject(Register object); ++ ++ // Abort execution if argument is not undefined or an AllocationSite, enabled ++ // via --debug-code. 
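SmiTag/SmiScale above (and SmiUntag earlier in this class) implement the 64-bit Smi layout in which the 32-bit integer value occupies the upper half of the word, as the slll/sral by 32 imply. A standalone sketch of tag and untag under that layout, assuming kSmiShift == 32:

    #include <cassert>
    #include <cstdint>

    constexpr int kSmiShift = 32;  // the integer value lives in the upper 32 bits

    // Tag: shift the 32-bit value into the upper half; the low word becomes 0.
    int64_t SmiTag(int32_t value) {
      return static_cast<int64_t>(static_cast<uint64_t>(static_cast<int64_t>(value)) << kSmiShift);
    }

    // Untag: an arithmetic shift right restores the signed 32-bit value.
    int32_t SmiUntag(int64_t smi) {
      return static_cast<int32_t>(smi >> kSmiShift);
    }

    int main() {
      assert(SmiUntag(SmiTag(12345)) == 12345);
      assert(SmiUntag(SmiTag(-7)) == -7);
      assert((SmiTag(42) & 0xFFFFFFFF) == 0);  // tag bits in the low word stay zero
      return 0;
    }

Because the Smi tag is zero, a tagged Smi always has its low bits clear, which is exactly what SmiTst checks by ANDing with kSmiTagMask.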
++ void AssertUndefinedOrAllocationSite(Register object, Register scratch); ++ ++ template ++ void DecodeField(Register dst, Register src) { ++ Ext(dst, src, Field::kShift, Field::kSize); ++ } ++ ++ template ++ void DecodeField(Register reg) { ++ DecodeField(reg, reg); ++ } ++ ++ private: ++ // Helper functions for generating invokes. ++ void InvokePrologue(Register expected_parameter_count, ++ Register actual_parameter_count, Label* done, ++ InvokeFlag flag); ++ ++ // Compute memory operands for safepoint stack slots. ++ static int SafepointRegisterStackIndex(int reg_code); ++ ++ // Needs access to SafepointRegisterStackIndex for compiled frame ++ // traversal. ++ friend class StandardFrame; ++ ++ DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler); ++}; ++ ++template ++void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count, ++ Func GetLabelFunction) { ++ // Ensure that dd-ed labels following this instruction use 8 bytes aligned ++ // addresses. ++ BlockTrampolinePoolFor(static_cast(case_count) * 2 + ++ kSwitchTablePrologueSize); ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ Align(8); ++ int instr_num = 3; //Added 20190117 ++ br(scratch, 0); ++ s8addl(index, scratch, scratch); // get_mem = cur_pc + index * 8 (kPointerSizeLog2); ++ Ldl(scratch, MemOperand(scratch, instr_num * v8::internal::kInstrSize)); ++ Assembler::jmp(zero_reg, scratch, 0); ++ for (size_t index = 0; index < case_count; ++index) { ++ dd(GetLabelFunction(index)); ++ } ++} ++ ++#define ACCESS_MASM(masm) masm-> ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_CODEGEN_SW64_MACRO_ASSEMBLER_SW64_H_ +diff --git a/src/3rdparty/chromium/v8/src/codegen/sw64/register-sw64.h b/src/3rdparty/chromium/v8/src/codegen/sw64/register-sw64.h +new file mode 100755 +index 000000000..ebcdf711c +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/codegen/sw64/register-sw64.h +@@ -0,0 +1,406 @@ ++// Copyright 2018 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#ifndef V8_CODEGEN_SW64_REGISTER_SW64_H_ ++#define V8_CODEGEN_SW64_REGISTER_SW64_H_ ++ ++#include "src/codegen/sw64/constants-sw64.h" ++#include "src/codegen/register.h" ++#include "src/codegen/reglist.h" ++ ++namespace v8 { ++namespace internal { ++ ++// clang-format off ++#define GENERAL_REGISTERS(V) \ ++ V(v0) \ ++ V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6) V(t7) \ ++ V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(fp) \ ++ V(a0) V(a1) V(a2) V(a3) V(a4) V(a5) \ ++ V(t8) V(t9) V(t10) V(t11) \ ++ V(ra) \ ++ V(t12) \ ++ V(at) \ ++ V(gp) \ ++ V(sp) \ ++ V(zero_reg) ++ ++// t7, t8 used as two scratch regs instead of s3, s4; so they ++// should not be added to allocatable lists. 
++#define ALLOCATABLE_GENERAL_REGISTERS(V) \ ++ V(a0) V(a1) V(a2) V(a3) V(a4) V(a5) \ ++ V(t0) V(t1) V(t2) V(t3) V(t5) V(t6) V(t9) V(t10) V(s5) \ ++ V(v0) V(t4) ++ ++#define DOUBLE_REGISTERS(V) \ ++ V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \ ++ V(f8) V(f9) V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) \ ++ V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \ ++ V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31) ++ ++#define FLOAT_REGISTERS DOUBLE_REGISTERS ++#define SIMD128_REGISTERS(V) \ ++ V(w0) V(w1) V(w2) V(w3) V(w4) V(w5) V(w6) V(w7) \ ++ V(w8) V(w9) V(w10) V(w11) V(w12) V(w13) V(w14) V(w15) \ ++ V(w16) V(w17) V(w18) V(w19) V(w20) V(w21) V(w22) V(w23) \ ++ V(w24) V(w25) V(w26) V(w27) V(w28) V(w29) V(w30) V(w31) ++ ++/* f27-f30 scratch fregisters */ ++#define ALLOCATABLE_DOUBLE_REGISTERS(V) \ ++ V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \ ++ V(f8) V(f9) V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) \ ++ V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \ ++ V(f24) V(f25) V(f26) ++// clang-format on ++ ++// Note that the bit values must match those used in actual instruction ++// encoding. ++const int kNumRegs = 32; ++ ++const RegList kJSCallerSaved = 1 << 0 | // v0 ++ 1 << 1 | // t0 ++ 1 << 2 | // t1 ++ 1 << 3 | // t2 ++ 1 << 4 | // t3 ++ 1 << 5 | // t4 ++ 1 << 6 | // t5 ++ 1 << 7 | // t6 ++ 1 << 8 | // t7 ++ 1 << 16 | // a0 ++ 1 << 17 | // a1 ++ 1 << 18 | // a2 ++ 1 << 19 | // a3 ++ 1 << 20 | // a4 ++ 1 << 21 | // a5 ++ 1 << 22 | // t8 ++ 1 << 23 | // t9 ++ 1 << 24; // t10 ++ ++const int kNumJSCallerSaved = 18; ++ ++// Callee-saved registers preserved when switching from C to JavaScript. ++const RegList kCalleeSaved = 1 << 9 | // s0 ++ 1 << 10 | // s1 ++ 1 << 11 | // s2 ++ 1 << 12 | // s3 ++ 1 << 13 | // s4 (roots in Javascript code) ++ 1 << 14 | // s5 (cp in Javascript code) ++ 1 << 15; // fp/s6 ++ ++const int kNumCalleeSaved = 7; ++ ++const RegList kCalleeSavedFPU = 1 << 2 | // f2 ++ 1 << 3 | // f3 ++ 1 << 4 | // f4 ++ 1 << 5 | // f5 ++ 1 << 6 | // f6 ++ 1 << 7 | // f7 ++ 1 << 8 | // f8 ++ 1 << 9; // f9 ++ ++const int kNumCalleeSavedFPU = 8; ++ ++const RegList kCallerSavedFPU = 1 << 0 | // f0 ++ 1 << 1 | // f1 ++ 1 << 10 | // f10 ++ 1 << 11 | // f11 ++ 1 << 12 | // f12 ++ 1 << 13 | // f13 ++ 1 << 14 | // f14 ++ 1 << 15 | // f15 ++ 1 << 16 | // f16 ++ 1 << 17 | // f17 ++ 1 << 18 | // f18 ++ 1 << 19 | // f19 ++ 1 << 20 | // f20 ++ 1 << 21 | // f21 ++ 1 << 22 | // f22 ++ 1 << 23 | // f23 ++ 1 << 24 | // f24 ++ 1 << 25 | // f25 ++ 1 << 26 | // f26 ++ 1 << 27 | // f27 ++ 1 << 28 | // f28 ++ 1 << 29 | // f29 ++ 1 << 30; // f30 ++ ++// Number of registers for which space is reserved in safepoints. Must be a ++// multiple of 8. ++const int kNumSafepointRegisters = 32; ++ ++// Define the list of registers actually saved at safepoints. ++// Note that the number of saved registers may be smaller than the reserved ++// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters. ++const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved; ++const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved; ++ ++const int kUndefIndex = -1; ++// Map with indexes on stack that corresponds to codes of saved registers. ++const int kSafepointRegisterStackIndexMap[kNumRegs] = {0, // v0 ++ 1, // t0 ++ 2, // t1 ++ 3, // t2 ++ 4, // t3 ++ 5, // t4 ++ 6, // t5 ++ 7, // t6 ++ 8, // t7 ++ 9, // s0 ++ 10, // s1 ++ 11, // s2 ++ 12, // s3 ++ 13, // s4 ++ 14, // s5 ++ 24, // fp <---- a trick? 
++ 15, // a0 ++ 16, // a1 ++ 17, // a2 ++ 18, // a3 ++ 19, // a4 ++ 20, // a5 ++ 21, // t8 ++ 22, // t9 ++ 23, // t10 ++ kUndefIndex, // t11 ++ kUndefIndex, // ra ++ kUndefIndex, // t12 ++ kUndefIndex, // at ++ kUndefIndex, // gp ++ kUndefIndex, // sp ++ kUndefIndex}; // zero_reg ++ ++// CPU Registers. ++// ++// 1) We would prefer to use an enum, but enum values are assignment- ++// compatible with int, which has caused code-generation bugs. ++// ++// 2) We would prefer to use a class instead of a struct but we don't like ++// the register initialization to depend on the particular initialization ++// order (which appears to be different on OS X, Linux, and Windows for the ++// installed versions of C++ we tried). Using a struct permits C-style ++// "initialization". Also, the Register objects cannot be const as this ++// forces initialization stubs in MSVC, making us dependent on initialization ++// order. ++// ++// 3) By not using an enum, we are possibly preventing the compiler from ++// doing certain constant folds, which may significantly reduce the ++// code generated for some assembly instructions (because they boil down ++// to a few constants). If this is a problem, we could change the code ++// such that we use an enum in optimized mode, and the struct in debug ++// mode. This way we get the compile-time error checking in debug mode ++// and best performance in optimized code. ++ ++// ----------------------------------------------------------------------------- ++// Implementation of Register and FPURegister. ++ ++enum RegisterCode { ++#define REGISTER_CODE(R) kRegCode_##R, ++ GENERAL_REGISTERS(REGISTER_CODE) ++#undef REGISTER_CODE ++ kRegAfterLast ++}; ++ ++class Register : public RegisterBase { ++ public: ++#if defined(V8_TARGET_LITTLE_ENDIAN) ++ static constexpr int kMantissaOffset = 0; ++ static constexpr int kExponentOffset = 4; ++#else ++#error Unknown endianness ++#endif ++ ++ private: ++ friend class RegisterBase; ++ explicit constexpr Register(int code) : RegisterBase(code) {} ++}; ++ ++// s5: context register ++// t7: lithium scratch ++// t8: lithium scratch2 ++#define DECLARE_REGISTER(R) \ ++ constexpr Register R = Register::from_code(kRegCode_##R); ++GENERAL_REGISTERS(DECLARE_REGISTER) ++#undef DECLARE_REGISTER ++ ++constexpr Register no_reg = Register::no_reg(); ++ ++int ToNumber(Register reg); ++ ++Register ToRegister(int num); ++ ++constexpr bool kPadArguments = false; ++constexpr bool kSimpleFPAliasing = true; ++constexpr bool kSimdMaskRegisters = false; ++ ++enum DoubleRegisterCode { ++#define REGISTER_CODE(R) kDoubleCode_##R, ++ DOUBLE_REGISTERS(REGISTER_CODE) ++#undef REGISTER_CODE ++ kDoubleAfterLast ++}; ++ ++// Coprocessor register. ++class FPURegister : public RegisterBase { ++ public: ++ // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers ++ // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to ++ // number of Double regs (64-bit regs, or FPU-reg-pairs). ++ ++ private: ++ friend class RegisterBase; ++ explicit constexpr FPURegister(int code) : RegisterBase(code) {} ++}; ++ ++enum MSARegisterCode { ++#define REGISTER_CODE(R) kMsaCode_##R, ++ SIMD128_REGISTERS(REGISTER_CODE) ++#undef REGISTER_CODE ++ kMsaAfterLast ++}; ++ ++// SW64 SIMD (MSA) register ++class MSARegister : public RegisterBase { ++ friend class RegisterBase; ++ explicit constexpr MSARegister(int code) : RegisterBase(code) {} ++}; ++ ++// A few double registers are reserved: one as a scratch register and one to ++// hold 0.0. 
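The RegList constants earlier in this file (kJSCallerSaved, kCalleeSaved, and the FPU variants) are plain bitmasks with one bit per register code. A small sketch of how such a mask is built and queried; the register codes used here follow the comments on those constants but the authoritative values are in the header:

    #include <bitset>
    #include <cassert>
    #include <cstdint>

    using RegList = uint32_t;  // one bit per register code, as in reglist.h

    int main() {
      // Codes per the comments on kJSCallerSaved/kCalleeSaved: v0 = 0, t0 = 1,
      // s0 = 9, a0 = 16.
      const int kV0 = 0, kT0 = 1, kS0 = 9, kA0 = 16;
      RegList caller_saved = (1u << kV0) | (1u << kT0) | (1u << kA0);

      assert(caller_saved & (1u << kA0));                   // a0 is in the set
      assert(!(caller_saved & (1u << kS0)));                // s0 is callee-saved, not here
      assert(std::bitset<32>(caller_saved).count() == 3);   // size of the set
      return 0;
    }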
++// f31: 0.0 ++// f30: scratch register. ++ ++// V8 now supports the O32 ABI, and the FPU Registers are organized as 32 ++// 32-bit registers, f0 through f31. When used as 'double' they are used ++// in pairs, starting with the even numbered register. So a double operation ++// on f0 really uses f0 and f1. ++// (Modern sw64 hardware also supports 32 64-bit registers, via setting ++// (privileged) Status Register FR bit to 1. This is used by the N32 ABI, ++// but it is not in common use. Someday we will want to support this in v8.) ++ ++// For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers. ++using FloatRegister = FPURegister; ++ ++using DoubleRegister = FPURegister; ++ ++#define DECLARE_DOUBLE_REGISTER(R) \ ++ constexpr DoubleRegister R = DoubleRegister::from_code(kDoubleCode_##R); ++DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER) ++#undef DECLARE_DOUBLE_REGISTER ++ ++constexpr DoubleRegister no_dreg = DoubleRegister::no_reg(); ++ ++// SIMD registers. ++using Simd128Register = MSARegister; ++ ++#define DECLARE_SIMD128_REGISTER(R) \ ++ constexpr Simd128Register R = Simd128Register::from_code(kMsaCode_##R); ++SIMD128_REGISTERS(DECLARE_SIMD128_REGISTER) ++#undef DECLARE_SIMD128_REGISTER ++ ++const Simd128Register no_msareg = Simd128Register::no_reg(); ++ ++// Register aliases. ++// cp is assumed to be a callee saved register. ++constexpr Register kRootRegister = s4; ++constexpr Register cp = s5; ++constexpr Register kScratchReg = t7; ++constexpr Register kScratchReg2 = t8; ++constexpr DoubleRegister kDoubleRegZero = f31; ++constexpr DoubleRegister kScratchDoubleReg = f29; // change f30 to f29, use f28, f29 as scratch ++constexpr DoubleRegister kScratchDoubleReg1 = f28; ++constexpr DoubleRegister kScratchDoubleReg2 = f27; ++// Used on sw64r3 for compare operations. ++// We use the last non-callee saved odd register for N64 ABI ++constexpr DoubleRegister kDoubleCompareReg = f30; // change f28 to f30 ++// MSA zero and scratch regs must have the same numbers as FPU zero and scratch ++constexpr Simd128Register kSimd128RegZero = w31; // may be used as scratch ++constexpr Simd128Register kSimd128ScratchReg = w30; ++ ++// FPU (coprocessor 1) control registers. ++// Currently only FCSR (#31) is implemented. ++struct FPUControlRegister { ++ bool is_valid() const { return reg_code == kFCSRRegister; } ++ bool is(FPUControlRegister creg) const { return reg_code == creg.reg_code; } ++ int code() const { ++ DCHECK(is_valid()); ++ return reg_code; ++ } ++ int bit() const { ++ DCHECK(is_valid()); ++ return 1 << reg_code; ++ } ++ void setcode(int f) { ++ reg_code = f; ++ DCHECK(is_valid()); ++ } ++ // Unfortunately we can't make this private in a struct. ++ int reg_code; ++}; ++ ++constexpr FPUControlRegister no_fpucreg = {kInvalidFPUControlRegister}; ++constexpr FPUControlRegister FCSR = {kFCSRRegister}; ++ ++// MSA control registers ++struct MSAControlRegister { ++ bool is_valid() const { ++ return (reg_code == kMSAIRRegister) || (reg_code == kMSACSRRegister); ++ } ++ bool is(MSAControlRegister creg) const { return reg_code == creg.reg_code; } ++ int code() const { ++ DCHECK(is_valid()); ++ return reg_code; ++ } ++ int bit() const { ++ DCHECK(is_valid()); ++ return 1 << reg_code; ++ } ++ void setcode(int f) { ++ reg_code = f; ++ DCHECK(is_valid()); ++ } ++ // Unfortunately we can't make this private in a struct. 
++ int reg_code; ++}; ++ ++constexpr MSAControlRegister no_msacreg = {kInvalidMSAControlRegister}; ++constexpr MSAControlRegister MSAIR = {kMSAIRRegister}; ++constexpr MSAControlRegister MSACSR = {kMSACSRRegister}; ++ ++// Define {RegisterName} methods for the register types. ++DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS) ++DEFINE_REGISTER_NAMES(FPURegister, DOUBLE_REGISTERS) ++DEFINE_REGISTER_NAMES(MSARegister, SIMD128_REGISTERS) ++ ++// Give alias names to registers for calling conventions. ++constexpr Register kReturnRegister0 = v0; ++constexpr Register kReturnRegister1 = a5; // v1; ++constexpr Register kReturnRegister2 = a0; ++constexpr Register kJSFunctionRegister = a1; ++constexpr Register kContextRegister = s5; // s7; ++constexpr Register kAllocateSizeRegister = a0; ++constexpr Register kSpeculationPoisonRegister = t10; // a7; ++constexpr Register kInterpreterAccumulatorRegister = v0; ++constexpr Register kInterpreterBytecodeOffsetRegister = t0; ++constexpr Register kInterpreterBytecodeArrayRegister = t1; ++constexpr Register kInterpreterDispatchTableRegister = t2; ++ ++constexpr Register kJavaScriptCallArgCountRegister = a0; ++constexpr Register kJavaScriptCallCodeStartRegister = a2; ++constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister; ++constexpr Register kJavaScriptCallNewTargetRegister = a3; ++constexpr Register kJavaScriptCallExtraArg1Register = a2; ++ ++constexpr Register kOffHeapTrampolineRegister = at; ++constexpr Register kRuntimeCallFunctionRegister = a1; ++constexpr Register kRuntimeCallArgCountRegister = a0; ++constexpr Register kRuntimeCallArgvRegister = a2; ++constexpr Register kWasmInstanceRegister = a0; ++constexpr Register kWasmCompileLazyFuncIndexRegister = t0; ++ ++constexpr DoubleRegister kFPReturnRegister0 = f0; ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_SW64_REGISTER_SW64_H_ +diff --git a/src/3rdparty/chromium/v8/src/common/globals.h b/src/3rdparty/chromium/v8/src/common/globals.h +index c79b3b633..37dc14d86 100644 +--- a/src/3rdparty/chromium/v8/src/common/globals.h ++++ b/src/3rdparty/chromium/v8/src/common/globals.h +@@ -58,6 +58,9 @@ constexpr int GB = MB * 1024; + #if (V8_TARGET_ARCH_S390 && !V8_HOST_ARCH_S390) + #define USE_SIMULATOR 1 + #endif ++#if (V8_TARGET_ARCH_SW64 && !V8_HOST_ARCH_SW64) ++#define USE_SIMULATOR 1 ++#endif + #endif + + // Determine whether the architecture uses an embedded constant pool +diff --git a/src/3rdparty/chromium/v8/src/compiler/backend/instruction-codes.h b/src/3rdparty/chromium/v8/src/compiler/backend/instruction-codes.h +index 84d5d249b..f04aa60c8 100644 +--- a/src/3rdparty/chromium/v8/src/compiler/backend/instruction-codes.h ++++ b/src/3rdparty/chromium/v8/src/compiler/backend/instruction-codes.h +@@ -23,6 +23,8 @@ + #include "src/compiler/backend/ppc/instruction-codes-ppc.h" + #elif V8_TARGET_ARCH_S390 + #include "src/compiler/backend/s390/instruction-codes-s390.h" ++#elif V8_TARGET_ARCH_SW64 ++#include "src/compiler/backend/sw64/instruction-codes-sw64.h" + #else + #define TARGET_ARCH_OPCODE_LIST(V) + #define TARGET_ADDRESSING_MODE_LIST(V) +diff --git a/src/3rdparty/chromium/v8/src/compiler/backend/instruction-selector.cc b/src/3rdparty/chromium/v8/src/compiler/backend/instruction-selector.cc +index 7d72dbbf2..697d70bbf 100644 +--- a/src/3rdparty/chromium/v8/src/compiler/backend/instruction-selector.cc ++++ b/src/3rdparty/chromium/v8/src/compiler/backend/instruction-selector.cc +@@ -2573,7 +2573,7 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* 
node) { + #endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS + + #if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \ +- !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64 ++ !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_SW64 + void InstructionSelector::VisitWord64AtomicLoad(Node* node) { UNIMPLEMENTED(); } + + void InstructionSelector::VisitWord64AtomicStore(Node* node) { +@@ -2611,7 +2611,7 @@ void InstructionSelector::VisitI64x2ReplaceLaneI32Pair(Node* node) { + #endif // !V8_TARGET_ARCH_IA32 + + #if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X +-#if !V8_TARGET_ARCH_ARM64 ++#if !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_SW64 + void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); } + void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); } + void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); } +diff --git a/src/3rdparty/chromium/v8/src/compiler/backend/sw64/OWNERS b/src/3rdparty/chromium/v8/src/compiler/backend/sw64/OWNERS +new file mode 100755 +index 000000000..42582e993 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/compiler/backend/sw64/OWNERS +@@ -0,0 +1,3 @@ ++ivica.bogosavljevic@sw64.com ++Miran.Karic@sw64.com ++sreten.kovacevic@sw64.com +diff --git a/src/3rdparty/chromium/v8/src/compiler/backend/sw64/code-generator-sw64.cc b/src/3rdparty/chromium/v8/src/compiler/backend/sw64/code-generator-sw64.cc +new file mode 100755 +index 000000000..02840f457 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/compiler/backend/sw64/code-generator-sw64.cc +@@ -0,0 +1,4209 @@ ++// Copyright 2014 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#include "src/codegen/assembler-inl.h" ++#include "src/codegen/callable.h" ++#include "src/codegen/macro-assembler.h" ++#include "src/codegen/sw64/constants-sw64.h" ++#include "src/codegen/optimized-compilation-info.h" ++#include "src/compiler/backend/code-generator-impl.h" ++#include "src/compiler/backend/code-generator.h" ++#include "src/compiler/backend/gap-resolver.h" ++#include "src/compiler/node-matchers.h" ++#include "src/compiler/osr.h" ++#include "src/heap/memory-chunk.h" ++#include "src/wasm/wasm-code-manager.h" ++ ++namespace v8 { ++namespace internal { ++namespace compiler { ++ ++#define __ tasm()-> ++ ++// TODO(plind): consider renaming these macros. ++#define TRACE_MSG(msg) \ ++ PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \ ++ __LINE__) ++ ++#define TRACE_UNIMPL() \ ++ PrintF("UNIMPLEMENTED code_generator_sw64: %s at line %d\n", __FUNCTION__, \ ++ __LINE__) ++ ++// Adds Sw64-specific methods to convert InstructionOperands. ++class Sw64OperandConverter final : public InstructionOperandConverter { ++ public: ++ Sw64OperandConverter(CodeGenerator* gen, Instruction* instr) ++ : InstructionOperandConverter(gen, instr) {} ++ ++ FloatRegister OutputSingleRegister(size_t index = 0) { ++ return ToSingleRegister(instr_->OutputAt(index)); ++ } ++ ++ FloatRegister InputSingleRegister(size_t index) { ++ return ToSingleRegister(instr_->InputAt(index)); ++ } ++ ++ FloatRegister ToSingleRegister(InstructionOperand* op) { ++ // Single (Float) and Double register namespace is same on SW64, ++ // both are typedefs of FPURegister. 
++ return ToDoubleRegister(op); ++ } ++ ++ Register InputOrZeroRegister(size_t index) { ++ if (instr_->InputAt(index)->IsImmediate()) { ++ DCHECK_EQ(0, InputInt32(index)); ++ return zero_reg; ++ } ++ return InputRegister(index); ++ } ++ ++ DoubleRegister InputOrZeroDoubleRegister(size_t index) { ++ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero; ++ ++ return InputDoubleRegister(index); ++ } ++ ++ DoubleRegister InputOrZeroSingleRegister(size_t index) { ++ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero; ++ ++ return InputSingleRegister(index); ++ } ++ ++ Operand InputImmediate(size_t index) { ++ Constant constant = ToConstant(instr_->InputAt(index)); ++ switch (constant.type()) { ++ case Constant::kInt32: ++ return Operand(constant.ToInt32()); ++ case Constant::kInt64: ++ return Operand(constant.ToInt64()); ++ case Constant::kFloat32: ++ return Operand::EmbeddedNumber(constant.ToFloat32()); ++ case Constant::kFloat64: ++ return Operand::EmbeddedNumber(constant.ToFloat64().value()); ++ case Constant::kExternalReference: ++ case Constant::kCompressedHeapObject: ++ case Constant::kHeapObject: ++ // TODO(plind): Maybe we should handle ExtRef & HeapObj here? ++ // maybe not done on arm due to const pool ?? ++ break; ++ case Constant::kDelayedStringConstant: ++ return Operand::EmbeddedStringConstant( ++ constant.ToDelayedStringConstant()); ++ case Constant::kRpoNumber: ++ UNREACHABLE(); // TODO(titzer): RPO immediates on sw64? ++ break; ++ } ++ UNREACHABLE(); ++ } ++ ++ Operand InputOperand(size_t index) { ++ InstructionOperand* op = instr_->InputAt(index); ++ if (op->IsRegister()) { ++ return Operand(ToRegister(op)); ++ } ++ return InputImmediate(index); ++ } ++ ++ MemOperand MemoryOperand(size_t* first_index) { ++ const size_t index = *first_index; ++ switch (AddressingModeField::decode(instr_->opcode())) { ++ case kMode_None: ++ break; ++ case kMode_MRI: ++ *first_index += 2; ++ return MemOperand(InputRegister(index + 0), InputInt32(index + 1)); ++ case kMode_MRR: ++ // TODO(plind): r6 address mode, to be implemented ... ++ UNREACHABLE(); ++ } ++ UNREACHABLE(); ++ } ++ ++ MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); } ++ ++ MemOperand ToMemOperand(InstructionOperand* op) const { ++ DCHECK_NOT_NULL(op); ++ DCHECK(op->IsStackSlot() || op->IsFPStackSlot()); ++ return SlotToMemOperand(AllocatedOperand::cast(op)->index()); ++ } ++ ++ MemOperand SlotToMemOperand(int slot) const { ++ FrameOffset offset = frame_access_state()->GetFrameOffset(slot); ++ return MemOperand(offset.from_stack_pointer() ? 
sp : fp, offset.offset()); ++ } ++}; ++ ++static inline bool HasRegisterInput(Instruction* instr, size_t index) { ++ return instr->InputAt(index)->IsRegister(); ++} ++ ++namespace { ++ ++class OutOfLineRecordWrite final : public OutOfLineCode { ++ public: ++ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index, ++ Register value, Register scratch0, Register scratch1, ++ RecordWriteMode mode, StubCallMode stub_mode) ++ : OutOfLineCode(gen), ++ object_(object), ++ index_(index), ++ value_(value), ++ scratch0_(scratch0), ++ scratch1_(scratch1), ++ mode_(mode), ++ stub_mode_(stub_mode), ++ must_save_lr_(!gen->frame_access_state()->has_frame()), ++ zone_(gen->zone()) {} ++ ++ void Generate() final { ++ if (mode_ > RecordWriteMode::kValueIsPointer) { ++ __ JumpIfSmi(value_, exit()); ++ } ++ __ CheckPageFlag(value_, scratch0_, ++ MemoryChunk::kPointersToHereAreInterestingMask, eq, ++ exit()); ++ __ Addl(scratch1_, object_, index_); ++ RememberedSetAction const remembered_set_action = ++ mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET ++ : OMIT_REMEMBERED_SET; ++ SaveFPRegsMode const save_fp_mode = ++ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs; ++ if (must_save_lr_) { ++ // We need to save and restore ra if the frame was elided. ++ __ Push(ra); ++ } ++ if (mode_ == RecordWriteMode::kValueIsEphemeronKey) { ++ __ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode); ++ } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) { ++ // A direct call to a wasm runtime stub defined in this module. ++ // Just encode the stub index. This will be patched when the code ++ // is added to the native module and copied into wasm code space. ++ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action, ++ save_fp_mode, wasm::WasmCode::kRecordWrite); ++ } else { ++ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action, ++ save_fp_mode); ++ } ++ if (must_save_lr_) { ++ __ Pop(ra); ++ } ++ } ++ ++ private: ++ Register const object_; ++ Register const index_; ++ Register const value_; ++ Register const scratch0_; ++ Register const scratch1_; ++ RecordWriteMode const mode_; ++ StubCallMode const stub_mode_; ++ bool must_save_lr_; ++ Zone* zone_; ++}; ++ ++#define CREATE_OOL_CLASS(ool_name, tasm_ool_name, T) \ ++ class ool_name final : public OutOfLineCode { \ ++ public: \ ++ ool_name(CodeGenerator* gen, T dst, T src1, T src2) \ ++ : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \ ++ \ ++ void Generate() final { __ tasm_ool_name(dst_, src1_, src2_); } \ ++ \ ++ private: \ ++ T const dst_; \ ++ T const src1_; \ ++ T const src2_; \ ++ } ++ ++CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister); ++CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister); ++CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, FPURegister); ++CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, FPURegister); ++ ++#undef CREATE_OOL_CLASS ++ ++Condition FlagsConditionToConditionCmp(FlagsCondition condition) { ++ switch (condition) { ++ case kEqual: ++ return eq; ++ case kNotEqual: ++ return ne; ++ case kSignedLessThan: ++ return lt; ++ case kSignedGreaterThanOrEqual: ++ return ge; ++ case kSignedLessThanOrEqual: ++ return le; ++ case kSignedGreaterThan: ++ return gt; ++ case kUnsignedLessThan: ++ return lo; ++ case kUnsignedGreaterThanOrEqual: ++ return hs; ++ case kUnsignedLessThanOrEqual: ++ return ls; ++ case kUnsignedGreaterThan: ++ return hi; ++ case kUnorderedEqual: ++ case kUnorderedNotEqual: 
++ break; ++ default: ++ break; ++ } ++ UNREACHABLE(); ++} ++ ++Condition FlagsConditionToConditionTst(FlagsCondition condition) { ++ switch (condition) { ++ case kNotEqual: ++ return ne; ++ case kEqual: ++ return eq; ++ default: ++ break; ++ } ++ UNREACHABLE(); ++} ++ ++Condition FlagsConditionToConditionOvf(FlagsCondition condition) { ++ switch (condition) { ++ case kOverflow: ++ return ne; ++ case kNotOverflow: ++ return eq; ++ default: ++ break; ++ } ++ UNREACHABLE(); ++} ++ ++FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, ++ FlagsCondition condition) { ++ switch (condition) { ++ case kEqual: ++ *predicate = true; ++ return EQ; ++ case kNotEqual: ++ *predicate = false; ++ return EQ; ++ case kUnsignedLessThan: ++ *predicate = true; ++ return OLT; ++ case kUnsignedGreaterThanOrEqual: ++ *predicate = false; ++ return OLT; ++ case kUnsignedLessThanOrEqual: ++ *predicate = true; ++ return OLE; ++ case kUnsignedGreaterThan: ++ *predicate = false; ++ return OLE; ++ case kUnorderedEqual: ++ case kUnorderedNotEqual: ++ *predicate = true; ++ break; ++ default: ++ *predicate = true; ++ break; ++ } ++ UNREACHABLE(); ++} ++ ++void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, ++ InstructionCode opcode, Instruction* instr, ++ Sw64OperandConverter const& i) { ++ const MemoryAccessMode access_mode = ++ static_cast(MiscField::decode(opcode)); ++ if (access_mode == kMemoryAccessPoisoned) { ++ Register value = i.OutputRegister(); ++ codegen->tasm()->And(value, value, kSpeculationPoisonRegister); ++ } ++} ++ ++} // namespace ++ ++#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \ ++ do { \ ++ __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \ ++ /*__ memb();*/ \ ++ } while (0) ++ ++#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \ ++ do { \ ++ /*__ memb();*/ \ ++ __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \ ++ /*__ memb();*/ \ ++ } while (0) ++ ++#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr) \ ++ do { \ ++ Label binop; \ ++ __ Addl(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ ++ /*__ memb();*/ \ ++ __ bind(&binop); \ ++ __ load_linked(i.OutputRegister(0), 0, i.TempRegister(0)); \ ++ __ ldi(gp, 1, zero_reg); \ ++ __ wr_f(gp); \ ++ __ bin_instr(i.TempRegister(1), i.OutputRegister(0), \ ++ Operand(i.InputRegister(2))); \ ++ __ Align(8); \ ++ __ store_conditional(i.TempRegister(1), 0, i.TempRegister(0)); \ ++ __ rd_f(i.TempRegister(1)); \ ++ __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \ ++ /*__ memb();*/ \ ++ } while (0) ++ ++#define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, sign_extend, \ ++ size, bin_instr, representation) \ ++ do { \ ++ Label binop; \ ++ __ addl(i.InputRegister(0), i.InputRegister(1),i.TempRegister(0)); \ ++ if (representation == 32) { \ ++ __ and_ins(i.TempRegister(0), 0x3,i.TempRegister(3)); \ ++ } else { \ ++ DCHECK_EQ(representation, 64); \ ++ __ and_ins(i.TempRegister(0), 0x7,i.TempRegister(3)); \ ++ } \ ++ __ Subl(i.TempRegister(0), i.TempRegister(0), \ ++ Operand(i.TempRegister(3))); \ ++ __ slll(i.TempRegister(3), 3, i.TempRegister(3)); \ ++ /*__ memb();*/ \ ++ __ bind(&binop); \ ++ __ load_linked(i.TempRegister(1), 0, i.TempRegister(0)); \ ++ __ ldi(gp, 1, zero_reg); \ ++ __ wr_f(gp); \ ++ __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3), \ ++ size, sign_extend); \ ++ __ bin_instr(i.TempRegister(2), i.OutputRegister(0), \ ++ Operand(i.InputRegister(2))); \ ++ __ InsertBits(i.TempRegister(1), i.TempRegister(2), i.TempRegister(3), \ ++ 
size); \ ++ __ Align(8); \ ++ __ store_conditional(i.TempRegister(1), 0, i.TempRegister(0)); \ ++ __ rd_f(i.TempRegister(1)); \ ++ __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \ ++ /*__ memb();*/ \ ++ } while (0) ++ ++#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_linked, store_conditional) \ ++ do { \ ++ Label exchange; \ ++ /*__ memb();*/ \ ++ __ bind(&exchange); \ ++ __ addl(i.InputRegister(0), i.InputRegister(1),i.TempRegister(0)); \ ++ __ load_linked(i.OutputRegister(0), 0, i.TempRegister(0)); \ ++ __ ldi(gp, 1, zero_reg); \ ++ __ wr_f(gp); \ ++ __ mov(i.TempRegister(1), i.InputRegister(2)); \ ++ __ Align(8); \ ++ __ store_conditional(i.TempRegister(1), 0, i.TempRegister(0)); \ ++ __ rd_f(i.TempRegister(1)); \ ++ __ BranchShort(&exchange, eq, i.TempRegister(1), Operand(zero_reg)); \ ++ /*__ memb();*/ \ ++ } while (0) ++ ++#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT( \ ++ load_linked, store_conditional, sign_extend, size, representation) \ ++ do { \ ++ Label exchange; \ ++ __ addl(i.InputRegister(0), i.InputRegister(1),i.TempRegister(0)); \ ++ if (representation == 32) { \ ++ __ and_ins(i.TempRegister(0), 0x3,i.TempRegister(1)); \ ++ } else { \ ++ DCHECK_EQ(representation, 64); \ ++ __ and_ins(i.TempRegister(0), 0x7,i.TempRegister(1)); \ ++ } \ ++ __ Subl(i.TempRegister(0), i.TempRegister(0), \ ++ Operand(i.TempRegister(1))); \ ++ __ Sllw(i.TempRegister(1), i.TempRegister(1), 3); \ ++ /*__ memb();*/ \ ++ __ bind(&exchange); \ ++ __ load_linked(i.TempRegister(2), 0, i.TempRegister(0)); \ ++ __ ldi(gp, 1, zero_reg); \ ++ __ wr_f(gp); \ ++ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \ ++ size, sign_extend); \ ++ __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1), \ ++ size); \ ++ __ Align(8); \ ++ __ store_conditional(i.TempRegister(2), 0, i.TempRegister(0)); \ ++ __ rd_f(i.TempRegister(2)); \ ++ __ BranchShort(&exchange, eq, i.TempRegister(2), Operand(zero_reg)); \ ++ /*__ memb();*/ \ ++ } while (0) ++ ++ ++#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked, \ ++ store_conditional) \ ++ do { \ ++ Label compareExchange; \ ++ Label exit; \ ++ __ addl(i.InputRegister(0), i.InputRegister(1),i.TempRegister(0)); \ ++ /*__ memb();*/ \ ++ __ bind(&compareExchange); \ ++ __ load_linked(i.OutputRegister(0), 0, i.TempRegister(0)); \ ++ __ cmpeq(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(1)); \ ++ __ wr_f(i.TempRegister(1)); \ ++ __ mov(i.TempRegister(2), i.InputRegister(3)); \ ++ __ Align(8); \ ++ __ store_conditional(i.TempRegister(2), 0, i.TempRegister(0)); \ ++ __ rd_f(i.TempRegister(2)); \ ++ __ beq(i.TempRegister(1), &exit); \ ++ __ beq(i.TempRegister(2), &compareExchange); \ ++ __ bind(&exit); \ ++ /*__ memb();*/ \ ++ } while (0) ++ ++ ++#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT( \ ++ load_linked, store_conditional, sign_extend, size, representation) \ ++ do { \ ++ Label compareExchange; \ ++ Label exit; \ ++ __ addl(i.InputRegister(0), i.InputRegister(1),i.TempRegister(0)); \ ++ if (representation == 32) { \ ++ __ and_ins(i.TempRegister(0), 0x3,i.TempRegister(1)); \ ++ } else { \ ++ DCHECK_EQ(representation, 64); \ ++ __ and_ins(i.TempRegister(0), 0x7,i.TempRegister(1)); \ ++ } \ ++ __ Subl(i.TempRegister(0), i.TempRegister(0), \ ++ Operand(i.TempRegister(1))); \ ++ __ slll(i.TempRegister(1), 3, i.TempRegister(1)); \ ++ /*__ memb();*/ \ ++ __ bind(&compareExchange); \ ++ __ load_linked(i.TempRegister(2), 0, i.TempRegister(0)); \ ++ __ mov(i.OutputRegister(0),i.TempRegister(2)); \ ++ __ 
cmpeq(i.OutputRegister(0), i.InputRegister(2), gp); \ ++ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \ ++ size, sign_extend); \ ++ __ ExtractBits(i.InputRegister(2), i.InputRegister(2), i.TempRegister(1), \ ++ size, sign_extend); \ ++ __ wr_f(gp); \ ++ __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \ ++ size); \ ++ __ Align(8); \ ++ __ store_conditional(i.TempRegister(2), 0, i.TempRegister(0)); \ ++ __ rd_f(i.TempRegister(2)); \ ++ __ beq(gp, &exit); \ ++ __ beq(i.TempRegister(2), &compareExchange); \ ++ __ bind(&exit); \ ++ /*__ memb();*/ \ ++ } while (0) ++ ++ ++#define ASSEMBLE_IEEE754_BINOP(name) \ ++ do { \ ++ FrameScope scope(tasm(), StackFrame::MANUAL); \ ++ __ PrepareCallCFunction(0, 2, kScratchReg); \ ++ __ MovToFloatParameters(i.InputDoubleRegister(0), \ ++ i.InputDoubleRegister(1)); \ ++ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \ ++ /* Move the result in the double result register. */ \ ++ __ MovFromFloatResult(i.OutputDoubleRegister()); \ ++ } while (0) ++ ++#define ASSEMBLE_IEEE754_UNOP(name) \ ++ do { \ ++ FrameScope scope(tasm(), StackFrame::MANUAL); \ ++ __ PrepareCallCFunction(0, 1, kScratchReg); \ ++ __ MovToFloatParameter(i.InputDoubleRegister(0)); \ ++ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \ ++ /* Move the result in the double result register. */ \ ++ __ MovFromFloatResult(i.OutputDoubleRegister()); \ ++ } while (0) ++ ++#define ASSEMBLE_F64X2_ARITHMETIC_BINOP(op) \ ++ do { \ ++ __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \ ++ i.InputSimd128Register(1)); \ ++ } while (0) ++ ++void CodeGenerator::AssembleDeconstructFrame() { ++ __ mov(sp, fp); ++ __ Pop(ra, fp); ++} ++ ++void CodeGenerator::AssemblePrepareTailCall() { ++ if (frame_access_state()->has_frame()) { ++ __ Ldl(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset)); ++ __ Ldl(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); ++ } ++ frame_access_state()->SetFrameAccessToSP(); ++} ++ ++void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg, ++ Register scratch1, ++ Register scratch2, ++ Register scratch3) { ++ DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3)); ++ Label done; ++ ++ // Check if current frame is an arguments adaptor frame. ++ __ Ldl(scratch3, MemOperand(fp, StandardFrameConstants::kContextOffset)); ++ __ Branch(&done, ne, scratch3, ++ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); ++ ++ // Load arguments count from current arguments adaptor frame (note, it ++ // does not include receiver). 
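++ // The length slot holds the count as a Smi, so it is untagged below
++ // before PrepareForTailCall consumes it.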
++ Register caller_args_count_reg = scratch1; ++ __ Ldl(caller_args_count_reg, ++ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset)); ++ __ SmiUntag(caller_args_count_reg); ++ ++ __ PrepareForTailCall(args_reg, caller_args_count_reg, scratch2, scratch3); ++ __ bind(&done); ++} ++ ++namespace { ++ ++void AdjustStackPointerForTailCall(TurboAssembler* tasm, ++ FrameAccessState* state, ++ int new_slot_above_sp, ++ bool allow_shrinkage = true) { ++ int current_sp_offset = state->GetSPToFPSlotCount() + ++ StandardFrameConstants::kFixedSlotCountAboveFp; ++ int stack_slot_delta = new_slot_above_sp - current_sp_offset; ++ if (stack_slot_delta > 0) { ++ tasm->Subl(sp, sp, stack_slot_delta * kSystemPointerSize); ++ state->IncreaseSPDelta(stack_slot_delta); ++ } else if (allow_shrinkage && stack_slot_delta < 0) { ++ tasm->Addl(sp, sp, -stack_slot_delta * kSystemPointerSize); ++ state->IncreaseSPDelta(stack_slot_delta); ++ } ++} ++ ++} // namespace ++ ++void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, ++ int first_unused_stack_slot) { ++ AdjustStackPointerForTailCall(tasm(), frame_access_state(), ++ first_unused_stack_slot, false); ++} ++ ++void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, ++ int first_unused_stack_slot) { ++ AdjustStackPointerForTailCall(tasm(), frame_access_state(), ++ first_unused_stack_slot); ++} ++ ++// Check that {kJavaScriptCallCodeStartRegister} is correct. ++void CodeGenerator::AssembleCodeStartRegisterCheck() { ++ __ ComputeCodeStartAddress(kScratchReg); ++ __ Assert(eq, AbortReason::kWrongFunctionCodeStart, ++ kJavaScriptCallCodeStartRegister, Operand(kScratchReg)); ++} ++ ++// Check if the code object is marked for deoptimization. If it is, then it ++// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need ++// to: ++// 1. read from memory the word that contains that bit, which can be found in ++// the flags in the referenced {CodeDataContainer} object; ++// 2. test kMarkedForDeoptimizationBit in those flags; and ++// 3. if it is not zero then it jumps to the builtin. ++void CodeGenerator::BailoutIfDeoptimized() { ++ int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize; ++ __ Ldl(kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset)); ++ __ Ldw(kScratchReg, ++ FieldMemOperand(kScratchReg, ++ CodeDataContainer::kKindSpecificFlagsOffset)); ++ __ And(kScratchReg, kScratchReg, ++ Operand(1 << Code::kMarkedForDeoptimizationBit)); ++ __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode), ++ RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg)); ++} ++ ++void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() { ++ // Calculate a mask which has all bits set in the normal case, but has all ++ // bits cleared if we are speculatively executing the wrong PC. 
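++ // Worked through on the two formulas below: if current == expected, both
++ // differences are zero, the arithmetic shift keeps zero, and the final
++ // negation produces an all-ones mask, so loads pass through unchanged.
++ // If they differ, one of the two differences is negative, its sign bit
++ // survives the OR, the shift by (kBitsPerPointer - 1) smears it across
++ // the word, and the negation yields zero, so poisoned values are masked
++ // out.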
++ // difference = (current - expected) | (expected - current) ++ // poison = ~(difference >> (kBitsPerPointer - 1)) ++ __ ComputeCodeStartAddress(kScratchReg); ++ __ Move(kSpeculationPoisonRegister, kScratchReg); ++ __ Subw(kSpeculationPoisonRegister, kSpeculationPoisonRegister, ++ Operand(kJavaScriptCallCodeStartRegister)); ++ __ Subw(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister, ++ Operand(kScratchReg)); ++ __ or_ins(kSpeculationPoisonRegister, kJavaScriptCallCodeStartRegister, kSpeculationPoisonRegister); ++ __ Sraw(kSpeculationPoisonRegister, kSpeculationPoisonRegister, ++ kBitsPerSystemPointer - 1); ++ __ ornot(zero_reg, kSpeculationPoisonRegister, ++ kSpeculationPoisonRegister); ++} ++ ++void CodeGenerator::AssembleRegisterArgumentPoisoning() { ++ __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister); ++ __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister); ++ __ And(sp, sp, kSpeculationPoisonRegister); ++} ++ ++// Assembles an instruction after register allocation, producing machine code. ++CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ++ Instruction* instr) { ++ Sw64OperandConverter i(this, instr); ++ InstructionCode opcode = instr->opcode(); ++ ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode); ++ switch (arch_opcode) { ++ case kArchCallCodeObject: { ++ if (instr->InputAt(0)->IsImmediate()) { ++ __ Call(i.InputCode(0), RelocInfo::CODE_TARGET); ++ } else { ++ Register reg = i.InputRegister(0); ++ DCHECK_IMPLIES( ++ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister), ++ reg == kJavaScriptCallCodeStartRegister); ++ __ addl(reg, Code::kHeaderSize - kHeapObjectTag, reg); ++ __ Call(reg); ++ } ++ RecordCallPosition(instr); ++ frame_access_state()->ClearSPDelta(); ++ break; ++ } ++ case kArchCallBuiltinPointer: { ++ DCHECK(!instr->InputAt(0)->IsImmediate()); ++ Register builtin_index = i.InputRegister(0); ++ __ CallBuiltinByIndex(builtin_index); ++ RecordCallPosition(instr); ++ frame_access_state()->ClearSPDelta(); ++ break; ++ } ++ case kArchCallWasmFunction: { ++ if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) { ++ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister, ++ i.TempRegister(0), i.TempRegister(1), ++ i.TempRegister(2)); ++ } ++ if (instr->InputAt(0)->IsImmediate()) { ++ Constant constant = i.ToConstant(instr->InputAt(0)); ++ Address wasm_code = static_cast
<Address>
(constant.ToInt64()); ++ __ Call(wasm_code, constant.rmode()); ++ } else { ++ __ addl(i.InputRegister(0), 0, kScratchReg); ++ __ Call(kScratchReg); ++ } ++ RecordCallPosition(instr); ++ frame_access_state()->ClearSPDelta(); ++ break; ++ } ++ case kArchTailCallCodeObjectFromJSFunction: ++ case kArchTailCallCodeObject: { ++ if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) { ++ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister, ++ i.TempRegister(0), i.TempRegister(1), ++ i.TempRegister(2)); ++ } ++ if (instr->InputAt(0)->IsImmediate()) { ++ __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET); ++ } else { ++ Register reg = i.InputRegister(0); ++ DCHECK_IMPLIES( ++ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister), ++ reg == kJavaScriptCallCodeStartRegister); ++ __ addl(reg, Code::kHeaderSize - kHeapObjectTag, reg); ++ __ Jump(reg); ++ } ++ frame_access_state()->ClearSPDelta(); ++ frame_access_state()->SetFrameAccessToDefault(); ++ break; ++ } ++ case kArchTailCallWasm: { ++ if (instr->InputAt(0)->IsImmediate()) { ++ Constant constant = i.ToConstant(instr->InputAt(0)); ++ Address wasm_code = static_cast
(constant.ToInt64()); ++ __ Jump(wasm_code, constant.rmode()); ++ } else { ++ __ addl(i.InputRegister(0), 0, kScratchReg); ++ __ Jump(kScratchReg); ++ } ++ frame_access_state()->ClearSPDelta(); ++ frame_access_state()->SetFrameAccessToDefault(); ++ break; ++ } ++ case kArchTailCallAddress: { ++ CHECK(!instr->InputAt(0)->IsImmediate()); ++ Register reg = i.InputRegister(0); ++ DCHECK_IMPLIES( ++ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister), ++ reg == kJavaScriptCallCodeStartRegister); ++ __ Jump(reg); ++ frame_access_state()->ClearSPDelta(); ++ frame_access_state()->SetFrameAccessToDefault(); ++ break; ++ } ++ case kArchCallJSFunction: { ++ Register func = i.InputRegister(0); ++ if (FLAG_debug_code) { ++ // Check the function's context matches the context argument. ++ __ Ldl(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset)); ++ __ Assert(eq, AbortReason::kWrongFunctionContext, cp, ++ Operand(kScratchReg)); ++ } ++ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); ++ __ Ldl(a2, FieldMemOperand(func, JSFunction::kCodeOffset)); ++ __ Addl(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag)); ++ __ Call(a2); ++ RecordCallPosition(instr); ++ frame_access_state()->ClearSPDelta(); ++ break; ++ } ++ case kArchPrepareCallCFunction: { ++ int const num_parameters = MiscField::decode(instr->opcode()); ++ __ PrepareCallCFunction(num_parameters, kScratchReg); ++ // Frame alignment requires using FP-relative frame addressing. ++ frame_access_state()->SetFrameAccessToFP(); ++ break; ++ } ++ case kArchSaveCallerRegisters: { ++ fp_mode_ = ++ static_cast(MiscField::decode(instr->opcode())); ++ DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs); ++ // kReturnRegister0 should have been saved before entering the stub. ++ int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0); ++ DCHECK(IsAligned(bytes, kSystemPointerSize)); ++ DCHECK_EQ(0, frame_access_state()->sp_delta()); ++ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize); ++ DCHECK(!caller_registers_saved_); ++ caller_registers_saved_ = true; ++ break; ++ } ++ case kArchRestoreCallerRegisters: { ++ DCHECK(fp_mode_ == ++ static_cast(MiscField::decode(instr->opcode()))); ++ DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs); ++ // Don't overwrite the returned value. ++ int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0); ++ frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize)); ++ DCHECK_EQ(0, frame_access_state()->sp_delta()); ++ DCHECK(caller_registers_saved_); ++ caller_registers_saved_ = false; ++ break; ++ } ++ case kArchPrepareTailCall: ++ AssemblePrepareTailCall(); ++ break; ++ case kArchCallCFunction: { ++ int const num_parameters = MiscField::decode(instr->opcode()); ++ Label start_call; ++ bool isWasmCapiFunction = ++ linkage()->GetIncomingDescriptor()->IsWasmCapiFunction(); ++ // from start_call to return address. ++ int offset = __ root_array_available() ? 76 : 88; ++#if V8_HOST_ARCH_SW64 ++ if (__ emit_debug_code()) { ++ offset += 16; ++ } ++#endif ++ if (isWasmCapiFunction) { ++ // Put the return address in a stack slot. 
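++ // The br(ra, 0) below effectively links and falls through, leaving the
++ // current PC in ra; adding offset - 4 turns that into the address of the
++ // instruction following the CallCFunction, i.e. the calling PC stored in
++ // the wasm exit frame. The CHECK_EQ on offset after the call verifies
++ // that distance.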
++ __ mov(kScratchReg, ra); ++ __ bind(&start_call); ++ __ br(ra, 0); // __ nal(); // __ nop(); ++ __ Addl(ra, ra, offset - 4); // 4 = br(ra, 0); ++ __ stl(ra, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); ++ __ mov(ra, kScratchReg); ++ } ++ if (instr->InputAt(0)->IsImmediate()) { ++ ExternalReference ref = i.InputExternalReference(0); ++ __ CallCFunction(ref, num_parameters); ++ } else { ++ Register func = i.InputRegister(0); ++ __ CallCFunction(func, num_parameters); ++ } ++ if (isWasmCapiFunction) { ++ CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call)); ++ RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt); ++ } ++ ++ frame_access_state()->SetFrameAccessToDefault(); ++ // Ideally, we should decrement SP delta to match the change of stack ++ // pointer in CallCFunction. However, for certain architectures (e.g. ++ // ARM), there may be more strict alignment requirement, causing old SP ++ // to be saved on the stack. In those cases, we can not calculate the SP ++ // delta statically. ++ frame_access_state()->ClearSPDelta(); ++ if (caller_registers_saved_) { ++ // Need to re-memb SP delta introduced in kArchSaveCallerRegisters. ++ // Here, we assume the sequence to be: ++ // kArchSaveCallerRegisters; ++ // kArchCallCFunction; ++ // kArchRestoreCallerRegisters; ++ int bytes = ++ __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0); ++ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize); ++ } ++ break; ++ } ++ case kArchJmp: ++ AssembleArchJump(i.InputRpo(0)); ++ break; ++ case kArchBinarySearchSwitch: ++ AssembleArchBinarySearchSwitch(instr); ++ break; ++ case kArchTableSwitch: ++ AssembleArchTableSwitch(instr); ++ break; ++ case kArchAbortCSAAssert: ++ DCHECK(i.InputRegister(0) == a0); ++ { ++ // We don't actually want to generate a pile of code for this, so just ++ // claim there is a stack frame, without generating one. ++ FrameScope scope(tasm(), StackFrame::NONE); ++ __ Call( ++ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert), ++ RelocInfo::CODE_TARGET); ++ } ++ __ halt();//stop("kArchDebugAbort"); ++ break; ++ case kArchDebugBreak: ++ __ halt();//stop("kArchDebugBreak"); ++ break; ++ case kArchComment: ++ __ RecordComment(reinterpret_cast(i.InputInt64(0))); ++ break; ++ case kArchNop: ++ case kArchThrowTerminator: ++ // don't emit code for nops. ++ break; ++ case kArchDeoptimize: { ++ DeoptimizationExit* exit = ++ BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore()); ++ CodeGenResult result = AssembleDeoptimizerCall(exit); ++ if (result != kSuccess) return result; ++ break; ++ } ++ case kArchRet: ++ AssembleReturn(instr->InputAt(0)); ++ break; ++ case kArchStackPointerGreaterThan: ++ // Pseudo-instruction used for cmp/branch. No opcode emitted here. 
++ break; ++ case kArchStackCheckOffset: ++ __ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset())); ++ break; ++ case kArchFramePointer: ++ __ mov(i.OutputRegister(), fp); ++ break; ++ case kArchParentFramePointer: ++ if (frame_access_state()->has_frame()) { ++ __ Ldl(i.OutputRegister(), MemOperand(fp, 0)); ++ } else { ++ __ mov(i.OutputRegister(), fp); ++ } ++ break; ++ case kArchTruncateDoubleToI: ++ __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(), ++ i.InputDoubleRegister(0), DetermineStubCallMode()); ++ break; ++ case kArchStoreWithWriteBarrier: { ++ RecordWriteMode mode = ++ static_cast(MiscField::decode(instr->opcode())); ++ Register object = i.InputRegister(0); ++ Register index = i.InputRegister(1); ++ Register value = i.InputRegister(2); ++ Register scratch0 = i.TempRegister(0); ++ Register scratch1 = i.TempRegister(1); ++ auto ool = zone()->New(this, object, index, value, ++ scratch0, scratch1, mode, ++ DetermineStubCallMode()); ++ __ Addl(kScratchReg, object, index); ++ __ Stl(value, MemOperand(kScratchReg)); ++ __ CheckPageFlag(object, scratch0, ++ MemoryChunk::kPointersFromHereAreInterestingMask, ne, ++ ool->entry()); ++ __ bind(ool->exit()); ++ break; ++ } ++ case kArchStackSlot: { ++ FrameOffset offset = ++ frame_access_state()->GetFrameOffset(i.InputInt32(0)); ++ Register base_reg = offset.from_stack_pointer() ? sp : fp; ++ __ Addl(i.OutputRegister(), base_reg, Operand(offset.offset())); ++ int alignment = i.InputInt32(1); ++ DCHECK(alignment == 0 || alignment == 4 || alignment == 8 || ++ alignment == 16); ++ if (FLAG_debug_code && alignment > 0) { ++ // Verify that the output_register is properly aligned ++ __ And(kScratchReg, i.OutputRegister(), ++ Operand(kSystemPointerSize - 1)); ++ __ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, kScratchReg, ++ Operand(zero_reg)); ++ } ++ if (alignment == 2 * kSystemPointerSize) { ++ Label done; ++ __ Addl(kScratchReg, base_reg, Operand(offset.offset())); ++ __ And(kScratchReg, kScratchReg, Operand(alignment - 1)); ++ __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg)); ++ __ Addl(i.OutputRegister(), i.OutputRegister(), kSystemPointerSize); ++ __ bind(&done); ++ } else if (alignment > 2 * kSystemPointerSize) { ++ Label done; ++ __ Addl(kScratchReg, base_reg, Operand(offset.offset())); ++ __ And(kScratchReg, kScratchReg, Operand(alignment - 1)); ++ __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg)); ++ __ li(kScratchReg2, alignment); ++ __ Subl(kScratchReg2, kScratchReg2, Operand(kScratchReg)); ++ __ Addl(i.OutputRegister(), i.OutputRegister(), kScratchReg2); ++ __ bind(&done); ++ } ++ ++ break; ++ } ++ case kArchWordPoisonOnSpeculation: ++ __ And(i.OutputRegister(), i.InputRegister(0), ++ kSpeculationPoisonRegister); ++ break; ++ case kIeee754Float64Acos: ++ ASSEMBLE_IEEE754_UNOP(acos); ++ break; ++ case kIeee754Float64Acosh: ++ ASSEMBLE_IEEE754_UNOP(acosh); ++ break; ++ case kIeee754Float64Asin: ++ ASSEMBLE_IEEE754_UNOP(asin); ++ break; ++ case kIeee754Float64Asinh: ++ ASSEMBLE_IEEE754_UNOP(asinh); ++ break; ++ case kIeee754Float64Atan: ++ ASSEMBLE_IEEE754_UNOP(atan); ++ break; ++ case kIeee754Float64Atanh: ++ ASSEMBLE_IEEE754_UNOP(atanh); ++ break; ++ case kIeee754Float64Atan2: ++ ASSEMBLE_IEEE754_BINOP(atan2); ++ break; ++ case kIeee754Float64Cos: ++ ASSEMBLE_IEEE754_UNOP(cos); ++ break; ++ case kIeee754Float64Cosh: ++ ASSEMBLE_IEEE754_UNOP(cosh); ++ break; ++ case kIeee754Float64Cbrt: ++ ASSEMBLE_IEEE754_UNOP(cbrt); ++ break; ++ case kIeee754Float64Exp: ++ ASSEMBLE_IEEE754_UNOP(exp); ++ 
break; ++ case kIeee754Float64Expm1: ++ ASSEMBLE_IEEE754_UNOP(expm1); ++ break; ++ case kIeee754Float64Log: ++ ASSEMBLE_IEEE754_UNOP(log); ++ break; ++ case kIeee754Float64Log1p: ++ ASSEMBLE_IEEE754_UNOP(log1p); ++ break; ++ case kIeee754Float64Log2: ++ ASSEMBLE_IEEE754_UNOP(log2); ++ break; ++ case kIeee754Float64Log10: ++ ASSEMBLE_IEEE754_UNOP(log10); ++ break; ++ case kIeee754Float64Pow: ++ ASSEMBLE_IEEE754_BINOP(pow); ++ break; ++ case kIeee754Float64Sin: ++ ASSEMBLE_IEEE754_UNOP(sin); ++ break; ++ case kIeee754Float64Sinh: ++ ASSEMBLE_IEEE754_UNOP(sinh); ++ break; ++ case kIeee754Float64Tan: ++ ASSEMBLE_IEEE754_UNOP(tan); ++ break; ++ case kIeee754Float64Tanh: ++ ASSEMBLE_IEEE754_UNOP(tanh); ++ break; ++ case kSw64Add: ++ __ Addw(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kSw64Dadd: ++ __ Addl(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kSw64DaddOvf: ++ __ DaddOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1), ++ kScratchReg); ++ break; ++ case kSw64Sub: ++ __ Subw(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kSw64Dsub: ++ __ Subl(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kSw64DsubOvf: ++ __ DsubOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1), ++ kScratchReg); ++ break; ++ case kSw64Mul: ++ __ Mulw(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kSw64MulOvf: ++ __ MulOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1), ++ kScratchReg); ++ break; ++ case kSw64MulHigh: ++ __ Mulwh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kSw64MulHighU: ++ __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kSw64DMulHigh: ++ __ Dmulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kSw64Div: { ++ __ Divw(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ } ++ case kSw64DivU: { ++ __ Divwu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ } ++ case kSw64Mod: { ++ __ Modw(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ } ++ case kSw64ModU: { ++ __ Modwu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ } ++ case kSw64Dmul: ++ __ Mull(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kSw64Ddiv: { ++ Label ldiv, exit; ++ __ slll(i.InputRegister(0), 0xb, at); ++ __ sral(at, 0xb, at); ++ __ cmpeq(i.InputRegister(0), at, at); ++ __ beq(at, &ldiv); ++ __ Divl(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ __ beq(zero_reg, &exit); ++ ++ __ bind(&ldiv); ++ RegList saved_regs = (kJSCallerSaved | ra.bit()) & (~(i.OutputRegister().bit())) ; ++ __ MultiPush(saved_regs); ++ FrameScope scope(tasm(), StackFrame::MANUAL); ++ __ PrepareCallCFunction(2, 0, kScratchReg); ++ __ MovToGeneralParameters(i.InputRegister(0), i.InputRegister(1)); ++ __ CallCFunction(ExternalReference::math_sw_ddiv_function(), 2, 0); ++ __ MovFromGeneralResult(i.OutputRegister()); ++ __ MultiPop(saved_regs); ++ ++ __ bind(&exit); ++ break; ++ } ++ case kSw64DdivU: { ++ Label ldivu, exit; ++ __ blt(i.InputRegister(0), &ldivu); ++ __ blt(i.InputRegister(1), &ldivu); ++ __ srll(i.InputRegister(0), 0x35, at); ++ __ bne(at, &ldivu); ++ __ Divlu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ __ beq(zero_reg, &exit); ++ ++ __ bind(&ldivu); ++ RegList saved_regs = (kJSCallerSaved | ra.bit()) & 
(~(i.OutputRegister().bit())) ; ++ __ MultiPush(saved_regs); ++ FrameScope scope(tasm(), StackFrame::MANUAL); ++ __ PrepareCallCFunction(2, 0, kScratchReg); ++ __ MovToGeneralParameters(i.InputRegister(0), i.InputRegister(1)); ++ __ CallCFunction(ExternalReference::math_sw_ddivu_function(), 2, 0); ++ __ MovFromGeneralResult(i.OutputRegister()); ++ __ MultiPop(saved_regs); ++ ++ __ bind(&exit); ++ break; ++ } ++ case kSw64Dmod: { ++ Label modl, exit; ++ __ slll(i.InputRegister(0), 0xb, at); ++ __ sral(at, 0xb, at); ++ __ cmpeq(i.InputRegister(0), at, at); ++ __ beq(at, &modl); ++ __ Modl(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ __ beq(zero_reg, &exit); ++ ++ __ bind(&modl); ++ RegList saved_regs = (kJSCallerSaved | ra.bit()) & (~(i.OutputRegister().bit())) ; ++ __ MultiPush(saved_regs); ++ FrameScope scope(tasm(), StackFrame::MANUAL); ++ __ PrepareCallCFunction(2, 0, kScratchReg); ++ __ MovToGeneralParameters(i.InputRegister(0), i.InputRegister(1)); ++ __ CallCFunction(ExternalReference::math_sw_dmod_function(), 2, 0); ++ __ MovFromGeneralResult(i.OutputRegister()); ++ __ MultiPop(saved_regs); ++ ++ __ bind(&exit); ++ break; ++ } ++ case kSw64DmodU: { ++ Label modlu, exit; ++ __ blt(i.InputRegister(0), &modlu); ++ __ blt(i.InputRegister(1), &modlu); ++ __ srll(i.InputRegister(0), 0x35, at); ++ __ bne(at, &modlu); ++ __ Modlu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ __ beq(zero_reg, &exit); ++ ++ __ bind(&modlu); ++ RegList saved_regs = (kJSCallerSaved | ra.bit()) & (~(i.OutputRegister().bit())) ; ++ __ MultiPush(saved_regs); ++ FrameScope scope(tasm(), StackFrame::MANUAL); ++ __ PrepareCallCFunction(2, 0, kScratchReg); ++ __ MovToGeneralParameters(i.InputRegister(0), i.InputRegister(1)); ++ __ CallCFunction(ExternalReference::math_sw_dmodu_function(), 2, 0); ++ __ MovFromGeneralResult(i.OutputRegister()); ++ __ MultiPop(saved_regs); ++ __ beq(zero_reg, &exit); ++ ++ __ bind(&exit); ++ break; ++ } ++ case kSw64Dlsa: ++ DCHECK(instr->InputAt(2)->IsImmediate()); ++ __ Dlsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), ++ i.InputInt8(2)); ++ break; ++ case kSw64Lsa: ++ DCHECK(instr->InputAt(2)->IsImmediate()); ++ __ Lsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), ++ i.InputInt8(2)); ++ break; ++ case kSw64And: ++ __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kSw64And32: ++ __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ __ addw(i.OutputRegister(), 0x0, i.OutputRegister()); ++ break; ++ case kSw64Or: ++ __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kSw64Or32: ++ __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ __ addw(i.OutputRegister(), 0x0, i.OutputRegister()); ++ break; ++ case kSw64Nor: ++ if (instr->InputAt(1)->IsRegister()) { ++ __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ } else { ++ DCHECK_EQ(0, i.InputOperand(1).immediate()); ++ __ ornot(zero_reg, i.InputRegister(0), i.OutputRegister()); ++ } ++ break; ++ case kSw64Nor32: ++ if (instr->InputAt(1)->IsRegister()) { ++ __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ __ addw(i.OutputRegister(), 0x0, i.OutputRegister()); ++ } else { ++ DCHECK_EQ(0, i.InputOperand(1).immediate()); ++ __ ornot(zero_reg, i.InputRegister(0), i.OutputRegister()); ++ __ addw(i.OutputRegister(), 0x0, i.OutputRegister()); ++ } ++ break; ++ case kSw64Xor: ++ __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ 
break; ++ case kSw64Xor32: ++ __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ __ addw(i.OutputRegister(), 0x0, i.OutputRegister()); ++ break; ++ case kSw64Clz: ++ __ Clz(i.OutputRegister(), i.InputRegister(0)); ++ break; ++ case kSw64Dclz: ++ __ ctlz(i.InputRegister(0),i.OutputRegister()); ++ break; ++ case kSw64Ctz: { ++ Register src = i.InputRegister(0); ++ Register dst = i.OutputRegister(); ++ __ Ctz(dst, src); ++ } break; ++ case kSw64Dctz: { ++ Register src = i.InputRegister(0); ++ Register dst = i.OutputRegister(); ++ __ Dctz(dst, src); ++ } break; ++ case kSw64Popcnt: { ++ Register src = i.InputRegister(0); ++ Register dst = i.OutputRegister(); ++ __ Popcnt(dst, src); ++ } break; ++ case kSw64Dpopcnt: { ++ Register src = i.InputRegister(0); ++ Register dst = i.OutputRegister(); ++ __ Dpopcnt(dst, src); ++ } break; ++ case kSw64Shl: ++ if (instr->InputAt(1)->IsRegister()) { ++ __ Sllw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); ++ } else { ++ int64_t imm = i.InputOperand(1).immediate(); ++ if (imm == 0) { ++ __ addw(i.InputRegister(0), 0, i.OutputRegister()); ++ } else { ++ __ Sllw(i.OutputRegister(), i.InputRegister(0), ++ static_cast(imm)); ++ } ++ } ++ break; ++ case kSw64Shr: ++ if (instr->InputAt(1)->IsRegister()) { ++ __ Srlw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); ++ } else { ++ int64_t imm = i.InputOperand(1).immediate(); ++ __ Srlw(i.OutputRegister(), i.InputRegister(0), ++ static_cast(imm)); ++ } ++ break; ++ case kSw64Sar: ++ if (instr->InputAt(1)->IsRegister()) { ++ __ Sraw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); ++ } else { ++ int64_t imm = i.InputOperand(1).immediate(); ++ __ Sraw(i.OutputRegister(), i.InputRegister(0), ++ static_cast(imm)); ++ } ++ break; ++ case kSw64Ext: ++ __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1), ++ i.InputInt8(2)); ++ break; ++ case kSw64Ins: ++ if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) { ++// __ Ins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2)); ++ if(i.InputInt8(2) == 8){ ++ __ zap(i.OutputRegister(), 0x1, i.OutputRegister()); ++ __ addw(i.OutputRegister(), 0, i.OutputRegister()); ++ }else if(i.InputInt8(2) == 16){ ++ __ zap(i.OutputRegister(), 0x3, i.OutputRegister()); ++ __ addw(i.OutputRegister(), 0, i.OutputRegister()); ++ }else if(i.InputInt8(2) == 24){ ++ __ zap(i.OutputRegister(), 0x7, i.OutputRegister()); ++ __ addw(i.OutputRegister(), 0, i.OutputRegister()); ++ }else if(i.InputInt8(2) == 32){ ++ __ bis(i.OutputRegister(), zero_reg, i.OutputRegister()); ++ }else { ++ long bitsize = (0x1L << i.InputInt8(2)) - 1; ++ if (is_uint8(bitsize)) { ++ __ bic(i.OutputRegister(), bitsize, i.OutputRegister()); ++ } else { ++ __ li(t11, bitsize); ++ __ bic(i.OutputRegister(), t11, i.OutputRegister()); ++ } ++ __ addw(i.OutputRegister(), 0, i.OutputRegister()); ++ } ++ } else { ++ __ Ins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1), ++ i.InputInt8(2)); ++ } ++ break; ++ case kSw64Dext: { ++ __ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1), ++ i.InputInt8(2)); ++ break; ++ } ++ case kSw64Dins: ++ if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) { ++// __ Dins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2)); ++ if(i.InputInt8(2) == 8){ ++ __ zap(i.OutputRegister(), 0x1, i.OutputRegister()); ++ }else if(i.InputInt8(2) == 16){ ++ __ zap(i.OutputRegister(), 0x3, i.OutputRegister()); ++ }else if(i.InputInt8(2) == 24){ ++ __ zap(i.OutputRegister(), 0x7, i.OutputRegister()); 
++ }else if(i.InputInt8(2) == 32){ ++ __ zap(i.OutputRegister(), 0xf, i.OutputRegister()); ++ }else { ++ long bitsize = (0x1L << i.InputInt8(2)) - 1; ++ if (is_uint8(bitsize)) { ++ __ bic(i.OutputRegister(), bitsize, i.OutputRegister()); ++ } else { ++ __ li(t11, bitsize); ++ __ bic(i.OutputRegister(), t11, i.OutputRegister()); ++ } ++ } ++ } else { ++ __ Dins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1), ++ i.InputInt8(2)); ++ } ++ break; ++ case kSw64Dshl: ++ if (instr->InputAt(1)->IsRegister()) { ++ __ slll(i.InputRegister(0), i.InputRegister(1),i.OutputRegister()); ++ } else { ++ int64_t imm = i.InputOperand(1).immediate(); ++ __ slll(i.InputRegister(0), ++ static_cast(imm), i.OutputRegister()); ++ } ++ break; ++ case kSw64Dshr: ++ if (instr->InputAt(1)->IsRegister()) { ++ __ srll(i.InputRegister(0), i.InputRegister(1), i.OutputRegister()); ++ } else { ++ int64_t imm = i.InputOperand(1).immediate(); ++ __ srll(i.InputRegister(0), ++ static_cast(imm), i.OutputRegister()); ++ } ++ break; ++ case kSw64Dsar: ++ if (instr->InputAt(1)->IsRegister()) { ++ __ sral(i.InputRegister(0), i.InputRegister(1), i.OutputRegister()); ++ } else { ++ int64_t imm = i.InputOperand(1).immediate(); ++ __ sral(i.InputRegister(0), imm , i.OutputRegister()); ++ } ++ break; ++ case kSw64Ror: ++ __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kSw64Dror: ++ __ Dror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kSw64Tst: ++ __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1)); ++ // Pseudo-instruction used for cmp/branch. No opcode emitted here. ++ break; ++ case kSw64Cmp: ++ // Pseudo-instruction used for cmp/branch. No opcode emitted here. ++ break; ++ case kSw64Mov: ++ // TODO(plind): Should we combine mov/li like this, or use separate instr? ++ // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType ++ if (HasRegisterInput(instr, 0)) { ++ __ mov(i.OutputRegister(), i.InputRegister(0)); ++ } else { ++ __ li(i.OutputRegister(), i.InputOperand(0)); ++ } ++ break; ++ ++ case kSw64CmpS: { ++ FPURegister left = i.InputOrZeroSingleRegister(0); ++ FPURegister right = i.InputOrZeroSingleRegister(1); ++ bool predicate; ++ FPUCondition cc = ++ FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition()); ++ __ CompareF32(cc, left, right); ++ } break; ++ case kSw64AddS: ++ // TODO(plind): add special case: combine mult & add. ++ __ fadds(i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1), i.OutputDoubleRegister()); ++ break; ++ case kSw64SubS: ++ __ fsubs(i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1), i.OutputDoubleRegister()); ++ break; ++ case kSw64MulS: ++ // TODO(plind): add special case: right op is -1.0, see arm port. ++ __ fmuls(i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1), i.OutputDoubleRegister()); ++ break; ++ case kSw64DivS: ++ __ fdivs(i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1), i.OutputDoubleRegister()); ++ break; ++ case kSw64ModS: { ++ // TODO(bmeurer): We should really get rid of this special instruction, ++ // and generate a CallAddress instruction instead. ++ FrameScope scope(tasm(), StackFrame::MANUAL); ++ __ PrepareCallCFunction(0, 2, kScratchReg); ++ __ MovToFloatParameters(i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ // TODO(balazs.kilvady): implement mod_two_floats_operation(isolate()) ++ __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2); ++ // Move the result in the double result register. 
++ __ MovFromFloatResult(i.OutputSingleRegister()); ++ break; ++ } ++ case kSw64AbsS: ++ __ Abs_sw(i.OutputSingleRegister(), i.InputSingleRegister(0)); ++ break; ++ case kSw64NegS: ++ __ Fnegs(i.OutputSingleRegister(), i.InputSingleRegister(0)); ++ break; ++ case kSw64SqrtS: { ++ __ fsqrts(i.InputDoubleRegister(0),i.OutputDoubleRegister()); ++ break; ++ } ++ case kSw64MaxS:{ ++ UNREACHABLE(); ++ } ++ case kSw64MinS:{ ++ UNREACHABLE(); ++ } ++ case kSw64CmpD: { ++ FPURegister left = i.InputOrZeroDoubleRegister(0); ++ FPURegister right = i.InputOrZeroDoubleRegister(1); ++ bool predicate; ++ FPUCondition cc = ++ FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition()); ++ __ CompareF64(cc, left, right); ++ } break; ++ case kSw64AddD: ++ // TODO(plind): add special case: combine mult & add. ++ __ faddd(i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1), i.OutputDoubleRegister()); ++ break; ++ case kSw64SubD: ++ __ fsubd(i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1), i.OutputDoubleRegister()); ++ break; ++ case kSw64MulD: ++ // TODO(plind): add special case: right op is -1.0, see arm port. ++ __ fmuld(i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1), i.OutputDoubleRegister()); ++ break; ++ case kSw64DivD: ++ __ fdivd(i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1), i.OutputDoubleRegister()); ++ break; ++ case kSw64ModD: { ++ // TODO(bmeurer): We should really get rid of this special instruction, ++ // and generate a CallAddress instruction instead. ++ FrameScope scope(tasm(), StackFrame::MANUAL); ++ __ PrepareCallCFunction(0, 2, kScratchReg); ++ __ MovToFloatParameters(i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2); ++ // Move the result in the double result register. 
++ __ MovFromFloatResult(i.OutputDoubleRegister()); ++ break; ++ } ++ case kSw64AbsD: ++ __ Abs_sw(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); ++ break; ++ case kSw64NegD: ++ __ Fnegd(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); ++ break; ++ case kSw64SqrtD: { ++ __ fsqrtd(i.InputDoubleRegister(0),i.OutputDoubleRegister()); ++ break; ++ } ++ case kSw64MaxD:{ ++ UNREACHABLE(); ++ } ++ case kSw64MinD: { ++ UNREACHABLE(); ++ } ++ case kSw64Float64RoundDown: { ++ __ Floor_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); ++ break; ++ } ++ case kSw64Float32RoundDown: { ++ __ Floor_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); ++ break; ++ } ++ case kSw64Float64RoundTruncate: { ++ __ Trunc_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); ++ break; ++ } ++ case kSw64Float32RoundTruncate: { ++ __ Trunc_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); ++ break; ++ } ++ case kSw64Float64RoundUp: { ++ __ Ceil_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); ++ break; ++ } ++ case kSw64Float32RoundUp: { ++ __ Ceil_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); ++ break; ++ } ++ case kSw64Float64RoundTiesEven: { ++ __ Round_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); ++ break; ++ } ++ case kSw64Float32RoundTiesEven: { ++ __ Round_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); ++ break; ++ } ++ case kSw64Float32Max: { ++ FPURegister dst = i.OutputSingleRegister(); ++ FPURegister src1 = i.InputSingleRegister(0); ++ FPURegister src2 = i.InputSingleRegister(1); ++ auto ool = zone()->New(this, dst, src1, src2); ++ __ Float32Max(dst, src1, src2, ool->entry()); ++ __ bind(ool->exit()); ++ break; ++ } ++ case kSw64Float64Max: { ++ FPURegister dst = i.OutputDoubleRegister(); ++ FPURegister src1 = i.InputDoubleRegister(0); ++ FPURegister src2 = i.InputDoubleRegister(1); ++ auto ool = zone()->New(this, dst, src1, src2); ++ __ Float64Max(dst, src1, src2, ool->entry()); ++ __ bind(ool->exit()); ++ break; ++ } ++ case kSw64Float32Min: { ++ FPURegister dst = i.OutputSingleRegister(); ++ FPURegister src1 = i.InputSingleRegister(0); ++ FPURegister src2 = i.InputSingleRegister(1); ++ auto ool = zone()->New(this, dst, src1, src2); ++ __ Float32Min(dst, src1, src2, ool->entry()); ++ __ bind(ool->exit()); ++ break; ++ } ++ case kSw64Float64Min: { ++ FPURegister dst = i.OutputDoubleRegister(); ++ FPURegister src1 = i.InputDoubleRegister(0); ++ FPURegister src2 = i.InputDoubleRegister(1); ++ auto ool = zone()->New(this, dst, src1, src2); ++ __ Float64Min(dst, src1, src2, ool->entry()); ++ __ bind(ool->exit()); ++ break; ++ } ++ case kSw64Float64SilenceNaN: ++ __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); ++ break; ++ case kSw64CvtSD: ++ __ fcvtds(i.InputDoubleRegister(0), i.OutputSingleRegister()); ++ break; ++ case kSw64CvtDS: ++ __ fcvtsd(i.InputSingleRegister(0), i.OutputDoubleRegister()); ++ break; ++ case kSw64CvtDW: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ ifmovd(i.InputRegister(0), scratch); ++ __ fcvtld(scratch, i.OutputDoubleRegister()); ++ break; ++ } ++ case kSw64CvtSW: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ ifmovs(i.InputRegister(0), scratch); ++ __ fcvtws(scratch,i.OutputDoubleRegister()); ++ break; ++ } ++ case kSw64CvtSUw: { ++ __ Cvt_s_uw(i.OutputDoubleRegister(), i.InputRegister(0)); ++ break; ++ } ++ case kSw64CvtSL: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ ifmovd(i.InputRegister(0), scratch); ++ __ fcvtls(scratch, i.OutputDoubleRegister()); ++ break; ++ 
} ++ case kSw64CvtDL: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ ifmovd(i.InputRegister(0), scratch); ++ __ fcvtld(scratch, i.OutputDoubleRegister()); ++ break; ++ } ++ case kSw64CvtDUw: { ++ __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0)); ++ break; ++ } ++ case kSw64CvtDUl: { ++ __ Cvt_d_ul(i.OutputDoubleRegister(), i.InputRegister(0)); ++ break; ++ } ++ case kSw64CvtSUl: { ++ __ Cvt_s_ul(i.OutputDoubleRegister(), i.InputRegister(0)); ++ break; ++ } ++ case kSw64FloorWD: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ ffloordw(i.InputDoubleRegister(0),scratch); ++ __ fimovs(scratch, i.OutputRegister()); ++ break; ++ } ++ case kSw64CeilWD: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ fceildw(i.InputDoubleRegister(0),scratch); ++ __ fimovs(scratch, i.OutputRegister()); ++ break; ++ } ++ case kSw64RoundWD: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ frounddw(i.InputDoubleRegister(0), scratch); ++ __ fimovs(scratch, i.OutputRegister()); ++ break; ++ } ++ case kSw64TruncWD: { ++ FPURegister scratch = kScratchDoubleReg; ++ // Other arches use round to zero here, so we follow. ++ __ ftruncdw(i.InputDoubleRegister(0), scratch); ++ __ fimovs(scratch, i.OutputRegister()); ++ break; ++ } ++ case kSw64FloorWS: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ ffloorsw(i.InputDoubleRegister(0),scratch); ++ __ fimovs(scratch,i.OutputRegister()); ++ break; ++ } ++ case kSw64CeilWS: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ fceilsw(i.InputDoubleRegister(0), scratch); ++ __ fimovs(scratch,i.OutputRegister()); ++ break; ++ } ++ case kSw64RoundWS: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ froundsw(i.InputDoubleRegister(0), scratch); ++ __ fimovs(scratch, i.OutputRegister()); ++ break; ++ } ++ case kSw64TruncWS: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ ftruncsw(i.InputDoubleRegister(0), scratch); ++ __ fimovs(scratch, i.OutputRegister()); ++ // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead, ++ // because INT32_MIN allows easier out-of-bounds detection. ++ __ Addw(kScratchReg, i.OutputRegister(), Operand(1)); ++ __ cmplt(kScratchReg, i.OutputRegister(), kScratchReg2); ++ __ Selne(i.OutputRegister(), kScratchReg, kScratchReg2); ++ break; ++ } ++ case kSw64TruncLS: { ++ FPURegister scratch = kScratchDoubleReg; ++ FPURegister scratch1 = kScratchDoubleReg1; ++ FPURegister scratch2 = kScratchDoubleReg2; ++ Register temp = kScratchReg; ++ Register result = kScratchReg2; ++ ++ bool load_status = instr->OutputCount() > 1; ++ if (load_status) { ++#ifdef SW64 ++ // Save FCSR. ++ __ rfpcr(scratch2); ++ // SW64 neednot clear FPCR in 20150513. ++ //in order to have same effection, we should do four steps in sw: ++ //1) set fpcr = 0 ++ //2) Rounding: sw(10), round-to-even ++ //3) set trap bit: sw(62~61,51~49), exception controlled by fpcr but not trap ++ //4) set exception mode: sw(00) setfpec0 ++ __ li(temp, sFCSRControlMask | sFCSRRound1Mask); //1), 2), 3) ++ __ ifmovd(temp, scratch1); ++ __ wfpcr(scratch1); ++ __ setfpec1();//4) ++#endif ++ } ++ // Other arches use round to zero here, so we follow. ++ __ fcvtsd(i.InputDoubleRegister(0), scratch); ++ __ fcvtdl_z(scratch, scratch1); ++ __ fimovd(scratch1, i.OutputRegister()); ++ ++ if (load_status) { ++#ifdef SW64 ++ __ rfpcr(scratch1); ++ __ fimovd(scratch1, result); ++ ++ // Check for overflow and NaNs. 
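++ // The saved FPCR value is masked against the overflow/underflow/invalid-
++ // operation flag bits; the masked result feeds the second output
++ // register, which callers appear to use as a conversion-succeeded status.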
++ __ li(temp, sFCSROverflowFlagMask | sFCSRUnderflowFlagMask | ++ sFCSRInvalidOpFlagMask); ++ __ and_ins(result, temp, result); ++ __ Cmplt(result, zero_reg, result); ++ __ xor_ins(result, 1, result); ++ __ mov(i.OutputRegister(1), result); ++ // Restore FCSR ++ __ wfpcr(scratch2); ++ __ setfpec1(); ++#endif ++ } ++ break; ++ } ++ case kSw64TruncLD: { ++ FPURegister scratch = kScratchDoubleReg; ++ FPURegister scratch1 = kScratchDoubleReg1; ++ FPURegister scratch2 = kScratchDoubleReg2; ++ Register temp = kScratchReg; ++ Register result = kScratchReg2; ++ ++ bool load_status = instr->OutputCount() > 1; ++ if (load_status) { ++#ifdef SW64 ++ // Save FCSR. ++ __ rfpcr(scratch2); ++ // SW64 neednot clear FPCR in 20150513. ++ //in order to have same effection, we should do four steps in sw: ++ //1) set fpcr = 0 ++ //2) Rounding: sw(10), round-to-even ++ //3) set trap bit: sw(62~61,51~49), exception controlled by fpcr but not trap ++ //4) set exception mode: sw(00) setfpec0 ++ __ li(temp, sFCSRControlMask | sFCSRRound1Mask); //1), 2), 3) ++ __ ifmovd(temp, scratch1); ++ __ wfpcr(scratch1); ++ __ setfpec1();//4) ++#endif ++ } ++ // Other arches use round to zero here, so we follow. ++ __ ftruncdl(i.InputDoubleRegister(0), scratch); ++ __ fimovd(scratch,i.OutputRegister(0)); ++ if (load_status) { ++#ifdef SW64 ++ __ rfpcr(scratch1); ++ __ fimovd(scratch1, result); ++ ++ // Check for overflow and NaNs. ++ __ li(temp, sFCSROverflowFlagMask | sFCSRUnderflowFlagMask | ++ sFCSRInvalidOpFlagMask); ++ __ and_ins(result, temp, result); ++ __ Cmplt(result, zero_reg, result); ++ __ xor_ins(result, 1, result); ++ __ mov(i.OutputRegister(1), result); ++ // Restore FCSR ++ __ wfpcr(scratch2); ++ __ setfpec1(); ++#endif ++ } ++ break; ++ } ++ case kSw64TruncUwD: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ Trunc_uw_d(i.OutputRegister(), i.InputDoubleRegister(0), scratch); ++ break; ++ } ++ case kSw64TruncUwS: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ Trunc_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch); ++ // Avoid UINT32_MAX as an overflow indicator and use 0 instead, ++ // because 0 allows easier out-of-bounds detection. ++ __ Addw(kScratchReg, i.OutputRegister(), Operand(1)); ++ __ Seleq(i.OutputRegister(), zero_reg, kScratchReg); ++ break; ++ } ++ case kSw64TruncUlS: { ++ FPURegister scratch = kScratchDoubleReg; ++ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg; ++ __ Trunc_ul_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch, ++ result); ++ break; ++ } ++ case kSw64TruncUlD: { ++ FPURegister scratch = kScratchDoubleReg; ++ Register result = instr->OutputCount() > 1 ? 
i.OutputRegister(1) : no_reg; ++ __ Trunc_ul_d(i.OutputRegister(0), i.InputDoubleRegister(0), scratch, ++ result); ++ break; ++ } ++ case kSw64BitcastDL: // D -> L ++ __ fimovd(i.InputDoubleRegister(0),i.OutputRegister()); ++ break; ++ case kSw64BitcastLD: ++ __ ifmovd(i.InputRegister(0), i.OutputDoubleRegister()); ++ break; ++#ifdef SW64 ++ case kSw64BitcastSW: // W -> S ++ __ ifmovs(i.InputRegister(0), i.OutputDoubleRegister()); ++ break; ++ case kSw64BitcastWS: // S -> W ++ __ fimovs(i.InputDoubleRegister(0), i.OutputRegister()); ++ break; ++#endif ++ case kSw64Float64ExtractLowWord32: ++ __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0)); ++ break; ++ case kSw64Float64ExtractHighWord32: ++ __ FmoveHigh(i.OutputRegister(), i.InputDoubleRegister(0)); ++ break; ++ case kSw64Float64InsertLowWord32: ++ __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1)); ++ break; ++ case kSw64Float64InsertHighWord32: ++ __ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1)); ++ break; ++ // ... more basic instructions ... ++ ++ case kSw64Seb: ++ __ sextb(i.InputRegister(0), i.OutputRegister()); ++ break; ++ case kSw64Seh: ++ __ sexth(i.InputRegister(0), i.OutputRegister()); ++ break; ++ case kSw64Ldbu: ++ __ Ldbu(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kSw64Ldb: ++ __ Ldb(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kSw64Stb: ++ __ Stb(i.InputOrZeroRegister(2), i.MemoryOperand()); ++ break; ++ case kSw64Ldhu: ++ __ Ldhu(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kSw64Uldhu: ++ __ Uldhu(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kSw64Ldh: ++ __ Ldh(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kSw64Uldh: ++ __ Uldh(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kSw64Sth: ++ __ Sth(i.InputOrZeroRegister(2), i.MemoryOperand()); ++ break; ++ case kSw64Usth: ++ __ Usth(i.InputOrZeroRegister(2), i.MemoryOperand(), kScratchReg); ++ break; ++ case kSw64Ldw: ++ __ Ldw(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kSw64Uldw: ++ __ Uldw(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kSw64Ldwu: ++ __ Ldwu(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kSw64Uldwu: ++ __ Uldwu(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kSw64Ldl: ++ __ Ldl(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kSw64Uldl: ++ __ Uldl(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kSw64Stw: ++ __ Stw(i.InputOrZeroRegister(2), i.MemoryOperand()); ++ break; ++ case kSw64Ustw: ++ __ Ustw(i.InputOrZeroRegister(2), i.MemoryOperand()); ++ break; ++ case kSw64Stl: ++ __ Stl(i.InputOrZeroRegister(2), i.MemoryOperand()); ++ break; ++ case kSw64Ustl: ++ __ Ustl(i.InputOrZeroRegister(2), i.MemoryOperand()); ++ break; ++ case kSw64Flds: { ++ __ Flds(i.OutputSingleRegister(), i.MemoryOperand()); ++ break; ++ } 
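++    // The Uflds/Ufsts/Ufldd/Ufstd cases below are the unaligned FP load/store
++    // variants; they go through kScratchReg instead of assuming natural alignment.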
++ case kSw64Uflds: { ++ __ Uflds(i.OutputSingleRegister(), i.MemoryOperand(), kScratchReg); ++ break; ++ } ++ case kSw64Fsts: { ++ size_t index = 0; ++ MemOperand operand = i.MemoryOperand(&index); ++ FPURegister ft = i.InputOrZeroSingleRegister(index); ++ __ Fsts(ft, operand); ++ break; ++ } ++ case kSw64Ufsts: { ++ size_t index = 0; ++ MemOperand operand = i.MemoryOperand(&index); ++ FPURegister ft = i.InputOrZeroSingleRegister(index); ++ __ Ufsts(ft, operand, kScratchReg); ++ break; ++ } ++ case kSw64Fldd: ++ __ Fldd(i.OutputDoubleRegister(), i.MemoryOperand()); ++ break; ++ case kSw64Ufldd: ++ __ Ufldd(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg); ++ break; ++ case kSw64Fstd: { ++ FPURegister ft = i.InputOrZeroDoubleRegister(2); ++ __ Fstd(ft, i.MemoryOperand()); ++ break; ++ } ++ case kSw64Ufstd: { ++ FPURegister ft = i.InputOrZeroDoubleRegister(2); ++ __ Ufstd(ft, i.MemoryOperand(), kScratchReg); ++ break; ++ } ++ case kSw64Sync: { ++ __ memb(); ++ break; ++ } ++ case kSw64Push: ++ if (instr->InputAt(0)->IsFPRegister()) { ++ __ Fstd(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize)); ++ __ Subw(sp, sp, Operand(kDoubleSize)); ++ frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize); ++ } else { ++ __ Push(i.InputRegister(0)); ++ frame_access_state()->IncreaseSPDelta(1); ++ } ++ break; ++ case kSw64Peek: { ++ int reverse_slot = i.InputInt32(0); ++ int offset = ++ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot); ++ if (instr->OutputAt(0)->IsFPRegister()) { ++ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0)); ++ if (op->representation() == MachineRepresentation::kFloat64) { ++ __ Fldd(i.OutputDoubleRegister(), MemOperand(fp, offset)); ++ } else if (op->representation() == MachineRepresentation::kFloat32) { ++ __ Flds( ++ i.OutputSingleRegister(0), ++ MemOperand(fp, offset + kLessSignificantWordInDoublewordOffset)); ++ } ++ } else { ++ __ Ldl(i.OutputRegister(0), MemOperand(fp, offset)); ++ } ++ break; ++ } ++ case kSw64StackClaim: { ++ __ Subl(sp, sp, Operand(i.InputInt32(0))); ++ frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / kPointerSize); ++ break; ++ } ++ case kSw64StoreToStackSlot: { ++ if (instr->InputAt(0)->IsFPRegister()) { ++ if (instr->InputAt(0)->IsSimd128Register()) { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ st_b(i.InputSimd128Register(0), MemOperand(sp, i.InputInt32(1))); ++ UNREACHABLE(); ++ } else if (instr->InputAt(0)->IsDoubleRegister()) { ++ __ Fstd(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1))); ++ } else { ++ __ Fsts(i.InputFloatRegister(0), MemOperand(sp, i.InputInt32(1))); ++ } ++ } else { ++ __ Stl(i.InputRegister(0), MemOperand(sp, i.InputInt32(1))); ++ } ++ break; ++ } ++ case kSw64ByteSwap64: { ++ __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 8); ++ break; ++ } ++ case kSw64ByteSwap32: { ++ __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4); ++// __ srll(i.OutputRegister(0), 32 ,i.OutputRegister(0)); ++ break; ++ } ++ case kWord32AtomicLoadInt8: ++ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldb); ++ break; ++ case kWord32AtomicLoadUint8: ++ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldbu); ++ break; ++ case kWord32AtomicLoadInt16: ++ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldh); ++ break; ++ case kWord32AtomicLoadUint16: ++ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldhu); ++ break; ++ case kWord32AtomicLoadWord32: ++ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldw); ++ break; ++ case kSw64Word64AtomicLoadUint8: ++ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldbu); ++ break; ++ case 
kSw64Word64AtomicLoadUint16: ++ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldhu); ++ break; ++ case kSw64Word64AtomicLoadUint32: ++ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldwu); ++ break; ++ case kSw64Word64AtomicLoadUint64: ++ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldl); ++ break; ++ case kWord32AtomicStoreWord8: ++ ASSEMBLE_ATOMIC_STORE_INTEGER(Stb); ++ break; ++ case kWord32AtomicStoreWord16: ++ ASSEMBLE_ATOMIC_STORE_INTEGER(Sth); ++ break; ++ case kWord32AtomicStoreWord32: ++ ASSEMBLE_ATOMIC_STORE_INTEGER(Stw); ++ break; ++ case kSw64Word64AtomicStoreWord8: ++ ASSEMBLE_ATOMIC_STORE_INTEGER(Stb); ++ break; ++ case kSw64Word64AtomicStoreWord16: ++ ASSEMBLE_ATOMIC_STORE_INTEGER(Sth); ++ break; ++ case kSw64Word64AtomicStoreWord32: ++ ASSEMBLE_ATOMIC_STORE_INTEGER(Stw); ++ break; ++ case kSw64Word64AtomicStoreWord64: ++ ASSEMBLE_ATOMIC_STORE_INTEGER(Stl); ++ break; ++ case kWord32AtomicExchangeInt8: ++ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(lldw, lstw, true, 8, 32); ++ break; ++ case kWord32AtomicExchangeUint8: ++ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(lldw, lstw, false, 8, 32); ++ break; ++ case kWord32AtomicExchangeInt16: ++ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(lldw, lstw, true, 16, 32); ++ break; ++ case kWord32AtomicExchangeUint16: ++ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(lldw, lstw, false, 16, 32); ++ break; ++ case kWord32AtomicExchangeWord32: ++ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lldw, lstw); ++ break; ++ case kSw64Word64AtomicExchangeUint8: ++ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(lldl, lstl, false, 8, 64); ++ break; ++ case kSw64Word64AtomicExchangeUint16: ++ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(lldl, lstl, false, 16, 64); ++ break; ++ case kSw64Word64AtomicExchangeUint32: ++ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(lldl, lstl, false, 32, 64); ++ break; ++ case kSw64Word64AtomicExchangeUint64: ++ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lldl, lstl); ++ break; ++ case kWord32AtomicCompareExchangeInt8: ++ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(lldw, lstw, true, 8, 32); ++ break; ++ case kWord32AtomicCompareExchangeUint8: ++ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(lldw, lstw, false, 8, 32); ++ break; ++ case kWord32AtomicCompareExchangeInt16: ++ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(lldw, lstw, true, 16, 32); ++ break; ++ case kWord32AtomicCompareExchangeUint16: ++ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(lldw, lstw, false, 16, 32); ++ break; ++ case kWord32AtomicCompareExchangeWord32: ++ __ addw(i.InputRegister(2), 0x0, i.InputRegister(2)); ++ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(lldw, lstw); ++ break; ++ case kSw64Word64AtomicCompareExchangeUint8: ++ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(lldl, lstl, false, 8, 64); ++ break; ++ case kSw64Word64AtomicCompareExchangeUint16: ++ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(lldl, lstl, false, 16, 64); ++ break; ++ case kSw64Word64AtomicCompareExchangeUint32: ++ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(lldl, lstl, false, 32, 64); ++ break; ++ case kSw64Word64AtomicCompareExchangeUint64: ++ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(lldl, lstl); ++ break; ++#define ATOMIC_BINOP_CASE(op, inst) \ ++ case kWord32Atomic##op##Int8: \ ++ ASSEMBLE_ATOMIC_BINOP_EXT(lldw, lstw, true, 8, inst, 32); \ ++ break; \ ++ case kWord32Atomic##op##Uint8: \ ++ ASSEMBLE_ATOMIC_BINOP_EXT(lldw, lstw, false, 8, inst, 32); \ ++ break; \ ++ case kWord32Atomic##op##Int16: \ ++ ASSEMBLE_ATOMIC_BINOP_EXT(lldw, lstw, true, 16, inst, 32); \ ++ break; \ ++ case kWord32Atomic##op##Uint16: \ ++ ASSEMBLE_ATOMIC_BINOP_EXT(lldw, lstw, false, 16, inst, 32); \ ++ break; \ ++ case 
kWord32Atomic##op##Word32: \ ++ ASSEMBLE_ATOMIC_BINOP(lldw, lstw, inst); \ ++ break; ++ ATOMIC_BINOP_CASE(Add, Addw) ++ ATOMIC_BINOP_CASE(Sub, Subw) ++ ATOMIC_BINOP_CASE(And, And) ++ ATOMIC_BINOP_CASE(Or, Or) ++ ATOMIC_BINOP_CASE(Xor, Xor) ++#undef ATOMIC_BINOP_CASE ++#define ATOMIC_BINOP_CASE(op, inst) \ ++ case kSw64Word64Atomic##op##Uint8: \ ++ ASSEMBLE_ATOMIC_BINOP_EXT(lldl, lstl, false, 8, inst, 64); \ ++ break; \ ++ case kSw64Word64Atomic##op##Uint16: \ ++ ASSEMBLE_ATOMIC_BINOP_EXT(lldl, lstl, false, 16, inst, 64); \ ++ break; \ ++ case kSw64Word64Atomic##op##Uint32: \ ++ ASSEMBLE_ATOMIC_BINOP_EXT(lldl, lstl, false, 32, inst, 64); \ ++ break; \ ++ case kSw64Word64Atomic##op##Uint64: \ ++ ASSEMBLE_ATOMIC_BINOP(lldl, lstl, inst); \ ++ break; ++ ATOMIC_BINOP_CASE(Add, Addl) ++ ATOMIC_BINOP_CASE(Sub, Subl) ++ ATOMIC_BINOP_CASE(And, And) ++ ATOMIC_BINOP_CASE(Or, Or) ++ ATOMIC_BINOP_CASE(Xor, Xor) ++#undef ATOMIC_BINOP_CASE ++ case kSw64AssertEqual: ++ __ Assert(eq, static_cast(i.InputOperand(2).immediate()), ++ i.InputRegister(0), Operand(i.InputRegister(1))); ++ break; ++ case kSw64S128Const: { ++// CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++// Simd128Register dst = i.OutputSimd128Register(); ++// uint64_t imm1 = make_uint64(i.InputUint32(1), i.InputUint32(0)); ++// uint64_t imm2 = make_uint64(i.InputUint32(3), i.InputUint32(2)); ++// __ li(kScratchReg, imm1); ++// __ insert_d(dst, 0, kScratchReg); ++// __ li(kScratchReg, imm2); ++// __ insert_d(dst, 1, kScratchReg); ++ UNREACHABLE(); ++// break; ++ } ++ case kSw64S128Zero: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ xor_v(i.OutputSimd128Register(), i.OutputSimd128Register(), ++ // i.OutputSimd128Register()); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64S128AllOnes: { ++// CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++// Simd128Register dst = i.OutputSimd128Register(); ++// __ ceq_d(dst, dst, dst); ++ UNREACHABLE(); ++// break; ++ } ++ case kSw64I32x4Splat: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ fill_w(i.OutputSimd128Register(), i.InputRegister(0)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I32x4ExtractLane: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ copy_s_w(i.OutputRegister(), i.InputSimd128Register(0), ++ // i.InputInt8(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I32x4ReplaceLane: { ++ CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ Simd128Register src = i.InputSimd128Register(0); ++ Simd128Register dst = i.OutputSimd128Register(); ++ if (src != dst) { ++ //__ move_v(dst, src); ++ UNREACHABLE(); ++ } ++ //__ insert_w(dst, i.InputInt8(1), i.InputRegister(2)); ++ UNREACHABLE(); ++ } ++ case kSw64I32x4Add: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ addv_w(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I32x4Sub: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ subv_w(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64F64x2Abs: { ++ UNREACHABLE(); ++ } ++ case kSw64F64x2Neg: { ++ UNREACHABLE(); ++ } ++ case kSw64F64x2Sqrt: { ++ UNREACHABLE(); ++ } ++ case kSw64F64x2Add: { ++ UNREACHABLE(); ++ } ++ case kSw64F64x2Sub: { ++ UNREACHABLE(); ++ } ++ case kSw64F64x2Mul: { ++ UNREACHABLE(); ++ } ++ case kSw64F64x2Div: { ++ UNREACHABLE(); ++ } ++ case kSw64F64x2Min: { ++ UNREACHABLE(); ++ } ++ case kSw64F64x2Max: { ++ UNREACHABLE(); ++ } ++ case kSw64F64x2Eq: { ++ 
UNREACHABLE(); ++ } ++ case kSw64F64x2Ne: { ++ UNREACHABLE(); ++ } ++ case kSw64F64x2Lt: { ++ UNREACHABLE(); ++ } ++ case kSw64F64x2Le: { ++ UNREACHABLE(); ++ } ++ case kSw64F64x2Splat: { ++ UNREACHABLE(); ++ } ++ case kSw64F64x2ExtractLane: { ++ UNREACHABLE(); ++ } ++ case kSw64F64x2ReplaceLane: { ++ UNREACHABLE(); ++ } ++ case kSw64I64x2Splat: { ++ UNREACHABLE(); ++ } ++ case kSw64I64x2ExtractLane: { ++ UNREACHABLE(); ++ } ++ case kSw64F64x2Pmin: { ++ UNREACHABLE(); ++ } ++ case kSw64F64x2Pmax: { ++ UNREACHABLE(); ++ } ++ case kSw64F64x2Ceil: { ++ UNREACHABLE(); ++ } ++ case kSw64F64x2Floor: { ++ UNREACHABLE(); ++ } ++ case kSw64F64x2Trunc: { ++ UNREACHABLE(); ++ } ++ case kSw64F64x2NearestInt: { ++ UNREACHABLE(); ++ } ++ case kSw64I64x2ReplaceLane: { ++ UNREACHABLE(); ++ } ++ case kSw64I64x2Add: { ++ UNREACHABLE(); ++ } ++ case kSw64I64x2Sub: { ++ UNREACHABLE(); ++ } ++ case kSw64I64x2Mul: { ++ UNREACHABLE(); ++ } ++ case kSw64I64x2Neg: { ++ UNREACHABLE(); ++ } ++ case kSw64I64x2Shl: { ++ UNREACHABLE(); ++ } ++ case kSw64I64x2ShrS: { ++ UNREACHABLE(); ++ } ++ case kSw64I64x2ShrU: { ++ UNREACHABLE(); ++ } ++ case kSw64F32x4Splat: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ FmoveLow(kScratchReg, i.InputSingleRegister(0)); ++ //__ fill_w(i.OutputSimd128Register(), kScratchReg); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64F32x4ExtractLane: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ copy_u_w(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1)); ++ //__ FmoveLow(i.OutputSingleRegister(), kScratchReg); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64F32x4ReplaceLane: { ++ CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ Simd128Register src = i.InputSimd128Register(0); ++ Simd128Register dst = i.OutputSimd128Register(); ++ if (src != dst) { ++ //__ move_v(dst, src); ++ UNREACHABLE(); ++ } ++ //__ FmoveLow(kScratchReg, i.InputSingleRegister(2)); ++ //__ insert_w(dst, i.InputInt8(1), kScratchReg); ++ break; ++ } ++ case kSw64F32x4SConvertI32x4: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ ffint_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64F32x4UConvertI32x4: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ ffint_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I32x4Mul: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ mulv_w(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I32x4MaxS: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ max_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I32x4MinS: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ min_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I32x4Eq: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ ceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I32x4Ne: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //Simd128Register dst = i.OutputSimd128Register(); ++ //__ ceq_w(dst, i.InputSimd128Register(0), i.InputSimd128Register(1)); ++ //__ nor_v(dst, dst, dst); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I32x4Shl: { ++ //CpuFeatureScope 
msa_scope(tasm(), SW64_SIMD); ++ //__ slli_w(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputInt5(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I32x4ShrS: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ srai_w(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputInt5(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I32x4ShrU: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ srli_w(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputInt5(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I32x4MaxU: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ max_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I32x4MinU: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ min_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64S128Select: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //DCHECK(i.OutputSimd128Register() == i.InputSimd128Register(0)); ++ //__ bsel_v(i.OutputSimd128Register(), i.InputSimd128Register(2), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64S128AndNot: { ++ UNREACHABLE(); ++ } ++ case kSw64F32x4Abs: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ bclri_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64F32x4Neg: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ bnegi_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64F32x4RecipApprox: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ frcp_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64F32x4RecipSqrtApprox: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ frsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64F32x4Add: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ fadd_w(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64F32x4Sub: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ fsub_w(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64F32x4Mul: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ fmul_w(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64F32x4Max: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ fmax_w(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64F32x4Eq: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ fceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64F32x4Ne: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ fcne_w(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64F32x4Lt: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ fclt_w(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ 
//break; ++ UNREACHABLE(); ++ } ++ case kSw64F32x4Le: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ fcle_w(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64F32x4Div: { ++ UNREACHABLE(); ++ } ++ case kSw64F32x4Pmin: { ++ UNREACHABLE(); ++ } ++ case kSw64F32x4Pmax: { ++ UNREACHABLE(); ++ } ++ case kSw64F32x4Ceil: { ++ UNREACHABLE(); ++ } ++ case kSw64F32x4Floor: { ++ UNREACHABLE(); ++ } ++ case kSw64F32x4Trunc: { ++ UNREACHABLE(); ++ } ++ case kSw64F32x4NearestInt: { ++ UNREACHABLE(); ++ } ++ case kSw64F32x4Sqrt: { ++ UNREACHABLE(); ++ } ++ case kSw64I32x4Abs: { ++ UNREACHABLE(); ++ } ++ case kSw64I32x4BitMask: { ++ UNREACHABLE(); ++ } ++// case kSw64I32x4SConvertF32x4: { ++// CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++// __ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); ++// break; ++// } ++ case kSw64I32x4UConvertF32x4: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ ftrunc_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I32x4Neg: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); ++ //__ subv_w(i.OutputSimd128Register(), kSimd128RegZero, ++ // i.InputSimd128Register(0)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I32x4GtS: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ clt_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1), ++ // i.InputSimd128Register(0)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I32x4GeS: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ cle_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1), ++ // i.InputSimd128Register(0)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I32x4GtU: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ clt_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1), ++ // i.InputSimd128Register(0)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I32x4GeU: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ cle_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1), ++ // i.InputSimd128Register(0)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I16x8Splat: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ fill_h(i.OutputSimd128Register(), i.InputRegister(0)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I16x8ExtractLaneU: { ++ UNREACHABLE(); ++ } ++ case kSw64I16x8ExtractLaneS: { ++ UNREACHABLE(); ++ } ++ case kSw64I16x8ReplaceLane: { ++ CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ Simd128Register src = i.InputSimd128Register(0); ++ Simd128Register dst = i.OutputSimd128Register(); ++ if (src != dst) { ++ //__ move_v(dst, src); ++ UNREACHABLE(); ++ } ++ //__ insert_h(dst, i.InputInt8(1), i.InputRegister(2)); ++ break; ++ } ++ case kSw64I16x8Neg: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); ++ //__ subv_h(i.OutputSimd128Register(), kSimd128RegZero, ++ // i.InputSimd128Register(0)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I16x8Shl: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ slli_h(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputInt4(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I16x8ShrS: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ srai_h(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputInt4(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case 
kSw64I16x8ShrU: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ srli_h(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputInt4(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I16x8Sub: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ subv_h(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I16x8Mul: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ mulv_h(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I16x8MaxS: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ max_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I16x8MinS: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ min_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I16x8Eq: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ ceq_h(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I16x8AddSaturateS: { ++ UNREACHABLE(); ++ } ++ case kSw64I16x8SubSaturateS: { ++ UNREACHABLE(); ++ } ++ case kSw64I8x16AddSaturateS: { ++ UNREACHABLE(); ++ } ++ case kSw64I8x16SubSaturateS: { ++ UNREACHABLE(); ++ } ++ case kSw64I16x8AddSaturateU: { ++ UNREACHABLE(); ++ } ++ case kSw64I16x8SubSaturateU: { ++ UNREACHABLE(); ++ } ++ case kSw64I8x16AddSaturateU: { ++ UNREACHABLE(); ++ } ++ case kSw64I8x16SubSaturateU: { ++ UNREACHABLE(); ++ } ++ ++ case kSw64I16x8Ne: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //Simd128Register dst = i.OutputSimd128Register(); ++ //__ ceq_h(dst, i.InputSimd128Register(0), i.InputSimd128Register(1)); ++ //__ nor_v(dst, dst, dst); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I16x8GtS: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ clt_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1), ++ // i.InputSimd128Register(0)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I16x8GeS: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ cle_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1), ++ // i.InputSimd128Register(0)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I16x8MaxU: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ max_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I16x8MinU: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ min_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I16x8GtU: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ clt_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1), ++ // i.InputSimd128Register(0)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I16x8GeU: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ cle_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1), ++ // i.InputSimd128Register(0)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I16x8RoundingAverageU: { ++ UNREACHABLE(); ++ } ++ case kSw64I16x8Abs: { ++ UNREACHABLE(); ++ } ++ case kSw64I16x8BitMask: { ++ UNREACHABLE(); ++ } ++ case kSw64I8x16Splat: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ 
fill_b(i.OutputSimd128Register(), i.InputRegister(0)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I8x16ExtractLaneU: { ++ UNREACHABLE(); ++ } ++ case kSw64I8x16ExtractLaneS: { ++ UNREACHABLE(); ++ } ++ case kSw64I8x16ReplaceLane: { ++ CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ Simd128Register src = i.InputSimd128Register(0); ++ Simd128Register dst = i.OutputSimd128Register(); ++ if (src != dst) { ++ //__ move_v(dst, src); ++ UNREACHABLE(); ++ } ++ //__ insert_b(dst, i.InputInt8(1), i.InputRegister(2)); ++ break; ++ } ++ case kSw64I8x16Neg: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); ++ //__ subv_b(i.OutputSimd128Register(), kSimd128RegZero, ++ // i.InputSimd128Register(0)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I8x16Shl: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ slli_b(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputInt3(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I8x16ShrS: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ srai_b(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputInt3(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I8x16Sub: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ subv_b(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I8x16Mul: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ mulv_b(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I8x16MaxS: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ max_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I8x16MinS: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ min_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I8x16Eq: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ ceq_b(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I8x16Ne: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //Simd128Register dst = i.OutputSimd128Register(); ++ //__ ceq_b(dst, i.InputSimd128Register(0), i.InputSimd128Register(1)); ++ //__ nor_v(dst, dst, dst); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I8x16GtS: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ clt_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1), ++ // i.InputSimd128Register(0)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I8x16GeS: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ cle_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1), ++ // i.InputSimd128Register(0)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I8x16ShrU: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ srli_b(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputInt3(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I8x16MaxU: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ max_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I8x16MinU: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ min_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // 
i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I8x16GtU: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ clt_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1), ++ // i.InputSimd128Register(0)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I8x16GeU: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ cle_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1), ++ // i.InputSimd128Register(0)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I8x16RoundingAverageU: { ++ UNREACHABLE(); ++ } ++ case kSw64I8x16Abs: { ++ UNREACHABLE(); ++ } ++ case kSw64I8x16BitMask: { ++ UNREACHABLE(); ++ } ++ case kSw64S128And: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ and_v(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64S128Or: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ or_v(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64S128Xor: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ xor_v(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(1)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64S128Not: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ nor_v(i.OutputSimd128Register(), i.InputSimd128Register(0), ++ // i.InputSimd128Register(0)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64V32x4AnyTrue: ++ case kSw64V16x8AnyTrue: ++ case kSw64V8x16AnyTrue: { ++ CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ Register dst = i.OutputRegister(); ++ Label all_false; ++ __ BranchMSA(&all_false, MSA_BRANCH_V, all_zero, ++ i.InputSimd128Register(0), USE_DELAY_SLOT); ++ __ li(dst, 0l); // branch delay slot ++ __ li(dst, 1); ++ __ bind(&all_false); ++ break; ++ } ++ case kSw64V32x4AllTrue: { ++ CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ Register dst = i.OutputRegister(); ++ Label all_true; ++ __ BranchMSA(&all_true, MSA_BRANCH_W, all_not_zero, ++ i.InputSimd128Register(0), USE_DELAY_SLOT); ++ __ li(dst, 1); // branch delay slot ++ __ li(dst, 0l); ++ __ bind(&all_true); ++ break; ++ } ++ case kSw64V16x8AllTrue: { ++ CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ Register dst = i.OutputRegister(); ++ Label all_true; ++ __ BranchMSA(&all_true, MSA_BRANCH_H, all_not_zero, ++ i.InputSimd128Register(0), USE_DELAY_SLOT); ++ __ li(dst, 1); // branch delay slot ++ __ li(dst, 0l); ++ __ bind(&all_true); ++ break; ++ } ++ case kSw64V8x16AllTrue: { ++ CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ Register dst = i.OutputRegister(); ++ Label all_true; ++ __ BranchMSA(&all_true, MSA_BRANCH_B, all_not_zero, ++ i.InputSimd128Register(0), USE_DELAY_SLOT); ++ __ li(dst, 1); // branch delay slot ++ __ li(dst, 0l); ++ __ bind(&all_true); ++ break; ++ } ++ case kSw64MsaLd: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ ld_b(i.OutputSimd128Register(), i.MemoryOperand()); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64MsaSt: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ st_b(i.InputSimd128Register(2), i.MemoryOperand()); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64S32x4InterleaveRight: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //Simd128Register dst = i.OutputSimd128Register(), ++ // src0 = i.InputSimd128Register(0), ++ // src1 = i.InputSimd128Register(1); ++ // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0] ++ // dst = [5, 1, 4, 0] ++ //__ ilvr_w(dst, src1, src0); ++ 
//break; ++ UNREACHABLE(); ++ } ++ case kSw64S32x4InterleaveLeft: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //Simd128Register dst = i.OutputSimd128Register(), ++ // src0 = i.InputSimd128Register(0), ++ // src1 = i.InputSimd128Register(1); ++ // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0] ++ // dst = [7, 3, 6, 2] ++ //__ ilvl_w(dst, src1, src0); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64S32x4PackEven: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //Simd128Register dst = i.OutputSimd128Register(), ++ // src0 = i.InputSimd128Register(0), ++ // src1 = i.InputSimd128Register(1); ++ // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0] ++ // dst = [6, 4, 2, 0] ++ //__ pckev_w(dst, src1, src0); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64S32x4PackOdd: { ++ UNREACHABLE(); ++ } ++ case kSw64S32x4InterleaveEven: { ++ UNREACHABLE(); ++ } ++ case kSw64S32x4InterleaveOdd: { ++ UNREACHABLE(); ++ } ++ case kSw64S32x4Shuffle: { ++ UNREACHABLE(); ++ } ++ case kSw64S16x8InterleaveRight: { ++ UNREACHABLE(); ++ } ++ case kSw64S16x8InterleaveLeft: { ++ UNREACHABLE(); ++ } ++ case kSw64S16x8PackEven: { ++ UNREACHABLE(); ++ } ++ case kSw64S16x8PackOdd: { ++ UNREACHABLE(); ++ } ++ case kSw64S16x8InterleaveEven: { ++ UNREACHABLE(); ++ } ++ case kSw64S16x8InterleaveOdd: { ++ UNREACHABLE(); ++ } ++ case kSw64S16x4Reverse: { ++ UNREACHABLE(); ++ } ++ case kSw64S16x2Reverse: { ++ UNREACHABLE(); ++ } ++ case kSw64S8x16InterleaveRight: { ++ UNREACHABLE(); ++ } ++ case kSw64S8x16InterleaveLeft: { ++ UNREACHABLE(); ++ } ++ case kSw64S8x16PackEven: { ++ UNREACHABLE(); ++ } ++ case kSw64S8x16PackOdd: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //Simd128Register dst = i.OutputSimd128Register(), ++ // src0 = i.InputSimd128Register(0), ++ // src1 = i.InputSimd128Register(1); ++ // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0] ++ // dst = [31, 29, ... 7, 5, 3, 1] ++ //__ pckod_b(dst, src1, src0); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64S8x16InterleaveEven: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //Simd128Register dst = i.OutputSimd128Register(), ++ // src0 = i.InputSimd128Register(0), ++ // src1 = i.InputSimd128Register(1); ++ // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0] ++ // dst = [30, 14, ... 18, 2, 16, 0] ++ //__ ilvev_b(dst, src1, src0); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64S8x16InterleaveOdd: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //Simd128Register dst = i.OutputSimd128Register(), ++ // src0 = i.InputSimd128Register(0), ++ // src1 = i.InputSimd128Register(1); ++ // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0] ++ // dst = [31, 15, ... 
19, 3, 17, 1] ++ //__ ilvod_b(dst, src1, src0); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64S8x16Concat: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //Simd128Register dst = i.OutputSimd128Register(); ++ //DCHECK(dst == i.InputSimd128Register(0)); ++ //__ sldi_b(dst, i.InputSimd128Register(1), i.InputInt4(2)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I8x16Shuffle: { ++ UNREACHABLE(); ++ //if (dst == src0) { ++ // __ move_v(kSimd128ScratchReg, src0); ++ // src0 = kSimd128ScratchReg; ++ //} else if (dst == src1) { ++ // __ move_v(kSimd128ScratchReg, src1); ++ // src1 = kSimd128ScratchReg; ++ //} ++ ++ //int64_t control_low = ++ // static_cast(i.InputInt32(3)) << 32 | i.InputInt32(2); ++ //int64_t control_hi = ++ // static_cast(i.InputInt32(5)) << 32 | i.InputInt32(4); ++ //__ li(kScratchReg, control_low); ++ //__ insert_d(dst, 0, kScratchReg); ++ //__ li(kScratchReg, control_hi); ++ //__ insert_d(dst, 1, kScratchReg); ++ //__ vshf_b(dst, src1, src0); ++ break; ++ } ++ case kSw64I8x16Swizzle: { ++ UNREACHABLE(); ++ } ++ case kSw64S8x8Reverse: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ // src = [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ++ // dst = [8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7] ++ // [A B C D] => [B A D C]: shf.w imm: 2 3 0 1 = 10110001 = 0xB1 ++ // C: [7, 6, 5, 4] => A': [4, 5, 6, 7]: shf.b imm: 00011011 = 0x1B ++ //__ shf_w(kSimd128ScratchReg, i.InputSimd128Register(0), 0xB1); ++ //__ shf_b(i.OutputSimd128Register(), kSimd128ScratchReg, 0x1B); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64S8x4Reverse: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ // src = [15, 14, ... 3, 2, 1, 0], dst = [12, 13, 14, 15, ... 0, 1, 2, 3] ++ // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B ++ //__ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64S8x2Reverse: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ // src = [15, 14, ... 3, 2, 1, 0], dst = [14, 15, 12, 13, ... 
2, 3, 0, 1] ++ // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1 ++ //__ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I32x4SConvertI16x8Low: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //Simd128Register dst = i.OutputSimd128Register(); ++ //Simd128Register src = i.InputSimd128Register(0); ++ //__ ilvr_h(kSimd128ScratchReg, src, src); ++ //__ slli_w(dst, kSimd128ScratchReg, 16); ++ //__ srai_w(dst, dst, 16); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I32x4SConvertI16x8High: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //Simd128Register dst = i.OutputSimd128Register(); ++ //Simd128Register src = i.InputSimd128Register(0); ++ //__ ilvl_h(kSimd128ScratchReg, src, src); ++ //__ slli_w(dst, kSimd128ScratchReg, 16); ++ //__ srai_w(dst, dst, 16); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I32x4UConvertI16x8Low: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); ++ //__ ilvr_h(i.OutputSimd128Register(), kSimd128RegZero, ++ // i.InputSimd128Register(0)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I32x4UConvertI16x8High: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); ++ //__ ilvl_h(i.OutputSimd128Register(), kSimd128RegZero, ++ // i.InputSimd128Register(0)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I16x8SConvertI8x16Low: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //Simd128Register dst = i.OutputSimd128Register(); ++ //Simd128Register src = i.InputSimd128Register(0); ++ //__ ilvr_b(kSimd128ScratchReg, src, src); ++ //__ slli_h(dst, kSimd128ScratchReg, 8); ++ //__ srai_h(dst, dst, 8); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I16x8SConvertI8x16High: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //Simd128Register dst = i.OutputSimd128Register(); ++ //Simd128Register src = i.InputSimd128Register(0); ++ //__ ilvl_b(kSimd128ScratchReg, src, src); ++ //__ slli_h(dst, kSimd128ScratchReg, 8); ++ //__ srai_h(dst, dst, 8); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I16x8SConvertI32x4: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //Simd128Register dst = i.OutputSimd128Register(); ++ //Simd128Register src0 = i.InputSimd128Register(0); ++ //Simd128Register src1 = i.InputSimd128Register(1); ++ //__ sat_s_w(kSimd128ScratchReg, src0, 15); ++ //__ sat_s_w(kSimd128RegZero, src1, 15); // kSimd128RegZero as scratch ++ //__ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I16x8UConvertI32x4: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //Simd128Register dst = i.OutputSimd128Register(); ++ //Simd128Register src0 = i.InputSimd128Register(0); ++ //Simd128Register src1 = i.InputSimd128Register(1); ++ //__ sat_u_w(kSimd128ScratchReg, src0, 15); ++ //__ sat_u_w(kSimd128RegZero, src1, 15); // kSimd128RegZero as scratch ++ //__ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I16x8UConvertI8x16Low: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); ++ //__ ilvr_b(i.OutputSimd128Register(), kSimd128RegZero, ++ // i.InputSimd128Register(0)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I16x8UConvertI8x16High: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); ++ //__ 
ilvl_b(i.OutputSimd128Register(), kSimd128RegZero, ++ // i.InputSimd128Register(0)); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I8x16SConvertI16x8: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //Simd128Register dst = i.OutputSimd128Register(); ++ //Simd128Register src0 = i.InputSimd128Register(0); ++ //Simd128Register src1 = i.InputSimd128Register(1); ++ //__ sat_s_h(kSimd128ScratchReg, src0, 7); ++ //__ sat_s_h(kSimd128RegZero, src1, 7); // kSimd128RegZero as scratch ++ //__ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I8x16UConvertI16x8: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //Simd128Register dst = i.OutputSimd128Register(); ++ //Simd128Register src0 = i.InputSimd128Register(0); ++ //Simd128Register src1 = i.InputSimd128Register(1); ++ //__ sat_u_h(kSimd128ScratchReg, src0, 7); ++ //__ sat_u_h(kSimd128RegZero, src1, 7); // kSimd128RegZero as scratch ++ //__ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64F32x4AddHoriz: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //Simd128Register src0 = i.InputSimd128Register(0); ++ //Simd128Register src1 = i.InputSimd128Register(1); ++ //Simd128Register dst = i.OutputSimd128Register(); ++ //__ shf_w(kSimd128ScratchReg, src0, 0xB1); // 2 3 0 1 : 10110001 : 0xB1 ++ //__ shf_w(kSimd128RegZero, src1, 0xB1); // kSimd128RegZero as scratch ++ //__ fadd_w(kSimd128ScratchReg, kSimd128ScratchReg, src0); ++ //__ fadd_w(kSimd128RegZero, kSimd128RegZero, src1); ++ //__ pckev_w(dst, kSimd128RegZero, kSimd128ScratchReg); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I32x4AddHoriz: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //Simd128Register src0 = i.InputSimd128Register(0); ++ //Simd128Register src1 = i.InputSimd128Register(1); ++ //Simd128Register dst = i.OutputSimd128Register(); ++ //__ hadd_s_d(kSimd128ScratchReg, src0, src0); ++ //__ hadd_s_d(kSimd128RegZero, src1, src1); // kSimd128RegZero as scratch ++ //__ pckev_w(dst, kSimd128RegZero, kSimd128ScratchReg); ++ //break; ++ UNREACHABLE(); ++ } ++ case kSw64I16x8AddHoriz: { ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //Simd128Register src0 = i.InputSimd128Register(0); ++ //Simd128Register src1 = i.InputSimd128Register(1); ++ //Simd128Register dst = i.OutputSimd128Register(); ++ //__ hadd_s_w(kSimd128ScratchReg, src0, src0); ++ //__ hadd_s_w(kSimd128RegZero, src1, src1); // kSimd128RegZero as scratch ++ //__ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg); ++ //break; ++ UNREACHABLE(); ++ } ++ default: ++ UNREACHABLE(); ++ } ++ return kSuccess; ++} // NOLINT(readability/fn_size) ++ ++ ++#define UNSUPPORTED_COND(opcode, condition) \ ++ StdoutStream{} << "Unsupported " << #opcode << " condition: \"" << condition \ ++ << "\""; \ ++ UNIMPLEMENTED(); ++ ++ ++void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm, ++ Instruction* instr, FlagsCondition condition, ++ Label* tlabel, Label* flabel, bool fallthru) { ++#undef __ ++#define __ tasm-> ++ Sw64OperandConverter i(gen, instr); ++ ++ Condition cc = kNoCondition; ++ // SW64 does not have condition code flags, so compare and branch are ++ // implemented differently than on the other arch's. The compare operations ++ // emit sw64 pseudo-instructions, which are handled here by branch ++ // instructions that do the actual comparison. Essential that the input ++ // registers to compare pseudo-op are not modified before this branch op, as ++ // they are tested here. 
++ ++ if (instr->arch_opcode() == kSw64Tst) { ++ cc = FlagsConditionToConditionTst(condition); ++ __ Branch(tlabel, cc, kScratchReg, Operand(zero_reg)); ++ } else if (instr->arch_opcode() == kSw64Dadd || ++ instr->arch_opcode() == kSw64Dsub) { ++ cc = FlagsConditionToConditionOvf(condition); ++ __ sral(i.OutputRegister(), 32 ,kScratchReg); ++ __ Sraw(kScratchReg2, i.OutputRegister(), 31); ++ __ Branch(tlabel, cc, kScratchReg2, Operand(kScratchReg)); ++ } else if (instr->arch_opcode() == kSw64DaddOvf || ++ instr->arch_opcode() == kSw64DsubOvf) { ++ switch (condition) { ++ // Overflow occurs if overflow register is negative ++ case kOverflow: ++ __ Branch(tlabel, lt, kScratchReg, Operand(zero_reg)); ++ break; ++ case kNotOverflow: ++ __ Branch(tlabel, ge, kScratchReg, Operand(zero_reg)); ++ break; ++ default: ++ UNSUPPORTED_COND(instr->arch_opcode(), condition); ++ break; ++ } ++ } else if (instr->arch_opcode() == kSw64MulOvf) { ++ // Overflow occurs if overflow register is not zero ++ switch (condition) { ++ case kOverflow: ++ __ Branch(tlabel, ne, kScratchReg, Operand(zero_reg)); ++ break; ++ case kNotOverflow: ++ __ Branch(tlabel, eq, kScratchReg, Operand(zero_reg)); ++ break; ++ default: ++ UNSUPPORTED_COND(kSw64MulOvf, condition); ++ break; ++ } ++ } else if (instr->arch_opcode() == kSw64Cmp) { ++ cc = FlagsConditionToConditionCmp(condition); ++ __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1)); ++ } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) { ++ cc = FlagsConditionToConditionCmp(condition); ++ Register lhs_register = sp; ++ uint32_t offset; ++ if (gen->ShouldApplyOffsetToStackCheck(instr, &offset)) { ++ lhs_register = i.TempRegister(0); ++ __ Subl(lhs_register, sp, offset); ++ } ++ __ Branch(tlabel, cc, lhs_register, Operand(i.InputRegister(0))); ++ } else if (instr->arch_opcode() == kSw64CmpS || ++ instr->arch_opcode() == kSw64CmpD) { ++ bool predicate; ++ FlagsConditionToConditionCmpFPU(&predicate, condition); ++ if (predicate) { ++ __ BranchTrueF(tlabel); ++ } else { ++ __ BranchFalseF(tlabel); ++ } ++ } else { ++ PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n", ++ instr->arch_opcode()); ++ UNIMPLEMENTED(); ++ } ++ if (!fallthru) __ Branch(flabel); // no fallthru to flabel. ++#undef __ ++#define __ tasm()-> ++} ++ ++// Assembles branches after an instruction. ++void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { ++ Label* tlabel = branch->true_label; ++ Label* flabel = branch->false_label; ++ ++ AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel, ++ branch->fallthru); ++} ++ ++void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition, ++ Instruction* instr) { ++ // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal). 
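++  // Each case below clears kSpeculationPoisonRegister when the negated
++  // condition holds, mirroring the corresponding branch sequences above.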
++  if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
++    return;
++  }
++
++  Sw64OperandConverter i(this, instr);
++  condition = NegateFlagsCondition(condition);
++
++  switch (instr->arch_opcode()) {
++    case kSw64Cmp: {
++      __ LoadZeroOnCondition(kSpeculationPoisonRegister, i.InputRegister(0),
++                             i.InputOperand(1),
++                             FlagsConditionToConditionCmp(condition));
++    }
++      return;
++    case kSw64Tst: {
++      switch (condition) {
++        case kEqual:
++          __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
++          break;
++        case kNotEqual:
++          __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
++                                        kScratchReg);
++          break;
++        default:
++          UNREACHABLE();
++      }
++    }
++      return;
++    case kSw64Dadd:
++    case kSw64Dsub: {
++      // Check for overflow creates 1 or 0 for result.
++      __ srll(i.OutputRegister(), 63 ,kScratchReg);
++      __ Srlw(kScratchReg2, i.OutputRegister(), 31);
++      __ xor_ins(kScratchReg, kScratchReg2, kScratchReg2);
++      switch (condition) {
++        case kOverflow:
++          __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
++                                        kScratchReg2);
++          break;
++        case kNotOverflow:
++          __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
++          break;
++        default:
++          UNSUPPORTED_COND(instr->arch_opcode(), condition);
++      }
++    }
++      return;
++    case kSw64DaddOvf:
++    case kSw64DsubOvf: {
++      // Overflow occurs if overflow register is negative
++      __ Cmplt(kScratchReg2, kScratchReg, zero_reg);
++      switch (condition) {
++        case kOverflow:
++          __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
++                                        kScratchReg2);
++          break;
++        case kNotOverflow:
++          __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
++          break;
++        default:
++          UNSUPPORTED_COND(instr->arch_opcode(), condition);
++      }
++    }
++      return;
++    case kSw64MulOvf: {
++      // Overflow occurs if overflow register is not zero
++      switch (condition) {
++        case kOverflow:
++          __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
++                                        kScratchReg);
++          break;
++        case kNotOverflow:
++          __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
++          break;
++        default:
++          UNSUPPORTED_COND(instr->arch_opcode(), condition);
++      }
++    }
++      return;
++    case kSw64CmpS:
++    case kSw64CmpD: {
++      bool predicate;
++      FlagsConditionToConditionCmpFPU(&predicate, condition);
++      if (predicate) {
++        __ LoadZeroIfFPUCondition(kSpeculationPoisonRegister);
++      } else {
++        __ LoadZeroIfNotFPUCondition(kSpeculationPoisonRegister);
++      }
++    }
++      return;
++    default:
++      UNREACHABLE();
++  }
++}
++
++#undef UNSUPPORTED_COND
++
++void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
++                                            BranchInfo* branch) {
++  AssembleArchBranch(instr, branch);
++}
++
++void CodeGenerator::AssembleArchJump(RpoNumber target) {
++  if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
++}
++
++void CodeGenerator::AssembleArchTrap(Instruction* instr,
++                                     FlagsCondition condition) {
++  class OutOfLineTrap final : public OutOfLineCode {
++   public:
++    OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
++        : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
++    void Generate() final {
++      Sw64OperandConverter i(gen_, instr_);
++      TrapId trap_id =
++          static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
++      GenerateCallToTrap(trap_id);
++    }
++
++   private:
++    void GenerateCallToTrap(TrapId trap_id) {
++      if (trap_id == TrapId::kInvalid) {
++        // We cannot test calls to the runtime in cctest/test-run-wasm.
++        // Therefore we emit a call to C here instead of a call to the runtime.
++        // We use the context register as the scratch register, because we do
++        // not have a context here.
++        __ PrepareCallCFunction(0, 0, cp);
++        __ CallCFunction(
++            ExternalReference::wasm_call_trap_callback_for_testing(), 0);
++        __ LeaveFrame(StackFrame::WASM);
++        auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
++        int pop_count =
++            static_cast<int>(call_descriptor->StackParameterCount());
++        pop_count += (pop_count & 1);  // align
++        __ Drop(pop_count);
++        __ Ret();
++      } else {
++        gen_->AssembleSourcePosition(instr_);
++        // A direct call to a wasm runtime stub defined in this module.
++        // Just encode the stub index. This will be patched when the code
++        // is added to the native module and copied into wasm code space.
++        __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
++        ReferenceMap* reference_map =
++            gen_->zone()->New<ReferenceMap>(gen_->zone());
++        gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
++        if (FLAG_debug_code) {
++          __ halt();//stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
++        }
++      }
++    }
++    Instruction* instr_;
++    CodeGenerator* gen_;
++  };
++  auto ool = zone()->New<OutOfLineTrap>(this, instr);
++  Label* tlabel = ool->entry();
++  AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true);
++}
++
++// Assembles boolean materializations after an instruction.
++void CodeGenerator::AssembleArchBoolean(Instruction* instr,
++                                        FlagsCondition condition) {
++  Sw64OperandConverter i(this, instr);
++
++  // Materialize a full 32-bit 1 or 0 value. The result register is always the
++  // last output of the instruction.
++  DCHECK_NE(0u, instr->OutputCount());
++  Register result = i.OutputRegister(instr->OutputCount() - 1);
++  Condition cc = kNoCondition;
++  // SW64 does not have condition code flags, so compare and branch are
++  // implemented differently than on the other arch's. The compare operations
++  // emit sw64 pseudo-instructions, which are checked and handled here.
++
++  if (instr->arch_opcode() == kSw64Tst) {
++    cc = FlagsConditionToConditionTst(condition);
++    if (cc == eq) {
++      __ Cmpult(result, kScratchReg, 1);
++    } else {
++      __ Cmpult(result, zero_reg, kScratchReg);
++    }
++    return;
++  } else if (instr->arch_opcode() == kSw64Dadd ||
++             instr->arch_opcode() == kSw64Dsub) {
++    cc = FlagsConditionToConditionOvf(condition);
++    // Check for overflow creates 1 or 0 for result.
++    __ srll(i.OutputRegister(), 63 ,kScratchReg);
++    __ Srlw(kScratchReg2, i.OutputRegister(), 31);
++    __ xor_ins(kScratchReg, kScratchReg2, result);
++    if (cc == eq) // Toggle result for not overflow.
++      __ xor_ins(result, 1, result);
++    return;
++  } else if (instr->arch_opcode() == kSw64DaddOvf ||
++             instr->arch_opcode() == kSw64DsubOvf) {
++    // Overflow occurs if overflow register is negative
++    __ cmplt(kScratchReg, zero_reg, result);
++  } else if (instr->arch_opcode() == kSw64MulOvf) {
++    // Overflow occurs if overflow register is not zero
++    __ Cmpugt(result, kScratchReg, zero_reg);
++  } else if (instr->arch_opcode() == kSw64Cmp) {
++    cc = FlagsConditionToConditionCmp(condition);
++    switch (cc) {
++      case eq:
++      case ne: {
++        Register left = i.InputRegister(0);
++        Operand right = i.InputOperand(1);
++        if (instr->InputAt(1)->IsImmediate()) {
++          if (is_int16(-right.immediate())) {
++            if (right.immediate() == 0) {
++              if (cc == eq) {
++                __ Cmpult(result, left, 1);
++              } else {
++                __ Cmpult(result, zero_reg, left);
++              }
++            } else {
++              __ Addl(result, left, Operand(-right.immediate()));
++              if (cc == eq) {
++                __ Cmpult(result, result, 1);
++              } else {
++                __ Cmpult(result, zero_reg, result);
++              }
++            }
++          } else {
++            if (is_uint16(right.immediate())) {
++              __ Xor(result, left, right);
++            } else {
++              __ li(kScratchReg, right);
++              __ Xor(result, left, kScratchReg);
++            }
++            if (cc == eq) {
++              __ Cmpult(result, result, 1);
++            } else {
++              __ Cmpult(result, zero_reg, result);
++            }
++          }
++        } else {
++          __ Xor(result, left, right);
++          if (cc == eq) {
++            __ Cmpult(result, result, 1);
++          } else {
++            __ Cmpult(result, zero_reg, result);
++          }
++        }
++      } break;
++      case lt:
++      case ge: {
++        Register left = i.InputRegister(0);
++        Operand right = i.InputOperand(1);
++        __ Cmplt(result, left, right);
++        if (cc == ge) {
++          __ xor_ins(result, 1, result);
++        }
++      } break;
++      case gt:
++      case le: {
++        Register left = i.InputRegister(1);
++        Operand right = i.InputOperand(0);
++        __ Cmplt(result, left, right);
++        if (cc == le) {
++          __ xor_ins(result, 1, result);
++        }
++      } break;
++      case lo:
++      case hs: {
++        Register left = i.InputRegister(0);
++        Operand right = i.InputOperand(1);
++        __ Cmpult(result, left, right);
++        if (cc == hs) {
++          __ xor_ins(result, 1, result);
++        }
++      } break;
++      case hi:
++      case ls: {
++        Register left = i.InputRegister(1);
++        Operand right = i.InputOperand(0);
++        __ Cmpult(result, left, right);
++        if (cc == ls) {
++          __ xor_ins(result, 1, result);
++        }
++      } break;
++      default:
++        UNREACHABLE();
++    }
++    return;
++  } else if (instr->arch_opcode() == kSw64CmpD ||
++             instr->arch_opcode() == kSw64CmpS) {
++    bool predicate;
++    FlagsConditionToConditionCmpFPU(&predicate, condition);
++#ifdef SW64
++    __ li(result, Operand(1));
++    if (predicate) {
++      __ LoadZeroIfNotFPUCondition(result);
++    } else {
++      __ LoadZeroIfFPUCondition(result);
++    }
++#endif
++    return;
++  } else {
++    PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
++           instr->arch_opcode());
++    TRACE_UNIMPL();
++    UNIMPLEMENTED();
++  }
++}
++
++void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
++  Sw64OperandConverter i(this, instr);
++  Register input = i.InputRegister(0);
++  std::vector<std::pair<int32_t, Label*>> cases;
++  for (size_t index = 2; index < instr->InputCount(); index += 2) {
++    cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
++  }
++  AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
++                                      cases.data() + cases.size());
++}
++
++void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
++  Sw64OperandConverter i(this, instr);
++  Register input = i.InputRegister(0);
++  size_t const case_count = instr->InputCount() - 2;
++
++  __ Branch(GetLabel(i.InputRpo(1)), hs, input,
Operand(case_count)); ++ __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) { ++ return GetLabel(i.InputRpo(index + 2)); ++ }); ++} ++ ++void CodeGenerator::FinishFrame(Frame* frame) { ++ auto call_descriptor = linkage()->GetIncomingDescriptor(); ++ ++ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters(); ++ if (saves_fpu != 0) { ++ int count = base::bits::CountPopulation(saves_fpu); ++ DCHECK_EQ(kNumCalleeSavedFPU, count); ++ frame->AllocateSavedCalleeRegisterSlots(count * ++ (kDoubleSize / kSystemPointerSize)); ++ } ++ ++ const RegList saves = call_descriptor->CalleeSavedRegisters(); ++ if (saves != 0) { ++ int count = base::bits::CountPopulation(saves); ++ DCHECK_EQ(kNumCalleeSaved, count + 1); ++ frame->AllocateSavedCalleeRegisterSlots(count); ++ } ++} ++ ++void CodeGenerator::AssembleConstructFrame() { ++ auto call_descriptor = linkage()->GetIncomingDescriptor(); ++ ++ if (frame_access_state()->has_frame()) { ++ if (call_descriptor->IsCFunctionCall()) { ++ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) { ++ __ StubPrologue(StackFrame::C_WASM_ENTRY); ++ // Reserve stack space for saving the c_entry_fp later. ++ __ Subl(sp, sp, Operand(kSystemPointerSize)); ++ } else { ++ __ Push(ra, fp); ++ __ mov(fp, sp); ++ } ++ } else if (call_descriptor->IsJSFunctionCall()) { ++ __ Prologue(); ++ } else { ++ __ StubPrologue(info()->GetOutputStackFrameType()); ++ if (call_descriptor->IsWasmFunctionCall()) { ++ __ Push(kWasmInstanceRegister); ++ } else if (call_descriptor->IsWasmImportWrapper() || ++ call_descriptor->IsWasmCapiFunction()) { ++ // Wasm import wrappers are passed a tuple in the place of the instance. ++ // Unpack the tuple into the instance and the target callable. ++ // This must be done here in the codegen because it cannot be expressed ++ // properly in the graph. ++ __ Ldl(kJSFunctionRegister, ++ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset)); ++ __ Ldl(kWasmInstanceRegister, ++ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset)); ++ __ Push(kWasmInstanceRegister); ++ if (call_descriptor->IsWasmCapiFunction()) { ++ // Reserve space for saving the PC later. ++ __ Subl(sp, sp, Operand(kSystemPointerSize)); ++ } ++ } ++ } ++ } ++ ++ int required_slots = ++ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount(); ++ ++ if (info()->is_osr()) { ++ // TurboFan OSR-compiled functions cannot be entered directly. ++ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction); ++ ++ // Unoptimized code jumps directly to this entrypoint while the unoptimized ++ // frame is still on the stack. Optimized code uses OSR values directly from ++ // the unoptimized frame. Thus, all that needs to be done is to allocate the ++ // remaining stack slots. ++ if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --"); ++ osr_pc_offset_ = __ pc_offset(); ++ required_slots -= osr_helper()->UnoptimizedFrameSlots(); ++ ResetSpeculationPoison(); ++ } ++ ++ const RegList saves = call_descriptor->CalleeSavedRegisters(); ++ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters(); ++ ++ if (required_slots > 0) { ++ DCHECK(frame_access_state()->has_frame()); ++ if (info()->IsWasm() && required_slots > 128) { ++ // For WebAssembly functions with big frames we have to do the stack ++ // overflow check before we construct the frame. Otherwise we may not ++ // have enough space on the stack to call the runtime for the stack ++ // overflow. 
++ Label done; ++ ++ // If the frame is bigger than the stack, we throw the stack overflow ++ // exception unconditionally. Thereby we can avoid the integer overflow ++ // check in the condition code. ++ if ((required_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) { ++ __ Ldl( ++ kScratchReg, ++ FieldMemOperand(kWasmInstanceRegister, ++ WasmInstanceObject::kRealStackLimitAddressOffset)); ++ __ Ldl(kScratchReg, MemOperand(kScratchReg)); ++ __ Addl(kScratchReg, kScratchReg, ++ Operand(required_slots * kSystemPointerSize)); ++ __ Branch(&done, uge, sp, Operand(kScratchReg)); ++ } ++ ++ __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL); ++ // We come from WebAssembly, there are no references for the GC. ++ ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone()); ++ RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt); ++ if (FLAG_debug_code) { ++ __ halt(); ++ } ++ ++ __ bind(&done); ++ } ++ } ++ ++ const int returns = frame()->GetReturnSlotCount(); ++ ++ // Skip callee-saved and return slots, which are pushed below. ++ required_slots -= base::bits::CountPopulation(saves); ++ required_slots -= base::bits::CountPopulation(saves_fpu); ++ required_slots -= returns; ++ if (required_slots > 0) { ++ __ Subl(sp, sp, Operand(required_slots * kSystemPointerSize)); ++ } ++ ++ if (saves_fpu != 0) { ++ // Save callee-saved FPU registers. ++ __ MultiPushFPU(saves_fpu); ++ DCHECK_EQ(kNumCalleeSavedFPU, base::bits::CountPopulation(saves_fpu)); ++ } ++ ++ if (saves != 0) { ++ // Save callee-saved registers. ++ __ MultiPush(saves); ++ DCHECK_EQ(kNumCalleeSaved, base::bits::CountPopulation(saves) + 1); ++ } ++ ++ if (returns != 0) { ++ // Create space for returns. ++ __ Subl(sp, sp, Operand(returns * kSystemPointerSize)); ++ } ++ ++#ifdef SW64 ++ // should consider float INF, which will lead to SIGFPE. ++ __ setfpec1(); ++#endif ++} ++ ++void CodeGenerator::AssembleReturn(InstructionOperand* pop) { ++ auto call_descriptor = linkage()->GetIncomingDescriptor(); ++ ++ const int returns = frame()->GetReturnSlotCount(); ++ if (returns != 0) { ++ __ Addl(sp, sp, Operand(returns * kSystemPointerSize)); ++ } ++ ++ // Restore GP registers. ++ const RegList saves = call_descriptor->CalleeSavedRegisters(); ++ if (saves != 0) { ++ __ MultiPop(saves); ++ } ++ ++ // Restore FPU registers. ++ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters(); ++ if (saves_fpu != 0) { ++ __ MultiPopFPU(saves_fpu); ++ } ++ ++ Sw64OperandConverter g(this, nullptr); ++ if (call_descriptor->IsCFunctionCall()) { ++ AssembleDeconstructFrame(); ++ } else if (frame_access_state()->has_frame()) { ++ // Canonicalize JSFunction return sites for now unless they have a variable ++ // number of stack slot pops.
++ if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) { ++ if (return_label_.is_bound()) { ++ __ Branch(&return_label_); ++ return; ++ } else { ++ __ bind(&return_label_); ++ AssembleDeconstructFrame(); ++ } ++ } else { ++ AssembleDeconstructFrame(); ++ } ++ } ++ int pop_count = static_cast<int>(call_descriptor->StackParameterCount()); ++ if (pop->IsImmediate()) { ++ pop_count += g.ToConstant(pop).ToInt32(); ++ } else { ++ Register pop_reg = g.ToRegister(pop); ++ __ slll(pop_reg, kSystemPointerSizeLog2, pop_reg); ++ __ Addl(sp, sp, pop_reg); ++ } ++ if (pop_count != 0) { ++ __ DropAndRet(pop_count); ++ } else { ++ __ Ret(); ++ } ++} ++ ++void CodeGenerator::FinishCode() {} ++ ++void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {} ++ ++void CodeGenerator::AssembleMove(InstructionOperand* source, ++ InstructionOperand* destination) { ++ Sw64OperandConverter g(this, nullptr); ++ // Dispatch on the source and destination operand kinds. Not all ++ // combinations are possible. ++ if (source->IsRegister()) { ++ DCHECK(destination->IsRegister() || destination->IsStackSlot()); ++ Register src = g.ToRegister(source); ++ if (destination->IsRegister()) { ++ __ mov(g.ToRegister(destination), src); ++ } else { ++ __ Stl(src, g.ToMemOperand(destination)); ++ } ++ } else if (source->IsStackSlot()) { ++ DCHECK(destination->IsRegister() || destination->IsStackSlot()); ++ MemOperand src = g.ToMemOperand(source); ++ if (destination->IsRegister()) { ++ __ Ldl(g.ToRegister(destination), src); ++ } else { ++ Register temp = kScratchReg; ++ __ Ldl(temp, src); ++ __ Stl(temp, g.ToMemOperand(destination)); ++ } ++ } else if (source->IsConstant()) { ++ Constant src = g.ToConstant(source); ++ if (destination->IsRegister() || destination->IsStackSlot()) { ++ Register dst = ++ destination->IsRegister() ? g.ToRegister(destination) : kScratchReg; ++ switch (src.type()) { ++ case Constant::kInt32: ++ __ li(dst, Operand(src.ToInt32())); ++ break; ++ case Constant::kFloat32: ++ __ li(dst, Operand::EmbeddedNumber(src.ToFloat32())); ++ break; ++ case Constant::kInt64: ++ if (RelocInfo::IsWasmReference(src.rmode())) { ++ __ li(dst, Operand(src.ToInt64(), src.rmode())); ++ } else { ++ __ li(dst, Operand(src.ToInt64())); ++ } ++ break; ++ case Constant::kFloat64: ++ __ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value())); ++ break; ++ case Constant::kExternalReference: ++ __ li(dst, src.ToExternalReference()); ++ break; ++ case Constant::kDelayedStringConstant: ++ __ li(dst, src.ToDelayedStringConstant()); ++ break; ++ case Constant::kHeapObject: { ++ Handle<HeapObject> src_object = src.ToHeapObject(); ++ RootIndex index; ++ if (IsMaterializableFromRoot(src_object, &index)) { ++ __ LoadRoot(dst, index); ++ } else { ++ __ li(dst, src_object); ++ } ++ break; ++ } ++ case Constant::kCompressedHeapObject: ++ UNREACHABLE(); ++ case Constant::kRpoNumber: ++ UNREACHABLE(); // TODO(titzer): loading RPO numbers on sw64.
++ break; ++ } ++ if (destination->IsStackSlot()) __ Stl(dst, g.ToMemOperand(destination)); ++ } else if (src.type() == Constant::kFloat32) { ++ if (destination->IsFPStackSlot()) { ++ MemOperand dst = g.ToMemOperand(destination); ++ if (bit_cast<int32_t>(src.ToFloat32()) == 0) { ++ __ Stl(zero_reg, dst); ++ } else { ++ __ li(kScratchReg, Operand(bit_cast<int32_t>(src.ToFloat32()))); ++ __ Stl(kScratchReg, dst); ++ } ++ } else { ++ DCHECK(destination->IsFPRegister()); ++ FloatRegister dst = g.ToSingleRegister(destination); ++ __ Move(dst, src.ToFloat32()); ++ } ++ } else { ++ DCHECK_EQ(Constant::kFloat64, src.type()); ++ DoubleRegister dst = destination->IsFPRegister() ++ ? g.ToDoubleRegister(destination) ++ : kScratchDoubleReg; ++ __ Move(dst, src.ToFloat64().value()); ++ if (destination->IsFPStackSlot()) { ++ __ Fstd(dst, g.ToMemOperand(destination)); ++ } ++ } ++ } else if (source->IsFPRegister()) { ++ MachineRepresentation rep = LocationOperand::cast(source)->representation(); ++ if (rep == MachineRepresentation::kSimd128) { ++ UNREACHABLE(); ++ // CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ // MSARegister src = g.ToSimd128Register(source); ++ // if (destination->IsSimd128Register()) { ++ //MSARegister dst = g.ToSimd128Register(destination); ++ //__ move_v(dst, src); ++ // } else { ++ //DCHECK(destination->IsSimd128StackSlot()); ++ //__ st_b(src, g.ToMemOperand(destination)); ++ //} ++ } else { ++ FPURegister src = g.ToDoubleRegister(source); ++ if (destination->IsFPRegister()) { ++ FPURegister dst = g.ToDoubleRegister(destination); ++ __ Move(dst, src); ++ } else { ++ DCHECK(destination->IsFPStackSlot()); ++ destination->IsDoubleStackSlot() ? ++ __ Fstd(src, g.ToMemOperand(destination)) : ++ __ Fsts(src, g.ToMemOperand(destination)); ++ } ++ } ++ } else if (source->IsFPStackSlot()) { ++ DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot()); ++ MemOperand src = g.ToMemOperand(source); ++ MachineRepresentation rep = LocationOperand::cast(source)->representation(); ++ if (rep == MachineRepresentation::kSimd128) { ++ UNREACHABLE(); ++ // CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ // if (destination->IsSimd128Register()) { ++ //__ ld_b(g.ToSimd128Register(destination), src); ++ // } else { ++ //DCHECK(destination->IsSimd128StackSlot()); ++ //MSARegister temp = kSimd128ScratchReg; ++ //__ ld_b(temp, src); ++ //__ st_b(temp, g.ToMemOperand(destination)); ++ // } ++ } else { ++ if (destination->IsFPRegister()) { ++ source->IsDoubleStackSlot() ? ++ __ Fldd(g.ToDoubleRegister(destination), src) : ++ __ Flds(g.ToFloatRegister(destination), src); ++ } else { ++ DCHECK(destination->IsFPStackSlot()); ++ FPURegister temp = kScratchDoubleReg; ++ source->IsDoubleStackSlot() ? __ Fldd(temp, src) : __ Flds(temp, src); ++ destination->IsDoubleStackSlot() ? ++ __ Fstd(temp, g.ToMemOperand(destination)) : ++ __ Fsts(temp, g.ToMemOperand(destination)); ++ } ++ } ++ } else { ++ UNREACHABLE(); ++ } ++} ++ ++void CodeGenerator::AssembleSwap(InstructionOperand* source, ++ InstructionOperand* destination) { ++ Sw64OperandConverter g(this, nullptr); ++ // Dispatch on the source and destination operand kinds. Not all ++ // combinations are possible. ++ if (source->IsRegister()) { ++ // Register-register.
++ Register temp = kScratchReg; ++ Register src = g.ToRegister(source); ++ if (destination->IsRegister()) { ++ Register dst = g.ToRegister(destination); ++ __ Move(temp, src); ++ __ Move(src, dst); ++ __ Move(dst, temp); ++ } else { ++ DCHECK(destination->IsStackSlot()); ++ MemOperand dst = g.ToMemOperand(destination); ++ __ mov(temp, src); ++ __ Ldl(src, dst); ++ __ Stl(temp, dst); ++ } ++ } else if (source->IsStackSlot()) { ++ DCHECK(destination->IsStackSlot()); ++ Register temp_0 = kScratchReg; ++ Register temp_1 = kScratchReg2; ++ MemOperand src = g.ToMemOperand(source); ++ MemOperand dst = g.ToMemOperand(destination); ++ __ Ldl(temp_0, src); ++ __ Ldl(temp_1, dst); ++ __ Stl(temp_0, dst); ++ __ Stl(temp_1, src); ++ } else if (source->IsFPRegister()) { ++ MachineRepresentation rep = LocationOperand::cast(source)->representation(); ++ if (rep == MachineRepresentation::kSimd128) { ++ UNREACHABLE(); ++ // CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ // MSARegister temp = kSimd128ScratchReg; ++ // MSARegister src = g.ToSimd128Register(source); ++ // if (destination->IsSimd128Register()) { ++ //MSARegister dst = g.ToSimd128Register(destination); ++ //__ move_v(temp, src); ++ //__ move_v(src, dst); ++ //__ move_v(dst, temp); ++ // } else { ++ //DCHECK(destination->IsSimd128StackSlot()); ++ //MemOperand dst = g.ToMemOperand(destination); ++ //__ move_v(temp, src); ++ //__ ld_b(src, dst); ++ //__ st_b(temp, dst); ++ // } ++ } else { ++ FPURegister temp = kScratchDoubleReg; ++ FPURegister src = g.ToDoubleRegister(source); ++ if (destination->IsFPRegister()) { ++ FPURegister dst = g.ToDoubleRegister(destination); ++ __ Move(temp, src); ++ __ Move(src, dst); ++ __ Move(dst, temp); ++ } else { ++ DCHECK(destination->IsFPStackSlot()); ++ MemOperand dst = g.ToMemOperand(destination); ++ __ Move(temp, src); ++ destination->IsDoubleStackSlot() ? __ Fldd(src, dst) : __ Flds(src, dst); ++ source->IsDoubleRegister() ? __ Fstd(temp, dst) : __ Fsts(temp, dst); ++ } ++ } ++ } else if (source->IsFPStackSlot()) { ++ DCHECK(destination->IsFPStackSlot()); ++ Register temp_0 = kScratchReg; ++ MemOperand src0 = g.ToMemOperand(source); ++ MemOperand src1(src0.rm(), src0.offset() + kIntSize); ++ MemOperand dst0 = g.ToMemOperand(destination); ++ MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize); ++ MachineRepresentation rep = LocationOperand::cast(source)->representation(); ++ if (rep == MachineRepresentation::kSimd128) { ++ //MemOperand src2(src0.rm(), src0.offset() + 2 * kIntSize); ++ //MemOperand src3(src0.rm(), src0.offset() + 3 * kIntSize); ++ //MemOperand dst2(dst0.rm(), dst0.offset() + 2 * kIntSize); ++ //MemOperand dst3(dst0.rm(), dst0.offset() + 3 * kIntSize); ++ //CpuFeatureScope msa_scope(tasm(), SW64_SIMD); ++ //MSARegister temp_1 = kSimd128ScratchReg; ++ //__ ld_b(temp_1, dst0); // Save destination in temp_1. ++ //__ Ldw(temp_0, src0); // Then use temp_0 to copy source to destination. ++ //__ Stw(temp_0, dst0); ++ //__ Ldw(temp_0, src1); ++ //__ Stw(temp_0, dst1); ++ //__ Ldw(temp_0, src2); ++ //__ Stw(temp_0, dst2); ++ //__ Ldw(temp_0, src3); ++ //__ Stw(temp_0, dst3); ++ //__ st_b(temp_1, src0); ++ UNREACHABLE(); ++ } else { ++ FPURegister temp_1 = kScratchDoubleReg; ++ __ Fldd(temp_1, dst0); // Save destination in temp_1. ++ __ Ldw(temp_0, src0); // Then use temp_0 to copy source to destination. ++ __ Stw(temp_0, dst0); ++ __ Ldw(temp_0, src1); ++ __ Stw(temp_0, dst1); ++ __ Fstd(temp_1, src0); ++ } ++ } else { ++ // No other combinations are possible. 
++ UNREACHABLE(); ++ } ++} ++ ++void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) { ++ // On 64-bit SW64 we emit the jump tables inline. ++ UNREACHABLE(); ++} ++ ++#undef ASSEMBLE_ATOMIC_LOAD_INTEGER ++#undef ASSEMBLE_ATOMIC_STORE_INTEGER ++#undef ASSEMBLE_ATOMIC_BINOP ++#undef ASSEMBLE_ATOMIC_BINOP_EXT ++#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER ++#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT ++#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER ++#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT ++#undef ASSEMBLE_IEEE754_BINOP ++#undef ASSEMBLE_IEEE754_UNOP ++ ++#undef TRACE_MSG ++#undef TRACE_UNIMPL ++#undef __ ++ ++} // namespace compiler ++} // namespace internal ++} // namespace v8 +diff --git a/src/3rdparty/chromium/v8/src/compiler/backend/sw64/instruction-codes-sw64.h b/src/3rdparty/chromium/v8/src/compiler/backend/sw64/instruction-codes-sw64.h +new file mode 100755 +index 000000000..65df932f1 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/compiler/backend/sw64/instruction-codes-sw64.h +@@ -0,0 +1,427 @@ ++// Copyright 2014 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#ifndef V8_COMPILER_BACKEND_SW64_INSTRUCTION_CODES_SW64_H_ ++#define V8_COMPILER_BACKEND_SW64_INSTRUCTION_CODES_SW64_H_ ++ ++namespace v8 { ++namespace internal { ++namespace compiler { ++ ++// SW64-specific opcodes that specify which assembly sequence to emit. ++// Most opcodes specify a single instruction. ++#define TARGET_ARCH_OPCODE_LIST(V) \ ++ V(Sw64Add) \ ++ V(Sw64Dadd) \ ++ V(Sw64DaddOvf) \ ++ V(Sw64Sub) \ ++ V(Sw64Dsub) \ ++ V(Sw64DsubOvf) \ ++ V(Sw64Mul) \ ++ V(Sw64MulOvf) \ ++ V(Sw64MulHigh) \ ++ V(Sw64DMulHigh) \ ++ V(Sw64MulHighU) \ ++ V(Sw64Dmul) \ ++ V(Sw64Div) \ ++ V(Sw64Ddiv) \ ++ V(Sw64DivU) \ ++ V(Sw64DdivU) \ ++ V(Sw64Mod) \ ++ V(Sw64Dmod) \ ++ V(Sw64ModU) \ ++ V(Sw64DmodU) \ ++ V(Sw64And) \ ++ V(Sw64And32) \ ++ V(Sw64Or) \ ++ V(Sw64Or32) \ ++ V(Sw64Nor) \ ++ V(Sw64Nor32) \ ++ V(Sw64Xor) \ ++ V(Sw64Xor32) \ ++ V(Sw64Clz) \ ++ V(Sw64Lsa) \ ++ V(Sw64Dlsa) \ ++ V(Sw64Shl) \ ++ V(Sw64Shr) \ ++ V(Sw64Sar) \ ++ V(Sw64Ext) \ ++ V(Sw64Ins) \ ++ V(Sw64Dext) \ ++ V(Sw64Dins) \ ++ V(Sw64Dclz) \ ++ V(Sw64Ctz) \ ++ V(Sw64Dctz) \ ++ V(Sw64Popcnt) \ ++ V(Sw64Dpopcnt) \ ++ V(Sw64Dshl) \ ++ V(Sw64Dshr) \ ++ V(Sw64Dsar) \ ++ V(Sw64Ror) \ ++ V(Sw64Dror) \ ++ V(Sw64Mov) \ ++ V(Sw64Tst) \ ++ V(Sw64Cmp) \ ++ V(Sw64CmpS) \ ++ V(Sw64AddS) \ ++ V(Sw64SubS) \ ++ V(Sw64MulS) \ ++ V(Sw64DivS) \ ++ V(Sw64ModS) \ ++ V(Sw64AbsS) \ ++ V(Sw64NegS) \ ++ V(Sw64SqrtS) \ ++ V(Sw64MaxS) \ ++ V(Sw64MinS) \ ++ V(Sw64CmpD) \ ++ V(Sw64AddD) \ ++ V(Sw64SubD) \ ++ V(Sw64MulD) \ ++ V(Sw64DivD) \ ++ V(Sw64ModD) \ ++ V(Sw64AbsD) \ ++ V(Sw64NegD) \ ++ V(Sw64SqrtD) \ ++ V(Sw64MaxD) \ ++ V(Sw64MinD) \ ++ V(Sw64Float64RoundDown) \ ++ V(Sw64Float64RoundTruncate) \ ++ V(Sw64Float64RoundUp) \ ++ V(Sw64Float64RoundTiesEven) \ ++ V(Sw64Float32RoundDown) \ ++ V(Sw64Float32RoundTruncate) \ ++ V(Sw64Float32RoundUp) \ ++ V(Sw64Float32RoundTiesEven) \ ++ V(Sw64CvtSD) \ ++ V(Sw64CvtDS) \ ++ V(Sw64TruncWD) \ ++ V(Sw64RoundWD) \ ++ V(Sw64FloorWD) \ ++ V(Sw64CeilWD) \ ++ V(Sw64TruncWS) \ ++ V(Sw64RoundWS) \ ++ V(Sw64FloorWS) \ ++ V(Sw64CeilWS) \ ++ V(Sw64TruncLS) \ ++ V(Sw64TruncLD) \ ++ V(Sw64TruncUwD) \ ++ V(Sw64TruncUwS) \ ++ V(Sw64TruncUlS) \ ++ V(Sw64TruncUlD) \ ++ V(Sw64CvtDW) \ ++ V(Sw64CvtSL) \ ++ V(Sw64CvtSW) \ ++ V(Sw64CvtSUw) \ ++ V(Sw64CvtSUl) \ ++ V(Sw64CvtDL) \ ++ V(Sw64CvtDUw) \ ++ V(Sw64CvtDUl) \ ++ V(Sw64Ldb) \ 
++ V(Sw64Ldbu) \ ++ V(Sw64Stb) \ ++ V(Sw64Ldh) \ ++ V(Sw64Uldh) \ ++ V(Sw64Ldhu) \ ++ V(Sw64Uldhu) \ ++ V(Sw64Sth) \ ++ V(Sw64Usth) \ ++ V(Sw64Ldl) \ ++ V(Sw64Uldl) \ ++ V(Sw64Ldw) \ ++ V(Sw64Uldw) \ ++ V(Sw64Ldwu) \ ++ V(Sw64Uldwu) \ ++ V(Sw64Stw) \ ++ V(Sw64Ustw) \ ++ V(Sw64Stl) \ ++ V(Sw64Ustl) \ ++ V(Sw64Flds) \ ++ V(Sw64Uflds) \ ++ V(Sw64Fsts) \ ++ V(Sw64Ufsts) \ ++ V(Sw64Fldd) \ ++ V(Sw64Ufldd) \ ++ V(Sw64Fstd) \ ++ V(Sw64Ufstd) \ ++ V(Sw64BitcastDL) \ ++ V(Sw64BitcastLD) \ ++ V(Sw64BitcastSW) \ ++ V(Sw64BitcastWS) \ ++ V(Sw64Float64ExtractLowWord32) \ ++ V(Sw64Float64ExtractHighWord32) \ ++ V(Sw64Float64InsertLowWord32) \ ++ V(Sw64Float64InsertHighWord32) \ ++ V(Sw64Float32Max) \ ++ V(Sw64Float64Max) \ ++ V(Sw64Float32Min) \ ++ V(Sw64Float64Min) \ ++ V(Sw64Float64SilenceNaN) \ ++ V(Sw64Push) \ ++ V(Sw64Peek) \ ++ V(Sw64StoreToStackSlot) \ ++ V(Sw64ByteSwap64) \ ++ V(Sw64ByteSwap32) \ ++ V(Sw64StackClaim) \ ++ V(Sw64Seb) \ ++ V(Sw64Seh) \ ++ V(Sw64Sync) \ ++ V(Sw64AssertEqual) \ ++ V(Sw64S128Const) \ ++ V(Sw64S128Zero) \ ++ V(Sw64S128AllOnes) \ ++ V(Sw64I32x4Splat) \ ++ V(Sw64I32x4ExtractLane) \ ++ V(Sw64I32x4ReplaceLane) \ ++ V(Sw64I32x4Add) \ ++ V(Sw64I32x4AddHoriz) \ ++ V(Sw64I32x4Sub) \ ++ V(Sw64F64x2Abs) \ ++ V(Sw64F64x2Neg) \ ++ V(Sw64F32x4Splat) \ ++ V(Sw64F32x4ExtractLane) \ ++ V(Sw64F32x4ReplaceLane) \ ++ V(Sw64F32x4SConvertI32x4) \ ++ V(Sw64F32x4UConvertI32x4) \ ++ V(Sw64I32x4Mul) \ ++ V(Sw64I32x4MaxS) \ ++ V(Sw64I32x4MinS) \ ++ V(Sw64I32x4Eq) \ ++ V(Sw64I32x4Ne) \ ++ V(Sw64I32x4Shl) \ ++ V(Sw64I32x4ShrS) \ ++ V(Sw64I32x4ShrU) \ ++ V(Sw64I32x4MaxU) \ ++ V(Sw64I32x4MinU) \ ++ V(Sw64F64x2Sqrt) \ ++ V(Sw64F64x2Add) \ ++ V(Sw64F64x2Sub) \ ++ V(Sw64F64x2Mul) \ ++ V(Sw64F64x2Div) \ ++ V(Sw64F64x2Min) \ ++ V(Sw64F64x2Max) \ ++ V(Sw64F64x2Eq) \ ++ V(Sw64F64x2Ne) \ ++ V(Sw64F64x2Lt) \ ++ V(Sw64F64x2Le) \ ++ V(Sw64F64x2Splat) \ ++ V(Sw64F64x2ExtractLane) \ ++ V(Sw64F64x2ReplaceLane) \ ++ V(Sw64F64x2Pmin) \ ++ V(Sw64F64x2Pmax) \ ++ V(Sw64F64x2Ceil) \ ++ V(Sw64F64x2Floor) \ ++ V(Sw64F64x2Trunc) \ ++ V(Sw64F64x2NearestInt) \ ++ V(Sw64I64x2Splat) \ ++ V(Sw64I64x2ExtractLane) \ ++ V(Sw64I64x2ReplaceLane) \ ++ V(Sw64I64x2Add) \ ++ V(Sw64I64x2Sub) \ ++ V(Sw64I64x2Mul) \ ++ V(Sw64I64x2Neg) \ ++ V(Sw64I64x2Shl) \ ++ V(Sw64I64x2ShrS) \ ++ V(Sw64I64x2ShrU) \ ++ V(Sw64F32x4Abs) \ ++ V(Sw64F32x4Neg) \ ++ V(Sw64F32x4Sqrt) \ ++ V(Sw64F32x4RecipApprox) \ ++ V(Sw64F32x4RecipSqrtApprox) \ ++ V(Sw64F32x4Add) \ ++ V(Sw64F32x4AddHoriz) \ ++ V(Sw64F32x4Sub) \ ++ V(Sw64F32x4Mul) \ ++ V(Sw64F32x4Div) \ ++ V(Sw64F32x4Max) \ ++ V(Sw64F32x4Min) \ ++ V(Sw64F32x4Eq) \ ++ V(Sw64F32x4Ne) \ ++ V(Sw64F32x4Lt) \ ++ V(Sw64F32x4Le) \ ++ V(Sw64F32x4Pmin) \ ++ V(Sw64F32x4Pmax) \ ++ V(Sw64F32x4Ceil) \ ++ V(Sw64F32x4Floor) \ ++ V(Sw64F32x4Trunc) \ ++ V(Sw64F32x4NearestInt) \ ++ V(Sw64I32x4SConvertF32x4) \ ++ V(Sw64I32x4UConvertF32x4) \ ++ V(Sw64I32x4Neg) \ ++ V(Sw64I32x4GtS) \ ++ V(Sw64I32x4GeS) \ ++ V(Sw64I32x4GtU) \ ++ V(Sw64I32x4GeU) \ ++ V(Sw64I32x4Abs) \ ++ V(Sw64I32x4BitMask) \ ++ V(Sw64I16x8Splat) \ ++ V(Sw64I16x8ExtractLaneU) \ ++ V(Sw64I16x8ExtractLaneS) \ ++ V(Sw64I16x8ReplaceLane) \ ++ V(Sw64I16x8Neg) \ ++ V(Sw64I16x8Shl) \ ++ V(Sw64I16x8ShrS) \ ++ V(Sw64I16x8ShrU) \ ++ V(Sw64I16x8Add) \ ++ V(Sw64I16x8AddSaturateS) \ ++ V(Sw64I16x8AddHoriz) \ ++ V(Sw64I16x8Sub) \ ++ V(Sw64I16x8SubSaturateS) \ ++ V(Sw64I16x8Mul) \ ++ V(Sw64I16x8MaxS) \ ++ V(Sw64I16x8MinS) \ ++ V(Sw64I16x8Eq) \ ++ V(Sw64I16x8Ne) \ ++ V(Sw64I16x8GtS) \ ++ V(Sw64I16x8GeS) \ ++ V(Sw64I16x8AddSaturateU) \ ++ V(Sw64I16x8SubSaturateU) \ ++ 
V(Sw64I16x8MaxU) \ ++ V(Sw64I16x8MinU) \ ++ V(Sw64I16x8GtU) \ ++ V(Sw64I16x8GeU) \ ++ V(Sw64I16x8RoundingAverageU) \ ++ V(Sw64I16x8Abs) \ ++ V(Sw64I16x8BitMask) \ ++ V(Sw64I8x16Splat) \ ++ V(Sw64I8x16ExtractLaneU) \ ++ V(Sw64I8x16ExtractLaneS) \ ++ V(Sw64I8x16ReplaceLane) \ ++ V(Sw64I8x16Neg) \ ++ V(Sw64I8x16Shl) \ ++ V(Sw64I8x16ShrS) \ ++ V(Sw64I8x16Add) \ ++ V(Sw64I8x16AddSaturateS) \ ++ V(Sw64I8x16Sub) \ ++ V(Sw64I8x16SubSaturateS) \ ++ V(Sw64I8x16Mul) \ ++ V(Sw64I8x16MaxS) \ ++ V(Sw64I8x16MinS) \ ++ V(Sw64I8x16Eq) \ ++ V(Sw64I8x16Ne) \ ++ V(Sw64I8x16GtS) \ ++ V(Sw64I8x16GeS) \ ++ V(Sw64I8x16ShrU) \ ++ V(Sw64I8x16AddSaturateU) \ ++ V(Sw64I8x16SubSaturateU) \ ++ V(Sw64I8x16MaxU) \ ++ V(Sw64I8x16MinU) \ ++ V(Sw64I8x16GtU) \ ++ V(Sw64I8x16GeU) \ ++ V(Sw64I8x16RoundingAverageU) \ ++ V(Sw64I8x16Abs) \ ++ V(Sw64I8x16BitMask) \ ++ V(Sw64S128And) \ ++ V(Sw64S128Or) \ ++ V(Sw64S128Xor) \ ++ V(Sw64S128Not) \ ++ V(Sw64S128Select) \ ++ V(Sw64S128AndNot) \ ++ V(Sw64V32x4AnyTrue) \ ++ V(Sw64V32x4AllTrue) \ ++ V(Sw64V16x8AnyTrue) \ ++ V(Sw64V16x8AllTrue) \ ++ V(Sw64V8x16AnyTrue) \ ++ V(Sw64V8x16AllTrue) \ ++ V(Sw64S32x4InterleaveRight) \ ++ V(Sw64S32x4InterleaveLeft) \ ++ V(Sw64S32x4PackEven) \ ++ V(Sw64S32x4PackOdd) \ ++ V(Sw64S32x4InterleaveEven) \ ++ V(Sw64S32x4InterleaveOdd) \ ++ V(Sw64S32x4Shuffle) \ ++ V(Sw64S16x8InterleaveRight) \ ++ V(Sw64S16x8InterleaveLeft) \ ++ V(Sw64S16x8PackEven) \ ++ V(Sw64S16x8PackOdd) \ ++ V(Sw64S16x8InterleaveEven) \ ++ V(Sw64S16x8InterleaveOdd) \ ++ V(Sw64S16x4Reverse) \ ++ V(Sw64S16x2Reverse) \ ++ V(Sw64S8x16InterleaveRight) \ ++ V(Sw64S8x16InterleaveLeft) \ ++ V(Sw64S8x16PackEven) \ ++ V(Sw64S8x16PackOdd) \ ++ V(Sw64S8x16InterleaveEven) \ ++ V(Sw64S8x16InterleaveOdd) \ ++ V(Sw64I8x16Shuffle) \ ++ V(Sw64I8x16Swizzle) \ ++ V(Sw64S8x16Concat) \ ++ V(Sw64S8x8Reverse) \ ++ V(Sw64S8x4Reverse) \ ++ V(Sw64S8x2Reverse) \ ++ V(Sw64MsaLd) \ ++ V(Sw64MsaSt) \ ++ V(Sw64I32x4SConvertI16x8Low) \ ++ V(Sw64I32x4SConvertI16x8High) \ ++ V(Sw64I32x4UConvertI16x8Low) \ ++ V(Sw64I32x4UConvertI16x8High) \ ++ V(Sw64I16x8SConvertI8x16Low) \ ++ V(Sw64I16x8SConvertI8x16High) \ ++ V(Sw64I16x8SConvertI32x4) \ ++ V(Sw64I16x8UConvertI32x4) \ ++ V(Sw64I16x8UConvertI8x16Low) \ ++ V(Sw64I16x8UConvertI8x16High) \ ++ V(Sw64I8x16SConvertI16x8) \ ++ V(Sw64I8x16UConvertI16x8) \ ++ V(Sw64Word64AtomicLoadUint8) \ ++ V(Sw64Word64AtomicLoadUint16) \ ++ V(Sw64Word64AtomicLoadUint32) \ ++ V(Sw64Word64AtomicLoadUint64) \ ++ V(Sw64Word64AtomicStoreWord8) \ ++ V(Sw64Word64AtomicStoreWord16) \ ++ V(Sw64Word64AtomicStoreWord32) \ ++ V(Sw64Word64AtomicStoreWord64) \ ++ V(Sw64Word64AtomicAddUint8) \ ++ V(Sw64Word64AtomicAddUint16) \ ++ V(Sw64Word64AtomicAddUint32) \ ++ V(Sw64Word64AtomicAddUint64) \ ++ V(Sw64Word64AtomicSubUint8) \ ++ V(Sw64Word64AtomicSubUint16) \ ++ V(Sw64Word64AtomicSubUint32) \ ++ V(Sw64Word64AtomicSubUint64) \ ++ V(Sw64Word64AtomicAndUint8) \ ++ V(Sw64Word64AtomicAndUint16) \ ++ V(Sw64Word64AtomicAndUint32) \ ++ V(Sw64Word64AtomicAndUint64) \ ++ V(Sw64Word64AtomicOrUint8) \ ++ V(Sw64Word64AtomicOrUint16) \ ++ V(Sw64Word64AtomicOrUint32) \ ++ V(Sw64Word64AtomicOrUint64) \ ++ V(Sw64Word64AtomicXorUint8) \ ++ V(Sw64Word64AtomicXorUint16) \ ++ V(Sw64Word64AtomicXorUint32) \ ++ V(Sw64Word64AtomicXorUint64) \ ++ V(Sw64Word64AtomicExchangeUint8) \ ++ V(Sw64Word64AtomicExchangeUint16) \ ++ V(Sw64Word64AtomicExchangeUint32) \ ++ V(Sw64Word64AtomicExchangeUint64) \ ++ V(Sw64Word64AtomicCompareExchangeUint8) \ ++ V(Sw64Word64AtomicCompareExchangeUint16) \ ++ V(Sw64Word64AtomicCompareExchangeUint32) \ ++ 
V(Sw64Word64AtomicCompareExchangeUint64) ++ ++// Addressing modes represent the "shape" of inputs to an instruction. ++// Many instructions support multiple addressing modes. Addressing modes ++// are encoded into the InstructionCode of the instruction and tell the ++// code generator after register allocation which assembler method to call. ++// ++// We use the following local notation for addressing modes: ++// ++// R = register ++// O = register or stack slot ++// D = double register ++// I = immediate (handle, external, int32) ++// MRI = [register + immediate] ++// MRR = [register + register] ++// TODO(plind): Add the new r6 address modes. ++#define TARGET_ADDRESSING_MODE_LIST(V) \ ++ V(MRI) /* [%r0 + K] */ \ ++ V(MRR) /* [%r0 + %r1] */ ++ ++ ++} // namespace compiler ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_COMPILER_SW64_INSTRUCTION_CODES_SW64_H_ +diff --git a/src/3rdparty/chromium/v8/src/compiler/backend/sw64/instruction-scheduler-sw64.cc b/src/3rdparty/chromium/v8/src/compiler/backend/sw64/instruction-scheduler-sw64.cc +new file mode 100755 +index 000000000..d9fbb9927 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/compiler/backend/sw64/instruction-scheduler-sw64.cc +@@ -0,0 +1,1558 @@ ++// Copyright 2015 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#include "src/codegen/macro-assembler.h" ++#include "src/compiler/backend/instruction-scheduler.h" ++ ++namespace v8 { ++namespace internal { ++namespace compiler { ++ ++bool InstructionScheduler::SchedulerSupported() { return true; } ++ ++int InstructionScheduler::GetTargetInstructionFlags( ++ const Instruction* instr) const { ++ switch(instr->arch_opcode()) ++ { ++ case kSw64Add: ++ case kSw64Dadd: ++ case kSw64DaddOvf: ++ case kSw64Sub: ++ case kSw64Dsub: ++ case kSw64DsubOvf: ++ case kSw64Mul: ++ case kSw64MulOvf: ++ case kSw64MulHigh: ++ case kSw64DMulHigh: ++ case kSw64MulHighU: ++ case kSw64Dmul: ++ case kSw64Div: ++ case kSw64Ddiv: ++ case kSw64DivU: ++ case kSw64DdivU: ++ case kSw64Mod: ++ case kSw64Dmod: ++ case kSw64ModU: ++ case kSw64DmodU: ++ case kSw64And: ++ case kSw64And32: ++ case kSw64Or: ++ case kSw64Or32: ++ case kSw64Nor: ++ case kSw64Nor32: ++ case kSw64Xor: ++ case kSw64Xor32: ++ case kSw64Clz: ++ case kSw64Lsa: ++ case kSw64Dlsa: ++ case kSw64Shl: ++ case kSw64Shr: ++ case kSw64Sar: ++ case kSw64Ext: ++ case kSw64Ins: ++ case kSw64Dext: ++ case kSw64Dins: ++ case kSw64Dclz: ++ case kSw64Ctz: ++ case kSw64Dctz: ++ ++ case kSw64Popcnt: ++ case kSw64Dpopcnt: ++ case kSw64Dshl: ++ case kSw64Dshr: ++ case kSw64Dsar: ++ ++ case kSw64Ror: ++ case kSw64Dror: ++ case kSw64Mov: ++ case kSw64Tst: ++ case kSw64Cmp: ++ case kSw64CmpS: ++ case kSw64AddS: ++ case kSw64SubS: ++ case kSw64MulS: ++ case kSw64DivS: ++ case kSw64AbsS: ++ case kSw64NegS: ++ case kSw64SqrtS: ++ case kSw64MaxS: ++ case kSw64MinS: ++ case kSw64CmpD: ++ case kSw64AddD: ++ case kSw64SubD: ++ case kSw64MulD: ++ case kSw64DivD: ++ case kSw64AbsD: ++ case kSw64NegD: ++ case kSw64SqrtD: ++ case kSw64MaxD: ++ case kSw64MinD: ++ ++ case kSw64Float64RoundDown: ++ case kSw64Float64RoundTruncate: ++ case kSw64Float64RoundUp: ++ case kSw64Float64RoundTiesEven: ++ case kSw64Float32RoundDown: ++ case kSw64Float32RoundTruncate: ++ case kSw64Float32RoundUp: ++ case kSw64Float32RoundTiesEven: ++ ++ case kSw64CvtSD: ++ case kSw64CvtDS: ++ case kSw64TruncWD: ++ case kSw64RoundWD: ++ case kSw64FloorWD: ++ case kSw64CeilWD: ++ case 
kSw64TruncWS: ++ case kSw64RoundWS: ++ case kSw64FloorWS: ++ case kSw64CeilWS: ++ ++ case kSw64TruncLS: ++ case kSw64TruncLD: ++ case kSw64TruncUwD: ++ case kSw64TruncUwS: ++ case kSw64TruncUlS: ++ case kSw64TruncUlD: ++ ++ case kSw64CvtDW: ++ case kSw64CvtSL: ++ case kSw64CvtSW: ++ case kSw64CvtSUw: ++ case kSw64CvtSUl: ++ case kSw64CvtDL: ++ case kSw64CvtDUw: ++ case kSw64CvtDUl: ++ ++ case kSw64BitcastDL: ++ case kSw64BitcastLD: ++ case kSw64BitcastSW: ++ case kSw64BitcastWS: ++ case kSw64Float64ExtractLowWord32: ++ case kSw64Float64ExtractHighWord32: ++ case kSw64Float64InsertLowWord32: ++ case kSw64Float64InsertHighWord32: ++ case kSw64Float32Max: ++ case kSw64Float64Max: ++ case kSw64Float32Min: ++ case kSw64Float64Min: ++ case kSw64Float64SilenceNaN: ++ ++ case kSw64ByteSwap64: ++ case kSw64ByteSwap32: ++ ++ case kSw64Seb: ++ case kSw64Seh: ++ case kSw64AssertEqual: ++/* case kSw64S128Zero: ++ case kSw64I32x4Splat: ++ case kSw64I32x4ExtractLane: ++ case kSw64I32x4ReplaceLane: ++ case kSw64I32x4Add: ++ case kSw64I32x4AddHoriz: ++ case kSw64I32x4Sub: ++ case kSw64F32x4Splat: ++ case kSw64F32x4ExtractLane: ++ case kSw64F32x4ReplaceLane: ++ case kSw64F32x4SConvertI32x4: ++ case kSw64F32x4UConvertI32x4: ++ case kSw64I32x4Mul: ++ case kSw64I32x4MaxS: ++ case kSw64I32x4MinS: ++ case kSw64I32x4Eq: ++ case kSw64I32x4Ne: ++ case kSw64I32x4Shl: ++ case kSw64I32x4ShrS: ++ case kSw64I32x4ShrU: ++ case kSw64I32x4MaxU: ++ case kSw64I32x4MinU: ++ case kSw64F32x4Abs: ++ case kSw64F32x4Neg: ++ case kSw64F32x4RecipApprox: ++ case kSw64F32x4RecipSqrtApprox: ++ case kSw64F32x4Add: ++ case kSw64F32x4AddHoriz: ++ case kSw64F32x4Sub: ++ case kSw64F32x4Mul: ++ case kSw64F32x4Max: ++ case kSw64F32x4Min: ++ case kSw64F32x4Eq: ++ case kSw64F32x4Ne: ++ case kSw64F32x4Lt: ++ case kSw64F32x4Le: ++ case kSw64I32x4SConvertF32x4: ++ case kSw64I32x4UConvertF32x4: ++ case kSw64I32x4Neg: ++ case kSw64I32x4GtS: ++ case kSw64I32x4GeS: ++ case kSw64I32x4GtU: ++ case kSw64I32x4GeU: ++ case kSw64I16x8Splat: ++ case kSw64I16x8ExtractLane: ++ case kSw64I16x8ReplaceLane: ++ case kSw64I16x8Neg: ++ case kSw64I16x8Shl: ++ case kSw64I16x8ShrS: ++ case kSw64I16x8ShrU: ++ case kSw64I16x8Add: ++ case kSw64I16x8AddSaturateS: ++ case kSw64I16x8AddHoriz: ++ case kSw64I16x8Sub: ++ case kSw64I16x8SubSaturateS: ++ case kSw64I16x8Mul: ++ case kSw64I16x8MaxS: ++ case kSw64I16x8MinS: ++ case kSw64I16x8Eq: ++ case kSw64I16x8Ne: ++ case kSw64I16x8GtS: ++ case kSw64I16x8GeS: ++ case kSw64I16x8AddSaturateU: ++ case kSw64I16x8SubSaturateU: ++ case kSw64I16x8MaxU: ++ case kSw64I16x8MinU: ++ case kSw64I16x8GtU: ++ case kSw64I16x8GeU: ++ case kSw64I8x16Splat: ++ case kSw64I8x16ExtractLane: ++ case kSw64I8x16ReplaceLane: ++ case kSw64I8x16Neg: ++ case kSw64I8x16Shl: ++ case kSw64I8x16ShrS: ++ case kSw64I8x16Add: ++ case kSw64I8x16AddSaturateS: ++ case kSw64I8x16Sub: ++ case kSw64I8x16SubSaturateS: ++ case kSw64I8x16Mul: ++ case kSw64I8x16MaxS: ++ case kSw64I8x16MinS: ++ case kSw64I8x16Eq: ++ case kSw64I8x16Ne: ++ case kSw64I8x16GtS: ++ case kSw64I8x16GeS: ++ case kSw64I8x16ShrU: ++ case kSw64I8x16AddSaturateU: ++ case kSw64I8x16SubSaturateU: ++ case kSw64I8x16MaxU: ++ case kSw64I8x16MinU: ++ case kSw64I8x16GtU: ++ case kSw64I8x16GeU: ++ case kSw64S128And: ++ case kSw64S128Or: ++ case kSw64S128Xor: ++ case kSw64S128Not: ++ case kSw64S128Select: */ ++ /* case kSw64S32x4InterleaveRight: ++ case kSw64S32x4InterleaveLeft: ++ case kSw64S32x4PackEven: ++ case kSw64S32x4PackOdd: ++ case kSw64S32x4InterleaveEven: ++ case kSw64S32x4InterleaveOdd: ++ case 
kSw64S32x4Shuffle: ++ case kSw64S16x8InterleaveRight: ++ case kSw64S16x8InterleaveLeft: ++ case kSw64S16x8PackEven: ++ case kSw64S16x8PackOdd: ++ case kSw64S16x8InterleaveEven: ++ case kSw64S16x8InterleaveOdd: ++ case kSw64S16x4Reverse: ++ case kSw64S16x2Reverse: ++ case kSw64S8x16InterleaveRight: ++ case kSw64S8x16InterleaveLeft: ++ case kSw64S8x16PackEven: ++ case kSw64S8x16PackOdd: ++ case kSw64S8x16InterleaveEven: ++ case kSw64S8x16InterleaveOdd: ++ case kSw64S8x16Shuffle: ++ case kSw64S8x16Concat: ++ case kSw64S8x8Reverse: ++ case kSw64S8x4Reverse: ++ case kSw64S8x2Reverse: ++ case kSw64MsaSt: ++ case kSw64I32x4SConvertI16x8Low: ++ case kSw64I32x4SConvertI16x8High: ++ case kSw64I32x4UConvertI16x8Low: ++ case kSw64I32x4UConvertI16x8High: ++ case kSw64I16x8SConvertI8x16Low: ++ case kSw64I16x8SConvertI8x16High: ++ case kSw64I16x8SConvertI32x4: ++ case kSw64I16x8UConvertI32x4: ++ case kSw64I16x8UConvertI8x16Low: ++ case kSw64I16x8UConvertI8x16High: ++ case kSw64I8x16SConvertI16x8: ++ case kSw64I8x16UConvertI16x8:*/ ++ return kNoOpcodeFlags; ++ case kSw64Ldb: ++ case kSw64Ldbu: ++ case kSw64Ldh: ++ case kSw64Uldh: ++ case kSw64Ldhu: ++ case kSw64Uldhu: ++ case kSw64Ldl: ++ case kSw64Uldl: ++ case kSw64Ldw: ++ case kSw64Uldw: ++ case kSw64Ldwu: ++ case kSw64Uldwu: ++ case kSw64Flds: ++ case kSw64Uflds: ++ case kSw64Fldd: ++ case kSw64Ufldd: ++ case kSw64Peek: ++ case kSw64MsaLd: ++ case kSw64Word64AtomicLoadUint8: ++ case kSw64Word64AtomicLoadUint16: ++ case kSw64Word64AtomicLoadUint32: ++ case kSw64Word64AtomicLoadUint64: ++ return kIsLoadOperation; ++ case kSw64ModS: ++ case kSw64ModD: ++ case kSw64Stb: ++ case kSw64Sth: ++ case kSw64Usth: ++ case kSw64Stw: ++ case kSw64Ustw: ++ case kSw64Stl: ++ case kSw64Ustl: ++ case kSw64Fsts: ++ case kSw64Ufsts: ++ case kSw64Fstd: ++ case kSw64Ufstd: ++ case kSw64Push: ++ case kSw64Word64AtomicStoreWord8: ++ case kSw64Word64AtomicStoreWord16: ++ case kSw64Word64AtomicStoreWord32: ++ case kSw64Word64AtomicStoreWord64: ++ case kSw64Word64AtomicAddUint8: ++ case kSw64Word64AtomicAddUint16: ++ case kSw64Word64AtomicAddUint32: ++ case kSw64Word64AtomicAddUint64: ++ case kSw64Word64AtomicSubUint8: ++ case kSw64Word64AtomicSubUint16: ++ case kSw64Word64AtomicSubUint32: ++ case kSw64Word64AtomicSubUint64: ++ case kSw64Word64AtomicAndUint8: ++ case kSw64Word64AtomicAndUint16: ++ case kSw64Word64AtomicAndUint32: ++ case kSw64Word64AtomicAndUint64: ++ case kSw64Word64AtomicOrUint8: ++ case kSw64Word64AtomicOrUint16: ++ case kSw64Word64AtomicOrUint32: ++ case kSw64Word64AtomicOrUint64: ++ case kSw64Word64AtomicXorUint8: ++ case kSw64Word64AtomicXorUint16: ++ case kSw64Word64AtomicXorUint32: ++ case kSw64Word64AtomicXorUint64: ++ case kSw64Word64AtomicExchangeUint8: ++ case kSw64Word64AtomicExchangeUint16: ++ case kSw64Word64AtomicExchangeUint32: ++ case kSw64Word64AtomicExchangeUint64: ++ case kSw64Word64AtomicCompareExchangeUint8: ++ case kSw64Word64AtomicCompareExchangeUint16: ++ case kSw64Word64AtomicCompareExchangeUint32: ++ case kSw64Word64AtomicCompareExchangeUint64: ++ case kSw64StoreToStackSlot: ++ case kSw64StackClaim: ++ return kHasSideEffect; ++ default: ++ return kHasSideEffect; ++ } ++} ++ ++enum Latency { ++ FSELEQ = 2, ++ FSELNE = 2, ++ FSELLT = 2, ++ FSELLE = 2, ++ FSELGT = 2, ++ FSELGE = 2, ++ ++ FCPYS = 2, ++ FCPYSE = 2, ++ FCPYSN = 2, ++ ++ SLLOW = 3, //256 sll ++ SRLOW = 3, //256 srl ++ ++ BRANCH = 4, // sw ?? ++ RINT_S = 4, // Estimated. ++ RINT_D = 4, // Estimated. 
++ ++ LDBU = 4, ++ LDHU = 4, ++ LDW = 4, ++ LDL = 4, ++ LDL_U = 4, ++ STB = 4, ++ STH = 4, ++ STW = 4, ++ STL = 4, ++ ++ IFMOVS = 4, ++ IFMOVD = 4, ++ FIMOVS = 4, ++ FIMOVD = 4, ++ ++ FLDS = 4, ++ FLDD = 4, ++ FSTS = 4, ++ FSTD = 4, ++ ++ MULW = 4, ++ MULL = 4, ++ UMULH = 4, ++ ++ FCVTSD = 4, ++ FCVTDS = 4, ++ FCVTDL = 4, ++ FCVTWL = 4, ++ FCVTLW = 4, ++ FCVTLS = 4, ++ FCVTLD = 4, ++ FCVTDL_Z = 4, ++ FCVTDL_P = 4, ++ FCVTDL_G = 4, ++ FCVTDL_N = 4, ++ ++ FMAS = 6, ++ FMAD = 6, ++ FMSS = 6, ++ FMSD = 6, ++ FNMAS = 6, ++ FNMAD = 6, ++ FNMSS = 6, ++ FNMSD = 6, ++ ++ FADDS = 6, ++ FADDD = 6, ++ FSUBS = 6, ++ FSUBD = 6, ++ FMULS = 6, ++ FMULD = 6, ++ ++ FCMPDEQ = 6, ++ FCMPDLE = 6, ++ FCMPDLT = 6, ++ FCMPDUN = 6, ++ ++ FDIVS = 17, ++ FSQRTS = 17, ++ FSQRTD = 31, ++ FDIVD = 32, ++}; ++ ++int DadduLatency(bool is_operand_register = true) { ++ if (is_operand_register) { ++ return 1; ++ } else { ++ return 2; // Estimated max. ++ } ++} ++ ++int SrlwLatency(bool is_operand_register = true) { ++ if (is_operand_register) { ++ return 4; ++ } else { ++ return 3; // Estimated max. ++ } ++} ++ ++int DsubuLatency(bool is_operand_register = true) { ++ return DadduLatency(is_operand_register); ++} ++ ++int AndLatency(bool is_operand_register = true) { ++ return DadduLatency(is_operand_register); ++} ++ ++int OrLatency(bool is_operand_register = true) { ++ return DadduLatency(is_operand_register); ++} ++ ++int NorLatency(bool is_operand_register = true) { ++ if (is_operand_register) { ++ return 2; ++ } else { ++ return 3; // Estimated max. ++ } ++} ++ ++int XorLatency(bool is_operand_register = true) { ++ return DadduLatency(is_operand_register); ++} ++ ++int MulLatency(bool is_operand_register = true) { ++ if (is_operand_register) { ++ return Latency::MULW; ++ } else { ++ return Latency::MULW + 1; ++ } ++} ++ ++int DmulLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = Latency::MULL; ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++//Mulwh =(li +)mull + sral ++int MulhLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = Latency::MULL + 1; ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++//Mulhu = zapnot + zapnot + mull ++int MulhuLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = Latency::MULL + 2; ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++//Dmulh = (li +)umulh + srll + mull + subl + srll +mull +subl ++int DMulhLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = Latency::MULL + Latency::MULL + 5; ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++//Divw = ifmovd*2 + fcvtld*2 + fdivd + fcvtdl_z + fimovd ++int DivLatency(bool is_operand_register = true) { ++ if (is_operand_register) { ++ return Latency::IFMOVD * 2 + Latency::FCVTLD * 2 + Latency::FDIVD + Latency::FCVTDL_Z + Latency::FIMOVD; ++ } else { ++ return Latency::IFMOVD * 2 + Latency::FCVTLD * 2 + Latency::FDIVD + Latency::FCVTDL_Z + Latency::FIMOVD + 1; ++ } ++} ++ ++//Divwu = Divw + zapnot * 2 ++int DivuLatency(bool is_operand_register = true) { ++ if (is_operand_register) { ++ return Latency::IFMOVD * 2 + Latency::FCVTLD * 2 + Latency::FDIVD + Latency::FCVTDL_Z + Latency::FIMOVD + 2; ++ } else { ++ return Latency::IFMOVD * 2 + Latency::FCVTLD * 2 + Latency::FDIVD + Latency::FCVTDL_Z + Latency::FIMOVD + 2 + 1; ++ } ++} ++ ++//Divl = ifmovd * 2 + fcvtld * 2 + fdivd + fcvtdl_z + fimovd ++int DdivLatency(bool is_operand_register 
= true) { ++ int latency = 0; ++ latency = Latency::IFMOVD * 2 + Latency::FCVTLD * 2 + Latency::FDIVD + Latency::FCVTDL_Z + Latency::FIMOVD; ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++//Divlu = Divl ++int DdivuLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = Latency::IFMOVD * 2 + Latency::FCVTLD * 2 + Latency::FDIVD + Latency::FCVTDL_Z + Latency::FIMOVD; ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++//Modw = Divw + mulw + subw ++int ModLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = Latency::IFMOVD * 2 + Latency::FCVTLD * 2 + Latency::FDIVD + Latency::FCVTDL_Z + Latency::FIMOVD + Latency::MULW + DadduLatency(is_operand_register); ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++//Modwu = Modw + zapnot * 2 ++int ModuLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = Latency::IFMOVD * 2 + Latency::FCVTLD * 2 + Latency::FDIVD + Latency::FCVTDL_Z + Latency::FIMOVD + Latency::MULW + DadduLatency(is_operand_register);; ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++//Modl = Modw ++int DmodLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = Latency::IFMOVD * 2 + Latency::FCVTLD * 2 + Latency::FDIVD + Latency::FCVTDL_Z + Latency::FIMOVD + Latency::MULW + DadduLatency(is_operand_register); ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++//Modlu = Modl ++int DmoduLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = Latency::IFMOVD * 2 + Latency::FCVTLD * 2 + Latency::FDIVD + Latency::FCVTDL_Z + Latency::FIMOVD + Latency::MULW + DadduLatency(is_operand_register); ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++int DlsaLatency() { ++ // Estimated max. ++ return DadduLatency() + 1; ++} ++ ++int CallLatency() { ++ // Estimated. ++ return DadduLatency(false) + Latency::BRANCH + 5; ++} ++ ++int JumpLatency() { ++ // Estimated max. ++ return 1 + DadduLatency() + Latency::BRANCH + 2; ++} ++ ++int SmiUntagLatency() { return 1; } ++ ++int PrepareForTailCallLatency() { ++ // Estimated max. ++ return 2 * (DlsaLatency() + DadduLatency(false)) + 2 + Latency::BRANCH + ++ Latency::BRANCH + 2 * DsubuLatency(false) + 2 + Latency::BRANCH + 1; ++} ++ ++int AssemblePopArgumentsAdoptFrameLatency() { ++ return 1 + Latency::BRANCH + 1 + SmiUntagLatency() + ++ PrepareForTailCallLatency(); ++} ++ ++int AssertLatency() { return 1; } ++ ++int PrepareCallCFunctionLatency() { ++ int frame_alignment = TurboAssembler::ActivationFrameAlignment(); ++ if (frame_alignment > kSystemPointerSize) { ++ return 1 + DsubuLatency(false) + AndLatency(false) + 1; ++ } else { ++ return DsubuLatency(false); ++ } ++} ++ ++int AdjustBaseAndOffsetLatency() { ++ return 3; // Estimated max. 
++} ++ ++int AlignedMemoryLatency() { return AdjustBaseAndOffsetLatency() + 1; } ++ ++int MultiPushLatency() { ++ int latency = DsubuLatency(false); ++ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { ++ latency++; ++ } ++ return latency; ++} ++ ++int MultiPushFPULatency() { ++ int latency = DsubuLatency(false); ++ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { ++ latency += 4; ++ } ++ return latency; ++} ++ ++int PushCallerSavedLatency(SaveFPRegsMode fp_mode) { ++ int latency = MultiPushLatency(); ++ if (fp_mode == kSaveFPRegs) { ++ latency += MultiPushFPULatency(); ++ } ++ return latency; ++} ++ ++int MultiPopLatency() { ++ int latency = DadduLatency(false); ++ for (int16_t i = 0; i < kNumRegisters; i++) { ++ latency++; ++ } ++ return latency; ++} ++ ++int MultiPopFPULatency() { ++ int latency = DadduLatency(false); ++ for (int16_t i = 0; i < kNumRegisters; i++) { ++ latency += 4; ++ } ++ return latency; ++} ++ ++int PopCallerSavedLatency(SaveFPRegsMode fp_mode) { ++ int latency = MultiPopLatency(); ++ if (fp_mode == kSaveFPRegs) { ++ latency += MultiPopFPULatency(); ++ } ++ return latency; ++} ++ ++int CallCFunctionHelperLatency() { ++ // Estimated. ++ int latency = AndLatency(false) + Latency::BRANCH + 2 + CallLatency(); ++ if (base::OS::ActivationFrameAlignment() > kSystemPointerSize) { ++ latency++; ++ } else { ++ latency += DadduLatency(false); ++ } ++ return latency; ++} ++ ++int CallCFunctionLatency() { return 1 + CallCFunctionHelperLatency(); } ++ ++int AssembleArchJumpLatency() { ++ // Estimated max. ++ return Latency::BRANCH; ++} ++ ++int AssembleArchLookupSwitchLatency(const Instruction* instr) { ++ int latency = 0; ++ for (size_t index = 2; index < instr->InputCount(); index += 2) { ++ latency += 1 + Latency::BRANCH; ++ } ++ return latency + AssembleArchJumpLatency(); ++} ++ ++int GenerateSwitchTableLatency() { ++ int latency = 0; ++ latency = DlsaLatency() + 2; ++ ++ latency += 2; ++ return latency; ++} ++ ++int AssembleArchTableSwitchLatency() { ++ return Latency::BRANCH + GenerateSwitchTableLatency(); ++} ++ ++int DropAndRetLatency() { ++ // Estimated max. ++ return DadduLatency(false) + JumpLatency(); ++} ++ ++int AssemblerReturnLatency() { ++ // Estimated max. ++ return DadduLatency(false) + MultiPopLatency() + MultiPopFPULatency() + ++ Latency::BRANCH + DadduLatency() + 1 + DropAndRetLatency(); ++} ++ ++int TryInlineTruncateDoubleToILatency() { ++ return 2 + Latency::FCVTDL_Z + Latency::FCVTLW + Latency::FIMOVS + 1 + 2 + AndLatency(false) + ++ Latency::BRANCH; ++} ++ ++int CallStubDelayedLatency() { return 1 + CallLatency(); } ++ ++int TruncateDoubleToIDelayedLatency() { ++ // TODO(sw64): This no longer reflects how TruncateDoubleToI is called. ++ return TryInlineTruncateDoubleToILatency() + 1 + DsubuLatency(false) + ++ 4 + CallStubDelayedLatency() + DadduLatency(false) + 1; ++} ++ ++int CheckPageFlagLatency() { ++ return AndLatency(false) + AlignedMemoryLatency() + AndLatency(false) + ++ Latency::BRANCH; ++} ++ ++int SltuLatency(bool is_operand_register = true) { ++ if (is_operand_register) { ++ return 1; ++ } else { ++ return 2; // Estimated max. ++ } ++} ++ ++int BranchShortHelperR6Latency() { ++ return 2; // Estimated max. ++} ++ ++int BranchShortHelperLatency() { ++ return SltuLatency() + 2; // Estimated max. 
++} ++ ++int BranchShortLatency(BranchDelaySlot bdslot = PROTECT) { ++ return BranchShortHelperR6Latency(); ++} ++ ++int MoveLatency() { return 1; } ++ ++int MovToFloatParametersLatency() { return 2 * MoveLatency(); } ++ ++int MovFromFloatResultLatency() { return MoveLatency(); } ++ ++int DaddOverflowLatency() { ++ // Estimated max. ++ return 6; ++} ++ ++int DsubOverflowLatency() { ++ // Estimated max. ++ return 6; ++} ++ ++int MulOverflowLatency() { ++ // Estimated max. ++ return MulLatency() + MulhLatency() + 2; ++} ++ ++//Clz = addw + sellt + blt + ctlz + ldi + subl ++int ClzLatency() { return 6; } ++ ++//Dclz = cttz ++int DclzLatency(){ return 1;} ++ ++//Ctz = cttz + ldi + subl + selge ++int CtzLatency() { ++ return 4; ++} ++ ++//Dctz = cttz ++int DctzLatency() { ++ return 1; ++} ++ ++//popcnt = zapnot + ctpop ++int PopcntLatency() { ++ return 2; ++} ++ ++//Dpopcnt = ctpop ++int DpopcntLatency() { ++ return 1; ++} ++ ++//Ext TODO ++int ExtLatency(){ ++ return 1; ++} ++ ++//Ins = li + and_ins + slll * 2 + bic + bis + addw ++int InsLatency(){ ++ return 7; ++} ++ ++//Dins = Ins - addw ++int DinsLatency(){ ++ return 6; ++} ++ ++//Ror = and_ins + ldi + subw + and_ins + zapnot + srll + addw + and_ins + slll + addw + bis + addw ++int RorLatency(bool is_operand_register = true){ ++ if (is_operand_register) { ++ return 11; ++ } else { ++ return 7; ++ } ++} ++ ++int DrorLatency(bool is_operand_register = true){ ++ if (is_operand_register) { ++ return 5; ++ } else { ++ return 3; ++ } ++} ++ ++int CompareFLatency() { return 1; } ++ ++int CompareF32Latency() { return CompareFLatency(); } ++ ++int CompareF64Latency() { return CompareFLatency(); } ++ ++int CompareIsNanFLatency() { return CompareFLatency(); } ++ ++int CompareIsNanF32Latency() { return CompareIsNanFLatency(); } ++ ++int CompareIsNanF64Latency() { return CompareIsNanFLatency(); } ++ ++int Float64RoundLatency() { ++ return 32; ++} ++ ++int Float32RoundLatency() { ++ return 36; ++} ++ ++int PushLatency() { return DadduLatency() + AlignedMemoryLatency(); } ++ ++int ByteSwapSignedLatency() { return 2; } ++ ++int LlLatency(int offset) { ++ bool is_one_instruction = is_int9(offset); ++ if (is_one_instruction) { ++ return 1; ++ } else { ++ return 3; ++ } ++} ++ ++int ExtractBitsLatency(bool sign_extend, int size) { ++ int latency = 2; ++ if (sign_extend) { ++ switch (size) { ++ case 8: ++ case 16: ++ case 32: ++ latency += 1; ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } ++ return latency; ++} ++ ++int InsertBitsLatency() { return 2 + DsubuLatency(false) + 2; } ++ ++int ScLatency(int offset) { ++ bool is_one_instruction = is_int9(offset); ++ if (is_one_instruction) { ++ return 1; ++ } else { ++ return 3; ++ } ++} ++ ++int Word32AtomicExchangeLatency(bool sign_extend, int size) { ++ return DadduLatency(false) + 1 + DsubuLatency() + 2 + LlLatency(0) + ++ ExtractBitsLatency(sign_extend, size) + InsertBitsLatency() + ++ ScLatency(0) + BranchShortLatency() + 1; ++} ++ ++int Word32AtomicCompareExchangeLatency(bool sign_extend, int size) { ++ return 2 + DsubuLatency() + 2 + LlLatency(0) + ++ ExtractBitsLatency(sign_extend, size) + InsertBitsLatency() + ++ ScLatency(0) + BranchShortLatency() + 1; ++} ++ ++ ++int InstructionScheduler::GetInstructionLatency(const Instruction* instr) { ++ switch(instr->arch_opcode()) ++ { ++ case kArchCallCodeObject: ++ case kArchCallWasmFunction: ++ return CallLatency(); ++ case kArchTailCallCodeObjectFromJSFunction: ++ case kArchTailCallCodeObject: { ++ int latency = 0; ++ if (instr->arch_opcode() == 
kArchTailCallCodeObjectFromJSFunction) { ++ latency = AssemblePopArgumentsAdoptFrameLatency(); ++ } ++ return latency + JumpLatency(); ++ } ++ case kArchTailCallWasm: ++ case kArchTailCallAddress: ++ return JumpLatency(); ++ case kArchCallJSFunction: { ++ int latency = 0; ++ if (FLAG_debug_code) { ++ latency = 1 + AssertLatency(); ++ } ++ return latency + 1 + DadduLatency(false) + CallLatency(); ++ } ++ case kArchPrepareCallCFunction: ++ return PrepareCallCFunctionLatency(); ++ case kArchSaveCallerRegisters: { ++ auto fp_mode = ++ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())); ++ return PushCallerSavedLatency(fp_mode); ++ } ++ case kArchRestoreCallerRegisters: { ++ auto fp_mode = ++ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())); ++ return PopCallerSavedLatency(fp_mode); ++ } ++ case kArchPrepareTailCall: ++ return 2; ++ case kArchCallCFunction: ++ return CallCFunctionLatency(); ++ case kArchJmp: ++ return AssembleArchJumpLatency(); ++ case kArchTableSwitch: ++ return AssembleArchTableSwitchLatency(); ++ case kArchAbortCSAAssert: ++ return CallLatency() + 1; ++ case kArchDebugBreak: ++ return 1; ++ case kArchComment: ++ case kArchNop: ++ case kArchThrowTerminator: ++ case kArchDeoptimize: ++ return 0; ++ case kArchRet: ++ return AssemblerReturnLatency(); ++ case kArchFramePointer: ++ return 1; ++ case kArchParentFramePointer: ++ return AlignedMemoryLatency(); ++ case kArchTruncateDoubleToI: ++ return TruncateDoubleToIDelayedLatency(); ++ case kArchStoreWithWriteBarrier: ++ return DadduLatency() + 1 + CheckPageFlagLatency(); ++ case kArchStackSlot: ++ return DadduLatency(false) + AndLatency(false) + AssertLatency() + ++ DadduLatency(false) + AndLatency(false) + BranchShortLatency() + ++ 1 + DsubuLatency() + DadduLatency(); ++ case kArchWordPoisonOnSpeculation: ++ return AndLatency(); ++ case kIeee754Float64Acos: ++ case kIeee754Float64Acosh: ++ case kIeee754Float64Asin: ++ case kIeee754Float64Asinh: ++ case kIeee754Float64Atan: ++ case kIeee754Float64Atanh: ++ case kIeee754Float64Atan2: ++ case kIeee754Float64Cos: ++ case kIeee754Float64Cosh: ++ case kIeee754Float64Cbrt: ++ case kIeee754Float64Exp: ++ case kIeee754Float64Expm1: ++ case kIeee754Float64Log: ++ case kIeee754Float64Log1p: ++ case kIeee754Float64Log2: ++ case kIeee754Float64Log10: ++ case kIeee754Float64Pow: ++ case kIeee754Float64Sin: ++ case kIeee754Float64Sinh: ++ case kIeee754Float64Tan: ++ case kIeee754Float64Tanh: ++ return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() + ++ CallCFunctionLatency() + MovFromFloatResultLatency(); ++ case kSw64Add: ++ case kSw64Dadd: ++ return DadduLatency(instr->InputAt(1)->IsRegister()); ++ case kSw64DaddOvf: ++ return DaddOverflowLatency(); ++ case kSw64Sub: ++ case kSw64Dsub: ++ return DsubuLatency(instr->InputAt(1)->IsRegister()); ++ case kSw64DsubOvf: ++ return DsubOverflowLatency(); ++ case kSw64Mul: ++ return MulLatency(); ++ case kSw64MulOvf: ++ return MulOverflowLatency(); ++ case kSw64MulHigh: ++ return MulhLatency(); ++ case kSw64MulHighU: ++ return MulhuLatency(); ++ case kSw64DMulHigh: ++ return DMulhLatency(); ++ case kSw64Div: ++ return DivLatency(instr->InputAt(1)->IsRegister()); ++ case kSw64DivU: ++ return DivuLatency(instr->InputAt(1)->IsRegister()); ++ case kSw64Mod: ++ return ModLatency(instr->InputAt(1)->IsRegister()); ++ case kSw64ModU: ++ return ModuLatency(instr->InputAt(1)->IsRegister()); ++ case kSw64Dmul: ++ return DmulLatency(instr->InputAt(1)->IsRegister()); ++ case kSw64Ddiv: ++ return ModLatency(instr->InputAt(1)->IsRegister()); ++ case
kSw64DdivU: ++ return DdivLatency(instr->InputAt(1)->IsRegister()); ++ case kSw64Dmod: ++ return DmodLatency(instr->InputAt(1)->IsRegister()); ++ case kSw64DmodU: ++ return DmodLatency(instr->InputAt(1)->IsRegister()); ++ case kSw64Dlsa: ++ case kSw64Lsa: ++ return DlsaLatency(); ++ case kSw64And: ++ return AndLatency(instr->InputAt(1)->IsRegister()); ++ case kSw64And32:{ ++ bool is_operand_register = instr->InputAt(1)->IsRegister(); ++ int latency = AndLatency(is_operand_register); ++ if (is_operand_register) { ++ return latency + 2; ++ } else { ++ return latency + 1; ++ } ++ } ++ case kSw64Or: ++ return OrLatency(instr->InputAt(1)->IsRegister()); ++ case kSw64Or32:{ ++ bool is_operand_register = instr->InputAt(1)->IsRegister(); ++ int latency = OrLatency(is_operand_register); ++ if (is_operand_register) { ++ return latency + 2; ++ } else { ++ return latency + 1; ++ } ++ } ++ case kSw64Nor: ++ return NorLatency(instr->InputAt(1)->IsRegister()); ++ case kSw64Nor32:{ ++ bool is_operand_register = instr->InputAt(1)->IsRegister(); ++ int latency = NorLatency(is_operand_register); ++ if (is_operand_register) { ++ return latency + 2; ++ } else { ++ return latency + 1; ++ } ++ } ++ case kSw64Xor: ++ return XorLatency(instr->InputAt(1)->IsRegister()); ++ case kSw64Xor32:{ ++ bool is_operand_register = instr->InputAt(1)->IsRegister(); ++ int latency = XorLatency(is_operand_register); ++ if (is_operand_register) { ++ return latency + 2; ++ } else { ++ return latency + 1; ++ } ++ } ++ case kSw64Clz: ++ return ClzLatency(); ++ case kSw64Dclz: ++ return DclzLatency(); ++ case kSw64Ctz: ++ return CtzLatency(); ++ case kSw64Dctz: ++ return DctzLatency(); ++ case kSw64Popcnt: ++ return PopcntLatency(); ++ case kSw64Dpopcnt: ++ return DpopcntLatency(); ++ case kSw64Shl: ++ return 1; ++ case kSw64Shr: ++ case kSw64Sar: ++ return 2; ++ case kSw64Ext: ++ case kSw64Dext: ++ return ExtLatency(); ++ case kSw64Ins: ++ return InsLatency(); ++ case kSw64Dins: ++ return DinsLatency(); ++ case kSw64Dshl: ++ case kSw64Dshr: ++ case kSw64Dsar: ++ return 1; ++ case kSw64Ror: ++ return RorLatency(instr->InputAt(1)->IsRegister()); ++ case kSw64Dror: ++ return DrorLatency(instr->InputAt(1)->IsRegister()); ++ case kSw64Tst: ++ return AndLatency(instr->InputAt(1)->IsRegister()); ++ case kSw64Mov: ++ return 1; ++ case kSw64CmpS: ++ return CompareF32Latency(); ++ case kSw64AddS: ++ case kSw64SubS: ++ case kSw64MulS: ++ case kSw64DivS: ++ return 1; ++ case kSw64ModS: ++ return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() + ++ CallCFunctionLatency() + MovFromFloatResultLatency(); ++ case kSw64AbsS: ++ return Latency::FCPYS; ++ case kSw64NegS: ++ return 1; ++ case kSw64SqrtS: ++ return Latency::FSQRTS; ++ // case kSw64MaxS: ++ // case kSw64MinS: ++ case kSw64CmpD: ++ return CompareF64Latency(); ++ case kSw64AddD: ++ case kSw64SubD: ++ case kSw64MulD: ++ case kSw64DivD: ++ return 1; ++ case kSw64ModD: ++ return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() + ++ CallCFunctionLatency() + MovFromFloatResultLatency(); ++ case kSw64AbsD: ++ return Latency::FCPYS; ++ case kSw64NegD: ++ return 1; ++ case kSw64SqrtD: ++ return Latency::FSQRTD; ++// case kSw64MaxD: ++// case kSw64MinD: ++ case kSw64Float64RoundDown: ++ case kSw64Float64RoundTruncate: ++ case kSw64Float64RoundUp: ++ case kSw64Float64RoundTiesEven: ++ return Float64RoundLatency(); //TODO ++ case kSw64Float32RoundDown: ++ case kSw64Float32RoundTruncate: ++ case kSw64Float32RoundUp: ++ case kSw64Float32RoundTiesEven: ++ return 
Float32RoundLatency(); //TODO ++ /* case kSw64Float32Max: ++ return Float32MaxLatency(); ++ case kSw64Float64Max: ++ return Float64MaxLatency(); ++ case kSw64Float32Min: ++ return Float32MinLatency(); ++ case kSw64Float64Min: ++ return Float64MinLatency();*/ ++ case kSw64Float64SilenceNaN: ++ return Latency::FSUBD; ++ case kSw64CvtSD: ++ return Latency::FCVTDS; ++ case kSw64CvtDS: ++ return Latency::FCVTSD; ++ case kSw64CvtDW: ++ return Latency::IFMOVD + Latency::FCVTLD; ++ case kSw64CvtSW: ++ return Latency::IFMOVS + Latency::FCVTWL + Latency::FCVTLS; ++ case kSw64CvtSUw: ++ return 1 + Latency::FIMOVS; ++ case kSw64CvtSL: ++ return Latency::IFMOVD + Latency::FCVTLS; ++ case kSw64CvtDL: ++ return Latency::IFMOVD + Latency::FCVTLD; ++ case kSw64CvtDUw: ++ return 1 + Latency::FIMOVS; ++ case kSw64CvtDUl: ++ return 1 + Latency::FIMOVD; ++ case kSw64CvtSUl: ++ return 1 + Latency::FIMOVD; ++ case kSw64FloorWD: ++ return Latency::FCVTDL_N + Latency::FCVTLW + Latency::FIMOVS; ++ case kSw64CeilWD: ++ return Latency::FCVTDL_P + Latency::FCVTLW + Latency::FIMOVS; ++ case kSw64RoundWD: ++ return Latency::FCVTDL_G + Latency::FCVTLW + Latency::FIMOVS; ++ case kSw64TruncWD: ++ return Latency::FCVTDL_Z + Latency::FCVTLW + Latency::FIMOVS; ++ case kSw64FloorWS: ++ return Latency::FCVTSD + Latency::FCVTDL_N + Latency::FCVTLW + Latency::FIMOVS; ++ case kSw64CeilWS: ++ return Latency::FCVTSD + Latency::FCVTDL_P + Latency::FCVTLW + Latency::FIMOVS; ++ case kSw64RoundWS: ++ return Latency::FCVTSD + Latency::FCVTDL_G + Latency::FCVTLW + Latency::FIMOVS; ++ case kSw64TruncWS: ++ return Latency::FCVTSD + Latency::FCVTDL_Z + Latency::FCVTLW + Latency::FIMOVS + 3; //cmplt selne ? ++ // case kSw64TruncLS: ++ //return 5; //TODO ++ // case kSw64TruncLD: ++ //return 5; ++ // case kSw64TruncUwD: ++ //return CompareF64Latency() + 2 * Latency::BRANCH + ++ // 2 * Latency::TRUNC_W_D + Latency::SUB_D + OrLatency() + ++ // Latency::MTC1 + Latency::MFC1 + Latency::MTHC1 + 1; ++ // case kSw64TruncUwS: ++ //return CompareF32Latency() + 2 * Latency::BRANCH + ++ // 2 * Latency::TRUNC_W_S + Latency::SUB_S + OrLatency() + ++ // Latency::MTC1 + 2 * Latency::MFC1 + 2 + MovzLatency(); ++ // case kSw64TruncUlS: ++ // return TruncUlSLatency(); ++ // case kSw64TruncUlD: ++ //return TruncUlDLatency(); ++ //TODO ++ case kSw64BitcastDL: // D -> L ++ return 4; ++ case kSw64BitcastLD: ++ return 4; ++ case kSw64BitcastSW: // W -> S ++ case kSw64BitcastWS: // S -> W ++ return 4; ++ case kSw64Float64ExtractLowWord32: ++ case kSw64Float64InsertLowWord32: ++ return 12; ++ case kSw64Float64ExtractHighWord32: ++ case kSw64Float64InsertHighWord32: ++ return 5; ++ case kSw64Seb: ++ case kSw64Seh: ++ return 1; ++ case kSw64Ldbu: ++ case kSw64Ldhu: ++ case kSw64Ldl: ++ case kSw64Uldl: ++ case kSw64Ldw: ++ case kSw64Uldw: ++ case kSw64Flds: ++ case kSw64Uflds: ++ case kSw64Fldd: ++ case kSw64Ufldd: ++ case kSw64Stb: ++ case kSw64Sth: ++ case kSw64Stw: ++ case kSw64Ustw: ++ case kSw64Stl: ++ case kSw64Ustl: ++ case kSw64Fsts: ++ case kSw64Ufsts: ++ case kSw64Fstd: ++ case kSw64Ufstd: ++ return 4; ++ ++ case kSw64Ldb: ++ case kSw64Ldh: ++ case kSw64Ldwu: ++ case kSw64Uldwu: ++ return 5; ++ ++ case kSw64Uldhu: ++ return 10; ++ case kSw64Uldh: ++ return 11; ++ case kSw64Usth: ++ return 8 + SrlwLatency(instr->InputAt(1)->IsRegister()); ++ case kSw64Push: { ++ int latency = 0; ++ if (instr->InputAt(0)->IsFPRegister()) { ++ latency = 4 + DsubuLatency(false); ++ } else { ++ latency = PushLatency(); ++ } ++ return latency; ++ } ++ case kSw64Peek: { ++ int latency 
= 0; ++ if (instr->OutputAt(0)->IsFPRegister()) { ++ latency = Latency::FLDD; ++ } else { ++ latency = AlignedMemoryLatency(); ++ } ++ return latency; ++ } ++ case kSw64StackClaim: ++ return DsubuLatency(false); ++ case kSw64StoreToStackSlot: { ++ int latency = 0; ++ if (instr->InputAt(0)->IsFPRegister()) { ++ if (instr->InputAt(0)->IsSimd128Register()) { ++ latency = 1; // Estimated value. ++ } else { ++ latency = 4; ++ } ++ } else { ++ latency = AlignedMemoryLatency(); ++ } ++ return latency; ++ } ++ //case kSw64ByteSwap64: ++ //case kSw64ByteSwap32: ++ case kWord32AtomicLoadInt8: ++ case kWord32AtomicLoadUint8: ++ case kWord32AtomicLoadInt16: ++ case kWord32AtomicLoadUint16: ++ case kWord32AtomicLoadWord32: ++ return 2; ++ case kSw64Word64AtomicLoadUint16: ++ case kSw64Word64AtomicLoadUint32: ++ case kSw64Word64AtomicLoadUint64: ++ case kWord32AtomicStoreWord8: ++ case kWord32AtomicStoreWord16: ++ case kWord32AtomicStoreWord32: ++ return 3; ++ case kSw64Word64AtomicStoreWord8: ++ case kSw64Word64AtomicStoreWord16: ++ case kSw64Word64AtomicStoreWord32: ++ case kSw64Word64AtomicStoreWord64: ++ case kWord32AtomicExchangeInt8: ++ return Word32AtomicExchangeLatency(true, 8); ++ case kWord32AtomicExchangeUint8: ++ return Word32AtomicExchangeLatency(false, 8); ++ case kWord32AtomicExchangeInt16: ++ return Word32AtomicExchangeLatency(true, 16); ++ case kWord32AtomicExchangeUint16: ++ return Word32AtomicExchangeLatency(false, 16); ++ case kWord32AtomicExchangeWord32: ++ return 2 + LlLatency(0) + 1 + ScLatency(0) + BranchShortLatency() + 1; ++ case kSw64Word64AtomicExchangeUint8: ++ case kSw64Word64AtomicExchangeUint16: ++ case kSw64Word64AtomicExchangeUint32: ++ case kSw64Word64AtomicExchangeUint64: ++ case kWord32AtomicCompareExchangeInt8: ++ return Word32AtomicCompareExchangeLatency(true, 8); ++ case kWord32AtomicCompareExchangeUint8: ++ return Word32AtomicCompareExchangeLatency(false, 8); ++ case kWord32AtomicCompareExchangeInt16: ++ return Word32AtomicCompareExchangeLatency(true, 16); ++ case kWord32AtomicCompareExchangeUint16: ++ return Word32AtomicCompareExchangeLatency(false, 16); ++ case kWord32AtomicCompareExchangeWord32: ++ return 3 + LlLatency(0) + BranchShortLatency() + 1 + ScLatency(0) + ++ BranchShortLatency() + 1; ++ /* case kSw64Word64AtomicCompareExchangeUint8: ++ case kSw64Word64AtomicCompareExchangeUint16: ++ case kSw64Word64AtomicCompareExchangeUint32: ++ case kSw64Word64AtomicCompareExchangeUint64:*/ ++ //case kWord32Atomic##op##Int8: \ ++ //case kWord32Atomic##op##Uint8: \ ++ //case kWord32Atomic##op##Int16: \ ++ //case kWord32Atomic##op##Uint16: \ ++ //case kWord32Atomic##op##Word32: \ ++ //case kSw64Word64Atomic##op##Uint8: \ ++ //case kSw64Word64Atomic##op##Uint16: \ ++ //case kSw64Word64Atomic##op##Uint32: \ ++ //case kSw64Word64Atomic##op##Uint64: ++ case kSw64AssertEqual: ++ return AssertLatency(); ++// case kSw64S128Zero: { ++// case kSw64I32x4Splat: { ++// case kSw64I32x4ExtractLane: { ++// case kSw64I32x4ReplaceLane: { ++// case kSw64I32x4Add: { ++// case kSw64I32x4Sub: { ++// case kSw64F32x4Splat: { ++// case kSw64F32x4ExtractLane: { ++// case kSw64F32x4ReplaceLane: { ++// case kSw64F32x4SConvertI32x4: { ++// case kSw64F32x4UConvertI32x4: { ++// case kSw64I32x4Mul: { ++// case kSw64I32x4MaxS: { ++// case kSw64I32x4MinS: { ++// case kSw64I32x4Eq: { ++// case kSw64I32x4Ne: { ++// case kSw64I32x4Shl: { ++// case kSw64I32x4ShrS: { ++// case kSw64I32x4ShrU: { ++// case kSw64I32x4MaxU: { ++// case kSw64I32x4MinU: { ++// case kSw64S128Select: { ++// case kSw64F32x4Abs: { 
++// case kSw64F32x4Neg: { ++// case kSw64F32x4RecipApprox: { ++// case kSw64F32x4RecipSqrtApprox: { ++// case kSw64F32x4Add: { ++// case kSw64F32x4Sub: { ++// case kSw64F32x4Mul: { ++// case kSw64F32x4Max: { ++// case kSw64F32x4Min: { ++// case kSw64F32x4Eq: { ++// case kSw64F32x4Ne: { ++// case kSw64F32x4Lt: { ++// case kSw64F32x4Le: { ++//// case kSw64I32x4SConvertF32x4: { ++// case kSw64I32x4UConvertF32x4: { ++// case kSw64I32x4Neg: { ++// case kSw64I32x4GtS: { ++// case kSw64I32x4GeS: { ++// case kSw64I32x4GtU: { ++// case kSw64I32x4GeU: { ++// case kSw64I16x8Splat: { ++// case kSw64I16x8ExtractLane: { ++// case kSw64I16x8ReplaceLane: { ++// case kSw64I16x8Neg: { ++// case kSw64I16x8Shl: { ++// case kSw64I16x8ShrS: { ++// case kSw64I16x8ShrU: { ++// case kSw64I16x8Add: { ++// case kSw64I16x8AddSaturateS: { ++// case kSw64I16x8Sub: { ++// case kSw64I16x8SubSaturateS: { ++// case kSw64I16x8Mul: { ++// case kSw64I16x8MaxS: { ++// case kSw64I16x8MinS: { ++// case kSw64I16x8Eq: { ++// case kSw64I16x8Ne: { ++// case kSw64I16x8GtS: { ++// case kSw64I16x8GeS: { ++// case kSw64I16x8AddSaturateU: { ++// case kSw64I16x8SubSaturateU: { ++// case kSw64I16x8MaxU: { ++// case kSw64I16x8MinU: { ++// case kSw64I16x8GtU: { ++// case kSw64I16x8GeU: { ++// case kSw64I8x16Splat: { ++// case kSw64I8x16ExtractLane: { ++// case kSw64I8x16ReplaceLane: { ++// case kSw64I8x16Neg: { ++// case kSw64I8x16Shl: { ++// case kSw64I8x16ShrS: { ++// case kSw64I8x16Add: { ++// case kSw64I8x16AddSaturateS: { ++// case kSw64I8x16Sub: { ++// case kSw64I8x16SubSaturateS: { ++// case kSw64I8x16Mul: { ++// case kSw64I8x16MaxS: { ++// case kSw64I8x16MinS: { ++// case kSw64I8x16Eq: { ++// case kSw64I8x16Ne: { ++// case kSw64I8x16GtS: { ++// case kSw64I8x16GeS: { ++// case kSw64I8x16ShrU: { ++// case kSw64I8x16AddSaturateU: { ++// case kSw64I8x16SubSaturateU: { ++// case kSw64I8x16MaxU: { ++// case kSw64I8x16MinU: { ++// case kSw64I8x16GtU: { ++// case kSw64I8x16GeU: { ++// case kSw64S128And: { ++// case kSw64S128Or: { ++// case kSw64S128Xor: { ++// case kSw64S128Not: { ++ ++ //case kSw64S1x4AnyTrue: ++ //case kSw64S1x8AnyTrue: ++ //case kSw64S1x16AnyTrue: ++ //case kSw64S1x4AllTrue: ++ //case kSw64S1x8AllTrue: ++ //case kSw64S1x16AllTrue: ++ ++ /* case kSw64MsaLd: { ++ case kSw64MsaSt: { ++ case kSw64S32x4InterleaveRight: { ++ case kSw64S32x4InterleaveLeft: { ++ case kSw64S32x4PackEven: { ++ case kSw64S32x4PackOdd: { ++ case kSw64S32x4InterleaveEven: { ++ case kSw64S32x4InterleaveOdd: { ++ case kSw64S32x4Shuffle: { ++ case kSw64S16x8InterleaveRight: { ++ case kSw64S16x8InterleaveLeft: { ++ case kSw64S16x8PackEven: { ++ case kSw64S16x8PackOdd: { ++ case kSw64S16x8InterleaveEven: { ++ case kSw64S16x8InterleaveOdd: { ++ case kSw64S16x4Reverse: { ++ case kSw64S16x2Reverse: { ++ case kSw64S8x16InterleaveRight: { ++ case kSw64S8x16InterleaveLeft: { ++ case kSw64S8x16PackEven: { ++ case kSw64S8x16PackOdd: { ++ case kSw64S8x16InterleaveEven: { ++ case kSw64S8x16InterleaveOdd: { ++ case kSw64S8x16Concat: { ++ case kSw64S8x16Shuffle: { ++ case kSw64S8x8Reverse: { ++ case kSw64S8x4Reverse: { ++ case kSw64S8x2Reverse: { ++ case kSw64I32x4SConvertI16x8Low: { ++ case kSw64I32x4SConvertI16x8High: { ++ case kSw64I32x4UConvertI16x8Low: { ++ case kSw64I32x4UConvertI16x8High: { ++ case kSw64I16x8SConvertI8x16Low: { ++ case kSw64I16x8SConvertI8x16High: { ++ case kSw64I16x8SConvertI32x4: { ++ case kSw64I16x8UConvertI32x4: { ++ case kSw64I16x8UConvertI8x16Low: { ++ case kSw64I16x8UConvertI8x16High: { ++ case kSw64I8x16SConvertI16x8: { ++ case 
kSw64I8x16UConvertI16x8: {
++      case kSw64F32x4AddHoriz:
++      case kSw64I32x4AddHoriz:
++      case kSw64I16x8AddHoriz: */
++
++    default:
++      return 1;
++  }
++}
++
++}  // namespace compiler
++}  // namespace internal
++}  // namespace v8
+diff --git a/src/3rdparty/chromium/v8/src/compiler/backend/sw64/instruction-selector-sw64.cc b/src/3rdparty/chromium/v8/src/compiler/backend/sw64/instruction-selector-sw64.cc
+new file mode 100755
+index 000000000..d152466a7
+--- /dev/null
++++ b/src/3rdparty/chromium/v8/src/compiler/backend/sw64/instruction-selector-sw64.cc
+@@ -0,0 +1,3291 @@
++// Copyright 2014 the V8 project authors. All rights reserved.
++// Use of this source code is governed by a BSD-style license that can be
++// found in the LICENSE file.
++
++#include "src/base/bits.h"
++#include "src/compiler/backend/instruction-selector-impl.h"
++#include "src/compiler/node-matchers.h"
++#include "src/compiler/node-properties.h"
++
++namespace v8 {
++namespace internal {
++namespace compiler {
++
++#define TRACE_UNIMPL() \
++  PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
++
++#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
++
++
++// Adds Sw64-specific methods for generating InstructionOperands.
++class Sw64OperandGenerator final : public OperandGenerator {
++ public:
++  explicit Sw64OperandGenerator(InstructionSelector* selector)
++      : OperandGenerator(selector) {}
++
++  InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
++    if (CanBeImmediate(node, opcode)) {
++      return UseImmediate(node);
++    }
++    return UseRegister(node);
++  }
++
++  // Use the zero register if the node has the immediate value zero, otherwise
++  // assign a register.
++  InstructionOperand UseRegisterOrImmediateZero(Node* node) {
++    if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
++        (IsFloatConstant(node) &&
++         (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
++      return UseImmediate(node);
++    }
++    return UseRegister(node);
++  }
++
++  bool IsIntegerConstant(Node* node) {
++    return (node->opcode() == IrOpcode::kInt32Constant) ||
++           (node->opcode() == IrOpcode::kInt64Constant);
++  }
++
++  int64_t GetIntegerConstantValue(Node* node) {
++    if (node->opcode() == IrOpcode::kInt32Constant) {
++      return OpParameter<int32_t>(node->op());
++    }
++    DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
++    return OpParameter<int64_t>(node->op());
++  }
++
++  bool IsFloatConstant(Node* node) {
++    return (node->opcode() == IrOpcode::kFloat32Constant) ||
++           (node->opcode() == IrOpcode::kFloat64Constant);
++  }
++
++  double GetFloatConstantValue(Node* node) {
++    if (node->opcode() == IrOpcode::kFloat32Constant) {
++      return OpParameter<float>(node->op());
++    }
++    DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
++    return OpParameter<double>(node->op());
++  }
++
++  bool CanBeImmediate(Node* node, InstructionCode mode) {
++    return IsIntegerConstant(node) &&
++           CanBeImmediate(GetIntegerConstantValue(node), mode);
++  }
++
++  bool CanBeImmediate(int64_t value, InstructionCode opcode) {
++    switch (ArchOpcodeField::decode(opcode)) {
++      case kSw64Shl:
++      case kSw64Sar:
++      case kSw64Shr:
++        return is_uint5(value);
++      case kSw64Dshl:
++      case kSw64Dsar:
++      case kSw64Dshr:
++        return is_uint6(value);
++      case kSw64Add:
++      case kSw64And32:
++      case kSw64And:
++      case kSw64Dadd:
++      case kSw64Or32:
++      case kSw64Or:
++      case kSw64Tst:
++      case kSw64Xor:
++        return is_uint16(value);
++      case kSw64Ldb:
++      case kSw64Ldbu:
++      case kSw64Stb:
++      case kSw64Ldh:
++      case kSw64Ldhu:
++      case kSw64Sth:
++      case kSw64Ldw:
++      case 
kSw64Stw: ++ case kSw64Ldl: ++ case kSw64Stl: ++ case kSw64Flds: ++ case kSw64Fsts: ++ case kSw64Fldd: ++ case kSw64Fstd: ++ return is_int32(value); ++ default: ++ return is_int16(value); ++ } ++ } ++ ++ private: ++ bool ImmediateFitsAddrMode1Instruction(int32_t imm) const { ++ TRACE_UNIMPL(); ++ return false; ++ } ++}; ++ ++ ++static void VisitRR(InstructionSelector* selector, ArchOpcode opcode, ++ Node* node) { ++ Sw64OperandGenerator g(selector); ++ selector->Emit(opcode, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0))); ++} ++ ++static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, ++ Node* node) { ++ Sw64OperandGenerator g(selector); ++ int32_t imm = OpParameter(node->op()); ++ selector->Emit(opcode, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0)), g.UseImmediate(imm)); ++} ++ ++static void VisitSimdShift(InstructionSelector* selector, ArchOpcode opcode, ++ Node* node) { ++ Sw64OperandGenerator g(selector); ++ if (g.IsIntegerConstant(node->InputAt(1))) { ++ selector->Emit(opcode, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0)), ++ g.UseImmediate(node->InputAt(1))); ++ } else { ++ selector->Emit(opcode, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0)), ++ g.UseRegister(node->InputAt(1))); ++ } ++} ++ ++static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode, ++ Node* node) { ++ Sw64OperandGenerator g(selector); ++ int32_t imm = OpParameter(node->op()); ++ selector->Emit(opcode, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0)), g.UseImmediate(imm), ++ g.UseRegister(node->InputAt(1))); ++} ++ ++static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, ++ Node* node) { ++ Sw64OperandGenerator g(selector); ++ selector->Emit(opcode, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0)), ++ g.UseRegister(node->InputAt(1))); ++} ++ ++//static void VisitUniqueRRR(InstructionSelector* selector, ArchOpcode opcode, ++// Node* node) { ++// Sw64OperandGenerator g(selector); ++// selector->Emit(opcode, g.DefineAsRegister(node), ++// g.UseUniqueRegister(node->InputAt(0)), ++// g.UseUniqueRegister(node->InputAt(1))); ++//} ++ ++void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) { ++ Sw64OperandGenerator g(selector); ++ selector->Emit( ++ opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)), ++ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2))); ++} ++ ++static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, ++ Node* node) { ++ Sw64OperandGenerator g(selector); ++ selector->Emit(opcode, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0)), ++ g.UseOperand(node->InputAt(1), opcode)); ++} ++ ++struct ExtendingLoadMatcher { ++ ExtendingLoadMatcher(Node* node, InstructionSelector* selector) ++ : matches_(false), selector_(selector), base_(nullptr), immediate_(0) { ++ Initialize(node); ++ } ++ ++ bool Matches() const { return matches_; } ++ ++ Node* base() const { ++ DCHECK(Matches()); ++ return base_; ++ } ++ int64_t immediate() const { ++ DCHECK(Matches()); ++ return immediate_; ++ } ++ ArchOpcode opcode() const { ++ DCHECK(Matches()); ++ return opcode_; ++ } ++ ++ private: ++ bool matches_; ++ InstructionSelector* selector_; ++ Node* base_; ++ int64_t immediate_; ++ ArchOpcode opcode_; ++ ++ void Initialize(Node* node) { ++ Int64BinopMatcher m(node); ++ // When loading a 64-bit value and shifting by 32, we should ++ // just load and sign-extend the interesting 4 bytes instead. 
++    // This happens, for example, when we're loading and untagging SMIs.
++    DCHECK(m.IsWord64Sar());
++    if (m.left().IsLoad() && m.right().Is(32) &&
++        selector_->CanCover(m.node(), m.left().node())) {
++      DCHECK_EQ(selector_->GetEffectLevel(node),
++                selector_->GetEffectLevel(m.left().node()));
++      MachineRepresentation rep =
++          LoadRepresentationOf(m.left().node()->op()).representation();
++      DCHECK_EQ(3, ElementSizeLog2Of(rep));
++      if (rep != MachineRepresentation::kTaggedSigned &&
++          rep != MachineRepresentation::kTaggedPointer &&
++          rep != MachineRepresentation::kTagged &&
++          rep != MachineRepresentation::kWord64) {
++        return;
++      }
++
++      Sw64OperandGenerator g(selector_);
++      Node* load = m.left().node();
++      Node* offset = load->InputAt(1);
++      base_ = load->InputAt(0);
++      opcode_ = kSw64Ldw;
++      if (g.CanBeImmediate(offset, opcode_)) {
++#if defined(V8_TARGET_LITTLE_ENDIAN)
++        immediate_ = g.GetIntegerConstantValue(offset) + 4;
++#elif defined(V8_TARGET_BIG_ENDIAN)
++        immediate_ = g.GetIntegerConstantValue(offset);
++#endif
++        matches_ = g.CanBeImmediate(immediate_, kSw64Ldw);
++      }
++    }
++  }
++};
++
++bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node,
++                          Node* output_node) {
++  ExtendingLoadMatcher m(node, selector);
++  Sw64OperandGenerator g(selector);
++  if (m.Matches()) {
++    InstructionOperand inputs[2];
++    inputs[0] = g.UseRegister(m.base());
++    InstructionCode opcode =
++        m.opcode() | AddressingModeField::encode(kMode_MRI);
++    DCHECK(is_int32(m.immediate()));
++    inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
++    InstructionOperand outputs[] = {g.DefineAsRegister(output_node)};
++    selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
++                   inputs);
++    return true;
++  }
++  return false;
++}
++
++bool TryMatchImmediate(InstructionSelector* selector,
++                       InstructionCode* opcode_return, Node* node,
++                       size_t* input_count_return, InstructionOperand* inputs) {
++  Sw64OperandGenerator g(selector);
++  if (g.CanBeImmediate(node, *opcode_return)) {
++    *opcode_return |= AddressingModeField::encode(kMode_MRI);
++    inputs[0] = g.UseImmediate(node);
++    *input_count_return = 1;
++    return true;
++  }
++  return false;
++}
++
++static void VisitBinop(InstructionSelector* selector, Node* node,
++                       InstructionCode opcode, bool has_reverse_opcode,
++                       InstructionCode reverse_opcode,
++                       FlagsContinuation* cont) {
++  Sw64OperandGenerator g(selector);
++  Int32BinopMatcher m(node);
++  InstructionOperand inputs[2];
++  size_t input_count = 0;
++  InstructionOperand outputs[1];
++  size_t output_count = 0;
++
++  if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
++                        &inputs[1])) {
++    inputs[0] = g.UseRegister(m.left().node());
++    input_count++;
++  } else if (has_reverse_opcode &&
++             TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
++                               &input_count, &inputs[1])) {
++    inputs[0] = g.UseRegister(m.right().node());
++    opcode = reverse_opcode;
++    input_count++;
++  } else {
++    inputs[input_count++] = g.UseRegister(m.left().node());
++    inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
++  }
++
++  if (cont->IsDeoptimize()) {
++    // If we can deoptimize as a result of the binop, we need to make sure that
++    // the deopt inputs are not overwritten by the binop result. One way
++    // to achieve that is to declare the output register as same-as-first.
++ outputs[output_count++] = g.DefineSameAsFirst(node); ++ } else { ++ outputs[output_count++] = g.DefineAsRegister(node); ++ } ++ ++ DCHECK_NE(0u, input_count); ++ DCHECK_EQ(1u, output_count); ++ DCHECK_GE(arraysize(inputs), input_count); ++ DCHECK_GE(arraysize(outputs), output_count); ++ ++ selector->EmitWithContinuation(opcode, output_count, outputs, input_count, ++ inputs, cont); ++} ++ ++static void VisitBinop(InstructionSelector* selector, Node* node, ++ InstructionCode opcode, bool has_reverse_opcode, ++ InstructionCode reverse_opcode) { ++ FlagsContinuation cont; ++ VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont); ++} ++ ++static void VisitBinop(InstructionSelector* selector, Node* node, ++ InstructionCode opcode, FlagsContinuation* cont) { ++ VisitBinop(selector, node, opcode, false, kArchNop, cont); ++} ++ ++static void VisitBinop(InstructionSelector* selector, Node* node, ++ InstructionCode opcode) { ++ VisitBinop(selector, node, opcode, false, kArchNop); ++} ++ ++void InstructionSelector::VisitStackSlot(Node* node) { ++ StackSlotRepresentation rep = StackSlotRepresentationOf(node->op()); ++ int alignment = rep.alignment(); ++ int slot = frame_->AllocateSpillSlot(rep.size(), alignment); ++ OperandGenerator g(this); ++ ++ Emit(kArchStackSlot, g.DefineAsRegister(node), ++ sequence()->AddImmediate(Constant(slot)), ++ sequence()->AddImmediate(Constant(alignment)), 0, nullptr); ++} ++ ++void InstructionSelector::VisitAbortCSAAssert(Node* node) { ++ Sw64OperandGenerator g(this); ++ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0)); ++} ++ ++void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode, ++ Node* output = nullptr) { ++ Sw64OperandGenerator g(selector); ++ Node* base = node->InputAt(0); ++ Node* index = node->InputAt(1); ++ ++ if (g.CanBeImmediate(index, opcode)) { ++ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI), ++ g.DefineAsRegister(output == nullptr ? node : output), ++ g.UseRegister(base), g.UseImmediate(index)); ++ } else { ++ InstructionOperand addr_reg = g.TempRegister(); ++ selector->Emit(kSw64Dadd | AddressingModeField::encode(kMode_None), ++ addr_reg, g.UseRegister(index), g.UseRegister(base)); ++ // Emit desired load opcode, using temp addr_reg. ++ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI), ++ g.DefineAsRegister(output == nullptr ? node : output), ++ addr_reg, g.TempImmediate(0)); ++ } ++} ++ ++void InstructionSelector::VisitLoadTransform(Node* node) { ++ LoadTransformParameters params = LoadTransformParametersOf(node->op()); ++ ++ InstructionCode opcode = kArchNop; ++ switch (params.transformation) { ++//SKTODO ++ default: ++ UNIMPLEMENTED(); ++ } ++ ++ EmitLoad(this, node, opcode); ++} ++ ++void InstructionSelector::VisitLoad(Node* node) { ++ LoadRepresentation load_rep = LoadRepresentationOf(node->op()); ++ ++ InstructionCode opcode = kArchNop; ++ switch (load_rep.representation()) { ++ case MachineRepresentation::kFloat32: ++ opcode = kSw64Flds; ++ break; ++ case MachineRepresentation::kFloat64: ++ opcode = kSw64Fldd; ++ break; ++ case MachineRepresentation::kBit: // Fall through. ++ case MachineRepresentation::kWord8: ++ opcode = load_rep.IsUnsigned() ? kSw64Ldbu : kSw64Ldb; ++ break; ++ case MachineRepresentation::kWord16: ++ opcode = load_rep.IsUnsigned() ? kSw64Ldhu : kSw64Ldh; ++ break; ++ case MachineRepresentation::kWord32: ++ opcode = load_rep.IsUnsigned() ? 
kSw64Ldwu : kSw64Ldw; ++ break; ++ case MachineRepresentation::kTaggedSigned: // Fall through. ++ case MachineRepresentation::kTaggedPointer: // Fall through. ++ case MachineRepresentation::kTagged: // Fall through. ++ case MachineRepresentation::kWord64: ++ opcode = kSw64Ldl; ++ break; ++ case MachineRepresentation::kSimd128: ++ opcode = kSw64MsaLd; ++ break; ++ case MachineRepresentation::kCompressedPointer: // Fall through. ++ case MachineRepresentation::kCompressed: // Fall through. ++ case MachineRepresentation::kNone: ++ UNREACHABLE(); ++ } ++ if (node->opcode() == IrOpcode::kPoisonedLoad) { ++ CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison); ++ opcode |= MiscField::encode(kMemoryAccessPoisoned); ++ } ++ ++ EmitLoad(this, node, opcode); ++} ++ ++void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); } ++ ++void InstructionSelector::VisitProtectedLoad(Node* node) { ++ // TODO(eholk) ++ UNIMPLEMENTED(); ++} ++ ++void InstructionSelector::VisitStore(Node* node) { ++ Sw64OperandGenerator g(this); ++ Node* base = node->InputAt(0); ++ Node* index = node->InputAt(1); ++ Node* value = node->InputAt(2); ++ ++ StoreRepresentation store_rep = StoreRepresentationOf(node->op()); ++ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind(); ++ MachineRepresentation rep = store_rep.representation(); ++ ++ if (FLAG_enable_unconditional_write_barriers && CanBeTaggedPointer(rep)) { ++ write_barrier_kind = kFullWriteBarrier; ++ } ++ ++ // TODO(sw64): I guess this could be done in a better way. ++ if (write_barrier_kind != kNoWriteBarrier && ++ V8_LIKELY(!FLAG_disable_write_barriers)) { ++ DCHECK(CanBeTaggedPointer(rep)); ++ InstructionOperand inputs[3]; ++ size_t input_count = 0; ++ inputs[input_count++] = g.UseUniqueRegister(base); ++ inputs[input_count++] = g.UseUniqueRegister(index); ++ inputs[input_count++] = g.UseUniqueRegister(value); ++ RecordWriteMode record_write_mode = ++ WriteBarrierKindToRecordWriteMode(write_barrier_kind); ++ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()}; ++ size_t const temp_count = arraysize(temps); ++ InstructionCode code = kArchStoreWithWriteBarrier; ++ code |= MiscField::encode(static_cast(record_write_mode)); ++ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps); ++ } else { ++ ArchOpcode opcode = kArchNop; ++ switch (rep) { ++ case MachineRepresentation::kFloat32: ++ opcode = kSw64Fsts; ++ break; ++ case MachineRepresentation::kFloat64: ++ opcode = kSw64Fstd; ++ break; ++ case MachineRepresentation::kBit: // Fall through. ++ case MachineRepresentation::kWord8: ++ opcode = kSw64Stb; ++ break; ++ case MachineRepresentation::kWord16: ++ opcode = kSw64Sth; ++ break; ++ case MachineRepresentation::kWord32: ++ opcode = kSw64Stw; ++ break; ++ case MachineRepresentation::kTaggedSigned: // Fall through. ++ case MachineRepresentation::kTaggedPointer: // Fall through. ++ case MachineRepresentation::kTagged: // Fall through. ++ case MachineRepresentation::kWord64: ++ opcode = kSw64Stl; ++ break; ++ case MachineRepresentation::kSimd128: ++ opcode = kSw64MsaSt; ++ break; ++ case MachineRepresentation::kCompressedPointer: // Fall through. ++ case MachineRepresentation::kCompressed: // Fall through. 
++ case MachineRepresentation::kNone: ++ UNREACHABLE(); ++ return; ++ } ++ ++ if (g.CanBeImmediate(index, opcode)) { ++ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(), ++ g.UseRegister(base), g.UseImmediate(index), ++ g.UseRegisterOrImmediateZero(value)); ++ } else { ++ InstructionOperand addr_reg = g.TempRegister(); ++ Emit(kSw64Dadd | AddressingModeField::encode(kMode_None), addr_reg, ++ g.UseRegister(index), g.UseRegister(base)); ++ // Emit desired store opcode, using temp addr_reg. ++ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(), ++ addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value)); ++ } ++ } ++} ++ ++void InstructionSelector::VisitProtectedStore(Node* node) { ++ // TODO(eholk) ++ UNIMPLEMENTED(); ++} ++ ++void InstructionSelector::VisitWord32And(Node* node) { ++#ifdef SW64 ++ //TODO: It is an optimizaion! ++ ++#else ++ Sw64OperandGenerator g(this); ++ Int32BinopMatcher m(node); ++ if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) && ++ m.right().HasValue()) { ++ uint32_t mask = m.right().Value(); ++ uint32_t mask_width = base::bits::CountPopulation(mask); ++ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask); ++ if ((mask_width != 0) && (mask_msb + mask_width == 32)) { ++ // The mask must be contiguous, and occupy the least-significant bits. ++ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask)); ++ ++ // Select Ext for And(Shr(x, imm), mask) where the mask is in the least ++ // significant bits. ++ Int32BinopMatcher mleft(m.left().node()); ++ if (mleft.right().HasValue()) { ++ // Any shift value can match; int32 shifts use `value % 32`. ++ uint32_t lsb = mleft.right().Value() & 0x1F; ++ ++ // Ext cannot extract bits past the register size, however since ++ // shifting the original value would have introduced some zeros we can ++ // still use Ext with a smaller mask and the remaining bits will be ++ // zeros. ++ if (lsb + mask_width > 32) mask_width = 32 - lsb; ++ ++ Emit(kSw64Ext, g.DefineAsRegister(node), ++ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb), ++ g.TempImmediate(mask_width)); ++ return; ++ } ++ // Other cases fall through to the normal And operation. ++ } ++ } ++ if (m.right().HasValue()) { ++ uint32_t mask = m.right().Value(); ++ uint32_t shift = base::bits::CountPopulation(~mask); ++ uint32_t msb = base::bits::CountLeadingZeros32(~mask); ++ if (shift != 0 && shift != 32 && msb + shift == 32) { ++ // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction ++ // and remove constant loading of inverted mask. ++ Emit(kSw64Ins, g.DefineSameAsFirst(node), ++ g.UseRegister(m.left().node()), g.TempImmediate(0), ++ g.TempImmediate(shift)); ++ return; ++ } ++ } ++#endif ++ VisitBinop(this, node, kSw64And32, true, kSw64And32); ++} ++ ++ ++void InstructionSelector::VisitWord64And(Node* node) { ++#ifdef SW64 ++ //TODO: It is an optimizaion! ++ ++#else ++ Sw64OperandGenerator g(this); ++ Int64BinopMatcher m(node); ++ if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) && ++ m.right().HasValue()) { ++ uint64_t mask = m.right().Value(); ++ uint32_t mask_width = base::bits::CountPopulation(mask); ++ uint32_t mask_msb = base::bits::CountLeadingZeros64(mask); ++ if ((mask_width != 0) && (mask_msb + mask_width == 64)) { ++ // The mask must be contiguous, and occupy the least-significant bits. ++ DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask)); ++ ++ // Select Dext for And(Shr(x, imm), mask) where the mask is in the least ++ // significant bits. 
++ Int64BinopMatcher mleft(m.left().node()); ++ if (mleft.right().HasValue()) { ++ // Any shift value can match; int64 shifts use `value % 64`. ++ uint32_t lsb = static_cast(mleft.right().Value() & 0x3F); ++ ++ // Dext cannot extract bits past the register size, however since ++ // shifting the original value would have introduced some zeros we can ++ // still use Dext with a smaller mask and the remaining bits will be ++ // zeros. ++ if (lsb + mask_width > 64) mask_width = 64 - lsb; ++ ++ if (lsb == 0 && mask_width == 64) { ++ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node())); ++ } else { ++ Emit(kSw64Dext, g.DefineAsRegister(node), ++ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb), ++ g.TempImmediate(static_cast(mask_width))); ++ } ++ return; ++ } ++ // Other cases fall through to the normal And operation. ++ } ++ } ++ if (m.right().HasValue()) { ++ uint64_t mask = m.right().Value(); ++ uint32_t shift = base::bits::CountPopulation(~mask); ++ uint32_t msb = base::bits::CountLeadingZeros64(~mask); ++ if (shift != 0 && shift < 32 && msb + shift == 64) { ++ // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction ++ // and remove constant loading of inverted mask. Dins cannot insert bits ++ // past word size, so shifts smaller than 32 are covered. ++ Emit(kSw64Dins, g.DefineSameAsFirst(node), ++ g.UseRegister(m.left().node()), g.TempImmediate(0), ++ g.TempImmediate(shift)); ++ return; ++ } ++ } ++#endif ++ VisitBinop(this, node, kSw64And, true, kSw64And); ++} ++ ++ ++void InstructionSelector::VisitWord32Or(Node* node) { ++ VisitBinop(this, node, kSw64Or32, true, kSw64Or32); ++} ++ ++ ++void InstructionSelector::VisitWord64Or(Node* node) { ++ VisitBinop(this, node, kSw64Or, true, kSw64Or); ++} ++ ++ ++void InstructionSelector::VisitWord32Xor(Node* node) { ++ Int32BinopMatcher m(node); ++ if (m.left().IsWord32Or() && CanCover(node, m.left().node()) && ++ m.right().Is(-1)) { ++ Int32BinopMatcher mleft(m.left().node()); ++ if (!mleft.right().HasValue()) { ++ Sw64OperandGenerator g(this); ++ Emit(kSw64Nor32, g.DefineAsRegister(node), ++ g.UseRegister(mleft.left().node()), ++ g.UseRegister(mleft.right().node())); ++ return; ++ } ++ } ++ if (m.right().Is(-1)) { ++ // Use Nor for bit negation and eliminate constant loading for xori. ++ Sw64OperandGenerator g(this); ++ Emit(kSw64Nor32, g.DefineAsRegister(node), g.UseRegister(m.left().node()), ++ g.TempImmediate(0)); ++ return; ++ } ++ VisitBinop(this, node, kSw64Xor32, true, kSw64Xor32); ++} ++ ++void InstructionSelector::VisitWord64Xor(Node* node) { ++ Int64BinopMatcher m(node); ++ if (m.left().IsWord64Or() && CanCover(node, m.left().node()) && ++ m.right().Is(-1)) { ++ Int64BinopMatcher mleft(m.left().node()); ++ if (!mleft.right().HasValue()) { ++ Sw64OperandGenerator g(this); ++ Emit(kSw64Nor, g.DefineAsRegister(node), ++ g.UseRegister(mleft.left().node()), ++ g.UseRegister(mleft.right().node())); ++ return; ++ } ++ } ++ if (m.right().Is(-1)) { ++ // Use Nor for bit negation and eliminate constant loading for xori. 
++ Sw64OperandGenerator g(this); ++ Emit(kSw64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()), ++ g.TempImmediate(0)); ++ return; ++ } ++ VisitBinop(this, node, kSw64Xor, true, kSw64Xor); ++} ++ ++ ++void InstructionSelector::VisitWord32Shl(Node* node) { ++ Int32BinopMatcher m(node); ++ if (m.left().IsWord32And() && CanCover(node, m.left().node()) && ++ m.right().IsInRange(1, 31)) { ++ Sw64OperandGenerator g(this); ++ Int32BinopMatcher mleft(m.left().node()); ++ // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is ++ // contiguous, and the shift immediate non-zero. ++ if (mleft.right().HasValue()) { ++ uint32_t mask = mleft.right().Value(); ++ uint32_t mask_width = base::bits::CountPopulation(mask); ++ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask); ++ if ((mask_width != 0) && (mask_msb + mask_width == 32)) { ++ uint32_t shift = m.right().Value(); ++ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask)); ++ DCHECK_NE(0u, shift); ++ if ((shift + mask_width) >= 32) { ++ // If the mask is contiguous and reaches or extends beyond the top ++ // bit, only the shift is needed. ++ Emit(kSw64Shl, g.DefineAsRegister(node), ++ g.UseRegister(mleft.left().node()), ++ g.UseImmediate(m.right().node())); ++ return; ++ } ++ } ++ } ++ } ++ VisitRRO(this, kSw64Shl, node); ++} ++ ++void InstructionSelector::VisitWord32Shr(Node* node) { ++ Int32BinopMatcher m(node); ++ if (m.left().IsWord32And() && m.right().HasValue()) { ++ uint32_t lsb = m.right().Value() & 0x1F; ++ Int32BinopMatcher mleft(m.left().node()); ++ if (mleft.right().HasValue() && mleft.right().Value() != 0) { ++ // Select Ext for Shr(And(x, mask), imm) where the result of the mask is ++ // shifted into the least-significant bits. ++ uint32_t mask = (mleft.right().Value() >> lsb) << lsb; ++ unsigned mask_width = base::bits::CountPopulation(mask); ++ unsigned mask_msb = base::bits::CountLeadingZeros32(mask); ++ if ((mask_msb + mask_width + lsb) == 32) { ++ Sw64OperandGenerator g(this); ++ DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask)); ++ Emit(kSw64Ext, g.DefineAsRegister(node), ++ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb), ++ g.TempImmediate(mask_width)); ++ return; ++ } ++ } ++ } ++ VisitRRO(this, kSw64Shr, node); ++} ++ ++void InstructionSelector::VisitWord32Sar(Node* node) { ++ Int32BinopMatcher m(node); ++ if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) { ++ Int32BinopMatcher mleft(m.left().node()); ++ if (m.right().HasValue() && mleft.right().HasValue()) { ++ Sw64OperandGenerator g(this); ++ uint32_t sar = m.right().Value(); ++ uint32_t shl = mleft.right().Value(); ++ if ((sar == shl) && (sar == 16)) { ++ Emit(kSw64Seh, g.DefineAsRegister(node), ++ g.UseRegister(mleft.left().node())); ++ return; ++ } else if ((sar == shl) && (sar == 24)) { ++ Emit(kSw64Seb, g.DefineAsRegister(node), ++ g.UseRegister(mleft.left().node())); ++ return; ++ } else if ((sar == shl) && (sar == 32)) { ++ Emit(kSw64Shl, g.DefineAsRegister(node), ++ g.UseRegister(mleft.left().node()), g.TempImmediate(0)); ++ return; ++ } ++ } ++ } ++ VisitRRO(this, kSw64Sar, node); ++} ++ ++void InstructionSelector::VisitWord64Shl(Node* node) { ++ Sw64OperandGenerator g(this); ++ Int64BinopMatcher m(node); ++ if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) && ++ m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) { ++ // There's no need to sign/zero-extend to 64-bit if we shift out the upper ++ // 32 bits anyway. 
++ Emit(kSw64Dshl, g.DefineSameAsFirst(node), ++ g.UseRegister(m.left().node()->InputAt(0)), ++ g.UseImmediate(m.right().node())); ++ return; ++ } ++ if (m.left().IsWord64And() && CanCover(node, m.left().node()) && ++ m.right().IsInRange(1, 63)) { ++ // Match Word64Shl(Word64And(x, mask), imm) to Dshl where the mask is ++ // contiguous, and the shift immediate non-zero. ++ Int64BinopMatcher mleft(m.left().node()); ++ if (mleft.right().HasValue()) { ++ uint64_t mask = mleft.right().Value(); ++ uint32_t mask_width = base::bits::CountPopulation(mask); ++ uint32_t mask_msb = base::bits::CountLeadingZeros64(mask); ++ if ((mask_width != 0) && (mask_msb + mask_width == 64)) { ++ uint64_t shift = m.right().Value(); ++ DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask)); ++ DCHECK_NE(0u, shift); ++ ++ if ((shift + mask_width) >= 64) { ++ // If the mask is contiguous and reaches or extends beyond the top ++ // bit, only the shift is needed. ++ Emit(kSw64Dshl, g.DefineAsRegister(node), ++ g.UseRegister(mleft.left().node()), ++ g.UseImmediate(m.right().node())); ++ return; ++ } ++ } ++ } ++ } ++ VisitRRO(this, kSw64Dshl, node); ++} ++ ++ ++void InstructionSelector::VisitWord64Shr(Node* node) { ++ Int64BinopMatcher m(node); ++ if (m.left().IsWord64And() && m.right().HasValue()) { ++ uint32_t lsb = m.right().Value() & 0x3F; ++ Int64BinopMatcher mleft(m.left().node()); ++ if (mleft.right().HasValue() && mleft.right().Value() != 0) { ++ // Select Dext for Shr(And(x, mask), imm) where the result of the mask is ++ // shifted into the least-significant bits. ++ uint64_t mask = (mleft.right().Value() >> lsb) << lsb; ++ unsigned mask_width = base::bits::CountPopulation(mask); ++ unsigned mask_msb = base::bits::CountLeadingZeros64(mask); ++ if ((mask_msb + mask_width + lsb) == 64) { ++ Sw64OperandGenerator g(this); ++ DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask)); ++ Emit(kSw64Dext, g.DefineAsRegister(node), ++ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb), ++ g.TempImmediate(mask_width)); ++ return; ++ } ++ } ++ } ++ VisitRRO(this, kSw64Dshr, node); ++} ++ ++ ++void InstructionSelector::VisitWord64Sar(Node* node) { ++ if (TryEmitExtendingLoad(this, node, node)) return; ++ VisitRRO(this, kSw64Dsar, node); ++} ++ ++void InstructionSelector::VisitWord32Rol(Node* node) { UNREACHABLE(); } ++ ++void InstructionSelector::VisitWord64Rol(Node* node) { UNREACHABLE(); } ++ ++void InstructionSelector::VisitWord32Ror(Node* node) { ++ VisitRRO(this, kSw64Ror, node); ++} ++ ++ ++void InstructionSelector::VisitWord32Clz(Node* node) { ++ VisitRR(this, kSw64Clz, node); ++} ++ ++ ++void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); } ++ ++ ++void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); } ++ ++void InstructionSelector::VisitWord64ReverseBytes(Node* node) { ++ Sw64OperandGenerator g(this); ++ Emit(kSw64ByteSwap64, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0))); ++} ++ ++void InstructionSelector::VisitWord32ReverseBytes(Node* node) { ++ Sw64OperandGenerator g(this); ++ Emit(kSw64ByteSwap32, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0))); ++} ++ ++void InstructionSelector::VisitSimd128ReverseBytes(Node* node) { ++ UNREACHABLE(); ++} ++ ++void InstructionSelector::VisitWord32Ctz(Node* node) { ++ Sw64OperandGenerator g(this); ++ Emit(kSw64Ctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); ++} ++ ++ ++void InstructionSelector::VisitWord64Ctz(Node* node) { ++ Sw64OperandGenerator g(this); ++ Emit(kSw64Dctz, 
g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); ++} ++ ++ ++void InstructionSelector::VisitWord32Popcnt(Node* node) { ++ Sw64OperandGenerator g(this); ++ Emit(kSw64Popcnt, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0))); ++} ++ ++ ++void InstructionSelector::VisitWord64Popcnt(Node* node) { ++ Sw64OperandGenerator g(this); ++ Emit(kSw64Dpopcnt, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0))); ++} ++ ++ ++void InstructionSelector::VisitWord64Ror(Node* node) { ++ VisitRRO(this, kSw64Dror, node); ++} ++ ++ ++void InstructionSelector::VisitWord64Clz(Node* node) { ++ VisitRR(this, kSw64Dclz, node); ++} ++ ++ ++void InstructionSelector::VisitInt32Add(Node* node) { ++ Sw64OperandGenerator g(this); ++ Int32BinopMatcher m(node); ++ ++ // Select Lsa for (left + (left_of_right << imm)). ++ if (m.right().opcode() == IrOpcode::kWord32Shl && ++ CanCover(node, m.left().node()) && CanCover(node, m.right().node())) { ++ Int32BinopMatcher mright(m.right().node()); ++ if (mright.right().HasValue() && !m.left().HasValue()) { ++ int32_t shift_value = ++ static_cast(mright.right().Value()); ++ Emit(kSw64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()), ++ g.UseRegister(mright.left().node()), g.TempImmediate(shift_value)); ++ return; ++ } ++ } ++ ++ // Select Lsa for ((left_of_left << imm) + right). ++ if (m.left().opcode() == IrOpcode::kWord32Shl && ++ CanCover(node, m.right().node()) && CanCover(node, m.left().node())) { ++ Int32BinopMatcher mleft(m.left().node()); ++ if (mleft.right().HasValue() && !m.right().HasValue()) { ++ int32_t shift_value = static_cast(mleft.right().Value()); ++ Emit(kSw64Lsa, g.DefineAsRegister(node), ++ g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()), ++ g.TempImmediate(shift_value)); ++ return; ++ } ++ } ++ VisitBinop(this, node, kSw64Add, true, kSw64Add); ++} ++ ++ ++void InstructionSelector::VisitInt64Add(Node* node) { ++ Sw64OperandGenerator g(this); ++ Int64BinopMatcher m(node); ++ ++ // Select Dlsa for (left + (left_of_right << imm)). ++ if (m.right().opcode() == IrOpcode::kWord64Shl && ++ CanCover(node, m.left().node()) && CanCover(node, m.right().node())) { ++ Int64BinopMatcher mright(m.right().node()); ++ if (mright.right().HasValue() && !m.left().HasValue()) { ++ int32_t shift_value = static_cast(mright.right().Value()); ++ Emit(kSw64Dlsa, g.DefineAsRegister(node), ++ g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()), ++ g.TempImmediate(shift_value)); ++ return; ++ } ++ } ++ ++ // Select Dlsa for ((left_of_left << imm) + right). 
++ if (m.left().opcode() == IrOpcode::kWord64Shl && ++ CanCover(node, m.right().node()) && CanCover(node, m.left().node())) { ++ Int64BinopMatcher mleft(m.left().node()); ++ if (mleft.right().HasValue() && !m.right().HasValue()) { ++ int32_t shift_value = static_cast(mleft.right().Value()); ++ Emit(kSw64Dlsa, g.DefineAsRegister(node), ++ g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()), ++ g.TempImmediate(shift_value)); ++ return; ++ } ++ } ++ ++ VisitBinop(this, node, kSw64Dadd, true, kSw64Dadd); ++} ++ ++ ++void InstructionSelector::VisitInt32Sub(Node* node) { ++ VisitBinop(this, node, kSw64Sub); ++} ++ ++ ++void InstructionSelector::VisitInt64Sub(Node* node) { ++ VisitBinop(this, node, kSw64Dsub); ++} ++ ++ ++void InstructionSelector::VisitInt32Mul(Node* node) { ++ Sw64OperandGenerator g(this); ++ Int32BinopMatcher m(node); ++ if (m.right().HasValue() && m.right().Value() > 0) { ++ uint32_t value = static_cast(m.right().Value()); ++ if (base::bits::IsPowerOfTwo(value)) { ++ Emit(kSw64Shl | AddressingModeField::encode(kMode_None), ++ g.DefineAsRegister(node), g.UseRegister(m.left().node()), ++ g.TempImmediate(base::bits::WhichPowerOfTwo(value))); ++ return; ++ } ++ if (base::bits::IsPowerOfTwo(value - 1)) { ++ Emit(kSw64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()), ++ g.UseRegister(m.left().node()), ++ g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1))); ++ return; ++ } ++ if (base::bits::IsPowerOfTwo(value + 1)) { ++ InstructionOperand temp = g.TempRegister(); ++ Emit(kSw64Shl | AddressingModeField::encode(kMode_None), temp, ++ g.UseRegister(m.left().node()), ++ g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1))); ++ Emit(kSw64Sub | AddressingModeField::encode(kMode_None), ++ g.DefineAsRegister(node), temp, g.UseRegister(m.left().node())); ++ return; ++ } ++ } ++ Node* left = node->InputAt(0); ++ Node* right = node->InputAt(1); ++ if (CanCover(node, left) && CanCover(node, right)) { ++ if (left->opcode() == IrOpcode::kWord64Sar && ++ right->opcode() == IrOpcode::kWord64Sar) { ++ Int64BinopMatcher leftInput(left), rightInput(right); ++ if (leftInput.right().Is(32) && rightInput.right().Is(32)) { ++ // Combine untagging shifts with Dmul high. ++ Emit(kSw64DMulHigh, g.DefineSameAsFirst(node), ++ g.UseRegister(leftInput.left().node()), ++ g.UseRegister(rightInput.left().node())); ++ return; ++ } ++ } ++ } ++ VisitRRR(this, kSw64Mul, node); ++} ++ ++ ++void InstructionSelector::VisitInt32MulHigh(Node* node) { ++ VisitRRR(this, kSw64MulHigh, node); ++} ++ ++ ++void InstructionSelector::VisitUint32MulHigh(Node* node) { ++ VisitRRR(this, kSw64MulHighU, node); ++} ++ ++ ++void InstructionSelector::VisitInt64Mul(Node* node) { ++ Sw64OperandGenerator g(this); ++ Int64BinopMatcher m(node); ++ // TODO(dusmil): Add optimization for shifts larger than 32. ++ if (m.right().HasValue() && m.right().Value() > 0) { ++ uint32_t value = static_cast(m.right().Value()); ++ if (base::bits::IsPowerOfTwo(value)) { ++ Emit(kSw64Dshl | AddressingModeField::encode(kMode_None), ++ g.DefineAsRegister(node), g.UseRegister(m.left().node()), ++ g.TempImmediate(base::bits::WhichPowerOfTwo(value))); ++ return; ++ } ++ if (base::bits::IsPowerOfTwo(value - 1)) { ++ // Dlsa macro will handle the shifting value out of bound cases. 
++ Emit(kSw64Dlsa, g.DefineAsRegister(node), ++ g.UseRegister(m.left().node()), g.UseRegister(m.left().node()), ++ g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1))); ++ return; ++ } ++ if (base::bits::IsPowerOfTwo(value + 1)) { ++ InstructionOperand temp = g.TempRegister(); ++ Emit(kSw64Dshl | AddressingModeField::encode(kMode_None), temp, ++ g.UseRegister(m.left().node()), ++ g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1))); ++ Emit(kSw64Dsub | AddressingModeField::encode(kMode_None), ++ g.DefineAsRegister(node), temp, g.UseRegister(m.left().node())); ++ return; ++ } ++ } ++ Emit(kSw64Dmul, g.DefineAsRegister(node), g.UseRegister(m.left().node()), ++ g.UseRegister(m.right().node())); ++} ++ ++ ++void InstructionSelector::VisitInt32Div(Node* node) { ++ Sw64OperandGenerator g(this); ++ Int32BinopMatcher m(node); ++ Node* left = node->InputAt(0); ++ Node* right = node->InputAt(1); ++ if (CanCover(node, left) && CanCover(node, right)) { ++ if (left->opcode() == IrOpcode::kWord64Sar && ++ right->opcode() == IrOpcode::kWord64Sar) { ++ Int64BinopMatcher rightInput(right), leftInput(left); ++ if (rightInput.right().Is(32) && leftInput.right().Is(32)) { ++ // Combine both shifted operands with Ddiv. ++ Emit(kSw64Ddiv, g.DefineSameAsFirst(node), ++ g.UseRegister(leftInput.left().node()), ++ g.UseRegister(rightInput.left().node())); ++ return; ++ } ++ } ++ } ++ Emit(kSw64Div, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), ++ g.UseRegister(m.right().node())); ++} ++ ++ ++void InstructionSelector::VisitUint32Div(Node* node) { ++ Sw64OperandGenerator g(this); ++ Int32BinopMatcher m(node); ++ Emit(kSw64DivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), ++ g.UseRegister(m.right().node())); ++} ++ ++ ++void InstructionSelector::VisitInt32Mod(Node* node) { ++ Sw64OperandGenerator g(this); ++ Int32BinopMatcher m(node); ++ Node* left = node->InputAt(0); ++ Node* right = node->InputAt(1); ++ if (CanCover(node, left) && CanCover(node, right)) { ++ if (left->opcode() == IrOpcode::kWord64Sar && ++ right->opcode() == IrOpcode::kWord64Sar) { ++ Int64BinopMatcher rightInput(right), leftInput(left); ++ if (rightInput.right().Is(32) && leftInput.right().Is(32)) { ++ // Combine both shifted operands with Dmod. 
++ Emit(kSw64Dmod, g.DefineSameAsFirst(node), ++ g.UseRegister(leftInput.left().node()), ++ g.UseRegister(rightInput.left().node())); ++ return; ++ } ++ } ++ } ++ Emit(kSw64Mod, g.DefineAsRegister(node), g.UseRegister(m.left().node()), ++ g.UseRegister(m.right().node())); ++} ++ ++ ++void InstructionSelector::VisitUint32Mod(Node* node) { ++ Sw64OperandGenerator g(this); ++ Int32BinopMatcher m(node); ++ Emit(kSw64ModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()), ++ g.UseRegister(m.right().node())); ++} ++ ++ ++void InstructionSelector::VisitInt64Div(Node* node) { ++ Sw64OperandGenerator g(this); ++ Int64BinopMatcher m(node); ++ Emit(kSw64Ddiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), ++ g.UseRegister(m.right().node())); ++} ++ ++ ++void InstructionSelector::VisitUint64Div(Node* node) { ++ Sw64OperandGenerator g(this); ++ Int64BinopMatcher m(node); ++ Emit(kSw64DdivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), ++ g.UseRegister(m.right().node())); ++} ++ ++ ++void InstructionSelector::VisitInt64Mod(Node* node) { ++ Sw64OperandGenerator g(this); ++ Int64BinopMatcher m(node); ++ Emit(kSw64Dmod, g.DefineAsRegister(node), g.UseRegister(m.left().node()), ++ g.UseRegister(m.right().node())); ++} ++ ++ ++void InstructionSelector::VisitUint64Mod(Node* node) { ++ Sw64OperandGenerator g(this); ++ Int64BinopMatcher m(node); ++ Emit(kSw64DmodU, g.DefineAsRegister(node), g.UseRegister(m.left().node()), ++ g.UseRegister(m.right().node())); ++} ++ ++ ++void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) { ++ VisitRR(this, kSw64CvtDS, node); ++} ++ ++ ++void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) { ++ VisitRR(this, kSw64CvtSW, node); ++} ++ ++ ++void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) { ++ VisitRR(this, kSw64CvtSUw, node); ++} ++ ++ ++void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) { ++ VisitRR(this, kSw64CvtDW, node); ++} ++ ++void InstructionSelector::VisitChangeInt64ToFloat64(Node* node) { ++ VisitRR(this, kSw64CvtDL, node); ++} ++ ++void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) { ++ VisitRR(this, kSw64CvtDUw, node); ++} ++ ++ ++void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) { ++ VisitRR(this, kSw64TruncWS, node); ++} ++ ++void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) { ++ VisitRR(this, kSw64TruncUwS, node); ++} ++ ++void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) { ++ Sw64OperandGenerator g(this); ++ Node* value = node->InputAt(0); ++ // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction ++ // which does rounding and conversion to integer format. 
++ if (CanCover(node, value)) { ++ switch (value->opcode()) { ++ case IrOpcode::kFloat64RoundDown: ++ Emit(kSw64FloorWD, g.DefineAsRegister(node), ++ g.UseRegister(value->InputAt(0))); ++ return; ++ case IrOpcode::kFloat64RoundUp: ++ Emit(kSw64CeilWD, g.DefineAsRegister(node), ++ g.UseRegister(value->InputAt(0))); ++ return; ++ case IrOpcode::kFloat64RoundTiesEven: ++ Emit(kSw64RoundWD, g.DefineAsRegister(node), ++ g.UseRegister(value->InputAt(0))); ++ return; ++ case IrOpcode::kFloat64RoundTruncate: ++ Emit(kSw64TruncWD, g.DefineAsRegister(node), ++ g.UseRegister(value->InputAt(0))); ++ return; ++ default: ++ break; ++ } ++ if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) { ++ Node* next = value->InputAt(0); ++ if (CanCover(value, next)) { ++ // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP)) ++ switch (next->opcode()) { ++ case IrOpcode::kFloat32RoundDown: ++ Emit(kSw64FloorWS, g.DefineAsRegister(node), ++ g.UseRegister(next->InputAt(0))); ++ return; ++ case IrOpcode::kFloat32RoundUp: ++ Emit(kSw64CeilWS, g.DefineAsRegister(node), ++ g.UseRegister(next->InputAt(0))); ++ return; ++ case IrOpcode::kFloat32RoundTiesEven: ++ Emit(kSw64RoundWS, g.DefineAsRegister(node), ++ g.UseRegister(next->InputAt(0))); ++ return; ++ case IrOpcode::kFloat32RoundTruncate: ++ Emit(kSw64TruncWS, g.DefineAsRegister(node), ++ g.UseRegister(next->InputAt(0))); ++ return; ++ default: ++ Emit(kSw64TruncWS, g.DefineAsRegister(node), ++ g.UseRegister(value->InputAt(0))); ++ return; ++ } ++ } else { ++ // Match float32 -> float64 -> int32 representation change path. ++ Emit(kSw64TruncWS, g.DefineAsRegister(node), ++ g.UseRegister(value->InputAt(0))); ++ return; ++ } ++ } ++ } ++ VisitRR(this, kSw64TruncWD, node); ++} ++ ++void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) { ++ VisitRR(this, kSw64TruncLD, node); ++} ++ ++void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) { ++ VisitRR(this, kSw64TruncUwD, node); ++} ++ ++void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) { ++ VisitRR(this, kSw64TruncUlD, node); ++} ++ ++void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) { ++ VisitRR(this, kSw64TruncUwD, node); ++} ++ ++void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) { ++ VisitRR(this, kSw64TruncLD, node); ++} ++ ++void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) { ++ Sw64OperandGenerator g(this); ++ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))}; ++ InstructionOperand outputs[2]; ++ size_t output_count = 0; ++ outputs[output_count++] = g.DefineAsRegister(node); ++ ++ Node* success_output = NodeProperties::FindProjection(node, 1); ++ if (success_output) { ++ outputs[output_count++] = g.DefineAsRegister(success_output); ++ } ++ ++ this->Emit(kSw64TruncLS, output_count, outputs, 1, inputs); ++} ++ ++ ++void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) { ++ Sw64OperandGenerator g(this); ++ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))}; ++ InstructionOperand outputs[2]; ++ size_t output_count = 0; ++ outputs[output_count++] = g.DefineAsRegister(node); ++ ++ Node* success_output = NodeProperties::FindProjection(node, 1); ++ if (success_output) { ++ outputs[output_count++] = g.DefineAsRegister(success_output); ++ } ++ ++ Emit(kSw64TruncLD, output_count, outputs, 1, inputs); ++} ++ ++ ++void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) { ++ Sw64OperandGenerator g(this); ++ InstructionOperand inputs[] = 
{g.UseRegister(node->InputAt(0))}; ++ InstructionOperand outputs[2]; ++ size_t output_count = 0; ++ outputs[output_count++] = g.DefineAsRegister(node); ++ ++ Node* success_output = NodeProperties::FindProjection(node, 1); ++ if (success_output) { ++ outputs[output_count++] = g.DefineAsRegister(success_output); ++ } ++ ++ Emit(kSw64TruncUlS, output_count, outputs, 1, inputs); ++} ++ ++ ++void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) { ++ Sw64OperandGenerator g(this); ++ ++ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))}; ++ InstructionOperand outputs[2]; ++ size_t output_count = 0; ++ outputs[output_count++] = g.DefineAsRegister(node); ++ ++ Node* success_output = NodeProperties::FindProjection(node, 1); ++ if (success_output) { ++ outputs[output_count++] = g.DefineAsRegister(success_output); ++ } ++ ++ Emit(kSw64TruncUlD, output_count, outputs, 1, inputs); ++} ++ ++void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) { ++ UNIMPLEMENTED(); ++} ++ ++void InstructionSelector::VisitChangeInt32ToInt64(Node* node) { ++ Node* value = node->InputAt(0); ++ if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) { ++ // Generate sign-extending load. ++ LoadRepresentation load_rep = LoadRepresentationOf(value->op()); ++ InstructionCode opcode = kArchNop; ++ switch (load_rep.representation()) { ++ case MachineRepresentation::kBit: // Fall through. ++ case MachineRepresentation::kWord8: ++ opcode = load_rep.IsUnsigned() ? kSw64Ldbu : kSw64Ldb; ++ break; ++ case MachineRepresentation::kWord16: ++ opcode = load_rep.IsUnsigned() ? kSw64Ldhu : kSw64Ldh; ++ break; ++ case MachineRepresentation::kWord32: ++ opcode = kSw64Ldw; ++ break; ++ default: ++ UNREACHABLE(); ++ return; ++ } ++ EmitLoad(this, value, opcode, node); ++ } else { ++ Sw64OperandGenerator g(this); ++ Emit(kSw64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), ++ g.TempImmediate(0)); ++ } ++} ++ ++ ++bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) { ++ DCHECK_NE(node->opcode(), IrOpcode::kPhi); ++ switch (node->opcode()) { ++ // 32-bit operations will write their result in a 64 bit register, ++ // clearing the top 32 bits of the destination register. ++ case IrOpcode::kUint32Div: ++ case IrOpcode::kUint32Mod: ++ case IrOpcode::kUint32MulHigh: ++ return true; ++ case IrOpcode::kLoad: { ++ LoadRepresentation load_rep = LoadRepresentationOf(node->op()); ++ if (load_rep.IsUnsigned()) { ++ switch (load_rep.representation()) { ++ case MachineRepresentation::kWord8: ++ case MachineRepresentation::kWord16: ++ case MachineRepresentation::kWord32: ++ return true; ++ default: ++ return false; ++ } ++ } ++ return false; ++ } ++ default: ++ return false; ++ } ++} ++ ++void InstructionSelector::VisitChangeUint32ToUint64(Node* node) { ++ Sw64OperandGenerator g(this); ++#ifdef SW64 ++ //TODO: It is an optimizaion! 
++ ++#else ++ Node* value = node->InputAt(0); ++ if (ZeroExtendsWord32ToWord64(value)) { ++ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value)); ++ return; ++ } ++#endif ++ Emit(kSw64Dext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), ++ g.TempImmediate(0), g.TempImmediate(32)); ++} ++ ++void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) { ++ Sw64OperandGenerator g(this); ++ ++ Node* value = node->InputAt(0); ++ if (CanCover(node, value)) { ++ switch (value->opcode()) { ++ case IrOpcode::kWord64Sar: { ++ if (CanCoverTransitively(node, value, value->InputAt(0)) && ++ TryEmitExtendingLoad(this, value, node)) { ++ return; ++ } else { ++ Int64BinopMatcher m(value); ++ if (m.right().IsInRange(32, 63)) { ++ // After smi untagging no need for truncate. Combine sequence. ++ Emit(kSw64Dsar, g.DefineAsRegister(node), ++ g.UseRegister(m.left().node()), ++ g.UseImmediate(m.right().node())); ++ return; ++ } ++ } ++ break; ++ } ++ default: ++ break; ++ } ++ } ++ Emit(kSw64Ext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), ++ g.TempImmediate(0), g.TempImmediate(32)); ++} ++ ++ ++void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) { ++ Sw64OperandGenerator g(this); ++ Node* value = node->InputAt(0); ++ // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding ++ // instruction. ++ if (CanCover(node, value) && ++ value->opcode() == IrOpcode::kChangeInt32ToFloat64) { ++ Emit(kSw64CvtSW, g.DefineAsRegister(node), ++ g.UseRegister(value->InputAt(0))); ++ return; ++ } ++ VisitRR(this, kSw64CvtSD, node); ++} ++ ++void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) { ++ VisitRR(this, kArchTruncateDoubleToI, node); ++} ++ ++void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) { ++ VisitRR(this, kSw64TruncWD, node); ++} ++ ++void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) { ++ VisitRR(this, kSw64CvtSL, node); ++} ++ ++ ++void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) { ++ VisitRR(this, kSw64CvtDL, node); ++} ++ ++ ++void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) { ++ VisitRR(this, kSw64CvtSUl, node); ++} ++ ++ ++void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) { ++ VisitRR(this, kSw64CvtDUl, node); ++} ++ ++ ++void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) { ++#ifdef SW64 ++ VisitRR(this, kSw64BitcastWS, node); ++#else ++ VisitRR(this, kSw64Float64ExtractLowWord32, node); ++#endif ++} ++ ++ ++void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) { ++ VisitRR(this, kSw64BitcastDL, node); ++} ++ ++ ++void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) { ++#ifdef SW64 ++ VisitRR(this, kSw64BitcastSW, node); ++#else ++ Sw64OperandGenerator g(this); ++ Emit(kSw64Float64InsertLowWord32, g.DefineAsRegister(node), ++ ImmediateOperand(ImmediateOperand::INLINE, 0), ++ g.UseRegister(node->InputAt(0))); ++#endif ++} ++ ++ ++void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) { ++ VisitRR(this, kSw64BitcastLD, node); ++} ++ ++ ++void InstructionSelector::VisitFloat32Add(Node* node) { ++ // Optimization with Madd.S(z, x, y) is intentionally removed. ++ // See explanation for madd_s in assembler-sw64.cc. ++ VisitRRR(this, kSw64AddS, node); ++} ++ ++ ++void InstructionSelector::VisitFloat64Add(Node* node) { ++ // Optimization with Madd.D(z, x, y) is intentionally removed. ++ // See explanation for madd_d in assembler-sw64.cc. 
++ VisitRRR(this, kSw64AddD, node); ++} ++ ++ ++void InstructionSelector::VisitFloat32Sub(Node* node) { ++ // Optimization with Msub.S(z, x, y) is intentionally removed. ++ // See explanation for madd_s in assembler-sw64.cc. ++ VisitRRR(this, kSw64SubS, node); ++} ++ ++void InstructionSelector::VisitFloat64Sub(Node* node) { ++ // Optimization with Msub.D(z, x, y) is intentionally removed. ++ // See explanation for madd_d in assembler-sw64.cc. ++ VisitRRR(this, kSw64SubD, node); ++} ++ ++void InstructionSelector::VisitFloat32Mul(Node* node) { ++ VisitRRR(this, kSw64MulS, node); ++} ++ ++ ++void InstructionSelector::VisitFloat64Mul(Node* node) { ++ VisitRRR(this, kSw64MulD, node); ++} ++ ++ ++void InstructionSelector::VisitFloat32Div(Node* node) { ++ VisitRRR(this, kSw64DivS, node); ++} ++ ++ ++void InstructionSelector::VisitFloat64Div(Node* node) { ++ VisitRRR(this, kSw64DivD, node); ++} ++ ++ ++void InstructionSelector::VisitFloat64Mod(Node* node) { ++ Sw64OperandGenerator g(this); ++ Emit(kSw64ModD, g.DefineAsFixed(node, f0), ++ g.UseFixed(node->InputAt(0), f16), ++ g.UseFixed(node->InputAt(1), f17))->MarkAsCall(); ++} ++ ++void InstructionSelector::VisitFloat32Max(Node* node) { ++ Sw64OperandGenerator g(this); ++ Emit(kSw64Float32Max, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); ++} ++ ++void InstructionSelector::VisitFloat64Max(Node* node) { ++ Sw64OperandGenerator g(this); ++ Emit(kSw64Float64Max, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); ++} ++ ++void InstructionSelector::VisitFloat32Min(Node* node) { ++ Sw64OperandGenerator g(this); ++ Emit(kSw64Float32Min, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); ++} ++ ++void InstructionSelector::VisitFloat64Min(Node* node) { ++ Sw64OperandGenerator g(this); ++ Emit(kSw64Float64Min, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); ++} ++ ++ ++void InstructionSelector::VisitFloat32Abs(Node* node) { ++ VisitRR(this, kSw64AbsS, node); ++} ++ ++ ++void InstructionSelector::VisitFloat64Abs(Node* node) { ++ VisitRR(this, kSw64AbsD, node); ++} ++ ++void InstructionSelector::VisitFloat32Sqrt(Node* node) { ++ VisitRR(this, kSw64SqrtS, node); ++} ++ ++ ++void InstructionSelector::VisitFloat64Sqrt(Node* node) { ++ VisitRR(this, kSw64SqrtD, node); ++} ++ ++ ++void InstructionSelector::VisitFloat32RoundDown(Node* node) { ++ VisitRR(this, kSw64Float32RoundDown, node); ++} ++ ++ ++void InstructionSelector::VisitFloat64RoundDown(Node* node) { ++ VisitRR(this, kSw64Float64RoundDown, node); ++} ++ ++ ++void InstructionSelector::VisitFloat32RoundUp(Node* node) { ++ VisitRR(this, kSw64Float32RoundUp, node); ++} ++ ++ ++void InstructionSelector::VisitFloat64RoundUp(Node* node) { ++ VisitRR(this, kSw64Float64RoundUp, node); ++} ++ ++ ++void InstructionSelector::VisitFloat32RoundTruncate(Node* node) { ++ VisitRR(this, kSw64Float32RoundTruncate, node); ++} ++ ++ ++void InstructionSelector::VisitFloat64RoundTruncate(Node* node) { ++ VisitRR(this, kSw64Float64RoundTruncate, node); ++} ++ ++ ++void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) { ++ UNREACHABLE(); ++} ++ ++ ++void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) { ++ VisitRR(this, kSw64Float32RoundTiesEven, node); ++} ++ ++ ++void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) { ++ VisitRR(this, kSw64Float64RoundTiesEven, node); ++} ++ ++void 
InstructionSelector::VisitFloat32Neg(Node* node) { ++ VisitRR(this, kSw64NegS, node); ++} ++ ++void InstructionSelector::VisitFloat64Neg(Node* node) { ++ VisitRR(this, kSw64NegD, node); ++} ++ ++void InstructionSelector::VisitFloat64Ieee754Binop(Node* node, ++ InstructionCode opcode) { ++ Sw64OperandGenerator g(this); ++ Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f2), ++ g.UseFixed(node->InputAt(1), f4)) ++ ->MarkAsCall(); ++} ++ ++void InstructionSelector::VisitFloat64Ieee754Unop(Node* node, ++ InstructionCode opcode) { ++ Sw64OperandGenerator g(this); ++ Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f16)) ++ ->MarkAsCall(); ++} ++ ++void InstructionSelector::EmitPrepareArguments( ++ ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor, ++ Node* node) { ++ Sw64OperandGenerator g(this); ++ ++ // Prepare for C function call. ++ if (call_descriptor->IsCFunctionCall()) { ++ Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>( ++ call_descriptor->ParameterCount())), ++ 0, nullptr, 0, nullptr); ++ ++ // Poke any stack arguments. ++ int slot = kCArgSlotCount; ++ for (PushParameter input : (*arguments)) { ++ Emit(kSw64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node), ++ g.TempImmediate(slot << kSystemPointerSizeLog2)); ++ ++slot; ++ } ++ } else { ++ int push_count = static_cast<int>(call_descriptor->StackParameterCount()); ++ if (push_count > 0) { ++ // Calculate needed space ++ int stack_size = 0; ++ for (PushParameter input : (*arguments)) { ++ if (input.node) { ++ stack_size += input.location.GetSizeInPointers(); ++ } ++ } ++ Emit(kSw64StackClaim, g.NoOutput(), ++ g.TempImmediate(stack_size << kSystemPointerSizeLog2)); ++ } ++ for (size_t n = 0; n < arguments->size(); ++n) { ++ PushParameter input = (*arguments)[n]; ++ if (input.node) { ++ Emit(kSw64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node), ++ g.TempImmediate(static_cast<int>(n << kSystemPointerSizeLog2))); ++ } ++ } ++ } ++} ++ ++void InstructionSelector::EmitPrepareResults( ++ ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor, ++ Node* node) { ++ Sw64OperandGenerator g(this); ++ ++ int reverse_slot = 1; ++ for (PushParameter output : *results) { ++ if (!output.location.IsCallerFrameSlot()) continue; ++ // Skip any alignment holes in nodes. ++ if (output.node != nullptr) { ++ DCHECK(!call_descriptor->IsCFunctionCall()); ++ if (output.location.GetType() == MachineType::Float32()) { ++ MarkAsFloat32(output.node); ++ } else if (output.location.GetType() == MachineType::Float64()) { ++ MarkAsFloat64(output.node); ++ } else if (output.location.GetType() == MachineType::Simd128()) { ++ MarkAsSimd128(output.node); ++ } ++ Emit(kSw64Peek, g.DefineAsRegister(output.node), ++ g.UseImmediate(reverse_slot)); ++ } ++ reverse_slot += output.location.GetSizeInPointers(); ++ } ++} ++ ++bool InstructionSelector::IsTailCallAddressImmediate() { return false; } ++ ++int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; } ++ ++void InstructionSelector::VisitUnalignedLoad(Node* node) { ++ LoadRepresentation load_rep = LoadRepresentationOf(node->op()); ++ Sw64OperandGenerator g(this); ++ Node* base = node->InputAt(0); ++ Node* index = node->InputAt(1); ++ ++ ArchOpcode opcode = kArchNop; ++ switch (load_rep.representation()) { ++ case MachineRepresentation::kFloat32: ++ opcode = kSw64Uflds; ++ break; ++ case MachineRepresentation::kFloat64: ++ opcode = kSw64Ufldd; ++ break; ++ case MachineRepresentation::kWord8: ++ opcode = load_rep.IsUnsigned() ?
kSw64Ldbu : kSw64Ldb; ++ break; ++ case MachineRepresentation::kWord16: ++ opcode = load_rep.IsUnsigned() ? kSw64Uldhu : kSw64Uldh; ++ break; ++ case MachineRepresentation::kWord32: ++ opcode = load_rep.IsUnsigned() ? kSw64Uldwu : kSw64Uldw; ++ break; ++ case MachineRepresentation::kTaggedSigned: // Fall through. ++ case MachineRepresentation::kTaggedPointer: // Fall through. ++ case MachineRepresentation::kTagged: // Fall through. ++ case MachineRepresentation::kWord64: ++ opcode = kSw64Uldl; ++ break; ++ case MachineRepresentation::kSimd128: ++ opcode = kSw64MsaLd; ++ break; ++ case MachineRepresentation::kBit: // Fall through. ++ case MachineRepresentation::kCompressedPointer: // Fall through. ++ case MachineRepresentation::kCompressed: // Fall through. ++ case MachineRepresentation::kNone: ++ UNREACHABLE(); ++ } ++ ++ if (g.CanBeImmediate(index, opcode)) { ++ Emit(opcode | AddressingModeField::encode(kMode_MRI), ++ g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index)); ++ } else { ++ InstructionOperand addr_reg = g.TempRegister(); ++ Emit(kSw64Dadd | AddressingModeField::encode(kMode_None), addr_reg, ++ g.UseRegister(index), g.UseRegister(base)); ++ // Emit desired load opcode, using temp addr_reg. ++ Emit(opcode | AddressingModeField::encode(kMode_MRI), ++ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0)); ++ } ++} ++ ++void InstructionSelector::VisitUnalignedStore(Node* node) { ++ Sw64OperandGenerator g(this); ++ Node* base = node->InputAt(0); ++ Node* index = node->InputAt(1); ++ Node* value = node->InputAt(2); ++ ++ UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op()); ++ ArchOpcode opcode = kArchNop; ++ switch (rep) { ++ case MachineRepresentation::kFloat32: ++ opcode = kSw64Ufsts; ++ break; ++ case MachineRepresentation::kFloat64: ++ opcode = kSw64Ufstd; ++ break; ++ case MachineRepresentation::kWord8: ++ opcode = kSw64Stb; ++ break; ++ case MachineRepresentation::kWord16: ++ opcode = kSw64Usth; ++ break; ++ case MachineRepresentation::kWord32: ++ opcode = kSw64Ustw; ++ break; ++ case MachineRepresentation::kTaggedSigned: // Fall through. ++ case MachineRepresentation::kTaggedPointer: // Fall through. ++ case MachineRepresentation::kTagged: // Fall through. ++ case MachineRepresentation::kWord64: ++ opcode = kSw64Ustl; ++ break; ++ case MachineRepresentation::kSimd128: ++ opcode = kSw64MsaSt; ++ break; ++ case MachineRepresentation::kBit: // Fall through. ++ case MachineRepresentation::kCompressedPointer: // Fall through. ++ case MachineRepresentation::kCompressed: // Fall through. ++ case MachineRepresentation::kNone: ++ UNREACHABLE(); ++ } ++ ++ if (g.CanBeImmediate(index, opcode)) { ++ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(), ++ g.UseRegister(base), g.UseImmediate(index), ++ g.UseRegisterOrImmediateZero(value)); ++ } else { ++ InstructionOperand addr_reg = g.TempRegister(); ++ Emit(kSw64Dadd | AddressingModeField::encode(kMode_None), addr_reg, ++ g.UseRegister(index), g.UseRegister(base)); ++ // Emit desired store opcode, using temp addr_reg. ++ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(), ++ addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value)); ++ } ++} ++ ++namespace { ++ ++// Shared routine for multiple compare operations. 
++static void VisitCompare(InstructionSelector* selector, InstructionCode opcode, ++ InstructionOperand left, InstructionOperand right, ++ FlagsContinuation* cont) { ++ selector->EmitWithContinuation(opcode, left, right, cont); ++} ++ ++ ++// Shared routine for multiple float32 compare operations. ++void VisitFloat32Compare(InstructionSelector* selector, Node* node, ++ FlagsContinuation* cont) { ++ Sw64OperandGenerator g(selector); ++ Float32BinopMatcher m(node); ++ InstructionOperand lhs, rhs; ++ ++ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node()) ++ : g.UseRegister(m.left().node()); ++ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node()) ++ : g.UseRegister(m.right().node()); ++ VisitCompare(selector, kSw64CmpS, lhs, rhs, cont); ++} ++ ++ ++// Shared routine for multiple float64 compare operations. ++void VisitFloat64Compare(InstructionSelector* selector, Node* node, ++ FlagsContinuation* cont) { ++ Sw64OperandGenerator g(selector); ++ Float64BinopMatcher m(node); ++ InstructionOperand lhs, rhs; ++ ++ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node()) ++ : g.UseRegister(m.left().node()); ++ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node()) ++ : g.UseRegister(m.right().node()); ++ VisitCompare(selector, kSw64CmpD, lhs, rhs, cont); ++} ++ ++ ++// Shared routine for multiple word compare operations. ++void VisitWordCompare(InstructionSelector* selector, Node* node, ++ InstructionCode opcode, FlagsContinuation* cont, ++ bool commutative) { ++ Sw64OperandGenerator g(selector); ++ Node* left = node->InputAt(0); ++ Node* right = node->InputAt(1); ++ ++ // Match immediates on left or right side of comparison. ++ if (g.CanBeImmediate(right, opcode)) { ++ if (opcode == kSw64Tst) { ++ VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right), ++ cont); ++ } else { ++ switch (cont->condition()) { ++ case kEqual: ++ case kNotEqual: ++ if (cont->IsSet()) { ++ VisitCompare(selector, opcode, g.UseRegister(left), ++ g.UseImmediate(right), cont); ++ } else { ++ VisitCompare(selector, opcode, g.UseRegister(left), ++ g.UseRegister(right), cont); ++ } ++ break; ++ case kSignedLessThan: ++ case kSignedGreaterThanOrEqual: ++ case kUnsignedLessThan: ++ case kUnsignedGreaterThanOrEqual: ++ VisitCompare(selector, opcode, g.UseRegister(left), ++ g.UseImmediate(right), cont); ++ break; ++ default: ++ VisitCompare(selector, opcode, g.UseRegister(left), ++ g.UseRegister(right), cont); ++ } ++ } ++ } else if (g.CanBeImmediate(left, opcode)) { ++ if (!commutative) cont->Commute(); ++ if (opcode == kSw64Tst) { ++ VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left), ++ cont); ++ } else { ++ switch (cont->condition()) { ++ case kEqual: ++ case kNotEqual: ++ if (cont->IsSet()) { ++ VisitCompare(selector, opcode, g.UseRegister(right), ++ g.UseImmediate(left), cont); ++ } else { ++ VisitCompare(selector, opcode, g.UseRegister(right), ++ g.UseRegister(left), cont); ++ } ++ break; ++ case kSignedLessThan: ++ case kSignedGreaterThanOrEqual: ++ case kUnsignedLessThan: ++ case kUnsignedGreaterThanOrEqual: ++ VisitCompare(selector, opcode, g.UseRegister(right), ++ g.UseImmediate(left), cont); ++ break; ++ default: ++ VisitCompare(selector, opcode, g.UseRegister(right), ++ g.UseRegister(left), cont); ++ } ++ } ++ } else { ++ VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right), ++ cont); ++ } ++} ++ ++bool IsNodeUnsigned(Node* n) { ++ NodeMatcher m(n); ++ ++ if (m.IsLoad() || m.IsUnalignedLoad() || m.IsPoisonedLoad() || ++ m.IsProtectedLoad() 
|| m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) { ++ LoadRepresentation load_rep = LoadRepresentationOf(n->op()); ++ return load_rep.IsUnsigned(); ++ } else { ++ return m.IsUint32Div() || m.IsUint32LessThan() || ++ m.IsUint32LessThanOrEqual() || m.IsUint32Mod() || ++ m.IsUint32MulHigh() || m.IsChangeFloat64ToUint32() || ++ m.IsTruncateFloat64ToUint32() || m.IsTruncateFloat32ToUint32(); ++ } ++} ++ ++// Shared routine for multiple word compare operations. ++void VisitFullWord32Compare(InstructionSelector* selector, Node* node, ++ InstructionCode opcode, FlagsContinuation* cont) { ++ Sw64OperandGenerator g(selector); ++ InstructionOperand leftOp = g.TempRegister(); ++ InstructionOperand rightOp = g.TempRegister(); ++ ++ selector->Emit(kSw64Dshl, leftOp, g.UseRegister(node->InputAt(0)), ++ g.TempImmediate(32)); ++ selector->Emit(kSw64Dshl, rightOp, g.UseRegister(node->InputAt(1)), ++ g.TempImmediate(32)); ++ ++ VisitCompare(selector, opcode, leftOp, rightOp, cont); ++} ++ ++void VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node, ++ InstructionCode opcode, ++ FlagsContinuation* cont) { ++ if (FLAG_debug_code) { ++ Sw64OperandGenerator g(selector); ++ InstructionOperand leftOp = g.TempRegister(); ++ InstructionOperand rightOp = g.TempRegister(); ++ InstructionOperand optimizedResult = g.TempRegister(); ++ InstructionOperand fullResult = g.TempRegister(); ++ FlagsCondition condition = cont->condition(); ++ InstructionCode testOpcode = opcode | ++ FlagsConditionField::encode(condition) | ++ FlagsModeField::encode(kFlags_set); ++ ++ selector->Emit(testOpcode, optimizedResult, g.UseRegister(node->InputAt(0)), ++ g.UseRegister(node->InputAt(1))); ++ ++ selector->Emit(kSw64Dshl, leftOp, g.UseRegister(node->InputAt(0)), ++ g.TempImmediate(32)); ++ selector->Emit(kSw64Dshl, rightOp, g.UseRegister(node->InputAt(1)), ++ g.TempImmediate(32)); ++ selector->Emit(testOpcode, fullResult, leftOp, rightOp); ++ ++ selector->Emit( ++ kSw64AssertEqual, g.NoOutput(), optimizedResult, fullResult, ++ g.TempImmediate( ++ static_cast(AbortReason::kUnsupportedNonPrimitiveCompare))); ++ } ++ ++ VisitWordCompare(selector, node, opcode, cont, false); ++} ++ ++void VisitWord32Compare(InstructionSelector* selector, Node* node, ++ FlagsContinuation* cont) { ++ // SW64 doesn't support Word32 compare instructions. Instead it relies ++ // that the values in registers are correctly sign-extended and uses ++ // Word64 comparison instead. This behavior is correct in most cases, ++ // but doesn't work when comparing signed with unsigned operands. ++ // We could simulate full Word32 compare in all cases but this would ++ // create an unnecessary overhead since unsigned integers are rarely ++ // used in JavaScript. ++ // The solution proposed here tries to match a comparison of signed ++ // with unsigned operand, and perform full Word32Compare only ++ // in those cases. Unfortunately, the solution is not complete because ++ // it might skip cases where Word32 full compare is needed, so ++ // basically it is a hack. ++ // When call to a host function in simulator, if the function return a ++ // int32 value, the simulator do not sign-extended to int64 because in ++ // simulator we do not know the function whether return a int32 or int64. ++ // so we need do a full word32 compare in this case. 
++#ifndef USE_SIMULATOR ++ if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1))) { ++#else ++ if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1)) || ++ node->InputAt(0)->opcode() == IrOpcode::kCall || ++ node->InputAt(1)->opcode() == IrOpcode::kCall ) { ++#endif ++ VisitFullWord32Compare(selector, node, kSw64Cmp, cont); ++ } else { ++ VisitOptimizedWord32Compare(selector, node, kSw64Cmp, cont); ++ } ++} ++ ++ ++void VisitWord64Compare(InstructionSelector* selector, Node* node, ++ FlagsContinuation* cont) { ++ VisitWordCompare(selector, node, kSw64Cmp, cont, false); ++} ++ ++ ++ ++void EmitWordCompareZero(InstructionSelector* selector, Node* value, ++ FlagsContinuation* cont) { ++ Sw64OperandGenerator g(selector); ++ selector->EmitWithContinuation(kSw64Cmp, g.UseRegister(value), ++ g.TempImmediate(0), cont); ++} ++ ++void VisitAtomicLoad(InstructionSelector* selector, Node* node, ++ ArchOpcode opcode) { ++ Sw64OperandGenerator g(selector); ++ Node* base = node->InputAt(0); ++ Node* index = node->InputAt(1); ++ if (g.CanBeImmediate(index, opcode)) { ++ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI), ++ g.DefineAsRegister(node), g.UseRegister(base), ++ g.UseImmediate(index)); ++ } else { ++ InstructionOperand addr_reg = g.TempRegister(); ++ selector->Emit(kSw64Dadd | AddressingModeField::encode(kMode_None), ++ addr_reg, g.UseRegister(index), g.UseRegister(base)); ++ // Emit desired load opcode, using temp addr_reg. ++ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI), ++ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0)); ++ } ++} ++ ++void VisitAtomicStore(InstructionSelector* selector, Node* node, ++ ArchOpcode opcode) { ++ Sw64OperandGenerator g(selector); ++ Node* base = node->InputAt(0); ++ Node* index = node->InputAt(1); ++ Node* value = node->InputAt(2); ++ ++ if (g.CanBeImmediate(index, opcode)) { ++ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI), ++ g.NoOutput(), g.UseRegister(base), g.UseImmediate(index), ++ g.UseRegisterOrImmediateZero(value)); ++ } else { ++ InstructionOperand addr_reg = g.TempRegister(); ++ selector->Emit(kSw64Dadd | AddressingModeField::encode(kMode_None), ++ addr_reg, g.UseRegister(index), g.UseRegister(base)); ++ // Emit desired store opcode, using temp addr_reg. 
++ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI), ++ g.NoOutput(), addr_reg, g.TempImmediate(0), ++ g.UseRegisterOrImmediateZero(value)); ++ } ++} ++ ++void VisitAtomicExchange(InstructionSelector* selector, Node* node, ++ ArchOpcode opcode) { ++ Sw64OperandGenerator g(selector); ++ Node* base = node->InputAt(0); ++ Node* index = node->InputAt(1); ++ Node* value = node->InputAt(2); ++ ++ AddressingMode addressing_mode = kMode_MRI; ++ InstructionOperand inputs[3]; ++ size_t input_count = 0; ++ inputs[input_count++] = g.UseUniqueRegister(base); ++ inputs[input_count++] = g.UseUniqueRegister(index); ++ inputs[input_count++] = g.UseUniqueRegister(value); ++ InstructionOperand outputs[1]; ++ outputs[0] = g.UseUniqueRegister(node); ++ InstructionOperand temp[3]; ++ temp[0] = g.TempRegister(); ++ temp[1] = g.TempRegister(); ++ temp[2] = g.TempRegister(); ++ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); ++ selector->Emit(code, 1, outputs, input_count, inputs, 3, temp); ++} ++ ++void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node, ++ ArchOpcode opcode) { ++ Sw64OperandGenerator g(selector); ++ Node* base = node->InputAt(0); ++ Node* index = node->InputAt(1); ++ Node* old_value = node->InputAt(2); ++ Node* new_value = node->InputAt(3); ++ ++ AddressingMode addressing_mode = kMode_MRI; ++ InstructionOperand inputs[4]; ++ size_t input_count = 0; ++ inputs[input_count++] = g.UseUniqueRegister(base); ++ inputs[input_count++] = g.UseUniqueRegister(index); ++ inputs[input_count++] = g.UseUniqueRegister(old_value); ++ inputs[input_count++] = g.UseUniqueRegister(new_value); ++ InstructionOperand outputs[1]; ++ outputs[0] = g.UseUniqueRegister(node); ++ InstructionOperand temp[3]; ++ temp[0] = g.TempRegister(); ++ temp[1] = g.TempRegister(); ++ temp[2] = g.TempRegister(); ++ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); ++ selector->Emit(code, 1, outputs, input_count, inputs, 3, temp); ++} ++ ++void VisitAtomicBinop(InstructionSelector* selector, Node* node, ++ ArchOpcode opcode) { ++ Sw64OperandGenerator g(selector); ++ Node* base = node->InputAt(0); ++ Node* index = node->InputAt(1); ++ Node* value = node->InputAt(2); ++ ++ AddressingMode addressing_mode = kMode_MRI; ++ InstructionOperand inputs[3]; ++ size_t input_count = 0; ++ inputs[input_count++] = g.UseUniqueRegister(base); ++ inputs[input_count++] = g.UseUniqueRegister(index); ++ inputs[input_count++] = g.UseUniqueRegister(value); ++ InstructionOperand outputs[1]; ++ outputs[0] = g.UseUniqueRegister(node); ++ InstructionOperand temps[4]; ++ temps[0] = g.TempRegister(); ++ temps[1] = g.TempRegister(); ++ temps[2] = g.TempRegister(); ++ temps[3] = g.TempRegister(); ++ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); ++ selector->Emit(code, 1, outputs, input_count, inputs, 4, temps); ++} ++ ++} // namespace ++ ++void InstructionSelector::VisitStackPointerGreaterThan( ++ Node* node, FlagsContinuation* cont) { ++ StackCheckKind kind = StackCheckKindOf(node->op()); ++ InstructionCode opcode = ++ kArchStackPointerGreaterThan | MiscField::encode(static_cast(kind)); ++ ++ Sw64OperandGenerator g(this); ++ ++ // No outputs. ++ InstructionOperand* const outputs = nullptr; ++ const int output_count = 0; ++ ++ // Applying an offset to this stack check requires a temp register. Offsets ++ // are only applied to the first stack check. 
If applying an offset, we must ++ // ensure the input and temp registers do not alias, thus kUniqueRegister. ++ InstructionOperand temps[] = {g.TempRegister()}; ++ const int temp_count = (kind == StackCheckKind::kJSFunctionEntry ? 1 : 0); ++ const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry) ++ ? OperandGenerator::kUniqueRegister ++ : OperandGenerator::kRegister; ++ ++ Node* const value = node->InputAt(0); ++ InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)}; ++ static constexpr int input_count = arraysize(inputs); ++ ++ EmitWithContinuation(opcode, output_count, outputs, input_count, inputs, ++ temp_count, temps, cont); ++} ++ ++// Shared routine for word comparisons against zero. ++void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, ++ FlagsContinuation* cont) { ++ // Try to combine with comparisons against 0 by simply inverting the branch. ++ while (CanCover(user, value)) { ++ if (value->opcode() == IrOpcode::kWord32Equal) { ++ Int32BinopMatcher m(value); ++ if (!m.right().Is(0)) break; ++ user = value; ++ value = m.left().node(); ++ } else if (value->opcode() == IrOpcode::kWord64Equal) { ++ Int64BinopMatcher m(value); ++ if (!m.right().Is(0)) break; ++ user = value; ++ value = m.left().node(); ++ } else { ++ break; ++ } ++ ++ cont->Negate(); ++ } ++ ++ if (CanCover(user, value)) { ++ switch (value->opcode()) { ++ case IrOpcode::kWord32Equal: ++ cont->OverwriteAndNegateIfEqual(kEqual); ++ return VisitWord32Compare(this, value, cont); ++ case IrOpcode::kInt32LessThan: ++ cont->OverwriteAndNegateIfEqual(kSignedLessThan); ++ return VisitWord32Compare(this, value, cont); ++ case IrOpcode::kInt32LessThanOrEqual: ++ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual); ++ return VisitWord32Compare(this, value, cont); ++ case IrOpcode::kUint32LessThan: ++ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); ++ return VisitWord32Compare(this, value, cont); ++ case IrOpcode::kUint32LessThanOrEqual: ++ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); ++ return VisitWord32Compare(this, value, cont); ++ case IrOpcode::kWord64Equal: ++ cont->OverwriteAndNegateIfEqual(kEqual); ++ return VisitWord64Compare(this, value, cont); ++ case IrOpcode::kInt64LessThan: ++ cont->OverwriteAndNegateIfEqual(kSignedLessThan); ++ return VisitWord64Compare(this, value, cont); ++ case IrOpcode::kInt64LessThanOrEqual: ++ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual); ++ return VisitWord64Compare(this, value, cont); ++ case IrOpcode::kUint64LessThan: ++ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); ++ return VisitWord64Compare(this, value, cont); ++ case IrOpcode::kUint64LessThanOrEqual: ++ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); ++ return VisitWord64Compare(this, value, cont); ++ case IrOpcode::kFloat32Equal: ++ cont->OverwriteAndNegateIfEqual(kEqual); ++ return VisitFloat32Compare(this, value, cont); ++ case IrOpcode::kFloat32LessThan: ++ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); ++ return VisitFloat32Compare(this, value, cont); ++ case IrOpcode::kFloat32LessThanOrEqual: ++ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); ++ return VisitFloat32Compare(this, value, cont); ++ case IrOpcode::kFloat64Equal: ++ cont->OverwriteAndNegateIfEqual(kEqual); ++ return VisitFloat64Compare(this, value, cont); ++ case IrOpcode::kFloat64LessThan: ++ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); ++ return VisitFloat64Compare(this, value, cont); ++ case IrOpcode::kFloat64LessThanOrEqual: ++ 
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); ++ return VisitFloat64Compare(this, value, cont); ++ case IrOpcode::kProjection: ++ // Check if this is the overflow output projection of an ++ // <Operation>WithOverflow node. ++ if (ProjectionIndexOf(value->op()) == 1u) { ++ // We cannot combine the <Operation>WithOverflow with this branch ++ // unless the 0th projection (the use of the actual value of the ++ // <Operation> is either nullptr, which means there's no use of the ++ // actual value, or was already defined, which means it is scheduled ++ // *AFTER* this branch). ++ Node* const node = value->InputAt(0); ++ Node* const result = NodeProperties::FindProjection(node, 0); ++ if (result == nullptr || IsDefined(result)) { ++ switch (node->opcode()) { ++ case IrOpcode::kInt32AddWithOverflow: ++ cont->OverwriteAndNegateIfEqual(kOverflow); ++ return VisitBinop(this, node, kSw64Dadd, cont); ++ case IrOpcode::kInt32SubWithOverflow: ++ cont->OverwriteAndNegateIfEqual(kOverflow); ++ return VisitBinop(this, node, kSw64Dsub, cont); ++ case IrOpcode::kInt32MulWithOverflow: ++ cont->OverwriteAndNegateIfEqual(kOverflow); ++ return VisitBinop(this, node, kSw64MulOvf, cont); ++ case IrOpcode::kInt64AddWithOverflow: ++ cont->OverwriteAndNegateIfEqual(kOverflow); ++ return VisitBinop(this, node, kSw64DaddOvf, cont); ++ case IrOpcode::kInt64SubWithOverflow: ++ cont->OverwriteAndNegateIfEqual(kOverflow); ++ return VisitBinop(this, node, kSw64DsubOvf, cont); ++ default: ++ break; ++ } ++ } ++ } ++ break; ++ case IrOpcode::kWord32And: ++ case IrOpcode::kWord64And: ++ return VisitWordCompare(this, value, kSw64Tst, cont, true); ++ case IrOpcode::kStackPointerGreaterThan: ++ cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition); ++ return VisitStackPointerGreaterThan(value, cont); ++ default: ++ break; ++ } ++ } ++ ++ // Continuation could not be combined with a compare, emit compare against 0. ++ EmitWordCompareZero(this, value, cont); ++} ++ ++void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) { ++ Sw64OperandGenerator g(this); ++ InstructionOperand value_operand = g.UseRegister(node->InputAt(0)); ++ ++ // Emit either ArchTableSwitch or ArchBinarySearchSwitch. ++ if (enable_switch_jump_table_ == kEnableSwitchJumpTable) { ++ static const size_t kMaxTableSwitchValueRange = 2 << 16; ++ size_t table_space_cost = 10 + 2 * sw.value_range(); ++ size_t table_time_cost = 3; ++ size_t lookup_space_cost = 2 + 2 * sw.case_count(); ++ size_t lookup_time_cost = sw.case_count(); ++ if (sw.case_count() > 0 && ++ table_space_cost + 3 * table_time_cost <= ++ lookup_space_cost + 3 * lookup_time_cost && ++ sw.min_value() > std::numeric_limits<int32_t>::min() && ++ sw.value_range() <= kMaxTableSwitchValueRange) { ++ InstructionOperand index_operand = value_operand; ++ if (sw.min_value()) { ++ index_operand = g.TempRegister(); ++ Emit(kSw64Sub, index_operand, value_operand, ++ g.TempImmediate(sw.min_value())); ++ } ++ // Generate a table lookup. ++ return EmitTableSwitch(sw, index_operand); ++ } ++ } ++ ++ // Generate a tree of conditional jumps.
++ return EmitBinarySearchSwitch(sw, value_operand); ++} ++ ++void InstructionSelector::VisitWord32Equal(Node* const node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); ++ Int32BinopMatcher m(node); ++ if (m.right().Is(0)) { ++ return VisitWordCompareZero(m.node(), m.left().node(), &cont); ++ } ++ ++ VisitWord32Compare(this, node, &cont); ++} ++ ++ ++void InstructionSelector::VisitInt32LessThan(Node* node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node); ++ VisitWord32Compare(this, node, &cont); ++} ++ ++ ++void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) { ++ FlagsContinuation cont = ++ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node); ++ VisitWord32Compare(this, node, &cont); ++} ++ ++ ++void InstructionSelector::VisitUint32LessThan(Node* node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); ++ VisitWord32Compare(this, node, &cont); ++} ++ ++ ++void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) { ++ FlagsContinuation cont = ++ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); ++ VisitWord32Compare(this, node, &cont); ++} ++ ++ ++void InstructionSelector::VisitInt32AddWithOverflow(Node* node) { ++ if (Node* ovf = NodeProperties::FindProjection(node, 1)) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); ++ return VisitBinop(this, node, kSw64Dadd, &cont); ++ } ++ FlagsContinuation cont; ++ VisitBinop(this, node, kSw64Dadd, &cont); ++} ++ ++ ++void InstructionSelector::VisitInt32SubWithOverflow(Node* node) { ++ if (Node* ovf = NodeProperties::FindProjection(node, 1)) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); ++ return VisitBinop(this, node, kSw64Dsub, &cont); ++ } ++ FlagsContinuation cont; ++ VisitBinop(this, node, kSw64Dsub, &cont); ++} ++ ++void InstructionSelector::VisitInt32MulWithOverflow(Node* node) { ++ if (Node* ovf = NodeProperties::FindProjection(node, 1)) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); ++ return VisitBinop(this, node, kSw64MulOvf, &cont); ++ } ++ FlagsContinuation cont; ++ VisitBinop(this, node, kSw64MulOvf, &cont); ++} ++ ++void InstructionSelector::VisitInt64AddWithOverflow(Node* node) { ++ if (Node* ovf = NodeProperties::FindProjection(node, 1)) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); ++ return VisitBinop(this, node, kSw64DaddOvf, &cont); ++ } ++ FlagsContinuation cont; ++ VisitBinop(this, node, kSw64DaddOvf, &cont); ++} ++ ++ ++void InstructionSelector::VisitInt64SubWithOverflow(Node* node) { ++ if (Node* ovf = NodeProperties::FindProjection(node, 1)) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); ++ return VisitBinop(this, node, kSw64DsubOvf, &cont); ++ } ++ FlagsContinuation cont; ++ VisitBinop(this, node, kSw64DsubOvf, &cont); ++} ++ ++ ++void InstructionSelector::VisitWord64Equal(Node* const node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); ++ Int64BinopMatcher m(node); ++ if (m.right().Is(0)) { ++ return VisitWordCompareZero(m.node(), m.left().node(), &cont); ++ } ++ ++ VisitWord64Compare(this, node, &cont); ++} ++ ++ ++void InstructionSelector::VisitInt64LessThan(Node* node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node); ++ VisitWord64Compare(this, node, &cont); ++} ++ ++ ++void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) { ++ FlagsContinuation cont = ++ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node); 
++ VisitWord64Compare(this, node, &cont); ++} ++ ++ ++void InstructionSelector::VisitUint64LessThan(Node* node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); ++ VisitWord64Compare(this, node, &cont); ++} ++ ++ ++void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) { ++ FlagsContinuation cont = ++ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); ++ VisitWord64Compare(this, node, &cont); ++} ++ ++ ++void InstructionSelector::VisitFloat32Equal(Node* node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); ++ VisitFloat32Compare(this, node, &cont); ++} ++ ++ ++void InstructionSelector::VisitFloat32LessThan(Node* node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); ++ VisitFloat32Compare(this, node, &cont); ++} ++ ++ ++void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) { ++ FlagsContinuation cont = ++ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); ++ VisitFloat32Compare(this, node, &cont); ++} ++ ++ ++void InstructionSelector::VisitFloat64Equal(Node* node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); ++ VisitFloat64Compare(this, node, &cont); ++} ++ ++ ++void InstructionSelector::VisitFloat64LessThan(Node* node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); ++ VisitFloat64Compare(this, node, &cont); ++} ++ ++ ++void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) { ++ FlagsContinuation cont = ++ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); ++ VisitFloat64Compare(this, node, &cont); ++} ++ ++ ++void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) { ++ VisitRR(this, kSw64Float64ExtractLowWord32, node); ++} ++ ++ ++void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) { ++ VisitRR(this, kSw64Float64ExtractHighWord32, node); ++} ++ ++void InstructionSelector::VisitFloat64SilenceNaN(Node* node) { ++ VisitRR(this, kSw64Float64SilenceNaN, node); ++} ++ ++void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) { ++ Sw64OperandGenerator g(this); ++ Node* left = node->InputAt(0); ++ Node* right = node->InputAt(1); ++ Emit(kSw64Float64InsertLowWord32, g.DefineSameAsFirst(node), ++ g.UseRegister(left), g.UseRegister(right)); ++} ++ ++ ++void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) { ++ Sw64OperandGenerator g(this); ++ Node* left = node->InputAt(0); ++ Node* right = node->InputAt(1); ++ Emit(kSw64Float64InsertHighWord32, g.DefineSameAsFirst(node), ++ g.UseRegister(left), g.UseRegister(right)); ++} ++ ++void InstructionSelector::VisitMemoryBarrier(Node* node) { ++ Sw64OperandGenerator g(this); ++ Emit(kSw64Sync, g.NoOutput()); ++} ++ ++void InstructionSelector::VisitWord32AtomicLoad(Node* node) { ++ LoadRepresentation load_rep = LoadRepresentationOf(node->op()); ++ ArchOpcode opcode = kArchNop; ++ switch (load_rep.representation()) { ++ case MachineRepresentation::kWord8: ++ opcode = ++ load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8; ++ break; ++ case MachineRepresentation::kWord16: ++ opcode = load_rep.IsSigned() ? 
kWord32AtomicLoadInt16 ++ : kWord32AtomicLoadUint16; ++ break; ++ case MachineRepresentation::kWord32: ++ opcode = kWord32AtomicLoadWord32; ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ VisitAtomicLoad(this, node, opcode); ++} ++ ++void InstructionSelector::VisitWord32AtomicStore(Node* node) { ++ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op()); ++ ArchOpcode opcode = kArchNop; ++ switch (rep) { ++ case MachineRepresentation::kWord8: ++ opcode = kWord32AtomicStoreWord8; ++ break; ++ case MachineRepresentation::kWord16: ++ opcode = kWord32AtomicStoreWord16; ++ break; ++ case MachineRepresentation::kWord32: ++ opcode = kWord32AtomicStoreWord32; ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ ++ VisitAtomicStore(this, node, opcode); ++} ++ ++void InstructionSelector::VisitWord64AtomicLoad(Node* node) { ++ LoadRepresentation load_rep = LoadRepresentationOf(node->op()); ++ ArchOpcode opcode = kArchNop; ++ switch (load_rep.representation()) { ++ case MachineRepresentation::kWord8: ++ opcode = kSw64Word64AtomicLoadUint8; ++ break; ++ case MachineRepresentation::kWord16: ++ opcode = kSw64Word64AtomicLoadUint16; ++ break; ++ case MachineRepresentation::kWord32: ++ opcode = kSw64Word64AtomicLoadUint32; ++ break; ++ case MachineRepresentation::kWord64: ++ opcode = kSw64Word64AtomicLoadUint64; ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ VisitAtomicLoad(this, node, opcode); ++} ++ ++void InstructionSelector::VisitWord64AtomicStore(Node* node) { ++ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op()); ++ ArchOpcode opcode = kArchNop; ++ switch (rep) { ++ case MachineRepresentation::kWord8: ++ opcode = kSw64Word64AtomicStoreWord8; ++ break; ++ case MachineRepresentation::kWord16: ++ opcode = kSw64Word64AtomicStoreWord16; ++ break; ++ case MachineRepresentation::kWord32: ++ opcode = kSw64Word64AtomicStoreWord32; ++ break; ++ case MachineRepresentation::kWord64: ++ opcode = kSw64Word64AtomicStoreWord64; ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ ++ VisitAtomicStore(this, node, opcode); ++} ++ ++void InstructionSelector::VisitWord32AtomicExchange(Node* node) { ++ ArchOpcode opcode = kArchNop; ++ MachineType type = AtomicOpType(node->op()); ++ if (type == MachineType::Int8()) { ++ opcode = kWord32AtomicExchangeInt8; ++ } else if (type == MachineType::Uint8()) { ++ opcode = kWord32AtomicExchangeUint8; ++ } else if (type == MachineType::Int16()) { ++ opcode = kWord32AtomicExchangeInt16; ++ } else if (type == MachineType::Uint16()) { ++ opcode = kWord32AtomicExchangeUint16; ++ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { ++ opcode = kWord32AtomicExchangeWord32; ++ } else { ++ UNREACHABLE(); ++ return; ++ } ++ ++ VisitAtomicExchange(this, node, opcode); ++} ++ ++void InstructionSelector::VisitWord64AtomicExchange(Node* node) { ++ ArchOpcode opcode = kArchNop; ++ MachineType type = AtomicOpType(node->op()); ++ if (type == MachineType::Uint8()) { ++ opcode = kSw64Word64AtomicExchangeUint8; ++ } else if (type == MachineType::Uint16()) { ++ opcode = kSw64Word64AtomicExchangeUint16; ++ } else if (type == MachineType::Uint32()) { ++ opcode = kSw64Word64AtomicExchangeUint32; ++ } else if (type == MachineType::Uint64()) { ++ opcode = kSw64Word64AtomicExchangeUint64; ++ } else { ++ UNREACHABLE(); ++ return; ++ } ++ VisitAtomicExchange(this, node, opcode); ++} ++ ++void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) { ++ ArchOpcode opcode = kArchNop; ++ MachineType type = AtomicOpType(node->op()); ++ if (type == 
MachineType::Int8()) { ++ opcode = kWord32AtomicCompareExchangeInt8; ++ } else if (type == MachineType::Uint8()) { ++ opcode = kWord32AtomicCompareExchangeUint8; ++ } else if (type == MachineType::Int16()) { ++ opcode = kWord32AtomicCompareExchangeInt16; ++ } else if (type == MachineType::Uint16()) { ++ opcode = kWord32AtomicCompareExchangeUint16; ++ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { ++ opcode = kWord32AtomicCompareExchangeWord32; ++ } else { ++ UNREACHABLE(); ++ return; ++ } ++ ++ VisitAtomicCompareExchange(this, node, opcode); ++} ++ ++void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) { ++ ArchOpcode opcode = kArchNop; ++ MachineType type = AtomicOpType(node->op()); ++ if (type == MachineType::Uint8()) { ++ opcode = kSw64Word64AtomicCompareExchangeUint8; ++ } else if (type == MachineType::Uint16()) { ++ opcode = kSw64Word64AtomicCompareExchangeUint16; ++ } else if (type == MachineType::Uint32()) { ++ opcode = kSw64Word64AtomicCompareExchangeUint32; ++ } else if (type == MachineType::Uint64()) { ++ opcode = kSw64Word64AtomicCompareExchangeUint64; ++ } else { ++ UNREACHABLE(); ++ return; ++ } ++ VisitAtomicCompareExchange(this, node, opcode); ++} ++void InstructionSelector::VisitWord32AtomicBinaryOperation( ++ Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op, ++ ArchOpcode uint16_op, ArchOpcode word32_op) { ++ ArchOpcode opcode = kArchNop; ++ MachineType type = AtomicOpType(node->op()); ++ if (type == MachineType::Int8()) { ++ opcode = int8_op; ++ } else if (type == MachineType::Uint8()) { ++ opcode = uint8_op; ++ } else if (type == MachineType::Int16()) { ++ opcode = int16_op; ++ } else if (type == MachineType::Uint16()) { ++ opcode = uint16_op; ++ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { ++ opcode = word32_op; ++ } else { ++ UNREACHABLE(); ++ return; ++ } ++ ++ VisitAtomicBinop(this, node, opcode); ++} ++ ++#define VISIT_ATOMIC_BINOP(op) \ ++ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \ ++ VisitWord32AtomicBinaryOperation( \ ++ node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \ ++ kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \ ++ kWord32Atomic##op##Word32); \ ++ } ++VISIT_ATOMIC_BINOP(Add) ++VISIT_ATOMIC_BINOP(Sub) ++VISIT_ATOMIC_BINOP(And) ++VISIT_ATOMIC_BINOP(Or) ++VISIT_ATOMIC_BINOP(Xor) ++#undef VISIT_ATOMIC_BINOP ++ ++void InstructionSelector::VisitWord64AtomicBinaryOperation( ++ Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op, ++ ArchOpcode uint64_op) { ++ ArchOpcode opcode = kArchNop; ++ MachineType type = AtomicOpType(node->op()); ++ if (type == MachineType::Uint8()) { ++ opcode = uint8_op; ++ } else if (type == MachineType::Uint16()) { ++ opcode = uint16_op; ++ } else if (type == MachineType::Uint32()) { ++ opcode = uint32_op; ++ } else if (type == MachineType::Uint64()) { ++ opcode = uint64_op; ++ } else { ++ UNREACHABLE(); ++ return; ++ } ++ VisitAtomicBinop(this, node, opcode); ++} ++ ++#define VISIT_ATOMIC_BINOP(op) \ ++ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \ ++ VisitWord64AtomicBinaryOperation( \ ++ node, kSw64Word64Atomic##op##Uint8, kSw64Word64Atomic##op##Uint16, \ ++ kSw64Word64Atomic##op##Uint32, kSw64Word64Atomic##op##Uint64); \ ++ } ++VISIT_ATOMIC_BINOP(Add) ++VISIT_ATOMIC_BINOP(Sub) ++VISIT_ATOMIC_BINOP(And) ++VISIT_ATOMIC_BINOP(Or) ++VISIT_ATOMIC_BINOP(Xor) ++#undef VISIT_ATOMIC_BINOP ++ ++void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) { ++ 
UNREACHABLE(); ++} ++ ++void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { ++ UNREACHABLE(); ++} ++ ++#define SIMD_TYPE_LIST(V) \ ++ V(F64x2) \ ++ V(F32x4) \ ++ V(I64x2) \ ++ V(I32x4) \ ++ V(I16x8) \ ++ V(I8x16) ++ ++#define SIMD_UNOP_LIST(V) \ ++ V(F64x2Abs, kSw64F64x2Abs) \ ++ V(F64x2Neg, kSw64F64x2Neg) \ ++ V(F64x2Sqrt, kSw64F64x2Sqrt) \ ++ V(F64x2Ceil, kSw64F64x2Ceil) \ ++ V(F64x2Floor, kSw64F64x2Floor) \ ++ V(F64x2Trunc, kSw64F64x2Trunc) \ ++ V(F64x2NearestInt, kSw64F64x2NearestInt) \ ++ V(I64x2Neg, kSw64I64x2Neg) \ ++ V(F32x4Sqrt, kSw64F32x4Sqrt) \ ++ V(F32x4SConvertI32x4, kSw64F32x4SConvertI32x4) \ ++ V(F32x4UConvertI32x4, kSw64F32x4UConvertI32x4) \ ++ V(F32x4Abs, kSw64F32x4Abs) \ ++ V(F32x4Neg, kSw64F32x4Neg) \ ++ V(F32x4RecipApprox, kSw64F32x4RecipApprox) \ ++ V(F32x4RecipSqrtApprox, kSw64F32x4RecipSqrtApprox) \ ++ V(F32x4Ceil, kSw64F32x4Ceil) \ ++ V(F32x4Floor, kSw64F32x4Floor) \ ++ V(F32x4Trunc, kSw64F32x4Trunc) \ ++ V(F32x4NearestInt, kSw64F32x4NearestInt) \ ++ V(I32x4SConvertF32x4, kSw64I32x4SConvertF32x4) \ ++ V(I32x4UConvertF32x4, kSw64I32x4UConvertF32x4) \ ++ V(I32x4Neg, kSw64I32x4Neg) \ ++ V(I32x4SConvertI16x8Low, kSw64I32x4SConvertI16x8Low) \ ++ V(I32x4SConvertI16x8High, kSw64I32x4SConvertI16x8High) \ ++ V(I32x4UConvertI16x8Low, kSw64I32x4UConvertI16x8Low) \ ++ V(I32x4UConvertI16x8High, kSw64I32x4UConvertI16x8High) \ ++ V(I32x4Abs, kSw64I32x4Abs) \ ++ V(I32x4BitMask, kSw64I32x4BitMask) \ ++ V(I16x8Neg, kSw64I16x8Neg) \ ++ V(I16x8SConvertI8x16Low, kSw64I16x8SConvertI8x16Low) \ ++ V(I16x8SConvertI8x16High, kSw64I16x8SConvertI8x16High) \ ++ V(I16x8UConvertI8x16Low, kSw64I16x8UConvertI8x16Low) \ ++ V(I16x8UConvertI8x16High, kSw64I16x8UConvertI8x16High) \ ++ V(I16x8Abs, kSw64I16x8Abs) \ ++ V(I16x8BitMask, kSw64I16x8BitMask) \ ++ V(I8x16Neg, kSw64I8x16Neg) \ ++ V(I8x16Abs, kSw64I8x16Abs) \ ++ V(I8x16BitMask, kSw64I8x16BitMask) \ ++ V(S128Not, kSw64S128Not) \ ++ V(V32x4AnyTrue, kSw64V32x4AnyTrue) \ ++ V(V32x4AllTrue, kSw64V32x4AllTrue) \ ++ V(V16x8AnyTrue, kSw64V16x8AnyTrue) \ ++ V(V16x8AllTrue, kSw64V16x8AllTrue) \ ++ V(V8x16AnyTrue, kSw64V8x16AnyTrue) \ ++ V(V8x16AllTrue, kSw64V8x16AllTrue) ++ ++#define SIMD_SHIFT_OP_LIST(V) \ ++ V(I64x2Shl) \ ++ V(I64x2ShrS) \ ++ V(I64x2ShrU) \ ++ V(I32x4Shl) \ ++ V(I32x4ShrS) \ ++ V(I32x4ShrU) \ ++ V(I16x8Shl) \ ++ V(I16x8ShrS) \ ++ V(I16x8ShrU) \ ++ V(I8x16Shl) \ ++ V(I8x16ShrS) \ ++ V(I8x16ShrU) ++ ++#define SIMD_BINOP_LIST(V) \ ++ V(F64x2Add, kSw64F64x2Add) \ ++ V(F64x2Sub, kSw64F64x2Sub) \ ++ V(F64x2Mul, kSw64F64x2Mul) \ ++ V(F64x2Div, kSw64F64x2Div) \ ++ V(F64x2Min, kSw64F64x2Min) \ ++ V(F64x2Max, kSw64F64x2Max) \ ++ V(F64x2Eq, kSw64F64x2Eq) \ ++ V(F64x2Ne, kSw64F64x2Ne) \ ++ V(F64x2Lt, kSw64F64x2Lt) \ ++ V(F64x2Le, kSw64F64x2Le) \ ++ V(I64x2Add, kSw64I64x2Add) \ ++ V(I64x2Sub, kSw64I64x2Sub) \ ++ V(I64x2Mul, kSw64I64x2Mul) \ ++ V(F32x4Add, kSw64F32x4Add) \ ++ V(F32x4AddHoriz, kSw64F32x4AddHoriz) \ ++ V(F32x4Sub, kSw64F32x4Sub) \ ++ V(F32x4Mul, kSw64F32x4Mul) \ ++ V(F32x4Div, kSw64F32x4Div) \ ++ V(F32x4Max, kSw64F32x4Max) \ ++ V(F32x4Min, kSw64F32x4Min) \ ++ V(F32x4Eq, kSw64F32x4Eq) \ ++ V(F32x4Ne, kSw64F32x4Ne) \ ++ V(F32x4Lt, kSw64F32x4Lt) \ ++ V(F32x4Le, kSw64F32x4Le) \ ++ V(I32x4Add, kSw64I32x4Add) \ ++ V(I32x4AddHoriz, kSw64I32x4AddHoriz) \ ++ V(I32x4Sub, kSw64I32x4Sub) \ ++ V(I32x4Mul, kSw64I32x4Mul) \ ++ V(I32x4MaxS, kSw64I32x4MaxS) \ ++ V(I32x4MinS, kSw64I32x4MinS) \ ++ V(I32x4MaxU, kSw64I32x4MaxU) \ ++ V(I32x4MinU, kSw64I32x4MinU) \ ++ V(I32x4Eq, kSw64I32x4Eq) \ ++ V(I32x4Ne, kSw64I32x4Ne) \ ++ V(I32x4GtS, 
kSw64I32x4GtS) \ ++ V(I32x4GeS, kSw64I32x4GeS) \ ++ V(I32x4GtU, kSw64I32x4GtU) \ ++ V(I32x4GeU, kSw64I32x4GeU) \ ++ V(I16x8Add, kSw64I16x8Add) \ ++ V(I16x8AddSaturateS, kSw64I16x8AddSaturateS) \ ++ V(I16x8AddSaturateU, kSw64I16x8AddSaturateU) \ ++ V(I16x8AddHoriz, kSw64I16x8AddHoriz) \ ++ V(I16x8Sub, kSw64I16x8Sub) \ ++ V(I16x8SubSaturateS, kSw64I16x8SubSaturateS) \ ++ V(I16x8SubSaturateU, kSw64I16x8SubSaturateU) \ ++ V(I16x8Mul, kSw64I16x8Mul) \ ++ V(I16x8MaxS, kSw64I16x8MaxS) \ ++ V(I16x8MinS, kSw64I16x8MinS) \ ++ V(I16x8MaxU, kSw64I16x8MaxU) \ ++ V(I16x8MinU, kSw64I16x8MinU) \ ++ V(I16x8Eq, kSw64I16x8Eq) \ ++ V(I16x8Ne, kSw64I16x8Ne) \ ++ V(I16x8GtS, kSw64I16x8GtS) \ ++ V(I16x8GeS, kSw64I16x8GeS) \ ++ V(I16x8GtU, kSw64I16x8GtU) \ ++ V(I16x8GeU, kSw64I16x8GeU) \ ++ V(I16x8RoundingAverageU, kSw64I16x8RoundingAverageU) \ ++ V(I16x8SConvertI32x4, kSw64I16x8SConvertI32x4) \ ++ V(I16x8UConvertI32x4, kSw64I16x8UConvertI32x4) \ ++ V(I8x16Add, kSw64I8x16Add) \ ++ V(I8x16AddSaturateS, kSw64I8x16AddSaturateS) \ ++ V(I8x16AddSaturateU, kSw64I8x16AddSaturateU) \ ++ V(I8x16Sub, kSw64I8x16Sub) \ ++ V(I8x16SubSaturateS, kSw64I8x16SubSaturateS) \ ++ V(I8x16SubSaturateU, kSw64I8x16SubSaturateU) \ ++ V(I8x16Mul, kSw64I8x16Mul) \ ++ V(I8x16MaxS, kSw64I8x16MaxS) \ ++ V(I8x16MinS, kSw64I8x16MinS) \ ++ V(I8x16MaxU, kSw64I8x16MaxU) \ ++ V(I8x16MinU, kSw64I8x16MinU) \ ++ V(I8x16Eq, kSw64I8x16Eq) \ ++ V(I8x16Ne, kSw64I8x16Ne) \ ++ V(I8x16GtS, kSw64I8x16GtS) \ ++ V(I8x16GeS, kSw64I8x16GeS) \ ++ V(I8x16GtU, kSw64I8x16GtU) \ ++ V(I8x16GeU, kSw64I8x16GeU) \ ++ V(I8x16RoundingAverageU, kSw64I8x16RoundingAverageU) \ ++ V(I8x16SConvertI16x8, kSw64I8x16SConvertI16x8) \ ++ V(I8x16UConvertI16x8, kSw64I8x16UConvertI16x8) \ ++ V(S128And, kSw64S128And) \ ++ V(S128Or, kSw64S128Or) \ ++ V(S128Xor, kSw64S128Xor) \ ++ V(S128AndNot, kSw64S128AndNot) ++ ++//SKTODO ++void InstructionSelector::VisitS128Const(Node* node) { ++ Sw64OperandGenerator g(this); ++ static const int kUint32Immediates = kSimd128Size / sizeof(uint32_t); ++ uint32_t val[kUint32Immediates]; ++ memcpy(val, S128ImmediateParameterOf(node->op()).data(), kSimd128Size); ++ // If all bytes are zeros or ones, avoid emitting code for generic constants ++ bool all_zeros = !(val[0] || val[1] || val[2] || val[3]); ++ bool all_ones = val[0] == UINT32_MAX && val[1] == UINT32_MAX && ++ val[2] == UINT32_MAX && val[3] == UINT32_MAX; ++ InstructionOperand dst = g.DefineAsRegister(node); ++ if (all_zeros) { ++ Emit(kSw64S128Zero, dst); ++ } else if (all_ones) { ++ Emit(kSw64S128AllOnes, dst); ++ } else { ++ Emit(kSw64S128Const, dst, g.UseImmediate(val[0]), g.UseImmediate(val[1]), ++ g.UseImmediate(val[2]), g.UseImmediate(val[3])); ++ } ++} ++ ++void InstructionSelector::VisitS128Zero(Node* node) { ++ Sw64OperandGenerator g(this); ++ Emit(kSw64S128Zero, g.DefineAsRegister(node)); ++} ++ ++#define SIMD_VISIT_SPLAT(Type) \ ++ void InstructionSelector::Visit##Type##Splat(Node* node) { \ ++ VisitRR(this, kSw64##Type##Splat, node); \ ++ } ++SIMD_TYPE_LIST(SIMD_VISIT_SPLAT) ++#undef SIMD_VISIT_SPLAT ++ ++#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \ ++ void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \ ++ VisitRRI(this, kSw64##Type##ExtractLane##Sign, node); \ ++ } ++SIMD_VISIT_EXTRACT_LANE(F64x2, ) ++SIMD_VISIT_EXTRACT_LANE(F32x4, ) ++SIMD_VISIT_EXTRACT_LANE(I64x2, ) ++SIMD_VISIT_EXTRACT_LANE(I32x4, ) ++SIMD_VISIT_EXTRACT_LANE(I16x8, U) ++SIMD_VISIT_EXTRACT_LANE(I16x8, S) ++SIMD_VISIT_EXTRACT_LANE(I8x16, U) ++SIMD_VISIT_EXTRACT_LANE(I8x16, S) ++#undef 
SIMD_VISIT_EXTRACT_LANE ++ ++#define SIMD_VISIT_REPLACE_LANE(Type) \ ++ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \ ++ VisitRRIR(this, kSw64##Type##ReplaceLane, node); \ ++ } ++SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE) ++#undef SIMD_VISIT_REPLACE_LANE ++ ++#define SIMD_VISIT_UNOP(Name, instruction) \ ++ void InstructionSelector::Visit##Name(Node* node) { \ ++ VisitRR(this, instruction, node); \ ++ } ++SIMD_UNOP_LIST(SIMD_VISIT_UNOP) ++#undef SIMD_VISIT_UNOP ++ ++#define SIMD_VISIT_SHIFT_OP(Name) \ ++ void InstructionSelector::Visit##Name(Node* node) { \ ++ VisitSimdShift(this, kSw64##Name, node); \ ++ } ++SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP) ++#undef SIMD_VISIT_SHIFT_OP ++ ++#define SIMD_VISIT_BINOP(Name, instruction) \ ++ void InstructionSelector::Visit##Name(Node* node) { \ ++ VisitRRR(this, instruction, node); \ ++ } ++SIMD_BINOP_LIST(SIMD_VISIT_BINOP) ++#undef SIMD_VISIT_BINOP ++ ++void InstructionSelector::VisitS128Select(Node* node) { ++ VisitRRRR(this, kSw64S128Select, node); ++} ++ ++namespace { ++ ++struct ShuffleEntry { ++ uint8_t shuffle[kSimd128Size]; ++ ArchOpcode opcode; ++}; ++ ++static const ShuffleEntry arch_shuffles[] = { ++ {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23}, ++ kSw64S32x4InterleaveRight}, ++ {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31}, ++ kSw64S32x4InterleaveLeft}, ++ {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27}, ++ kSw64S32x4PackEven}, ++ {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31}, ++ kSw64S32x4PackOdd}, ++ {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27}, ++ kSw64S32x4InterleaveEven}, ++ {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31}, ++ kSw64S32x4InterleaveOdd}, ++ ++ {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23}, ++ kSw64S16x8InterleaveRight}, ++ {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31}, ++ kSw64S16x8InterleaveLeft}, ++ {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29}, ++ kSw64S16x8PackEven}, ++ {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31}, ++ kSw64S16x8PackOdd}, ++ {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29}, ++ kSw64S16x8InterleaveEven}, ++ {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31}, ++ kSw64S16x8InterleaveOdd}, ++ {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9}, ++ kSw64S16x4Reverse}, ++ {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}, ++ kSw64S16x2Reverse}, ++ ++ {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23}, ++ kSw64S8x16InterleaveRight}, ++ {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31}, ++ kSw64S8x16InterleaveLeft}, ++ {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30}, ++ kSw64S8x16PackEven}, ++ {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31}, ++ kSw64S8x16PackOdd}, ++ {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30}, ++ kSw64S8x16InterleaveEven}, ++ {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31}, ++ kSw64S8x16InterleaveOdd}, ++ {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, ++ kSw64S8x8Reverse}, ++ {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, ++ kSw64S8x4Reverse}, ++ {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}, ++ kSw64S8x2Reverse}}; ++ ++bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table, ++ size_t num_entries, bool is_swizzle, ++ ArchOpcode* opcode) { ++ uint8_t mask = is_swizzle ? 
kSimd128Size - 1 : 2 * kSimd128Size - 1; ++ for (size_t i = 0; i < num_entries; ++i) { ++ const ShuffleEntry& entry = table[i]; ++ int j = 0; ++ for (; j < kSimd128Size; ++j) { ++ if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) { ++ break; ++ } ++ } ++ if (j == kSimd128Size) { ++ *opcode = entry.opcode; ++ return true; ++ } ++ } ++ return false; ++} ++ ++} // namespace ++ ++void InstructionSelector::VisitI8x16Shuffle(Node* node) { ++ uint8_t shuffle[kSimd128Size]; ++ bool is_swizzle; ++ CanonicalizeShuffle(node, shuffle, &is_swizzle); ++ uint8_t shuffle32x4[4]; ++ ArchOpcode opcode; ++ if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles), ++ is_swizzle, &opcode)) { ++ VisitRRR(this, opcode, node); ++ return; ++ } ++ Node* input0 = node->InputAt(0); ++ Node* input1 = node->InputAt(1); ++ uint8_t offset; ++ Sw64OperandGenerator g(this); ++ if (wasm::SimdShuffle::TryMatchConcat(shuffle, &offset)) { ++ Emit(kSw64S8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1), ++ g.UseRegister(input0), g.UseImmediate(offset)); ++ return; ++ } ++ if (wasm::SimdShuffle::TryMatch32x4Shuffle(shuffle, shuffle32x4)) { ++ Emit(kSw64S32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0), ++ g.UseRegister(input1), ++ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle32x4))); ++ return; ++ } ++ Emit(kSw64I8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0), ++ g.UseRegister(input1), ++ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle)), ++ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 4)), ++ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 8)), ++ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 12))); ++} ++ ++void InstructionSelector::VisitI8x16Swizzle(Node* node) { ++ Sw64OperandGenerator g(this); ++ InstructionOperand temps[] = {g.TempSimd128Register()}; ++ // We don't want input 0 or input 1 to be the same as output, since we will ++ // modify output before do the calculation. 
++ Emit(kSw64I8x16Swizzle, g.DefineAsRegister(node), ++ g.UseUniqueRegister(node->InputAt(0)), ++ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); ++} ++ ++void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) { ++ Sw64OperandGenerator g(this); ++ Emit(kSw64Seb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); ++} ++ ++void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) { ++ Sw64OperandGenerator g(this); ++ Emit(kSw64Seh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); ++} ++ ++void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) { ++ Sw64OperandGenerator g(this); ++ Emit(kSw64Seb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); ++} ++ ++void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) { ++ Sw64OperandGenerator g(this); ++ Emit(kSw64Seh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); ++} ++ ++void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) { ++ Sw64OperandGenerator g(this); ++ Emit(kSw64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), ++ g.TempImmediate(0)); ++} ++ ++void InstructionSelector::VisitF32x4Pmin(Node* node) { ++ UNREACHABLE(); ++// VisitUniqueRRR(this, kSw64F32x4Pmin, node); ++} ++ ++void InstructionSelector::VisitF32x4Pmax(Node* node) { ++ UNREACHABLE(); ++// VisitUniqueRRR(this, kSw64F32x4Pmax, node); ++} ++ ++void InstructionSelector::VisitF64x2Pmin(Node* node) { ++ UNREACHABLE(); ++// VisitUniqueRRR(this, kSw64F64x2Pmin, node); ++} ++ ++void InstructionSelector::VisitF64x2Pmax(Node* node) { ++ UNREACHABLE(); ++// VisitUniqueRRR(this, kSw64F64x2Pmax, node); ++} ++ ++// static ++MachineOperatorBuilder::Flags ++InstructionSelector::SupportedMachineOperatorFlags() { ++ MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags; ++ return flags | MachineOperatorBuilder::kWord32Ctz | ++ MachineOperatorBuilder::kWord64Ctz | ++ MachineOperatorBuilder::kWord32Popcnt | ++ MachineOperatorBuilder::kWord64Popcnt | ++ MachineOperatorBuilder::kWord32ShiftIsSafe | ++ MachineOperatorBuilder::kInt32DivIsSafe | ++ MachineOperatorBuilder::kUint32DivIsSafe | ++ MachineOperatorBuilder::kFloat64RoundDown | ++ MachineOperatorBuilder::kFloat32RoundDown | ++ MachineOperatorBuilder::kFloat64RoundUp | ++ MachineOperatorBuilder::kFloat32RoundUp | ++ MachineOperatorBuilder::kFloat64RoundTruncate | ++ MachineOperatorBuilder::kFloat32RoundTruncate | ++ MachineOperatorBuilder::kFloat64RoundTiesEven | ++ MachineOperatorBuilder::kFloat32RoundTiesEven; ++} ++ ++// static ++MachineOperatorBuilder::AlignmentRequirements ++InstructionSelector::AlignmentRequirements() { ++ if (kArchVariant == kSw64r3) { ++ return MachineOperatorBuilder::AlignmentRequirements:: ++ FullUnalignedAccessSupport(); ++ } else { ++ DCHECK_EQ(kSw64r2, kArchVariant); ++ return MachineOperatorBuilder::AlignmentRequirements:: ++ NoUnalignedAccessSupport(); ++ } ++} ++ ++#undef SIMD_BINOP_LIST ++#undef SIMD_SHIFT_OP_LIST ++#undef SIMD_UNOP_LIST ++#undef SIMD_TYPE_LIST ++#undef TRACE_UNIMPL ++#undef TRACE ++ ++} // namespace compiler ++} // namespace internal ++} // namespace v8 +diff --git a/src/3rdparty/chromium/v8/src/compiler/backend/sw64/unwinding-info-writer-sw64.cc b/src/3rdparty/chromium/v8/src/compiler/backend/sw64/unwinding-info-writer-sw64.cc +new file mode 100755 +index 000000000..2eb6c1851 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/compiler/backend/sw64/unwinding-info-writer-sw64.cc +@@ -0,0 +1,108 @@ ++// Copyright 2016 the V8 project authors. 
All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#include "src/compiler/backend/sw64/unwinding-info-writer-sw64.h" ++#include "src/compiler/backend/instruction.h" ++ ++namespace v8 { ++namespace internal { ++namespace compiler { ++ ++// TODO(v8:10026): When using CFI, we need to generate unwinding info to tell ++// the unwinder that return addresses are signed. ++ ++void UnwindingInfoWriter::BeginInstructionBlock(int pc_offset, ++ const InstructionBlock* block) { ++ if (!enabled()) return; ++ ++ block_will_exit_ = false; ++ ++ DCHECK_LT(block->rpo_number().ToInt(), ++ static_cast(block_initial_states_.size())); ++ const BlockInitialState* initial_state = ++ block_initial_states_[block->rpo_number().ToInt()]; ++ if (!initial_state) return; ++ if (initial_state->saved_lr_ != saved_lr_) { ++ eh_frame_writer_.AdvanceLocation(pc_offset); ++ if (initial_state->saved_lr_) { ++ eh_frame_writer_.RecordRegisterSavedToStack(ra, kSystemPointerSize); ++ eh_frame_writer_.RecordRegisterSavedToStack(fp, 0); ++ } else { ++ eh_frame_writer_.RecordRegisterFollowsInitialRule(ra); ++ } ++ saved_lr_ = initial_state->saved_lr_; ++ } ++} ++ ++void UnwindingInfoWriter::EndInstructionBlock(const InstructionBlock* block) { ++ if (!enabled() || block_will_exit_) return; ++ ++ for (const RpoNumber& successor : block->successors()) { ++ int successor_index = successor.ToInt(); ++ DCHECK_LT(successor_index, static_cast(block_initial_states_.size())); ++ const BlockInitialState* existing_state = ++ block_initial_states_[successor_index]; ++ ++ // If we already had an entry for this BB, check that the values are the ++ // same we are trying to insert. ++ if (existing_state) { ++ DCHECK_EQ(existing_state->saved_lr_, saved_lr_); ++ } else { ++ block_initial_states_[successor_index] = ++ zone_->New(saved_lr_); ++ } ++ } ++} ++ ++void UnwindingInfoWriter::MarkFrameConstructed(int at_pc) { ++ if (!enabled()) return; ++ ++ // Regardless of the type of frame constructed, the relevant part of the ++ // layout is always the one in the diagram: ++ // ++ // | .... | higher addresses ++ // +----------+ ^ ++ // | RA | | | ++ // +----------+ | | ++ // | saved FP | | | ++ // +----------+ <-- FP v ++ // | .... | stack growth ++ // ++ // The RA is pushed on the stack, and we can record this fact at the end of ++ // the construction, since the RA itself is not modified in the process. ++ eh_frame_writer_.AdvanceLocation(at_pc); ++ eh_frame_writer_.RecordRegisterSavedToStack(ra, kSystemPointerSize); ++ eh_frame_writer_.RecordRegisterSavedToStack(fp, 0); ++ saved_lr_ = true; ++} ++ ++void UnwindingInfoWriter::MarkFrameDeconstructed(int at_pc) { ++ if (!enabled()) return; ++ ++ // The lr is restored by the last operation in LeaveFrame(). 
++ eh_frame_writer_.AdvanceLocation(at_pc); ++ eh_frame_writer_.RecordRegisterFollowsInitialRule(ra); ++ saved_lr_ = false; ++} ++ ++void UnwindingInfoWriter::MarkLinkRegisterOnTopOfStack(int pc_offset, ++ const Register& sp) { ++ if (!enabled()) return; ++ ++ eh_frame_writer_.AdvanceLocation(pc_offset); ++ eh_frame_writer_.SetBaseAddressRegisterAndOffset(sp, 0); ++ eh_frame_writer_.RecordRegisterSavedToStack(ra, 0); ++} ++ ++void UnwindingInfoWriter::MarkPopLinkRegisterFromTopOfStack(int pc_offset) { ++ if (!enabled()) return; ++ ++ eh_frame_writer_.AdvanceLocation(pc_offset); ++ eh_frame_writer_.SetBaseAddressRegisterAndOffset(fp, 0); ++ eh_frame_writer_.RecordRegisterFollowsInitialRule(ra); ++} ++ ++} // namespace compiler ++} // namespace internal ++} // namespace v8 +diff --git a/src/3rdparty/chromium/v8/src/compiler/backend/sw64/unwinding-info-writer-sw64.h b/src/3rdparty/chromium/v8/src/compiler/backend/sw64/unwinding-info-writer-sw64.h +new file mode 100755 +index 000000000..ea702773e +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/compiler/backend/sw64/unwinding-info-writer-sw64.h +@@ -0,0 +1,73 @@ ++// Copyright 2016 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#ifndef V8_COMPILER_BACKEND_SW64_UNWINDING_INFO_WRITER_SW64_H_ ++#define V8_COMPILER_BACKEND_SW64_UNWINDING_INFO_WRITER_SW64_H_ ++ ++#include "src/diagnostics/eh-frame.h" ++#include "src/flags/flags.h" ++ ++namespace v8 { ++namespace internal { ++namespace compiler { ++ ++class InstructionBlock; ++ ++class UnwindingInfoWriter { ++ public: ++ explicit UnwindingInfoWriter(Zone* zone) ++ : zone_(zone), ++ eh_frame_writer_(zone), ++ saved_lr_(false), ++ block_will_exit_(false), ++ block_initial_states_(zone) { ++ if (enabled()) eh_frame_writer_.Initialize(); ++ } ++ ++ void SetNumberOfInstructionBlocks(int number) { ++ if (enabled()) block_initial_states_.resize(number); ++ } ++ ++ void BeginInstructionBlock(int pc_offset, const InstructionBlock* block); ++ void EndInstructionBlock(const InstructionBlock* block); ++ ++ void MarkLinkRegisterOnTopOfStack(int pc_offset, const Register& sp); ++ void MarkPopLinkRegisterFromTopOfStack(int pc_offset); ++ ++ void MarkFrameConstructed(int at_pc); ++ void MarkFrameDeconstructed(int at_pc); ++ ++ void MarkBlockWillExit() { block_will_exit_ = true; } ++ ++ void Finish(int code_size) { ++ if (enabled()) eh_frame_writer_.Finish(code_size); ++ } ++ ++ EhFrameWriter* eh_frame_writer() { ++ return enabled() ? 
&eh_frame_writer_ : nullptr; ++ } ++ ++ private: ++ bool enabled() const { return FLAG_perf_prof_unwinding_info; } ++ ++ class BlockInitialState : public ZoneObject { ++ public: ++ explicit BlockInitialState(bool saved_lr) : saved_lr_(saved_lr) {} ++ ++ bool saved_lr_; ++ }; ++ ++ Zone* zone_; ++ EhFrameWriter eh_frame_writer_; ++ bool saved_lr_; ++ bool block_will_exit_; ++ ++ ZoneVector block_initial_states_; ++}; ++ ++} // namespace compiler ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_COMPILER_BACKEND_SW64_UNWINDING_INFO_WRITER_SW64_H_ +diff --git a/src/3rdparty/chromium/v8/src/compiler/backend/unwinding-info-writer.h b/src/3rdparty/chromium/v8/src/compiler/backend/unwinding-info-writer.h +index a288e219a..d5380e502 100644 +--- a/src/3rdparty/chromium/v8/src/compiler/backend/unwinding-info-writer.h ++++ b/src/3rdparty/chromium/v8/src/compiler/backend/unwinding-info-writer.h +@@ -17,6 +17,8 @@ + #include "src/compiler/backend/s390/unwinding-info-writer-s390.h" + #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 + #include "src/compiler/backend/ppc/unwinding-info-writer-ppc.h" ++#elif V8_TARGET_ARCH_SW64 ++#include "src/compiler/backend/sw64/unwinding-info-writer-sw64.h" + #else + + // Placeholder for unsupported architectures. +diff --git a/src/3rdparty/chromium/v8/src/compiler/c-linkage.cc b/src/3rdparty/chromium/v8/src/compiler/c-linkage.cc +index 4967f2bbf..02aeb6c33 100644 +--- a/src/3rdparty/chromium/v8/src/compiler/c-linkage.cc ++++ b/src/3rdparty/chromium/v8/src/compiler/c-linkage.cc +@@ -97,6 +97,16 @@ namespace { + s7.bit() + #define CALLEE_SAVE_FP_REGISTERS \ + f20.bit() | f22.bit() | f24.bit() | f26.bit() | f28.bit() | f30.bit() ++#elif V8_TARGET_ARCH_SW64 ++// =========================================================================== ++// == sw64 =================================================================== ++// =========================================================================== ++#define PARAM_REGISTERS a0, a1, a2, a3, a4, a5 ++#define CALLEE_SAVE_REGISTERS \ ++ s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() ++#define CALLEE_SAVE_FP_REGISTERS \ ++ f2.bit() | f3.bit() | f4.bit() | f5.bit() | f6.bit() | f7.bit() | \ ++ f8.bit() | f9.bit() + + #elif V8_TARGET_ARCH_PPC64 + // =========================================================================== +diff --git a/src/3rdparty/chromium/v8/src/debug/debug-evaluate.cc b/src/3rdparty/chromium/v8/src/debug/debug-evaluate.cc +index fcf9b8448..36ab6f993 100644 +--- a/src/3rdparty/chromium/v8/src/debug/debug-evaluate.cc ++++ b/src/3rdparty/chromium/v8/src/debug/debug-evaluate.cc +@@ -1074,6 +1074,8 @@ void DebugEvaluate::VerifyTransitiveBuiltins(Isolate* isolate) { + // MIPS64 doesn't have PC relative code currently. + // TODO(mips): Add PC relative code to MIPS64. + USE(sanity_check); ++#elif defined(V8_TARGET_ARCH_SW64) ++ USE(sanity_check); + #else + CHECK(sanity_check); + #endif +diff --git a/src/3rdparty/chromium/v8/src/debug/sw64/debug-sw64.cc b/src/3rdparty/chromium/v8/src/debug/sw64/debug-sw64.cc +new file mode 100755 +index 000000000..56748b5de +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/debug/sw64/debug-sw64.cc +@@ -0,0 +1,57 @@ ++// Copyright 2012 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. 
++ ++#if V8_TARGET_ARCH_SW64 ++ ++#include "src/debug/debug.h" ++ ++#include "src/codegen/macro-assembler.h" ++#include "src/debug/liveedit.h" ++#include "src/execution/frames-inl.h" ++ ++namespace v8 { ++namespace internal { ++ ++#define __ ACCESS_MASM(masm) ++ ++void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) { ++ { ++ FrameScope scope(masm, StackFrame::INTERNAL); ++ __ CallRuntime(Runtime::kHandleDebuggerStatement, 0); ++ } ++ __ MaybeDropFrames(); ++ ++ // Return to caller. ++ __ Ret(); ++} ++ ++void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) { ++ // Frame is being dropped: ++ // - Drop to the target frame specified by a1. ++ // - Look up current function on the frame. ++ // - Leave the frame. ++ // - Restart the frame by calling the function. ++ __ mov(fp, a1); ++ __ Ldl(a1, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); ++ ++ // Pop return address and frame. ++ __ LeaveFrame(StackFrame::INTERNAL); ++ ++ __ Ldl(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); ++ __ Ldhu(a0, ++ FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset)); ++ __ mov(a2, a0); ++ ++ __ InvokeFunction(a1, a2, a0, JUMP_FUNCTION); ++} ++ ++ ++const bool LiveEdit::kFrameDropperSupported = true; ++ ++#undef __ ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_TARGET_ARCH_SW64 +diff --git a/src/3rdparty/chromium/v8/src/deoptimizer/sw64/deoptimizer-sw64.cc b/src/3rdparty/chromium/v8/src/deoptimizer/sw64/deoptimizer-sw64.cc +new file mode 100644 +index 000000000..335a6fe35 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/deoptimizer/sw64/deoptimizer-sw64.cc +@@ -0,0 +1,250 @@ ++// Copyright 2011 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#include "src/codegen/macro-assembler.h" ++#include "src/codegen/register-configuration.h" ++#include "src/codegen/safepoint-table.h" ++#include "src/deoptimizer/deoptimizer.h" ++ ++namespace v8 { ++namespace internal { ++ ++const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false; ++const int Deoptimizer::kNonLazyDeoptExitSize = 0; ++const int Deoptimizer::kLazyDeoptExitSize = 0; ++ ++#define __ masm-> ++ ++// This code tries to be close to ia32 code so that any changes can be ++// easily ported. ++void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, ++ Isolate* isolate, ++ DeoptimizeKind deopt_kind) { ++ NoRootArrayScope no_root_array(masm); ++ ++ // Unlike on ARM we don't save all the registers, just the useful ones. ++ // For the rest, there are gaps on the stack, so the offsets remain the same. ++ const int kNumberOfRegisters = Register::kNumRegisters; ++ ++ RegList restored_regs = kJSCallerSaved | kCalleeSaved; ++ RegList saved_regs = restored_regs | sp.bit() | ra.bit(); ++ ++ const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters; ++ ++ // Save all double FPU registers before messing with them. ++ __ Subl(sp, sp, Operand(kDoubleRegsSize)); ++ const RegisterConfiguration* config = RegisterConfiguration::Default(); ++ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { ++ int code = config->GetAllocatableDoubleCode(i); ++ const DoubleRegister fpu_reg = DoubleRegister::from_code(code); ++ int offset = code * kDoubleSize; ++ __ Fstd(fpu_reg, MemOperand(sp, offset)); ++ } ++ ++ // Push saved_regs (needed to populate FrameDescription::registers_). ++ // Leave gaps for other registers. 
++ __ Subl(sp, sp, kNumberOfRegisters * kPointerSize); ++ for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) { ++ if ((saved_regs & (1 << i)) != 0) { ++ __ Stl(ToRegister(i), MemOperand(sp, kPointerSize * i)); ++ } ++ } ++ ++ __ li(a2, Operand(ExternalReference::Create( ++ IsolateAddressId::kCEntryFPAddress, isolate))); ++ __ Stl(fp, MemOperand(a2)); ++ ++ const int kSavedRegistersAreaSize = ++ (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize; ++ ++ // Get the bailout is passed as kRootRegister by the caller. ++ __ mov(a2, kRootRegister); ++ ++ // Get the address of the location in the code object (a3) (return ++ // address for lazy deoptimization) and compute the fp-to-sp delta in ++ // register a4. ++ __ mov(a3, ra); ++ __ Addl(a4, sp, Operand(kSavedRegistersAreaSize)); ++ ++ __ Subl(a4, fp, a4); ++ ++ // Allocate a new deoptimizer object. ++ __ PrepareCallCFunction(6, a5); ++ // Pass six arguments, according to n64 ABI. ++ __ mov(a0, zero_reg); ++ Label context_check; ++ __ Ldl(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset)); ++ __ JumpIfSmi(a1, &context_check); ++ __ Ldl(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); ++ __ bind(&context_check); ++ __ li(a1, Operand(static_cast(deopt_kind))); ++ // a2: bailout id already loaded. ++ // a3: code address or 0 already loaded. ++ // a4: already has fp-to-sp delta. ++ __ li(a5, Operand(ExternalReference::isolate_address(isolate))); ++ ++ // Call Deoptimizer::New(). ++ { ++ AllowExternalCallThatCantCauseGC scope(masm); ++ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6); ++ } ++ ++ // Preserve "deoptimizer" object in register v0 and get the input ++ // frame descriptor pointer to a1 (deoptimizer->input_); ++ // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below. ++ __ mov(a0, v0); ++ __ Ldl(a1, MemOperand(v0, Deoptimizer::input_offset())); ++ ++ // Copy core registers into FrameDescription::registers_[kNumRegisters]. ++ DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters); ++ for (int i = 0; i < kNumberOfRegisters; i++) { ++ int offset = (i * kPointerSize) + FrameDescription::registers_offset(); ++ if ((saved_regs & (1 << i)) != 0) { ++ __ Ldl(a2, MemOperand(sp, i * kPointerSize)); ++ __ Stl(a2, MemOperand(a1, offset)); ++ } else if (FLAG_debug_code) { ++ __ li(a2, kDebugZapValue); ++ __ Stl(a2, MemOperand(a1, offset)); ++ } ++ } ++ ++ int double_regs_offset = FrameDescription::double_registers_offset(); ++ // Copy FPU registers to ++ // double_registers_[DoubleRegister::kNumAllocatableRegisters] ++ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { ++ int code = config->GetAllocatableDoubleCode(i); ++ int dst_offset = code * kDoubleSize + double_regs_offset; ++ int src_offset = ++ code * kDoubleSize + kNumberOfRegisters * kPointerSize; ++ __ Fldd(f0, MemOperand(sp, src_offset)); ++ __ Fstd(f0, MemOperand(a1, dst_offset)); ++ } ++ ++ // Remove the saved registers from the stack. ++ __ Addl(sp, sp, Operand(kSavedRegistersAreaSize)); ++ ++ // Compute a pointer to the unwinding limit in register a2; that is ++ // the first stack slot not part of the input frame. ++ __ Ldl(a2, MemOperand(a1, FrameDescription::frame_size_offset())); ++ __ Addl(a2, a2, sp); ++ ++ // Unwind the stack down to - but not including - the unwinding ++ // limit and copy the contents of the activation frame to the input ++ // frame description. 
++ __ Addl(a3, a1, Operand(FrameDescription::frame_content_offset())); ++ Label pop_loop; ++ Label pop_loop_header; ++ __ BranchShort(&pop_loop_header); ++ __ bind(&pop_loop); ++ __ pop(a4); ++ __ Stl(a4, MemOperand(a3, 0)); ++ __ addl(a3, sizeof(uint64_t), a3); ++ __ bind(&pop_loop_header); ++ __ BranchShort(&pop_loop, ne, a2, Operand(sp)); ++ // Compute the output frame in the deoptimizer. ++ __ push(a0); // Preserve deoptimizer object across call. ++ // a0: deoptimizer object; a1: scratch. ++ __ PrepareCallCFunction(1, a1); ++ // Call Deoptimizer::ComputeOutputFrames(). ++ { ++ AllowExternalCallThatCantCauseGC scope(masm); ++ __ CallCFunction(ExternalReference::compute_output_frames_function(), 1); ++ } ++ __ pop(a0); // Restore deoptimizer object (class Deoptimizer). ++ ++ __ Ldl(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset())); ++ ++ // Replace the current (input) frame with the output frames. ++ Label outer_push_loop, inner_push_loop, ++ outer_loop_header, inner_loop_header; ++ // Outer loop state: a4 = current "FrameDescription** output_", ++ // a1 = one past the last FrameDescription**. ++ __ Ldw(a1, MemOperand(a0, Deoptimizer::output_count_offset())); ++ __ Ldl(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_. ++ __ s8addl(a1, a4, a1); DCHECK_EQ(kPointerSizeLog2, 3); ++ __ BranchShort(&outer_loop_header); ++ __ bind(&outer_push_loop); ++ // Inner loop state: a2 = current FrameDescription*, a3 = loop index. ++ __ Ldl(a2, MemOperand(a4, 0)); // output_[ix] ++ __ Ldl(a3, MemOperand(a2, FrameDescription::frame_size_offset())); ++ __ BranchShort(&inner_loop_header); ++ __ bind(&inner_push_loop); ++ __ Subl(a3, a3, Operand(sizeof(uint64_t))); ++ __ Addl(t9, a2, Operand(a3)); ++ __ Ldl(t10, MemOperand(t9, FrameDescription::frame_content_offset())); ++ __ push(t10); ++ __ bind(&inner_loop_header); ++ __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg)); ++ ++ __ Addl(a4, a4, Operand(kPointerSize)); ++ __ bind(&outer_loop_header); ++ __ BranchShort(&outer_push_loop, lt, a4, Operand(a1)); ++ ++ __ Ldl(a1, MemOperand(a0, Deoptimizer::input_offset())); ++ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { ++ int code = config->GetAllocatableDoubleCode(i); ++ const DoubleRegister fpu_reg = DoubleRegister::from_code(code); ++ int src_offset = code * kDoubleSize + double_regs_offset; ++ __ Fldd(fpu_reg, MemOperand(a1, src_offset)); ++ } ++ ++ // Push pc and continuation from the last output frame. ++ __ Ldl(t9, MemOperand(a2, FrameDescription::pc_offset())); ++ __ push(t9); ++ __ Ldl(t9, MemOperand(a2, FrameDescription::continuation_offset())); ++ __ push(t9); ++ ++ // Technically restoring 'at' should work unless zero_reg is also restored ++ // but it's safer to check for this. ++ DCHECK(!(at.bit() & restored_regs)); ++ // Restore the registers from the last output frame. ++ __ mov(at, a2); ++ for (int i = kNumberOfRegisters - 1; i >= 0; i--) { ++ int offset = (i * kPointerSize) + FrameDescription::registers_offset(); ++ if ((restored_regs & (1 << i)) != 0) { ++ __ Ldl(ToRegister(i), MemOperand(at, offset)); ++ } ++ } ++ ++ __ pop(at); // Get continuation, leave pc on stack. ++ __ pop(ra); ++ __ Jump(at); ++ __ halt();//stop("Unreachable."); ++} ++ ++ ++// Maximum size of a table entry generated below. 
++#ifdef _SW64_ARCH_SW64R3 ++const int Deoptimizer::table_entry_size_ = 2 * kInstrSize; ++#else ++const int Deoptimizer::table_entry_size_ = 3 * kInstrSize; ++#endif ++ ++Float32 RegisterValues::GetFloatRegister(unsigned n) const { ++ return Float32::FromBits( ++ static_cast(double_registers_[n].get_bits())); ++} ++ ++void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) { ++ SetFrameSlot(offset, value); ++} ++ ++ ++void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) { ++ SetFrameSlot(offset, value); ++} ++ ++ ++void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) { ++ // No embedded constant pool support. ++ UNREACHABLE(); ++} ++ ++void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; } ++ ++#undef __ ++ ++ ++} // namespace internal ++} // namespace v8 +diff --git a/src/3rdparty/chromium/v8/src/diagnostics/gdb-jit.cc b/src/3rdparty/chromium/v8/src/diagnostics/gdb-jit.cc +index 5f3643730..b5f5d327d 100644 +--- a/src/3rdparty/chromium/v8/src/diagnostics/gdb-jit.cc ++++ b/src/3rdparty/chromium/v8/src/diagnostics/gdb-jit.cc +@@ -1081,6 +1081,8 @@ class DebugInfoSection : public DebugSection { + w->Write(DW_OP_reg31); // The frame pointer is here on PPC64. + #elif V8_TARGET_ARCH_S390 + w->Write(DW_OP_reg11); // The frame pointer's here on S390. ++#elif V8_TARGET_ARCH_SW64 ++ UNIMPLEMENTED(); + #else + #error Unsupported target architecture. + #endif +diff --git a/src/3rdparty/chromium/v8/src/diagnostics/perf-jit.h b/src/3rdparty/chromium/v8/src/diagnostics/perf-jit.h +index dbe78ddf2..e80f8b311 100644 +--- a/src/3rdparty/chromium/v8/src/diagnostics/perf-jit.h ++++ b/src/3rdparty/chromium/v8/src/diagnostics/perf-jit.h +@@ -86,6 +86,7 @@ class PerfJitLogger : public CodeEventLogger { + static const uint32_t kElfMachARM64 = 183; + static const uint32_t kElfMachS390x = 22; + static const uint32_t kElfMachPPC64 = 21; ++ static const uint32_t kElfMachSW64 = 0x9916; + + uint32_t GetElfMach() { + #if V8_TARGET_ARCH_IA32 +@@ -104,6 +105,8 @@ class PerfJitLogger : public CodeEventLogger { + return kElfMachS390x; + #elif V8_TARGET_ARCH_PPC64 + return kElfMachPPC64; ++#elif V8_TARGET_ARCH_SW64 ++ return kElfMachSW64; + #else + UNIMPLEMENTED(); + return 0; +diff --git a/src/3rdparty/chromium/v8/src/diagnostics/sw64/disasm-sw64.cc b/src/3rdparty/chromium/v8/src/diagnostics/sw64/disasm-sw64.cc +new file mode 100755 +index 000000000..ff7e4bd7e +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/diagnostics/sw64/disasm-sw64.cc +@@ -0,0 +1,3439 @@ ++// Copyright 2012 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++// A Disassembler object is used to disassemble a block of code instruction by ++// instruction. The default implementation of the NameConverter object can be ++// overriden to modify register names or to do symbol lookup on addresses. ++// ++// The example below will disassemble a block of code and print it to stdout. 
++// ++// NameConverter converter; ++// Disassembler d(converter); ++// for (byte* pc = begin; pc < end;) { ++// v8::internal::EmbeddedVector buffer; ++// byte* prev_pc = pc; ++// pc += d.InstructionDecode(buffer, pc); ++// printf("%p %08x %s\n", ++// prev_pc, *reinterpret_cast(prev_pc), buffer); ++// } ++// ++// The Disassembler class also has a convenience method to disassemble a block ++// of code into a FILE*, meaning that the above functionality could also be ++// achieved by just calling Disassembler::Disassemble(stdout, begin, end); ++ ++#include ++#include ++#include ++#include ++ ++#ifdef SW64 ++# include ++# include ++#endif ++ ++#if V8_TARGET_ARCH_SW64 ++ ++#include "src/base/platform/platform.h" ++#include "src/codegen/macro-assembler.h" ++#include "src/codegen/sw64/constants-sw64.h" ++#include "src/diagnostics/disasm.h" ++ ++namespace v8 { ++namespace internal { ++ ++//------------------------------------------------------------------------------ ++ ++// Decoder decodes and disassembles instructions into an output buffer. ++// It uses the converter to convert register names and call destinations into ++// more informative description. ++class Decoder { ++ public: ++ Decoder(const disasm::NameConverter& converter, ++ v8::internal::Vector out_buffer) ++ : converter_(converter), out_buffer_(out_buffer), out_buffer_pos_(0) { ++ out_buffer_[out_buffer_pos_] = '\0'; ++ } ++ ++ ~Decoder() {} ++ ++ // Writes one disassembled instruction into 'buffer' (0-terminated). ++ // Returns the length of the disassembled machine instruction in bytes. ++ int InstructionDecode(byte* instruction); ++#ifdef SW64 ++ // address decode_instructions(address start, address end); ++ byte* decode_instructions(byte* start, byte* end); ++ // tries to load library and return whether it succedded. ++ static bool load_library(); ++ ++ private: ++ // this is the type of the dll entry point: ++ typedef void* (*decode_func)(void* start, void* end, ++ void* (*event_callback)(void*, const char*, void*), ++ void* event_stream, ++ int (*printf_callback)(void*, const char*, ...), ++ void* printf_stream, ++ const char* options); ++ // points to the library. ++ static void* _library; ++ // bailout ++ static bool _tried_to_load_library; ++ // points to the decode function. ++ static decode_func _decode_instructions; ++#endif ++ ++ private: ++ // Bottleneck functions to print into the out_buffer. ++ void PrintChar(const char ch); ++ void Print(const char* str); ++ ++ // Printing of common values. 
++ void PrintRegister(int reg); ++ void PrintFPURegister(int freg); ++ void PrintMSARegister(int wreg); ++ void PrintFPUStatusRegister(int freg); ++ void PrintMSAControlRegister(int creg); ++ void PrintRs(Instruction* instr); ++ void PrintRt(Instruction* instr); ++ void PrintRd(Instruction* instr); ++ void PrintFs(Instruction* instr); ++ void PrintFt(Instruction* instr); ++ void PrintFd(Instruction* instr); ++ void PrintSa(Instruction* instr); ++ void PrintLsaSa(Instruction* instr); ++ void PrintSd(Instruction* instr); ++ void PrintSs1(Instruction* instr); ++ void PrintSs2(Instruction* instr); ++ void PrintSs3(Instruction* instr); ++ void PrintSs4(Instruction* instr); ++ void PrintSs5(Instruction* instr); ++ void PrintBc(Instruction* instr); ++ void PrintCc(Instruction* instr); ++ void PrintFunction(Instruction* instr); ++ void PrintSecondaryField(Instruction* instr); ++ void PrintUImm9(Instruction* instr); ++ void PrintSImm9(Instruction* instr); ++ void PrintUImm16(Instruction* instr); ++ void PrintSImm16(Instruction* instr); ++ void PrintXImm16(Instruction* instr); ++ void PrintPCImm16(Instruction* instr, int delta_pc, int n_bits); ++ void PrintXImm18(Instruction* instr); ++ void PrintSImm18(Instruction* instr); ++ void PrintXImm19(Instruction* instr); ++ void PrintSImm19(Instruction* instr); ++ void PrintXImm21(Instruction* instr); ++ void PrintSImm21(Instruction* instr); ++ void PrintPCImm21(Instruction* instr, int delta_pc, int n_bits); ++ void PrintXImm26(Instruction* instr); ++ void PrintSImm26(Instruction* instr); ++ void PrintPCImm26(Instruction* instr, int delta_pc, int n_bits); ++ void PrintPCImm26(Instruction* instr); ++ void PrintCode(Instruction* instr); // For break and trap instructions. ++ void PrintFormat(Instruction* instr); // For floating format postfix. ++ void PrintBp2(Instruction* instr); ++ void PrintBp3(Instruction* instr); ++ void PrintMsaDataFormat(Instruction* instr); ++ void PrintMsaXImm8(Instruction* instr); ++ void PrintMsaImm8(Instruction* instr); ++ void PrintMsaImm5(Instruction* instr); ++ void PrintMsaSImm5(Instruction* instr); ++ void PrintMsaSImm10(Instruction* instr, bool is_mi10 = false); ++ void PrintMsaImmBit(Instruction* instr); ++ void PrintMsaImmElm(Instruction* instr); ++ void PrintMsaCopy(Instruction* instr); ++ // Printing of instruction name. ++ void PrintInstructionName(Instruction* instr); ++ ++ // Handle formatting of instructions and their options. ++ int FormatRegister(Instruction* instr, const char* option); ++ int FormatFPURegister(Instruction* instr, const char* option); ++ int FormatMSARegister(Instruction* instr, const char* option); ++ int FormatOption(Instruction* instr, const char* option); ++ void Format(Instruction* instr, const char* format); ++ void Unknown(Instruction* instr); ++ int DecodeBreakInstr(Instruction* instr); ++ ++ // Each of these functions decodes one particular instruction type. 
++ bool DecodeTypeRegisterRsType(Instruction* instr); ++ void DecodeTypeRegisterSRsType(Instruction* instr); ++ void DecodeTypeRegisterDRsType(Instruction* instr); ++ void DecodeTypeRegisterLRsType(Instruction* instr); ++ void DecodeTypeRegisterWRsType(Instruction* instr); ++ void DecodeTypeRegisterSPECIAL(Instruction* instr); ++ void DecodeTypeRegisterSPECIAL2(Instruction* instr); ++ void DecodeTypeRegisterSPECIAL3(Instruction* instr); ++ void DecodeTypeRegisterCOP1(Instruction* instr); ++ void DecodeTypeRegisterCOP1X(Instruction* instr); ++ int DecodeTypeRegister(Instruction* instr); ++ ++ void DecodeTypeImmediateCOP1(Instruction* instr); ++ void DecodeTypeImmediateREGIMM(Instruction* instr); ++ void DecodeTypeImmediateSPECIAL3(Instruction* instr); ++ void DecodeTypeImmediate(Instruction* instr); ++ ++ void DecodeTypeJump(Instruction* instr); ++ ++ void DecodeTypeMsaI8(Instruction* instr); ++ void DecodeTypeMsaI5(Instruction* instr); ++ void DecodeTypeMsaI10(Instruction* instr); ++ void DecodeTypeMsaELM(Instruction* instr); ++ void DecodeTypeMsaBIT(Instruction* instr); ++ void DecodeTypeMsaMI10(Instruction* instr); ++ void DecodeTypeMsa3R(Instruction* instr); ++ void DecodeTypeMsa3RF(Instruction* instr); ++ void DecodeTypeMsaVec(Instruction* instr); ++ void DecodeTypeMsa2R(Instruction* instr); ++ void DecodeTypeMsa2RF(Instruction* instr); ++ ++#ifdef SW64 //jzy 20150213 ++ void SwDecodeTypeSyscall(Instruction* instr); ++ void SwDecodeTypeTransferance(Instruction* instr); ++ void SwDecodeTypeStorage(Instruction* instr); ++ void SwDecodeTypeSimpleCalculation(Instruction* instr); ++ void SwDecodeTypeCompositeCalculation(Instruction* instr); ++ int SwGetInstructionRange(const char* format, int& hi, int& lo); ++ ++ void SwPrintRa(Instruction* instr); ++ void SwPrintRb(Instruction* instr); ++ int SwPrintRc(Instruction* instr, const char* format); ++ void SwPrintRd(Instruction* instr); ++ void SwPrintFa(Instruction* instr); ++ void SwPrintFb(Instruction* instr); ++ int SwPrintFc(Instruction* instr, const char* format); ++ void SwPrintFd(Instruction* instr); ++ ++ //Print unsigned immediate value ++ int SwPrintImm(Instruction* instr, const char* format); ++ ++ //Print signed immediate value ++ int SwPrintDisp(Instruction* instr, const char* format); ++ int SwPrintDispTransfer(Instruction* instr, const char* format);//ld 20150320 ++ ++ //Print int to hex ++ int SwPrintHex(Instruction* instr,const char* format);//cjq 20150319 ++ ++ ++private: ++ void SwDecodeTypeCompositeCalculationInteger(Instruction* instr); ++ void SwDecodeTypeCompositeCalculationFloatintPoint(Instruction* instr); ++#endif ++ ++ ++ const disasm::NameConverter& converter_; ++ v8::internal::Vector out_buffer_; ++ int out_buffer_pos_; ++ byte* instr_pc_; //ld 20150323; ++ ++ DISALLOW_COPY_AND_ASSIGN(Decoder); ++}; ++ ++ ++// Support for assertions in the Decoder formatting functions. ++#define STRING_STARTS_WITH(string, compare_string) \ ++ (strncmp(string, compare_string, strlen(compare_string)) == 0) ++ ++ ++// Append the ch to the output buffer. ++void Decoder::PrintChar(const char ch) { ++ out_buffer_[out_buffer_pos_++] = ch; ++} ++ ++ ++// Append the str to the output buffer. ++void Decoder::Print(const char* str) { ++ char cur = *str++; ++ while (cur != '\0' && (out_buffer_pos_ < (out_buffer_.length() - 1))) { ++ PrintChar(cur); ++ cur = *str++; ++ } ++ out_buffer_[out_buffer_pos_] = 0; ++} ++ ++ ++// Print the register name according to the active name converter. 
++void Decoder::PrintRegister(int reg) { ++ Print(converter_.NameOfCPURegister(reg)); ++} ++ ++ ++void Decoder::PrintRs(Instruction* instr) { ++ int reg = instr->RsValue(); ++ PrintRegister(reg); ++} ++ ++ ++void Decoder::PrintRt(Instruction* instr) { ++ int reg = instr->RtValue(); ++ PrintRegister(reg); ++} ++ ++ ++void Decoder::PrintRd(Instruction* instr) { ++ int reg = instr->RdValue(); ++ PrintRegister(reg); ++} ++ ++ ++// Print the FPUregister name according to the active name converter. ++void Decoder::PrintFPURegister(int freg) { ++ Print(converter_.NameOfXMMRegister(freg)); ++} ++ ++void Decoder::PrintMSARegister(int wreg) { Print(MSARegisters::Name(wreg)); } ++ ++void Decoder::PrintFPUStatusRegister(int freg) { ++ switch (freg) { ++ case kFCSRRegister: ++ Print("FCSR"); ++ break; ++ default: ++ Print(converter_.NameOfXMMRegister(freg)); ++ } ++} ++ ++void Decoder::PrintMSAControlRegister(int creg) { ++ switch (creg) { ++ case kMSAIRRegister: ++ Print("MSAIR"); ++ break; ++ case kMSACSRRegister: ++ Print("MSACSR"); ++ break; ++ default: ++ Print("no_msacreg"); ++ } ++} ++ ++void Decoder::PrintFs(Instruction* instr) { ++ int freg = instr->RsValue(); ++ PrintFPURegister(freg); ++} ++ ++ ++void Decoder::PrintFt(Instruction* instr) { ++ int freg = instr->RtValue(); ++ PrintFPURegister(freg); ++} ++ ++ ++void Decoder::PrintFd(Instruction* instr) { ++ int freg = instr->RdValue(); ++ PrintFPURegister(freg); ++} ++ ++ ++// Print the integer value of the sa field. ++void Decoder::PrintSa(Instruction* instr) { ++ int sa = instr->SaValue(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa); ++} ++ ++ ++// Print the integer value of the sa field of a lsa instruction. ++void Decoder::PrintLsaSa(Instruction* instr) { ++ int sa = instr->LsaSaValue() + 1; ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa); ++} ++ ++ ++// Print the integer value of the rd field, when it is not used as reg. ++void Decoder::PrintSd(Instruction* instr) { ++ int sd = instr->RdValue(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sd); ++} ++ ++// Print the integer value of ext/dext/dextu size from the msbd field. ++void Decoder::PrintSs1(Instruction* instr) { ++ int msbd = instr->RdValue(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", msbd + 1); ++} ++ ++// Print the integer value of ins/dins/dinsu size from the msb and lsb fields ++// (for dinsu it is msbminus32 and lsbminus32 fields). ++void Decoder::PrintSs2(Instruction* instr) { ++ int msb = instr->RdValue(); ++ int lsb = instr->SaValue(); ++ out_buffer_pos_ += ++ SNPrintF(out_buffer_ + out_buffer_pos_, "%d", msb - lsb + 1); ++} ++ ++// Print the integer value of dextm size from the msbdminus32 field. ++void Decoder::PrintSs3(Instruction* instr) { ++ int msbdminus32 = instr->RdValue(); ++ out_buffer_pos_ += ++ SNPrintF(out_buffer_ + out_buffer_pos_, "%d", msbdminus32 + 32 + 1); ++} ++ ++// Print the integer value of dinsm size from the msbminus32 and lsb fields. ++void Decoder::PrintSs4(Instruction* instr) { ++ int msbminus32 = instr->RdValue(); ++ int lsb = instr->SaValue(); ++ out_buffer_pos_ += ++ SNPrintF(out_buffer_ + out_buffer_pos_, "%d", msbminus32 + 32 - lsb + 1); ++} ++ ++// Print the integer value of dextu/dinsu pos from the lsbminus32 field. 
++void Decoder::PrintSs5(Instruction* instr) { ++ int lsbminus32 = instr->SaValue(); ++ out_buffer_pos_ += ++ SNPrintF(out_buffer_ + out_buffer_pos_, "%d", lsbminus32 + 32); ++} ++ ++ ++// Print the integer value of the cc field for the bc1t/f instructions. ++void Decoder::PrintBc(Instruction* instr) { ++ int cc = instr->FBccValue(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", cc); ++} ++ ++ ++// Print the integer value of the cc field for the FP compare instructions. ++void Decoder::PrintCc(Instruction* instr) { ++ int cc = instr->FCccValue(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "cc(%d)", cc); ++} ++ ++// Print 9-bit unsigned immediate value. ++void Decoder::PrintUImm9(Instruction* instr) { ++ int32_t imm = instr->Imm9Value(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", imm); ++} ++ ++// Print 9-bit signed immediate value. ++void Decoder::PrintSImm9(Instruction* instr) { ++ int32_t imm = ((instr->Imm9Value()) << 23) >> 23; ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm); ++} ++ ++// Print 16-bit unsigned immediate value. ++void Decoder::PrintUImm16(Instruction* instr) { ++ int32_t imm = instr->Imm16Value(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", imm); ++} ++ ++ ++// Print 16-bit signed immediate value. ++void Decoder::PrintSImm16(Instruction* instr) { ++ int32_t imm = ++ ((instr->Imm16Value()) << (32 - kImm16Bits)) >> (32 - kImm16Bits); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm); ++} ++ ++ ++// Print 16-bit hexa immediate value. ++void Decoder::PrintXImm16(Instruction* instr) { ++ int32_t imm = instr->Imm16Value(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm); ++} ++ ++ ++// Print absoulte address for 16-bit offset or immediate value. ++// The absolute address is calculated according following expression: ++// PC + delta_pc + (offset << n_bits) ++void Decoder::PrintPCImm16(Instruction* instr, int delta_pc, int n_bits) { ++ int16_t offset = instr->Imm16Value(); ++ out_buffer_pos_ += ++ SNPrintF(out_buffer_ + out_buffer_pos_, "%s", ++ converter_.NameOfAddress(reinterpret_cast(instr) + ++ delta_pc + (offset << n_bits))); ++} ++ ++ ++// Print 18-bit signed immediate value. ++void Decoder::PrintSImm18(Instruction* instr) { ++ int32_t imm = ++ ((instr->Imm18Value()) << (32 - kImm18Bits)) >> (32 - kImm18Bits); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm); ++} ++ ++ ++// Print 18-bit hexa immediate value. ++void Decoder::PrintXImm18(Instruction* instr) { ++ int32_t imm = instr->Imm18Value(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm); ++} ++ ++ ++// Print 19-bit hexa immediate value. ++void Decoder::PrintXImm19(Instruction* instr) { ++ int32_t imm = instr->Imm19Value(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm); ++} ++ ++ ++// Print 19-bit signed immediate value. ++void Decoder::PrintSImm19(Instruction* instr) { ++ int32_t imm19 = instr->Imm19Value(); ++ // set sign ++ imm19 <<= (32 - kImm19Bits); ++ imm19 >>= (32 - kImm19Bits); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm19); ++} ++ ++ ++// Print 21-bit immediate value. ++void Decoder::PrintXImm21(Instruction* instr) { ++ uint32_t imm = instr->Imm21Value(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm); ++} ++ ++ ++// Print 21-bit signed immediate value. 
++void Decoder::PrintSImm21(Instruction* instr) { ++ int32_t imm21 = instr->Imm21Value(); ++ // set sign ++ imm21 <<= (32 - kImm21Bits); ++ imm21 >>= (32 - kImm21Bits); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm21); ++} ++ ++ ++// Print absoulte address for 21-bit offset or immediate value. ++// The absolute address is calculated according following expression: ++// PC + delta_pc + (offset << n_bits) ++void Decoder::PrintPCImm21(Instruction* instr, int delta_pc, int n_bits) { ++ int32_t imm21 = instr->Imm21Value(); ++ // set sign ++ imm21 <<= (32 - kImm21Bits); ++ imm21 >>= (32 - kImm21Bits); ++ out_buffer_pos_ += ++ SNPrintF(out_buffer_ + out_buffer_pos_, "%s", ++ converter_.NameOfAddress(reinterpret_cast(instr) + ++ delta_pc + (imm21 << n_bits))); ++} ++ ++ ++// Print 26-bit hex immediate value. ++void Decoder::PrintXImm26(Instruction* instr) { ++ uint64_t target = static_cast(instr->Imm26Value()) ++ << kImmFieldShift; ++ target = (reinterpret_cast(instr) & ~0xFFFFFFF) | target; ++ out_buffer_pos_ += ++ SNPrintF(out_buffer_ + out_buffer_pos_, "0x%" PRIx64, target); ++} ++ ++ ++// Print 26-bit signed immediate value. ++void Decoder::PrintSImm26(Instruction* instr) { ++ int32_t imm26 = instr->Imm26Value(); ++ // set sign ++ imm26 <<= (32 - kImm26Bits); ++ imm26 >>= (32 - kImm26Bits); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm26); ++} ++ ++ ++// Print absoulte address for 26-bit offset or immediate value. ++// The absolute address is calculated according following expression: ++// PC + delta_pc + (offset << n_bits) ++void Decoder::PrintPCImm26(Instruction* instr, int delta_pc, int n_bits) { ++ int32_t imm26 = instr->Imm26Value(); ++ // set sign ++ imm26 <<= (32 - kImm26Bits); ++ imm26 >>= (32 - kImm26Bits); ++ out_buffer_pos_ += ++ SNPrintF(out_buffer_ + out_buffer_pos_, "%s", ++ converter_.NameOfAddress(reinterpret_cast(instr) + ++ delta_pc + (imm26 << n_bits))); ++} ++ ++ ++// Print absoulte address for 26-bit offset or immediate value. ++// The absolute address is calculated according following expression: ++// PC[GPRLEN-1 .. 28] || instr_index26 || 00 ++void Decoder::PrintPCImm26(Instruction* instr) { ++ int32_t imm26 = instr->Imm26Value(); ++ uint64_t pc_mask = ~0xFFFFFFF; ++ uint64_t pc = ((uint64_t)(instr + 1) & pc_mask) | (imm26 << 2); ++ out_buffer_pos_ += ++ SNPrintF(out_buffer_ + out_buffer_pos_, "%s", ++ converter_.NameOfAddress((reinterpret_cast(pc)))); ++} ++ ++ ++void Decoder::PrintBp2(Instruction* instr) { ++ int bp2 = instr->Bp2Value(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", bp2); ++} ++ ++ ++void Decoder::PrintBp3(Instruction* instr) { ++ int bp3 = instr->Bp3Value(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", bp3); ++} ++ ++ ++// Print 26-bit immediate value. ++void Decoder::PrintCode(Instruction* instr) { ++ if (instr->OpcodeFieldRaw() != SPECIAL) ++ return; // Not a break or trap instruction. ++ switch (instr->FunctionFieldRaw()) { ++ case BREAK: { ++ int32_t code = instr->Bits(25, 6); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, ++ "0x%05x (%d)", code, code); ++ break; ++ } ++ case TGE: ++ case TGEU: ++ case TLT: ++ case TLTU: ++ case TEQ: ++ case TNE: { ++ int32_t code = instr->Bits(15, 6); ++ out_buffer_pos_ += ++ SNPrintF(out_buffer_ + out_buffer_pos_, "0x%03x", code); ++ break; ++ } ++ default: // Not a break or trap instruction. 
++ break; ++ } ++} ++ ++void Decoder::PrintMsaXImm8(Instruction* instr) { ++ int32_t imm = instr->MsaImm8Value(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm); ++} ++ ++void Decoder::PrintMsaImm8(Instruction* instr) { ++ int32_t imm = instr->MsaImm8Value(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", imm); ++} ++ ++void Decoder::PrintMsaImm5(Instruction* instr) { ++ int32_t imm = instr->MsaImm5Value(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", imm); ++} ++ ++void Decoder::PrintMsaSImm5(Instruction* instr) { ++ int32_t imm = instr->MsaImm5Value(); ++ imm <<= (32 - kMsaImm5Bits); ++ imm >>= (32 - kMsaImm5Bits); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm); ++} ++ ++void Decoder::PrintMsaSImm10(Instruction* instr, bool is_mi10) { ++ int32_t imm = is_mi10 ? instr->MsaImmMI10Value() : instr->MsaImm10Value(); ++ imm <<= (32 - kMsaImm10Bits); ++ imm >>= (32 - kMsaImm10Bits); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm); ++} ++ ++void Decoder::PrintMsaImmBit(Instruction* instr) { ++ int32_t m = instr->MsaBitMValue(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", m); ++} ++ ++void Decoder::PrintMsaImmElm(Instruction* instr) { ++ int32_t n = instr->MsaElmNValue(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", n); ++} ++ ++void Decoder::PrintMsaCopy(Instruction* instr) { ++ int32_t rd = instr->WdValue(); ++ int32_t ws = instr->WsValue(); ++ int32_t n = instr->MsaElmNValue(); ++ out_buffer_pos_ += ++ SNPrintF(out_buffer_ + out_buffer_pos_, "%s, %s[%u]", ++ converter_.NameOfCPURegister(rd), MSARegisters::Name(ws), n); ++} ++ ++void Decoder::PrintFormat(Instruction* instr) { ++ char formatLetter = ' '; ++ switch (instr->RsFieldRaw()) { ++ case S: ++ formatLetter = 's'; ++ break; ++ case D: ++ formatLetter = 'd'; ++ break; ++ case W: ++ formatLetter = 'w'; ++ break; ++ case L: ++ formatLetter = 'l'; ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ PrintChar(formatLetter); ++} ++ ++void Decoder::PrintMsaDataFormat(Instruction* instr) { ++ DCHECK(instr->IsMSAInstr()); ++ char df = ' '; ++ if (instr->IsMSABranchInstr()) { ++ switch (instr->RsFieldRaw()) { ++ case BZ_V: ++ case BNZ_V: ++ df = 'v'; ++ break; ++ case BZ_B: ++ case BNZ_B: ++ df = 'b'; ++ break; ++ case BZ_H: ++ case BNZ_H: ++ df = 'h'; ++ break; ++ case BZ_W: ++ case BNZ_W: ++ df = 'w'; ++ break; ++ case BZ_D: ++ case BNZ_D: ++ df = 'd'; ++ break; ++ default: ++ UNREACHABLE(); ++ break; ++ } ++ } else { ++ char DF[] = {'b', 'h', 'w', 'd'}; ++ switch (instr->MSAMinorOpcodeField()) { ++ case kMsaMinorI5: ++ case kMsaMinorI10: ++ case kMsaMinor3R: ++ df = DF[instr->Bits(22, 21)]; ++ break; ++ case kMsaMinorMI10: ++ df = DF[instr->Bits(1, 0)]; ++ break; ++ case kMsaMinorBIT: ++ df = DF[instr->MsaBitDf()]; ++ break; ++ case kMsaMinorELM: ++ df = DF[instr->MsaElmDf()]; ++ break; ++ case kMsaMinor3RF: { ++ uint32_t opcode = instr->InstructionBits() & kMsa3RFMask; ++ switch (opcode) { ++ case FEXDO: ++ case FTQ: ++ case MUL_Q: ++ case MADD_Q: ++ case MSUB_Q: ++ case MULR_Q: ++ case MADDR_Q: ++ case MSUBR_Q: ++ df = DF[1 + instr->Bit(21)]; ++ break; ++ default: ++ df = DF[2 + instr->Bit(21)]; ++ break; ++ } ++ } break; ++ case kMsaMinor2R: ++ df = DF[instr->Bits(17, 16)]; ++ break; ++ case kMsaMinor2RF: ++ df = DF[2 + instr->Bit(16)]; ++ break; ++ default: ++ UNREACHABLE(); ++ break; ++ } ++ } ++ ++ PrintChar(df); ++} ++ ++// Printing of instruction name. 
++void Decoder::PrintInstructionName(Instruction* instr) { ++} ++ ++ ++// Handle all register based formatting in this function to reduce the ++// complexity of FormatOption. ++int Decoder::FormatRegister(Instruction* instr, const char* format) { ++ DCHECK_EQ(format[0], 'r'); ++ if (format[1] == 's') { // 'rs: Rs register. ++ int reg = instr->RsValue(); ++ PrintRegister(reg); ++ return 2; ++ } else if (format[1] == 't') { // 'rt: rt register. ++ int reg = instr->RtValue(); ++ PrintRegister(reg); ++ return 2; ++ } else if (format[1] == 'd') { // 'rd: rd register. ++ int reg = instr->RdValue(); ++ PrintRegister(reg); ++ return 2; ++ } ++ UNREACHABLE(); ++} ++ ++ ++// Handle all FPUregister based formatting in this function to reduce the ++// complexity of FormatOption. ++int Decoder::FormatFPURegister(Instruction* instr, const char* format) { ++ DCHECK_EQ(format[0], 'f'); ++ if ((CTC1 == instr->RsFieldRaw()) || (CFC1 == instr->RsFieldRaw())) { ++ if (format[1] == 's') { // 'fs: fs register. ++ int reg = instr->FsValue(); ++ PrintFPUStatusRegister(reg); ++ return 2; ++ } else if (format[1] == 't') { // 'ft: ft register. ++ int reg = instr->FtValue(); ++ PrintFPUStatusRegister(reg); ++ return 2; ++ } else if (format[1] == 'd') { // 'fd: fd register. ++ int reg = instr->FdValue(); ++ PrintFPUStatusRegister(reg); ++ return 2; ++ } else if (format[1] == 'r') { // 'fr: fr register. ++ int reg = instr->FrValue(); ++ PrintFPUStatusRegister(reg); ++ return 2; ++ } ++ } else { ++ if (format[1] == 's') { // 'fs: fs register. ++ int reg = instr->FsValue(); ++ PrintFPURegister(reg); ++ return 2; ++ } else if (format[1] == 't') { // 'ft: ft register. ++ int reg = instr->FtValue(); ++ PrintFPURegister(reg); ++ return 2; ++ } else if (format[1] == 'd') { // 'fd: fd register. ++ int reg = instr->FdValue(); ++ PrintFPURegister(reg); ++ return 2; ++ } else if (format[1] == 'r') { // 'fr: fr register. ++ int reg = instr->FrValue(); ++ PrintFPURegister(reg); ++ return 2; ++ } ++ } ++ UNREACHABLE(); ++} ++ ++// Handle all MSARegister based formatting in this function to reduce the ++// complexity of FormatOption. ++int Decoder::FormatMSARegister(Instruction* instr, const char* format) { ++ DCHECK_EQ(format[0], 'w'); ++ if (format[1] == 's') { ++ int reg = instr->WsValue(); ++ PrintMSARegister(reg); ++ return 2; ++ } else if (format[1] == 't') { ++ int reg = instr->WtValue(); ++ PrintMSARegister(reg); ++ return 2; ++ } else if (format[1] == 'd') { ++ int reg = instr->WdValue(); ++ PrintMSARegister(reg); ++ return 2; ++ } ++ ++ UNREACHABLE(); ++} ++ ++// FormatOption takes a formatting string and interprets it based on ++// the current instructions. The format string points to the first ++// character of the option string (the option escape has already been ++// consumed by the caller.) FormatOption returns the number of ++// characters that were consumed from the formatting string. 
++#ifdef SW64//jzy 20150213:TODO ++int Decoder::FormatOption(Instruction* instr, const char* format) { ++ switch (format[0]) { ++ case 'r': { ++ switch (format[1]) { ++ case 'a' : ++ SwPrintRa(instr); ++ break; ++ case 'b': ++ SwPrintRb(instr); ++ break; ++ case 'c': ++ return SwPrintRc(instr, format); ++ case 'd': ++ SwPrintRd(instr); ++ break; ++ } ++ return 2; ++ } ++ ++ case 'f': { ++ switch (format[1]) { ++ case 'a' : ++ SwPrintFa(instr); ++ break; ++ case 'b': ++ SwPrintFb(instr); ++ break; ++ case 'c': ++ return SwPrintFc(instr, format); ++ case 'd': ++ SwPrintFd(instr); ++ break; ++ } ++ return 2; ++ } ++ ++ case 'i': ++ return SwPrintImm(instr, format); ++ case 'd': ++ return SwPrintDisp(instr, format); ++ case 't'://ld 20150323 ++ return SwPrintDispTransfer(instr,format); ++ //modified by cjq 20150318 ++ case '0': ++ return SwPrintHex(instr, format); ++ ++ } ++ UNREACHABLE(); ++ return -1; ++} ++#else ++int Decoder::FormatOption(Instruction* instr, const char* format) { ++ switch (format[0]) { ++ case 'c': { // 'code for break or trap instructions. ++ DCHECK(STRING_STARTS_WITH(format, "code")); ++ PrintCode(instr); ++ return 4; ++ } ++ case 'i': { // 'imm16u or 'imm26. ++ if (format[3] == '1') { ++ if (format[4] == '6') { ++ DCHECK(STRING_STARTS_WITH(format, "imm16")); ++ switch (format[5]) { ++ case 's': ++ DCHECK(STRING_STARTS_WITH(format, "imm16s")); ++ PrintSImm16(instr); ++ break; ++ case 'u': ++ DCHECK(STRING_STARTS_WITH(format, "imm16u")); ++ PrintSImm16(instr); ++ break; ++ case 'x': ++ DCHECK(STRING_STARTS_WITH(format, "imm16x")); ++ PrintXImm16(instr); ++ break; ++ case 'p': { // The PC relative address. ++ DCHECK(STRING_STARTS_WITH(format, "imm16p")); ++ int delta_pc = 0; ++ int n_bits = 0; ++ switch (format[6]) { ++ case '4': { ++ DCHECK(STRING_STARTS_WITH(format, "imm16p4")); ++ delta_pc = 4; ++ switch (format[8]) { ++ case '2': ++ DCHECK(STRING_STARTS_WITH(format, "imm16p4s2")); ++ n_bits = 2; ++ PrintPCImm16(instr, delta_pc, n_bits); ++ return 9; ++ } ++ } ++ } ++ } ++ } ++ return 6; ++ } else if (format[4] == '8') { ++ DCHECK(STRING_STARTS_WITH(format, "imm18")); ++ switch (format[5]) { ++ case 's': ++ DCHECK(STRING_STARTS_WITH(format, "imm18s")); ++ PrintSImm18(instr); ++ break; ++ case 'x': ++ DCHECK(STRING_STARTS_WITH(format, "imm18x")); ++ PrintXImm18(instr); ++ break; ++ } ++ return 6; ++ } else if (format[4] == '9') { ++ DCHECK(STRING_STARTS_WITH(format, "imm19")); ++ switch (format[5]) { ++ case 's': ++ DCHECK(STRING_STARTS_WITH(format, "imm19s")); ++ PrintSImm19(instr); ++ break; ++ case 'x': ++ DCHECK(STRING_STARTS_WITH(format, "imm19x")); ++ PrintXImm19(instr); ++ break; ++ } ++ return 6; ++ } else if (format[4] == '0' && format[5] == 's') { ++ DCHECK(STRING_STARTS_WITH(format, "imm10s")); ++ if (format[6] == '1') { ++ DCHECK(STRING_STARTS_WITH(format, "imm10s1")); ++ PrintMsaSImm10(instr, false); ++ } else if (format[6] == '2') { ++ DCHECK(STRING_STARTS_WITH(format, "imm10s2")); ++ PrintMsaSImm10(instr, true); ++ } ++ return 7; ++ } ++ } else if (format[3] == '2' && format[4] == '1') { ++ DCHECK(STRING_STARTS_WITH(format, "imm21")); ++ switch (format[5]) { ++ case 's': ++ DCHECK(STRING_STARTS_WITH(format, "imm21s")); ++ PrintSImm21(instr); ++ break; ++ case 'x': ++ DCHECK(STRING_STARTS_WITH(format, "imm21x")); ++ PrintXImm21(instr); ++ break; ++ case 'p': { // The PC relative address. 
++ DCHECK(STRING_STARTS_WITH(format, "imm21p")); ++ int delta_pc = 0; ++ int n_bits = 0; ++ switch (format[6]) { ++ case '4': { ++ DCHECK(STRING_STARTS_WITH(format, "imm21p4")); ++ delta_pc = 4; ++ switch (format[8]) { ++ case '2': ++ DCHECK(STRING_STARTS_WITH(format, "imm21p4s2")); ++ n_bits = 2; ++ PrintPCImm21(instr, delta_pc, n_bits); ++ return 9; ++ } ++ } ++ } ++ } ++ } ++ return 6; ++ } else if (format[3] == '2' && format[4] == '6') { ++ DCHECK(STRING_STARTS_WITH(format, "imm26")); ++ switch (format[5]) { ++ case 's': ++ DCHECK(STRING_STARTS_WITH(format, "imm26s")); ++ PrintSImm26(instr); ++ break; ++ case 'x': ++ DCHECK(STRING_STARTS_WITH(format, "imm26x")); ++ PrintXImm26(instr); ++ break; ++ case 'p': { // The PC relative address. ++ DCHECK(STRING_STARTS_WITH(format, "imm26p")); ++ int delta_pc = 0; ++ int n_bits = 0; ++ switch (format[6]) { ++ case '4': { ++ DCHECK(STRING_STARTS_WITH(format, "imm26p4")); ++ delta_pc = 4; ++ switch (format[8]) { ++ case '2': ++ DCHECK(STRING_STARTS_WITH(format, "imm26p4s2")); ++ n_bits = 2; ++ PrintPCImm26(instr, delta_pc, n_bits); ++ return 9; ++ } ++ } ++ } ++ } ++ case 'j': { // Absolute address for jump instructions. ++ DCHECK(STRING_STARTS_WITH(format, "imm26j")); ++ PrintPCImm26(instr); ++ break; ++ } ++ } ++ return 6; ++ } else if (format[3] == '5') { ++ DCHECK(STRING_STARTS_WITH(format, "imm5")); ++ if (format[4] == 'u') { ++ DCHECK(STRING_STARTS_WITH(format, "imm5u")); ++ PrintMsaImm5(instr); ++ } else if (format[4] == 's') { ++ DCHECK(STRING_STARTS_WITH(format, "imm5s")); ++ PrintMsaSImm5(instr); ++ } ++ return 5; ++ } else if (format[3] == '8') { ++ DCHECK(STRING_STARTS_WITH(format, "imm8")); ++ PrintMsaImm8(instr); ++ return 4; ++ } else if (format[3] == '9') { ++ DCHECK(STRING_STARTS_WITH(format, "imm9")); ++ if (format[4] == 'u') { ++ DCHECK(STRING_STARTS_WITH(format, "imm9u")); ++ PrintUImm9(instr); ++ } else if (format[4] == 's') { ++ DCHECK(STRING_STARTS_WITH(format, "imm9s")); ++ PrintSImm9(instr); ++ } ++ return 5; ++ } else if (format[3] == 'b') { ++ DCHECK(STRING_STARTS_WITH(format, "immb")); ++ PrintMsaImmBit(instr); ++ return 4; ++ } else if (format[3] == 'e') { ++ DCHECK(STRING_STARTS_WITH(format, "imme")); ++ PrintMsaImmElm(instr); ++ return 4; ++ } ++ UNREACHABLE(); ++ } ++ case 'r': { // 'r: registers. ++ return FormatRegister(instr, format); ++ } ++ case 'f': { // 'f: FPUregisters. ++ return FormatFPURegister(instr, format); ++ } ++ case 'w': { // 'w: MSA Register ++ return FormatMSARegister(instr, format); ++ } ++ case 's': { // 'sa. 
++ switch (format[1]) { ++ case 'a': ++ if (format[2] == '2') { ++ DCHECK(STRING_STARTS_WITH(format, "sa2")); // 'sa2 ++ PrintLsaSa(instr); ++ return 3; ++ } else { ++ DCHECK(STRING_STARTS_WITH(format, "sa")); ++ PrintSa(instr); ++ return 2; ++ } ++ break; ++ case 'd': { ++ DCHECK(STRING_STARTS_WITH(format, "sd")); ++ PrintSd(instr); ++ return 2; ++ } ++ case 's': { ++ if (format[2] == '1') { ++ DCHECK(STRING_STARTS_WITH(format, "ss1")); // ext, dext, dextu size ++ PrintSs1(instr); ++ } else if (format[2] == '2') { ++ DCHECK(STRING_STARTS_WITH(format, "ss2")); // ins, dins, dinsu size ++ PrintSs2(instr); ++ } else if (format[2] == '3') { ++ DCHECK(STRING_STARTS_WITH(format, "ss3")); // dextm size ++ PrintSs3(instr); ++ } else if (format[2] == '4') { ++ DCHECK(STRING_STARTS_WITH(format, "ss4")); // dinsm size ++ PrintSs4(instr); ++ } else { ++ DCHECK(STRING_STARTS_WITH(format, "ss5")); // dextu, dinsu pos ++ PrintSs5(instr); ++ } ++ return 3; ++ } ++ } ++ } ++ case 'b': { ++ switch (format[1]) { ++ case 'c': { // 'bc - Special for bc1 cc field. ++ DCHECK(STRING_STARTS_WITH(format, "bc")); ++ PrintBc(instr); ++ return 2; ++ } ++ case 'p': { ++ switch (format[2]) { ++ case '2': { // 'bp2 ++ DCHECK(STRING_STARTS_WITH(format, "bp2")); ++ PrintBp2(instr); ++ return 3; ++ } ++ case '3': { // 'bp3 ++ DCHECK(STRING_STARTS_WITH(format, "bp3")); ++ PrintBp3(instr); ++ return 3; ++ } ++ } ++ } ++ } ++ } ++ case 'C': { // 'Cc - Special for c.xx.d cc field. ++ DCHECK(STRING_STARTS_WITH(format, "Cc")); ++ PrintCc(instr); ++ return 2; ++ } ++ case 't': ++ if (instr->IsMSAInstr()) { ++ PrintMsaDataFormat(instr); ++ } else { ++ PrintFormat(instr); ++ } ++ return 1; ++ } ++ UNREACHABLE(); ++} ++#endif ++ ++ ++// Format takes a formatting string for a whole instruction and prints it into ++// the output buffer. All escaped options are handed to FormatOption to be ++// parsed further. ++void Decoder::Format(Instruction* instr, const char* format) { ++ char cur = *format++; ++ while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) { ++ if (cur == '\'') { // Single quote is used as the formatting escape. ++ format += FormatOption(instr, format); ++ } else { ++ out_buffer_[out_buffer_pos_++] = cur; ++ } ++ cur = *format++; ++ } ++ out_buffer_[out_buffer_pos_] = '\0'; ++} ++ ++ ++// For currently unimplemented decodings the disassembler calls Unknown(instr) ++// which will just print "unknown" of the instruction bits. ++void Decoder::Unknown(Instruction* instr) { ++ Format(instr, "unknown"); ++} ++ ++ ++int Decoder::DecodeBreakInstr(Instruction* instr) { ++ // This is already known to be BREAK instr, just extract the code. ++ if (instr->Bits(25, 6) == static_cast(kMaxStopCode)) { ++ // This is stop(msg). ++ Format(instr, "break, code: 'code"); ++ out_buffer_pos_ += SNPrintF( ++ out_buffer_ + out_buffer_pos_, "\n%p %08" PRIx64, ++ static_cast(reinterpret_cast(instr + kInstrSize)), ++ reinterpret_cast( ++ *reinterpret_cast(instr + kInstrSize))); ++ // Size 3: the break_ instr, plus embedded 64-bit char pointer. 
++ return 3 * kInstrSize; ++ } else { ++ Format(instr, "break, code: 'code"); ++ return kInstrSize; ++ } ++} ++ ++ ++bool Decoder::DecodeTypeRegisterRsType(Instruction* instr) { ++ switch (instr->FunctionFieldRaw()) { ++ case RINT: ++ Format(instr, "rint.'t 'fd, 'fs"); ++ break; ++// case SEL: ++// Format(instr, "sel.'t 'fd, 'fs, 'ft"); ++// break; ++// case SELEQZ_C: ++// Format(instr, "seleqz.'t 'fd, 'fs, 'ft"); ++// break; ++// case SELNEZ_C: ++// Format(instr, "selnez.'t 'fd, 'fs, 'ft"); ++// break; ++// case MOVZ_C: ++// Format(instr, "movz.'t 'fd, 'fs, 'rt"); ++// break; ++// case MOVN_C: ++// Format(instr, "movn.'t 'fd, 'fs, 'rt"); ++// break; ++// case MOVF: ++// if (instr->Bit(16)) { ++// Format(instr, "movt.'t 'fd, 'fs, 'Cc"); ++// } else { ++// Format(instr, "movf.'t 'fd, 'fs, 'Cc"); ++// } ++// break; ++ case MIN: ++ Format(instr, "min.'t 'fd, 'fs, 'ft"); ++ break; ++ case MAX: ++ Format(instr, "max.'t 'fd, 'fs, 'ft"); ++ break; ++// case MINA: ++// Format(instr, "mina.'t 'fd, 'fs, 'ft"); ++// break; ++// case MAXA: ++// Format(instr, "maxa.'t 'fd, 'fs, 'ft"); ++// break; ++ case ADD_D: ++ Format(instr, "add.'t 'fd, 'fs, 'ft"); ++ break; ++ case SUB_D: ++ Format(instr, "sub.'t 'fd, 'fs, 'ft"); ++ break; ++ case MUL_D: ++ Format(instr, "mul.'t 'fd, 'fs, 'ft"); ++ break; ++ case DIV_D: ++ Format(instr, "div.'t 'fd, 'fs, 'ft"); ++ break; ++ case ABS_D: ++ Format(instr, "abs.'t 'fd, 'fs"); ++ break; ++ case MOV_D: ++ Format(instr, "mov.'t 'fd, 'fs"); ++ break; ++ case NEG_D: ++ Format(instr, "neg.'t 'fd, 'fs"); ++ break; ++ case SQRT_D: ++ Format(instr, "sqrt.'t 'fd, 'fs"); ++ break; ++ case RECIP_D: ++ Format(instr, "recip.'t 'fd, 'fs"); ++ break; ++ case RSQRT_D: ++ Format(instr, "rsqrt.'t 'fd, 'fs"); ++ break; ++ case CVT_W_D: ++ Format(instr, "cvt.w.'t 'fd, 'fs"); ++ break; ++ case CVT_L_D: ++ Format(instr, "cvt.l.'t 'fd, 'fs"); ++ break; ++ case TRUNC_W_D: ++ Format(instr, "trunc.w.'t 'fd, 'fs"); ++ break; ++ case TRUNC_L_D: ++ Format(instr, "trunc.l.'t 'fd, 'fs"); ++ break; ++ case ROUND_W_D: ++ Format(instr, "round.w.'t 'fd, 'fs"); ++ break; ++ case ROUND_L_D: ++ Format(instr, "round.l.'t 'fd, 'fs"); ++ break; ++ case FLOOR_W_D: ++ Format(instr, "floor.w.'t 'fd, 'fs"); ++ break; ++ case FLOOR_L_D: ++ Format(instr, "floor.l.'t 'fd, 'fs"); ++ break; ++ case CEIL_W_D: ++ Format(instr, "ceil.w.'t 'fd, 'fs"); ++ break; ++ case CEIL_L_D: ++ Format(instr, "ceil.l.'t 'fd, 'fs"); ++ break; ++ case CLASS_D: ++ Format(instr, "class.'t 'fd, 'fs"); ++ break; ++ case CVT_S_D: ++ Format(instr, "cvt.s.'t 'fd, 'fs"); ++ break; ++ case C_F_D: ++ Format(instr, "c.f.'t 'fs, 'ft, 'Cc"); ++ break; ++ case C_UN_D: ++ Format(instr, "c.un.'t 'fs, 'ft, 'Cc"); ++ break; ++ case C_EQ_D: ++ Format(instr, "c.eq.'t 'fs, 'ft, 'Cc"); ++ break; ++ case C_UEQ_D: ++ Format(instr, "c.ueq.'t 'fs, 'ft, 'Cc"); ++ break; ++ case C_OLT_D: ++ Format(instr, "c.olt.'t 'fs, 'ft, 'Cc"); ++ break; ++ case C_ULT_D: ++ Format(instr, "c.ult.'t 'fs, 'ft, 'Cc"); ++ break; ++ case C_OLE_D: ++ Format(instr, "c.ole.'t 'fs, 'ft, 'Cc"); ++ break; ++ case C_ULE_D: ++ Format(instr, "c.ule.'t 'fs, 'ft, 'Cc"); ++ break; ++ default: ++ return false; ++ } ++ return true; ++} ++ ++ ++void Decoder::DecodeTypeRegisterSRsType(Instruction* instr) { ++ if (!DecodeTypeRegisterRsType(instr)) { ++ switch (instr->FunctionFieldRaw()) { ++ case CVT_D_S: ++ Format(instr, "cvt.d.'t 'fd, 'fs"); ++ break; ++ case MADDF_S: ++ Format(instr, "maddf.s 'fd, 'fs, 'ft"); ++ break; ++ case MSUBF_S: ++ Format(instr, "msubf.s 'fd, 'fs, 'ft"); ++ break; ++ 
default: ++ Format(instr, "unknown.cop1.'t"); ++ break; ++ } ++ } ++} ++ ++ ++void Decoder::DecodeTypeRegisterDRsType(Instruction* instr) { ++ if (!DecodeTypeRegisterRsType(instr)) { ++ switch (instr->FunctionFieldRaw()) { ++ case MADDF_D: ++ Format(instr, "maddf.d 'fd, 'fs, 'ft"); ++ break; ++ case MSUBF_D: ++ Format(instr, "msubf.d 'fd, 'fs, 'ft"); ++ break; ++ default: ++ Format(instr, "unknown.cop1.'t"); ++ break; ++ } ++ } ++} ++ ++ ++void Decoder::DecodeTypeRegisterLRsType(Instruction* instr) { ++ switch (instr->FunctionFieldRaw()) { ++ case CVT_D_L: ++ Format(instr, "cvt.d.l 'fd, 'fs"); ++ break; ++ case CVT_S_L: ++ Format(instr, "cvt.s.l 'fd, 'fs"); ++ break; ++ case CMP_AF: ++ Format(instr, "cmp.af.d 'fd, 'fs, 'ft"); ++ break; ++ case CMP_UN: ++ Format(instr, "cmp.un.d 'fd, 'fs, 'ft"); ++ break; ++ case CMP_EQ: ++ Format(instr, "cmp.eq.d 'fd, 'fs, 'ft"); ++ break; ++ case CMP_UEQ: ++ Format(instr, "cmp.ueq.d 'fd, 'fs, 'ft"); ++ break; ++ case CMP_LT: ++ Format(instr, "cmp.lt.d 'fd, 'fs, 'ft"); ++ break; ++ case CMP_ULT: ++ Format(instr, "cmp.ult.d 'fd, 'fs, 'ft"); ++ break; ++ case CMP_LE: ++ Format(instr, "cmp.le.d 'fd, 'fs, 'ft"); ++ break; ++ case CMP_ULE: ++ Format(instr, "cmp.ule.d 'fd, 'fs, 'ft"); ++ break; ++ case CMP_OR: ++ Format(instr, "cmp.or.d 'fd, 'fs, 'ft"); ++ break; ++ case CMP_UNE: ++ Format(instr, "cmp.une.d 'fd, 'fs, 'ft"); ++ break; ++ case CMP_NE: ++ Format(instr, "cmp.ne.d 'fd, 'fs, 'ft"); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++ ++void Decoder::DecodeTypeRegisterWRsType(Instruction* instr) { ++ switch (instr->FunctionValue()) { ++ case CVT_S_W: // Convert word to float (single). ++ Format(instr, "cvt.s.w 'fd, 'fs"); ++ break; ++ case CVT_D_W: // Convert word to double. ++ Format(instr, "cvt.d.w 'fd, 'fs"); ++ break; ++ case CMP_AF: ++ Format(instr, "cmp.af.s 'fd, 'fs, 'ft"); ++ break; ++ case CMP_UN: ++ Format(instr, "cmp.un.s 'fd, 'fs, 'ft"); ++ break; ++ case CMP_EQ: ++ Format(instr, "cmp.eq.s 'fd, 'fs, 'ft"); ++ break; ++ case CMP_UEQ: ++ Format(instr, "cmp.ueq.s 'fd, 'fs, 'ft"); ++ break; ++ case CMP_LT: ++ Format(instr, "cmp.lt.s 'fd, 'fs, 'ft"); ++ break; ++ case CMP_ULT: ++ Format(instr, "cmp.ult.s 'fd, 'fs, 'ft"); ++ break; ++ case CMP_LE: ++ Format(instr, "cmp.le.s 'fd, 'fs, 'ft"); ++ break; ++ case CMP_ULE: ++ Format(instr, "cmp.ule.s 'fd, 'fs, 'ft"); ++ break; ++ case CMP_OR: ++ Format(instr, "cmp.or.s 'fd, 'fs, 'ft"); ++ break; ++ case CMP_UNE: ++ Format(instr, "cmp.une.s 'fd, 'fs, 'ft"); ++ break; ++ case CMP_NE: ++ Format(instr, "cmp.ne.s 'fd, 'fs, 'ft"); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++ ++void Decoder::DecodeTypeRegisterCOP1(Instruction* instr) { ++ switch (instr->RsFieldRaw()) { ++// case MFC1: ++// Format(instr, "mfc1 'rt, 'fs"); ++// break; ++// case DMFC1: ++// Format(instr, "dmfc1 'rt, 'fs"); ++// break; ++// case MFHC1: ++// Format(instr, "mfhc1 'rt, 'fs"); ++// break; ++// case MTC1: ++// Format(instr, "mtc1 'rt, 'fs"); ++// break; ++// case DMTC1: ++// Format(instr, "dmtc1 'rt, 'fs"); ++// break; ++ // These are called "fs" too, although they are not FPU registers. 
++ case CTC1: ++ Format(instr, "ctc1 'rt, 'fs"); ++ break; ++ case CFC1: ++ Format(instr, "cfc1 'rt, 'fs"); ++ break; ++// case MTHC1: ++// Format(instr, "mthc1 'rt, 'fs"); ++// break; ++ case S: ++ DecodeTypeRegisterSRsType(instr); ++ break; ++ case D: ++ DecodeTypeRegisterDRsType(instr); ++ break; ++ case W: ++ DecodeTypeRegisterWRsType(instr); ++ break; ++ case L: ++ DecodeTypeRegisterLRsType(instr); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++ ++void Decoder::DecodeTypeRegisterCOP1X(Instruction* instr) { ++ switch (instr->FunctionFieldRaw()) { ++ case MADD_S: ++ Format(instr, "madd.s 'fd, 'fr, 'fs, 'ft"); ++ break; ++ case MADD_D: ++ Format(instr, "madd.d 'fd, 'fr, 'fs, 'ft"); ++ break; ++ case MSUB_S: ++ Format(instr, "msub.s 'fd, 'fr, 'fs, 'ft"); ++ break; ++ case MSUB_D: ++ Format(instr, "msub.d 'fd, 'fr, 'fs, 'ft"); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++ ++void Decoder::DecodeTypeRegisterSPECIAL(Instruction* instr) { ++ switch (instr->FunctionFieldRaw()) { ++#if 0 ++ case DIV: // @Sw64r3 == DIV_MOD. ++ if (kArchVariant != kSw64r3) { ++ Format(instr, "div 'rs, 'rt"); ++ } else { ++ if (instr->SaValue() == DIV_OP) { ++ Format(instr, "div 'rd, 'rs, 'rt"); ++ } else { ++ Format(instr, "mod 'rd, 'rs, 'rt"); ++ } ++ } ++ break; ++ case DDIV: // @Sw64r3 == D_DIV_MOD. ++ if (kArchVariant != kSw64r3) { ++ Format(instr, "ddiv 'rs, 'rt"); ++ } else { ++ if (instr->SaValue() == DIV_OP) { ++ Format(instr, "ddiv 'rd, 'rs, 'rt"); ++ } else { ++ Format(instr, "dmod 'rd, 'rs, 'rt"); ++ } ++ } ++ break; ++ case DIVU: // @Sw64r3 == DIV_MOD_U. ++ if (kArchVariant != kSw64r3) { ++ Format(instr, "divu 'rs, 'rt"); ++ } else { ++ if (instr->SaValue() == DIV_OP) { ++ Format(instr, "divu 'rd, 'rs, 'rt"); ++ } else { ++ Format(instr, "modu 'rd, 'rs, 'rt"); ++ } ++ } ++ break; ++ case DDIVU: // @Sw64r3 == D_DIV_MOD_U. 
++ if (kArchVariant != kSw64r3) { ++ Format(instr, "ddivu 'rs, 'rt"); ++ } else { ++ if (instr->SaValue() == DIV_OP) { ++ Format(instr, "ddivu 'rd, 'rs, 'rt"); ++ } else { ++ Format(instr, "dmodu 'rd, 'rs, 'rt"); ++ } ++ } ++ break; ++#endif ++ case ADD: ++ Format(instr, "add 'rd, 'rs, 'rt"); ++ break; ++ case DADD: ++ Format(instr, "dadd 'rd, 'rs, 'rt"); ++ break; ++ case SUB: ++ Format(instr, "sub 'rd, 'rs, 'rt"); ++ break; ++ case DSUB: ++ Format(instr, "dsub 'rd, 'rs, 'rt"); ++ break; ++ case AND: ++ Format(instr, "and 'rd, 'rs, 'rt"); ++ break; ++ case OR: ++ if (0 == instr->RsValue()) { ++ Format(instr, "mov 'rd, 'rt"); ++ } else if (0 == instr->RtValue()) { ++ Format(instr, "mov 'rd, 'rs"); ++ } else { ++ Format(instr, "or 'rd, 'rs, 'rt"); ++ } ++ break; ++ case XOR: ++ Format(instr, "xor 'rd, 'rs, 'rt"); ++ break; ++ case NOR: ++ Format(instr, "nor 'rd, 'rs, 'rt"); ++ break; ++ case MOVCI: ++ if (instr->Bit(16)) { ++ Format(instr, "movt 'rd, 'rs, 'bc"); ++ } else { ++ Format(instr, "movf 'rd, 'rs, 'bc"); ++ } ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++ ++void Decoder::DecodeTypeRegisterSPECIAL2(Instruction* instr) { ++ switch (instr->FunctionFieldRaw()) { ++ case MUL: ++ Format(instr, "mul 'rd, 'rs, 'rt"); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++ ++void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) { ++ switch (instr->FunctionFieldRaw()) { ++#if 0 ++ case EXT: { ++ Format(instr, "ext 'rt, 'rs, 'sa, 'ss1"); ++ break; ++ } ++ case DEXT: { ++ Format(instr, "dext 'rt, 'rs, 'sa, 'ss1"); ++ break; ++ } ++ case DEXTM: { ++ Format(instr, "dextm 'rt, 'rs, 'sa, 'ss3"); ++ break; ++ } ++ case DEXTU: { ++ Format(instr, "dextu 'rt, 'rs, 'ss5, 'ss1"); ++ break; ++ } ++ case INS: { ++ Format(instr, "ins 'rt, 'rs, 'sa, 'ss2"); ++ break; ++ } ++ case DINS: { ++ Format(instr, "dins 'rt, 'rs, 'sa, 'ss2"); ++ break; ++ } ++ case DINSM: { ++ Format(instr, "dinsm 'rt, 'rs, 'sa, 'ss4"); ++ break; ++ } ++ case DINSU: { ++ Format(instr, "dinsu 'rt, 'rs, 'ss5, 'ss2"); ++ break; ++ } ++#endif ++ case BSHFL: { ++ int sa = instr->SaFieldRaw() >> kSaShift; ++ switch (sa) { ++ case BITSWAP: { ++ Format(instr, "bitswap 'rd, 'rt"); ++ break; ++ } ++ default: { ++ sa >>= kBp2Bits; ++ switch (sa) { ++ case ALIGN: { ++ Format(instr, "align 'rd, 'rs, 'rt, 'bp2"); ++ break; ++ } ++ default: ++ UNREACHABLE(); ++ break; ++ } ++ break; ++ } ++ } ++ break; ++ } ++ case DBSHFL: { ++ int sa = instr->SaFieldRaw() >> kSaShift; ++ switch (sa) { ++ case DBITSWAP: { ++ switch (instr->SaFieldRaw() >> kSaShift) { ++ case DBITSWAP_SA: ++ Format(instr, "dbitswap 'rd, 'rt"); ++ break; ++ default: ++ UNREACHABLE(); ++ break; ++ } ++ break; ++ } ++ default: { ++ sa >>= kBp3Bits; ++ switch (sa) { ++ case DALIGN: { ++ Format(instr, "dalign 'rd, 'rs, 'rt, 'bp3"); ++ break; ++ } ++ default: ++ UNREACHABLE(); ++ break; ++ } ++ break; ++ } ++ } ++ break; ++ } ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++ ++int Decoder::DecodeTypeRegister(Instruction* instr) { ++ switch (instr->OpcodeFieldRaw()) { ++ case COP1: // Coprocessor instructions. 
++ DecodeTypeRegisterCOP1(instr); ++ break; ++ case COP1X: ++ DecodeTypeRegisterCOP1X(instr); ++ break; ++ case SPECIAL: ++ switch (instr->FunctionFieldRaw()) { ++ case BREAK: ++ return DecodeBreakInstr(instr); ++ default: ++ DecodeTypeRegisterSPECIAL(instr); ++ break; ++ } ++ break; ++ case SPECIAL2: ++ DecodeTypeRegisterSPECIAL2(instr); ++ break; ++ case SPECIAL3: ++ DecodeTypeRegisterSPECIAL3(instr); ++ break; ++ case MSA: ++ switch (instr->MSAMinorOpcodeField()) { ++ case kMsaMinor3R: ++ DecodeTypeMsa3R(instr); ++ break; ++ case kMsaMinor3RF: ++ DecodeTypeMsa3RF(instr); ++ break; ++ case kMsaMinorVEC: ++ DecodeTypeMsaVec(instr); ++ break; ++ case kMsaMinor2R: ++ DecodeTypeMsa2R(instr); ++ break; ++ case kMsaMinor2RF: ++ DecodeTypeMsa2RF(instr); ++ break; ++ case kMsaMinorELM: ++ DecodeTypeMsaELM(instr); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ return kInstrSize; ++} ++ ++ ++void Decoder::DecodeTypeImmediateCOP1(Instruction* instr) { ++ switch (instr->RsFieldRaw()) { ++ case BC1: ++ if (instr->FBtrueValue()) { ++ Format(instr, "bc1t 'bc, 'imm16u -> 'imm16p4s2"); ++ } else { ++ Format(instr, "bc1f 'bc, 'imm16u -> 'imm16p4s2"); ++ } ++ break; ++ case BC1EQZ: ++ Format(instr, "bc1eqz 'ft, 'imm16u -> 'imm16p4s2"); ++ break; ++ case BC1NEZ: ++ Format(instr, "bc1nez 'ft, 'imm16u -> 'imm16p4s2"); ++ break; ++ case BZ_V: ++ case BZ_B: ++ case BZ_H: ++ case BZ_W: ++ case BZ_D: ++ Format(instr, "bz.'t 'wt, 'imm16s -> 'imm16p4s2"); ++ break; ++ case BNZ_V: ++ case BNZ_B: ++ case BNZ_H: ++ case BNZ_W: ++ case BNZ_D: ++ Format(instr, "bnz.'t 'wt, 'imm16s -> 'imm16p4s2"); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++ ++void Decoder::DecodeTypeImmediateREGIMM(Instruction* instr) { ++ switch (instr->RtFieldRaw()) { ++ case BLTZ: ++ Format(instr, "bltz 'rs, 'imm16u -> 'imm16p4s2"); ++ break; ++ case BLTZAL: ++ Format(instr, "bltzal 'rs, 'imm16u -> 'imm16p4s2"); ++ break; ++ case BGEZ: ++ Format(instr, "bgez 'rs, 'imm16u -> 'imm16p4s2"); ++ break; ++ case BGEZAL: { ++ if (instr->RsValue() == 0) ++ Format(instr, "bal 'imm16s -> 'imm16p4s2"); ++ else ++ Format(instr, "bgezal 'rs, 'imm16u -> 'imm16p4s2"); ++ break; ++ } ++ case BGEZALL: ++ Format(instr, "bgezall 'rs, 'imm16u -> 'imm16p4s2"); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Decoder::DecodeTypeImmediateSPECIAL3(Instruction* instr) { ++ switch (instr->FunctionFieldRaw()) { ++ case LL_R6: { ++ if (kArchVariant == kSw64r3) { ++ Format(instr, "ll 'rt, 'imm9s('rs)"); ++ } else { ++ Unknown(instr); ++ } ++ break; ++ } ++ case LLD_R6: { ++ if (kArchVariant == kSw64r3) { ++ Format(instr, "lld 'rt, 'imm9s('rs)"); ++ } else { ++ Unknown(instr); ++ } ++ break; ++ } ++ case SC_R6: { ++ if (kArchVariant == kSw64r3) { ++ Format(instr, "sc 'rt, 'imm9s('rs)"); ++ } else { ++ Unknown(instr); ++ } ++ break; ++ } ++ case SCD_R6: { ++ if (kArchVariant == kSw64r3) { ++ Format(instr, "scd 'rt, 'imm9s('rs)"); ++ } else { ++ Unknown(instr); ++ } ++ break; ++ } ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Decoder::DecodeTypeImmediate(Instruction* instr) { ++ switch (instr->OpcodeFieldRaw()) { ++ case COP1: ++ DecodeTypeImmediateCOP1(instr); ++ break; // Case COP1. ++ // ------------- REGIMM class. ++ case REGIMM: ++ DecodeTypeImmediateREGIMM(instr); ++ break; // Case REGIMM. ++ // ------------- Branch instructions. 
++ case BEQ: ++ Format(instr, "beq 'rs, 'rt, 'imm16u -> 'imm16p4s2"); ++ break; ++ case BC: ++ Format(instr, "bc 'imm26s -> 'imm26p4s2"); ++ break; ++ case BALC: ++ Format(instr, "balc 'imm26s -> 'imm26p4s2"); ++ break; ++ case BNE: ++ Format(instr, "bne 'rs, 'rt, 'imm16u -> 'imm16p4s2"); ++ break; ++ case BLEZ: ++ if ((instr->RtValue() == 0) && (instr->RsValue() != 0)) { ++ Format(instr, "blez 'rs, 'imm16u -> 'imm16p4s2"); ++ } else if ((instr->RtValue() != instr->RsValue()) && ++ (instr->RsValue() != 0) && (instr->RtValue() != 0)) { ++ Format(instr, "bgeuc 'rs, 'rt, 'imm16u -> 'imm16p4s2"); ++ } else if ((instr->RtValue() == instr->RsValue()) && ++ (instr->RtValue() != 0)) { ++ Format(instr, "bgezalc 'rs, 'imm16u -> 'imm16p4s2"); ++ } else if ((instr->RsValue() == 0) && (instr->RtValue() != 0)) { ++ Format(instr, "blezalc 'rt, 'imm16u -> 'imm16p4s2"); ++ } else { ++ UNREACHABLE(); ++ } ++ break; ++ case BGTZ: ++ if ((instr->RtValue() == 0) && (instr->RsValue() != 0)) { ++ Format(instr, "bgtz 'rs, 'imm16u -> 'imm16p4s2"); ++ } else if ((instr->RtValue() != instr->RsValue()) && ++ (instr->RsValue() != 0) && (instr->RtValue() != 0)) { ++ Format(instr, "bltuc 'rs, 'rt, 'imm16u -> 'imm16p4s2"); ++ } else if ((instr->RtValue() == instr->RsValue()) && ++ (instr->RtValue() != 0)) { ++ Format(instr, "bltzalc 'rt, 'imm16u -> 'imm16p4s2"); ++ } else if ((instr->RsValue() == 0) && (instr->RtValue() != 0)) { ++ Format(instr, "bgtzalc 'rt, 'imm16u -> 'imm16p4s2"); ++ } else { ++ UNREACHABLE(); ++ } ++ break; ++ case BLEZL: ++ if ((instr->RtValue() == instr->RsValue()) && (instr->RtValue() != 0)) { ++ Format(instr, "bgezc 'rt, 'imm16u -> 'imm16p4s2"); ++ } else if ((instr->RtValue() != instr->RsValue()) && ++ (instr->RsValue() != 0) && (instr->RtValue() != 0)) { ++ Format(instr, "bgec 'rs, 'rt, 'imm16u -> 'imm16p4s2"); ++ } else if ((instr->RsValue() == 0) && (instr->RtValue() != 0)) { ++ Format(instr, "blezc 'rt, 'imm16u -> 'imm16p4s2"); ++ } else { ++ UNREACHABLE(); ++ } ++ break; ++ case BGTZL: ++ if ((instr->RtValue() == instr->RsValue()) && (instr->RtValue() != 0)) { ++ Format(instr, "bltzc 'rt, 'imm16u -> 'imm16p4s2"); ++ } else if ((instr->RtValue() != instr->RsValue()) && ++ (instr->RsValue() != 0) && (instr->RtValue() != 0)) { ++ Format(instr, "bltc 'rs, 'rt, 'imm16u -> 'imm16p4s2"); ++ } else if ((instr->RsValue() == 0) && (instr->RtValue() != 0)) { ++ Format(instr, "bgtzc 'rt, 'imm16u -> 'imm16p4s2"); ++ } else { ++ UNREACHABLE(); ++ } ++ break; ++ case POP66: ++ if (instr->RsValue() == JIC) { ++ Format(instr, "jic 'rt, 'imm16s"); ++ } else { ++ Format(instr, "beqzc 'rs, 'imm21s -> 'imm21p4s2"); ++ } ++ break; ++ case POP76: ++ if (instr->RsValue() == JIALC) { ++ Format(instr, "jialc 'rt, 'imm16s"); ++ } else { ++ Format(instr, "bnezc 'rs, 'imm21s -> 'imm21p4s2"); ++ } ++ break; ++ // ------------- Arithmetic instructions. ++ case ADDI: ++ if (kArchVariant != kSw64r3) { ++ Format(instr, "addi 'rt, 'rs, 'imm16s"); ++ } else { ++ int rs_reg = instr->RsValue(); ++ int rt_reg = instr->RtValue(); ++ // Check if BOVC, BEQZALC or BEQC instruction. 
++ if (rs_reg >= rt_reg) { ++ Format(instr, "bovc 'rs, 'rt, 'imm16s -> 'imm16p4s2"); ++ } else { ++ DCHECK_GT(rt_reg, 0); ++ if (rs_reg == 0) { ++ Format(instr, "beqzalc 'rt, 'imm16s -> 'imm16p4s2"); ++ } else { ++ Format(instr, "beqc 'rs, 'rt, 'imm16s -> 'imm16p4s2"); ++ } ++ } ++ } ++ break; ++ case DADDI: ++ if (kArchVariant != kSw64r3) { ++ Format(instr, "daddi 'rt, 'rs, 'imm16s"); ++ } else { ++ int rs_reg = instr->RsValue(); ++ int rt_reg = instr->RtValue(); ++ // Check if BNVC, BNEZALC or BNEC instruction. ++ if (rs_reg >= rt_reg) { ++ Format(instr, "bnvc 'rs, 'rt, 'imm16s -> 'imm16p4s2"); ++ } else { ++ DCHECK_GT(rt_reg, 0); ++ if (rs_reg == 0) { ++ Format(instr, "bnezalc 'rt, 'imm16s -> 'imm16p4s2"); ++ } else { ++ Format(instr, "bnec 'rs, 'rt, 'imm16s -> 'imm16p4s2"); ++ } ++ } ++ } ++ break; ++ case PCREL: { ++ int32_t imm21 = instr->Imm21Value(); ++ // rt field: 5-bits checking ++ uint8_t rt = (imm21 >> kImm16Bits); ++ switch (rt) { ++ case ALUIPC: ++ Format(instr, "aluipc 'rs, 'imm16s"); ++ break; ++ case AUIPC: ++ Format(instr, "auipc 'rs, 'imm16s"); ++ break; ++ default: { ++ // rt field: checking of the most significant 3-bits ++ rt = (imm21 >> kImm18Bits); ++ switch (rt) { ++ case LDPC: ++ Format(instr, "ldpc 'rs, 'imm18s"); ++ break; ++ default: { ++ // rt field: checking of the most significant 2-bits ++ rt = (imm21 >> kImm19Bits); ++ switch (rt) { ++ case LWUPC: ++ Format(instr, "lwupc 'rs, 'imm19s"); ++ break; ++ case LWPC: ++ Format(instr, "lwpc 'rs, 'imm19s"); ++ break; ++ case ADDIUPC: ++ Format(instr, "addiupc 'rs, 'imm19s"); ++ break; ++ default: ++ UNREACHABLE(); ++ break; ++ } ++ break; ++ } ++ } ++ break; ++ } ++ } ++ break; ++ } ++ case SPECIAL3: ++ DecodeTypeImmediateSPECIAL3(instr); ++ break; ++ case MSA: ++ switch (instr->MSAMinorOpcodeField()) { ++ case kMsaMinorI8: ++ DecodeTypeMsaI8(instr); ++ break; ++ case kMsaMinorI5: ++ DecodeTypeMsaI5(instr); ++ break; ++ case kMsaMinorI10: ++ DecodeTypeMsaI10(instr); ++ break; ++ case kMsaMinorELM: ++ DecodeTypeMsaELM(instr); ++ break; ++ case kMsaMinorBIT: ++ DecodeTypeMsaBIT(instr); ++ break; ++ case kMsaMinorMI10: ++ DecodeTypeMsaMI10(instr); ++ break; ++ default: ++ UNREACHABLE(); ++ break; ++ } ++ break; ++ default: ++ printf("a 0x%x \n", instr->OpcodeFieldRaw()); ++ UNREACHABLE(); ++ } ++} ++ ++ ++void Decoder::DecodeTypeJump(Instruction* instr) { ++ switch (instr->OpcodeFieldRaw()) { ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Decoder::DecodeTypeMsaI8(Instruction* instr) { ++ uint32_t opcode = instr->InstructionBits() & kMsaI8Mask; ++ ++ switch (opcode) { ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Decoder::DecodeTypeMsaI5(Instruction* instr) { ++ uint32_t opcode = instr->InstructionBits() & kMsaI5Mask; ++ ++ switch (opcode) { ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Decoder::DecodeTypeMsaI10(Instruction* instr) { ++ uint32_t opcode = instr->InstructionBits() & kMsaI5Mask; ++ if (opcode == LDI) { ++ Format(instr, "ldi.'t 'wd, 'imm10s1"); ++ } else { ++ UNREACHABLE(); ++ } ++} ++ ++void Decoder::DecodeTypeMsaELM(Instruction* instr) { ++ uint32_t opcode = instr->InstructionBits() & kMsaELMMask; ++ switch (opcode) { ++ case SLDI: ++ if (instr->Bits(21, 16) == 0x3E) { ++ Format(instr, "ctcmsa "); ++ PrintMSAControlRegister(instr->WdValue()); ++ Print(", "); ++ PrintRegister(instr->WsValue()); ++ } else { ++ Format(instr, "sldi.'t 'wd, 'ws['imme]"); ++ } ++ break; ++ case SPLATI: ++ if (instr->Bits(21, 16) == 0x3E) { ++ Format(instr, "cfcmsa "); ++ PrintRegister(instr->WdValue()); ++ Print(", 
"); ++ PrintMSAControlRegister(instr->WsValue()); ++ } else { ++ Format(instr, "splati.'t 'wd, 'ws['imme]"); ++ } ++ break; ++ case COPY_S: ++ if (instr->Bits(21, 16) == 0x3E) { ++ Format(instr, "move.v 'wd, 'ws"); ++ } else { ++ Format(instr, "copy_s.'t "); ++ PrintMsaCopy(instr); ++ } ++ break; ++ case COPY_U: ++ Format(instr, "copy_u.'t "); ++ PrintMsaCopy(instr); ++ break; ++ case INSERT: ++ Format(instr, "insert.'t 'wd['imme], "); ++ PrintRegister(instr->WsValue()); ++ break; ++ case INSVE: ++ Format(instr, "insve.'t 'wd['imme], 'ws[0]"); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Decoder::DecodeTypeMsaBIT(Instruction* instr) { ++ uint32_t opcode = instr->InstructionBits() & kMsaBITMask; ++ ++ switch (opcode) { ++ case SLLI: ++ Format(instr, "slli.'t 'wd, 'ws, 'immb"); ++ break; ++ case SRAI: ++ Format(instr, "srai.'t 'wd, 'ws, 'immb"); ++ break; ++ case SRLI: ++ Format(instr, "srli.'t 'wd, 'ws, 'immb"); ++ break; ++ case BCLRI: ++ Format(instr, "bclri.'t 'wd, 'ws, 'immb"); ++ break; ++ case BSETI: ++ Format(instr, "bseti.'t 'wd, 'ws, 'immb"); ++ break; ++ case BNEGI: ++ Format(instr, "bnegi.'t 'wd, 'ws, 'immb"); ++ break; ++ case BINSLI: ++ Format(instr, "binsli.'t 'wd, 'ws, 'immb"); ++ break; ++ case BINSRI: ++ Format(instr, "binsri.'t 'wd, 'ws, 'immb"); ++ break; ++ case SAT_S: ++ Format(instr, "sat_s.'t 'wd, 'ws, 'immb"); ++ break; ++ case SAT_U: ++ Format(instr, "sat_u.'t 'wd, 'ws, 'immb"); ++ break; ++ case SRARI: ++ Format(instr, "srari.'t 'wd, 'ws, 'immb"); ++ break; ++ case SRLRI: ++ Format(instr, "srlri.'t 'wd, 'ws, 'immb"); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Decoder::DecodeTypeMsaMI10(Instruction* instr) { ++ uint32_t opcode = instr->InstructionBits() & kMsaMI10Mask; ++ if (opcode == MSA_LD) { ++ Format(instr, "ld.'t 'wd, 'imm10s2("); ++ PrintRegister(instr->WsValue()); ++ Print(")"); ++ } else if (opcode == MSA_ST) { ++ Format(instr, "st.'t 'wd, 'imm10s2("); ++ PrintRegister(instr->WsValue()); ++ Print(")"); ++ } else { ++ UNREACHABLE(); ++ } ++} ++ ++void Decoder::DecodeTypeMsa3R(Instruction* instr) { ++ uint32_t opcode = instr->InstructionBits() & kMsa3RMask; ++ switch (opcode) { ++ case SLD: ++ Format(instr, "sld.'t 'wd, 'ws['rt]"); ++ break; ++ case SPLAT: ++ Format(instr, "splat.'t 'wd, 'ws['rt]"); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Decoder::DecodeTypeMsa3RF(Instruction* instr) { ++ uint32_t opcode = instr->InstructionBits() & kMsa3RFMask; ++ switch (opcode) { ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Decoder::DecodeTypeMsaVec(Instruction* instr) { ++ uint32_t opcode = instr->InstructionBits() & kMsaVECMask; ++ switch (opcode) { ++ case AND_V: ++ Format(instr, "and.v 'wd, 'ws, 'wt"); ++ break; ++ case OR_V: ++ Format(instr, "or.v 'wd, 'ws, 'wt"); ++ break; ++ case NOR_V: ++ Format(instr, "nor.v 'wd, 'ws, 'wt"); ++ break; ++ case XOR_V: ++ Format(instr, "xor.v 'wd, 'ws, 'wt"); ++ break; ++ case BMNZ_V: ++ Format(instr, "bmnz.v 'wd, 'ws, 'wt"); ++ break; ++ case BMZ_V: ++ Format(instr, "bmz.v 'wd, 'ws, 'wt"); ++ break; ++ case BSEL_V: ++ Format(instr, "bsel.v 'wd, 'ws, 'wt"); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Decoder::DecodeTypeMsa2R(Instruction* instr) { ++ uint32_t opcode = instr->InstructionBits() & kMsa2RMask; ++ switch (opcode) { ++ case FILL: { ++ Format(instr, "fill.'t 'wd, "); ++ PrintRegister(instr->WsValue()); // rs value is in ws field ++ } break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Decoder::DecodeTypeMsa2RF(Instruction* instr) { ++ uint32_t opcode = 
instr->InstructionBits() & kMsa2RFMask; ++ switch (opcode) { ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++ ++#ifdef SW64 //jzy 20150213 ++ ++#define OP(x) (((x) & 0x3F) << 26) ++#define OPR(oo,ff) (OP(oo) | (((ff) & 0xFF) << 5)) ++ ++void Decoder::SwPrintRa(Instruction* instr){ ++ int reg = instr->SwRaValue(); ++ PrintRegister(reg); ++} ++void Decoder::SwPrintRb(Instruction* instr){ ++ int reg = instr->SwRbValue(); ++ PrintRegister(reg); ++} ++ ++int Decoder::SwPrintRc(Instruction* instr, const char* format) { ++ int len, hi, lo; ++ len = SwGetInstructionRange(format, hi, lo); ++ int reg = instr->SwRcValue(hi, lo); ++ PrintRegister(reg); ++ return len; ++} ++ ++void Decoder::SwPrintRd(Instruction* instr){ ++ int reg = instr->SwRdValue(); ++ PrintRegister(reg); ++} ++void Decoder::SwPrintFa(Instruction* instr){ ++ int reg = instr->SwFaValue(); ++ PrintFPURegister(reg); ++} ++void Decoder::SwPrintFb(Instruction* instr){ ++ int reg = instr->SwFbValue(); ++ PrintFPURegister(reg); ++} ++int Decoder::SwPrintFc(Instruction* instr, const char* format){ ++ int len, hi, lo; ++ len = SwGetInstructionRange(format, hi, lo); ++ int reg = instr->SwFcValue(hi, lo); ++ PrintFPURegister(reg); ++ return len; ++} ++void Decoder::SwPrintFd(Instruction* instr) { ++ int reg = instr->SwFdValue(); ++ PrintFPURegister(reg); ++} ++ ++ ++int Decoder::SwPrintDisp(Instruction* instr, const char* format) { ++ int32_t len, hi, lo; ++ len = SwGetInstructionRange(format, hi, lo); ++ ++ int32_t imm = instr->SwImmOrDispFieldValue(hi, lo); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm); ++ ++ return len; ++} ++//for transfer ld 20150320 ++int Decoder::SwPrintDispTransfer(Instruction* instr, const char* format) { ++ int32_t len, hi, lo; ++ len = SwGetInstructionRange(format, hi, lo); ++ ++ int32_t imm = instr->SwImmOrDispFieldValue(hi, lo); ++ //int64_t imm_transfer = imm*4+Instruction::kInstrSize+(*reinterpret_cast(&instr_pc_)); ++ void* imm_transfer = imm+1+reinterpret_cast(instr_pc_); //ld 20150429 ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, " %p", imm_transfer); ++ ++ return len; ++} ++ ++ ++int Decoder::SwPrintImm(Instruction* instr, const char* format) { ++ int32_t len, hi, lo; ++ len = SwGetInstructionRange(format, hi, lo); ++ ++ int32_t imm = instr->SwImmOrDispFieldRaw(hi, lo) >> lo; ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", imm); ++ ++ return len; ++} ++ ++ ++//modified by cjq ++/**change int to hex ++ * return length of format string ++ */ ++int Decoder::SwPrintHex(Instruction* instr, const char* format){ ++ int32_t len, hi, lo; ++ len = SwGetInstructionRange(format, hi, lo); ++ ++ int32_t imm = instr->SwImmOrDispFieldRaw(hi, lo) >> lo; ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm); ++ ++ return len; ++} ++ ++ ++//imm(15-12) disp(25-0) retrun length(format) ++int Decoder::SwGetInstructionRange(const char* format, int& hi, int& lo) { ++ const char* p = format; ++ p = strstr(format, "("); ++ DCHECK('(' == *p ); ++ hi = atoi(++p); ++ p = strstr(format, "-"); ++ DCHECK('-' == *p ); ++ lo = atoi(++p); ++ p = strstr(format, ")"); ++ DCHECK(')' == *p ); ++ return (int)(p-format+1); ++} ++ ++ ++/** modified by cjq ++ * * add function to parse 'sys_call' ++ * */ ++void Decoder::SwDecodeTypeSyscall(Instruction* instr){ ++ if (instr->OpcodeFieldValue() == op_sys_call){ ++ return Format(instr, "sys_call '0x(25-0)"); ++ } ++} ++ ++void Decoder::SwDecodeTypeTransferance(Instruction* instr){//ld 20150319 ++ 
switch(instr->OpcodeFieldValue()){ ++ case op_br: ++ Format(instr,"br 'ra, 'tr_disp(20-0)");//ld 20150320 ++ break; ++ case op_bsr: ++ Format(instr,"bsr 'ra, 'tr_disp(20-0)"); ++ break; ++ case op_beq: ++ Format(instr,"beq 'ra, 'tr_disp(20-0)"); ++ break; ++ case op_bne: ++ Format(instr,"bne 'ra, 'tr_disp(20-0)"); ++ break; ++ case op_blt: ++ Format(instr,"blt 'ra, 'tr_disp(20-0)"); ++ break; ++ case op_ble: ++ Format(instr,"ble 'ra, 'tr_disp(20-0)"); ++ break; ++ case op_bgt: ++ Format(instr,"bgt 'ra, 'tr_disp(20-0)"); ++ break; ++ case op_bge: ++ Format(instr,"bge 'ra, 'tr_disp(20-0)"); ++ break; ++ case op_blbc: ++ Format(instr,"blbc 'ra, 'tr_disp(20-0)"); ++ break; ++ case op_blbs: ++ Format(instr,"blbs 'ra, 'tr_disp(20-0)"); ++ break; ++ case op_fbeq: ++ Format(instr,"fbeq 'fa, 'tr_disp(20-0)"); ++ break; ++ case op_fbne: ++ Format(instr,"fbne 'fa, 'tr_disp(20-0)"); ++ break; ++ case op_fblt: ++ Format(instr,"fblt 'fa, 'tr_disp(20-0)"); ++ break; ++ case op_fble: ++ Format(instr,"fble 'fa, 'tr_disp(20-0)"); ++ break; ++ case op_fbgt: ++ Format(instr,"fbgt 'fa, 'tr_disp(20-0)"); ++ break; ++ case op_fbge: ++ Format(instr,"fbge 'fa, 'tr_disp(20-0)"); ++ break; ++ default: ++ printf("a 0x%x \n", instr->OpcodeFieldRaw()); ++ UNREACHABLE(); ++ } ++} ++void Decoder::SwDecodeTypeStorage(Instruction* instr){ ++ int opcode_func_raw; ++ switch (instr->OpcodeFieldValue()) { ++ case op_call: ++ Format(instr, "call 'ra, ('rb)"); ++ break; ++ case op_ret: ++ Format(instr, "ret 'ra, ('rb)"); ++ break; ++ case op_jmp: ++ Format(instr, "jmp 'ra, ('rb)"); ++ break; ++ ++ case op_ldbu: ++ Format(instr, "ldbu 'ra, 'disp(15-0)('rb)"); ++ break; ++ case op_ldhu: ++ Format(instr, "ldhu 'ra, 'disp(15-0)('rb)"); ++ break; ++ case op_ldw: ++ Format(instr, "ldw 'ra, 'disp(15-0)('rb)"); ++ break; ++ case op_ldl: ++ Format(instr, "ldl 'ra, 'disp(15-0)('rb)"); ++ break; ++ case op_ldl_u: ++ Format(instr, "ldl_u 'ra, 'disp(15-0)('rb)"); ++ break; ++ case op_stb: ++ Format(instr, "stb 'ra, 'disp(15-0)('rb)"); ++ break; ++ case op_sth: ++ Format(instr, "sth 'ra, 'disp(15-0)('rb)"); ++ break; ++ case op_stw: ++ Format(instr, "stw 'ra, 'disp(15-0)('rb)"); ++ break; ++ case op_stl: ++ Format(instr, "stl 'ra, 'disp(15-0)('rb)"); ++ break; ++ case op_stl_u: ++ Format(instr, "stl_u 'ra, 'disp(15-0)('rb)"); ++ break; ++ case op_ldi: ++ Format(instr, "ldi 'ra, 'disp(15-0)('rb)"); ++ break; ++ case op_ldih: ++ Format(instr, "ldih 'ra, 'disp(15-0)('rb)"); ++ break; ++ ++ case op_flds: ++ Format(instr, "flds 'fa, 'disp(15-0)('rb)"); ++ break; ++ case op_fldd: ++ Format(instr, "fldd 'fa, 'disp(15-0)('rb)"); ++ break; ++ case op_fsts: ++ Format(instr, "fsts 'fa, 'disp(15-0)('rb)"); ++ break; ++ case op_fstd: ++ Format(instr, "fstd 'fa, 'disp(15-0)('rb)"); ++ break; ++ ++ case op_ldwe: ++ case op_ldse: ++ case op_ldde: ++ case op_vlds: ++ case op_vldd: ++ case op_vsts: ++ case op_vstd: ++ UNIMPLEMENTED_SW64(); ++ break; ++ ++ case OP(0x08): ++ case OP(0x06): //杂项指令 ++ opcode_func_raw = instr->SwFunctionFieldRaw(15, 0) | instr->OpcodeFieldRaw(); ++ switch (opcode_func_raw) { ++ case op_lldw: ++ Format(instr, "lldw 'ra, 'disp(11-0)('rb)"); ++ break; ++ case op_lldl: ++ Format(instr, "lldl 'ra, 'disp(11-0)('rb)"); ++ break; ++ case op_ldw_inc: ++ Format(instr, "ldw_inc 'ra, 'disp(11-0)('rb)"); ++ break; ++ case op_ldl_inc: ++ Format(instr, "ldl_inc 'ra, 'disp(11-0)('rb)"); ++ break; ++ case op_ldw_dec: ++ Format(instr, "ldw_dec 'ra, 'disp(11-0)('rb)"); ++ break; ++ case op_ldl_dec: ++ Format(instr, "ldl_dec 'ra, 'disp(11-0)('rb)"); 
++ break; ++ case op_ldw_set: ++ Format(instr, "ldw_set 'ra, 'disp(11-0)('rb)"); ++ break; ++ case op_ldl_set: ++ Format(instr, "ldl_set 'ra, 'disp(11-0)('rb)"); ++ break; ++ case op_lstw: ++ Format(instr, "lstw 'ra, 'disp(11-0)('rb)"); ++ break; ++ case op_lstl: ++ Format(instr, "lstl 'ra, 'disp(11-0)('rb)"); ++ break; ++ ++ case op_memb: ++ Format(instr, "memb"); ++ break; ++ case op_rtc: ++ Format(instr, "rtc 'ra, 'rb"); ++ break; ++ case op_rcid: ++ Format(instr, "rcid 'ra"); ++ break; ++ case op_halt: ++ Format(instr, "halt"); ++ break; ++ case op_rd_f: ++ Format(instr, "rd_f 'ra"); ++ break; ++ case op_wr_f: ++ Format(instr, "wr_f, 'ra"); ++ break; ++ default: ++ UNIMPLEMENTED_SW64(); ++ } ++ break; ++ ++ default: ++ printf("a 0x%x \n", instr->OpcodeFieldRaw()); ++ UNREACHABLE(); ++ } ++} ++void Decoder::SwDecodeTypeSimpleCalculation(Instruction* instr){ ++ int simple_calculation_op = instr->SwFunctionFieldRaw(12, 5) | instr->OpcodeFieldValue(); ++ switch (simple_calculation_op) { ++ case op_addw: ++ Format(instr, "addw 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_subw: ++ Format(instr, "subw 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_s4addw: ++ Format(instr, "s4addw 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_s4subw: ++ Format(instr, "s4subw 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_s8addw: ++ Format(instr, "s8addw 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_s8subw: ++ Format(instr, "s8subw 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_addl: ++ Format(instr, "addl 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_subl: ++ Format(instr, "subl 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_s4addl: ++ Format(instr, "s4addl 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_s4subl: ++ Format(instr, "s4subl 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_s8addl: ++ Format(instr, "s8addl 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_s8subl: ++ Format(instr, "s8subl 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_mulw: ++ Format(instr, "mulw 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_mull: ++ Format(instr, "mull 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_umulh: ++ Format(instr, "umulh 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_cmpeq: ++ Format(instr, "cmpeq 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_cmplt: ++ Format(instr, "cmplt 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_cmple: ++ Format(instr, "cmple 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_cmpult: ++ Format(instr, "cmpult 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_cmpule: ++ Format(instr, "cmpule 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_and: ++ Format(instr, "and 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_bic: ++ Format(instr, "bic 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_bis: //case op_or: ++ Format(instr, "or 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_ornot: ++ Format(instr, "ornot 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_xor: ++ Format(instr, "xor 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_eqv: ++ Format(instr, "eqv 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_inslb: ++ Format(instr, "inslb 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_inslh: ++ Format(instr, "inslh 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_inslw: ++ Format(instr, "inslw 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_insll: ++ Format(instr, "insll 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_inshb: ++ Format(instr, "inshb 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_inshh: ++ Format(instr, "inshh 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_inshw: ++ Format(instr, "inshw 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_inshl: ++ Format(instr, "inshl 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_slll: ++ Format(instr, 
"slll 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_srll: ++ Format(instr, "srll 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_sral: ++ Format(instr, "sral 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_extlb: ++ Format(instr, "extlb 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_extlh: ++ Format(instr, "extlh 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_extlw: ++ Format(instr, "extlw 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_extll: ++ Format(instr, "extll 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_exthb: ++ Format(instr, "exthb 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_exthh: ++ Format(instr, "exthh 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_exthw: ++ Format(instr, "exthw 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_exthl: ++ Format(instr, "exthl 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_ctpop: ++ Format(instr, "ctpop 'rb, 'rc(4-0)"); ++ break; ++ case op_ctlz: ++ Format(instr, "ctlz 'rb, 'rc(4-0)"); ++ break; ++ case op_cttz: ++ Format(instr, "cttz 'rb, 'rc(4-0)"); ++ break; ++ case op_masklb: ++ Format(instr, "masklb 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_masklh: ++ Format(instr, "masklh 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_masklw: ++ Format(instr, "masklw 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_maskll: ++ Format(instr, "maskll 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_maskhb: ++ Format(instr, "maskhb 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_maskhh: ++ Format(instr, "maskhh 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_maskhw: ++ Format(instr, "maskhw 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_maskhl: ++ Format(instr, "maskhl 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_zap: ++ Format(instr, "zap 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_zapnot: ++ Format(instr, "zapnot 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_sextb: ++ Format(instr, "sextb 'rb, 'rc(4-0)"); ++ break; ++ case op_sexth: ++ Format(instr, "sexth 'rb, 'rc(4-0)"); ++ break; ++ case op_cmpgeb: ++ Format(instr, "cmpgeb 'ra, 'rb, 'rc(4-0)"); ++ break; ++ case op_fimovs: ++ Format(instr, "fimovs 'fa, 'rc(4-0)"); ++ break; ++ case op_fimovd: ++ Format(instr, "fimovd 'fa, 'rc(4-0)"); ++ break; ++ ++ case op_addw_l: //立即数版? 
++ Format(instr, "addw 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_subw_l: ++ Format(instr, "subw 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_s4addw_l: ++ Format(instr, "s4addw 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_s4subw_l: ++ Format(instr, "s4subw 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_s8addw_l: ++ Format(instr, "s8addw 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_s8subw_l: ++ Format(instr, "s8subw 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_addl_l: ++ Format(instr, "addl 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_subl_l: ++ Format(instr, "subl 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_s4addl_l: ++ Format(instr, "s4addl 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_s4subl_l: ++ Format(instr, "s4subl 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_s8addl_l: ++ Format(instr, "s8addl 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_s8subl_l: ++ Format(instr, "s8subl 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_mulw_l: ++ Format(instr, "mulw 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_mull_l: ++ Format(instr, "mull 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_umulh_l: ++ Format(instr, "umulh 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_cmpeq_l: ++ Format(instr, "cmpeq 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_cmplt_l: ++ Format(instr, "cmplt 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_cmple_l: ++ Format(instr, "cmple 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_cmpult_l: ++ Format(instr, "cmpult 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_cmpule_l: ++ Format(instr, "cmpule 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_and_l: ++ Format(instr, "and 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_bic_l: ++ Format(instr, "bic 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_bis_l: //case op_or_l: ++ Format(instr, "or 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_ornot_l: ++ Format(instr, "ornot 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_xor_l: ++ Format(instr, "xor 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_eqv_l: ++ Format(instr, "eqv 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_inslb_l: ++ Format(instr, "inslb 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_inslh_l: ++ Format(instr, "inslh 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_inslw_l: ++ Format(instr, "inslw 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_insll_l: ++ Format(instr, "insll 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_inshb_l: ++ Format(instr, "inshb 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_inshh_l: ++ Format(instr, "inshh 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_inshw_l: ++ Format(instr, "inshw 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_inshl_l: ++ Format(instr, "inshl 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_slll_l: ++ Format(instr, "slll 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_srll_l: ++ Format(instr, "srll 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_sral_l: ++ Format(instr, "sral 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_extlb_l: ++ Format(instr, "extlb 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_extlh_l: ++ Format(instr, "extlh 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_extlw_l: ++ Format(instr, "extlw 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_extll_l: ++ Format(instr, "extll 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_exthb_l: ++ Format(instr, "exthb 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_exthh_l: ++ Format(instr, "exthh 'ra, 
'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_exthw_l: ++ Format(instr, "exthw 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_exthl_l: ++ Format(instr, "exthl 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_masklb_l: ++ Format(instr, "masklb 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_masklh_l: ++ Format(instr, "masklh 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_masklw_l: ++ Format(instr, "masklw 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_maskll_l: ++ Format(instr, "maskll 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_maskhb_l: ++ Format(instr, "maskhb 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_maskhh_l: ++ Format(instr, "maskhh 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_maskhw_l: ++ Format(instr, "maskhw 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_maskhl_l: ++ Format(instr, "maskhl 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_zap_l: ++ Format(instr, "zap 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_zapnot_l: ++ Format(instr, "zapnot 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_sextb_l: ++ Format(instr, "sextb 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_sexth_l: ++ Format(instr, "sexth 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_cmpgeb_l: ++ Format(instr, "cmpgeb 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ ++ case op_fadds: //浮点简单运算指令格? ++ Format(instr, "fadds 'fa, 'fb, 'fc(4-0)"); ++ break; ++ case op_faddd: ++ Format(instr, "faddd 'fa, 'fb, 'fc(4-0)"); ++ break; ++ case op_fsubs: ++ Format(instr, "fsubs 'fa, 'fb, 'fc(4-0)"); ++ break; ++ case op_fsubd: ++ Format(instr, "fsubd 'fa, 'fb, 'fc(4-0)"); ++ break; ++ case op_fmuls: ++ Format(instr, "fmuls 'fa, 'fb, 'fc(4-0)"); ++ break; ++ case op_fmuld: ++ Format(instr, "fmuld 'fa, 'fb, 'fc(4-0)"); ++ break; ++ case op_fdivs: ++ Format(instr, "fdivs 'fa, 'fb, 'fc(4-0)"); ++ break; ++ case op_fdivd: ++ Format(instr, "fdivd 'fa, 'fb, 'fc(4-0)"); ++ break; ++ case op_fsqrts: ++ Format(instr, "fsqrts 'fb, 'fc(4-0)"); ++ break; ++ case op_fsqrtd: ++ Format(instr, "fsqrtd 'fb, 'fc(4-0)"); ++ break; ++ case op_fcmpeq: ++ Format(instr, "fcmpeq 'fa, 'fb, 'fc(4-0)"); ++ break; ++ case op_fcmple: ++ Format(instr, "fcmple 'fa, 'fb, 'fc(4-0)"); ++ break; ++ case op_fcmplt: ++ Format(instr, "fcmplt 'fa, 'fb, 'fc(4-0)"); ++ break; ++ case op_fcmpun: ++ Format(instr, "fcmpun 'fa, 'fb, 'fc(4-0)"); ++ break; ++ case op_fcvtsd: ++ Format(instr, "fcvtsd 'fb, 'fc(4-0)"); ++ break; ++ case op_fcvtds: ++ Format(instr, "fcvtds 'fb, 'fc(4-0)"); ++ break; ++ case op_fcvtdl_g: ++ Format(instr, "fcvtdl_g 'fb, 'fc(4-0)"); ++ break; ++ case op_fcvtdl_p: ++ Format(instr, "fcvtdl_p 'fb, 'fc(4-0)"); ++ break; ++ case op_fcvtdl_z: ++ Format(instr, "fcvtdl_z 'fb, 'fc(4-0)"); ++ break; ++ case op_fcvtdl_n: ++ Format(instr, "fcvtdl_n 'fb, 'fc(4-0)"); ++ break; ++ case op_fcvtdl: ++ Format(instr, "fcvtdl 'fb, 'fc(4-0)"); ++ break; ++ case op_fcvtwl: ++ Format(instr, "fcvtwl 'fb, 'fc(4-0)"); ++ break; ++ case op_fcvtlw: ++ Format(instr, "fcvtlw 'fb, 'fc(4-0)"); ++ break; ++ case op_fcvtls: ++ Format(instr, "fcvtls 'fb, 'fc(4-0)"); ++ break; ++ case op_fcvtld: ++ Format(instr, "fcvtld 'fb, 'fc(4-0)"); ++ break; ++ case op_fcpys: ++ Format(instr, "fcpys 'fa, 'fb, 'fc(4-0)"); ++ break; ++ case op_fcpyse: ++ Format(instr, "fcpyse 'fa, 'fb, 'fc(4-0)"); ++ break; ++ case op_fcpysn: ++ Format(instr, "fcpysn 'fa, 'fb, 'fc(4-0)"); ++ break; ++ case op_ifmovs: ++ Format(instr, "ifmovs 'ra, 'fc(4-0)"); ++ break; ++ case op_ifmovd: ++ Format(instr, "ifmovd 'ra, 'fc(4-0)"); ++ break; ++ case op_rfpcr: ++ Format(instr, 
"rfpcr 'fa, FPCR"); ++ break; ++ case op_wfpcr: ++ Format(instr, "wfpcr 'fa, FPCR"); ++ break; ++ case op_setfpec0: ++ Format(instr, "setfpec0"); ++ break; ++ case op_setfpec1: ++ Format(instr, "setfpec1"); ++ break; ++ case op_setfpec2: ++ Format(instr, "setfpec2"); ++ break; ++ case op_setfpec3: ++ Format(instr, "setfpec3"); ++ break; ++ ++ default: ++ printf("a 0x%x \n", instr->OpcodeFieldRaw()); ++ UNREACHABLE(); ++ } ++ ++} ++void Decoder::SwDecodeTypeCompositeCalculation(Instruction* instr){ ++ switch (instr->OpcodeFieldValue()) { ++ case OP(0x11)://整数复合运算指令格式的寄存器形式 ++ case OP(0x13)://整数复合运算指令格式的立即数形式 ++ SwDecodeTypeCompositeCalculationInteger(instr); ++ break; ++ case OP(0x19)://浮点复合运算指令格式的寄存器形式 ++ SwDecodeTypeCompositeCalculationFloatintPoint(instr); ++ break; ++ } ++} ++ ++void Decoder::SwDecodeTypeCompositeCalculationInteger(Instruction* instr) { ++ int composite_calculation_op = instr->SwFunctionFieldRaw(12, 10) | instr->OpcodeFieldValue(); ++ switch (composite_calculation_op) { ++ case op_seleq: ++ Format(instr, "seleq 'ra, 'rb, 'rc(9-5), 'rd"); ++ break; ++ case op_selge: ++ Format(instr, "selge 'ra, 'rb, 'rc(9-5), 'rd"); ++ break; ++ case op_selgt: ++ Format(instr, "selgt 'ra, 'rb, 'rc(9-5), 'rd"); ++ break; ++ case op_selle: ++ Format(instr, "selle 'ra, 'rb, 'rc(9-5), 'rd"); ++ break; ++ case op_sellt: ++ Format(instr, "sellt 'ra, 'rb, 'rc(9-5), 'rd"); ++ break; ++ case op_selne: ++ Format(instr, "selne 'ra, 'rb, 'rc(9-5), 'rd"); ++ break; ++ case op_sellbc: ++ Format(instr, "sellbc 'ra, 'rb, 'rc(9-5), 'rd"); ++ break; ++ case op_sellbs: ++ Format(instr, "sellbs 'ra, 'rb, 'rc(9-5), 'rd"); ++ break; ++ case op_seleq_l: ++ Format(instr, "seleq 'ra, 'imm(20-13), 'rc(9-5), 'rd"); ++ break; ++ case op_selge_l: ++ Format(instr, "selge 'ra, 'imm(20-13), 'rc(9-5), 'rd"); ++ break; ++ case op_selgt_l: ++ Format(instr, "selgt 'ra, 'imm(20-13), 'rc(9-5), 'rd"); ++ break; ++ case op_selle_l: ++ Format(instr, "selle 'ra, 'imm(20-13), 'rc(9-5), 'rd"); ++ break; ++ case op_sellt_l: ++ Format(instr, "sellt 'ra, 'imm(20-13), 'rc(9-5), 'rd"); ++ break; ++ case op_selne_l: ++ Format(instr, "selne 'ra, 'imm(20-13), 'rc(9-5), 'rd"); ++ break; ++ case op_sellbc_l: ++ Format(instr, "sellbc 'ra, 'imm(20-13), 'rc(9-5), 'rd"); ++ break; ++ case op_sellbs_l: ++ Format(instr, "sellbs 'ra, 'imm(20-13), 'rc(9-5), 'rd"); ++ break; ++ ++ default: ++ printf("a 0x%x \n", instr->OpcodeFieldRaw()); ++ UNREACHABLE(); ++ } ++} ++ ++void Decoder::SwDecodeTypeCompositeCalculationFloatintPoint(Instruction* instr) { ++ int composite_fp_calculation_op = instr->SwFunctionFieldRaw(15, 10) | instr->OpcodeFieldValue(); ++ switch (composite_fp_calculation_op) { ++ case op_fmas: ++ Format(instr, "fmas 'fa, 'fb, 'fc(9-5), 'fd"); ++ break; ++ case op_fmad: ++ Format(instr, "fmad 'fa, 'fb, 'fc(9-5), 'fd"); ++ break; ++ case op_fmss: ++ Format(instr, "fmss 'fa, 'fb, 'fc(9-5), 'fd"); ++ break; ++ case op_fmsd: ++ Format(instr, "fmsd 'fa, 'fb, 'fc(9-5), 'fd"); ++ break; ++ case op_fnmas: ++ Format(instr, "fnmas 'fa, 'fb, 'fc(9-5), 'fd"); ++ break; ++ case op_fnmad: ++ Format(instr, "fnmad 'fa, 'fb, 'fc(9-5), 'fd"); ++ break; ++ case op_fnmss: ++ Format(instr, "fnmss 'fa, 'fb, 'fc(9-5), 'fd"); ++ break; ++ case op_fnmsd: ++ Format(instr, "fnmsd 'fa, 'fb, 'fc(9-5), 'fd"); ++ break; ++ case op_fseleq: ++ Format(instr, "fseleq 'fa, 'fb, 'fc(9-5), 'fd"); ++ break; ++ case op_fselne: ++ Format(instr, "fselne 'fa, 'fb, 'fc(9-5), 'fd"); ++ break; ++ case op_fsellt: ++ Format(instr, "fsellt 'fa, 'fb, 'fc(9-5), 'fd"); ++ break; ++ case 
op_fselle: ++ Format(instr, "fselle 'fa, 'fb, 'fc(9-5), 'fd"); ++ break; ++ case op_fselgt: ++ Format(instr, "fselgt 'fa, 'fb, 'fc(9-5), 'fd"); ++ break; ++ case op_fselge: ++ Format(instr, "fselge 'fa, 'fb, 'fc(9-5), 'fd"); ++ break; ++ ++ default: ++ printf("a 0x%x \n", instr->OpcodeFieldRaw()); ++ UNREACHABLE(); ++ ++ } ++} ++ ++#undef OP ++#undef OPR ++#endif ++ ++ ++// Disassemble the instruction at *instr_ptr into the output buffer. ++// All instructions are one word long, except for the simulator ++// pseudo-instruction stop(msg). For that one special case, we return ++// size larger than one kInstrSize. ++int Decoder::InstructionDecode(byte* instr_ptr) { ++ Instruction* instr = Instruction::At(instr_ptr); ++ instr_pc_ = instr_ptr; //ld 20150323 ++ // Print raw instruction bytes. ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, ++ "%08x ", ++ instr->InstructionBits()); ++ switch (instr->InstructionType()) { ++#ifdef SW64 ++ case Instruction::kSwStorageType: { ++ SwDecodeTypeStorage(instr); ++ break; ++ } ++ case Instruction::kSwSimpleCalculationType: { ++ SwDecodeTypeSimpleCalculation(instr); ++ break; ++ } ++ case Instruction::kSwTransferanceType:{ ++ SwDecodeTypeTransferance(instr);//ld 20150319 ++ break; ++ } ++ case Instruction::kSwCompositeCalculationType: { ++ SwDecodeTypeCompositeCalculation(instr); ++ break; ++ } ++//cjq 20150317:TODO ++ case Instruction::kSwSyscallType:{ ++ SwDecodeTypeSyscall(instr); ++ break; ++ } ++ case Instruction::kSwSimulatorTrap:{ ++ Format(instr, "op_trap '0x(25-0)"); ++ break; ++ } ++#endif ++ default: { ++ Format(instr, "UNSUPPORTED"); ++ UNSUPPORTED_SW64(); ++ } ++ } ++ return kInstrSize; ++} ++ ++byte* Decoder::decode_instructions(byte* start, byte* end) { ++ ++ // decode a series of instructions and return the end of the last instruction ++ ++ return (byte*) ++ (*Decoder::_decode_instructions)(start, end, ++ NULL, (void*) this, ++ NULL, (void*) this, ++ NULL /*options()*/); ++} ++ ++#ifdef SW64 ++void* Decoder::_library = NULL; ++bool Decoder::_tried_to_load_library = false; ++ ++// This routine is in the shared library: ++Decoder::decode_func Decoder::_decode_instructions = NULL; ++ ++//static const char hsdis_library_name[] = "hsdis-sw64"; ++static const char decode_instructions_name[] = "decode_instructions"; ++ ++/* Used to protect dlsym() calls */ ++static pthread_mutex_t dl_mutex; ++ ++void* dll_load(const char *filename, char *ebuf, int ebuflen) { ++ void* result= dlopen(filename, RTLD_LAZY); ++ if (result != NULL) { ++ // Successful loading ++ return result; ++ } ++ ++ return NULL; ++} ++ ++void* dll_lookup(void* handle, const char* name) { ++ pthread_mutex_lock(&dl_mutex); ++ void* res = dlsym(handle, name); ++ pthread_mutex_unlock(&dl_mutex); ++ return res; ++} ++ ++bool Decoder::load_library() { ++ if (_decode_instructions != NULL) { ++ // Already succeeded. ++ return true; ++ } ++ ++ if (_tried_to_load_library) { ++ // Do not try twice. ++ // To force retry in debugger: assign _tried_to_load_library=0 ++ return false; ++ } ++ ++ // Try to load it. ++ //// v8::internal::Decoder d(converter_, buffer); ++ char ebuf[1024]; ++ char buf[4096]; ++ // Find the disassembler shared library. ++ // 4. hsdis-.so (using LD_LIBRARY_PATH) ++ if (_library == NULL) { ++ // 4. 
hsdis-.so (using LD_LIBRARY_PATH) ++ strcpy(&buf[0], "./hsdis-sw64.so"); ++ _library = dll_load(buf, ebuf, sizeof ebuf); ++ } ++ if (_library != NULL) { ++ _decode_instructions = decode_func((byte*) (dll_lookup(_library, decode_instructions_name)) ); ++ } ++ _tried_to_load_library = true; ++ ++ if (_decode_instructions == NULL) { ++ v8::internal::PrintF("Could not load %s; %s\n", buf, ((_library != NULL) ? "entry point is missing" ++ : "library not loadable")); ++ return false; ++ } ++ ++ // Success. ++ v8::internal::PrintF("Loaded disassembler from %s\n", buf); ++ return true; ++} ++#endif ++ ++} // namespace internal ++} // namespace v8 ++ ++ ++//------------------------------------------------------------------------------ ++ ++namespace disasm { ++ ++const char* NameConverter::NameOfAddress(byte* addr) const { ++ v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast(addr)); ++ return tmp_buffer_.begin(); ++} ++ ++ ++const char* NameConverter::NameOfConstant(byte* addr) const { ++ return NameOfAddress(addr); ++} ++ ++ ++const char* NameConverter::NameOfCPURegister(int reg) const { ++ return v8::internal::Registers::Name(reg); ++} ++ ++ ++const char* NameConverter::NameOfXMMRegister(int reg) const { ++ return v8::internal::FPURegisters::Name(reg); ++} ++ ++ ++const char* NameConverter::NameOfByteCPURegister(int reg) const { ++ UNREACHABLE(); // SW64 does not have the concept of a byte register. ++ return "nobytereg"; ++} ++ ++ ++const char* NameConverter::NameInCode(byte* addr) const { ++ // The default name converter is called for unknown code. So we will not try ++ // to access any memory. ++ return ""; ++} ++ ++ ++//------------------------------------------------------------------------------ ++ ++int Disassembler::InstructionDecode(v8::internal::Vector buffer, ++ byte* instruction) { ++ v8::internal::Decoder d(converter_, buffer); ++#if 0 //def SW64 //20181029 ++ if( d.load_library() ) { ++ d.decode_instructions(instruction, instruction); ++ } ++ return 4 /*Instruction::kInstrSize*/; ++#else ++ return d.InstructionDecode(instruction); ++#endif ++} ++ ++ ++// The SW64 assembler does not currently use constant pools. ++int Disassembler::ConstantPoolSizeAt(byte* instruction) { ++ return -1; ++} ++ ++ ++void Disassembler::Disassemble(FILE* f, byte* begin, byte* end, ++ UnimplementedOpcodeAction unimplemented_action) { ++ NameConverter converter; ++ Disassembler d(converter, unimplemented_action); ++ for (byte* pc = begin; pc < end;) { ++ v8::internal::EmbeddedVector buffer; ++ buffer[0] = '\0'; ++ byte* prev_pc = pc; ++ pc += d.InstructionDecode(buffer, pc); ++ v8::internal::PrintF(f, "%p %08x %s\n", static_cast(prev_pc), ++ *reinterpret_cast(prev_pc), buffer.begin()); ++ } ++} ++ ++#undef STRING_STARTS_WITH ++ ++} // namespace disasm ++ ++#endif // V8_TARGET_ARCH_SW64 +diff --git a/src/3rdparty/chromium/v8/src/execution/frame-constants.h b/src/3rdparty/chromium/v8/src/execution/frame-constants.h +index 8c3f77431..c5d82e8fa 100644 +--- a/src/3rdparty/chromium/v8/src/execution/frame-constants.h ++++ b/src/3rdparty/chromium/v8/src/execution/frame-constants.h +@@ -391,6 +391,8 @@ inline static int FrameSlotToFPOffset(int slot) { + #include "src/execution/mips64/frame-constants-mips64.h" // NOLINT + #elif V8_TARGET_ARCH_S390 + #include "src/execution/s390/frame-constants-s390.h" // NOLINT ++#elif V8_TARGET_ARCH_SW64 ++#include "src/execution/sw64/frame-constants-sw64.h" // NOLINT + #else + #error Unsupported target architecture. 
+ #endif +diff --git a/src/3rdparty/chromium/v8/src/execution/simulator.h b/src/3rdparty/chromium/v8/src/execution/simulator.h +index a4e07b235..6725efee2 100644 +--- a/src/3rdparty/chromium/v8/src/execution/simulator.h ++++ b/src/3rdparty/chromium/v8/src/execution/simulator.h +@@ -26,6 +26,8 @@ + #include "src/execution/mips64/simulator-mips64.h" + #elif V8_TARGET_ARCH_S390 + #include "src/execution/s390/simulator-s390.h" ++#elif V8_TARGET_ARCH_SW64 ++#include "src/execution/sw64/simulator-sw64.h" + #else + #error Unsupported target architecture. + #endif +diff --git a/src/3rdparty/chromium/v8/src/execution/sw64/frame-constants-sw64.cc b/src/3rdparty/chromium/v8/src/execution/sw64/frame-constants-sw64.cc +new file mode 100755 +index 000000000..d52a4e3e5 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/execution/sw64/frame-constants-sw64.cc +@@ -0,0 +1,32 @@ ++// Copyright 2011 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#if V8_TARGET_ARCH_SW64 ++ ++#include "src/codegen/sw64/assembler-sw64-inl.h" ++#include "src/execution/frame-constants.h" ++#include "src/execution/frames.h" ++ ++#include "src/execution/sw64/frame-constants-sw64.h" ++ ++namespace v8 { ++namespace internal { ++ ++Register JavaScriptFrame::fp_register() { return v8::internal::fp; } ++Register JavaScriptFrame::context_register() { return cp; } ++Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); } ++ ++int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) { ++ return register_count; ++} ++ ++int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) { ++ USE(register_count); ++ return 0; ++} ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_TARGET_ARCH_SW64 +diff --git a/src/3rdparty/chromium/v8/src/execution/sw64/frame-constants-sw64.h b/src/3rdparty/chromium/v8/src/execution/sw64/frame-constants-sw64.h +new file mode 100755 +index 000000000..96b1f2ac6 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/execution/sw64/frame-constants-sw64.h +@@ -0,0 +1,78 @@ ++// Copyright 2011 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#ifndef V8_EXECUTION_SW64_FRAME_CONSTANTS_SW64_H_ ++#define V8_EXECUTION_SW64_FRAME_CONSTANTS_SW64_H_ ++ ++#include "src/base/bits.h" ++#include "src/base/macros.h" ++#include "src/execution/frame-constants.h" ++ ++namespace v8 { ++namespace internal { ++ ++class EntryFrameConstants : public AllStatic { ++ public: ++ // This is the offset to where JSEntry pushes the current value of ++ // Isolate::c_entry_fp onto the stack. ++ static constexpr int kCallerFPOffset = -3 * kSystemPointerSize; ++}; ++ ++class WasmCompileLazyFrameConstants : public TypedFrameConstants { ++ public: ++ static constexpr int kNumberOfSavedGpParamRegs = 6; ++ static constexpr int kNumberOfSavedFpParamRegs = 6; ++ static constexpr int kNumberOfSavedAllParamRegs = 12; ++ ++ // FP-relative. ++ // See Generate_WasmCompileLazy in builtins-sw64.cc. ++ static constexpr int kWasmInstanceOffset = ++ TYPED_FRAME_PUSHED_VALUE_OFFSET(kNumberOfSavedAllParamRegs); ++ static constexpr int kFixedFrameSizeFromFp = ++ TypedFrameConstants::kFixedFrameSizeFromFp + ++ kNumberOfSavedGpParamRegs * kPointerSize + ++ kNumberOfSavedFpParamRegs * kDoubleSize; ++}; ++ ++// Frame constructed by the {WasmDebugBreak} builtin. 
++// After pushing the frame type marker, the builtin pushes all Liftoff cache ++// registers (see liftoff-assembler-defs.h). ++class WasmDebugBreakFrameConstants : public TypedFrameConstants { ++ public: ++ // {a0, a1, a2, a3, a4, a5, t0, t1, t2, t3, t4, t9, t10, s5, v0} ++ static constexpr uint32_t kPushedGpRegs = 0b1101111110100000000111111; ++ // {f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16, f17, f18, f19, f20, f21, ++ // f22, f23, f24, f25, f26} ++ static constexpr uint32_t kPushedFpRegs = 0b111111111111111111111111111; ++ ++ static constexpr int kNumPushedGpRegisters = ++ base::bits::CountPopulation(kPushedGpRegs); ++ static constexpr int kNumPushedFpRegisters = ++ base::bits::CountPopulation(kPushedFpRegs); ++ ++ static constexpr int kLastPushedGpRegisterOffset = ++ -kFixedFrameSizeFromFp - kNumPushedGpRegisters * kSystemPointerSize; ++ static constexpr int kLastPushedFpRegisterOffset = ++ kLastPushedGpRegisterOffset - kNumPushedFpRegisters * kDoubleSize; ++ ++ // Offsets are fp-relative. ++ static int GetPushedGpRegisterOffset(int reg_code) { ++ DCHECK_NE(0, kPushedGpRegs & (1 << reg_code)); ++ uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1); ++ return kLastPushedGpRegisterOffset + ++ base::bits::CountPopulation(lower_regs) * kSystemPointerSize; ++ } ++ ++ static int GetPushedFpRegisterOffset(int reg_code) { ++ DCHECK_NE(0, kPushedFpRegs & (1 << reg_code)); ++ uint32_t lower_regs = kPushedFpRegs & ((uint32_t{1} << reg_code) - 1); ++ return kLastPushedFpRegisterOffset + ++ base::bits::CountPopulation(lower_regs) * kDoubleSize; ++ } ++}; ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_SW64_FRAME_CONSTANTS_SW64_H_ +diff --git a/src/3rdparty/chromium/v8/src/execution/sw64/simulator-sw64.cc b/src/3rdparty/chromium/v8/src/execution/sw64/simulator-sw64.cc +new file mode 100755 +index 000000000..fe50a9002 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/execution/sw64/simulator-sw64.cc +@@ -0,0 +1,8980 @@ ++// Copyright 2011 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#include "src/execution/sw64/simulator-sw64.h" ++ ++// Only build the simulator if not compiling for real SW64 hardware. ++#if defined(USE_SIMULATOR) ++ ++#include ++#include ++#include ++#include ++ ++#include "src/base/bits.h" ++#include "src/codegen/assembler-inl.h" ++#include "src/codegen/macro-assembler.h" ++#include "src/codegen/sw64/constants-sw64.h" ++#include "src/diagnostics/disasm.h" ++#include "src/heap/combined-heap.h" ++#include "src/runtime/runtime-utils.h" ++#include "src/utils/ostreams.h" ++#include "src/utils/vector.h" ++ ++#define IsSw64SoftFloatABI 0 ++enum { ++ SW64_OVI0_BIT = 57, ++ SW64_INE0_BIT = 56, ++ SW64_UNF0_BIT = 55, ++ SW64_OVF0_BIT = 54, ++ SW64_DZE0_BIT = 53, ++ SW64_INV0_BIT = 52, ++}; ++ ++namespace v8 { ++namespace internal { ++ ++// Util functions. 
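++// Note (added explanation): HaveSameSign below relies on the sign-bit trick:
++// the XOR of two two's-complement values is non-negative exactly when their
++// sign bits are equal (e.g. -3 ^ -7 >= 0, while 3 ^ -7 < 0); zero is treated
++// as having a positive sign.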
++inline bool HaveSameSign(int64_t a, int64_t b) { return ((a ^ b) >= 0); } ++ ++uint32_t get_fcsr_condition_bit(uint32_t cc) { ++ if (cc == 0) { ++ return 23; ++ } else { ++ return 24 + cc; ++ } ++} ++ ++ ++//static int64_t MultiplyHighSigned(int64_t u, int64_t v) { ++// uint64_t u0, v0, w0; ++// int64_t u1, v1, w1, w2, t; ++ ++// u0 = u & 0xFFFFFFFFL; ++// u1 = u >> 32; ++// v0 = v & 0xFFFFFFFFL; ++// v1 = v >> 32; ++ ++// w0 = u0 * v0; ++// t = u1 * v0 + (w0 >> 32); ++// w1 = t & 0xFFFFFFFFL; ++// w2 = t >> 32; ++// w1 = u0 * v1 + w1; ++ ++// return u1 * v1 + w2 + (w1 >> 32); ++//} ++ ++ ++// This macro provides a platform independent use of sscanf. The reason for ++// SScanF not being implemented in a platform independent was through ++// ::v8::internal::OS in the same way as SNPrintF is that the Windows C Run-Time ++// Library does not provide vsscanf. ++#define SScanF sscanf // NOLINT ++ ++// The Sw64Debugger class is used by the simulator while debugging simulated ++// code. ++class Sw64Debugger { ++ public: ++ explicit Sw64Debugger(Simulator* sim) : sim_(sim) { } ++ ++ void Stop(Instruction* instr); ++ void Debug(); ++ // Print all registers with a nice formatting. ++ void PrintAllRegs(); ++ void PrintAllRegsIncludingFPU(); ++ ++ private: ++ // We set the breakpoint code to 0xFFFFF to easily recognize it. ++ static const Instr kBreakpointInstr = op_trap | BREAK; ++ static const Instr kNopInstr = 0x0; ++ ++ Simulator* sim_; ++ ++ int64_t GetRegisterValue(int regnum); ++ int64_t GetFPURegisterValue(int regnum); ++ float GetFPURegisterValueFloat(int regnum); ++ double GetFPURegisterValueDouble(int regnum); ++ bool GetValue(const char* desc, int64_t* value); ++ ++ // Set or delete a breakpoint. Returns true if successful. ++ bool SetBreakpoint(Instruction* breakpc); ++ bool DeleteBreakpoint(Instruction* breakpc); ++ ++ // Undo and redo all breakpoints. This is needed to bracket disassembly and ++ // execution to skip past breakpoints when run from the debugger. ++ void UndoBreakpoints(); ++ void RedoBreakpoints(); ++}; ++ ++inline void UNSUPPORTED() { printf("Sim: Unsupported instruction.\n"); } ++ ++void Sw64Debugger::Stop(Instruction* instr) { ++ // Get the stop code. 
++ uint32_t code = instr->Bits(25, 6); ++ PrintF("Simulator hit (%u)\n", code); ++ Debug(); ++} ++ ++int64_t Sw64Debugger::GetRegisterValue(int regnum) { ++ if (regnum == kNumSimuRegisters) { ++ return sim_->get_pc(); ++ } else { ++ return sim_->get_register(regnum); ++ } ++} ++ ++ ++int64_t Sw64Debugger::GetFPURegisterValue(int regnum) { ++ if (regnum == kNumFPURegisters) { ++ return sim_->get_pc(); ++ } else { ++ return sim_->get_fpu_register(regnum); ++ } ++} ++ ++ ++float Sw64Debugger::GetFPURegisterValueFloat(int regnum) { ++ if (regnum == kNumFPURegisters) { ++ return sim_->get_pc(); ++ } else { ++ return sim_->get_fpu_register_float(regnum); ++ } ++} ++ ++ ++double Sw64Debugger::GetFPURegisterValueDouble(int regnum) { ++ if (regnum == kNumFPURegisters) { ++ return sim_->get_pc(); ++ } else { ++ return sim_->get_fpu_register_double(regnum); ++ } ++} ++ ++ ++bool Sw64Debugger::GetValue(const char* desc, int64_t* value) { ++ int regnum = Registers::Number(desc); ++ int fpuregnum = FPURegisters::Number(desc); ++ ++ if (regnum != kInvalidRegister) { ++ *value = GetRegisterValue(regnum); ++ return true; ++ } else if (fpuregnum != kInvalidFPURegister) { ++ *value = GetFPURegisterValue(fpuregnum); ++ return true; ++ } else if (strncmp(desc, "0x", 2) == 0) { ++ return SScanF(desc + 2, "%" SCNx64, ++ reinterpret_cast(value)) == 1; ++ } else { ++ return SScanF(desc, "%" SCNu64, reinterpret_cast(value)) == 1; ++ } ++ return false; ++} ++ ++ ++bool Sw64Debugger::SetBreakpoint(Instruction* breakpc) { ++ // Check if a breakpoint can be set. If not return without any side-effects. ++ if (sim_->break_pc_ != nullptr) { ++ return false; ++ } ++ ++ // Set the breakpoint. ++ sim_->break_pc_ = breakpc; ++ sim_->break_instr_ = breakpc->InstructionBits(); ++ // Not setting the breakpoint instruction in the code itself. It will be set ++ // when the debugger shell continues. ++ return true; ++} ++ ++ ++bool Sw64Debugger::DeleteBreakpoint(Instruction* breakpc) { ++ if (sim_->break_pc_ != nullptr) { ++ sim_->break_pc_->SetInstructionBits(sim_->break_instr_); ++ } ++ ++ sim_->break_pc_ = nullptr; ++ sim_->break_instr_ = 0; ++ return true; ++} ++ ++ ++void Sw64Debugger::UndoBreakpoints() { ++ if (sim_->break_pc_ != nullptr) { ++ sim_->break_pc_->SetInstructionBits(sim_->break_instr_); ++ } ++} ++ ++ ++void Sw64Debugger::RedoBreakpoints() { ++ if (sim_->break_pc_ != nullptr) { ++ mprotect((void*)((uint64_t)sim_->break_pc_ & ~((uint64_t)4095)), 4096, PROT_WRITE); ++ sim_->break_pc_->SetInstructionBits(kBreakpointInstr); ++ } ++} ++ ++ ++void Sw64Debugger::PrintAllRegs() { ++#define REG_INFO(n) Registers::Name(n), GetRegisterValue(n), GetRegisterValue(n) ++ ++ PrintF("\n"); ++ // at, v0, a0. ++ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 "\t%3s: 0x%016" PRIx64 " %14" PRId64 ++ "\t%3s: 0x%016" PRIx64 " %14" PRId64 "\n", ++ REG_INFO(1), REG_INFO(2), REG_INFO(4)); ++ // v1, a1. ++ PrintF("%34s\t%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64 ++ " %14" PRId64 " \n", ++ "", REG_INFO(3), REG_INFO(5)); ++ // a2. ++ PrintF("%34s\t%34s\t%3s: 0x%016" PRIx64 " %14" PRId64 " \n", "", "", ++ REG_INFO(6)); ++ // a3. ++ PrintF("%34s\t%34s\t%3s: 0x%016" PRIx64 " %14" PRId64 " \n", "", "", ++ REG_INFO(7)); ++ PrintF("\n"); ++ // a4-t3, s0-s7 ++ for (int i = 0; i < 8; i++) { ++ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64 ++ " %14" PRId64 " \n", ++ REG_INFO(8 + i), REG_INFO(16 + i)); ++ } ++ PrintF("\n"); ++ // t8, k0, LO. 
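++  // (The register names in these comments appear to be carried over from the
++  // MIPS simulator; the names actually printed come from Registers::Name()
++  // and use the SW64 register set.)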
++ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64 ++ " %14" PRId64 " \t%3s: 0x%016" PRIx64 " %14" PRId64 " \n", ++ REG_INFO(24), REG_INFO(26), REG_INFO(32)); ++ // t9, k1, HI. ++ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64 ++ " %14" PRId64 " \t%3s: 0x%016" PRIx64 " %14" PRId64 " \n", ++ REG_INFO(25), REG_INFO(27), REG_INFO(33)); ++ // sp, fp, gp. ++ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64 ++ " %14" PRId64 " \t%3s: 0x%016" PRIx64 " %14" PRId64 " \n", ++ REG_INFO(29), REG_INFO(30), REG_INFO(28)); ++ // pc. ++ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64 ++ " %14" PRId64 " \n", ++ REG_INFO(31), REG_INFO(34)); ++ ++#undef REG_INFO ++#undef FPU_REG_INFO ++} ++ ++ ++void Sw64Debugger::PrintAllRegsIncludingFPU() { ++#define FPU_REG_INFO(n) FPURegisters::Name(n), \ ++ GetFPURegisterValue(n), \ ++ GetFPURegisterValueDouble(n) ++ ++ PrintAllRegs(); ++ ++ PrintF("\n\n"); ++ // f0, f1, f2, ... f31. ++ // TODO(plind): consider printing 2 columns for space efficiency. ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(0)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(1)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(2)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(3)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(4)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(5)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(6)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(7)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(8)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(9)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(10)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(11)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(12)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(13)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(14)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(15)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(16)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(17)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(18)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(19)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(20)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(21)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(22)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(23)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(24)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(25)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(26)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(27)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(28)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(29)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(30)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(31)); ++ ++#undef REG_INFO ++#undef FPU_REG_INFO ++} ++ ++ ++void Sw64Debugger::Debug() { ++ intptr_t last_pc = -1; ++ bool done = false; ++ ++#define COMMAND_SIZE 63 ++#define ARG_SIZE 255 ++ ++#define STR(a) #a ++#define XSTR(a) STR(a) ++ ++ char cmd[COMMAND_SIZE + 1]; ++ char arg1[ARG_SIZE + 1]; ++ char arg2[ARG_SIZE + 1]; ++ char* argv[3] = { cmd, arg1, arg2 }; ++ ++ // Make sure to have a proper terminating character if reaching the limit. 
++ cmd[COMMAND_SIZE] = 0; ++ arg1[ARG_SIZE] = 0; ++ arg2[ARG_SIZE] = 0; ++ ++ // Undo all set breakpoints while running in the debugger shell. This will ++ // make them invisible to all commands. ++ UndoBreakpoints(); ++ ++ while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) { ++ if (last_pc != sim_->get_pc()) { ++ disasm::NameConverter converter; ++ disasm::Disassembler dasm(converter); ++ // Use a reasonably large buffer. ++ v8::internal::EmbeddedVector buffer; ++ dasm.InstructionDecode(buffer, reinterpret_cast(sim_->get_pc())); ++ PrintF(" 0x%016" PRIx64 " %s\n", sim_->get_pc(), buffer.begin()); ++ last_pc = sim_->get_pc(); ++ } ++ char* line = ReadLine("sim> "); ++ if (line == nullptr) { ++ break; ++ } else { ++ char* last_input = sim_->last_debugger_input(); ++ if (strcmp(line, "\n") == 0 && last_input != nullptr) { ++ line = last_input; ++ } else { ++ // Ownership is transferred to sim_; ++ sim_->set_last_debugger_input(line); ++ } ++ // Use sscanf to parse the individual parts of the command line. At the ++ // moment no command expects more than two parameters. ++ int argc = SScanF(line, ++ "%" XSTR(COMMAND_SIZE) "s " ++ "%" XSTR(ARG_SIZE) "s " ++ "%" XSTR(ARG_SIZE) "s", ++ cmd, arg1, arg2); ++ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) { ++ Instruction* instr = reinterpret_cast(sim_->get_pc()); ++ if (!(instr->IsTrap()) || ++ instr->InstructionBits() == rtCallRedirInstr) { ++ sim_->InstructionDecode( ++ reinterpret_cast(sim_->get_pc())); ++ } else { ++ // Allow si to jump over generated breakpoints. ++ PrintF("/!\\ Jumping over generated breakpoint.\n"); ++ sim_->set_pc(sim_->get_pc() + kInstrSize); ++ } ++ } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) { ++ // Execute the one instruction we broke at with breakpoints disabled. ++ sim_->InstructionDecode(reinterpret_cast(sim_->get_pc())); ++ // Leave the debugger shell. 
++ done = true; ++ } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) { ++ if (argc == 2) { ++ int64_t value; ++ double dvalue; ++ if (strcmp(arg1, "all") == 0) { ++ PrintAllRegs(); ++ } else if (strcmp(arg1, "allf") == 0) { ++ PrintAllRegsIncludingFPU(); ++ } else { ++ int regnum = Registers::Number(arg1); ++ int fpuregnum = FPURegisters::Number(arg1); ++ ++ if (regnum != kInvalidRegister) { ++ value = GetRegisterValue(regnum); ++ PrintF("%s: 0x%08" PRIx64 " %" PRId64 " \n", arg1, value, ++ value); ++ } else if (fpuregnum != kInvalidFPURegister) { ++ value = GetFPURegisterValue(fpuregnum); ++ dvalue = GetFPURegisterValueDouble(fpuregnum); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", ++ FPURegisters::Name(fpuregnum), value, dvalue); ++ } else { ++ PrintF("%s unrecognized\n", arg1); ++ } ++ } ++ } else { ++ if (argc == 3) { ++ if (strcmp(arg2, "single") == 0) { ++ int64_t value; ++ float fvalue; ++ int fpuregnum = FPURegisters::Number(arg1); ++ ++ if (fpuregnum != kInvalidFPURegister) { ++ value = GetFPURegisterValue(fpuregnum); ++ value &= 0xFFFFFFFFUL; ++ fvalue = GetFPURegisterValueFloat(fpuregnum); ++ PrintF("%s: 0x%08" PRIx64 " %11.4e\n", arg1, value, fvalue); ++ } else { ++ PrintF("%s unrecognized\n", arg1); ++ } ++ } else { ++ PrintF("print single\n"); ++ } ++ } else { ++ PrintF("print or print single\n"); ++ } ++ } ++ } else if ((strcmp(cmd, "po") == 0) ++ || (strcmp(cmd, "printobject") == 0)) { ++ if (argc == 2) { ++ int64_t value; ++ StdoutStream os; ++ if (GetValue(arg1, &value)) { ++ Object obj(value); ++ os << arg1 << ": \n"; ++#ifdef DEBUG ++ obj.Print(os); ++ os << "\n"; ++#else ++ os << Brief(obj) << "\n"; ++#endif ++ } else { ++ os << arg1 << " unrecognized\n"; ++ } ++ } else { ++ PrintF("printobject \n"); ++ } ++ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0 || ++ strcmp(cmd, "dump") == 0) { ++ int64_t* cur = nullptr; ++ int64_t* end = nullptr; ++ int next_arg = 1; ++ ++ if (strcmp(cmd, "stack") == 0) { ++ cur = reinterpret_cast(sim_->get_register(Simulator::sp)); ++ } else { // Command "mem". ++ int64_t value; ++ if (!GetValue(arg1, &value)) { ++ PrintF("%s unrecognized\n", arg1); ++ continue; ++ } ++ cur = reinterpret_cast(value); ++ next_arg++; ++ } ++ ++ int64_t words; ++ if (argc == next_arg) { ++ words = 10; ++ } else { ++ if (!GetValue(argv[next_arg], &words)) { ++ words = 10; ++ } ++ } ++ end = cur + words; ++ ++ bool skip_obj_print = (strcmp(cmd, "dump") == 0); ++ while (cur < end) { ++ PrintF(" 0x%012" PRIxPTR " : 0x%016" PRIx64 " %14" PRId64 " ", ++ reinterpret_cast(cur), *cur, *cur); ++ Object obj(*cur); ++ Heap* current_heap = sim_->isolate_->heap(); ++ if (!skip_obj_print) { ++ if (obj.IsSmi() || ++ IsValidHeapObject(current_heap, HeapObject::cast(obj))) { ++ PrintF(" ("); ++ if (obj.IsSmi()) { ++ PrintF("smi %d", Smi::ToInt(obj)); ++ } else { ++ obj.ShortPrint(); ++ } ++ PrintF(")"); ++ } ++ } ++ PrintF("\n"); ++ cur++; ++ } ++ ++ } else if ((strcmp(cmd, "disasm") == 0) || ++ (strcmp(cmd, "dpc") == 0) || ++ (strcmp(cmd, "di") == 0)) { ++ disasm::NameConverter converter; ++ disasm::Disassembler dasm(converter); ++ // Use a reasonably large buffer. ++ v8::internal::EmbeddedVector buffer; ++ ++ byte* cur = nullptr; ++ byte* end = nullptr; ++ ++ if (argc == 1) { ++ cur = reinterpret_cast(sim_->get_pc()); ++ end = cur + (10 * kInstrSize); ++ } else if (argc == 2) { ++ int regnum = Registers::Number(arg1); ++ if (regnum != kInvalidRegister || strncmp(arg1, "0x", 2) == 0) { ++ // The argument is an address or a register name. 
++ int64_t value; ++ if (GetValue(arg1, &value)) { ++ cur = reinterpret_cast(value); ++ // Disassemble 10 instructions at . ++ end = cur + (10 * kInstrSize); ++ } ++ } else { ++ // The argument is the number of instructions. ++ int64_t value; ++ if (GetValue(arg1, &value)) { ++ cur = reinterpret_cast(sim_->get_pc()); ++ // Disassemble instructions. ++ end = cur + (value * kInstrSize); ++ } ++ } ++ } else { ++ int64_t value1; ++ int64_t value2; ++ if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) { ++ cur = reinterpret_cast(value1); ++ end = cur + (value2 * kInstrSize); ++ } ++ } ++ ++ while (cur < end) { ++ dasm.InstructionDecode(buffer, cur); ++ PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast(cur), ++ buffer.begin()); ++ cur += kInstrSize; ++ } ++ } else if (strcmp(cmd, "gdb") == 0) { ++ PrintF("relinquishing control to gdb\n"); ++ v8::base::OS::DebugBreak(); ++ PrintF("regaining control from gdb\n"); ++ } else if (strcmp(cmd, "break") == 0) { ++ if (argc == 2) { ++ int64_t value; ++ if (GetValue(arg1, &value)) { ++ if (!SetBreakpoint(reinterpret_cast(value))) { ++ PrintF("setting breakpoint failed\n"); ++ } ++ } else { ++ PrintF("%s unrecognized\n", arg1); ++ } ++ } else { ++ PrintF("break
\n"); ++ } ++ } else if (strcmp(cmd, "del") == 0) { ++ if (!DeleteBreakpoint(nullptr)) { ++ PrintF("deleting breakpoint failed\n"); ++ } ++ } else if (strcmp(cmd, "flags") == 0) { ++ PrintF("No flags on SW64 !\n"); ++ } else if (strcmp(cmd, "stop") == 0) { ++ int64_t value; ++ intptr_t stop_pc = sim_->get_pc() - ++ 2 * kInstrSize; ++ Instruction* stop_instr = reinterpret_cast(stop_pc); ++ Instruction* msg_address = ++ reinterpret_cast(stop_pc + ++ kInstrSize); ++ if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) { ++ // Remove the current stop. ++ if (sim_->IsStopInstruction(stop_instr)) { ++ stop_instr->SetInstructionBits(kNopInstr); ++ msg_address->SetInstructionBits(kNopInstr); ++ } else { ++ PrintF("Not at debugger stop.\n"); ++ } ++ } else if (argc == 3) { ++ // Print information about all/the specified breakpoint(s). ++ if (strcmp(arg1, "info") == 0) { ++ if (strcmp(arg2, "all") == 0) { ++ PrintF("Stop information:\n"); ++ for (uint32_t i = kMaxWatchpointCode + 1; ++ i <= kMaxStopCode; ++ i++) { ++ sim_->PrintStopInfo(i); ++ } ++ } else if (GetValue(arg2, &value)) { ++ sim_->PrintStopInfo(value); ++ } else { ++ PrintF("Unrecognized argument.\n"); ++ } ++ } else if (strcmp(arg1, "enable") == 0) { ++ // Enable all/the specified breakpoint(s). ++ if (strcmp(arg2, "all") == 0) { ++ for (uint32_t i = kMaxWatchpointCode + 1; ++ i <= kMaxStopCode; ++ i++) { ++ sim_->EnableStop(i); ++ } ++ } else if (GetValue(arg2, &value)) { ++ sim_->EnableStop(value); ++ } else { ++ PrintF("Unrecognized argument.\n"); ++ } ++ } else if (strcmp(arg1, "disable") == 0) { ++ // Disable all/the specified breakpoint(s). ++ if (strcmp(arg2, "all") == 0) { ++ for (uint32_t i = kMaxWatchpointCode + 1; ++ i <= kMaxStopCode; ++ i++) { ++ sim_->DisableStop(i); ++ } ++ } else if (GetValue(arg2, &value)) { ++ sim_->DisableStop(value); ++ } else { ++ PrintF("Unrecognized argument.\n"); ++ } ++ } ++ } else { ++ PrintF("Wrong usage. Use help command for more information.\n"); ++ } ++ } else if ((strcmp(cmd, "stat") == 0) || (strcmp(cmd, "st") == 0)) { ++ // Print registers and disassemble. ++ PrintAllRegs(); ++ PrintF("\n"); ++ ++ disasm::NameConverter converter; ++ disasm::Disassembler dasm(converter); ++ // Use a reasonably large buffer. ++ v8::internal::EmbeddedVector buffer; ++ ++ byte* cur = nullptr; ++ byte* end = nullptr; ++ ++ if (argc == 1) { ++ cur = reinterpret_cast(sim_->get_pc()); ++ end = cur + (10 * kInstrSize); ++ } else if (argc == 2) { ++ int64_t value; ++ if (GetValue(arg1, &value)) { ++ cur = reinterpret_cast(value); ++ // no length parameter passed, assume 10 instructions ++ end = cur + (10 * kInstrSize); ++ } ++ } else { ++ int64_t value1; ++ int64_t value2; ++ if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) { ++ cur = reinterpret_cast(value1); ++ end = cur + (value2 * kInstrSize); ++ } ++ } ++ ++ while (cur < end) { ++ dasm.InstructionDecode(buffer, cur); ++ PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast(cur), ++ buffer.begin()); ++ cur += kInstrSize; ++ } ++ } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) { ++ PrintF("cont\n"); ++ PrintF(" continue execution (alias 'c')\n"); ++ PrintF("stepi\n"); ++ PrintF(" step one instruction (alias 'si')\n"); ++ PrintF("print \n"); ++ PrintF(" print register content (alias 'p')\n"); ++ PrintF(" use register name 'all' to print all registers\n"); ++ PrintF("printobject \n"); ++ PrintF(" print an object from a register (alias 'po')\n"); ++ PrintF("stack []\n"); ++ PrintF(" dump stack content, default dump 10 words)\n"); ++ PrintF("mem
[]\n"); ++ PrintF(" dump memory content, default dump 10 words)\n"); ++ PrintF("dump []\n"); ++ PrintF( ++ " dump memory content without pretty printing JS objects, default " ++ "dump 10 words)\n"); ++ PrintF("flags\n"); ++ PrintF(" print flags\n"); ++ PrintF("disasm []\n"); ++ PrintF("disasm [
]\n"); ++ PrintF("disasm [[
] ]\n"); ++ PrintF(" disassemble code, default is 10 instructions\n"); ++ PrintF(" from pc (alias 'di')\n"); ++ PrintF("gdb\n"); ++ PrintF(" enter gdb\n"); ++ PrintF("break
\n"); ++ PrintF(" set a break point on the address\n"); ++ PrintF("del\n"); ++ PrintF(" delete the breakpoint\n"); ++ PrintF("stop feature:\n"); ++ PrintF(" Description:\n"); ++ PrintF(" Stops are debug instructions inserted by\n"); ++ PrintF(" the Assembler::stop() function.\n"); ++ PrintF(" When hitting a stop, the Simulator will\n"); ++ PrintF(" stop and and give control to the Debugger.\n"); ++ PrintF(" All stop codes are watched:\n"); ++ PrintF(" - They can be enabled / disabled: the Simulator\n"); ++ PrintF(" will / won't stop when hitting them.\n"); ++ PrintF(" - The Simulator keeps track of how many times they \n"); ++ PrintF(" are met. (See the info command.) Going over a\n"); ++ PrintF(" disabled stop still increases its counter. \n"); ++ PrintF(" Commands:\n"); ++ PrintF(" stop info all/ : print infos about number \n"); ++ PrintF(" or all stop(s).\n"); ++ PrintF(" stop enable/disable all/ : enables / disables\n"); ++ PrintF(" all or number stop(s)\n"); ++ PrintF(" stop unstop\n"); ++ PrintF(" ignore the stop instruction at the current location\n"); ++ PrintF(" from now on\n"); ++ } else { ++ PrintF("Unknown command: %s\n", cmd); ++ } ++ } ++ } ++ ++ // Add all the breakpoints back to stop execution and enter the debugger ++ // shell when hit. ++ RedoBreakpoints(); ++ ++#undef COMMAND_SIZE ++#undef ARG_SIZE ++ ++#undef STR ++#undef XSTR ++} ++ ++bool Simulator::ICacheMatch(void* one, void* two) { ++ DCHECK_EQ(reinterpret_cast(one) & CachePage::kPageMask, 0); ++ DCHECK_EQ(reinterpret_cast(two) & CachePage::kPageMask, 0); ++ return one == two; ++} ++ ++ ++static uint32_t ICacheHash(void* key) { ++ return static_cast(reinterpret_cast(key)) >> 2; ++} ++ ++ ++static bool AllOnOnePage(uintptr_t start, size_t size) { ++ intptr_t start_page = (start & ~CachePage::kPageMask); ++ intptr_t end_page = ((start + size) & ~CachePage::kPageMask); ++ return start_page == end_page; ++} ++ ++ ++void Simulator::set_last_debugger_input(char* input) { ++ DeleteArray(last_debugger_input_); ++ last_debugger_input_ = input; ++} ++ ++void Simulator::SetRedirectInstruction(Instruction* instruction) { ++ instruction->SetInstructionBits(rtCallRedirInstr); ++} ++ ++void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache, ++ void* start_addr, size_t size) { ++ int64_t start = reinterpret_cast(start_addr); ++ int64_t intra_line = (start & CachePage::kLineMask); ++ start -= intra_line; ++ size += intra_line; ++ size = ((size - 1) | CachePage::kLineMask) + 1; ++ int offset = (start & CachePage::kPageMask); ++ while (!AllOnOnePage(start, size - 1)) { ++ int bytes_to_flush = CachePage::kPageSize - offset; ++ FlushOnePage(i_cache, start, bytes_to_flush); ++ start += bytes_to_flush; ++ size -= bytes_to_flush; ++ DCHECK_EQ((int64_t)0, start & CachePage::kPageMask); ++ offset = 0; ++ } ++ if (size != 0) { ++ FlushOnePage(i_cache, start, size); ++ } ++} ++ ++CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache, ++ void* page) { ++ base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page)); ++ if (entry->value == nullptr) { ++ CachePage* new_page = new CachePage(); ++ entry->value = new_page; ++ } ++ return reinterpret_cast(entry->value); ++} ++ ++ ++// Flush from start up to and not including start + size. 
++void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache, ++ intptr_t start, size_t size) { ++ DCHECK_LE(size, CachePage::kPageSize); ++ DCHECK(AllOnOnePage(start, size - 1)); ++ DCHECK_EQ(start & CachePage::kLineMask, 0); ++ DCHECK_EQ(size & CachePage::kLineMask, 0); ++ void* page = reinterpret_cast(start & (~CachePage::kPageMask)); ++ int offset = (start & CachePage::kPageMask); ++ CachePage* cache_page = GetCachePage(i_cache, page); ++ char* valid_bytemap = cache_page->ValidityByte(offset); ++ memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift); ++} ++ ++void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache, ++ Instruction* instr) { ++ int64_t address = reinterpret_cast(instr); ++ void* page = reinterpret_cast(address & (~CachePage::kPageMask)); ++ void* line = reinterpret_cast(address & (~CachePage::kLineMask)); ++ int offset = (address & CachePage::kPageMask); ++ CachePage* cache_page = GetCachePage(i_cache, page); ++ char* cache_valid_byte = cache_page->ValidityByte(offset); ++ bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID); ++ char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask); ++ if (cache_hit) { ++ // Check that the data in memory matches the contents of the I-cache. ++ CHECK_EQ(0, memcmp(reinterpret_cast(instr), ++ cache_page->CachedData(offset), ++ kInstrSize)); ++ } else { ++ // Cache miss. Load memory into the cache. ++ memcpy(cached_line, line, CachePage::kLineLength); ++ *cache_valid_byte = CachePage::LINE_VALID; ++ } ++} ++ ++ ++Simulator::Simulator(Isolate* isolate) : isolate_(isolate) { ++ // Set up simulator support first. Some of this information is needed to ++ // setup the architecture state. ++ stack_size_ = FLAG_sim_stack_size * KB; ++ stack_ = reinterpret_cast(malloc(stack_size_)); ++ pc_modified_ = false; ++ icount_ = 0; ++ break_count_ = 0; ++ break_pc_ = nullptr; ++ break_instr_ = 0; ++ ++ lock_valid = 0; ++ lock_flag = 0; ++ lock_success = 0; ++ lock_register_padd = 0; ++ lock_register_flag = 0; ++ ++ // Set up architecture state. ++ // All registers are initialized to zero to start with. ++ for (int i = 0; i < kNumSimuRegisters; i++) { ++ registers_[i] = 0; ++ } ++ for (int i = 0; i < kNumFPURegisters; i++) { ++ FPUregisters_[2 * i] = 0; ++ FPUregisters_[2 * i + 1] = 0; // upper part for MSA ASE ++ } ++ ++ if (kArchVariant == kSw64r3) { ++ FCSR_ = kFCSRNaN2008FlagMask; ++ MSACSR_ = 0; ++ } else { ++ FCSR_ = 0; ++ } ++ ++ // The sp is initialized to point to the bottom (high address) of the ++ // allocated stack area. To be safe in potential stack underflows we leave ++ // some buffer below. ++ registers_[sp] = reinterpret_cast(stack_) + stack_size_ - 64; ++ // The ra and pc are initialized to a known bad value that will cause an ++ // access violation if the simulator ever tries to execute it. ++ registers_[pc] = bad_ra; ++ registers_[ra] = bad_ra; ++ ++ last_debugger_input_ = nullptr; ++} ++ ++ ++Simulator::~Simulator() { free(stack_); } ++ ++ ++// Get the active Simulator for the current thread. ++Simulator* Simulator::current(Isolate* isolate) { ++ v8::internal::Isolate::PerIsolateThreadData* isolate_data = ++ isolate->FindOrAllocatePerThreadDataForThisThread(); ++ DCHECK_NOT_NULL(isolate_data); ++ ++ Simulator* sim = isolate_data->simulator(); ++ if (sim == nullptr) { ++ // TODO(146): delete the simulator object when a thread/isolate goes away. 
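++    // Simulators are created lazily, one per isolate thread, and cached in
++    // the per-thread isolate data, so later calls to current() return the
++    // same instance for this thread.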
++ sim = new Simulator(isolate); ++ isolate_data->set_simulator(sim); ++ } ++ return sim; ++} ++ ++// Sets the register in the architecture state. It will also deal with updating ++// Simulator internal state for special registers such as PC. ++void Simulator::set_register(int reg, int64_t value) { ++ DCHECK((reg >= 0) && (reg < kNumSimuRegisters)); ++ if (reg == pc) { ++ pc_modified_ = true; ++ } ++ ++ // Zero register always holds 0. ++ registers_[reg] = (reg == zero_reg) ? 0 : value; ++} ++ ++ ++void Simulator::set_dw_register(int reg, const int* dbl) { ++ DCHECK((reg >= 0) && (reg < kNumSimuRegisters)); ++ registers_[reg] = dbl[1]; ++ registers_[reg] = registers_[reg] << 32; ++ registers_[reg] += dbl[0]; ++} ++ ++ ++void Simulator::set_fpu_register(int fpureg, int64_t value) { ++ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); ++ FPUregisters_[fpureg * 2] = value; ++} ++ ++ ++void Simulator::set_fpu_register_word(int fpureg, int32_t value) { ++ // Set ONLY lower 32-bits, leaving upper bits untouched. ++ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); ++ int32_t* pword; ++ if (kArchEndian == kLittle) { ++ pword = reinterpret_cast(&FPUregisters_[fpureg * 2]); ++ } else { ++ pword = reinterpret_cast(&FPUregisters_[fpureg * 2]) + 1; ++ } ++ *pword = value; ++} ++ ++ ++void Simulator::set_fpu_register_hi_word(int fpureg, int32_t value) { ++ // Set ONLY upper 32-bits, leaving lower bits untouched. ++ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); ++ int32_t* phiword; ++ if (kArchEndian == kLittle) { ++ phiword = (reinterpret_cast(&FPUregisters_[fpureg * 2])) + 1; ++ } else { ++ phiword = reinterpret_cast(&FPUregisters_[fpureg * 2]); ++ } ++ *phiword = value; ++} ++ ++ ++void Simulator::set_fpu_register_float(int fpureg, float value) { ++ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); ++ *bit_cast(&FPUregisters_[fpureg * 2]) = value; ++} ++ ++ ++void Simulator::set_fpu_register_double(int fpureg, double value) { ++ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); ++ *bit_cast(&FPUregisters_[fpureg * 2]) = value; ++} ++ ++ ++// Get the register from the architecture state. This function does handle ++// the special case of accessing the PC register. ++int64_t Simulator::get_register(int64_t reg) const { ++ DCHECK((reg >= 0) && (reg < kNumSimuRegisters)); ++ if (reg == zero_reg) ++ return 0; ++ else ++ return registers_[reg] + ((reg == pc) ? Instruction::kPCReadOffset : 0); ++} ++ ++ ++double Simulator::get_double_from_register_pair(int reg) { ++ // TODO(plind): bad ABI stuff, refactor or remove. ++ DCHECK((reg >= 0) && (reg < kNumSimuRegisters) && ((reg % 2) == 0)); ++ ++ double dm_val = 0.0; ++ // Read the bits from the unsigned integer register_[] array ++ // into the double precision floating point value and return it. 
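++  // The memcpy round-trip below is deliberate type punning: it reinterprets
++  // the raw 64 bits of the integer register as an IEEE double (equivalent to
++  // a bit_cast) without running into strict-aliasing problems.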
++ char buffer[sizeof(registers_[0])]; ++ memcpy(buffer, ®isters_[reg], sizeof(registers_[0])); ++ memcpy(&dm_val, buffer, sizeof(registers_[0])); ++ return(dm_val); ++} ++ ++ ++int64_t Simulator::get_fpu_register(int fpureg) const { ++ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); ++ return FPUregisters_[fpureg * 2]; ++} ++ ++ ++int32_t Simulator::get_fpu_register_word(int fpureg) const { ++ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); ++ return static_cast(FPUregisters_[fpureg * 2] & 0xFFFFFFFF); ++} ++ ++ ++int32_t Simulator::get_fpu_register_signed_word(int fpureg) const { ++ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); ++ return static_cast(FPUregisters_[fpureg * 2] & 0xFFFFFFFF); ++} ++ ++ ++int32_t Simulator::get_fpu_register_hi_word(int fpureg) const { ++ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); ++ return static_cast((FPUregisters_[fpureg * 2] >> 32) & 0xFFFFFFFF); ++} ++ ++ ++float Simulator::get_fpu_register_float(int fpureg) const { ++ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); ++ return *bit_cast(const_cast(&FPUregisters_[fpureg * 2])); ++} ++ ++ ++double Simulator::get_fpu_register_double(int fpureg) const { ++ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); ++ return *bit_cast(&FPUregisters_[fpureg * 2]); ++} ++ ++template ++void Simulator::get_msa_register(int wreg, T* value) { ++ DCHECK((wreg >= 0) && (wreg < kNumMSARegisters)); ++ memcpy(value, FPUregisters_ + wreg * 2, kSimd128Size); ++} ++ ++template ++void Simulator::set_msa_register(int wreg, const T* value) { ++ DCHECK((wreg >= 0) && (wreg < kNumMSARegisters)); ++ memcpy(FPUregisters_ + wreg * 2, value, kSimd128Size); ++} ++ ++// Runtime FP routines take up to two double arguments and zero ++// or one integer arguments. All are constructed here, ++// from a0-a3 or f12 and f13 (n64), or f14 (O32). ++void Simulator::GetFpArgs(double* x, double* y, int32_t* z) { ++ if (!IsSw64SoftFloatABI) { ++ *x = get_fpu_register_double(f16); ++ *y = get_fpu_register_double(f17); ++ *z = static_cast(get_register(a1)); ++ } else { ++ // TODO(plind): bad ABI stuff, refactor or remove. ++ // We use a char buffer to get around the strict-aliasing rules which ++ // otherwise allow the compiler to optimize away the copy. ++ char buffer[sizeof(*x)]; ++ int32_t* reg_buffer = reinterpret_cast(buffer); ++ ++ // Registers a0 and a1 -> x. ++ reg_buffer[0] = get_register(a0); ++ reg_buffer[1] = get_register(a1); ++ memcpy(x, buffer, sizeof(buffer)); ++ // Registers a2 and a3 -> y. ++ reg_buffer[0] = get_register(a2); ++ reg_buffer[1] = get_register(a3); ++ memcpy(y, buffer, sizeof(buffer)); ++ // Register 2 -> z. ++ reg_buffer[0] = get_register(a2); ++ memcpy(z, buffer, sizeof(*z)); ++ } ++} ++ ++ ++// The return value is either in v0/v1 or f0. ++void Simulator::SetFpResult(const double& result) { ++ set_fpu_register_double(f0, result); ++} ++ ++ ++// Helper functions for setting and testing the FCSR register's bits. 
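++// Typical usage: set_fcsr_bit(kFCSRInexactFlagBit, true) raises a status
++// flag and test_fcsr_bit(kFCSRInexactFlagBit) reads it back. Note that the
++// setter shifts 1LL while test_fcsr_bit shifts a plain int, so bit indices of
++// 32 and above (such as the SW64_*_BIT values defined at the top of this
++// file) are only safe to pass to the setter as written.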
++void Simulator::set_fcsr_bit(uint32_t cc, bool value) { ++ if (value) { ++ FCSR_ |= (1LL << cc); ++ } else { ++ FCSR_ &= ~(1LL << cc); ++ } ++} ++ ++bool Simulator::test_fcsr_bit(uint32_t cc) { return FCSR_ & (1 << cc); } ++ ++void Simulator::set_fcsr_rounding_mode(FPURoundingMode mode) { ++ FCSR_ |= mode & kFPURoundingModeMask; ++} ++ ++void Simulator::set_msacsr_rounding_mode(FPURoundingMode mode) { ++ MSACSR_ |= mode & kFPURoundingModeMask; ++} ++ ++unsigned int Simulator::get_fcsr_rounding_mode() { ++ return FCSR_ & kFPURoundingModeMask; ++} ++ ++unsigned int Simulator::get_msacsr_rounding_mode() { ++ return MSACSR_ & kFPURoundingModeMask; ++} ++ ++// Sets the rounding error codes in FCSR based on the result of the rounding. ++// Returns true if the operation was invalid. ++bool Simulator::set_fcsr_round_error(double original, double rounded) { ++ bool ret = false; ++ double max_int32 = std::numeric_limits::max(); ++ double min_int32 = std::numeric_limits::min(); ++ ++ if (!std::isfinite(original) || !std::isfinite(rounded)) { ++ set_fcsr_bit(kFCSRInvalidOpFlagBit, true); ++ ret = true; ++ } ++ ++ if (original != rounded) { ++ set_fcsr_bit(kFCSRInexactFlagBit, true); ++ } ++ ++ if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) { ++ set_fcsr_bit(kFCSRUnderflowFlagBit, true); ++ ret = true; ++ } ++ ++ if (rounded > max_int32 || rounded < min_int32) { ++ set_fcsr_bit(kFCSROverflowFlagBit, true); ++ // The reference is not really clear but it seems this is required: ++ set_fcsr_bit(kFCSRInvalidOpFlagBit, true); ++ ret = true; ++ } ++ ++ return ret; ++} ++ ++// Sets the rounding error codes in FCSR based on the result of the rounding. ++// Returns true if the operation was invalid. ++bool Simulator::set_fcsr_round64_error(double original, double rounded) { ++ bool ret = false; ++ // The value of INT64_MAX (2^63-1) can't be represented as double exactly, ++ // loading the most accurate representation into max_int64, which is 2^63. ++ double max_int64 = std::numeric_limits::max(); ++ double min_int64 = std::numeric_limits::min(); ++ ++ if (!std::isfinite(original) || !std::isfinite(rounded)) { ++ set_fcsr_bit(kFCSRInvalidOpFlagBit, true); ++ ret = true; ++ } ++ ++ if (original != rounded) { ++ set_fcsr_bit(kFCSRInexactFlagBit, true); ++ } ++ ++ if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) { ++ set_fcsr_bit(kFCSRUnderflowFlagBit, true); ++ ret = true; ++ } ++ ++ if (rounded >= max_int64 || rounded < min_int64) { ++ set_fcsr_bit(kFCSROverflowFlagBit, true); ++ // The reference is not really clear but it seems this is required: ++ set_fcsr_bit(kFCSRInvalidOpFlagBit, true); ++ ret = true; ++ } ++ ++ return ret; ++} ++ ++// Sets the rounding error codes in FCSR based on the result of the rounding. ++// Returns true if the operation was invalid. 
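++// Worked example (added): converting 3.0e9f to int32 leaves the rounded value
++// above max_int32 (2147483647), so the overflow and invalid-operation bits
++// are set and the function returns true; callers typically then store
++// kFPUInvalidResult instead of the converted value.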
++bool Simulator::set_fcsr_round_error(float original, float rounded) { ++ bool ret = false; ++ double max_int32 = std::numeric_limits::max(); ++ double min_int32 = std::numeric_limits::min(); ++ ++ if (!std::isfinite(original) || !std::isfinite(rounded)) { ++ set_fcsr_bit(kFCSRInvalidOpFlagBit, true); ++ ret = true; ++ } ++ ++ if (original != rounded) { ++ set_fcsr_bit(kFCSRInexactFlagBit, true); ++ } ++ ++ if (rounded < FLT_MIN && rounded > -FLT_MIN && rounded != 0) { ++ set_fcsr_bit(kFCSRUnderflowFlagBit, true); ++ ret = true; ++ } ++ ++ if (rounded > max_int32 || rounded < min_int32) { ++ set_fcsr_bit(kFCSROverflowFlagBit, true); ++ // The reference is not really clear but it seems this is required: ++ set_fcsr_bit(kFCSRInvalidOpFlagBit, true); ++ ret = true; ++ } ++ ++ return ret; ++} ++ ++void Simulator::set_fpu_register_word_invalid_result(float original, ++ float rounded) { ++ if (FCSR_ & kFCSRNaN2008FlagMask) { ++ double max_int32 = std::numeric_limits::max(); ++ double min_int32 = std::numeric_limits::min(); ++ if (std::isnan(original)) { ++ set_fpu_register_word(fd_reg(), 0); ++ } else if (rounded > max_int32) { ++ set_fpu_register_word(fd_reg(), kFPUInvalidResult); ++ } else if (rounded < min_int32) { ++ set_fpu_register_word(fd_reg(), kFPUInvalidResultNegative); ++ } else { ++ UNREACHABLE(); ++ } ++ } else { ++ set_fpu_register_word(fd_reg(), kFPUInvalidResult); ++ } ++} ++ ++ ++void Simulator::set_fpu_register_invalid_result(float original, float rounded) { ++ if (FCSR_ & kFCSRNaN2008FlagMask) { ++ double max_int32 = std::numeric_limits::max(); ++ double min_int32 = std::numeric_limits::min(); ++ if (std::isnan(original)) { ++ set_fpu_register(fd_reg(), 0); ++ } else if (rounded > max_int32) { ++ set_fpu_register(fd_reg(), kFPUInvalidResult); ++ } else if (rounded < min_int32) { ++ set_fpu_register(fd_reg(), kFPUInvalidResultNegative); ++ } else { ++ UNREACHABLE(); ++ } ++ } else { ++ set_fpu_register(fd_reg(), kFPUInvalidResult); ++ } ++} ++ ++ ++void Simulator::set_fpu_register_invalid_result64(float original, ++ float rounded) { ++ if (FCSR_ & kFCSRNaN2008FlagMask) { ++ // The value of INT64_MAX (2^63-1) can't be represented as double exactly, ++ // loading the most accurate representation into max_int64, which is 2^63. 
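++    // Because max_int64 is really 2^63 (one past INT64_MAX), the range check
++    // below uses 'rounded >= max_int64' rather than '>', while the lower
++    // bound can use '<' since min_int64 (-2^63) is exactly representable.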
++ double max_int64 = std::numeric_limits::max(); ++ double min_int64 = std::numeric_limits::min(); ++ if (std::isnan(original)) { ++ set_fpu_register(fd_reg(), 0); ++ } else if (rounded >= max_int64) { ++ set_fpu_register(fd_reg(), kFPU64InvalidResult); ++ } else if (rounded < min_int64) { ++ set_fpu_register(fd_reg(), kFPU64InvalidResultNegative); ++ } else { ++ UNREACHABLE(); ++ } ++ } else { ++ set_fpu_register(fd_reg(), kFPU64InvalidResult); ++ } ++} ++ ++ ++void Simulator::set_fpu_register_word_invalid_result(double original, ++ double rounded) { ++ if (FCSR_ & kFCSRNaN2008FlagMask) { ++ double max_int32 = std::numeric_limits::max(); ++ double min_int32 = std::numeric_limits::min(); ++ if (std::isnan(original)) { ++ set_fpu_register_word(fd_reg(), 0); ++ } else if (rounded > max_int32) { ++ set_fpu_register_word(fd_reg(), kFPUInvalidResult); ++ } else if (rounded < min_int32) { ++ set_fpu_register_word(fd_reg(), kFPUInvalidResultNegative); ++ } else { ++ UNREACHABLE(); ++ } ++ } else { ++ set_fpu_register_word(fd_reg(), kFPUInvalidResult); ++ } ++} ++ ++ ++void Simulator::set_fpu_register_invalid_result(double original, ++ double rounded) { ++ if (FCSR_ & kFCSRNaN2008FlagMask) { ++ double max_int32 = std::numeric_limits::max(); ++ double min_int32 = std::numeric_limits::min(); ++ if (std::isnan(original)) { ++ set_fpu_register(fd_reg(), 0); ++ } else if (rounded > max_int32) { ++ set_fpu_register(fd_reg(), kFPUInvalidResult); ++ } else if (rounded < min_int32) { ++ set_fpu_register(fd_reg(), kFPUInvalidResultNegative); ++ } else { ++ UNREACHABLE(); ++ } ++ } else { ++ set_fpu_register(fd_reg(), kFPUInvalidResult); ++ } ++} ++ ++ ++void Simulator::set_fpu_register_invalid_result64(double original, ++ double rounded) { ++ if (FCSR_ & kFCSRNaN2008FlagMask) { ++ // The value of INT64_MAX (2^63-1) can't be represented as double exactly, ++ // loading the most accurate representation into max_int64, which is 2^63. ++ double max_int64 = std::numeric_limits::max(); ++ double min_int64 = std::numeric_limits::min(); ++ if (std::isnan(original)) { ++ set_fpu_register(fd_reg(), 0); ++ } else if (rounded >= max_int64) { ++ set_fpu_register(fd_reg(), kFPU64InvalidResult); ++ } else if (rounded < min_int64) { ++ set_fpu_register(fd_reg(), kFPU64InvalidResultNegative); ++ } else { ++ UNREACHABLE(); ++ } ++ } else { ++ set_fpu_register(fd_reg(), kFPU64InvalidResult); ++ } ++} ++ ++ ++// Sets the rounding error codes in FCSR based on the result of the rounding. ++// Returns true if the operation was invalid. ++bool Simulator::set_fcsr_round64_error(float original, float rounded) { ++ bool ret = false; ++ // The value of INT64_MAX (2^63-1) can't be represented as double exactly, ++ // loading the most accurate representation into max_int64, which is 2^63. 
++ double max_int64 = std::numeric_limits::max(); ++ double min_int64 = std::numeric_limits::min(); ++ ++ if (!std::isfinite(original) || !std::isfinite(rounded)) { ++ set_fcsr_bit(kFCSRInvalidOpFlagBit, true); ++ ret = true; ++ } ++ ++ if (original != rounded) { ++ set_fcsr_bit(kFCSRInexactFlagBit, true); ++ } ++ ++ if (rounded < FLT_MIN && rounded > -FLT_MIN && rounded != 0) { ++ set_fcsr_bit(kFCSRUnderflowFlagBit, true); ++ ret = true; ++ } ++ ++ if (rounded >= max_int64 || rounded < min_int64) { ++ set_fcsr_bit(kFCSROverflowFlagBit, true); ++ // The reference is not really clear but it seems this is required: ++ set_fcsr_bit(kFCSRInvalidOpFlagBit, true); ++ ret = true; ++ } ++ ++ return ret; ++} ++ ++ ++// For cvt instructions only ++void Simulator::round_according_to_fcsr(double toRound, double* rounded, ++ int32_t* rounded_int, double fs) { ++ // 0 RN (round to nearest): Round a result to the nearest ++ // representable value; if the result is exactly halfway between ++ // two representable values, round to zero. Behave like round_w_d. ++ ++ // 1 RZ (round toward zero): Round a result to the closest ++ // representable value whose absolute value is less than or ++ // equal to the infinitely accurate result. Behave like trunc_w_d. ++ ++ // 2 RP (round up, or toward +infinity): Round a result to the ++ // next representable value up. Behave like ceil_w_d. ++ ++ // 3 RN (round down, or toward −infinity): Round a result to ++ // the next representable value down. Behave like floor_w_d. ++ switch (FCSR_ & 3) { ++ case kRoundToNearest: ++ *rounded = std::floor(fs + 0.5); ++ *rounded_int = static_cast(*rounded); ++ if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) { ++ // If the number is halfway between two integers, ++ // round to the even one. ++ *rounded_int -= 1; ++ *rounded -= 1.; ++ } ++ break; ++ case kRoundToZero: ++ *rounded = trunc(fs); ++ *rounded_int = static_cast(*rounded); ++ break; ++ case kRoundToPlusInf: ++ *rounded = std::ceil(fs); ++ *rounded_int = static_cast(*rounded); ++ break; ++ case kRoundToMinusInf: ++ *rounded = std::floor(fs); ++ *rounded_int = static_cast(*rounded); ++ break; ++ } ++} ++ ++void Simulator::round64_according_to_fcsr(double toRound, double* rounded, ++ int64_t* rounded_int, double fs) { ++ // 0 RN (round to nearest): Round a result to the nearest ++ // representable value; if the result is exactly halfway between ++ // two representable values, round to zero. Behave like round_w_d. ++ ++ // 1 RZ (round toward zero): Round a result to the closest ++ // representable value whose absolute value is less than or. ++ // equal to the infinitely accurate result. Behave like trunc_w_d. ++ ++ // 2 RP (round up, or toward +infinity): Round a result to the ++ // next representable value up. Behave like ceil_w_d. ++ ++ // 3 RN (round down, or toward −infinity): Round a result to ++ // the next representable value down. Behave like floor_w_d. ++ switch (FCSR_ & 3) { ++ case kRoundToNearest: ++ *rounded = std::floor(fs + 0.5); ++ *rounded_int = static_cast(*rounded); ++ if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) { ++ // If the number is halfway between two integers, ++ // round to the even one. 
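++        // Worked example (added): fs = 2.5 gives floor(3.0) = 3, which is odd
++        // and exactly 0.5 above fs, so it is adjusted down to the even value
++        // 2; fs = 3.5 rounds to the even value 4 and is left unchanged.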
++ *rounded_int -= 1; ++ *rounded -= 1.; ++ } ++ break; ++ case kRoundToZero: ++ *rounded = trunc(fs); ++ *rounded_int = static_cast(*rounded); ++ break; ++ case kRoundToPlusInf: ++ *rounded = std::ceil(fs); ++ *rounded_int = static_cast(*rounded); ++ break; ++ case kRoundToMinusInf: ++ *rounded = std::floor(fs); ++ *rounded_int = static_cast(*rounded); ++ break; ++ } ++} ++ ++ ++// for cvt instructions only ++void Simulator::round_according_to_fcsr(float toRound, float* rounded, ++ int32_t* rounded_int, float fs) { ++ // 0 RN (round to nearest): Round a result to the nearest ++ // representable value; if the result is exactly halfway between ++ // two representable values, round to zero. Behave like round_w_d. ++ ++ // 1 RZ (round toward zero): Round a result to the closest ++ // representable value whose absolute value is less than or ++ // equal to the infinitely accurate result. Behave like trunc_w_d. ++ ++ // 2 RP (round up, or toward +infinity): Round a result to the ++ // next representable value up. Behave like ceil_w_d. ++ ++ // 3 RN (round down, or toward −infinity): Round a result to ++ // the next representable value down. Behave like floor_w_d. ++ switch (FCSR_ & 3) { ++ case kRoundToNearest: ++ *rounded = std::floor(fs + 0.5); ++ *rounded_int = static_cast(*rounded); ++ if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) { ++ // If the number is halfway between two integers, ++ // round to the even one. ++ *rounded_int -= 1; ++ *rounded -= 1.f; ++ } ++ break; ++ case kRoundToZero: ++ *rounded = trunc(fs); ++ *rounded_int = static_cast(*rounded); ++ break; ++ case kRoundToPlusInf: ++ *rounded = std::ceil(fs); ++ *rounded_int = static_cast(*rounded); ++ break; ++ case kRoundToMinusInf: ++ *rounded = std::floor(fs); ++ *rounded_int = static_cast(*rounded); ++ break; ++ } ++} ++ ++void Simulator::round64_according_to_fcsr(float toRound, float* rounded, ++ int64_t* rounded_int, float fs) { ++ // 0 RN (round to nearest): Round a result to the nearest ++ // representable value; if the result is exactly halfway between ++ // two representable values, round to zero. Behave like round_w_d. ++ ++ // 1 RZ (round toward zero): Round a result to the closest ++ // representable value whose absolute value is less than or. ++ // equal to the infinitely accurate result. Behave like trunc_w_d. ++ ++ // 2 RP (round up, or toward +infinity): Round a result to the ++ // next representable value up. Behave like ceil_w_d. ++ ++ // 3 RN (round down, or toward −infinity): Round a result to ++ // the next representable value down. Behave like floor_w_d. ++ switch (FCSR_ & 3) { ++ case kRoundToNearest: ++ *rounded = std::floor(fs + 0.5); ++ *rounded_int = static_cast(*rounded); ++ if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) { ++ // If the number is halfway between two integers, ++ // round to the even one. ++ *rounded_int -= 1; ++ *rounded -= 1.f; ++ } ++ break; ++ case kRoundToZero: ++ *rounded = trunc(fs); ++ *rounded_int = static_cast(*rounded); ++ break; ++ case kRoundToPlusInf: ++ *rounded = std::ceil(fs); ++ *rounded_int = static_cast(*rounded); ++ break; ++ case kRoundToMinusInf: ++ *rounded = std::floor(fs); ++ *rounded_int = static_cast(*rounded); ++ break; ++ } ++} ++ ++template ++void Simulator::round_according_to_msacsr(T_fp toRound, T_fp* rounded, ++ T_int* rounded_int) { ++ // 0 RN (round to nearest): Round a result to the nearest ++ // representable value; if the result is exactly halfway between ++ // two representable values, round to zero. Behave like round_w_d. 
++ ++ // 1 RZ (round toward zero): Round a result to the closest ++ // representable value whose absolute value is less than or ++ // equal to the infinitely accurate result. Behave like trunc_w_d. ++ ++ // 2 RP (round up, or toward +infinity): Round a result to the ++ // next representable value up. Behave like ceil_w_d. ++ ++ // 3 RN (round down, or toward −infinity): Round a result to ++ // the next representable value down. Behave like floor_w_d. ++ switch (get_msacsr_rounding_mode()) { ++ case kRoundToNearest: ++ *rounded = std::floor(toRound + 0.5); ++ *rounded_int = static_cast(*rounded); ++ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) { ++ // If the number is halfway between two integers, ++ // round to the even one. ++ *rounded_int -= 1; ++ *rounded -= 1.; ++ } ++ break; ++ case kRoundToZero: ++ *rounded = trunc(toRound); ++ *rounded_int = static_cast(*rounded); ++ break; ++ case kRoundToPlusInf: ++ *rounded = std::ceil(toRound); ++ *rounded_int = static_cast(*rounded); ++ break; ++ case kRoundToMinusInf: ++ *rounded = std::floor(toRound); ++ *rounded_int = static_cast(*rounded); ++ break; ++ } ++} ++ ++// Raw access to the PC register. ++void Simulator::set_pc(int64_t value) { ++ pc_modified_ = true; ++ registers_[pc] = value; ++} ++ ++ ++bool Simulator::has_bad_pc() const { ++ return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc)); ++} ++ ++ ++// Raw access to the PC register without the special adjustment when reading. ++int64_t Simulator::get_pc() const { ++ return registers_[pc]; ++} ++ ++ ++// The SW64 cannot do unaligned reads and writes. On some SW64 platforms an ++// interrupt is caused. On others it does a funky rotation thing. For now we ++// simply disallow unaligned reads, but at some point we may want to move to ++// emulating the rotate behaviour. Note that simulator runs have the runtime ++// system running directly on the host system and only generated code is ++// executed in the simulator. Since the host is typically IA32 we will not ++// get the correct SW64-like behaviour on unaligned accesses. ++ ++// TODO(plind): refactor this messy debug code when we do unaligned access. ++void Simulator::DieOrDebug() { ++ if ((1)) { // Flag for this was removed. 
++ Sw64Debugger dbg(this); ++ dbg.Debug(); ++ } else { ++ base::OS::Abort(); ++ } ++} ++ ++void Simulator::TraceRegWr(int64_t value, TraceType t) { ++ if (::v8::internal::FLAG_trace_sim) { ++ union { ++ int64_t fmt_int64; ++ int32_t fmt_int32[2]; ++ float fmt_float[2]; ++ double fmt_double; ++ } v; ++ v.fmt_int64 = value; ++ ++ switch (t) { ++ case WORD: ++ SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") int32:%" PRId32 ++ " uint32:%" PRIu32, ++ v.fmt_int64, icount_, v.fmt_int32[0], v.fmt_int32[0]); ++ break; ++ case DWORD: ++ SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") int64:%" PRId64 ++ " uint64:%" PRIu64, ++ value, icount_, value, value); ++ break; ++ case FLOAT: ++ SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") flt:%e", ++ v.fmt_int64, icount_, v.fmt_float[0]); ++ break; ++ case DOUBLE: ++ SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") dbl:%e", ++ v.fmt_int64, icount_, v.fmt_double); ++ break; ++ case FLOAT_DOUBLE: ++ SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") flt:%e dbl:%e", ++ v.fmt_int64, icount_, v.fmt_float[0], v.fmt_double); ++ break; ++ case WORD_DWORD: ++ SNPrintF(trace_buf_, ++ "%016" PRIx64 " (%" PRId64 ") int32:%" PRId32 ++ " uint32:%" PRIu32 " int64:%" PRId64 " uint64:%" PRIu64, ++ v.fmt_int64, icount_, v.fmt_int32[0], v.fmt_int32[0], ++ v.fmt_int64, v.fmt_int64); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } ++} ++ ++template ++void Simulator::TraceMSARegWr(T* value, TraceType t) { ++ if (::v8::internal::FLAG_trace_sim) { ++ union { ++ uint8_t b[16]; ++ uint16_t h[8]; ++ uint32_t w[4]; ++ uint64_t d[2]; ++ float f[4]; ++ double df[2]; ++ } v; ++ memcpy(v.b, value, kSimd128Size); ++ switch (t) { ++ case BYTE: ++ SNPrintF(trace_buf_, ++ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64 ")", ++ v.d[0], v.d[1], icount_); ++ break; ++ case HALF: ++ SNPrintF(trace_buf_, ++ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64 ")", ++ v.d[0], v.d[1], icount_); ++ break; ++ case WORD: ++ SNPrintF(trace_buf_, ++ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64 ++ ") int32[0..3]:%" PRId32 " %" PRId32 " %" PRId32 ++ " %" PRId32, ++ v.d[0], v.d[1], icount_, v.w[0], v.w[1], v.w[2], v.w[3]); ++ break; ++ case DWORD: ++ SNPrintF(trace_buf_, ++ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64 ")", ++ v.d[0], v.d[1], icount_); ++ break; ++ case FLOAT: ++ SNPrintF(trace_buf_, ++ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64 ++ ") flt[0..3]:%e %e %e %e", ++ v.d[0], v.d[1], icount_, v.f[0], v.f[1], v.f[2], v.f[3]); ++ break; ++ case DOUBLE: ++ SNPrintF(trace_buf_, ++ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64 ++ ") dbl[0..1]:%e %e", ++ v.d[0], v.d[1], icount_, v.df[0], v.df[1]); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } ++} ++ ++template ++void Simulator::TraceMSARegWr(T* value) { ++ if (::v8::internal::FLAG_trace_sim) { ++ union { ++ uint8_t b[kMSALanesByte]; ++ uint16_t h[kMSALanesHalf]; ++ uint32_t w[kMSALanesWord]; ++ uint64_t d[kMSALanesDword]; ++ float f[kMSALanesWord]; ++ double df[kMSALanesDword]; ++ } v; ++ memcpy(v.b, value, kMSALanesByte); ++ ++ if (std::is_same::value) { ++ SNPrintF(trace_buf_, ++ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64 ++ ") int32[0..3]:%" PRId32 " %" PRId32 " %" PRId32 ++ " %" PRId32, ++ v.d[0], v.d[1], icount_, v.w[0], v.w[1], v.w[2], v.w[3]); ++ } else if (std::is_same::value) { ++ SNPrintF(trace_buf_, ++ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64 ++ ") flt[0..3]:%e %e %e %e", ++ v.d[0], v.d[1], icount_, v.f[0], v.f[1], v.f[2], v.f[3]); ++ } else if (std::is_same::value) { ++ SNPrintF(trace_buf_, 
++ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64 ++ ") dbl[0..1]:%e %e", ++ v.d[0], v.d[1], icount_, v.df[0], v.df[1]); ++ } else { ++ SNPrintF(trace_buf_, ++ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64 ")", ++ v.d[0], v.d[1], icount_); ++ } ++ } ++} ++ ++// TODO(plind): consider making icount_ printing a flag option. ++void Simulator::TraceMemRd(int64_t addr, int64_t value, TraceType t) { ++ if (::v8::internal::FLAG_trace_sim) { ++ union { ++ int64_t fmt_int64; ++ int32_t fmt_int32[2]; ++ float fmt_float[2]; ++ double fmt_double; ++ } v; ++ v.fmt_int64 = value; ++ ++ switch (t) { ++ case WORD: ++ SNPrintF(trace_buf_, "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64 ++ ") int32:%" PRId32 " uint32:%" PRIu32, ++ v.fmt_int64, addr, icount_, v.fmt_int32[0], v.fmt_int32[0]); ++ break; ++ case DWORD: ++ SNPrintF(trace_buf_, "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64 ++ ") int64:%" PRId64 " uint64:%" PRIu64, ++ value, addr, icount_, value, value); ++ break; ++ case FLOAT: ++ SNPrintF(trace_buf_, "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64 ++ ") flt:%e", ++ v.fmt_int64, addr, icount_, v.fmt_float[0]); ++ break; ++ case DOUBLE: ++ SNPrintF(trace_buf_, "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64 ++ ") dbl:%e", ++ v.fmt_int64, addr, icount_, v.fmt_double); ++ break; ++ case FLOAT_DOUBLE: ++ SNPrintF(trace_buf_, "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64 ++ ") flt:%e dbl:%e", ++ v.fmt_int64, addr, icount_, v.fmt_float[0], v.fmt_double); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } ++} ++ ++ ++void Simulator::TraceMemWr(int64_t addr, int64_t value, TraceType t) { ++ if (::v8::internal::FLAG_trace_sim) { ++ switch (t) { ++ case BYTE: ++ SNPrintF(trace_buf_, " %02" PRIx8 " --> [%016" PRIx64 ++ "] (%" PRId64 ")", ++ static_cast(value), addr, icount_); ++ break; ++ case HALF: ++ SNPrintF(trace_buf_, " %04" PRIx16 " --> [%016" PRIx64 ++ "] (%" PRId64 ")", ++ static_cast(value), addr, icount_); ++ break; ++ case WORD: ++ SNPrintF(trace_buf_, ++ " %08" PRIx32 " --> [%016" PRIx64 "] (%" PRId64 ")", ++ static_cast(value), addr, icount_); ++ break; ++ case DWORD: ++ SNPrintF(trace_buf_, ++ "%016" PRIx64 " --> [%016" PRIx64 "] (%" PRId64 " )", ++ value, addr, icount_); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } ++} ++ ++template ++void Simulator::TraceMemRd(int64_t addr, T value) { ++ if (::v8::internal::FLAG_trace_sim) { ++ switch (sizeof(T)) { ++ case 1: ++ SNPrintF(trace_buf_, ++ "%08" PRIx8 " <-- [%08" PRIx64 "] (%" PRIu64 ++ ") int8:%" PRId8 " uint8:%" PRIu8, ++ static_cast(value), addr, icount_, ++ static_cast(value), static_cast(value)); ++ break; ++ case 2: ++ SNPrintF(trace_buf_, ++ "%08" PRIx16 " <-- [%08" PRIx64 "] (%" PRIu64 ++ ") int16:%" PRId16 " uint16:%" PRIu16, ++ static_cast(value), addr, icount_, ++ static_cast(value), static_cast(value)); ++ break; ++ case 4: ++ SNPrintF(trace_buf_, ++ "%08" PRIx32 " <-- [%08" PRIx64 "] (%" PRIu64 ++ ") int32:%" PRId32 " uint32:%" PRIu32, ++ static_cast(value), addr, icount_, ++ static_cast(value), static_cast(value)); ++ break; ++ case 8: ++ SNPrintF(trace_buf_, ++ "%08" PRIx64 " <-- [%08" PRIx64 "] (%" PRIu64 ++ ") int64:%" PRId64 " uint64:%" PRIu64, ++ static_cast(value), addr, icount_, ++ static_cast(value), static_cast(value)); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } ++} ++ ++template ++void Simulator::TraceMemWr(int64_t addr, T value) { ++ if (::v8::internal::FLAG_trace_sim) { ++ switch (sizeof(T)) { ++ case 1: ++ SNPrintF(trace_buf_, ++ " %02" PRIx8 " --> [%08" PRIx64 "] (%" PRIu64 ")", ++ 
static_cast(value), addr, icount_); ++ break; ++ case 2: ++ SNPrintF(trace_buf_, ++ " %04" PRIx16 " --> [%08" PRIx64 "] (%" PRIu64 ")", ++ static_cast(value), addr, icount_); ++ break; ++ case 4: ++ SNPrintF(trace_buf_, ++ "%08" PRIx32 " --> [%08" PRIx64 "] (%" PRIu64 ")", ++ static_cast(value), addr, icount_); ++ break; ++ case 8: ++ SNPrintF(trace_buf_, ++ "%16" PRIx64 " --> [%08" PRIx64 "] (%" PRIu64 ")", ++ static_cast(value), addr, icount_); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } ++} ++ ++// TODO(plind): sign-extend and zero-extend not implmented properly ++// on all the ReadXX functions, I don't think re-interpret cast does it. ++int32_t Simulator::ReadW(int64_t addr, Instruction* instr, TraceType t) { ++ if (addr >=0 && addr < 0x400) { ++ // This has to be a nullptr-dereference, drop into debugger. ++ PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR ++ " \n", ++ addr, reinterpret_cast(instr)); ++ DieOrDebug(); ++ } ++ if ((addr & 0x3) == 0 || kArchVariant == kSw64r3) { ++ int32_t* ptr = reinterpret_cast(addr); ++ TraceMemRd(addr, static_cast(*ptr), t); ++ return *ptr; ++ } ++ PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr, ++ reinterpret_cast(instr)); ++ DieOrDebug(); ++ return 0; ++} ++ ++ ++uint32_t Simulator::ReadWU(int64_t addr, Instruction* instr) { ++ if (addr >=0 && addr < 0x400) { ++ // This has to be a nullptr-dereference, drop into debugger. ++ PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR ++ " \n", ++ addr, reinterpret_cast(instr)); ++ DieOrDebug(); ++ } ++ if ((addr & 0x3) == 0 || kArchVariant == kSw64r3) { ++ uint32_t* ptr = reinterpret_cast(addr); ++ TraceMemRd(addr, static_cast(*ptr), WORD); ++ return *ptr; ++ } ++ PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr, ++ reinterpret_cast(instr)); ++ DieOrDebug(); ++ return 0; ++} ++ ++ ++void Simulator::WriteW(int64_t addr, int32_t value, Instruction* instr) { ++ if (addr >= 0 && addr < 0x400) { ++ // This has to be a nullptr-dereference, drop into debugger. ++ PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR ++ " \n", ++ addr, reinterpret_cast(instr)); ++ DieOrDebug(); ++ } ++ if ((addr & 0x3) == 0 || kArchVariant == kSw64r3) { ++ TraceMemWr(addr, value, WORD); ++ int* ptr = reinterpret_cast(addr); ++ *ptr = value; ++ return; ++ } ++ PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr, ++ reinterpret_cast(instr)); ++ DieOrDebug(); ++} ++ ++ ++int64_t Simulator::Read2W(int64_t addr, Instruction* instr) { ++ if (addr >=0 && addr < 0x400) { ++ // This has to be a nullptr-dereference, drop into debugger. ++ PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR ++ " \n", ++ addr, reinterpret_cast(instr)); ++ DieOrDebug(); ++ } ++ if ((addr & kPointerAlignmentMask) == 0 || kArchVariant == kSw64r3) { ++ int64_t* ptr = reinterpret_cast(addr); ++ TraceMemRd(addr, *ptr); ++ return *ptr; ++ } ++ PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr, ++ reinterpret_cast(instr)); ++ DieOrDebug(); ++ return 0; ++} ++ ++ ++void Simulator::Write2W(int64_t addr, int64_t value, Instruction* instr) { ++ if (addr >= 0 && addr < 0x400) { ++ // This has to be a nullptr-dereference, drop into debugger. 
++ PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR ++ "\n", ++ addr, reinterpret_cast(instr)); ++ DieOrDebug(); ++ } ++ if ((addr & kPointerAlignmentMask) == 0 || kArchVariant == kSw64r3) { ++ TraceMemWr(addr, value, DWORD); ++ int64_t* ptr = reinterpret_cast(addr); ++ *ptr = value; ++ return; ++ } ++ PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr, ++ reinterpret_cast(instr)); ++ DieOrDebug(); ++} ++ ++ ++double Simulator::ReadD(int64_t addr, Instruction* instr) { ++ if ((addr & kDoubleAlignmentMask) == 0 || kArchVariant == kSw64r3) { ++ double* ptr = reinterpret_cast(addr); ++ return *ptr; ++ } ++ PrintF("Unaligned (double) read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", ++ addr, reinterpret_cast(instr)); ++ base::OS::Abort(); ++ return 0; ++} ++ ++ ++void Simulator::WriteD(int64_t addr, double value, Instruction* instr) { ++ if ((addr & kDoubleAlignmentMask) == 0 || kArchVariant == kSw64r3) { ++ double* ptr = reinterpret_cast(addr); ++ *ptr = value; ++ return; ++ } ++ PrintF("Unaligned (double) write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR ++ "\n", ++ addr, reinterpret_cast(instr)); ++ DieOrDebug(); ++} ++ ++ ++uint16_t Simulator::ReadHU(int64_t addr, Instruction* instr) { ++ if ((addr & 1) == 0 || kArchVariant == kSw64r3) { ++ uint16_t* ptr = reinterpret_cast(addr); ++ TraceMemRd(addr, static_cast(*ptr)); ++ return *ptr; ++ } ++ PrintF("Unaligned unsigned halfword read at 0x%08" PRIx64 ++ " , pc=0x%08" V8PRIxPTR "\n", ++ addr, reinterpret_cast(instr)); ++ DieOrDebug(); ++ return 0; ++} ++ ++ ++int16_t Simulator::ReadH(int64_t addr, Instruction* instr) { ++ if ((addr & 1) == 0 || kArchVariant == kSw64r3) { ++ int16_t* ptr = reinterpret_cast(addr); ++ TraceMemRd(addr, static_cast(*ptr)); ++ return *ptr; ++ } ++ PrintF("Unaligned signed halfword read at 0x%08" PRIx64 ++ " , pc=0x%08" V8PRIxPTR "\n", ++ addr, reinterpret_cast(instr)); ++ DieOrDebug(); ++ return 0; ++} ++ ++ ++void Simulator::WriteH(int64_t addr, uint16_t value, Instruction* instr) { ++ if ((addr & 1) == 0 || kArchVariant == kSw64r3) { ++ TraceMemWr(addr, value, HALF); ++ uint16_t* ptr = reinterpret_cast(addr); ++ *ptr = value; ++ return; ++ } ++ PrintF("Unaligned unsigned halfword write at 0x%08" PRIx64 ++ " , pc=0x%08" V8PRIxPTR "\n", ++ addr, reinterpret_cast(instr)); ++ DieOrDebug(); ++} ++ ++ ++void Simulator::WriteH(int64_t addr, int16_t value, Instruction* instr) { ++ if ((addr & 1) == 0 || kArchVariant == kSw64r3) { ++ TraceMemWr(addr, value, HALF); ++ int16_t* ptr = reinterpret_cast(addr); ++ *ptr = value; ++ return; ++ } ++ PrintF("Unaligned halfword write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR ++ "\n", ++ addr, reinterpret_cast(instr)); ++ DieOrDebug(); ++} ++ ++ ++uint32_t Simulator::ReadBU(int64_t addr) { ++ uint8_t* ptr = reinterpret_cast(addr); ++ TraceMemRd(addr, static_cast(*ptr)); ++ return *ptr & 0xFF; ++} ++ ++ ++int32_t Simulator::ReadB(int64_t addr) { ++ int8_t* ptr = reinterpret_cast(addr); ++ TraceMemRd(addr, static_cast(*ptr)); ++ return *ptr; ++} ++ ++ ++void Simulator::WriteB(int64_t addr, uint8_t value) { ++ TraceMemWr(addr, value, BYTE); ++ uint8_t* ptr = reinterpret_cast(addr); ++ *ptr = value; ++} ++ ++ ++void Simulator::WriteB(int64_t addr, int8_t value) { ++ TraceMemWr(addr, value, BYTE); ++ int8_t* ptr = reinterpret_cast(addr); ++ *ptr = value; ++} ++ ++template ++T Simulator::ReadMem(int64_t addr, Instruction* instr) { ++ int alignment_mask = (1 << sizeof(T)) - 1; ++ if ((addr & alignment_mask) == 0 || kArchVariant == kSw64r3) { 
++    T* ptr = reinterpret_cast<T*>(addr);
++    TraceMemRd(addr, *ptr);
++    return *ptr;
++  }
++  PrintF("Unaligned read of type sizeof(%ld) at 0x%08lx, pc=0x%08" V8PRIxPTR
++         "\n",
++         sizeof(T), addr, reinterpret_cast<intptr_t>(instr));
++  base::OS::Abort();
++  return 0;
++}
++
++template <typename T>
++void Simulator::WriteMem(int64_t addr, T value, Instruction* instr) {
++  int alignment_mask = (1 << sizeof(T)) - 1;
++  if ((addr & alignment_mask) == 0 || kArchVariant == kSw64r3) {
++    T* ptr = reinterpret_cast<T*>(addr);
++    *ptr = value;
++    TraceMemWr(addr, value);
++    return;
++  }
++  PrintF("Unaligned write of type sizeof(%ld) at 0x%08lx, pc=0x%08" V8PRIxPTR
++         "\n",
++         sizeof(T), addr, reinterpret_cast<intptr_t>(instr));
++  base::OS::Abort();
++}
++
++// Returns the limit of the stack area to enable checking for stack overflows.
++uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
++  // The simulator uses a separate JS stack. If we have exhausted the C stack,
++  // we also drop down the JS limit to reflect the exhaustion on the JS stack.
++  if (GetCurrentStackPosition() < c_limit) {
++    return reinterpret_cast<uintptr_t>(get_sp());
++  }
++
++  // Otherwise the limit is the JS stack. Leave a safety margin of 1024 bytes
++  // to prevent overrunning the stack when pushing values.
++  return reinterpret_cast<uintptr_t>(stack_) + 1024;
++}
++
++
++// Unsupported instructions use Format to print an error and stop execution.
++void Simulator::Format(Instruction* instr, const char* format) {
++  PrintF("Simulator found unsupported instruction: 0x%08" PRIxPTR " : %s\n",
++         reinterpret_cast<intptr_t>(instr), format);
++  UNIMPLEMENTED_SW64();
++}
++
++
++// Calls into the V8 runtime are based on this very simple interface.
++// Note: To be able to return two values from some calls the code in runtime.cc
++// uses the ObjectPair which is essentially two 32-bit values stuffed into a
++// 64-bit value. With the code below we assume that all runtime calls return
++// 64 bits of result. If they don't, the v1 result register contains a bogus
++// value, which is fine because it is caller-saved.
++
++using SimulatorRuntimeCall = ObjectPair (*)(int64_t arg0, int64_t arg1,
++                                            int64_t arg2, int64_t arg3,
++                                            int64_t arg4, int64_t arg5,
++                                            int64_t arg6, int64_t arg7,
++                                            int64_t arg8, int64_t arg9);
++
++// These prototypes handle the four types of FP calls.
++using SimulatorRuntimeCompareCall = int64_t (*)(double darg0, double darg1);
++using SimulatorRuntimeFPFPCall = double (*)(double darg0, double darg1);
++using SimulatorRuntimeFPCall = double (*)(double darg0);
++using SimulatorRuntimeFPIntCall = double (*)(double darg0, int32_t arg0);
++
++// This signature supports direct call in to API function native callback
++// (refer to InvocationCallback in v8.h).
++using SimulatorRuntimeDirectApiCall = void (*)(int64_t arg0);
++using SimulatorRuntimeProfilingApiCall = void (*)(int64_t arg0, void* arg1);
++
++// This signature supports direct call to accessor getter callback.
++using SimulatorRuntimeDirectGetterCall = void (*)(int64_t arg0, int64_t arg1);
++using SimulatorRuntimeProfilingGetterCall = void (*)(int64_t arg0, int64_t arg1,
++                                                     void* arg2);
++
++// Software interrupt instructions are used by the simulator to call into the
++// C-based V8 runtime. They are also used for debugging with simulator.
++void Simulator::SoftwareInterrupt() {
++  //int32_t func = instr_.FunctionFieldRaw();
++  //uint32_t code = (func == BREAK) ? instr_.Bits(25, 6) : -1;
++  // We first check if we met a REDIRECT.
++  uint32_t code = instr_.SwFunctionFieldValue(0, 0);
++  if (code == REDIRECT) {
++    Redirection* redirection = Redirection::FromInstruction(instr_.instr());
++
++    int64_t* stack_pointer = reinterpret_cast<int64_t*>(get_register(sp));
++
++    int64_t arg0 = get_register(a0);
++    int64_t arg1 = get_register(a1);
++    int64_t arg2 = get_register(a2);
++    int64_t arg3 = get_register(a3);
++    int64_t arg4 = get_register(a4);
++    int64_t arg5 = get_register(a5);
++    int64_t arg6 = stack_pointer[0];
++    int64_t arg7 = stack_pointer[1];
++    int64_t arg8 = stack_pointer[2];
++    int64_t arg9 = stack_pointer[1];
++    STATIC_ASSERT(kMaxCParameters == 10);
++
++    bool fp_call =
++        (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
++        (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
++        (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
++        (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
++
++    if (!IsSw64SoftFloatABI) {
++      // With the hard floating point calling convention, double
++      // arguments are passed in FPU registers. Fetch the arguments
++      // from there and call the builtin using soft floating point
++      // convention.
++      switch (redirection->type()) {
++        case ExternalReference::BUILTIN_FP_FP_CALL:
++        case ExternalReference::BUILTIN_COMPARE_CALL:
++          arg0 = get_fpu_register(f12);
++          arg1 = get_fpu_register(f13);
++          arg2 = get_fpu_register(f14);
++          arg3 = get_fpu_register(f15);
++          break;
++        case ExternalReference::BUILTIN_FP_CALL:
++          arg0 = get_fpu_register(f12);
++          arg1 = get_fpu_register(f13);
++          break;
++        case ExternalReference::BUILTIN_FP_INT_CALL:
++          arg0 = get_fpu_register(f12);
++          arg1 = get_fpu_register(f13);
++          arg2 = get_register(a2);
++          break;
++        default:
++          break;
++      }
++    }
++
++    // This is dodgy but it works because the C entry stubs are never moved.
++    // See comment in codegen-arm.cc and bug 1242173.
++    int64_t saved_ra = get_register(ra);
++
++    intptr_t external =
++        reinterpret_cast<intptr_t>(redirection->external_function());
++
++    // Based on CpuFeatures::IsSupported(FPU), Sw64 will use either hardware
++    // FPU, or gcc soft-float routines. Hardware FPU is simulated in this
++    // simulator. Soft-float has additional abstraction of ExternalReference,
++    // to support serialization.
++ if (fp_call) { ++ double dval0, dval1; // one or two double parameters ++ int32_t ival; // zero or one integer parameters ++ int64_t iresult = 0; // integer return value ++ double dresult = 0; // double return value ++ GetFpArgs(&dval0, &dval1, &ival); ++ SimulatorRuntimeCall generic_target = ++ reinterpret_cast(external); ++ if (::v8::internal::FLAG_trace_sim) { ++ switch (redirection->type()) { ++ case ExternalReference::BUILTIN_FP_FP_CALL: ++ case ExternalReference::BUILTIN_COMPARE_CALL: ++ PrintF("Call to host function at %p with args %f, %f", ++ reinterpret_cast(FUNCTION_ADDR(generic_target)), ++ dval0, dval1); ++ break; ++ case ExternalReference::BUILTIN_FP_CALL: ++ PrintF("Call to host function at %p with arg %f", ++ reinterpret_cast(FUNCTION_ADDR(generic_target)), ++ dval0); ++ break; ++ case ExternalReference::BUILTIN_FP_INT_CALL: ++ PrintF("Call to host function at %p with args %f, %d", ++ reinterpret_cast(FUNCTION_ADDR(generic_target)), ++ dval0, ival); ++ break; ++ default: ++ UNREACHABLE(); ++ break; ++ } ++ } ++ switch (redirection->type()) { ++ case ExternalReference::BUILTIN_COMPARE_CALL: { ++ SimulatorRuntimeCompareCall target = ++ reinterpret_cast(external); ++ iresult = target(dval0, dval1); ++ set_register(v0, static_cast(iresult)); ++ // set_register(v1, static_cast(iresult >> 32)); ++ break; ++ } ++ case ExternalReference::BUILTIN_FP_FP_CALL: { ++ SimulatorRuntimeFPFPCall target = ++ reinterpret_cast(external); ++ dresult = target(dval0, dval1); ++ SetFpResult(dresult); ++ break; ++ } ++ case ExternalReference::BUILTIN_FP_CALL: { ++ SimulatorRuntimeFPCall target = ++ reinterpret_cast(external); ++ dresult = target(dval0); ++ SetFpResult(dresult); ++ break; ++ } ++ case ExternalReference::BUILTIN_FP_INT_CALL: { ++ SimulatorRuntimeFPIntCall target = ++ reinterpret_cast(external); ++ dresult = target(dval0, ival); ++ SetFpResult(dresult); ++ break; ++ } ++ default: ++ UNREACHABLE(); ++ break; ++ } ++ if (::v8::internal::FLAG_trace_sim) { ++ switch (redirection->type()) { ++ case ExternalReference::BUILTIN_COMPARE_CALL: ++ PrintF("Returned %08x\n", static_cast(iresult)); ++ break; ++ case ExternalReference::BUILTIN_FP_FP_CALL: ++ case ExternalReference::BUILTIN_FP_CALL: ++ case ExternalReference::BUILTIN_FP_INT_CALL: ++ PrintF("Returned %f\n", dresult); ++ break; ++ default: ++ UNREACHABLE(); ++ break; ++ } ++ } ++ } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) { ++ if (::v8::internal::FLAG_trace_sim) { ++ PrintF("Call to host function at %p args %08" PRIx64 " \n", ++ reinterpret_cast(external), arg0); ++ } ++ SimulatorRuntimeDirectApiCall target = ++ reinterpret_cast(external); ++ target(arg0); ++ } else if ( ++ redirection->type() == ExternalReference::PROFILING_API_CALL) { ++ if (::v8::internal::FLAG_trace_sim) { ++ PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64 ++ " \n", ++ reinterpret_cast(external), arg0, arg1); ++ } ++ SimulatorRuntimeProfilingApiCall target = ++ reinterpret_cast(external); ++ target(arg0, Redirection::ReverseRedirection(arg1)); ++ } else if ( ++ redirection->type() == ExternalReference::DIRECT_GETTER_CALL) { ++ if (::v8::internal::FLAG_trace_sim) { ++ PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64 ++ " \n", ++ reinterpret_cast(external), arg0, arg1); ++ } ++ SimulatorRuntimeDirectGetterCall target = ++ reinterpret_cast(external); ++ target(arg0, arg1); ++ } else if ( ++ redirection->type() == ExternalReference::PROFILING_GETTER_CALL) { ++ if (::v8::internal::FLAG_trace_sim) { ++ 
PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64 ++ " %08" PRIx64 " \n", ++ reinterpret_cast(external), arg0, arg1, arg2); ++ } ++ SimulatorRuntimeProfilingGetterCall target = ++ reinterpret_cast(external); ++ target(arg0, arg1, Redirection::ReverseRedirection(arg2)); ++ } else { ++ DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL || ++ redirection->type() == ExternalReference::BUILTIN_CALL_PAIR); ++ SimulatorRuntimeCall target = ++ reinterpret_cast(external); ++ if (::v8::internal::FLAG_trace_sim) { ++ PrintF( ++ "Call to host function at %p " ++ "args %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 ++ " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 ++ " , %08" PRIx64 " , %08" PRIx64 " \n", ++ reinterpret_cast(FUNCTION_ADDR(target)), arg0, arg1, arg2, ++ arg3, arg4, arg5, arg6, arg7, arg8, arg9); ++ } ++ ObjectPair result = ++ target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9); ++ set_register(v0, (int64_t)(result.x)); ++ set_register(a5, (int64_t)(result.y)); ++ } ++ if (::v8::internal::FLAG_trace_sim) { ++ PrintF("Returned %08" PRIx64 " : %08" PRIx64 " \n", get_register(t4), ++ get_register(v0)); ++ } ++ set_register(ra, saved_ra); ++ set_pc(get_register(ra)); ++ ++ //} else if (func == BREAK && code <= kMaxStopCode) { ++ // if (IsWatchpoint(code)) { ++ // PrintWatchpoint(code); ++ // } else { ++ // IncreaseStopCounter(code); ++ // HandleStop(code, instr_.instr()); ++ // } ++ } else if (code == BREAK) { ++ // All remaining break_ codes, and all traps are handled here. ++ Sw64Debugger dbg(this); ++ dbg.Debug(); ++ } ++} ++ ++ ++// Stop helper functions. ++bool Simulator::IsWatchpoint(uint64_t code) { ++ return (code <= kMaxWatchpointCode); ++} ++ ++ ++void Simulator::PrintWatchpoint(uint64_t code) { ++ Sw64Debugger dbg(this); ++ ++break_count_; ++ PrintF("\n---- break %" PRId64 " marker: %3d (instr count: %8" PRId64 ++ " ) ----------" ++ "----------------------------------", ++ code, break_count_, icount_); ++ dbg.PrintAllRegs(); // Print registers and continue running. ++} ++ ++ ++void Simulator::HandleStop(uint64_t code, Instruction* instr) { ++ // Stop if it is enabled, otherwise go on jumping over the stop ++ // and the message address. ++ if (IsEnabledStop(code)) { ++ Sw64Debugger dbg(this); ++ dbg.Stop(instr); ++ } ++} ++ ++ ++bool Simulator::IsStopInstruction(Instruction* instr) { ++ int32_t func = instr->FunctionFieldRaw(); ++ uint32_t code = static_cast(instr->Bits(25, 6)); ++ return (func == BREAK) && code > kMaxWatchpointCode && code <= kMaxStopCode; ++} ++ ++ ++bool Simulator::IsEnabledStop(uint64_t code) { ++ DCHECK_LE(code, kMaxStopCode); ++ DCHECK_GT(code, kMaxWatchpointCode); ++ return !(watched_stops_[code].count & kStopDisabledBit); ++} ++ ++ ++void Simulator::EnableStop(uint64_t code) { ++ if (!IsEnabledStop(code)) { ++ watched_stops_[code].count &= ~kStopDisabledBit; ++ } ++} ++ ++ ++void Simulator::DisableStop(uint64_t code) { ++ if (IsEnabledStop(code)) { ++ watched_stops_[code].count |= kStopDisabledBit; ++ } ++} ++ ++ ++void Simulator::IncreaseStopCounter(uint64_t code) { ++ DCHECK_LE(code, kMaxStopCode); ++ if ((watched_stops_[code].count & ~(1 << 31)) == 0x7FFFFFFF) { ++ PrintF("Stop counter for code %" PRId64 ++ " has overflowed.\n" ++ "Enabling this code and reseting the counter to 0.\n", ++ code); ++ watched_stops_[code].count = 0; ++ EnableStop(code); ++ } else { ++ watched_stops_[code].count++; ++ } ++} ++ ++ ++// Print a stop status. 
++void Simulator::PrintStopInfo(uint64_t code) { ++ if (code <= kMaxWatchpointCode) { ++ PrintF("That is a watchpoint, not a stop.\n"); ++ return; ++ } else if (code > kMaxStopCode) { ++ PrintF("Code too large, only %u stops can be used\n", kMaxStopCode + 1); ++ return; ++ } ++ const char* state = IsEnabledStop(code) ? "Enabled" : "Disabled"; ++ int32_t count = watched_stops_[code].count & ~kStopDisabledBit; ++ // Don't print the state of unused breakpoints. ++ if (count != 0) { ++ if (watched_stops_[code].desc) { ++ PrintF("stop %" PRId64 " - 0x%" PRIx64 " : \t%s, \tcounter = %i, \t%s\n", ++ code, code, state, count, watched_stops_[code].desc); ++ } else { ++ PrintF("stop %" PRId64 " - 0x%" PRIx64 " : \t%s, \tcounter = %i\n", code, ++ code, state, count); ++ } ++ } ++} ++ ++ ++void Simulator::SignalException(Exception e) { ++ FATAL("Error: Exception %i raised.", static_cast(e)); ++} ++ ++// Min/Max template functions for Double and Single arguments. ++ ++template ++static T FPAbs(T a); ++ ++template <> ++double FPAbs(double a) { ++ return fabs(a); ++} ++ ++template <> ++float FPAbs(float a) { ++ return fabsf(a); ++} ++ ++template ++static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T* result) { ++ if (std::isnan(a) && std::isnan(b)) { ++ *result = a; ++ } else if (std::isnan(a)) { ++ *result = b; ++ } else if (std::isnan(b)) { ++ *result = a; ++ } else if (b == a) { ++ // Handle -0.0 == 0.0 case. ++ // std::signbit() returns int 0 or 1 so subtracting MaxMinKind::kMax ++ // negates the result. ++ *result = std::signbit(b) - static_cast(kind) ? b : a; ++ } else { ++ return false; ++ } ++ return true; ++} ++ ++template ++static T FPUMin(T a, T b) { ++ T result; ++ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) { ++ return result; ++ } else { ++ return b < a ? b : a; ++ } ++} ++ ++template ++static T FPUMax(T a, T b) { ++ T result; ++ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, &result)) { ++ return result; ++ } else { ++ return b > a ? b : a; ++ } ++} ++ ++template ++static T FPUMinA(T a, T b) { ++ T result; ++ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) { ++ if (FPAbs(a) < FPAbs(b)) { ++ result = a; ++ } else if (FPAbs(b) < FPAbs(a)) { ++ result = b; ++ } else { ++ result = a < b ? a : b; ++ } ++ } ++ return result; ++} ++ ++template ++static T FPUMaxA(T a, T b) { ++ T result; ++ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) { ++ if (FPAbs(a) > FPAbs(b)) { ++ result = a; ++ } else if (FPAbs(b) > FPAbs(a)) { ++ result = b; ++ } else { ++ result = a > b ? a : b; ++ } ++ } ++ return result; ++} ++ ++enum class KeepSign : bool { no = false, yes }; ++ ++template ::value, ++ int>::type = 0> ++T FPUCanonalizeNaNArg(T result, T arg, KeepSign keepSign = KeepSign::no) { ++ DCHECK(std::isnan(arg)); ++ T qNaN = std::numeric_limits::quiet_NaN(); ++ if (keepSign == KeepSign::yes) { ++ return std::copysign(qNaN, result); ++ } ++ return qNaN; ++} ++ ++template ++T FPUCanonalizeNaNArgs(T result, KeepSign keepSign, T first) { ++ if (std::isnan(first)) { ++ return FPUCanonalizeNaNArg(result, first, keepSign); ++ } ++ return result; ++} ++ ++template ++T FPUCanonalizeNaNArgs(T result, KeepSign keepSign, T first, Args... args) { ++ if (std::isnan(first)) { ++ return FPUCanonalizeNaNArg(result, first, keepSign); ++ } ++ return FPUCanonalizeNaNArgs(result, keepSign, args...); ++} ++ ++template ++T FPUCanonalizeOperation(Func f, T first, Args... 
args) { ++ return FPUCanonalizeOperation(f, KeepSign::no, first, args...); ++} ++ ++template ++T FPUCanonalizeOperation(Func f, KeepSign keepSign, T first, Args... args) { ++ T result = f(first, args...); ++ if (std::isnan(result)) { ++ result = FPUCanonalizeNaNArgs(result, keepSign, first, args...); ++ } ++ return result; ++} ++ ++// Handle execution based on instruction types. ++ ++void Simulator::DecodeTypeRegisterSRsType() { ++ float fs, ft, fd; ++ fs = get_fpu_register_float(fs_reg()); ++ ft = get_fpu_register_float(ft_reg()); ++ fd = get_fpu_register_float(fd_reg()); ++ int32_t ft_int = bit_cast(ft); ++ int32_t fd_int = bit_cast(fd); ++ uint32_t cc, fcsr_cc; ++ cc = instr_.FCccValue(); ++ fcsr_cc = get_fcsr_condition_bit(cc); ++ switch (instr_.FunctionFieldRaw()) { ++ case RINT: { ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ float result, temp_result; ++ double temp; ++ float upper = std::ceil(fs); ++ float lower = std::floor(fs); ++ switch (get_fcsr_rounding_mode()) { ++ case kRoundToNearest: ++ if (upper - fs < fs - lower) { ++ result = upper; ++ } else if (upper - fs > fs - lower) { ++ result = lower; ++ } else { ++ temp_result = upper / 2; ++ float reminder = modf(temp_result, &temp); ++ if (reminder == 0) { ++ result = upper; ++ } else { ++ result = lower; ++ } ++ } ++ break; ++ case kRoundToZero: ++ result = (fs > 0 ? lower : upper); ++ break; ++ case kRoundToPlusInf: ++ result = upper; ++ break; ++ case kRoundToMinusInf: ++ result = lower; ++ break; ++ } ++ SetFPUFloatResult(fd_reg(), result); ++ if (result != fs) { ++ set_fcsr_bit(kFCSRInexactFlagBit, true); ++ } ++ break; ++ } ++ case ADD_S: ++ SetFPUFloatResult( ++ fd_reg(), ++ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs + rhs; }, ++ fs, ft)); ++ break; ++ case SUB_S: ++ SetFPUFloatResult( ++ fd_reg(), ++ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs - rhs; }, ++ fs, ft)); ++ break; ++ case MADDF_S: ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ SetFPUFloatResult(fd_reg(), std::fma(fs, ft, fd)); ++ break; ++ case MSUBF_S: ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ SetFPUFloatResult(fd_reg(), std::fma(-fs, ft, fd)); ++ break; ++ case MUL_S: ++ SetFPUFloatResult( ++ fd_reg(), ++ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs * rhs; }, ++ fs, ft)); ++ break; ++ case DIV_S: ++ SetFPUFloatResult( ++ fd_reg(), ++ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs / rhs; }, ++ fs, ft)); ++ break; ++ case ABS_S: ++ SetFPUFloatResult(fd_reg(), FPUCanonalizeOperation( ++ [](float fs) { return FPAbs(fs); }, fs)); ++ break; ++ case MOV_S: ++ SetFPUFloatResult(fd_reg(), fs); ++ break; ++ case NEG_S: ++ SetFPUFloatResult(fd_reg(), ++ FPUCanonalizeOperation([](float src) { return -src; }, ++ KeepSign::yes, fs)); ++ break; ++ case SQRT_S: ++ SetFPUFloatResult( ++ fd_reg(), ++ FPUCanonalizeOperation([](float src) { return std::sqrt(src); }, fs)); ++ break; ++ case RSQRT_S: ++ SetFPUFloatResult( ++ fd_reg(), FPUCanonalizeOperation( ++ [](float src) { return 1.0 / std::sqrt(src); }, fs)); ++ break; ++ case RECIP_S: ++ SetFPUFloatResult(fd_reg(), FPUCanonalizeOperation( ++ [](float src) { return 1.0 / src; }, fs)); ++ break; ++ case C_F_D: ++ set_fcsr_bit(fcsr_cc, false); ++ TraceRegWr(test_fcsr_bit(fcsr_cc)); ++ break; ++ case C_UN_D: ++ set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft)); ++ TraceRegWr(test_fcsr_bit(fcsr_cc)); ++ break; ++ case C_EQ_D: ++ set_fcsr_bit(fcsr_cc, (fs == ft)); ++ TraceRegWr(test_fcsr_bit(fcsr_cc)); ++ break; ++ case C_UEQ_D: ++ set_fcsr_bit(fcsr_cc, (fs == ft) || 
(std::isnan(fs) || std::isnan(ft))); ++ TraceRegWr(test_fcsr_bit(fcsr_cc)); ++ break; ++ case C_OLT_D: ++ set_fcsr_bit(fcsr_cc, (fs < ft)); ++ TraceRegWr(test_fcsr_bit(fcsr_cc)); ++ break; ++ case C_ULT_D: ++ set_fcsr_bit(fcsr_cc, (fs < ft) || (std::isnan(fs) || std::isnan(ft))); ++ TraceRegWr(test_fcsr_bit(fcsr_cc)); ++ break; ++ case C_OLE_D: ++ set_fcsr_bit(fcsr_cc, (fs <= ft)); ++ TraceRegWr(test_fcsr_bit(fcsr_cc)); ++ break; ++ case C_ULE_D: ++ set_fcsr_bit(fcsr_cc, (fs <= ft) || (std::isnan(fs) || std::isnan(ft))); ++ TraceRegWr(test_fcsr_bit(fcsr_cc)); ++ break; ++ case CVT_D_S: ++ SetFPUDoubleResult(fd_reg(), static_cast(fs)); ++ break; ++ case CLASS_S: { ++ // Convert float input to uint32_t for easier bit manipulation ++ uint32_t classed = bit_cast(fs); ++ ++ // Extracting sign, exponent and mantissa from the input float ++ uint32_t sign = (classed >> 31) & 1; ++ uint32_t exponent = (classed >> 23) & 0x000000FF; ++ uint32_t mantissa = classed & 0x007FFFFF; ++ uint32_t result; ++ float fResult; ++ ++ // Setting flags if input float is negative infinity, ++ // positive infinity, negative zero or positive zero ++ bool negInf = (classed == 0xFF800000); ++ bool posInf = (classed == 0x7F800000); ++ bool negZero = (classed == 0x80000000); ++ bool posZero = (classed == 0x00000000); ++ ++ bool signalingNan; ++ bool quietNan; ++ bool negSubnorm; ++ bool posSubnorm; ++ bool negNorm; ++ bool posNorm; ++ ++ // Setting flags if float is NaN ++ signalingNan = false; ++ quietNan = false; ++ if (!negInf && !posInf && (exponent == 0xFF)) { ++ quietNan = ((mantissa & 0x00200000) == 0) && ++ ((mantissa & (0x00200000 - 1)) == 0); ++ signalingNan = !quietNan; ++ } ++ ++ // Setting flags if float is subnormal number ++ posSubnorm = false; ++ negSubnorm = false; ++ if ((exponent == 0) && (mantissa != 0)) { ++ DCHECK(sign == 0 || sign == 1); ++ posSubnorm = (sign == 0); ++ negSubnorm = (sign == 1); ++ } ++ ++ // Setting flags if float is normal number ++ posNorm = false; ++ negNorm = false; ++ if (!posSubnorm && !negSubnorm && !posInf && !negInf && !signalingNan && ++ !quietNan && !negZero && !posZero) { ++ DCHECK(sign == 0 || sign == 1); ++ posNorm = (sign == 0); ++ negNorm = (sign == 1); ++ } ++ ++ // Calculating result according to description of CLASS.S instruction ++ result = (posZero << 9) | (posSubnorm << 8) | (posNorm << 7) | ++ (posInf << 6) | (negZero << 5) | (negSubnorm << 4) | ++ (negNorm << 3) | (negInf << 2) | (quietNan << 1) | signalingNan; ++ ++ DCHECK_NE(result, 0); ++ ++ fResult = bit_cast(result); ++ SetFPUFloatResult(fd_reg(), fResult); ++ break; ++ } ++ case CVT_L_S: { ++ float rounded; ++ int64_t result; ++ round64_according_to_fcsr(fs, &rounded, &result, fs); ++ SetFPUResult(fd_reg(), result); ++ if (set_fcsr_round64_error(fs, rounded)) { ++ set_fpu_register_invalid_result64(fs, rounded); ++ } ++ break; ++ } ++ case CVT_W_S: { ++ float rounded; ++ int32_t result; ++ round_according_to_fcsr(fs, &rounded, &result, fs); ++ SetFPUWordResult(fd_reg(), result); ++ if (set_fcsr_round_error(fs, rounded)) { ++ set_fpu_register_word_invalid_result(fs, rounded); ++ } ++ break; ++ } ++ case TRUNC_W_S: { // Truncate single to word (round towards 0). 
++ float rounded = trunc(fs); ++ int32_t result = static_cast(rounded); ++ SetFPUWordResult(fd_reg(), result); ++ if (set_fcsr_round_error(fs, rounded)) { ++ set_fpu_register_word_invalid_result(fs, rounded); ++ } ++ } break; ++ case TRUNC_L_S: { ++ float rounded = trunc(fs); ++ int64_t result = static_cast(rounded); ++ SetFPUResult(fd_reg(), result); ++ if (set_fcsr_round64_error(fs, rounded)) { ++ set_fpu_register_invalid_result64(fs, rounded); ++ } ++ break; ++ } ++ case ROUND_W_S: { ++ float rounded = std::floor(fs + 0.5); ++ int32_t result = static_cast(rounded); ++ if ((result & 1) != 0 && result - fs == 0.5) { ++ // If the number is halfway between two integers, ++ // round to the even one. ++ result--; ++ } ++ SetFPUWordResult(fd_reg(), result); ++ if (set_fcsr_round_error(fs, rounded)) { ++ set_fpu_register_word_invalid_result(fs, rounded); ++ } ++ break; ++ } ++ case ROUND_L_S: { ++ float rounded = std::floor(fs + 0.5); ++ int64_t result = static_cast(rounded); ++ if ((result & 1) != 0 && result - fs == 0.5) { ++ // If the number is halfway between two integers, ++ // round to the even one. ++ result--; ++ } ++ int64_t i64 = static_cast(result); ++ SetFPUResult(fd_reg(), i64); ++ if (set_fcsr_round64_error(fs, rounded)) { ++ set_fpu_register_invalid_result64(fs, rounded); ++ } ++ break; ++ } ++ case FLOOR_L_S: { ++ float rounded = floor(fs); ++ int64_t result = static_cast(rounded); ++ SetFPUResult(fd_reg(), result); ++ if (set_fcsr_round64_error(fs, rounded)) { ++ set_fpu_register_invalid_result64(fs, rounded); ++ } ++ break; ++ } ++ case FLOOR_W_S: // Round double to word towards negative infinity. ++ { ++ float rounded = std::floor(fs); ++ int32_t result = static_cast(rounded); ++ SetFPUWordResult(fd_reg(), result); ++ if (set_fcsr_round_error(fs, rounded)) { ++ set_fpu_register_word_invalid_result(fs, rounded); ++ } ++ } break; ++ case CEIL_W_S: // Round double to word towards positive infinity. ++ { ++ float rounded = std::ceil(fs); ++ int32_t result = static_cast(rounded); ++ SetFPUWordResult(fd_reg(), result); ++ if (set_fcsr_round_error(fs, rounded)) { ++ set_fpu_register_invalid_result(fs, rounded); ++ } ++ } break; ++ case CEIL_L_S: { ++ float rounded = ceil(fs); ++ int64_t result = static_cast(rounded); ++ SetFPUResult(fd_reg(), result); ++ if (set_fcsr_round64_error(fs, rounded)) { ++ set_fpu_register_invalid_result64(fs, rounded); ++ } ++ break; ++ } ++ case MINA: ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ SetFPUFloatResult(fd_reg(), FPUMinA(ft, fs)); ++ break; ++ case MAXA: ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ SetFPUFloatResult(fd_reg(), FPUMaxA(ft, fs)); ++ break; ++ case MIN: ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ SetFPUFloatResult(fd_reg(), FPUMin(ft, fs)); ++ break; ++ case MAX: ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ SetFPUFloatResult(fd_reg(), FPUMax(ft, fs)); ++ break; ++ case SEL: ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ SetFPUFloatResult(fd_reg(), (fd_int & 0x1) == 0 ? fs : ft); ++ break; ++ case SELEQZ_C: ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ SetFPUFloatResult( ++ fd_reg(), ++ (ft_int & 0x1) == 0 ? get_fpu_register_float(fs_reg()) : 0.0); ++ break; ++ case SELNEZ_C: ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ SetFPUFloatResult( ++ fd_reg(), ++ (ft_int & 0x1) != 0 ? 
get_fpu_register_float(fs_reg()) : 0.0); ++ break; ++ case MOVZ_C: { ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ if (rt() == 0) { ++ SetFPUFloatResult(fd_reg(), fs); ++ } ++ break; ++ } ++ case MOVN_C: { ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ if (rt() != 0) { ++ SetFPUFloatResult(fd_reg(), fs); ++ } ++ break; ++ } ++ case MOVF: { ++ // Same function field for MOVT.D and MOVF.D ++ uint32_t ft_cc = (ft_reg() >> 2) & 0x7; ++ ft_cc = get_fcsr_condition_bit(ft_cc); ++ ++ if (instr_.Bit(16)) { // Read Tf bit. ++ // MOVT.D ++ if (test_fcsr_bit(ft_cc)) SetFPUFloatResult(fd_reg(), fs); ++ } else { ++ // MOVF.D ++ if (!test_fcsr_bit(ft_cc)) SetFPUFloatResult(fd_reg(), fs); ++ } ++ break; ++ } ++ default: ++ // TRUNC_W_S ROUND_W_S ROUND_L_S FLOOR_W_S FLOOR_L_S ++ // CEIL_W_S CEIL_L_S CVT_PS_S are unimplemented. ++ UNREACHABLE(); ++ } ++} ++ ++ ++void Simulator::DecodeTypeRegisterDRsType() { ++ double ft, fs, fd; ++ uint32_t cc, fcsr_cc; ++ fs = get_fpu_register_double(fs_reg()); ++ ft = (instr_.FunctionFieldRaw() != MOVF) ? get_fpu_register_double(ft_reg()) ++ : 0.0; ++ fd = get_fpu_register_double(fd_reg()); ++ cc = instr_.FCccValue(); ++ fcsr_cc = get_fcsr_condition_bit(cc); ++ int64_t ft_int = bit_cast(ft); ++ int64_t fd_int = bit_cast(fd); ++ switch (instr_.FunctionFieldRaw()) { ++ case RINT: { ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ double result, temp, temp_result; ++ double upper = std::ceil(fs); ++ double lower = std::floor(fs); ++ switch (get_fcsr_rounding_mode()) { ++ case kRoundToNearest: ++ if (upper - fs < fs - lower) { ++ result = upper; ++ } else if (upper - fs > fs - lower) { ++ result = lower; ++ } else { ++ temp_result = upper / 2; ++ double reminder = modf(temp_result, &temp); ++ if (reminder == 0) { ++ result = upper; ++ } else { ++ result = lower; ++ } ++ } ++ break; ++ case kRoundToZero: ++ result = (fs > 0 ? lower : upper); ++ break; ++ case kRoundToPlusInf: ++ result = upper; ++ break; ++ case kRoundToMinusInf: ++ result = lower; ++ break; ++ } ++ SetFPUDoubleResult(fd_reg(), result); ++ if (result != fs) { ++ set_fcsr_bit(kFCSRInexactFlagBit, true); ++ } ++ break; ++ } ++ case SEL: ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ SetFPUDoubleResult(fd_reg(), (fd_int & 0x1) == 0 ? fs : ft); ++ break; ++ case SELEQZ_C: ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ SetFPUDoubleResult(fd_reg(), (ft_int & 0x1) == 0 ? fs : 0.0); ++ break; ++ case SELNEZ_C: ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ SetFPUDoubleResult(fd_reg(), (ft_int & 0x1) != 0 ? fs : 0.0); ++ break; ++ case MOVZ_C: { ++ DCHECK_EQ(kArchVariant, kSw64r2); ++ if (rt() == 0) { ++ SetFPUDoubleResult(fd_reg(), fs); ++ } ++ break; ++ } ++ case MOVN_C: { ++ DCHECK_EQ(kArchVariant, kSw64r2); ++ if (rt() != 0) { ++ SetFPUDoubleResult(fd_reg(), fs); ++ } ++ break; ++ } ++ case MOVF: { ++ // Same function field for MOVT.D and MOVF.D ++ uint32_t ft_cc = (ft_reg() >> 2) & 0x7; ++ ft_cc = get_fcsr_condition_bit(ft_cc); ++ if (instr_.Bit(16)) { // Read Tf bit. 
++ // MOVT.D ++ if (test_fcsr_bit(ft_cc)) SetFPUDoubleResult(fd_reg(), fs); ++ } else { ++ // MOVF.D ++ if (!test_fcsr_bit(ft_cc)) SetFPUDoubleResult(fd_reg(), fs); ++ } ++ break; ++ } ++ case MINA: ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ SetFPUDoubleResult(fd_reg(), FPUMinA(ft, fs)); ++ break; ++ case MAXA: ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ SetFPUDoubleResult(fd_reg(), FPUMaxA(ft, fs)); ++ break; ++ case MIN: ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ SetFPUDoubleResult(fd_reg(), FPUMin(ft, fs)); ++ break; ++ case MAX: ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ SetFPUDoubleResult(fd_reg(), FPUMax(ft, fs)); ++ break; ++ case ADD_D: ++ SetFPUDoubleResult( ++ fd_reg(), ++ FPUCanonalizeOperation( ++ [](double lhs, double rhs) { return lhs + rhs; }, fs, ft)); ++ break; ++ case SUB_D: ++ SetFPUDoubleResult( ++ fd_reg(), ++ FPUCanonalizeOperation( ++ [](double lhs, double rhs) { return lhs - rhs; }, fs, ft)); ++ break; ++ case MADDF_D: ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ SetFPUDoubleResult(fd_reg(), std::fma(fs, ft, fd)); ++ break; ++ case MSUBF_D: ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ SetFPUDoubleResult(fd_reg(), std::fma(-fs, ft, fd)); ++ break; ++ case MUL_D: ++ SetFPUDoubleResult( ++ fd_reg(), ++ FPUCanonalizeOperation( ++ [](double lhs, double rhs) { return lhs * rhs; }, fs, ft)); ++ break; ++ case DIV_D: ++ SetFPUDoubleResult( ++ fd_reg(), ++ FPUCanonalizeOperation( ++ [](double lhs, double rhs) { return lhs / rhs; }, fs, ft)); ++ break; ++ case ABS_D: ++ SetFPUDoubleResult( ++ fd_reg(), ++ FPUCanonalizeOperation([](double fs) { return FPAbs(fs); }, fs)); ++ break; ++ case MOV_D: ++ SetFPUDoubleResult(fd_reg(), fs); ++ break; ++ case NEG_D: ++ SetFPUDoubleResult(fd_reg(), ++ FPUCanonalizeOperation([](double src) { return -src; }, ++ KeepSign::yes, fs)); ++ break; ++ case SQRT_D: ++ SetFPUDoubleResult( ++ fd_reg(), ++ FPUCanonalizeOperation([](double fs) { return std::sqrt(fs); }, fs)); ++ break; ++ case RSQRT_D: ++ SetFPUDoubleResult( ++ fd_reg(), FPUCanonalizeOperation( ++ [](double fs) { return 1.0 / std::sqrt(fs); }, fs)); ++ break; ++ case RECIP_D: ++ SetFPUDoubleResult(fd_reg(), FPUCanonalizeOperation( ++ [](double fs) { return 1.0 / fs; }, fs)); ++ break; ++ case C_UN_D: ++ set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft)); ++ TraceRegWr(test_fcsr_bit(fcsr_cc)); ++ break; ++ case C_EQ_D: ++ set_fcsr_bit(fcsr_cc, (fs == ft)); ++ TraceRegWr(test_fcsr_bit(fcsr_cc)); ++ break; ++ case C_UEQ_D: ++ set_fcsr_bit(fcsr_cc, (fs == ft) || (std::isnan(fs) || std::isnan(ft))); ++ TraceRegWr(test_fcsr_bit(fcsr_cc)); ++ break; ++ case C_OLT_D: ++ set_fcsr_bit(fcsr_cc, (fs < ft)); ++ TraceRegWr(test_fcsr_bit(fcsr_cc)); ++ break; ++ case C_ULT_D: ++ set_fcsr_bit(fcsr_cc, (fs < ft) || (std::isnan(fs) || std::isnan(ft))); ++ TraceRegWr(test_fcsr_bit(fcsr_cc)); ++ break; ++ case C_OLE_D: ++ set_fcsr_bit(fcsr_cc, (fs <= ft)); ++ TraceRegWr(test_fcsr_bit(fcsr_cc)); ++ break; ++ case C_ULE_D: ++ set_fcsr_bit(fcsr_cc, (fs <= ft) || (std::isnan(fs) || std::isnan(ft))); ++ TraceRegWr(test_fcsr_bit(fcsr_cc)); ++ break; ++ case CVT_W_D: { // Convert double to word. ++ double rounded; ++ int32_t result; ++ round_according_to_fcsr(fs, &rounded, &result, fs); ++ SetFPUWordResult(fd_reg(), result); ++ if (set_fcsr_round_error(fs, rounded)) { ++ set_fpu_register_word_invalid_result(fs, rounded); ++ } ++ break; ++ } ++ case ROUND_W_D: // Round double to word (round half to even). 
++    {
++      double rounded = std::floor(fs + 0.5);
++      int32_t result = static_cast<int32_t>(rounded);
++      if ((result & 1) != 0 && result - fs == 0.5) {
++        // If the number is halfway between two integers,
++        // round to the even one.
++        result--;
++      }
++      SetFPUWordResult(fd_reg(), result);
++      if (set_fcsr_round_error(fs, rounded)) {
++        set_fpu_register_invalid_result(fs, rounded);
++      }
++    } break;
++    case TRUNC_W_D: // Truncate double to word (round towards 0).
++    {
++      double rounded = trunc(fs);
++      int32_t result = static_cast<int32_t>(rounded);
++      SetFPUWordResult(fd_reg(), result);
++      if (set_fcsr_round_error(fs, rounded)) {
++        set_fpu_register_invalid_result(fs, rounded);
++      }
++    } break;
++    case FLOOR_W_D: // Round double to word towards negative infinity.
++    {
++      double rounded = std::floor(fs);
++      int32_t result = static_cast<int32_t>(rounded);
++      SetFPUWordResult(fd_reg(), result);
++      if (set_fcsr_round_error(fs, rounded)) {
++        set_fpu_register_invalid_result(fs, rounded);
++      }
++    } break;
++    case CEIL_W_D: // Round double to word towards positive infinity.
++    {
++      double rounded = std::ceil(fs);
++      int32_t result = static_cast<int32_t>(rounded);
++      SetFPUWordResult2(fd_reg(), result);
++      if (set_fcsr_round_error(fs, rounded)) {
++        set_fpu_register_invalid_result(fs, rounded);
++      }
++    } break;
++    case CVT_S_D: // Convert double to float (single).
++      SetFPUFloatResult(fd_reg(), static_cast<float>(fs));
++      break;
++    case CVT_L_D: {
++      double rounded;
++      int64_t result;
++      round64_according_to_fcsr(fs, &rounded, &result, fs);
++      SetFPUResult(fd_reg(), result);
++      if (set_fcsr_round64_error(fs, rounded)) {
++        set_fpu_register_invalid_result64(fs, rounded);
++      }
++      break;
++    }
++    case ROUND_L_D: {
++      double rounded = std::floor(fs + 0.5);
++      int64_t result = static_cast<int64_t>(rounded);
++      if ((result & 1) != 0 && result - fs == 0.5) {
++        // If the number is halfway between two integers,
++        // round to the even one.
++ result--; ++ } ++ int64_t i64 = static_cast(result); ++ SetFPUResult(fd_reg(), i64); ++ if (set_fcsr_round64_error(fs, rounded)) { ++ set_fpu_register_invalid_result64(fs, rounded); ++ } ++ break; ++ } ++ case TRUNC_L_D: { ++ double rounded = trunc(fs); ++ int64_t result = static_cast(rounded); ++ SetFPUResult(fd_reg(), result); ++ if (set_fcsr_round64_error(fs, rounded)) { ++ set_fpu_register_invalid_result64(fs, rounded); ++ } ++ break; ++ } ++ case FLOOR_L_D: { ++ double rounded = floor(fs); ++ int64_t result = static_cast(rounded); ++ SetFPUResult(fd_reg(), result); ++ if (set_fcsr_round64_error(fs, rounded)) { ++ set_fpu_register_invalid_result64(fs, rounded); ++ } ++ break; ++ } ++ case CEIL_L_D: { ++ double rounded = ceil(fs); ++ int64_t result = static_cast(rounded); ++ SetFPUResult(fd_reg(), result); ++ if (set_fcsr_round64_error(fs, rounded)) { ++ set_fpu_register_invalid_result64(fs, rounded); ++ } ++ break; ++ } ++ case CLASS_D: { ++ // Convert double input to uint64_t for easier bit manipulation ++ uint64_t classed = bit_cast(fs); ++ ++ // Extracting sign, exponent and mantissa from the input double ++ uint32_t sign = (classed >> 63) & 1; ++ uint32_t exponent = (classed >> 52) & 0x00000000000007FF; ++ uint64_t mantissa = classed & 0x000FFFFFFFFFFFFF; ++ uint64_t result; ++ double dResult; ++ ++ // Setting flags if input double is negative infinity, ++ // positive infinity, negative zero or positive zero ++ bool negInf = (classed == 0xFFF0000000000000); ++ bool posInf = (classed == 0x7FF0000000000000); ++ bool negZero = (classed == 0x8000000000000000); ++ bool posZero = (classed == 0x0000000000000000); ++ ++ bool signalingNan; ++ bool quietNan; ++ bool negSubnorm; ++ bool posSubnorm; ++ bool negNorm; ++ bool posNorm; ++ ++ // Setting flags if double is NaN ++ signalingNan = false; ++ quietNan = false; ++ if (!negInf && !posInf && exponent == 0x7FF) { ++ quietNan = ((mantissa & 0x0008000000000000) != 0) && ++ ((mantissa & (0x0008000000000000 - 1)) == 0); ++ signalingNan = !quietNan; ++ } ++ ++ // Setting flags if double is subnormal number ++ posSubnorm = false; ++ negSubnorm = false; ++ if ((exponent == 0) && (mantissa != 0)) { ++ DCHECK(sign == 0 || sign == 1); ++ posSubnorm = (sign == 0); ++ negSubnorm = (sign == 1); ++ } ++ ++ // Setting flags if double is normal number ++ posNorm = false; ++ negNorm = false; ++ if (!posSubnorm && !negSubnorm && !posInf && !negInf && !signalingNan && ++ !quietNan && !negZero && !posZero) { ++ DCHECK(sign == 0 || sign == 1); ++ posNorm = (sign == 0); ++ negNorm = (sign == 1); ++ } ++ ++ // Calculating result according to description of CLASS.D instruction ++ result = (posZero << 9) | (posSubnorm << 8) | (posNorm << 7) | ++ (posInf << 6) | (negZero << 5) | (negSubnorm << 4) | ++ (negNorm << 3) | (negInf << 2) | (quietNan << 1) | signalingNan; ++ ++ DCHECK_NE(result, 0); ++ ++ dResult = bit_cast(result); ++ SetFPUDoubleResult(fd_reg(), dResult); ++ break; ++ } ++ case C_F_D: { ++ set_fcsr_bit(fcsr_cc, false); ++ TraceRegWr(test_fcsr_bit(fcsr_cc)); ++ break; ++ } ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++ ++void Simulator::DecodeTypeRegisterWRsType() { ++ float fs = get_fpu_register_float(fs_reg()); ++ float ft = get_fpu_register_float(ft_reg()); ++ int64_t alu_out = 0x12345678; ++ switch (instr_.FunctionFieldRaw()) { ++ case CVT_S_W: // Convert word to float (single). ++ alu_out = get_fpu_register_signed_word(fs_reg()); ++ SetFPUFloatResult(fd_reg(), static_cast(alu_out)); ++ break; ++ case CVT_D_W: // Convert word to double. 
++ alu_out = get_fpu_register_signed_word(fs_reg()); ++ SetFPUDoubleResult(fd_reg(), static_cast(alu_out)); ++ break; ++ case CMP_AF: ++ SetFPUWordResult2(fd_reg(), 0); ++ break; ++ case CMP_UN: ++ if (std::isnan(fs) || std::isnan(ft)) { ++ SetFPUWordResult2(fd_reg(), -1); ++ } else { ++ SetFPUWordResult2(fd_reg(), 0); ++ } ++ break; ++ case CMP_EQ: ++ if (fs == ft) { ++ SetFPUWordResult2(fd_reg(), -1); ++ } else { ++ SetFPUWordResult2(fd_reg(), 0); ++ } ++ break; ++ case CMP_UEQ: ++ if ((fs == ft) || (std::isnan(fs) || std::isnan(ft))) { ++ SetFPUWordResult2(fd_reg(), -1); ++ } else { ++ SetFPUWordResult2(fd_reg(), 0); ++ } ++ break; ++ case CMP_LT: ++ if (fs < ft) { ++ SetFPUWordResult2(fd_reg(), -1); ++ } else { ++ SetFPUWordResult2(fd_reg(), 0); ++ } ++ break; ++ case CMP_ULT: ++ if ((fs < ft) || (std::isnan(fs) || std::isnan(ft))) { ++ SetFPUWordResult2(fd_reg(), -1); ++ } else { ++ SetFPUWordResult2(fd_reg(), 0); ++ } ++ break; ++ case CMP_LE: ++ if (fs <= ft) { ++ SetFPUWordResult2(fd_reg(), -1); ++ } else { ++ SetFPUWordResult2(fd_reg(), 0); ++ } ++ break; ++ case CMP_ULE: ++ if ((fs <= ft) || (std::isnan(fs) || std::isnan(ft))) { ++ SetFPUWordResult2(fd_reg(), -1); ++ } else { ++ SetFPUWordResult2(fd_reg(), 0); ++ } ++ break; ++ case CMP_OR: ++ if (!std::isnan(fs) && !std::isnan(ft)) { ++ SetFPUWordResult2(fd_reg(), -1); ++ } else { ++ SetFPUWordResult2(fd_reg(), 0); ++ } ++ break; ++ case CMP_UNE: ++ if ((fs != ft) || (std::isnan(fs) || std::isnan(ft))) { ++ SetFPUWordResult2(fd_reg(), -1); ++ } else { ++ SetFPUWordResult2(fd_reg(), 0); ++ } ++ break; ++ case CMP_NE: ++ if (fs != ft) { ++ SetFPUWordResult2(fd_reg(), -1); ++ } else { ++ SetFPUWordResult2(fd_reg(), 0); ++ } ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++ ++void Simulator::DecodeTypeRegisterLRsType() { ++ double fs = get_fpu_register_double(fs_reg()); ++ double ft = get_fpu_register_double(ft_reg()); ++ int64_t i64; ++ switch (instr_.FunctionFieldRaw()) { ++ case CVT_D_L: ++ i64 = get_fpu_register(fs_reg()); ++ SetFPUDoubleResult(fd_reg(), static_cast(i64)); ++ break; ++ case CVT_S_L: ++ i64 = get_fpu_register(fs_reg()); ++ SetFPUFloatResult(fd_reg(), static_cast(i64)); ++ break; ++ case CMP_AF: ++ SetFPUResult(fd_reg(), 0); ++ break; ++ case CMP_UN: ++ if (std::isnan(fs) || std::isnan(ft)) { ++ SetFPUResult(fd_reg(), -1); ++ } else { ++ SetFPUResult(fd_reg(), 0); ++ } ++ break; ++ case CMP_EQ: ++ if (fs == ft) { ++ SetFPUResult(fd_reg(), -1); ++ } else { ++ SetFPUResult(fd_reg(), 0); ++ } ++ break; ++ case CMP_UEQ: ++ if ((fs == ft) || (std::isnan(fs) || std::isnan(ft))) { ++ SetFPUResult(fd_reg(), -1); ++ } else { ++ SetFPUResult(fd_reg(), 0); ++ } ++ break; ++ case CMP_LT: ++ if (fs < ft) { ++ SetFPUResult(fd_reg(), -1); ++ } else { ++ SetFPUResult(fd_reg(), 0); ++ } ++ break; ++ case CMP_ULT: ++ if ((fs < ft) || (std::isnan(fs) || std::isnan(ft))) { ++ SetFPUResult(fd_reg(), -1); ++ } else { ++ SetFPUResult(fd_reg(), 0); ++ } ++ break; ++ case CMP_LE: ++ if (fs <= ft) { ++ SetFPUResult(fd_reg(), -1); ++ } else { ++ SetFPUResult(fd_reg(), 0); ++ } ++ break; ++ case CMP_ULE: ++ if ((fs <= ft) || (std::isnan(fs) || std::isnan(ft))) { ++ SetFPUResult(fd_reg(), -1); ++ } else { ++ SetFPUResult(fd_reg(), 0); ++ } ++ break; ++ case CMP_OR: ++ if (!std::isnan(fs) && !std::isnan(ft)) { ++ SetFPUResult(fd_reg(), -1); ++ } else { ++ SetFPUResult(fd_reg(), 0); ++ } ++ break; ++ case CMP_UNE: ++ if ((fs != ft) || (std::isnan(fs) || std::isnan(ft))) { ++ SetFPUResult(fd_reg(), -1); ++ } else { ++ SetFPUResult(fd_reg(), 
0); ++ } ++ break; ++ case CMP_NE: ++ if (fs != ft && (!std::isnan(fs) && !std::isnan(ft))) { ++ SetFPUResult(fd_reg(), -1); ++ } else { ++ SetFPUResult(fd_reg(), 0); ++ } ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++ ++void Simulator::DecodeTypeRegisterCOP1() { ++ switch (instr_.RsFieldRaw()) { ++ case BC1: // Branch on coprocessor condition. ++ case BC1EQZ: ++ case BC1NEZ: ++ UNREACHABLE(); ++ break; ++ case CFC1: ++ // At the moment only FCSR is supported. ++ DCHECK_EQ(fs_reg(), kFCSRRegister); ++ SetResult(rt_reg(), FCSR_); ++ break; ++ case MFC1: ++ set_register(rt_reg(), ++ static_cast(get_fpu_register_word(fs_reg()))); ++ TraceRegWr(get_register(rt_reg()), WORD_DWORD); ++ break; ++ case DMFC1: ++ SetResult(rt_reg(), get_fpu_register(fs_reg())); ++ break; ++ case MFHC1: ++ SetResult(rt_reg(), get_fpu_register_hi_word(fs_reg())); ++ break; ++ case CTC1: { ++ // At the moment only FCSR is supported. ++ DCHECK_EQ(fs_reg(), kFCSRRegister); ++ uint32_t reg = static_cast(rt()); ++ if (kArchVariant == kSw64r3) { ++ FCSR_ = reg | kFCSRNaN2008FlagMask; ++ } else { ++ DCHECK_EQ(kArchVariant, kSw64r2); ++ FCSR_ = reg & ~kFCSRNaN2008FlagMask; ++ } ++ TraceRegWr(FCSR_); ++ break; ++ } ++ case MTC1: ++ // Hardware writes upper 32-bits to zero on ifmovs. ++ set_fpu_register_hi_word(fs_reg(), 0); ++ set_fpu_register_word(fs_reg(), static_cast(rt())); ++ TraceRegWr(get_fpu_register(fs_reg()), FLOAT_DOUBLE); ++ break; ++ case DMTC1: ++ SetFPUResult2(fs_reg(), rt()); ++ break; ++ case MTHC1: ++ set_fpu_register_hi_word(fs_reg(), static_cast(rt())); ++ TraceRegWr(get_fpu_register(fs_reg()), DOUBLE); ++ break; ++ case S: ++ DecodeTypeRegisterSRsType(); ++ break; ++ case D: ++ DecodeTypeRegisterDRsType(); ++ break; ++ case W: ++ DecodeTypeRegisterWRsType(); ++ break; ++ case L: ++ DecodeTypeRegisterLRsType(); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++ ++void Simulator::DecodeTypeRegisterCOP1X() { ++ switch (instr_.FunctionFieldRaw()) { ++ case MADD_S: { ++ DCHECK_EQ(kArchVariant, kSw64r2); ++ float fr, ft, fs; ++ fr = get_fpu_register_float(fr_reg()); ++ fs = get_fpu_register_float(fs_reg()); ++ ft = get_fpu_register_float(ft_reg()); ++ SetFPUFloatResult(fd_reg(), fs * ft + fr); ++ break; ++ } ++ case MSUB_S: { ++ DCHECK_EQ(kArchVariant, kSw64r2); ++ float fr, ft, fs; ++ fr = get_fpu_register_float(fr_reg()); ++ fs = get_fpu_register_float(fs_reg()); ++ ft = get_fpu_register_float(ft_reg()); ++ SetFPUFloatResult(fd_reg(), fs * ft - fr); ++ break; ++ } ++ case MADD_D: { ++ DCHECK_EQ(kArchVariant, kSw64r2); ++ double fr, ft, fs; ++ fr = get_fpu_register_double(fr_reg()); ++ fs = get_fpu_register_double(fs_reg()); ++ ft = get_fpu_register_double(ft_reg()); ++ SetFPUDoubleResult(fd_reg(), fs * ft + fr); ++ break; ++ } ++ case MSUB_D: { ++ DCHECK_EQ(kArchVariant, kSw64r2); ++ double fr, ft, fs; ++ fr = get_fpu_register_double(fr_reg()); ++ fs = get_fpu_register_double(fs_reg()); ++ ft = get_fpu_register_double(ft_reg()); ++ SetFPUDoubleResult(fd_reg(), fs * ft - fr); ++ break; ++ } ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++ ++void Simulator::DecodeTypeRegisterSPECIAL() { ++ //int64_t i64hilo; ++ //uint64_t u64hilo; ++ //int64_t alu_out; ++ //bool do_interrupt = false; ++ ++ //switch (instr_.FunctionFieldRaw()) { ++ // case SELEQZ_S: ++ // DCHECK_EQ(kArchVariant, kSw64r3); ++ // SetResult(rd_reg(), rt() == 0 ? rs() : 0); ++ // break; ++ // case SELNEZ_S: ++ // DCHECK_EQ(kArchVariant, kSw64r3); ++ // SetResult(rd_reg(), rt() != 0 ? 
rs() : 0); ++ // break; ++ // case JR: { ++ // int64_t next_pc = rs(); ++ // int64_t current_pc = get_pc(); ++ // Instruction* branch_delay_instr = ++ // reinterpret_cast(current_pc + Instruction::kInstrSize); ++ // BranchDelayInstructionDecode(branch_delay_instr); ++ // set_pc(next_pc); ++ // pc_modified_ = true; ++ // break; ++ // } ++ // case JALR: { ++ // int64_t next_pc = rs(); ++ // int64_t current_pc = get_pc(); ++ // int32_t return_addr_reg = rd_reg(); ++ // Instruction* branch_delay_instr = ++ // reinterpret_cast(current_pc + Instruction::kInstrSize); ++ // BranchDelayInstructionDecode(branch_delay_instr); ++ // set_register(return_addr_reg, current_pc + 2 * Instruction::kInstrSize); ++ // set_pc(next_pc); ++ // pc_modified_ = true; ++ // break; ++ // } ++ // case SLL: ++ // SetResult(rd_reg(), static_cast(rt()) << sa()); ++ // break; ++ // case DSLL: ++ // SetResult(rd_reg(), rt() << sa()); ++ // break; ++ // case DSLL32: ++ // SetResult(rd_reg(), rt() << sa() << 32); ++ // break; ++ // case SRL: ++ // if (rs_reg() == 0) { ++ // // Regular logical right shift of a word by a fixed number of ++ // // bits instruction. RS field is always equal to 0. ++ // // Sign-extend the 32-bit result. ++ // alu_out = static_cast(static_cast(rt_u()) >> sa()); ++ // } else if (rs_reg() == 1) { ++ // // Logical right-rotate of a word by a fixed number of bits. This ++ // // is special case of SRL instruction. ++ // // RS field is equal to 00001. ++ // alu_out = static_cast( ++ // base::bits::RotateRight32(static_cast(rt_u()), ++ // static_cast(sa()))); ++ // } else { ++ // UNREACHABLE(); ++ // } ++ // SetResult(rd_reg(), alu_out); ++ // break; ++ // case DSRL: ++ // if (rs_reg() == 0) { ++ // // Regular logical right shift of a word by a fixed number of ++ // // bits instruction. RS field is always equal to 0. ++ // // Sign-extend the 64-bit result. ++ // alu_out = static_cast(rt_u() >> sa()); ++ // } else if (rs_reg() == 1) { ++ // // Logical right-rotate of a word by a fixed number of bits. This ++ // // is special case of SRL instruction. ++ // // RS field is equal to 00001. ++ // alu_out = static_cast(base::bits::RotateRight64(rt_u(), sa())); ++ // } else { ++ // UNREACHABLE(); ++ // } ++ // SetResult(rd_reg(), alu_out); ++ // break; ++ // case DSRL32: ++ // if (rs_reg() == 0) { ++ // // Regular logical right shift of a word by a fixed number of ++ // // bits instruction. RS field is always equal to 0. ++ // // Sign-extend the 64-bit result. ++ // alu_out = static_cast(rt_u() >> sa() >> 32); ++ // } else if (rs_reg() == 1) { ++ // // Logical right-rotate of a word by a fixed number of bits. This ++ // // is special case of SRL instruction. ++ // // RS field is equal to 00001. ++ // alu_out = ++ // static_cast(base::bits::RotateRight64(rt_u(), sa() + 32)); ++ // } else { ++ // UNREACHABLE(); ++ // } ++ // SetResult(rd_reg(), alu_out); ++ // break; ++ // case SRA: ++ // SetResult(rd_reg(), (int32_t)rt() >> sa()); ++ // break; ++ // case DSRA: ++ // SetResult(rd_reg(), rt() >> sa()); ++ // break; ++ // case DSRA32: ++ // SetResult(rd_reg(), rt() >> sa() >> 32); ++ // break; ++ // case SLLV: ++ // SetResult(rd_reg(), (int32_t)rt() << rs()); ++ // break; ++ // case DSLLV: ++ // SetResult(rd_reg(), rt() << rs()); ++ // break; ++ // case SRLV: ++ // if (sa() == 0) { ++ // // Regular logical right-shift of a word by a variable number of ++ // // bits instruction. SA field is always equal to 0. 
++ // alu_out = static_cast((uint32_t)rt_u() >> rs()); ++ // } else { ++ // // Logical right-rotate of a word by a variable number of bits. ++ // // This is special case od SRLV instruction. ++ // // SA field is equal to 00001. ++ // alu_out = static_cast( ++ // base::bits::RotateRight32(static_cast(rt_u()), ++ // static_cast(rs_u()))); ++ // } ++ // SetResult(rd_reg(), alu_out); ++ // break; ++ // case DSRLV: ++ // if (sa() == 0) { ++ // // Regular logical right-shift of a word by a variable number of ++ // // bits instruction. SA field is always equal to 0. ++ // alu_out = static_cast(rt_u() >> rs()); ++ // } else { ++ // // Logical right-rotate of a word by a variable number of bits. ++ // // This is special case od SRLV instruction. ++ // // SA field is equal to 00001. ++ // alu_out = ++ // static_cast(base::bits::RotateRight64(rt_u(), rs_u())); ++ // } ++ // SetResult(rd_reg(), alu_out); ++ // break; ++ // case SRAV: ++ // SetResult(rd_reg(), (int32_t)rt() >> rs()); ++ // break; ++ // case DSRAV: ++ // SetResult(rd_reg(), rt() >> rs()); ++ // break; ++ // case LSA: { ++ // DCHECK_EQ(kArchVariant, kSw64r3); ++ // int8_t sa = lsa_sa() + 1; ++ // int32_t _rt = static_cast(rt()); ++ // int32_t _rs = static_cast(rs()); ++ // int32_t res = _rs << sa; ++ // res += _rt; ++ // SetResult(rd_reg(), static_cast(res)); ++ // break; ++ // } ++ // case DLSA: ++ // DCHECK_EQ(kArchVariant, kSw64r3); ++ // SetResult(rd_reg(), (rs() << (lsa_sa() + 1)) + rt()); ++ // break; ++ // case MFHI: // MFHI == CLZ on R6. ++ // if (kArchVariant != kSw64r3) { ++ // DCHECK_EQ(sa(), 0); ++ // alu_out = get_register(HI); ++ // } else { ++ // // SW64 spec: If no bits were set in GPR rs(), the result written to ++ // // GPR rd() is 32. ++ // DCHECK_EQ(sa(), 1); ++ // alu_out = base::bits::CountLeadingZeros32(static_cast(rs_u())); ++ // } ++ // SetResult(rd_reg(), alu_out); ++ // break; ++ // case MFLO: // MFLO == DCLZ on R6. ++ // if (kArchVariant != kSw64r3) { ++ // DCHECK_EQ(sa(), 0); ++ // alu_out = get_register(LO); ++ // } else { ++ // // SW64 spec: If no bits were set in GPR rs(), the result written to ++ // // GPR rd() is 64. ++ // DCHECK_EQ(sa(), 1); ++ // alu_out = base::bits::CountLeadingZeros64(static_cast(rs_u())); ++ // } ++ // SetResult(rd_reg(), alu_out); ++ // break; ++ // // Instructions using HI and LO registers. ++ // case MULT: { // MULT == D_MUL_MUH. 
++ // int32_t rs_lo = static_cast(rs()); ++ // int32_t rt_lo = static_cast(rt()); ++ // i64hilo = static_cast(rs_lo) * static_cast(rt_lo); ++ // if (kArchVariant != kSw64r3) { ++ // set_register(LO, static_cast(i64hilo & 0xFFFFFFFF)); ++ // set_register(HI, static_cast(i64hilo >> 32)); ++ // } else { ++ // switch (sa()) { ++ // case MUL_OP: ++ // SetResult(rd_reg(), static_cast(i64hilo & 0xFFFFFFFF)); ++ // break; ++ // case MUH_OP: ++ // SetResult(rd_reg(), static_cast(i64hilo >> 32)); ++ // break; ++ // default: ++ // UNIMPLEMENTED_SW64(); ++ // break; ++ // } ++ // } ++ // break; ++ // } ++ // case MULTU: ++ // u64hilo = static_cast(rs_u() & 0xFFFFFFFF) * ++ // static_cast(rt_u() & 0xFFFFFFFF); ++ // if (kArchVariant != kSw64r3) { ++ // set_register(LO, static_cast(u64hilo & 0xFFFFFFFF)); ++ // set_register(HI, static_cast(u64hilo >> 32)); ++ // } else { ++ // switch (sa()) { ++ // case MUL_OP: ++ // SetResult(rd_reg(), static_cast(u64hilo & 0xFFFFFFFF)); ++ // break; ++ // case MUH_OP: ++ // SetResult(rd_reg(), static_cast(u64hilo >> 32)); ++ // break; ++ // default: ++ // UNIMPLEMENTED_SW64(); ++ // break; ++ // } ++ // } ++ // break; ++ // case DMULT: // DMULT == D_MUL_MUH. ++ // if (kArchVariant != kSw64r3) { ++ // set_register(LO, rs() * rt()); ++ // set_register(HI, MultiplyHighSigned(rs(), rt())); ++ // } else { ++ // switch (sa()) { ++ // case MUL_OP: ++ // SetResult(rd_reg(), rs() * rt()); ++ // break; ++ // case MUH_OP: ++ // SetResult(rd_reg(), MultiplyHighSigned(rs(), rt())); ++ // break; ++ // default: ++ // UNIMPLEMENTED_SW64(); ++ // break; ++ // } ++ // } ++ // break; ++ // case DMULTU: ++ // UNIMPLEMENTED_SW64(); ++ // break; ++ // case DIV: ++ // case DDIV: { ++ // const int64_t int_min_value = ++ // instr_.FunctionFieldRaw() == DIV ? INT_MIN : LONG_MIN; ++ // switch (kArchVariant) { ++ // case kSw64r2: ++ // // Divide by zero and overflow was not checked in the ++ // // configuration step - div and divu do not raise exceptions. On ++ // // division by 0 the result will be UNPREDICTABLE. On overflow ++ // // (INT_MIN/-1), return INT_MIN which is what the hardware does. 
++ // if (rs() == int_min_value && rt() == -1) { ++ // set_register(LO, int_min_value); ++ // set_register(HI, 0); ++ // } else if (rt() != 0) { ++ // set_register(LO, rs() / rt()); ++ // set_register(HI, rs() % rt()); ++ // } ++ // break; ++ // case kSw64r3: ++ // switch (sa()) { ++ // case DIV_OP: ++ // if (rs() == int_min_value && rt() == -1) { ++ // SetResult(rd_reg(), int_min_value); ++ // } else if (rt() != 0) { ++ // SetResult(rd_reg(), rs() / rt()); ++ // } ++ // break; ++ // case MOD_OP: ++ // if (rs() == int_min_value && rt() == -1) { ++ // SetResult(rd_reg(), 0); ++ // } else if (rt() != 0) { ++ // SetResult(rd_reg(), rs() % rt()); ++ // } ++ // break; ++ // default: ++ // UNIMPLEMENTED_SW64(); ++ // break; ++ // } ++ // break; ++ // default: ++ // break; ++ // } ++ // break; ++ // } ++ // case DIVU: ++ // switch (kArchVariant) { ++ // case kSw64r3: { ++ // uint32_t rt_u_32 = static_cast(rt_u()); ++ // uint32_t rs_u_32 = static_cast(rs_u()); ++ // switch (sa()) { ++ // case DIV_OP: ++ // if (rt_u_32 != 0) { ++ // SetResult(rd_reg(), rs_u_32 / rt_u_32); ++ // } ++ // break; ++ // case MOD_OP: ++ // if (rt_u() != 0) { ++ // SetResult(rd_reg(), rs_u_32 % rt_u_32); ++ // } ++ // break; ++ // default: ++ // UNIMPLEMENTED_SW64(); ++ // break; ++ // } ++ // } break; ++ // default: { ++ // if (rt_u() != 0) { ++ // uint32_t rt_u_32 = static_cast(rt_u()); ++ // uint32_t rs_u_32 = static_cast(rs_u()); ++ // set_register(LO, rs_u_32 / rt_u_32); ++ // set_register(HI, rs_u_32 % rt_u_32); ++ // } ++ // } ++ // } ++ // break; ++ // case DDIVU: ++ // switch (kArchVariant) { ++ // case kSw64r3: { ++ // switch (instr_.SaValue()) { ++ // case DIV_OP: ++ // if (rt_u() != 0) { ++ // SetResult(rd_reg(), rs_u() / rt_u()); ++ // } ++ // break; ++ // case MOD_OP: ++ // if (rt_u() != 0) { ++ // SetResult(rd_reg(), rs_u() % rt_u()); ++ // } ++ // break; ++ // default: ++ // UNIMPLEMENTED_SW64(); ++ // break; ++ // } ++ // } break; ++ // default: { ++ // if (rt_u() != 0) { ++ // set_register(LO, rs_u() / rt_u()); ++ // set_register(HI, rs_u() % rt_u()); ++ // } ++ // } ++ // } ++ // break; ++ // case ADD: ++ // case DADD: ++ // if (HaveSameSign(rs(), rt())) { ++ // if (rs() > 0) { ++ // if (rs() > (Registers::kMaxValue - rt())) { ++ // SignalException(kIntegerOverflow); ++ // } ++ // } else if (rs() < 0) { ++ // if (rs() < (Registers::kMinValue - rt())) { ++ // SignalException(kIntegerUnderflow); ++ // } ++ // } ++ // } ++ // SetResult(rd_reg(), rs() + rt()); ++ // break; ++ // case ADDU: { ++ // int32_t alu32_out = static_cast(rs() + rt()); ++ // // Sign-extend result of 32bit operation into 64bit register. ++ // SetResult(rd_reg(), static_cast(alu32_out)); ++ // break; ++ // } ++ // case DADDU: ++ // SetResult(rd_reg(), rs() + rt()); ++ // break; ++ // case SUB: ++ // case DSUB: ++ // if (!HaveSameSign(rs(), rt())) { ++ // if (rs() > 0) { ++ // if (rs() > (Registers::kMaxValue + rt())) { ++ // SignalException(kIntegerOverflow); ++ // } ++ // } else if (rs() < 0) { ++ // if (rs() < (Registers::kMinValue + rt())) { ++ // SignalException(kIntegerUnderflow); ++ // } ++ // } ++ // } ++ // SetResult(rd_reg(), rs() - rt()); ++ // break; ++ // case SUBU: { ++ // int32_t alu32_out = static_cast(rs() - rt()); ++ // // Sign-extend result of 32bit operation into 64bit register. 
++ // SetResult(rd_reg(), static_cast(alu32_out)); ++ // break; ++ // } ++ // case DSUBU: ++ // SetResult(rd_reg(), rs() - rt()); ++ // break; ++ // case AND: ++ // SetResult(rd_reg(), rs() & rt()); ++ // break; ++ // case OR: ++ // SetResult(rd_reg(), rs() | rt()); ++ // break; ++ // case XOR: ++ // SetResult(rd_reg(), rs() ^ rt()); ++ // break; ++ // case NOR: ++ // SetResult(rd_reg(), ~(rs() | rt())); ++ // break; ++ // case SLT: ++ // SetResult(rd_reg(), rs() < rt() ? 1 : 0); ++ // break; ++ // case SLTU: ++ // SetResult(rd_reg(), rs_u() < rt_u() ? 1 : 0); ++ // break; ++ // // Break and trap instructions. ++ // case BREAK: ++ // do_interrupt = true; ++ // break; ++ // case TGE: ++ // do_interrupt = rs() >= rt(); ++ // break; ++ // case TGEU: ++ // do_interrupt = rs_u() >= rt_u(); ++ // break; ++ // case TLT: ++ // do_interrupt = rs() < rt(); ++ // break; ++ // case TLTU: ++ // do_interrupt = rs_u() < rt_u(); ++ // break; ++ // case TEQ: ++ // do_interrupt = rs() == rt(); ++ // break; ++ // case TNE: ++ // do_interrupt = rs() != rt(); ++ // break; ++ // case SYNC: ++ // // TODO(palfia): Ignore memb instruction for now. ++ // break; ++ // // Conditional moves. ++ // case MOVN: ++ // if (rt()) { ++ // SetResult(rd_reg(), rs()); ++ // } ++ // break; ++ // case MOVCI: { ++ // uint32_t cc = instr_.FBccValue(); ++ // uint32_t fcsr_cc = get_fcsr_condition_bit(cc); ++ // if (instr_.Bit(16)) { // Read Tf bit. ++ // if (test_fcsr_bit(fcsr_cc)) SetResult(rd_reg(), rs()); ++ // } else { ++ // if (!test_fcsr_bit(fcsr_cc)) SetResult(rd_reg(), rs()); ++ // } ++ // break; ++ // } ++ // case MOVZ: ++ // if (!rt()) { ++ // SetResult(rd_reg(), rs()); ++ // } ++ // break; ++ // default: ++ // UNREACHABLE(); ++ //} ++ //if (do_interrupt) { ++ // SoftwareInterrupt(); ++ //} ++} ++ ++ ++void Simulator::DecodeTypeRegisterSPECIAL2() { ++ int64_t alu_out; ++ switch (instr_.FunctionFieldRaw()) { ++ case MUL: ++ alu_out = static_cast(rs_u()) * static_cast(rt_u()); ++ SetResult(rd_reg(), alu_out); ++ // HI and LO are UNPREDICTABLE after the operation. ++ set_register(LO, Unpredictable); ++ set_register(HI, Unpredictable); ++ break; ++ case CLZ: ++ // If no bits were set in GPR rs(), the result written to ++ // GPR rd is 32. ++ alu_out = base::bits::CountLeadingZeros32(static_cast(rs_u())); ++ SetResult(rd_reg(), alu_out); ++ break; ++ case DCLZ: ++ // If no bits were set in GPR rs(), the result written to ++ // GPR rd is 64. ++ alu_out = base::bits::CountLeadingZeros64(static_cast(rs_u())); ++ SetResult(rd_reg(), alu_out); ++ break; ++ default: ++ alu_out = 0x12345678; ++ UNREACHABLE(); ++ } ++} ++ ++ ++void Simulator::DecodeTypeRegisterSPECIAL3() { ++ int64_t alu_out; ++ switch (instr_.FunctionFieldRaw()) { ++ case EXT: { ++ // Interpret rd field as 5-bit msbd of extract. ++ uint16_t msbd = rd_reg(); ++ // Interpret sa field as 5-bit lsb of extract. ++ uint16_t lsb = sa(); ++ uint16_t size = msbd + 1; ++ uint64_t mask = (1ULL << size) - 1; ++ alu_out = static_cast((rs_u() & (mask << lsb)) >> lsb); ++ SetResult(rt_reg(), alu_out); ++ break; ++ } ++ case DEXT: { ++ // Interpret rd field as 5-bit msbd of extract. ++ uint16_t msbd = rd_reg(); ++ // Interpret sa field as 5-bit lsb of extract. ++ uint16_t lsb = sa(); ++ uint16_t size = msbd + 1; ++ uint64_t mask = (size == 64) ? UINT64_MAX : (1ULL << size) - 1; ++ alu_out = static_cast((rs_u() & (mask << lsb)) >> lsb); ++ SetResult(rt_reg(), alu_out); ++ break; ++ } ++ case DEXTM: { ++ // Interpret rd field as 5-bit msbdminus32 of extract. 
++ uint16_t msbdminus32 = rd_reg(); ++ // Interpret sa field as 5-bit lsb of extract. ++ uint16_t lsb = sa(); ++ uint16_t size = msbdminus32 + 1 + 32; ++ uint64_t mask = (size == 64) ? UINT64_MAX : (1ULL << size) - 1; ++ alu_out = static_cast((rs_u() & (mask << lsb)) >> lsb); ++ SetResult(rt_reg(), alu_out); ++ break; ++ } ++ case DEXTU: { ++ // Interpret rd field as 5-bit msbd of extract. ++ uint16_t msbd = rd_reg(); ++ // Interpret sa field as 5-bit lsbminus32 of extract and add 32 to get ++ // lsb. ++ uint16_t lsb = sa() + 32; ++ uint16_t size = msbd + 1; ++ uint64_t mask = (size == 64) ? UINT64_MAX : (1ULL << size) - 1; ++ alu_out = static_cast((rs_u() & (mask << lsb)) >> lsb); ++ SetResult(rt_reg(), alu_out); ++ break; ++ } ++ case INS: { ++ // Interpret rd field as 5-bit msb of insert. ++ uint16_t msb = rd_reg(); ++ // Interpret sa field as 5-bit lsb of insert. ++ uint16_t lsb = sa(); ++ uint16_t size = msb - lsb + 1; ++ uint64_t mask = (1ULL << size) - 1; ++ alu_out = static_cast((rt_u() & ~(mask << lsb)) | ++ ((rs_u() & mask) << lsb)); ++ SetResult(rt_reg(), alu_out); ++ break; ++ } ++ case DINS: { ++ // Interpret rd field as 5-bit msb of insert. ++ uint16_t msb = rd_reg(); ++ // Interpret sa field as 5-bit lsb of insert. ++ uint16_t lsb = sa(); ++ uint16_t size = msb - lsb + 1; ++ uint64_t mask = (1ULL << size) - 1; ++ alu_out = (rt_u() & ~(mask << lsb)) | ((rs_u() & mask) << lsb); ++ SetResult(rt_reg(), alu_out); ++ break; ++ } ++ case DINSM: { ++ // Interpret rd field as 5-bit msbminus32 of insert. ++ uint16_t msbminus32 = rd_reg(); ++ // Interpret sa field as 5-bit lsb of insert. ++ uint16_t lsb = sa(); ++ uint16_t size = msbminus32 + 32 - lsb + 1; ++ uint64_t mask; ++ if (size < 64) ++ mask = (1ULL << size) - 1; ++ else ++ mask = std::numeric_limits::max(); ++ alu_out = (rt_u() & ~(mask << lsb)) | ((rs_u() & mask) << lsb); ++ SetResult(rt_reg(), alu_out); ++ break; ++ } ++ case DINSU: { ++ // Interpret rd field as 5-bit msbminus32 of insert. ++ uint16_t msbminus32 = rd_reg(); ++ // Interpret rd field as 5-bit lsbminus32 of insert. 
++ uint16_t lsbminus32 = sa(); ++ uint16_t lsb = lsbminus32 + 32; ++ uint16_t size = msbminus32 + 32 - lsb + 1; ++ uint64_t mask = (1ULL << size) - 1; ++ alu_out = (rt_u() & ~(mask << lsb)) | ((rs_u() & mask) << lsb); ++ SetResult(rt_reg(), alu_out); ++ break; ++ } ++ case BSHFL: { ++ int32_t sa = instr_.SaFieldRaw() >> kSaShift; ++ switch (sa) { ++ case BITSWAP: { ++ uint32_t input = static_cast(rt()); ++ uint32_t output = 0; ++ uint8_t i_byte, o_byte; ++ ++ // Reverse the bit in byte for each individual byte ++ for (int i = 0; i < 4; i++) { ++ output = output >> 8; ++ i_byte = input & 0xFF; ++ ++ // Fast way to reverse bits in byte ++ // Devised by Sean Anderson, July 13, 2001 ++ o_byte = static_cast(((i_byte * 0x0802LU & 0x22110LU) | ++ (i_byte * 0x8020LU & 0x88440LU)) * ++ 0x10101LU >> ++ 16); ++ ++ output = output | (static_cast(o_byte << 24)); ++ input = input >> 8; ++ } ++ ++ alu_out = static_cast(static_cast(output)); ++ break; ++ } ++ case SEB: { ++ uint8_t input = static_cast(rt()); ++ uint32_t output = input; ++ uint32_t mask = 0x00000080; ++ ++ // Extending sign ++ if (mask & input) { ++ output |= 0xFFFFFF00; ++ } ++ ++ alu_out = static_cast(output); ++ break; ++ } ++ case SEH: { ++ uint16_t input = static_cast(rt()); ++ uint32_t output = input; ++ uint32_t mask = 0x00008000; ++ ++ // Extending sign ++ if (mask & input) { ++ output |= 0xFFFF0000; ++ } ++ ++ alu_out = static_cast(output); ++ break; ++ } ++ case WSBH: { ++ uint32_t input = static_cast(rt()); ++ uint64_t output = 0; ++ ++ uint32_t mask = 0xFF000000; ++ for (int i = 0; i < 4; i++) { ++ uint32_t tmp = mask & input; ++ if (i % 2 == 0) { ++ tmp = tmp >> 8; ++ } else { ++ tmp = tmp << 8; ++ } ++ output = output | tmp; ++ mask = mask >> 8; ++ } ++ mask = 0x80000000; ++ ++ // Extending sign ++ if (mask & output) { ++ output |= 0xFFFFFFFF00000000; ++ } ++ ++ alu_out = static_cast(output); ++ break; ++ } ++ default: { ++ const uint8_t bp2 = instr_.Bp2Value(); ++ sa >>= kBp2Bits; ++ switch (sa) { ++ case ALIGN: { ++ if (bp2 == 0) { ++ alu_out = static_cast(rt()); ++ } else { ++ uint64_t rt_hi = rt() << (8 * bp2); ++ uint64_t rs_lo = rs() >> (8 * (4 - bp2)); ++ alu_out = static_cast(rt_hi | rs_lo); ++ } ++ break; ++ } ++ default: ++ alu_out = 0x12345678; ++ UNREACHABLE(); ++ break; ++ } ++ break; ++ } ++ } ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case DBSHFL: { ++ int32_t sa = instr_.SaFieldRaw() >> kSaShift; ++ switch (sa) { ++ case DBITSWAP: { ++ switch (sa) { ++ case DBITSWAP_SA: { ++ uint64_t input = static_cast(rt()); ++ uint64_t output = 0; ++ uint8_t i_byte, o_byte; ++ ++ // Reverse the bit in byte for each individual byte ++ for (int i = 0; i < 8; i++) { ++ output = output >> 8; ++ i_byte = input & 0xFF; ++ ++ // Fast way to reverse bits in byte ++ // Devised by Sean Anderson, July 13, 2001 ++ o_byte = ++ static_cast(((i_byte * 0x0802LU & 0x22110LU) | ++ (i_byte * 0x8020LU & 0x88440LU)) * ++ 0x10101LU >> ++ 16); ++ ++ output = output | ((static_cast(o_byte) << 56)); ++ input = input >> 8; ++ } ++ ++ alu_out = static_cast(output); ++ break; ++ } ++ } ++ break; ++ } ++ case DSBH: { ++ uint64_t input = static_cast(rt()); ++ uint64_t output = 0; ++ ++ uint64_t mask = 0xFF00000000000000; ++ for (int i = 0; i < 8; i++) { ++ uint64_t tmp = mask & input; ++ if (i % 2 == 0) ++ tmp = tmp >> 8; ++ else ++ tmp = tmp << 8; ++ ++ output = output | tmp; ++ mask = mask >> 8; ++ } ++ ++ alu_out = static_cast(output); ++ break; ++ } ++ case DSHD: { ++ uint64_t input = static_cast(rt()); ++ uint64_t output = 0; ++ ++ uint64_t 
mask = 0xFFFF000000000000; ++ for (int i = 0; i < 4; i++) { ++ uint64_t tmp = mask & input; ++ if (i == 0) ++ tmp = tmp >> 48; ++ else if (i == 1) ++ tmp = tmp >> 16; ++ else if (i == 2) ++ tmp = tmp << 16; ++ else ++ tmp = tmp << 48; ++ output = output | tmp; ++ mask = mask >> 16; ++ } ++ ++ alu_out = static_cast(output); ++ break; ++ } ++ default: { ++ const uint8_t bp3 = instr_.Bp3Value(); ++ sa >>= kBp3Bits; ++ switch (sa) { ++ case DALIGN: { ++ if (bp3 == 0) { ++ alu_out = static_cast(rt()); ++ } else { ++ uint64_t rt_hi = rt() << (8 * bp3); ++ uint64_t rs_lo = rs() >> (8 * (8 - bp3)); ++ alu_out = static_cast(rt_hi | rs_lo); ++ } ++ break; ++ } ++ default: ++ alu_out = 0x12345678; ++ UNREACHABLE(); ++ break; ++ } ++ break; ++ } ++ } ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++int Simulator::DecodeMsaDataFormat() { ++ int df = -1; ++ if (instr_.IsMSABranchInstr()) { ++ switch (instr_.RsFieldRaw()) { ++ case BZ_V: ++ case BNZ_V: ++ df = MSA_VECT; ++ break; ++ case BZ_B: ++ case BNZ_B: ++ df = MSA_BYTE; ++ break; ++ case BZ_H: ++ case BNZ_H: ++ df = MSA_HALF; ++ break; ++ case BZ_W: ++ case BNZ_W: ++ df = MSA_WORD; ++ break; ++ case BZ_D: ++ case BNZ_D: ++ df = MSA_DWORD; ++ break; ++ default: ++ UNREACHABLE(); ++ break; ++ } ++ } else { ++ int DF[] = {MSA_BYTE, MSA_HALF, MSA_WORD, MSA_DWORD}; ++ switch (instr_.MSAMinorOpcodeField()) { ++ case kMsaMinorI5: ++ case kMsaMinorI10: ++ case kMsaMinor3R: ++ df = DF[instr_.Bits(22, 21)]; ++ break; ++ case kMsaMinorMI10: ++ df = DF[instr_.Bits(1, 0)]; ++ break; ++ case kMsaMinorBIT: ++ df = DF[instr_.MsaBitDf()]; ++ break; ++ case kMsaMinorELM: ++ df = DF[instr_.MsaElmDf()]; ++ break; ++ case kMsaMinor3RF: { ++ uint32_t opcode = instr_.InstructionBits() & kMsa3RFMask; ++ switch (opcode) { ++ case FEXDO: ++ case FTQ: ++ case MUL_Q: ++ case MADD_Q: ++ case MSUB_Q: ++ case MULR_Q: ++ case MADDR_Q: ++ case MSUBR_Q: ++ df = DF[1 + instr_.Bit(21)]; ++ break; ++ default: ++ df = DF[2 + instr_.Bit(21)]; ++ break; ++ } ++ } break; ++ case kMsaMinor2R: ++ df = DF[instr_.Bits(17, 16)]; ++ break; ++ case kMsaMinor2RF: ++ df = DF[2 + instr_.Bit(16)]; ++ break; ++ default: ++ UNREACHABLE(); ++ break; ++ } ++ } ++ return df; ++} ++ ++void Simulator::DecodeTypeMsaI8() { ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ DCHECK(CpuFeatures::IsSupported(SW64_SIMD)); ++ uint32_t opcode = instr_.InstructionBits() & kMsaI8Mask; ++ int8_t i8 = instr_.MsaImm8Value(); ++ msa_reg_t ws, wd; ++ ++ switch (opcode) { ++ case ANDI_B: ++ get_msa_register(instr_.WsValue(), ws.b); ++ for (int i = 0; i < kMSALanesByte; i++) { ++ wd.b[i] = ws.b[i] & i8; ++ } ++ set_msa_register(instr_.WdValue(), wd.b); ++ TraceMSARegWr(wd.b); ++ break; ++ case ORI_B: ++ get_msa_register(instr_.WsValue(), ws.b); ++ for (int i = 0; i < kMSALanesByte; i++) { ++ wd.b[i] = ws.b[i] | i8; ++ } ++ set_msa_register(instr_.WdValue(), wd.b); ++ TraceMSARegWr(wd.b); ++ break; ++ case NORI_B: ++ get_msa_register(instr_.WsValue(), ws.b); ++ for (int i = 0; i < kMSALanesByte; i++) { ++ wd.b[i] = ~(ws.b[i] | i8); ++ } ++ set_msa_register(instr_.WdValue(), wd.b); ++ TraceMSARegWr(wd.b); ++ break; ++ case XORI_B: ++ get_msa_register(instr_.WsValue(), ws.b); ++ for (int i = 0; i < kMSALanesByte; i++) { ++ wd.b[i] = ws.b[i] ^ i8; ++ } ++ set_msa_register(instr_.WdValue(), wd.b); ++ TraceMSARegWr(wd.b); ++ break; ++ case BMNZI_B: ++ get_msa_register(instr_.WsValue(), ws.b); ++ get_msa_register(instr_.WdValue(), wd.b); ++ for (int i = 0; i < kMSALanesByte; i++) { ++ wd.b[i] = 
(ws.b[i] & i8) | (wd.b[i] & ~i8); ++ } ++ set_msa_register(instr_.WdValue(), wd.b); ++ TraceMSARegWr(wd.b); ++ break; ++ case BMZI_B: ++ get_msa_register(instr_.WsValue(), ws.b); ++ get_msa_register(instr_.WdValue(), wd.b); ++ for (int i = 0; i < kMSALanesByte; i++) { ++ wd.b[i] = (ws.b[i] & ~i8) | (wd.b[i] & i8); ++ } ++ set_msa_register(instr_.WdValue(), wd.b); ++ TraceMSARegWr(wd.b); ++ break; ++ case BSELI_B: ++ get_msa_register(instr_.WsValue(), ws.b); ++ get_msa_register(instr_.WdValue(), wd.b); ++ for (int i = 0; i < kMSALanesByte; i++) { ++ wd.b[i] = (ws.b[i] & ~wd.b[i]) | (wd.b[i] & i8); ++ } ++ set_msa_register(instr_.WdValue(), wd.b); ++ TraceMSARegWr(wd.b); ++ break; ++ case SHF_B: ++ get_msa_register(instr_.WsValue(), ws.b); ++ for (int i = 0; i < kMSALanesByte; i++) { ++ int j = i % 4; ++ int k = (i8 >> (2 * j)) & 0x3; ++ wd.b[i] = ws.b[i - j + k]; ++ } ++ set_msa_register(instr_.WdValue(), wd.b); ++ TraceMSARegWr(wd.b); ++ break; ++ case SHF_H: ++ get_msa_register(instr_.WsValue(), ws.h); ++ for (int i = 0; i < kMSALanesHalf; i++) { ++ int j = i % 4; ++ int k = (i8 >> (2 * j)) & 0x3; ++ wd.h[i] = ws.h[i - j + k]; ++ } ++ set_msa_register(instr_.WdValue(), wd.h); ++ TraceMSARegWr(wd.h); ++ break; ++ case SHF_W: ++ get_msa_register(instr_.WsValue(), ws.w); ++ for (int i = 0; i < kMSALanesWord; i++) { ++ int j = (i8 >> (2 * i)) & 0x3; ++ wd.w[i] = ws.w[j]; ++ } ++ set_msa_register(instr_.WdValue(), wd.w); ++ TraceMSARegWr(wd.w); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++template ++T Simulator::MsaI5InstrHelper(uint32_t opcode, T ws, int32_t i5) { ++ T res; ++ uint32_t ui5 = i5 & 0x1Fu; ++ uint64_t ws_u64 = static_cast(ws); ++ uint64_t ui5_u64 = static_cast(ui5); ++ ++ switch (opcode) { ++ case ADDVI: ++ res = static_cast(ws + ui5); ++ break; ++ case SUBVI: ++ res = static_cast(ws - ui5); ++ break; ++ case MAXI_S: ++ res = static_cast(Max(ws, static_cast(i5))); ++ break; ++ case MINI_S: ++ res = static_cast(Min(ws, static_cast(i5))); ++ break; ++ case MAXI_U: ++ res = static_cast(Max(ws_u64, ui5_u64)); ++ break; ++ case MINI_U: ++ res = static_cast(Min(ws_u64, ui5_u64)); ++ break; ++ case CEQI: ++ res = static_cast(!Compare(ws, static_cast(i5)) ? -1ull : 0ull); ++ break; ++ case CLTI_S: ++ res = static_cast((Compare(ws, static_cast(i5)) == -1) ? -1ull ++ : 0ull); ++ break; ++ case CLTI_U: ++ res = static_cast((Compare(ws_u64, ui5_u64) == -1) ? -1ull : 0ull); ++ break; ++ case CLEI_S: ++ res = ++ static_cast((Compare(ws, static_cast(i5)) != 1) ? -1ull : 0ull); ++ break; ++ case CLEI_U: ++ res = static_cast((Compare(ws_u64, ui5_u64) != 1) ? 
-1ull : 0ull); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ return res; ++} ++ ++void Simulator::DecodeTypeMsaI5() { ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ DCHECK(CpuFeatures::IsSupported(SW64_SIMD)); ++ uint32_t opcode = instr_.InstructionBits() & kMsaI5Mask; ++ msa_reg_t ws, wd; ++ ++ // sign extend 5bit value to int32_t ++ int32_t i5 = static_cast(instr_.MsaImm5Value() << 27) >> 27; ++ ++#define MSA_I5_DF(elem, num_of_lanes) \ ++ get_msa_register(instr_.WsValue(), ws.elem); \ ++ for (int i = 0; i < num_of_lanes; i++) { \ ++ wd.elem[i] = MsaI5InstrHelper(opcode, ws.elem[i], i5); \ ++ } \ ++ set_msa_register(instr_.WdValue(), wd.elem); \ ++ TraceMSARegWr(wd.elem) ++ ++ switch (DecodeMsaDataFormat()) { ++ case MSA_BYTE: ++ MSA_I5_DF(b, kMSALanesByte); ++ break; ++ case MSA_HALF: ++ MSA_I5_DF(h, kMSALanesHalf); ++ break; ++ case MSA_WORD: ++ MSA_I5_DF(w, kMSALanesWord); ++ break; ++ case MSA_DWORD: ++ MSA_I5_DF(d, kMSALanesDword); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++#undef MSA_I5_DF ++} ++ ++void Simulator::DecodeTypeMsaI10() { ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ DCHECK(CpuFeatures::IsSupported(SW64_SIMD)); ++ uint32_t opcode = instr_.InstructionBits() & kMsaI5Mask; ++ int64_t s10 = (static_cast(instr_.MsaImm10Value()) << 54) >> 54; ++ msa_reg_t wd; ++ ++#define MSA_I10_DF(elem, num_of_lanes, T) \ ++ for (int i = 0; i < num_of_lanes; ++i) { \ ++ wd.elem[i] = static_cast(s10); \ ++ } \ ++ set_msa_register(instr_.WdValue(), wd.elem); \ ++ TraceMSARegWr(wd.elem) ++ ++ if (opcode == LDI) { ++ switch (DecodeMsaDataFormat()) { ++ case MSA_BYTE: ++ MSA_I10_DF(b, kMSALanesByte, int8_t); ++ break; ++ case MSA_HALF: ++ MSA_I10_DF(h, kMSALanesHalf, int16_t); ++ break; ++ case MSA_WORD: ++ MSA_I10_DF(w, kMSALanesWord, int32_t); ++ break; ++ case MSA_DWORD: ++ MSA_I10_DF(d, kMSALanesDword, int64_t); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } else { ++ UNREACHABLE(); ++ } ++#undef MSA_I10_DF ++} ++ ++void Simulator::DecodeTypeMsaELM() { ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ DCHECK(CpuFeatures::IsSupported(SW64_SIMD)); ++ uint32_t opcode = instr_.InstructionBits() & kMsaLongerELMMask; ++ int32_t n = instr_.MsaElmNValue(); ++ int64_t alu_out; ++ switch (opcode) { ++ case CTCMSA: ++ DCHECK_EQ(sa(), kMSACSRRegister); ++ MSACSR_ = bit_cast( ++ static_cast(registers_[rd_reg()] & kMaxUInt32)); ++ TraceRegWr(static_cast(MSACSR_)); ++ break; ++ case CFCMSA: ++ DCHECK_EQ(rd_reg(), kMSACSRRegister); ++ // FIXME: SetResult(sa(), static_cast(bit_cast(MSACSR_))); ++ break; ++ case MOVE_V: { ++ msa_reg_t ws; ++ get_msa_register(ws_reg(), &ws); ++ set_msa_register(wd_reg(), &ws); ++ TraceMSARegWr(&ws); ++ } break; ++ default: ++ opcode &= kMsaELMMask; ++ switch (opcode) { ++ case COPY_S: ++ case COPY_U: { ++ msa_reg_t ws; ++ switch (DecodeMsaDataFormat()) { ++ case MSA_BYTE: ++ DCHECK_LT(n, kMSALanesByte); ++ get_msa_register(instr_.WsValue(), ws.b); ++ alu_out = static_cast(ws.b[n]); ++ SetResult(wd_reg(), ++ (opcode == COPY_U) ? alu_out & 0xFFu : alu_out); ++ break; ++ case MSA_HALF: ++ DCHECK_LT(n, kMSALanesHalf); ++ get_msa_register(instr_.WsValue(), ws.h); ++ alu_out = static_cast(ws.h[n]); ++ SetResult(wd_reg(), ++ (opcode == COPY_U) ? alu_out & 0xFFFFu : alu_out); ++ break; ++ case MSA_WORD: ++ DCHECK_LT(n, kMSALanesWord); ++ get_msa_register(instr_.WsValue(), ws.w); ++ alu_out = static_cast(ws.w[n]); ++ SetResult(wd_reg(), ++ (opcode == COPY_U) ? 
alu_out & 0xFFFFFFFFu : alu_out); ++ break; ++ case MSA_DWORD: ++ DCHECK_LT(n, kMSALanesDword); ++ get_msa_register(instr_.WsValue(), ws.d); ++ alu_out = static_cast(ws.d[n]); ++ SetResult(wd_reg(), alu_out); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } break; ++ case INSERT: { ++ msa_reg_t wd; ++ switch (DecodeMsaDataFormat()) { ++ case MSA_BYTE: { ++ DCHECK_LT(n, kMSALanesByte); ++ int64_t rs = get_register(instr_.WsValue()); ++ get_msa_register(instr_.WdValue(), wd.b); ++ wd.b[n] = rs & 0xFFu; ++ set_msa_register(instr_.WdValue(), wd.b); ++ TraceMSARegWr(wd.b); ++ break; ++ } ++ case MSA_HALF: { ++ DCHECK_LT(n, kMSALanesHalf); ++ int64_t rs = get_register(instr_.WsValue()); ++ get_msa_register(instr_.WdValue(), wd.h); ++ wd.h[n] = rs & 0xFFFFu; ++ set_msa_register(instr_.WdValue(), wd.h); ++ TraceMSARegWr(wd.h); ++ break; ++ } ++ case MSA_WORD: { ++ DCHECK_LT(n, kMSALanesWord); ++ int64_t rs = get_register(instr_.WsValue()); ++ get_msa_register(instr_.WdValue(), wd.w); ++ wd.w[n] = rs & 0xFFFFFFFFu; ++ set_msa_register(instr_.WdValue(), wd.w); ++ TraceMSARegWr(wd.w); ++ break; ++ } ++ case MSA_DWORD: { ++ DCHECK_LT(n, kMSALanesDword); ++ int64_t rs = get_register(instr_.WsValue()); ++ get_msa_register(instr_.WdValue(), wd.d); ++ wd.d[n] = rs; ++ set_msa_register(instr_.WdValue(), wd.d); ++ TraceMSARegWr(wd.d); ++ break; ++ } ++ default: ++ UNREACHABLE(); ++ } ++ } break; ++ case SLDI: { ++ uint8_t v[32]; ++ msa_reg_t ws; ++ msa_reg_t wd; ++ get_msa_register(ws_reg(), &ws); ++ get_msa_register(wd_reg(), &wd); ++#define SLDI_DF(s, k) \ ++ for (unsigned i = 0; i < s; i++) { \ ++ v[i] = ws.b[s * k + i]; \ ++ v[i + s] = wd.b[s * k + i]; \ ++ } \ ++ for (unsigned i = 0; i < s; i++) { \ ++ wd.b[s * k + i] = v[i + n]; \ ++ } ++ switch (DecodeMsaDataFormat()) { ++ case MSA_BYTE: ++ DCHECK(n < kMSALanesByte); ++ SLDI_DF(kMSARegSize / sizeof(int8_t) / kBitsPerByte, 0) ++ break; ++ case MSA_HALF: ++ DCHECK(n < kMSALanesHalf); ++ for (int k = 0; k < 2; ++k) { ++ SLDI_DF(kMSARegSize / sizeof(int16_t) / kBitsPerByte, k) ++ } ++ break; ++ case MSA_WORD: ++ DCHECK(n < kMSALanesWord); ++ for (int k = 0; k < 4; ++k) { ++ SLDI_DF(kMSARegSize / sizeof(int32_t) / kBitsPerByte, k) ++ } ++ break; ++ case MSA_DWORD: ++ DCHECK(n < kMSALanesDword); ++ for (int k = 0; k < 8; ++k) { ++ SLDI_DF(kMSARegSize / sizeof(int64_t) / kBitsPerByte, k) ++ } ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ set_msa_register(wd_reg(), &wd); ++ TraceMSARegWr(&wd); ++ } break; ++#undef SLDI_DF ++ case SPLATI: ++ case INSVE: ++ UNIMPLEMENTED(); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ break; ++ } ++} ++ ++template ++T Simulator::MsaBitInstrHelper(uint32_t opcode, T wd, T ws, int32_t m) { ++ using uT = typename std::make_unsigned::type; ++ T res; ++ switch (opcode) { ++ case SLLI: ++ res = static_cast(ws << m); ++ break; ++ case SRAI: ++ res = static_cast(ArithmeticShiftRight(ws, m)); ++ break; ++ case SRLI: ++ res = static_cast(static_cast(ws) >> m); ++ break; ++ case BCLRI: ++ res = static_cast(static_cast(~(1ull << m)) & ws); ++ break; ++ case BSETI: ++ res = static_cast(static_cast(1ull << m) | ws); ++ break; ++ case BNEGI: ++ res = static_cast(static_cast(1ull << m) ^ ws); ++ break; ++ case BINSLI: { ++ int elem_size = 8 * sizeof(T); ++ int bits = m + 1; ++ if (bits == elem_size) { ++ res = static_cast(ws); ++ } else { ++ uint64_t mask = ((1ull << bits) - 1) << (elem_size - bits); ++ res = static_cast((static_cast(mask) & ws) | ++ (static_cast(~mask) & wd)); ++ } ++ } break; ++ case BINSRI: { ++ int elem_size = 8 * 
sizeof(T); ++ int bits = m + 1; ++ if (bits == elem_size) { ++ res = static_cast(ws); ++ } else { ++ uint64_t mask = (1ull << bits) - 1; ++ res = static_cast((static_cast(mask) & ws) | ++ (static_cast(~mask) & wd)); ++ } ++ } break; ++ case SAT_S: { ++#define M_MAX_INT(x) static_cast((1LL << ((x)-1)) - 1) ++#define M_MIN_INT(x) static_cast(-(1LL << ((x)-1))) ++ int shift = 64 - 8 * sizeof(T); ++ int64_t ws_i64 = (static_cast(ws) << shift) >> shift; ++ res = static_cast(ws_i64 < M_MIN_INT(m + 1) ++ ? M_MIN_INT(m + 1) ++ : ws_i64 > M_MAX_INT(m + 1) ? M_MAX_INT(m + 1) ++ : ws_i64); ++#undef M_MAX_INT ++#undef M_MIN_INT ++ } break; ++ case SAT_U: { ++#define M_MAX_UINT(x) static_cast(-1ULL >> (64 - (x))) ++ uint64_t mask = static_cast(-1ULL >> (64 - 8 * sizeof(T))); ++ uint64_t ws_u64 = static_cast(ws) & mask; ++ res = static_cast(ws_u64 < M_MAX_UINT(m + 1) ? ws_u64 ++ : M_MAX_UINT(m + 1)); ++#undef M_MAX_UINT ++ } break; ++ case SRARI: ++ if (!m) { ++ res = static_cast(ws); ++ } else { ++ res = static_cast(ArithmeticShiftRight(ws, m)) + ++ static_cast((ws >> (m - 1)) & 0x1); ++ } ++ break; ++ case SRLRI: ++ if (!m) { ++ res = static_cast(ws); ++ } else { ++ res = static_cast(static_cast(ws) >> m) + ++ static_cast((ws >> (m - 1)) & 0x1); ++ } ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ return res; ++} ++ ++void Simulator::DecodeTypeMsaBIT() { ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ DCHECK(CpuFeatures::IsSupported(SW64_SIMD)); ++ uint32_t opcode = instr_.InstructionBits() & kMsaBITMask; ++ int32_t m = instr_.MsaBitMValue(); ++ msa_reg_t wd, ws; ++ ++#define MSA_BIT_DF(elem, num_of_lanes) \ ++ get_msa_register(instr_.WsValue(), ws.elem); \ ++ if (opcode == BINSLI || opcode == BINSRI) { \ ++ get_msa_register(instr_.WdValue(), wd.elem); \ ++ } \ ++ for (int i = 0; i < num_of_lanes; i++) { \ ++ wd.elem[i] = MsaBitInstrHelper(opcode, wd.elem[i], ws.elem[i], m); \ ++ } \ ++ set_msa_register(instr_.WdValue(), wd.elem); \ ++ TraceMSARegWr(wd.elem) ++ ++ switch (DecodeMsaDataFormat()) { ++ case MSA_BYTE: ++ DCHECK(m < kMSARegSize / kMSALanesByte); ++ MSA_BIT_DF(b, kMSALanesByte); ++ break; ++ case MSA_HALF: ++ DCHECK(m < kMSARegSize / kMSALanesHalf); ++ MSA_BIT_DF(h, kMSALanesHalf); ++ break; ++ case MSA_WORD: ++ DCHECK(m < kMSARegSize / kMSALanesWord); ++ MSA_BIT_DF(w, kMSALanesWord); ++ break; ++ case MSA_DWORD: ++ DCHECK(m < kMSARegSize / kMSALanesDword); ++ MSA_BIT_DF(d, kMSALanesDword); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++#undef MSA_BIT_DF ++} ++ ++void Simulator::DecodeTypeMsaMI10() { ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ DCHECK(CpuFeatures::IsSupported(SW64_SIMD)); ++ uint32_t opcode = instr_.InstructionBits() & kMsaMI10Mask; ++ int64_t s10 = (static_cast(instr_.MsaImmMI10Value()) << 54) >> 54; ++ int64_t rs = get_register(instr_.WsValue()); ++ int64_t addr; ++ msa_reg_t wd; ++ ++#define MSA_MI10_LOAD(elem, num_of_lanes, T) \ ++ for (int i = 0; i < num_of_lanes; ++i) { \ ++ addr = rs + (s10 + i) * sizeof(T); \ ++ wd.elem[i] = ReadMem(addr, instr_.instr()); \ ++ } \ ++ set_msa_register(instr_.WdValue(), wd.elem); ++ ++#define MSA_MI10_STORE(elem, num_of_lanes, T) \ ++ get_msa_register(instr_.WdValue(), wd.elem); \ ++ for (int i = 0; i < num_of_lanes; ++i) { \ ++ addr = rs + (s10 + i) * sizeof(T); \ ++ WriteMem(addr, wd.elem[i], instr_.instr()); \ ++ } ++ ++ if (opcode == MSA_LD) { ++ switch (DecodeMsaDataFormat()) { ++ case MSA_BYTE: ++ MSA_MI10_LOAD(b, kMSALanesByte, int8_t); ++ break; ++ case MSA_HALF: ++ MSA_MI10_LOAD(h, kMSALanesHalf, int16_t); ++ break; ++ case MSA_WORD: ++ 
MSA_MI10_LOAD(w, kMSALanesWord, int32_t); ++ break; ++ case MSA_DWORD: ++ MSA_MI10_LOAD(d, kMSALanesDword, int64_t); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } else if (opcode == MSA_ST) { ++ switch (DecodeMsaDataFormat()) { ++ case MSA_BYTE: ++ MSA_MI10_STORE(b, kMSALanesByte, int8_t); ++ break; ++ case MSA_HALF: ++ MSA_MI10_STORE(h, kMSALanesHalf, int16_t); ++ break; ++ case MSA_WORD: ++ MSA_MI10_STORE(w, kMSALanesWord, int32_t); ++ break; ++ case MSA_DWORD: ++ MSA_MI10_STORE(d, kMSALanesDword, int64_t); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } else { ++ UNREACHABLE(); ++ } ++ ++#undef MSA_MI10_LOAD ++#undef MSA_MI10_STORE ++} ++ ++template ++T Simulator::Msa3RInstrHelper(uint32_t opcode, T wd, T ws, T wt) { ++ using uT = typename std::make_unsigned::type; ++ T res; ++ int wt_modulo = wt % (sizeof(T) * 8); ++ switch (opcode) { ++ case SLL_MSA: ++ res = static_cast(ws << wt_modulo); ++ break; ++ case SRA_MSA: ++ res = static_cast(ArithmeticShiftRight(ws, wt_modulo)); ++ break; ++ case SRL_MSA: ++ res = static_cast(static_cast(ws) >> wt_modulo); ++ break; ++ case BCLR: ++ res = static_cast(static_cast(~(1ull << wt_modulo)) & ws); ++ break; ++ case BSET: ++ res = static_cast(static_cast(1ull << wt_modulo) | ws); ++ break; ++ case BNEG: ++ res = static_cast(static_cast(1ull << wt_modulo) ^ ws); ++ break; ++ case BINSL: { ++ int elem_size = 8 * sizeof(T); ++ int bits = wt_modulo + 1; ++ if (bits == elem_size) { ++ res = static_cast(ws); ++ } else { ++ uint64_t mask = ((1ull << bits) - 1) << (elem_size - bits); ++ res = static_cast((static_cast(mask) & ws) | ++ (static_cast(~mask) & wd)); ++ } ++ } break; ++ case BINSR: { ++ int elem_size = 8 * sizeof(T); ++ int bits = wt_modulo + 1; ++ if (bits == elem_size) { ++ res = static_cast(ws); ++ } else { ++ uint64_t mask = (1ull << bits) - 1; ++ res = static_cast((static_cast(mask) & ws) | ++ (static_cast(~mask) & wd)); ++ } ++ } break; ++ case ADDV: ++ res = ws + wt; ++ break; ++ case SUBV: ++ res = ws - wt; ++ break; ++ case MAX_S: ++ res = Max(ws, wt); ++ break; ++ case MAX_U: ++ res = static_cast(Max(static_cast(ws), static_cast(wt))); ++ break; ++ case MIN_S: ++ res = Min(ws, wt); ++ break; ++ case MIN_U: ++ res = static_cast(Min(static_cast(ws), static_cast(wt))); ++ break; ++ case MAX_A: ++ // We use negative abs in order to avoid problems ++ // with corner case for MIN_INT ++ res = Nabs(ws) < Nabs(wt) ? ws : wt; ++ break; ++ case MIN_A: ++ // We use negative abs in order to avoid problems ++ // with corner case for MIN_INT ++ res = Nabs(ws) > Nabs(wt) ? ws : wt; ++ break; ++ case CEQ: ++ res = static_cast(!Compare(ws, wt) ? -1ull : 0ull); ++ break; ++ case CLT_S: ++ res = static_cast((Compare(ws, wt) == -1) ? -1ull : 0ull); ++ break; ++ case CLT_U: ++ res = static_cast( ++ (Compare(static_cast(ws), static_cast(wt)) == -1) ? -1ull ++ : 0ull); ++ break; ++ case CLE_S: ++ res = static_cast((Compare(ws, wt) != 1) ? -1ull : 0ull); ++ break; ++ case CLE_U: ++ res = static_cast( ++ (Compare(static_cast(ws), static_cast(wt)) != 1) ? 
-1ull ++ : 0ull); ++ break; ++ case ADD_A: ++ res = static_cast(Abs(ws) + Abs(wt)); ++ break; ++ case ADDS_A: { ++ T ws_nabs = Nabs(ws); ++ T wt_nabs = Nabs(wt); ++ if (ws_nabs < -std::numeric_limits::max() - wt_nabs) { ++ res = std::numeric_limits::max(); ++ } else { ++ res = -(ws_nabs + wt_nabs); ++ } ++ } break; ++ case ADDS_S: ++ res = SaturateAdd(ws, wt); ++ break; ++ case ADDS_U: { ++ uT ws_u = static_cast(ws); ++ uT wt_u = static_cast(wt); ++ res = static_cast(SaturateAdd(ws_u, wt_u)); ++ } break; ++ case AVE_S: ++ res = static_cast((wt & ws) + ((wt ^ ws) >> 1)); ++ break; ++ case AVE_U: { ++ uT ws_u = static_cast(ws); ++ uT wt_u = static_cast(wt); ++ res = static_cast((wt_u & ws_u) + ((wt_u ^ ws_u) >> 1)); ++ } break; ++ case AVER_S: ++ res = static_cast((wt | ws) - ((wt ^ ws) >> 1)); ++ break; ++ case AVER_U: { ++ uT ws_u = static_cast(ws); ++ uT wt_u = static_cast(wt); ++ res = static_cast((wt_u | ws_u) - ((wt_u ^ ws_u) >> 1)); ++ } break; ++ case SUBS_S: ++ res = SaturateSub(ws, wt); ++ break; ++ case SUBS_U: { ++ uT ws_u = static_cast(ws); ++ uT wt_u = static_cast(wt); ++ res = static_cast(SaturateSub(ws_u, wt_u)); ++ } break; ++ case SUBSUS_U: { ++ uT wsu = static_cast(ws); ++ if (wt > 0) { ++ uT wtu = static_cast(wt); ++ if (wtu > wsu) { ++ res = 0; ++ } else { ++ res = static_cast(wsu - wtu); ++ } ++ } else { ++ if (wsu > std::numeric_limits::max() + wt) { ++ res = static_cast(std::numeric_limits::max()); ++ } else { ++ res = static_cast(wsu - wt); ++ } ++ } ++ } break; ++ case SUBSUU_S: { ++ uT wsu = static_cast(ws); ++ uT wtu = static_cast(wt); ++ uT wdu; ++ if (wsu > wtu) { ++ wdu = wsu - wtu; ++ if (wdu > std::numeric_limits::max()) { ++ res = std::numeric_limits::max(); ++ } else { ++ res = static_cast(wdu); ++ } ++ } else { ++ wdu = wtu - wsu; ++ CHECK(-std::numeric_limits::max() == ++ std::numeric_limits::min() + 1); ++ if (wdu <= std::numeric_limits::max()) { ++ res = -static_cast(wdu); ++ } else { ++ res = std::numeric_limits::min(); ++ } ++ } ++ } break; ++ case ASUB_S: ++ res = static_cast(Abs(ws - wt)); ++ break; ++ case ASUB_U: { ++ uT wsu = static_cast(ws); ++ uT wtu = static_cast(wt); ++ res = static_cast(wsu > wtu ? wsu - wtu : wtu - wsu); ++ } break; ++ case MULV: ++ res = ws * wt; ++ break; ++ case MADDV: ++ res = wd + ws * wt; ++ break; ++ case MSUBV: ++ res = wd - ws * wt; ++ break; ++ case DIV_S_MSA: ++ res = wt != 0 ? ws / wt : static_cast(Unpredictable); ++ break; ++ case DIV_U: ++ res = wt != 0 ? static_cast(static_cast(ws) / static_cast(wt)) ++ : static_cast(Unpredictable); ++ break; ++ case MOD_S: ++ res = wt != 0 ? ws % wt : static_cast(Unpredictable); ++ break; ++ case MOD_U: ++ res = wt != 0 ? static_cast(static_cast(ws) % static_cast(wt)) ++ : static_cast(Unpredictable); ++ break; ++ case DOTP_S: ++ case DOTP_U: ++ case DPADD_S: ++ case DPADD_U: ++ case DPSUB_S: ++ case DPSUB_U: ++ case SLD: ++ case SPLAT: ++ UNIMPLEMENTED(); ++ break; ++ case SRAR: { ++ int bit = wt_modulo == 0 ? 0 : (ws >> (wt_modulo - 1)) & 1; ++ res = static_cast(ArithmeticShiftRight(ws, wt_modulo) + bit); ++ } break; ++ case SRLR: { ++ uT wsu = static_cast(ws); ++ int bit = wt_modulo == 0 ? 
0 : (wsu >> (wt_modulo - 1)) & 1; ++ res = static_cast((wsu >> wt_modulo) + bit); ++ } break; ++ default: ++ UNREACHABLE(); ++ } ++ return res; ++} ++template ++void Msa3RInstrHelper_shuffle(const uint32_t opcode, T_reg ws, T_reg wt, ++ T_reg wd, const int i, const int num_of_lanes) { ++ T_int *ws_p, *wt_p, *wd_p; ++ ws_p = reinterpret_cast(ws); ++ wt_p = reinterpret_cast(wt); ++ wd_p = reinterpret_cast(wd); ++ switch (opcode) { ++ case PCKEV: ++ wd_p[i] = wt_p[2 * i]; ++ wd_p[i + num_of_lanes / 2] = ws_p[2 * i]; ++ break; ++ case PCKOD: ++ wd_p[i] = wt_p[2 * i + 1]; ++ wd_p[i + num_of_lanes / 2] = ws_p[2 * i + 1]; ++ break; ++ case ILVL: ++ wd_p[2 * i] = wt_p[i + num_of_lanes / 2]; ++ wd_p[2 * i + 1] = ws_p[i + num_of_lanes / 2]; ++ break; ++ case ILVR: ++ wd_p[2 * i] = wt_p[i]; ++ wd_p[2 * i + 1] = ws_p[i]; ++ break; ++ case ILVEV: ++ wd_p[2 * i] = wt_p[2 * i]; ++ wd_p[2 * i + 1] = ws_p[2 * i]; ++ break; ++ case ILVOD: ++ wd_p[2 * i] = wt_p[2 * i + 1]; ++ wd_p[2 * i + 1] = ws_p[2 * i + 1]; ++ break; ++ case VSHF: { ++ const int mask_not_valid = 0xC0; ++ const int mask_6_bits = 0x3F; ++ if ((wd_p[i] & mask_not_valid)) { ++ wd_p[i] = 0; ++ } else { ++ int k = (wd_p[i] & mask_6_bits) % (num_of_lanes * 2); ++ wd_p[i] = k >= num_of_lanes ? ws_p[k - num_of_lanes] : wt_p[k]; ++ } ++ } break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++template ++void Msa3RInstrHelper_horizontal(const uint32_t opcode, T_reg ws, T_reg wt, ++ T_reg wd, const int i, ++ const int num_of_lanes) { ++ using T_uint = typename std::make_unsigned::type; ++ using T_smaller_uint = typename std::make_unsigned::type; ++ T_int* wd_p; ++ T_smaller_int *ws_p, *wt_p; ++ ws_p = reinterpret_cast(ws); ++ wt_p = reinterpret_cast(wt); ++ wd_p = reinterpret_cast(wd); ++ T_uint* wd_pu; ++ T_smaller_uint *ws_pu, *wt_pu; ++ ws_pu = reinterpret_cast(ws); ++ wt_pu = reinterpret_cast(wt); ++ wd_pu = reinterpret_cast(wd); ++ switch (opcode) { ++ case HADD_S: ++ wd_p[i] = ++ static_cast(ws_p[2 * i + 1]) + static_cast(wt_p[2 * i]); ++ break; ++ case HADD_U: ++ wd_pu[i] = static_cast(ws_pu[2 * i + 1]) + ++ static_cast(wt_pu[2 * i]); ++ break; ++ case HSUB_S: ++ wd_p[i] = ++ static_cast(ws_p[2 * i + 1]) - static_cast(wt_p[2 * i]); ++ break; ++ case HSUB_U: ++ wd_pu[i] = static_cast(ws_pu[2 * i + 1]) - ++ static_cast(wt_pu[2 * i]); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Simulator::DecodeTypeMsa3R() { ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ DCHECK(CpuFeatures::IsSupported(SW64_SIMD)); ++ uint32_t opcode = instr_.InstructionBits() & kMsa3RMask; ++ msa_reg_t ws, wd, wt; ++ get_msa_register(ws_reg(), &ws); ++ get_msa_register(wt_reg(), &wt); ++ get_msa_register(wd_reg(), &wd); ++ switch (opcode) { ++ case HADD_S: ++ case HADD_U: ++ case HSUB_S: ++ case HSUB_U: ++#define HORIZONTAL_ARITHMETIC_DF(num_of_lanes, int_type, lesser_int_type) \ ++ for (int i = 0; i < num_of_lanes; ++i) { \ ++ Msa3RInstrHelper_horizontal( \ ++ opcode, &ws, &wt, &wd, i, num_of_lanes); \ ++ } ++ switch (DecodeMsaDataFormat()) { ++ case MSA_HALF: ++ HORIZONTAL_ARITHMETIC_DF(kMSALanesHalf, int16_t, int8_t); ++ break; ++ case MSA_WORD: ++ HORIZONTAL_ARITHMETIC_DF(kMSALanesWord, int32_t, int16_t); ++ break; ++ case MSA_DWORD: ++ HORIZONTAL_ARITHMETIC_DF(kMSALanesDword, int64_t, int32_t); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ break; ++#undef HORIZONTAL_ARITHMETIC_DF ++ case VSHF: ++#define VSHF_DF(num_of_lanes, int_type) \ ++ for (int i = 0; i < num_of_lanes; ++i) { \ ++ Msa3RInstrHelper_shuffle(opcode, &ws, &wt, &wd, i, \ ++ num_of_lanes); \ ++ } ++ 
switch (DecodeMsaDataFormat()) { ++ case MSA_BYTE: ++ VSHF_DF(kMSALanesByte, int8_t); ++ break; ++ case MSA_HALF: ++ VSHF_DF(kMSALanesHalf, int16_t); ++ break; ++ case MSA_WORD: ++ VSHF_DF(kMSALanesWord, int32_t); ++ break; ++ case MSA_DWORD: ++ VSHF_DF(kMSALanesDword, int64_t); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++#undef VSHF_DF ++ break; ++ case PCKEV: ++ case PCKOD: ++ case ILVL: ++ case ILVR: ++ case ILVEV: ++ case ILVOD: ++#define INTERLEAVE_PACK_DF(num_of_lanes, int_type) \ ++ for (int i = 0; i < num_of_lanes / 2; ++i) { \ ++ Msa3RInstrHelper_shuffle(opcode, &ws, &wt, &wd, i, \ ++ num_of_lanes); \ ++ } ++ switch (DecodeMsaDataFormat()) { ++ case MSA_BYTE: ++ INTERLEAVE_PACK_DF(kMSALanesByte, int8_t); ++ break; ++ case MSA_HALF: ++ INTERLEAVE_PACK_DF(kMSALanesHalf, int16_t); ++ break; ++ case MSA_WORD: ++ INTERLEAVE_PACK_DF(kMSALanesWord, int32_t); ++ break; ++ case MSA_DWORD: ++ INTERLEAVE_PACK_DF(kMSALanesDword, int64_t); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ break; ++#undef INTERLEAVE_PACK_DF ++ default: ++#define MSA_3R_DF(elem, num_of_lanes) \ ++ for (int i = 0; i < num_of_lanes; i++) { \ ++ wd.elem[i] = Msa3RInstrHelper(opcode, wd.elem[i], ws.elem[i], wt.elem[i]); \ ++ } ++ ++ switch (DecodeMsaDataFormat()) { ++ case MSA_BYTE: ++ MSA_3R_DF(b, kMSALanesByte); ++ break; ++ case MSA_HALF: ++ MSA_3R_DF(h, kMSALanesHalf); ++ break; ++ case MSA_WORD: ++ MSA_3R_DF(w, kMSALanesWord); ++ break; ++ case MSA_DWORD: ++ MSA_3R_DF(d, kMSALanesDword); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++#undef MSA_3R_DF ++ break; ++ } ++ set_msa_register(wd_reg(), &wd); ++ TraceMSARegWr(&wd); ++} ++ ++template ++void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt, T_reg* wd) { ++ const T_int all_ones = static_cast(-1); ++ const T_fp s_element = *reinterpret_cast(&ws); ++ const T_fp t_element = *reinterpret_cast(&wt); ++ switch (opcode) { ++ case FCUN: { ++ if (std::isnan(s_element) || std::isnan(t_element)) { ++ *wd = all_ones; ++ } else { ++ *wd = 0; ++ } ++ } break; ++ case FCEQ: { ++ if (s_element != t_element || std::isnan(s_element) || ++ std::isnan(t_element)) { ++ *wd = 0; ++ } else { ++ *wd = all_ones; ++ } ++ } break; ++ case FCUEQ: { ++ if (s_element == t_element || std::isnan(s_element) || ++ std::isnan(t_element)) { ++ *wd = all_ones; ++ } else { ++ *wd = 0; ++ } ++ } break; ++ case FCLT: { ++ if (s_element >= t_element || std::isnan(s_element) || ++ std::isnan(t_element)) { ++ *wd = 0; ++ } else { ++ *wd = all_ones; ++ } ++ } break; ++ case FCULT: { ++ if (s_element < t_element || std::isnan(s_element) || ++ std::isnan(t_element)) { ++ *wd = all_ones; ++ } else { ++ *wd = 0; ++ } ++ } break; ++ case FCLE: { ++ if (s_element > t_element || std::isnan(s_element) || ++ std::isnan(t_element)) { ++ *wd = 0; ++ } else { ++ *wd = all_ones; ++ } ++ } break; ++ case FCULE: { ++ if (s_element <= t_element || std::isnan(s_element) || ++ std::isnan(t_element)) { ++ *wd = all_ones; ++ } else { ++ *wd = 0; ++ } ++ } break; ++ case FCOR: { ++ if (std::isnan(s_element) || std::isnan(t_element)) { ++ *wd = 0; ++ } else { ++ *wd = all_ones; ++ } ++ } break; ++ case FCUNE: { ++ if (s_element != t_element || std::isnan(s_element) || ++ std::isnan(t_element)) { ++ *wd = all_ones; ++ } else { ++ *wd = 0; ++ } ++ } break; ++ case FCNE: { ++ if (s_element == t_element || std::isnan(s_element) || ++ std::isnan(t_element)) { ++ *wd = 0; ++ } else { ++ *wd = all_ones; ++ } ++ } break; ++ case FADD: ++ *wd = bit_cast(s_element + t_element); ++ break; ++ case FSUB: ++ *wd = 
bit_cast(s_element - t_element); ++ break; ++ case FMUL: ++ *wd = bit_cast(s_element * t_element); ++ break; ++ case FDIV: { ++ if (t_element == 0) { ++ *wd = bit_cast(std::numeric_limits::quiet_NaN()); ++ } else { ++ *wd = bit_cast(s_element / t_element); ++ } ++ } break; ++ case FMADD: ++ *wd = bit_cast( ++ std::fma(s_element, t_element, *reinterpret_cast(wd))); ++ break; ++ case FMSUB: ++ *wd = bit_cast( ++ std::fma(-s_element, t_element, *reinterpret_cast(wd))); ++ break; ++ case FEXP2: ++ *wd = bit_cast(std::ldexp(s_element, static_cast(wt))); ++ break; ++ case FMIN: ++ *wd = bit_cast(std::min(s_element, t_element)); ++ break; ++ case FMAX: ++ *wd = bit_cast(std::max(s_element, t_element)); ++ break; ++ case FMIN_A: { ++ *wd = bit_cast( ++ std::fabs(s_element) < std::fabs(t_element) ? s_element : t_element); ++ } break; ++ case FMAX_A: { ++ *wd = bit_cast( ++ std::fabs(s_element) > std::fabs(t_element) ? s_element : t_element); ++ } break; ++ case FSOR: ++ case FSUNE: ++ case FSNE: ++ case FSAF: ++ case FSUN: ++ case FSEQ: ++ case FSUEQ: ++ case FSLT: ++ case FSULT: ++ case FSLE: ++ case FSULE: ++ UNIMPLEMENTED(); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++template ++void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg* wd) { ++ // using T_uint = typename std::make_unsigned::type; ++ using T_uint_dbl = typename std::make_unsigned::type; ++ const T_int max_int = std::numeric_limits::max(); ++ const T_int min_int = std::numeric_limits::min(); ++ const int shift = kBitsPerByte * sizeof(T_int) - 1; ++ const T_int_dbl reg_s = ws; ++ const T_int_dbl reg_t = wt; ++ T_int_dbl product, result; ++ product = reg_s * reg_t; ++ switch (opcode) { ++ case MUL_Q: { ++ const T_int_dbl min_fix_dbl = ++ bit_cast(std::numeric_limits::min()) >> 1U; ++ const T_int_dbl max_fix_dbl = std::numeric_limits::max() >> 1U; ++ if (product == min_fix_dbl) { ++ product = max_fix_dbl; ++ } ++ *wd = static_cast(product >> shift); ++ } break; ++ case MADD_Q: { ++ result = (product + (static_cast(*wd) << shift)) >> shift; ++ *wd = static_cast( ++ result > max_int ? max_int : result < min_int ? min_int : result); ++ } break; ++ case MSUB_Q: { ++ result = (-product + (static_cast(*wd) << shift)) >> shift; ++ *wd = static_cast( ++ result > max_int ? max_int : result < min_int ? min_int : result); ++ } break; ++ case MULR_Q: { ++ const T_int_dbl min_fix_dbl = ++ bit_cast(std::numeric_limits::min()) >> 1U; ++ const T_int_dbl max_fix_dbl = std::numeric_limits::max() >> 1U; ++ if (product == min_fix_dbl) { ++ *wd = static_cast(max_fix_dbl >> shift); ++ break; ++ } ++ *wd = static_cast((product + (1 << (shift - 1))) >> shift); ++ } break; ++ case MADDR_Q: { ++ result = (product + (static_cast(*wd) << shift) + ++ (1 << (shift - 1))) >> ++ shift; ++ *wd = static_cast( ++ result > max_int ? max_int : result < min_int ? min_int : result); ++ } break; ++ case MSUBR_Q: { ++ result = (-product + (static_cast(*wd) << shift) + ++ (1 << (shift - 1))) >> ++ shift; ++ *wd = static_cast( ++ result > max_int ? max_int : result < min_int ? 
min_int : result); ++ } break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Simulator::DecodeTypeMsa3RF() { ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ DCHECK(CpuFeatures::IsSupported(SW64_SIMD)); ++ uint32_t opcode = instr_.InstructionBits() & kMsa3RFMask; ++ msa_reg_t wd, ws, wt; ++ if (opcode != FCAF) { ++ get_msa_register(ws_reg(), &ws); ++ get_msa_register(wt_reg(), &wt); ++ } ++ switch (opcode) { ++ case FCAF: ++ wd.d[0] = 0; ++ wd.d[1] = 0; ++ break; ++ case FEXDO: ++#define PACK_FLOAT16(sign, exp, frac) \ ++ static_cast(((sign) << 15) + ((exp) << 10) + (frac)) ++#define FEXDO_DF(source, dst) \ ++ do { \ ++ element = source; \ ++ aSign = element >> 31; \ ++ aExp = element >> 23 & 0xFF; \ ++ aFrac = element & 0x007FFFFF; \ ++ if (aExp == 0xFF) { \ ++ if (aFrac) { \ ++ /* Input is a NaN */ \ ++ dst = 0x7DFFU; \ ++ break; \ ++ } \ ++ /* Infinity */ \ ++ dst = PACK_FLOAT16(aSign, 0x1F, 0); \ ++ break; \ ++ } else if (aExp == 0 && aFrac == 0) { \ ++ dst = PACK_FLOAT16(aSign, 0, 0); \ ++ break; \ ++ } else { \ ++ int maxexp = 29; \ ++ uint32_t mask; \ ++ uint32_t increment; \ ++ bool rounding_bumps_exp; \ ++ aFrac |= 0x00800000; \ ++ aExp -= 0x71; \ ++ if (aExp < 1) { \ ++ /* Will be denormal in halfprec */ \ ++ mask = 0x00FFFFFF; \ ++ if (aExp >= -11) { \ ++ mask >>= 11 + aExp; \ ++ } \ ++ } else { \ ++ /* Normal number in halfprec */ \ ++ mask = 0x00001FFF; \ ++ } \ ++ switch (MSACSR_ & 3) { \ ++ case kRoundToNearest: \ ++ increment = (mask + 1) >> 1; \ ++ if ((aFrac & mask) == increment) { \ ++ increment = aFrac & (increment << 1); \ ++ } \ ++ break; \ ++ case kRoundToPlusInf: \ ++ increment = aSign ? 0 : mask; \ ++ break; \ ++ case kRoundToMinusInf: \ ++ increment = aSign ? mask : 0; \ ++ break; \ ++ case kRoundToZero: \ ++ increment = 0; \ ++ break; \ ++ } \ ++ rounding_bumps_exp = (aFrac + increment >= 0x01000000); \ ++ if (aExp > maxexp || (aExp == maxexp && rounding_bumps_exp)) { \ ++ dst = PACK_FLOAT16(aSign, 0x1F, 0); \ ++ break; \ ++ } \ ++ aFrac += increment; \ ++ if (rounding_bumps_exp) { \ ++ aFrac >>= 1; \ ++ aExp++; \ ++ } \ ++ if (aExp < -10) { \ ++ dst = PACK_FLOAT16(aSign, 0, 0); \ ++ break; \ ++ } \ ++ if (aExp < 0) { \ ++ aFrac >>= -aExp; \ ++ aExp = 0; \ ++ } \ ++ dst = PACK_FLOAT16(aSign, aExp, aFrac >> 13); \ ++ } \ ++ } while (0); ++ switch (DecodeMsaDataFormat()) { ++ case MSA_HALF: ++ for (int i = 0; i < kMSALanesWord; i++) { ++ uint_fast32_t element; ++ uint_fast32_t aSign, aFrac; ++ int_fast32_t aExp; ++ FEXDO_DF(ws.uw[i], wd.uh[i + kMSALanesHalf / 2]) ++ FEXDO_DF(wt.uw[i], wd.uh[i]) ++ } ++ break; ++ case MSA_WORD: ++ for (int i = 0; i < kMSALanesDword; i++) { ++ wd.w[i + kMSALanesWord / 2] = bit_cast( ++ static_cast(bit_cast(ws.d[i]))); ++ wd.w[i] = bit_cast( ++ static_cast(bit_cast(wt.d[i]))); ++ } ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ break; ++#undef PACK_FLOAT16 ++#undef FEXDO_DF ++ case FTQ: ++#define FTQ_DF(source, dst, fp_type, int_type) \ ++ element = bit_cast(source) * \ ++ (1U << (sizeof(int_type) * kBitsPerByte - 1)); \ ++ if (element > std::numeric_limits::max()) { \ ++ dst = std::numeric_limits::max(); \ ++ } else if (element < std::numeric_limits::min()) { \ ++ dst = std::numeric_limits::min(); \ ++ } else if (std::isnan(element)) { \ ++ dst = 0; \ ++ } else { \ ++ int_type fixed_point; \ ++ round_according_to_msacsr(element, &element, &fixed_point); \ ++ dst = fixed_point; \ ++ } ++ ++ switch (DecodeMsaDataFormat()) { ++ case MSA_HALF: ++ for (int i = 0; i < kMSALanesWord; i++) { ++ float element; ++ FTQ_DF(ws.w[i], wd.h[i + 
kMSALanesHalf / 2], float, int16_t) ++ FTQ_DF(wt.w[i], wd.h[i], float, int16_t) ++ } ++ break; ++ case MSA_WORD: ++ double element; ++ for (int i = 0; i < kMSALanesDword; i++) { ++ FTQ_DF(ws.d[i], wd.w[i + kMSALanesWord / 2], double, int32_t) ++ FTQ_DF(wt.d[i], wd.w[i], double, int32_t) ++ } ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ break; ++#undef FTQ_DF ++#define MSA_3RF_DF(T1, T2, Lanes, ws, wt, wd) \ ++ for (int i = 0; i < Lanes; i++) { \ ++ Msa3RFInstrHelper(opcode, ws, wt, &(wd)); \ ++ } ++#define MSA_3RF_DF2(T1, T2, Lanes, ws, wt, wd) \ ++ for (int i = 0; i < Lanes; i++) { \ ++ Msa3RFInstrHelper2(opcode, ws, wt, &(wd)); \ ++ } ++ case MADD_Q: ++ case MSUB_Q: ++ case MADDR_Q: ++ case MSUBR_Q: ++ get_msa_register(wd_reg(), &wd); ++ V8_FALLTHROUGH; ++ case MUL_Q: ++ case MULR_Q: ++ switch (DecodeMsaDataFormat()) { ++ case MSA_HALF: ++ MSA_3RF_DF2(int16_t, int32_t, kMSALanesHalf, ws.h[i], wt.h[i], ++ wd.h[i]) ++ break; ++ case MSA_WORD: ++ MSA_3RF_DF2(int32_t, int64_t, kMSALanesWord, ws.w[i], wt.w[i], ++ wd.w[i]) ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ break; ++ default: ++ if (opcode == FMADD || opcode == FMSUB) { ++ get_msa_register(wd_reg(), &wd); ++ } ++ switch (DecodeMsaDataFormat()) { ++ case MSA_WORD: ++ MSA_3RF_DF(int32_t, float, kMSALanesWord, ws.w[i], wt.w[i], wd.w[i]) ++ break; ++ case MSA_DWORD: ++ MSA_3RF_DF(int64_t, double, kMSALanesDword, ws.d[i], wt.d[i], wd.d[i]) ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ break; ++#undef MSA_3RF_DF ++#undef MSA_3RF_DF2 ++ } ++ set_msa_register(wd_reg(), &wd); ++ TraceMSARegWr(&wd); ++} ++ ++void Simulator::DecodeTypeMsaVec() { ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ DCHECK(CpuFeatures::IsSupported(SW64_SIMD)); ++ uint32_t opcode = instr_.InstructionBits() & kMsaVECMask; ++ msa_reg_t wd, ws, wt; ++ ++ get_msa_register(instr_.WsValue(), ws.d); ++ get_msa_register(instr_.WtValue(), wt.d); ++ if (opcode == BMNZ_V || opcode == BMZ_V || opcode == BSEL_V) { ++ get_msa_register(instr_.WdValue(), wd.d); ++ } ++ ++ for (int i = 0; i < kMSALanesDword; i++) { ++ switch (opcode) { ++ case AND_V: ++ wd.d[i] = ws.d[i] & wt.d[i]; ++ break; ++ case OR_V: ++ wd.d[i] = ws.d[i] | wt.d[i]; ++ break; ++ case NOR_V: ++ wd.d[i] = ~(ws.d[i] | wt.d[i]); ++ break; ++ case XOR_V: ++ wd.d[i] = ws.d[i] ^ wt.d[i]; ++ break; ++ case BMNZ_V: ++ wd.d[i] = (wt.d[i] & ws.d[i]) | (~wt.d[i] & wd.d[i]); ++ break; ++ case BMZ_V: ++ wd.d[i] = (~wt.d[i] & ws.d[i]) | (wt.d[i] & wd.d[i]); ++ break; ++ case BSEL_V: ++ wd.d[i] = (~wd.d[i] & ws.d[i]) | (wd.d[i] & wt.d[i]); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } ++ set_msa_register(instr_.WdValue(), wd.d); ++ TraceMSARegWr(wd.d); ++} ++ ++void Simulator::DecodeTypeMsa2R() { ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ DCHECK(CpuFeatures::IsSupported(SW64_SIMD)); ++ uint32_t opcode = instr_.InstructionBits() & kMsa2RMask; ++ msa_reg_t wd, ws; ++ switch (opcode) { ++ case FILL: ++ switch (DecodeMsaDataFormat()) { ++ case MSA_BYTE: { ++ int64_t rs = get_register(instr_.WsValue()); ++ for (int i = 0; i < kMSALanesByte; i++) { ++ wd.b[i] = rs & 0xFFu; ++ } ++ set_msa_register(instr_.WdValue(), wd.b); ++ TraceMSARegWr(wd.b); ++ break; ++ } ++ case MSA_HALF: { ++ int64_t rs = get_register(instr_.WsValue()); ++ for (int i = 0; i < kMSALanesHalf; i++) { ++ wd.h[i] = rs & 0xFFFFu; ++ } ++ set_msa_register(instr_.WdValue(), wd.h); ++ TraceMSARegWr(wd.h); ++ break; ++ } ++ case MSA_WORD: { ++ int64_t rs = get_register(instr_.WsValue()); ++ for (int i = 0; i < kMSALanesWord; i++) { ++ wd.w[i] = rs & 0xFFFFFFFFu; ++ } ++ 
set_msa_register(instr_.WdValue(), wd.w); ++ TraceMSARegWr(wd.w); ++ break; ++ } ++ case MSA_DWORD: { ++ int64_t rs = get_register(instr_.WsValue()); ++ wd.d[0] = wd.d[1] = rs; ++ set_msa_register(instr_.WdValue(), wd.d); ++ TraceMSARegWr(wd.d); ++ break; ++ } ++ default: ++ UNREACHABLE(); ++ } ++ break; ++ case PCNT: ++#define PCNT_DF(elem, num_of_lanes) \ ++ get_msa_register(instr_.WsValue(), ws.elem); \ ++ for (int i = 0; i < num_of_lanes; i++) { \ ++ uint64_t u64elem = static_cast(ws.elem[i]); \ ++ wd.elem[i] = base::bits::CountPopulation(u64elem); \ ++ } \ ++ set_msa_register(instr_.WdValue(), wd.elem); \ ++ TraceMSARegWr(wd.elem) ++ ++ switch (DecodeMsaDataFormat()) { ++ case MSA_BYTE: ++ PCNT_DF(ub, kMSALanesByte); ++ break; ++ case MSA_HALF: ++ PCNT_DF(uh, kMSALanesHalf); ++ break; ++ case MSA_WORD: ++ PCNT_DF(uw, kMSALanesWord); ++ break; ++ case MSA_DWORD: ++ PCNT_DF(ud, kMSALanesDword); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++#undef PCNT_DF ++ break; ++ case NLOC: ++#define NLOC_DF(elem, num_of_lanes) \ ++ get_msa_register(instr_.WsValue(), ws.elem); \ ++ for (int i = 0; i < num_of_lanes; i++) { \ ++ const uint64_t mask = (num_of_lanes == kMSALanesDword) \ ++ ? UINT64_MAX \ ++ : (1ULL << (kMSARegSize / num_of_lanes)) - 1; \ ++ uint64_t u64elem = static_cast(~ws.elem[i]) & mask; \ ++ wd.elem[i] = base::bits::CountLeadingZeros64(u64elem) - \ ++ (64 - kMSARegSize / num_of_lanes); \ ++ } \ ++ set_msa_register(instr_.WdValue(), wd.elem); \ ++ TraceMSARegWr(wd.elem) ++ ++ switch (DecodeMsaDataFormat()) { ++ case MSA_BYTE: ++ NLOC_DF(ub, kMSALanesByte); ++ break; ++ case MSA_HALF: ++ NLOC_DF(uh, kMSALanesHalf); ++ break; ++ case MSA_WORD: ++ NLOC_DF(uw, kMSALanesWord); ++ break; ++ case MSA_DWORD: ++ NLOC_DF(ud, kMSALanesDword); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++#undef NLOC_DF ++ break; ++ case NLZC: ++#define NLZC_DF(elem, num_of_lanes) \ ++ get_msa_register(instr_.WsValue(), ws.elem); \ ++ for (int i = 0; i < num_of_lanes; i++) { \ ++ uint64_t u64elem = static_cast(ws.elem[i]); \ ++ wd.elem[i] = base::bits::CountLeadingZeros64(u64elem) - \ ++ (64 - kMSARegSize / num_of_lanes); \ ++ } \ ++ set_msa_register(instr_.WdValue(), wd.elem); \ ++ TraceMSARegWr(wd.elem) ++ ++ switch (DecodeMsaDataFormat()) { ++ case MSA_BYTE: ++ NLZC_DF(ub, kMSALanesByte); ++ break; ++ case MSA_HALF: ++ NLZC_DF(uh, kMSALanesHalf); ++ break; ++ case MSA_WORD: ++ NLZC_DF(uw, kMSALanesWord); ++ break; ++ case MSA_DWORD: ++ NLZC_DF(ud, kMSALanesDword); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++#undef NLZC_DF ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++#define BIT(n) (0x1LL << n) ++#define QUIET_BIT_S(nan) (bit_cast(nan) & BIT(22)) ++#define QUIET_BIT_D(nan) (bit_cast(nan) & BIT(51)) ++static inline bool isSnan(float fp) { return !QUIET_BIT_S(fp); } ++static inline bool isSnan(double fp) { return !QUIET_BIT_D(fp); } ++#undef QUIET_BIT_S ++#undef QUIET_BIT_D ++ ++template ++T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst* dst, ++ Simulator* sim) { ++ using T_uint = typename std::make_unsigned::type; ++ switch (opcode) { ++ case FCLASS: { ++#define SNAN_BIT BIT(0) ++#define QNAN_BIT BIT(1) ++#define NEG_INFINITY_BIT BIT(2) ++#define NEG_NORMAL_BIT BIT(3) ++#define NEG_SUBNORMAL_BIT BIT(4) ++#define NEG_ZERO_BIT BIT(5) ++#define POS_INFINITY_BIT BIT(6) ++#define POS_NORMAL_BIT BIT(7) ++#define POS_SUBNORMAL_BIT BIT(8) ++#define POS_ZERO_BIT BIT(9) ++ T_fp element = *reinterpret_cast(&src); ++ switch (std::fpclassify(element)) { ++ case FP_INFINITE: ++ if 
(std::signbit(element)) { ++ *dst = NEG_INFINITY_BIT; ++ } else { ++ *dst = POS_INFINITY_BIT; ++ } ++ break; ++ case FP_NAN: ++ if (isSnan(element)) { ++ *dst = SNAN_BIT; ++ } else { ++ *dst = QNAN_BIT; ++ } ++ break; ++ case FP_NORMAL: ++ if (std::signbit(element)) { ++ *dst = NEG_NORMAL_BIT; ++ } else { ++ *dst = POS_NORMAL_BIT; ++ } ++ break; ++ case FP_SUBNORMAL: ++ if (std::signbit(element)) { ++ *dst = NEG_SUBNORMAL_BIT; ++ } else { ++ *dst = POS_SUBNORMAL_BIT; ++ } ++ break; ++ case FP_ZERO: ++ if (std::signbit(element)) { ++ *dst = NEG_ZERO_BIT; ++ } else { ++ *dst = POS_ZERO_BIT; ++ } ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ break; ++ } ++#undef BIT ++#undef SNAN_BIT ++#undef QNAN_BIT ++#undef NEG_INFINITY_BIT ++#undef NEG_NORMAL_BIT ++#undef NEG_SUBNORMAL_BIT ++#undef NEG_ZERO_BIT ++#undef POS_INFINITY_BIT ++#undef POS_NORMAL_BIT ++#undef POS_SUBNORMAL_BIT ++#undef POS_ZERO_BIT ++ case FTRUNC_S: { ++ T_fp element = bit_cast(src); ++ const T_int max_int = std::numeric_limits::max(); ++ const T_int min_int = std::numeric_limits::min(); ++ if (std::isnan(element)) { ++ *dst = 0; ++ } else if (element >= max_int || element <= min_int) { ++ *dst = element >= max_int ? max_int : min_int; ++ } else { ++ *dst = static_cast(std::trunc(element)); ++ } ++ break; ++ } ++ case FTRUNC_U: { ++ T_fp element = bit_cast(src); ++ const T_uint max_int = std::numeric_limits::max(); ++ if (std::isnan(element)) { ++ *dst = 0; ++ } else if (element >= max_int || element <= 0) { ++ *dst = element >= max_int ? max_int : 0; ++ } else { ++ *dst = static_cast(std::trunc(element)); ++ } ++ break; ++ } ++ case FSQRT: { ++ T_fp element = bit_cast(src); ++ if (element < 0 || std::isnan(element)) { ++ *dst = bit_cast(std::numeric_limits::quiet_NaN()); ++ } else { ++ *dst = bit_cast(std::sqrt(element)); ++ } ++ break; ++ } ++ case FRSQRT: { ++ T_fp element = bit_cast(src); ++ if (element < 0 || std::isnan(element)) { ++ *dst = bit_cast(std::numeric_limits::quiet_NaN()); ++ } else { ++ *dst = bit_cast(1 / std::sqrt(element)); ++ } ++ break; ++ } ++ case FRCP: { ++ T_fp element = bit_cast(src); ++ if (std::isnan(element)) { ++ *dst = bit_cast(std::numeric_limits::quiet_NaN()); ++ } else { ++ *dst = bit_cast(1 / element); ++ } ++ break; ++ } ++ case FRINT: { ++ T_fp element = bit_cast(src); ++ if (std::isnan(element)) { ++ *dst = bit_cast(std::numeric_limits::quiet_NaN()); ++ } else { ++ T_int dummy; ++ sim->round_according_to_msacsr(element, &element, &dummy); ++ *dst = bit_cast(element); ++ } ++ break; ++ } ++ case FLOG2: { ++ T_fp element = bit_cast(src); ++ switch (std::fpclassify(element)) { ++ case FP_NORMAL: ++ case FP_SUBNORMAL: ++ *dst = bit_cast(std::logb(element)); ++ break; ++ case FP_ZERO: ++ *dst = bit_cast(-std::numeric_limits::infinity()); ++ break; ++ case FP_NAN: ++ *dst = bit_cast(std::numeric_limits::quiet_NaN()); ++ break; ++ case FP_INFINITE: ++ if (element < 0) { ++ *dst = bit_cast(std::numeric_limits::quiet_NaN()); ++ } else { ++ *dst = bit_cast(std::numeric_limits::infinity()); ++ } ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ break; ++ } ++ case FTINT_S: { ++ T_fp element = bit_cast(src); ++ const T_int max_int = std::numeric_limits::max(); ++ const T_int min_int = std::numeric_limits::min(); ++ if (std::isnan(element)) { ++ *dst = 0; ++ } else if (element < min_int || element > max_int) { ++ *dst = element > max_int ? 
max_int : min_int; ++ } else { ++ sim->round_according_to_msacsr(element, &element, dst); ++ } ++ break; ++ } ++ case FTINT_U: { ++ T_fp element = bit_cast(src); ++ const T_uint max_uint = std::numeric_limits::max(); ++ if (std::isnan(element)) { ++ *dst = 0; ++ } else if (element < 0 || element > max_uint) { ++ *dst = element > max_uint ? max_uint : 0; ++ } else { ++ T_uint res; ++ sim->round_according_to_msacsr(element, &element, &res); ++ *dst = *reinterpret_cast(&res); ++ } ++ break; ++ } ++ case FFINT_S: ++ *dst = bit_cast(static_cast(src)); ++ break; ++ case FFINT_U: ++ using uT_src = typename std::make_unsigned::type; ++ *dst = bit_cast(static_cast(bit_cast(src))); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ return 0; ++} ++ ++template ++T_int Msa2RFInstrHelper2(uint32_t opcode, T_reg ws, int i) { ++ switch (opcode) { ++#define EXTRACT_FLOAT16_SIGN(fp16) (fp16 >> 15) ++#define EXTRACT_FLOAT16_EXP(fp16) (fp16 >> 10 & 0x1F) ++#define EXTRACT_FLOAT16_FRAC(fp16) (fp16 & 0x3FF) ++#define PACK_FLOAT32(sign, exp, frac) \ ++ static_cast(((sign) << 31) + ((exp) << 23) + (frac)) ++#define FEXUP_DF(src_index) \ ++ uint_fast16_t element = ws.uh[src_index]; \ ++ uint_fast32_t aSign, aFrac; \ ++ int_fast32_t aExp; \ ++ aSign = EXTRACT_FLOAT16_SIGN(element); \ ++ aExp = EXTRACT_FLOAT16_EXP(element); \ ++ aFrac = EXTRACT_FLOAT16_FRAC(element); \ ++ if (V8_LIKELY(aExp && aExp != 0x1F)) { \ ++ return PACK_FLOAT32(aSign, aExp + 0x70, aFrac << 13); \ ++ } else if (aExp == 0x1F) { \ ++ if (aFrac) { \ ++ return bit_cast(std::numeric_limits::quiet_NaN()); \ ++ } else { \ ++ return bit_cast(std::numeric_limits::infinity()) | \ ++ static_cast(aSign) << 31; \ ++ } \ ++ } else { \ ++ if (aFrac == 0) { \ ++ return PACK_FLOAT32(aSign, 0, 0); \ ++ } else { \ ++ int_fast16_t shiftCount = \ ++ base::bits::CountLeadingZeros32(static_cast(aFrac)) - 21; \ ++ aFrac <<= shiftCount; \ ++ aExp = -shiftCount; \ ++ return PACK_FLOAT32(aSign, aExp + 0x70, aFrac << 13); \ ++ } \ ++ } ++ case FEXUPL: ++ if (std::is_same::value) { ++ FEXUP_DF(i + kMSALanesWord) ++ } else { ++ return bit_cast( ++ static_cast(bit_cast(ws.w[i + kMSALanesDword]))); ++ } ++ case FEXUPR: ++ if (std::is_same::value) { ++ FEXUP_DF(i) ++ } else { ++ return bit_cast(static_cast(bit_cast(ws.w[i]))); ++ } ++ case FFQL: { ++ if (std::is_same::value) { ++ return bit_cast(static_cast(ws.h[i + kMSALanesWord]) / ++ (1U << 15)); ++ } else { ++ return bit_cast(static_cast(ws.w[i + kMSALanesDword]) / ++ (1U << 31)); ++ } ++ break; ++ } ++ case FFQR: { ++ if (std::is_same::value) { ++ return bit_cast(static_cast(ws.h[i]) / (1U << 15)); ++ } else { ++ return bit_cast(static_cast(ws.w[i]) / (1U << 31)); ++ } ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } ++#undef EXTRACT_FLOAT16_SIGN ++#undef EXTRACT_FLOAT16_EXP ++#undef EXTRACT_FLOAT16_FRAC ++#undef PACK_FLOAT32 ++#undef FEXUP_DF ++} ++ ++void Simulator::DecodeTypeMsa2RF() { ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ DCHECK(CpuFeatures::IsSupported(SW64_SIMD)); ++ uint32_t opcode = instr_.InstructionBits() & kMsa2RFMask; ++ msa_reg_t wd, ws; ++ get_msa_register(ws_reg(), &ws); ++ if (opcode == FEXUPL || opcode == FEXUPR || opcode == FFQL || ++ opcode == FFQR) { ++ switch (DecodeMsaDataFormat()) { ++ case MSA_WORD: ++ for (int i = 0; i < kMSALanesWord; i++) { ++ wd.w[i] = Msa2RFInstrHelper2(opcode, ws, i); ++ } ++ break; ++ case MSA_DWORD: ++ for (int i = 0; i < kMSALanesDword; i++) { ++ wd.d[i] = Msa2RFInstrHelper2(opcode, ws, i); ++ } ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } else { ++ switch 
(DecodeMsaDataFormat()) { ++ case MSA_WORD: ++ for (int i = 0; i < kMSALanesWord; i++) { ++ Msa2RFInstrHelper(opcode, ws.w[i], &wd.w[i], this); ++ } ++ break; ++ case MSA_DWORD: ++ for (int i = 0; i < kMSALanesDword; i++) { ++ Msa2RFInstrHelper(opcode, ws.d[i], &wd.d[i], this); ++ } ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } ++ set_msa_register(wd_reg(), &wd); ++ TraceMSARegWr(&wd); ++} ++ ++void Simulator::DecodeTypeRegister() { ++ // ---------- Execution. ++ switch (instr_.OpcodeFieldRaw()) { ++ case COP1: ++ DecodeTypeRegisterCOP1(); ++ break; ++ case COP1X: ++ DecodeTypeRegisterCOP1X(); ++ break; ++ case SPECIAL: ++ DecodeTypeRegisterSPECIAL(); ++ break; ++ case SPECIAL2: ++ DecodeTypeRegisterSPECIAL2(); ++ break; ++ case SPECIAL3: ++ DecodeTypeRegisterSPECIAL3(); ++ break; ++ case MSA: ++ switch (instr_.MSAMinorOpcodeField()) { ++ case kMsaMinor3R: ++ DecodeTypeMsa3R(); ++ break; ++ case kMsaMinor3RF: ++ DecodeTypeMsa3RF(); ++ break; ++ case kMsaMinorVEC: ++ DecodeTypeMsaVec(); ++ break; ++ case kMsaMinor2R: ++ DecodeTypeMsa2R(); ++ break; ++ case kMsaMinor2RF: ++ DecodeTypeMsa2RF(); ++ break; ++ case kMsaMinorELM: ++ DecodeTypeMsaELM(); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ break; ++ // Unimplemented opcodes raised an error in the configuration step before, ++ // so we can use the default here to set the destination register in common ++ // cases. ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++ ++// Type 2: instructions using a 16, 21 or 26 bits immediate. (e.g. beq, beqc). ++void Simulator::DecodeTypeImmediate() { ++ // Instruction fields. ++ Opcode op = instr_.OpcodeFieldRaw(); ++ int32_t rs_reg = instr_.RsValue(); ++ int64_t rs = get_register(instr_.RsValue()); ++ uint64_t rs_u = static_cast(rs); ++ int32_t rt_reg = instr_.RtValue(); // Destination register. ++ int64_t rt = get_register(rt_reg); ++ int16_t imm16 = instr_.Imm16Value(); ++ int32_t imm18 = instr_.Imm18Value(); ++ ++ int32_t ft_reg = instr_.FtValue(); // Destination register. ++ ++ // Zero extended immediate. ++ uint64_t oe_imm16 = 0xFFFF & imm16; ++ // Sign extended immediate. ++ int64_t se_imm16 = imm16; ++ int64_t se_imm18 = imm18 | ((imm18 & 0x20000) ? 0xFFFFFFFFFFFC0000 : 0); ++ ++ // Next pc. ++ int64_t next_pc = bad_ra; ++ ++ // Used for conditional branch instructions. ++ bool execute_branch_delay_instruction = false; ++ ++ // Used for arithmetic instructions. ++ int64_t alu_out = 0; ++ ++ // Used for memory instructions. ++ int64_t addr = 0x0; ++ // Alignment for 32-bit integers used in LWL, LWR, etc. ++ const int kInt32AlignmentMask = sizeof(uint32_t) - 1; ++ // Alignment for 64-bit integers used in LDL, LDR, etc. ++ const int kInt64AlignmentMask = sizeof(uint64_t) - 1; ++ ++ // Branch instructions common part. 
++ auto BranchAndLinkHelper = ++ [this, &next_pc, &execute_branch_delay_instruction](bool do_branch) { ++ execute_branch_delay_instruction = true; ++ int64_t current_pc = get_pc(); ++ if (do_branch) { ++ int16_t imm16 = instr_.Imm16Value(); ++ next_pc = current_pc + (imm16 << 2) + kInstrSize; ++ set_register(31, current_pc + 2 * kInstrSize); ++ } else { ++ next_pc = current_pc + 2 * kInstrSize; ++ } ++ }; ++ ++ auto BranchHelper = [this, &next_pc, ++ &execute_branch_delay_instruction](bool do_branch) { ++ execute_branch_delay_instruction = true; ++ int64_t current_pc = get_pc(); ++ if (do_branch) { ++ int16_t imm16 = instr_.Imm16Value(); ++ next_pc = current_pc + (imm16 << 2) + kInstrSize; ++ } else { ++ next_pc = current_pc + 2 * kInstrSize; ++ } ++ }; ++ ++ auto BranchHelper_MSA = [this, &next_pc, imm16, ++ &execute_branch_delay_instruction](bool do_branch) { ++ execute_branch_delay_instruction = true; ++ int64_t current_pc = get_pc(); ++ const int32_t bitsIn16Int = sizeof(int16_t) * kBitsPerByte; ++ if (do_branch) { ++ if (FLAG_debug_code) { ++ int16_t bits = imm16 & 0xFC; ++ if (imm16 >= 0) { ++ CHECK_EQ(bits, 0); ++ } else { ++ CHECK_EQ(bits ^ 0xFC, 0); ++ } ++ } ++ // jump range :[pc + kInstrSize - 512 * kInstrSize, ++ // pc + kInstrSize + 511 * kInstrSize] ++ int16_t offset = static_cast(imm16 << (bitsIn16Int - 10)) >> ++ (bitsIn16Int - 12); ++ next_pc = current_pc + offset + kInstrSize; ++ } else { ++ next_pc = current_pc + 2 * kInstrSize; ++ } ++ }; ++ ++ auto BranchAndLinkCompactHelper = [this, &next_pc](bool do_branch, int bits) { ++ int64_t current_pc = get_pc(); ++ CheckForbiddenSlot(current_pc); ++ if (do_branch) { ++ int32_t imm = instr_.ImmValue(bits); ++ imm <<= 32 - bits; ++ imm >>= 32 - bits; ++ next_pc = current_pc + (imm << 2) + kInstrSize; ++ set_register(31, current_pc + kInstrSize); ++ } ++ }; ++ ++ auto BranchCompactHelper = [this, &next_pc](bool do_branch, int bits) { ++ int64_t current_pc = get_pc(); ++ CheckForbiddenSlot(current_pc); ++ if (do_branch) { ++ int32_t imm = instr_.ImmValue(bits); ++ imm <<= 32 - bits; ++ imm >>= 32 - bits; ++ next_pc = get_pc() + (imm << 2) + kInstrSize; ++ } ++ }; ++ ++ switch (op) { ++ // ------------- COP1. Coprocessor instructions. ++ case COP1: ++ switch (instr_.RsFieldRaw()) { ++ case BC1: { // Branch on coprocessor condition. ++ uint32_t cc = instr_.FBccValue(); ++ uint32_t fcsr_cc = get_fcsr_condition_bit(cc); ++ uint32_t cc_value = test_fcsr_bit(fcsr_cc); ++ bool do_branch = (instr_.FBtrueValue()) ? 
cc_value : !cc_value; ++ BranchHelper(do_branch); ++ break; ++ } ++ case BC1EQZ: ++ BranchHelper(!(get_fpu_register(ft_reg) & 0x1)); ++ break; ++ case BC1NEZ: ++ BranchHelper(get_fpu_register(ft_reg) & 0x1); ++ break; ++ case BZ_V: { ++ msa_reg_t wt; ++ get_msa_register(wt_reg(), &wt); ++ BranchHelper_MSA(wt.d[0] == 0 && wt.d[1] == 0); ++ } break; ++#define BZ_DF(witdh, lanes) \ ++ { \ ++ msa_reg_t wt; \ ++ get_msa_register(wt_reg(), &wt); \ ++ int i; \ ++ for (i = 0; i < lanes; ++i) { \ ++ if (wt.witdh[i] == 0) { \ ++ break; \ ++ } \ ++ } \ ++ BranchHelper_MSA(i != lanes); \ ++ } ++ case BZ_B: ++ BZ_DF(b, kMSALanesByte) ++ break; ++ case BZ_H: ++ BZ_DF(h, kMSALanesHalf) ++ break; ++ case BZ_W: ++ BZ_DF(w, kMSALanesWord) ++ break; ++ case BZ_D: ++ BZ_DF(d, kMSALanesDword) ++ break; ++#undef BZ_DF ++ case BNZ_V: { ++ msa_reg_t wt; ++ get_msa_register(wt_reg(), &wt); ++ BranchHelper_MSA(wt.d[0] != 0 || wt.d[1] != 0); ++ } break; ++#define BNZ_DF(witdh, lanes) \ ++ { \ ++ msa_reg_t wt; \ ++ get_msa_register(wt_reg(), &wt); \ ++ int i; \ ++ for (i = 0; i < lanes; ++i) { \ ++ if (wt.witdh[i] == 0) { \ ++ break; \ ++ } \ ++ } \ ++ BranchHelper_MSA(i == lanes); \ ++ } ++ case BNZ_B: ++ BNZ_DF(b, kMSALanesByte) ++ break; ++ case BNZ_H: ++ BNZ_DF(h, kMSALanesHalf) ++ break; ++ case BNZ_W: ++ BNZ_DF(w, kMSALanesWord) ++ break; ++ case BNZ_D: ++ BNZ_DF(d, kMSALanesDword) ++ break; ++#undef BNZ_DF ++ default: ++ UNREACHABLE(); ++ } ++ break; ++ // ------------- REGIMM class. ++ case REGIMM: ++ switch (instr_.RtFieldRaw()) { ++ case BLTZ: ++ BranchHelper(rs < 0); ++ break; ++ case BGEZ: ++ BranchHelper(rs >= 0); ++ break; ++ case BLTZAL: ++ BranchAndLinkHelper(rs < 0); ++ break; ++ case BGEZAL: ++ BranchAndLinkHelper(rs >= 0); ++ break; ++ case DAHI: ++ SetResult(rs_reg, rs + (se_imm16 << 32)); ++ break; ++ case DATI: ++ SetResult(rs_reg, rs + (se_imm16 << 48)); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ break; // case REGIMM. ++ // ------------- Branch instructions. ++ // When comparing to zero, the encoding of rt field is always 0, so we don't ++ // need to replace rt with zero. 
++ case BEQ: ++ BranchHelper(rs == rt); ++ break; ++ case BNE: ++ BranchHelper(rs != rt); ++ break; ++ case POP06: // BLEZALC, BGEZALC, BGEUC, BLEZ (pre-r6) ++ if (kArchVariant == kSw64r3) { ++ if (rt_reg != 0) { ++ if (rs_reg == 0) { // BLEZALC ++ BranchAndLinkCompactHelper(rt <= 0, 16); ++ } else { ++ if (rs_reg == rt_reg) { // BGEZALC ++ BranchAndLinkCompactHelper(rt >= 0, 16); ++ } else { // BGEUC ++ BranchCompactHelper( ++ static_cast(rs) >= static_cast(rt), 16); ++ } ++ } ++ } else { // BLEZ ++ BranchHelper(rs <= 0); ++ } ++ } else { // BLEZ ++ BranchHelper(rs <= 0); ++ } ++ break; ++ case POP07: // BGTZALC, BLTZALC, BLTUC, BGTZ (pre-r6) ++ if (kArchVariant == kSw64r3) { ++ if (rt_reg != 0) { ++ if (rs_reg == 0) { // BGTZALC ++ BranchAndLinkCompactHelper(rt > 0, 16); ++ } else { ++ if (rt_reg == rs_reg) { // BLTZALC ++ BranchAndLinkCompactHelper(rt < 0, 16); ++ } else { // BLTUC ++ BranchCompactHelper( ++ static_cast(rs) < static_cast(rt), 16); ++ } ++ } ++ } else { // BGTZ ++ BranchHelper(rs > 0); ++ } ++ } else { // BGTZ ++ BranchHelper(rs > 0); ++ } ++ break; ++ case POP26: // BLEZC, BGEZC, BGEC/BLEC / BLEZL (pre-r6) ++ if (kArchVariant == kSw64r3) { ++ if (rt_reg != 0) { ++ if (rs_reg == 0) { // BLEZC ++ BranchCompactHelper(rt <= 0, 16); ++ } else { ++ if (rs_reg == rt_reg) { // BGEZC ++ BranchCompactHelper(rt >= 0, 16); ++ } else { // BGEC/BLEC ++ BranchCompactHelper(rs >= rt, 16); ++ } ++ } ++ } ++ } else { // BLEZL ++ BranchAndLinkHelper(rs <= 0); ++ } ++ break; ++ case POP27: // BGTZC, BLTZC, BLTC/BGTC / BGTZL (pre-r6) ++ if (kArchVariant == kSw64r3) { ++ if (rt_reg != 0) { ++ if (rs_reg == 0) { // BGTZC ++ BranchCompactHelper(rt > 0, 16); ++ } else { ++ if (rs_reg == rt_reg) { // BLTZC ++ BranchCompactHelper(rt < 0, 16); ++ } else { // BLTC/BGTC ++ BranchCompactHelper(rs < rt, 16); ++ } ++ } ++ } ++ } else { // BGTZL ++ BranchAndLinkHelper(rs > 0); ++ } ++ break; ++ case POP66: // BEQZC, JIC ++ if (rs_reg != 0) { // BEQZC ++ BranchCompactHelper(rs == 0, 21); ++ } else { // JIC ++ next_pc = rt + imm16; ++ } ++ break; ++ case POP76: // BNEZC, JIALC ++ if (rs_reg != 0) { // BNEZC ++ BranchCompactHelper(rs != 0, 21); ++ } else { // JIALC ++ int64_t current_pc = get_pc(); ++ set_register(31, current_pc + kInstrSize); ++ next_pc = rt + imm16; ++ } ++ break; ++ case BC: ++ BranchCompactHelper(true, 26); ++ break; ++ case BALC: ++ BranchAndLinkCompactHelper(true, 26); ++ break; ++ case POP10: // BOVC, BEQZALC, BEQC / ADDI (pre-r6) ++ if (kArchVariant == kSw64r3) { ++ if (rs_reg >= rt_reg) { // BOVC ++ bool condition = !is_int32(rs) || !is_int32(rt) || !is_int32(rs + rt); ++ BranchCompactHelper(condition, 16); ++ } else { ++ if (rs_reg == 0) { // BEQZALC ++ BranchAndLinkCompactHelper(rt == 0, 16); ++ } else { // BEQC ++ BranchCompactHelper(rt == rs, 16); ++ } ++ } ++ } else { // ADDI ++ if (HaveSameSign(rs, se_imm16)) { ++ if (rs > 0) { ++ if (rs <= Registers::kMaxValue - se_imm16) { ++ SignalException(kIntegerOverflow); ++ } ++ } else if (rs < 0) { ++ if (rs >= Registers::kMinValue - se_imm16) { ++ SignalException(kIntegerUnderflow); ++ } ++ } ++ } ++ SetResult(rt_reg, rs + se_imm16); ++ } ++ break; ++ case POP30: // BNVC, BNEZALC, BNEC / DADDI (pre-r6) ++ if (kArchVariant == kSw64r3) { ++ if (rs_reg >= rt_reg) { // BNVC ++ bool condition = is_int32(rs) && is_int32(rt) && is_int32(rs + rt); ++ BranchCompactHelper(condition, 16); ++ } else { ++ if (rs_reg == 0) { // BNEZALC ++ BranchAndLinkCompactHelper(rt != 0, 16); ++ } else { // BNEC ++ BranchCompactHelper(rt != rs, 16); ++ } ++ } 
++ } ++ break; ++ // ------------- Arithmetic instructions. ++ case ADDIU: { ++ int32_t alu32_out = static_cast(rs + se_imm16); ++ // Sign-extend result of 32bit operation into 64bit register. ++ SetResult(rt_reg, static_cast(alu32_out)); ++ break; ++ } ++// case DADDIU: ++// SetResult(rt_reg, rs + se_imm16); ++// break; ++ case SLTI: ++ SetResult(rt_reg, rs < se_imm16 ? 1 : 0); ++ break; ++ case SLTIU: ++ SetResult(rt_reg, rs_u < static_cast(se_imm16) ? 1 : 0); ++ break; ++ case ANDI: ++ SetResult(rt_reg, rs & oe_imm16); ++ break; ++ case ORI: ++ SetResult(rt_reg, rs | oe_imm16); ++ break; ++ case XORI: ++ SetResult(rt_reg, rs ^ oe_imm16); ++ break; ++ case LUI: ++ if (rs_reg != 0) { ++ // AUI instruction. ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ int32_t alu32_out = static_cast(rs + (se_imm16 << 16)); ++ SetResult(rt_reg, static_cast(alu32_out)); ++ } else { ++ // LUI instruction. ++ int32_t alu32_out = static_cast(oe_imm16 << 16); ++ // Sign-extend result of 32bit operation into 64bit register. ++ SetResult(rt_reg, static_cast(alu32_out)); ++ } ++ break; ++ case DAUI: ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ DCHECK_NE(rs_reg, 0); ++ SetResult(rt_reg, rs + (se_imm16 << 16)); ++ break; ++ // ------------- Memory instructions. ++ case LB: ++ set_register(rt_reg, ReadB(rs + se_imm16)); ++ break; ++ case LH: ++ set_register(rt_reg, ReadH(rs + se_imm16, instr_.instr())); ++ break; ++ case LWL: { ++ // al_offset is offset of the effective address within an aligned word. ++ uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask; ++ uint8_t byte_shift = kInt32AlignmentMask - al_offset; ++ uint32_t mask = (1 << byte_shift * 8) - 1; ++ addr = rs + se_imm16 - al_offset; ++ int32_t val = ReadW(addr, instr_.instr()); ++ val <<= byte_shift * 8; ++ val |= rt & mask; ++ set_register(rt_reg, static_cast(val)); ++ break; ++ } ++ case LW: ++ set_register(rt_reg, ReadW(rs + se_imm16, instr_.instr())); ++ break; ++ case LWU: ++ set_register(rt_reg, ReadWU(rs + se_imm16, instr_.instr())); ++ break; ++ case LD: ++ set_register(rt_reg, Read2W(rs + se_imm16, instr_.instr())); ++ break; ++ case LBU: ++ set_register(rt_reg, ReadBU(rs + se_imm16)); ++ break; ++ case LHU: ++ set_register(rt_reg, ReadHU(rs + se_imm16, instr_.instr())); ++ break; ++ case LWR: { ++ // al_offset is offset of the effective address within an aligned word. ++ uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask; ++ uint8_t byte_shift = kInt32AlignmentMask - al_offset; ++ uint32_t mask = al_offset ? (~0 << (byte_shift + 1) * 8) : 0; ++ addr = rs + se_imm16 - al_offset; ++ alu_out = ReadW(addr, instr_.instr()); ++ alu_out = static_cast (alu_out) >> al_offset * 8; ++ alu_out |= rt & mask; ++ set_register(rt_reg, alu_out); ++ break; ++ } ++ case LDL: { ++ // al_offset is offset of the effective address within an aligned word. ++ uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask; ++ uint8_t byte_shift = kInt64AlignmentMask - al_offset; ++ uint64_t mask = (1UL << byte_shift * 8) - 1; ++ addr = rs + se_imm16 - al_offset; ++ alu_out = Read2W(addr, instr_.instr()); ++ alu_out <<= byte_shift * 8; ++ alu_out |= rt & mask; ++ set_register(rt_reg, alu_out); ++ break; ++ } ++ case LDR: { ++ // al_offset is offset of the effective address within an aligned word. ++ uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask; ++ uint8_t byte_shift = kInt64AlignmentMask - al_offset; ++ uint64_t mask = al_offset ? 
(~0UL << (byte_shift + 1) * 8) : 0UL; ++ addr = rs + se_imm16 - al_offset; ++ alu_out = Read2W(addr, instr_.instr()); ++ alu_out = alu_out >> al_offset * 8; ++ alu_out |= rt & mask; ++ set_register(rt_reg, alu_out); ++ break; ++ } ++ case SB: ++ WriteB(rs + se_imm16, static_cast(rt)); ++ break; ++ case SH: ++ WriteH(rs + se_imm16, static_cast(rt), instr_.instr()); ++ break; ++ case SWL: { ++ uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask; ++ uint8_t byte_shift = kInt32AlignmentMask - al_offset; ++ uint32_t mask = byte_shift ? (~0 << (al_offset + 1) * 8) : 0; ++ addr = rs + se_imm16 - al_offset; ++ uint64_t mem_value = ReadW(addr, instr_.instr()) & mask; ++ mem_value |= static_cast(rt) >> byte_shift * 8; ++ WriteW(addr, static_cast(mem_value), instr_.instr()); ++ break; ++ } ++ case SW: ++ WriteW(rs + se_imm16, static_cast(rt), instr_.instr()); ++ break; ++ case SD: ++ Write2W(rs + se_imm16, rt, instr_.instr()); ++ break; ++ case SWR: { ++ uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask; ++ uint32_t mask = (1 << al_offset * 8) - 1; ++ addr = rs + se_imm16 - al_offset; ++ uint64_t mem_value = ReadW(addr, instr_.instr()); ++ mem_value = (rt << al_offset * 8) | (mem_value & mask); ++ WriteW(addr, static_cast(mem_value), instr_.instr()); ++ break; ++ } ++ case SDL: { ++ uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask; ++ uint8_t byte_shift = kInt64AlignmentMask - al_offset; ++ uint64_t mask = byte_shift ? (~0UL << (al_offset + 1) * 8) : 0; ++ addr = rs + se_imm16 - al_offset; ++ uint64_t mem_value = Read2W(addr, instr_.instr()) & mask; ++ mem_value |= static_cast(rt) >> byte_shift * 8; ++ Write2W(addr, mem_value, instr_.instr()); ++ break; ++ } ++ case SDR: { ++ uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask; ++ uint64_t mask = (1UL << al_offset * 8) - 1; ++ addr = rs + se_imm16 - al_offset; ++ uint64_t mem_value = Read2W(addr, instr_.instr()); ++ mem_value = (rt << al_offset * 8) | (mem_value & mask); ++ Write2W(addr, mem_value, instr_.instr()); ++ break; ++ } ++ case LL: { ++ // LL/SC sequence cannot be simulated properly ++ DCHECK_EQ(kArchVariant, kSw64r2); ++ set_register(rt_reg, ReadW(rs + se_imm16, instr_.instr())); ++ break; ++ } ++ case SC: { ++ // LL/SC sequence cannot be simulated properly ++ DCHECK_EQ(kArchVariant, kSw64r2); ++ WriteW(rs + se_imm16, static_cast(rt), instr_.instr()); ++ set_register(rt_reg, 1); ++ break; ++ } ++ case LLD: { ++ // LL/SC sequence cannot be simulated properly ++ DCHECK_EQ(kArchVariant, kSw64r2); ++ set_register(rt_reg, ReadD(rs + se_imm16, instr_.instr())); ++ break; ++ } ++ case SCD: { ++ // LL/SC sequence cannot be simulated properly ++ DCHECK_EQ(kArchVariant, kSw64r2); ++ WriteD(rs + se_imm16, rt, instr_.instr()); ++ set_register(rt_reg, 1); ++ break; ++ } ++ case LWC1: ++ set_fpu_register(ft_reg, kFPUInvalidResult); // Trash upper 32 bits. ++ set_fpu_register_word(ft_reg, ++ ReadW(rs + se_imm16, instr_.instr(), FLOAT_DOUBLE)); ++ break; ++ case LDC1: ++ set_fpu_register_double(ft_reg, ReadD(rs + se_imm16, instr_.instr())); ++ TraceMemRd(addr, get_fpu_register(ft_reg), DOUBLE); ++ break; ++ case SWC1: { ++ int32_t alu_out_32 = static_cast(get_fpu_register(ft_reg)); ++ WriteW(rs + se_imm16, alu_out_32, instr_.instr()); ++ break; ++ } ++ case SDC1: ++ WriteD(rs + se_imm16, get_fpu_register_double(ft_reg), instr_.instr()); ++ TraceMemWr(rs + se_imm16, get_fpu_register(ft_reg), DWORD); ++ break; ++ // ------------- PC-Relative instructions. ++ case PCREL: { ++ // rt field: checking 5-bits. 
++ int32_t imm21 = instr_.Imm21Value(); ++ int64_t current_pc = get_pc(); ++ uint8_t rt = (imm21 >> kImm16Bits); ++ switch (rt) { ++ case ALUIPC: ++ addr = current_pc + (se_imm16 << 16); ++ alu_out = static_cast(~0x0FFFF) & addr; ++ break; ++ case AUIPC: ++ alu_out = current_pc + (se_imm16 << 16); ++ break; ++ default: { ++ int32_t imm19 = instr_.Imm19Value(); ++ // rt field: checking the most significant 3-bits. ++ rt = (imm21 >> kImm18Bits); ++ switch (rt) { ++ case LDPC: ++ addr = ++ (current_pc & static_cast(~0x7)) + (se_imm18 << 3); ++ alu_out = Read2W(addr, instr_.instr()); ++ break; ++ default: { ++ // rt field: checking the most significant 2-bits. ++ rt = (imm21 >> kImm19Bits); ++ switch (rt) { ++ case LWUPC: { ++ // Set sign. ++ imm19 <<= (kOpcodeBits + kRsBits + 2); ++ imm19 >>= (kOpcodeBits + kRsBits + 2); ++ addr = current_pc + (imm19 << 2); ++ uint32_t* ptr = reinterpret_cast(addr); ++ alu_out = *ptr; ++ break; ++ } ++ case LWPC: { ++ // Set sign. ++ imm19 <<= (kOpcodeBits + kRsBits + 2); ++ imm19 >>= (kOpcodeBits + kRsBits + 2); ++ addr = current_pc + (imm19 << 2); ++ int32_t* ptr = reinterpret_cast(addr); ++ alu_out = *ptr; ++ break; ++ } ++ case ADDIUPC: { ++ int64_t se_imm19 = ++ imm19 | ((imm19 & 0x40000) ? 0xFFFFFFFFFFF80000 : 0); ++ alu_out = current_pc + (se_imm19 << 2); ++ break; ++ } ++ default: ++ UNREACHABLE(); ++ break; ++ } ++ break; ++ } ++ } ++ break; ++ } ++ } ++ SetResult(rs_reg, alu_out); ++ break; ++ } ++ case SPECIAL3: { ++ switch (instr_.FunctionFieldRaw()) { ++ case LL_R6: { ++ // LL/SC sequence cannot be simulated properly ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ int64_t base = get_register(instr_.BaseValue()); ++ int32_t offset9 = instr_.Imm9Value(); ++ set_register(rt_reg, ReadW(base + offset9, instr_.instr())); ++ break; ++ } ++ case LLD_R6: { ++ // LL/SC sequence cannot be simulated properly ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ int64_t base = get_register(instr_.BaseValue()); ++ int32_t offset9 = instr_.Imm9Value(); ++ set_register(rt_reg, ReadD(base + offset9, instr_.instr())); ++ break; ++ } ++ case SC_R6: { ++ // LL/SC sequence cannot be simulated properly ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ int64_t base = get_register(instr_.BaseValue()); ++ int32_t offset9 = instr_.Imm9Value(); ++ WriteW(base + offset9, static_cast(rt), instr_.instr()); ++ set_register(rt_reg, 1); ++ break; ++ } ++ case SCD_R6: { ++ // LL/SC sequence cannot be simulated properly ++ DCHECK_EQ(kArchVariant, kSw64r3); ++ int64_t base = get_register(instr_.BaseValue()); ++ int32_t offset9 = instr_.Imm9Value(); ++ WriteD(base + offset9, rt, instr_.instr()); ++ set_register(rt_reg, 1); ++ break; ++ } ++ default: ++ UNREACHABLE(); ++ } ++ break; ++ } ++ ++ case MSA: ++ switch (instr_.MSAMinorOpcodeField()) { ++ case kMsaMinorI8: ++ DecodeTypeMsaI8(); ++ break; ++ case kMsaMinorI5: ++ DecodeTypeMsaI5(); ++ break; ++ case kMsaMinorI10: ++ DecodeTypeMsaI10(); ++ break; ++ case kMsaMinorELM: ++ DecodeTypeMsaELM(); ++ break; ++ case kMsaMinorBIT: ++ DecodeTypeMsaBIT(); ++ break; ++ case kMsaMinorMI10: ++ DecodeTypeMsaMI10(); ++ break; ++ default: ++ UNREACHABLE(); ++ break; ++ } ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ ++ if (execute_branch_delay_instruction) { ++ // Execute branch delay slot ++ // We don't check for end_sim_pc. First it should not be met as the current ++ // pc is valid. Secondly a jump should always execute its branch delay slot. 
++ Instruction* branch_delay_instr = ++ reinterpret_cast(get_pc() + kInstrSize); ++ BranchDelayInstructionDecode(branch_delay_instr); ++ } ++ ++ // If needed update pc after the branch delay execution. ++ if (next_pc != bad_ra) { ++ set_pc(next_pc); ++ } ++} ++ ++ ++// Type 3: instructions using a 26 bytes immediate. (e.g. j, jal). ++void Simulator::DecodeTypeJump() { ++ SimInstruction simInstr = instr_; ++ // Get current pc. ++ int64_t current_pc = get_pc(); ++ // Get unchanged bits of pc. ++ int64_t pc_high_bits = current_pc & 0xFFFFFFFFF0000000; ++ // Next pc. ++ int64_t next_pc = pc_high_bits | (simInstr.Imm26Value() << 2); ++ ++ // Execute branch delay slot. ++ // We don't check for end_sim_pc. First it should not be met as the current pc ++ // is valid. Secondly a jump should always execute its branch delay slot. ++ Instruction* branch_delay_instr = ++ reinterpret_cast(current_pc + kInstrSize); ++ BranchDelayInstructionDecode(branch_delay_instr); ++ ++ // Update pc and ra if necessary. ++ // Do this after the branch delay execution. ++ if (simInstr.IsLinkingInstruction()) { ++ set_register(31, current_pc + 2 * kInstrSize); ++ } ++ set_pc(next_pc); ++ pc_modified_ = true; ++} ++ ++#define SEXT(v) (v) ++ ++static float DoubleRToFloat(int64_t fa_d) ++{ ++ float r = 0.0f; ++ *reinterpret_cast(&r) = ((fa_d & 0xc000000000000000) >> 32) | ++ ((fa_d & 0x07ffffffe0000000) >> 29); ++ return r; ++} ++ ++static int64_t FloatToDoubleR(float fv) ++{ ++ int64_t r; ++ uint32_t fv_m = *reinterpret_cast(&fv); ++ uint64_t s = ((uint64_t)(fv_m >> 31)) << 63; ++ uint64_t e = fv_m << 1 >> 24; ++ if (e != 0) { ++ if (e == 255) ++ e = 0x7ff; ++ else if (e >= 128) ++ e = (e & 0x7f) | 0x400; ++ else ++ e = (e & 0x7f) | 0x380; ++ } ++ e <<= 52; ++ uint64_t f = ((uint64_t)(fv_m & 0x7fffff)) << 29; ++ s = s | e | f; ++ r = *reinterpret_cast(&s); ++ return r; ++} ++ ++static void BYTE_ZAP(void* x, uint8_t y) ++{ ++ char* p = (char*)x; ++ uint8_t mask = 1; ++ for (int i = 0; i<8; ++i) { ++ if (y & mask) { ++ p[i] = 0; ++ } ++ mask <<= 1; ++ } ++} ++ ++static int64_t InsLowXxx(uint32_t mask, int64_t ra, int64_t rb) ++{ ++ uint32_t index = (rb & 0x7); ++ uint32_t byte_mask = mask << index; ++ uint32_t byte_loc = index * 8; ++ int64_t tmp = ra << (byte_loc & 0x3f); ++ BYTE_ZAP(&tmp, ~((uint8_t)byte_mask)); ++ ++ return tmp; ++} ++ ++static int64_t InsHighXxx(uint32_t mask, int64_t ra, int64_t rb) ++{ ++ uint32_t index = (rb & 0x7); ++ uint32_t byte_mask = mask << index; ++ uint32_t byte_loc = 64 - index * 8; ++ int64_t tmp = (uint64_t)ra >> (byte_loc & 0x3f); ++ BYTE_ZAP(&tmp, ~((uint8_t)(byte_mask>>8))); ++ ++ return tmp; ++} ++ ++static int64_t ExtLowXxx(uint32_t byte_mask, int64_t ra, int64_t rb) ++{ ++ uint32_t index = (rb & 0x7); ++ uint32_t byte_loc = index * 8; ++ int64_t tmp = (uint64_t)ra >> (byte_loc & 0x3f); ++ BYTE_ZAP(&tmp, ~((uint8_t)byte_mask)); ++ ++ return tmp; ++} ++ ++static int64_t ExtHighXxx(uint32_t byte_mask, int64_t ra, int64_t rb) ++{ ++ uint32_t index = (rb & 0x7); ++ uint32_t byte_loc = 64 - index * 8; ++ int64_t tmp = ra << (byte_loc & 0x3f); ++ BYTE_ZAP(&tmp, ~((uint8_t)byte_mask)); ++ ++ return tmp; ++} ++ ++static int64_t MaskLowXxx(uint32_t mask, int64_t ra, int64_t rb) ++{ ++ int index = (rb & 0x7); ++ int byte_mask = mask << index; ++ int64_t tmp = ra; ++ BYTE_ZAP(&tmp, (uint8_t)byte_mask); ++ ++ return tmp; ++} ++ ++static int64_t MaskHighXxx(uint32_t mask, int64_t ra, int64_t rb) ++{ ++ uint32_t index = (rb & 0x7); ++ uint32_t byte_mask = mask << index; ++ int64_t tmp = ra; ++ 
BYTE_ZAP(&tmp, (uint8_t)(byte_mask >> 8));
++
++ return tmp;
++}
++
++void Simulator::DecodeTypeSyscall()
++{
++ Instruction* instr = instr_.instr();
++ if (instr->OpcodeFieldValue() == op_sys_call){
++ Format(instr, "sys_call '0x(25-0)");
++ }
++}
++
++void Simulator::DecodeTypeTransfer()
++{
++ Instruction* instr = instr_.instr();
++ int Ra = instr->SwRaValue();
++ int imm21 = instr->SwImmOrDispFieldValue(20, 0);
++ int64_t current_pc = get_pc() + kInstrSize;
++ int64_t next_pc = current_pc + kInstrSize * imm21;
++ int64_t fa;
++ switch(instr->OpcodeFieldValue()){
++ case op_br:
++ //Format(instr,"br 'ra, 'tr_disp(20-0)");//ld 20150320
++ set_register(Ra, current_pc);
++ set_pc(next_pc);
++ break;
++ case op_bsr:
++ //Format(instr,"bsr 'ra, 'tr_disp(20-0)");
++ set_register(Ra, current_pc);
++ set_pc(next_pc);
++ break;
++ case op_beq:
++ //Format(instr,"beq 'ra, 'tr_disp(20-0)");
++ if (get_register(Ra) == 0)
++ set_pc(next_pc);
++ break;
++ case op_bne:
++ //Format(instr,"bne 'ra, 'tr_disp(20-0)");
++ if (get_register(Ra) != 0)
++ set_pc(next_pc);
++ break;
++ case op_blt:
++ //Format(instr,"blt 'ra, 'tr_disp(20-0)");
++ if (get_register(Ra) < 0)
++ set_pc(next_pc);
++ break;
++ case op_ble:
++ //Format(instr,"ble 'ra, 'tr_disp(20-0)");
++ if (get_register(Ra) <= 0)
++ set_pc(next_pc);
++ break;
++ case op_bgt:
++ //Format(instr,"bgt 'ra, 'tr_disp(20-0)");
++ if (get_register(Ra) > 0)
++ set_pc(next_pc);
++ break;
++ case op_bge:
++ //Format(instr,"bge 'ra, 'tr_disp(20-0)");
++ if (get_register(Ra) >= 0)
++ set_pc(next_pc);
++ break;
++ case op_blbc:
++ //Format(instr,"blbc 'ra, 'tr_disp(20-0)");
++ if (!(get_register(Ra) & 0x1))
++ set_pc(next_pc);
++ break;
++ case op_blbs:
++ //Format(instr,"blbs 'ra, 'tr_disp(20-0)");
++ if (get_register(Ra) & 0x1)
++ set_pc(next_pc);
++ break;
++ case op_fbeq:
++ //Format(instr,"fbeq 'fa, 'tr_disp(20-0)");
++ if ((get_fpu_register(Ra) & 0x7fffffffffffffff) == 0)
++ set_pc(next_pc);
++ break;
++ case op_fbne:
++ //Format(instr,"fbne 'fa, 'tr_disp(20-0)");
++ if ((get_fpu_register(Ra) & 0x7fffffffffffffff) != 0)
++ set_pc(next_pc);
++ break;
++ case op_fblt:
++ //Format(instr,"fblt 'fa, 'tr_disp(20-0)");
++ fa = get_fpu_register(Ra);
++ if (*((double*)&fa) < 0)
++ set_pc(next_pc);
++ break;
++ case op_fble:
++ //Format(instr,"fble 'fa, 'tr_disp(20-0)");
++ fa = get_fpu_register(Ra);
++ if (*((double*)&fa) <= 0)
++ set_pc(next_pc);
++ break;
++ case op_fbgt:
++ //Format(instr,"fbgt 'fa, 'tr_disp(20-0)");
++ fa = get_fpu_register(Ra);
++ if (*((double*)&fa) > 0)
++ set_pc(next_pc);
++ break;
++ case op_fbge:
++ //Format(instr,"fbge 'fa, 'tr_disp(20-0)");
++ fa = get_fpu_register(Ra);
++ if (*((double*)&fa) >= 0)
++ set_pc(next_pc);
++ break;
++ default:
++ printf("a 0x%x \n", instr->OpcodeFieldRaw());
++ UNREACHABLE();
++ }
++}
++
++#define OP(x) (((x) & 0x3F) << 26)
++void Simulator::DecodeTypeStorage()
++{
++ int64_t vaddr;
++ int opcode_func_raw = 0;
++ Instruction* instr = instr_.instr();
++ int Ra = instr->SwRaValue();
++ int Rb = instr->SwRbValue();
++ int64_t imm16 = instr->SwImmOrDispFieldValue(15, 0);
++ int64_t imm12 = instr->SwImmOrDispFieldValue(11, 0);
++
++ // miscellaneous instructions
++ if ( instr->OpcodeFieldValue() == OP(0x06) ||
++ instr->OpcodeFieldValue() == OP(0x08) ) {
++ vaddr = get_register(Rb) + imm12;
++ } else {
++ vaddr = get_register(Rb) + imm16;
++ }
++ switch (instr->OpcodeFieldValue()) {
++ case op_call:
++ //Format(instr, "call 'ra, ('rb)");
++ set_register(Ra, get_pc() + kInstrSize);
++ set_pc(get_register(Rb));
++ break;
++ case op_ret:
++ //Format(instr, "ret 'ra, ('rb)");
++ set_register(Ra, get_pc() + kInstrSize);
++ set_pc(get_register(Rb));
++ break;
++ case op_jmp:
++ //Format(instr, "jmp 'ra, ('rb)");
++ set_register(Ra, get_pc() + kInstrSize);
++ set_pc(get_register(Rb));
++ break;
++
++ case op_ldbu:
++ //Format(instr, "ldbu 'ra, 'disp(15-0)('rb)");
++ set_register(Ra, *(uint8_t*)(vaddr));
++ break;
++ case op_ldhu:
++ //Format(instr, "ldhu 'ra, 'disp(15-0)('rb)");
++ if (vaddr & 0x1)
++ PrintF("unaligned ldhu");
++ set_register(Ra, *(uint16_t*)(vaddr));
++ break;
++ case op_ldw:
++ //Format(instr, "ldw 'ra, 'disp(15-0)('rb)");
++ if (vaddr & 0x3)
++ PrintF("unaligned ldw");
++ set_register(Ra, *(int32_t*)(vaddr));
++ break;
++ case op_ldl:
++ //Format(instr, "ldl 'ra, 'disp(15-0)('rb)");
++ if (vaddr & 0x7)
++ PrintF("unaligned ldl");
++ set_register(Ra, *(int64_t*)vaddr);
++ break;
++ case op_ldl_u:
++ //Format(instr, "ldl_u 'ra, 'disp(15-0)('rb)");
++ set_register(Ra, *(int64_t*)(vaddr & ~0x07));
++ break;
++ case op_stb:
++ //Format(instr, "stb 'ra, 'disp(15-0)('rb)");
++ *(uint8_t*)(get_register(Rb) + imm16) = get_register(Ra) & 0xff;
++ break;
++ case op_sth:
++ //Format(instr, "sth 'ra, 'disp(15-0)('rb)");
++ if (vaddr & 0x01)
++ PrintF("unaligned sth");
++ *(uint16_t*)vaddr = get_register(Ra) & 0xffff;
++ break;
++ case op_stw:
++ //Format(instr, "stw 'ra, 'disp(15-0)('rb)");
++ if (vaddr & 0x03)
++ PrintF("unaligned stw");
++ *(uint32_t*)vaddr = get_register(Ra) & 0xffffffff;
++ break;
++ case op_stl:
++ //Format(instr, "stl 'ra, 'disp(15-0)('rb)");
++ if (vaddr & 0x07)
++ PrintF("unaligned stl");
++ *(int64_t*)vaddr = get_register(Ra);
++ break;
++ case op_stl_u:
++ //Format(instr, "stl_u 'ra, 'disp(15-0)('rb)");
++ *(int64_t*)(vaddr & ~0x07) = get_register(Ra);
++ break;
++ case op_ldi:
++ //Format(instr, "ldi 'ra, 'disp(15-0)('rb)");
++ set_register(Ra, get_register(Rb) + imm16);
++ break;
++ case op_ldih:
++ //Format(instr, "ldih 'ra, 'disp(15-0)('rb)");
++ set_register(Ra, get_register(Rb) + (imm16 << 16));
++ break;
++
++ case op_flds:
++ //Format(instr, "flds 'fa, 'disp(15-0)('rb)");
++ if (vaddr & 0x03)
++ PrintF("unaligned flds");
++ set_fpu_register(Ra, FloatToDoubleR(*(float*)vaddr));
++ break;
++ case op_fldd:
++ //Format(instr, "fldd 'fa, 'disp(15-0)('rb)");
++ if (vaddr & 0x07)
++ PrintF("unaligned fldd");
++ set_fpu_register(Ra, *(int64_t*)vaddr);
++ break;
++ case op_fsts:
++ //Format(instr, "fsts 'fa, 'disp(15-0)('rb)");
++ if (vaddr & 0x03)
++ PrintF("unaligned fsts");
++ *(float*)vaddr = DoubleRToFloat(get_fpu_register(Ra));
++ break;
++ case op_fstd:
++ //Format(instr, "fstd 'fa, 'disp(15-0)('rb)");
++ if (vaddr & 0x07)
++ PrintF("unaligned fstd");
++ *(int64_t*)vaddr = get_fpu_register(Ra);
++ break;
++
++ case op_ldwe:
++ case op_ldse:
++ case op_ldde:
++ case op_vlds:
++ case op_vldd:
++ case op_vsts:
++ case op_vstd:
++ UNIMPLEMENTED_SW64();
++ break;
++
++ case OP(0x08):
++ case OP(0x06): // miscellaneous instructions
++ opcode_func_raw = instr->SwFunctionFieldRaw(15, 0) | instr->OpcodeFieldRaw();
++ switch (opcode_func_raw) {
++ case op_lldw:
++ //Format(instr, "lldw 'ra, 'disp(11-0)('rb)");
++ if (vaddr & 0x3)
++ PrintF("unaligned ldw");
++ set_register(Ra, *(int32_t*)(vaddr));
++ lock_valid = 1;
++ lock_register_padd = vaddr;
++ lock_register_flag = 1;
++ break;
++ case op_lldl:
++ //Format(instr, "lldl 'ra, 'disp(11-0)('rb)");
++ if (vaddr & 0x7)
++ PrintF("unaligned ldl");
++ set_register(Ra, *(int64_t*)vaddr);
++ lock_valid = 1;
++ lock_register_padd = vaddr;
++ lock_register_flag = 1;
++ break;
++ case op_ldw_inc: ++ Format(instr, "ldw_inc 'ra, 'disp(11-0)('rb)"); ++ break; ++ case op_ldl_inc: ++ Format(instr, "ldl_inc 'ra, 'disp(11-0)('rb)"); ++ break; ++ case op_ldw_dec: ++ Format(instr, "ldw_dec 'ra, 'disp(11-0)('rb)"); ++ break; ++ case op_ldl_dec: ++ Format(instr, "ldl_dec 'ra, 'disp(11-0)('rb)"); ++ break; ++ case op_ldw_set: ++ Format(instr, "ldw_set 'ra, 'disp(11-0)('rb)"); ++ break; ++ case op_ldl_set: ++ Format(instr, "ldl_set 'ra, 'disp(11-0)('rb)"); ++ break; ++ case op_lstw: ++ //Format(instr, "lstw 'ra, 'disp(11-0)('rb)"); ++ if(lock_flag == 1 && lock_valid == 1 && ++ vaddr==lock_register_padd && lock_register_flag == 1) { ++ if (vaddr & 0x03) ++ PrintF("unaligned stw"); ++ *(uint32_t*)vaddr = get_register(Ra) & 0xffffffff; ++ lock_success = 1; ++ lock_valid = 0; ++ lock_flag = 0; ++ } else { ++ lock_success = 0; ++ lock_valid = 0; ++ lock_flag = 0; ++ } ++ break; ++ case op_lstl: ++ //Format(instr, "lstl 'ra, 'disp(11-0)('rb)"); ++ if(lock_flag == 1 && lock_valid == 1 && ++ vaddr==lock_register_padd && lock_register_flag == 1) { ++ PrintF("do lstl\n"); ++ if (vaddr & 0x07) ++ PrintF("unaligned stl"); ++ *(int64_t*)vaddr = get_register(Ra); ++ lock_success = 1; ++ lock_valid = 0; ++ lock_flag = 0; ++ } else { ++ lock_success = 0; ++ lock_valid = 0; ++ lock_flag = 0; ++ } ++ break; ++ case op_memb: ++ break; ++ case op_rtc: ++ Format(instr, "rtc 'ra, 'rb"); ++ break; ++ case op_rcid: ++ Format(instr, "rcid 'ra"); ++ break; ++ case op_halt: ++ Format(instr, "halt"); ++ break; ++ case op_rd_f: ++ set_register(Ra,lock_success); ++ lock_success = 0; ++ break; ++ case op_wr_f: ++ lock_flag = get_register(Ra) & 0x0000000000000001; ++ break; ++ default: ++ UNIMPLEMENTED_SW64(); ++ } ++ break; ++ ++ default: ++ printf("a 0x%x \n", instr->OpcodeFieldRaw()); ++ UNREACHABLE(); ++ } ++} ++ ++void Simulator::DecodeTypeSimpleCalc() ++{ ++ // TODO: how to process compute overflow exception! 
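++ // Decode by major opcode: 0x10 selects the register-register forms,
++ // 0x12 the forms taking an 8-bit literal (imm8), and 0x18 the simple
++ // floating-point forms handled further below.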
++ Instruction* instr = instr_.instr(); ++ int simple_calculation_op = instr->SwFunctionFieldRaw(12, 5) | instr->OpcodeFieldValue(); ++ int op = simple_calculation_op >> 26; ++ if (op == 0x10) { ++ int ra = instr->SwRaValue(); ++ int rb = instr->SwRbValue(); ++ int rc = instr->SwRcValue(4, 0); ++ switch (simple_calculation_op) { ++ case op_addw: ++ // Format(instr, "addw 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = (int64_t)(int)(registers_[ra] + registers_[rb]); ++ break; ++ case op_subw: ++ // Format(instr, "subw 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = (int64_t)(int)(registers_[ra] - registers_[rb]); ++ break; ++ case op_s4addw: ++ // Format(instr, "s4addw 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = (int64_t)(int)(registers_[ra]*4 + registers_[rb]); ++ break; ++ case op_s4subw: ++ // Format(instr, "s4subw 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = (int64_t)(int)(registers_[ra]*4 - registers_[rb]); ++ break; ++ case op_s8addw: ++ // Format(instr, "s8addw 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = (int64_t)(int)(registers_[ra]*8 + registers_[rb]); ++ break; ++ case op_s8subw: ++ // Format(instr, "s8subw 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = (int64_t)(int)(registers_[ra]*8 - registers_[rb]); ++ break; ++ case op_addl: ++ // Format(instr, "addl 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = registers_[ra] + registers_[rb]; ++ break; ++ case op_subl: ++ // Format(instr, "subl 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = registers_[ra] - registers_[rb]; ++ break; ++ case op_s4addl: ++ // Format(instr, "s4addl 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = registers_[ra]*4 + registers_[rb]; ++ break; ++ case op_s4subl: ++ // Format(instr, "s4subl 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = registers_[ra]*4 - registers_[rb]; ++ break; ++ case op_s8addl: ++ // Format(instr, "s8addl 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = registers_[ra]*8 + registers_[rb]; ++ break; ++ case op_s8subl: ++ // Format(instr, "s8subl 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = registers_[ra]*8 - registers_[rb]; ++ break; ++ case op_mulw: ++ // Format(instr, "mulw 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = (int64_t)(int)(registers_[ra] * registers_[rb]); ++ break; ++ case op_mull: ++ // Format(instr, "mull 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = registers_[ra] * registers_[rb]; ++ break; ++ case op_umulh: ++ Format(instr, "umulh 'ra, 'rb, 'rc(4-0)"); ++ // TODO:how to store high 64 bit of 128 bit result! ++ // registers_[rc] = (uint64_t)registers_[ra] * (uint64_t)registers_[rb]; ++ break; ++ case op_cmpeq: ++ // Format(instr, "cmpeq 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = registers_[ra] == registers_[rb] ? 1 : 0; ++ break; ++ case op_cmplt: ++ // Format(instr, "cmplt 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = registers_[ra] < registers_[rb] ? 1 : 0; ++ break; ++ case op_cmple: ++ // Format(instr, "cmple 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = registers_[ra] <= registers_[rb] ? 1 : 0; ++ break; ++ case op_cmpult: ++ // Format(instr, "cmpult 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = (uint64_t)registers_[ra] < (uint64_t)registers_[rb] ? 1 : 0; ++ break; ++ case op_cmpule: ++ // Format(instr, "cmpule 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = (uint64_t)registers_[ra] <= (uint64_t)registers_[rb] ? 
1 : 0; ++ break; ++ case op_and: ++ // Format(instr, "and 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = registers_[ra] & registers_[rb]; ++ break; ++ case op_bic: ++ // Format(instr, "bic 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = registers_[ra] & (~registers_[rb]); ++ break; ++ case op_bis: //case op_or: ++ // Format(instr, "or 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = registers_[ra] | registers_[rb]; ++ break; ++ case op_ornot: ++ // Format(instr, "ornot 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = registers_[ra] | (~registers_[rb]); ++ break; ++ case op_xor: ++ // Format(instr, "xor 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = registers_[ra] ^ registers_[rb]; ++ break; ++ case op_eqv: ++ // Format(instr, "eqv 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = registers_[ra] ^ (~registers_[rb]); ++ break; ++ case op_inslb: ++ // Format(instr, "inslb 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = InsLowXxx(0x1, registers_[ra], registers_[rb]); ++ break; ++ case op_inslh: ++ // Format(instr, "inslh 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = InsLowXxx(0x3, registers_[ra], registers_[rb]); ++ break; ++ case op_inslw: ++ // Format(instr, "inslw 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = InsLowXxx(0xf, registers_[ra], registers_[rb]); ++ break; ++ case op_insll: ++ // Format(instr, "insll 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = InsLowXxx(0xff, registers_[ra], registers_[rb]); ++ break; ++ case op_inshb: ++ // Format(instr, "inshb 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = InsHighXxx(0x1, registers_[ra], registers_[rb]); ++ break; ++ case op_inshh: ++ // Format(instr, "inshh 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = InsHighXxx(0x3, registers_[ra], registers_[rb]); ++ break; ++ case op_inshw: ++ // Format(instr, "inshw 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = InsHighXxx(0xf, registers_[ra], registers_[rb]); ++ break; ++ case op_inshl: ++ // Format(instr, "inshl 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = InsHighXxx(0xff, registers_[ra], registers_[rb]); ++ break; ++ case op_slll: ++ // Format(instr, "slll 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = registers_[ra] << (registers_[rb] & 0x3f); ++ break; ++ case op_srll: ++ // Format(instr, "srll 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = (uint64_t)registers_[ra] >> (registers_[rb] & 0x3f); ++ break; ++ case op_sral: ++ // Format(instr, "sral 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = registers_[ra] >> (registers_[rb] & 0x3f); ++ break; ++ case op_extlb: ++ // Format(instr, "extlb 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = ExtLowXxx(0x1, registers_[ra], registers_[rb]); ++ break; ++ case op_extlh: ++ // Format(instr, "extlh 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = ExtLowXxx(0x3, registers_[ra], registers_[rb]); ++ break; ++ case op_extlw: ++ // Format(instr, "extlw 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = ExtLowXxx(0xf, registers_[ra], registers_[rb]); ++ break; ++ case op_extll: ++ // Format(instr, "extll 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = ExtLowXxx(0xff, registers_[ra], registers_[rb]); ++ break; ++ case op_exthb: ++ // Format(instr, "exthb 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = ExtHighXxx(0x1, registers_[ra], registers_[rb]); ++ break; ++ case op_exthh: ++ // Format(instr, "exthh 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = ExtHighXxx(0x3, registers_[ra], registers_[rb]); ++ break; ++ case op_exthw: ++ // Format(instr, "exthw 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = ExtHighXxx(0xf, registers_[ra], registers_[rb]); ++ break; ++ case op_exthl: ++ // Format(instr, "exthl 'ra, 'rb, 'rc(4-0)"); ++ registers_[rc] = ExtHighXxx(0xff, registers_[ra], registers_[rb]); ++ break; 
++ case op_ctpop:
++ // Format(instr, "ctpop 'rb, 'rc(4-0)");
++ {
++ int c = 0;
++ uint64_t v = registers_[rb];
++ for (int i = 0; i<64; ++i)
++ {
++ if (v&1) ++c;
++ v >>= 1;
++ }
++ registers_[rc] = c;
++ }
++ break;
++ case op_ctlz:
++ // Format(instr, "ctlz 'rb, 'rc(4-0)");
++ {
++ int c = 0;
++ uint64_t v = registers_[rb];
++ for (int i = 0; i<64; ++i)
++ {
++ if (!(v & 0x8000000000000000)) ++c;
++ else break;
++ v <<= 1;
++ }
++ registers_[rc] = c;
++ }
++ break;
++ case op_cttz:
++ // Format(instr, "cttz 'rb, 'rc(4-0)");
++ {
++ int c = 0;
++ uint64_t v = registers_[rb];
++ for (int i = 0; i<64; ++i)
++ {
++ if (!(v&1)) ++c;
++ else break;
++ v >>= 1;
++ }
++ registers_[rc] = c;
++ }
++ break;
++ case op_masklb:
++ // Format(instr, "masklb 'ra, 'rb, 'rc(4-0)");
++ registers_[rc] = MaskLowXxx(0x1, registers_[ra], registers_[rb]);
++ break;
++ case op_masklh:
++ // Format(instr, "masklh 'ra, 'rb, 'rc(4-0)");
++ registers_[rc] = MaskLowXxx(0x3, registers_[ra], registers_[rb]);
++ break;
++ case op_masklw:
++ // Format(instr, "masklw 'ra, 'rb, 'rc(4-0)");
++ registers_[rc] = MaskLowXxx(0xf, registers_[ra], registers_[rb]);
++ break;
++ case op_maskll:
++ // Format(instr, "maskll 'ra, 'rb, 'rc(4-0)");
++ registers_[rc] = MaskLowXxx(0xff, registers_[ra], registers_[rb]);
++ break;
++ case op_maskhb:
++ // Format(instr, "maskhb 'ra, 'rb, 'rc(4-0)");
++ registers_[rc] = MaskHighXxx(0x1, registers_[ra], registers_[rb]);
++ break;
++ case op_maskhh:
++ // Format(instr, "maskhh 'ra, 'rb, 'rc(4-0)");
++ registers_[rc] = MaskHighXxx(0x3, registers_[ra], registers_[rb]);
++ break;
++ case op_maskhw:
++ // Format(instr, "maskhw 'ra, 'rb, 'rc(4-0)");
++ registers_[rc] = MaskHighXxx(0xf, registers_[ra], registers_[rb]);
++ break;
++ case op_maskhl:
++ // Format(instr, "maskhl 'ra, 'rb, 'rc(4-0)");
++ registers_[rc] = MaskHighXxx(0xff, registers_[ra], registers_[rb]);
++ break;
++ case op_zap:
++ // Format(instr, "zap 'ra, 'rb, 'rc(4-0)");
++ {
++ long tmp = registers_[ra];
++ BYTE_ZAP(&tmp, (uint8_t)registers_[rb]);
++ registers_[rc] = tmp;
++ }
++ break;
++ case op_zapnot:
++ // Format(instr, "zapnot 'ra, 'rb, 'rc(4-0)");
++ {
++ long tmp = registers_[ra];
++ BYTE_ZAP(&tmp, ~(uint8_t)registers_[rb]);
++ registers_[rc] = tmp;
++ }
++ break;
++ case op_sextb:
++ // Format(instr, "sextb 'rb, 'rc(4-0)");
++ registers_[rc] = SEXT((int8_t)registers_[rb]);
++ break;
++ case op_sexth:
++ // Format(instr, "sexth 'rb, 'rc(4-0)");
++ registers_[rc] = SEXT((int16_t)registers_[rb]);
++ break;
++ case op_cmpgeb:
++ // Format(instr, "cmpgeb 'ra, 'rb, 'rc(4-0)");
++ {
++ uint8_t* p = (uint8_t*)&registers_[ra];
++ uint8_t* q = (uint8_t*)&registers_[rb];
++ uint8_t v = 0;
++ for (int i = 0; i<=7; ++i)
++ {
++ if (*p >= *q) {
++ v |= (1<<i);
++ }
++ ++p;
++ ++q;
++ }
++ registers_[rc] = v;
++ }
++ break;
++ case op_fimovs:
++ {
++ // Format(instr, "fimovs 'fa, 'rc(4-0)");
++ int64_t v = 0;
++ int fa = instr->SwFaValue();
++ *(float*)&v = DoubleRToFloat(get_fpu_register(fa));
++ v = v << 32 >> 32;
++ set_register(rc, v);
++ }
++ break;
++ case op_fimovd:
++ {
++ // Format(instr, "fimovd 'fa, 'rc(4-0)");
++ int fa = instr->SwFaValue();
++ set_register(rc, get_fpu_register(fa));
++ }
++ break;
++ default:
++ break;
++ }
++ }
++ else if (op == 0x12) { // has imm8
++ int ra = instr->SwRaValue();
++ int rc = instr->SwRcValue(4, 0);
++ int imm8 = instr->SwImmOrDispFieldRaw(20, 13) >> 13;
++ switch (simple_calculation_op) {
++ case op_addw_l:
++ // Format(instr, "addw 'ra, 'imm(20-13), 'rc(4-0)");
++ registers_[rc] = SEXT((int)registers_[ra] + imm8);
++ break;
++ case op_subw_l:
++ // Format(instr, "subw 'ra, 'imm(20-13), 'rc(4-0)");
++ registers_[rc] = SEXT((int)registers_[ra] - imm8);
++ break;
++
case op_s4addw_l: ++ // Format(instr, "s4addw 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = SEXT((int)(registers_[ra]*4 + imm8)); ++ break; ++ case op_s4subw_l: ++ // Format(instr, "s4subw 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = SEXT((int)(registers_[ra]*4 - imm8)); ++ break; ++ case op_s8addw_l: ++ // Format(instr, "s8addw 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = SEXT((int)(registers_[ra]*8 + imm8)); ++ break; ++ case op_s8subw_l: ++ // Format(instr, "s8subw 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = SEXT((int)(registers_[ra]*8 - imm8)); ++ break; ++ case op_addl_l: ++ // Format(instr, "addl 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = registers_[ra] + imm8; ++ break; ++ case op_subl_l: ++ // Format(instr, "subl 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = registers_[ra] - imm8; ++ break; ++ case op_s4addl_l: ++ // Format(instr, "s4addl 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = registers_[ra]*4 + imm8; ++ break; ++ case op_s4subl_l: ++ // Format(instr, "s4subl 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = registers_[ra]*4 - imm8; ++ break; ++ case op_s8addl_l: ++ // Format(instr, "s8addl 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = registers_[ra]*8 + imm8; ++ break; ++ case op_s8subl_l: ++ // Format(instr, "s8subl 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = registers_[ra]*8 - imm8; ++ break; ++ case op_mulw_l: ++ // Format(instr, "mulw 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = SEXT((int)registers_[ra] * imm8); ++ break; ++ case op_mull_l: ++ // Format(instr, "mull 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = registers_[ra] * imm8; ++ break; ++ case op_umulh_l: ++ Format(instr, "umulh 'ra, 'imm(20-13), 'rc(4-0)"); ++ break; ++ case op_cmpeq_l: ++ // Format(instr, "cmpeq 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = registers_[ra] == imm8 ? 1 : 0; ++ break; ++ case op_cmplt_l: ++ // Format(instr, "cmplt 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = registers_[ra] < imm8 ? 1 : 0; ++ break; ++ case op_cmple_l: ++ // Format(instr, "cmple 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = registers_[ra] <= imm8 ? 1 : 0; ++ break; ++ case op_cmpult_l: ++ // Format(instr, "cmpult 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = (uint64_t)registers_[ra] < (uint8_t)imm8 ? 1 : 0; ++ break; ++ case op_cmpule_l: ++ // Format(instr, "cmpule 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = (uint64_t)registers_[ra] <= (uint8_t)imm8 ? 
1 : 0; ++ break; ++ case op_and_l: ++ // Format(instr, "and 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = registers_[ra] & (uint8_t)imm8; ++ break; ++ case op_bic_l: ++ // Format(instr, "bic 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = registers_[ra] & (uint8_t)(~imm8); ++ break; ++ case op_bis_l: //case op_or_l: ++ // Format(instr, "or 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = registers_[ra] | (uint8_t)imm8; ++ break; ++ case op_ornot_l: ++ // Format(instr, "ornot 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = registers_[ra] | (uint8_t)(~imm8); ++ break; ++ case op_xor_l: ++ // Format(instr, "xor 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = registers_[ra] ^ (uint8_t)imm8; ++ break; ++ case op_eqv_l: ++ // Format(instr, "eqv 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = registers_[ra] ^ (uint8_t)(~imm8); ++ break; ++ case op_inslb_l: ++ // Format(instr, "inslb 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = InsLowXxx(0x1, registers_[ra], imm8); ++ break; ++ case op_inslh_l: ++ // Format(instr, "inslh 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = InsLowXxx(0x3, registers_[ra], imm8); ++ break; ++ case op_inslw_l: ++ // Format(instr, "inslw 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = InsLowXxx(0xf, registers_[ra], imm8); ++ break; ++ case op_insll_l: ++ // Format(instr, "insll 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = InsLowXxx(0xff, registers_[ra], imm8); ++ break; ++ case op_inshb_l: ++ // Format(instr, "inshb 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = InsHighXxx(0x1, registers_[ra], imm8); ++ break; ++ case op_inshh_l: ++ // Format(instr, "inshh 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = InsHighXxx(0x3, registers_[ra], imm8); ++ break; ++ case op_inshw_l: ++ // Format(instr, "inshw 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = InsHighXxx(0xf, registers_[ra], imm8); ++ break; ++ case op_inshl_l: ++ // Format(instr, "inshl 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = InsHighXxx(0xff, registers_[ra], imm8); ++ break; ++ case op_slll_l: ++ // Format(instr, "slll 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = registers_[ra] << (imm8 & 0x3f); ++ break; ++ case op_srll_l: ++ // Format(instr, "srll 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = (uint64_t)registers_[ra] >> (imm8 & 0x3f); ++ break; ++ case op_sral_l: ++ // Format(instr, "sral 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = registers_[ra] >> (imm8 & 0x3f); ++ break; ++ case op_extlb_l: ++ // Format(instr, "extlb 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = ExtLowXxx(0x1, registers_[ra], imm8); ++ break; ++ case op_extlh_l: ++ // Format(instr, "extlh 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = ExtLowXxx(0x3, registers_[ra], imm8); ++ break; ++ case op_extlw_l: ++ // Format(instr, "extlw 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = ExtLowXxx(0xf, registers_[ra], imm8); ++ break; ++ case op_extll_l: ++ // Format(instr, "extll 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = ExtLowXxx(0xff, registers_[ra], imm8); ++ break; ++ case op_exthb_l: ++ // Format(instr, "exthb 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = ExtHighXxx(0x1, registers_[ra], imm8); ++ break; ++ case op_exthh_l: ++ // Format(instr, "exthh 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = ExtHighXxx(0x3, registers_[ra], imm8); ++ break; ++ case op_exthw_l: ++ // Format(instr, "exthw 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = ExtHighXxx(0xf, registers_[ra], imm8); ++ break; ++ case op_exthl_l: ++ // Format(instr, "exthl 'ra, 'imm(20-13), 'rc(4-0)"); ++ 
registers_[rc] = ExtHighXxx(0xff, registers_[ra], imm8); ++ break; ++ case op_masklb_l: ++ // Format(instr, "masklb 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = MaskLowXxx(0x1, registers_[ra], imm8); ++ break; ++ case op_masklh_l: ++ // Format(instr, "masklh 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = MaskLowXxx(0x3, registers_[ra], imm8); ++ break; ++ case op_masklw_l: ++ // Format(instr, "masklw 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = MaskLowXxx(0xf, registers_[ra], imm8); ++ break; ++ case op_maskll_l: ++ // Format(instr, "maskll 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = MaskLowXxx(0xff, registers_[ra], imm8); ++ break; ++ case op_maskhb_l: ++ // Format(instr, "maskhb 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = MaskHighXxx(0x1, registers_[ra], imm8); ++ break; ++ case op_maskhh_l: ++ // Format(instr, "maskhh 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = MaskHighXxx(0x3, registers_[ra], imm8); ++ break; ++ case op_maskhw_l: ++ // Format(instr, "maskhw 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = MaskHighXxx(0xf, registers_[ra], imm8); ++ break; ++ case op_maskhl_l: ++ // Format(instr, "maskhl 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = MaskHighXxx(0xff, registers_[ra], imm8); ++ break; ++ case op_zap_l: ++ // Format(instr, "zap 'ra, 'imm(20-13), 'rc(4-0)"); ++ { ++ long tmp = registers_[ra]; ++ BYTE_ZAP(&tmp, (uint8_t)imm8); ++ registers_[rc] = tmp; ++ } ++ break; ++ case op_zapnot_l: ++ // Format(instr, "zapnot 'ra, 'imm(20-13), 'rc(4-0)"); ++ { ++ long tmp = registers_[ra]; ++ BYTE_ZAP(&tmp, ~(uint8_t)imm8); ++ registers_[rc] = tmp; ++ } ++ break; ++ case op_sextb_l: ++ // Format(instr, "sextb 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = SEXT((int8_t)imm8); ++ break; ++ case op_sexth_l: ++ // Format(instr, "sexth 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = SEXT((int16_t)imm8); ++ break; ++ case op_cmpgeb_l: ++ // Format(instr, "cmpgeb 'ra, 'imm(20-13), 'rc(4-0)"); ++ registers_[rc] = registers_[ra] == (uint8_t)imm8; ++ break; ++ default: ++ break; ++ } ++ } ++ else if (op == 0x18) { //浮点简单运算指? 
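++    // Floating-point simple calculation instructions (opcode 0x18).
++    // Single-precision values are kept in the 64-bit FPU registers in their
++    // in-register layout, so the *s cases below unpack the operands with
++    // DoubleRToFloat, do the arithmetic in float, and repack the result with
++    // FloatToDoubleR; the *d cases operate on the doubles directly.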
++ int fa = instr->SwFaValue(); ++ int fb = instr->SwFbValue(); ++ int fc = instr->SwFcValue(4, 0); ++ //TODO: need support round mode set in fpcr register ++ switch (simple_calculation_op) { ++ case op_fadds: ++ // Format(instr, "fadds 'fa, 'fb, 'fc(4-0)"); ++ // f10 3.3000001907348633 (raw 0x400a666680000000) ++ // f11 1.1000000238418579 (raw 0x3ff19999a0000000) ++ // (Vaddr)<31> || MAP_S((Vaddr)<30:23>) || (Vaddr)<22:0> || 0<28:0> ++ // 1 8->11 23 29 ++ { ++ float a = DoubleRToFloat(get_fpu_register(fa)); ++ float b = DoubleRToFloat(get_fpu_register(fb)); ++ set_fpu_register(fc, FloatToDoubleR(a + b)); ++ } ++ break; ++ case op_faddd: ++ // Format(instr, "faddd 'fa, 'fb, 'fc(4-0)"); ++ // f10 2.2000000000000002 (raw 0x400199999999999a) ++ // f11 1.1000000000000001 (raw 0x3ff199999999999a) ++ set_fpu_register_double(fc, get_fpu_register_double(fa) + get_fpu_register_double(fb)); ++ break; ++ case op_fsubs: ++ // Format(instr, "fsubs 'fa, 'fb, 'fc(4-0)"); ++ { ++ float a = DoubleRToFloat(get_fpu_register(fa)); ++ float b = DoubleRToFloat(get_fpu_register(fb)); ++ set_fpu_register(fc, FloatToDoubleR(a - b)); ++ } ++ break; ++ case op_fsubd: ++ // Format(instr, "fsubd 'fa, 'fb, 'fc(4-0)"); ++ set_fpu_register_double(fc, get_fpu_register_double(fa) - get_fpu_register_double(fb)); ++ break; ++ case op_fmuls: ++ // Format(instr, "fmuls 'fa, 'fb, 'fc(4-0)"); ++ { ++ float a = DoubleRToFloat(get_fpu_register(fa)); ++ float b = DoubleRToFloat(get_fpu_register(fb)); ++ set_fpu_register(fc, FloatToDoubleR(a * b)); ++ } ++ break; ++ case op_fmuld: ++ // Format(instr, "fmuld 'fa, 'fb, 'fc(4-0)"); ++ set_fpu_register_double(fc, get_fpu_register_double(fa) * get_fpu_register_double(fb)); ++ break; ++ break; ++ case op_fdivs: ++ // Format(instr, "fdivs 'fa, 'fb, 'fc(4-0)"); ++ { ++ float a = DoubleRToFloat(get_fpu_register(fa)); ++ float b = DoubleRToFloat(get_fpu_register(fb)); ++ set_fpu_register(fc, FloatToDoubleR(a / b)); ++ } ++ break; ++ case op_fdivd: ++ // Format(instr, "fdivd 'fa, 'fb, 'fc(4-0)"); ++ set_fpu_register_double(fc, get_fpu_register_double(fa) / get_fpu_register_double(fb)); ++ break; ++ case op_fsqrts: ++ // Format(instr, "fsqrts 'fb, 'fc(4-0)"); ++ { ++ float b = DoubleRToFloat(get_fpu_register(fb)); ++ set_fpu_register(fc, FloatToDoubleR(sqrt(b))); ++ } ++ break; ++ case op_fsqrtd: ++ // Format(instr, "fsqrtd 'fb, 'fc(4-0)"); ++ set_fpu_register_double(fc, sqrt(get_fpu_register_double(fb))); ++ break; ++ case op_fcmpeq: ++ // Format(instr, "fcmpeq 'fa, 'fb, 'fc(4-0)"); ++ set_fpu_register(fc, get_fpu_register_double(fa) == get_fpu_register_double(fb) ? 0x4000000000000000LL : 0LL); ++ break; ++ case op_fcmple: ++ // Format(instr, "fcmple 'fa, 'fb, 'fc(4-0)"); ++ set_fpu_register(fc, get_fpu_register_double(fa) <= get_fpu_register_double(fb) ? 0x4000000000000000LL : 0LL); ++ break; ++ case op_fcmplt: ++ // Format(instr, "fcmplt 'fa, 'fb, 'fc(4-0)"); ++ set_fpu_register(fc, get_fpu_register_double(fa) < get_fpu_register_double(fb) ? 0x4000000000000000LL : 0LL); ++ break; ++ case op_fcmpun: ++ //Format(instr, "fcmpun 'fa, 'fb, 'fc(4-0)"); ++ set_fpu_register(fc, (std::isnan(get_fpu_register_double(fa)) || std::isnan(get_fpu_register_double(fb))) ? 
0x4000000000000000LL : 0LL); ++ break; ++ case op_fcvtsd: ++ // Format(instr, "fcvtsd 'fb, 'fc(4-0)"); ++ { ++ double b = (double)DoubleRToFloat(get_fpu_register(fb)); ++ set_fpu_register_double(fc, b); ++ } ++ break; ++ case op_fcvtds: ++ // Format(instr, "fcvtds 'fb, 'fc(4-0)"); ++ { ++ float b = get_fpu_register_double(fb); ++ set_fpu_register(fc, FloatToDoubleR(b)); ++ } ++ break; ++ case op_fcvtdl_g: ++ // Format(instr, "fcvtdl_g 'fb, 'fc(4-0)"); ++ { ++ double d = get_fpu_register_double(fb); ++ int64_t v = (int64_t)rint(d); ++ set_fpu_register(fc, v); ++ set_fcsr_bit(SW64_INE0_BIT, v!=d); ++ } ++ break; ++ case op_fcvtdl_p: ++ // Format(instr, "fcvtdl_p 'fb, 'fc(4-0)"); ++ { ++ double d = get_fpu_register_double(fb); ++ int64_t v = (int64_t)ceil(d); ++ set_fpu_register(fc, v); ++ set_fcsr_bit(SW64_INE0_BIT, v>d); ++ } ++ break; ++ case op_fcvtdl_z: ++ // Format(instr, "fcvtdl_z 'fb, 'fc(4-0)"); ++ { ++ double d = get_fpu_register_double(fb); ++ int64_t v = (int64_t)trunc(d); ++ set_fpu_register(fc, v); ++ set_fcsr_bit(SW64_INE0_BIT, v!=d); ++ } ++ break; ++ case op_fcvtdl_n: ++ // Format(instr, "fcvtdl_n 'fb, 'fc(4-0)"); ++ { ++ double d = get_fpu_register_double(fb); ++ int64_t v = (int64_t)floor(d); ++ set_fpu_register(fc, v); ++ set_fcsr_bit(SW64_INE0_BIT, v> 29) & 0x3fffffff; ++ v |= ((uint64_t)fbv >> 62) << 30; ++ fcv = v; ++ set_fpu_register(fc, fcv); ++ } ++ break; ++ case op_fcvtlw: ++ // Format(instr, "fcvtlw 'fb, 'fc(4-0)"); ++ { ++ int64_t fbv = get_fpu_register(fb); ++ int64_t v = ((uint64_t)fbv & 0x3fffffff) << 29; ++ v |= (((uint64_t)fbv >> 30) & 0x3) << 62; ++ set_fpu_register(fc, v); ++ } ++ break; ++ case op_fcvtls: ++ // Format(instr, "fcvtls 'fb, 'fc(4-0)"); ++ set_fpu_register(fc, FloatToDoubleR((float)get_fpu_register(fb))); ++ break; ++ case op_fcvtld: ++ // Format(instr, "fcvtld 'fb, 'fc(4-0)"); ++ set_fpu_register_double(fc, (double)get_fpu_register(fb)); ++ break; ++ case op_fcpys: ++ { ++ // Format(instr, "fcpys 'fa, 'fb, 'fc(4-0)"); ++ int64_t fcv = (get_fpu_register(fb) & 0x7fffffffffffffffLL); ++ if (get_fpu_register(fa) < 0) { ++ fcv |= 0x8000000000000000LL; ++ } ++ set_fpu_register(fc, fcv); ++ } ++ break; ++ case op_fcpyse: ++ { ++ // Format(instr, "fcpyse 'fa, 'fb, 'fc(4-0)"); ++ int64_t fcv = (get_fpu_register(fb) & 0x000fffffffffffffLL); ++ fcv |= (get_fpu_register(fa) & 0xfff0000000000000LL); ++ set_fpu_register(fc, fcv); ++ } ++ break; ++ case op_fcpysn: ++ { ++ // Format(instr, "fcpysn 'fa, 'fb, 'fc(4-0)"); ++ int64_t fcv = (get_fpu_register(fb) & 0x7fffffffffffffffLL); ++ if (get_fpu_register(fa) >= 0) { ++ fcv |= 0x8000000000000000LL; ++ } ++ set_fpu_register(fc, fcv); ++ } ++ break; ++ case op_ifmovs: ++ { ++ // Format(instr, "ifmovs 'ra, 'fc(4-0)"); ++ int ra = instr->SwRaValue(); ++ int64_t v = get_register(ra); ++ set_fpu_register(fc, FloatToDoubleR(*bit_cast(&v))); ++ } ++ break; ++ case op_ifmovd: ++ { ++ // Format(instr, "ifmovd 'ra, 'fc(4-0)"); ++ int ra = instr->SwRaValue(); ++ int64_t v = get_register(ra); ++ set_fpu_register(fc, v); ++ } ++ break; ++ case op_rfpcr: ++ // Format(instr, "rfpcr 'fa, FPCR"); ++ set_fpu_register(fa, FCSR_); ++ break; ++ case op_wfpcr: ++ // Format(instr, "wfpcr 'fa, FPCR"); ++ FCSR_ = get_fpu_register(fa); ++ break; ++ case op_setfpec0: ++ // Format(instr, "setfpec0"); ++ FCSR_ &= 0xfffffffffffffffc; ++ break; ++ case op_setfpec1: ++ //Format(instr, "setfpec1"); ++ FCSR_ &= 0xfffffffffffffffc; ++ FCSR_ |= 0x1; ++ break; ++ case op_setfpec2: ++ // Format(instr, "setfpec2"); ++ FCSR_ &= 0xfffffffffffffffc; 
++ FCSR_ |= 0x2; ++ break; ++ case op_setfpec3: ++ // Format(instr, "setfpec3"); ++ FCSR_ |= 0x3; ++ break; ++ default: ++ break; ++ } ++ } ++ else ++ { ++ printf("a 0x%x \n", instr->OpcodeFieldRaw()); ++ UNSUPPORTED(); ++ } ++} ++ ++void Simulator::SwDecodeTypeCompositeCalculationInteger(Instruction* instr) { ++ int ra = instr->SwRaValue(); ++ int rb = instr->SwRbValue(); ++ int rc = instr->SwRcValue(9, 5); ++ int rd = instr->SwRdValue(); ++ int composite_calculation_op = instr->SwFunctionFieldRaw(12, 10) | instr->OpcodeFieldValue(); ++ ++ switch (composite_calculation_op) { ++ case op_seleq: ++ // Format(instr, "seleq 'ra, 'rb, 'rc(9-5), 'rd"); ++ registers_[rd] = (registers_[ra] == 0) ? registers_[rb] : registers_[rc]; ++ break; ++ case op_selge: ++ // Format(instr, "selge 'ra, 'rb, 'rc(9-5), 'rd"); ++ registers_[rd] = (registers_[ra] >= 0) ? registers_[rb] : registers_[rc]; ++ break; ++ case op_selgt: ++ // Format(instr, "selgt 'ra, 'rb, 'rc(9-5), 'rd"); ++ registers_[rd] = (registers_[ra] > 0) ? registers_[rb] : registers_[rc]; ++ break; ++ case op_selle: ++ // Format(instr, "selle 'ra, 'rb, 'rc(9-5), 'rd"); ++ registers_[rd] = (registers_[ra] <= 0) ? registers_[rb] : registers_[rc]; ++ break; ++ case op_sellt: ++ // Format(instr, "sellt 'ra, 'rb, 'rc(9-5), 'rd"); ++ registers_[rd] = (registers_[ra] < 0) ? registers_[rb] : registers_[rc]; ++ break; ++ case op_selne: ++ // Format(instr, "selne 'ra, 'rb, 'rc(9-5), 'rd"); ++ registers_[rd] = (registers_[ra] != 0) ? registers_[rb] : registers_[rc]; ++ break; ++ case op_sellbc: ++ // Format(instr, "sellbc 'ra, 'rb, 'rc(9-5), 'rd"); ++ registers_[rd] = (0 == (registers_[ra] & 1)) ? registers_[rb] : registers_[rc]; ++ break; ++ case op_sellbs: ++ // Format(instr, "sellbs 'ra, 'rb, 'rc(9-5), 'rd"); ++ registers_[rd] = (registers_[ra] & 1) ? registers_[rb] : registers_[rc]; ++ break; ++ default: ++ printf("a 0x%x \n", instr->OpcodeFieldRaw()); ++ UNREACHABLE(); ++ break; ++ } ++} ++ ++void Simulator::SwDecodeTypeCompositeCalculationIntegerImm(Instruction* instr) ++{ ++ int ra = instr->SwRaValue(); ++ int imm8 = instr->SwImmOrDispFieldRaw(20, 13) >> 13; ++ int rc = instr->SwRcValue(9, 5); ++ int rd = instr->SwRdValue(); ++ int composite_calculation_op = instr->SwFunctionFieldRaw(12, 10) | instr->OpcodeFieldValue(); ++ switch (composite_calculation_op) { ++ case op_seleq_l: ++ // Format(instr, "seleq 'ra, 'imm(20-13), 'rc(9-5), 'rd"); ++ registers_[rd] = (registers_[ra] == 0) ? imm8 : registers_[rc]; ++ break; ++ case op_selge_l: ++ // Format(instr, "selge 'ra, 'imm(20-13), 'rc(9-5), 'rd"); ++ registers_[rd] = (registers_[ra] >= 0) ? imm8 : registers_[rc]; ++ break; ++ case op_selgt_l: ++ // Format(instr, "selgt 'ra, 'imm(20-13), 'rc(9-5), 'rd"); ++ registers_[rd] = (registers_[ra] > 0) ? imm8 : registers_[rc]; ++ break; ++ case op_selle_l: ++ // Format(instr, "selle 'ra, 'imm(20-13), 'rc(9-5), 'rd"); ++ registers_[rd] = (registers_[ra] <= 0) ? imm8 : registers_[rc]; ++ break; ++ case op_sellt_l: ++ // Format(instr, "sellt 'ra, 'imm(20-13), 'rc(9-5), 'rd"); ++ registers_[rd] = (registers_[ra] < 0) ? imm8 : registers_[rc]; ++ break; ++ case op_selne_l: ++ // Format(instr, "selne 'ra, 'imm(20-13), 'rc(9-5), 'rd"); ++ registers_[rd] = (registers_[ra] != 0) ? imm8 : registers_[rc]; ++ break; ++ case op_sellbc_l: ++ // Format(instr, "sellbc 'ra, 'imm(20-13), 'rc(9-5), 'rd"); ++ registers_[rd] = (0 == (registers_[ra] & 1)) ? 
imm8 : registers_[rc]; ++ break; ++ case op_sellbs_l: ++ // Format(instr, "sellbs 'ra, 'imm(20-13), 'rc(9-5), 'rd"); ++ registers_[rd] = (registers_[ra] & 1) ? imm8 : registers_[rc]; ++ break; ++ default: ++ printf("a 0x%x \n", instr->OpcodeFieldRaw()); ++ UNREACHABLE(); ++ break; ++ } ++} ++ ++void Simulator::SwDecodeTypeCompositeCalculationFloatintPoint(Instruction* instr) { ++ int fa = instr->SwFaValue(); ++ int fb = instr->SwFbValue(); ++ int fc = instr->SwFcValue(9, 5); ++ int fd = instr->SwFdValue(); ++ int composite_fp_calculation_op = instr->SwFunctionFieldRaw(15, 10) | instr->OpcodeFieldValue(); ++ ++ switch (composite_fp_calculation_op) { ++ case op_fmas: ++ // Format(instr, "fmas 'fa, 'fb, 'fc(9-5), 'fd"); ++ { ++ float a = DoubleRToFloat(get_fpu_register(fa)); ++ float b = DoubleRToFloat(get_fpu_register(fb)); ++ float c = DoubleRToFloat(get_fpu_register(fc)); ++ set_fpu_register(fd, FloatToDoubleR(a*b+c)); ++ } ++ break; ++ case op_fmad: ++ // Format(instr, "fmad 'fa, 'fb, 'fc(9-5), 'fd"); ++ set_fpu_register_double(fd, get_fpu_register_double(fa) * ++ get_fpu_register_double(fb) + get_fpu_register_double(fc)); ++ break; ++ case op_fmss: ++ // Format(instr, "fmss 'fa, 'fb, 'fc(9-5), 'fd"); ++ { ++ float a = DoubleRToFloat(get_fpu_register(fa)); ++ float b = DoubleRToFloat(get_fpu_register(fb)); ++ float c = DoubleRToFloat(get_fpu_register(fc)); ++ set_fpu_register(fd, FloatToDoubleR(a*b-c)); ++ } ++ break; ++ case op_fmsd: ++ // Format(instr, "fmsd 'fa, 'fb, 'fc(9-5), 'fd"); ++ set_fpu_register_double(fd, get_fpu_register_double(fa) * ++ get_fpu_register_double(fb) - get_fpu_register_double(fc)); ++ break; ++ case op_fnmas: ++ // Format(instr, "fnmas 'fa, 'fb, 'fc(9-5), 'fd"); ++ { ++ float a = DoubleRToFloat(get_fpu_register(fa)); ++ float b = DoubleRToFloat(get_fpu_register(fb)); ++ float c = DoubleRToFloat(get_fpu_register(fc)); ++ set_fpu_register(fd, FloatToDoubleR(-a*b+c)); ++ } ++ break; ++ case op_fnmad: ++ // Format(instr, "fnmad 'fa, 'fb, 'fc(9-5), 'fd"); ++ set_fpu_register_double(fd, -get_fpu_register_double(fa) * ++ get_fpu_register_double(fb) + get_fpu_register_double(fc)); ++ break; ++ case op_fnmss: ++ // Format(instr, "fnmss 'fa, 'fb, 'fc(9-5), 'fd"); ++ { ++ float a = DoubleRToFloat(get_fpu_register(fa)); ++ float b = DoubleRToFloat(get_fpu_register(fb)); ++ float c = DoubleRToFloat(get_fpu_register(fc)); ++ set_fpu_register(fd, FloatToDoubleR(-a*b-c)); ++ } ++ break; ++ case op_fnmsd: ++ Format(instr, "fnmsd 'fa, 'fb, 'fc(9-5), 'fd"); ++ set_fpu_register_double(fd, -get_fpu_register_double(fa) * ++ get_fpu_register_double(fb) - get_fpu_register_double(fc)); ++ break; ++ case op_fseleq: ++ // Format(instr, "fseleq 'fa, 'fb, 'fc(9-5), 'fd"); ++ set_fpu_register(fd, (get_fpu_register_double(fa) == 0) ? get_fpu_register(fb) : get_fpu_register(fc)); ++ break; ++ case op_fselne: ++ // Format(instr, "fselne 'fa, 'fb, 'fc(9-5), 'fd"); ++ set_fpu_register(fd, (get_fpu_register_double(fa) != 0) ? get_fpu_register(fb) : get_fpu_register(fc)); ++ break; ++ case op_fsellt: ++ // Format(instr, "fsellt 'fa, 'fb, 'fc(9-5), 'fd"); ++ set_fpu_register(fd, (get_fpu_register_double(fa) < 0) ? get_fpu_register(fb) : get_fpu_register(fc)); ++ break; ++ case op_fselle: ++ // Format(instr, "fselle 'fa, 'fb, 'fc(9-5), 'fd"); ++ set_fpu_register(fd, (get_fpu_register_double(fa) <= 0) ? get_fpu_register(fb) : get_fpu_register(fc)); ++ break; ++ case op_fselgt: ++ // Format(instr, "fselgt 'fa, 'fb, 'fc(9-5), 'fd"); ++ set_fpu_register(fd, (get_fpu_register_double(fa) > 0) ? 
get_fpu_register(fb) : get_fpu_register(fc));
++      break;
++    case op_fselge:
++      // Format(instr, "fselge 'fa, 'fb, 'fc(9-5), 'fd");
++      set_fpu_register(fd, (get_fpu_register_double(fa) >= 0) ? get_fpu_register(fb) : get_fpu_register(fc));
++      break;
++    default:
++      printf("a 0x%x \n", instr->OpcodeFieldRaw());
++      UNREACHABLE();
++  }
++}
++
++void Simulator::DecodeTypeCompoCalc()
++{
++  Instruction* instr = instr_.instr();
++  switch (instr->OpcodeFieldValue()) {
++    case OP(0x11):  // register form of the integer composite calculation instruction format
++      SwDecodeTypeCompositeCalculationInteger(instr);
++      break;
++    case OP(0x13):  // immediate form of the integer composite calculation instruction format
++      SwDecodeTypeCompositeCalculationIntegerImm(instr);
++      break;
++    case OP(0x19):  // register form of the floating-point composite calculation instruction format
++      SwDecodeTypeCompositeCalculationFloatintPoint(instr);
++      break;
++  }
++}
++
++void Simulator::DecodeTypeExten()
++{
++  UNSUPPORTED();
++}
++
++void Simulator::DecodeTypeSimulatorTrap()
++{
++  SoftwareInterrupt();
++}
++
++// Executes the current instruction.
++void Simulator::InstructionDecode(Instruction* instr) {
++  if (v8::internal::FLAG_check_icache) {
++    CheckICache(i_cache(), instr);
++  }
++  pc_modified_ = false;
++
++  v8::internal::EmbeddedVector<char, 256> buffer;
++
++  if (::v8::internal::FLAG_trace_sim) {
++    SNPrintF(trace_buf_, " ");
++    disasm::NameConverter converter;
++    disasm::Disassembler dasm(converter);
++    // Use a reasonably large buffer.
++    dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
++  }
++
++  instr_ = instr;
++  switch (instr_.InstructionType()) {
++    case Instruction::kImmediateType:
++      DecodeTypeImmediate();
++      break;
++    case Instruction::kJumpType:
++      DecodeTypeJump();
++      break;
++    case Instruction::kSwSyscallType:
++      DecodeTypeSyscall();
++      break;
++    case Instruction::kSwTransferanceType:
++      DecodeTypeTransfer();
++      break;
++    case Instruction::kSwStorageType:
++      DecodeTypeStorage();
++      break;
++    case Instruction::kSwSimpleCalculationType:
++      DecodeTypeSimpleCalc();
++      break;
++    case Instruction::kSwCompositeCalculationType:
++      DecodeTypeCompoCalc();
++      break;
++    case Instruction::kSwExtendType:
++      DecodeTypeExten();
++      break;
++    case Instruction::kSwSimulatorTrap:
++      DecodeTypeSimulatorTrap();
++      break;
++    default:
++      UNSUPPORTED();
++  }
++
++  if (::v8::internal::FLAG_trace_sim) {
++    PrintF(" 0x%08" PRIxPTR " %-44s %s\n",
++           reinterpret_cast<intptr_t>(instr), buffer.begin(),
++           trace_buf_.begin());
++  }
++
++  if (!pc_modified_) {
++    set_register(pc, reinterpret_cast<int64_t>(instr) +
++                         kInstrSize);
++  }
++}
++
++
++
++void Simulator::Execute() {
++  // Get the PC to simulate. Cannot use the accessor here as we need the
++  // raw PC value and not the one used as input to arithmetic instructions.
++  int64_t program_counter = get_pc();
++  if (::v8::internal::FLAG_stop_sim_at == 0) {
++    // Fast version of the dispatch loop without checking whether the simulator
++    // should be stopping at a particular executed instruction.
++    while (program_counter != end_sim_pc) {
++      Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
++      icount_++;
++      InstructionDecode(instr);
++      program_counter = get_pc();
++    }
++  } else {
++    // FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
++    // we reach the particular instruction count.
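++    // Slow path: like the loop above, but icount_ is compared against
++    // FLAG_stop_sim_at on every instruction and the Sw64Debugger shell is
++    // entered once that count is reached.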
++ while (program_counter != end_sim_pc) { ++ Instruction* instr = reinterpret_cast(program_counter); ++ icount_++; ++ PrintF("%ld: \n", icount_); ++ if (icount_ == static_cast(::v8::internal::FLAG_stop_sim_at)) { ++ Sw64Debugger dbg(this); ++ dbg.Debug(); ++ } else { ++ InstructionDecode(instr); ++ } ++ program_counter = get_pc(); ++ } ++ } ++} ++ ++void Simulator::CallInternal(Address entry) { ++ // Adjust JS-based stack limit to C-based stack limit. ++ isolate_->stack_guard()->AdjustStackLimitForSimulator(); ++ ++ // Prepare to execute the code at entry. ++ set_register(pc, static_cast(entry)); ++ // Put down marker for end of simulation. The simulator will stop simulation ++ // when the PC reaches this value. By saving the "end simulation" value into ++ // the LR the simulation stops when returning to this call point. ++ set_register(ra, end_sim_pc); ++ ++ // Remember the values of callee-saved registers. ++ // The code below assumes that r9 is not used as sb (static base) in ++ // simulator code and therefore is regarded as a callee-saved register. ++ int64_t s0_val = get_register(s0); ++ int64_t s1_val = get_register(s1); ++ int64_t s2_val = get_register(s2); ++ int64_t s3_val = get_register(s3); ++ int64_t s4_val = get_register(s4); ++ int64_t s5_val = get_register(s5); ++ int64_t fp_val = get_register(fp); ++ ++ // Set up the callee-saved registers with a known value. To be able to check ++ // that they are preserved properly across JS execution. ++ int64_t callee_saved_value = icount_; ++ set_register(s0, callee_saved_value); ++ set_register(s1, callee_saved_value); ++ set_register(s2, callee_saved_value); ++ set_register(s3, callee_saved_value); ++ set_register(s4, callee_saved_value); ++ set_register(s5, callee_saved_value); ++ set_register(fp, callee_saved_value); ++ ++ // Start the simulation. ++ Execute(); ++ ++ // Check that the callee-saved registers have been preserved. ++ CHECK_EQ(callee_saved_value, get_register(s0)); ++ CHECK_EQ(callee_saved_value, get_register(s1)); ++ CHECK_EQ(callee_saved_value, get_register(s2)); ++ CHECK_EQ(callee_saved_value, get_register(s3)); ++ CHECK_EQ(callee_saved_value, get_register(s4)); ++ CHECK_EQ(callee_saved_value, get_register(s5)); ++ CHECK_EQ(callee_saved_value, get_register(fp)); ++ ++ // Restore callee-saved registers with the original value. ++ set_register(s0, s0_val); ++ set_register(s1, s1_val); ++ set_register(s2, s2_val); ++ set_register(s3, s3_val); ++ set_register(s4, s4_val); ++ set_register(s5, s5_val); ++ set_register(fp, fp_val); ++} ++ ++intptr_t Simulator::CallImpl(Address entry, int argument_count, ++ const intptr_t* arguments) { ++ constexpr int kRegisterPassedArguments = 6; ++ // Set up arguments. ++ ++ // First six arguments passed in registers. ++ int reg_arg_count = std::min(kRegisterPassedArguments, argument_count); ++ if (reg_arg_count > 0) set_register(a0, arguments[0]); ++ if (reg_arg_count > 1) set_register(a1, arguments[1]); ++ if (reg_arg_count > 2) set_register(a2, arguments[2]); ++ if (reg_arg_count > 3) set_register(a3, arguments[3]); ++ if (reg_arg_count > 4) set_register(a4, arguments[4]); ++ if (reg_arg_count > 5) set_register(a5, arguments[5]); ++ ++ // Remaining arguments passed on stack. ++ int64_t original_stack = get_register(sp); ++ // Compute position of stack on entry to generated code. 
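++  // Arguments beyond the first six are copied into a block carved out just
++  // below the caller's sp (including the kCArgsSlotsSize scratch area),
++  // re-aligned to the platform's activation frame alignment.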
++  int stack_args_count = argument_count - reg_arg_count;
++  int stack_args_size = stack_args_count * sizeof(*arguments) + kCArgsSlotsSize;
++  int64_t entry_stack = original_stack - stack_args_size;
++
++  if (base::OS::ActivationFrameAlignment() != 0) {
++    entry_stack &= -base::OS::ActivationFrameAlignment();
++  }
++  // Store remaining arguments on stack, from low to high memory.
++  intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
++  memcpy(stack_argument + kCArgSlotCount, arguments + reg_arg_count,
++         stack_args_count * sizeof(*arguments));
++  set_register(sp, entry_stack);
++
++  CallInternal(entry);
++
++  // Pop stack passed arguments.
++  CHECK_EQ(entry_stack, get_register(sp));
++  set_register(sp, original_stack);
++
++  return get_register(v0);
++}
++
++double Simulator::CallFP(Address entry, double d0, double d1) {
++  if (!IsSw64SoftFloatABI) {
++    const FPURegister fparg2 = f13;
++    set_fpu_register_double(f12, d0);
++    set_fpu_register_double(fparg2, d1);
++  } else {
++    int buffer[2];
++    DCHECK(sizeof(buffer[0]) * 2 == sizeof(d0));
++    memcpy(buffer, &d0, sizeof(d0));
++    set_dw_register(a0, buffer);
++    memcpy(buffer, &d1, sizeof(d1));
++    set_dw_register(a2, buffer);
++  }
++  CallInternal(entry);
++  if (!IsSw64SoftFloatABI) {
++    return get_fpu_register_double(f0);
++  } else {
++    return get_double_from_register_pair(v0);
++  }
++}
++
++
++uintptr_t Simulator::PushAddress(uintptr_t address) {
++  int64_t new_sp = get_register(sp) - sizeof(uintptr_t);
++  uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
++  *stack_slot = address;
++  set_register(sp, new_sp);
++  return new_sp;
++}
++
++
++uintptr_t Simulator::PopAddress() {
++  int64_t current_sp = get_register(sp);
++  uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
++  uintptr_t address = *stack_slot;
++  set_register(sp, current_sp + sizeof(uintptr_t));
++  return address;
++}
++
++
++#undef UNSUPPORTED
++}  // namespace internal
++}  // namespace v8
++
++#endif  // USE_SIMULATOR
+diff --git a/src/3rdparty/chromium/v8/src/execution/sw64/simulator-sw64.h b/src/3rdparty/chromium/v8/src/execution/sw64/simulator-sw64.h
+new file mode 100755
+index 000000000..cc850b255
+--- /dev/null
++++ b/src/3rdparty/chromium/v8/src/execution/sw64/simulator-sw64.h
+@@ -0,0 +1,620 @@
++// Copyright 2011 the V8 project authors. All rights reserved.
++// Use of this source code is governed by a BSD-style license that can be
++// found in the LICENSE file.
++
++// Declares a Simulator for SW64 instructions if we are not generating a native
++// SW64 binary. This Simulator allows us to run and debug SW64 code generation
++// on regular desktop machines.
++// V8 calls into generated code via the GeneratedCode wrapper,
++// which will start execution in the Simulator or forwards to the real entry
++// on a SW64 HW platform.
++
++#ifndef V8_EXECUTION_SW64_SIMULATOR_SW64_H_
++#define V8_EXECUTION_SW64_SIMULATOR_SW64_H_
++
++// globals.h defines USE_SIMULATOR.
++#include "src/common/globals.h"
++
++template <typename T>
++int Compare(const T& a, const T& b) {
++  if (a == b)
++    return 0;
++  else if (a < b)
++    return -1;
++  else
++    return 1;
++}
++
++// Returns the negative absolute value of its argument.
++template <typename T, typename = typename std::enable_if<std::is_signed<T>::value>::type>
++T Nabs(T a) {
++  return a < 0 ? a : -a;
++}
++
++#if defined(USE_SIMULATOR)
++// Running with a simulator.
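++// When USE_SIMULATOR is defined, the declarations below model the SW64 core
++// in software (integer and FPU register files, a simulated stack and a small
++// instruction cache) so that generated SW64 code can be run and debugged on
++// the build host; on native SW64 hardware this block is compiled out.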
++ ++#include "src/base/hashmap.h" ++#include "src/codegen/assembler.h" ++#include "src/codegen/sw64/constants-sw64.h" ++#include "src/execution/simulator-base.h" ++#include "src/utils/allocation.h" ++ ++namespace v8 { ++namespace internal { ++ ++// ----------------------------------------------------------------------------- ++// Utility functions ++ ++class CachePage { ++ public: ++ static const int LINE_VALID = 0; ++ static const int LINE_INVALID = 1; ++ ++ static const int kPageShift = 12; ++ static const int kPageSize = 1 << kPageShift; ++ static const int kPageMask = kPageSize - 1; ++ static const int kLineShift = 2; // The cache line is only 4 bytes right now. ++ static const int kLineLength = 1 << kLineShift; ++ static const int kLineMask = kLineLength - 1; ++ ++ CachePage() { ++ memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); ++ } ++ ++ char* ValidityByte(int offset) { ++ return &validity_map_[offset >> kLineShift]; ++ } ++ ++ char* CachedData(int offset) { ++ return &data_[offset]; ++ } ++ ++ private: ++ char data_[kPageSize]; // The cached data. ++ static const int kValidityMapSize = kPageSize >> kLineShift; ++ char validity_map_[kValidityMapSize]; // One byte per line. ++}; ++ ++class SimInstructionBase : public InstructionBase { ++ public: ++ Type InstructionType() const { return type_; } ++ inline Instruction* instr() const { return instr_; } ++ inline int32_t operand() const { return operand_; } ++ ++ protected: ++ SimInstructionBase() : operand_(-1), instr_(nullptr), type_(kUnsupported) {} ++ explicit SimInstructionBase(Instruction* instr) {} ++ ++ int32_t operand_; ++ Instruction* instr_; ++ Type type_; ++ ++ private: ++ DISALLOW_ASSIGN(SimInstructionBase); ++}; ++ ++class SimInstruction : public InstructionGetters { ++ public: ++ SimInstruction() {} ++ ++ explicit SimInstruction(Instruction* instr) { *this = instr; } ++ ++ SimInstruction& operator=(Instruction* instr) { ++ operand_ = *reinterpret_cast(instr); ++ instr_ = instr; ++ type_ = InstructionBase::InstructionType(); ++ DCHECK(reinterpret_cast(&operand_) == this); ++ return *this; ++ } ++}; ++ ++class Simulator : public SimulatorBase { ++ public: ++ friend class Sw64Debugger; ++ ++ // Registers are declared in order. See SMRL chapter 2. ++ enum Register { ++ no_reg = -1, ++ v0, ++ t0, t1, t2, t3, t4, t5, t6, t7, ++ s0, s1, s2, s3, s4, s5, s6, ++ a0, a1, a2, a3, a4, a5, ++ t8, t9, t10, t11, ++ ra, ++ t12, ++ at, ++ gp, ++ sp, ++ zero_reg, ++ // LO, HI, and pc. ++ LO, ++ HI, ++ pc, // pc must be the last register. ++ kNumSimuRegisters, ++ // alias ++ fp = s6, ++ pv = t12 ++ }; ++ ++ // Coprocessor registers. ++ // Generated code will always use doubles. So we will only use even registers. ++ enum FPURegister { ++ f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, ++ f12, f13, f14, f15, // f12 and f14 are arguments FPURegisters. ++ f16, f17, f18, f19, f20, f21, f22, f23, f24, f25, ++ f26, f27, f28, f29, f30, f31, ++ kNumFPURegisters ++ }; ++ ++ // MSA registers ++ enum MSARegister { ++ w0, ++ w1, ++ w2, ++ w3, ++ w4, ++ w5, ++ w6, ++ w7, ++ w8, ++ w9, ++ w10, ++ w11, ++ w12, ++ w13, ++ w14, ++ w15, ++ w16, ++ w17, ++ w18, ++ w19, ++ w20, ++ w21, ++ w22, ++ w23, ++ w24, ++ w25, ++ w26, ++ w27, ++ w28, ++ w29, ++ w30, ++ w31, ++ kNumMSARegisters ++ }; ++ ++ explicit Simulator(Isolate* isolate); ++ ~Simulator(); ++ ++ // The currently executing Simulator instance. Potentially there can be one ++ // for each native thread. 
++ V8_EXPORT_PRIVATE static Simulator* current(v8::internal::Isolate* isolate); ++ ++ // Accessors for register state. Reading the pc value adheres to the SW64 ++ // architecture specification and is off by a 8 from the currently executing ++ // instruction. ++ void set_register(int reg, int64_t value); ++ void set_register_word(int reg, int32_t value); ++ void set_dw_register(int dreg, const int* dbl); ++ int64_t get_register(int64_t reg) const; ++ double get_double_from_register_pair(int reg); ++ // Same for FPURegisters. ++ void set_fpu_register(int fpureg, int64_t value); ++ void set_fpu_register_word(int fpureg, int32_t value); ++ void set_fpu_register_hi_word(int fpureg, int32_t value); ++ void set_fpu_register_float(int fpureg, float value); ++ void set_fpu_register_double(int fpureg, double value); ++ void set_fpu_register_invalid_result64(float original, float rounded); ++ void set_fpu_register_invalid_result(float original, float rounded); ++ void set_fpu_register_word_invalid_result(float original, float rounded); ++ void set_fpu_register_invalid_result64(double original, double rounded); ++ void set_fpu_register_invalid_result(double original, double rounded); ++ void set_fpu_register_word_invalid_result(double original, double rounded); ++ int64_t get_fpu_register(int fpureg) const; ++ int32_t get_fpu_register_word(int fpureg) const; ++ int32_t get_fpu_register_signed_word(int fpureg) const; ++ int32_t get_fpu_register_hi_word(int fpureg) const; ++ float get_fpu_register_float(int fpureg) const; ++ double get_fpu_register_double(int fpureg) const; ++ template ++ void get_msa_register(int wreg, T* value); ++ template ++ void set_msa_register(int wreg, const T* value); ++ void set_fcsr_bit(uint32_t cc, bool value); ++ bool test_fcsr_bit(uint32_t cc); ++ bool set_fcsr_round_error(double original, double rounded); ++ bool set_fcsr_round64_error(double original, double rounded); ++ bool set_fcsr_round_error(float original, float rounded); ++ bool set_fcsr_round64_error(float original, float rounded); ++ void round_according_to_fcsr(double toRound, double* rounded, ++ int32_t* rounded_int, double fs); ++ void round64_according_to_fcsr(double toRound, double* rounded, ++ int64_t* rounded_int, double fs); ++ void round_according_to_fcsr(float toRound, float* rounded, ++ int32_t* rounded_int, float fs); ++ void round64_according_to_fcsr(float toRound, float* rounded, ++ int64_t* rounded_int, float fs); ++ template ++ void round_according_to_msacsr(T_fp toRound, T_fp* rounded, ++ T_int* rounded_int); ++ void set_fcsr_rounding_mode(FPURoundingMode mode); ++ void set_msacsr_rounding_mode(FPURoundingMode mode); ++ unsigned int get_fcsr_rounding_mode(); ++ unsigned int get_msacsr_rounding_mode(); ++ // Special case of set_register and get_register to access the raw PC value. ++ void set_pc(int64_t value); ++ int64_t get_pc() const; ++ ++ Address get_sp() const { return static_cast
(get_register(sp)); } ++ ++ // Accessor to the internal simulator stack area. ++ uintptr_t StackLimit(uintptr_t c_limit) const; ++ ++ // Executes SW64 instructions until the PC reaches end_sim_pc. ++ void Execute(); ++ ++ template ++ Return Call(Address entry, Args... args) { ++ return VariadicCall(this, &Simulator::CallImpl, entry, args...); ++ } ++ ++ // Alternative: call a 2-argument double function. ++ double CallFP(Address entry, double d0, double d1); ++ ++ // Push an address onto the JS stack. ++ uintptr_t PushAddress(uintptr_t address); ++ ++ // Pop an address from the JS stack. ++ uintptr_t PopAddress(); ++ ++ // Debugger input. ++ void set_last_debugger_input(char* input); ++ char* last_debugger_input() { return last_debugger_input_; } ++ ++ // Redirection support. ++ static void SetRedirectInstruction(Instruction* instruction); ++ ++ // ICache checking. ++ static bool ICacheMatch(void* one, void* two); ++ static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start, ++ size_t size); ++ ++ // Returns true if pc register contains one of the 'special_values' defined ++ // below (bad_ra, end_sim_pc). ++ bool has_bad_pc() const; ++ ++ private: ++ enum special_values { ++ // Known bad pc value to ensure that the simulator does not execute ++ // without being properly setup. ++ bad_ra = -1, ++ // A pc value used to signal the simulator to stop execution. Generally ++ // the ra is set to this value on transition from native C code to ++ // simulated execution, so that the simulator can "return" to the native ++ // C code. ++ end_sim_pc = -2, ++ // Unpredictable value. ++ Unpredictable = 0xbadbeaf ++ }; ++ ++ V8_EXPORT_PRIVATE intptr_t CallImpl(Address entry, int argument_count, ++ const intptr_t* arguments); ++ ++ // Unsupported instructions use Format to print an error and stop execution. ++ void Format(Instruction* instr, const char* format); ++ ++ // Helpers for data value tracing. ++ enum TraceType { ++ BYTE, ++ HALF, ++ WORD, ++ DWORD, ++ FLOAT, ++ DOUBLE, ++ FLOAT_DOUBLE, ++ WORD_DWORD ++ }; ++ ++ // MSA Data Format ++ enum MSADataFormat { MSA_VECT = 0, MSA_BYTE, MSA_HALF, MSA_WORD, MSA_DWORD }; ++ union msa_reg_t { ++ int8_t b[kMSALanesByte]; ++ uint8_t ub[kMSALanesByte]; ++ int16_t h[kMSALanesHalf]; ++ uint16_t uh[kMSALanesHalf]; ++ int32_t w[kMSALanesWord]; ++ uint32_t uw[kMSALanesWord]; ++ int64_t d[kMSALanesDword]; ++ uint64_t ud[kMSALanesDword]; ++ }; ++ ++ // Read and write memory. ++ inline uint32_t ReadBU(int64_t addr); ++ inline int32_t ReadB(int64_t addr); ++ inline void WriteB(int64_t addr, uint8_t value); ++ inline void WriteB(int64_t addr, int8_t value); ++ ++ inline uint16_t ReadHU(int64_t addr, Instruction* instr); ++ inline int16_t ReadH(int64_t addr, Instruction* instr); ++ // Note: Overloaded on the sign of the value. 
++ inline void WriteH(int64_t addr, uint16_t value, Instruction* instr); ++ inline void WriteH(int64_t addr, int16_t value, Instruction* instr); ++ ++ inline uint32_t ReadWU(int64_t addr, Instruction* instr); ++ inline int32_t ReadW(int64_t addr, Instruction* instr, TraceType t = WORD); ++ inline void WriteW(int64_t addr, int32_t value, Instruction* instr); ++ inline int64_t Read2W(int64_t addr, Instruction* instr); ++ inline void Write2W(int64_t addr, int64_t value, Instruction* instr); ++ ++ inline double ReadD(int64_t addr, Instruction* instr); ++ inline void WriteD(int64_t addr, double value, Instruction* instr); ++ ++ template ++ T ReadMem(int64_t addr, Instruction* instr); ++ template ++ void WriteMem(int64_t addr, T value, Instruction* instr); ++ ++ // Helper for debugging memory access. ++ inline void DieOrDebug(); ++ ++ void TraceRegWr(int64_t value, TraceType t = DWORD); ++ template ++ void TraceMSARegWr(T* value, TraceType t); ++ template ++ void TraceMSARegWr(T* value); ++ void TraceMemWr(int64_t addr, int64_t value, TraceType t); ++ void TraceMemRd(int64_t addr, int64_t value, TraceType t = DWORD); ++ template ++ void TraceMemRd(int64_t addr, T value); ++ template ++ void TraceMemWr(int64_t addr, T value); ++ ++ // Operations depending on endianness. ++ // Get Double Higher / Lower word. ++ inline int32_t GetDoubleHIW(double* addr); ++ inline int32_t GetDoubleLOW(double* addr); ++ // Set Double Higher / Lower word. ++ inline int32_t SetDoubleHIW(double* addr); ++ inline int32_t SetDoubleLOW(double* addr); ++ ++ SimInstruction instr_; ++ ++ // functions called from DecodeTypeRegister. ++ void DecodeTypeRegisterCOP1(); ++ ++ void DecodeTypeRegisterCOP1X(); ++ ++ void DecodeTypeRegisterSPECIAL(); ++ ++ ++ void DecodeTypeRegisterSPECIAL2(); ++ ++ void DecodeTypeRegisterSPECIAL3(); ++ ++ void DecodeTypeRegisterSRsType(); ++ ++ void DecodeTypeRegisterDRsType(); ++ ++ void DecodeTypeRegisterWRsType(); ++ ++ void DecodeTypeRegisterLRsType(); ++ ++ int DecodeMsaDataFormat(); ++ void DecodeTypeMsaI8(); ++ void DecodeTypeMsaI5(); ++ void DecodeTypeMsaI10(); ++ void DecodeTypeMsaELM(); ++ void DecodeTypeMsaBIT(); ++ void DecodeTypeMsaMI10(); ++ void DecodeTypeMsa3R(); ++ void DecodeTypeMsa3RF(); ++ void DecodeTypeMsaVec(); ++ void DecodeTypeMsa2R(); ++ void DecodeTypeMsa2RF(); ++ template ++ T MsaI5InstrHelper(uint32_t opcode, T ws, int32_t i5); ++ template ++ T MsaBitInstrHelper(uint32_t opcode, T wd, T ws, int32_t m); ++ template ++ T Msa3RInstrHelper(uint32_t opcode, T wd, T ws, T wt); ++ ++ // Executing is handled based on the instruction type. 
++ void DecodeTypeRegister(); ++ ++ inline int32_t rs_reg() const { return instr_.RsValue(); } ++ inline int64_t rs() const { return get_register(rs_reg()); } ++ inline uint64_t rs_u() const { ++ return static_cast(get_register(rs_reg())); ++ } ++ inline int32_t rt_reg() const { return instr_.RtValue(); } ++ inline int64_t rt() const { return get_register(rt_reg()); } ++ inline uint64_t rt_u() const { ++ return static_cast(get_register(rt_reg())); ++ } ++ inline int32_t rd_reg() const { return instr_.RdValue(); } ++ inline int32_t fr_reg() const { return instr_.FrValue(); } ++ inline int32_t fs_reg() const { return instr_.FsValue(); } ++ inline int32_t ft_reg() const { return instr_.FtValue(); } ++ inline int32_t fd_reg() const { return instr_.FdValue(); } ++ inline int32_t sa() const { return instr_.SaValue(); } ++ inline int32_t lsa_sa() const { return instr_.LsaSaValue(); } ++ inline int32_t ws_reg() const { return instr_.WsValue(); } ++ inline int32_t wt_reg() const { return instr_.WtValue(); } ++ inline int32_t wd_reg() const { return instr_.WdValue(); } ++ ++ inline void SetResult(const int32_t rd_reg, const int64_t alu_out) { ++ set_register(rd_reg, alu_out); ++ TraceRegWr(alu_out); ++ } ++ ++ inline void SetFPUWordResult(int32_t fd_reg, int32_t alu_out) { ++ set_fpu_register_word(fd_reg, alu_out); ++ TraceRegWr(get_fpu_register(fd_reg), WORD); ++ } ++ ++ inline void SetFPUWordResult2(int32_t fd_reg, int32_t alu_out) { ++ set_fpu_register_word(fd_reg, alu_out); ++ TraceRegWr(get_fpu_register(fd_reg)); ++ } ++ ++ inline void SetFPUResult(int32_t fd_reg, int64_t alu_out) { ++ set_fpu_register(fd_reg, alu_out); ++ TraceRegWr(get_fpu_register(fd_reg)); ++ } ++ ++ inline void SetFPUResult2(int32_t fd_reg, int64_t alu_out) { ++ set_fpu_register(fd_reg, alu_out); ++ TraceRegWr(get_fpu_register(fd_reg), DOUBLE); ++ } ++ ++ inline void SetFPUFloatResult(int32_t fd_reg, float alu_out) { ++ set_fpu_register_float(fd_reg, alu_out); ++ TraceRegWr(get_fpu_register(fd_reg), FLOAT); ++ } ++ ++ inline void SetFPUDoubleResult(int32_t fd_reg, double alu_out) { ++ set_fpu_register_double(fd_reg, alu_out); ++ TraceRegWr(get_fpu_register(fd_reg), DOUBLE); ++ } ++ ++ void DecodeTypeImmediate(); ++ void DecodeTypeJump(); ++ void DecodeTypeSyscall(); ++ void DecodeTypeTransfer(); ++ void DecodeTypeStorage(); ++ void DecodeTypeSimpleCalc(); ++ void DecodeTypeCompoCalc(); ++ void DecodeTypeExten(); ++ void DecodeTypeSimulatorTrap(); ++ void SwDecodeTypeCompositeCalculationInteger(Instruction* instr); ++ void SwDecodeTypeCompositeCalculationIntegerImm(Instruction* instr); ++ void SwDecodeTypeCompositeCalculationFloatintPoint(Instruction* instr); ++ ++ // Used for breakpoints and traps. ++ void SoftwareInterrupt(); ++ ++ // Compact branch guard. ++ void CheckForbiddenSlot(int64_t current_pc) { ++ Instruction* instr_after_compact_branch = ++ reinterpret_cast(current_pc + kInstrSize); ++ if (instr_after_compact_branch->IsForbiddenAfterBranch()) { ++ FATAL( ++ "Error: Unexpected instruction 0x%08x immediately after a " ++ "compact branch instruction.", ++ *reinterpret_cast(instr_after_compact_branch)); ++ } ++ } ++ ++ // Stop helper functions. 
++ bool IsWatchpoint(uint64_t code); ++ void PrintWatchpoint(uint64_t code); ++ void HandleStop(uint64_t code, Instruction* instr); ++ bool IsStopInstruction(Instruction* instr); ++ bool IsEnabledStop(uint64_t code); ++ void EnableStop(uint64_t code); ++ void DisableStop(uint64_t code); ++ void IncreaseStopCounter(uint64_t code); ++ void PrintStopInfo(uint64_t code); ++ ++ ++ // Executes one instruction. ++ void InstructionDecode(Instruction* instr); ++ // Execute one instruction placed in a branch delay slot. ++ void BranchDelayInstructionDecode(Instruction* instr) { ++ if (instr->InstructionBits() == nopInstr) { ++ // Short-cut generic nop instructions. They are always valid and they ++ // never change the simulator state. ++ return; ++ } ++ ++ if (instr->IsForbiddenAfterBranch()) { ++ FATAL("Eror:Unexpected %i opcode in a branch delay slot.", ++ instr->OpcodeValue()); ++ } ++ InstructionDecode(instr); ++ SNPrintF(trace_buf_, " "); ++ } ++ ++ // ICache. ++ static void CheckICache(base::CustomMatcherHashMap* i_cache, ++ Instruction* instr); ++ static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start, ++ size_t size); ++ static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache, ++ void* page); ++ ++ enum Exception { ++ none, ++ kIntegerOverflow, ++ kIntegerUnderflow, ++ kDivideByZero, ++ kNumExceptions ++ }; ++ ++ // Exceptions. ++ void SignalException(Exception e); ++ ++ // Handle arguments and return value for runtime FP functions. ++ void GetFpArgs(double* x, double* y, int32_t* z); ++ void SetFpResult(const double& result); ++ ++ void CallInternal(Address entry); ++ ++ // Architecture state. ++ // Registers. ++ int64_t registers_[kNumSimuRegisters]; ++ // Coprocessor Registers. ++ // Note: FPUregisters_[] array is increased to 64 * 8B = 32 * 16B in ++ // order to support MSA registers ++ int64_t FPUregisters_[kNumFPURegisters * 2]; ++ // FPU control register. ++ uint64_t FCSR_; ++ // MSA control register. ++ uint64_t MSACSR_; ++ ++ //for lock instruction ++ int64_t lock_valid; ++ int64_t lock_success; ++ int64_t lock_flag; ++ int64_t lock_register_padd; ++ int64_t lock_register_flag; ++ ++ // Simulator support. ++ // Allocate 1MB for stack. ++ size_t stack_size_; ++ char* stack_; ++ bool pc_modified_; ++ int64_t icount_; ++ int break_count_; ++ EmbeddedVector trace_buf_; ++ ++ // Debugger input. ++ char* last_debugger_input_; ++ ++ v8::internal::Isolate* isolate_; ++ ++ // Registered breakpoints. ++ Instruction* break_pc_; ++ Instr break_instr_; ++ ++ // Stop is disabled if bit 31 is set. ++ static const uint32_t kStopDisabledBit = 1 << 31; ++ ++ // A stop is enabled, meaning the simulator will stop when meeting the ++ // instruction, if bit 31 of watched_stops_[code].count is unset. ++ // The value watched_stops_[code].count & ~(1 << 31) indicates how many times ++ // the breakpoint was hit or gone through. 
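++  // For example, a stop that was hit three times and then disabled is stored
++  // as count == kStopDisabledBit | 3; clearing bit 31 re-enables it.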
++ struct StopCountAndDesc { ++ uint32_t count; ++ char* desc; ++ }; ++ StopCountAndDesc watched_stops_[kMaxStopCode + 1]; ++}; ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // defined(USE_SIMULATOR) ++#endif // V8_EXECUTION_SW64_SIMULATOR_SW64_H_ +diff --git a/src/3rdparty/chromium/v8/src/flags/flag-definitions.h b/src/3rdparty/chromium/v8/src/flags/flag-definitions.h +index c3f360cdf..2d9b55c60 100644 +--- a/src/3rdparty/chromium/v8/src/flags/flag-definitions.h ++++ b/src/3rdparty/chromium/v8/src/flags/flag-definitions.h +@@ -1590,7 +1590,7 @@ DEFINE_NEG_IMPLICATION(perf_prof, wasm_write_protect_code_memory) + + // --perf-prof-unwinding-info is available only on selected architectures. + #if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \ +- !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64 ++ !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_SW64 + #undef DEFINE_PERF_PROF_BOOL + #define DEFINE_PERF_PROF_BOOL(nam, cmt) DEFINE_BOOL_READONLY(nam, false, cmt) + #undef DEFINE_PERF_PROF_IMPLICATION +diff --git a/src/3rdparty/chromium/v8/src/interpreter/interpreter-assembler.cc b/src/3rdparty/chromium/v8/src/interpreter/interpreter-assembler.cc +index eaea1c91d..361246869 100644 +--- a/src/3rdparty/chromium/v8/src/interpreter/interpreter-assembler.cc ++++ b/src/3rdparty/chromium/v8/src/interpreter/interpreter-assembler.cc +@@ -1488,7 +1488,7 @@ bool InterpreterAssembler::TargetSupportsUnalignedAccess() { + return false; + #elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \ + V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC || \ +- V8_TARGET_ARCH_PPC64 ++ V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_SW64 + return true; + #else + #error "Unknown Architecture" +diff --git a/src/3rdparty/chromium/v8/src/libsampler/sampler.cc b/src/3rdparty/chromium/v8/src/libsampler/sampler.cc +index e2091ceb3..a9499d561 100644 +--- a/src/3rdparty/chromium/v8/src/libsampler/sampler.cc ++++ b/src/3rdparty/chromium/v8/src/libsampler/sampler.cc +@@ -415,6 +415,10 @@ void SignalHandler::FillRegisterState(void* context, RegisterState* state) { + state->pc = reinterpret_cast(mcontext.pc); + state->sp = reinterpret_cast(mcontext.gregs[29]); + state->fp = reinterpret_cast(mcontext.gregs[30]); ++#elif V8_HOST_ARCH_SW64 ++ state->pc = reinterpret_cast(mcontext.sc_pc); ++ state->sp = reinterpret_cast(mcontext.sc_regs[30]); ++ state->fp = reinterpret_cast(mcontext.sc_regs[15]); + #elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64 + #if V8_LIBC_GLIBC + state->pc = reinterpret_cast(ucontext->uc_mcontext.regs->nip); +diff --git a/src/3rdparty/chromium/v8/src/logging/log.cc b/src/3rdparty/chromium/v8/src/logging/log.cc +index dc79ffda5..ff0aecba1 100644 +--- a/src/3rdparty/chromium/v8/src/logging/log.cc ++++ b/src/3rdparty/chromium/v8/src/logging/log.cc +@@ -592,6 +592,8 @@ void LowLevelLogger::LogCodeInfo() { + const char arch[] = "arm64"; + #elif V8_TARGET_ARCH_S390 + const char arch[] = "s390"; ++#elif V8_TARGET_ARCH_SW64 ++ const char arch[] = "sw_64"; + #else + const char arch[] = "unknown"; + #endif +diff --git a/src/3rdparty/chromium/v8/src/objects/code.h b/src/3rdparty/chromium/v8/src/objects/code.h +index d80e72fa0..444d04a27 100644 +--- a/src/3rdparty/chromium/v8/src/objects/code.h ++++ b/src/3rdparty/chromium/v8/src/objects/code.h +@@ -425,6 +425,8 @@ class Code : public HeapObject { + FLAG_enable_embedded_constant_pool ? 28 : 0; + #elif V8_TARGET_ARCH_S390X + static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 
20 : 0; ++#elif V8_TARGET_ARCH_SW64 ++ static constexpr int kHeaderPaddingSize = FLAG_enable_embedded_constant_pool ? 28 : 0; + #else + #error Unknown architecture. + #endif +diff --git a/src/3rdparty/chromium/v8/src/profiler/tick-sample.cc b/src/3rdparty/chromium/v8/src/profiler/tick-sample.cc +index 00bff91cd..2f4706af0 100644 +--- a/src/3rdparty/chromium/v8/src/profiler/tick-sample.cc ++++ b/src/3rdparty/chromium/v8/src/profiler/tick-sample.cc +@@ -117,6 +117,12 @@ bool SimulatorHelper::FillRegisters(Isolate* isolate, + state->sp = reinterpret_cast(simulator->get_register(Simulator::sp)); + state->fp = reinterpret_cast(simulator->get_register(Simulator::fp)); + state->lr = reinterpret_cast(simulator->get_lr()); ++#elif V8_TARGET_ARCH_SW64 ++ if (!simulator->has_bad_pc()) { ++ state->pc = reinterpret_cast(simulator->get_pc()); ++ } ++ state->sp = reinterpret_cast(simulator->get_register(Simulator::sp)); ++ state->fp = reinterpret_cast(simulator->get_register(Simulator::fp)); + #elif V8_TARGET_ARCH_S390 + if (!simulator->has_bad_pc()) { + state->pc = reinterpret_cast(simulator->get_pc()); +diff --git a/src/3rdparty/chromium/v8/src/regexp/regexp-macro-assembler-arch.h b/src/3rdparty/chromium/v8/src/regexp/regexp-macro-assembler-arch.h +index 8ec12a0ae..32500421d 100644 +--- a/src/3rdparty/chromium/v8/src/regexp/regexp-macro-assembler-arch.h ++++ b/src/3rdparty/chromium/v8/src/regexp/regexp-macro-assembler-arch.h +@@ -23,6 +23,8 @@ + #include "src/regexp/mips64/regexp-macro-assembler-mips64.h" + #elif V8_TARGET_ARCH_S390 + #include "src/regexp/s390/regexp-macro-assembler-s390.h" ++#elif V8_TARGET_ARCH_SW64 ++#include "src/regexp/sw64/regexp-macro-assembler-sw64.h" + #else + #error Unsupported target architecture. + #endif +diff --git a/src/3rdparty/chromium/v8/src/regexp/regexp-macro-assembler.h b/src/3rdparty/chromium/v8/src/regexp/regexp-macro-assembler.h +index e83446cdc..32edcec1f 100644 +--- a/src/3rdparty/chromium/v8/src/regexp/regexp-macro-assembler.h ++++ b/src/3rdparty/chromium/v8/src/regexp/regexp-macro-assembler.h +@@ -47,6 +47,7 @@ class RegExpMacroAssembler { + kPPCImplementation, + kX64Implementation, + kX87Implementation, ++ kSW64Implementation, + kBytecodeImplementation + }; + +diff --git a/src/3rdparty/chromium/v8/src/regexp/regexp.cc b/src/3rdparty/chromium/v8/src/regexp/regexp.cc +index 4319990a3..3fde03a8f 100644 +--- a/src/3rdparty/chromium/v8/src/regexp/regexp.cc ++++ b/src/3rdparty/chromium/v8/src/regexp/regexp.cc +@@ -854,6 +854,9 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data, + #elif V8_TARGET_ARCH_MIPS64 + macro_assembler.reset(new RegExpMacroAssemblerMIPS( + isolate, zone, mode, (data->capture_count + 1) * 2)); ++#elif V8_TARGET_ARCH_SW64 ++ macro_assembler.reset(new RegExpMacroAssemblerSW64( ++ isolate, zone, mode, (data->capture_count + 1) * 2)); + #else + #error "Unsupported architecture" + #endif +diff --git a/src/3rdparty/chromium/v8/src/regexp/sw64/OWNERS b/src/3rdparty/chromium/v8/src/regexp/sw64/OWNERS +new file mode 100755 +index 000000000..42582e993 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/regexp/sw64/OWNERS +@@ -0,0 +1,3 @@ ++ivica.bogosavljevic@sw64.com ++Miran.Karic@sw64.com ++sreten.kovacevic@sw64.com +diff --git a/src/3rdparty/chromium/v8/src/regexp/sw64/regexp-macro-assembler-sw64.cc b/src/3rdparty/chromium/v8/src/regexp/sw64/regexp-macro-assembler-sw64.cc +new file mode 100755 +index 000000000..7561532f3 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/regexp/sw64/regexp-macro-assembler-sw64.cc +@@ 
-0,0 +1,1370 @@ ++// Copyright 2012 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#if V8_TARGET_ARCH_SW64 ++ ++#include "src/regexp/sw64/regexp-macro-assembler-sw64.h" ++ ++#include "src/codegen/assembler-inl.h" ++#include "src/codegen/macro-assembler.h" ++#include "src/logging/log.h" ++#include "src/objects/objects-inl.h" ++#include "src/regexp/regexp-macro-assembler.h" ++#include "src/regexp/regexp-stack.h" ++#include "src/snapshot/embedded/embedded-data.h" ++#include "src/strings/unicode.h" ++ ++namespace v8 { ++namespace internal { ++ ++/* clang-format off ++ * ++ * This assembler uses the following register assignment convention ++ * - t3 : Temporarily stores the index of capture start after a matching pass ++ * for a global regexp. ++ * - a5 : Pointer to current Code object including heap object tag. ++ * - t9 : Current position in input, as negative offset from end of string. ++ * Please notice that this is the byte offset, not the character offset! ++ * - t10: Currently loaded character. Must be loaded using ++ * LoadCurrentCharacter before using any of the dispatch methods. ++ * - t0 : Points to tip of backtrack stack ++ * - t1 : Unused. ++ * - t2 : End of input (points to byte after last character in input). ++ * - fp : Frame pointer. Used to access arguments, local variables and ++ * RegExp registers. ++ * - sp : Points to tip of C stack. ++ * ++ * The remaining registers are free for computations. ++ * Each call to a public method should retain this convention. ++ * ++ * TODO(plind): O32 documented here with intent of having single 32/64 codebase ++ * in the future. ++ * ++ * The O32 stack will have the following structure: ++ * ++ * - fp[72] Isolate* isolate (address of the current isolate) ++ * - fp[68] direct_call (if 1, direct call from JavaScript code, ++ * if 0, call through the runtime system). ++ * - fp[64] stack_area_base (High end of the memory area to use as ++ * backtracking stack). ++ * - fp[60] capture array size (may fit multiple sets of matches) ++ * - fp[44..59] SW64 O32 four argument slots ++ * - fp[40] int* capture_array (int[num_saved_registers_], for output). ++ * --- sp when called --- ++ * - fp[36] return address (lr). ++ * - fp[32] old frame pointer (r11). ++ * - fp[0..31] backup of registers s0..s7. ++ * --- frame pointer ---- ++ * - fp[-4] end of input (address of end of string). ++ * - fp[-8] start of input (address of first character in string). ++ * - fp[-12] start index (character index of start). ++ * - fp[-16] void* input_string (location of a handle containing the string). ++ * - fp[-20] success counter (only for global regexps to count matches). ++ * - fp[-24] Offset of location before start of input (effectively character ++ * string start - 1). Used to initialize capture registers to a ++ * non-position. ++ * - fp[-28] At start (if 1, we are starting at the start of the ++ * string, otherwise 0) ++ * - fp[-32] register 0 (Only positions must be stored in the first ++ * - register 1 num_saved_registers_ registers) ++ * - ... ++ * - register num_registers-1 ++ * --- sp --- ++ * ++ * ++ * The N64 stack will have the following structure: ++ * ++ * - fp[80] Isolate* isolate (address of the current isolate) kIsolate ++ * kStackFrameHeader ++ * --- sp when called --- ++ * - fp[72] ra Return from RegExp code (ra). kReturnAddress ++ * - fp[64] s9, old-fp Old fp, callee saved(s9). ++ * - fp[0..63] s0..s7 Callee-saved registers s0..s7. 
++ * --- frame pointer ---- ++ * - fp[-8] direct_call (1 = direct call from JS, 0 = from runtime) kDirectCall ++ * - fp[-16] stack_base (Top of backtracking stack). kStackHighEnd ++ * - fp[-24] capture array size (may fit multiple sets of matches) kNumOutputRegisters ++ * - fp[-32] int* capture_array (int[num_saved_registers_], for output). kRegisterOutput ++ * - fp[-40] end of input (address of end of string). kInputEnd ++ * - fp[-48] start of input (address of first character in string). kInputStart ++ * - fp[-56] start index (character index of start). kStartIndex ++ * - fp[-64] void* input_string (location of a handle containing the string). kInputString ++ * - fp[-72] success counter (only for global regexps to count matches). kSuccessfulCaptures ++ * - fp[-80] Offset of location before start of input (effectively character kStringStartMinusOne ++ * position -1). Used to initialize capture registers to a ++ * non-position. ++ * --------- The following output registers are 32-bit values. --------- ++ * - fp[-88] register 0 (Only positions must be stored in the first kRegisterZero ++ * - register 1 num_saved_registers_ registers) ++ * - ... ++ * - register num_registers-1 ++ * --- sp --- ++ * ++ * ++ * The sw64 stack will have the following structure: ++ * ++ * - fp[80] Isolate* isolate (address of the current isolate) kIsolate ++ * - fp[72] direct_call (1 = direct call from JS, 0 = from runtime) kDirectCall ++ * - fp[64] stack_base (Top of backtracking stack). kStackHighEnd ++ * kStackFrameHeader ++ * --- sp when called --- ++ * - fp[56] ra Return from RegExp code (ra). kReturnAddress ++ * - fp[48] fp fp, callee saved(s6). ++ * - fp[0..47] s0..s5 Callee-saved registers s0..s5. ++ * --- frame pointer ---- ++ * - fp[-8] capture array size (may fit multiple sets of matches) kNumOutputRegisters ++ * - fp[-16] int* capture_array (int[num_saved_registers_], for output). kRegisterOutput ++ * - fp[-24] end of input (address of end of string). kInputEnd ++ * - fp[-32] start of input (address of first character in string). kInputStart ++ * - fp[-40] start index (character index of start). kStartIndex ++ * - fp[-48] void* input_string (location of a handle containing the string). kInputString ++ * - fp[-56] success counter (only for global regexps to count matches). kSuccessfulCaptures ++ * - fp[-64] Offset of location before start of input (effectively character kInputStartMinusOne ++ * position -1). Used to initialize capture registers to a ++ * non-position. ++ * --------- The following output registers are 32-bit values. --------- ++ * - fp[-72] register 0 (Only positions must be stored in the first kRegisterZero ++ * - register 1 num_saved_registers_ registers) ++ * - ... ++ * - register num_registers-1 ++ * --- sp --- ++ * ++ * The first num_saved_registers_ registers are initialized to point to ++ * "character -1" in the string (i.e., char_size() bytes before the first ++ * character of the string). The remaining registers start out as garbage. ++ * ++ * The data up to the return address must be placed there by the calling ++ * code and the remaining arguments are passed in registers, e.g. 
by calling the ++ * code entry as cast to a function with the signature: ++ * int (*match)(String input_string, ++ * int start_index, ++ * Address start, ++ * Address end, ++ * int* capture_output_array, ++ * int num_capture_registers, ++ * byte* stack_area_base, ++ * bool direct_call = false, ++ * Isolate* isolate); ++ * The call is performed by NativeRegExpMacroAssembler::Execute() ++ * (in regexp-macro-assembler.cc) via the GeneratedCode wrapper. ++ * ++ * clang-format on ++ */ ++ ++#define __ ACCESS_MASM(masm_) ++ ++const int RegExpMacroAssemblerSW64::kRegExpCodeSize; ++ ++RegExpMacroAssemblerSW64::RegExpMacroAssemblerSW64(Isolate* isolate, Zone* zone, ++ Mode mode, ++ int registers_to_save) ++ : NativeRegExpMacroAssembler(isolate, zone), ++ masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes, ++ NewAssemblerBuffer(kRegExpCodeSize))), ++ mode_(mode), ++ num_registers_(registers_to_save), ++ num_saved_registers_(registers_to_save), ++ entry_label_(), ++ start_label_(), ++ success_label_(), ++ backtrack_label_(), ++ exit_label_(), ++ internal_failure_label_() { ++ masm_->set_root_array_available(false); ++ ++ DCHECK_EQ(0, registers_to_save % 2); ++ __ jmp(&entry_label_); // We'll write the entry code later. ++ // If the code gets too big or corrupted, an internal exception will be ++ // raised, and we will exit right away. ++ __ bind(&internal_failure_label_); ++ __ li(v0, Operand(FAILURE)); ++ __ Ret(); ++ __ bind(&start_label_); // And then continue from here. ++} ++ ++RegExpMacroAssemblerSW64::~RegExpMacroAssemblerSW64() { ++ delete masm_; ++ // Unuse labels in case we throw away the assembler without calling GetCode. ++ entry_label_.Unuse(); ++ start_label_.Unuse(); ++ success_label_.Unuse(); ++ backtrack_label_.Unuse(); ++ exit_label_.Unuse(); ++ check_preempt_label_.Unuse(); ++ stack_overflow_label_.Unuse(); ++ internal_failure_label_.Unuse(); ++} ++ ++ ++int RegExpMacroAssemblerSW64::stack_limit_slack() { ++ return RegExpStack::kStackLimitSlack; ++} ++ ++ ++void RegExpMacroAssemblerSW64::AdvanceCurrentPosition(int by) { ++ if (by != 0) { ++ __ Addl(current_input_offset(), ++ current_input_offset(), Operand(by * char_size())); ++ } ++} ++ ++ ++void RegExpMacroAssemblerSW64::AdvanceRegister(int reg, int by) { ++ DCHECK_LE(0, reg); ++ DCHECK_GT(num_registers_, reg); ++ if (by != 0) { ++ __ Ldl(a0, register_location(reg)); ++ __ Addl(a0, a0, Operand(by)); ++ __ Stl(a0, register_location(reg)); ++ } ++} ++ ++ ++void RegExpMacroAssemblerSW64::Backtrack() { ++ CheckPreemption(); ++ if (has_backtrack_limit()) { ++ Label next; ++ __ Ldl(a0, MemOperand(frame_pointer(), kBacktrackCount)); ++ __ Addl(a0, a0, Operand(1)); ++ __ Stl(a0, MemOperand(frame_pointer(), kBacktrackCount)); ++ __ Branch(&next, ne, a0, Operand(backtrack_limit())); ++ ++ // Exceeded limits are treated as a failed match. ++ Fail(); ++ ++ __ bind(&next); ++ } ++ // Pop Code offset from backtrack stack, add Code and jump to location. 
++ Pop(a0); ++ __ Addl(a0, a0, code_pointer()); ++ __ Jump(a0); ++} ++ ++ ++void RegExpMacroAssemblerSW64::Bind(Label* label) { ++ __ bind(label); ++} ++ ++ ++void RegExpMacroAssemblerSW64::CheckCharacter(uint32_t c, Label* on_equal) { ++ BranchOrBacktrack(on_equal, eq, current_character(), Operand(c)); ++} ++ ++ ++void RegExpMacroAssemblerSW64::CheckCharacterGT(uc16 limit, Label* on_greater) { ++ BranchOrBacktrack(on_greater, gt, current_character(), Operand(limit)); ++} ++ ++ ++void RegExpMacroAssemblerSW64::CheckAtStart(int cp_offset, Label* on_at_start) { ++ __ Ldl(a1, MemOperand(frame_pointer(), kStringStartMinusOne)); ++ __ Addl(a0, current_input_offset(), ++ Operand(-char_size() + cp_offset * char_size())); ++ BranchOrBacktrack(on_at_start, eq, a0, Operand(a1)); ++} ++ ++ ++void RegExpMacroAssemblerSW64::CheckNotAtStart(int cp_offset, ++ Label* on_not_at_start) { ++ __ Ldl(a1, MemOperand(frame_pointer(), kStringStartMinusOne)); ++ __ Addl(a0, current_input_offset(), ++ Operand(-char_size() + cp_offset * char_size())); ++ BranchOrBacktrack(on_not_at_start, ne, a0, Operand(a1)); ++} ++ ++ ++void RegExpMacroAssemblerSW64::CheckCharacterLT(uc16 limit, Label* on_less) { ++ BranchOrBacktrack(on_less, lt, current_character(), Operand(limit)); ++} ++ ++ ++void RegExpMacroAssemblerSW64::CheckGreedyLoop(Label* on_equal) { ++ Label backtrack_non_equal; ++ __ Ldw(a0, MemOperand(backtrack_stackpointer(), 0)); ++ __ Branch(&backtrack_non_equal, ne, current_input_offset(), Operand(a0)); ++ __ Addl(backtrack_stackpointer(), ++ backtrack_stackpointer(), ++ Operand(kIntSize)); ++ __ bind(&backtrack_non_equal); ++ BranchOrBacktrack(on_equal, eq, current_input_offset(), Operand(a0)); ++} ++ ++ ++void RegExpMacroAssemblerSW64::CheckNotBackReferenceIgnoreCase( ++ int start_reg, bool read_backward, bool unicode, Label* on_no_match) { ++ Label fallthrough; ++ __ Ldl(a0, register_location(start_reg)); // Index of start of capture. ++ __ Ldl(a1, register_location(start_reg + 1)); // Index of end of capture. ++ __ Subl(a1, a1, a0); // Length of capture. ++ ++ // At this point, the capture registers are either both set or both cleared. ++ // If the capture length is zero, then the capture is either empty or cleared. ++ // Fall through in both cases. ++ __ Branch(&fallthrough, eq, a1, Operand(zero_reg)); ++ ++ if (read_backward) { ++ __ Ldl(t1, MemOperand(frame_pointer(), kStringStartMinusOne)); ++ __ Addl(t1, t1, a1); ++ BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t1)); ++ } else { ++ __ Addl(t1, a1, current_input_offset()); ++ // Check that there are enough characters left in the input. ++ BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg)); ++ } ++ ++ if (mode_ == LATIN1) { ++ Label success; ++ Label fail; ++ Label loop_check; ++ ++ // a0 - offset of start of capture. ++ // a1 - length of capture. ++ __ Addl(a0, a0, Operand(end_of_input_address())); ++ __ Addl(a2, end_of_input_address(), Operand(current_input_offset())); ++ if (read_backward) { ++ __ Subl(a2, a2, Operand(a1)); ++ } ++ __ Addl(a1, a0, Operand(a1)); ++ ++ // a0 - Address of start of capture. ++ // a1 - Address of end of capture. ++ // a2 - Address of current input position. ++ ++ Label loop; ++ __ bind(&loop); ++ __ Ldbu(a3, MemOperand(a0, 0)); ++ __ addl(a0, char_size(), a0); ++ __ Ldbu(a4, MemOperand(a2, 0)); ++ __ addl(a2, char_size(), a2); ++ ++ __ Branch(&loop_check, eq, a4, Operand(a3)); ++ ++ // Mismatch, try case-insensitive match (converting letters to lower-case). 
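// Illustrative aside, not part of the upstream patch: the OR-0x20 sequence that
// follows is the usual ASCII/Latin-1 case fold applied to the capture and input
// characters held in a3/a4. In plain C++ (with `a` and `b` standing in for those
// two register values) the same test would read roughly:
//   bool equal_ignore_case =
//       (a | 0x20) == (b | 0x20) &&
//       (static_cast<unsigned>((a | 0x20) - 'a') <= 'z' - 'a' ||
//        // Latin-1 letters 0xE0..0xFE, excluding 0xF7 (the division sign)
//        (static_cast<unsigned>((a | 0x20) - 0xE0) <= 0xFE - 0xE0 &&
//         (a | 0x20) != 0xF7));
// which is exactly what the branches below implement instruction by instruction.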
++ __ Or(a3, a3, Operand(0x20)); // Convert capture character to lower-case. ++ __ Or(a4, a4, Operand(0x20)); // Also convert input character. ++ __ Branch(&fail, ne, a4, Operand(a3)); ++ __ Subl(a3, a3, Operand('a')); ++ __ Branch(&loop_check, ls, a3, Operand('z' - 'a')); ++ // Latin-1: Check for values in range [224,254] but not 247. ++ __ Subl(a3, a3, Operand(224 - 'a')); ++ // Weren't Latin-1 letters. ++ __ Branch(&fail, hi, a3, Operand(254 - 224)); ++ // Check for 247. ++ __ Branch(&fail, eq, a3, Operand(247 - 224)); ++ ++ __ bind(&loop_check); ++ __ Branch(&loop, lt, a0, Operand(a1)); ++ __ jmp(&success); ++ ++ __ bind(&fail); ++ GoTo(on_no_match); ++ ++ __ bind(&success); ++ // Compute new value of character position after the matched part. ++ __ Subl(current_input_offset(), a2, end_of_input_address()); ++ if (read_backward) { ++ __ Ldl(t1, register_location(start_reg)); // Index of start of capture. ++ __ Ldl(a2, register_location(start_reg + 1)); // Index of end of capture. ++ __ Addl(current_input_offset(), current_input_offset(), Operand(t1)); ++ __ Subl(current_input_offset(), current_input_offset(), Operand(a2)); ++ } ++ } else { ++ DCHECK(mode_ == UC16); ++ // Put regexp engine registers on stack. ++ RegList regexp_registers_to_retain = current_input_offset().bit() | ++ current_character().bit() | backtrack_stackpointer().bit(); ++ __ MultiPush(regexp_registers_to_retain); ++ ++ int argument_count = 4; ++ __ PrepareCallCFunction(argument_count, a2); ++ ++ // a0 - offset of start of capture. ++ // a1 - length of capture. ++ ++ // Put arguments into arguments registers. ++ // Parameters are ++ // a0: Address byte_offset1 - Address captured substring's start. ++ // a1: Address byte_offset2 - Address of current character position. ++ // a2: size_t byte_length - length of capture in bytes(!). ++ // a3: Isolate* isolate. ++ ++ // Address of start of capture. ++ __ Addl(a0, a0, Operand(end_of_input_address())); ++ // Length of capture. ++ __ mov(a2, a1); ++ // Save length in callee-save register for use on return. ++ __ mov(s3, a1); // Can we use s3 here? ++ // Address of current input position. ++ __ Addl(a1, current_input_offset(), Operand(end_of_input_address())); ++ if (read_backward) { ++ __ Subl(a1, a1, Operand(s3)); ++ } ++ // Isolate. ++ __ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate()))); ++ ++ { ++ AllowExternalCallThatCantCauseGC scope(masm_); ++ ExternalReference function = ++ unicode ? ExternalReference::re_case_insensitive_compare_unicode( ++ isolate()) ++ : ExternalReference::re_case_insensitive_compare_non_unicode( ++ isolate()); ++ __ CallCFunction(function, argument_count); ++ } ++ ++ // Restore regexp engine registers. ++ __ MultiPop(regexp_registers_to_retain); ++ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE); ++ __ Ldl(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd)); ++ ++ // Check if function returned non-zero for success or zero for failure. ++ BranchOrBacktrack(on_no_match, eq, v0, Operand(zero_reg)); ++ // On success, increment position by length of capture. ++ if (read_backward) { ++ __ Subl(current_input_offset(), current_input_offset(), Operand(s3)); ++ } else { ++ __ Addl(current_input_offset(), current_input_offset(), Operand(s3)); ++ } ++ } ++ ++ __ bind(&fallthrough); ++} ++ ++ ++void RegExpMacroAssemblerSW64::CheckNotBackReference(int start_reg, ++ bool read_backward, ++ Label* on_no_match) { ++ Label fallthrough; ++ ++ // Find length of back-referenced capture. 
++ __ Ldl(a0, register_location(start_reg)); ++ __ Ldl(a1, register_location(start_reg + 1)); ++ __ Subl(a1, a1, a0); // Length to check. ++ ++ // At this point, the capture registers are either both set or both cleared. ++ // If the capture length is zero, then the capture is either empty or cleared. ++ // Fall through in both cases. ++ __ Branch(&fallthrough, eq, a1, Operand(zero_reg)); ++ ++ if (read_backward) { ++ __ Ldl(t1, MemOperand(frame_pointer(), kStringStartMinusOne)); ++ __ Addl(t1, t1, a1); ++ BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t1)); ++ } else { ++ __ Addl(t1, a1, current_input_offset()); ++ // Check that there are enough characters left in the input. ++ BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg)); ++ } ++ ++ // Compute pointers to match string and capture string. ++ __ Addl(a0, a0, Operand(end_of_input_address())); ++ __ Addl(a2, end_of_input_address(), Operand(current_input_offset())); ++ if (read_backward) { ++ __ Subl(a2, a2, Operand(a1)); ++ } ++ __ Addl(a1, a1, Operand(a0)); ++ ++ Label loop; ++ __ bind(&loop); ++ if (mode_ == LATIN1) { ++ __ Ldbu(a3, MemOperand(a0, 0)); ++ __ addl(a0, char_size(), a0); ++ __ Ldbu(a4, MemOperand(a2, 0)); ++ __ addl(a2, char_size(), a2); ++ } else { ++ DCHECK(mode_ == UC16); ++ __ Ldhu(a3, MemOperand(a0, 0)); ++ __ addl(a0, char_size(), a0); ++ __ Ldhu(a4, MemOperand(a2, 0)); ++ __ addl(a2, char_size(), a2); ++ } ++ BranchOrBacktrack(on_no_match, ne, a3, Operand(a4)); ++ __ Branch(&loop, lt, a0, Operand(a1)); ++ ++ // Move current character position to position after match. ++ __ Subl(current_input_offset(), a2, end_of_input_address()); ++ if (read_backward) { ++ __ Ldl(t1, register_location(start_reg)); // Index of start of capture. ++ __ Ldl(a2, register_location(start_reg + 1)); // Index of end of capture. ++ __ Addl(current_input_offset(), current_input_offset(), Operand(t1)); ++ __ Subl(current_input_offset(), current_input_offset(), Operand(a2)); ++ } ++ __ bind(&fallthrough); ++} ++ ++ ++void RegExpMacroAssemblerSW64::CheckNotCharacter(uint32_t c, ++ Label* on_not_equal) { ++ BranchOrBacktrack(on_not_equal, ne, current_character(), Operand(c)); ++} ++ ++ ++void RegExpMacroAssemblerSW64::CheckCharacterAfterAnd(uint32_t c, ++ uint32_t mask, ++ Label* on_equal) { ++ __ And(a0, current_character(), Operand(mask)); ++ Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c); ++ BranchOrBacktrack(on_equal, eq, a0, rhs); ++} ++ ++ ++void RegExpMacroAssemblerSW64::CheckNotCharacterAfterAnd(uint32_t c, ++ uint32_t mask, ++ Label* on_not_equal) { ++ __ And(a0, current_character(), Operand(mask)); ++ Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c); ++ BranchOrBacktrack(on_not_equal, ne, a0, rhs); ++} ++ ++ ++void RegExpMacroAssemblerSW64::CheckNotCharacterAfterMinusAnd( ++ uc16 c, ++ uc16 minus, ++ uc16 mask, ++ Label* on_not_equal) { ++ DCHECK_GT(String::kMaxUtf16CodeUnit, minus); ++ __ Subl(a0, current_character(), Operand(minus)); ++ __ And(a0, a0, Operand(mask)); ++ BranchOrBacktrack(on_not_equal, ne, a0, Operand(c)); ++} ++ ++ ++void RegExpMacroAssemblerSW64::CheckCharacterInRange( ++ uc16 from, ++ uc16 to, ++ Label* on_in_range) { ++ __ Subl(a0, current_character(), Operand(from)); ++ // Unsigned lower-or-same condition. 
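// Illustrative aside, not part of the upstream patch: branching on `ls` after the
// subtraction is the standard branch-free range check. For unsigned arithmetic,
//   (c - from) <= (to - from)   <=>   from <= c && c <= to,
// because any c below `from` wraps around to a large unsigned value and fails the
// lower-or-same comparison.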
++ BranchOrBacktrack(on_in_range, ls, a0, Operand(to - from)); ++} ++ ++ ++void RegExpMacroAssemblerSW64::CheckCharacterNotInRange( ++ uc16 from, ++ uc16 to, ++ Label* on_not_in_range) { ++ __ Subl(a0, current_character(), Operand(from)); ++ // Unsigned higher condition. ++ BranchOrBacktrack(on_not_in_range, hi, a0, Operand(to - from)); ++} ++ ++ ++void RegExpMacroAssemblerSW64::CheckBitInTable( ++ Handle table, ++ Label* on_bit_set) { ++ __ li(a0, Operand(table)); ++ if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) { ++ __ And(a1, current_character(), Operand(kTableSize - 1)); ++ __ Addl(a0, a0, a1); ++ } else { ++ __ Addl(a0, a0, current_character()); ++ } ++ ++ __ Ldbu(a0, FieldMemOperand(a0, ByteArray::kHeaderSize)); ++ BranchOrBacktrack(on_bit_set, ne, a0, Operand(zero_reg)); ++} ++ ++ ++bool RegExpMacroAssemblerSW64::CheckSpecialCharacterClass(uc16 type, ++ Label* on_no_match) { ++ // Range checks (c in min..max) are generally implemented by an unsigned ++ // (c - min) <= (max - min) check. ++ switch (type) { ++ case 's': ++ // Match space-characters. ++ if (mode_ == LATIN1) { ++ // One byte space characters are '\t'..'\r', ' ' and \u00a0. ++ Label success; ++ __ Branch(&success, eq, current_character(), Operand(' ')); ++ // Check range 0x09..0x0D. ++ __ Subl(a0, current_character(), Operand('\t')); ++ __ Branch(&success, ls, a0, Operand('\r' - '\t')); ++ // \u00a0 (NBSP). ++ BranchOrBacktrack(on_no_match, ne, a0, Operand(0x00A0 - '\t')); ++ __ bind(&success); ++ return true; ++ } ++ return false; ++ case 'S': ++ // The emitted code for generic character classes is good enough. ++ return false; ++ case 'd': ++ // Match Latin1 digits ('0'..'9'). ++ __ Subl(a0, current_character(), Operand('0')); ++ BranchOrBacktrack(on_no_match, hi, a0, Operand('9' - '0')); ++ return true; ++ case 'D': ++ // Match non Latin1-digits. ++ __ Subl(a0, current_character(), Operand('0')); ++ BranchOrBacktrack(on_no_match, ls, a0, Operand('9' - '0')); ++ return true; ++ case '.': { ++ // Match non-newlines (not 0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029). ++ __ Xor(a0, current_character(), Operand(0x01)); ++ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C. ++ __ Subl(a0, a0, Operand(0x0B)); ++ BranchOrBacktrack(on_no_match, ls, a0, Operand(0x0C - 0x0B)); ++ if (mode_ == UC16) { ++ // Compare original value to 0x2028 and 0x2029, using the already ++ // computed (current_char ^ 0x01 - 0x0B). I.e., check for ++ // 0x201D (0x2028 - 0x0B) or 0x201E. ++ __ Subl(a0, a0, Operand(0x2028 - 0x0B)); ++ BranchOrBacktrack(on_no_match, ls, a0, Operand(1)); ++ } ++ return true; ++ } ++ case 'n': { ++ // Match newlines (0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029). ++ __ Xor(a0, current_character(), Operand(0x01)); ++ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C. ++ __ Subl(a0, a0, Operand(0x0B)); ++ if (mode_ == LATIN1) { ++ BranchOrBacktrack(on_no_match, hi, a0, Operand(0x0C - 0x0B)); ++ } else { ++ Label done; ++ BranchOrBacktrack(&done, ls, a0, Operand(0x0C - 0x0B)); ++ // Compare original value to 0x2028 and 0x2029, using the already ++ // computed (current_char ^ 0x01 - 0x0B). I.e., check for ++ // 0x201D (0x2028 - 0x0B) or 0x201E. ++ __ Subl(a0, a0, Operand(0x2028 - 0x0B)); ++ BranchOrBacktrack(on_no_match, hi, a0, Operand(1)); ++ __ bind(&done); ++ } ++ return true; ++ } ++ case 'w': { ++ if (mode_ != LATIN1) { ++ // Table is 256 entries, so all Latin1 characters can be tested. 
++ BranchOrBacktrack(on_no_match, hi, current_character(), Operand('z')); ++ } ++ ExternalReference map = ExternalReference::re_word_character_map(isolate()); ++ __ li(a0, Operand(map)); ++ __ Addl(a0, a0, current_character()); ++ __ Ldbu(a0, MemOperand(a0, 0)); ++ BranchOrBacktrack(on_no_match, eq, a0, Operand(zero_reg)); ++ return true; ++ } ++ case 'W': { ++ Label done; ++ if (mode_ != LATIN1) { ++ // Table is 256 entries, so all Latin1 characters can be tested. ++ __ Branch(&done, hi, current_character(), Operand('z')); ++ } ++ ExternalReference map = ExternalReference::re_word_character_map(isolate()); ++ __ li(a0, Operand(map)); ++ __ Addl(a0, a0, current_character()); ++ __ Ldbu(a0, MemOperand(a0, 0)); ++ BranchOrBacktrack(on_no_match, ne, a0, Operand(zero_reg)); ++ if (mode_ != LATIN1) { ++ __ bind(&done); ++ } ++ return true; ++ } ++ case '*': ++ // Match any character. ++ return true; ++ // No custom implementation (yet): s(UC16), S(UC16). ++ default: ++ return false; ++ } ++} ++ ++ ++void RegExpMacroAssemblerSW64::Fail() { ++ __ li(v0, Operand(FAILURE)); ++ __ jmp(&exit_label_); ++} ++ ++ ++Handle RegExpMacroAssemblerSW64::GetCode(Handle source) { ++ Label return_v0; ++ if (masm_->has_exception()) { ++ // If the code gets corrupted due to long regular expressions and lack of ++ // space on trampolines, an internal exception flag is set. If this case ++ // is detected, we will jump into exit sequence right away. ++ __ bind_to(&entry_label_, internal_failure_label_.pos()); ++ } else { ++ // Finalize code - write the entry point code now we know how many ++ // registers we need. ++ ++ // Entry code: ++ __ bind(&entry_label_); ++ ++ // Tell the system that we have a stack frame. Because the type is MANUAL, ++ // no is generated. ++ FrameScope scope(masm_, StackFrame::MANUAL); ++ ++ // Actually emit code to start a new stack frame. ++ // Push arguments ++ // Save callee-save registers. ++ // Start new stack frame. ++ // Store link register in existing stack-cell. ++ // Order here should correspond to order of offset constants in header file. ++ // TODO(plind): we save s0..s5, but ONLY use s3 here - use the regs ++ // or dont save. ++ RegList registers_to_retain = s0.bit() | s1.bit() | s2.bit() | ++ s3.bit() | s4.bit() | s5.bit() | fp.bit(); ++ RegList argument_registers = a0.bit() | a1.bit() | a2.bit() | a3.bit() | ++ a4.bit() | a5.bit(); ++ ++ __ MultiPush(registers_to_retain | ra.bit()); __ MultiPush(argument_registers); ++ // Set frame pointer in space for it if this is not a direct call ++ // from generated code. ++ __ Addl(frame_pointer(), sp, Operand(6 * kPointerSize)); // 6 argument regs ++ STATIC_ASSERT(kSuccessfulCaptures == kInputString - kSystemPointerSize); ++ __ mov(a0, zero_reg); ++ __ push(a0); // Make room for success counter and initialize it to 0. ++ STATIC_ASSERT(kStringStartMinusOne == ++ kSuccessfulCaptures - kSystemPointerSize); ++ __ push(a0); // Make room for "string start - 1" constant. ++ STATIC_ASSERT(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize); ++ __ push(a0); // The backtrack counter ++ ++ // Check if we have space on the stack for registers. ++ Label stack_limit_hit; ++ Label stack_ok; ++ ++ ExternalReference stack_limit = ++ ExternalReference::address_of_jslimit(masm_->isolate()); ++ __ li(a0, Operand(stack_limit)); ++ __ Ldl(a0, MemOperand(a0)); ++ __ Subl(a0, sp, a0); ++ // Handle it if the stack pointer is already below the stack limit. 
++ __ Branch(&stack_limit_hit, le, a0, Operand(zero_reg)); ++ // Check if there is room for the variable number of registers above ++ // the stack limit. ++ __ Branch(&stack_ok, hs, a0, Operand(num_registers_ * kPointerSize)); ++ // Exit with OutOfMemory exception. There is not enough space on the stack ++ // for our working registers. ++ __ li(v0, Operand(EXCEPTION)); ++ __ jmp(&return_v0); ++ ++ __ bind(&stack_limit_hit); ++ CallCheckStackGuardState(a0); ++ // If returned value is non-zero, we exit with the returned value as result. ++ __ Branch(&return_v0, ne, v0, Operand(zero_reg)); ++ ++ __ bind(&stack_ok); ++ // Allocate space on stack for registers. ++ __ Subl(sp, sp, Operand(num_registers_ * kPointerSize)); ++ // Load string end. ++ __ Ldl(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd)); ++ // Load input start. ++ __ Ldl(a0, MemOperand(frame_pointer(), kInputStart)); ++ // Find negative length (offset of start relative to end). ++ __ Subl(current_input_offset(), a0, end_of_input_address()); ++ // Set a0 to address of char before start of the input string ++ // (effectively string position -1). ++ __ Ldl(a1, MemOperand(frame_pointer(), kStartIndex)); ++ __ Subl(a0, current_input_offset(), Operand(char_size())); ++ __ slll(a1, (mode_ == UC16) ? 1 : 0, t1); ++ __ Subl(a0, a0, t1); ++ // Store this value in a local variable, for use when clearing ++ // position registers. ++ __ Stl(a0, MemOperand(frame_pointer(), kStringStartMinusOne)); ++ ++ // Initialize code pointer register ++ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE); ++ ++ Label load_char_start_regexp, start_regexp; ++ // Load newline if index is at start, previous character otherwise. ++ __ Branch(&load_char_start_regexp, ne, a1, Operand(zero_reg)); ++ __ li(current_character(), Operand('\n')); ++ __ jmp(&start_regexp); ++ ++ // Global regexp restarts matching here. ++ __ bind(&load_char_start_regexp); ++ // Load previous char as initial value of current character register. ++ LoadCurrentCharacterUnchecked(-1, 1); ++ __ bind(&start_regexp); ++ ++ // Initialize on-stack registers. ++ if (num_saved_registers_ > 0) { // Always is, if generated from a regexp. ++ // Fill saved registers with initial value = start offset - 1. ++ if (num_saved_registers_ > 8) { ++ // Address of register 0. ++ __ Addl(a1, frame_pointer(), Operand(kRegisterZero)); ++ __ li(a2, Operand(num_saved_registers_)); ++ Label init_loop; ++ __ bind(&init_loop); ++ __ Stl(a0, MemOperand(a1)); ++ __ Addl(a1, a1, Operand(-kPointerSize)); ++ __ Subl(a2, a2, Operand(1)); ++ __ Branch(&init_loop, ne, a2, Operand(zero_reg)); ++ } else { ++ for (int i = 0; i < num_saved_registers_; i++) { ++ __ Stl(a0, register_location(i)); ++ } ++ } ++ } ++ ++ // Initialize backtrack stack pointer. ++ __ Ldl(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd)); ++ ++ __ jmp(&start_label_); ++ ++ ++ // Exit code: ++ if (success_label_.is_linked()) { ++ // Save captures when successful. ++ __ bind(&success_label_); ++ if (num_saved_registers_ > 0) { ++ // Copy captures to output. ++ __ Ldl(a1, MemOperand(frame_pointer(), kInputStart)); ++ __ Ldl(a0, MemOperand(frame_pointer(), kRegisterOutput)); ++ __ Ldl(a2, MemOperand(frame_pointer(), kStartIndex)); ++ __ Subl(a1, end_of_input_address(), a1); ++ // a1 is length of input in bytes. ++ if (mode_ == UC16) { ++ __ srll(a1, 1, a1); ++ } ++ // a1 is length of input in characters. ++ __ Addl(a1, a1, Operand(a2)); ++ // a1 is length of string in characters. 
++ ++ DCHECK_EQ(0, num_saved_registers_ % 2); ++ // Always an even number of capture registers. This allows us to ++ // unroll the loop once to add an operation between a load of a register ++ // and the following use of that register. ++ for (int i = 0; i < num_saved_registers_; i += 2) { ++ __ Ldl(a2, register_location(i)); ++ __ Ldl(a3, register_location(i + 1)); ++ if (i == 0 && global_with_zero_length_check()) { ++ // Keep capture start in a4 for the zero-length check later. ++ __ mov(t3, a2); ++ } ++ if (mode_ == UC16) { ++ __ sral(a2, 1, a2); ++ __ Addl(a2, a2, a1); ++ __ sral(a3, 1, a3); ++ __ Addl(a3, a3, a1); ++ } else { ++ __ Addl(a2, a1, Operand(a2)); ++ __ Addl(a3, a1, Operand(a3)); ++ } ++ // V8 expects the output to be an int32_t array. ++ __ Stw(a2, MemOperand(a0)); ++ __ Addl(a0, a0, kIntSize); ++ __ Stw(a3, MemOperand(a0)); ++ __ Addl(a0, a0, kIntSize); ++ } ++ } ++ ++ if (global()) { ++ // Restart matching if the regular expression is flagged as global. ++ __ Ldl(a0, MemOperand(frame_pointer(), kSuccessfulCaptures)); ++ __ Ldl(a1, MemOperand(frame_pointer(), kNumOutputRegisters)); ++ __ Ldl(a2, MemOperand(frame_pointer(), kRegisterOutput)); ++ // Increment success counter. ++ __ Addl(a0, a0, 1); ++ __ Stl(a0, MemOperand(frame_pointer(), kSuccessfulCaptures)); ++ // Capture results have been stored, so the number of remaining global ++ // output registers is reduced by the number of stored captures. ++ __ Subl(a1, a1, num_saved_registers_); ++ // Check whether we have enough room for another set of capture results. ++ __ mov(v0, a0); ++ __ Branch(&return_v0, lt, a1, Operand(num_saved_registers_)); ++ ++ __ Stl(a1, MemOperand(frame_pointer(), kNumOutputRegisters)); ++ // Advance the location for output. ++ __ Addl(a2, a2, num_saved_registers_ * kIntSize); ++ __ Stl(a2, MemOperand(frame_pointer(), kRegisterOutput)); ++ ++ // Prepare a0 to initialize registers with its value in the next run. ++ __ Ldl(a0, MemOperand(frame_pointer(), kStringStartMinusOne)); ++ ++ if (global_with_zero_length_check()) { ++ // Special case for zero-length matches. ++ // t3: capture start index ++ // Not a zero-length match, restart. ++ __ Branch( ++ &load_char_start_regexp, ne, current_input_offset(), Operand(t3)); ++ // Offset from the end is zero if we already reached the end. ++ __ Branch(&exit_label_, eq, current_input_offset(), ++ Operand(zero_reg)); ++ // Advance current position after a zero-length match. ++ Label advance; ++ __ bind(&advance); ++ __ Addl(current_input_offset(), ++ current_input_offset(), ++ Operand((mode_ == UC16) ? 2 : 1)); ++ if (global_unicode()) CheckNotInSurrogatePair(0, &advance); ++ } ++ ++ __ Branch(&load_char_start_regexp); ++ } else { ++ __ li(v0, Operand(SUCCESS)); ++ } ++ } ++ // Exit and return v0. ++ __ bind(&exit_label_); ++ if (global()) { ++ __ Ldl(v0, MemOperand(frame_pointer(), kSuccessfulCaptures)); ++ } ++ ++ __ bind(&return_v0); ++ // Skip sp past regexp registers and local variables.. ++ __ mov(sp, frame_pointer()); ++ // Restore registers s0..s7 and return (restoring ra to pc). ++ __ MultiPop(registers_to_retain | ra.bit()); ++ __ Ret(); ++ ++ // Backtrack code (branch target for conditional backtracks). ++ if (backtrack_label_.is_linked()) { ++ __ bind(&backtrack_label_); ++ Backtrack(); ++ } ++ ++ Label exit_with_exception; ++ ++ // Preempt-code. ++ if (check_preempt_label_.is_linked()) { ++ SafeCallTarget(&check_preempt_label_); ++ // Put regexp engine registers on stack. 
++ RegList regexp_registers_to_retain = current_input_offset().bit() | ++ current_character().bit() | backtrack_stackpointer().bit(); ++ __ MultiPush(regexp_registers_to_retain); ++ CallCheckStackGuardState(a0); ++ __ MultiPop(regexp_registers_to_retain); ++ // If returning non-zero, we should end execution with the given ++ // result as return value. ++ __ Branch(&return_v0, ne, v0, Operand(zero_reg)); ++ ++ // String might have moved: Reload end of string from frame. ++ __ Ldl(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd)); ++ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE); ++ SafeReturn(); ++ } ++ ++ // Backtrack stack overflow code. ++ if (stack_overflow_label_.is_linked()) { ++ SafeCallTarget(&stack_overflow_label_); ++ // Reached if the backtrack-stack limit has been hit. ++ // Put regexp engine registers on stack first. ++ RegList regexp_registers = current_input_offset().bit() | ++ current_character().bit(); ++ __ MultiPush(regexp_registers); ++ ++ // Call GrowStack(backtrack_stackpointer(), &stack_base) ++ static const int num_arguments = 3; ++ __ PrepareCallCFunction(num_arguments, a0); ++ __ mov(a0, backtrack_stackpointer()); ++ __ Addl(a1, frame_pointer(), Operand(kStackHighEnd)); ++ __ li(a2, Operand(ExternalReference::isolate_address(masm_->isolate()))); ++ ExternalReference grow_stack = ++ ExternalReference::re_grow_stack(masm_->isolate()); ++ __ CallCFunction(grow_stack, num_arguments); ++ // Restore regexp registers. ++ __ MultiPop(regexp_registers); ++ // If return nullptr, we have failed to grow the stack, and ++ // must exit with a stack-overflow exception. ++ __ Branch(&exit_with_exception, eq, v0, Operand(zero_reg)); ++ // Otherwise use return value as new stack pointer. ++ __ mov(backtrack_stackpointer(), v0); ++ // Restore saved registers and continue. ++ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE); ++ __ Ldl(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd)); ++ SafeReturn(); ++ } ++ ++ if (exit_with_exception.is_linked()) { ++ // If any of the code above needed to exit with an exception. ++ __ bind(&exit_with_exception); ++ // Exit with Result EXCEPTION(-1) to signal thrown exception. 
++ __ li(v0, Operand(EXCEPTION)); ++ __ jmp(&return_v0); ++ } ++ } ++ ++ CodeDesc code_desc; ++ masm_->GetCode(isolate(), &code_desc); ++ Handle code = ++ Factory::CodeBuilder(isolate(), code_desc, CodeKind::REGEXP) ++ .set_self_reference(masm_->CodeObject()) ++ .Build(); ++ LOG(masm_->isolate(), ++ RegExpCodeCreateEvent(Handle::cast(code), source)); ++ return Handle::cast(code); ++} ++ ++ ++void RegExpMacroAssemblerSW64::GoTo(Label* to) { ++ if (to == nullptr) { ++ Backtrack(); ++ return; ++ } ++ __ jmp(to); ++ return; ++} ++ ++ ++void RegExpMacroAssemblerSW64::IfRegisterGE(int reg, ++ int comparand, ++ Label* if_ge) { ++ __ Ldl(a0, register_location(reg)); ++ BranchOrBacktrack(if_ge, ge, a0, Operand(comparand)); ++} ++ ++ ++void RegExpMacroAssemblerSW64::IfRegisterLT(int reg, ++ int comparand, ++ Label* if_lt) { ++ __ Ldl(a0, register_location(reg)); ++ BranchOrBacktrack(if_lt, lt, a0, Operand(comparand)); ++} ++ ++ ++void RegExpMacroAssemblerSW64::IfRegisterEqPos(int reg, ++ Label* if_eq) { ++ __ Ldl(a0, register_location(reg)); ++ BranchOrBacktrack(if_eq, eq, a0, Operand(current_input_offset())); ++} ++ ++ ++RegExpMacroAssembler::IrregexpImplementation ++ RegExpMacroAssemblerSW64::Implementation() { ++ return kSW64Implementation; ++} ++ ++ ++void RegExpMacroAssemblerSW64::PopCurrentPosition() { ++ Pop(current_input_offset()); ++} ++ ++ ++void RegExpMacroAssemblerSW64::PopRegister(int register_index) { ++ Pop(a0); ++ __ Stl(a0, register_location(register_index)); ++} ++ ++ ++void RegExpMacroAssemblerSW64::PushBacktrack(Label* label) { ++ if (label->is_bound()) { ++ int target = label->pos(); ++ __ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag)); ++ } else { ++ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); ++ Label after_constant; ++ __ Branch(&after_constant); ++ int offset = masm_->pc_offset(); ++ int cp_offset = offset + Code::kHeaderSize - kHeapObjectTag; ++ __ emitSW(0); ++ masm_->label_at_put(label, offset); ++ __ bind(&after_constant); ++ if (is_int16(cp_offset)) { ++ __ Ldwu(a0, MemOperand(code_pointer(), cp_offset)); ++ } else { ++ __ Addl(a0, code_pointer(), cp_offset); ++ __ Ldwu(a0, MemOperand(a0, 0)); ++ } ++ } ++ Push(a0); ++ CheckStackLimit(); ++} ++ ++ ++void RegExpMacroAssemblerSW64::PushCurrentPosition() { ++ Push(current_input_offset()); ++} ++ ++ ++void RegExpMacroAssemblerSW64::PushRegister(int register_index, ++ StackCheckFlag check_stack_limit) { ++ __ Ldl(a0, register_location(register_index)); ++ Push(a0); ++ if (check_stack_limit) CheckStackLimit(); ++} ++ ++ ++void RegExpMacroAssemblerSW64::ReadCurrentPositionFromRegister(int reg) { ++ __ Ldl(current_input_offset(), register_location(reg)); ++} ++ ++ ++void RegExpMacroAssemblerSW64::ReadStackPointerFromRegister(int reg) { ++ __ Ldl(backtrack_stackpointer(), register_location(reg)); ++ __ Ldl(a0, MemOperand(frame_pointer(), kStackHighEnd)); ++ __ Addl(backtrack_stackpointer(), backtrack_stackpointer(), Operand(a0)); ++} ++ ++ ++void RegExpMacroAssemblerSW64::SetCurrentPositionFromEnd(int by) { ++ Label after_position; ++ __ Branch(&after_position, ++ ge, ++ current_input_offset(), ++ Operand(-by * char_size())); ++ __ li(current_input_offset(), -by * char_size()); ++ // On RegExp code entry (where this operation is used), the character before ++ // the current position is expected to be already loaded. ++ // We have advanced the position, so it's safe to read backwards. 
++ LoadCurrentCharacterUnchecked(-1, 1); ++ __ bind(&after_position); ++} ++ ++ ++void RegExpMacroAssemblerSW64::SetRegister(int register_index, int to) { ++ DCHECK(register_index >= num_saved_registers_); // Reserved for positions! ++ __ li(a0, Operand(to)); ++ __ Stl(a0, register_location(register_index)); ++} ++ ++ ++bool RegExpMacroAssemblerSW64::Succeed() { ++ __ jmp(&success_label_); ++ return global(); ++} ++ ++ ++void RegExpMacroAssemblerSW64::WriteCurrentPositionToRegister(int reg, ++ int cp_offset) { ++ if (cp_offset == 0) { ++ __ Stl(current_input_offset(), register_location(reg)); ++ } else { ++ __ Addl(a0, current_input_offset(), Operand(cp_offset * char_size())); ++ __ Stl(a0, register_location(reg)); ++ } ++} ++ ++ ++void RegExpMacroAssemblerSW64::ClearRegisters(int reg_from, int reg_to) { ++ DCHECK(reg_from <= reg_to); ++ __ Ldl(a0, MemOperand(frame_pointer(), kStringStartMinusOne)); ++ for (int reg = reg_from; reg <= reg_to; reg++) { ++ __ Stl(a0, register_location(reg)); ++ } ++} ++ ++ ++void RegExpMacroAssemblerSW64::WriteStackPointerToRegister(int reg) { ++ __ Ldl(a1, MemOperand(frame_pointer(), kStackHighEnd)); ++ __ Subl(a0, backtrack_stackpointer(), a1); ++ __ Stl(a0, register_location(reg)); ++} ++ ++ ++bool RegExpMacroAssemblerSW64::CanReadUnaligned() { ++ return false; ++} ++ ++ ++// Private methods: ++ ++void RegExpMacroAssemblerSW64::CallCheckStackGuardState(Register scratch) { ++ DCHECK(!isolate()->IsGeneratingEmbeddedBuiltins()); ++ DCHECK(!masm_->options().isolate_independent_code); ++ ++ int stack_alignment = base::OS::ActivationFrameAlignment(); ++ ++ // Align the stack pointer and save the original sp value on the stack. ++ __ mov(scratch, sp); ++ __ Subl(sp, sp, Operand(kPointerSize)); ++ DCHECK(base::bits::IsPowerOfTwo(stack_alignment)); ++ __ And(sp, sp, Operand(-stack_alignment)); ++ __ Stl(scratch, MemOperand(sp)); ++ ++ __ mov(a2, frame_pointer()); ++ // Code of self. ++ __ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE); ++ ++ // We need to make room for the return address on the stack. ++ DCHECK(IsAligned(stack_alignment, kPointerSize)); ++ __ Subl(sp, sp, Operand(stack_alignment)); ++ ++ // The stack pointer now points to cell where the return address will be ++ // written. Arguments are in registers, meaning we treat the return address as ++ // argument 5. Since DirectCEntry will handle allocating space for the C ++ // argument slots, we don't need to care about that here. This is how the ++ // stack will look (sp meaning the value of sp at this moment): ++ // [sp + 3] - empty slot if needed for alignment. ++ // [sp + 2] - saved sp. ++ // [sp + 1] - second word reserved for return value. ++ // [sp + 0] - first word reserved for return value. ++ ++ // a0 will point to the return address, placed by DirectCEntry. ++ __ mov(a0, sp); ++ ++ ExternalReference stack_guard_check = ++ ExternalReference::re_check_stack_guard_state(masm_->isolate()); ++ __ li(t12, Operand(stack_guard_check)); ++ ++ EmbeddedData d = EmbeddedData::FromBlob(); ++ CHECK(Builtins::IsIsolateIndependent(Builtins::kDirectCEntry)); ++ Address entry = d.InstructionStartOfBuiltin(Builtins::kDirectCEntry); ++ __ li(kScratchReg, Operand(entry, RelocInfo::OFF_HEAP_TARGET)); ++ __ Call(kScratchReg); ++ ++ // DirectCEntryStub allocated space for the C argument slots so we have to ++ // drop them with the return address from the stack with loading saved sp. ++ // At this point stack must look: ++ // [sp + 7] - empty slot if needed for alignment. ++ // [sp + 6] - saved sp. 
++ // [sp + 5] - second word reserved for return value. ++ // [sp + 4] - first word reserved for return value. ++ // [sp + 3] - C argument slot. ++ // [sp + 2] - C argument slot. ++ // [sp + 1] - C argument slot. ++ // [sp + 0] - C argument slot. ++ __ Ldl(sp, MemOperand(sp, stack_alignment + kCArgsSlotsSize)); ++ ++ __ li(code_pointer(), Operand(masm_->CodeObject())); ++} ++ ++ ++// Helper function for reading a value out of a stack frame. ++template ++static T& frame_entry(Address re_frame, int frame_offset) { ++ return reinterpret_cast(Memory(re_frame + frame_offset)); ++} ++ ++ ++template ++static T* frame_entry_address(Address re_frame, int frame_offset) { ++ return reinterpret_cast(re_frame + frame_offset); ++} ++ ++int64_t RegExpMacroAssemblerSW64::CheckStackGuardState(Address* return_address, ++ Address raw_code, ++ Address re_frame) { ++ Code re_code = Code::cast(Object(raw_code)); ++ return NativeRegExpMacroAssembler::CheckStackGuardState( ++ frame_entry(re_frame, kIsolate), ++ static_cast(frame_entry(re_frame, kStartIndex)), ++ static_cast( ++ frame_entry(re_frame, kDirectCall)), ++ return_address, re_code, ++ frame_entry_address
(re_frame, kInputString), ++ frame_entry_address(re_frame, kInputStart), ++ frame_entry_address(re_frame, kInputEnd)); ++} ++ ++ ++MemOperand RegExpMacroAssemblerSW64::register_location(int register_index) { ++ DCHECK(register_index < (1<<30)); ++ if (num_registers_ <= register_index) { ++ num_registers_ = register_index + 1; ++ } ++ return MemOperand(frame_pointer(), ++ kRegisterZero - register_index * kPointerSize); ++} ++ ++ ++void RegExpMacroAssemblerSW64::CheckPosition(int cp_offset, ++ Label* on_outside_input) { ++ if (cp_offset >= 0) { ++ BranchOrBacktrack(on_outside_input, ge, current_input_offset(), ++ Operand(-cp_offset * char_size())); ++ } else { ++ __ Ldl(a1, MemOperand(frame_pointer(), kStringStartMinusOne)); ++ __ Addl(a0, current_input_offset(), Operand(cp_offset * char_size())); ++ BranchOrBacktrack(on_outside_input, le, a0, Operand(a1)); ++ } ++} ++ ++ ++void RegExpMacroAssemblerSW64::BranchOrBacktrack(Label* to, ++ Condition condition, ++ Register rs, ++ const Operand& rt) { ++ if (condition == al) { // Unconditional. ++ if (to == nullptr) { ++ Backtrack(); ++ return; ++ } ++ __ jmp(to); ++ return; ++ } ++ if (to == nullptr) { ++ __ Branch(&backtrack_label_, condition, rs, rt); ++ return; ++ } ++ __ Branch(to, condition, rs, rt); ++} ++ ++ ++void RegExpMacroAssemblerSW64::SafeCall(Label* to, ++ Condition cond, ++ Register rs, ++ const Operand& rt) { ++ __ BranchAndLink(to, cond, rs, rt); ++} ++ ++ ++void RegExpMacroAssemblerSW64::SafeReturn() { ++ __ pop(ra); ++ __ Addl(t1, ra, Operand(masm_->CodeObject())); ++ __ Jump(t1); ++} ++ ++ ++void RegExpMacroAssemblerSW64::SafeCallTarget(Label* name) { ++ __ bind(name); ++ __ Subl(ra, ra, Operand(masm_->CodeObject())); ++ __ push(ra); ++} ++ ++ ++void RegExpMacroAssemblerSW64::Push(Register source) { ++ DCHECK(source != backtrack_stackpointer()); ++ __ Addl(backtrack_stackpointer(), ++ backtrack_stackpointer(), ++ Operand(-kIntSize)); ++ __ Stw(source, MemOperand(backtrack_stackpointer())); ++} ++ ++ ++void RegExpMacroAssemblerSW64::Pop(Register target) { ++ DCHECK(target != backtrack_stackpointer()); ++ __ Ldw(target, MemOperand(backtrack_stackpointer())); ++ __ Addl(backtrack_stackpointer(), backtrack_stackpointer(), kIntSize); ++} ++ ++ ++void RegExpMacroAssemblerSW64::CheckPreemption() { ++ // Check for preemption. ++ ExternalReference stack_limit = ++ ExternalReference::address_of_jslimit(masm_->isolate()); ++ __ li(a0, Operand(stack_limit)); ++ __ Ldl(a0, MemOperand(a0)); ++ SafeCall(&check_preempt_label_, ls, sp, Operand(a0)); ++} ++ ++ ++void RegExpMacroAssemblerSW64::CheckStackLimit() { ++ ExternalReference stack_limit = ++ ExternalReference::address_of_regexp_stack_limit_address( ++ masm_->isolate()); ++ ++ __ li(a0, Operand(stack_limit)); ++ __ Ldl(a0, MemOperand(a0)); ++ SafeCall(&stack_overflow_label_, ls, backtrack_stackpointer(), Operand(a0)); ++} ++ ++ ++void RegExpMacroAssemblerSW64::LoadCurrentCharacterUnchecked(int cp_offset, ++ int characters) { ++ Register offset = current_input_offset(); ++ if (cp_offset != 0) { ++ // t3 is not being used to store the capture start index at this point. ++ __ Addl(t3, current_input_offset(), Operand(cp_offset * char_size())); ++ offset = t3; ++ } ++ // We assume that we cannot do unaligned loads on SW64, so this function ++ // must only be used to load a single character at a time. 
++ DCHECK_EQ(1, characters); ++ __ Addl(t1, end_of_input_address(), Operand(offset)); ++ if (mode_ == LATIN1) { ++ __ Ldbu(current_character(), MemOperand(t1, 0)); ++ } else { ++ DCHECK(mode_ == UC16); ++ __ Ldhu(current_character(), MemOperand(t1, 0)); ++ } ++} ++ ++#undef __ ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_TARGET_ARCH_SW64 +diff --git a/src/3rdparty/chromium/v8/src/regexp/sw64/regexp-macro-assembler-sw64.h b/src/3rdparty/chromium/v8/src/regexp/sw64/regexp-macro-assembler-sw64.h +new file mode 100755 +index 000000000..ab4c62db6 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/regexp/sw64/regexp-macro-assembler-sw64.h +@@ -0,0 +1,224 @@ ++// Copyright 2011 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#ifndef V8_REGEXP_SW64_REGEXP_MACRO_ASSEMBLER_SW64_H_ ++#define V8_REGEXP_SW64_REGEXP_MACRO_ASSEMBLER_SW64_H_ ++ ++#include "src/codegen/macro-assembler.h" ++#include "src/codegen/sw64/assembler-sw64.h" ++#include "src/regexp/regexp-macro-assembler.h" ++ ++namespace v8 { ++namespace internal { ++ ++class V8_EXPORT_PRIVATE RegExpMacroAssemblerSW64 ++ : public NativeRegExpMacroAssembler { ++ public: ++ RegExpMacroAssemblerSW64(Isolate* isolate, Zone* zone, Mode mode, ++ int registers_to_save); ++ virtual ~RegExpMacroAssemblerSW64(); ++ virtual int stack_limit_slack(); ++ virtual void AdvanceCurrentPosition(int by); ++ virtual void AdvanceRegister(int reg, int by); ++ virtual void Backtrack(); ++ virtual void Bind(Label* label); ++ virtual void CheckAtStart(int cp_offset, Label* on_at_start); ++ virtual void CheckCharacter(uint32_t c, Label* on_equal); ++ virtual void CheckCharacterAfterAnd(uint32_t c, ++ uint32_t mask, ++ Label* on_equal); ++ virtual void CheckCharacterGT(uc16 limit, Label* on_greater); ++ virtual void CheckCharacterLT(uc16 limit, Label* on_less); ++ // A "greedy loop" is a loop that is both greedy and with a simple ++ // body. It has a particularly simple implementation. ++ virtual void CheckGreedyLoop(Label* on_tos_equals_current_position); ++ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start); ++ virtual void CheckNotBackReference(int start_reg, bool read_backward, ++ Label* on_no_match); ++ virtual void CheckNotBackReferenceIgnoreCase(int start_reg, ++ bool read_backward, bool unicode, ++ Label* on_no_match); ++ virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal); ++ virtual void CheckNotCharacterAfterAnd(uint32_t c, ++ uint32_t mask, ++ Label* on_not_equal); ++ virtual void CheckNotCharacterAfterMinusAnd(uc16 c, ++ uc16 minus, ++ uc16 mask, ++ Label* on_not_equal); ++ virtual void CheckCharacterInRange(uc16 from, ++ uc16 to, ++ Label* on_in_range); ++ virtual void CheckCharacterNotInRange(uc16 from, ++ uc16 to, ++ Label* on_not_in_range); ++ virtual void CheckBitInTable(Handle table, Label* on_bit_set); ++ ++ // Checks whether the given offset from the current position is before ++ // the end of the string. 
++ virtual void CheckPosition(int cp_offset, Label* on_outside_input); ++ virtual bool CheckSpecialCharacterClass(uc16 type, ++ Label* on_no_match); ++ virtual void Fail(); ++ virtual Handle GetCode(Handle source); ++ virtual void GoTo(Label* label); ++ virtual void IfRegisterGE(int reg, int comparand, Label* if_ge); ++ virtual void IfRegisterLT(int reg, int comparand, Label* if_lt); ++ virtual void IfRegisterEqPos(int reg, Label* if_eq); ++ virtual IrregexpImplementation Implementation(); ++ virtual void LoadCurrentCharacterUnchecked(int cp_offset, ++ int character_count); ++ virtual void PopCurrentPosition(); ++ virtual void PopRegister(int register_index); ++ virtual void PushBacktrack(Label* label); ++ virtual void PushCurrentPosition(); ++ virtual void PushRegister(int register_index, ++ StackCheckFlag check_stack_limit); ++ virtual void ReadCurrentPositionFromRegister(int reg); ++ virtual void ReadStackPointerFromRegister(int reg); ++ virtual void SetCurrentPositionFromEnd(int by); ++ virtual void SetRegister(int register_index, int to); ++ virtual bool Succeed(); ++ virtual void WriteCurrentPositionToRegister(int reg, int cp_offset); ++ virtual void ClearRegisters(int reg_from, int reg_to); ++ virtual void WriteStackPointerToRegister(int reg); ++ virtual bool CanReadUnaligned(); ++ ++ // Called from RegExp if the stack-guard is triggered. ++ // If the code object is relocated, the return address is fixed before ++ // returning. ++ // {raw_code} is an Address because this is called via ExternalReference. ++ static int64_t CheckStackGuardState(Address* return_address, Address raw_code, ++ Address re_frame); ++ ++ void print_regexp_frame_constants(); ++ ++ private: ++ // Offsets from frame_pointer() of function parameters and stored registers. ++ static const int kFramePointer = 0; ++ ++ // Above the frame pointer - Stored registers and stack passed parameters. ++ // Registers s0 to s5, fp, and ra. ++ static const int kStoredRegisters = kFramePointer; ++ // Return address (stored from link register, read into pc on return). ++ ++ // TODO(plind): This 7 - is 6 s-regs (s0..s5) plus fp. ++ static const int kReturnAddress = kStoredRegisters + 7 * kPointerSize; ++ ++ // Stack frame header. ++ static const int kStackFrameHeader = kReturnAddress; ++ // Stack parameters placed by caller. ++ static const int kStackHighEnd = kStackFrameHeader + kPointerSize; ++ static const int kDirectCall = kStackHighEnd + kPointerSize; ++ static const int kIsolate = kDirectCall + kPointerSize; ++ ++ // Below the frame pointer. ++ // Register parameters stored by setup code. ++ static const int kNumOutputRegisters = kFramePointer - kPointerSize; ++ static const int kRegisterOutput = kNumOutputRegisters - kPointerSize; ++ static const int kInputEnd = kRegisterOutput - kPointerSize; ++ static const int kInputStart = kInputEnd - kPointerSize; ++ static const int kStartIndex = kInputStart - kPointerSize; ++ static const int kInputString = kStartIndex - kPointerSize; ++ // When adding local variables remember to push space for them in ++ // the frame in GetCode. ++ static const int kSuccessfulCaptures = kInputString - kPointerSize; ++ static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize; ++ static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize; ++ // First register address. Following registers are below it on the stack. ++ static const int kRegisterZero = kBacktrackCount - kSystemPointerSize; ++ ++ // Initial size of code buffer. 
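// Illustrative aside, not part of the upstream patch: with 8-byte pointers the
// offsets above evaluate to kReturnAddress = 56, kStackHighEnd = 64,
// kDirectCall = 72 and kIsolate = 80 above the frame pointer, and
// kNumOutputRegisters = -8 down through kStringStartMinusOne = -64 below it,
// matching the sw64 frame diagram in regexp-macro-assembler-sw64.cc. Note that
// kBacktrackCount (-72) was added after that diagram was drawn, so register 0
// actually sits at fp[-80] rather than the fp[-72] shown there.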
++ static const int kRegExpCodeSize = 1024; ++ ++ // Check whether preemption has been requested. ++ void CheckPreemption(); ++ ++ // Check whether we are exceeding the stack limit on the backtrack stack. ++ void CheckStackLimit(); ++ ++ ++ // Generate a call to CheckStackGuardState. ++ void CallCheckStackGuardState(Register scratch); ++ ++ // The ebp-relative location of a regexp register. ++ MemOperand register_location(int register_index); ++ ++ // Register holding the current input position as negative offset from ++ // the end of the string. ++ inline Register current_input_offset() { return t9; } ++ ++ // The register containing the current character after LoadCurrentCharacter. ++ inline Register current_character() { return t10; } ++ ++ // Register holding address of the end of the input string. ++ inline Register end_of_input_address() { return t2; } ++ ++ // Register holding the frame address. Local variables, parameters and ++ // regexp registers are addressed relative to this. ++ inline Register frame_pointer() { return fp; } ++ ++ // The register containing the backtrack stack top. Provides a meaningful ++ // name to the register. ++ inline Register backtrack_stackpointer() { return t0; } ++ ++ // Register holding pointer to the current code object. ++ inline Register code_pointer() { return a5; } ++ ++ // Byte size of chars in the string to match (decided by the Mode argument). ++ inline int char_size() { return static_cast(mode_); } ++ ++ // Equivalent to a conditional branch to the label, unless the label ++ // is nullptr, in which case it is a conditional Backtrack. ++ void BranchOrBacktrack(Label* to, ++ Condition condition, ++ Register rs, ++ const Operand& rt); ++ ++ // Call and return internally in the generated code in a way that ++ // is GC-safe (i.e., doesn't leave absolute code addresses on the stack) ++ inline void SafeCall(Label* to, ++ Condition cond, ++ Register rs, ++ const Operand& rt); ++ inline void SafeReturn(); ++ inline void SafeCallTarget(Label* name); ++ ++ // Pushes the value of a register on the backtrack stack. Decrements the ++ // stack pointer by a word size and stores the register's value there. ++ inline void Push(Register source); ++ ++ // Pops a value from the backtrack stack. Reads the word at the stack pointer ++ // and increments it by a word size. ++ inline void Pop(Register target); ++ ++ Isolate* isolate() const { return masm_->isolate(); } ++ ++ MacroAssembler* masm_; ++ ++ // Which mode to generate code for (Latin1 or UC16). ++ Mode mode_; ++ ++ // One greater than maximal register index actually used. ++ int num_registers_; ++ ++ // Number of registers to output at the end (the saved registers ++ // are always 0..num_saved_registers_-1). ++ int num_saved_registers_; ++ ++ // Labels used internally. ++ Label entry_label_; ++ Label start_label_; ++ Label success_label_; ++ Label backtrack_label_; ++ Label exit_label_; ++ Label check_preempt_label_; ++ Label stack_overflow_label_; ++ Label internal_failure_label_; ++}; ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_REGEXP_SW64_REGEXP_MACRO_ASSEMBLER_SW64_H_ +diff --git a/src/3rdparty/chromium/v8/src/runtime/runtime-atomics.cc b/src/3rdparty/chromium/v8/src/runtime/runtime-atomics.cc +index 34259c6e6..0eff994c9 100644 +--- a/src/3rdparty/chromium/v8/src/runtime/runtime-atomics.cc ++++ b/src/3rdparty/chromium/v8/src/runtime/runtime-atomics.cc +@@ -20,7 +20,7 @@ namespace internal { + + // Other platforms have CSA support, see builtins-sharedarraybuffer-gen.h. 
+ #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \ +- V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X ++ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_SW64 + + namespace { + +diff --git a/src/3rdparty/chromium/v8/src/runtime/runtime-utils.h b/src/3rdparty/chromium/v8/src/runtime/runtime-utils.h +index 170c0bcdb..fa6025209 100644 +--- a/src/3rdparty/chromium/v8/src/runtime/runtime-utils.h ++++ b/src/3rdparty/chromium/v8/src/runtime/runtime-utils.h +@@ -119,10 +119,22 @@ namespace internal { + // In Win64 calling convention, a struct of two pointers is returned in memory, + // allocated by the caller, and passed as a pointer in a hidden first parameter. + #ifdef V8_HOST_ARCH_64_BIT ++#ifdef SW64 ++typedef Address ObjectPair; ++#else + struct ObjectPair { + Address x; + Address y; + }; ++#endif ++ ++#ifdef SW64 ++static inline ObjectPair MakePair(Object x, Object y) { ++ // mov a1 to a5 ++ __asm__ __volatile__ ("bis %0,$31,$21\n\t"::"r"(y.ptr())); ++ return x.ptr(); ++} ++#else + + static inline ObjectPair MakePair(Object x, Object y) { + ObjectPair result = {x.ptr(), y.ptr()}; +@@ -130,6 +142,7 @@ static inline ObjectPair MakePair(Object x, Object y) { + // In Win64 they are assigned to a hidden first argument. + return result; + } ++#endif + #else + using ObjectPair = uint64_t; + static inline ObjectPair MakePair(Object x, Object y) { +diff --git a/src/3rdparty/chromium/v8/src/snapshot/deserializer.h b/src/3rdparty/chromium/v8/src/snapshot/deserializer.h +index 62814a881..47217573f 100644 +--- a/src/3rdparty/chromium/v8/src/snapshot/deserializer.h ++++ b/src/3rdparty/chromium/v8/src/snapshot/deserializer.h +@@ -29,7 +29,7 @@ class Object; + // of objects found in code. + #if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \ + defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_S390) || \ +- defined(V8_TARGET_ARCH_PPC64) || V8_EMBEDDED_CONSTANT_POOL ++ defined(V8_TARGET_ARCH_PPC64) || V8_EMBEDDED_CONSTANT_POOL || defined(V8_TARGET_ARCH_SW64) + #define V8_CODE_EMBEDS_OBJECT_POINTER 1 + #else + #define V8_CODE_EMBEDS_OBJECT_POINTER 0 +diff --git a/src/3rdparty/chromium/v8/src/wasm/baseline/liftoff-assembler-defs.h b/src/3rdparty/chromium/v8/src/wasm/baseline/liftoff-assembler-defs.h +index 781fb87db..490d49d1c 100644 +--- a/src/3rdparty/chromium/v8/src/wasm/baseline/liftoff-assembler-defs.h ++++ b/src/3rdparty/chromium/v8/src/wasm/baseline/liftoff-assembler-defs.h +@@ -45,6 +45,21 @@ constexpr RegList kLiftoffAssemblerGpCacheRegs = + + constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf( + f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24, f26); ++#elif V8_TARGET_ARCH_SW64 ++ ++// t7, t8 used as two scratch regs instead of s3, s4, so delete t7, t8; ++// t5, t6 same to at, t4 used as v1. 
++constexpr RegList kLiftoffAssemblerGpCacheRegs = ++ Register::ListOf(a0, a1, a2, a3, a4, a5, t0, t1, t2, t3, t4, ++ t9, t10, s5, v0); ++ ++// f30 used as kDoubleCompareReg; ++// f27, f28, f29 used as kScratchDoubleReg*; ++constexpr RegList kLiftoffAssemblerFpCacheRegs = ++ DoubleRegister::ListOf(f0, f1, f2, f3, f4, f5, f6, f7, ++ f8, f9, f10, f11, f12, f13, f14, f15, ++ f16, f17, f18, f19, f20, f21, ++ f22, f23, f24, f25, f26); + + #elif V8_TARGET_ARCH_ARM + +@@ -103,6 +118,20 @@ constexpr Condition kUnsignedLessEqual = ule; + constexpr Condition kUnsignedGreaterThan = ugt; + constexpr Condition kUnsignedGreaterEqual = uge; + ++#elif V8_TARGET_ARCH_SW64 ++ ++constexpr Condition kEqual = eq; ++constexpr Condition kUnequal = ne; ++constexpr Condition kSignedLessThan = lt; ++constexpr Condition kSignedLessEqual = le; ++constexpr Condition kSignedGreaterThan = gt; ++constexpr Condition kSignedGreaterEqual = ge; ++constexpr Condition kUnsignedLessThan = ult; ++constexpr Condition kUnsignedLessEqual = ule; ++constexpr Condition kUnsignedGreaterThan = ugt; ++constexpr Condition kUnsignedGreaterEqual = uge; ++ ++ + #elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 + + constexpr Condition kEqual = eq; +diff --git a/src/3rdparty/chromium/v8/src/wasm/baseline/liftoff-assembler.h b/src/3rdparty/chromium/v8/src/wasm/baseline/liftoff-assembler.h +index 6573ff4aa..90d19651e 100644 +--- a/src/3rdparty/chromium/v8/src/wasm/baseline/liftoff-assembler.h ++++ b/src/3rdparty/chromium/v8/src/wasm/baseline/liftoff-assembler.h +@@ -1047,6 +1047,8 @@ class LiftoffStackSlots { + #include "src/wasm/baseline/mips64/liftoff-assembler-mips64.h" + #elif V8_TARGET_ARCH_S390 + #include "src/wasm/baseline/s390/liftoff-assembler-s390.h" ++#elif V8_TARGET_ARCH_SW64 ++#include "src/wasm/baseline/sw64/liftoff-assembler-sw64.h" + #else + #error Unsupported architecture. + #endif +diff --git a/src/3rdparty/chromium/v8/src/wasm/baseline/sw64/liftoff-assembler-sw64.h b/src/3rdparty/chromium/v8/src/wasm/baseline/sw64/liftoff-assembler-sw64.h +new file mode 100755 +index 000000000..76e48d0a6 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/wasm/baseline/sw64/liftoff-assembler-sw64.h +@@ -0,0 +1,2951 @@ ++// Copyright 2017 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#ifndef V8_WASM_BASELINE_SW64_LIFTOFF_ASSEMBLER_SW64_H_ ++#define V8_WASM_BASELINE_SW64_LIFTOFF_ASSEMBLER_SW64_H_ ++ ++#include "src/wasm/baseline/liftoff-assembler.h" ++ ++namespace v8 { ++namespace internal { ++namespace wasm { ++ ++namespace liftoff { ++ ++// Liftoff Frames. ++// ++// slot Frame ++// +--------------------+--------------------------- ++// n+4 | optional padding slot to keep the stack 16 byte aligned. ++// n+3 | parameter n | ++// ... | ... | ++// 4 | parameter 1 | or parameter 2 ++// 3 | parameter 0 | or parameter 1 ++// 2 | (result address) | or parameter 0 ++// -----+--------------------+--------------------------- ++// 1 | return addr (ra) | ++// 0 | previous frame (fp)| ++// -----+--------------------+ <-- frame ptr (fp) ++// -1 | 0xa: WASM | ++// -2 | instance | ++// -----+--------------------+--------------------------- ++// -3 | slot 0 | ^ ++// -4 | slot 1 | | ++// | | Frame slots ++// | | | ++// | | v ++// | optional padding slot to keep the stack 16 byte aligned. ++// -----+--------------------+ <-- stack ptr (sp) ++// ++ ++// fp-8 holds the stack marker, fp-16 is the instance parameter. 
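// A minimal illustrative sketch (assumed helper, not from this patch) of the
// addressing convention implied by the frame layout above: spill slots are
// identified by a positive byte offset measured downwards from fp, so a slot
// at "offset" lives at fp - offset and the instance parameter sits at fp - 16.
#include <cstdint>
inline std::intptr_t LiftoffSlotAddress(std::intptr_t fp_value, int offset) {
  return fp_value - offset;  // mirrors MemOperand(fp, -offset) below
}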
++constexpr int kInstanceOffset = 16; ++ ++inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); } ++ ++inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); } ++ ++inline MemOperand GetMemOp(LiftoffAssembler* assm, Register addr, ++ Register offset, uint32_t offset_imm) { ++ if (is_uint31(offset_imm)) { ++ if (offset == no_reg) return MemOperand(addr, offset_imm); ++ assm->addl(addr, offset, kScratchReg); ++ return MemOperand(kScratchReg, offset_imm); ++ } ++ // Offset immediate does not fit in 31 bits. ++ assm->li(kScratchReg, offset_imm); ++ assm->addl(kScratchReg, addr, kScratchReg); ++ if (offset != no_reg) { ++ assm->addl(kScratchReg, offset, kScratchReg); ++ } ++ return MemOperand(kScratchReg, 0); ++} ++ ++inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src, ++ ValueType type) { ++ switch (type.kind()) { ++ case ValueType::kI32: ++ assm->Ldw(dst.gp(), src); ++ break; ++ case ValueType::kI64: ++ case ValueType::kRef: ++ case ValueType::kOptRef: ++ assm->Ldl(dst.gp(), src); ++ break; ++ case ValueType::kF32: ++ assm->Flds(dst.fp(), src); ++ break; ++ case ValueType::kF64: ++ assm->Fldd(dst.fp(), src); ++ break; ++// case ValueType::kS128: ++// assm->ld_b(dst.fp().toW(), src); ++// break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++inline void Store(LiftoffAssembler* assm, Register base, int32_t offset, ++ LiftoffRegister src, ValueType type) { ++ MemOperand dst(base, offset); ++ switch (type.kind()) { ++ case ValueType::kI32: ++ assm->Ustw(src.gp(), dst); ++ break; ++ case ValueType::kI64: ++ assm->Ustl(src.gp(), dst); ++ break; ++ case ValueType::kF32: ++ assm->Ufsts(src.fp(), dst, t11); ++ break; ++ case ValueType::kF64: ++ assm->Ufstd(src.fp(), dst, t11); ++ break; ++// case ValueType::kS128: ++// assm->st_b(src.fp().toW(), dst); ++// break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) { ++ switch (type.kind()) { ++ case ValueType::kI32: ++ assm->subl(sp, kSystemPointerSize, sp); ++ assm->Stw(reg.gp(), MemOperand(sp, 0)); ++ break; ++ case ValueType::kI64: ++ assm->push(reg.gp()); ++ break; ++ case ValueType::kF32: ++ assm->subl(sp, kSystemPointerSize, sp); ++ assm->Fsts(reg.fp(), MemOperand(sp, 0)); ++ break; ++ case ValueType::kF64: ++ assm->subl(sp, kSystemPointerSize, sp); ++ assm->Fstd(reg.fp(), MemOperand(sp, 0)); ++ break; ++// case ValueType::kS128: ++// assm->daddiu(sp, sp, -kSystemPointerSize * 2); ++// assm->st_b(reg.fp().toW(), MemOperand(sp, 0)); ++// break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++} // namespace liftoff ++ ++int LiftoffAssembler::PrepareStackFrame() { ++ int offset = pc_offset(); ++ // When constant that represents size of stack frame can't be represented ++ // as 16bit we need three instructions to add it to sp, so we reserve space ++ // for this case. ++ addl(sp, 0, sp); ++ nop(); ++ nop(); ++ return offset; ++} ++ ++void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params, ++ int stack_param_delta) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ ++ // Push the return address and frame pointer to complete the stack frame. ++ Ldl(scratch, MemOperand(fp, 8)); ++ Push(scratch); ++ Ldl(scratch, MemOperand(fp, 0)); ++ Push(scratch); ++ ++ // Shift the whole frame upwards. 
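// Illustrative C-style equivalent (not emitted code) of the slot copy that
// follows: slot_count covers the callee stack parameters plus the two words
// just pushed (ra and the caller fp), and each 8-byte slot is moved from its
// old position at sp + i*8 to its new fp-relative position, highest slot
// first:
//
//   for (int i = slot_count - 1; i >= 0; --i)
//     *(int64_t*)(fp + (i - stack_param_delta) * 8) = *(int64_t*)(sp + i * 8);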
++ int slot_count = num_callee_stack_params + 2; ++ for (int i = slot_count - 1; i >= 0; --i) { ++ Ldl(scratch, MemOperand(sp, i * 8)); ++ Stl(scratch, MemOperand(fp, (i - stack_param_delta) * 8)); ++ } ++ ++ // Set the new stack and frame pointer. ++ subl(fp, stack_param_delta * 8, sp); ++ Pop(ra, fp); ++} ++ ++void LiftoffAssembler::PatchPrepareStackFrame(int offset, int frame_size) { ++ // We can't run out of space, just pass anything big enough to not cause the ++ // assembler to try to grow the buffer. ++ constexpr int kAvailableSpace = 256; ++ TurboAssembler patching_assembler( ++ nullptr, AssemblerOptions{}, CodeObjectRequired::kNo, ++ ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace)); ++ // If bytes can be represented as 16bit, addl will be generated and two ++ // nops will stay untouched. Otherwise, lui-ori sequence will load it to ++ // register and, as third instruction, addl will be generated. ++ patching_assembler.Addl(sp, sp, Operand(-frame_size)); ++} ++ ++void LiftoffAssembler::FinishCode() {} ++ ++void LiftoffAssembler::AbortCompilation() {} ++ ++// static ++constexpr int LiftoffAssembler::StaticStackFrameSize() { ++ return liftoff::kInstanceOffset; ++} ++ ++int LiftoffAssembler::SlotSizeForType(ValueType type) { ++ switch (type.kind()) { ++ case ValueType::kS128: ++ return type.element_size_bytes(); ++ default: ++ return kStackSlotSize; ++ } ++} ++ ++bool LiftoffAssembler::NeedsAlignment(ValueType type) { ++ return type.kind() == ValueType::kS128 || type.is_reference_type(); ++} ++ ++void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value, ++ RelocInfo::Mode rmode) { ++ switch (value.type().kind()) { ++ case ValueType::kI32: ++ TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode)); ++ break; ++ case ValueType::kI64: ++ TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode)); ++ break; ++ case ValueType::kF32: ++ TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits()); ++ break; ++ case ValueType::kF64: ++ TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits()); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset, ++ int size) { ++ DCHECK_LE(offset, kMaxInt); ++ Ldl(dst, liftoff::GetInstanceOperand()); ++ DCHECK(size == 4 || size == 8); ++ if (size == 4) { ++ Ldw(dst, MemOperand(dst, offset)); ++ } else { ++ Ldl(dst, MemOperand(dst, offset)); ++ } ++} ++ ++void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, ++ uint32_t offset) { ++ LoadFromInstance(dst, offset, kTaggedSize); ++} ++ ++void LiftoffAssembler::SpillInstance(Register instance) { ++ Stl(instance, liftoff::GetInstanceOperand()); ++} ++ ++void LiftoffAssembler::FillInstanceInto(Register dst) { ++ Ldl(dst, liftoff::GetInstanceOperand()); ++} ++ ++void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr, ++ Register offset_reg, ++ int32_t offset_imm, ++ LiftoffRegList pinned) { ++ DCHECK_GE(offset_imm, 0); ++ STATIC_ASSERT(kTaggedSize == kInt64Size); ++ Load(LiftoffRegister(dst), src_addr, offset_reg, ++ static_cast(offset_imm), LoadType::kI64Load, pinned); ++} ++ ++void LiftoffAssembler::StoreTaggedPointer(Register dst_addr, ++ int32_t offset_imm, ++ LiftoffRegister src, ++ LiftoffRegList pinned) { ++ bailout(kRefTypes, "GlobalSet"); ++} ++ ++void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr, ++ Register offset_reg, uint32_t offset_imm, ++ LoadType type, LiftoffRegList pinned, ++ uint32_t* protected_load_pc, bool is_load_mem) { ++ 
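// Summary of the load path below (descriptive only): base + optional index +
// immediate are folded into a single MemOperand via liftoff::GetMemOp,
// pc_offset() is recorded in *protected_load_pc when requested, and a sign-
// or zero-extending load of the requested width is selected; the U-prefixed
// TurboAssembler macros are assumed here to be the unaligned-access variants.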
MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm); ++ ++ if (protected_load_pc) *protected_load_pc = pc_offset(); ++ switch (type.value()) { ++ case LoadType::kI32Load8U: ++ case LoadType::kI64Load8U: ++ Ldbu(dst.gp(), src_op); ++ break; ++ case LoadType::kI32Load8S: ++ case LoadType::kI64Load8S: ++ Ldb(dst.gp(), src_op); ++ break; ++ case LoadType::kI32Load16U: ++ case LoadType::kI64Load16U: ++ TurboAssembler::Uldhu(dst.gp(), src_op); ++ break; ++ case LoadType::kI32Load16S: ++ case LoadType::kI64Load16S: ++ TurboAssembler::Uldh(dst.gp(), src_op); ++ break; ++ case LoadType::kI64Load32U: ++ TurboAssembler::Uldwu(dst.gp(), src_op); ++ break; ++ case LoadType::kI32Load: ++ case LoadType::kI64Load32S: ++ TurboAssembler::Uldw(dst.gp(), src_op); ++ break; ++ case LoadType::kI64Load: ++ TurboAssembler::Uldl(dst.gp(), src_op); ++ break; ++ case LoadType::kF32Load: ++ TurboAssembler::Uflds(dst.fp(), src_op, t11); ++ break; ++ case LoadType::kF64Load: ++ TurboAssembler::Ufldd(dst.fp(), src_op, t11); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ ++#if defined(V8_TARGET_BIG_ENDIAN) ++ if (is_load_mem) { ++ pinned.set(src_op.rm()); ++ liftoff::ChangeEndiannessLoad(this, dst, type, pinned); ++ } ++#endif ++} ++ ++void LiftoffAssembler::Store(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, LiftoffRegister src, ++ StoreType type, LiftoffRegList pinned, ++ uint32_t* protected_store_pc, bool is_store_mem) { ++ MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm); ++ ++#if defined(V8_TARGET_BIG_ENDIAN) ++ if (is_store_mem) { ++ pinned.set(dst_op.rm()); ++ LiftoffRegister tmp = GetUnusedRegister(src.reg_class(), pinned); ++ // Save original value. ++ Move(tmp, src, type.value_type()); ++ ++ src = tmp; ++ pinned.set(tmp); ++ liftoff::ChangeEndiannessStore(this, src, type, pinned); ++ } ++#endif ++ ++ if (protected_store_pc) *protected_store_pc = pc_offset(); ++ switch (type.value()) { ++ case StoreType::kI32Store8: ++ case StoreType::kI64Store8: ++ Stb(src.gp(), dst_op); ++ break; ++ case StoreType::kI32Store16: ++ case StoreType::kI64Store16: ++ TurboAssembler::Usth(src.gp(), dst_op, t11); ++ break; ++ case StoreType::kI32Store: ++ case StoreType::kI64Store32: ++ TurboAssembler::Ustw(src.gp(), dst_op); ++ break; ++ case StoreType::kI64Store: ++ TurboAssembler::Ustl(src.gp(), dst_op); ++ break; ++ case StoreType::kF32Store: ++ TurboAssembler::Ufsts(src.fp(), dst_op, t11); ++ break; ++ case StoreType::kF64Store: ++ TurboAssembler::Ufstd(src.fp(), dst_op, t11); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr, ++ Register offset_reg, uint32_t offset_imm, ++ LoadType type, LiftoffRegList pinned) { ++ bailout(kAtomics, "AtomicLoad"); ++} ++ ++void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, LiftoffRegister src, ++ StoreType type, LiftoffRegList pinned) { ++ bailout(kAtomics, "AtomicStore"); ++} ++ ++void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, LiftoffRegister value, ++ LiftoffRegister result, StoreType type) { ++ bailout(kAtomics, "AtomicAdd"); ++} ++ ++void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, LiftoffRegister value, ++ LiftoffRegister result, StoreType type) { ++ bailout(kAtomics, "AtomicSub"); ++} ++ ++void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, 
LiftoffRegister value, ++ LiftoffRegister result, StoreType type) { ++ bailout(kAtomics, "AtomicAnd"); ++} ++ ++void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, LiftoffRegister value, ++ LiftoffRegister result, StoreType type) { ++ bailout(kAtomics, "AtomicOr"); ++} ++ ++void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, LiftoffRegister value, ++ LiftoffRegister result, StoreType type) { ++ bailout(kAtomics, "AtomicXor"); ++} ++ ++void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, ++ LiftoffRegister value, ++ LiftoffRegister result, StoreType type) { ++ bailout(kAtomics, "AtomicExchange"); ++} ++ ++void LiftoffAssembler::AtomicCompareExchange( ++ Register dst_addr, Register offset_reg, uint32_t offset_imm, ++ LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result, ++ StoreType type) { ++ bailout(kAtomics, "AtomicCompareExchange"); ++} ++ ++//SKTODO ++void LiftoffAssembler::AtomicFence() {memb();} ++ ++void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst, ++ uint32_t caller_slot_idx, ++ ValueType type) { ++ MemOperand src(fp, kSystemPointerSize * (caller_slot_idx + 1)); ++ liftoff::Load(this, dst, src, type); ++} ++ ++void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src, ++ uint32_t caller_slot_idx, ++ ValueType type) { ++ int32_t offset = kSystemPointerSize * (caller_slot_idx + 1); ++ liftoff::Store(this, fp, offset, src, type); ++} ++ ++void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset, ++ ValueType type) { ++ liftoff::Load(this, dst, MemOperand(sp, offset), type); ++} ++ ++void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset, ++ ValueType type) { ++ DCHECK_NE(dst_offset, src_offset); ++ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {}); ++ Fill(reg, src_offset, type); ++ Spill(dst_offset, reg, type); ++} ++ ++void LiftoffAssembler::Move(Register dst, Register src, ValueType type) { ++ DCHECK_NE(dst, src); ++ // TODO(ksreten): Handle different sizes here. 
++ TurboAssembler::Move(dst, src); ++} ++ ++void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ++ ValueType type) { ++ DCHECK_NE(dst, src); ++ TurboAssembler::Move(dst, src); ++} ++ ++void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) { ++ RecordUsedSpillOffset(offset); ++ MemOperand dst = liftoff::GetStackSlot(offset); ++ switch (type.kind()) { ++ case ValueType::kI32: ++ Stw(reg.gp(), dst); ++ break; ++ case ValueType::kI64: ++ case ValueType::kRef: ++ case ValueType::kOptRef: ++ Stl(reg.gp(), dst); ++ break; ++ case ValueType::kF32: ++ Fsts(reg.fp(), dst); ++ break; ++ case ValueType::kF64: ++ TurboAssembler::Fstd(reg.fp(), dst); ++ break; ++// case ValueType::kS128: ++// TurboAssembler::st_b(reg.fp().toW(), dst); ++// break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void LiftoffAssembler::Spill(int offset, WasmValue value) { ++ RecordUsedSpillOffset(offset); ++ MemOperand dst = liftoff::GetStackSlot(offset); ++ switch (value.type().kind()) { ++ case ValueType::kI32: { ++ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {}); ++ TurboAssembler::li(tmp.gp(), Operand(value.to_i32())); ++ Stw(tmp.gp(), dst); ++ break; ++ } ++ case ValueType::kI64: ++ case ValueType::kRef: ++ case ValueType::kOptRef: { ++ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {}); ++ TurboAssembler::li(tmp.gp(), value.to_i64()); ++ Stl(tmp.gp(), dst); ++ break; ++ } ++ default: ++ // kWasmF32 and kWasmF64 are unreachable, since those ++ // constants are not tracked. ++ UNREACHABLE(); ++ } ++} ++ ++void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) { ++ MemOperand src = liftoff::GetStackSlot(offset); ++ switch (type.kind()) { ++ case ValueType::kI32: ++ Ldw(reg.gp(), src); ++ break; ++ case ValueType::kI64: ++ case ValueType::kRef: ++ case ValueType::kOptRef: ++ Ldl(reg.gp(), src); ++ break; ++ case ValueType::kF32: ++ Flds(reg.fp(), src); ++ break; ++ case ValueType::kF64: ++ TurboAssembler::Fldd(reg.fp(), src); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) { ++ UNREACHABLE(); ++} ++ ++void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) { ++ DCHECK_LT(0, size); ++ RecordUsedSpillOffset(start + size); ++ ++ if (size <= 12 * kStackSlotSize) { ++ // Special straight-line code for up to 12 slots. Generates one ++ // instruction per slot (<= 12 instructions total). ++ uint32_t remainder = size; ++ for (; remainder >= kStackSlotSize; remainder -= kStackSlotSize) { ++ Stl(zero_reg, liftoff::GetStackSlot(start + remainder)); ++ } ++ DCHECK(remainder == 4 || remainder == 0); ++ if (remainder) { ++ Stw(zero_reg, liftoff::GetStackSlot(start + remainder)); ++ } ++ } else { ++ // General case for bigger counts (12 instructions). ++ // Use a0 for start address (inclusive), a1 for end address (exclusive). 
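// Illustrative C equivalent (not emitted code) of the zeroing loop below:
// a0 starts at fp - start - size, a1 marks fp - start, and one pointer-sized
// zero word is stored per iteration:
//
//   for (char* p = fp - start - size; p != fp - start; p += kSystemPointerSize)
//     *reinterpret_cast<int64_t*>(p) = 0;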
++ Push(a1, a0); ++ Addl(a0, fp, Operand(-start - size)); ++ Addl(a1, fp, Operand(-start)); ++ ++ Label loop; ++ bind(&loop); ++ Stl(zero_reg, MemOperand(a0)); ++ addl(a0, kSystemPointerSize, a0); ++ BranchShort(&loop, ne, a0, Operand(a1)); ++ ++ Pop(a1, a0); ++ } ++} ++ ++void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) { ++ TurboAssembler::Dclz(dst.gp(), src.gp()); ++} ++ ++void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) { ++ TurboAssembler::Dctz(dst.gp(), src.gp()); ++} ++ ++bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst, ++ LiftoffRegister src) { ++ TurboAssembler::Dpopcnt(dst.gp(), src.gp()); ++ return true; ++} ++ ++void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) { ++ TurboAssembler::Mulw(dst, lhs, rhs); ++} ++ ++void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs, ++ Label* trap_div_by_zero, ++ Label* trap_div_unrepresentable) { ++ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); ++ ++ // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable. ++ TurboAssembler::li(kScratchReg, 1); ++ TurboAssembler::li(kScratchReg2, 1); ++ TurboAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq); ++ TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq); ++ addl(kScratchReg, kScratchReg2,kScratchReg); ++ TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, ++ Operand(zero_reg)); ++ ++// addw(lhs, 0, lhs); ++// addw(rhs, 0, rhs); ++ ifmovd(lhs, kScratchDoubleReg1); ++ ifmovd(rhs, kScratchDoubleReg2); ++ fcvtld(kScratchDoubleReg1, kScratchDoubleReg1); ++ fcvtld(kScratchDoubleReg2, kScratchDoubleReg2); ++ fdivd(kScratchDoubleReg1, kScratchDoubleReg2, kScratchDoubleReg1); ++ fcvtdl_z(kScratchDoubleReg1, kScratchDoubleReg2); ++ fimovd(kScratchDoubleReg2, dst); ++ //TurboAssembler::Div(dst, lhs, rhs); ++} ++ ++void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs, ++ Label* trap_div_by_zero) { ++ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); ++ zapnot(lhs, 0xf, lhs); ++ zapnot(rhs, 0xf, rhs); ++ ifmovd(lhs, kScratchDoubleReg1); ++ ifmovd(rhs, kScratchDoubleReg2); ++ fcvtld(kScratchDoubleReg1, kScratchDoubleReg1); ++ fcvtld(kScratchDoubleReg2, kScratchDoubleReg2); ++ fdivd(kScratchDoubleReg1, kScratchDoubleReg2, kScratchDoubleReg1); ++ fcvtdl_z(kScratchDoubleReg1, kScratchDoubleReg2); ++ fimovd(kScratchDoubleReg2,dst); ++ //TurboAssembler::Divu(dst, lhs, rhs); ++} ++ ++void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs, ++ Label* trap_div_by_zero) { ++ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); ++// addw(lhs, 0, lhs); ++// addw(rhs, 0, rhs); ++ ifmovd(lhs, kScratchDoubleReg1); ++ ifmovd(rhs, kScratchDoubleReg2); ++ fcvtld(kScratchDoubleReg1, kScratchDoubleReg1); ++ fcvtld(kScratchDoubleReg2, kScratchDoubleReg2); ++ fdivd(kScratchDoubleReg1, kScratchDoubleReg2, kScratchDoubleReg1); ++ fcvtdl_z(kScratchDoubleReg1, kScratchDoubleReg2); ++ fimovd(kScratchDoubleReg2, kScratchReg); ++ mulw(kScratchReg, rhs, kScratchReg); ++ subw(lhs, kScratchReg, dst); ++ //TurboAssembler::Mod(dst, lhs, rhs); ++} ++ ++void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs, ++ Label* trap_div_by_zero) { ++ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); ++ zapnot(lhs, 0xf, lhs); ++ zapnot(rhs, 0xf, rhs); ++ ifmovd(lhs, kScratchDoubleReg1); ++ ifmovd(rhs, 
kScratchDoubleReg2); ++ fcvtld(kScratchDoubleReg1, kScratchDoubleReg1); ++ fcvtld(kScratchDoubleReg2, kScratchDoubleReg2); ++ fdivd(kScratchDoubleReg1, kScratchDoubleReg2, kScratchDoubleReg1); ++ fcvtdl_z(kScratchDoubleReg1, kScratchDoubleReg2); ++ fimovd(kScratchDoubleReg2, kScratchReg); ++ mulw(kScratchReg, rhs, kScratchReg); ++ subw(lhs, kScratchReg, dst); ++ //TurboAssembler::Modu(dst, lhs, rhs); ++} ++ ++#define I32_BINOP(name, instruction) \ ++ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \ ++ Register rhs) { \ ++ instruction(lhs, rhs, dst); \ ++ } ++ ++// clang-format off ++I32_BINOP(add, addw) ++I32_BINOP(sub, subw) ++I32_BINOP(and, and_ins) ++I32_BINOP(or, or_ins) ++I32_BINOP(xor, xor_ins) ++// clang-format on ++ ++#undef I32_BINOP ++ ++#define I32_BINOP_I(name, instruction) \ ++ void LiftoffAssembler::emit_i32_##name##i(Register dst, Register lhs, \ ++ int32_t imm) { \ ++ instruction(dst, lhs, Operand(imm)); \ ++ } ++ ++// clang-format off ++I32_BINOP_I(add, Addw) ++I32_BINOP_I(and, And) ++I32_BINOP_I(or, Or) ++I32_BINOP_I(xor, Xor) ++// clang-format on ++ ++#undef I32_BINOP_I ++ ++void LiftoffAssembler::emit_i32_clz(Register dst, Register src) { ++ TurboAssembler::Clz(dst, src); ++} ++ ++void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) { ++ TurboAssembler::Ctz(dst, src); ++} ++ ++bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) { ++ TurboAssembler::Popcnt(dst, src); ++ return true; ++} ++ ++#define I32_SHIFTOP(name, instruction) \ ++ void LiftoffAssembler::emit_i32_##name(Register dst, Register src, \ ++ Register amount) { \ ++ instruction(dst, src, amount); \ ++ } ++#define I32_SHIFTOP_I(name, instruction) \ ++ I32_SHIFTOP(name, instruction) \ ++ void LiftoffAssembler::emit_i32_##name##i(Register dst, Register src, \ ++ int amount) { \ ++ instruction(dst, src, (amount & 31)); \ ++ } ++ ++I32_SHIFTOP_I(shl, Sllw) ++I32_SHIFTOP_I(sar, Sraw) ++I32_SHIFTOP_I(shr, Srlw) ++ ++#undef I32_SHIFTOP ++#undef I32_SHIFTOP_I ++ ++void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ TurboAssembler::Mull(dst.gp(), lhs.gp(), rhs.gp()); ++} ++ ++bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs, ++ Label* trap_div_by_zero, ++ Label* trap_div_unrepresentable) { ++ //TODO ++ return false; ++} ++ ++bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs, ++ Label* trap_div_by_zero) { ++ //TODO ++ return false; ++ //TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); ++ //TurboAssembler::Ddivu(dst.gp(), lhs.gp(), rhs.gp()); ++ //return true; ++} ++ ++bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs, ++ Label* trap_div_by_zero) { ++ //TODO ++ return false; ++ //TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); ++ //TurboAssembler::Dmod(dst.gp(), lhs.gp(), rhs.gp()); ++ //return true; ++} ++ ++bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs, ++ Label* trap_div_by_zero) { ++ //TODO ++ return false; ++} ++ ++#define I64_BINOP(name, instruction) \ ++ void LiftoffAssembler::emit_i64_##name( \ ++ LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \ ++ instruction(lhs.gp(), rhs.gp(), dst.gp()); \ ++ } ++ ++// clang-format off ++I64_BINOP(add, addl) ++I64_BINOP(sub, subl) ++I64_BINOP(and, and_ins) ++I64_BINOP(or, or_ins) ++I64_BINOP(xor, 
xor_ins) ++// clang-format on ++ ++#undef I64_BINOP ++ ++#define I64_BINOP_I(name, instruction) \ ++ void LiftoffAssembler::emit_i64_##name##i( \ ++ LiftoffRegister dst, LiftoffRegister lhs, int32_t imm) { \ ++ instruction(dst.gp(), lhs.gp(), Operand(imm)); \ ++ } ++ ++// clang-format off ++I64_BINOP_I(add, Addl) ++I64_BINOP_I(and, And) ++I64_BINOP_I(or, Or) ++I64_BINOP_I(xor, Xor) ++// clang-format on ++ ++#undef I64_BINOP_I ++ ++#define I64_SHIFTOP(name, instruction) \ ++ void LiftoffAssembler::emit_i64_##name( \ ++ LiftoffRegister dst, LiftoffRegister src, Register amount) { \ ++ instruction(src.gp(), amount, dst.gp()); \ ++ } ++#define I64_SHIFTOP_I(name, instruction) \ ++ I64_SHIFTOP(name, instruction) \ ++ void LiftoffAssembler::emit_i64_##name##i(LiftoffRegister dst, \ ++ LiftoffRegister src, int amount) { \ ++ instruction(src.gp(), (amount & 63), dst.gp()); \ ++ } ++ ++I64_SHIFTOP_I(shl, slll) ++I64_SHIFTOP_I(sar, sral) ++I64_SHIFTOP_I(shr, srll) ++ ++#undef I64_SHIFTOP ++#undef I64_SHIFTOP_I ++ ++void LiftoffAssembler::emit_u32_to_intptr(Register dst, Register src) { ++// Dext(dst, src, 0, 32); ++ zapnot(src, 0xf, dst); ++} ++ ++void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) { ++ TurboAssembler::Fnegs(dst, src); ++} ++ ++void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) { ++ TurboAssembler::Fnegd(dst, src); ++} ++ ++void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs) { ++ Label ool, done; ++ TurboAssembler::Float32Min(dst, lhs, rhs, &ool); ++ Branch(&done); ++ ++ bind(&ool); ++ TurboAssembler::Float32MinOutOfLine(dst, lhs, rhs); ++ bind(&done); ++} ++ ++void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs) { ++ Label ool, done; ++ TurboAssembler::Float32Max(dst, lhs, rhs, &ool); ++ Branch(&done); ++ ++ bind(&ool); ++ TurboAssembler::Float32MaxOutOfLine(dst, lhs, rhs); ++ bind(&done); ++} ++ ++void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs) { ++ bailout(kComplexOperation, "f32_copysign"); ++} ++ ++void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs) { ++ Label ool, done; ++ TurboAssembler::Float64Min(dst, lhs, rhs, &ool); ++ Branch(&done); ++ ++ bind(&ool); ++ TurboAssembler::Float64MinOutOfLine(dst, lhs, rhs); ++ bind(&done); ++} ++ ++void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs) { ++ Label ool, done; ++ TurboAssembler::Float64Max(dst, lhs, rhs, &ool); ++ Branch(&done); ++ ++ bind(&ool); ++ TurboAssembler::Float64MaxOutOfLine(dst, lhs, rhs); ++ bind(&done); ++} ++ ++void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs) { ++ bailout(kComplexOperation, "f64_copysign"); ++} ++ ++#define FP_BINOP(name, instruction) \ ++ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \ ++ DoubleRegister rhs) { \ ++ instruction(lhs, rhs, dst); \ ++ } ++#define FP_UNOP_X(name, instruction) \ ++ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \ ++ instruction(src, dst); \ ++ } ++ ++ ++#define FP_UNOP(name, instruction) \ ++ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \ ++ TurboAssembler::instruction(dst, src); \ ++ } ++ ++#define FP_UNOP_RETURN_TRUE(name, instruction) \ ++ bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \ ++ instruction(dst, 
src); \ ++ return true; \ ++ } ++ ++FP_BINOP(f32_add, fadds) ++FP_BINOP(f32_sub, fsubs) ++FP_BINOP(f32_mul, fmuls) ++FP_BINOP(f32_div, fdivs) ++FP_UNOP(f32_abs, Abs_sw) ++FP_UNOP_RETURN_TRUE(f32_ceil, Ceil_s_s) ++FP_UNOP_RETURN_TRUE(f32_floor, Floor_s_s) ++FP_UNOP_RETURN_TRUE(f32_trunc, Trunc_s_s) ++FP_UNOP_RETURN_TRUE(f32_nearest_int, Round_s_s) ++FP_UNOP_X(f32_sqrt, fsqrts) ++FP_BINOP(f64_add, faddd) ++FP_BINOP(f64_sub, fsubd) ++FP_BINOP(f64_mul, fmuld) ++FP_BINOP(f64_div, fdivd) ++FP_UNOP(f64_abs, Abs_sw) ++FP_UNOP_RETURN_TRUE(f64_ceil, Ceil_d_d) ++FP_UNOP_RETURN_TRUE(f64_floor, Floor_d_d) ++FP_UNOP_RETURN_TRUE(f64_trunc, Trunc_d_d) ++FP_UNOP_RETURN_TRUE(f64_nearest_int, Round_d_d) ++FP_UNOP_X(f64_sqrt, fsqrtd) ++ ++#undef FP_BINOP ++#undef FP_UNOP ++#undef FP_UNOP_RETURN_TRUE ++ ++bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, ++ LiftoffRegister dst, ++ LiftoffRegister src, Label* trap) { ++ switch (opcode) { ++ case kExprI32ConvertI64: ++ addw(src.gp(), zero_reg, dst.gp()); //TurboAssembler::Ext(dst.gp(), src.gp(), 0, 32); ++ return true; ++ case kExprI32SConvertF32: { ++ LiftoffRegister rounded = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src)); ++ LiftoffRegister converted_back = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded)); ++ ++ // Real conversion. ++ TurboAssembler::Trunc_s_s(rounded.fp(), src.fp()); ++ ftruncsw(rounded.fp(), kScratchDoubleReg); ++ fimovs(kScratchDoubleReg, dst.gp()); ++ // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead, ++ // because INT32_MIN allows easier out-of-bounds detection. ++ TurboAssembler::Addw(kScratchReg, dst.gp(), 1); ++ TurboAssembler::Cmplt(kScratchReg2, kScratchReg, dst.gp()); ++ TurboAssembler::Selne(dst.gp(), kScratchReg, kScratchReg2); ++ ++ // Checking if trap. ++ ifmovs(dst.gp(), kScratchDoubleReg); ++ fcvtws(kScratchDoubleReg, converted_back.fp()); ++ TurboAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp()); ++ TurboAssembler::BranchFalseF(trap); ++ return true; ++ } ++ case kExprI32UConvertF32: { ++ LiftoffRegister rounded = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src)); ++ LiftoffRegister converted_back = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded)); ++ ++ // Real conversion. ++ TurboAssembler::Trunc_s_s(rounded.fp(), src.fp()); ++ TurboAssembler::Trunc_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg); ++ // Avoid UINT32_MAX as an overflow indicator and use 0 instead, ++ // because 0 allows easier out-of-bounds detection. ++ TurboAssembler::Addw(kScratchReg, dst.gp(), 1); ++ TurboAssembler::Seleq(dst.gp(), zero_reg, kScratchReg); ++ ++ // Checking if trap. ++ TurboAssembler::Cvt_d_uw(converted_back.fp(), dst.gp()); ++ fcvtds_(converted_back.fp(), converted_back.fp()); ++ TurboAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp()); ++ TurboAssembler::BranchFalseF(trap); ++ return true; ++ } ++ case kExprI32SConvertF64: { ++ LiftoffRegister rounded = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src)); ++ LiftoffRegister converted_back = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded)); ++ ++ // Real conversion. ++ TurboAssembler::Trunc_d_d(rounded.fp(), src.fp()); ++ ftruncdw(rounded.fp(), kScratchDoubleReg); ++ fimovs(kScratchDoubleReg, dst.gp()); ++ ++ // Checking if trap. 
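// Illustrative sketch (not emitted code) of the trap test used in this and
// the other checked float-to-int cases: convert the truncated integer back to
// floating point and trap unless it compares equal to the rounded input,
// which rejects NaN and out-of-range values with a single comparison:
//
//   double rounded = std::trunc(src);
//   int32_t result = static_cast<int32_t>(rounded);
//   if (static_cast<double>(result) != rounded) goto trap;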
++ fcvtwd(kScratchDoubleReg, converted_back.fp()); ++ TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp()); ++ TurboAssembler::BranchFalseF(trap); ++ return true; ++ } ++ case kExprI32UConvertF64: { ++ LiftoffRegister rounded = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src)); ++ LiftoffRegister converted_back = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded)); ++ ++ // Real conversion. ++ TurboAssembler::Trunc_d_d(rounded.fp(), src.fp()); ++ TurboAssembler::Trunc_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg); ++ ++ // Checking if trap. ++ TurboAssembler::Cvt_d_uw(converted_back.fp(), dst.gp()); ++ TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp()); ++ TurboAssembler::BranchFalseF(trap); ++ return true; ++ } ++ case kExprI32ReinterpretF32: ++ fimovs(src.fp(), dst.gp()); ++ return true; ++ case kExprI64SConvertI32: ++ addw(src.gp(), 0, dst.gp()); //ZHJ Sllw(dst.gp(), src.gp(), 0); ++ return true; ++ case kExprI64UConvertI32: ++ zapnot(src.gp(), 0xf, dst.gp()); //ZHJ TurboAssembler::Dext(dst.gp(), src.gp(), 0, 32); ++ return true; ++ case kExprI64SConvertF32: { ++ LiftoffRegister rounded = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src)); ++ LiftoffRegister converted_back = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded)); ++ ++ // Real conversion. ++ TurboAssembler::Trunc_s_s(rounded.fp(), src.fp()); ++ ftruncsl(rounded.fp(), kScratchDoubleReg); ++ fimovd(kScratchDoubleReg, dst.gp()); ++ // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead, ++ // because INT64_MIN allows easier out-of-bounds detection. ++ TurboAssembler::Addl(kScratchReg, dst.gp(), 1); ++ TurboAssembler::Cmplt(kScratchReg2, kScratchReg, dst.gp()); ++ TurboAssembler::Selne(dst.gp(), kScratchReg, kScratchReg2); ++ ++ // Checking if trap. ++ ifmovd(dst.gp(), kScratchDoubleReg); ++ fcvtls(kScratchDoubleReg, converted_back.fp()); ++ TurboAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp()); ++ TurboAssembler::BranchFalseF(trap); ++ return true; ++ } ++ case kExprI64UConvertF32: { ++ // Real conversion. ++ TurboAssembler::Trunc_ul_s(dst.gp(), src.fp(), kScratchDoubleReg, ++ kScratchReg); ++ ++ // Checking if trap. ++ TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); ++ return true; ++ } ++ case kExprI64SConvertF64: { ++ LiftoffRegister rounded = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src)); ++ LiftoffRegister converted_back = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded)); ++ ++ // Real conversion. ++ TurboAssembler::Trunc_d_d(rounded.fp(), src.fp()); ++ ftruncdl(rounded.fp(), kScratchDoubleReg); ++ fimovd(kScratchDoubleReg, dst.gp()); ++ // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead, ++ // because INT64_MIN allows easier out-of-bounds detection. ++ TurboAssembler::Addl(kScratchReg, dst.gp(), 1); ++ TurboAssembler::Cmplt(kScratchReg2, kScratchReg, dst.gp()); ++ TurboAssembler::Selne(dst.gp(), kScratchReg, kScratchReg2); ++ ++ // Checking if trap. ++ ifmovd(dst.gp(), kScratchDoubleReg); ++ fcvtld(kScratchDoubleReg, converted_back.fp()); ++ TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp()); ++ TurboAssembler::BranchFalseF(trap); ++ return true; ++ } ++ case kExprI64UConvertF64: { ++ // Real conversion. ++ TurboAssembler::Trunc_ul_d(dst.gp(), src.fp(), kScratchDoubleReg, ++ kScratchReg); ++ ++ // Checking if trap. 
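// Note (descriptive only): unlike the signed cases above, the unsigned 64-bit
// conversions use TurboAssembler::Trunc_ul_s / Trunc_ul_d, which place a
// success flag in kScratchReg; the branch below traps when that flag is zero,
// i.e. for NaN or values outside the unsigned 64-bit range.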
++ TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); ++ return true; ++ } ++ case kExprI64ReinterpretF64: ++ fimovd(src.fp(), dst.gp()); ++ return true; ++ case kExprF32SConvertI32: { ++ LiftoffRegister scratch = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst)); ++ ifmovs(src.gp(), scratch.fp()); ++ fcvtws(scratch.fp(), dst.fp()); ++ return true; ++ } ++ case kExprF32UConvertI32: ++ TurboAssembler::Cvt_s_uw(dst.fp(), src.gp()); ++ return true; ++ case kExprF32ConvertF64: ++ fcvtds(src.fp(), dst.fp()); ++ return true; ++ case kExprF32ReinterpretI32: ++ ifmovs(src.gp(), dst.fp()); ++ return true; ++ case kExprF64SConvertI32: { ++ LiftoffRegister scratch = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst)); ++ ifmovs(src.gp(), scratch.fp()); ++ fcvtwd(scratch.fp(), dst.fp()); ++ return true; ++ } ++ case kExprF64UConvertI32: ++ TurboAssembler::Cvt_d_uw(dst.fp(), src.gp()); ++ return true; ++ case kExprF64ConvertF32: ++ fcvtsd(src.fp(), dst.fp()); ++ return true; ++ case kExprF64ReinterpretI64: ++ ifmovd(src.gp(), dst.fp()); ++ return true; ++ case kExprI32SConvertSatF32: ++ bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF32"); ++ return true; ++ case kExprI32UConvertSatF32: ++ bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF32"); ++ return true; ++ case kExprI32SConvertSatF64: ++ bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF64"); ++ return true; ++ case kExprI32UConvertSatF64: ++ bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF64"); ++ return true; ++ case kExprI64SConvertSatF32: ++ bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF32"); ++ return true; ++ case kExprI64UConvertSatF32: ++ bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF32"); ++ return true; ++ case kExprI64SConvertSatF64: ++ bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF64"); ++ return true; ++ case kExprI64UConvertSatF64: ++ bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF64"); ++ return true; ++ default: ++ return false; ++ } ++} ++ ++void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) { ++ bailout(kComplexOperation, "i32_signextend_i8"); ++} ++ ++void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) { ++ bailout(kComplexOperation, "i32_signextend_i16"); ++} ++ ++void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst, ++ LiftoffRegister src) { ++ bailout(kComplexOperation, "i64_signextend_i8"); ++} ++ ++void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst, ++ LiftoffRegister src) { ++ bailout(kComplexOperation, "i64_signextend_i16"); ++} ++ ++void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst, ++ LiftoffRegister src) { ++ bailout(kComplexOperation, "i64_signextend_i32"); ++} ++ ++void LiftoffAssembler::emit_jump(Label* label) { ++ TurboAssembler::Branch(label); ++} ++ ++void LiftoffAssembler::emit_jump(Register target) { ++ TurboAssembler::Jump(target); ++} ++ ++void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label, ++ ValueType type, Register lhs, ++ Register rhs) { ++ if (rhs != no_reg) { ++ TurboAssembler::Branch(label, cond, lhs, Operand(rhs)); ++ } else { ++ TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg)); ++} ++} ++ ++void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) { ++ cmpeq(src, 0, dst); ++} ++ ++void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst, ++ Register lhs, Register rhs) { ++ Register tmp = dst; ++ if (dst == lhs || dst == rhs) { ++ tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, 
rhs)).gp(); ++ } ++ // Write 1 as result. ++ TurboAssembler::li(tmp, 1); ++ ++ // If negative condition is true, write 0 as result. ++ Condition neg_cond = NegateCondition(cond); ++ TurboAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond); ++ ++ // If tmp != dst, result will be moved. ++ TurboAssembler::Move(dst, tmp); ++} ++ ++void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) { ++ cmpeq(src.gp(), 0, dst); ++} ++ ++void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ Register tmp = dst; ++ if (dst == lhs.gp() || dst == rhs.gp()) { ++ tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp(); ++ } ++ // Write 1 as result. ++ TurboAssembler::li(tmp, 1); ++ ++ // If negative condition is true, write 0 as result. ++ Condition neg_cond = NegateCondition(cond); ++ TurboAssembler::LoadZeroOnCondition(tmp, lhs.gp(), Operand(rhs.gp()), ++ neg_cond); ++ ++ // If tmp != dst, result will be moved. ++ TurboAssembler::Move(dst, tmp); ++} ++ ++namespace liftoff { ++ ++inline FPUCondition ConditionToConditionCmpFPU(Condition condition, ++ bool* predicate) { ++ switch (condition) { ++ case kEqual: ++ *predicate = true; ++ return EQ; ++ case kUnequal: ++ *predicate = false; ++ return EQ; ++ case kUnsignedLessThan: ++ *predicate = true; ++ return OLT; ++ case kUnsignedGreaterEqual: ++ *predicate = false; ++ return OLT; ++ case kUnsignedLessEqual: ++ *predicate = true; ++ return OLE; ++ case kUnsignedGreaterThan: ++ *predicate = false; ++ return OLE; ++ default: ++ *predicate = true; ++ break; ++ } ++ UNREACHABLE(); ++} ++//SKTODO ++#if 0 ++inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst, ++ LiftoffRegister src) { ++ Label all_false; ++ assm->BranchMSA(&all_false, MSA_BRANCH_V, all_zero, src.fp().toW(), ++ USE_DELAY_SLOT); ++ assm->li(dst.gp(), 0l); ++ assm->li(dst.gp(), 1); ++ assm->bind(&all_false); ++} ++ ++inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst, ++ LiftoffRegister src, MSABranchDF msa_branch_df) { ++ Label all_true; ++ assm->BranchMSA(&all_true, msa_branch_df, all_not_zero, src.fp().toW(), ++ USE_DELAY_SLOT); ++ assm->li(dst.gp(), 1); ++ assm->li(dst.gp(), 0l); ++ assm->bind(&all_true); ++} ++#endif ++} // namespace liftoff ++ ++void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst, ++ DoubleRegister lhs, ++ DoubleRegister rhs) { ++ Label not_nan, cont; ++ TurboAssembler::CompareIsNanF32(lhs, rhs); ++ TurboAssembler::BranchFalseF(¬_nan); ++ // If one of the operands is NaN, return 1 for f32.ne, else 0. ++ if (cond == ne) { ++ TurboAssembler::li(dst, 1); ++ } else { ++ TurboAssembler::Move(dst, zero_reg); ++ } ++ TurboAssembler::Branch(&cont); ++ ++ bind(¬_nan); ++ ++ TurboAssembler::li(dst, 1); ++ bool predicate; ++ FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate); ++ TurboAssembler::CompareF32(fcond, lhs, rhs); ++ if (predicate) { ++ TurboAssembler::LoadZeroIfNotFPUCondition(dst); ++ } else { ++ TurboAssembler::LoadZeroIfFPUCondition(dst); ++ } ++ ++ bind(&cont); ++} ++ ++void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst, ++ DoubleRegister lhs, ++ DoubleRegister rhs) { ++ Label not_nan, cont; ++ TurboAssembler::CompareIsNanF64(lhs, rhs); ++ TurboAssembler::BranchFalseF(¬_nan); ++ // If one of the operands is NaN, return 1 for f64.ne, else 0. 
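// Illustrative scalar equivalent (not emitted code) of the NaN handling here
// and in emit_f32_set_cond above: with an unordered operand every predicate
// is false except "not equal", so the result is fixed up front:
//
//   if (std::isnan(lhs) || std::isnan(rhs)) return cond == kUnequal ? 1 : 0;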
++ if (cond == ne) { ++ TurboAssembler::li(dst, 1); ++ } else { ++ TurboAssembler::Move(dst, zero_reg); ++ } ++ TurboAssembler::Branch(&cont); ++ ++ bind(¬_nan); ++ ++ TurboAssembler::li(dst, 1); ++ bool predicate; ++ FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate); ++ TurboAssembler::CompareF64(fcond, lhs, rhs); ++ if (predicate) { ++ TurboAssembler::LoadZeroIfNotFPUCondition(dst); ++ } else { ++ TurboAssembler::LoadZeroIfFPUCondition(dst); ++ } ++ ++ bind(&cont); ++} ++ ++bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition, ++ LiftoffRegister true_value, ++ LiftoffRegister false_value, ++ ValueType type) { ++ return false; ++} ++ ++//SKTODO ++void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr, ++ Register offset_reg, uint32_t offset_imm, ++ LoadType type, ++ LoadTransformationKind transform, ++ uint32_t* protected_load_pc) { ++ UNREACHABLE(); ++#if 0 ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ Daddu(scratch, src_addr, offset_reg); ++ MemOperand src_op = MemOperand(scratch, offset_imm); ++ MSARegister dst_msa = dst.fp().toW(); ++ *protected_load_pc = pc_offset(); ++ MachineType memtype = type.mem_type(); ++ ++ if (transform == LoadTransformationKind::kExtend) { ++ Ld(scratch, src_op); ++ if (memtype == MachineType::Int8()) { ++ fill_d(dst_msa, scratch); ++ clti_s_b(kSimd128ScratchReg, dst_msa, 0); ++ ilvr_b(dst_msa, kSimd128ScratchReg, dst_msa); ++ } else if (memtype == MachineType::Uint8()) { ++ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); ++ fill_d(dst_msa, scratch); ++ ilvr_b(dst_msa, kSimd128RegZero, dst_msa); ++ } else if (memtype == MachineType::Int16()) { ++ fill_d(dst_msa, scratch); ++ clti_s_h(kSimd128ScratchReg, dst_msa, 0); ++ ilvr_h(dst_msa, kSimd128ScratchReg, dst_msa); ++ } else if (memtype == MachineType::Uint16()) { ++ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); ++ fill_d(dst_msa, scratch); ++ ilvr_h(dst_msa, kSimd128RegZero, dst_msa); ++ } else if (memtype == MachineType::Int32()) { ++ fill_d(dst_msa, scratch); ++ clti_s_w(kSimd128ScratchReg, dst_msa, 0); ++ ilvr_w(dst_msa, kSimd128ScratchReg, dst_msa); ++ } else if (memtype == MachineType::Uint32()) { ++ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); ++ fill_d(dst_msa, scratch); ++ ilvr_w(dst_msa, kSimd128RegZero, dst_msa); ++ } ++ } else { ++ DCHECK_EQ(LoadTransformationKind::kSplat, transform); ++ if (memtype == MachineType::Int8()) { ++ Lb(scratch, src_op); ++ fill_b(dst_msa, scratch); ++ } else if (memtype == MachineType::Int16()) { ++ Lh(scratch, src_op); ++ fill_h(dst_msa, scratch); ++ } else if (memtype == MachineType::Int32()) { ++ Lw(scratch, src_op); ++ fill_w(dst_msa, scratch); ++ } else if (memtype == MachineType::Int64()) { ++ Ld(scratch, src_op); ++ fill_d(dst_msa, scratch); ++ } ++ } ++#endif ++} ++ ++void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs, ++ const uint8_t shuffle[16], ++ bool is_swizzle) { ++ UNREACHABLE(); ++#if 0 ++ MSARegister dst_msa = dst.fp().toW(); ++ MSARegister lhs_msa = lhs.fp().toW(); ++ MSARegister rhs_msa = rhs.fp().toW(); ++ ++ uint64_t control_hi = 0; ++ uint64_t control_low = 0; ++ for (int i = 7; i >= 0; i--) { ++ control_hi <<= 8; ++ control_hi |= shuffle[i + 8]; ++ control_low <<= 8; ++ control_low |= shuffle[i]; ++ } ++ ++ if (dst_msa == lhs_msa) { ++ move_v(kSimd128ScratchReg, lhs_msa); ++ lhs_msa = kSimd128ScratchReg; ++ } else if (dst_msa == rhs_msa) { ++ 
move_v(kSimd128ScratchReg, rhs_msa); ++ rhs_msa = kSimd128ScratchReg; ++ } ++ ++ li(kScratchReg, control_low); ++ insert_d(dst_msa, 0, kScratchReg); ++ li(kScratchReg, control_hi); ++ insert_d(dst_msa, 1, kScratchReg); ++ vshf_b(dst_msa, rhs_msa, lhs_msa); ++#endif ++} ++ ++void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++#if 0 ++ MSARegister dst_msa = dst.fp().toW(); ++ MSARegister lhs_msa = lhs.fp().toW(); ++ MSARegister rhs_msa = rhs.fp().toW(); ++ ++ if (dst == lhs) { ++ move_v(kSimd128ScratchReg, lhs_msa); ++ lhs_msa = kSimd128ScratchReg; ++ } ++ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); ++ move_v(dst_msa, rhs_msa); ++ vshf_b(dst_msa, kSimd128RegZero, lhs_msa); ++#endif ++} ++ ++void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// fill_b(dst.fp().toW(), src.gp()); ++} ++ ++void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// fill_h(dst.fp().toW(), src.gp()); ++} ++ ++void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// fill_w(dst.fp().toW(), src.gp()); ++} ++ ++void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// fill_d(dst.fp().toW(), src.gp()); ++} ++ ++void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// TurboAssembler::FmoveLow(kScratchReg, src.fp()); ++// fill_w(dst.fp().toW(), kScratchReg); ++} ++ ++void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// TurboAssembler::Move(kScratchReg, src.fp()); ++// fill_d(dst.fp().toW(), kScratchReg); ++ } ++ ++void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// ceq_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// ceq_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++// nor_v(dst.fp().toW(), dst.fp().toW(), dst.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// clt_s_b(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// clt_u_b(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// cle_s_b(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// cle_u_b(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// ceq_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// ceq_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++// nor_v(dst.fp().toW(), dst.fp().toW(), dst.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, 
LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// clt_s_h(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// clt_u_h(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// cle_s_h(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// cle_u_h(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// ceq_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// ceq_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++// nor_v(dst.fp().toW(), dst.fp().toW(), dst.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// clt_s_w(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// clt_u_w(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// cle_s_w(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// cle_u_w(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// fceq_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// fcune_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// fclt_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// fcle_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// fceq_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// fcune_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// fclt_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// fcle_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_s128_const(LiftoffRegister 
dst, ++ const uint8_t imms[16]) { ++ UNREACHABLE(); ++#if 0 ++ MSARegister dst_msa = dst.fp().toW(); ++ uint64_t vals[2]; ++ memcpy(vals, imms, sizeof(vals)); ++ li(kScratchReg, vals[0]); ++ insert_d(dst_msa, 0, kScratchReg); ++ li(kScratchReg, vals[1]); ++ insert_d(dst_msa, 1, kScratchReg); ++#endif ++} ++ ++void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) { ++ UNREACHABLE(); ++// nor_v(dst.fp().toW(), src.fp().toW(), src.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// and_v(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// or_v(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// xor_v(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// nor_v(kSimd128ScratchReg, rhs.fp().toW(), rhs.fp().toW()); ++// and_v(dst.fp().toW(), kSimd128ScratchReg, lhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_s128_select(LiftoffRegister dst, ++ LiftoffRegister src1, ++ LiftoffRegister src2, ++ LiftoffRegister mask) { ++ UNREACHABLE(); ++#if 0 ++ if (dst == mask) { ++ bsel_v(dst.fp().toW(), src2.fp().toW(), src1.fp().toW()); ++ } else { ++ xor_v(kSimd128ScratchReg, src1.fp().toW(), src2.fp().toW()); ++ and_v(kSimd128ScratchReg, kSimd128ScratchReg, mask.fp().toW()); ++ xor_v(dst.fp().toW(), kSimd128ScratchReg, src2.fp().toW()); ++ } ++#endif ++} ++ ++void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); ++// subv_b(dst.fp().toW(), kSimd128RegZero, src.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// liftoff::EmitAnyTrue(this, dst, src); ++} ++ ++void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// liftoff::EmitAllTrue(this, dst, src, MSA_BRANCH_B); ++} ++ ++void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++#if 0 ++ MSARegister scratch0 = kSimd128RegZero; ++ MSARegister scratch1 = kSimd128ScratchReg; ++ srli_b(scratch0, src.fp().toW(), 7); ++ srli_h(scratch1, scratch0, 7); ++ or_v(scratch0, scratch0, scratch1); ++ srli_w(scratch1, scratch0, 14); ++ or_v(scratch0, scratch0, scratch1); ++ srli_d(scratch1, scratch0, 28); ++ or_v(scratch0, scratch0, scratch1); ++ shf_w(scratch1, scratch0, 0x0E); ++ ilvev_b(scratch0, scratch1, scratch0); ++ copy_u_h(dst.gp(), scratch0, 0); ++#endif ++} ++ ++void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// fill_b(kSimd128ScratchReg, rhs.gp()); ++// sll_b(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg); ++} ++ ++void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs, ++ int32_t rhs) { ++ UNREACHABLE(); ++// slli_b(dst.fp().toW(), lhs.fp().toW(), rhs & 7); ++} ++ ++void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// fill_b(kSimd128ScratchReg, rhs.gp()); 
++// sra_b(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg); ++} ++ ++void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst, ++ LiftoffRegister lhs, int32_t rhs) { ++ UNREACHABLE(); ++// srai_b(dst.fp().toW(), lhs.fp().toW(), rhs & 7); ++} ++ ++void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// fill_b(kSimd128ScratchReg, rhs.gp()); ++// srl_b(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg); ++} ++ ++void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst, ++ LiftoffRegister lhs, int32_t rhs) { ++ UNREACHABLE(); ++// srli_b(dst.fp().toW(), lhs.fp().toW(), rhs & 7); ++} ++ ++void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// addv_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// adds_s_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// adds_u_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// subv_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// subs_s_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// subs_u_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// mulv_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// min_s_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i8x16_min_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// min_u_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i8x16_max_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// max_s_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// max_u_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); ++// subv_h(dst.fp().toW(), kSimd128RegZero, src.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// liftoff::EmitAnyTrue(this, dst, src); ++} ++ ++void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// liftoff::EmitAllTrue(this, dst, src, MSA_BRANCH_H); ++} ++ ++void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst, ++ LiftoffRegister 
src) { ++ UNREACHABLE(); ++#if 0 ++ MSARegister scratch0 = kSimd128RegZero; ++ MSARegister scratch1 = kSimd128ScratchReg; ++ srli_h(scratch0, src.fp().toW(), 15); ++ srli_w(scratch1, scratch0, 15); ++ or_v(scratch0, scratch0, scratch1); ++ srli_d(scratch1, scratch0, 30); ++ or_v(scratch0, scratch0, scratch1); ++ shf_w(scratch1, scratch0, 0x0E); ++ slli_d(scratch1, scratch1, 4); ++ or_v(scratch0, scratch0, scratch1); ++ copy_u_b(dst.gp(), scratch0, 0); ++#endif ++} ++ ++void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// fill_h(kSimd128ScratchReg, rhs.gp()); ++// sll_h(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg); ++} ++ ++void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs, ++ int32_t rhs) { ++ UNREACHABLE(); ++// slli_h(dst.fp().toW(), lhs.fp().toW(), rhs & 15); ++} ++ ++void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// fill_h(kSimd128ScratchReg, rhs.gp()); ++// sra_h(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg); ++} ++ ++void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst, ++ LiftoffRegister lhs, int32_t rhs) { ++ UNREACHABLE(); ++// srai_h(dst.fp().toW(), lhs.fp().toW(), rhs & 15); ++} ++ ++void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// fill_h(kSimd128ScratchReg, rhs.gp()); ++// srl_h(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg); ++} ++ ++void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst, ++ LiftoffRegister lhs, int32_t rhs) { ++ UNREACHABLE(); ++// srli_h(dst.fp().toW(), lhs.fp().toW(), rhs & 15); ++} ++ ++void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// addv_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// adds_s_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// adds_u_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// subv_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// subs_s_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// subs_u_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// mulv_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i16x8_min_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// min_s_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i16x8_min_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// min_u_h(dst.fp().toW(), lhs.fp().toW(), 
rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i16x8_max_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// max_s_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// max_u_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); ++// subv_w(dst.fp().toW(), kSimd128RegZero, src.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// liftoff::EmitAnyTrue(this, dst, src); ++} ++ ++void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// liftoff::EmitAllTrue(this, dst, src, MSA_BRANCH_W); ++} ++ ++void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++#if 0 ++ MSARegister scratch0 = kSimd128RegZero; ++ MSARegister scratch1 = kSimd128ScratchReg; ++ srli_w(scratch0, src.fp().toW(), 31); ++ srli_d(scratch1, scratch0, 31); ++ or_v(scratch0, scratch0, scratch1); ++ shf_w(scratch1, scratch0, 0x0E); ++ slli_d(scratch1, scratch1, 2); ++ or_v(scratch0, scratch0, scratch1); ++ copy_u_b(dst.gp(), scratch0, 0); ++#endif ++} ++ ++void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// fill_w(kSimd128ScratchReg, rhs.gp()); ++// sll_w(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg); ++} ++ ++void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs, ++ int32_t rhs) { ++ UNREACHABLE(); ++// slli_w(dst.fp().toW(), lhs.fp().toW(), rhs & 31); ++} ++ ++void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// fill_w(kSimd128ScratchReg, rhs.gp()); ++// sra_w(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg); ++} ++ ++void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst, ++ LiftoffRegister lhs, int32_t rhs) { ++ UNREACHABLE(); ++// srai_w(dst.fp().toW(), lhs.fp().toW(), rhs & 31); ++} ++ ++void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// fill_w(kSimd128ScratchReg, rhs.gp()); ++// srl_w(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg); ++} ++ ++void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst, ++ LiftoffRegister lhs, int32_t rhs) { ++ UNREACHABLE(); ++// srli_w(dst.fp().toW(), lhs.fp().toW(), rhs & 31); ++} ++ ++void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// addv_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// subv_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// mulv_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i32x4_min_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// min_s_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void 
LiftoffAssembler::emit_i32x4_min_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// min_u_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i32x4_max_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// max_s_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// max_u_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); ++// subv_d(dst.fp().toW(), kSimd128RegZero, src.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// fill_d(kSimd128ScratchReg, rhs.gp()); ++// sll_d(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg); ++} ++ ++void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs, ++ int32_t rhs) { ++ UNREACHABLE(); ++// slli_d(dst.fp().toW(), lhs.fp().toW(), rhs & 63); ++} ++ ++void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// fill_d(kSimd128ScratchReg, rhs.gp()); ++// sra_d(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg); ++} ++ ++void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst, ++ LiftoffRegister lhs, int32_t rhs) { ++ UNREACHABLE(); ++// srai_d(dst.fp().toW(), lhs.fp().toW(), rhs & 63); ++} ++ ++void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// fill_d(kSimd128ScratchReg, rhs.gp()); ++// srl_d(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg); ++} ++ ++void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst, ++ LiftoffRegister lhs, int32_t rhs) { ++ UNREACHABLE(); ++// srli_d(dst.fp().toW(), lhs.fp().toW(), rhs & 63); ++} ++ ++void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// addv_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// subv_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// mulv_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// bclri_w(dst.fp().toW(), src.fp().toW(), 31); ++} ++ ++void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// bnegi_w(dst.fp().toW(), src.fp().toW(), 31); ++} ++ ++void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++} ++ ++bool LiftoffAssembler::emit_f32x4_ceil(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++} ++ ++bool LiftoffAssembler::emit_f32x4_floor(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++} ++ ++bool LiftoffAssembler::emit_f32x4_trunc(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++} ++ ++bool LiftoffAssembler::emit_f32x4_nearest_int(LiftoffRegister dst, ++ LiftoffRegister 
src) { ++ UNREACHABLE(); ++} ++ ++void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// fadd_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// fsub_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// fmul_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// fdiv_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++#if 0 ++ MSARegister dst_msa = dst.fp().toW(); ++ MSARegister lhs_msa = lhs.fp().toW(); ++ MSARegister rhs_msa = rhs.fp().toW(); ++ MSARegister scratch0 = kSimd128RegZero; ++ MSARegister scratch1 = kSimd128ScratchReg; ++ // If inputs are -0.0. and +0.0, then write -0.0 to scratch1. ++ // scratch1 = (lhs == rhs) ? (lhs | rhs) : (rhs | rhs). ++ fseq_w(scratch0, lhs_msa, rhs_msa); ++ bsel_v(scratch0, rhs_msa, lhs_msa); ++ or_v(scratch1, scratch0, rhs_msa); ++ // scratch0 = isNaN(scratch1) ? scratch1: lhs. ++ fseq_w(scratch0, scratch1, scratch1); ++ bsel_v(scratch0, scratch1, lhs_msa); ++ // dst = (scratch1 <= scratch0) ? scratch1 : scratch0. ++ fsle_w(dst_msa, scratch1, scratch0); ++ bsel_v(dst_msa, scratch0, scratch1); ++#endif ++} ++ ++void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++#if 0 ++ MSARegister dst_msa = dst.fp().toW(); ++ MSARegister lhs_msa = lhs.fp().toW(); ++ MSARegister rhs_msa = rhs.fp().toW(); ++ MSARegister scratch0 = kSimd128RegZero; ++ MSARegister scratch1 = kSimd128ScratchReg; ++ // If inputs are -0.0. and +0.0, then write +0.0 to scratch1. ++ // scratch1 = (lhs == rhs) ? (lhs | rhs) : (rhs | rhs). ++ fseq_w(scratch0, lhs_msa, rhs_msa); ++ bsel_v(scratch0, rhs_msa, lhs_msa); ++ and_v(scratch1, scratch0, rhs_msa); ++ // scratch0 = isNaN(scratch1) ? scratch1: lhs. ++ fseq_w(scratch0, scratch1, scratch1); ++ bsel_v(scratch0, scratch1, lhs_msa); ++ // dst = (scratch0 <= scratch1) ? scratch1 : scratch0. 
++ fsle_w(dst_msa, scratch0, scratch1); ++ bsel_v(dst_msa, scratch0, scratch1); ++#endif ++} ++ ++void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++} ++ ++void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++} ++ ++void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// bclri_d(dst.fp().toW(), src.fp().toW(), 63); ++} ++ ++void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// bnegi_d(dst.fp().toW(), src.fp().toW(), 63); ++} ++ ++void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++} ++ ++bool LiftoffAssembler::emit_f64x2_ceil(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++} ++ ++bool LiftoffAssembler::emit_f64x2_floor(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++} ++ ++bool LiftoffAssembler::emit_f64x2_trunc(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++} ++ ++bool LiftoffAssembler::emit_f64x2_nearest_int(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++} ++ ++void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// fadd_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// fsub_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// fmul_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// fdiv_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++#if 0 ++ MSARegister dst_msa = dst.fp().toW(); ++ MSARegister lhs_msa = lhs.fp().toW(); ++ MSARegister rhs_msa = rhs.fp().toW(); ++ MSARegister scratch0 = kSimd128RegZero; ++ MSARegister scratch1 = kSimd128ScratchReg; ++ // If inputs are -0.0. and +0.0, then write -0.0 to scratch1. ++ // scratch1 = (lhs == rhs) ? (lhs | rhs) : (rhs | rhs). ++ fseq_d(scratch0, lhs_msa, rhs_msa); ++ bsel_v(scratch0, rhs_msa, lhs_msa); ++ or_v(scratch1, scratch0, rhs_msa); ++ // scratch0 = isNaN(scratch1) ? scratch1: lhs. ++ fseq_d(scratch0, scratch1, scratch1); ++ bsel_v(scratch0, scratch1, lhs_msa); ++ // dst = (scratch1 <= scratch0) ? scratch1 : scratch0. ++ fsle_d(dst_msa, scratch1, scratch0); ++ bsel_v(dst_msa, scratch0, scratch1); ++#endif ++} ++ ++void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++#if 0 ++ MSARegister dst_msa = dst.fp().toW(); ++ MSARegister lhs_msa = lhs.fp().toW(); ++ MSARegister rhs_msa = rhs.fp().toW(); ++ MSARegister scratch0 = kSimd128RegZero; ++ MSARegister scratch1 = kSimd128ScratchReg; ++ // If inputs are -0.0. and +0.0, then write +0.0 to scratch1. ++ // scratch1 = (lhs == rhs) ? (lhs | rhs) : (rhs | rhs). ++ fseq_d(scratch0, lhs_msa, rhs_msa); ++ bsel_v(scratch0, rhs_msa, lhs_msa); ++ and_v(scratch1, scratch0, rhs_msa); ++ // scratch0 = isNaN(scratch1) ? scratch1: lhs. 
++ fseq_d(scratch0, scratch1, scratch1); ++ bsel_v(scratch0, scratch1, lhs_msa); ++ // dst = (scratch0 <= scratch1) ? scratch1 : scratch0. ++ fsle_d(dst_msa, scratch0, scratch1); ++ bsel_v(dst_msa, scratch0, scratch1); ++ // Canonicalize the result. ++ fmax_d(dst_msa, dst_msa, dst_msa); ++#endif ++} ++ ++void LiftoffAssembler::emit_f64x2_pmin(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++} ++ ++void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++} ++ ++void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// ftrunc_s_w(dst.fp().toW(), src.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// ftrunc_u_w(dst.fp().toW(), src.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// ffint_s_w(dst.fp().toW(), src.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// ffint_u_w(dst.fp().toW(), src.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// saturate_s_h(kSimd128ScratchReg, lhs.fp().toW(), 7); ++// saturate_s_h(dst.fp().toW(), lhs.fp().toW(), 7); ++// pckev_b(dst.fp().toW(), dst.fp().toW(), kSimd128ScratchReg); ++} ++ ++void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++#if 0 ++ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); ++ max_s_h(kSimd128ScratchReg, kSimd128RegZero, lhs.fp().toW()); ++ saturate_u_h(kSimd128ScratchReg, kSimd128ScratchReg, 7); ++ max_s_h(dst.fp().toW(), kSimd128RegZero, rhs.fp().toW()); ++ saturate_u_h(dst.fp().toW(), dst.fp().toW(), 7); ++ pckev_b(dst.fp().toW(), dst.fp().toW(), kSimd128ScratchReg); ++#endif ++} ++ ++void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// saturate_s_w(kSimd128ScratchReg, lhs.fp().toW(), 15); ++// saturate_s_w(dst.fp().toW(), lhs.fp().toW(), 15); ++// pckev_h(dst.fp().toW(), dst.fp().toW(), kSimd128ScratchReg); ++} ++ ++void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++#if 0 ++ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); ++ max_s_w(kSimd128ScratchReg, kSimd128RegZero, lhs.fp().toW()); ++ saturate_u_w(kSimd128ScratchReg, kSimd128ScratchReg, 15); ++ max_s_w(dst.fp().toW(), kSimd128RegZero, rhs.fp().toW()); ++ saturate_u_w(dst.fp().toW(), dst.fp().toW(), 15); ++ pckev_h(dst.fp().toW(), dst.fp().toW(), kSimd128ScratchReg); ++#endif ++} ++ ++void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// ilvr_b(kSimd128ScratchReg, src.fp().toW(), src.fp().toW()); ++// slli_h(dst.fp().toW(), kSimd128ScratchReg, 8); ++// srai_h(dst.fp().toW(), dst.fp().toW(), 8); ++} ++ ++void LiftoffAssembler::emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// ilvl_b(kSimd128ScratchReg, src.fp().toW(), src.fp().toW()); ++// slli_h(dst.fp().toW(), kSimd128ScratchReg, 8); ++// srai_h(dst.fp().toW(), dst.fp().toW(), 8); ++} ++ 
++void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); ++// ilvr_b(dst.fp().toW(), kSimd128RegZero, src.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); ++// ilvl_b(dst.fp().toW(), kSimd128RegZero, src.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// ilvr_h(kSimd128ScratchReg, src.fp().toW(), src.fp().toW()); ++// slli_w(dst.fp().toW(), kSimd128ScratchReg, 16); ++// srai_w(dst.fp().toW(), dst.fp().toW(), 16); ++} ++ ++void LiftoffAssembler::emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// ilvl_h(kSimd128ScratchReg, src.fp().toW(), src.fp().toW()); ++// slli_w(dst.fp().toW(), kSimd128ScratchReg, 16); ++// srai_w(dst.fp().toW(), dst.fp().toW(), 16); ++} ++ ++void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); ++// ilvr_h(dst.fp().toW(), kSimd128RegZero, src.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); ++// ilvl_h(dst.fp().toW(), kSimd128RegZero, src.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// aver_u_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ UNREACHABLE(); ++// aver_u_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); ++} ++ ++void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); ++// asub_s_b(dst.fp().toW(), src.fp().toW(), kSimd128RegZero); ++} ++ ++void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); ++// asub_s_h(dst.fp().toW(), src.fp().toW(), kSimd128RegZero); ++} ++ ++void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst, ++ LiftoffRegister src) { ++ UNREACHABLE(); ++// xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); ++// asub_s_w(dst.fp().toW(), src.fp().toW(), kSimd128RegZero); ++} ++ ++void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ uint8_t imm_lane_idx) { ++ UNREACHABLE(); ++// copy_s_b(dst.gp(), lhs.fp().toW(), imm_lane_idx); ++} ++ ++void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ uint8_t imm_lane_idx) { ++ UNREACHABLE(); ++// copy_u_b(dst.gp(), lhs.fp().toW(), imm_lane_idx); ++} ++ ++void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ uint8_t imm_lane_idx) { ++ UNREACHABLE(); ++// copy_s_h(dst.gp(), lhs.fp().toW(), imm_lane_idx); ++} ++ ++void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ uint8_t imm_lane_idx) { ++ UNREACHABLE(); ++// copy_u_h(dst.gp(), lhs.fp().toW(), imm_lane_idx); ++} 
++ ++void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ uint8_t imm_lane_idx) { ++ UNREACHABLE(); ++// copy_s_w(dst.gp(), lhs.fp().toW(), imm_lane_idx); ++} ++ ++void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ uint8_t imm_lane_idx) { ++ UNREACHABLE(); ++// copy_s_d(dst.gp(), lhs.fp().toW(), imm_lane_idx); ++} ++ ++void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ uint8_t imm_lane_idx) { ++ UNREACHABLE(); ++// copy_u_w(kScratchReg, lhs.fp().toW(), imm_lane_idx); ++// TurboAssembler::FmoveLow(dst.fp(), kScratchReg); ++} ++ ++void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ uint8_t imm_lane_idx) { ++ UNREACHABLE(); ++// copy_s_d(kScratchReg, lhs.fp().toW(), imm_lane_idx); ++// TurboAssembler::Move(dst.fp(), kScratchReg); ++} ++ ++void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst, ++ LiftoffRegister src1, ++ LiftoffRegister src2, ++ uint8_t imm_lane_idx) { ++ UNREACHABLE(); ++#if 0 ++ if (dst != src1) { ++ move_v(dst.fp().toW(), src1.fp().toW()); ++ } ++ insert_b(dst.fp().toW(), imm_lane_idx, src2.gp()); ++#endif ++} ++ ++void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst, ++ LiftoffRegister src1, ++ LiftoffRegister src2, ++ uint8_t imm_lane_idx) { ++ UNREACHABLE(); ++#if 0 ++ if (dst != src1) { ++ move_v(dst.fp().toW(), src1.fp().toW()); ++ } ++ insert_h(dst.fp().toW(), imm_lane_idx, src2.gp()); ++#endif ++} ++ ++void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst, ++ LiftoffRegister src1, ++ LiftoffRegister src2, ++ uint8_t imm_lane_idx) { ++ UNREACHABLE(); ++#if 0 ++ if (dst != src1) { ++ move_v(dst.fp().toW(), src1.fp().toW()); ++ } ++ insert_w(dst.fp().toW(), imm_lane_idx, src2.gp()); ++#endif ++} ++ ++void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst, ++ LiftoffRegister src1, ++ LiftoffRegister src2, ++ uint8_t imm_lane_idx) { ++ UNREACHABLE(); ++#if 0 ++ if (dst != src1) { ++ move_v(dst.fp().toW(), src1.fp().toW()); ++ } ++ insert_d(dst.fp().toW(), imm_lane_idx, src2.gp()); ++#endif ++} ++ ++void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst, ++ LiftoffRegister src1, ++ LiftoffRegister src2, ++ uint8_t imm_lane_idx) { ++ UNREACHABLE(); ++#if 0 ++ TurboAssembler::FmoveLow(kScratchReg, src2.fp()); ++ if (dst != src1) { ++ move_v(dst.fp().toW(), src1.fp().toW()); ++ } ++ insert_w(dst.fp().toW(), imm_lane_idx, kScratchReg); ++#endif ++} ++ ++void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst, ++ LiftoffRegister src1, ++ LiftoffRegister src2, ++ uint8_t imm_lane_idx) { ++ UNREACHABLE(); ++#if 0 ++ TurboAssembler::Move(kScratchReg, src2.fp()); ++ if (dst != src1) { ++ move_v(dst.fp().toW(), src1.fp().toW()); ++ } ++ insert_d(dst.fp().toW(), imm_lane_idx, kScratchReg); ++#endif ++} ++ ++void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) { ++ TurboAssembler::Uldl(limit_address, MemOperand(limit_address)); ++ TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address)); ++} ++ ++void LiftoffAssembler::CallTrapCallbackForTesting() { ++ PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp()); ++ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0); ++} ++ ++void LiftoffAssembler::AssertUnreachable(AbortReason reason) { ++ if (emit_debug_code()) Abort(reason); ++} ++ ++void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { ++ LiftoffRegList gp_regs = regs & 
kGpCacheRegList; ++ unsigned num_gp_regs = gp_regs.GetNumRegsSet(); ++ if (num_gp_regs) { ++ unsigned offset = num_gp_regs * kSystemPointerSize; ++ subl(sp, offset, sp); ++ while (!gp_regs.is_empty()) { ++ LiftoffRegister reg = gp_regs.GetFirstRegSet(); ++ offset -= kSystemPointerSize; ++ Stl(reg.gp(), MemOperand(sp, offset)); ++ gp_regs.clear(reg); ++ } ++ DCHECK_EQ(offset, 0); ++ } ++ LiftoffRegList fp_regs = regs & kFpCacheRegList; ++ unsigned num_fp_regs = fp_regs.GetNumRegsSet(); ++ if (num_fp_regs) { ++ unsigned slot_size = IsEnabled(SW64_SIMD) ? 16 : 8; ++ subl(sp, (num_fp_regs * slot_size), sp); ++ unsigned offset = 0; ++ while (!fp_regs.is_empty()) { ++ LiftoffRegister reg = fp_regs.GetFirstRegSet(); ++ TurboAssembler::Fstd(reg.fp(), MemOperand(sp, offset)); ++ fp_regs.clear(reg); ++ offset += slot_size; ++ } ++ DCHECK_EQ(offset, num_fp_regs * slot_size); ++ } ++} ++ ++void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { ++ LiftoffRegList fp_regs = regs & kFpCacheRegList; ++ unsigned fp_offset = 0; ++ while (!fp_regs.is_empty()) { ++ LiftoffRegister reg = fp_regs.GetFirstRegSet(); ++ TurboAssembler::Fldd(reg.fp(), MemOperand(sp, fp_offset)); ++ fp_regs.clear(reg); ++ fp_offset += sizeof(double); ++ } ++ if (fp_offset) addl(sp, fp_offset, sp); ++ LiftoffRegList gp_regs = regs & kGpCacheRegList; ++ unsigned gp_offset = 0; ++ while (!gp_regs.is_empty()) { ++ LiftoffRegister reg = gp_regs.GetLastRegSet(); ++ Ldl(reg.gp(), MemOperand(sp, gp_offset)); ++ gp_regs.clear(reg); ++ gp_offset += kSystemPointerSize; ++ } ++ addl(sp, gp_offset, sp); ++} ++ ++void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) { ++ DCHECK_LT(num_stack_slots, ++ (1 << 16) / kSystemPointerSize); // 16 bit immediate ++ TurboAssembler::DropAndRet(static_cast(num_stack_slots)); ++} ++ ++void LiftoffAssembler::CallC(const wasm::FunctionSig* sig, ++ const LiftoffRegister* args, ++ const LiftoffRegister* rets, ++ ValueType out_argument_type, int stack_bytes, ++ ExternalReference ext_ref) { ++ Addl(sp, sp, -stack_bytes); ++ ++ int arg_bytes = 0; ++ for (ValueType param_type : sig->parameters()) { ++ liftoff::Store(this, sp, arg_bytes, *args++, param_type); ++ arg_bytes += param_type.element_size_bytes(); ++ } ++ DCHECK_LE(arg_bytes, stack_bytes); ++ ++ // Pass a pointer to the buffer with the arguments to the C function. ++ // On sw64, the first argument is passed in {a0}. ++ constexpr Register kFirstArgReg = a0; ++ mov(kFirstArgReg, sp); ++ ++ // Now call the C function. ++ constexpr int kNumCCallArgs = 1; ++ PrepareCallCFunction(kNumCCallArgs, kScratchReg); ++ CallCFunction(ext_ref, kNumCCallArgs); ++ ++ // Move return value to the right register. ++ const LiftoffRegister* next_result_reg = rets; ++ if (sig->return_count() > 0) { ++ DCHECK_EQ(1, sig->return_count()); ++ constexpr Register kReturnReg = v0; ++ if (kReturnReg != next_result_reg->gp()) { ++ Move(*next_result_reg, LiftoffRegister(kReturnReg), sig->GetReturn(0)); ++ } ++ ++next_result_reg; ++ } ++ ++ // Load potential output value from the buffer on the stack. 
++ if (out_argument_type != kWasmStmt) { ++ liftoff::Load(this, *next_result_reg, MemOperand(sp, 0), out_argument_type); ++ } ++ ++ Addl(sp, sp, stack_bytes); ++} ++ ++void LiftoffAssembler::CallNativeWasmCode(Address addr) { ++ Call(addr, RelocInfo::WASM_CALL); ++} ++ ++void LiftoffAssembler::TailCallNativeWasmCode(Address addr) { ++ Jump(addr, RelocInfo::WASM_CALL); ++} ++ ++void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig, ++ compiler::CallDescriptor* call_descriptor, ++ Register target) { ++ if (target == no_reg) { ++ pop(kScratchReg); ++ Call(kScratchReg); ++ } else { ++ Call(target); ++ } ++} ++ ++void LiftoffAssembler::TailCallIndirect(Register target) { ++ if (target == no_reg) { ++ Pop(kScratchReg); ++ Jump(kScratchReg); ++ } else { ++ Jump(target); ++ } ++} ++ ++void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) { ++ // A direct call to a wasm runtime stub defined in this module. ++ // Just encode the stub index. This will be patched at relocation. ++ Call(static_cast
(sid), RelocInfo::WASM_STUB_CALL); ++} ++ ++void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) { ++ Addl(sp, sp, -size); ++ TurboAssembler::Move(addr, sp); ++} ++ ++void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { ++ Addl(sp, sp, size); ++} ++ ++void LiftoffStackSlots::Construct() { ++ for (auto& slot : slots_) { ++ const LiftoffAssembler::VarState& src = slot.src_; ++ switch (src.loc()) { ++ case LiftoffAssembler::VarState::kStack: ++ asm_->Ldl(kScratchReg, liftoff::GetStackSlot(slot.src_offset_)); ++ asm_->push(kScratchReg); ++ break; ++ case LiftoffAssembler::VarState::kRegister: ++ liftoff::push(asm_, src.reg(), src.type()); ++ break; ++ case LiftoffAssembler::VarState::kIntConst: { ++ asm_->li(kScratchReg, Operand(src.i32_const())); ++ asm_->push(kScratchReg); ++ break; ++ } ++ } ++ } ++} ++ ++} // namespace wasm ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_WASM_BASELINE_SW64_LIFTOFF_ASSEMBLER_SW64_H_ +diff --git a/src/3rdparty/chromium/v8/src/wasm/jump-table-assembler.cc b/src/3rdparty/chromium/v8/src/wasm/jump-table-assembler.cc +index 90cdad467..c9bc0285b 100644 +--- a/src/3rdparty/chromium/v8/src/wasm/jump-table-assembler.cc ++++ b/src/3rdparty/chromium/v8/src/wasm/jump-table-assembler.cc +@@ -306,6 +306,41 @@ void JumpTableAssembler::NopBytes(int bytes) { + nop(0); + } + } ++#elif V8_TARGET_ARCH_SW64 ++void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index, ++ Address lazy_compile_target) { ++ int start = pc_offset(); ++ li(kWasmCompileLazyFuncIndexRegister, func_index); // max. 2 instr ++ // Jump produces max. 4 instructions for 32-bit platform ++ // and max. 6 instructions for 64-bit platform. ++ Jump(lazy_compile_target, RelocInfo::NONE); ++ int nop_bytes = start + kLazyCompileTableSlotSize - pc_offset(); ++ DCHECK_EQ(nop_bytes % kInstrSize, 0); ++ for (int i = 0; i < nop_bytes; i += kInstrSize) nop(); ++} ++ ++bool JumpTableAssembler::EmitJumpSlot(Address target) { ++ PatchAndJump(target); ++ return true; ++} ++ ++void JumpTableAssembler::EmitFarJumpSlot(Address target) { ++ JumpToInstructionStream(target); ++} ++ ++// static ++void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) { ++ UNREACHABLE(); ++} ++ ++void JumpTableAssembler::NopBytes(int bytes) { ++ DCHECK_LE(0, bytes); ++ DCHECK_EQ(0, bytes % kInstrSize); ++ for (; bytes > 0; bytes -= kInstrSize) { ++ nop(); ++ } ++} ++ + + #else + #error Unknown architecture. +diff --git a/src/3rdparty/chromium/v8/src/wasm/jump-table-assembler.h b/src/3rdparty/chromium/v8/src/wasm/jump-table-assembler.h +index 253f0bc01..1023e5e58 100644 +--- a/src/3rdparty/chromium/v8/src/wasm/jump-table-assembler.h ++++ b/src/3rdparty/chromium/v8/src/wasm/jump-table-assembler.h +@@ -215,6 +215,11 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler { + static constexpr int kJumpTableSlotSize = 8 * kInstrSize; + static constexpr int kFarJumpTableSlotSize = 6 * kInstrSize; + static constexpr int kLazyCompileTableSlotSize = 8 * kInstrSize; ++#elif V8_TARGET_ARCH_SW64 ++ static constexpr int kJumpTableLineSize = 10 * kInstrSize; ++ static constexpr int kJumpTableSlotSize = 10 * kInstrSize; ++ static constexpr int kFarJumpTableSlotSize = 5 * kInstrSize; ++ static constexpr int kLazyCompileTableSlotSize = 10 * kInstrSize; + #else + #error Unknown architecture. 
+ #endif +diff --git a/src/3rdparty/chromium/v8/src/wasm/wasm-debug.cc b/src/3rdparty/chromium/v8/src/wasm/wasm-debug.cc +index 10a2e194a..4dc267e5f 100644 +--- a/src/3rdparty/chromium/v8/src/wasm/wasm-debug.cc ++++ b/src/3rdparty/chromium/v8/src/wasm/wasm-debug.cc +@@ -884,9 +884,13 @@ class DebugInfoImpl { + debug_break_fp + + WasmDebugBreakFrameConstants::GetPushedFpRegisterOffset( + reg.fp().code()); ++#ifdef V8_TARGET_ARCH_SW64 ++ return WasmValue((float)(ReadUnalignedValue(spilled_addr))); ++#else + return type == kWasmF32 + ? WasmValue(ReadUnalignedValue(spilled_addr)) + : WasmValue(ReadUnalignedValue(spilled_addr)); ++#endif + } + + // Otherwise load the value from the stack. +diff --git a/src/3rdparty/chromium/v8/src/wasm/wasm-linkage.h b/src/3rdparty/chromium/v8/src/wasm/wasm-linkage.h +index 7e56ea6ea..731cc0b97 100644 +--- a/src/3rdparty/chromium/v8/src/wasm/wasm-linkage.h ++++ b/src/3rdparty/chromium/v8/src/wasm/wasm-linkage.h +@@ -84,6 +84,16 @@ constexpr Register kGpReturnRegisters[] = {r3, r4}; + constexpr DoubleRegister kFpParamRegisters[] = {d1, d2, d3, d4, d5, d6, d7, d8}; + constexpr DoubleRegister kFpReturnRegisters[] = {d1, d2}; + ++#elif V8_TARGET_ARCH_SW64 ++// =========================================================================== ++// == sw64 =================================================================== ++// =========================================================================== ++constexpr Register kGpParamRegisters[] = {a0, a2, a3, a4, a5}; ++constexpr Register kGpReturnRegisters[] = {v0, t4}; // t4 used as v1 ++constexpr DoubleRegister kFpParamRegisters[] = {f16, f17, f18, f19, f20, f21}; ++constexpr DoubleRegister kFpReturnRegisters[] = {f0, f1}; ++ ++ + #elif V8_TARGET_ARCH_S390X + // =========================================================================== + // == s390x ================================================================== +diff --git a/src/3rdparty/gn/tools/gn/args.cc b/src/3rdparty/gn/tools/gn/args.cc +index 802c3731d..0089612ed 100644 +--- a/src/3rdparty/gn/tools/gn/args.cc ++++ b/src/3rdparty/gn/tools/gn/args.cc +@@ -329,6 +329,7 @@ void Args::SetSystemVarsLocked(Scope* dest) const { + static const char kMips64[] = "mips64el"; + static const char kS390X[] = "s390x"; + static const char kPPC64[] = "ppc64"; ++ static const char kSW64[] = "sw_64"; + const char* arch = nullptr; + + // Set the host CPU architecture based on the underlying OS, not +@@ -353,6 +354,8 @@ void Args::SetSystemVarsLocked(Scope* dest) const { + // This allows us to use the same toolchain as ppc64 BE + // and specific flags are included using the host_byteorder logic. + arch = kPPC64; ++ else if (os_arch == "sw_64") ++ arch = kSW64; + else + CHECK(false) << "OS architecture not handled. 
(" << os_arch << ")"; + +diff --git a/src/3rdparty/gn/util/build_config.h b/src/3rdparty/gn/util/build_config.h +index addd7cfb0..1e2decee9 100644 +--- a/src/3rdparty/gn/util/build_config.h ++++ b/src/3rdparty/gn/util/build_config.h +@@ -172,6 +172,11 @@ + #define ARCH_CPU_32_BITS 1 + #define ARCH_CPU_BIG_ENDIAN 1 + #endif ++#elif defined(__sw_64__) ++#define ARCH_CPU_SW64_FAMILY 1 ++#define ARCH_CPU_SW64 1 ++#define ARCH_CPU_64_BITS 1 ++#define ARCH_CPU_LITTLE_ENDIAN 1 + #else + #error Please add support for your architecture in build_config.h + #endif +-- +2.39.3 + diff --git a/0001-fix-loongarch64-build-error.patch b/0001-fix-loongarch64-build-error.patch new file mode 100644 index 0000000..eb37715 --- /dev/null +++ b/0001-fix-loongarch64-build-error.patch @@ -0,0 +1,67 @@ +From c71163ea900945cd39b04697c4571cb2026c07ca Mon Sep 17 00:00:00 2001 +From: root +Date: Wed, 12 Jun 2024 20:05:50 +0800 +Subject: [PATCH] fix loongarch64 build error + +--- + src/3rdparty/chromium/base/time/pr_time_unittest.cc | 2 +- + src/3rdparty/chromium/base/time/time.cc | 2 +- + src/3rdparty/chromium/third_party/lss/linux_syscall_support.h | 4 ++++ + 3 files changed, 6 insertions(+), 2 deletions(-) + +diff --git a/src/3rdparty/chromium/base/time/pr_time_unittest.cc b/src/3rdparty/chromium/base/time/pr_time_unittest.cc +index 364603451..59a541a56 100644 +--- a/src/3rdparty/chromium/base/time/pr_time_unittest.cc ++++ b/src/3rdparty/chromium/base/time/pr_time_unittest.cc +@@ -7,7 +7,7 @@ + + #include "base/compiler_specific.h" + #include "base/stl_util.h" +-#include ++#include + #include "base/time/time.h" + #include "build/build_config.h" + #include "testing/gtest/include/gtest/gtest.h" +diff --git a/src/3rdparty/chromium/base/time/time.cc b/src/3rdparty/chromium/base/time/time.cc +index 084bed7e6..ba4554b5d 100644 +--- a/src/3rdparty/chromium/base/time/time.cc ++++ b/src/3rdparty/chromium/base/time/time.cc +@@ -14,7 +14,7 @@ + #include "base/macros.h" + #include "base/no_destructor.h" + #include "base/strings/stringprintf.h" +-#include ++#include + #include "base/time/time_override.h" + #include "build/build_config.h" + +diff --git a/src/3rdparty/chromium/third_party/lss/linux_syscall_support.h b/src/3rdparty/chromium/third_party/lss/linux_syscall_support.h +index 4ef451a9d..3858a63dd 100644 +--- a/src/3rdparty/chromium/third_party/lss/linux_syscall_support.h ++++ b/src/3rdparty/chromium/third_party/lss/linux_syscall_support.h +@@ -3538,9 +3538,11 @@ struct kernel_statfs { + gid_t *, e, gid_t *, s) + LSS_INLINE _syscall3(int, getresuid, uid_t *, r, + uid_t *, e, uid_t *, s) ++#if defined(__NR_getrlimit) + #if !defined(__ARM_EABI__) + LSS_INLINE _syscall2(int, getrlimit, int, r, + struct kernel_rlimit*, l) ++#endif + #endif + LSS_INLINE _syscall1(pid_t, getsid, pid_t, p) + LSS_INLINE _syscall0(pid_t, _gettid) +@@ -3656,8 +3658,10 @@ struct kernel_statfs { + gid_t, e, gid_t, s) + LSS_INLINE _syscall3(int, setresuid, uid_t, r, + uid_t, e, uid_t, s) ++#if defined(__NR_setrlimit) + LSS_INLINE _syscall2(int, setrlimit, int, r, + const struct kernel_rlimit*, l) ++#endif + LSS_INLINE _syscall0(pid_t, setsid) + LSS_INLINE _syscall2(int, sigaltstack, const stack_t*, s, + const stack_t*, o) +-- +2.39.3 + diff --git a/0001-fix-sw_64-build-error.patch b/0001-fix-sw_64-build-error.patch new file mode 100644 index 0000000..028d049 --- /dev/null +++ b/0001-fix-sw_64-build-error.patch @@ -0,0 +1,1081 @@ +From 6012e1b39d91d472012f6e0e3d0a066a7580547a Mon Sep 17 00:00:00 2001 +From: root +Date: Mon, 17 Jun 2024 16:46:02 +0800 
+Subject: [PATCH] fix sw_64 build error + +--- + .../linux/seccomp-bpf-helpers/syscall_sets.cc | 2 +- + .../client/linux/handler/exception_handler.cc | 2 +- + .../google_breakpad/common/minidump_format.h | 1 + + .../trace_processor/containers/string_pool.h | 2 +- + .../swiftshader/src/Reactor/BUILD.gn | 2 +- + .../v8/src/builtins/sw64/builtins-sw64.cc | 4 +- + .../sw64/interface-descriptors-sw64.cc | 34 +- + .../src/codegen/sw64/macro-assembler-sw64.cc | 2 +- + .../compiler/backend/instruction-selector.cc | 4 +- + .../backend/sw64/code-generator-sw64.cc | 24 +- + .../backend/sw64/instruction-selector-sw64.cc | 89 +- + .../sw64/unwinding-info-writer-sw64.cc | 6 +- + .../sw64/regexp-macro-assembler-sw64.cc | 40 +- + .../regexp/sw64/regexp-macro-assembler-sw64.h | 5 +- + .../baseline/sw64/liftoff-assembler-sw64.h | 117 +- + 16 files changed, 228 insertions(+), 1764 deletions(-) + +diff --git a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc +index cae75fe12..a039289a3 100644 +--- a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc ++++ b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc +@@ -784,7 +784,7 @@ bool SyscallSets::IsDebug(int sysno) { + + bool SyscallSets::IsGlobalSystemStatus(int sysno) { + switch (sysno) { +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__sw_64__) + case __NR__sysctl: + case __NR_sysfs: + #endif +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler.cc b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler.cc +index 44a5e3222..02b1eeeee 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler.cc ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler.cc +@@ -143,7 +143,7 @@ void InstallAlternateStackLocked() { + // SIGSTKSZ may be too small to prevent the signal handlers from overrunning + // the alternative stack. Ensure that the size of the alternative stack is + // large enough. +- static const unsigned kSigStackSize = std::max(16384, SIGSTKSZ); ++ static const unsigned kSigStackSize = 16384; + + // Only set an alternative stack if there isn't already one, or if the current + // one is too small. 
+diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/google_breakpad/common/minidump_format.h b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/google_breakpad/common/minidump_format.h +index 202aec622..8e37504b4 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/google_breakpad/common/minidump_format.h ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/google_breakpad/common/minidump_format.h +@@ -120,6 +120,7 @@ typedef struct { + #include "minidump_cpu_ppc64.h" + #include "minidump_cpu_sparc.h" + #include "minidump_cpu_x86.h" ++#include "minidump_cpu_sw64.h" + + /* + * WinVer.h +diff --git a/src/3rdparty/chromium/third_party/perfetto/src/trace_processor/containers/string_pool.h b/src/3rdparty/chromium/third_party/perfetto/src/trace_processor/containers/string_pool.h +index 11ae91cfe..d2933605e 100644 +--- a/src/3rdparty/chromium/third_party/perfetto/src/trace_processor/containers/string_pool.h ++++ b/src/3rdparty/chromium/third_party/perfetto/src/trace_processor/containers/string_pool.h +@@ -19,7 +19,7 @@ + + #include + #include +- ++#include + #include + #include + +diff --git a/src/3rdparty/chromium/third_party/swiftshader/src/Reactor/BUILD.gn b/src/3rdparty/chromium/third_party/swiftshader/src/Reactor/BUILD.gn +index 1154dba42..fcebedf4b 100644 +--- a/src/3rdparty/chromium/third_party/swiftshader/src/Reactor/BUILD.gn ++++ b/src/3rdparty/chromium/third_party/swiftshader/src/Reactor/BUILD.gn +@@ -19,7 +19,7 @@ declare_args() { + # PPC64. + use_swiftshader_with_subzero = + current_cpu != "arm64" && current_cpu != "mips64el" && current_cpu != "ppc64" +- supports_llvm = is_linux || is_fuchsia || is_win || is_android || is_mac ++ supports_llvm = (is_linux || is_fuchsia || is_win || is_android || is_mac) && current_cpu != "sw_64" + } + + config("swiftshader_reactor_private_config") { +diff --git a/src/3rdparty/chromium/v8/src/builtins/sw64/builtins-sw64.cc b/src/3rdparty/chromium/v8/src/builtins/sw64/builtins-sw64.cc +index c9565819f..ea1dad8a2 100755 +--- a/src/3rdparty/chromium/v8/src/builtins/sw64/builtins-sw64.cc ++++ b/src/3rdparty/chromium/v8/src/builtins/sw64/builtins-sw64.cc +@@ -2855,12 +2855,12 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) { + __ Pop(result_reg, scratch, scratch2, scratch3); + __ Ret(); + } +- ++/* + void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { + // TODO(v8:10701): Implement for this platform. 
+ __ Trap(); + } +- ++*/ + namespace { + + int AddressOffset(ExternalReference ref0, ExternalReference ref1) { +diff --git a/src/3rdparty/chromium/v8/src/codegen/sw64/interface-descriptors-sw64.cc b/src/3rdparty/chromium/v8/src/codegen/sw64/interface-descriptors-sw64.cc +index 8edc3a06f..6d32c59fc 100755 +--- a/src/3rdparty/chromium/v8/src/codegen/sw64/interface-descriptors-sw64.cc ++++ b/src/3rdparty/chromium/v8/src/codegen/sw64/interface-descriptors-sw64.cc +@@ -22,6 +22,7 @@ void CallInterfaceDescriptor::DefaultInitializePlatformSpecific( + default_stub_registers); + } + ++ + void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + const Register default_stub_registers[] = {a0, a1, a2, a3}; +@@ -30,6 +31,14 @@ void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific( + data->InitializePlatformSpecific(kParameterCount, default_stub_registers); + } + ++void WasmI32AtomicWait64Descriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ const Register default_stub_registers[] = {a0, a1, a2}; ++ CHECK_EQ(static_cast(kParameterCount), ++ arraysize(default_stub_registers)); ++ data->InitializePlatformSpecific(kParameterCount, default_stub_registers); ++} ++ + void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + const Register default_stub_registers[] = {a0, a1, a2, a3, a4}; +@@ -39,6 +48,14 @@ void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific( + default_stub_registers); + } + ++void WasmI64AtomicWait64Descriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ const Register default_stub_registers[] = {a0, a1, a2}; ++ CHECK_EQ(static_cast(kParameterCount), ++ arraysize(default_stub_registers)); ++ data->InitializePlatformSpecific(kParameterCount, default_stub_registers); ++} ++ + void RecordWriteDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0}; +@@ -62,18 +79,21 @@ void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific( + arraysize(default_stub_registers)); + data->InitializePlatformSpecific(kParameterCount, default_stub_registers); + } +- ++const Register FastNewFunctionContextDescriptor::ScopeInfoRegister() { ++ return a1; ++} ++const Register FastNewFunctionContextDescriptor::SlotsRegister() { return a0; } + const Register LoadDescriptor::ReceiverRegister() { return a1; } + const Register LoadDescriptor::NameRegister() { return a2; } + const Register LoadDescriptor::SlotRegister() { return a0; } + + const Register LoadWithVectorDescriptor::VectorRegister() { return a3; } +- ++/* + const Register + LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() { + return a4; + } +- ++*/ + const Register StoreDescriptor::ReceiverRegister() { return a1; } + const Register StoreDescriptor::NameRegister() { return a2; } + const Register StoreDescriptor::ValueRegister() { return a0; } +@@ -209,6 +229,11 @@ void AbortDescriptor::InitializePlatformSpecific( + data->InitializePlatformSpecific(arraysize(registers), registers); + } + ++void AllocateHeapNumberDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ data->InitializePlatformSpecific(0, nullptr); ++} ++ + void CompareDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + Register registers[] = {a1, a0}; +@@ -296,7 +321,7 @@ void RunMicrotasksEntryDescriptor::InitializePlatformSpecific( + Register registers[] 
= {a0, a1}; + data->InitializePlatformSpecific(arraysize(registers), registers); + } +- ++/* + void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + // TODO(v8:8888): Implement on this platform. +@@ -344,6 +369,7 @@ void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific( + // TODO(v8:8888): Implement on this platform. + DefaultInitializePlatformSpecific(data, 3); + } ++*/ + + } // namespace internal + } // namespace v8 +diff --git a/src/3rdparty/chromium/v8/src/codegen/sw64/macro-assembler-sw64.cc b/src/3rdparty/chromium/v8/src/codegen/sw64/macro-assembler-sw64.cc +index a678af140..6e891db7f 100755 +--- a/src/3rdparty/chromium/v8/src/codegen/sw64/macro-assembler-sw64.cc ++++ b/src/3rdparty/chromium/v8/src/codegen/sw64/macro-assembler-sw64.cc +@@ -16,7 +16,7 @@ + #include "src/codegen/register-configuration.h" + #include "src/debug/debug.h" + #include "src/execution/frames-inl.h" +-#include "src/heap/memory-chunk.h" ++#include "src/heap/heap-inl.h" + #include "src/init/bootstrapper.h" + #include "src/logging/counters.h" + #include "src/objects/heap-number.h" +diff --git a/src/3rdparty/chromium/v8/src/compiler/backend/instruction-selector.cc b/src/3rdparty/chromium/v8/src/compiler/backend/instruction-selector.cc +index 697d70bbf..d3b408604 100644 +--- a/src/3rdparty/chromium/v8/src/compiler/backend/instruction-selector.cc ++++ b/src/3rdparty/chromium/v8/src/compiler/backend/instruction-selector.cc +@@ -2611,10 +2611,12 @@ void InstructionSelector::VisitI64x2ReplaceLaneI32Pair(Node* node) { + #endif // !V8_TARGET_ARCH_IA32 + + #if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X +-#if !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_SW64 ++#if !V8_TARGET_ARCH_ARM64 ++#if !V8_TARGET_ARCH_SW64 + void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); } + void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); } + void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); } ++#endif + void InstructionSelector::VisitI64x2Eq(Node* node) { UNIMPLEMENTED(); } + void InstructionSelector::VisitI64x2Ne(Node* node) { UNIMPLEMENTED(); } + void InstructionSelector::VisitI64x2GtS(Node* node) { UNIMPLEMENTED(); } +diff --git a/src/3rdparty/chromium/v8/src/compiler/backend/sw64/code-generator-sw64.cc b/src/3rdparty/chromium/v8/src/compiler/backend/sw64/code-generator-sw64.cc +index 02840f457..af7e05e16 100755 +--- a/src/3rdparty/chromium/v8/src/compiler/backend/sw64/code-generator-sw64.cc ++++ b/src/3rdparty/chromium/v8/src/compiler/backend/sw64/code-generator-sw64.cc +@@ -12,7 +12,7 @@ + #include "src/compiler/backend/gap-resolver.h" + #include "src/compiler/node-matchers.h" + #include "src/compiler/osr.h" +-#include "src/heap/memory-chunk.h" ++#include "src/heap/heap-inl.h" + #include "src/wasm/wasm-code-manager.h" + + namespace v8 { +@@ -903,9 +903,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( + Register value = i.InputRegister(2); + Register scratch0 = i.TempRegister(0); + Register scratch1 = i.TempRegister(1); +- auto ool = zone()->New(this, object, index, value, +- scratch0, scratch1, mode, +- DetermineStubCallMode()); ++ auto ool = new (zone()) ++ OutOfLineRecordWrite(this, object, index, value, scratch0, scratch1, ++ mode, DetermineStubCallMode()); + __ Addl(kScratchReg, object, index); + __ Stl(value, MemOperand(kScratchReg)); + __ CheckPageFlag(object, scratch0, +@@ -1523,7 +1523,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( + 
FPURegister dst = i.OutputSingleRegister(); + FPURegister src1 = i.InputSingleRegister(0); + FPURegister src2 = i.InputSingleRegister(1); +- auto ool = zone()->New(this, dst, src1, src2); ++ auto ool = new (zone()) OutOfLineFloat32Max(this, dst, src1, src2); + __ Float32Max(dst, src1, src2, ool->entry()); + __ bind(ool->exit()); + break; +@@ -1532,7 +1532,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( + FPURegister dst = i.OutputDoubleRegister(); + FPURegister src1 = i.InputDoubleRegister(0); + FPURegister src2 = i.InputDoubleRegister(1); +- auto ool = zone()->New(this, dst, src1, src2); ++ auto ool = new (zone()) OutOfLineFloat64Max(this, dst, src1, src2); + __ Float64Max(dst, src1, src2, ool->entry()); + __ bind(ool->exit()); + break; +@@ -1541,7 +1541,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( + FPURegister dst = i.OutputSingleRegister(); + FPURegister src1 = i.InputSingleRegister(0); + FPURegister src2 = i.InputSingleRegister(1); +- auto ool = zone()->New(this, dst, src1, src2); ++ auto ool = new (zone()) OutOfLineFloat32Min(this, dst, src1, src2); + __ Float32Min(dst, src1, src2, ool->entry()); + __ bind(ool->exit()); + break; +@@ -1550,7 +1550,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( + FPURegister dst = i.OutputDoubleRegister(); + FPURegister src1 = i.InputDoubleRegister(0); + FPURegister src2 = i.InputDoubleRegister(1); +- auto ool = zone()->New(this, dst, src1, src2); ++ auto ool = new (zone()) OutOfLineFloat64Min(this, dst, src1, src2); + __ Float64Min(dst, src1, src2, ool->entry()); + __ bind(ool->exit()); + break; +@@ -3528,7 +3528,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr, + __ PrepareCallCFunction(0, 0, cp); + __ CallCFunction( + ExternalReference::wasm_call_trap_callback_for_testing(), 0); +- __ LeaveFrame(StackFrame::WASM); ++ __ LeaveFrame(StackFrame::WASM_COMPILED); + auto call_descriptor = gen_->linkage()->GetIncomingDescriptor(); + int pop_count = + static_cast(call_descriptor->StackParameterCount()); +@@ -3542,7 +3542,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr, + // is added to the native module and copied into wasm code space. + __ Call(static_cast
(trap_id), RelocInfo::WASM_STUB_CALL); + ReferenceMap* reference_map = +- gen_->zone()->New(gen_->zone()); ++ new (gen_->zone()) ReferenceMap(gen_->zone()); + gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt); + if (FLAG_debug_code) { + __ halt();//stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap)); +@@ -3552,7 +3552,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr, + Instruction* instr_; + CodeGenerator* gen_; + }; +- auto ool = zone()->New(this, instr); ++ auto ool = new (zone()) OutOfLineTrap(this, instr); + Label* tlabel = ool->entry(); + AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true); + } +@@ -3826,7 +3826,7 @@ void CodeGenerator::AssembleConstructFrame() { + + __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL); + // We come from WebAssembly, there are no references for the GC. +- ReferenceMap* reference_map = zone()->New(zone()); ++ ReferenceMap* reference_map = new (zone()) ReferenceMap(zone()); + RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt); + if (FLAG_debug_code) { + __ halt(); +diff --git a/src/3rdparty/chromium/v8/src/compiler/backend/sw64/instruction-selector-sw64.cc b/src/3rdparty/chromium/v8/src/compiler/backend/sw64/instruction-selector-sw64.cc +index d152466a7..7a39f8052 100755 +--- a/src/3rdparty/chromium/v8/src/compiler/backend/sw64/instruction-selector-sw64.cc ++++ b/src/3rdparty/chromium/v8/src/compiler/backend/sw64/instruction-selector-sw64.cc +@@ -455,11 +455,11 @@ void InstructionSelector::VisitStore(Node* node) { + StoreRepresentation store_rep = StoreRepresentationOf(node->op()); + WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind(); + MachineRepresentation rep = store_rep.representation(); +- ++/* + if (FLAG_enable_unconditional_write_barriers && CanBeTaggedPointer(rep)) { + write_barrier_kind = kFullWriteBarrier; + } +- ++*/ + // TODO(sw64): I guess this could be done in a better way. 
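// Editorial illustration, not part of the patch: the hunks in this file keep
// replacing calls to V8's templated Zone::New helper (`zone()->New<T>(args...)`)
// with the placement form `new (zone()) T(args...)`. Both spellings construct
// the object inside the same bump-allocated Zone; upstream V8 routes the second
// form through ZoneObject's class-scope operator new, while this self-contained
// sketch uses a free placement operator new instead.
#include <cstddef>
#include <new>

class Zone {
 public:
  void* Allocate(std::size_t size) {            // bump-pointer allocation
    void* p = cursor_;
    cursor_ += (size + 7) & ~std::size_t{7};    // keep 8-byte alignment
    return p;
  }
  template <typename T, typename... Args>
  T* New(Args&&... args) {                      // the newer V8-style helper
    return new (Allocate(sizeof(T))) T(static_cast<Args&&>(args)...);
  }
 private:
  alignas(16) char buffer_[4096];               // no overflow check: sketch only
  char* cursor_ = buffer_;
};

inline void* operator new(std::size_t size, Zone* zone) {
  return zone->Allocate(size);                  // the older placement form
}

struct OutOfLineStub { explicit OutOfLineStub(int id) : id(id) {} int id; };

int main() {
  Zone zone;
  OutOfLineStub* a = zone.New<OutOfLineStub>(1);    // form removed by the hunks
  OutOfLineStub* b = new (&zone) OutOfLineStub(2);  // form the hunks switch to
  return a->id + b->id - 3;                         // both objects live in the arena
}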
+ if (write_barrier_kind != kNoWriteBarrier && + V8_LIKELY(!FLAG_disable_write_barriers)) { +@@ -853,11 +853,11 @@ void InstructionSelector::VisitWord64Sar(Node* node) { + if (TryEmitExtendingLoad(this, node, node)) return; + VisitRRO(this, kSw64Dsar, node); + } +- ++/* + void InstructionSelector::VisitWord32Rol(Node* node) { UNREACHABLE(); } + + void InstructionSelector::VisitWord64Rol(Node* node) { UNREACHABLE(); } +- ++*/ + void InstructionSelector::VisitWord32Ror(Node* node) { + VisitRRO(this, kSw64Ror, node); + } +@@ -1402,7 +1402,7 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) { + } + } + +- ++/* + bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) { + DCHECK_NE(node->opcode(), IrOpcode::kPhi); + switch (node->opcode()) { +@@ -1430,7 +1430,7 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) { + return false; + } + } +- ++*/ + void InstructionSelector::VisitChangeUint32ToUint64(Node* node) { + Sw64OperandGenerator g(this); + #ifdef SW64 +@@ -2996,7 +2996,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { + V(S128Or, kSw64S128Or) \ + V(S128Xor, kSw64S128Xor) \ + V(S128AndNot, kSw64S128AndNot) +- ++/* + //SKTODO + void InstructionSelector::VisitS128Const(Node* node) { + Sw64OperandGenerator g(this); +@@ -3017,7 +3017,7 @@ void InstructionSelector::VisitS128Const(Node* node) { + g.UseImmediate(val[2]), g.UseImmediate(val[3])); + } + } +- ++*/ + void InstructionSelector::VisitS128Zero(Node* node) { + Sw64OperandGenerator g(this); + Emit(kSw64S128Zero, g.DefineAsRegister(node)); +@@ -3050,14 +3050,14 @@ SIMD_VISIT_EXTRACT_LANE(I8x16, S) + } + SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE) + #undef SIMD_VISIT_REPLACE_LANE +- ++/* + #define SIMD_VISIT_UNOP(Name, instruction) \ + void InstructionSelector::Visit##Name(Node* node) { \ + VisitRR(this, instruction, node); \ + } + SIMD_UNOP_LIST(SIMD_VISIT_UNOP) + #undef SIMD_VISIT_UNOP +- ++*/ + #define SIMD_VISIT_SHIFT_OP(Name) \ + void InstructionSelector::Visit##Name(Node* node) { \ + VisitSimdShift(this, kSw64##Name, node); \ +@@ -3154,7 +3154,7 @@ bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table, + } + + } // namespace +- ++/* + void InstructionSelector::VisitI8x16Shuffle(Node* node) { + uint8_t shuffle[kSimd128Size]; + bool is_swizzle; +@@ -3198,7 +3198,7 @@ void InstructionSelector::VisitI8x16Swizzle(Node* node) { + g.UseUniqueRegister(node->InputAt(0)), + g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); + } +- ++*/ + void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) { + Sw64OperandGenerator g(this); + Emit(kSw64Seb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); +@@ -3224,7 +3224,7 @@ void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) { + Emit(kSw64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), + g.TempImmediate(0)); + } +- ++/* + void InstructionSelector::VisitF32x4Pmin(Node* node) { + UNREACHABLE(); + // VisitUniqueRRR(this, kSw64F32x4Pmin, node); +@@ -3244,6 +3244,69 @@ void InstructionSelector::VisitF64x2Pmax(Node* node) { + UNREACHABLE(); + // VisitUniqueRRR(this, kSw64F64x2Pmax, node); + } ++*/ ++void InstructionSelector::VisitI8x16Abs(Node* node) { UNIMPLEMENTED(); } ++void InstructionSelector::VisitS1x16AllTrue(Node* node) { UNIMPLEMENTED(); } ++void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) { ++ UNIMPLEMENTED(); ++} ++void InstructionSelector::VisitF64x2Sqrt(Node* node) { UNIMPLEMENTED(); } ++void 
InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) { ++ UNIMPLEMENTED(); ++} ++void InstructionSelector::VisitF32x4RecipApprox(Node* node) { UNIMPLEMENTED(); } ++void InstructionSelector::VisitF32x4Sqrt(Node* node) { UNIMPLEMENTED(); } ++void InstructionSelector::VisitF32x4Neg(Node* node) { UNIMPLEMENTED(); } ++void InstructionSelector::VisitF32x4Abs(Node* node) { UNIMPLEMENTED(); } ++void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) { ++ UNIMPLEMENTED(); ++} ++void InstructionSelector::VisitF64x2Neg(Node* node) { UNIMPLEMENTED(); } ++void InstructionSelector::VisitF64x2Abs(Node* node) { UNIMPLEMENTED(); } ++void InstructionSelector::VisitI32x4Abs(Node* node) { UNIMPLEMENTED(); } ++void InstructionSelector::VisitI16x8Neg(Node* node) { UNIMPLEMENTED(); } ++void InstructionSelector::VisitI16x8SConvertI8x16High(Node* node) { ++ UNIMPLEMENTED(); ++} ++void InstructionSelector::VisitI16x8SConvertI8x16Low(Node* node) { ++ UNIMPLEMENTED(); ++} ++void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) { ++ UNIMPLEMENTED(); ++} ++void InstructionSelector::VisitI16x8UConvertI8x16Low(Node* node) { ++ UNIMPLEMENTED(); ++} ++void InstructionSelector::VisitI32x4UConvertI16x8Low(Node* node) { ++ UNIMPLEMENTED(); ++} ++void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) { ++ UNIMPLEMENTED(); ++} ++void InstructionSelector::VisitI32x4Neg(Node* node) { UNIMPLEMENTED(); } ++void InstructionSelector::VisitI32x4SConvertI16x8High(Node* node) { ++ UNIMPLEMENTED(); ++} ++void InstructionSelector::VisitI32x4SConvertI16x8Low(Node* node) { ++ UNIMPLEMENTED(); ++} ++void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) { ++ UNIMPLEMENTED(); ++} ++void InstructionSelector::VisitI64x2Neg(Node* node) { UNIMPLEMENTED(); } ++void InstructionSelector::VisitS8x16Shuffle(Node* node) { UNIMPLEMENTED(); } ++void InstructionSelector::VisitS8x16Swizzle(Node* node) { UNIMPLEMENTED(); } ++void InstructionSelector::VisitS128Not(Node* node) { UNIMPLEMENTED(); } ++void InstructionSelector::VisitI8x16Neg(Node* node) { UNIMPLEMENTED(); } ++void InstructionSelector::VisitI16x8Abs(Node* node) { UNIMPLEMENTED(); } ++void InstructionSelector::VisitS1x8AllTrue(Node* node) { UNIMPLEMENTED(); } ++void InstructionSelector::VisitS1x8AnyTrue(Node* node) { UNIMPLEMENTED(); } ++void InstructionSelector::VisitS1x4AllTrue(Node* node) { UNIMPLEMENTED(); } ++void InstructionSelector::VisitS1x4AnyTrue(Node* node) { UNIMPLEMENTED(); } ++void InstructionSelector::VisitI32x4UConvertI16x8High(Node* node) { ++ UNIMPLEMENTED(); ++} ++void InstructionSelector::VisitS1x16AnyTrue(Node* node) { UNIMPLEMENTED(); } + + // static + MachineOperatorBuilder::Flags +diff --git a/src/3rdparty/chromium/v8/src/compiler/backend/sw64/unwinding-info-writer-sw64.cc b/src/3rdparty/chromium/v8/src/compiler/backend/sw64/unwinding-info-writer-sw64.cc +index 2eb6c1851..e40c94ccb 100755 +--- a/src/3rdparty/chromium/v8/src/compiler/backend/sw64/unwinding-info-writer-sw64.cc ++++ b/src/3rdparty/chromium/v8/src/compiler/backend/sw64/unwinding-info-writer-sw64.cc +@@ -49,8 +49,10 @@ void UnwindingInfoWriter::EndInstructionBlock(const InstructionBlock* block) { + if (existing_state) { + DCHECK_EQ(existing_state->saved_lr_, saved_lr_); + } else { +- block_initial_states_[successor_index] = +- zone_->New(saved_lr_); ++ block_initial_states_[successor_index] = ++ new (zone_) BlockInitialState(saved_lr_); ++// block_initial_states_[successor_index] = ++// zone_->New(saved_lr_); + } + } + } +diff --git 
a/src/3rdparty/chromium/v8/src/regexp/sw64/regexp-macro-assembler-sw64.cc b/src/3rdparty/chromium/v8/src/regexp/sw64/regexp-macro-assembler-sw64.cc +index 7561532f3..f0ba6a80d 100755 +--- a/src/3rdparty/chromium/v8/src/regexp/sw64/regexp-macro-assembler-sw64.cc ++++ b/src/3rdparty/chromium/v8/src/regexp/sw64/regexp-macro-assembler-sw64.cc +@@ -292,7 +292,7 @@ void RegExpMacroAssemblerSW64::CheckGreedyLoop(Label* on_equal) { + + + void RegExpMacroAssemblerSW64::CheckNotBackReferenceIgnoreCase( +- int start_reg, bool read_backward, bool unicode, Label* on_no_match) { ++ int start_reg, bool read_backward, Label* on_no_match) { + Label fallthrough; + __ Ldl(a0, register_location(start_reg)); // Index of start of capture. + __ Ldl(a1, register_location(start_reg + 1)); // Index of end of capture. +@@ -406,10 +406,7 @@ void RegExpMacroAssemblerSW64::CheckNotBackReferenceIgnoreCase( + { + AllowExternalCallThatCantCauseGC scope(masm_); + ExternalReference function = +- unicode ? ExternalReference::re_case_insensitive_compare_unicode( +- isolate()) +- : ExternalReference::re_case_insensitive_compare_non_unicode( +- isolate()); ++ ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate()); + __ CallCFunction(function, argument_count); + } + +@@ -979,13 +976,12 @@ Handle RegExpMacroAssemblerSW64::GetCode(Handle source) { + + CodeDesc code_desc; + masm_->GetCode(isolate(), &code_desc); +- Handle code = +- Factory::CodeBuilder(isolate(), code_desc, CodeKind::REGEXP) +- .set_self_reference(masm_->CodeObject()) +- .Build(); +- LOG(masm_->isolate(), +- RegExpCodeCreateEvent(Handle::cast(code), source)); +- return Handle::cast(code); ++ Handle code = Factory::CodeBuilder(isolate(), code_desc, Code::REGEXP) ++ .set_self_reference(masm_->CodeObject()) ++ .Build(); ++ PROFILE(masm_->isolate(), ++ RegExpCodeCreateEvent(Handle::cast(code), source)); ++ return Handle::cast(code); + } + + +@@ -1027,6 +1023,26 @@ RegExpMacroAssembler::IrregexpImplementation + return kSW64Implementation; + } + ++void RegExpMacroAssemblerSW64::LoadCurrentCharacterImpl(int cp_offset, ++ Label* on_end_of_input, ++ bool check_bounds, ++ int characters, ++ int eats_at_least) { ++ // It's possible to preload a small number of characters when each success ++ // path requires a large number of characters, but not the reverse. ++ DCHECK_GE(eats_at_least, characters); ++ ++ DCHECK(cp_offset < (1 << 30)); // Be sane! (And ensure negation works). 
++ if (check_bounds) { ++ if (cp_offset >= 0) { ++ CheckPosition(cp_offset + eats_at_least - 1, on_end_of_input); ++ } else { ++ CheckPosition(cp_offset, on_end_of_input); ++ } ++ } ++ LoadCurrentCharacterUnchecked(cp_offset, characters); ++} ++ + + void RegExpMacroAssemblerSW64::PopCurrentPosition() { + Pop(current_input_offset()); +diff --git a/src/3rdparty/chromium/v8/src/regexp/sw64/regexp-macro-assembler-sw64.h b/src/3rdparty/chromium/v8/src/regexp/sw64/regexp-macro-assembler-sw64.h +index ab4c62db6..b6ad2ee01 100755 +--- a/src/3rdparty/chromium/v8/src/regexp/sw64/regexp-macro-assembler-sw64.h ++++ b/src/3rdparty/chromium/v8/src/regexp/sw64/regexp-macro-assembler-sw64.h +@@ -37,7 +37,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerSW64 + virtual void CheckNotBackReference(int start_reg, bool read_backward, + Label* on_no_match); + virtual void CheckNotBackReferenceIgnoreCase(int start_reg, +- bool read_backward, bool unicode, ++ bool read_backward, + Label* on_no_match); + virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal); + virtual void CheckNotCharacterAfterAnd(uint32_t c, +@@ -69,6 +69,9 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerSW64 + virtual IrregexpImplementation Implementation(); + virtual void LoadCurrentCharacterUnchecked(int cp_offset, + int character_count); ++ virtual void LoadCurrentCharacterImpl(int cp_offset, Label* on_end_of_input, ++ bool check_bounds, int characters, ++ int eats_at_least); + virtual void PopCurrentPosition(); + virtual void PopRegister(int register_index); + virtual void PushBacktrack(Label* label); +diff --git a/src/3rdparty/chromium/v8/src/wasm/baseline/sw64/liftoff-assembler-sw64.h b/src/3rdparty/chromium/v8/src/wasm/baseline/sw64/liftoff-assembler-sw64.h +index 76e48d0a6..6c093b89d 100755 +--- a/src/3rdparty/chromium/v8/src/wasm/baseline/sw64/liftoff-assembler-sw64.h ++++ b/src/3rdparty/chromium/v8/src/wasm/baseline/sw64/liftoff-assembler-sw64.h +@@ -69,8 +69,8 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src, + assm->Ldw(dst.gp(), src); + break; + case ValueType::kI64: +- case ValueType::kRef: +- case ValueType::kOptRef: ++// case ValueType::kRef: ++// case ValueType::kOptRef: + assm->Ldl(dst.gp(), src); + break; + case ValueType::kF32: +@@ -149,7 +149,7 @@ int LiftoffAssembler::PrepareStackFrame() { + nop(); + return offset; + } +- ++/* + void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params, + int stack_param_delta) { + UseScratchRegisterScope temps(this); +@@ -172,7 +172,7 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params, + subl(fp, stack_param_delta * 8, sp); + Pop(ra, fp); + } +- ++*/ + void LiftoffAssembler::PatchPrepareStackFrame(int offset, int frame_size) { + // We can't run out of space, just pass anything big enough to not cause the + // assembler to try to grow the buffer. +@@ -205,7 +205,14 @@ int LiftoffAssembler::SlotSizeForType(ValueType type) { + } + + bool LiftoffAssembler::NeedsAlignment(ValueType type) { +- return type.kind() == ValueType::kS128 || type.is_reference_type(); ++ switch (type.kind()) { ++ case ValueType::kS128: ++ return true; ++ default: ++ // No alignment because all other types are kStackSlotSize. 
++ return false; ++ } ++ + } + + void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value, +@@ -255,21 +262,21 @@ void LiftoffAssembler::FillInstanceInto(Register dst) { + + void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr, + Register offset_reg, +- int32_t offset_imm, ++ uint32_t offset_imm, + LiftoffRegList pinned) { + DCHECK_GE(offset_imm, 0); + STATIC_ASSERT(kTaggedSize == kInt64Size); + Load(LiftoffRegister(dst), src_addr, offset_reg, + static_cast(offset_imm), LoadType::kI64Load, pinned); + } +- ++/* + void LiftoffAssembler::StoreTaggedPointer(Register dst_addr, + int32_t offset_imm, + LiftoffRegister src, + LiftoffRegList pinned) { + bailout(kRefTypes, "GlobalSet"); + } +- ++*/ + void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr, + Register offset_reg, uint32_t offset_imm, + LoadType type, LiftoffRegList pinned, +@@ -382,38 +389,37 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg, + } + + void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg, +- uint32_t offset_imm, LiftoffRegister value, ++ uint32_t offset_imm, + LiftoffRegister result, StoreType type) { + bailout(kAtomics, "AtomicAdd"); + } + + void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg, +- uint32_t offset_imm, LiftoffRegister value, ++ uint32_t offset_imm, + LiftoffRegister result, StoreType type) { + bailout(kAtomics, "AtomicSub"); + } + + void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg, +- uint32_t offset_imm, LiftoffRegister value, ++ uint32_t offset_imm, + LiftoffRegister result, StoreType type) { + bailout(kAtomics, "AtomicAnd"); + } + + void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg, +- uint32_t offset_imm, LiftoffRegister value, ++ uint32_t offset_imm, + LiftoffRegister result, StoreType type) { + bailout(kAtomics, "AtomicOr"); + } + + void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg, +- uint32_t offset_imm, LiftoffRegister value, ++ uint32_t offset_imm, + LiftoffRegister result, StoreType type) { + bailout(kAtomics, "AtomicXor"); + } + + void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg, + uint32_t offset_imm, +- LiftoffRegister value, + LiftoffRegister result, StoreType type) { + bailout(kAtomics, "AtomicExchange"); + } +@@ -434,7 +440,7 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst, + MemOperand src(fp, kSystemPointerSize * (caller_slot_idx + 1)); + liftoff::Load(this, dst, src, type); + } +- ++/* + void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src, + uint32_t caller_slot_idx, + ValueType type) { +@@ -446,7 +452,7 @@ void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset, + ValueType type) { + liftoff::Load(this, dst, MemOperand(sp, offset), type); + } +- ++*/ + void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset, + ValueType type) { + DCHECK_NE(dst_offset, src_offset); +@@ -475,8 +481,8 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) { + Stw(reg.gp(), dst); + break; + case ValueType::kI64: +- case ValueType::kRef: +- case ValueType::kOptRef: ++// case ValueType::kRef: ++// case ValueType::kOptRef: + Stl(reg.gp(), dst); + break; + case ValueType::kF32: +@@ -504,8 +510,9 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) { + break; + } + case ValueType::kI64: +- case ValueType::kRef: +- case ValueType::kOptRef: { ++// case ValueType::kRef: ++// case ValueType::kOptRef: ++ { 
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {}); + TurboAssembler::li(tmp.gp(), value.to_i64()); + Stl(tmp.gp(), dst); +@@ -525,8 +532,8 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) { + Ldw(reg.gp(), src); + break; + case ValueType::kI64: +- case ValueType::kRef: +- case ValueType::kOptRef: ++// case ValueType::kRef: ++// case ValueType::kOptRef: + Ldl(reg.gp(), src); + break; + case ValueType::kF32: +@@ -686,7 +693,7 @@ I32_BINOP(xor, xor_ins) + #undef I32_BINOP + + #define I32_BINOP_I(name, instruction) \ +- void LiftoffAssembler::emit_i32_##name##i(Register dst, Register lhs, \ ++ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \ + int32_t imm) { \ + instruction(dst, lhs, Operand(imm)); \ + } +@@ -718,9 +725,10 @@ bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) { + Register amount) { \ + instruction(dst, src, amount); \ + } ++ + #define I32_SHIFTOP_I(name, instruction) \ + I32_SHIFTOP(name, instruction) \ +- void LiftoffAssembler::emit_i32_##name##i(Register dst, Register src, \ ++ void LiftoffAssembler::emit_i32_##name(Register dst, Register src, \ + int amount) { \ + instruction(dst, src, (amount & 31)); \ + } +@@ -789,7 +797,7 @@ I64_BINOP(xor, xor_ins) + #undef I64_BINOP + + #define I64_BINOP_I(name, instruction) \ +- void LiftoffAssembler::emit_i64_##name##i( \ ++ void LiftoffAssembler::emit_i64_##name( \ + LiftoffRegister dst, LiftoffRegister lhs, int32_t imm) { \ + instruction(dst.gp(), lhs.gp(), Operand(imm)); \ + } +@@ -810,7 +818,7 @@ I64_BINOP_I(xor, Xor) + } + #define I64_SHIFTOP_I(name, instruction) \ + I64_SHIFTOP(name, instruction) \ +- void LiftoffAssembler::emit_i64_##name##i(LiftoffRegister dst, \ ++ void LiftoffAssembler::emit_i64_##name(LiftoffRegister dst, \ + LiftoffRegister src, int amount) { \ + instruction(src.gp(), (amount & 63), dst.gp()); \ + } +@@ -1271,6 +1279,7 @@ inline FPUCondition ConditionToConditionCmpFPU(Condition condition, + } + UNREACHABLE(); + } ++ + //SKTODO + #if 0 + inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst, +@@ -1352,7 +1361,7 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst, + + bind(&cont); + } +- ++/* + bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition, + LiftoffRegister true_value, + LiftoffRegister false_value, +@@ -1476,7 +1485,7 @@ void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst, + vshf_b(dst_msa, kSimd128RegZero, lhs_msa); + #endif + } +- ++*/ + void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst, + LiftoffRegister src) { + UNREACHABLE(); +@@ -1514,7 +1523,7 @@ void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst, + // TurboAssembler::Move(kScratchReg, src.fp()); + // fill_d(dst.fp().toW(), kScratchReg); + } +- ++/* + void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs, + LiftoffRegister rhs) { + UNREACHABLE(); +@@ -1813,13 +1822,13 @@ void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst, + UNREACHABLE(); + // srli_b(dst.fp().toW(), lhs.fp().toW(), rhs & 7); + } +- ++*/ + void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs, + LiftoffRegister rhs) { + UNREACHABLE(); + // addv_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); + } +- ++/* + void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst, + LiftoffRegister lhs, + LiftoffRegister rhs) { +@@ -1833,13 +1842,13 @@ void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst, + UNREACHABLE(); + // adds_u_b(dst.fp().toW(), 
lhs.fp().toW(), rhs.fp().toW()); + } +- ++*/ + void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs, + LiftoffRegister rhs) { + UNREACHABLE(); + // subv_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); + } +- ++/* + void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst, + LiftoffRegister lhs, + LiftoffRegister rhs) { +@@ -1853,13 +1862,13 @@ void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst, + UNREACHABLE(); + // subs_u_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); + } +- ++*/ + void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs, + LiftoffRegister rhs) { + UNREACHABLE(); + // mulv_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); + } +- ++/* + void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst, + LiftoffRegister lhs, + LiftoffRegister rhs) { +@@ -1965,13 +1974,13 @@ void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst, + UNREACHABLE(); + // srli_h(dst.fp().toW(), lhs.fp().toW(), rhs & 15); + } +- ++*/ + void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs, + LiftoffRegister rhs) { + UNREACHABLE(); + // addv_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); + } +- ++/* + void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst, + LiftoffRegister lhs, + LiftoffRegister rhs) { +@@ -1985,13 +1994,13 @@ void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst, + UNREACHABLE(); + // adds_u_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); + } +- ++*/ + void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs, + LiftoffRegister rhs) { + UNREACHABLE(); + // subv_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); + } +- ++/* + void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst, + LiftoffRegister lhs, + LiftoffRegister rhs) { +@@ -2005,13 +2014,13 @@ void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst, + UNREACHABLE(); + // subs_u_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); + } +- ++*/ + void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs, + LiftoffRegister rhs) { + UNREACHABLE(); + // mulv_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); + } +- ++/* + void LiftoffAssembler::emit_i16x8_min_s(LiftoffRegister dst, + LiftoffRegister lhs, + LiftoffRegister rhs) { +@@ -2115,7 +2124,7 @@ void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst, + UNREACHABLE(); + // srli_w(dst.fp().toW(), lhs.fp().toW(), rhs & 31); + } +- ++*/ + void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs, + LiftoffRegister rhs) { + UNREACHABLE(); +@@ -2133,7 +2142,7 @@ void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs, + UNREACHABLE(); + // mulv_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); + } +- ++/* + void LiftoffAssembler::emit_i32x4_min_s(LiftoffRegister dst, + LiftoffRegister lhs, + LiftoffRegister rhs) { +@@ -2209,7 +2218,7 @@ void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst, + UNREACHABLE(); + // srli_d(dst.fp().toW(), lhs.fp().toW(), rhs & 63); + } +- ++*/ + void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs, + LiftoffRegister rhs) { + UNREACHABLE(); +@@ -2227,7 +2236,7 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs, + UNREACHABLE(); + // mulv_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); + } +- ++/* + void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst, + LiftoffRegister src) { + UNREACHABLE(); +@@ -2264,7 +2273,7 @@ 
bool LiftoffAssembler::emit_f32x4_nearest_int(LiftoffRegister dst, + LiftoffRegister src) { + UNREACHABLE(); + } +- ++*/ + void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs, + LiftoffRegister rhs) { + UNREACHABLE(); +@@ -2282,7 +2291,7 @@ void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs, + UNREACHABLE(); + // fmul_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); + } +- ++/* + void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs, + LiftoffRegister rhs) { + UNREACHABLE(); +@@ -2381,7 +2390,7 @@ bool LiftoffAssembler::emit_f64x2_nearest_int(LiftoffRegister dst, + LiftoffRegister src) { + UNREACHABLE(); + } +- ++*/ + void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs, + LiftoffRegister rhs) { + UNREACHABLE(); +@@ -2399,7 +2408,7 @@ void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs, + UNREACHABLE(); + // fmul_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW()); + } +- ++/* + void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs, + LiftoffRegister rhs) { + UNREACHABLE(); +@@ -2628,7 +2637,7 @@ void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst, + // xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); + // asub_s_w(dst.fp().toW(), src.fp().toW(), kSimd128RegZero); + } +- ++*/ + void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst, + LiftoffRegister lhs, + uint8_t imm_lane_idx) { +@@ -2884,11 +2893,11 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig, + void LiftoffAssembler::CallNativeWasmCode(Address addr) { + Call(addr, RelocInfo::WASM_CALL); + } +- ++/* + void LiftoffAssembler::TailCallNativeWasmCode(Address addr) { + Jump(addr, RelocInfo::WASM_CALL); + } +- ++*/ + void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig, + compiler::CallDescriptor* call_descriptor, + Register target) { +@@ -2899,7 +2908,7 @@ void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig, + Call(target); + } + } +- ++/* + void LiftoffAssembler::TailCallIndirect(Register target) { + if (target == no_reg) { + Pop(kScratchReg); +@@ -2908,7 +2917,7 @@ void LiftoffAssembler::TailCallIndirect(Register target) { + Jump(target); + } + } +- ++*/ + void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) { + // A direct call to a wasm runtime stub defined in this module. + // Just encode the stub index. This will be patched at relocation. 
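// Editorial illustration, not part of the patch: the I32_BINOP_I / I32_SHIFTOP_I /
// I64_BINOP_I / I64_SHIFTOP_I macros rewritten earlier in this liftoff-assembler
// hunk drop the trailing `##i` token-paste. Shown on a toy macro, the effect is
// that the immediate-operand emitter loses its `i` suffix and instead becomes an
// overload of the plain name, which is presumably what the older LiftoffAssembler
// interface in this snapshot declares.
#define DECLARE_IMM_VARIANT_NEW(name) void emit_i32_##name##i(int lhs, int imm);
#define DECLARE_IMM_VARIANT_OLD(name) void emit_i32_##name(int lhs, int imm);

DECLARE_IMM_VARIANT_NEW(add)  // expands to: void emit_i32_addi(int lhs, int imm);
DECLARE_IMM_VARIANT_OLD(add)  // expands to: void emit_i32_add(int lhs, int imm);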
+-- +2.39.3 + diff --git a/0001-port-chromium_qt-to-loongarch64.patch b/0001-port-chromium_qt-to-loongarch64.patch new file mode 100644 index 0000000..1fc2f0b --- /dev/null +++ b/0001-port-chromium_qt-to-loongarch64.patch @@ -0,0 +1,1712 @@ +From d8ac894394be259cc259dd93fea1592a30de18a0 Mon Sep 17 00:00:00 2001 +From: CaoHuixiong +Date: Tue, 12 Jan 2021 09:17:30 +0000 +Subject: [PATCH 01/13] port chromium_qt to loongarch64 + +--- + src/3rdparty/chromium/DEPS | 28 ++++----- + .../page_allocator_constants.h | 4 +- + .../chromium/base/process/launch_posix.cc | 2 +- + .../double-conversion/utils.h | 2 +- + src/3rdparty/chromium/build/build_config.h | 5 ++ + .../chromium/build/config/compiler/BUILD.gn | 2 +- + .../chromium/build/toolchain/linux/BUILD.gn | 26 ++++++++ + .../animation/worklet_animation_unittest.cc | 8 +-- + .../crash/core/app/breakpad_linux.cc | 3 + + .../browser/paint_preview_client.cc | 2 +- + .../update_client/update_query_params.cc | 4 ++ + .../extensions/common/api/runtime.json | 2 +- + .../chromium/media/base/media_serializers.h | 2 +- + src/3rdparty/chromium/sandbox/features.gni | 3 +- + src/3rdparty/chromium/sandbox/linux/BUILD.gn | 1 + + .../linux/bpf_dsl/linux_syscall_ranges.h | 9 ++- + .../sandbox/linux/bpf_dsl/policy_compiler.cc | 4 ++ + .../sandbox/linux/bpf_dsl/seccomp_macros.h | 41 ++++++++++++ + .../bpf_dsl_seccomp_unittest.cc | 4 +- + .../seccomp-bpf-helpers/baseline_policy.cc | 6 +- + .../baseline_policy_unittest.cc | 6 +- + .../syscall_parameters_restrictions.cc | 6 +- + .../linux/seccomp-bpf-helpers/syscall_sets.cc | 63 ++++++++++--------- + .../linux/seccomp-bpf-helpers/syscall_sets.h | 10 +-- + .../sandbox/linux/seccomp-bpf/syscall.cc | 38 ++++++++++- + .../sandbox/linux/seccomp-bpf/trap.cc | 2 +- + .../sandbox/linux/services/credentials.cc | 4 +- + .../linux/services/syscall_wrappers.cc | 2 +- + .../linux/syscall_broker/broker_process.cc | 20 +++--- + .../syscall_broker/broker_process_unittest.cc | 3 + + .../linux/system_headers/linux_seccomp.h | 6 ++ + .../linux/system_headers/linux_signal.h | 4 +- + .../linux/system_headers/linux_syscalls.h | 4 ++ + .../sandbox/linux/bpf_broker_policy_linux.cc | 2 +- + .../sandbox/linux/bpf_cdm_policy_linux.cc | 2 +- + .../linux/bpf_cros_amd_gpu_policy_linux.cc | 2 +- + .../sandbox/linux/bpf_gpu_policy_linux.cc | 2 +- + .../linux/bpf_renderer_policy_linux.cc | 2 +- + .../linux/sandbox_seccomp_bpf_linux.cc | 4 +- + src/3rdparty/chromium/skia/BUILD.gn | 2 + + .../commands/delete_selection_command.cc | 4 +- + .../blink/renderer/platform/heap/asm/BUILD.gn | 2 + + .../blink/renderer/platform/wtf/hash_table.h | 5 ++ + .../crashpad/client/crashpad_info_note.S | 2 +- + .../minidump/minidump_misc_info_writer.cc | 2 + + .../crashpad/snapshot/capture_memory.cc | 2 +- + .../crashpad/snapshot/cpu_architecture.h | 5 +- + .../crashpad/crashpad/snapshot/cpu_context.cc | 1 + + .../snapshot/crashpad_info_size_test_note.S | 2 +- + .../linux/exception_snapshot_linux.cc | 3 + + .../snapshot/linux/process_reader_linux.cc | 2 + + .../crashpad/snapshot/linux/signal_context.h | 1 + + .../snapshot/linux/system_snapshot_linux.cc | 7 +++ + .../snapshot/linux/thread_snapshot_linux.cc | 1 + + .../snapshot/linux/thread_snapshot_linux.h | 2 +- + .../crashpad/crashpad/util/linux/ptracer.cc | 8 ++- + .../crashpad/util/linux/thread_info.h | 14 ++--- + .../chromium/third_party/libvpx/BUILD.gn | 2 + + .../tcmalloc/chromium/src/base/basictypes.h | 2 + + .../chromium/src/base/spinlock_linux-inl.h | 1 + + src/3rdparty/chromium/ui/base/x/BUILD.gn | 2 + + 
src/3rdparty/chromium/ui/gl/BUILD.gn | 3 + + .../chromium/ui/views/layout/layout_types.h | 4 +- + 63 files changed, 305 insertions(+), 114 deletions(-) + +diff --git a/src/3rdparty/chromium/DEPS b/src/3rdparty/chromium/DEPS +index 530e4a466..517526264 100644 +--- a/src/3rdparty/chromium/DEPS ++++ b/src/3rdparty/chromium/DEPS +@@ -85,7 +85,7 @@ vars = { + + # Check out and download nacl by default. This can be disabled e.g. with + # custom_vars. +- 'checkout_nacl': True, ++ 'checkout_nacl': False, + + # By default, do not check out src-internal. This can be overridden e.g. with + # custom_vars. +@@ -3582,19 +3582,19 @@ hooks = [ + 'src/tools', + ], + }, +- { +- # Verify that we have the right GN binary and force-install it if we +- # don't, in order to work around crbug.com/944367. +- # TODO(crbug.com/944667) Get rid of this when cipd is ensuring we +- # have the right binary more carefully and we no longer need this. +- 'name': 'ensure_gn_version', +- 'pattern': '.', +- 'action': [ +- 'python', +- 'src/buildtools/ensure_gn_version.py', +- Var('gn_version') +- ], +- }, ++# { ++# # Verify that we have the right GN binary and force-install it if we ++# # don't, in order to work around crbug.com/944367. ++# # TODO(crbug.com/944667) Get rid of this when cipd is ensuring we ++# # have the right binary more carefully and we no longer need this. ++# 'name': 'ensure_gn_version', ++# 'pattern': '.', ++# 'action': [ ++# 'python', ++# 'src/buildtools/ensure_gn_version.py', ++# Var('gn_version') ++# ], ++# }, + { + # This downloads binaries for Native Client's newlib toolchain. + # Done in lieu of building the toolchain from scratch as it can take +diff --git a/src/3rdparty/chromium/base/allocator/partition_allocator/partition_alloc_constants.h b/src/3rdparty/chromium/base/allocator/partition_allocator/partition_alloc_constants.h +index 555700a7d..b4b2a0ab0 100644 +--- a/src/3rdparty/chromium/base/allocator/partition_allocator/partition_alloc_constants.h ++++ b/src/3rdparty/chromium/base/allocator/partition_allocator/partition_alloc_constants.h +@@ -35,7 +35,7 @@ + // other constant values, we pack _all_ `PartitionRootGeneric::Alloc` sizes + // perfectly up against the end of a system page. 
+ +-#if defined(_MIPS_ARCH_LOONGSON) ++#if defined(_MIPS_ARCH_LOONGSON) || defined(ARCH_CPU_LA64) + static const size_t kPartitionPageShift = 16; // 64 KiB + #elif defined(ARCH_CPU_PPC64) + static const size_t kPartitionPageShift = 18; // 256 KiB +diff --git a/src/3rdparty/chromium/base/allocator/partition_allocator/page_allocator_constants.h b/src/3rdparty/chromium/base/allocator/partition_allocator/page_allocator_constants.h +index 555700a7d..b4b2a0ab0 100644 +--- a/src/3rdparty/chromium/base/allocator/partition_allocator/page_allocator_constants.h ++++ b/src/3rdparty/chromium/base/allocator/partition_allocator/page_allocator_constants.h +@@ -12,7 +12,7 @@ + namespace base { + #if defined(OS_WIN) || defined(ARCH_CPU_PPC64) + static constexpr size_t kPageAllocationGranularityShift = 16; // 64KB +-#elif defined(_MIPS_ARCH_LOONGSON) ++#elif defined(_MIPS_ARCH_LOONGSON) || defined(ARCH_CPU_LA64) + static constexpr size_t kPageAllocationGranularityShift = 14; // 16KB + #else + static constexpr size_t kPageAllocationGranularityShift = 12; // 4KB +@@ -24,7 +24,7 @@ static constexpr size_t kPageAllocationGranularityOffsetMask = + static constexpr size_t kPageAllocationGranularityBaseMask = + ~kPageAllocationGranularityOffsetMask; + +-#if defined(_MIPS_ARCH_LOONGSON) ++#if defined(_MIPS_ARCH_LOONGSON) || defined(ARCH_CPU_LA64) + static constexpr size_t kSystemPageSize = 16384; + #elif defined(ARCH_CPU_PPC64) + // Modern ppc64 systems support 4KB and 64KB page sizes. +diff --git a/src/3rdparty/chromium/base/process/launch_posix.cc b/src/3rdparty/chromium/base/process/launch_posix.cc +index 9b7573fdc..fffa3c0e6 100644 +--- a/src/3rdparty/chromium/base/process/launch_posix.cc ++++ b/src/3rdparty/chromium/base/process/launch_posix.cc +@@ -702,7 +702,7 @@ NOINLINE pid_t CloneAndLongjmpInChild(unsigned long flags, + // fork-like behavior. + alignas(16) char stack_buf[PTHREAD_STACK_MIN]; + #if defined(ARCH_CPU_X86_FAMILY) || defined(ARCH_CPU_ARM_FAMILY) || \ +- defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_S390_FAMILY) || \ ++ defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_S390_FAMILY) || defined(ARCH_CPU_LA64) || \ + defined(ARCH_CPU_PPC64_FAMILY) + // The stack grows downward. + void* stack = stack_buf + sizeof(stack_buf); +diff --git a/src/3rdparty/chromium/base/third_party/double_conversion/double-conversion/utils.h b/src/3rdparty/chromium/base/third_party/double_conversion/double-conversion/utils.h +index 471c3da84..163ef3adf 100644 +--- a/src/3rdparty/chromium/base/third_party/double_conversion/double-conversion/utils.h ++++ b/src/3rdparty/chromium/base/third_party/double_conversion/double-conversion/utils.h +@@ -99,7 +99,7 @@ int main(int argc, char** argv) { + #if defined(_M_X64) || defined(__x86_64__) || \ + defined(__ARMEL__) || defined(__avr32__) || defined(_M_ARM) || defined(_M_ARM64) || \ + defined(__hppa__) || defined(__ia64__) || \ +- defined(__mips__) || \ ++ defined(__mips__) || defined(__loongarch64) || \ + defined(__nios2__) || \ + defined(__powerpc__) || defined(__ppc__) || defined(__ppc64__) || \ + defined(_POWER) || defined(_ARCH_PPC) || defined(_ARCH_PPC64) || \ +diff --git a/src/3rdparty/chromium/build/build_config.h b/src/3rdparty/chromium/build/build_config.h +index d3cdd2db4..d97f8dbf2 100644 +--- a/src/3rdparty/chromium/build/build_config.h ++++ b/src/3rdparty/chromium/build/build_config.h +@@ -43,6 +43,7 @@ + #define OS_LINUX 1 + // include a system header to pull in features.h for glibc/uclibc macros. 
+ #include ++#include + #if defined(__GLIBC__) && !defined(__UCLIBC__) + // we really are using glibc, not uClibc pretending to be glibc + #define LIBC_GLIBC 1 +@@ -129,6 +130,10 @@ + #define ARCH_CPU_PPC64 1 + #define ARCH_CPU_64_BITS 1 + #define ARCH_CPU_LITTLE_ENDIAN 1 ++#elif defined(__loongarch64) ++#define ARCH_CPU_LA64 1 ++#define ARCH_CPU_64_BITS 1 ++#define ARCH_CPU_LITTLE_ENDIAN 1 + #elif defined(__ARMEL__) + #define ARCH_CPU_ARM_FAMILY 1 + #define ARCH_CPU_ARMEL 1 +diff --git a/src/3rdparty/chromium/build/config/compiler/BUILD.gn b/src/3rdparty/chromium/build/config/compiler/BUILD.gn +index e6cc686..6a98d80 100644 +--- a/src/3rdparty/chromium/build/config/compiler/BUILD.gn ++++ b/src/3rdparty/chromium/build/config/compiler/BUILD.gn +@@ -242,6 +241,9 @@ config("default_include_dirs") { + config("compiler") { + asmflags = [] + cflags = [] ++ if (current_cpu == "la64") { ++ cflags = ["-mcmodel=large"] ++ } + cflags_c = [] + cflags_cc = [] + cflags_objc = [] +diff --git a/src/3rdparty/chromium/build/toolchain/linux/BUILD.gn b/src/3rdparty/chromium/build/toolchain/linux/BUILD.gn +index fa8b17e9d..f67bebc02 100644 +--- a/src/3rdparty/chromium/build/toolchain/linux/BUILD.gn ++++ b/src/3rdparty/chromium/build/toolchain/linux/BUILD.gn +@@ -185,6 +185,13 @@ clang_toolchain("clang_mips64el") { + } + } + ++clang_toolchain("clang_la64") { ++ toolchain_args = { ++ current_cpu = "la64" ++ current_os = "linux" ++ } ++} ++ + gcc_toolchain("mipsel") { + toolprefix = "mipsel-linux-gnu-" + +@@ -223,6 +230,25 @@ gcc_toolchain("mips64el") { + } + } + ++gcc_toolchain("la64") { ++ toolprefix = "" ++ ++ cc = "${toolprefix}gcc" ++ cxx = "${toolprefix}g++" ++ ar = "${toolprefix}ar" ++ ld = cxx ++ readelf = "${toolprefix}readelf" ++ nm = "${toolprefix}nm" ++ ++ toolchain_args = { ++ cc_wrapper = "" ++ current_cpu = "la64" ++ current_os = "linux" ++ is_clang = false ++ use_goma = false ++ } ++} ++ + clang_toolchain("clang_s390x") { + toolchain_args = { + current_cpu = "s390x" +diff --git a/src/3rdparty/chromium/cc/animation/worklet_animation_unittest.cc b/src/3rdparty/chromium/cc/animation/worklet_animation_unittest.cc +index e827c3740..50a169fd6 100644 +--- a/src/3rdparty/chromium/cc/animation/worklet_animation_unittest.cc ++++ b/src/3rdparty/chromium/cc/animation/worklet_animation_unittest.cc +@@ -69,15 +69,15 @@ class MockScrollTimeline : public ScrollTimeline { + ~MockScrollTimeline() override = default; + }; + +-TEST_F(WorkletAnimationTest, NonImplInstanceDoesNotTickKeyframe) { ++/*TEST_F(WorkletAnimationTest, NonImplInstanceDoesNotTickKeyframe) { + std::unique_ptr effect = + std::make_unique(worklet_animation_.get()); + MockKeyframeEffect* mock_effect = effect.get(); + + scoped_refptr worklet_animation = +- WrapRefCounted(new WorkletAnimation( ++ base::WrapRefCounted(new WorkletAnimation( + 1, worklet_animation_id_, "test_name", 1, nullptr, nullptr, +- false /* not impl instance*/, std::move(effect))); ++ false / not impl instance/, std::move(effect))); + + EXPECT_CALL(*mock_effect, Tick(_)).Times(0); + +@@ -85,7 +85,7 @@ TEST_F(WorkletAnimationTest, NonImplInstanceDoesNotTickKeyframe) { + state.local_times.push_back(base::TimeDelta::FromSecondsD(1)); + worklet_animation->SetOutputState(state); + worklet_animation->Tick(base::TimeTicks()); +-} ++}*/ + + TEST_F(WorkletAnimationTest, LocalTimeIsUsedWhenTicking) { + AttachWorkletAnimation(); +diff --git a/src/3rdparty/chromium/components/crash/core/app/breakpad_linux.cc b/src/3rdparty/chromium/components/crash/core/app/breakpad_linux.cc +index 
192b0a7f1..34fddc8d5 100644 +--- a/src/3rdparty/chromium/components/crash/core/app/breakpad_linux.cc ++++ b/src/3rdparty/chromium/components/crash/core/app/breakpad_linux.cc +@@ -1030,11 +1030,14 @@ class NonBrowserCrashHandler : public google_breakpad::CrashGenerationClient { + bool RequestDump(const void* crash_context, + size_t crash_context_size) override { + int fds[2] = { -1, -1 }; ++// TODO:LA64 ++#ifndef ARCH_CPU_LA64 + if (sys_socketpair(AF_UNIX, SOCK_STREAM, 0, fds) < 0) { + static const char msg[] = "Failed to create socket for crash dumping.\n"; + WriteLog(msg, sizeof(msg) - 1); + return false; + } ++#endif + + // Start constructing the message to send to the browser. + char b; // Dummy variable for sys_read below. +diff --git a/src/3rdparty/chromium/components/paint_preview/browser/paint_preview_client.cc b/src/3rdparty/chromium/components/paint_preview/browser/paint_preview_client.cc +index 3728efa1f..b76dfe345 100644 +--- a/src/3rdparty/chromium/components/paint_preview/browser/paint_preview_client.cc ++++ b/src/3rdparty/chromium/components/paint_preview/browser/paint_preview_client.cc +@@ -106,7 +106,7 @@ PaintPreviewClient::PaintPreviewData& PaintPreviewClient::PaintPreviewData:: + operator=(PaintPreviewData&& rhs) = default; + + PaintPreviewClient::PaintPreviewData::PaintPreviewData( +- PaintPreviewData&& other) noexcept = default; ++ PaintPreviewData&& other) /*noexcept*/ = default; + + PaintPreviewClient::CreateResult::CreateResult(base::File file, + base::File::Error error) +diff --git a/src/3rdparty/chromium/components/update_client/update_query_params.cc b/src/3rdparty/chromium/components/update_client/update_query_params.cc +index 56aea40c9..fea279222 100644 +--- a/src/3rdparty/chromium/components/update_client/update_query_params.cc ++++ b/src/3rdparty/chromium/components/update_client/update_query_params.cc +@@ -58,6 +58,8 @@ const char kArch[] = + "mipsel"; + #elif defined(__powerpc64__) + "ppc64"; ++#elif defined(__loongarch64) ++ "la64"; + #else + #error "unknown arch" + #endif +@@ -128,6 +130,8 @@ const char* UpdateQueryParams::GetNaclArch() { + return "mips64"; + #elif defined(ARCH_CPU_PPC64) + return "ppc64"; ++#elif defined(ARCH_CPU_LA64) ++ return "la64"; + #else + // NOTE: when adding new values here, please remember to update the + // comment in the .h file about possible return values from this function. +diff --git a/src/3rdparty/chromium/extensions/common/api/runtime.json b/src/3rdparty/chromium/extensions/common/api/runtime.json +index 5b009eabd..bd5d13335 100644 +--- a/src/3rdparty/chromium/extensions/common/api/runtime.json ++++ b/src/3rdparty/chromium/extensions/common/api/runtime.json +@@ -73,7 +73,7 @@ + { + "id": "PlatformArch", + "type": "string", +- "enum": ["arm", "arm64", "x86-32", "x86-64", "mips", "mips64"], ++ "enum": ["arm", "arm64", "x86-32", "x86-64", "mips", "mips64", "la64"], + "description": "The machine's processor architecture." + }, + { +diff --git a/src/3rdparty/chromium/media/base/media_serializers.h b/src/3rdparty/chromium/media/base/media_serializers.h +index 6333c4417..0e3f223af 100644 +--- a/src/3rdparty/chromium/media/base/media_serializers.h ++++ b/src/3rdparty/chromium/media/base/media_serializers.h +@@ -377,7 +377,7 @@ template <> + struct MediaSerializer { + static base::Value Serialize(const base::Location& value) { + base::Value result(base::Value::Type::DICTIONARY); +- FIELD_SERIALIZE("file", value.file_name()); ++ FIELD_SERIALIZE("file", value.file_name() ? 
value.file_name() : "unknown"); + FIELD_SERIALIZE("line", value.line_number()); + return result; + } +diff --git a/src/3rdparty/chromium/sandbox/features.gni b/src/3rdparty/chromium/sandbox/features.gni +index 09280d35f..d87ee96e1 100644 +--- a/src/3rdparty/chromium/sandbox/features.gni ++++ b/src/3rdparty/chromium/sandbox/features.gni +@@ -11,6 +11,7 @@ import("//build/config/nacl/config.gni") + use_seccomp_bpf = (is_linux || is_android) && + (current_cpu == "x86" || current_cpu == "x64" || + current_cpu == "arm" || current_cpu == "arm64" || +- current_cpu == "mipsel" || current_cpu == "mips64el") ++ current_cpu == "mipsel" || current_cpu == "mips64el" || ++ current_cpu == "la64") + + use_seccomp_bpf = use_seccomp_bpf || is_nacl_nonsfi +diff --git a/src/3rdparty/chromium/sandbox/linux/BUILD.gn b/src/3rdparty/chromium/sandbox/linux/BUILD.gn +index c27351f9a..ab45fc381 100644 +--- a/src/3rdparty/chromium/sandbox/linux/BUILD.gn ++++ b/src/3rdparty/chromium/sandbox/linux/BUILD.gn +@@ -422,6 +422,7 @@ source_set("sandbox_services_headers") { + "system_headers/mips_linux_syscalls.h", + "system_headers/x86_32_linux_syscalls.h", + "system_headers/x86_64_linux_syscalls.h", ++ "system_headers/la64_linux_syscalls.h", + ] + } + +diff --git a/src/3rdparty/chromium/sandbox/linux/bpf_dsl/linux_syscall_ranges.h b/src/3rdparty/chromium/sandbox/linux/bpf_dsl/linux_syscall_ranges.h +index 313511f22..062825902 100644 +--- a/src/3rdparty/chromium/sandbox/linux/bpf_dsl/linux_syscall_ranges.h ++++ b/src/3rdparty/chromium/sandbox/linux/bpf_dsl/linux_syscall_ranges.h +@@ -42,7 +42,14 @@ + #define MAX_PUBLIC_SYSCALL (MIN_SYSCALL + __NR_O32_Linux_syscalls) + #define MAX_SYSCALL MAX_PUBLIC_SYSCALL + +-#elif defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_64_BITS) ++#elif defined(ARCH_CPU_LA64) ++ ++#include ++#define MIN_SYSCALL 0u ++#define MAX_PUBLIC_SYSCALL __NR_syscalls ++#define MAX_SYSCALL MAX_PUBLIC_SYSCALL ++ ++#elif (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_64_BITS)) + + #include // for __NR_64_Linux and __NR_64_Linux_syscalls + #define MIN_SYSCALL __NR_64_Linux +diff --git a/src/3rdparty/chromium/sandbox/linux/bpf_dsl/policy_compiler.cc b/src/3rdparty/chromium/sandbox/linux/bpf_dsl/policy_compiler.cc +index 347304889..ef8500ff4 100644 +--- a/src/3rdparty/chromium/sandbox/linux/bpf_dsl/policy_compiler.cc ++++ b/src/3rdparty/chromium/sandbox/linux/bpf_dsl/policy_compiler.cc +@@ -141,6 +141,10 @@ CodeGen::Node PolicyCompiler::AssemblePolicy() { + } + + CodeGen::Node PolicyCompiler::CheckArch(CodeGen::Node passed) { ++// TODO:LA64: Kernel doesn't support AUDIT? ++#ifdef ARCH_CPU_LA64 ++ return passed; ++#endif + // If the architecture doesn't match SECCOMP_ARCH, disallow the + // system call. 
+ return gen_.MakeInstruction( +diff --git a/src/3rdparty/chromium/sandbox/linux/bpf_dsl/seccomp_macros.h b/src/3rdparty/chromium/sandbox/linux/bpf_dsl/seccomp_macros.h +index 1a407b952..f500fab43 100644 +--- a/src/3rdparty/chromium/sandbox/linux/bpf_dsl/seccomp_macros.h ++++ b/src/3rdparty/chromium/sandbox/linux/bpf_dsl/seccomp_macros.h +@@ -346,6 +346,47 @@ struct regs_struct { + #define SECCOMP_PT_PARM4(_regs) (_regs).regs[3] + #define SECCOMP_PT_PARM5(_regs) (_regs).regs[4] + #define SECCOMP_PT_PARM6(_regs) (_regs).regs[5] ++ ++#elif defined(ARCH_CPU_LA64) ++struct regs_struct { ++ uint64_t gpr[32]; ++ uint64_t pc; ++}; ++ ++#define SECCOMP_ARCH AUDIT_ARCH_LOONGARCH64 ++ ++#define SECCOMP_REG(_ctx, _reg) ((_ctx)->uc_mcontext.__gregs[_reg]) ++ ++#define SECCOMP_RESULT(_ctx) SECCOMP_REG(_ctx, 4) ++#define SECCOMP_SYSCALL(_ctx) SECCOMP_REG(_ctx, 11) ++#define SECCOMP_IP(_ctx) (_ctx)->uc_mcontext.__pc ++#define SECCOMP_PARM1(_ctx) SECCOMP_REG(_ctx, 4) ++#define SECCOMP_PARM2(_ctx) SECCOMP_REG(_ctx, 5) ++#define SECCOMP_PARM3(_ctx) SECCOMP_REG(_ctx, 6) ++#define SECCOMP_PARM4(_ctx) SECCOMP_REG(_ctx, 7) ++#define SECCOMP_PARM5(_ctx) SECCOMP_REG(_ctx, 8) ++#define SECCOMP_PARM6(_ctx) SECCOMP_REG(_ctx, 9) ++ ++#define SECCOMP_NR_IDX (offsetof(struct arch_seccomp_data, nr)) ++#define SECCOMP_ARCH_IDX (offsetof(struct arch_seccomp_data, arch)) ++#define SECCOMP_IP_MSB_IDX \ ++ (offsetof(struct arch_seccomp_data, instruction_pointer) + 4) ++#define SECCOMP_IP_LSB_IDX \ ++ (offsetof(struct arch_seccomp_data, instruction_pointer) + 0) ++#define SECCOMP_ARG_MSB_IDX(nr) \ ++ (offsetof(struct arch_seccomp_data, args) + 8 * (nr) + 4) ++#define SECCOMP_ARG_LSB_IDX(nr) \ ++ (offsetof(struct arch_seccomp_data, args) + 8 * (nr) + 0) ++ ++#define SECCOMP_PT_RESULT(_regs) (_regs).regs[4] ++#define SECCOMP_PT_SYSCALL(_regs) (_regs).regs[11] ++#define SECCOMP_PT_IP(_regs) (_regs).pc ++#define SECCOMP_PT_PARM1(_regs) (_regs).regs[4] ++#define SECCOMP_PT_PARM2(_regs) (_regs).regs[5] ++#define SECCOMP_PT_PARM3(_regs) (_regs).regs[6] ++#define SECCOMP_PT_PARM4(_regs) (_regs).regs[7] ++#define SECCOMP_PT_PARM5(_regs) (_regs).regs[8] ++#define SECCOMP_PT_PARM6(_regs) (_regs).regs[9] + #else + #error Unsupported target platform + +diff --git a/src/3rdparty/chromium/sandbox/linux/integration_tests/bpf_dsl_seccomp_unittest.cc b/src/3rdparty/chromium/sandbox/linux/integration_tests/bpf_dsl_seccomp_unittest.cc +index d30e15560..dc18b6794 100644 +--- a/src/3rdparty/chromium/sandbox/linux/integration_tests/bpf_dsl_seccomp_unittest.cc ++++ b/src/3rdparty/chromium/sandbox/linux/integration_tests/bpf_dsl_seccomp_unittest.cc +@@ -1915,7 +1915,7 @@ BPF_TEST_C(SandboxBPF, PthreadBitMask, PthreadPolicyBitMask) { + // + // Depending on the architecture, this may modify regs, so the caller is + // responsible for committing these changes using PTRACE_SETREGS. +-#if !defined(__arm__) && !defined(__aarch64__) && !defined(__mips__) ++#if !defined(__arm__) && !defined(__aarch64__) && !defined(__mips__) && !defined(__loongarch64) + long SetSyscall(pid_t pid, regs_struct* regs, int syscall_number) { + #if defined(__arm__) + // On ARM, the syscall is changed using PTRACE_SET_SYSCALL. We cannot use the +@@ -1952,7 +1952,7 @@ SANDBOX_TEST(SandboxBPF, DISABLE_ON_TSAN(SeccompRetTrace)) { + + // This test is disabled on arm due to a kernel bug. 
+ // See https://code.google.com/p/chromium/issues/detail?id=383977 +-#if defined(__arm__) || defined(__aarch64__) ++#if defined(__arm__) || defined(__aarch64__) || defined(__loongarch64) + printf("This test is currently disabled on ARM32/64 due to a kernel bug."); + #elif defined(__mips__) + // TODO: Figure out how to support specificity of handling indirect syscalls +diff --git a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/baseline_policy.cc b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/baseline_policy.cc +index 712f9699a..615b8fa08 100644 +--- a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/baseline_policy.cc ++++ b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/baseline_policy.cc +@@ -165,7 +165,7 @@ ResultExpr EvaluateSyscallImpl(int fs_denied_errno, + return RestrictFcntlCommands(); + #endif + +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__loongarch64) + // fork() is never used as a system call (clone() is used instead), but we + // have seen it in fallback code on Android. + if (sysno == __NR_fork) { +@@ -210,7 +210,7 @@ ResultExpr EvaluateSyscallImpl(int fs_denied_errno, + } + + #if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined(__loongarch64) + if (sysno == __NR_mmap) + return RestrictMmapFlags(); + #endif +@@ -228,7 +228,7 @@ ResultExpr EvaluateSyscallImpl(int fs_denied_errno, + return RestrictPrctl(); + + #if defined(__x86_64__) || defined(__arm__) || defined(__mips__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined(__loongarch64) + if (sysno == __NR_socketpair) { + // Only allow AF_UNIX, PF_UNIX. Crash if anything else is seen. + static_assert(AF_UNIX == PF_UNIX, +diff --git a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc +index fc36187c9..fb35d5a3a 100644 +--- a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc ++++ b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc +@@ -215,7 +215,7 @@ BPF_TEST_C(BaselinePolicy, GetRandom, BaselinePolicy) { + } + + // Not all architectures can restrict the domain for socketpair(). 
+-#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) ++#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) || defined(__loongarch64) + BPF_DEATH_TEST_C(BaselinePolicy, + SocketpairWrongDomain, + DEATH_SEGV_MESSAGE(GetErrorMessageContentForTests()), +@@ -224,7 +224,7 @@ BPF_DEATH_TEST_C(BaselinePolicy, + ignore_result(socketpair(AF_INET, SOCK_STREAM, 0, sv)); + _exit(1); + } +-#endif // defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) ++#endif // defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) || defined(__loongarch64) + + BPF_TEST_C(BaselinePolicy, EPERM_open, BaselinePolicy) { + errno = 0; +@@ -288,7 +288,7 @@ TEST_BASELINE_SIGSYS(__NR_sysinfo) + TEST_BASELINE_SIGSYS(__NR_syslog) + TEST_BASELINE_SIGSYS(__NR_timer_create) + +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__loongarch64) + TEST_BASELINE_SIGSYS(__NR_eventfd) + TEST_BASELINE_SIGSYS(__NR_inotify_init) + TEST_BASELINE_SIGSYS(__NR_vserver) +diff --git a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc +index 5e0131ac4..d05762df0 100644 +--- a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc ++++ b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc +@@ -36,12 +36,12 @@ + #include + #include + #if defined(OS_LINUX) && !defined(OS_CHROMEOS) && !defined(__arm__) && \ +- !defined(__aarch64__) && !defined(PTRACE_GET_THREAD_AREA) ++ !defined(__aarch64__) && !defined(__loongarch64) && !defined(PTRACE_GET_THREAD_AREA) + // Also include asm/ptrace-abi.h since ptrace.h in older libc (for instance + // the one in Ubuntu 16.04 LTS) is missing PTRACE_GET_THREAD_AREA. + // asm/ptrace-abi.h doesn't exist on arm32 and PTRACE_GET_THREAD_AREA isn't + // defined on aarch64, so don't try to include this on those platforms. +-#if defined(__mips__) ++#if defined(__mips__) || defined(__loongarch64) + #include + #else + #include +@@ -418,7 +420,7 @@ ResultExpr RestrictPrlimitToGetrlimit(pid_t target_pid) { + ResultExpr RestrictPtrace() { + const Arg request(0); + return Switch(request).CASES(( +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__loongarch64) + PTRACE_GETREGS, + PTRACE_GETFPREGS, + #if defined(TRACE_GET_THREAD_AREA) +diff --git a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc +index d9d18822f..1f5e39b91 100644 +--- a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc ++++ b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc +@@ -80,7 +80,7 @@ bool SyscallSets::IsUmask(int sysno) { + // Both EPERM and ENOENT are valid errno unless otherwise noted in comment. + bool SyscallSets::IsFileSystem(int sysno) { + switch (sysno) { +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__loongarch64) + case __NR_access: // EPERM not a valid errno. + case __NR_chmod: + case __NR_chown: +@@ -106,13 +106,13 @@ bool SyscallSets::IsFileSystem(int sysno) { + #endif + case __NR_ustat: // Same as above. Deprecated. + case __NR_utimes: +-#endif // !defined(__aarch64__) ++#endif // !defined(__aarch64__) || defined(__loongarch64) + + case __NR_execve: + case __NR_faccessat: // EPERM not a valid errno. + case __NR_fchmodat: + case __NR_fchownat: // Should be called chownat ? 
+-#if defined(__x86_64__) || defined(__aarch64__) ++#if defined(__x86_64__) || defined(__aarch64__) || defined(__loongarch64) + case __NR_newfstatat: // fstatat(). EPERM not a valid errno. + #elif defined(__i386__) || defined(__arm__) || \ + (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) +@@ -195,7 +195,7 @@ bool SyscallSets::IsAllowedFileSystemAccessViaFd(int sysno) { + case __NR_oldfstat: + #endif + #if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined (__loongarch64) + case __NR_sync_file_range: // EPERM not a valid errno. + #elif defined(__arm__) + case __NR_arm_sync_file_range: // EPERM not a valid errno. +@@ -219,7 +219,7 @@ bool SyscallSets::IsDeniedFileSystemAccessViaFd(int sysno) { + (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) + case __NR_ftruncate64: + #endif +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__loongarch64) + case __NR_getdents: // EPERM not a valid errno. + #endif + case __NR_getdents64: // EPERM not a valid errno. +@@ -298,7 +298,7 @@ bool SyscallSets::IsProcessPrivilegeChange(int sysno) { + bool SyscallSets::IsProcessGroupOrSession(int sysno) { + switch (sysno) { + case __NR_setpgid: +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__loongarch64) + case __NR_getpgrp: + #endif + case __NR_setsid: +@@ -327,7 +327,7 @@ bool SyscallSets::IsAllowedSignalHandling(int sysno) { + case __NR_rt_sigsuspend: + case __NR_rt_tgsigqueueinfo: + case __NR_sigaltstack: +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__loongarch64) + case __NR_signalfd: + #endif + case __NR_signalfd4: +@@ -351,12 +351,12 @@ bool SyscallSets::IsAllowedOperationOnFd(int sysno) { + switch (sysno) { + case __NR_close: + case __NR_dup: +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__loongarch64) + case __NR_dup2: + #endif + case __NR_dup3: + #if defined(__x86_64__) || defined(__arm__) || defined(__mips__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined(__loongarch64) + case __NR_shutdown: + #endif + return true; +@@ -395,7 +395,7 @@ bool SyscallSets::IsAllowedProcessStartOrDeath(int sysno) { + return true; + case __NR_clone: // Should be parameter-restricted. + case __NR_setns: // Privileged. 
+-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__loongarch64) + case __NR_fork: + #endif + #if defined(__i386__) || defined(__x86_64__) +@@ -406,7 +406,7 @@ bool SyscallSets::IsAllowedProcessStartOrDeath(int sysno) { + #endif + case __NR_set_tid_address: + case __NR_unshare: +-#if !defined(__mips__) && !defined(__aarch64__) ++#if !defined(__mips__) && !defined(__aarch64__) && !defined(__loongarch64) + case __NR_vfork: + #endif + default: +@@ -427,7 +427,7 @@ bool SyscallSets::IsAllowedFutex(int sysno) { + + bool SyscallSets::IsAllowedEpoll(int sysno) { + switch (sysno) { +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__loongarch64) + case __NR_epoll_create: + case __NR_epoll_wait: + #endif +@@ -448,14 +448,14 @@ bool SyscallSets::IsAllowedEpoll(int sysno) { + + bool SyscallSets::IsAllowedGetOrModifySocket(int sysno) { + switch (sysno) { +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__loongarch64) + case __NR_pipe: + #endif + case __NR_pipe2: + return true; + default: + #if defined(__x86_64__) || defined(__arm__) || defined(__mips__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined(__loongarch64) + case __NR_socketpair: // We will want to inspect its argument. + #endif + return false; +@@ -465,7 +465,7 @@ bool SyscallSets::IsAllowedGetOrModifySocket(int sysno) { + bool SyscallSets::IsDeniedGetOrModifySocket(int sysno) { + switch (sysno) { + #if defined(__x86_64__) || defined(__arm__) || defined(__mips__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined(__loongarch64) + case __NR_accept: + case __NR_accept4: + case __NR_bind: +@@ -519,7 +519,7 @@ bool SyscallSets::IsAllowedAddressSpaceAccess(int sysno) { + case __NR_mincore: + case __NR_mlockall: + #if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined(__loongarch64) + case __NR_mmap: + #endif + #if defined(__i386__) || defined(__arm__) || \ +@@ -552,7 +552,7 @@ bool SyscallSets::IsAllowedGeneralIo(int sysno) { + (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) + case __NR__llseek: + #endif +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__loongarch64) + case __NR_poll: + #endif + case __NR_ppoll: +@@ -565,7 +565,7 @@ bool SyscallSets::IsAllowedGeneralIo(int sysno) { + case __NR_recv: + #endif + #if defined(__x86_64__) || defined(__arm__) || defined(__mips__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined(__loongarch64) + case __NR_recvfrom: // Could specify source. + case __NR_recvmsg: // Could specify source. + #endif +@@ -580,7 +580,7 @@ bool SyscallSets::IsAllowedGeneralIo(int sysno) { + case __NR_send: + #endif + #if defined(__x86_64__) || defined(__arm__) || defined(__mips__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined(__loongarch64) + case __NR_sendmsg: // Could specify destination. + case __NR_sendto: // Could specify destination. 
+ #endif +@@ -630,7 +630,7 @@ bool SyscallSets::IsSeccomp(int sysno) { + bool SyscallSets::IsAllowedBasicScheduler(int sysno) { + switch (sysno) { + case __NR_sched_yield: +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__loongarch64) + case __NR_pause: + #endif + case __NR_nanosleep: +@@ -714,7 +714,7 @@ bool SyscallSets::IsNuma(int sysno) { + case __NR_getcpu: + case __NR_mbind: + #if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined(__loongarch64) + case __NR_migrate_pages: + #endif + case __NR_move_pages: +@@ -743,7 +743,7 @@ bool SyscallSets::IsGlobalProcessEnvironment(int sysno) { + switch (sysno) { + case __NR_acct: // Privileged. + #if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined(__loongarch64) + case __NR_getrlimit: + #endif + #if defined(__i386__) || defined(__arm__) +@@ -778,7 +778,7 @@ bool SyscallSets::IsDebug(int sysno) { + + bool SyscallSets::IsGlobalSystemStatus(int sysno) { + switch (sysno) { +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__loongarch64) + case __NR__sysctl: + case __NR_sysfs: + #endif +@@ -796,7 +796,7 @@ bool SyscallSets::IsGlobalSystemStatus(int sysno) { + + bool SyscallSets::IsEventFd(int sysno) { + switch (sysno) { +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__loongarch64) + case __NR_eventfd: + #endif + case __NR_eventfd2: +@@ -832,7 +832,8 @@ bool SyscallSets::IsKeyManagement(int sysno) { + } + + #if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_64_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_64_BITS)) || \ ++ defined(__loongarch64) + bool SyscallSets::IsSystemVSemaphores(int sysno) { + switch (sysno) { + case __NR_semctl: +@@ -847,7 +848,7 @@ bool SyscallSets::IsSystemVSemaphores(int sysno) { + #endif + + #if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || \ +- defined(__aarch64__) || \ ++ defined(__aarch64__) || defined(__loongarch64) || \ + (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_64_BITS)) + // These give a lot of ambient authority and bypass the setuid sandbox. 
+ bool SyscallSets::IsSystemVSharedMemory(int sysno) { +@@ -864,7 +865,8 @@ bool SyscallSets::IsSystemVSharedMemory(int sysno) { + #endif + + #if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_64_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_64_BITS)) || \ ++ defined(__loongarch64) + bool SyscallSets::IsSystemVMessageQueue(int sysno) { + switch (sysno) { + case __NR_msgctl: +@@ -895,7 +897,8 @@ bool SyscallSets::IsSystemVIpc(int sysno) { + + bool SyscallSets::IsAnySystemV(int sysno) { + #if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_64_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_64_BITS)) || \ ++ defined(__loongarch64) + return IsSystemVMessageQueue(sysno) || IsSystemVSemaphores(sysno) || + IsSystemVSharedMemory(sysno); + #elif defined(__i386__) || \ +@@ -928,7 +931,7 @@ bool SyscallSets::IsAdvancedScheduler(int sysno) { + bool SyscallSets::IsInotify(int sysno) { + switch (sysno) { + case __NR_inotify_add_watch: +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__loongarch64) + case __NR_inotify_init: + #endif + case __NR_inotify_init1: +@@ -1043,7 +1046,7 @@ bool SyscallSets::IsMisc(int sysno) { + #if defined(__x86_64__) + case __NR_tuxcall: + #endif +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__loongarch64) + case __NR_vserver: + #endif + return true; +diff --git a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_sets.h b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_sets.h +index 923533ec9..fbe7c7910 100644 +--- a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_sets.h ++++ b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_sets.h +@@ -49,7 +49,7 @@ class SANDBOX_EXPORT SyscallSets { + #endif + + #if defined(__x86_64__) || defined(__arm__) || defined(__mips__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined(__loongarch64) + static bool IsNetworkSocketInformation(int sysno); + #endif + +@@ -72,18 +72,20 @@ class SANDBOX_EXPORT SyscallSets { + static bool IsAsyncIo(int sysno); + static bool IsKeyManagement(int sysno); + #if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_64_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_64_BITS)) || \ ++ defined(__loongarch64) + static bool IsSystemVSemaphores(int sysno); + #endif + #if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || \ +- defined(__aarch64__) || \ ++ defined(__aarch64__) || defined(__loongarch64) || \ + (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_64_BITS)) + // These give a lot of ambient authority and bypass the setuid sandbox. 
+ static bool IsSystemVSharedMemory(int sysno); + #endif + + #if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_64_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_64_BITS)) || \ ++ defined(__loongarch64) + static bool IsSystemVMessageQueue(int sysno); + #endif + +diff --git a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf/syscall.cc b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf/syscall.cc +index 34edabd2b..ddc2446ef 100644 +--- a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf/syscall.cc ++++ b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf/syscall.cc +@@ -16,7 +16,7 @@ namespace sandbox { + namespace { + + #if defined(ARCH_CPU_X86_FAMILY) || defined(ARCH_CPU_ARM_FAMILY) || \ +- defined(ARCH_CPU_MIPS_FAMILY) ++ defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_LA64) + // Number that's not currently used by any Linux kernel ABIs. + const int kInvalidSyscallNumber = 0x351d3; + #else +@@ -310,6 +310,25 @@ asm(// We need to be able to tell the kernel exactly where we made a + "2:ret\n" + ".cfi_endproc\n" + ".size SyscallAsm, .-SyscallAsm\n" ++#elif defined(__loongarch64) ++ ".text\n" ++ ".global SyscallAsm\n" ++ ".type SyscallAsm, %function\n" ++ "SyscallAsm:\n" ++ "bge $a0, $zero, 1f\n" ++ "la $a0, 2f\n" ++ "b 2f\n" ++ "1:ld.d $a5, $a6, 40\n" ++ "ld.d $a4, $a6, 32\n" ++ "ld.d $a3, $a6, 24\n" ++ "ld.d $a2, $a6, 16\n" ++ "ld.d $a1, $a6, 8\n" ++ "move $a7, $a0\n" ++ "ld.d $a0, $a6, 0\n" ++ // Enter the kernel ++ "syscall 0\n" ++ "2:jirl $zero, $ra, 0\n" ++ ".size SyscallAsm, .-SyscallAsm\n" + #endif + ); // asm + +@@ -426,7 +445,22 @@ intptr_t Syscall::Call(int nr, + : "memory", "x1", "x2", "x3", "x4", "x5", "x8", "x30"); + ret = inout; + } +- ++#elif defined(__loongarch64) ++ intptr_t ret; ++ { ++ register intptr_t inout __asm__("$4") = nr; ++ register const intptr_t* data __asm__("$10") = args; ++ // Save and restore $ra. ++ __asm__ volatile("addi.d $sp, $sp, 8\n" ++ "st.d $ra, $sp, 0\n" ++ "bl SyscallAsm\n" ++ "ld.d $ra, $sp, 0\n" ++ "addi.d $sp, $sp, -8\n" ++ : "=r"(inout) ++ : "0"(inout), "r"(data) ++ : "memory", "$5", "$6", "$7", "$8", "$9", "$11", "$2"); ++ ret = inout; ++ } + #else + #error "Unimplemented architecture" + #endif +diff --git a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf/trap.cc b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf/trap.cc +index 9884be8bb..7fa6e188d 100644 +--- a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf/trap.cc ++++ b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf/trap.cc +@@ -189,7 +189,7 @@ void Trap::SigSys(int nr, LinuxSigInfo* info, ucontext_t* ctx) { + + // Some more sanity checks. + if (sigsys.ip != reinterpret_cast(SECCOMP_IP(ctx)) || +- sigsys_nr_is_bad || sigsys.arch != SECCOMP_ARCH) { ++ sigsys_nr_is_bad) { // TODO:LA64 || sigsys.arch != SECCOMP_ARCH) { + // TODO(markus): + // SANDBOX_DIE() can call LOG(FATAL). This is not normally async-signal + // safe and can lead to bugs. 
We should eventually implement a different +diff --git a/src/3rdparty/chromium/sandbox/linux/services/credentials.cc b/src/3rdparty/chromium/sandbox/linux/services/credentials.cc +index d7b5d8c44..70ace0b04 100644 +--- a/src/3rdparty/chromium/sandbox/linux/services/credentials.cc ++++ b/src/3rdparty/chromium/sandbox/linux/services/credentials.cc +@@ -81,7 +81,7 @@ bool ChrootToSafeEmptyDir() { + pid_t pid = -1; + alignas(16) char stack_buf[PTHREAD_STACK_MIN]; + #if defined(ARCH_CPU_X86_FAMILY) || defined(ARCH_CPU_ARM_FAMILY) || \ +- defined(ARCH_CPU_MIPS_FAMILY) ++ defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_LA64) + // The stack grows downward. + void* stack = stack_buf + sizeof(stack_buf); + #else +@@ -90,7 +90,7 @@ bool ChrootToSafeEmptyDir() { + + int clone_flags = CLONE_FS | LINUX_SIGCHLD; + void* tls = nullptr; +-#if defined(ARCH_CPU_X86_64) || defined(ARCH_CPU_ARM_FAMILY) ++#if defined(ARCH_CPU_X86_64) || defined(ARCH_CPU_ARM_FAMILY) || defined(ARCH_CPU_LA64) + // Use CLONE_VM | CLONE_VFORK as an optimization to avoid copying page tables. + // Since clone writes to the new child's TLS before returning, we must set a + // new TLS to avoid corrupting the current process's TLS. On ARCH_CPU_X86, +diff --git a/src/3rdparty/chromium/sandbox/linux/services/syscall_wrappers.cc b/src/3rdparty/chromium/sandbox/linux/services/syscall_wrappers.cc +index fcfd2aa12..bd936b0a3 100644 +--- a/src/3rdparty/chromium/sandbox/linux/services/syscall_wrappers.cc ++++ b/src/3rdparty/chromium/sandbox/linux/services/syscall_wrappers.cc +@@ -58,7 +58,7 @@ long sys_clone(unsigned long flags, + #if defined(ARCH_CPU_X86_64) + return syscall(__NR_clone, flags, child_stack, ptid, ctid, tls); + #elif defined(ARCH_CPU_X86) || defined(ARCH_CPU_ARM_FAMILY) || \ +- defined(ARCH_CPU_MIPS_FAMILY) ++ defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_LA64) + // CONFIG_CLONE_BACKWARDS defined. 
+ return syscall(__NR_clone, flags, child_stack, ptid, tls, ctid); + #endif +diff --git a/src/3rdparty/chromium/sandbox/linux/syscall_broker/broker_process.cc b/src/3rdparty/chromium/sandbox/linux/syscall_broker/broker_process.cc +index 8321d2379..8f3eced17 100644 +--- a/src/3rdparty/chromium/sandbox/linux/syscall_broker/broker_process.cc ++++ b/src/3rdparty/chromium/sandbox/linux/syscall_broker/broker_process.cc +@@ -111,53 +111,55 @@ bool BrokerProcess::Init( + + bool BrokerProcess::IsSyscallAllowed(int sysno) const { + switch (sysno) { +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__loongarch64) + case __NR_access: + #endif + case __NR_faccessat: + return !fast_check_in_client_ || + allowed_command_set_.test(COMMAND_ACCESS); + +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__loongarch64) + case __NR_mkdir: + #endif + case __NR_mkdirat: + return !fast_check_in_client_ || allowed_command_set_.test(COMMAND_MKDIR); + +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__loongarch64) + case __NR_open: + #endif + case __NR_openat: + return !fast_check_in_client_ || allowed_command_set_.test(COMMAND_OPEN); + +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__loongarch64) + case __NR_readlink: + #endif + case __NR_readlinkat: + return !fast_check_in_client_ || + allowed_command_set_.test(COMMAND_READLINK); + +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__loongarch64) + case __NR_rename: + #endif ++#if !defined(__loongarch64) + case __NR_renameat: ++#endif + case __NR_renameat2: + return !fast_check_in_client_ || + allowed_command_set_.test(COMMAND_RENAME); + +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__loongarch64) + case __NR_rmdir: + return !fast_check_in_client_ || allowed_command_set_.test(COMMAND_RMDIR); + #endif + +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__loongarch64) + case __NR_stat: + case __NR_lstat: + #endif + #if defined(__NR_fstatat) + case __NR_fstatat: + #endif +-#if defined(__x86_64__) || defined(__aarch64__) ++#if defined(__x86_64__) || defined(__aarch64__) || defined(__loongarch64) + case __NR_newfstatat: + #endif + return !fast_check_in_client_ || allowed_command_set_.test(COMMAND_STAT); +@@ -172,7 +174,7 @@ bool BrokerProcess::IsSyscallAllowed(int sysno) const { + return !fast_check_in_client_ || allowed_command_set_.test(COMMAND_STAT); + #endif + +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__loongarch64) + case __NR_unlink: + #endif + case __NR_unlinkat: +diff --git a/src/3rdparty/chromium/sandbox/linux/syscall_broker/broker_process_unittest.cc b/src/3rdparty/chromium/sandbox/linux/syscall_broker/broker_process_unittest.cc +index e1144da6e..d2a7cab40 100644 +--- a/src/3rdparty/chromium/sandbox/linux/syscall_broker/broker_process_unittest.cc ++++ b/src/3rdparty/chromium/sandbox/linux/syscall_broker/broker_process_unittest.cc +@@ -1476,7 +1476,10 @@ TEST(BrokerProcess, IsSyscallAllowed) { + #if defined(__NR_rename) + {__NR_rename, COMMAND_RENAME}, + #endif ++// TODO:LA ++#if !defined(__loongarch64) + {__NR_renameat, COMMAND_RENAME}, ++#endif + #if defined(__NR_rmdir) + {__NR_rmdir, COMMAND_RMDIR}, + #endif +diff --git a/src/3rdparty/chromium/sandbox/linux/system_headers/linux_seccomp.h b/src/3rdparty/chromium/sandbox/linux/system_headers/linux_seccomp.h +index a60fe2ad3..634be0d1c 100644 +--- a/src/3rdparty/chromium/sandbox/linux/system_headers/linux_seccomp.h ++++ 
b/src/3rdparty/chromium/sandbox/linux/system_headers/linux_seccomp.h +@@ -29,6 +29,9 @@ + #ifndef EM_AARCH64 + #define EM_AARCH64 183 + #endif ++#ifndef EM_LOONGARCH ++#define EM_LOONGARCH 258 ++#endif + + #ifndef __AUDIT_ARCH_64BIT + #define __AUDIT_ARCH_64BIT 0x80000000 +@@ -54,6 +57,9 @@ + #ifndef AUDIT_ARCH_AARCH64 + #define AUDIT_ARCH_AARCH64 (EM_AARCH64 | __AUDIT_ARCH_64BIT | __AUDIT_ARCH_LE) + #endif ++#ifndef AUDIT_ARCH_LOONGARCH64 ++#define AUDIT_ARCH_LOONGARCH64 (EM_LOONGARCH | __AUDIT_ARCH_64BIT | __AUDIT_ARCH_LE) ++#endif + + // For prctl.h + #ifndef PR_SET_SECCOMP +diff --git a/src/3rdparty/chromium/sandbox/linux/system_headers/linux_signal.h b/src/3rdparty/chromium/sandbox/linux/system_headers/linux_signal.h +index f5a736761..4af5d249a 100644 +--- a/src/3rdparty/chromium/sandbox/linux/system_headers/linux_signal.h ++++ b/src/3rdparty/chromium/sandbox/linux/system_headers/linux_signal.h +@@ -13,7 +13,7 @@ + // (not undefined, but defined different values and in different memory + // layouts). So, fill the gap here. + #if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined(__loongarch64) + + #define LINUX_SIGHUP 1 + #define LINUX_SIGINT 2 +@@ -120,7 +120,7 @@ typedef siginfo_t LinuxSigInfo; + struct LinuxSigSet { + unsigned long sig[_NSIG_WORDS]; + }; +-#elif defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_64_BITS) ++#elif (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_64_BITS)) || defined(ARCH_CPU_LA64) + #if !defined(_NSIG_WORDS) + #define _NSIG_WORDS 2 + #endif +diff --git a/src/3rdparty/chromium/sandbox/linux/system_headers/linux_syscalls.h b/src/3rdparty/chromium/sandbox/linux/system_headers/linux_syscalls.h +index 2b78a0cc3..eb66de915 100644 +--- a/src/3rdparty/chromium/sandbox/linux/system_headers/linux_syscalls.h ++++ b/src/3rdparty/chromium/sandbox/linux/system_headers/linux_syscalls.h +@@ -35,5 +35,9 @@ + #include "sandbox/linux/system_headers/arm64_linux_syscalls.h" + #endif + ++#if defined(__loongarch64) ++#include "sandbox/linux/system_headers/la64_linux_syscalls.h" ++#endif ++ + #endif // SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_SYSCALLS_H_ + +diff --git a/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_broker_policy_linux.cc b/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_broker_policy_linux.cc +index 68af74e1f..d01ae8b47 100644 +--- a/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_broker_policy_linux.cc ++++ b/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_broker_policy_linux.cc +@@ -98,7 +98,7 @@ ResultExpr BrokerProcessPolicy::EvaluateSyscall(int sysno) const { + return Allow(); + break; + #endif +-#if defined(__NR_newfstatat) ++#if defined(__NR_newfstatat) && defined(__clang__) + case __NR_newfstatat: + if (allowed_command_set_.test(sandbox::syscall_broker::COMMAND_STAT)) + return Allow(); +diff --git a/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_cdm_policy_linux.cc b/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_cdm_policy_linux.cc +index 9d39e5d5d..ce104e19e 100644 +--- a/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_cdm_policy_linux.cc ++++ b/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_cdm_policy_linux.cc +@@ -33,7 +33,7 @@ ResultExpr CdmProcessPolicy::EvaluateSyscall(int sysno) const { + case __NR_ftruncate: + case __NR_fallocate: + #if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \ +- defined(__aarch64__) ++ 
defined(__aarch64__) || defined(__loongarch64) + case __NR_getrlimit: + #endif + #if defined(__i386__) || defined(__arm__) +diff --git a/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_cros_amd_gpu_policy_linux.cc b/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_cros_amd_gpu_policy_linux.cc +index 2a850ba8e..e6ddf51e2 100644 +--- a/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_cros_amd_gpu_policy_linux.cc ++++ b/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_cros_amd_gpu_policy_linux.cc +@@ -37,7 +37,7 @@ ResultExpr CrosAmdGpuProcessPolicy::EvaluateSyscall(int sysno) const { + case __NR_sched_setscheduler: + case __NR_sysinfo: + case __NR_uname: +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__loongarch64) + case __NR_readlink: + case __NR_stat: + #endif +diff --git a/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_gpu_policy_linux.cc b/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_gpu_policy_linux.cc +index 66214334d..331144170 100644 +--- a/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_gpu_policy_linux.cc ++++ b/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_gpu_policy_linux.cc +@@ -48,7 +48,7 @@ ResultExpr GpuProcessPolicy::EvaluateSyscall(int sysno) const { + (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) + case __NR_ftruncate64: + #endif +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__loongarch64) + case __NR_getdents: + #endif + case __NR_getdents64: +diff --git a/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_renderer_policy_linux.cc b/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_renderer_policy_linux.cc +index a85c0ea86..1f8d044a3 100644 +--- a/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_renderer_policy_linux.cc ++++ b/src/3rdparty/chromium/services/service_manager/sandbox/linux/bpf_renderer_policy_linux.cc +@@ -68,7 +68,7 @@ ResultExpr RendererProcessPolicy::EvaluateSyscall(int sysno) const { + case __NR_ftruncate64: + #endif + #if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined(__loongarch64) + case __NR_getrlimit: + case __NR_setrlimit: + // We allow setrlimit to dynamically adjust the address space limit as +diff --git a/src/3rdparty/chromium/services/service_manager/sandbox/linux/sandbox_seccomp_bpf_linux.cc b/src/3rdparty/chromium/services/service_manager/sandbox/linux/sandbox_seccomp_bpf_linux.cc +index f5d096b10..ac6133da5 100644 +--- a/src/3rdparty/chromium/services/service_manager/sandbox/linux/sandbox_seccomp_bpf_linux.cc ++++ b/src/3rdparty/chromium/services/service_manager/sandbox/linux/sandbox_seccomp_bpf_linux.cc +@@ -65,9 +65,9 @@ using sandbox::bpf_dsl::ResultExpr; + + // Make sure that seccomp-bpf does not get disabled by mistake. Also make sure + // that we think twice about this when adding a new architecture. +-#if !defined(ARCH_CPU_ARM64) && !defined(ARCH_CPU_MIPS64EL) ++#if !defined(ARCH_CPU_ARM64) && !defined(ARCH_CPU_MIPS64EL) && !defined(ARCH_CPU_LA64) + #error "Seccomp-bpf disabled on supported architecture!" 
+-#endif // !defined(ARCH_CPU_ARM64) && !defined(ARCH_CPU_MIPS64EL) ++#endif // !defined(ARCH_CPU_ARM64) && !defined(ARCH_CPU_MIPS64EL) && !defined(ARCH_CPU_LA64) + + #endif // BUILDFLAG(USE_SECCOMP_BPF) + +diff --git a/src/3rdparty/chromium/skia/BUILD.gn b/src/3rdparty/chromium/skia/BUILD.gn +index f5992c505..7a3bbcd78 100644 +--- a/src/3rdparty/chromium/skia/BUILD.gn ++++ b/src/3rdparty/chromium/skia/BUILD.gn +@@ -796,6 +796,8 @@ skia_source_set("skia_opts") { + sources = skia_opts.none_sources + } else if (current_cpu == "s390x") { + sources = skia_opts.none_sources ++ } else if (current_cpu == "la64") { ++ sources = skia_opts.none_sources + } else { + assert(false, "Need to port cpu specific stuff from skia_library_opts.gyp") + } +diff --git a/src/3rdparty/chromium/third_party/blink/renderer/core/editing/commands/delete_selection_command.cc b/src/3rdparty/chromium/third_party/blink/renderer/core/editing/commands/delete_selection_command.cc +index 05b74b53f..1a56dda89 100644 +--- a/src/3rdparty/chromium/third_party/blink/renderer/core/editing/commands/delete_selection_command.cc ++++ b/src/3rdparty/chromium/third_party/blink/renderer/core/editing/commands/delete_selection_command.cc +@@ -229,7 +229,7 @@ static Position TrailingWhitespacePosition(const Position& position, + // Workaround: GCC fails to resolve overloaded template functions, passed as + // parameters of EnclosingNodeType. But it works wrapping that in a utility + // function. +-#if defined(COMPILER_GCC) ++#if defined(COMPILER_GCC) || !defined(__clang__) + static bool IsHTMLTableRowElement(const blink::Node* node) { + return IsA(node); + } +@@ -263,7 +263,7 @@ void DeleteSelectionCommand::InitializePositionData( + start_root_ = RootEditableElementOf(start); + end_root_ = RootEditableElementOf(end); + +-#if defined(COMPILER_GCC) ++#if defined(COMPILER_GCC) || !defined(__clang__) + // Workaround. See declaration of IsHTMLTableRowElement + start_table_row_ = To( + EnclosingNodeOfType(start, &IsHTMLTableRowElement)); +diff --git a/src/3rdparty/chromium/third_party/blink/renderer/platform/heap/asm/BUILD.gn b/src/3rdparty/chromium/third_party/blink/renderer/platform/heap/asm/BUILD.gn +index fe44daf27..9910244f5 100644 +--- a/src/3rdparty/chromium/third_party/blink/renderer/platform/heap/asm/BUILD.gn ++++ b/src/3rdparty/chromium/third_party/blink/renderer/platform/heap/asm/BUILD.gn +@@ -36,6 +36,8 @@ if (current_cpu == "x86" || current_cpu == "x64") { + sources = [ "SaveRegisters_mips.S" ] + } else if (current_cpu == "mips64el") { + sources = [ "SaveRegisters_mips64.S" ] ++ } else if (current_cpu == "la64") { ++ sources = [ "SaveRegisters_la64.S" ] + } else if (current_cpu == "ppc64") { + sources = [ "SaveRegisters_ppc64.S" ] + } +diff --git a/src/3rdparty/chromium/third_party/blink/renderer/platform/wtf/hash_table.h b/src/3rdparty/chromium/third_party/blink/renderer/platform/wtf/hash_table.h +index eb10c6964..127a415a5 100644 +--- a/src/3rdparty/chromium/third_party/blink/renderer/platform/wtf/hash_table.h ++++ b/src/3rdparty/chromium/third_party/blink/renderer/platform/wtf/hash_table.h +@@ -674,7 +674,12 @@ struct HashTableHelper { + } + static constexpr size_t constexpr_max(size_t a, size_t b) { return a > b ? 
a : b; } + static bool IsEmptyOrDeletedBucketSafe(const Value& value) { ++// TODO:LA64 ++#if defined(ARCH_CPU_LA64) ++ char buf[sizeof(Key)]; ++#else + alignas(constexpr_max(alignof(Key), sizeof(size_t))) char buf[sizeof(Key)]; ++#endif + const Key& key = Extractor::ExtractSafe(value, &buf); + return IsEmptyBucket(key) || IsDeletedBucket(key); + } +diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/client/crashpad_info_note.S b/src/3rdparty/chromium/third_party/crashpad/crashpad/client/crashpad_info_note.S +index b13d8642e..d6fbc7eb0 100644 +--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/client/crashpad_info_note.S ++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/client/crashpad_info_note.S +@@ -42,7 +42,7 @@ name_end: + .balign NOTE_ALIGN + desc: + #if defined(__LP64__) +- .quad CRASHPAD_INFO_SYMBOL - desc ++ .quad CRASHPAD_INFO_SYMBOL + #else + .long CRASHPAD_INFO_SYMBOL - desc + #endif // __LP64__ +diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/minidump/minidump_misc_info_writer.cc b/src/3rdparty/chromium/third_party/crashpad/crashpad/minidump/minidump_misc_info_writer.cc +index a13407605..143c65426 100644 +--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/minidump/minidump_misc_info_writer.cc ++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/minidump/minidump_misc_info_writer.cc +@@ -126,6 +126,8 @@ std::string MinidumpMiscInfoDebugBuildString() { + static constexpr char kCPU[] = "mips"; + #elif defined(ARCH_CPU_MIPS64EL) + static constexpr char kCPU[] = "mips64"; ++#elif defined(ARCH_CPU_LA64) ++ static constexpr char kCPU[] = "la64"; + #else + #error define kCPU for this CPU + #endif +diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/capture_memory.cc b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/capture_memory.cc +index a51626ccd..38986b4a4 100644 +--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/capture_memory.cc ++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/capture_memory.cc +@@ -107,7 +107,7 @@ void CaptureMemory::PointedToByContext(const CPUContext& context, + MaybeCaptureMemoryAround(delegate, context.arm->regs[i]); + } + } +-#elif defined(ARCH_CPU_MIPS_FAMILY) ++#elif defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_LA64) + for (size_t i = 0; i < base::size(context.mipsel->regs); ++i) { + MaybeCaptureMemoryAround(delegate, context.mipsel->regs[i]); + } +diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/cpu_architecture.h b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/cpu_architecture.h +index 811a72095..b5284a72e 100644 +--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/cpu_architecture.h ++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/cpu_architecture.h +@@ -43,7 +43,10 @@ enum CPUArchitecture { + kCPUArchitectureMIPSEL, + + //! \brief 64-bit MIPSEL. +- kCPUArchitectureMIPS64EL ++ kCPUArchitectureMIPS64EL, ++ ++ //! \brief 64-bit LoongArch. 
++ kCPUArchitectureLA64 + }; + + } // namespace crashpad +diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/cpu_context.cc b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/cpu_context.cc +index 6fb8d7e71..bda28212f 100644 +--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/cpu_context.cc ++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/cpu_context.cc +@@ -196,6 +196,7 @@ bool CPUContext::Is64Bit() const { + case kCPUArchitectureX86_64: + case kCPUArchitectureARM64: + case kCPUArchitectureMIPS64EL: ++ case kCPUArchitectureLA64: + return true; + case kCPUArchitectureX86: + case kCPUArchitectureARM: +diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/crashpad_info_size_test_note.S b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/crashpad_info_size_test_note.S +index 16b5d499d..9ccf51733 100644 +--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/crashpad_info_size_test_note.S ++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/crashpad_info_size_test_note.S +@@ -43,7 +43,7 @@ name_end: + .balign NOTE_ALIGN + desc: + #if defined(__LP64__) +- .quad TEST_CRASHPAD_INFO_SYMBOL - desc ++ .quad TEST_CRASHPAD_INFO_SYMBOL + #else + .long TEST_CRASHPAD_INFO_SYMBOL - desc + #endif // __LP64__ +diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/exception_snapshot_linux.cc b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/exception_snapshot_linux.cc +index cd40b3b12..af5e21b78 100644 +--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/exception_snapshot_linux.cc ++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/exception_snapshot_linux.cc +@@ -333,6 +333,8 @@ bool ExceptionSnapshotLinux::Initialize(ProcessReaderLinux* process_reader, + + thread_id_ = thread_id; + ++// TODO:LA ++#if !defined(ARCH_CPU_LA64) + if (process_reader->Is64Bit()) { + if (!ReadContext(process_reader, context_address) || + !ReadSiginfo(process_reader, siginfo_address)) { +@@ -344,6 +346,7 @@ bool ExceptionSnapshotLinux::Initialize(ProcessReaderLinux* process_reader, + return false; + } + } ++#endif + + INITIALIZATION_STATE_SET_VALID(initialized_); + return true; +diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/process_reader_linux.cc b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/process_reader_linux.cc +index b96abfe74..3d6591fad 100644 +--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/process_reader_linux.cc ++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/process_reader_linux.cc +@@ -108,6 +108,8 @@ void ProcessReaderLinux::Thread::InitializeStack(ProcessReaderLinux* reader) { + #elif defined(ARCH_CPU_MIPS_FAMILY) + stack_pointer = reader->Is64Bit() ? thread_info.thread_context.t64.regs[29] + : thread_info.thread_context.t32.regs[29]; ++#elif defined(ARCH_CPU_LA64) ++// TODO:LA + #else + #error Port. 
+ #endif +diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/signal_context.h b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/signal_context.h +index 110024680..2fa76e984 100644 +--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/signal_context.h ++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/signal_context.h +@@ -422,6 +422,7 @@ static_assert(offsetof(UContext, mcontext.fpregs) == + "context offset mismatch"); + #endif + ++#elif defined(ARCH_CPU_LA64) + #else + #error Port. + #endif // ARCH_CPU_X86_FAMILY +diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/system_snapshot_linux.cc b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/system_snapshot_linux.cc +index 8564d3d45..820b0eae0 100644 +--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/system_snapshot_linux.cc ++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/system_snapshot_linux.cc +@@ -203,6 +203,7 @@ CPUArchitecture SystemSnapshotLinux::GetCPUArchitecture() const { + #elif defined(ARCH_CPU_MIPS_FAMILY) + return process_reader_->Is64Bit() ? kCPUArchitectureMIPS64EL + : kCPUArchitectureMIPSEL; ++#elif defined(ARCH_CPU_LA64) + #else + #error port to your architecture + #endif +@@ -218,6 +219,8 @@ uint32_t SystemSnapshotLinux::CPURevision() const { + #elif defined(ARCH_CPU_MIPS_FAMILY) + // Not implementable on MIPS + return 0; ++#elif defined(ARCH_CPU_LA64) ++ return 0; + #else + #error port to your architecture + #endif +@@ -238,6 +241,8 @@ std::string SystemSnapshotLinux::CPUVendor() const { + #elif defined(ARCH_CPU_MIPS_FAMILY) + // Not implementable on MIPS + return std::string(); ++#elif defined(ARCH_CPU_LA64) ++ return std::string(); + #else + #error port to your architecture + #endif +@@ -371,6 +376,8 @@ bool SystemSnapshotLinux::NXEnabled() const { + #elif defined(ARCH_CPU_MIPS_FAMILY) + // Not implementable on MIPS + return false; ++#elif defined(ARCH_CPU_LA64) ++ return false; + #else + #error Port. + #endif // ARCH_CPU_X86_FAMILY +diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/thread_snapshot_linux.cc b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/thread_snapshot_linux.cc +index e3e2bebdd..c96d5b633 100644 +--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/thread_snapshot_linux.cc ++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/thread_snapshot_linux.cc +@@ -186,6 +186,7 @@ bool ThreadSnapshotLinux::Initialize(ProcessReaderLinux* process_reader, + thread.thread_info.float_context.f32, + context_.mipsel); + } ++#elif defined(ARCH_CPU_LA64) + #else + #error Port. 
+ #endif +diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/thread_snapshot_linux.h b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/thread_snapshot_linux.h +index 44cc6f6d9..8d6665b9b 100644 +--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/thread_snapshot_linux.h ++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/linux/thread_snapshot_linux.h +@@ -62,7 +62,7 @@ class ThreadSnapshotLinux final : public ThreadSnapshot { + #if defined(ARCH_CPU_X86_FAMILY) + CPUContextX86 x86; + CPUContextX86_64 x86_64; +-#elif defined(ARCH_CPU_ARM_FAMILY) ++#elif defined(ARCH_CPU_ARM_FAMILY) || defined(ARCH_CPU_LA64) + CPUContextARM arm; + CPUContextARM64 arm64; + #elif defined(ARCH_CPU_MIPS_FAMILY) +diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/util/linux/ptracer.cc b/src/3rdparty/chromium/third_party/crashpad/crashpad/util/linux/ptracer.cc +index 557e0d363..cbf2b2be0 100644 +--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/util/linux/ptracer.cc ++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/util/linux/ptracer.cc +@@ -273,7 +273,7 @@ bool GetThreadArea64(pid_t tid, + } + return true; + } +-#elif defined(ARCH_CPU_MIPS_FAMILY) ++#elif defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_LA64) + // PTRACE_GETREGSET, introduced in Linux 2.6.34 (2225a122ae26), requires kernel + // support enabled by HAVE_ARCH_TRACEHOOK. This has been set for x86 (including + // x86_64) since Linux 2.6.28 (99bbc4b1e677a), but for MIPS only since +@@ -296,7 +296,7 @@ bool GetGeneralPurposeRegistersLegacy(pid_t tid, + // ptrace unsupported on MIPS for kernels older than 3.13 + #if defined(ARCH_CPU_MIPSEL) + #define THREAD_CONTEXT_FIELD t32 +-#elif defined(ARCH_CPU_MIPS64EL) ++#elif defined(ARCH_CPU_MIPS64EL) || defined(ARCH_CPU_LA64) + #define THREAD_CONTEXT_FIELD t64 + #endif + for (size_t reg = 0; reg < 32; ++reg) { +@@ -385,6 +385,9 @@ bool GetThreadArea64(pid_t tid, + const ThreadContext& context, + LinuxVMAddress* address, + bool can_log) { ++// TODO:LA ++#if !defined(ARCH_CPU_LA64) ++ + void* result; + #if defined(ARCH_CPU_MIPSEL) + if (ptrace(PTRACE_GET_THREAD_AREA_3264, tid, nullptr, &result) != 0) { +@@ -395,6 +398,7 @@ bool GetThreadArea64(pid_t tid, + return false; + } + *address = FromPointerCast(result); ++#endif + return true; + } + +diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/util/linux/thread_info.h b/src/3rdparty/chromium/third_party/crashpad/crashpad/util/linux/thread_info.h +index 5b55c24a7..489e350c4 100644 +--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/util/linux/thread_info.h ++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/util/linux/thread_info.h +@@ -67,7 +67,7 @@ union ThreadContext { + uint32_t pc; + uint32_t cpsr; + uint32_t orig_r0; +-#elif defined(ARCH_CPU_MIPS_FAMILY) ++#elif defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_LA64) + // Reflects output format of static int gpr32_get(), defined in + // arch/mips/kernel/ptrace.c in kernel source + uint32_t padding0_[6]; +@@ -122,7 +122,7 @@ union ThreadContext { + uint64_t sp; + uint64_t pc; + uint64_t pstate; +-#elif defined(ARCH_CPU_MIPS_FAMILY) ++#elif defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_LA64) + // Reflects output format of static int gpr64_get(), defined in + // arch/mips/kernel/ptrace.c in kernel source + uint64_t regs[32]; +@@ -141,13 +141,13 @@ union ThreadContext { + using NativeThreadContext = user_regs_struct; + #elif defined(ARCH_CPU_ARMEL) + using 
NativeThreadContext = user_regs; +-#elif defined(ARCH_CPU_MIPS_FAMILY) ++#elif defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_LA64) + // No appropriate NativeThreadsContext type available for MIPS + #else + #error Port. + #endif // ARCH_CPU_X86_FAMILY || ARCH_CPU_ARM64 + +-#if !defined(ARCH_CPU_MIPS_FAMILY) ++#if !defined(ARCH_CPU_MIPS_FAMILY) && !defined(ARCH_CPU_LA64) + #if defined(ARCH_CPU_32_BITS) + static_assert(sizeof(t32_t) == sizeof(NativeThreadContext), "Size mismatch"); + #else // ARCH_CPU_64_BITS +@@ -209,7 +209,7 @@ union FloatContext { + + bool have_fpregs; + bool have_vfp; +-#elif defined(ARCH_CPU_MIPS_FAMILY) ++#elif defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_LA64) + // Reflects data format filled by ptrace_getfpregs() in + // arch/mips/kernel/ptrace.c + struct { +@@ -246,7 +246,7 @@ union FloatContext { + uint32_t fpsr; + uint32_t fpcr; + uint8_t padding[8]; +-#elif defined(ARCH_CPU_MIPS_FAMILY) ++#elif defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_LA64) + // Reflects data format filled by ptrace_getfpregs() in + // arch/mips/kernel/ptrace.c + double fpregs[32]; +@@ -278,7 +278,7 @@ union FloatContext { + #endif + #elif defined(ARCH_CPU_ARM64) + static_assert(sizeof(f64) == sizeof(user_fpsimd_struct), "Size mismatch"); +-#elif defined(ARCH_CPU_MIPS_FAMILY) ++#elif defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_LA64) + // No appropriate floating point context native type for available MIPS. + #else + #error Port. +diff --git a/src/3rdparty/chromium/third_party/libvpx/BUILD.gn b/src/3rdparty/chromium/third_party/libvpx/BUILD.gn +index 9b92313b4..75f0869ae 100644 +--- a/src/3rdparty/chromium/third_party/libvpx/BUILD.gn ++++ b/src/3rdparty/chromium/third_party/libvpx/BUILD.gn +@@ -326,6 +326,8 @@ static_library("bundled_libvpx") { + } + } else if (current_cpu == "mipsel" || current_cpu == "mips64el") { + sources = libvpx_srcs_mips ++ } else if (current_cpu == "la64") { ++ sources = libvpx_srcs_generic + } else if (current_cpu == "arm") { + if (is_chromeos) { + sources = libvpx_srcs_arm_neon_highbd +diff --git a/src/3rdparty/chromium/third_party/tcmalloc/chromium/src/base/basictypes.h b/src/3rdparty/chromium/third_party/tcmalloc/chromium/src/base/basictypes.h +index 3bf59f4e5..8e723831d 100644 +--- a/src/3rdparty/chromium/third_party/tcmalloc/chromium/src/base/basictypes.h ++++ b/src/3rdparty/chromium/third_party/tcmalloc/chromium/src/base/basictypes.h +@@ -379,6 +379,8 @@ class AssignAttributeStartEnd { + // some ARMs have shorter cache lines (ARM1176JZF-S is 32 bytes for example) but obviously 64-byte aligned implies 32-byte aligned + # elif (defined(__mips__)) + # define CACHELINE_ALIGNED __attribute__((aligned(128))) ++# elif (defined(__loongarch64)) ++# define CACHELINE_ALIGNED __attribute__((aligned(128))) + # elif (defined(__aarch64__)) + # define CACHELINE_ALIGNED __attribute__((aligned(64))) + // implementation specific, Cortex-A53 and 57 should have 64 bytes +diff --git a/src/3rdparty/chromium/third_party/tcmalloc/chromium/src/base/spinlock_linux-inl.h b/src/3rdparty/chromium/third_party/tcmalloc/chromium/src/base/spinlock_linux-inl.h +index ece8477b9..c4a4ab715 100644 +--- a/src/3rdparty/chromium/third_party/tcmalloc/chromium/src/base/spinlock_linux-inl.h ++++ b/src/3rdparty/chromium/third_party/tcmalloc/chromium/src/base/spinlock_linux-inl.h +@@ -41,6 +41,7 @@ + #define FUTEX_WAIT 0 + #define FUTEX_WAKE 1 + #define FUTEX_PRIVATE_FLAG 128 ++#define __NR_futex 98 + + // Note: Instead of making direct system calls that are inlined, we rely + // on the 
syscall() function in glibc to do the right thing. This +diff --git a/src/3rdparty/chromium/ui/base/x/BUILD.gn b/src/3rdparty/chromium/ui/base/x/BUILD.gn +index 782f009dd..6941d79df 100644 +--- a/src/3rdparty/chromium/ui/base/x/BUILD.gn ++++ b/src/3rdparty/chromium/ui/base/x/BUILD.gn +@@ -57,6 +57,8 @@ component("x") { + "//build/config/linux:xrandr", + ] + ++ cflags = ["-fpermissive", ] ++ + defines = [ "IS_UI_BASE_X_IMPL" ] + + deps = [ +diff --git a/src/3rdparty/chromium/ui/gl/BUILD.gn b/src/3rdparty/chromium/ui/gl/BUILD.gn +index adc9c08b0..e3fbeb64b 100644 +--- a/src/3rdparty/chromium/ui/gl/BUILD.gn ++++ b/src/3rdparty/chromium/ui/gl/BUILD.gn +@@ -33,6 +33,7 @@ buildflag_header("buildflags") { + + config("gl_config") { + defines = [] ++ cflags = [] + if (use_glx) { + defines += [ + "GL_GLEXT_PROTOTYPES", +@@ -42,6 +43,8 @@ config("gl_config") { + if (use_egl) { + defines += [ "USE_EGL" ] + } ++ ++ cflags += [ "-fpermissive", ] + } + + jumbo_component("gl") { +diff --git a/src/3rdparty/chromium/ui/views/layout/layout_types.h b/src/3rdparty/chromium/ui/views/layout/layout_types.h +index b349e6382..35142a562 100644 +--- a/src/3rdparty/chromium/ui/views/layout/layout_types.h ++++ b/src/3rdparty/chromium/ui/views/layout/layout_types.h +@@ -46,10 +46,10 @@ class VIEWS_EXPORT SizeBounds { + ~SizeBounds() = default; + + constexpr const base::Optional& width() const { return width_; } +- void set_width(base::Optional width) { width_ = std::move(width); } ++ void set_width(base::Optional width) { width_ = width; } + + constexpr const base::Optional& height() const { return height_; } +- void set_height(base::Optional height) { height_ = std::move(height); } ++ void set_height(base::Optional height) { height_ = height; } + + constexpr bool is_fully_bounded() const { return width_ && height_; } + +diff --git a/src/3rdparty/chromium/media/media_options.gni b/src/3rdparty/chromium/media/media_options.gni +index b349e6382..35142a562 100644 +--- a/src/3rdparty/chromium/media/media_options.gni ++++ b/src/3rdparty/chromium/media/media_options.gni +@@ -93,7 +93,7 @@ class VIEWS_EXPORT SizeBounds { + # are combined and we could override more logging than expected. + enable_logging_override = !use_jumbo_build && is_chromecast + +- enable_dav1d_decoder = !is_android && !is_ios && target_cpu != "mips64el" && target_cpu != "mipsel" ++ enable_dav1d_decoder = !is_android && !is_ios && target_cpu != "mips64el" && target_cpu != "mipsel" && target_cpu != "la64" + + # Enable browser managed persistent metadata storage for EME persistent + # session and persistent usage record session. 
+-- +2.20.1 + diff --git a/0002-fix-third_party-for-loongarch64.patch b/0002-fix-third_party-for-loongarch64.patch new file mode 100644 index 0000000..95be6a3 --- /dev/null +++ b/0002-fix-third_party-for-loongarch64.patch @@ -0,0 +1,37 @@ +From 9e1c5058e32b64a74011b0863e02cdbde63a602f Mon Sep 17 00:00:00 2001 +From: CaoHuixiong +Date: Tue, 12 Jan 2021 09:25:20 +0000 +Subject: [PATCH 02/13] fix third_party for loongarch64 + +--- + src/3rdparty/chromium/third_party/angle/gni/angle.gni | 2 +- + .../chromium/third_party/boringssl/src/include/openssl/base.h | 3 +++ + 2 files changed, 4 insertions(+), 1 deletion(-) + +Index: qtwebengine-everywhere-src-5.15.2/src/3rdparty/chromium/third_party/angle/gni/angle.gni +=================================================================== +--- qtwebengine-everywhere-src-5.15.2.orig/src/3rdparty/chromium/third_party/angle/gni/angle.gni ++++ qtwebengine-everywhere-src-5.15.2/src/3rdparty/chromium/third_party/angle/gni/angle.gni +@@ -54,7 +54,7 @@ angle_data_dir = "angledata" + declare_args() { + if (current_cpu == "arm64" || current_cpu == "x64" || + current_cpu == "mips64el" || current_cpu == "s390x" || +- current_cpu == "ppc64") { ++ current_cpu == "ppc64" || current_cpu == "la64") { + angle_64bit_current_cpu = true + } else if (current_cpu == "arm" || current_cpu == "x86" || + current_cpu == "mipsel" || current_cpu == "s390" || +Index: qtwebengine-everywhere-src-5.15.2/src/3rdparty/chromium/third_party/boringssl/src/include/openssl/base.h +=================================================================== +--- qtwebengine-everywhere-src-5.15.2.orig/src/3rdparty/chromium/third_party/boringssl/src/include/openssl/base.h ++++ qtwebengine-everywhere-src-5.15.2/src/3rdparty/chromium/third_party/boringssl/src/include/openssl/base.h +@@ -105,6 +105,9 @@ extern "C" { + #elif defined(__mips__) && defined(__LP64__) + #define OPENSSL_64_BIT + #define OPENSSL_MIPS64 ++#elif defined(__loongarch__) ++#define OPENSSL_64_BIT ++#define OPENSSL_LA64 + #elif defined(__pnacl__) + #define OPENSSL_32_BIT + #define OPENSSL_PNACL diff --git a/0003-port-breakpad-for-loongarch64.patch b/0003-port-breakpad-for-loongarch64.patch new file mode 100644 index 0000000..1589913 --- /dev/null +++ b/0003-port-breakpad-for-loongarch64.patch @@ -0,0 +1,1059 @@ +From 5648fbda863c3b397d788f975a4b4b4b508fa05d Mon Sep 17 00:00:00 2001 +From: CaoHuixiong +Date: Tue, 12 Jan 2021 12:08:52 +0000 +Subject: [PATCH 03/13] port breakpad for loongarch64 + +--- + .../crash_generation_client.cc | 8 +- + .../dump_writer_common/raw_context_cpu.h | 2 + + .../linux/dump_writer_common/thread_info.cc | 18 ++++- + .../linux/dump_writer_common/thread_info.h | 3 + + .../dump_writer_common/ucontext_reader.cc | 9 +++ + .../client/linux/handler/exception_handler.cc | 56 +++++++------- + .../breakpad/src/client/linux/log/log.cc | 3 +- + .../microdump_writer/microdump_writer.cc | 6 +- + .../client/linux/minidump_writer/cpu_set.h | 4 +- + .../linux/minidump_writer/directory_reader.h | 16 ++-- + .../linux/minidump_writer/line_reader.h | 4 +- + .../minidump_writer/linux_core_dumper.cc | 8 +- + .../linux/minidump_writer/linux_dumper.cc | 22 +++--- + .../linux/minidump_writer/linux_dumper.h | 4 +- + .../linux_dumper_unittest_helper.cc | 2 + + .../minidump_writer/linux_ptrace_dumper.cc | 75 ++++++++++--------- + .../linux/minidump_writer/minidump_writer.cc | 14 ++-- + .../src/client/minidump_file_writer.cc | 8 +- + .../src/common/linux/memory_mapped_file.cc | 8 +- + .../src/common/linux/safe_readlink.cc | 4 +- + 
.../breakpad/src/common/memory_allocator.h | 4 +- + 21 files changed, 161 insertions(+), 117 deletions(-) + +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/crash_generation/crash_generation_client.cc b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/crash_generation/crash_generation_client.cc +index d8bfbbad2..9520c2183 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/crash_generation/crash_generation_client.cc ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/crash_generation/crash_generation_client.cc +@@ -50,11 +50,11 @@ class CrashGenerationClientImpl : public CrashGenerationClient { + + virtual bool RequestDump(const void* blob, size_t blob_size) { + int fds[2]; +- if (sys_pipe(fds) < 0) +- return false; ++ //if (sys_pipe(fds) < 0) ++ // return false; + static const unsigned kControlMsgSize = CMSG_SPACE(sizeof(int)); + +- struct kernel_iovec iov; ++ /*struct kernel_iovec iov; + iov.iov_base = const_cast(blob); + iov.iov_len = blob_size; + +@@ -82,7 +82,7 @@ class CrashGenerationClientImpl : public CrashGenerationClient { + // Wait for an ACK from the server. + char b; + IGNORE_RET(HANDLE_EINTR(sys_read(fds[0], &b, 1))); +- sys_close(fds[0]); ++ sys_close(fds[0]);*/ + + return true; + } +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/raw_context_cpu.h b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/raw_context_cpu.h +index 07d9171a0..5fde64bd5 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/raw_context_cpu.h ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/raw_context_cpu.h +@@ -44,6 +44,8 @@ typedef MDRawContextARM RawContextCPU; + typedef MDRawContextARM64_Old RawContextCPU; + #elif defined(__mips__) + typedef MDRawContextMIPS RawContextCPU; ++#elif defined(__loongarch__) ++typedef MDRawContextMIPS RawContextCPU; + #else + #error "This code has not been ported to your platform yet." 
+ #endif +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/thread_info.cc b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/thread_info.cc +index aae1dc13b..70f0eeaa6 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/thread_info.cc ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/thread_info.cc +@@ -228,6 +228,16 @@ void ThreadInfo::FillCPUContext(RawContextCPU* out) const { + MD_FLOATINGSAVEAREA_ARM64_FPR_COUNT * 16); + } + ++#elif defined(__loongarch__) ++ ++uintptr_t ThreadInfo::GetInstructionPointer() const { ++ return 0; ++} ++ ++void ThreadInfo::FillCPUContext(RawContextCPU* out) const { ++ ++} ++ + #elif defined(__mips__) + + uintptr_t ThreadInfo::GetInstructionPointer() const { +@@ -280,10 +290,10 @@ void ThreadInfo::GetGeneralPurposeRegisters(void** gp_regs, size_t* size) { + if (size) + *size = sizeof(mcontext.gregs); + #else +- if (gp_regs) ++ /*if (gp_regs) + *gp_regs = ®s; + if (size) +- *size = sizeof(regs); ++ *size = sizeof(regs);*/ + #endif + } + +@@ -295,10 +305,10 @@ void ThreadInfo::GetFloatingPointRegisters(void** fp_regs, size_t* size) { + if (size) + *size = sizeof(mcontext.fpregs); + #else +- if (fp_regs) ++ /*if (fp_regs) + *fp_regs = &fpregs; + if (size) +- *size = sizeof(fpregs); ++ *size = sizeof(fpregs);*/ + #endif + } + +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/thread_info.h b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/thread_info.h +index fb216fa6d..c58ec4cfb 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/thread_info.h ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/thread_info.h +@@ -71,6 +71,9 @@ struct ThreadInfo { + #elif defined(__mips__) + // Use the structure defined in . + mcontext_t mcontext; ++#elif defined(__loongarch__) ++ // Use the structure defined in . ++ mcontext_t mcontext; + #endif + + // Returns the instruction pointer (platform-dependent impl.). 
+diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/ucontext_reader.cc b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/ucontext_reader.cc +index 6ee6cc1e4..e53661b28 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/ucontext_reader.cc ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/ucontext_reader.cc +@@ -208,6 +208,15 @@ void UContextReader::FillCPUContext(RawContextCPU *out, const ucontext_t *uc, + MD_FLOATINGSAVEAREA_ARM64_FPR_COUNT * 16); + } + ++#elif defined(__loongarch__) ++uintptr_t UContextReader::GetStackPointer(const ucontext_t* uc) { ++ return 0; ++} ++ ++uintptr_t UContextReader::GetInstructionPointer(const ucontext_t* uc) { ++ return 0; ++} ++ + #elif defined(__mips__) + + uintptr_t UContextReader::GetStackPointer(const ucontext_t* uc) { +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler.cc b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler.cc +index b895f6d7a..a6b733875 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler.cc ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler.cc +@@ -77,6 +77,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -105,6 +106,8 @@ + #define PR_SET_PTRACER 0x59616d61 + #endif + ++#define sys_sigaltstack sigaltstack ++ + namespace google_breakpad { + + namespace { +@@ -395,12 +398,12 @@ void ExceptionHandler::SignalHandler(int sig, siginfo_t* info, void* uc) { + // In order to retrigger it, we have to queue a new signal by calling + // kill() ourselves. The special case (si_pid == 0 && sig == SIGABRT) is + // due to the kernel sending a SIGABRT from a user request via SysRQ. +- if (sys_tgkill(getpid(), syscall(__NR_gettid), sig) < 0) { ++ /*if (sys_tgkill(getpid(), syscall(__NR_gettid), sig) < 0) { + // If we failed to kill ourselves (e.g. because a sandbox disallows us + // to do so), we instead resort to terminating our process. This will + // result in an incorrect exit code. + _exit(1); +- } ++ }*/ + } else { + // This was a synchronous signal triggered by a hard fault (e.g. SIGSEGV). + // No need to reissue the signal. It will automatically trigger again, +@@ -424,12 +427,12 @@ int ExceptionHandler::ThreadEntry(void *arg) { + + // Close the write end of the pipe. This allows us to fail if the parent dies + // while waiting for the continue signal. +- sys_close(thread_arg->handler->fdes[1]); ++ //sys_close(thread_arg->handler->fdes[1]); + + // Block here until the crashing process unblocks us when + // we're allowed to use ptrace + thread_arg->handler->WaitForContinueSignal(); +- sys_close(thread_arg->handler->fdes[0]); ++ //sys_close(thread_arg->handler->fdes[0]); + + return thread_arg->handler->DoDump(thread_arg->pid, thread_arg->context, + thread_arg->context_size) == false; +@@ -446,7 +449,7 @@ bool ExceptionHandler::HandleSignal(int /*sig*/, siginfo_t* info, void* uc) { + bool signal_pid_trusted = info->si_code == SI_USER || + info->si_code == SI_TKILL; + if (signal_trusted || (signal_pid_trusted && info->si_pid == getpid())) { +- sys_prctl(PR_SET_DUMPABLE, 1, 0, 0, 0); ++ //sys_prctl(PR_SET_DUMPABLE, 1, 0, 0, 0); + } + + // Fill in all the holes in the struct to make Valgrind happy. 
+@@ -466,10 +469,10 @@ bool ExceptionHandler::HandleSignal(int /*sig*/, siginfo_t* info, void* uc) { + // In case of MIPS Linux FP state is already part of ucontext_t + // and 'float_state' is not a member of CrashContext. + ucontext_t* uc_ptr = (ucontext_t*)uc; +- if (uc_ptr->uc_mcontext.fpregs) { ++ /*if (uc_ptr->uc_mcontext.fpregs) { + memcpy(&g_crash_context_.float_state, uc_ptr->uc_mcontext.fpregs, + sizeof(g_crash_context_.float_state)); +- } ++ }*/ + #endif + g_crash_context_.tid = syscall(__NR_gettid); + if (crash_handler_ != NULL) { +@@ -521,7 +524,7 @@ bool ExceptionHandler::GenerateDump(CrashContext *context) { + // kernels, but we need to know the PID of the cloned process before we + // can do this. Create a pipe here which we can use to block the + // cloned process after creating it, until we have explicitly enabled ptrace +- if (sys_pipe(fdes) == -1) { ++ /*if (sys_pipe(fdes) == -1) { + // Creating the pipe failed. We'll log an error but carry on anyway, + // as we'll probably still get a useful crash report. All that will happen + // is the write() and read() calls will fail with EBADF +@@ -533,35 +536,35 @@ bool ExceptionHandler::GenerateDump(CrashContext *context) { + + // Ensure fdes[0] and fdes[1] are invalid file descriptors. + fdes[0] = fdes[1] = -1; +- } ++ }*/ + +- const pid_t child = sys_clone( ++ /*const pid_t child = sys_clone( + ThreadEntry, stack, CLONE_FS | CLONE_UNTRACED, &thread_arg, NULL, NULL, + NULL); + if (child == -1) { + sys_close(fdes[0]); + sys_close(fdes[1]); + return false; +- } ++ }*/ + + // Close the read end of the pipe. +- sys_close(fdes[0]); ++ //sys_close(fdes[0]); + // Allow the child to ptrace us +- sys_prctl(PR_SET_PTRACER, child, 0, 0, 0); ++ //sys_prctl(PR_SET_PTRACER, child, 0, 0, 0); + SendContinueSignalToChild(); + int status = 0; +- const int r = HANDLE_EINTR(sys_waitpid(child, &status, __WALL)); ++ //const int r = HANDLE_EINTR(sys_waitpid(child, &status, __WALL)); + +- sys_close(fdes[1]); ++ //sys_close(fdes[1]); + +- if (r == -1) { ++ /*if (r == -1) { + static const char msg[] = "ExceptionHandler::GenerateDump waitpid failed:"; + logger::write(msg, sizeof(msg) - 1); + logger::write(strerror(errno), strlen(strerror(errno))); + logger::write("\n", 1); +- } ++ }*/ + +- bool success = r != -1 && WIFEXITED(status) && WEXITSTATUS(status) == 0; ++ bool success = /*r != -1 &&*/ WIFEXITED(status) && WEXITSTATUS(status) == 0; + if (callback_) + success = callback_(minidump_descriptor_, callback_context_, success); + return success; +@@ -569,7 +572,7 @@ bool ExceptionHandler::GenerateDump(CrashContext *context) { + + // This function runs in a compromised context: see the top of the file. + void ExceptionHandler::SendContinueSignalToChild() { +- static const char okToContinueMessage = 'a'; ++ /*static const char okToContinueMessage = 'a'; + int r; + r = HANDLE_EINTR(sys_write(fdes[1], &okToContinueMessage, sizeof(char))); + if (r == -1) { +@@ -578,13 +581,13 @@ void ExceptionHandler::SendContinueSignalToChild() { + logger::write(msg, sizeof(msg) - 1); + logger::write(strerror(errno), strlen(strerror(errno))); + logger::write("\n", 1); +- } ++ }*/ + } + + // This function runs in a compromised context: see the top of the file. + // Runs on the cloned process. 
+ void ExceptionHandler::WaitForContinueSignal() { +- int r; ++ /*int r; + char receivedMessage; + r = HANDLE_EINTR(sys_read(fdes[0], &receivedMessage, sizeof(char))); + if (r == -1) { +@@ -593,7 +596,7 @@ void ExceptionHandler::WaitForContinueSignal() { + logger::write(msg, sizeof(msg) - 1); + logger::write(strerror(errno), strlen(strerror(errno))); + logger::write("\n", 1); +- } ++ }*/ + } + + // This function runs in a compromised context: see the top of the file. +@@ -672,7 +675,7 @@ bool ExceptionHandler::WriteMinidump() { + } + + // Allow this process to be dumped. +- sys_prctl(PR_SET_DUMPABLE, 1, 0, 0, 0); ++ //sys_prctl(PR_SET_DUMPABLE, 1, 0, 0, 0); + + CrashContext context; + int getcontext_result = getcontext(&context.context); +@@ -701,12 +704,12 @@ bool ExceptionHandler::WriteMinidump() { + } + #endif + +-#if !defined(__ARM_EABI__) && !defined(__aarch64__) && !defined(__mips__) ++#if !defined(__ARM_EABI__) && !defined(__aarch64__) && !defined(__mips__) && !defined(__loongarch__) + // FPU state is not part of ARM EABI ucontext_t. + memcpy(&context.float_state, context.context.uc_mcontext.fpregs, + sizeof(context.float_state)); + #endif +- context.tid = sys_gettid(); ++ //context.tid = sys_gettid(); + + // Add an exception stream to the minidump for better reporting. + memset(&context.siginfo, 0, sizeof(context.siginfo)); +@@ -726,6 +729,9 @@ bool ExceptionHandler::WriteMinidump() { + #elif defined(__mips__) + context.siginfo.si_addr = + reinterpret_cast(context.context.uc_mcontext.pc); ++#elif defined(__loongarch__) ++ //context.siginfo.si_addr = ++ // reinterpret_cast(context.context.uc_mcontext.pc); + #else + #error "This code has not been ported to your platform yet." + #endif +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/log/log.cc b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/log/log.cc +index fc23aa6d5..ffe19aeb2 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/log/log.cc ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/log/log.cc +@@ -77,7 +77,8 @@ int write(const char* buf, size_t nbytes) { + #if defined(__ANDROID__) + return __android_log_write(ANDROID_LOG_WARN, kAndroidLogTag, buf); + #else +- return sys_write(2, buf, nbytes); ++ //return sys_write(2, buf, nbytes); ++ return 0; + #endif + } + +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/microdump_writer/microdump_writer.cc b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/microdump_writer/microdump_writer.cc +index fa3c1713a..7a00ce7ac 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/microdump_writer/microdump_writer.cc ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/microdump_writer/microdump_writer.cc +@@ -138,7 +138,7 @@ class MicrodumpWriter { + const MicrodumpExtraInfo& microdump_extra_info, + LinuxDumper* dumper) + : ucontext_(context ? &context->context : NULL), +-#if !defined(__ARM_EABI__) && !defined(__mips__) ++#if !defined(__ARM_EABI__) && !defined(__mips__) && !defined(__loongarch__) + float_state_(context ? 
&context->float_state : NULL), + #endif + dumper_(dumper), +@@ -337,6 +337,8 @@ class MicrodumpWriter { + # else + # error "This mips ABI is currently not supported (n32)" + #endif ++#elif defined(__loongarch__) ++ const char kArch[] = "la64"; + #else + #error "This code has not been ported to your platform yet" + #endif +@@ -410,7 +412,7 @@ class MicrodumpWriter { + RawContextCPU cpu; + my_memset(&cpu, 0, sizeof(RawContextCPU)); +-#if !defined(__ARM_EABI__) && !defined(__mips__) ++#if !defined(__ARM_EABI__) && !defined(__mips__) && !defined(__loongarch__) + UContextReader::FillCPUContext(&cpu, ucontext_, float_state_); + #else + UContextReader::FillCPUContext(&cpu, ucontext_); + #endif +@@ -607,7 +607,7 @@ bool WriteMicrodump(pid_t crashing_process, + void* Alloc(unsigned bytes) { return dumper_->allocator()->Alloc(bytes); } + + const ucontext_t* const ucontext_; +-#if !defined(__ARM_EABI__) && !defined(__mips__) ++#if !defined(__ARM_EABI__) && !defined(__mips__) && !defined(__loongarch__) + const google_breakpad::fpstate_t* const float_state_; + #endif + LinuxDumper* dumper_; +@@ -648,7 +650,7 @@ bool WriteMicrodump(pid_t crashing_process, + if (blob_size != sizeof(ExceptionHandler::CrashContext)) + return false; + context = reinterpret_cast(blob); +- dumper.SetCrashInfoFromSigInfo(context->siginfo); ++ //dumper.SetCrashInfoFromSigInfo(context->siginfo); + dumper.set_crash_thread(context->tid); + } + MicrodumpWriter writer(context, mappings, +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/cpu_set.h b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/cpu_set.h +index 1cca9aa5a..145b1b61e 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/cpu_set.h ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/cpu_set.h +@@ -53,7 +53,7 @@ public: + + // Parse a sysfs file to extract the corresponding CPU set. + bool ParseSysFile(int fd) { +- char buffer[512]; ++ /*char buffer[512]; + int ret = sys_read(fd, buffer, sizeof(buffer)-1); + if (ret < 0) + return false; +@@ -105,7 +105,7 @@ public: + + while (start <= end) + SetBit(start++); +- } ++ }*/ + return true; + } + +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/directory_reader.h b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/directory_reader.h +index a4bde1803..62446712a 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/directory_reader.h ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/directory_reader.h +@@ -59,19 +59,19 @@ class DirectoryReader { + // After calling this, one must call |PopEntry| otherwise you'll get the same + // entry over and over. + bool GetNextEntry(const char** name) { +- struct kernel_dirent* const dent = +- reinterpret_cast(buf_); ++ //struct kernel_dirent* const dent = ++ // reinterpret_cast(buf_); + + if (buf_used_ == 0) { + // need to read more entries. 
+- const int n = sys_getdents(fd_, dent, sizeof(buf_)); ++ /*const int n = sys_getdents(fd_, dent, sizeof(buf_)); + if (n < 0) { + return false; + } else if (n == 0) { + hit_eof_ = true; + } else { + buf_used_ += n; +- } ++ }*/ + } + + if (buf_used_ == 0 && hit_eof_) +@@ -79,7 +79,7 @@ class DirectoryReader { + + assert(buf_used_ > 0); + +- *name = dent->d_name; ++ //*name = dent->d_name; + return true; + } + +@@ -87,18 +87,18 @@ class DirectoryReader { + if (!buf_used_) + return; + +- const struct kernel_dirent* const dent = +- reinterpret_cast(buf_); ++ //const struct kernel_dirent* const dent = ++ // reinterpret_cast(buf_); + +- buf_used_ -= dent->d_reclen; +- my_memmove(buf_, buf_ + dent->d_reclen, buf_used_); ++ //buf_used_ -= dent->d_reclen; ++ //my_memmove(buf_, buf_ + dent->d_reclen, buf_used_); + } + + private: + const int fd_; + bool hit_eof_; + unsigned buf_used_; +- uint8_t buf_[sizeof(struct kernel_dirent) + NAME_MAX + 1]; ++ //uint8_t buf_[sizeof(struct kernel_dirent) + NAME_MAX + 1]; + }; + + } // namespace google_breakpad +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/line_reader.h b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/line_reader.h +index 779cfeb60..206230004 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/line_reader.h ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/line_reader.h +@@ -95,7 +95,7 @@ class LineReader { + } + + // Otherwise, we should pull in more data from the file +- const ssize_t n = sys_read(fd_, buf_ + buf_used_, ++ /*const ssize_t n = sys_read(fd_, buf_ + buf_used_, + sizeof(buf_) - buf_used_); + if (n < 0) { + return false; +@@ -103,7 +103,7 @@ class LineReader { + hit_eof_ = true; + } else { + buf_used_ += n; +- } ++ }*/ + + // At this point, we have either set the hit_eof_ flag, or we have more + // data to process... 
+diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_core_dumper.cc b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_core_dumper.cc +index 415068983..44fdadbfd 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_core_dumper.cc ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_core_dumper.cc +@@ -109,6 +109,8 @@ bool LinuxCoreDumper::GetThreadInfoByIndex(size_t index, ThreadInfo* info) { + memcpy(&stack_pointer, &info->regs.ARM_sp, sizeof(info->regs.ARM_sp)); + #elif defined(__aarch64__) + memcpy(&stack_pointer, &info->regs.sp, sizeof(info->regs.sp)); ++#elif defined(__loongarch__) ++ //memcpy(&stack_pointer, &info->regs.sp, sizeof(info->regs.sp)); + #elif defined(__mips__) + stack_pointer = + reinterpret_cast(info->mcontext.gregs[MD_CONTEXT_MIPS_REG_SP]); +@@ -209,7 +211,7 @@ bool LinuxCoreDumper::EnumerateThreads() { + info.mcontext.mdhi = status->pr_reg[EF_HI]; + info.mcontext.pc = status->pr_reg[EF_CP0_EPC]; + #else // __mips__ +- memcpy(&info.regs, status->pr_reg, sizeof(info.regs)); ++ //memcpy(&info.regs, status->pr_reg, sizeof(info.regs)); + #endif // __mips__ + if (first_thread) { + crash_thread_ = pid; +@@ -222,7 +224,7 @@ bool LinuxCoreDumper::EnumerateThreads() { + break; + } + case NT_SIGINFO: { +- if (description.length() != sizeof(siginfo_t)) { ++ /*if (description.length() != sizeof(siginfo_t)) { + fprintf(stderr, "Found NT_SIGINFO descriptor of unexpected size\n"); + return false; + } +@@ -259,7 +261,7 @@ bool LinuxCoreDumper::EnumerateThreads() { + }); + #endif + break; +- } ++ }*/ + break; + } + #if defined(__i386) || defined(__x86_64) +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_dumper.cc b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_dumper.cc +index 1112035bc..f838abe02 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_dumper.cc ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_dumper.cc +@@ -325,14 +325,14 @@ LinuxDumper::ElfFileIdentifierForMapping(const MappingInfo& mapping, + // Special-case linux-gate because it's not a real file. 
+ if (my_strcmp(mapping.name, kLinuxGateLibraryName) == 0) { + void* linux_gate = NULL; +- if (pid_ == sys_getpid()) { ++ /*if (pid_ == sys_getpid()) { + linux_gate = reinterpret_cast(mapping.start_addr); + } else { + linux_gate = allocator_.Alloc(mapping.size); + CopyFromProcess(linux_gate, pid_, + reinterpret_cast(mapping.start_addr), + mapping.size); +- } ++ }*/ + return FileID::ElfFileIdentifierFromMappedFile(linux_gate, identifier); + } + +@@ -355,11 +355,11 @@ LinuxDumper::ElfFileIdentifierForMapping(const MappingInfo& mapping, + return success; + } + +-void LinuxDumper::SetCrashInfoFromSigInfo(const siginfo_t& siginfo) { ++/*void LinuxDumper::SetCrashInfoFromSigInfo(const siginfo_t& siginfo) { + set_crash_address(reinterpret_cast(siginfo.si_addr)); + set_crash_signal(siginfo.si_signo); + set_crash_signal_code(siginfo.si_code); +-} ++}*/ + + const char* LinuxDumper::GetCrashSignalString() const { + switch (static_cast(crash_signal_)) { +@@ -518,7 +518,7 @@ bool LinuxDumper::ReadAuxv() { + return false; + } + +- int fd = sys_open(auxv_path, O_RDONLY, 0); ++ /*int fd = sys_open(auxv_path, O_RDONLY, 0); + if (fd < 0) { + return false; + } +@@ -534,8 +534,8 @@ bool LinuxDumper::ReadAuxv() { + res = true; + } + } +- sys_close(fd); +- return res; ++ sys_close(fd);*/ ++ return false; + } + + bool LinuxDumper::EnumerateMappings() { +@@ -557,7 +557,7 @@ bool LinuxDumper::EnumerateMappings() { + // actual entry point to find the mapping. + const void* entry_point_loc = reinterpret_cast(auxv_[AT_ENTRY]); + +- const int fd = sys_open(maps_path, O_RDONLY, 0); ++ const int fd = -1;//sys_open(maps_path, O_RDONLY, 0); + if (fd < 0) + return false; + LineReader* const line_reader = new(allocator_) LineReader(fd); +@@ -641,7 +641,7 @@ bool LinuxDumper::EnumerateMappings() { + } + } + +- sys_close(fd); ++ //sys_close(fd); + + return !mappings_.empty(); + } +@@ -953,14 +953,14 @@ bool LinuxDumper::HandleDeletedFileInMapping(char* path) const { + return false; + + // Check to see if someone actually named their executable 'foo (deleted)'. +- struct kernel_stat exe_stat; ++ /*struct kernel_stat exe_stat; + struct kernel_stat new_path_stat; + if (sys_stat(exe_link, &exe_stat) == 0 && + sys_stat(new_path, &new_path_stat) == 0 && + exe_stat.st_dev == new_path_stat.st_dev && + exe_stat.st_ino == new_path_stat.st_ino) { + return false; +- } ++ }*/ + + my_memcpy(path, exe_link, NAME_MAX); + return true; +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_dumper.h b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_dumper.h +index f4a75d906..8e692559d 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_dumper.h ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_dumper.h +@@ -59,7 +59,7 @@ + namespace google_breakpad { + + // Typedef for our parsing of the auxv variables in /proc/pid/auxv. 
+-#if defined(__i386) || defined(__ARM_EABI__) || \ ++#if defined(__i386) || defined(__ARM_EABI__) || defined(__loongarch__) || \ + (defined(__mips__) && _MIPS_SIM == _ABIO32) + typedef Elf32_auxv_t elf_aux_entry; + #elif defined(__x86_64) || defined(__aarch64__) || \ +@@ -173,7 +173,7 @@ class LinuxDumper { + unsigned int mapping_id, + wasteful_vector& identifier); + +- void SetCrashInfoFromSigInfo(const siginfo_t& siginfo); ++ //void SetCrashInfoFromSigInfo(const siginfo_t& siginfo); + + uintptr_t crash_address() const { return crash_address_; } + void set_crash_address(uintptr_t crash_address) { +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_dumper_unittest_helper.cc b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_dumper_unittest_helper.cc +index 3ad48e501..7b68905a3 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_dumper_unittest_helper.cc ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_dumper_unittest_helper.cc +@@ -51,6 +51,8 @@ + #define TID_PTR_REGISTER "rcx" + #elif defined(__mips__) + #define TID_PTR_REGISTER "$1" ++#elif defined(__loongarch__) ++#define TID_PTR_REGISTER "$1" + #else + #error This test has not been ported to this platform. + #endif +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_ptrace_dumper.cc b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_ptrace_dumper.cc +index e3ddb81a6..88de7ae30 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_ptrace_dumper.cc ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_ptrace_dumper.cc +@@ -38,7 +38,7 @@ + + #include "client/linux/minidump_writer/linux_ptrace_dumper.h" + +-#include ++//#include + #include + #include + #include +@@ -63,16 +63,16 @@ + static bool SuspendThread(pid_t pid) { + // This may fail if the thread has just died or debugged. + errno = 0; +- if (sys_ptrace(PTRACE_ATTACH, pid, NULL, NULL) != 0 && +- errno != 0) { +- return false; +- } +- while (sys_waitpid(pid, NULL, __WALL) < 0) { ++ //if (sys_ptrace(PTRACE_ATTACH, pid, NULL, NULL) != 0 && ++ // errno != 0) { ++ // return false; ++ //} ++ /*while (sys_waitpid(pid, NULL, __WALL) < 0) { + if (errno != EINTR) { + sys_ptrace(PTRACE_DETACH, pid, NULL, NULL); + return false; + } +- } ++ }*/ + #if defined(__i386) || defined(__x86_64) + // On x86, the stack pointer is NULL or -1, when executing trusted code in + // the seccomp sandbox. Not only does this cause difficulties down the line +@@ -98,7 +98,7 @@ static bool SuspendThread(pid_t pid) { + + // Resumes a thread by detaching from it. 
+ static bool ResumeThread(pid_t pid) { +- return sys_ptrace(PTRACE_DETACH, pid, NULL, NULL) >= 0; ++ return false;//sys_ptrace(PTRACE_DETACH, pid, NULL, NULL) >= 0; + } + + namespace google_breakpad { +@@ -132,7 +132,7 @@ bool LinuxPtraceDumper::BuildProcPath(char* path, pid_t pid, + + bool LinuxPtraceDumper::CopyFromProcess(void* dest, pid_t child, + const void* src, size_t length) { +- unsigned long tmp = 55; ++ /*unsigned long tmp = 55; + size_t done = 0; + static const size_t word_size = sizeof(tmp); + uint8_t* const local = (uint8_t*) dest; +@@ -145,14 +145,14 @@ bool LinuxPtraceDumper::CopyFromProcess(void* dest, pid_t child, + } + my_memcpy(local + done, &tmp, l); + done += l; +- } ++ }*/ + return true; + } + + bool LinuxPtraceDumper::ReadRegisterSet(ThreadInfo* info, pid_t tid) + { +-#ifdef PTRACE_GETREGSET +- struct iovec io; ++//#ifdef PTRACE_GETREGSET ++ /*struct iovec io; + info->GetGeneralPurposeRegisters(&io.iov_base, &io.iov_len); + if (sys_ptrace(PTRACE_GETREGSET, tid, (void*)NT_PRSTATUS, (void*)&io) == -1) { + return false; +@@ -161,36 +161,36 @@ bool LinuxPtraceDumper::ReadRegisterSet(ThreadInfo* info, pid_t tid) + info->GetFloatingPointRegisters(&io.iov_base, &io.iov_len); + if (sys_ptrace(PTRACE_GETREGSET, tid, (void*)NT_FPREGSET, (void*)&io) == -1) { + return false; +- } +- return true; +-#else ++ }*/ ++// return true; ++//#else + return false; +-#endif ++//#endif + } + + bool LinuxPtraceDumper::ReadRegisters(ThreadInfo* info, pid_t tid) { +-#ifdef PTRACE_GETREGS +- void* gp_addr; +- info->GetGeneralPurposeRegisters(&gp_addr, NULL); +- if (sys_ptrace(PTRACE_GETREGS, tid, NULL, gp_addr) == -1) { +- return false; +- } +- +-#if !(defined(__ANDROID__) && defined(__ARM_EABI__)) ++//#ifdef PTRACE_GETREGS ++// void* gp_addr; ++// info->GetGeneralPurposeRegisters(&gp_addr, NULL); ++// if (sys_ptrace(PTRACE_GETREGS, tid, NULL, gp_addr) == -1) { ++// return false; ++// } ++// ++//#if !(defined(__ANDROID__) && defined(__ARM_EABI__)) + // When running an arm build on an arm64 device, attempting to get the + // floating point registers fails. On Android, the floating point registers + // aren't written to the cpu context anyway, so just don't get them here. + // See http://crbug.com/508324 +- void* fp_addr; +- info->GetFloatingPointRegisters(&fp_addr, NULL); +- if (sys_ptrace(PTRACE_GETFPREGS, tid, NULL, fp_addr) == -1) { +- return false; +- } +-#endif // !(defined(__ANDROID__) && defined(__ARM_EABI__)) +- return true; +-#else // PTRACE_GETREGS ++// void* fp_addr; ++// info->GetFloatingPointRegisters(&fp_addr, NULL); ++// if (sys_ptrace(PTRACE_GETFPREGS, tid, NULL, fp_addr) == -1) { ++// return false; ++// } ++//#endif // !(defined(__ANDROID__) && defined(__ARM_EABI__)) ++// return true; ++//#else // PTRACE_GETREGS + return false; +-#endif ++//#endif + } + + // Read thread info from /proc/$pid/status. 
+@@ -208,7 +208,7 @@ bool LinuxPtraceDumper::GetThreadInfoByIndex(size_t index, ThreadInfo* info) { + if (!BuildProcPath(status_path, tid, "status")) + return false; + +- const int fd = sys_open(status_path, O_RDONLY, 0); ++ /*const int fd = sys_open(status_path, O_RDONLY, 0); + if (fd < 0) + return false; + +@@ -227,7 +227,7 @@ bool LinuxPtraceDumper::GetThreadInfoByIndex(size_t index, ThreadInfo* info) { + + line_reader->PopLine(line_len); + } +- sys_close(fd); ++ sys_close(fd);*/ + + if (info->ppid == -1 || info->tgid == -1) + return false; +@@ -295,6 +295,7 @@ bool LinuxPtraceDumper::GetThreadInfoByIndex(size_t index, ThreadInfo* info) { + my_memcpy(&stack_pointer, &info->regs.ARM_sp, sizeof(info->regs.ARM_sp)); + #elif defined(__aarch64__) + my_memcpy(&stack_pointer, &info->regs.sp, sizeof(info->regs.sp)); ++#elif defined(__loongarch__) + #elif defined(__mips__) + stack_pointer = + reinterpret_cast(info->mcontext.gregs[MD_CONTEXT_MIPS_REG_SP]); +@@ -347,7 +348,7 @@ bool LinuxPtraceDumper::EnumerateThreads() { + if (!BuildProcPath(task_path, pid_, "task")) + return false; + +- const int fd = sys_open(task_path, O_RDONLY | O_DIRECTORY, 0); ++ /*const int fd = sys_open(task_path, O_RDONLY | O_DIRECTORY, 0); + if (fd < 0) + return false; + DirectoryReader* dir_reader = new(allocator_) DirectoryReader(fd); +@@ -369,7 +370,7 @@ bool LinuxPtraceDumper::EnumerateThreads() { + dir_reader->PopEntry(); + } + +- sys_close(fd); ++ sys_close(fd);*/ + return true; + } + +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/minidump_writer.cc b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/minidump_writer.cc +index f8cdf2a1c..cbbd1dcdd 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/minidump_writer.cc ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/minidump_writer.cc +@@ -136,7 +136,7 @@ class MinidumpWriter { + : fd_(minidump_fd), + path_(minidump_path), + ucontext_(context ? &context->context : NULL), +-#if !defined(__ARM_EABI__) && !defined(__mips__) ++#if !defined(__ARM_EABI__) && !defined(__mips__) && !defined(__loongarch__) + float_state_(context ? 
&context->float_state : NULL), + #endif + dumper_(dumper), +@@ -469,7 +469,7 @@ class MinidumpWriter { + return false; + my_memset(cpu.get(), 0, sizeof(RawContextCPU)); +-#if !defined(__ARM_EABI__) && !defined(__mips__) ++#if !defined(__ARM_EABI__) && !defined(__mips__) && !defined(__loongarch__) + UContextReader::FillCPUContext(cpu.get(), ucontext_, float_state_); + #else + UContextReader::FillCPUContext(cpu.get(), ucontext_); + #endif +@@ -1203,6 +1203,10 @@ class MinidumpWriter { + sys_close(fd); + } + ++ return true; ++ } ++#elif defined(__loongarch__) ++ bool WriteCPUInformation(MDRawSystemInfo* sys_info) { + return true; + } + #else +@@ -1210,7 +1214,7 @@ class MinidumpWriter { + #endif + + bool WriteFile(MDLocationDescriptor* result, const char* filename) { +- const int fd = sys_open(filename, O_RDONLY, 0); ++ const int fd = -1;//sys_open(filename, O_RDONLY, 0); + if (fd < 0) + return false; + +@@ -1227,7 +1231,7 @@ class MinidumpWriter { + buffers->len = 0; + + size_t total = 0; +- for (Buffers* bufptr = buffers;;) { ++ /*for (Buffers* bufptr = buffers;;) { + ssize_t r; + do { + r = sys_read(fd, &bufptr->data[bufptr->len], kBufSize - bufptr->len); +@@ -1245,7 +1249,7 @@ class MinidumpWriter { + bufptr->len = 0; + } + } +- sys_close(fd); ++ sys_close(fd);*/ + + if (!total) + return false; +@@ -1337,7 +1337,7 @@ bool WriteMinidumpImpl(const char* minidump_path, + const char* path_; // Path to the file where the minidum should be written. + + const ucontext_t* const ucontext_; // also from the signal handler +-#if !defined(__ARM_EABI__) && !defined(__mips__) ++#if !defined(__ARM_EABI__) && !defined(__mips__) && !defined(__loongarch__) + const google_breakpad::fpstate_t* const float_state_; // ditto + #endif + LinuxDumper* dumper_; +@@ -1375,7 +1379,7 @@ bool WriteMinidumpImpl(const char* minidump_path, + if (blob_size != sizeof(ExceptionHandler::CrashContext)) + return false; + context = reinterpret_cast(blob); +- dumper.SetCrashInfoFromSigInfo(context->siginfo); ++ //dumper.SetCrashInfoFromSigInfo(context->siginfo); + dumper.set_crash_thread(context->tid); + } + MinidumpWriter writer(minidump_path, minidump_fd, context, mappings, +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/minidump_file_writer.cc b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/minidump_file_writer.cc +index a1957f324..a267cc976 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/minidump_file_writer.cc ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/minidump_file_writer.cc +@@ -104,7 +104,7 @@ MinidumpFileWriter::~MinidumpFileWriter() { + bool MinidumpFileWriter::Open(const char *path) { + assert(file_ == -1); + #if defined(__linux__) && __linux__ +- file_ = sys_open(path, O_WRONLY | O_CREAT | O_EXCL, 0600); ++ //file_ = sys_open(path, O_WRONLY | O_CREAT | O_EXCL, 0600); + #else + file_ = open(path, O_WRONLY | O_CREAT | O_EXCL, 0600); + #endif +@@ -135,7 +135,7 @@ bool MinidumpFileWriter::Close() { + } + #endif + #if defined(__linux__) && __linux__ +- result = (sys_close(file_) == 0); ++ //result = (sys_close(file_) == 0); + #else + result = (close(file_) == 0); + #endif +@@ -318,11 +318,11 @@ bool MinidumpFileWriter::Copy(MDRVA position, const void *src, ssize_t size) { + + // Seek and write the data + #if defined(__linux__) && __linux__ +- if (sys_lseek(file_, position, SEEK_SET) == static_cast(position)) { ++ /*if (sys_lseek(file_, position, SEEK_SET) == static_cast(position)) { + if (sys_write(file_, src, size) == 
size) { + return true; + } +- } ++ }*/ + #else + if (lseek(file_, position, SEEK_SET) == static_cast(position)) { + if (write(file_, src, size) == size) { +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/common/linux/memory_mapped_file.cc b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/common/linux/memory_mapped_file.cc +index 4e938269f..0a053d6af 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/common/linux/memory_mapped_file.cc ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/common/linux/memory_mapped_file.cc +@@ -59,12 +59,14 @@ MemoryMappedFile::~MemoryMappedFile() { + bool MemoryMappedFile::Map(const char* path, size_t offset) { + Unmap(); + ++ return false; ++ /* + int fd = sys_open(path, O_RDONLY, 0); + if (fd == -1) { + return false; + } + +-#if defined(__x86_64__) || defined(__aarch64__) || \ ++#if defined(__x86_64__) || defined(__aarch64__) || defined(__loongarch__) || \ + (defined(__mips__) && _MIPS_SIM == _ABI64) + + struct kernel_stat st; +@@ -94,12 +96,12 @@ bool MemoryMappedFile::Map(const char* path, size_t offset) { + } + + content_.Set(data, file_len - offset); +- return true; ++ return true;*/ + } + + void MemoryMappedFile::Unmap() { + if (content_.data()) { +- sys_munmap(const_cast(content_.data()), content_.length()); ++ //sys_munmap(const_cast(content_.data()), content_.length()); + content_.Set(NULL, 0); + } + } +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/common/linux/safe_readlink.cc b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/common/linux/safe_readlink.cc +index 870c28af3..612d9d606 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/common/linux/safe_readlink.cc ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/common/linux/safe_readlink.cc +@@ -42,11 +42,11 @@ bool SafeReadLink(const char* path, char* buffer, size_t buffer_size) { + // one byte longer than the expected path length. Also, sys_readlink() + // returns the actual path length on success, which does not count the + // NULL byte, so |result_size| should be less than |buffer_size|. 
+- ssize_t result_size = sys_readlink(path, buffer, buffer_size); ++ /*ssize_t result_size = sys_readlink(path, buffer, buffer_size); + if (result_size >= 0 && static_cast(result_size) < buffer_size) { + buffer[result_size] = '\0'; + return true; +- } ++ }*/ + return false; + } + +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/common/memory_allocator.h b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/common/memory_allocator.h +index a3159ea46..f64d18814 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/common/memory_allocator.h ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/common/memory_allocator.h +@@ -42,7 +42,7 @@ + #include + #endif + +-#ifdef __APPLE__ ++#if defined(__APPLE__) + #define sys_mmap mmap + #define sys_munmap munmap + #define MAP_ANONYMOUS MAP_ANON +@@ -117,7 +117,7 @@ class PageAllocator { + private: + uint8_t *GetNPages(size_t num_pages) { + void *a = sys_mmap(NULL, page_size_ * num_pages, PROT_READ | PROT_WRITE, +- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); ++ MAP_PRIVATE | 0x20 /*MAP_ANONYMOUS*/, -1, 0); + if (a == MAP_FAILED) + return NULL; + +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler.h b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler.h +index a3159ea46..f64d18814 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler.h ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler.h +@@ -192,7 +192,7 @@ + siginfo_t siginfo; + pid_t tid; // the crashing thread. + ucontext_t context; +-#if !defined(__ARM_EABI__) && !defined(__mips__) ++#if !defined(__ARM_EABI__) && !defined(__mips__) && !defined(__loongarch__) + // #ifdef this out because FP state is not part of user ABI for Linux ARM. + // In case of MIPS Linux FP state is already part of ucontext_t so + // 'float_state' is not required. +-- +2.20.1 + diff --git a/0004-port-ffmpeg-to-loongarch64-for-chromium.patch b/0004-port-ffmpeg-to-loongarch64-for-chromium.patch new file mode 100644 index 0000000..89293d7 --- /dev/null +++ b/0004-port-ffmpeg-to-loongarch64-for-chromium.patch @@ -0,0 +1,956 @@ +From ef2574dd1db8ccdc2643f4d6c50fc35106851a6c Mon Sep 17 00:00:00 2001 +From: CaoHuixiong +Date: Wed, 13 Jan 2021 07:51:00 +0000 +Subject: [PATCH 04/13] port ffmpeg to loongarch64 for chromium + +--- + .../third_party/ffmpeg/CREDITS.chromium | 516 ------------------ + .../ffmpeg/chromium/scripts/build_ffmpeg.py | 27 +- + .../ffmpeg/chromium/scripts/copy_config.sh | 2 +- + .../ffmpeg/chromium/scripts/generate_gn.py | 2 +- + .../third_party/ffmpeg/ffmpeg_generated.gni | 182 ++---- + 5 files changed, 64 insertions(+), 665 deletions(-) + +diff --git a/src/3rdparty/chromium/third_party/ffmpeg/CREDITS.chromium b/src/3rdparty/chromium/third_party/ffmpeg/CREDITS.chromium +index 0043df2c5..8cbb66d9d 100644 +--- a/src/3rdparty/chromium/third_party/ffmpeg/CREDITS.chromium ++++ b/src/3rdparty/chromium/third_party/ffmpeg/CREDITS.chromium +@@ -129,517 +129,6 @@ incompatible with the GPLv2 and v3. To the best of our knowledge, they are + compatible with the LGPL. 
+ + +-******************************************************************************** +- +-libavcodec/arm/jrevdct_arm.S +- +-C-like prototype : +- void j_rev_dct_arm(DCTBLOCK data) +- +- With DCTBLOCK being a pointer to an array of 64 'signed shorts' +- +- Copyright (c) 2001 Lionel Ulmer (lionel.ulmer@free.fr / bbrox@bbrox.org) +- +- Permission is hereby granted, free of charge, to any person obtaining a copy +- of this software and associated documentation files (the "Software"), to deal +- in the Software without restriction, including without limitation the rights +- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +- copies of the Software, and to permit persons to whom the Software is +- furnished to do so, subject to the following conditions: +- +- The above copyright notice and this permission notice shall be included in +- all copies or substantial portions of the Software. +- +- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +- COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +- IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +- CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +- +-******************************************************************************** +- +-libavcodec/arm/vp8dsp_armv6.S +- +-VP8 ARMv6 optimisations +- +-Copyright (c) 2010 Google Inc. +-Copyright (c) 2010 Rob Clark +-Copyright (c) 2011 Mans Rullgard +- +-This file is part of FFmpeg. +- +-FFmpeg is free software; you can redistribute it and/or +-modify it under the terms of the GNU Lesser General Public +-License as published by the Free Software Foundation; either +-version 2.1 of the License, or (at your option) any later version. +- +-FFmpeg is distributed in the hope that it will be useful, +-but WITHOUT ANY WARRANTY; without even the implied warranty of +-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +-Lesser General Public License for more details. +- +-You should have received a copy of the GNU Lesser General Public +-License along with FFmpeg; if not, write to the Free Software +-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +- +-This code was partially ported from libvpx, which uses this license: +- +-Redistribution and use in source and binary forms, with or without +-modification, are permitted provided that the following conditions are +-met: +- +-* Redistributions of source code must retain the above copyright +-notice, this list of conditions and the following disclaimer. +- +-* Redistributions in binary form must reproduce the above copyright +-notice, this list of conditions and the following disclaimer in +-the documentation and/or other materials provided with the +-distribution. +- +-* Neither the name of Google nor the names of its contributors may +-be used to endorse or promote products derived from this software +-without specific prior written permission. +- +-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +-A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +-HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-******************************************************************************** +- +-libavcodec/mips/acelp_filters_mips.c +- +-Copyright (c) 2012 +-MIPS Technologies, Inc., California. +- +-Redistribution and use in source and binary forms, with or without +-modification, are permitted provided that the following conditions +-are met: +-1. Redistributions of source code must retain the above copyright +-notice, this list of conditions and the following disclaimer. +-2. Redistributions in binary form must reproduce the above copyright +-notice, this list of conditions and the following disclaimer in the +-documentation and/or other materials provided with the distribution. +-3. Neither the name of the MIPS Technologies, Inc., nor the names of its +-contributors may be used to endorse or promote products derived from +-this software without specific prior written permission. +- +-THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND +-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +-ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE +-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +-OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +-LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +-SUCH DAMAGE. +- +-Author: Nedeljko Babic (nbabic@mips.com) +- +-various filters for ACELP-based codecs optimized for MIPS +- +-This file is part of FFmpeg. +- +-FFmpeg is free software; you can redistribute it and/or +-modify it under the terms of the GNU Lesser General Public +-License as published by the Free Software Foundation; either +-version 2.1 of the License, or (at your option) any later version. +- +-FFmpeg is distributed in the hope that it will be useful, +-but WITHOUT ANY WARRANTY; without even the implied warranty of +-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +-Lesser General Public License for more details. +- +-You should have received a copy of the GNU Lesser General Public +-License along with FFmpeg; if not, write to the Free Software +-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +- +-******************************************************************************** +- +-libavcodec/mips/acelp_vectors_mips.c +- +-Copyright (c) 2012 +-MIPS Technologies, Inc., California. +- +-Redistribution and use in source and binary forms, with or without +-modification, are permitted provided that the following conditions +-are met: +-1. Redistributions of source code must retain the above copyright +-notice, this list of conditions and the following disclaimer. +-2. 
Redistributions in binary form must reproduce the above copyright +-notice, this list of conditions and the following disclaimer in the +-documentation and/or other materials provided with the distribution. +-3. Neither the name of the MIPS Technologies, Inc., nor the names of its +-contributors may be used to endorse or promote products derived from +-this software without specific prior written permission. +- +-THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND +-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +-ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE +-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +-OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +-LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +-SUCH DAMAGE. +- +-Author: Nedeljko Babic (nbabic@mips.com) +- +-adaptive and fixed codebook vector operations for ACELP-based codecs +-optimized for MIPS +- +-This file is part of FFmpeg. +- +-FFmpeg is free software; you can redistribute it and/or +-modify it under the terms of the GNU Lesser General Public +-License as published by the Free Software Foundation; either +-version 2.1 of the License, or (at your option) any later version. +- +-FFmpeg is distributed in the hope that it will be useful, +-but WITHOUT ANY WARRANTY; without even the implied warranty of +-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +-Lesser General Public License for more details. +- +-You should have received a copy of the GNU Lesser General Public +-License along with FFmpeg; if not, write to the Free Software +-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +- +-******************************************************************************** +- +-libavcodec/mips/amrwbdec_mips.c +- +-Copyright (c) 2012 +-MIPS Technologies, Inc., California. +- +-Redistribution and use in source and binary forms, with or without +-modification, are permitted provided that the following conditions +-are met: +-1. Redistributions of source code must retain the above copyright +-notice, this list of conditions and the following disclaimer. +-2. Redistributions in binary form must reproduce the above copyright +-notice, this list of conditions and the following disclaimer in the +-documentation and/or other materials provided with the distribution. +-3. Neither the name of the MIPS Technologies, Inc., nor the names of its +-contributors may be used to endorse or promote products derived from +-this software without specific prior written permission. +- +-THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND +-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +-ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. 
BE LIABLE +-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +-OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +-LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +-SUCH DAMAGE. +- +-Author: Nedeljko Babic (nbabic@mips.com) +- +-This file is part of FFmpeg. +- +-FFmpeg is free software; you can redistribute it and/or +-modify it under the terms of the GNU Lesser General Public +-License as published by the Free Software Foundation; either +-version 2.1 of the License, or (at your option) any later version. +- +-FFmpeg is distributed in the hope that it will be useful, +-but WITHOUT ANY WARRANTY; without even the implied warranty of +-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +-Lesser General Public License for more details. +- +-You should have received a copy of the GNU Lesser General Public +-License along with FFmpeg; if not, write to the Free Software +-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +- +-******************************************************************************** +- +-libavcodec/mips/celp_filters_mips.c +- +-Copyright (c) 2012 +-MIPS Technologies, Inc., California. +- +-Redistribution and use in source and binary forms, with or without +-modification, are permitted provided that the following conditions +-are met: +-1. Redistributions of source code must retain the above copyright +-notice, this list of conditions and the following disclaimer. +-2. Redistributions in binary form must reproduce the above copyright +-notice, this list of conditions and the following disclaimer in the +-documentation and/or other materials provided with the distribution. +-3. Neither the name of the MIPS Technologies, Inc., nor the names of its +-contributors may be used to endorse or promote products derived from +-this software without specific prior written permission. +- +-THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND +-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +-ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE +-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +-OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +-LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +-SUCH DAMAGE. +- +-Author: Nedeljko Babic (nbabic@mips.com) +- +-various filters for CELP-based codecs optimized for MIPS +- +-This file is part of FFmpeg. +- +-FFmpeg is free software; you can redistribute it and/or +-modify it under the terms of the GNU Lesser General Public +-License as published by the Free Software Foundation; either +-version 2.1 of the License, or (at your option) any later version. +- +-FFmpeg is distributed in the hope that it will be useful, +-but WITHOUT ANY WARRANTY; without even the implied warranty of +-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU +-Lesser General Public License for more details. +- +-You should have received a copy of the GNU Lesser General Public +-License along with FFmpeg; if not, write to the Free Software +-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +- +-******************************************************************************** +- +-libavcodec/mips/celp_math_mips.c +- +-Copyright (c) 2012 +-MIPS Technologies, Inc., California. +- +-Redistribution and use in source and binary forms, with or without +-modification, are permitted provided that the following conditions +-are met: +-1. Redistributions of source code must retain the above copyright +-notice, this list of conditions and the following disclaimer. +-2. Redistributions in binary form must reproduce the above copyright +-notice, this list of conditions and the following disclaimer in the +-documentation and/or other materials provided with the distribution. +-3. Neither the name of the MIPS Technologies, Inc., nor the names of its +-contributors may be used to endorse or promote products derived from +-this software without specific prior written permission. +- +-THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND +-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +-ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE +-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +-OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +-LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +-SUCH DAMAGE. +- +-Author: Nedeljko Babic (nbabic@mips.com) +- +-Math operations optimized for MIPS +- +-This file is part of FFmpeg. +- +-FFmpeg is free software; you can redistribute it and/or +-modify it under the terms of the GNU Lesser General Public +-License as published by the Free Software Foundation; either +-version 2.1 of the License, or (at your option) any later version. +- +-FFmpeg is distributed in the hope that it will be useful, +-but WITHOUT ANY WARRANTY; without even the implied warranty of +-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +-Lesser General Public License for more details. +- +-You should have received a copy of the GNU Lesser General Public +-License along with FFmpeg; if not, write to the Free Software +-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +- +-******************************************************************************** +- +-libavcodec/mips/fft_mips.c +- +-Copyright (c) 2012 +-MIPS Technologies, Inc., California. +- +-Redistribution and use in source and binary forms, with or without +-modification, are permitted provided that the following conditions +-are met: +-1. Redistributions of source code must retain the above copyright +-notice, this list of conditions and the following disclaimer. +-2. Redistributions in binary form must reproduce the above copyright +-notice, this list of conditions and the following disclaimer in the +-documentation and/or other materials provided with the distribution. +-3. 
Neither the name of the MIPS Technologies, Inc., nor the names of its +-contributors may be used to endorse or promote products derived from +-this software without specific prior written permission. +- +-THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND +-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +-ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE +-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +-OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +-LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +-SUCH DAMAGE. +- +-Author: Stanislav Ocovaj (socovaj@mips.com) +-Author: Zoran Lukic (zoranl@mips.com) +- +-Optimized MDCT/IMDCT and FFT transforms +- +-This file is part of FFmpeg. +- +-FFmpeg is free software; you can redistribute it and/or +-modify it under the terms of the GNU Lesser General Public +-License as published by the Free Software Foundation; either +-version 2.1 of the License, or (at your option) any later version. +- +-FFmpeg is distributed in the hope that it will be useful, +-but WITHOUT ANY WARRANTY; without even the implied warranty of +-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +-Lesser General Public License for more details. +- +-You should have received a copy of the GNU Lesser General Public +-License along with FFmpeg; if not, write to the Free Software +-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +- +-******************************************************************************** +- +-libavcodec/mips/mpegaudiodsp_mips_float.c +- +-Copyright (c) 2012 +-MIPS Technologies, Inc., California. +- +-Redistribution and use in source and binary forms, with or without +-modification, are permitted provided that the following conditions +-are met: +-1. Redistributions of source code must retain the above copyright +-notice, this list of conditions and the following disclaimer. +-2. Redistributions in binary form must reproduce the above copyright +-notice, this list of conditions and the following disclaimer in the +-documentation and/or other materials provided with the distribution. +-3. Neither the name of the MIPS Technologies, Inc., nor the names of its +-contributors may be used to endorse or promote products derived from +-this software without specific prior written permission. +- +-THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND +-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +-ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE +-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +-OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +-LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +-SUCH DAMAGE. 
+- +-Author: Bojan Zivkovic (bojan@mips.com) +- +-MPEG Audio decoder optimized for MIPS floating-point architecture +- +-This file is part of FFmpeg. +- +-FFmpeg is free software; you can redistribute it and/or +-modify it under the terms of the GNU Lesser General Public +-License as published by the Free Software Foundation; either +-version 2.1 of the License, or (at your option) any later version. +- +-FFmpeg is distributed in the hope that it will be useful, +-but WITHOUT ANY WARRANTY; without even the implied warranty of +-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +-Lesser General Public License for more details. +- +-You should have received a copy of the GNU Lesser General Public +-License along with FFmpeg; if not, write to the Free Software +-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +- +-******************************************************************************** +- +-libavcodec/x86/xvididct.asm +- +-XVID MPEG-4 VIDEO CODEC +- +- Conversion from gcc syntax to x264asm syntax with modifications +- by Christophe Gisquet +- +- =========== SSE2 inverse discrete cosine transform =========== +- +- Copyright(C) 2003 Pascal Massimino +- +- Conversion to gcc syntax with modifications +- by Alexander Strange +- +- Originally from dct/x86_asm/fdct_sse2_skal.asm in Xvid. +- +- Vertical pass is an implementation of the scheme: +- Loeffler C., Ligtenberg A., and Moschytz C.S.: +- Practical Fast 1D DCT Algorithm with Eleven Multiplications, +- Proc. ICASSP 1989, 988-991. +- +- Horizontal pass is a double 4x4 vector/matrix multiplication, +- (see also Intel's Application Note 922: +- http://developer.intel.com/vtune/cbts/strmsimd/922down.htm +- Copyright (C) 1999 Intel Corporation) +- +- More details at http://skal.planet-d.net/coding/dct.html +- +- ======= MMX and XMM forward discrete cosine transform ======= +- +- Copyright(C) 2001 Peter Ross +- +- Originally provided by Intel at AP-922 +- http://developer.intel.com/vtune/cbts/strmsimd/922down.htm +- (See more app notes at http://developer.intel.com/vtune/cbts/strmsimd/appnotes.htm) +- but in a limited edition. +- New macro implements a column part for precise iDCT +- The routine precision now satisfies IEEE standard 1180-1990. +- +- Copyright(C) 2000-2001 Peter Gubanov +- Rounding trick Copyright(C) 2000 Michel Lespinasse +- +- http://www.elecard.com/peter/idct.html +- http://www.linuxvideo.org/mpeg2dec/ +- +- These examples contain code fragments for first stage iDCT 8x8 +- (for rows) and first stage DCT 8x8 (for columns) +- +- conversion to gcc syntax by Michael Niedermayer +- +- ====================================================================== +- +- This file is part of FFmpeg. +- +- FFmpeg is free software; you can redistribute it and/or +- modify it under the terms of the GNU Lesser General Public +- License as published by the Free Software Foundation; either +- version 2.1 of the License, or (at your option) any later version. +- +- FFmpeg is distributed in the hope that it will be useful, +- but WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- Lesser General Public License for more details. 
+- +- You should have received a copy of the GNU Lesser General Public License +- along with FFmpeg; if not, write to the Free Software Foundation, +- Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +- + ******************************************************************************** + + libavformat/oggparsetheora.c +@@ -784,19 +273,14 @@ libavcodec/fft_fixed_32.c + libavcodec/fft_init_table.c + libavcodec/fft_table.h + libavcodec/mdct_fixed_32.c +-libavcodec/mips/aacdec_mips.c + libavcodec/mips/aacdec_mips.h +-libavcodec/mips/aacpsdsp_mips.c +-libavcodec/mips/aacsbr_mips.c + libavcodec/mips/aacsbr_mips.h + libavcodec/mips/amrwbdec_mips.h + libavcodec/mips/compute_antialias_fixed.h + libavcodec/mips/compute_antialias_float.h + libavcodec/mips/lsp_mips.h +-libavcodec/mips/sbrdsp_mips.c + libavutil/fixed_dsp.c + libavutil/fixed_dsp.h +-libavutil/mips/float_dsp_mips.c + libavutil/mips/libm_mips.h + libavutil/softfloat_tables.h + +diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/scripts/build_ffmpeg.py b/src/3rdparty/chromium/third_party/ffmpeg/chromium/scripts/build_ffmpeg.py +index 23d5c0f57..dfe821557 100755 +--- a/src/3rdparty/chromium/third_party/ffmpeg/chromium/scripts/build_ffmpeg.py ++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/scripts/build_ffmpeg.py +@@ -36,7 +36,7 @@ ARCH_MAP = { + 'android': ['ia32', 'x64', 'arm-neon', 'arm64'], + 'linux': [ + 'ia32', 'x64', 'mipsel', 'mips64el', 'noasm-x64', 'arm', 'arm-neon', +- 'arm64' ++ 'arm64', 'la64' + ], + 'mac': ['x64'], + 'win': ['ia32', 'x64', 'arm64'], +@@ -126,6 +126,8 @@ def DetermineHostOsAndArch(): + host_arch = 'mipsel' + elif platform.machine() == 'mips64': + host_arch = 'mips64el' ++ elif platform.machine() == 'loongarch64': ++ host_arch = 'la64' + elif platform.machine().startswith('arm'): + host_arch = 'arm' + else: +@@ -197,6 +199,10 @@ def SetupAndroidToolchain(target_arch): + elif target_arch == 'mipsel': + sysroot_arch = 'mips' + toolchain_bin_prefix = toolchain_dir_prefix = 'mipsel-linux-android' ++ elif target_arch == 'la64': ++ toolchain_level = api64_level ++ sysroot_arch = 'la64' ++ toolchain_bin_prefix = toolchain_dir_prefix = 'la64-linux-android' + elif target_arch == 'mips64el': + toolchain_level = api64_level + sysroot_arch = 'mips64' +@@ -789,6 +795,21 @@ def ConfigureAndBuild(target_arch, target_os, host_os, host_arch, parallel_jobs, + '--extra-cflags=--target=mips64el-linux-gnuabi64', + '--extra-ldflags=--target=mips64el-linux-gnuabi64', + ]) ++ elif target_arch == 'la64': ++ # These flags taken from android chrome build with target_cpu='mips64el' ++ configure_flags['Common'].extend([ ++ ]) ++ if target_os == 'android': ++ configure_flags['Common'].extend([ ++ '--enable-mips64r6', ++ '--extra-cflags=-mcpu=mips64r6', ++ '--disable-mips64r2', ++ '--enable-msa', ++ ]) ++ if target_os == 'linux': ++ configure_flags['Common'].extend([ ++ '--target-os=linux', ++ ]) + else: + print( + 'Error: Unknown target arch %r for target OS %r!' % (target_arch, +@@ -814,8 +835,8 @@ def ConfigureAndBuild(target_arch, target_os, host_os, host_arch, parallel_jobs, + # typically be the system one, so explicitly configure use of Clang's + # ld.lld, to ensure that things like cross-compilation and LTO work. + # This does not work for ia32 and is always used on mac. 
+- if target_arch != 'ia32' and target_os != 'mac': +- configure_flags['Common'].append('--extra-ldflags=-fuse-ld=lld') ++ #if target_arch != 'ia32' and target_os != 'mac': ++ # configure_flags['Common'].append('--extra-ldflags=-fuse-ld=lld') + + # Should be run on Mac, unless we're cross-compiling on Linux. + if target_os == 'mac': +diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/scripts/copy_config.sh b/src/3rdparty/chromium/third_party/ffmpeg/chromium/scripts/copy_config.sh +index 0e5159d6f..a982a3bd4 100755 +--- a/src/3rdparty/chromium/third_party/ffmpeg/chromium/scripts/copy_config.sh ++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/scripts/copy_config.sh +@@ -10,7 +10,7 @@ for os in android linux linux-noasm mac win; do + # Copy config files for various architectures: + # - ia32/x64 have config.asm, config.h + # - arm/arm-neon have config.h +- for arch in arm arm-neon arm64 ia32 x64 mipsel mips64el; do ++ for arch in arm arm-neon arm64 ia32 x64 mipsel mips64el la64; do + # Don't waste time on non-existent configs, if no config.h then skip. + [ ! -e "build.$arch.$os/$target/config.h" ] && continue + for f in config.h config.asm libavutil/avconfig.h libavutil/ffversion.h libavcodec/bsf_list.c libavcodec/codec_list.c libavcodec/parser_list.c libavformat/demuxer_list.c libavformat/muxer_list.c libavformat/protocol_list.c; do +diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/scripts/generate_gn.py b/src/3rdparty/chromium/third_party/ffmpeg/chromium/scripts/generate_gn.py +index d2b3d1052..5b4dd10e5 100755 +--- a/src/3rdparty/chromium/third_party/ffmpeg/chromium/scripts/generate_gn.py ++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/scripts/generate_gn.py +@@ -77,7 +77,7 @@ GN_SOURCE_END = """] + Attr = enum('ARCHITECTURE', 'TARGET', 'PLATFORM') + SUPPORT_MATRIX = { + Attr.ARCHITECTURE: +- set(['ia32', 'x64', 'arm', 'arm64', 'arm-neon', 'mipsel', 'mips64el']), ++ set(['ia32', 'x64', 'arm', 'arm64', 'arm-neon', 'mipsel', 'mips64el', 'la64']), + Attr.TARGET: + set(['Chromium', 'Chrome', 'ChromeOS']), + Attr.PLATFORM: +diff --git a/src/3rdparty/chromium/third_party/ffmpeg/ffmpeg_generated.gni b/src/3rdparty/chromium/third_party/ffmpeg/ffmpeg_generated.gni +index fa2b74e09..fdecb888d 100644 +--- a/src/3rdparty/chromium/third_party/ffmpeg/ffmpeg_generated.gni ++++ b/src/3rdparty/chromium/third_party/ffmpeg/ffmpeg_generated.gni +@@ -14,17 +14,14 @@ ffmpeg_asm_sources = [] + + use_linux_config = is_linux || is_fuchsia + +-if ((is_android && current_cpu == "arm" && arm_use_neon) || (is_android && current_cpu == "arm64") || (is_android && current_cpu == "x64") || (is_android && current_cpu == "x86") || (is_mac) || (is_win) || (use_linux_config)) { ++if (use_linux_config && current_cpu == "la64") { + ffmpeg_c_sources += [ + "libavcodec/ac3_parser.c", + "libavcodec/ac3tab.c", + "libavcodec/adts_parser.c", + "libavcodec/allcodecs.c", + "libavcodec/autorename_libavcodec_flacdec.c", +- "libavcodec/autorename_libavcodec_flacdsp.c", +- "libavcodec/autorename_libavcodec_mpegaudiodsp.c", + "libavcodec/autorename_libavcodec_utils.c", +- "libavcodec/autorename_libavcodec_vorbisdsp.c", + "libavcodec/avdct.c", + "libavcodec/avfft.c", + "libavcodec/avpacket.c", +@@ -49,7 +46,10 @@ if ((is_android && current_cpu == "arm" && arm_use_neon) || (is_android && curre + "libavcodec/flac.c", + "libavcodec/flac_parser.c", + "libavcodec/flacdata.c", ++ "libavcodec/flacdsp.c", + "libavcodec/golomb.c", ++ "libavcodec/h264pred.c", ++ "libavcodec/hpeldsp.c", + 
"libavcodec/imgconvert.c", + "libavcodec/jni.c", + "libavcodec/libopus.c", +@@ -67,6 +67,7 @@ if ((is_android && current_cpu == "arm" && arm_use_neon) || (is_android && curre + "libavcodec/mpegaudiodata.c", + "libavcodec/mpegaudiodec_fixed.c", + "libavcodec/mpegaudiodecheader.c", ++ "libavcodec/mpegaudiodsp.c", + "libavcodec/mpegaudiodsp_data.c", + "libavcodec/mpegaudiodsp_fixed.c", + "libavcodec/mpegaudiodsp_float.c", +@@ -86,10 +87,19 @@ if ((is_android && current_cpu == "arm" && arm_use_neon) || (is_android && curre + "libavcodec/qsv_api.c", + "libavcodec/raw.c", + "libavcodec/rdft.c", ++ "libavcodec/videodsp.c", + "libavcodec/vorbis.c", + "libavcodec/vorbis_data.c", + "libavcodec/vorbis_parser.c", + "libavcodec/vorbisdec.c", ++ "libavcodec/vorbisdsp.c", ++ "libavcodec/vp3.c", ++ "libavcodec/vp3_parser.c", ++ "libavcodec/vp3dsp.c", ++ "libavcodec/vp56rac.c", ++ "libavcodec/vp8.c", ++ "libavcodec/vp8_parser.c", ++ "libavcodec/vp8dsp.c", + "libavcodec/vp9_parser.c", + "libavcodec/xiph.c", + "libavformat/allformats.c", +@@ -135,10 +145,6 @@ if ((is_android && current_cpu == "arm" && arm_use_neon) || (is_android && curre + "libavformat/wavdec.c", + "libavutil/aes.c", + "libavutil/aes_ctr.c", +- "libavutil/autorename_libavutil_cpu.c", +- "libavutil/autorename_libavutil_fixed_dsp.c", +- "libavutil/autorename_libavutil_float_dsp.c", +- "libavutil/autorename_libavutil_imgutils.c", + "libavutil/avsscanf.c", + "libavutil/avstring.c", + "libavutil/base64.c", +@@ -147,6 +153,7 @@ if ((is_android && current_cpu == "arm" && arm_use_neon) || (is_android && curre + "libavutil/camellia.c", + "libavutil/channel_layout.c", + "libavutil/color_utils.c", ++ "libavutil/cpu.c", + "libavutil/crc.c", + "libavutil/dict.c", + "libavutil/display.c", +@@ -156,9 +163,12 @@ if ((is_android && current_cpu == "arm" && arm_use_neon) || (is_android && curre + "libavutil/eval.c", + "libavutil/fifo.c", + "libavutil/file_open.c", ++ "libavutil/fixed_dsp.c", ++ "libavutil/float_dsp.c", + "libavutil/frame.c", + "libavutil/hdr_dynamic_metadata.c", + "libavutil/hwcontext.c", ++ "libavutil/imgutils.c", + "libavutil/integer.c", + "libavutil/intmath.c", + "libavutil/lfg.c", +@@ -193,22 +203,7 @@ if ((is_android && current_cpu == "arm" && arm_use_neon) || (is_android && curre + ] + } + +-if ((is_mac) || (is_win) || (use_linux_config)) { +- ffmpeg_c_sources += [ +- "libavcodec/autorename_libavcodec_hpeldsp.c", +- "libavcodec/autorename_libavcodec_videodsp.c", +- "libavcodec/autorename_libavcodec_vp3dsp.c", +- "libavcodec/autorename_libavcodec_vp8dsp.c", +- "libavcodec/h264pred.c", +- "libavcodec/vp3.c", +- "libavcodec/vp3_parser.c", +- "libavcodec/vp56rac.c", +- "libavcodec/vp8.c", +- "libavcodec/vp8_parser.c", +- ] +-} +- +-if ((current_cpu == "x64" && ffmpeg_branding == "Chrome") || (is_android && current_cpu == "arm" && arm_use_neon && ffmpeg_branding == "Chrome") || (is_android && current_cpu == "arm64" && ffmpeg_branding == "Chrome") || (is_android && current_cpu == "x86" && ffmpeg_branding == "Chrome") || (is_win && ffmpeg_branding == "Chrome") || (use_linux_config && ffmpeg_branding == "Chrome") || (use_linux_config && ffmpeg_branding == "ChromeOS")) { ++if ((use_linux_config && current_cpu == "la64" && ffmpeg_branding == "Chrome") || (use_linux_config && current_cpu == "la64" && ffmpeg_branding == "ChromeOS")) { + ffmpeg_c_sources += [ + "libavcodec/aac_ac3_parser.c", + "libavcodec/aac_parser.c", +@@ -218,21 +213,8 @@ if ((current_cpu == "x64" && ffmpeg_branding == "Chrome") || (is_android && curr + "libavcodec/aactab.c", + 
"libavcodec/adts_header.c", + "libavcodec/autorename_libavcodec_aacdec.c", +- "libavcodec/autorename_libavcodec_mdct15.c", +- "libavcodec/autorename_libavcodec_sbrdsp.c", +- "libavcodec/cbrt_data.c", +- "libavcodec/kbdwin.c", +- "libavcodec/sinewin.c", +- "libavcodec/sinewin_fixed.c", +- "libavformat/aacdec.c", +- "libavformat/apetag.c", +- "libavformat/img2.c", +- ] +-} +- +-if ((is_mac && ffmpeg_branding == "Chrome") || (is_win && ffmpeg_branding == "Chrome") || (use_linux_config && ffmpeg_branding == "Chrome") || (use_linux_config && ffmpeg_branding == "ChromeOS")) { +- ffmpeg_c_sources += [ + "libavcodec/cabac.c", ++ "libavcodec/cbrt_data.c", + "libavcodec/h2645_parse.c", + "libavcodec/h264_cabac.c", + "libavcodec/h264_cavlc.c", +@@ -252,122 +234,26 @@ if ((is_mac && ffmpeg_branding == "Chrome") || (is_win && ffmpeg_branding == "Ch + "libavcodec/h264dsp.c", + "libavcodec/h264idct.c", + "libavcodec/h264qpel.c", ++ "libavcodec/kbdwin.c", ++ "libavcodec/mdct15.c", ++ "libavcodec/sbrdsp.c", ++ "libavcodec/sinewin.c", ++ "libavcodec/sinewin_fixed.c", + "libavcodec/startcode.c", ++ "libavformat/aacdec.c", ++ "libavformat/apetag.c", ++ "libavformat/img2.c", + ] + } + +-if ((is_android && current_cpu == "x64") || (is_android && current_cpu == "x86") || (is_mac) || (is_win && current_cpu == "x64") || (is_win && current_cpu == "x86") || (use_linux_config && current_cpu == "x64") || (use_linux_config && current_cpu == "x86")) { +- ffmpeg_c_sources += [ +- "libavcodec/x86/autorename_libavcodec_x86_vorbisdsp_init.c", +- "libavcodec/x86/constants.c", +- "libavcodec/x86/dct_init.c", +- "libavcodec/x86/fft_init.c", +- "libavcodec/x86/flacdsp_init.c", +- "libavcodec/x86/mpegaudiodsp.c", +- "libavutil/x86/autorename_libavutil_x86_cpu.c", +- "libavutil/x86/autorename_libavutil_x86_float_dsp_init.c", +- "libavutil/x86/fixed_dsp_init.c", +- "libavutil/x86/imgutils_init.c", +- "libavutil/x86/lls_init.c", +- ] +-} +- +-if ((is_android && current_cpu == "arm" && arm_use_neon) || (use_linux_config && current_cpu == "arm" && arm_use_neon) || (use_linux_config && current_cpu == "arm")) { +- ffmpeg_c_sources += [ +- "libavcodec/arm/fft_fixed_init_arm.c", +- "libavcodec/arm/fft_init_arm.c", +- "libavcodec/arm/flacdsp_init_arm.c", +- "libavcodec/arm/mpegaudiodsp_init_arm.c", +- "libavcodec/arm/rdft_init_arm.c", +- "libavcodec/arm/vorbisdsp_init_arm.c", +- "libavutil/arm/autorename_libavutil_arm_cpu.c", +- "libavutil/arm/float_dsp_init_arm.c", +- "libavutil/arm/float_dsp_init_vfp.c", +- ] +- ffmpeg_gas_sources += [ +- "libavcodec/arm/fft_vfp.S", +- "libavcodec/arm/flacdsp_arm.S", +- "libavcodec/arm/mdct_vfp.S", +- "libavcodec/arm/mpegaudiodsp_fixed_armv6.S", +- "libavutil/arm/float_dsp_vfp.S", +- ] +-} +- +-if ((is_android && current_cpu == "x64") || (is_mac) || (is_win && current_cpu == "x64") || (is_win && current_cpu == "x86") || (use_linux_config && current_cpu == "x64") || (use_linux_config && current_cpu == "x86")) { +- ffmpeg_asm_sources += [ +- "libavcodec/x86/dct32.asm", +- "libavcodec/x86/fft.asm", +- "libavcodec/x86/flacdsp.asm", +- "libavcodec/x86/imdct36.asm", +- "libavcodec/x86/vorbisdsp.asm", +- "libavutil/x86/cpuid.asm", +- "libavutil/x86/fixed_dsp.asm", +- "libavutil/x86/float_dsp.asm", +- "libavutil/x86/imgutils.asm", +- "libavutil/x86/lls.asm", +- ] +-} +- +-if ((is_mac) || (is_win && current_cpu == "x64") || (is_win && current_cpu == "x86") || (use_linux_config && current_cpu == "x64") || (use_linux_config && current_cpu == "x86")) { +- ffmpeg_c_sources += [ +- 
"libavcodec/x86/autorename_libavcodec_x86_videodsp_init.c", +- "libavcodec/x86/h264_intrapred_init.c", +- "libavcodec/x86/hpeldsp_init.c", +- "libavcodec/x86/hpeldsp_vp3_init.c", +- "libavcodec/x86/vp3dsp_init.c", +- "libavcodec/x86/vp8dsp_init.c", +- ] +- ffmpeg_asm_sources += [ +- "libavcodec/x86/autorename_libavcodec_x86_videodsp.asm", +- "libavcodec/x86/fpel.asm", +- "libavcodec/x86/h264_intrapred.asm", +- "libavcodec/x86/h264_intrapred_10bit.asm", +- "libavcodec/x86/hpeldsp.asm", +- "libavcodec/x86/hpeldsp_vp3.asm", +- "libavcodec/x86/vp3dsp.asm", +- "libavcodec/x86/vp8dsp.asm", +- "libavcodec/x86/vp8dsp_loopfilter.asm", +- ] +-} +- +-if ((current_cpu == "x64" && ffmpeg_branding == "Chrome") || (is_android && current_cpu == "x86" && ffmpeg_branding == "Chrome") || (is_win && current_cpu == "x86" && ffmpeg_branding == "Chrome") || (use_linux_config && current_cpu == "x64" && ffmpeg_branding == "ChromeOS") || (use_linux_config && current_cpu == "x86" && ffmpeg_branding == "Chrome") || (use_linux_config && current_cpu == "x86" && ffmpeg_branding == "ChromeOS")) { +- ffmpeg_c_sources += [ +- "libavcodec/x86/aacpsdsp_init.c", +- "libavcodec/x86/mdct15_init.c", +- "libavcodec/x86/sbrdsp_init.c", +- ] +-} +- +-if ((is_android && current_cpu == "arm" && arm_use_neon) || (is_android && current_cpu == "arm64") || (is_android && current_cpu == "x64") || (is_android && current_cpu == "x86")) { +- ffmpeg_c_sources += [ +- "compat/strtod.c", +- ] +-} +- +-if ((current_cpu == "x64" && ffmpeg_branding == "Chrome") || (is_win && current_cpu == "x86" && ffmpeg_branding == "Chrome") || (use_linux_config && current_cpu == "x64" && ffmpeg_branding == "ChromeOS") || (use_linux_config && current_cpu == "x86" && ffmpeg_branding == "Chrome") || (use_linux_config && current_cpu == "x86" && ffmpeg_branding == "ChromeOS")) { +- ffmpeg_asm_sources += [ +- "libavcodec/x86/aacpsdsp.asm", +- "libavcodec/x86/mdct15.asm", +- "libavcodec/x86/sbrdsp.asm", +- ] +-} +- +-if (use_linux_config && ffmpeg_branding == "ChromeOS") { ++if (use_linux_config && current_cpu == "la64" && ffmpeg_branding == "ChromeOS") { + ffmpeg_c_sources += [ + "libavcodec/acelp_filters.c", + "libavcodec/acelp_pitch_delay.c", + "libavcodec/acelp_vectors.c", + "libavcodec/amrnbdec.c", + "libavcodec/amrwbdec.c", +- "libavcodec/autorename_libavcodec_blockdsp.c", +- "libavcodec/autorename_libavcodec_idctdsp.c", +- "libavcodec/autorename_libavcodec_me_cmp.c", +- "libavcodec/autorename_libavcodec_mpegvideo.c", +- "libavcodec/autorename_libavcodec_mpegvideodsp.c", +- "libavcodec/autorename_libavcodec_pixblockdsp.c", +- "libavcodec/autorename_libavcodec_qpeldsp.c", +- "libavcodec/autorename_libavcodec_simple_idct.c", +- "libavcodec/autorename_libavcodec_xvididct.c", ++ "libavcodec/blockdsp.c", + "libavcodec/celp_filters.c", + "libavcodec/celp_math.c", + "libavcodec/error_resilience.c", +@@ -382,23 +268,31 @@ if (use_linux_config && ffmpeg_branding == "ChromeOS") { + "libavcodec/h263data.c", + "libavcodec/h263dec.c", + "libavcodec/h263dsp.c", ++ "libavcodec/idctdsp.c", + "libavcodec/intelh263dec.c", + "libavcodec/ituh263dec.c", + "libavcodec/jfdctfst.c", + "libavcodec/jfdctint.c", + "libavcodec/jrevdct.c", + "libavcodec/lsp.c", ++ "libavcodec/me_cmp.c", + "libavcodec/mpeg4video.c", + "libavcodec/mpeg4video_parser.c", + "libavcodec/mpeg4videodec.c", + "libavcodec/mpeg_er.c", + "libavcodec/mpegpicture.c", + "libavcodec/mpegutils.c", ++ "libavcodec/mpegvideo.c", + "libavcodec/mpegvideo_motion.c", + "libavcodec/mpegvideodata.c", ++ "libavcodec/mpegvideodsp.c", 
+ "libavcodec/msgsmdec.c", ++ "libavcodec/pixblockdsp.c", ++ "libavcodec/qpeldsp.c", + "libavcodec/rl.c", ++ "libavcodec/simple_idct.c", + "libavcodec/tiff_common.c", ++ "libavcodec/xvididct.c", + "libavformat/amr.c", + "libavformat/avidec.c", + ] +-- +2.20.1 + diff --git a/0005-port-ffmpeg-to-loongarch64-for-chromium-add-la64-rel.patch b/0005-port-ffmpeg-to-loongarch64-for-chromium-add-la64-rel.patch new file mode 100644 index 0000000..3ee032e --- /dev/null +++ b/0005-port-ffmpeg-to-loongarch64-for-chromium-add-la64-rel.patch @@ -0,0 +1,8174 @@ +From 3c7d732964718f85b8999d9b797817eb0d861577 Mon Sep 17 00:00:00 2001 +From: CaoHuixiong +Date: Wed, 13 Jan 2021 07:52:05 +0000 +Subject: [PATCH 05/13] port ffmpeg to loongarch64 for chromium add la64 + relative files + +--- + .../config/Chrome/linux/la64/config.h | 2589 +++++++++++++++++ + .../Chrome/linux/la64/libavcodec/bsf_list.c | 3 + + .../Chrome/linux/la64/libavcodec/codec_list.c | 20 + + .../linux/la64/libavcodec/parser_list.c | 11 + + .../linux/la64/libavformat/demuxer_list.c | 9 + + .../linux/la64/libavformat/muxer_list.c | 2 + + .../linux/la64/libavformat/protocol_list.c | 2 + + .../Chrome/linux/la64/libavutil/avconfig.h | 6 + + .../Chrome/linux/la64/libavutil/ffversion.h | 5 + + .../config/ChromeOS/linux/la64/config.h | 2589 +++++++++++++++++ + .../ChromeOS/linux/la64/libavcodec/bsf_list.c | 3 + + .../linux/la64/libavcodec/codec_list.c | 25 + + .../linux/la64/libavcodec/parser_list.c | 14 + + .../linux/la64/libavformat/demuxer_list.c | 11 + + .../linux/la64/libavformat/muxer_list.c | 2 + + .../linux/la64/libavformat/protocol_list.c | 2 + + .../ChromeOS/linux/la64/libavutil/avconfig.h | 6 + + .../ChromeOS/linux/la64/libavutil/ffversion.h | 5 + + .../config/Chromium/linux/la64/config.h | 2589 +++++++++++++++++ + .../Chromium/linux/la64/libavcodec/bsf_list.c | 3 + + .../linux/la64/libavcodec/codec_list.c | 18 + + .../linux/la64/libavcodec/parser_list.c | 9 + + .../linux/la64/libavformat/demuxer_list.c | 8 + + .../linux/la64/libavformat/muxer_list.c | 2 + + .../linux/la64/libavformat/protocol_list.c | 2 + + .../Chromium/linux/la64/libavutil/avconfig.h | 6 + + .../Chromium/linux/la64/libavutil/ffversion.h | 5 + + 27 files changed, 7946 insertions(+) + create mode 100644 src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/config.h + create mode 100644 src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavcodec/bsf_list.c + create mode 100644 src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavcodec/codec_list.c + create mode 100644 src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavcodec/parser_list.c + create mode 100644 src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavformat/demuxer_list.c + create mode 100644 src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavformat/muxer_list.c + create mode 100644 src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavformat/protocol_list.c + create mode 100644 src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavutil/avconfig.h + create mode 100644 src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavutil/ffversion.h + create mode 100644 src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/config.h + create mode 100644 src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavcodec/bsf_list.c + create mode 100644 
src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavcodec/codec_list.c + create mode 100644 src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavcodec/parser_list.c + create mode 100644 src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavformat/demuxer_list.c + create mode 100644 src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavformat/muxer_list.c + create mode 100644 src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavformat/protocol_list.c + create mode 100644 src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavutil/avconfig.h + create mode 100644 src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavutil/ffversion.h + create mode 100644 src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/config.h + create mode 100644 src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavcodec/bsf_list.c + create mode 100644 src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavcodec/codec_list.c + create mode 100644 src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavcodec/parser_list.c + create mode 100644 src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavformat/demuxer_list.c + create mode 100644 src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavformat/muxer_list.c + create mode 100644 src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavformat/protocol_list.c + create mode 100644 src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavutil/avconfig.h + create mode 100644 src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavutil/ffversion.h + +diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/config.h b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/config.h +new file mode 100644 +index 000000000..a4351739e +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/config.h +@@ -0,0 +1,2589 @@ ++/* Automatically generated by configure - do not modify! 
*/ ++#ifndef FFMPEG_CONFIG_H ++#define FFMPEG_CONFIG_H ++/* #define FFMPEG_CONFIGURATION "--disable-everything --disable-all --disable-doc --disable-htmlpages --disable-manpages --disable-podpages --disable-txtpages --disable-static --enable-avcodec --enable-avformat --enable-avutil --enable-fft --enable-rdft --enable-static --enable-libopus --disable-debug --disable-bzlib --disable-error-resilience --disable-iconv --disable-lzo --disable-network --disable-schannel --disable-sdl2 --disable-symver --disable-xlib --disable-zlib --disable-securetransport --disable-faan --disable-alsa --disable-autodetect --enable-decoder='vorbis,libopus,flac' --enable-decoder='pcm_u8,pcm_s16le,pcm_s24le,pcm_s32le,pcm_f32le,mp3' --enable-decoder='pcm_s16be,pcm_s24be,pcm_mulaw,pcm_alaw' --enable-demuxer='ogg,matroska,wav,flac,mp3,mov' --enable-parser='opus,vorbis,flac,mpegaudio,vp9' --extra-cflags=-I/mnt/chromium/src/third_party/opus/src/include --disable-linux-perf --x86asmexe=nasm --optflags='\"-O2\"' --enable-decoder='theora,vp8' --enable-parser='vp3,vp8' --target-os=linux --enable-pic --cc=clang --cxx=clang++ --ld=clang --enable-decoder='aac,h264' --enable-demuxer=aac --enable-parser='aac,h264'" -- elide long configuration string from binary */ ++#define FFMPEG_LICENSE "LGPL version 2.1 or later" ++#define CONFIG_THIS_YEAR 2020 ++#define FFMPEG_DATADIR "/usr/local/share/ffmpeg" ++#define AVCONV_DATADIR "/usr/local/share/ffmpeg" ++#define CC_IDENT "clang version 8.0.1" ++#define av_restrict restrict ++#define EXTERN_PREFIX "" ++#define EXTERN_ASM ++#define BUILDSUF "" ++#define SLIBSUF ".so" ++#define HAVE_MMX2 HAVE_MMXEXT ++#define SWS_MAX_FILTER_SIZE 256 ++#define ARCH_AARCH64 0 ++#define ARCH_ALPHA 0 ++#define ARCH_ARM 0 ++#define ARCH_AVR32 0 ++#define ARCH_AVR32_AP 0 ++#define ARCH_AVR32_UC 0 ++#define ARCH_BFIN 0 ++#define ARCH_IA64 0 ++#define ARCH_M68K 0 ++#define ARCH_MIPS 0 ++#define ARCH_MIPS64 0 ++#define ARCH_PARISC 0 ++#define ARCH_PPC 0 ++#define ARCH_PPC64 0 ++#define ARCH_S390 0 ++#define ARCH_SH4 0 ++#define ARCH_SPARC 0 ++#define ARCH_SPARC64 0 ++#define ARCH_TILEGX 0 ++#define ARCH_TILEPRO 0 ++#define ARCH_TOMI 0 ++#define ARCH_X86 0 ++#define ARCH_X86_32 0 ++#define ARCH_X86_64 0 ++#define HAVE_ARMV5TE 0 ++#define HAVE_ARMV6 0 ++#define HAVE_ARMV6T2 0 ++#define HAVE_ARMV8 0 ++#define HAVE_NEON 0 ++#define HAVE_VFP 0 ++#define HAVE_VFPV3 0 ++#define HAVE_SETEND 0 ++#define HAVE_ALTIVEC 0 ++#define HAVE_DCBZL 0 ++#define HAVE_LDBRX 0 ++#define HAVE_POWER8 0 ++#define HAVE_PPC4XX 0 ++#define HAVE_VSX 0 ++#define HAVE_AESNI 0 ++#define HAVE_AMD3DNOW 0 ++#define HAVE_AMD3DNOWEXT 0 ++#define HAVE_AVX 0 ++#define HAVE_AVX2 0 ++#define HAVE_AVX512 0 ++#define HAVE_FMA3 0 ++#define HAVE_FMA4 0 ++#define HAVE_MMX 0 ++#define HAVE_MMXEXT 0 ++#define HAVE_SSE 0 ++#define HAVE_SSE2 0 ++#define HAVE_SSE3 0 ++#define HAVE_SSE4 0 ++#define HAVE_SSE42 0 ++#define HAVE_SSSE3 0 ++#define HAVE_XOP 0 ++#define HAVE_CPUNOP 0 ++#define HAVE_I686 0 ++#define HAVE_MIPSFPU 0 ++#define HAVE_MIPS32R2 0 ++#define HAVE_MIPS32R5 0 ++#define HAVE_MIPS64R2 0 ++#define HAVE_MIPS32R6 0 ++#define HAVE_MIPS64R6 0 ++#define HAVE_MIPSDSP 0 ++#define HAVE_MIPSDSPR2 0 ++#define HAVE_MSA 0 ++#define HAVE_MSA2 0 ++#define HAVE_LOONGSON2 0 ++#define HAVE_LOONGSON3 0 ++#define HAVE_MMI 0 ++#define HAVE_ARMV5TE_EXTERNAL 0 ++#define HAVE_ARMV6_EXTERNAL 0 ++#define HAVE_ARMV6T2_EXTERNAL 0 ++#define HAVE_ARMV8_EXTERNAL 0 ++#define HAVE_NEON_EXTERNAL 0 ++#define HAVE_VFP_EXTERNAL 0 ++#define HAVE_VFPV3_EXTERNAL 0 ++#define 
HAVE_SETEND_EXTERNAL 0 ++#define HAVE_ALTIVEC_EXTERNAL 0 ++#define HAVE_DCBZL_EXTERNAL 0 ++#define HAVE_LDBRX_EXTERNAL 0 ++#define HAVE_POWER8_EXTERNAL 0 ++#define HAVE_PPC4XX_EXTERNAL 0 ++#define HAVE_VSX_EXTERNAL 0 ++#define HAVE_AESNI_EXTERNAL 0 ++#define HAVE_AMD3DNOW_EXTERNAL 0 ++#define HAVE_AMD3DNOWEXT_EXTERNAL 0 ++#define HAVE_AVX_EXTERNAL 0 ++#define HAVE_AVX2_EXTERNAL 0 ++#define HAVE_AVX512_EXTERNAL 0 ++#define HAVE_FMA3_EXTERNAL 0 ++#define HAVE_FMA4_EXTERNAL 0 ++#define HAVE_MMX_EXTERNAL 0 ++#define HAVE_MMXEXT_EXTERNAL 0 ++#define HAVE_SSE_EXTERNAL 0 ++#define HAVE_SSE2_EXTERNAL 0 ++#define HAVE_SSE3_EXTERNAL 0 ++#define HAVE_SSE4_EXTERNAL 0 ++#define HAVE_SSE42_EXTERNAL 0 ++#define HAVE_SSSE3_EXTERNAL 0 ++#define HAVE_XOP_EXTERNAL 0 ++#define HAVE_CPUNOP_EXTERNAL 0 ++#define HAVE_I686_EXTERNAL 0 ++#define HAVE_MIPSFPU_EXTERNAL 0 ++#define HAVE_MIPS32R2_EXTERNAL 0 ++#define HAVE_MIPS32R5_EXTERNAL 0 ++#define HAVE_MIPS64R2_EXTERNAL 0 ++#define HAVE_MIPS32R6_EXTERNAL 0 ++#define HAVE_MIPS64R6_EXTERNAL 0 ++#define HAVE_MIPSDSP_EXTERNAL 0 ++#define HAVE_MIPSDSPR2_EXTERNAL 0 ++#define HAVE_MSA_EXTERNAL 0 ++#define HAVE_MSA2_EXTERNAL 0 ++#define HAVE_LOONGSON2_EXTERNAL 0 ++#define HAVE_LOONGSON3_EXTERNAL 0 ++#define HAVE_MMI_EXTERNAL 0 ++#define HAVE_ARMV5TE_INLINE 0 ++#define HAVE_ARMV6_INLINE 0 ++#define HAVE_ARMV6T2_INLINE 0 ++#define HAVE_ARMV8_INLINE 0 ++#define HAVE_NEON_INLINE 0 ++#define HAVE_VFP_INLINE 0 ++#define HAVE_VFPV3_INLINE 0 ++#define HAVE_SETEND_INLINE 0 ++#define HAVE_ALTIVEC_INLINE 0 ++#define HAVE_DCBZL_INLINE 0 ++#define HAVE_LDBRX_INLINE 0 ++#define HAVE_POWER8_INLINE 0 ++#define HAVE_PPC4XX_INLINE 0 ++#define HAVE_VSX_INLINE 0 ++#define HAVE_AESNI_INLINE 0 ++#define HAVE_AMD3DNOW_INLINE 0 ++#define HAVE_AMD3DNOWEXT_INLINE 0 ++#define HAVE_AVX_INLINE 0 ++#define HAVE_AVX2_INLINE 0 ++#define HAVE_AVX512_INLINE 0 ++#define HAVE_FMA3_INLINE 0 ++#define HAVE_FMA4_INLINE 0 ++#define HAVE_MMX_INLINE 0 ++#define HAVE_MMXEXT_INLINE 0 ++#define HAVE_SSE_INLINE 0 ++#define HAVE_SSE2_INLINE 0 ++#define HAVE_SSE3_INLINE 0 ++#define HAVE_SSE4_INLINE 0 ++#define HAVE_SSE42_INLINE 0 ++#define HAVE_SSSE3_INLINE 0 ++#define HAVE_XOP_INLINE 0 ++#define HAVE_CPUNOP_INLINE 0 ++#define HAVE_I686_INLINE 0 ++#define HAVE_MIPSFPU_INLINE 0 ++#define HAVE_MIPS32R2_INLINE 0 ++#define HAVE_MIPS32R5_INLINE 0 ++#define HAVE_MIPS64R2_INLINE 0 ++#define HAVE_MIPS32R6_INLINE 0 ++#define HAVE_MIPS64R6_INLINE 0 ++#define HAVE_MIPSDSP_INLINE 0 ++#define HAVE_MIPSDSPR2_INLINE 0 ++#define HAVE_MSA_INLINE 0 ++#define HAVE_MSA2_INLINE 0 ++#define HAVE_LOONGSON2_INLINE 0 ++#define HAVE_LOONGSON3_INLINE 0 ++#define HAVE_MMI_INLINE 0 ++#define HAVE_ALIGNED_STACK 0 ++#define HAVE_FAST_64BIT 0 ++#define HAVE_FAST_CLZ 0 ++#define HAVE_FAST_CMOV 0 ++#define HAVE_LOCAL_ALIGNED 0 ++#define HAVE_SIMD_ALIGN_16 0 ++#define HAVE_SIMD_ALIGN_32 0 ++#define HAVE_SIMD_ALIGN_64 0 ++#define HAVE_ATOMIC_CAS_PTR 0 ++#define HAVE_MACHINE_RW_BARRIER 0 ++#define HAVE_MEMORYBARRIER 0 ++#define HAVE_MM_EMPTY 0 ++#define HAVE_RDTSC 0 ++#define HAVE_SEM_TIMEDWAIT 1 ++#define HAVE_SYNC_VAL_COMPARE_AND_SWAP 1 ++#define HAVE_CABS 0 ++#define HAVE_CEXP 0 ++#define HAVE_INLINE_ASM 1 ++#define HAVE_SYMVER 0 ++#define HAVE_X86ASM 0 ++#define HAVE_BIGENDIAN 0 ++#define HAVE_FAST_UNALIGNED 0 ++#define HAVE_ARPA_INET_H 0 ++#define HAVE_ASM_TYPES_H 1 ++#define HAVE_CDIO_PARANOIA_H 0 ++#define HAVE_CDIO_PARANOIA_PARANOIA_H 0 ++#define HAVE_CUDA_H 0 ++#define HAVE_DISPATCH_DISPATCH_H 0 ++#define HAVE_DEV_BKTR_IOCTL_BT848_H 0 ++#define 
HAVE_DEV_BKTR_IOCTL_METEOR_H 0 ++#define HAVE_DEV_IC_BT8XX_H 0 ++#define HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H 0 ++#define HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H 0 ++#define HAVE_DIRECT_H 0 ++#define HAVE_DIRENT_H 1 ++#define HAVE_DXGIDEBUG_H 0 ++#define HAVE_DXVA_H 0 ++#define HAVE_ES2_GL_H 0 ++#define HAVE_GSM_H 0 ++#define HAVE_IO_H 0 ++#define HAVE_LINUX_PERF_EVENT_H 1 ++#define HAVE_MACHINE_IOCTL_BT848_H 0 ++#define HAVE_MACHINE_IOCTL_METEOR_H 0 ++#define HAVE_MALLOC_H 1 ++#define HAVE_OPENCV2_CORE_CORE_C_H 0 ++#define HAVE_OPENGL_GL3_H 0 ++#define HAVE_POLL_H 1 ++#define HAVE_SYS_PARAM_H 1 ++#define HAVE_SYS_RESOURCE_H 1 ++#define HAVE_SYS_SELECT_H 1 ++#define HAVE_SYS_SOUNDCARD_H 1 ++#define HAVE_SYS_TIME_H 1 ++#define HAVE_SYS_UN_H 1 ++#define HAVE_SYS_VIDEOIO_H 0 ++#define HAVE_TERMIOS_H 1 ++#define HAVE_UDPLITE_H 0 ++#define HAVE_UNISTD_H 1 ++#define HAVE_VALGRIND_VALGRIND_H 0 /* #define HAVE_VALGRIND_VALGRIND_H 0 -- forced to 0. See https://crbug.com/590440 */ ++#define HAVE_WINDOWS_H 0 ++#define HAVE_WINSOCK2_H 0 ++#define HAVE_INTRINSICS_NEON 0 ++#define HAVE_ATANF 1 ++#define HAVE_ATAN2F 1 ++#define HAVE_CBRT 1 ++#define HAVE_CBRTF 1 ++#define HAVE_COPYSIGN 1 ++#define HAVE_COSF 1 ++#define HAVE_ERF 1 ++#define HAVE_EXP2 1 ++#define HAVE_EXP2F 1 ++#define HAVE_EXPF 1 ++#define HAVE_HYPOT 1 ++#define HAVE_ISFINITE 1 ++#define HAVE_ISINF 1 ++#define HAVE_ISNAN 1 ++#define HAVE_LDEXPF 1 ++#define HAVE_LLRINT 1 ++#define HAVE_LLRINTF 1 ++#define HAVE_LOG2 1 ++#define HAVE_LOG2F 1 ++#define HAVE_LOG10F 1 ++#define HAVE_LRINT 1 ++#define HAVE_LRINTF 1 ++#define HAVE_POWF 1 ++#define HAVE_RINT 1 ++#define HAVE_ROUND 1 ++#define HAVE_ROUNDF 1 ++#define HAVE_SINF 1 ++#define HAVE_TRUNC 1 ++#define HAVE_TRUNCF 1 ++#define HAVE_DOS_PATHS 0 ++#define HAVE_LIBC_MSVCRT 0 ++#define HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS 0 ++#define HAVE_SECTION_DATA_REL_RO 1 ++#define HAVE_THREADS 1 ++#define HAVE_UWP 0 ++#define HAVE_WINRT 0 ++#define HAVE_ACCESS 1 ++#define HAVE_ALIGNED_MALLOC 0 ++#define HAVE_ARC4RANDOM 0 ++#define HAVE_CLOCK_GETTIME 1 ++#define HAVE_CLOSESOCKET 0 ++#define HAVE_COMMANDLINETOARGVW 0 ++#define HAVE_FCNTL 1 ++#define HAVE_GETADDRINFO 0 ++#define HAVE_GETHRTIME 0 ++#define HAVE_GETOPT 1 ++#define HAVE_GETMODULEHANDLE 0 ++#define HAVE_GETPROCESSAFFINITYMASK 0 ++#define HAVE_GETPROCESSMEMORYINFO 0 ++#define HAVE_GETPROCESSTIMES 0 ++#define HAVE_GETRUSAGE 1 ++#define HAVE_GETSTDHANDLE 0 ++#define HAVE_GETSYSTEMTIMEASFILETIME 0 ++#define HAVE_GETTIMEOFDAY 1 ++#define HAVE_GLOB 1 ++#define HAVE_GLXGETPROCADDRESS 0 ++#define HAVE_GMTIME_R 1 ++#define HAVE_INET_ATON 0 ++#define HAVE_ISATTY 1 ++#define HAVE_KBHIT 0 ++#define HAVE_LOCALTIME_R 1 ++#define HAVE_LSTAT 1 ++#define HAVE_LZO1X_999_COMPRESS 0 ++#define HAVE_MACH_ABSOLUTE_TIME 0 ++#define HAVE_MAPVIEWOFFILE 0 ++#define HAVE_MEMALIGN 1 ++#define HAVE_MKSTEMP 1 ++#define HAVE_MMAP 1 ++#define HAVE_MPROTECT 1 ++#define HAVE_NANOSLEEP 1 ++#define HAVE_PEEKNAMEDPIPE 0 ++#define HAVE_POSIX_MEMALIGN 1 ++#define HAVE_PTHREAD_CANCEL 1 ++#define HAVE_SCHED_GETAFFINITY 1 ++#define HAVE_SECITEMIMPORT 0 ++#define HAVE_SETCONSOLETEXTATTRIBUTE 0 ++#define HAVE_SETCONSOLECTRLHANDLER 0 ++#define HAVE_SETDLLDIRECTORY 0 ++#define HAVE_SETMODE 0 ++#define HAVE_SETRLIMIT 1 ++#define HAVE_SLEEP 0 ++#define HAVE_STRERROR_R 1 ++#define HAVE_SYSCONF 1 ++#define HAVE_SYSCTL 0 /* #define HAVE_SYSCTL 1 -- forced to 0 for Fuchsia */ ++#define HAVE_USLEEP 1 ++#define HAVE_UTGETOSTYPEFROMSTRING 0 ++#define HAVE_VIRTUALALLOC 0 ++#define 
HAVE_WGLGETPROCADDRESS 0 ++#define HAVE_BCRYPT 0 ++#define HAVE_VAAPI_DRM 0 ++#define HAVE_VAAPI_X11 0 ++#define HAVE_VDPAU_X11 0 ++#define HAVE_PTHREADS 1 ++#define HAVE_OS2THREADS 0 ++#define HAVE_W32THREADS 0 ++#define HAVE_AS_ARCH_DIRECTIVE 0 ++#define HAVE_AS_DN_DIRECTIVE 0 ++#define HAVE_AS_FPU_DIRECTIVE 0 ++#define HAVE_AS_FUNC 0 ++#define HAVE_AS_OBJECT_ARCH 0 ++#define HAVE_ASM_MOD_Q 0 ++#define HAVE_BLOCKS_EXTENSION 0 ++#define HAVE_EBP_AVAILABLE 0 ++#define HAVE_EBX_AVAILABLE 0 ++#define HAVE_GNU_AS 0 ++#define HAVE_GNU_WINDRES 0 ++#define HAVE_IBM_ASM 0 ++#define HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS 0 ++#define HAVE_INLINE_ASM_LABELS 1 ++#define HAVE_INLINE_ASM_NONLOCAL_LABELS 1 ++#define HAVE_PRAGMA_DEPRECATED 1 ++#define HAVE_RSYNC_CONTIMEOUT 0 ++#define HAVE_SYMVER_ASM_LABEL 1 ++#define HAVE_SYMVER_GNU_ASM 1 ++#define HAVE_VFP_ARGS 0 ++#define HAVE_XFORM_ASM 0 ++#define HAVE_XMM_CLOBBERS 0 ++#define HAVE_KCMVIDEOCODECTYPE_HEVC 0 ++#define HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE 0 ++#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ 0 ++#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG 0 ++#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR 0 ++#define HAVE_SOCKLEN_T 0 ++#define HAVE_STRUCT_ADDRINFO 0 ++#define HAVE_STRUCT_GROUP_SOURCE_REQ 0 ++#define HAVE_STRUCT_IP_MREQ_SOURCE 0 ++#define HAVE_STRUCT_IPV6_MREQ 0 ++#define HAVE_STRUCT_MSGHDR_MSG_FLAGS 0 ++#define HAVE_STRUCT_POLLFD 0 ++#define HAVE_STRUCT_RUSAGE_RU_MAXRSS 1 ++#define HAVE_STRUCT_SCTP_EVENT_SUBSCRIBE 0 ++#define HAVE_STRUCT_SOCKADDR_IN6 0 ++#define HAVE_STRUCT_SOCKADDR_SA_LEN 0 ++#define HAVE_STRUCT_SOCKADDR_STORAGE 0 ++#define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 1 ++#define HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE 0 ++#define HAVE_MAKEINFO 1 ++#define HAVE_MAKEINFO_HTML 1 ++#define HAVE_OPENCL_D3D11 0 ++#define HAVE_OPENCL_DRM_ARM 0 ++#define HAVE_OPENCL_DRM_BEIGNET 0 ++#define HAVE_OPENCL_DXVA2 0 ++#define HAVE_OPENCL_VAAPI_BEIGNET 0 ++#define HAVE_OPENCL_VAAPI_INTEL_MEDIA 0 ++#define HAVE_PERL 1 ++#define HAVE_POD2MAN 1 ++#define HAVE_TEXI2HTML 0 ++#define CONFIG_DOC 0 ++#define CONFIG_HTMLPAGES 0 ++#define CONFIG_MANPAGES 0 ++#define CONFIG_PODPAGES 0 ++#define CONFIG_TXTPAGES 0 ++#define CONFIG_AVIO_LIST_DIR_EXAMPLE 1 ++#define CONFIG_AVIO_READING_EXAMPLE 1 ++#define CONFIG_DECODE_AUDIO_EXAMPLE 1 ++#define CONFIG_DECODE_VIDEO_EXAMPLE 1 ++#define CONFIG_DEMUXING_DECODING_EXAMPLE 1 ++#define CONFIG_ENCODE_AUDIO_EXAMPLE 1 ++#define CONFIG_ENCODE_VIDEO_EXAMPLE 1 ++#define CONFIG_EXTRACT_MVS_EXAMPLE 1 ++#define CONFIG_FILTER_AUDIO_EXAMPLE 0 ++#define CONFIG_FILTERING_AUDIO_EXAMPLE 0 ++#define CONFIG_FILTERING_VIDEO_EXAMPLE 0 ++#define CONFIG_HTTP_MULTICLIENT_EXAMPLE 1 ++#define CONFIG_HW_DECODE_EXAMPLE 1 ++#define CONFIG_METADATA_EXAMPLE 1 ++#define CONFIG_MUXING_EXAMPLE 0 ++#define CONFIG_QSVDEC_EXAMPLE 0 ++#define CONFIG_REMUXING_EXAMPLE 1 ++#define CONFIG_RESAMPLING_AUDIO_EXAMPLE 0 ++#define CONFIG_SCALING_VIDEO_EXAMPLE 0 ++#define CONFIG_TRANSCODE_AAC_EXAMPLE 0 ++#define CONFIG_TRANSCODING_EXAMPLE 0 ++#define CONFIG_VAAPI_ENCODE_EXAMPLE 0 ++#define CONFIG_VAAPI_TRANSCODE_EXAMPLE 0 ++#define CONFIG_AVISYNTH 0 ++#define CONFIG_FREI0R 0 ++#define CONFIG_LIBCDIO 0 ++#define CONFIG_LIBDAVS2 0 ++#define CONFIG_LIBRUBBERBAND 0 ++#define CONFIG_LIBVIDSTAB 0 ++#define CONFIG_LIBX264 0 ++#define CONFIG_LIBX265 0 ++#define CONFIG_LIBXAVS 0 ++#define CONFIG_LIBXAVS2 0 ++#define CONFIG_LIBXVID 0 ++#define CONFIG_DECKLINK 0 ++#define CONFIG_LIBFDK_AAC 0 ++#define CONFIG_OPENSSL 0 ++#define 
CONFIG_LIBTLS 0 ++#define CONFIG_GMP 0 ++#define CONFIG_LIBARIBB24 0 ++#define CONFIG_LIBLENSFUN 0 ++#define CONFIG_LIBOPENCORE_AMRNB 0 ++#define CONFIG_LIBOPENCORE_AMRWB 0 ++#define CONFIG_LIBVMAF 0 ++#define CONFIG_LIBVO_AMRWBENC 0 ++#define CONFIG_MBEDTLS 0 ++#define CONFIG_RKMPP 0 ++#define CONFIG_LIBSMBCLIENT 0 ++#define CONFIG_CHROMAPRINT 0 ++#define CONFIG_GCRYPT 0 ++#define CONFIG_GNUTLS 0 ++#define CONFIG_JNI 0 ++#define CONFIG_LADSPA 0 ++#define CONFIG_LIBAOM 0 ++#define CONFIG_LIBASS 0 ++#define CONFIG_LIBBLURAY 0 ++#define CONFIG_LIBBS2B 0 ++#define CONFIG_LIBCACA 0 ++#define CONFIG_LIBCELT 0 ++#define CONFIG_LIBCODEC2 0 ++#define CONFIG_LIBDAV1D 0 ++#define CONFIG_LIBDC1394 0 ++#define CONFIG_LIBDRM 0 ++#define CONFIG_LIBFLITE 0 ++#define CONFIG_LIBFONTCONFIG 0 ++#define CONFIG_LIBFREETYPE 0 ++#define CONFIG_LIBFRIBIDI 0 ++#define CONFIG_LIBGLSLANG 0 ++#define CONFIG_LIBGME 0 ++#define CONFIG_LIBGSM 0 ++#define CONFIG_LIBIEC61883 0 ++#define CONFIG_LIBILBC 0 ++#define CONFIG_LIBJACK 0 ++#define CONFIG_LIBKLVANC 0 ++#define CONFIG_LIBKVAZAAR 0 ++#define CONFIG_LIBMODPLUG 0 ++#define CONFIG_LIBMP3LAME 0 ++#define CONFIG_LIBMYSOFA 0 ++#define CONFIG_LIBOPENCV 0 ++#define CONFIG_LIBOPENH264 0 ++#define CONFIG_LIBOPENJPEG 0 ++#define CONFIG_LIBOPENMPT 0 ++#define CONFIG_LIBOPUS 1 ++#define CONFIG_LIBPULSE 0 ++#define CONFIG_LIBRABBITMQ 0 ++#define CONFIG_LIBRAV1E 0 ++#define CONFIG_LIBRSVG 0 ++#define CONFIG_LIBRTMP 0 ++#define CONFIG_LIBSHINE 0 ++#define CONFIG_LIBSMBCLIENT 0 ++#define CONFIG_LIBSNAPPY 0 ++#define CONFIG_LIBSOXR 0 ++#define CONFIG_LIBSPEEX 0 ++#define CONFIG_LIBSRT 0 ++#define CONFIG_LIBSSH 0 ++#define CONFIG_LIBTENSORFLOW 0 ++#define CONFIG_LIBTESSERACT 0 ++#define CONFIG_LIBTHEORA 0 ++#define CONFIG_LIBTWOLAME 0 ++#define CONFIG_LIBV4L2 0 ++#define CONFIG_LIBVORBIS 0 ++#define CONFIG_LIBVPX 0 ++#define CONFIG_LIBWAVPACK 0 ++#define CONFIG_LIBWEBP 0 ++#define CONFIG_LIBXML2 0 ++#define CONFIG_LIBZIMG 0 ++#define CONFIG_LIBZMQ 0 ++#define CONFIG_LIBZVBI 0 ++#define CONFIG_LV2 0 ++#define CONFIG_MEDIACODEC 0 ++#define CONFIG_OPENAL 0 ++#define CONFIG_OPENGL 0 ++#define CONFIG_POCKETSPHINX 0 ++#define CONFIG_VAPOURSYNTH 0 ++#define CONFIG_ALSA 0 ++#define CONFIG_APPKIT 0 ++#define CONFIG_AVFOUNDATION 0 ++#define CONFIG_BZLIB 0 ++#define CONFIG_COREIMAGE 0 ++#define CONFIG_ICONV 0 ++#define CONFIG_LIBXCB 0 ++#define CONFIG_LIBXCB_SHM 0 ++#define CONFIG_LIBXCB_SHAPE 0 ++#define CONFIG_LIBXCB_XFIXES 0 ++#define CONFIG_LZMA 0 ++#define CONFIG_SCHANNEL 0 ++#define CONFIG_SDL2 0 ++#define CONFIG_SECURETRANSPORT 0 ++#define CONFIG_SNDIO 0 ++#define CONFIG_XLIB 0 ++#define CONFIG_ZLIB 0 ++#define CONFIG_CUDA_NVCC 0 ++#define CONFIG_CUDA_SDK 0 ++#define CONFIG_LIBNPP 0 ++#define CONFIG_LIBMFX 0 ++#define CONFIG_MMAL 0 ++#define CONFIG_OMX 0 ++#define CONFIG_OPENCL 0 ++#define CONFIG_VULKAN 0 ++#define CONFIG_AMF 0 ++#define CONFIG_AUDIOTOOLBOX 0 ++#define CONFIG_CRYSTALHD 0 ++#define CONFIG_CUDA 0 ++#define CONFIG_CUDA_LLVM 0 ++#define CONFIG_CUVID 0 ++#define CONFIG_D3D11VA 0 ++#define CONFIG_DXVA2 0 ++#define CONFIG_FFNVCODEC 0 ++#define CONFIG_NVDEC 0 ++#define CONFIG_NVENC 0 ++#define CONFIG_VAAPI 0 ++#define CONFIG_VDPAU 0 ++#define CONFIG_VIDEOTOOLBOX 0 ++#define CONFIG_V4L2_M2M 0 ++#define CONFIG_XVMC 0 ++#define CONFIG_FTRAPV 0 ++#define CONFIG_GRAY 0 ++#define CONFIG_HARDCODED_TABLES 0 ++#define CONFIG_OMX_RPI 0 ++#define CONFIG_RUNTIME_CPUDETECT 1 ++#define CONFIG_SAFE_BITSTREAM_READER 1 ++#define CONFIG_SHARED 0 ++#define CONFIG_SMALL 0 ++#define CONFIG_STATIC 1 
++#define CONFIG_SWSCALE_ALPHA 1 ++#define CONFIG_GPL 0 ++#define CONFIG_NONFREE 0 ++#define CONFIG_VERSION3 0 ++#define CONFIG_AVDEVICE 0 ++#define CONFIG_AVFILTER 0 ++#define CONFIG_SWSCALE 0 ++#define CONFIG_POSTPROC 0 ++#define CONFIG_AVFORMAT 1 ++#define CONFIG_AVCODEC 1 ++#define CONFIG_SWRESAMPLE 0 ++#define CONFIG_AVRESAMPLE 0 ++#define CONFIG_AVUTIL 1 ++#define CONFIG_FFPLAY 0 ++#define CONFIG_FFPROBE 0 ++#define CONFIG_FFMPEG 0 ++#define CONFIG_DCT 1 ++#define CONFIG_DWT 0 ++#define CONFIG_ERROR_RESILIENCE 0 ++#define CONFIG_FAAN 0 ++#define CONFIG_FAST_UNALIGNED 0 ++#define CONFIG_FFT 1 ++#define CONFIG_LSP 0 ++#define CONFIG_LZO 0 ++#define CONFIG_MDCT 1 ++#define CONFIG_PIXELUTILS 0 ++#define CONFIG_NETWORK 0 ++#define CONFIG_RDFT 1 ++#define CONFIG_AUTODETECT 0 ++#define CONFIG_FONTCONFIG 0 ++#define CONFIG_LARGE_TESTS 1 ++#define CONFIG_LINUX_PERF 0 ++#define CONFIG_MEMORY_POISONING 0 ++#define CONFIG_NEON_CLOBBER_TEST 0 ++#define CONFIG_OSSFUZZ 0 ++#define CONFIG_PIC 1 ++#define CONFIG_THUMB 0 ++#define CONFIG_VALGRIND_BACKTRACE 0 ++#define CONFIG_XMM_CLOBBER_TEST 0 ++#define CONFIG_BSFS 1 ++#define CONFIG_DECODERS 1 ++#define CONFIG_ENCODERS 0 ++#define CONFIG_HWACCELS 0 ++#define CONFIG_PARSERS 1 ++#define CONFIG_INDEVS 0 ++#define CONFIG_OUTDEVS 0 ++#define CONFIG_FILTERS 0 ++#define CONFIG_DEMUXERS 1 ++#define CONFIG_MUXERS 0 ++#define CONFIG_PROTOCOLS 0 ++#define CONFIG_AANDCTTABLES 0 ++#define CONFIG_AC3DSP 0 ++#define CONFIG_ADTS_HEADER 1 ++#define CONFIG_AUDIO_FRAME_QUEUE 0 ++#define CONFIG_AUDIODSP 0 ++#define CONFIG_BLOCKDSP 0 ++#define CONFIG_BSWAPDSP 0 ++#define CONFIG_CABAC 1 ++#define CONFIG_CBS 0 ++#define CONFIG_CBS_AV1 0 ++#define CONFIG_CBS_H264 0 ++#define CONFIG_CBS_H265 0 ++#define CONFIG_CBS_JPEG 0 ++#define CONFIG_CBS_MPEG2 0 ++#define CONFIG_CBS_VP9 0 ++#define CONFIG_DIRAC_PARSE 1 ++#define CONFIG_DNN 0 ++#define CONFIG_DVPROFILE 0 ++#define CONFIG_EXIF 0 ++#define CONFIG_FAANDCT 0 ++#define CONFIG_FAANIDCT 0 ++#define CONFIG_FDCTDSP 0 ++#define CONFIG_FLACDSP 1 ++#define CONFIG_FMTCONVERT 0 ++#define CONFIG_FRAME_THREAD_ENCODER 0 ++#define CONFIG_G722DSP 0 ++#define CONFIG_GOLOMB 1 ++#define CONFIG_GPLV3 0 ++#define CONFIG_H263DSP 0 ++#define CONFIG_H264CHROMA 1 ++#define CONFIG_H264DSP 1 ++#define CONFIG_H264PARSE 1 ++#define CONFIG_H264PRED 1 ++#define CONFIG_H264QPEL 1 ++#define CONFIG_HEVCPARSE 0 ++#define CONFIG_HPELDSP 1 ++#define CONFIG_HUFFMAN 0 ++#define CONFIG_HUFFYUVDSP 0 ++#define CONFIG_HUFFYUVENCDSP 0 ++#define CONFIG_IDCTDSP 0 ++#define CONFIG_IIRFILTER 0 ++#define CONFIG_MDCT15 1 ++#define CONFIG_INTRAX8 0 ++#define CONFIG_ISO_MEDIA 1 ++#define CONFIG_IVIDSP 0 ++#define CONFIG_JPEGTABLES 0 ++#define CONFIG_LGPLV3 0 ++#define CONFIG_LIBX262 0 ++#define CONFIG_LLAUDDSP 0 ++#define CONFIG_LLVIDDSP 0 ++#define CONFIG_LLVIDENCDSP 0 ++#define CONFIG_LPC 0 ++#define CONFIG_LZF 0 ++#define CONFIG_ME_CMP 0 ++#define CONFIG_MPEG_ER 0 ++#define CONFIG_MPEGAUDIO 1 ++#define CONFIG_MPEGAUDIODSP 1 ++#define CONFIG_MPEGAUDIOHEADER 1 ++#define CONFIG_MPEGVIDEO 0 ++#define CONFIG_MPEGVIDEOENC 0 ++#define CONFIG_MSS34DSP 0 ++#define CONFIG_PIXBLOCKDSP 0 ++#define CONFIG_QPELDSP 0 ++#define CONFIG_QSV 0 ++#define CONFIG_QSVDEC 0 ++#define CONFIG_QSVENC 0 ++#define CONFIG_QSVVPP 0 ++#define CONFIG_RANGECODER 0 ++#define CONFIG_RIFFDEC 1 ++#define CONFIG_RIFFENC 0 ++#define CONFIG_RTPDEC 0 ++#define CONFIG_RTPENC_CHAIN 0 ++#define CONFIG_RV34DSP 0 ++#define CONFIG_SCENE_SAD 0 ++#define CONFIG_SINEWIN 1 ++#define CONFIG_SNAPPY 0 ++#define CONFIG_SRTP 0 
++#define CONFIG_STARTCODE 1 ++#define CONFIG_TEXTUREDSP 0 ++#define CONFIG_TEXTUREDSPENC 0 ++#define CONFIG_TPELDSP 0 ++#define CONFIG_VAAPI_1 0 ++#define CONFIG_VAAPI_ENCODE 0 ++#define CONFIG_VC1DSP 0 ++#define CONFIG_VIDEODSP 1 ++#define CONFIG_VP3DSP 1 ++#define CONFIG_VP56DSP 0 ++#define CONFIG_VP8DSP 1 ++#define CONFIG_WMA_FREQS 0 ++#define CONFIG_WMV2DSP 0 ++#define CONFIG_AAC_ADTSTOASC_BSF 0 ++#define CONFIG_AV1_FRAME_MERGE_BSF 0 ++#define CONFIG_AV1_FRAME_SPLIT_BSF 0 ++#define CONFIG_AV1_METADATA_BSF 0 ++#define CONFIG_CHOMP_BSF 0 ++#define CONFIG_DUMP_EXTRADATA_BSF 0 ++#define CONFIG_DCA_CORE_BSF 0 ++#define CONFIG_EAC3_CORE_BSF 0 ++#define CONFIG_EXTRACT_EXTRADATA_BSF 0 ++#define CONFIG_FILTER_UNITS_BSF 0 ++#define CONFIG_H264_METADATA_BSF 0 ++#define CONFIG_H264_MP4TOANNEXB_BSF 0 ++#define CONFIG_H264_REDUNDANT_PPS_BSF 0 ++#define CONFIG_HAPQA_EXTRACT_BSF 0 ++#define CONFIG_HEVC_METADATA_BSF 0 ++#define CONFIG_HEVC_MP4TOANNEXB_BSF 0 ++#define CONFIG_IMX_DUMP_HEADER_BSF 0 ++#define CONFIG_MJPEG2JPEG_BSF 0 ++#define CONFIG_MJPEGA_DUMP_HEADER_BSF 0 ++#define CONFIG_MP3_HEADER_DECOMPRESS_BSF 0 ++#define CONFIG_MPEG2_METADATA_BSF 0 ++#define CONFIG_MPEG4_UNPACK_BFRAMES_BSF 0 ++#define CONFIG_MOV2TEXTSUB_BSF 0 ++#define CONFIG_NOISE_BSF 0 ++#define CONFIG_NULL_BSF 1 ++#define CONFIG_PRORES_METADATA_BSF 0 ++#define CONFIG_REMOVE_EXTRADATA_BSF 0 ++#define CONFIG_TEXT2MOVSUB_BSF 0 ++#define CONFIG_TRACE_HEADERS_BSF 0 ++#define CONFIG_TRUEHD_CORE_BSF 0 ++#define CONFIG_VP9_METADATA_BSF 0 ++#define CONFIG_VP9_RAW_REORDER_BSF 0 ++#define CONFIG_VP9_SUPERFRAME_BSF 0 ++#define CONFIG_VP9_SUPERFRAME_SPLIT_BSF 0 ++#define CONFIG_AASC_DECODER 0 ++#define CONFIG_AIC_DECODER 0 ++#define CONFIG_ALIAS_PIX_DECODER 0 ++#define CONFIG_AGM_DECODER 0 ++#define CONFIG_AMV_DECODER 0 ++#define CONFIG_ANM_DECODER 0 ++#define CONFIG_ANSI_DECODER 0 ++#define CONFIG_APNG_DECODER 0 ++#define CONFIG_ARBC_DECODER 0 ++#define CONFIG_ASV1_DECODER 0 ++#define CONFIG_ASV2_DECODER 0 ++#define CONFIG_AURA_DECODER 0 ++#define CONFIG_AURA2_DECODER 0 ++#define CONFIG_AVRP_DECODER 0 ++#define CONFIG_AVRN_DECODER 0 ++#define CONFIG_AVS_DECODER 0 ++#define CONFIG_AVUI_DECODER 0 ++#define CONFIG_AYUV_DECODER 0 ++#define CONFIG_BETHSOFTVID_DECODER 0 ++#define CONFIG_BFI_DECODER 0 ++#define CONFIG_BINK_DECODER 0 ++#define CONFIG_BITPACKED_DECODER 0 ++#define CONFIG_BMP_DECODER 0 ++#define CONFIG_BMV_VIDEO_DECODER 0 ++#define CONFIG_BRENDER_PIX_DECODER 0 ++#define CONFIG_C93_DECODER 0 ++#define CONFIG_CAVS_DECODER 0 ++#define CONFIG_CDGRAPHICS_DECODER 0 ++#define CONFIG_CDTOONS_DECODER 0 ++#define CONFIG_CDXL_DECODER 0 ++#define CONFIG_CFHD_DECODER 0 ++#define CONFIG_CINEPAK_DECODER 0 ++#define CONFIG_CLEARVIDEO_DECODER 0 ++#define CONFIG_CLJR_DECODER 0 ++#define CONFIG_CLLC_DECODER 0 ++#define CONFIG_COMFORTNOISE_DECODER 0 ++#define CONFIG_CPIA_DECODER 0 ++#define CONFIG_CSCD_DECODER 0 ++#define CONFIG_CYUV_DECODER 0 ++#define CONFIG_DDS_DECODER 0 ++#define CONFIG_DFA_DECODER 0 ++#define CONFIG_DIRAC_DECODER 0 ++#define CONFIG_DNXHD_DECODER 0 ++#define CONFIG_DPX_DECODER 0 ++#define CONFIG_DSICINVIDEO_DECODER 0 ++#define CONFIG_DVAUDIO_DECODER 0 ++#define CONFIG_DVVIDEO_DECODER 0 ++#define CONFIG_DXA_DECODER 0 ++#define CONFIG_DXTORY_DECODER 0 ++#define CONFIG_DXV_DECODER 0 ++#define CONFIG_EACMV_DECODER 0 ++#define CONFIG_EAMAD_DECODER 0 ++#define CONFIG_EATGQ_DECODER 0 ++#define CONFIG_EATGV_DECODER 0 ++#define CONFIG_EATQI_DECODER 0 ++#define CONFIG_EIGHTBPS_DECODER 0 ++#define CONFIG_EIGHTSVX_EXP_DECODER 0 ++#define 
CONFIG_EIGHTSVX_FIB_DECODER 0 ++#define CONFIG_ESCAPE124_DECODER 0 ++#define CONFIG_ESCAPE130_DECODER 0 ++#define CONFIG_EXR_DECODER 0 ++#define CONFIG_FFV1_DECODER 0 ++#define CONFIG_FFVHUFF_DECODER 0 ++#define CONFIG_FIC_DECODER 0 ++#define CONFIG_FITS_DECODER 0 ++#define CONFIG_FLASHSV_DECODER 0 ++#define CONFIG_FLASHSV2_DECODER 0 ++#define CONFIG_FLIC_DECODER 0 ++#define CONFIG_FLV_DECODER 0 ++#define CONFIG_FMVC_DECODER 0 ++#define CONFIG_FOURXM_DECODER 0 ++#define CONFIG_FRAPS_DECODER 0 ++#define CONFIG_FRWU_DECODER 0 ++#define CONFIG_G2M_DECODER 0 ++#define CONFIG_GDV_DECODER 0 ++#define CONFIG_GIF_DECODER 0 ++#define CONFIG_H261_DECODER 0 ++#define CONFIG_H263_DECODER 0 ++#define CONFIG_H263I_DECODER 0 ++#define CONFIG_H263P_DECODER 0 ++#define CONFIG_H263_V4L2M2M_DECODER 0 ++#define CONFIG_H264_DECODER 1 ++#define CONFIG_H264_CRYSTALHD_DECODER 0 ++#define CONFIG_H264_V4L2M2M_DECODER 0 ++#define CONFIG_H264_MEDIACODEC_DECODER 0 ++#define CONFIG_H264_MMAL_DECODER 0 ++#define CONFIG_H264_QSV_DECODER 0 ++#define CONFIG_H264_RKMPP_DECODER 0 ++#define CONFIG_HAP_DECODER 0 ++#define CONFIG_HEVC_DECODER 0 ++#define CONFIG_HEVC_QSV_DECODER 0 ++#define CONFIG_HEVC_RKMPP_DECODER 0 ++#define CONFIG_HEVC_V4L2M2M_DECODER 0 ++#define CONFIG_HNM4_VIDEO_DECODER 0 ++#define CONFIG_HQ_HQA_DECODER 0 ++#define CONFIG_HQX_DECODER 0 ++#define CONFIG_HUFFYUV_DECODER 0 ++#define CONFIG_HYMT_DECODER 0 ++#define CONFIG_IDCIN_DECODER 0 ++#define CONFIG_IFF_ILBM_DECODER 0 ++#define CONFIG_IMM4_DECODER 0 ++#define CONFIG_IMM5_DECODER 0 ++#define CONFIG_INDEO2_DECODER 0 ++#define CONFIG_INDEO3_DECODER 0 ++#define CONFIG_INDEO4_DECODER 0 ++#define CONFIG_INDEO5_DECODER 0 ++#define CONFIG_INTERPLAY_VIDEO_DECODER 0 ++#define CONFIG_JPEG2000_DECODER 0 ++#define CONFIG_JPEGLS_DECODER 0 ++#define CONFIG_JV_DECODER 0 ++#define CONFIG_KGV1_DECODER 0 ++#define CONFIG_KMVC_DECODER 0 ++#define CONFIG_LAGARITH_DECODER 0 ++#define CONFIG_LOCO_DECODER 0 ++#define CONFIG_LSCR_DECODER 0 ++#define CONFIG_M101_DECODER 0 ++#define CONFIG_MAGICYUV_DECODER 0 ++#define CONFIG_MDEC_DECODER 0 ++#define CONFIG_MIMIC_DECODER 0 ++#define CONFIG_MJPEG_DECODER 0 ++#define CONFIG_MJPEGB_DECODER 0 ++#define CONFIG_MMVIDEO_DECODER 0 ++#define CONFIG_MOTIONPIXELS_DECODER 0 ++#define CONFIG_MPEG1VIDEO_DECODER 0 ++#define CONFIG_MPEG2VIDEO_DECODER 0 ++#define CONFIG_MPEG4_DECODER 0 ++#define CONFIG_MPEG4_CRYSTALHD_DECODER 0 ++#define CONFIG_MPEG4_V4L2M2M_DECODER 0 ++#define CONFIG_MPEG4_MMAL_DECODER 0 ++#define CONFIG_MPEGVIDEO_DECODER 0 ++#define CONFIG_MPEG1_V4L2M2M_DECODER 0 ++#define CONFIG_MPEG2_MMAL_DECODER 0 ++#define CONFIG_MPEG2_CRYSTALHD_DECODER 0 ++#define CONFIG_MPEG2_V4L2M2M_DECODER 0 ++#define CONFIG_MPEG2_QSV_DECODER 0 ++#define CONFIG_MPEG2_MEDIACODEC_DECODER 0 ++#define CONFIG_MSA1_DECODER 0 ++#define CONFIG_MSCC_DECODER 0 ++#define CONFIG_MSMPEG4V1_DECODER 0 ++#define CONFIG_MSMPEG4V2_DECODER 0 ++#define CONFIG_MSMPEG4V3_DECODER 0 ++#define CONFIG_MSMPEG4_CRYSTALHD_DECODER 0 ++#define CONFIG_MSRLE_DECODER 0 ++#define CONFIG_MSS1_DECODER 0 ++#define CONFIG_MSS2_DECODER 0 ++#define CONFIG_MSVIDEO1_DECODER 0 ++#define CONFIG_MSZH_DECODER 0 ++#define CONFIG_MTS2_DECODER 0 ++#define CONFIG_MV30_DECODER 0 ++#define CONFIG_MVC1_DECODER 0 ++#define CONFIG_MVC2_DECODER 0 ++#define CONFIG_MVDV_DECODER 0 ++#define CONFIG_MVHA_DECODER 0 ++#define CONFIG_MWSC_DECODER 0 ++#define CONFIG_MXPEG_DECODER 0 ++#define CONFIG_NUV_DECODER 0 ++#define CONFIG_PAF_VIDEO_DECODER 0 ++#define CONFIG_PAM_DECODER 0 ++#define CONFIG_PBM_DECODER 0 ++#define 
CONFIG_PCX_DECODER 0 ++#define CONFIG_PGM_DECODER 0 ++#define CONFIG_PGMYUV_DECODER 0 ++#define CONFIG_PICTOR_DECODER 0 ++#define CONFIG_PIXLET_DECODER 0 ++#define CONFIG_PNG_DECODER 0 ++#define CONFIG_PPM_DECODER 0 ++#define CONFIG_PRORES_DECODER 0 ++#define CONFIG_PROSUMER_DECODER 0 ++#define CONFIG_PSD_DECODER 0 ++#define CONFIG_PTX_DECODER 0 ++#define CONFIG_QDRAW_DECODER 0 ++#define CONFIG_QPEG_DECODER 0 ++#define CONFIG_QTRLE_DECODER 0 ++#define CONFIG_R10K_DECODER 0 ++#define CONFIG_R210_DECODER 0 ++#define CONFIG_RASC_DECODER 0 ++#define CONFIG_RAWVIDEO_DECODER 0 ++#define CONFIG_RL2_DECODER 0 ++#define CONFIG_ROQ_DECODER 0 ++#define CONFIG_RPZA_DECODER 0 ++#define CONFIG_RSCC_DECODER 0 ++#define CONFIG_RV10_DECODER 0 ++#define CONFIG_RV20_DECODER 0 ++#define CONFIG_RV30_DECODER 0 ++#define CONFIG_RV40_DECODER 0 ++#define CONFIG_S302M_DECODER 0 ++#define CONFIG_SANM_DECODER 0 ++#define CONFIG_SCPR_DECODER 0 ++#define CONFIG_SCREENPRESSO_DECODER 0 ++#define CONFIG_SGI_DECODER 0 ++#define CONFIG_SGIRLE_DECODER 0 ++#define CONFIG_SHEERVIDEO_DECODER 0 ++#define CONFIG_SMACKER_DECODER 0 ++#define CONFIG_SMC_DECODER 0 ++#define CONFIG_SMVJPEG_DECODER 0 ++#define CONFIG_SNOW_DECODER 0 ++#define CONFIG_SP5X_DECODER 0 ++#define CONFIG_SPEEDHQ_DECODER 0 ++#define CONFIG_SRGC_DECODER 0 ++#define CONFIG_SUNRAST_DECODER 0 ++#define CONFIG_SVQ1_DECODER 0 ++#define CONFIG_SVQ3_DECODER 0 ++#define CONFIG_TARGA_DECODER 0 ++#define CONFIG_TARGA_Y216_DECODER 0 ++#define CONFIG_TDSC_DECODER 0 ++#define CONFIG_THEORA_DECODER 1 ++#define CONFIG_THP_DECODER 0 ++#define CONFIG_TIERTEXSEQVIDEO_DECODER 0 ++#define CONFIG_TIFF_DECODER 0 ++#define CONFIG_TMV_DECODER 0 ++#define CONFIG_TRUEMOTION1_DECODER 0 ++#define CONFIG_TRUEMOTION2_DECODER 0 ++#define CONFIG_TRUEMOTION2RT_DECODER 0 ++#define CONFIG_TSCC_DECODER 0 ++#define CONFIG_TSCC2_DECODER 0 ++#define CONFIG_TXD_DECODER 0 ++#define CONFIG_ULTI_DECODER 0 ++#define CONFIG_UTVIDEO_DECODER 0 ++#define CONFIG_V210_DECODER 0 ++#define CONFIG_V210X_DECODER 0 ++#define CONFIG_V308_DECODER 0 ++#define CONFIG_V408_DECODER 0 ++#define CONFIG_V410_DECODER 0 ++#define CONFIG_VB_DECODER 0 ++#define CONFIG_VBLE_DECODER 0 ++#define CONFIG_VC1_DECODER 0 ++#define CONFIG_VC1_CRYSTALHD_DECODER 0 ++#define CONFIG_VC1IMAGE_DECODER 0 ++#define CONFIG_VC1_MMAL_DECODER 0 ++#define CONFIG_VC1_QSV_DECODER 0 ++#define CONFIG_VC1_V4L2M2M_DECODER 0 ++#define CONFIG_VCR1_DECODER 0 ++#define CONFIG_VMDVIDEO_DECODER 0 ++#define CONFIG_VMNC_DECODER 0 ++#define CONFIG_VP3_DECODER 1 ++#define CONFIG_VP4_DECODER 0 ++#define CONFIG_VP5_DECODER 0 ++#define CONFIG_VP6_DECODER 0 ++#define CONFIG_VP6A_DECODER 0 ++#define CONFIG_VP6F_DECODER 0 ++#define CONFIG_VP7_DECODER 0 ++#define CONFIG_VP8_DECODER 1 ++#define CONFIG_VP8_RKMPP_DECODER 0 ++#define CONFIG_VP8_V4L2M2M_DECODER 0 ++#define CONFIG_VP9_DECODER 0 ++#define CONFIG_VP9_RKMPP_DECODER 0 ++#define CONFIG_VP9_V4L2M2M_DECODER 0 ++#define CONFIG_VQA_DECODER 0 ++#define CONFIG_WEBP_DECODER 0 ++#define CONFIG_WCMV_DECODER 0 ++#define CONFIG_WRAPPED_AVFRAME_DECODER 0 ++#define CONFIG_WMV1_DECODER 0 ++#define CONFIG_WMV2_DECODER 0 ++#define CONFIG_WMV3_DECODER 0 ++#define CONFIG_WMV3_CRYSTALHD_DECODER 0 ++#define CONFIG_WMV3IMAGE_DECODER 0 ++#define CONFIG_WNV1_DECODER 0 ++#define CONFIG_XAN_WC3_DECODER 0 ++#define CONFIG_XAN_WC4_DECODER 0 ++#define CONFIG_XBM_DECODER 0 ++#define CONFIG_XFACE_DECODER 0 ++#define CONFIG_XL_DECODER 0 ++#define CONFIG_XPM_DECODER 0 ++#define CONFIG_XWD_DECODER 0 ++#define CONFIG_Y41P_DECODER 0 ++#define 
CONFIG_YLC_DECODER 0 ++#define CONFIG_YOP_DECODER 0 ++#define CONFIG_YUV4_DECODER 0 ++#define CONFIG_ZERO12V_DECODER 0 ++#define CONFIG_ZEROCODEC_DECODER 0 ++#define CONFIG_ZLIB_DECODER 0 ++#define CONFIG_ZMBV_DECODER 0 ++#define CONFIG_AAC_DECODER 1 ++#define CONFIG_AAC_FIXED_DECODER 0 ++#define CONFIG_AAC_LATM_DECODER 0 ++#define CONFIG_AC3_DECODER 0 ++#define CONFIG_AC3_FIXED_DECODER 0 ++#define CONFIG_ACELP_KELVIN_DECODER 0 ++#define CONFIG_ALAC_DECODER 0 ++#define CONFIG_ALS_DECODER 0 ++#define CONFIG_AMRNB_DECODER 0 ++#define CONFIG_AMRWB_DECODER 0 ++#define CONFIG_APE_DECODER 0 ++#define CONFIG_APTX_DECODER 0 ++#define CONFIG_APTX_HD_DECODER 0 ++#define CONFIG_ATRAC1_DECODER 0 ++#define CONFIG_ATRAC3_DECODER 0 ++#define CONFIG_ATRAC3AL_DECODER 0 ++#define CONFIG_ATRAC3P_DECODER 0 ++#define CONFIG_ATRAC3PAL_DECODER 0 ++#define CONFIG_ATRAC9_DECODER 0 ++#define CONFIG_BINKAUDIO_DCT_DECODER 0 ++#define CONFIG_BINKAUDIO_RDFT_DECODER 0 ++#define CONFIG_BMV_AUDIO_DECODER 0 ++#define CONFIG_COOK_DECODER 0 ++#define CONFIG_DCA_DECODER 0 ++#define CONFIG_DOLBY_E_DECODER 0 ++#define CONFIG_DSD_LSBF_DECODER 0 ++#define CONFIG_DSD_MSBF_DECODER 0 ++#define CONFIG_DSD_LSBF_PLANAR_DECODER 0 ++#define CONFIG_DSD_MSBF_PLANAR_DECODER 0 ++#define CONFIG_DSICINAUDIO_DECODER 0 ++#define CONFIG_DSS_SP_DECODER 0 ++#define CONFIG_DST_DECODER 0 ++#define CONFIG_EAC3_DECODER 0 ++#define CONFIG_EVRC_DECODER 0 ++#define CONFIG_FFWAVESYNTH_DECODER 0 ++#define CONFIG_FLAC_DECODER 1 ++#define CONFIG_G723_1_DECODER 0 ++#define CONFIG_G729_DECODER 0 ++#define CONFIG_GSM_DECODER 0 ++#define CONFIG_GSM_MS_DECODER 0 ++#define CONFIG_HCA_DECODER 0 ++#define CONFIG_HCOM_DECODER 0 ++#define CONFIG_IAC_DECODER 0 ++#define CONFIG_ILBC_DECODER 0 ++#define CONFIG_IMC_DECODER 0 ++#define CONFIG_INTERPLAY_ACM_DECODER 0 ++#define CONFIG_MACE3_DECODER 0 ++#define CONFIG_MACE6_DECODER 0 ++#define CONFIG_METASOUND_DECODER 0 ++#define CONFIG_MLP_DECODER 0 ++#define CONFIG_MP1_DECODER 0 ++#define CONFIG_MP1FLOAT_DECODER 0 ++#define CONFIG_MP2_DECODER 0 ++#define CONFIG_MP2FLOAT_DECODER 0 ++#define CONFIG_MP3FLOAT_DECODER 0 ++#define CONFIG_MP3_DECODER 1 ++#define CONFIG_MP3ADUFLOAT_DECODER 0 ++#define CONFIG_MP3ADU_DECODER 0 ++#define CONFIG_MP3ON4FLOAT_DECODER 0 ++#define CONFIG_MP3ON4_DECODER 0 ++#define CONFIG_MPC7_DECODER 0 ++#define CONFIG_MPC8_DECODER 0 ++#define CONFIG_NELLYMOSER_DECODER 0 ++#define CONFIG_ON2AVC_DECODER 0 ++#define CONFIG_OPUS_DECODER 0 ++#define CONFIG_PAF_AUDIO_DECODER 0 ++#define CONFIG_QCELP_DECODER 0 ++#define CONFIG_QDM2_DECODER 0 ++#define CONFIG_QDMC_DECODER 0 ++#define CONFIG_RA_144_DECODER 0 ++#define CONFIG_RA_288_DECODER 0 ++#define CONFIG_RALF_DECODER 0 ++#define CONFIG_SBC_DECODER 0 ++#define CONFIG_SHORTEN_DECODER 0 ++#define CONFIG_SIPR_DECODER 0 ++#define CONFIG_SIREN_DECODER 0 ++#define CONFIG_SMACKAUD_DECODER 0 ++#define CONFIG_SONIC_DECODER 0 ++#define CONFIG_TAK_DECODER 0 ++#define CONFIG_TRUEHD_DECODER 0 ++#define CONFIG_TRUESPEECH_DECODER 0 ++#define CONFIG_TTA_DECODER 0 ++#define CONFIG_TWINVQ_DECODER 0 ++#define CONFIG_VMDAUDIO_DECODER 0 ++#define CONFIG_VORBIS_DECODER 1 ++#define CONFIG_WAVPACK_DECODER 0 ++#define CONFIG_WMALOSSLESS_DECODER 0 ++#define CONFIG_WMAPRO_DECODER 0 ++#define CONFIG_WMAV1_DECODER 0 ++#define CONFIG_WMAV2_DECODER 0 ++#define CONFIG_WMAVOICE_DECODER 0 ++#define CONFIG_WS_SND1_DECODER 0 ++#define CONFIG_XMA1_DECODER 0 ++#define CONFIG_XMA2_DECODER 0 ++#define CONFIG_PCM_ALAW_DECODER 1 ++#define CONFIG_PCM_BLURAY_DECODER 0 ++#define CONFIG_PCM_DVD_DECODER 0 
++#define CONFIG_PCM_F16LE_DECODER 0 ++#define CONFIG_PCM_F24LE_DECODER 0 ++#define CONFIG_PCM_F32BE_DECODER 0 ++#define CONFIG_PCM_F32LE_DECODER 1 ++#define CONFIG_PCM_F64BE_DECODER 0 ++#define CONFIG_PCM_F64LE_DECODER 0 ++#define CONFIG_PCM_LXF_DECODER 0 ++#define CONFIG_PCM_MULAW_DECODER 1 ++#define CONFIG_PCM_S8_DECODER 0 ++#define CONFIG_PCM_S8_PLANAR_DECODER 0 ++#define CONFIG_PCM_S16BE_DECODER 1 ++#define CONFIG_PCM_S16BE_PLANAR_DECODER 0 ++#define CONFIG_PCM_S16LE_DECODER 1 ++#define CONFIG_PCM_S16LE_PLANAR_DECODER 0 ++#define CONFIG_PCM_S24BE_DECODER 1 ++#define CONFIG_PCM_S24DAUD_DECODER 0 ++#define CONFIG_PCM_S24LE_DECODER 1 ++#define CONFIG_PCM_S24LE_PLANAR_DECODER 0 ++#define CONFIG_PCM_S32BE_DECODER 0 ++#define CONFIG_PCM_S32LE_DECODER 1 ++#define CONFIG_PCM_S32LE_PLANAR_DECODER 0 ++#define CONFIG_PCM_S64BE_DECODER 0 ++#define CONFIG_PCM_S64LE_DECODER 0 ++#define CONFIG_PCM_U8_DECODER 1 ++#define CONFIG_PCM_U16BE_DECODER 0 ++#define CONFIG_PCM_U16LE_DECODER 0 ++#define CONFIG_PCM_U24BE_DECODER 0 ++#define CONFIG_PCM_U24LE_DECODER 0 ++#define CONFIG_PCM_U32BE_DECODER 0 ++#define CONFIG_PCM_U32LE_DECODER 0 ++#define CONFIG_PCM_VIDC_DECODER 0 ++#define CONFIG_DERF_DPCM_DECODER 0 ++#define CONFIG_GREMLIN_DPCM_DECODER 0 ++#define CONFIG_INTERPLAY_DPCM_DECODER 0 ++#define CONFIG_ROQ_DPCM_DECODER 0 ++#define CONFIG_SDX2_DPCM_DECODER 0 ++#define CONFIG_SOL_DPCM_DECODER 0 ++#define CONFIG_XAN_DPCM_DECODER 0 ++#define CONFIG_ADPCM_4XM_DECODER 0 ++#define CONFIG_ADPCM_ADX_DECODER 0 ++#define CONFIG_ADPCM_AFC_DECODER 0 ++#define CONFIG_ADPCM_AGM_DECODER 0 ++#define CONFIG_ADPCM_AICA_DECODER 0 ++#define CONFIG_ADPCM_ARGO_DECODER 0 ++#define CONFIG_ADPCM_CT_DECODER 0 ++#define CONFIG_ADPCM_DTK_DECODER 0 ++#define CONFIG_ADPCM_EA_DECODER 0 ++#define CONFIG_ADPCM_EA_MAXIS_XA_DECODER 0 ++#define CONFIG_ADPCM_EA_R1_DECODER 0 ++#define CONFIG_ADPCM_EA_R2_DECODER 0 ++#define CONFIG_ADPCM_EA_R3_DECODER 0 ++#define CONFIG_ADPCM_EA_XAS_DECODER 0 ++#define CONFIG_ADPCM_G722_DECODER 0 ++#define CONFIG_ADPCM_G726_DECODER 0 ++#define CONFIG_ADPCM_G726LE_DECODER 0 ++#define CONFIG_ADPCM_IMA_AMV_DECODER 0 ++#define CONFIG_ADPCM_IMA_ALP_DECODER 0 ++#define CONFIG_ADPCM_IMA_APC_DECODER 0 ++#define CONFIG_ADPCM_IMA_APM_DECODER 0 ++#define CONFIG_ADPCM_IMA_CUNNING_DECODER 0 ++#define CONFIG_ADPCM_IMA_DAT4_DECODER 0 ++#define CONFIG_ADPCM_IMA_DK3_DECODER 0 ++#define CONFIG_ADPCM_IMA_DK4_DECODER 0 ++#define CONFIG_ADPCM_IMA_EA_EACS_DECODER 0 ++#define CONFIG_ADPCM_IMA_EA_SEAD_DECODER 0 ++#define CONFIG_ADPCM_IMA_ISS_DECODER 0 ++#define CONFIG_ADPCM_IMA_MTF_DECODER 0 ++#define CONFIG_ADPCM_IMA_OKI_DECODER 0 ++#define CONFIG_ADPCM_IMA_QT_DECODER 0 ++#define CONFIG_ADPCM_IMA_RAD_DECODER 0 ++#define CONFIG_ADPCM_IMA_SSI_DECODER 0 ++#define CONFIG_ADPCM_IMA_SMJPEG_DECODER 0 ++#define CONFIG_ADPCM_IMA_WAV_DECODER 0 ++#define CONFIG_ADPCM_IMA_WS_DECODER 0 ++#define CONFIG_ADPCM_MS_DECODER 0 ++#define CONFIG_ADPCM_MTAF_DECODER 0 ++#define CONFIG_ADPCM_PSX_DECODER 0 ++#define CONFIG_ADPCM_SBPRO_2_DECODER 0 ++#define CONFIG_ADPCM_SBPRO_3_DECODER 0 ++#define CONFIG_ADPCM_SBPRO_4_DECODER 0 ++#define CONFIG_ADPCM_SWF_DECODER 0 ++#define CONFIG_ADPCM_THP_DECODER 0 ++#define CONFIG_ADPCM_THP_LE_DECODER 0 ++#define CONFIG_ADPCM_VIMA_DECODER 0 ++#define CONFIG_ADPCM_XA_DECODER 0 ++#define CONFIG_ADPCM_YAMAHA_DECODER 0 ++#define CONFIG_ADPCM_ZORK_DECODER 0 ++#define CONFIG_SSA_DECODER 0 ++#define CONFIG_ASS_DECODER 0 ++#define CONFIG_CCAPTION_DECODER 0 ++#define CONFIG_DVBSUB_DECODER 0 ++#define CONFIG_DVDSUB_DECODER 0 
++#define CONFIG_JACOSUB_DECODER 0 ++#define CONFIG_MICRODVD_DECODER 0 ++#define CONFIG_MOVTEXT_DECODER 0 ++#define CONFIG_MPL2_DECODER 0 ++#define CONFIG_PGSSUB_DECODER 0 ++#define CONFIG_PJS_DECODER 0 ++#define CONFIG_REALTEXT_DECODER 0 ++#define CONFIG_SAMI_DECODER 0 ++#define CONFIG_SRT_DECODER 0 ++#define CONFIG_STL_DECODER 0 ++#define CONFIG_SUBRIP_DECODER 0 ++#define CONFIG_SUBVIEWER_DECODER 0 ++#define CONFIG_SUBVIEWER1_DECODER 0 ++#define CONFIG_TEXT_DECODER 0 ++#define CONFIG_VPLAYER_DECODER 0 ++#define CONFIG_WEBVTT_DECODER 0 ++#define CONFIG_XSUB_DECODER 0 ++#define CONFIG_AAC_AT_DECODER 0 ++#define CONFIG_AC3_AT_DECODER 0 ++#define CONFIG_ADPCM_IMA_QT_AT_DECODER 0 ++#define CONFIG_ALAC_AT_DECODER 0 ++#define CONFIG_AMR_NB_AT_DECODER 0 ++#define CONFIG_EAC3_AT_DECODER 0 ++#define CONFIG_GSM_MS_AT_DECODER 0 ++#define CONFIG_ILBC_AT_DECODER 0 ++#define CONFIG_MP1_AT_DECODER 0 ++#define CONFIG_MP2_AT_DECODER 0 ++#define CONFIG_MP3_AT_DECODER 0 ++#define CONFIG_PCM_ALAW_AT_DECODER 0 ++#define CONFIG_PCM_MULAW_AT_DECODER 0 ++#define CONFIG_QDMC_AT_DECODER 0 ++#define CONFIG_QDM2_AT_DECODER 0 ++#define CONFIG_LIBARIBB24_DECODER 0 ++#define CONFIG_LIBCELT_DECODER 0 ++#define CONFIG_LIBCODEC2_DECODER 0 ++#define CONFIG_LIBDAV1D_DECODER 0 ++#define CONFIG_LIBDAVS2_DECODER 0 ++#define CONFIG_LIBFDK_AAC_DECODER 0 ++#define CONFIG_LIBGSM_DECODER 0 ++#define CONFIG_LIBGSM_MS_DECODER 0 ++#define CONFIG_LIBILBC_DECODER 0 ++#define CONFIG_LIBOPENCORE_AMRNB_DECODER 0 ++#define CONFIG_LIBOPENCORE_AMRWB_DECODER 0 ++#define CONFIG_LIBOPENJPEG_DECODER 0 ++#define CONFIG_LIBOPUS_DECODER 1 ++#define CONFIG_LIBRSVG_DECODER 0 ++#define CONFIG_LIBSPEEX_DECODER 0 ++#define CONFIG_LIBVORBIS_DECODER 0 ++#define CONFIG_LIBVPX_VP8_DECODER 0 ++#define CONFIG_LIBVPX_VP9_DECODER 0 ++#define CONFIG_LIBZVBI_TELETEXT_DECODER 0 ++#define CONFIG_BINTEXT_DECODER 0 ++#define CONFIG_XBIN_DECODER 0 ++#define CONFIG_IDF_DECODER 0 ++#define CONFIG_LIBAOM_AV1_DECODER 0 ++#define CONFIG_LIBOPENH264_DECODER 0 ++#define CONFIG_H264_CUVID_DECODER 0 ++#define CONFIG_HEVC_CUVID_DECODER 0 ++#define CONFIG_HEVC_MEDIACODEC_DECODER 0 ++#define CONFIG_MJPEG_CUVID_DECODER 0 ++#define CONFIG_MJPEG_QSV_DECODER 0 ++#define CONFIG_MPEG1_CUVID_DECODER 0 ++#define CONFIG_MPEG2_CUVID_DECODER 0 ++#define CONFIG_MPEG4_CUVID_DECODER 0 ++#define CONFIG_MPEG4_MEDIACODEC_DECODER 0 ++#define CONFIG_VC1_CUVID_DECODER 0 ++#define CONFIG_VP8_CUVID_DECODER 0 ++#define CONFIG_VP8_MEDIACODEC_DECODER 0 ++#define CONFIG_VP8_QSV_DECODER 0 ++#define CONFIG_VP9_CUVID_DECODER 0 ++#define CONFIG_VP9_MEDIACODEC_DECODER 0 ++#define CONFIG_VP9_QSV_DECODER 0 ++#define CONFIG_A64MULTI_ENCODER 0 ++#define CONFIG_A64MULTI5_ENCODER 0 ++#define CONFIG_ALIAS_PIX_ENCODER 0 ++#define CONFIG_AMV_ENCODER 0 ++#define CONFIG_APNG_ENCODER 0 ++#define CONFIG_ASV1_ENCODER 0 ++#define CONFIG_ASV2_ENCODER 0 ++#define CONFIG_AVRP_ENCODER 0 ++#define CONFIG_AVUI_ENCODER 0 ++#define CONFIG_AYUV_ENCODER 0 ++#define CONFIG_BMP_ENCODER 0 ++#define CONFIG_CINEPAK_ENCODER 0 ++#define CONFIG_CLJR_ENCODER 0 ++#define CONFIG_COMFORTNOISE_ENCODER 0 ++#define CONFIG_DNXHD_ENCODER 0 ++#define CONFIG_DPX_ENCODER 0 ++#define CONFIG_DVVIDEO_ENCODER 0 ++#define CONFIG_FFV1_ENCODER 0 ++#define CONFIG_FFVHUFF_ENCODER 0 ++#define CONFIG_FITS_ENCODER 0 ++#define CONFIG_FLASHSV_ENCODER 0 ++#define CONFIG_FLASHSV2_ENCODER 0 ++#define CONFIG_FLV_ENCODER 0 ++#define CONFIG_GIF_ENCODER 0 ++#define CONFIG_H261_ENCODER 0 ++#define CONFIG_H263_ENCODER 0 ++#define CONFIG_H263P_ENCODER 0 ++#define 
CONFIG_HAP_ENCODER 0 ++#define CONFIG_HUFFYUV_ENCODER 0 ++#define CONFIG_JPEG2000_ENCODER 0 ++#define CONFIG_JPEGLS_ENCODER 0 ++#define CONFIG_LJPEG_ENCODER 0 ++#define CONFIG_MAGICYUV_ENCODER 0 ++#define CONFIG_MJPEG_ENCODER 0 ++#define CONFIG_MPEG1VIDEO_ENCODER 0 ++#define CONFIG_MPEG2VIDEO_ENCODER 0 ++#define CONFIG_MPEG4_ENCODER 0 ++#define CONFIG_MSMPEG4V2_ENCODER 0 ++#define CONFIG_MSMPEG4V3_ENCODER 0 ++#define CONFIG_MSVIDEO1_ENCODER 0 ++#define CONFIG_PAM_ENCODER 0 ++#define CONFIG_PBM_ENCODER 0 ++#define CONFIG_PCX_ENCODER 0 ++#define CONFIG_PGM_ENCODER 0 ++#define CONFIG_PGMYUV_ENCODER 0 ++#define CONFIG_PNG_ENCODER 0 ++#define CONFIG_PPM_ENCODER 0 ++#define CONFIG_PRORES_ENCODER 0 ++#define CONFIG_PRORES_AW_ENCODER 0 ++#define CONFIG_PRORES_KS_ENCODER 0 ++#define CONFIG_QTRLE_ENCODER 0 ++#define CONFIG_R10K_ENCODER 0 ++#define CONFIG_R210_ENCODER 0 ++#define CONFIG_RAWVIDEO_ENCODER 0 ++#define CONFIG_ROQ_ENCODER 0 ++#define CONFIG_RV10_ENCODER 0 ++#define CONFIG_RV20_ENCODER 0 ++#define CONFIG_S302M_ENCODER 0 ++#define CONFIG_SGI_ENCODER 0 ++#define CONFIG_SNOW_ENCODER 0 ++#define CONFIG_SUNRAST_ENCODER 0 ++#define CONFIG_SVQ1_ENCODER 0 ++#define CONFIG_TARGA_ENCODER 0 ++#define CONFIG_TIFF_ENCODER 0 ++#define CONFIG_UTVIDEO_ENCODER 0 ++#define CONFIG_V210_ENCODER 0 ++#define CONFIG_V308_ENCODER 0 ++#define CONFIG_V408_ENCODER 0 ++#define CONFIG_V410_ENCODER 0 ++#define CONFIG_VC2_ENCODER 0 ++#define CONFIG_WRAPPED_AVFRAME_ENCODER 0 ++#define CONFIG_WMV1_ENCODER 0 ++#define CONFIG_WMV2_ENCODER 0 ++#define CONFIG_XBM_ENCODER 0 ++#define CONFIG_XFACE_ENCODER 0 ++#define CONFIG_XWD_ENCODER 0 ++#define CONFIG_Y41P_ENCODER 0 ++#define CONFIG_YUV4_ENCODER 0 ++#define CONFIG_ZLIB_ENCODER 0 ++#define CONFIG_ZMBV_ENCODER 0 ++#define CONFIG_AAC_ENCODER 0 ++#define CONFIG_AC3_ENCODER 0 ++#define CONFIG_AC3_FIXED_ENCODER 0 ++#define CONFIG_ALAC_ENCODER 0 ++#define CONFIG_APTX_ENCODER 0 ++#define CONFIG_APTX_HD_ENCODER 0 ++#define CONFIG_DCA_ENCODER 0 ++#define CONFIG_EAC3_ENCODER 0 ++#define CONFIG_FLAC_ENCODER 0 ++#define CONFIG_G723_1_ENCODER 0 ++#define CONFIG_MLP_ENCODER 0 ++#define CONFIG_MP2_ENCODER 0 ++#define CONFIG_MP2FIXED_ENCODER 0 ++#define CONFIG_NELLYMOSER_ENCODER 0 ++#define CONFIG_OPUS_ENCODER 0 ++#define CONFIG_RA_144_ENCODER 0 ++#define CONFIG_SBC_ENCODER 0 ++#define CONFIG_SONIC_ENCODER 0 ++#define CONFIG_SONIC_LS_ENCODER 0 ++#define CONFIG_TRUEHD_ENCODER 0 ++#define CONFIG_TTA_ENCODER 0 ++#define CONFIG_VORBIS_ENCODER 0 ++#define CONFIG_WAVPACK_ENCODER 0 ++#define CONFIG_WMAV1_ENCODER 0 ++#define CONFIG_WMAV2_ENCODER 0 ++#define CONFIG_PCM_ALAW_ENCODER 0 ++#define CONFIG_PCM_DVD_ENCODER 0 ++#define CONFIG_PCM_F32BE_ENCODER 0 ++#define CONFIG_PCM_F32LE_ENCODER 0 ++#define CONFIG_PCM_F64BE_ENCODER 0 ++#define CONFIG_PCM_F64LE_ENCODER 0 ++#define CONFIG_PCM_MULAW_ENCODER 0 ++#define CONFIG_PCM_S8_ENCODER 0 ++#define CONFIG_PCM_S8_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S16BE_ENCODER 0 ++#define CONFIG_PCM_S16BE_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S16LE_ENCODER 0 ++#define CONFIG_PCM_S16LE_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S24BE_ENCODER 0 ++#define CONFIG_PCM_S24DAUD_ENCODER 0 ++#define CONFIG_PCM_S24LE_ENCODER 0 ++#define CONFIG_PCM_S24LE_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S32BE_ENCODER 0 ++#define CONFIG_PCM_S32LE_ENCODER 0 ++#define CONFIG_PCM_S32LE_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S64BE_ENCODER 0 ++#define CONFIG_PCM_S64LE_ENCODER 0 ++#define CONFIG_PCM_U8_ENCODER 0 ++#define CONFIG_PCM_U16BE_ENCODER 0 ++#define CONFIG_PCM_U16LE_ENCODER 0 ++#define 
CONFIG_PCM_U24BE_ENCODER 0 ++#define CONFIG_PCM_U24LE_ENCODER 0 ++#define CONFIG_PCM_U32BE_ENCODER 0 ++#define CONFIG_PCM_U32LE_ENCODER 0 ++#define CONFIG_PCM_VIDC_ENCODER 0 ++#define CONFIG_ROQ_DPCM_ENCODER 0 ++#define CONFIG_ADPCM_ADX_ENCODER 0 ++#define CONFIG_ADPCM_G722_ENCODER 0 ++#define CONFIG_ADPCM_G726_ENCODER 0 ++#define CONFIG_ADPCM_G726LE_ENCODER 0 ++#define CONFIG_ADPCM_IMA_QT_ENCODER 0 ++#define CONFIG_ADPCM_IMA_WAV_ENCODER 0 ++#define CONFIG_ADPCM_MS_ENCODER 0 ++#define CONFIG_ADPCM_SWF_ENCODER 0 ++#define CONFIG_ADPCM_YAMAHA_ENCODER 0 ++#define CONFIG_SSA_ENCODER 0 ++#define CONFIG_ASS_ENCODER 0 ++#define CONFIG_DVBSUB_ENCODER 0 ++#define CONFIG_DVDSUB_ENCODER 0 ++#define CONFIG_MOVTEXT_ENCODER 0 ++#define CONFIG_SRT_ENCODER 0 ++#define CONFIG_SUBRIP_ENCODER 0 ++#define CONFIG_TEXT_ENCODER 0 ++#define CONFIG_WEBVTT_ENCODER 0 ++#define CONFIG_XSUB_ENCODER 0 ++#define CONFIG_AAC_AT_ENCODER 0 ++#define CONFIG_ALAC_AT_ENCODER 0 ++#define CONFIG_ILBC_AT_ENCODER 0 ++#define CONFIG_PCM_ALAW_AT_ENCODER 0 ++#define CONFIG_PCM_MULAW_AT_ENCODER 0 ++#define CONFIG_LIBAOM_AV1_ENCODER 0 ++#define CONFIG_LIBCODEC2_ENCODER 0 ++#define CONFIG_LIBFDK_AAC_ENCODER 0 ++#define CONFIG_LIBGSM_ENCODER 0 ++#define CONFIG_LIBGSM_MS_ENCODER 0 ++#define CONFIG_LIBILBC_ENCODER 0 ++#define CONFIG_LIBMP3LAME_ENCODER 0 ++#define CONFIG_LIBOPENCORE_AMRNB_ENCODER 0 ++#define CONFIG_LIBOPENJPEG_ENCODER 0 ++#define CONFIG_LIBOPUS_ENCODER 0 ++#define CONFIG_LIBRAV1E_ENCODER 0 ++#define CONFIG_LIBSHINE_ENCODER 0 ++#define CONFIG_LIBSPEEX_ENCODER 0 ++#define CONFIG_LIBTHEORA_ENCODER 0 ++#define CONFIG_LIBTWOLAME_ENCODER 0 ++#define CONFIG_LIBVO_AMRWBENC_ENCODER 0 ++#define CONFIG_LIBVORBIS_ENCODER 0 ++#define CONFIG_LIBVPX_VP8_ENCODER 0 ++#define CONFIG_LIBVPX_VP9_ENCODER 0 ++#define CONFIG_LIBWAVPACK_ENCODER 0 ++#define CONFIG_LIBWEBP_ANIM_ENCODER 0 ++#define CONFIG_LIBWEBP_ENCODER 0 ++#define CONFIG_LIBX262_ENCODER 0 ++#define CONFIG_LIBX264_ENCODER 0 ++#define CONFIG_LIBX264RGB_ENCODER 0 ++#define CONFIG_LIBX265_ENCODER 0 ++#define CONFIG_LIBXAVS_ENCODER 0 ++#define CONFIG_LIBXAVS2_ENCODER 0 ++#define CONFIG_LIBXVID_ENCODER 0 ++#define CONFIG_H263_V4L2M2M_ENCODER 0 ++#define CONFIG_LIBOPENH264_ENCODER 0 ++#define CONFIG_H264_AMF_ENCODER 0 ++#define CONFIG_H264_NVENC_ENCODER 0 ++#define CONFIG_H264_OMX_ENCODER 0 ++#define CONFIG_H264_QSV_ENCODER 0 ++#define CONFIG_H264_V4L2M2M_ENCODER 0 ++#define CONFIG_H264_VAAPI_ENCODER 0 ++#define CONFIG_H264_VIDEOTOOLBOX_ENCODER 0 ++#define CONFIG_NVENC_ENCODER 0 ++#define CONFIG_NVENC_H264_ENCODER 0 ++#define CONFIG_NVENC_HEVC_ENCODER 0 ++#define CONFIG_HEVC_AMF_ENCODER 0 ++#define CONFIG_HEVC_NVENC_ENCODER 0 ++#define CONFIG_HEVC_QSV_ENCODER 0 ++#define CONFIG_HEVC_V4L2M2M_ENCODER 0 ++#define CONFIG_HEVC_VAAPI_ENCODER 0 ++#define CONFIG_HEVC_VIDEOTOOLBOX_ENCODER 0 ++#define CONFIG_LIBKVAZAAR_ENCODER 0 ++#define CONFIG_MJPEG_QSV_ENCODER 0 ++#define CONFIG_MJPEG_VAAPI_ENCODER 0 ++#define CONFIG_MPEG2_QSV_ENCODER 0 ++#define CONFIG_MPEG2_VAAPI_ENCODER 0 ++#define CONFIG_MPEG4_OMX_ENCODER 0 ++#define CONFIG_MPEG4_V4L2M2M_ENCODER 0 ++#define CONFIG_VP8_V4L2M2M_ENCODER 0 ++#define CONFIG_VP8_VAAPI_ENCODER 0 ++#define CONFIG_VP9_VAAPI_ENCODER 0 ++#define CONFIG_VP9_QSV_ENCODER 0 ++#define CONFIG_H263_VAAPI_HWACCEL 0 ++#define CONFIG_H263_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_H264_D3D11VA_HWACCEL 0 ++#define CONFIG_H264_D3D11VA2_HWACCEL 0 ++#define CONFIG_H264_DXVA2_HWACCEL 0 ++#define CONFIG_H264_NVDEC_HWACCEL 0 ++#define CONFIG_H264_VAAPI_HWACCEL 0 ++#define 
CONFIG_H264_VDPAU_HWACCEL 0 ++#define CONFIG_H264_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_HEVC_D3D11VA_HWACCEL 0 ++#define CONFIG_HEVC_D3D11VA2_HWACCEL 0 ++#define CONFIG_HEVC_DXVA2_HWACCEL 0 ++#define CONFIG_HEVC_NVDEC_HWACCEL 0 ++#define CONFIG_HEVC_VAAPI_HWACCEL 0 ++#define CONFIG_HEVC_VDPAU_HWACCEL 0 ++#define CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_MJPEG_NVDEC_HWACCEL 0 ++#define CONFIG_MJPEG_VAAPI_HWACCEL 0 ++#define CONFIG_MPEG1_NVDEC_HWACCEL 0 ++#define CONFIG_MPEG1_VDPAU_HWACCEL 0 ++#define CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_MPEG1_XVMC_HWACCEL 0 ++#define CONFIG_MPEG2_D3D11VA_HWACCEL 0 ++#define CONFIG_MPEG2_D3D11VA2_HWACCEL 0 ++#define CONFIG_MPEG2_NVDEC_HWACCEL 0 ++#define CONFIG_MPEG2_DXVA2_HWACCEL 0 ++#define CONFIG_MPEG2_VAAPI_HWACCEL 0 ++#define CONFIG_MPEG2_VDPAU_HWACCEL 0 ++#define CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_MPEG2_XVMC_HWACCEL 0 ++#define CONFIG_MPEG4_NVDEC_HWACCEL 0 ++#define CONFIG_MPEG4_VAAPI_HWACCEL 0 ++#define CONFIG_MPEG4_VDPAU_HWACCEL 0 ++#define CONFIG_MPEG4_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_VC1_D3D11VA_HWACCEL 0 ++#define CONFIG_VC1_D3D11VA2_HWACCEL 0 ++#define CONFIG_VC1_DXVA2_HWACCEL 0 ++#define CONFIG_VC1_NVDEC_HWACCEL 0 ++#define CONFIG_VC1_VAAPI_HWACCEL 0 ++#define CONFIG_VC1_VDPAU_HWACCEL 0 ++#define CONFIG_VP8_NVDEC_HWACCEL 0 ++#define CONFIG_VP8_VAAPI_HWACCEL 0 ++#define CONFIG_VP9_D3D11VA_HWACCEL 0 ++#define CONFIG_VP9_D3D11VA2_HWACCEL 0 ++#define CONFIG_VP9_DXVA2_HWACCEL 0 ++#define CONFIG_VP9_NVDEC_HWACCEL 0 ++#define CONFIG_VP9_VAAPI_HWACCEL 0 ++#define CONFIG_VP9_VDPAU_HWACCEL 0 ++#define CONFIG_WMV3_D3D11VA_HWACCEL 0 ++#define CONFIG_WMV3_D3D11VA2_HWACCEL 0 ++#define CONFIG_WMV3_DXVA2_HWACCEL 0 ++#define CONFIG_WMV3_NVDEC_HWACCEL 0 ++#define CONFIG_WMV3_VAAPI_HWACCEL 0 ++#define CONFIG_WMV3_VDPAU_HWACCEL 0 ++#define CONFIG_AAC_PARSER 1 ++#define CONFIG_AAC_LATM_PARSER 0 ++#define CONFIG_AC3_PARSER 0 ++#define CONFIG_ADX_PARSER 0 ++#define CONFIG_AV1_PARSER 0 ++#define CONFIG_AVS2_PARSER 0 ++#define CONFIG_BMP_PARSER 0 ++#define CONFIG_CAVSVIDEO_PARSER 0 ++#define CONFIG_COOK_PARSER 0 ++#define CONFIG_DCA_PARSER 0 ++#define CONFIG_DIRAC_PARSER 0 ++#define CONFIG_DNXHD_PARSER 0 ++#define CONFIG_DPX_PARSER 0 ++#define CONFIG_DVAUDIO_PARSER 0 ++#define CONFIG_DVBSUB_PARSER 0 ++#define CONFIG_DVDSUB_PARSER 0 ++#define CONFIG_DVD_NAV_PARSER 0 ++#define CONFIG_FLAC_PARSER 1 ++#define CONFIG_G723_1_PARSER 0 ++#define CONFIG_G729_PARSER 0 ++#define CONFIG_GIF_PARSER 0 ++#define CONFIG_GSM_PARSER 0 ++#define CONFIG_H261_PARSER 0 ++#define CONFIG_H263_PARSER 0 ++#define CONFIG_H264_PARSER 1 ++#define CONFIG_HEVC_PARSER 0 ++#define CONFIG_MJPEG_PARSER 0 ++#define CONFIG_MLP_PARSER 0 ++#define CONFIG_MPEG4VIDEO_PARSER 0 ++#define CONFIG_MPEGAUDIO_PARSER 1 ++#define CONFIG_MPEGVIDEO_PARSER 0 ++#define CONFIG_OPUS_PARSER 1 ++#define CONFIG_PNG_PARSER 0 ++#define CONFIG_PNM_PARSER 0 ++#define CONFIG_RV30_PARSER 0 ++#define CONFIG_RV40_PARSER 0 ++#define CONFIG_SBC_PARSER 0 ++#define CONFIG_SIPR_PARSER 0 ++#define CONFIG_TAK_PARSER 0 ++#define CONFIG_VC1_PARSER 0 ++#define CONFIG_VORBIS_PARSER 1 ++#define CONFIG_VP3_PARSER 1 ++#define CONFIG_VP8_PARSER 1 ++#define CONFIG_VP9_PARSER 1 ++#define CONFIG_WEBP_PARSER 0 ++#define CONFIG_XMA_PARSER 0 ++#define CONFIG_ALSA_INDEV 0 ++#define CONFIG_ANDROID_CAMERA_INDEV 0 ++#define CONFIG_AVFOUNDATION_INDEV 0 ++#define CONFIG_BKTR_INDEV 0 ++#define CONFIG_DECKLINK_INDEV 0 ++#define CONFIG_DSHOW_INDEV 0 ++#define CONFIG_FBDEV_INDEV 0 ++#define CONFIG_GDIGRAB_INDEV 0 
++#define CONFIG_IEC61883_INDEV 0 ++#define CONFIG_JACK_INDEV 0 ++#define CONFIG_KMSGRAB_INDEV 0 ++#define CONFIG_LAVFI_INDEV 0 ++#define CONFIG_OPENAL_INDEV 0 ++#define CONFIG_OSS_INDEV 0 ++#define CONFIG_PULSE_INDEV 0 ++#define CONFIG_SNDIO_INDEV 0 ++#define CONFIG_V4L2_INDEV 0 ++#define CONFIG_VFWCAP_INDEV 0 ++#define CONFIG_XCBGRAB_INDEV 0 ++#define CONFIG_LIBCDIO_INDEV 0 ++#define CONFIG_LIBDC1394_INDEV 0 ++#define CONFIG_ALSA_OUTDEV 0 ++#define CONFIG_CACA_OUTDEV 0 ++#define CONFIG_DECKLINK_OUTDEV 0 ++#define CONFIG_FBDEV_OUTDEV 0 ++#define CONFIG_OPENGL_OUTDEV 0 ++#define CONFIG_OSS_OUTDEV 0 ++#define CONFIG_PULSE_OUTDEV 0 ++#define CONFIG_SDL2_OUTDEV 0 ++#define CONFIG_SNDIO_OUTDEV 0 ++#define CONFIG_V4L2_OUTDEV 0 ++#define CONFIG_XV_OUTDEV 0 ++#define CONFIG_ABENCH_FILTER 0 ++#define CONFIG_ACOMPRESSOR_FILTER 0 ++#define CONFIG_ACONTRAST_FILTER 0 ++#define CONFIG_ACOPY_FILTER 0 ++#define CONFIG_ACUE_FILTER 0 ++#define CONFIG_ACROSSFADE_FILTER 0 ++#define CONFIG_ACROSSOVER_FILTER 0 ++#define CONFIG_ACRUSHER_FILTER 0 ++#define CONFIG_ADECLICK_FILTER 0 ++#define CONFIG_ADECLIP_FILTER 0 ++#define CONFIG_ADELAY_FILTER 0 ++#define CONFIG_ADERIVATIVE_FILTER 0 ++#define CONFIG_AECHO_FILTER 0 ++#define CONFIG_AEMPHASIS_FILTER 0 ++#define CONFIG_AEVAL_FILTER 0 ++#define CONFIG_AFADE_FILTER 0 ++#define CONFIG_AFFTDN_FILTER 0 ++#define CONFIG_AFFTFILT_FILTER 0 ++#define CONFIG_AFIR_FILTER 0 ++#define CONFIG_AFORMAT_FILTER 0 ++#define CONFIG_AGATE_FILTER 0 ++#define CONFIG_AIIR_FILTER 0 ++#define CONFIG_AINTEGRAL_FILTER 0 ++#define CONFIG_AINTERLEAVE_FILTER 0 ++#define CONFIG_ALIMITER_FILTER 0 ++#define CONFIG_ALLPASS_FILTER 0 ++#define CONFIG_ALOOP_FILTER 0 ++#define CONFIG_AMERGE_FILTER 0 ++#define CONFIG_AMETADATA_FILTER 0 ++#define CONFIG_AMIX_FILTER 0 ++#define CONFIG_AMULTIPLY_FILTER 0 ++#define CONFIG_ANEQUALIZER_FILTER 0 ++#define CONFIG_ANLMDN_FILTER 0 ++#define CONFIG_ANLMS_FILTER 0 ++#define CONFIG_ANULL_FILTER 0 ++#define CONFIG_APAD_FILTER 0 ++#define CONFIG_APERMS_FILTER 0 ++#define CONFIG_APHASER_FILTER 0 ++#define CONFIG_APULSATOR_FILTER 0 ++#define CONFIG_AREALTIME_FILTER 0 ++#define CONFIG_ARESAMPLE_FILTER 0 ++#define CONFIG_AREVERSE_FILTER 0 ++#define CONFIG_ARNNDN_FILTER 0 ++#define CONFIG_ASELECT_FILTER 0 ++#define CONFIG_ASENDCMD_FILTER 0 ++#define CONFIG_ASETNSAMPLES_FILTER 0 ++#define CONFIG_ASETPTS_FILTER 0 ++#define CONFIG_ASETRATE_FILTER 0 ++#define CONFIG_ASETTB_FILTER 0 ++#define CONFIG_ASHOWINFO_FILTER 0 ++#define CONFIG_ASIDEDATA_FILTER 0 ++#define CONFIG_ASOFTCLIP_FILTER 0 ++#define CONFIG_ASPLIT_FILTER 0 ++#define CONFIG_ASR_FILTER 0 ++#define CONFIG_ASTATS_FILTER 0 ++#define CONFIG_ASTREAMSELECT_FILTER 0 ++#define CONFIG_ATEMPO_FILTER 0 ++#define CONFIG_ATRIM_FILTER 0 ++#define CONFIG_AXCORRELATE_FILTER 0 ++#define CONFIG_AZMQ_FILTER 0 ++#define CONFIG_BANDPASS_FILTER 0 ++#define CONFIG_BANDREJECT_FILTER 0 ++#define CONFIG_BASS_FILTER 0 ++#define CONFIG_BIQUAD_FILTER 0 ++#define CONFIG_BS2B_FILTER 0 ++#define CONFIG_CHROMABER_VULKAN_FILTER 0 ++#define CONFIG_CHANNELMAP_FILTER 0 ++#define CONFIG_CHANNELSPLIT_FILTER 0 ++#define CONFIG_CHORUS_FILTER 0 ++#define CONFIG_COMPAND_FILTER 0 ++#define CONFIG_COMPENSATIONDELAY_FILTER 0 ++#define CONFIG_CROSSFEED_FILTER 0 ++#define CONFIG_CRYSTALIZER_FILTER 0 ++#define CONFIG_DCSHIFT_FILTER 0 ++#define CONFIG_DEESSER_FILTER 0 ++#define CONFIG_DRMETER_FILTER 0 ++#define CONFIG_DYNAUDNORM_FILTER 0 ++#define CONFIG_EARWAX_FILTER 0 ++#define CONFIG_EBUR128_FILTER 0 ++#define CONFIG_EQUALIZER_FILTER 0 ++#define 
CONFIG_EXTRASTEREO_FILTER 0 ++#define CONFIG_FIREQUALIZER_FILTER 0 ++#define CONFIG_FLANGER_FILTER 0 ++#define CONFIG_HAAS_FILTER 0 ++#define CONFIG_HDCD_FILTER 0 ++#define CONFIG_HEADPHONE_FILTER 0 ++#define CONFIG_HIGHPASS_FILTER 0 ++#define CONFIG_HIGHSHELF_FILTER 0 ++#define CONFIG_JOIN_FILTER 0 ++#define CONFIG_LADSPA_FILTER 0 ++#define CONFIG_LOUDNORM_FILTER 0 ++#define CONFIG_LOWPASS_FILTER 0 ++#define CONFIG_LOWSHELF_FILTER 0 ++#define CONFIG_LV2_FILTER 0 ++#define CONFIG_MCOMPAND_FILTER 0 ++#define CONFIG_PAN_FILTER 0 ++#define CONFIG_REPLAYGAIN_FILTER 0 ++#define CONFIG_RESAMPLE_FILTER 0 ++#define CONFIG_RUBBERBAND_FILTER 0 ++#define CONFIG_SIDECHAINCOMPRESS_FILTER 0 ++#define CONFIG_SIDECHAINGATE_FILTER 0 ++#define CONFIG_SILENCEDETECT_FILTER 0 ++#define CONFIG_SILENCEREMOVE_FILTER 0 ++#define CONFIG_SOFALIZER_FILTER 0 ++#define CONFIG_STEREOTOOLS_FILTER 0 ++#define CONFIG_STEREOWIDEN_FILTER 0 ++#define CONFIG_SUPEREQUALIZER_FILTER 0 ++#define CONFIG_SURROUND_FILTER 0 ++#define CONFIG_TREBLE_FILTER 0 ++#define CONFIG_TREMOLO_FILTER 0 ++#define CONFIG_VIBRATO_FILTER 0 ++#define CONFIG_VOLUME_FILTER 0 ++#define CONFIG_VOLUMEDETECT_FILTER 0 ++#define CONFIG_AEVALSRC_FILTER 0 ++#define CONFIG_AFIRSRC_FILTER 0 ++#define CONFIG_ANOISESRC_FILTER 0 ++#define CONFIG_ANULLSRC_FILTER 0 ++#define CONFIG_FLITE_FILTER 0 ++#define CONFIG_HILBERT_FILTER 0 ++#define CONFIG_SINC_FILTER 0 ++#define CONFIG_SINE_FILTER 0 ++#define CONFIG_ANULLSINK_FILTER 0 ++#define CONFIG_ADDROI_FILTER 0 ++#define CONFIG_ALPHAEXTRACT_FILTER 0 ++#define CONFIG_ALPHAMERGE_FILTER 0 ++#define CONFIG_AMPLIFY_FILTER 0 ++#define CONFIG_ASS_FILTER 0 ++#define CONFIG_ATADENOISE_FILTER 0 ++#define CONFIG_AVGBLUR_FILTER 0 ++#define CONFIG_AVGBLUR_OPENCL_FILTER 0 ++#define CONFIG_AVGBLUR_VULKAN_FILTER 0 ++#define CONFIG_BBOX_FILTER 0 ++#define CONFIG_BENCH_FILTER 0 ++#define CONFIG_BILATERAL_FILTER 0 ++#define CONFIG_BITPLANENOISE_FILTER 0 ++#define CONFIG_BLACKDETECT_FILTER 0 ++#define CONFIG_BLACKFRAME_FILTER 0 ++#define CONFIG_BLEND_FILTER 0 ++#define CONFIG_BM3D_FILTER 0 ++#define CONFIG_BOXBLUR_FILTER 0 ++#define CONFIG_BOXBLUR_OPENCL_FILTER 0 ++#define CONFIG_BWDIF_FILTER 0 ++#define CONFIG_CAS_FILTER 0 ++#define CONFIG_CHROMAHOLD_FILTER 0 ++#define CONFIG_CHROMAKEY_FILTER 0 ++#define CONFIG_CHROMASHIFT_FILTER 0 ++#define CONFIG_CIESCOPE_FILTER 0 ++#define CONFIG_CODECVIEW_FILTER 0 ++#define CONFIG_COLORBALANCE_FILTER 0 ++#define CONFIG_COLORCHANNELMIXER_FILTER 0 ++#define CONFIG_COLORKEY_FILTER 0 ++#define CONFIG_COLORKEY_OPENCL_FILTER 0 ++#define CONFIG_COLORHOLD_FILTER 0 ++#define CONFIG_COLORLEVELS_FILTER 0 ++#define CONFIG_COLORMATRIX_FILTER 0 ++#define CONFIG_COLORSPACE_FILTER 0 ++#define CONFIG_CONVOLUTION_FILTER 0 ++#define CONFIG_CONVOLUTION_OPENCL_FILTER 0 ++#define CONFIG_CONVOLVE_FILTER 0 ++#define CONFIG_COPY_FILTER 0 ++#define CONFIG_COREIMAGE_FILTER 0 ++#define CONFIG_COVER_RECT_FILTER 0 ++#define CONFIG_CROP_FILTER 0 ++#define CONFIG_CROPDETECT_FILTER 0 ++#define CONFIG_CUE_FILTER 0 ++#define CONFIG_CURVES_FILTER 0 ++#define CONFIG_DATASCOPE_FILTER 0 ++#define CONFIG_DCTDNOIZ_FILTER 0 ++#define CONFIG_DEBAND_FILTER 0 ++#define CONFIG_DEBLOCK_FILTER 0 ++#define CONFIG_DECIMATE_FILTER 0 ++#define CONFIG_DECONVOLVE_FILTER 0 ++#define CONFIG_DEDOT_FILTER 0 ++#define CONFIG_DEFLATE_FILTER 0 ++#define CONFIG_DEFLICKER_FILTER 0 ++#define CONFIG_DEINTERLACE_QSV_FILTER 0 ++#define CONFIG_DEINTERLACE_VAAPI_FILTER 0 ++#define CONFIG_DEJUDDER_FILTER 0 ++#define CONFIG_DELOGO_FILTER 0 ++#define 
CONFIG_DENOISE_VAAPI_FILTER 0 ++#define CONFIG_DERAIN_FILTER 0 ++#define CONFIG_DESHAKE_FILTER 0 ++#define CONFIG_DESHAKE_OPENCL_FILTER 0 ++#define CONFIG_DESPILL_FILTER 0 ++#define CONFIG_DETELECINE_FILTER 0 ++#define CONFIG_DILATION_FILTER 0 ++#define CONFIG_DILATION_OPENCL_FILTER 0 ++#define CONFIG_DISPLACE_FILTER 0 ++#define CONFIG_DNN_PROCESSING_FILTER 0 ++#define CONFIG_DOUBLEWEAVE_FILTER 0 ++#define CONFIG_DRAWBOX_FILTER 0 ++#define CONFIG_DRAWGRAPH_FILTER 0 ++#define CONFIG_DRAWGRID_FILTER 0 ++#define CONFIG_DRAWTEXT_FILTER 0 ++#define CONFIG_EDGEDETECT_FILTER 0 ++#define CONFIG_ELBG_FILTER 0 ++#define CONFIG_ENTROPY_FILTER 0 ++#define CONFIG_EQ_FILTER 0 ++#define CONFIG_EROSION_FILTER 0 ++#define CONFIG_EROSION_OPENCL_FILTER 0 ++#define CONFIG_EXTRACTPLANES_FILTER 0 ++#define CONFIG_FADE_FILTER 0 ++#define CONFIG_FFTDNOIZ_FILTER 0 ++#define CONFIG_FFTFILT_FILTER 0 ++#define CONFIG_FIELD_FILTER 0 ++#define CONFIG_FIELDHINT_FILTER 0 ++#define CONFIG_FIELDMATCH_FILTER 0 ++#define CONFIG_FIELDORDER_FILTER 0 ++#define CONFIG_FILLBORDERS_FILTER 0 ++#define CONFIG_FIND_RECT_FILTER 0 ++#define CONFIG_FLOODFILL_FILTER 0 ++#define CONFIG_FORMAT_FILTER 0 ++#define CONFIG_FPS_FILTER 0 ++#define CONFIG_FRAMEPACK_FILTER 0 ++#define CONFIG_FRAMERATE_FILTER 0 ++#define CONFIG_FRAMESTEP_FILTER 0 ++#define CONFIG_FREEZEDETECT_FILTER 0 ++#define CONFIG_FREEZEFRAMES_FILTER 0 ++#define CONFIG_FREI0R_FILTER 0 ++#define CONFIG_FSPP_FILTER 0 ++#define CONFIG_GBLUR_FILTER 0 ++#define CONFIG_GEQ_FILTER 0 ++#define CONFIG_GRADFUN_FILTER 0 ++#define CONFIG_GRAPHMONITOR_FILTER 0 ++#define CONFIG_GREYEDGE_FILTER 0 ++#define CONFIG_HALDCLUT_FILTER 0 ++#define CONFIG_HFLIP_FILTER 0 ++#define CONFIG_HISTEQ_FILTER 0 ++#define CONFIG_HISTOGRAM_FILTER 0 ++#define CONFIG_HQDN3D_FILTER 0 ++#define CONFIG_HQX_FILTER 0 ++#define CONFIG_HSTACK_FILTER 0 ++#define CONFIG_HUE_FILTER 0 ++#define CONFIG_HWDOWNLOAD_FILTER 0 ++#define CONFIG_HWMAP_FILTER 0 ++#define CONFIG_HWUPLOAD_FILTER 0 ++#define CONFIG_HWUPLOAD_CUDA_FILTER 0 ++#define CONFIG_HYSTERESIS_FILTER 0 ++#define CONFIG_IDET_FILTER 0 ++#define CONFIG_IL_FILTER 0 ++#define CONFIG_INFLATE_FILTER 0 ++#define CONFIG_INTERLACE_FILTER 0 ++#define CONFIG_INTERLEAVE_FILTER 0 ++#define CONFIG_KERNDEINT_FILTER 0 ++#define CONFIG_LAGFUN_FILTER 0 ++#define CONFIG_LENSCORRECTION_FILTER 0 ++#define CONFIG_LENSFUN_FILTER 0 ++#define CONFIG_LIBVMAF_FILTER 0 ++#define CONFIG_LIMITER_FILTER 0 ++#define CONFIG_LOOP_FILTER 0 ++#define CONFIG_LUMAKEY_FILTER 0 ++#define CONFIG_LUT_FILTER 0 ++#define CONFIG_LUT1D_FILTER 0 ++#define CONFIG_LUT2_FILTER 0 ++#define CONFIG_LUT3D_FILTER 0 ++#define CONFIG_LUTRGB_FILTER 0 ++#define CONFIG_LUTYUV_FILTER 0 ++#define CONFIG_MASKEDCLAMP_FILTER 0 ++#define CONFIG_MASKEDMAX_FILTER 0 ++#define CONFIG_MASKEDMERGE_FILTER 0 ++#define CONFIG_MASKEDMIN_FILTER 0 ++#define CONFIG_MASKEDTHRESHOLD_FILTER 0 ++#define CONFIG_MASKFUN_FILTER 0 ++#define CONFIG_MCDEINT_FILTER 0 ++#define CONFIG_MEDIAN_FILTER 0 ++#define CONFIG_MERGEPLANES_FILTER 0 ++#define CONFIG_MESTIMATE_FILTER 0 ++#define CONFIG_METADATA_FILTER 0 ++#define CONFIG_MIDEQUALIZER_FILTER 0 ++#define CONFIG_MINTERPOLATE_FILTER 0 ++#define CONFIG_MIX_FILTER 0 ++#define CONFIG_MPDECIMATE_FILTER 0 ++#define CONFIG_NEGATE_FILTER 0 ++#define CONFIG_NLMEANS_FILTER 0 ++#define CONFIG_NLMEANS_OPENCL_FILTER 0 ++#define CONFIG_NNEDI_FILTER 0 ++#define CONFIG_NOFORMAT_FILTER 0 ++#define CONFIG_NOISE_FILTER 0 ++#define CONFIG_NORMALIZE_FILTER 0 ++#define CONFIG_NULL_FILTER 0 ++#define CONFIG_OCR_FILTER 0 
++#define CONFIG_OCV_FILTER 0 ++#define CONFIG_OSCILLOSCOPE_FILTER 0 ++#define CONFIG_OVERLAY_FILTER 0 ++#define CONFIG_OVERLAY_OPENCL_FILTER 0 ++#define CONFIG_OVERLAY_QSV_FILTER 0 ++#define CONFIG_OVERLAY_VULKAN_FILTER 0 ++#define CONFIG_OVERLAY_CUDA_FILTER 0 ++#define CONFIG_OWDENOISE_FILTER 0 ++#define CONFIG_PAD_FILTER 0 ++#define CONFIG_PAD_OPENCL_FILTER 0 ++#define CONFIG_PALETTEGEN_FILTER 0 ++#define CONFIG_PALETTEUSE_FILTER 0 ++#define CONFIG_PERMS_FILTER 0 ++#define CONFIG_PERSPECTIVE_FILTER 0 ++#define CONFIG_PHASE_FILTER 0 ++#define CONFIG_PHOTOSENSITIVITY_FILTER 0 ++#define CONFIG_PIXDESCTEST_FILTER 0 ++#define CONFIG_PIXSCOPE_FILTER 0 ++#define CONFIG_PP_FILTER 0 ++#define CONFIG_PP7_FILTER 0 ++#define CONFIG_PREMULTIPLY_FILTER 0 ++#define CONFIG_PREWITT_FILTER 0 ++#define CONFIG_PREWITT_OPENCL_FILTER 0 ++#define CONFIG_PROCAMP_VAAPI_FILTER 0 ++#define CONFIG_PROGRAM_OPENCL_FILTER 0 ++#define CONFIG_PSEUDOCOLOR_FILTER 0 ++#define CONFIG_PSNR_FILTER 0 ++#define CONFIG_PULLUP_FILTER 0 ++#define CONFIG_QP_FILTER 0 ++#define CONFIG_RANDOM_FILTER 0 ++#define CONFIG_READEIA608_FILTER 0 ++#define CONFIG_READVITC_FILTER 0 ++#define CONFIG_REALTIME_FILTER 0 ++#define CONFIG_REMAP_FILTER 0 ++#define CONFIG_REMOVEGRAIN_FILTER 0 ++#define CONFIG_REMOVELOGO_FILTER 0 ++#define CONFIG_REPEATFIELDS_FILTER 0 ++#define CONFIG_REVERSE_FILTER 0 ++#define CONFIG_RGBASHIFT_FILTER 0 ++#define CONFIG_ROBERTS_FILTER 0 ++#define CONFIG_ROBERTS_OPENCL_FILTER 0 ++#define CONFIG_ROTATE_FILTER 0 ++#define CONFIG_SAB_FILTER 0 ++#define CONFIG_SCALE_FILTER 0 ++#define CONFIG_SCALE_CUDA_FILTER 0 ++#define CONFIG_SCALE_NPP_FILTER 0 ++#define CONFIG_SCALE_QSV_FILTER 0 ++#define CONFIG_SCALE_VAAPI_FILTER 0 ++#define CONFIG_SCALE_VULKAN_FILTER 0 ++#define CONFIG_SCALE2REF_FILTER 0 ++#define CONFIG_SCROLL_FILTER 0 ++#define CONFIG_SELECT_FILTER 0 ++#define CONFIG_SELECTIVECOLOR_FILTER 0 ++#define CONFIG_SENDCMD_FILTER 0 ++#define CONFIG_SEPARATEFIELDS_FILTER 0 ++#define CONFIG_SETDAR_FILTER 0 ++#define CONFIG_SETFIELD_FILTER 0 ++#define CONFIG_SETPARAMS_FILTER 0 ++#define CONFIG_SETPTS_FILTER 0 ++#define CONFIG_SETRANGE_FILTER 0 ++#define CONFIG_SETSAR_FILTER 0 ++#define CONFIG_SETTB_FILTER 0 ++#define CONFIG_SHARPNESS_VAAPI_FILTER 0 ++#define CONFIG_SHOWINFO_FILTER 0 ++#define CONFIG_SHOWPALETTE_FILTER 0 ++#define CONFIG_SHUFFLEFRAMES_FILTER 0 ++#define CONFIG_SHUFFLEPLANES_FILTER 0 ++#define CONFIG_SIDEDATA_FILTER 0 ++#define CONFIG_SIGNALSTATS_FILTER 0 ++#define CONFIG_SIGNATURE_FILTER 0 ++#define CONFIG_SMARTBLUR_FILTER 0 ++#define CONFIG_SOBEL_FILTER 0 ++#define CONFIG_SOBEL_OPENCL_FILTER 0 ++#define CONFIG_SPLIT_FILTER 0 ++#define CONFIG_SPP_FILTER 0 ++#define CONFIG_SR_FILTER 0 ++#define CONFIG_SSIM_FILTER 0 ++#define CONFIG_STEREO3D_FILTER 0 ++#define CONFIG_STREAMSELECT_FILTER 0 ++#define CONFIG_SUBTITLES_FILTER 0 ++#define CONFIG_SUPER2XSAI_FILTER 0 ++#define CONFIG_SWAPRECT_FILTER 0 ++#define CONFIG_SWAPUV_FILTER 0 ++#define CONFIG_TBLEND_FILTER 0 ++#define CONFIG_TELECINE_FILTER 0 ++#define CONFIG_THISTOGRAM_FILTER 0 ++#define CONFIG_THRESHOLD_FILTER 0 ++#define CONFIG_THUMBNAIL_FILTER 0 ++#define CONFIG_THUMBNAIL_CUDA_FILTER 0 ++#define CONFIG_TILE_FILTER 0 ++#define CONFIG_TINTERLACE_FILTER 0 ++#define CONFIG_TLUT2_FILTER 0 ++#define CONFIG_TMEDIAN_FILTER 0 ++#define CONFIG_TMIX_FILTER 0 ++#define CONFIG_TONEMAP_FILTER 0 ++#define CONFIG_TONEMAP_OPENCL_FILTER 0 ++#define CONFIG_TONEMAP_VAAPI_FILTER 0 ++#define CONFIG_TPAD_FILTER 0 ++#define CONFIG_TRANSPOSE_FILTER 0 ++#define 
CONFIG_TRANSPOSE_NPP_FILTER 0 ++#define CONFIG_TRANSPOSE_OPENCL_FILTER 0 ++#define CONFIG_TRANSPOSE_VAAPI_FILTER 0 ++#define CONFIG_TRIM_FILTER 0 ++#define CONFIG_UNPREMULTIPLY_FILTER 0 ++#define CONFIG_UNSHARP_FILTER 0 ++#define CONFIG_UNSHARP_OPENCL_FILTER 0 ++#define CONFIG_USPP_FILTER 0 ++#define CONFIG_V360_FILTER 0 ++#define CONFIG_VAGUEDENOISER_FILTER 0 ++#define CONFIG_VECTORSCOPE_FILTER 0 ++#define CONFIG_VFLIP_FILTER 0 ++#define CONFIG_VFRDET_FILTER 0 ++#define CONFIG_VIBRANCE_FILTER 0 ++#define CONFIG_VIDSTABDETECT_FILTER 0 ++#define CONFIG_VIDSTABTRANSFORM_FILTER 0 ++#define CONFIG_VIGNETTE_FILTER 0 ++#define CONFIG_VMAFMOTION_FILTER 0 ++#define CONFIG_VPP_QSV_FILTER 0 ++#define CONFIG_VSTACK_FILTER 0 ++#define CONFIG_W3FDIF_FILTER 0 ++#define CONFIG_WAVEFORM_FILTER 0 ++#define CONFIG_WEAVE_FILTER 0 ++#define CONFIG_XBR_FILTER 0 ++#define CONFIG_XFADE_FILTER 0 ++#define CONFIG_XFADE_OPENCL_FILTER 0 ++#define CONFIG_XMEDIAN_FILTER 0 ++#define CONFIG_XSTACK_FILTER 0 ++#define CONFIG_YADIF_FILTER 0 ++#define CONFIG_YADIF_CUDA_FILTER 0 ++#define CONFIG_YAEPBLUR_FILTER 0 ++#define CONFIG_ZMQ_FILTER 0 ++#define CONFIG_ZOOMPAN_FILTER 0 ++#define CONFIG_ZSCALE_FILTER 0 ++#define CONFIG_ALLRGB_FILTER 0 ++#define CONFIG_ALLYUV_FILTER 0 ++#define CONFIG_CELLAUTO_FILTER 0 ++#define CONFIG_COLOR_FILTER 0 ++#define CONFIG_COREIMAGESRC_FILTER 0 ++#define CONFIG_FREI0R_SRC_FILTER 0 ++#define CONFIG_HALDCLUTSRC_FILTER 0 ++#define CONFIG_LIFE_FILTER 0 ++#define CONFIG_MANDELBROT_FILTER 0 ++#define CONFIG_MPTESTSRC_FILTER 0 ++#define CONFIG_NULLSRC_FILTER 0 ++#define CONFIG_OPENCLSRC_FILTER 0 ++#define CONFIG_PAL75BARS_FILTER 0 ++#define CONFIG_PAL100BARS_FILTER 0 ++#define CONFIG_RGBTESTSRC_FILTER 0 ++#define CONFIG_SIERPINSKI_FILTER 0 ++#define CONFIG_SMPTEBARS_FILTER 0 ++#define CONFIG_SMPTEHDBARS_FILTER 0 ++#define CONFIG_TESTSRC_FILTER 0 ++#define CONFIG_TESTSRC2_FILTER 0 ++#define CONFIG_YUVTESTSRC_FILTER 0 ++#define CONFIG_NULLSINK_FILTER 0 ++#define CONFIG_ABITSCOPE_FILTER 0 ++#define CONFIG_ADRAWGRAPH_FILTER 0 ++#define CONFIG_AGRAPHMONITOR_FILTER 0 ++#define CONFIG_AHISTOGRAM_FILTER 0 ++#define CONFIG_APHASEMETER_FILTER 0 ++#define CONFIG_AVECTORSCOPE_FILTER 0 ++#define CONFIG_CONCAT_FILTER 0 ++#define CONFIG_SHOWCQT_FILTER 0 ++#define CONFIG_SHOWFREQS_FILTER 0 ++#define CONFIG_SHOWSPATIAL_FILTER 0 ++#define CONFIG_SHOWSPECTRUM_FILTER 0 ++#define CONFIG_SHOWSPECTRUMPIC_FILTER 0 ++#define CONFIG_SHOWVOLUME_FILTER 0 ++#define CONFIG_SHOWWAVES_FILTER 0 ++#define CONFIG_SHOWWAVESPIC_FILTER 0 ++#define CONFIG_SPECTRUMSYNTH_FILTER 0 ++#define CONFIG_AMOVIE_FILTER 0 ++#define CONFIG_MOVIE_FILTER 0 ++#define CONFIG_AFIFO_FILTER 0 ++#define CONFIG_FIFO_FILTER 0 ++#define CONFIG_AA_DEMUXER 0 ++#define CONFIG_AAC_DEMUXER 1 ++#define CONFIG_AC3_DEMUXER 0 ++#define CONFIG_ACM_DEMUXER 0 ++#define CONFIG_ACT_DEMUXER 0 ++#define CONFIG_ADF_DEMUXER 0 ++#define CONFIG_ADP_DEMUXER 0 ++#define CONFIG_ADS_DEMUXER 0 ++#define CONFIG_ADX_DEMUXER 0 ++#define CONFIG_AEA_DEMUXER 0 ++#define CONFIG_AFC_DEMUXER 0 ++#define CONFIG_AIFF_DEMUXER 0 ++#define CONFIG_AIX_DEMUXER 0 ++#define CONFIG_ALP_DEMUXER 0 ++#define CONFIG_AMR_DEMUXER 0 ++#define CONFIG_AMRNB_DEMUXER 0 ++#define CONFIG_AMRWB_DEMUXER 0 ++#define CONFIG_ANM_DEMUXER 0 ++#define CONFIG_APC_DEMUXER 0 ++#define CONFIG_APE_DEMUXER 0 ++#define CONFIG_APM_DEMUXER 0 ++#define CONFIG_APNG_DEMUXER 0 ++#define CONFIG_APTX_DEMUXER 0 ++#define CONFIG_APTX_HD_DEMUXER 0 ++#define CONFIG_AQTITLE_DEMUXER 0 ++#define CONFIG_ARGO_ASF_DEMUXER 0 ++#define 
CONFIG_ASF_DEMUXER 0 ++#define CONFIG_ASF_O_DEMUXER 0 ++#define CONFIG_ASS_DEMUXER 0 ++#define CONFIG_AST_DEMUXER 0 ++#define CONFIG_AU_DEMUXER 0 ++#define CONFIG_AV1_DEMUXER 0 ++#define CONFIG_AVI_DEMUXER 0 ++#define CONFIG_AVISYNTH_DEMUXER 0 ++#define CONFIG_AVR_DEMUXER 0 ++#define CONFIG_AVS_DEMUXER 0 ++#define CONFIG_AVS2_DEMUXER 0 ++#define CONFIG_BETHSOFTVID_DEMUXER 0 ++#define CONFIG_BFI_DEMUXER 0 ++#define CONFIG_BINTEXT_DEMUXER 0 ++#define CONFIG_BINK_DEMUXER 0 ++#define CONFIG_BIT_DEMUXER 0 ++#define CONFIG_BMV_DEMUXER 0 ++#define CONFIG_BFSTM_DEMUXER 0 ++#define CONFIG_BRSTM_DEMUXER 0 ++#define CONFIG_BOA_DEMUXER 0 ++#define CONFIG_C93_DEMUXER 0 ++#define CONFIG_CAF_DEMUXER 0 ++#define CONFIG_CAVSVIDEO_DEMUXER 0 ++#define CONFIG_CDG_DEMUXER 0 ++#define CONFIG_CDXL_DEMUXER 0 ++#define CONFIG_CINE_DEMUXER 0 ++#define CONFIG_CODEC2_DEMUXER 0 ++#define CONFIG_CODEC2RAW_DEMUXER 0 ++#define CONFIG_CONCAT_DEMUXER 0 ++#define CONFIG_DASH_DEMUXER 0 ++#define CONFIG_DATA_DEMUXER 0 ++#define CONFIG_DAUD_DEMUXER 0 ++#define CONFIG_DCSTR_DEMUXER 0 ++#define CONFIG_DERF_DEMUXER 0 ++#define CONFIG_DFA_DEMUXER 0 ++#define CONFIG_DHAV_DEMUXER 0 ++#define CONFIG_DIRAC_DEMUXER 0 ++#define CONFIG_DNXHD_DEMUXER 0 ++#define CONFIG_DSF_DEMUXER 0 ++#define CONFIG_DSICIN_DEMUXER 0 ++#define CONFIG_DSS_DEMUXER 0 ++#define CONFIG_DTS_DEMUXER 0 ++#define CONFIG_DTSHD_DEMUXER 0 ++#define CONFIG_DV_DEMUXER 0 ++#define CONFIG_DVBSUB_DEMUXER 0 ++#define CONFIG_DVBTXT_DEMUXER 0 ++#define CONFIG_DXA_DEMUXER 0 ++#define CONFIG_EA_DEMUXER 0 ++#define CONFIG_EA_CDATA_DEMUXER 0 ++#define CONFIG_EAC3_DEMUXER 0 ++#define CONFIG_EPAF_DEMUXER 0 ++#define CONFIG_FFMETADATA_DEMUXER 0 ++#define CONFIG_FILMSTRIP_DEMUXER 0 ++#define CONFIG_FITS_DEMUXER 0 ++#define CONFIG_FLAC_DEMUXER 1 ++#define CONFIG_FLIC_DEMUXER 0 ++#define CONFIG_FLV_DEMUXER 0 ++#define CONFIG_LIVE_FLV_DEMUXER 0 ++#define CONFIG_FOURXM_DEMUXER 0 ++#define CONFIG_FRM_DEMUXER 0 ++#define CONFIG_FSB_DEMUXER 0 ++#define CONFIG_FWSE_DEMUXER 0 ++#define CONFIG_G722_DEMUXER 0 ++#define CONFIG_G723_1_DEMUXER 0 ++#define CONFIG_G726_DEMUXER 0 ++#define CONFIG_G726LE_DEMUXER 0 ++#define CONFIG_G729_DEMUXER 0 ++#define CONFIG_GDV_DEMUXER 0 ++#define CONFIG_GENH_DEMUXER 0 ++#define CONFIG_GIF_DEMUXER 0 ++#define CONFIG_GSM_DEMUXER 0 ++#define CONFIG_GXF_DEMUXER 0 ++#define CONFIG_H261_DEMUXER 0 ++#define CONFIG_H263_DEMUXER 0 ++#define CONFIG_H264_DEMUXER 0 ++#define CONFIG_HCA_DEMUXER 0 ++#define CONFIG_HCOM_DEMUXER 0 ++#define CONFIG_HEVC_DEMUXER 0 ++#define CONFIG_HLS_DEMUXER 0 ++#define CONFIG_HNM_DEMUXER 0 ++#define CONFIG_ICO_DEMUXER 0 ++#define CONFIG_IDCIN_DEMUXER 0 ++#define CONFIG_IDF_DEMUXER 0 ++#define CONFIG_IFF_DEMUXER 0 ++#define CONFIG_IFV_DEMUXER 0 ++#define CONFIG_ILBC_DEMUXER 0 ++#define CONFIG_IMAGE2_DEMUXER 0 ++#define CONFIG_IMAGE2PIPE_DEMUXER 0 ++#define CONFIG_IMAGE2_ALIAS_PIX_DEMUXER 0 ++#define CONFIG_IMAGE2_BRENDER_PIX_DEMUXER 0 ++#define CONFIG_INGENIENT_DEMUXER 0 ++#define CONFIG_IPMOVIE_DEMUXER 0 ++#define CONFIG_IRCAM_DEMUXER 0 ++#define CONFIG_ISS_DEMUXER 0 ++#define CONFIG_IV8_DEMUXER 0 ++#define CONFIG_IVF_DEMUXER 0 ++#define CONFIG_IVR_DEMUXER 0 ++#define CONFIG_JACOSUB_DEMUXER 0 ++#define CONFIG_JV_DEMUXER 0 ++#define CONFIG_KUX_DEMUXER 0 ++#define CONFIG_KVAG_DEMUXER 0 ++#define CONFIG_LMLM4_DEMUXER 0 ++#define CONFIG_LOAS_DEMUXER 0 ++#define CONFIG_LRC_DEMUXER 0 ++#define CONFIG_LVF_DEMUXER 0 ++#define CONFIG_LXF_DEMUXER 0 ++#define CONFIG_M4V_DEMUXER 0 ++#define CONFIG_MATROSKA_DEMUXER 1 ++#define CONFIG_MGSTS_DEMUXER 0 
++#define CONFIG_MICRODVD_DEMUXER 0 ++#define CONFIG_MJPEG_DEMUXER 0 ++#define CONFIG_MJPEG_2000_DEMUXER 0 ++#define CONFIG_MLP_DEMUXER 0 ++#define CONFIG_MLV_DEMUXER 0 ++#define CONFIG_MM_DEMUXER 0 ++#define CONFIG_MMF_DEMUXER 0 ++#define CONFIG_MOV_DEMUXER 1 ++#define CONFIG_MP3_DEMUXER 1 ++#define CONFIG_MPC_DEMUXER 0 ++#define CONFIG_MPC8_DEMUXER 0 ++#define CONFIG_MPEGPS_DEMUXER 0 ++#define CONFIG_MPEGTS_DEMUXER 0 ++#define CONFIG_MPEGTSRAW_DEMUXER 0 ++#define CONFIG_MPEGVIDEO_DEMUXER 0 ++#define CONFIG_MPJPEG_DEMUXER 0 ++#define CONFIG_MPL2_DEMUXER 0 ++#define CONFIG_MPSUB_DEMUXER 0 ++#define CONFIG_MSF_DEMUXER 0 ++#define CONFIG_MSNWC_TCP_DEMUXER 0 ++#define CONFIG_MTAF_DEMUXER 0 ++#define CONFIG_MTV_DEMUXER 0 ++#define CONFIG_MUSX_DEMUXER 0 ++#define CONFIG_MV_DEMUXER 0 ++#define CONFIG_MVI_DEMUXER 0 ++#define CONFIG_MXF_DEMUXER 0 ++#define CONFIG_MXG_DEMUXER 0 ++#define CONFIG_NC_DEMUXER 0 ++#define CONFIG_NISTSPHERE_DEMUXER 0 ++#define CONFIG_NSP_DEMUXER 0 ++#define CONFIG_NSV_DEMUXER 0 ++#define CONFIG_NUT_DEMUXER 0 ++#define CONFIG_NUV_DEMUXER 0 ++#define CONFIG_OGG_DEMUXER 1 ++#define CONFIG_OMA_DEMUXER 0 ++#define CONFIG_PAF_DEMUXER 0 ++#define CONFIG_PCM_ALAW_DEMUXER 0 ++#define CONFIG_PCM_MULAW_DEMUXER 0 ++#define CONFIG_PCM_VIDC_DEMUXER 0 ++#define CONFIG_PCM_F64BE_DEMUXER 0 ++#define CONFIG_PCM_F64LE_DEMUXER 0 ++#define CONFIG_PCM_F32BE_DEMUXER 0 ++#define CONFIG_PCM_F32LE_DEMUXER 0 ++#define CONFIG_PCM_S32BE_DEMUXER 0 ++#define CONFIG_PCM_S32LE_DEMUXER 0 ++#define CONFIG_PCM_S24BE_DEMUXER 0 ++#define CONFIG_PCM_S24LE_DEMUXER 0 ++#define CONFIG_PCM_S16BE_DEMUXER 0 ++#define CONFIG_PCM_S16LE_DEMUXER 0 ++#define CONFIG_PCM_S8_DEMUXER 0 ++#define CONFIG_PCM_U32BE_DEMUXER 0 ++#define CONFIG_PCM_U32LE_DEMUXER 0 ++#define CONFIG_PCM_U24BE_DEMUXER 0 ++#define CONFIG_PCM_U24LE_DEMUXER 0 ++#define CONFIG_PCM_U16BE_DEMUXER 0 ++#define CONFIG_PCM_U16LE_DEMUXER 0 ++#define CONFIG_PCM_U8_DEMUXER 0 ++#define CONFIG_PJS_DEMUXER 0 ++#define CONFIG_PMP_DEMUXER 0 ++#define CONFIG_PVA_DEMUXER 0 ++#define CONFIG_PVF_DEMUXER 0 ++#define CONFIG_QCP_DEMUXER 0 ++#define CONFIG_R3D_DEMUXER 0 ++#define CONFIG_RAWVIDEO_DEMUXER 0 ++#define CONFIG_REALTEXT_DEMUXER 0 ++#define CONFIG_REDSPARK_DEMUXER 0 ++#define CONFIG_RL2_DEMUXER 0 ++#define CONFIG_RM_DEMUXER 0 ++#define CONFIG_ROQ_DEMUXER 0 ++#define CONFIG_RPL_DEMUXER 0 ++#define CONFIG_RSD_DEMUXER 0 ++#define CONFIG_RSO_DEMUXER 0 ++#define CONFIG_RTP_DEMUXER 0 ++#define CONFIG_RTSP_DEMUXER 0 ++#define CONFIG_S337M_DEMUXER 0 ++#define CONFIG_SAMI_DEMUXER 0 ++#define CONFIG_SAP_DEMUXER 0 ++#define CONFIG_SBC_DEMUXER 0 ++#define CONFIG_SBG_DEMUXER 0 ++#define CONFIG_SCC_DEMUXER 0 ++#define CONFIG_SDP_DEMUXER 0 ++#define CONFIG_SDR2_DEMUXER 0 ++#define CONFIG_SDS_DEMUXER 0 ++#define CONFIG_SDX_DEMUXER 0 ++#define CONFIG_SEGAFILM_DEMUXER 0 ++#define CONFIG_SER_DEMUXER 0 ++#define CONFIG_SHORTEN_DEMUXER 0 ++#define CONFIG_SIFF_DEMUXER 0 ++#define CONFIG_SLN_DEMUXER 0 ++#define CONFIG_SMACKER_DEMUXER 0 ++#define CONFIG_SMJPEG_DEMUXER 0 ++#define CONFIG_SMUSH_DEMUXER 0 ++#define CONFIG_SOL_DEMUXER 0 ++#define CONFIG_SOX_DEMUXER 0 ++#define CONFIG_SPDIF_DEMUXER 0 ++#define CONFIG_SRT_DEMUXER 0 ++#define CONFIG_STR_DEMUXER 0 ++#define CONFIG_STL_DEMUXER 0 ++#define CONFIG_SUBVIEWER1_DEMUXER 0 ++#define CONFIG_SUBVIEWER_DEMUXER 0 ++#define CONFIG_SUP_DEMUXER 0 ++#define CONFIG_SVAG_DEMUXER 0 ++#define CONFIG_SWF_DEMUXER 0 ++#define CONFIG_TAK_DEMUXER 0 ++#define CONFIG_TEDCAPTIONS_DEMUXER 0 ++#define CONFIG_THP_DEMUXER 0 ++#define CONFIG_THREEDOSTR_DEMUXER 0 
++#define CONFIG_TIERTEXSEQ_DEMUXER 0 ++#define CONFIG_TMV_DEMUXER 0 ++#define CONFIG_TRUEHD_DEMUXER 0 ++#define CONFIG_TTA_DEMUXER 0 ++#define CONFIG_TXD_DEMUXER 0 ++#define CONFIG_TTY_DEMUXER 0 ++#define CONFIG_TY_DEMUXER 0 ++#define CONFIG_V210_DEMUXER 0 ++#define CONFIG_V210X_DEMUXER 0 ++#define CONFIG_VAG_DEMUXER 0 ++#define CONFIG_VC1_DEMUXER 0 ++#define CONFIG_VC1T_DEMUXER 0 ++#define CONFIG_VIVIDAS_DEMUXER 0 ++#define CONFIG_VIVO_DEMUXER 0 ++#define CONFIG_VMD_DEMUXER 0 ++#define CONFIG_VOBSUB_DEMUXER 0 ++#define CONFIG_VOC_DEMUXER 0 ++#define CONFIG_VPK_DEMUXER 0 ++#define CONFIG_VPLAYER_DEMUXER 0 ++#define CONFIG_VQF_DEMUXER 0 ++#define CONFIG_W64_DEMUXER 0 ++#define CONFIG_WAV_DEMUXER 1 ++#define CONFIG_WC3_DEMUXER 0 ++#define CONFIG_WEBM_DASH_MANIFEST_DEMUXER 0 ++#define CONFIG_WEBVTT_DEMUXER 0 ++#define CONFIG_WSAUD_DEMUXER 0 ++#define CONFIG_WSD_DEMUXER 0 ++#define CONFIG_WSVQA_DEMUXER 0 ++#define CONFIG_WTV_DEMUXER 0 ++#define CONFIG_WVE_DEMUXER 0 ++#define CONFIG_WV_DEMUXER 0 ++#define CONFIG_XA_DEMUXER 0 ++#define CONFIG_XBIN_DEMUXER 0 ++#define CONFIG_XMV_DEMUXER 0 ++#define CONFIG_XVAG_DEMUXER 0 ++#define CONFIG_XWMA_DEMUXER 0 ++#define CONFIG_YOP_DEMUXER 0 ++#define CONFIG_YUV4MPEGPIPE_DEMUXER 0 ++#define CONFIG_IMAGE_BMP_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_DDS_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_DPX_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_EXR_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_GIF_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_J2K_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_JPEG_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_JPEGLS_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PAM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PBM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PCX_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PGMYUV_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PGM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PICTOR_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PNG_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PPM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PSD_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_QDRAW_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_SGI_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_SVG_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_SUNRAST_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_TIFF_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_WEBP_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_XPM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_XWD_PIPE_DEMUXER 0 ++#define CONFIG_LIBGME_DEMUXER 0 ++#define CONFIG_LIBMODPLUG_DEMUXER 0 ++#define CONFIG_LIBOPENMPT_DEMUXER 0 ++#define CONFIG_VAPOURSYNTH_DEMUXER 0 ++#define CONFIG_A64_MUXER 0 ++#define CONFIG_AC3_MUXER 0 ++#define CONFIG_ADTS_MUXER 0 ++#define CONFIG_ADX_MUXER 0 ++#define CONFIG_AIFF_MUXER 0 ++#define CONFIG_AMR_MUXER 0 ++#define CONFIG_APNG_MUXER 0 ++#define CONFIG_APTX_MUXER 0 ++#define CONFIG_APTX_HD_MUXER 0 ++#define CONFIG_ASF_MUXER 0 ++#define CONFIG_ASS_MUXER 0 ++#define CONFIG_AST_MUXER 0 ++#define CONFIG_ASF_STREAM_MUXER 0 ++#define CONFIG_AU_MUXER 0 ++#define CONFIG_AVI_MUXER 0 ++#define CONFIG_AVM2_MUXER 0 ++#define CONFIG_AVS2_MUXER 0 ++#define CONFIG_BIT_MUXER 0 ++#define CONFIG_CAF_MUXER 0 ++#define CONFIG_CAVSVIDEO_MUXER 0 ++#define CONFIG_CODEC2_MUXER 0 ++#define CONFIG_CODEC2RAW_MUXER 0 ++#define CONFIG_CRC_MUXER 0 ++#define CONFIG_DASH_MUXER 0 ++#define CONFIG_DATA_MUXER 0 ++#define CONFIG_DAUD_MUXER 0 ++#define CONFIG_DIRAC_MUXER 0 ++#define CONFIG_DNXHD_MUXER 0 ++#define CONFIG_DTS_MUXER 0 ++#define CONFIG_DV_MUXER 0 ++#define CONFIG_EAC3_MUXER 0 ++#define CONFIG_F4V_MUXER 0 ++#define CONFIG_FFMETADATA_MUXER 0 ++#define CONFIG_FIFO_MUXER 0 ++#define CONFIG_FIFO_TEST_MUXER 0 ++#define CONFIG_FILMSTRIP_MUXER 0 
++#define CONFIG_FITS_MUXER 0 ++#define CONFIG_FLAC_MUXER 0 ++#define CONFIG_FLV_MUXER 0 ++#define CONFIG_FRAMECRC_MUXER 0 ++#define CONFIG_FRAMEHASH_MUXER 0 ++#define CONFIG_FRAMEMD5_MUXER 0 ++#define CONFIG_G722_MUXER 0 ++#define CONFIG_G723_1_MUXER 0 ++#define CONFIG_G726_MUXER 0 ++#define CONFIG_G726LE_MUXER 0 ++#define CONFIG_GIF_MUXER 0 ++#define CONFIG_GSM_MUXER 0 ++#define CONFIG_GXF_MUXER 0 ++#define CONFIG_H261_MUXER 0 ++#define CONFIG_H263_MUXER 0 ++#define CONFIG_H264_MUXER 0 ++#define CONFIG_HASH_MUXER 0 ++#define CONFIG_HDS_MUXER 0 ++#define CONFIG_HEVC_MUXER 0 ++#define CONFIG_HLS_MUXER 0 ++#define CONFIG_ICO_MUXER 0 ++#define CONFIG_ILBC_MUXER 0 ++#define CONFIG_IMAGE2_MUXER 0 ++#define CONFIG_IMAGE2PIPE_MUXER 0 ++#define CONFIG_IPOD_MUXER 0 ++#define CONFIG_IRCAM_MUXER 0 ++#define CONFIG_ISMV_MUXER 0 ++#define CONFIG_IVF_MUXER 0 ++#define CONFIG_JACOSUB_MUXER 0 ++#define CONFIG_LATM_MUXER 0 ++#define CONFIG_LRC_MUXER 0 ++#define CONFIG_M4V_MUXER 0 ++#define CONFIG_MD5_MUXER 0 ++#define CONFIG_MATROSKA_MUXER 0 ++#define CONFIG_MATROSKA_AUDIO_MUXER 0 ++#define CONFIG_MICRODVD_MUXER 0 ++#define CONFIG_MJPEG_MUXER 0 ++#define CONFIG_MLP_MUXER 0 ++#define CONFIG_MMF_MUXER 0 ++#define CONFIG_MOV_MUXER 0 ++#define CONFIG_MP2_MUXER 0 ++#define CONFIG_MP3_MUXER 0 ++#define CONFIG_MP4_MUXER 0 ++#define CONFIG_MPEG1SYSTEM_MUXER 0 ++#define CONFIG_MPEG1VCD_MUXER 0 ++#define CONFIG_MPEG1VIDEO_MUXER 0 ++#define CONFIG_MPEG2DVD_MUXER 0 ++#define CONFIG_MPEG2SVCD_MUXER 0 ++#define CONFIG_MPEG2VIDEO_MUXER 0 ++#define CONFIG_MPEG2VOB_MUXER 0 ++#define CONFIG_MPEGTS_MUXER 0 ++#define CONFIG_MPJPEG_MUXER 0 ++#define CONFIG_MXF_MUXER 0 ++#define CONFIG_MXF_D10_MUXER 0 ++#define CONFIG_MXF_OPATOM_MUXER 0 ++#define CONFIG_NULL_MUXER 0 ++#define CONFIG_NUT_MUXER 0 ++#define CONFIG_OGA_MUXER 0 ++#define CONFIG_OGG_MUXER 0 ++#define CONFIG_OGV_MUXER 0 ++#define CONFIG_OMA_MUXER 0 ++#define CONFIG_OPUS_MUXER 0 ++#define CONFIG_PCM_ALAW_MUXER 0 ++#define CONFIG_PCM_MULAW_MUXER 0 ++#define CONFIG_PCM_VIDC_MUXER 0 ++#define CONFIG_PCM_F64BE_MUXER 0 ++#define CONFIG_PCM_F64LE_MUXER 0 ++#define CONFIG_PCM_F32BE_MUXER 0 ++#define CONFIG_PCM_F32LE_MUXER 0 ++#define CONFIG_PCM_S32BE_MUXER 0 ++#define CONFIG_PCM_S32LE_MUXER 0 ++#define CONFIG_PCM_S24BE_MUXER 0 ++#define CONFIG_PCM_S24LE_MUXER 0 ++#define CONFIG_PCM_S16BE_MUXER 0 ++#define CONFIG_PCM_S16LE_MUXER 0 ++#define CONFIG_PCM_S8_MUXER 0 ++#define CONFIG_PCM_U32BE_MUXER 0 ++#define CONFIG_PCM_U32LE_MUXER 0 ++#define CONFIG_PCM_U24BE_MUXER 0 ++#define CONFIG_PCM_U24LE_MUXER 0 ++#define CONFIG_PCM_U16BE_MUXER 0 ++#define CONFIG_PCM_U16LE_MUXER 0 ++#define CONFIG_PCM_U8_MUXER 0 ++#define CONFIG_PSP_MUXER 0 ++#define CONFIG_RAWVIDEO_MUXER 0 ++#define CONFIG_RM_MUXER 0 ++#define CONFIG_ROQ_MUXER 0 ++#define CONFIG_RSO_MUXER 0 ++#define CONFIG_RTP_MUXER 0 ++#define CONFIG_RTP_MPEGTS_MUXER 0 ++#define CONFIG_RTSP_MUXER 0 ++#define CONFIG_SAP_MUXER 0 ++#define CONFIG_SBC_MUXER 0 ++#define CONFIG_SCC_MUXER 0 ++#define CONFIG_SEGAFILM_MUXER 0 ++#define CONFIG_SEGMENT_MUXER 0 ++#define CONFIG_STREAM_SEGMENT_MUXER 0 ++#define CONFIG_SINGLEJPEG_MUXER 0 ++#define CONFIG_SMJPEG_MUXER 0 ++#define CONFIG_SMOOTHSTREAMING_MUXER 0 ++#define CONFIG_SOX_MUXER 0 ++#define CONFIG_SPX_MUXER 0 ++#define CONFIG_SPDIF_MUXER 0 ++#define CONFIG_SRT_MUXER 0 ++#define CONFIG_STREAMHASH_MUXER 0 ++#define CONFIG_SUP_MUXER 0 ++#define CONFIG_SWF_MUXER 0 ++#define CONFIG_TEE_MUXER 0 ++#define CONFIG_TG2_MUXER 0 ++#define CONFIG_TGP_MUXER 0 ++#define CONFIG_MKVTIMESTAMP_V2_MUXER 0 
++#define CONFIG_TRUEHD_MUXER 0 ++#define CONFIG_TTA_MUXER 0 ++#define CONFIG_UNCODEDFRAMECRC_MUXER 0 ++#define CONFIG_VC1_MUXER 0 ++#define CONFIG_VC1T_MUXER 0 ++#define CONFIG_VOC_MUXER 0 ++#define CONFIG_W64_MUXER 0 ++#define CONFIG_WAV_MUXER 0 ++#define CONFIG_WEBM_MUXER 0 ++#define CONFIG_WEBM_DASH_MANIFEST_MUXER 0 ++#define CONFIG_WEBM_CHUNK_MUXER 0 ++#define CONFIG_WEBP_MUXER 0 ++#define CONFIG_WEBVTT_MUXER 0 ++#define CONFIG_WTV_MUXER 0 ++#define CONFIG_WV_MUXER 0 ++#define CONFIG_YUV4MPEGPIPE_MUXER 0 ++#define CONFIG_CHROMAPRINT_MUXER 0 ++#define CONFIG_ASYNC_PROTOCOL 0 ++#define CONFIG_BLURAY_PROTOCOL 0 ++#define CONFIG_CACHE_PROTOCOL 0 ++#define CONFIG_CONCAT_PROTOCOL 0 ++#define CONFIG_CRYPTO_PROTOCOL 0 ++#define CONFIG_DATA_PROTOCOL 0 ++#define CONFIG_FFRTMPCRYPT_PROTOCOL 0 ++#define CONFIG_FFRTMPHTTP_PROTOCOL 0 ++#define CONFIG_FILE_PROTOCOL 0 ++#define CONFIG_FTP_PROTOCOL 0 ++#define CONFIG_GOPHER_PROTOCOL 0 ++#define CONFIG_HLS_PROTOCOL 0 ++#define CONFIG_HTTP_PROTOCOL 0 ++#define CONFIG_HTTPPROXY_PROTOCOL 0 ++#define CONFIG_HTTPS_PROTOCOL 0 ++#define CONFIG_ICECAST_PROTOCOL 0 ++#define CONFIG_MMSH_PROTOCOL 0 ++#define CONFIG_MMST_PROTOCOL 0 ++#define CONFIG_MD5_PROTOCOL 0 ++#define CONFIG_PIPE_PROTOCOL 0 ++#define CONFIG_PROMPEG_PROTOCOL 0 ++#define CONFIG_RTMP_PROTOCOL 0 ++#define CONFIG_RTMPE_PROTOCOL 0 ++#define CONFIG_RTMPS_PROTOCOL 0 ++#define CONFIG_RTMPT_PROTOCOL 0 ++#define CONFIG_RTMPTE_PROTOCOL 0 ++#define CONFIG_RTMPTS_PROTOCOL 0 ++#define CONFIG_RTP_PROTOCOL 0 ++#define CONFIG_SCTP_PROTOCOL 0 ++#define CONFIG_SRTP_PROTOCOL 0 ++#define CONFIG_SUBFILE_PROTOCOL 0 ++#define CONFIG_TEE_PROTOCOL 0 ++#define CONFIG_TCP_PROTOCOL 0 ++#define CONFIG_TLS_PROTOCOL 0 ++#define CONFIG_UDP_PROTOCOL 0 ++#define CONFIG_UDPLITE_PROTOCOL 0 ++#define CONFIG_UNIX_PROTOCOL 0 ++#define CONFIG_LIBAMQP_PROTOCOL 0 ++#define CONFIG_LIBRTMP_PROTOCOL 0 ++#define CONFIG_LIBRTMPE_PROTOCOL 0 ++#define CONFIG_LIBRTMPS_PROTOCOL 0 ++#define CONFIG_LIBRTMPT_PROTOCOL 0 ++#define CONFIG_LIBRTMPTE_PROTOCOL 0 ++#define CONFIG_LIBSRT_PROTOCOL 0 ++#define CONFIG_LIBSSH_PROTOCOL 0 ++#define CONFIG_LIBSMBCLIENT_PROTOCOL 0 ++#define CONFIG_LIBZMQ_PROTOCOL 0 ++#endif /* FFMPEG_CONFIG_H */ +diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavcodec/bsf_list.c b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavcodec/bsf_list.c +new file mode 100644 +index 000000000..d31ece942 +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavcodec/bsf_list.c +@@ -0,0 +1,3 @@ ++static const AVBitStreamFilter * const bitstream_filters[] = { ++ &ff_null_bsf, ++ NULL }; +diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavcodec/codec_list.c b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavcodec/codec_list.c +new file mode 100644 +index 000000000..49f757b2d +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavcodec/codec_list.c +@@ -0,0 +1,20 @@ ++static const AVCodec * const codec_list[] = { ++ &ff_h264_decoder, ++ &ff_theora_decoder, ++ &ff_vp3_decoder, ++ &ff_vp8_decoder, ++ &ff_aac_decoder, ++ &ff_flac_decoder, ++ &ff_mp3_decoder, ++ &ff_vorbis_decoder, ++ &ff_pcm_alaw_decoder, ++ &ff_pcm_f32le_decoder, ++ &ff_pcm_mulaw_decoder, ++ &ff_pcm_s16be_decoder, ++ &ff_pcm_s16le_decoder, ++ &ff_pcm_s24be_decoder, ++ &ff_pcm_s24le_decoder, ++ &ff_pcm_s32le_decoder, ++ &ff_pcm_u8_decoder, ++ 
&ff_libopus_decoder, ++ NULL }; +diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavcodec/parser_list.c b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavcodec/parser_list.c +new file mode 100644 +index 000000000..50acddb28 +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavcodec/parser_list.c +@@ -0,0 +1,11 @@ ++static const AVCodecParser * const parser_list[] = { ++ &ff_aac_parser, ++ &ff_flac_parser, ++ &ff_h264_parser, ++ &ff_mpegaudio_parser, ++ &ff_opus_parser, ++ &ff_vorbis_parser, ++ &ff_vp3_parser, ++ &ff_vp8_parser, ++ &ff_vp9_parser, ++ NULL }; +diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavformat/demuxer_list.c b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavformat/demuxer_list.c +new file mode 100644 +index 000000000..920b22bfa +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavformat/demuxer_list.c +@@ -0,0 +1,9 @@ ++static const AVInputFormat * const demuxer_list[] = { ++ &ff_aac_demuxer, ++ &ff_flac_demuxer, ++ &ff_matroska_demuxer, ++ &ff_mov_demuxer, ++ &ff_mp3_demuxer, ++ &ff_ogg_demuxer, ++ &ff_wav_demuxer, ++ NULL }; +diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavformat/muxer_list.c b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavformat/muxer_list.c +new file mode 100644 +index 000000000..f36d9499c +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavformat/muxer_list.c +@@ -0,0 +1,2 @@ ++static const AVOutputFormat * const muxer_list[] = { ++ NULL }; +diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavformat/protocol_list.c b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavformat/protocol_list.c +new file mode 100644 +index 000000000..247e1e4c3 +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavformat/protocol_list.c +@@ -0,0 +1,2 @@ ++static const URLProtocol * const url_protocols[] = { ++ NULL }; +diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavutil/avconfig.h b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavutil/avconfig.h +new file mode 100644 +index 000000000..8558b3502 +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavutil/avconfig.h +@@ -0,0 +1,6 @@ ++/* Generated by ffmpeg configure */ ++#ifndef AVUTIL_AVCONFIG_H ++#define AVUTIL_AVCONFIG_H ++#define AV_HAVE_BIGENDIAN 0 ++#define AV_HAVE_FAST_UNALIGNED 0 ++#endif /* AVUTIL_AVCONFIG_H */ +diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavutil/ffversion.h b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavutil/ffversion.h +new file mode 100644 +index 000000000..31e5b5036 +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chrome/linux/la64/libavutil/ffversion.h +@@ -0,0 +1,5 @@ ++/* Automatically generated by version.sh, do not manually edit! 
*/ ++#ifndef AVUTIL_FFVERSION_H ++#define AVUTIL_FFVERSION_H ++#define FFMPEG_VERSION "git-2020-06-16-23b2a15c25" ++#endif /* AVUTIL_FFVERSION_H */ +diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/config.h b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/config.h +new file mode 100644 +index 000000000..23fc2d09f +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/config.h +@@ -0,0 +1,2589 @@ ++/* Automatically generated by configure - do not modify! */ ++#ifndef FFMPEG_CONFIG_H ++#define FFMPEG_CONFIG_H ++/* #define FFMPEG_CONFIGURATION "--disable-everything --disable-all --disable-doc --disable-htmlpages --disable-manpages --disable-podpages --disable-txtpages --disable-static --enable-avcodec --enable-avformat --enable-avutil --enable-fft --enable-rdft --enable-static --enable-libopus --disable-debug --disable-bzlib --disable-iconv --disable-lzo --disable-network --disable-schannel --disable-sdl2 --disable-symver --disable-xlib --disable-zlib --disable-securetransport --disable-faan --disable-alsa --disable-autodetect --enable-decoder='vorbis,libopus,flac' --enable-decoder='pcm_u8,pcm_s16le,pcm_s24le,pcm_s32le,pcm_f32le,mp3' --enable-decoder='pcm_s16be,pcm_s24be,pcm_mulaw,pcm_alaw' --enable-demuxer='ogg,matroska,wav,flac,mp3,mov' --enable-parser='opus,vorbis,flac,mpegaudio,vp9' --extra-cflags=-I/mnt/chromium/src/third_party/opus/src/include --disable-linux-perf --x86asmexe=nasm --optflags='\"-O2\"' --enable-decoder='theora,vp8' --enable-parser='vp3,vp8' --target-os=linux --enable-pic --cc=clang --cxx=clang++ --ld=clang --enable-decoder='aac,h264' --enable-demuxer=aac --enable-parser='aac,h264' --enable-decoder=mpeg4 --enable-parser='h263,mpeg4video' --enable-demuxer=avi --enable-demuxer=amr --enable-decoder='amrnb,amrwb' --enable-decoder=gsm_ms --enable-parser=gsm" -- elide long configuration string from binary */ ++#define FFMPEG_LICENSE "LGPL version 2.1 or later" ++#define CONFIG_THIS_YEAR 2020 ++#define FFMPEG_DATADIR "/usr/local/share/ffmpeg" ++#define AVCONV_DATADIR "/usr/local/share/ffmpeg" ++#define CC_IDENT "clang version 8.0.1" ++#define av_restrict restrict ++#define EXTERN_PREFIX "" ++#define EXTERN_ASM ++#define BUILDSUF "" ++#define SLIBSUF ".so" ++#define HAVE_MMX2 HAVE_MMXEXT ++#define SWS_MAX_FILTER_SIZE 256 ++#define ARCH_AARCH64 0 ++#define ARCH_ALPHA 0 ++#define ARCH_ARM 0 ++#define ARCH_AVR32 0 ++#define ARCH_AVR32_AP 0 ++#define ARCH_AVR32_UC 0 ++#define ARCH_BFIN 0 ++#define ARCH_IA64 0 ++#define ARCH_M68K 0 ++#define ARCH_MIPS 0 ++#define ARCH_MIPS64 0 ++#define ARCH_PARISC 0 ++#define ARCH_PPC 0 ++#define ARCH_PPC64 0 ++#define ARCH_S390 0 ++#define ARCH_SH4 0 ++#define ARCH_SPARC 0 ++#define ARCH_SPARC64 0 ++#define ARCH_TILEGX 0 ++#define ARCH_TILEPRO 0 ++#define ARCH_TOMI 0 ++#define ARCH_X86 0 ++#define ARCH_X86_32 0 ++#define ARCH_X86_64 0 ++#define HAVE_ARMV5TE 0 ++#define HAVE_ARMV6 0 ++#define HAVE_ARMV6T2 0 ++#define HAVE_ARMV8 0 ++#define HAVE_NEON 0 ++#define HAVE_VFP 0 ++#define HAVE_VFPV3 0 ++#define HAVE_SETEND 0 ++#define HAVE_ALTIVEC 0 ++#define HAVE_DCBZL 0 ++#define HAVE_LDBRX 0 ++#define HAVE_POWER8 0 ++#define HAVE_PPC4XX 0 ++#define HAVE_VSX 0 ++#define HAVE_AESNI 0 ++#define HAVE_AMD3DNOW 0 ++#define HAVE_AMD3DNOWEXT 0 ++#define HAVE_AVX 0 ++#define HAVE_AVX2 0 ++#define HAVE_AVX512 0 ++#define HAVE_FMA3 0 ++#define HAVE_FMA4 0 ++#define HAVE_MMX 0 ++#define HAVE_MMXEXT 0 ++#define HAVE_SSE 0 ++#define HAVE_SSE2 0 ++#define 
HAVE_SSE3 0 ++#define HAVE_SSE4 0 ++#define HAVE_SSE42 0 ++#define HAVE_SSSE3 0 ++#define HAVE_XOP 0 ++#define HAVE_CPUNOP 0 ++#define HAVE_I686 0 ++#define HAVE_MIPSFPU 0 ++#define HAVE_MIPS32R2 0 ++#define HAVE_MIPS32R5 0 ++#define HAVE_MIPS64R2 0 ++#define HAVE_MIPS32R6 0 ++#define HAVE_MIPS64R6 0 ++#define HAVE_MIPSDSP 0 ++#define HAVE_MIPSDSPR2 0 ++#define HAVE_MSA 0 ++#define HAVE_MSA2 0 ++#define HAVE_LOONGSON2 0 ++#define HAVE_LOONGSON3 0 ++#define HAVE_MMI 0 ++#define HAVE_ARMV5TE_EXTERNAL 0 ++#define HAVE_ARMV6_EXTERNAL 0 ++#define HAVE_ARMV6T2_EXTERNAL 0 ++#define HAVE_ARMV8_EXTERNAL 0 ++#define HAVE_NEON_EXTERNAL 0 ++#define HAVE_VFP_EXTERNAL 0 ++#define HAVE_VFPV3_EXTERNAL 0 ++#define HAVE_SETEND_EXTERNAL 0 ++#define HAVE_ALTIVEC_EXTERNAL 0 ++#define HAVE_DCBZL_EXTERNAL 0 ++#define HAVE_LDBRX_EXTERNAL 0 ++#define HAVE_POWER8_EXTERNAL 0 ++#define HAVE_PPC4XX_EXTERNAL 0 ++#define HAVE_VSX_EXTERNAL 0 ++#define HAVE_AESNI_EXTERNAL 0 ++#define HAVE_AMD3DNOW_EXTERNAL 0 ++#define HAVE_AMD3DNOWEXT_EXTERNAL 0 ++#define HAVE_AVX_EXTERNAL 0 ++#define HAVE_AVX2_EXTERNAL 0 ++#define HAVE_AVX512_EXTERNAL 0 ++#define HAVE_FMA3_EXTERNAL 0 ++#define HAVE_FMA4_EXTERNAL 0 ++#define HAVE_MMX_EXTERNAL 0 ++#define HAVE_MMXEXT_EXTERNAL 0 ++#define HAVE_SSE_EXTERNAL 0 ++#define HAVE_SSE2_EXTERNAL 0 ++#define HAVE_SSE3_EXTERNAL 0 ++#define HAVE_SSE4_EXTERNAL 0 ++#define HAVE_SSE42_EXTERNAL 0 ++#define HAVE_SSSE3_EXTERNAL 0 ++#define HAVE_XOP_EXTERNAL 0 ++#define HAVE_CPUNOP_EXTERNAL 0 ++#define HAVE_I686_EXTERNAL 0 ++#define HAVE_MIPSFPU_EXTERNAL 0 ++#define HAVE_MIPS32R2_EXTERNAL 0 ++#define HAVE_MIPS32R5_EXTERNAL 0 ++#define HAVE_MIPS64R2_EXTERNAL 0 ++#define HAVE_MIPS32R6_EXTERNAL 0 ++#define HAVE_MIPS64R6_EXTERNAL 0 ++#define HAVE_MIPSDSP_EXTERNAL 0 ++#define HAVE_MIPSDSPR2_EXTERNAL 0 ++#define HAVE_MSA_EXTERNAL 0 ++#define HAVE_MSA2_EXTERNAL 0 ++#define HAVE_LOONGSON2_EXTERNAL 0 ++#define HAVE_LOONGSON3_EXTERNAL 0 ++#define HAVE_MMI_EXTERNAL 0 ++#define HAVE_ARMV5TE_INLINE 0 ++#define HAVE_ARMV6_INLINE 0 ++#define HAVE_ARMV6T2_INLINE 0 ++#define HAVE_ARMV8_INLINE 0 ++#define HAVE_NEON_INLINE 0 ++#define HAVE_VFP_INLINE 0 ++#define HAVE_VFPV3_INLINE 0 ++#define HAVE_SETEND_INLINE 0 ++#define HAVE_ALTIVEC_INLINE 0 ++#define HAVE_DCBZL_INLINE 0 ++#define HAVE_LDBRX_INLINE 0 ++#define HAVE_POWER8_INLINE 0 ++#define HAVE_PPC4XX_INLINE 0 ++#define HAVE_VSX_INLINE 0 ++#define HAVE_AESNI_INLINE 0 ++#define HAVE_AMD3DNOW_INLINE 0 ++#define HAVE_AMD3DNOWEXT_INLINE 0 ++#define HAVE_AVX_INLINE 0 ++#define HAVE_AVX2_INLINE 0 ++#define HAVE_AVX512_INLINE 0 ++#define HAVE_FMA3_INLINE 0 ++#define HAVE_FMA4_INLINE 0 ++#define HAVE_MMX_INLINE 0 ++#define HAVE_MMXEXT_INLINE 0 ++#define HAVE_SSE_INLINE 0 ++#define HAVE_SSE2_INLINE 0 ++#define HAVE_SSE3_INLINE 0 ++#define HAVE_SSE4_INLINE 0 ++#define HAVE_SSE42_INLINE 0 ++#define HAVE_SSSE3_INLINE 0 ++#define HAVE_XOP_INLINE 0 ++#define HAVE_CPUNOP_INLINE 0 ++#define HAVE_I686_INLINE 0 ++#define HAVE_MIPSFPU_INLINE 0 ++#define HAVE_MIPS32R2_INLINE 0 ++#define HAVE_MIPS32R5_INLINE 0 ++#define HAVE_MIPS64R2_INLINE 0 ++#define HAVE_MIPS32R6_INLINE 0 ++#define HAVE_MIPS64R6_INLINE 0 ++#define HAVE_MIPSDSP_INLINE 0 ++#define HAVE_MIPSDSPR2_INLINE 0 ++#define HAVE_MSA_INLINE 0 ++#define HAVE_MSA2_INLINE 0 ++#define HAVE_LOONGSON2_INLINE 0 ++#define HAVE_LOONGSON3_INLINE 0 ++#define HAVE_MMI_INLINE 0 ++#define HAVE_ALIGNED_STACK 0 ++#define HAVE_FAST_64BIT 0 ++#define HAVE_FAST_CLZ 0 ++#define HAVE_FAST_CMOV 0 ++#define HAVE_LOCAL_ALIGNED 0 ++#define HAVE_SIMD_ALIGN_16 0 
++#define HAVE_SIMD_ALIGN_32 0 ++#define HAVE_SIMD_ALIGN_64 0 ++#define HAVE_ATOMIC_CAS_PTR 0 ++#define HAVE_MACHINE_RW_BARRIER 0 ++#define HAVE_MEMORYBARRIER 0 ++#define HAVE_MM_EMPTY 0 ++#define HAVE_RDTSC 0 ++#define HAVE_SEM_TIMEDWAIT 1 ++#define HAVE_SYNC_VAL_COMPARE_AND_SWAP 1 ++#define HAVE_CABS 0 ++#define HAVE_CEXP 0 ++#define HAVE_INLINE_ASM 1 ++#define HAVE_SYMVER 0 ++#define HAVE_X86ASM 0 ++#define HAVE_BIGENDIAN 0 ++#define HAVE_FAST_UNALIGNED 0 ++#define HAVE_ARPA_INET_H 0 ++#define HAVE_ASM_TYPES_H 1 ++#define HAVE_CDIO_PARANOIA_H 0 ++#define HAVE_CDIO_PARANOIA_PARANOIA_H 0 ++#define HAVE_CUDA_H 0 ++#define HAVE_DISPATCH_DISPATCH_H 0 ++#define HAVE_DEV_BKTR_IOCTL_BT848_H 0 ++#define HAVE_DEV_BKTR_IOCTL_METEOR_H 0 ++#define HAVE_DEV_IC_BT8XX_H 0 ++#define HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H 0 ++#define HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H 0 ++#define HAVE_DIRECT_H 0 ++#define HAVE_DIRENT_H 1 ++#define HAVE_DXGIDEBUG_H 0 ++#define HAVE_DXVA_H 0 ++#define HAVE_ES2_GL_H 0 ++#define HAVE_GSM_H 0 ++#define HAVE_IO_H 0 ++#define HAVE_LINUX_PERF_EVENT_H 1 ++#define HAVE_MACHINE_IOCTL_BT848_H 0 ++#define HAVE_MACHINE_IOCTL_METEOR_H 0 ++#define HAVE_MALLOC_H 1 ++#define HAVE_OPENCV2_CORE_CORE_C_H 0 ++#define HAVE_OPENGL_GL3_H 0 ++#define HAVE_POLL_H 1 ++#define HAVE_SYS_PARAM_H 1 ++#define HAVE_SYS_RESOURCE_H 1 ++#define HAVE_SYS_SELECT_H 1 ++#define HAVE_SYS_SOUNDCARD_H 1 ++#define HAVE_SYS_TIME_H 1 ++#define HAVE_SYS_UN_H 1 ++#define HAVE_SYS_VIDEOIO_H 0 ++#define HAVE_TERMIOS_H 1 ++#define HAVE_UDPLITE_H 0 ++#define HAVE_UNISTD_H 1 ++#define HAVE_VALGRIND_VALGRIND_H 0 /* #define HAVE_VALGRIND_VALGRIND_H 0 -- forced to 0. See https://crbug.com/590440 */ ++#define HAVE_WINDOWS_H 0 ++#define HAVE_WINSOCK2_H 0 ++#define HAVE_INTRINSICS_NEON 0 ++#define HAVE_ATANF 1 ++#define HAVE_ATAN2F 1 ++#define HAVE_CBRT 1 ++#define HAVE_CBRTF 1 ++#define HAVE_COPYSIGN 1 ++#define HAVE_COSF 1 ++#define HAVE_ERF 1 ++#define HAVE_EXP2 1 ++#define HAVE_EXP2F 1 ++#define HAVE_EXPF 1 ++#define HAVE_HYPOT 1 ++#define HAVE_ISFINITE 1 ++#define HAVE_ISINF 1 ++#define HAVE_ISNAN 1 ++#define HAVE_LDEXPF 1 ++#define HAVE_LLRINT 1 ++#define HAVE_LLRINTF 1 ++#define HAVE_LOG2 1 ++#define HAVE_LOG2F 1 ++#define HAVE_LOG10F 1 ++#define HAVE_LRINT 1 ++#define HAVE_LRINTF 1 ++#define HAVE_POWF 1 ++#define HAVE_RINT 1 ++#define HAVE_ROUND 1 ++#define HAVE_ROUNDF 1 ++#define HAVE_SINF 1 ++#define HAVE_TRUNC 1 ++#define HAVE_TRUNCF 1 ++#define HAVE_DOS_PATHS 0 ++#define HAVE_LIBC_MSVCRT 0 ++#define HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS 0 ++#define HAVE_SECTION_DATA_REL_RO 1 ++#define HAVE_THREADS 1 ++#define HAVE_UWP 0 ++#define HAVE_WINRT 0 ++#define HAVE_ACCESS 1 ++#define HAVE_ALIGNED_MALLOC 0 ++#define HAVE_ARC4RANDOM 0 ++#define HAVE_CLOCK_GETTIME 1 ++#define HAVE_CLOSESOCKET 0 ++#define HAVE_COMMANDLINETOARGVW 0 ++#define HAVE_FCNTL 1 ++#define HAVE_GETADDRINFO 0 ++#define HAVE_GETHRTIME 0 ++#define HAVE_GETOPT 1 ++#define HAVE_GETMODULEHANDLE 0 ++#define HAVE_GETPROCESSAFFINITYMASK 0 ++#define HAVE_GETPROCESSMEMORYINFO 0 ++#define HAVE_GETPROCESSTIMES 0 ++#define HAVE_GETRUSAGE 1 ++#define HAVE_GETSTDHANDLE 0 ++#define HAVE_GETSYSTEMTIMEASFILETIME 0 ++#define HAVE_GETTIMEOFDAY 1 ++#define HAVE_GLOB 1 ++#define HAVE_GLXGETPROCADDRESS 0 ++#define HAVE_GMTIME_R 1 ++#define HAVE_INET_ATON 0 ++#define HAVE_ISATTY 1 ++#define HAVE_KBHIT 0 ++#define HAVE_LOCALTIME_R 1 ++#define HAVE_LSTAT 1 ++#define HAVE_LZO1X_999_COMPRESS 0 ++#define HAVE_MACH_ABSOLUTE_TIME 0 ++#define HAVE_MAPVIEWOFFILE 0 ++#define 
HAVE_MEMALIGN 1 ++#define HAVE_MKSTEMP 1 ++#define HAVE_MMAP 1 ++#define HAVE_MPROTECT 1 ++#define HAVE_NANOSLEEP 1 ++#define HAVE_PEEKNAMEDPIPE 0 ++#define HAVE_POSIX_MEMALIGN 1 ++#define HAVE_PTHREAD_CANCEL 1 ++#define HAVE_SCHED_GETAFFINITY 1 ++#define HAVE_SECITEMIMPORT 0 ++#define HAVE_SETCONSOLETEXTATTRIBUTE 0 ++#define HAVE_SETCONSOLECTRLHANDLER 0 ++#define HAVE_SETDLLDIRECTORY 0 ++#define HAVE_SETMODE 0 ++#define HAVE_SETRLIMIT 1 ++#define HAVE_SLEEP 0 ++#define HAVE_STRERROR_R 1 ++#define HAVE_SYSCONF 1 ++#define HAVE_SYSCTL 0 /* #define HAVE_SYSCTL 1 -- forced to 0 for Fuchsia */ ++#define HAVE_USLEEP 1 ++#define HAVE_UTGETOSTYPEFROMSTRING 0 ++#define HAVE_VIRTUALALLOC 0 ++#define HAVE_WGLGETPROCADDRESS 0 ++#define HAVE_BCRYPT 0 ++#define HAVE_VAAPI_DRM 0 ++#define HAVE_VAAPI_X11 0 ++#define HAVE_VDPAU_X11 0 ++#define HAVE_PTHREADS 1 ++#define HAVE_OS2THREADS 0 ++#define HAVE_W32THREADS 0 ++#define HAVE_AS_ARCH_DIRECTIVE 0 ++#define HAVE_AS_DN_DIRECTIVE 0 ++#define HAVE_AS_FPU_DIRECTIVE 0 ++#define HAVE_AS_FUNC 0 ++#define HAVE_AS_OBJECT_ARCH 0 ++#define HAVE_ASM_MOD_Q 0 ++#define HAVE_BLOCKS_EXTENSION 0 ++#define HAVE_EBP_AVAILABLE 0 ++#define HAVE_EBX_AVAILABLE 0 ++#define HAVE_GNU_AS 0 ++#define HAVE_GNU_WINDRES 0 ++#define HAVE_IBM_ASM 0 ++#define HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS 0 ++#define HAVE_INLINE_ASM_LABELS 1 ++#define HAVE_INLINE_ASM_NONLOCAL_LABELS 1 ++#define HAVE_PRAGMA_DEPRECATED 1 ++#define HAVE_RSYNC_CONTIMEOUT 0 ++#define HAVE_SYMVER_ASM_LABEL 1 ++#define HAVE_SYMVER_GNU_ASM 1 ++#define HAVE_VFP_ARGS 0 ++#define HAVE_XFORM_ASM 0 ++#define HAVE_XMM_CLOBBERS 0 ++#define HAVE_KCMVIDEOCODECTYPE_HEVC 0 ++#define HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE 0 ++#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ 0 ++#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG 0 ++#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR 0 ++#define HAVE_SOCKLEN_T 0 ++#define HAVE_STRUCT_ADDRINFO 0 ++#define HAVE_STRUCT_GROUP_SOURCE_REQ 0 ++#define HAVE_STRUCT_IP_MREQ_SOURCE 0 ++#define HAVE_STRUCT_IPV6_MREQ 0 ++#define HAVE_STRUCT_MSGHDR_MSG_FLAGS 0 ++#define HAVE_STRUCT_POLLFD 0 ++#define HAVE_STRUCT_RUSAGE_RU_MAXRSS 1 ++#define HAVE_STRUCT_SCTP_EVENT_SUBSCRIBE 0 ++#define HAVE_STRUCT_SOCKADDR_IN6 0 ++#define HAVE_STRUCT_SOCKADDR_SA_LEN 0 ++#define HAVE_STRUCT_SOCKADDR_STORAGE 0 ++#define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 1 ++#define HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE 0 ++#define HAVE_MAKEINFO 1 ++#define HAVE_MAKEINFO_HTML 1 ++#define HAVE_OPENCL_D3D11 0 ++#define HAVE_OPENCL_DRM_ARM 0 ++#define HAVE_OPENCL_DRM_BEIGNET 0 ++#define HAVE_OPENCL_DXVA2 0 ++#define HAVE_OPENCL_VAAPI_BEIGNET 0 ++#define HAVE_OPENCL_VAAPI_INTEL_MEDIA 0 ++#define HAVE_PERL 1 ++#define HAVE_POD2MAN 1 ++#define HAVE_TEXI2HTML 0 ++#define CONFIG_DOC 0 ++#define CONFIG_HTMLPAGES 0 ++#define CONFIG_MANPAGES 0 ++#define CONFIG_PODPAGES 0 ++#define CONFIG_TXTPAGES 0 ++#define CONFIG_AVIO_LIST_DIR_EXAMPLE 1 ++#define CONFIG_AVIO_READING_EXAMPLE 1 ++#define CONFIG_DECODE_AUDIO_EXAMPLE 1 ++#define CONFIG_DECODE_VIDEO_EXAMPLE 1 ++#define CONFIG_DEMUXING_DECODING_EXAMPLE 1 ++#define CONFIG_ENCODE_AUDIO_EXAMPLE 1 ++#define CONFIG_ENCODE_VIDEO_EXAMPLE 1 ++#define CONFIG_EXTRACT_MVS_EXAMPLE 1 ++#define CONFIG_FILTER_AUDIO_EXAMPLE 0 ++#define CONFIG_FILTERING_AUDIO_EXAMPLE 0 ++#define CONFIG_FILTERING_VIDEO_EXAMPLE 0 ++#define CONFIG_HTTP_MULTICLIENT_EXAMPLE 1 ++#define CONFIG_HW_DECODE_EXAMPLE 1 ++#define CONFIG_METADATA_EXAMPLE 1 ++#define CONFIG_MUXING_EXAMPLE 0 ++#define 
CONFIG_QSVDEC_EXAMPLE 0 ++#define CONFIG_REMUXING_EXAMPLE 1 ++#define CONFIG_RESAMPLING_AUDIO_EXAMPLE 0 ++#define CONFIG_SCALING_VIDEO_EXAMPLE 0 ++#define CONFIG_TRANSCODE_AAC_EXAMPLE 0 ++#define CONFIG_TRANSCODING_EXAMPLE 0 ++#define CONFIG_VAAPI_ENCODE_EXAMPLE 0 ++#define CONFIG_VAAPI_TRANSCODE_EXAMPLE 0 ++#define CONFIG_AVISYNTH 0 ++#define CONFIG_FREI0R 0 ++#define CONFIG_LIBCDIO 0 ++#define CONFIG_LIBDAVS2 0 ++#define CONFIG_LIBRUBBERBAND 0 ++#define CONFIG_LIBVIDSTAB 0 ++#define CONFIG_LIBX264 0 ++#define CONFIG_LIBX265 0 ++#define CONFIG_LIBXAVS 0 ++#define CONFIG_LIBXAVS2 0 ++#define CONFIG_LIBXVID 0 ++#define CONFIG_DECKLINK 0 ++#define CONFIG_LIBFDK_AAC 0 ++#define CONFIG_OPENSSL 0 ++#define CONFIG_LIBTLS 0 ++#define CONFIG_GMP 0 ++#define CONFIG_LIBARIBB24 0 ++#define CONFIG_LIBLENSFUN 0 ++#define CONFIG_LIBOPENCORE_AMRNB 0 ++#define CONFIG_LIBOPENCORE_AMRWB 0 ++#define CONFIG_LIBVMAF 0 ++#define CONFIG_LIBVO_AMRWBENC 0 ++#define CONFIG_MBEDTLS 0 ++#define CONFIG_RKMPP 0 ++#define CONFIG_LIBSMBCLIENT 0 ++#define CONFIG_CHROMAPRINT 0 ++#define CONFIG_GCRYPT 0 ++#define CONFIG_GNUTLS 0 ++#define CONFIG_JNI 0 ++#define CONFIG_LADSPA 0 ++#define CONFIG_LIBAOM 0 ++#define CONFIG_LIBASS 0 ++#define CONFIG_LIBBLURAY 0 ++#define CONFIG_LIBBS2B 0 ++#define CONFIG_LIBCACA 0 ++#define CONFIG_LIBCELT 0 ++#define CONFIG_LIBCODEC2 0 ++#define CONFIG_LIBDAV1D 0 ++#define CONFIG_LIBDC1394 0 ++#define CONFIG_LIBDRM 0 ++#define CONFIG_LIBFLITE 0 ++#define CONFIG_LIBFONTCONFIG 0 ++#define CONFIG_LIBFREETYPE 0 ++#define CONFIG_LIBFRIBIDI 0 ++#define CONFIG_LIBGLSLANG 0 ++#define CONFIG_LIBGME 0 ++#define CONFIG_LIBGSM 0 ++#define CONFIG_LIBIEC61883 0 ++#define CONFIG_LIBILBC 0 ++#define CONFIG_LIBJACK 0 ++#define CONFIG_LIBKLVANC 0 ++#define CONFIG_LIBKVAZAAR 0 ++#define CONFIG_LIBMODPLUG 0 ++#define CONFIG_LIBMP3LAME 0 ++#define CONFIG_LIBMYSOFA 0 ++#define CONFIG_LIBOPENCV 0 ++#define CONFIG_LIBOPENH264 0 ++#define CONFIG_LIBOPENJPEG 0 ++#define CONFIG_LIBOPENMPT 0 ++#define CONFIG_LIBOPUS 1 ++#define CONFIG_LIBPULSE 0 ++#define CONFIG_LIBRABBITMQ 0 ++#define CONFIG_LIBRAV1E 0 ++#define CONFIG_LIBRSVG 0 ++#define CONFIG_LIBRTMP 0 ++#define CONFIG_LIBSHINE 0 ++#define CONFIG_LIBSMBCLIENT 0 ++#define CONFIG_LIBSNAPPY 0 ++#define CONFIG_LIBSOXR 0 ++#define CONFIG_LIBSPEEX 0 ++#define CONFIG_LIBSRT 0 ++#define CONFIG_LIBSSH 0 ++#define CONFIG_LIBTENSORFLOW 0 ++#define CONFIG_LIBTESSERACT 0 ++#define CONFIG_LIBTHEORA 0 ++#define CONFIG_LIBTWOLAME 0 ++#define CONFIG_LIBV4L2 0 ++#define CONFIG_LIBVORBIS 0 ++#define CONFIG_LIBVPX 0 ++#define CONFIG_LIBWAVPACK 0 ++#define CONFIG_LIBWEBP 0 ++#define CONFIG_LIBXML2 0 ++#define CONFIG_LIBZIMG 0 ++#define CONFIG_LIBZMQ 0 ++#define CONFIG_LIBZVBI 0 ++#define CONFIG_LV2 0 ++#define CONFIG_MEDIACODEC 0 ++#define CONFIG_OPENAL 0 ++#define CONFIG_OPENGL 0 ++#define CONFIG_POCKETSPHINX 0 ++#define CONFIG_VAPOURSYNTH 0 ++#define CONFIG_ALSA 0 ++#define CONFIG_APPKIT 0 ++#define CONFIG_AVFOUNDATION 0 ++#define CONFIG_BZLIB 0 ++#define CONFIG_COREIMAGE 0 ++#define CONFIG_ICONV 0 ++#define CONFIG_LIBXCB 0 ++#define CONFIG_LIBXCB_SHM 0 ++#define CONFIG_LIBXCB_SHAPE 0 ++#define CONFIG_LIBXCB_XFIXES 0 ++#define CONFIG_LZMA 0 ++#define CONFIG_SCHANNEL 0 ++#define CONFIG_SDL2 0 ++#define CONFIG_SECURETRANSPORT 0 ++#define CONFIG_SNDIO 0 ++#define CONFIG_XLIB 0 ++#define CONFIG_ZLIB 0 ++#define CONFIG_CUDA_NVCC 0 ++#define CONFIG_CUDA_SDK 0 ++#define CONFIG_LIBNPP 0 ++#define CONFIG_LIBMFX 0 ++#define CONFIG_MMAL 0 ++#define CONFIG_OMX 0 ++#define CONFIG_OPENCL 0 ++#define 
CONFIG_VULKAN 0 ++#define CONFIG_AMF 0 ++#define CONFIG_AUDIOTOOLBOX 0 ++#define CONFIG_CRYSTALHD 0 ++#define CONFIG_CUDA 0 ++#define CONFIG_CUDA_LLVM 0 ++#define CONFIG_CUVID 0 ++#define CONFIG_D3D11VA 0 ++#define CONFIG_DXVA2 0 ++#define CONFIG_FFNVCODEC 0 ++#define CONFIG_NVDEC 0 ++#define CONFIG_NVENC 0 ++#define CONFIG_VAAPI 0 ++#define CONFIG_VDPAU 0 ++#define CONFIG_VIDEOTOOLBOX 0 ++#define CONFIG_V4L2_M2M 0 ++#define CONFIG_XVMC 0 ++#define CONFIG_FTRAPV 0 ++#define CONFIG_GRAY 0 ++#define CONFIG_HARDCODED_TABLES 0 ++#define CONFIG_OMX_RPI 0 ++#define CONFIG_RUNTIME_CPUDETECT 1 ++#define CONFIG_SAFE_BITSTREAM_READER 1 ++#define CONFIG_SHARED 0 ++#define CONFIG_SMALL 0 ++#define CONFIG_STATIC 1 ++#define CONFIG_SWSCALE_ALPHA 1 ++#define CONFIG_GPL 0 ++#define CONFIG_NONFREE 0 ++#define CONFIG_VERSION3 0 ++#define CONFIG_AVDEVICE 0 ++#define CONFIG_AVFILTER 0 ++#define CONFIG_SWSCALE 0 ++#define CONFIG_POSTPROC 0 ++#define CONFIG_AVFORMAT 1 ++#define CONFIG_AVCODEC 1 ++#define CONFIG_SWRESAMPLE 0 ++#define CONFIG_AVRESAMPLE 0 ++#define CONFIG_AVUTIL 1 ++#define CONFIG_FFPLAY 0 ++#define CONFIG_FFPROBE 0 ++#define CONFIG_FFMPEG 0 ++#define CONFIG_DCT 1 ++#define CONFIG_DWT 0 ++#define CONFIG_ERROR_RESILIENCE 1 ++#define CONFIG_FAAN 0 ++#define CONFIG_FAST_UNALIGNED 0 ++#define CONFIG_FFT 1 ++#define CONFIG_LSP 1 ++#define CONFIG_LZO 0 ++#define CONFIG_MDCT 1 ++#define CONFIG_PIXELUTILS 0 ++#define CONFIG_NETWORK 0 ++#define CONFIG_RDFT 1 ++#define CONFIG_AUTODETECT 0 ++#define CONFIG_FONTCONFIG 0 ++#define CONFIG_LARGE_TESTS 1 ++#define CONFIG_LINUX_PERF 0 ++#define CONFIG_MEMORY_POISONING 0 ++#define CONFIG_NEON_CLOBBER_TEST 0 ++#define CONFIG_OSSFUZZ 0 ++#define CONFIG_PIC 1 ++#define CONFIG_THUMB 0 ++#define CONFIG_VALGRIND_BACKTRACE 0 ++#define CONFIG_XMM_CLOBBER_TEST 0 ++#define CONFIG_BSFS 1 ++#define CONFIG_DECODERS 1 ++#define CONFIG_ENCODERS 0 ++#define CONFIG_HWACCELS 0 ++#define CONFIG_PARSERS 1 ++#define CONFIG_INDEVS 0 ++#define CONFIG_OUTDEVS 0 ++#define CONFIG_FILTERS 0 ++#define CONFIG_DEMUXERS 1 ++#define CONFIG_MUXERS 0 ++#define CONFIG_PROTOCOLS 0 ++#define CONFIG_AANDCTTABLES 0 ++#define CONFIG_AC3DSP 0 ++#define CONFIG_ADTS_HEADER 1 ++#define CONFIG_AUDIO_FRAME_QUEUE 0 ++#define CONFIG_AUDIODSP 0 ++#define CONFIG_BLOCKDSP 1 ++#define CONFIG_BSWAPDSP 0 ++#define CONFIG_CABAC 1 ++#define CONFIG_CBS 0 ++#define CONFIG_CBS_AV1 0 ++#define CONFIG_CBS_H264 0 ++#define CONFIG_CBS_H265 0 ++#define CONFIG_CBS_JPEG 0 ++#define CONFIG_CBS_MPEG2 0 ++#define CONFIG_CBS_VP9 0 ++#define CONFIG_DIRAC_PARSE 1 ++#define CONFIG_DNN 0 ++#define CONFIG_DVPROFILE 0 ++#define CONFIG_EXIF 1 ++#define CONFIG_FAANDCT 0 ++#define CONFIG_FAANIDCT 0 ++#define CONFIG_FDCTDSP 1 ++#define CONFIG_FLACDSP 1 ++#define CONFIG_FMTCONVERT 0 ++#define CONFIG_FRAME_THREAD_ENCODER 0 ++#define CONFIG_G722DSP 0 ++#define CONFIG_GOLOMB 1 ++#define CONFIG_GPLV3 0 ++#define CONFIG_H263DSP 1 ++#define CONFIG_H264CHROMA 1 ++#define CONFIG_H264DSP 1 ++#define CONFIG_H264PARSE 1 ++#define CONFIG_H264PRED 1 ++#define CONFIG_H264QPEL 1 ++#define CONFIG_HEVCPARSE 0 ++#define CONFIG_HPELDSP 1 ++#define CONFIG_HUFFMAN 0 ++#define CONFIG_HUFFYUVDSP 0 ++#define CONFIG_HUFFYUVENCDSP 0 ++#define CONFIG_IDCTDSP 1 ++#define CONFIG_IIRFILTER 0 ++#define CONFIG_MDCT15 1 ++#define CONFIG_INTRAX8 0 ++#define CONFIG_ISO_MEDIA 1 ++#define CONFIG_IVIDSP 0 ++#define CONFIG_JPEGTABLES 0 ++#define CONFIG_LGPLV3 0 ++#define CONFIG_LIBX262 0 ++#define CONFIG_LLAUDDSP 0 ++#define CONFIG_LLVIDDSP 0 ++#define CONFIG_LLVIDENCDSP 0 
++#define CONFIG_LPC 0 ++#define CONFIG_LZF 0 ++#define CONFIG_ME_CMP 1 ++#define CONFIG_MPEG_ER 1 ++#define CONFIG_MPEGAUDIO 1 ++#define CONFIG_MPEGAUDIODSP 1 ++#define CONFIG_MPEGAUDIOHEADER 1 ++#define CONFIG_MPEGVIDEO 1 ++#define CONFIG_MPEGVIDEOENC 0 ++#define CONFIG_MSS34DSP 0 ++#define CONFIG_PIXBLOCKDSP 1 ++#define CONFIG_QPELDSP 1 ++#define CONFIG_QSV 0 ++#define CONFIG_QSVDEC 0 ++#define CONFIG_QSVENC 0 ++#define CONFIG_QSVVPP 0 ++#define CONFIG_RANGECODER 0 ++#define CONFIG_RIFFDEC 1 ++#define CONFIG_RIFFENC 0 ++#define CONFIG_RTPDEC 0 ++#define CONFIG_RTPENC_CHAIN 0 ++#define CONFIG_RV34DSP 0 ++#define CONFIG_SCENE_SAD 0 ++#define CONFIG_SINEWIN 1 ++#define CONFIG_SNAPPY 0 ++#define CONFIG_SRTP 0 ++#define CONFIG_STARTCODE 1 ++#define CONFIG_TEXTUREDSP 0 ++#define CONFIG_TEXTUREDSPENC 0 ++#define CONFIG_TPELDSP 0 ++#define CONFIG_VAAPI_1 0 ++#define CONFIG_VAAPI_ENCODE 0 ++#define CONFIG_VC1DSP 0 ++#define CONFIG_VIDEODSP 1 ++#define CONFIG_VP3DSP 1 ++#define CONFIG_VP56DSP 0 ++#define CONFIG_VP8DSP 1 ++#define CONFIG_WMA_FREQS 0 ++#define CONFIG_WMV2DSP 0 ++#define CONFIG_AAC_ADTSTOASC_BSF 0 ++#define CONFIG_AV1_FRAME_MERGE_BSF 0 ++#define CONFIG_AV1_FRAME_SPLIT_BSF 0 ++#define CONFIG_AV1_METADATA_BSF 0 ++#define CONFIG_CHOMP_BSF 0 ++#define CONFIG_DUMP_EXTRADATA_BSF 0 ++#define CONFIG_DCA_CORE_BSF 0 ++#define CONFIG_EAC3_CORE_BSF 0 ++#define CONFIG_EXTRACT_EXTRADATA_BSF 0 ++#define CONFIG_FILTER_UNITS_BSF 0 ++#define CONFIG_H264_METADATA_BSF 0 ++#define CONFIG_H264_MP4TOANNEXB_BSF 0 ++#define CONFIG_H264_REDUNDANT_PPS_BSF 0 ++#define CONFIG_HAPQA_EXTRACT_BSF 0 ++#define CONFIG_HEVC_METADATA_BSF 0 ++#define CONFIG_HEVC_MP4TOANNEXB_BSF 0 ++#define CONFIG_IMX_DUMP_HEADER_BSF 0 ++#define CONFIG_MJPEG2JPEG_BSF 0 ++#define CONFIG_MJPEGA_DUMP_HEADER_BSF 0 ++#define CONFIG_MP3_HEADER_DECOMPRESS_BSF 0 ++#define CONFIG_MPEG2_METADATA_BSF 0 ++#define CONFIG_MPEG4_UNPACK_BFRAMES_BSF 0 ++#define CONFIG_MOV2TEXTSUB_BSF 0 ++#define CONFIG_NOISE_BSF 0 ++#define CONFIG_NULL_BSF 1 ++#define CONFIG_PRORES_METADATA_BSF 0 ++#define CONFIG_REMOVE_EXTRADATA_BSF 0 ++#define CONFIG_TEXT2MOVSUB_BSF 0 ++#define CONFIG_TRACE_HEADERS_BSF 0 ++#define CONFIG_TRUEHD_CORE_BSF 0 ++#define CONFIG_VP9_METADATA_BSF 0 ++#define CONFIG_VP9_RAW_REORDER_BSF 0 ++#define CONFIG_VP9_SUPERFRAME_BSF 0 ++#define CONFIG_VP9_SUPERFRAME_SPLIT_BSF 0 ++#define CONFIG_AASC_DECODER 0 ++#define CONFIG_AIC_DECODER 0 ++#define CONFIG_ALIAS_PIX_DECODER 0 ++#define CONFIG_AGM_DECODER 0 ++#define CONFIG_AMV_DECODER 0 ++#define CONFIG_ANM_DECODER 0 ++#define CONFIG_ANSI_DECODER 0 ++#define CONFIG_APNG_DECODER 0 ++#define CONFIG_ARBC_DECODER 0 ++#define CONFIG_ASV1_DECODER 0 ++#define CONFIG_ASV2_DECODER 0 ++#define CONFIG_AURA_DECODER 0 ++#define CONFIG_AURA2_DECODER 0 ++#define CONFIG_AVRP_DECODER 0 ++#define CONFIG_AVRN_DECODER 0 ++#define CONFIG_AVS_DECODER 0 ++#define CONFIG_AVUI_DECODER 0 ++#define CONFIG_AYUV_DECODER 0 ++#define CONFIG_BETHSOFTVID_DECODER 0 ++#define CONFIG_BFI_DECODER 0 ++#define CONFIG_BINK_DECODER 0 ++#define CONFIG_BITPACKED_DECODER 0 ++#define CONFIG_BMP_DECODER 0 ++#define CONFIG_BMV_VIDEO_DECODER 0 ++#define CONFIG_BRENDER_PIX_DECODER 0 ++#define CONFIG_C93_DECODER 0 ++#define CONFIG_CAVS_DECODER 0 ++#define CONFIG_CDGRAPHICS_DECODER 0 ++#define CONFIG_CDTOONS_DECODER 0 ++#define CONFIG_CDXL_DECODER 0 ++#define CONFIG_CFHD_DECODER 0 ++#define CONFIG_CINEPAK_DECODER 0 ++#define CONFIG_CLEARVIDEO_DECODER 0 ++#define CONFIG_CLJR_DECODER 0 ++#define CONFIG_CLLC_DECODER 0 ++#define CONFIG_COMFORTNOISE_DECODER 0 
++#define CONFIG_CPIA_DECODER 0 ++#define CONFIG_CSCD_DECODER 0 ++#define CONFIG_CYUV_DECODER 0 ++#define CONFIG_DDS_DECODER 0 ++#define CONFIG_DFA_DECODER 0 ++#define CONFIG_DIRAC_DECODER 0 ++#define CONFIG_DNXHD_DECODER 0 ++#define CONFIG_DPX_DECODER 0 ++#define CONFIG_DSICINVIDEO_DECODER 0 ++#define CONFIG_DVAUDIO_DECODER 0 ++#define CONFIG_DVVIDEO_DECODER 0 ++#define CONFIG_DXA_DECODER 0 ++#define CONFIG_DXTORY_DECODER 0 ++#define CONFIG_DXV_DECODER 0 ++#define CONFIG_EACMV_DECODER 0 ++#define CONFIG_EAMAD_DECODER 0 ++#define CONFIG_EATGQ_DECODER 0 ++#define CONFIG_EATGV_DECODER 0 ++#define CONFIG_EATQI_DECODER 0 ++#define CONFIG_EIGHTBPS_DECODER 0 ++#define CONFIG_EIGHTSVX_EXP_DECODER 0 ++#define CONFIG_EIGHTSVX_FIB_DECODER 0 ++#define CONFIG_ESCAPE124_DECODER 0 ++#define CONFIG_ESCAPE130_DECODER 0 ++#define CONFIG_EXR_DECODER 0 ++#define CONFIG_FFV1_DECODER 0 ++#define CONFIG_FFVHUFF_DECODER 0 ++#define CONFIG_FIC_DECODER 0 ++#define CONFIG_FITS_DECODER 0 ++#define CONFIG_FLASHSV_DECODER 0 ++#define CONFIG_FLASHSV2_DECODER 0 ++#define CONFIG_FLIC_DECODER 0 ++#define CONFIG_FLV_DECODER 0 ++#define CONFIG_FMVC_DECODER 0 ++#define CONFIG_FOURXM_DECODER 0 ++#define CONFIG_FRAPS_DECODER 0 ++#define CONFIG_FRWU_DECODER 0 ++#define CONFIG_G2M_DECODER 0 ++#define CONFIG_GDV_DECODER 0 ++#define CONFIG_GIF_DECODER 0 ++#define CONFIG_H261_DECODER 0 ++#define CONFIG_H263_DECODER 1 ++#define CONFIG_H263I_DECODER 0 ++#define CONFIG_H263P_DECODER 0 ++#define CONFIG_H263_V4L2M2M_DECODER 0 ++#define CONFIG_H264_DECODER 1 ++#define CONFIG_H264_CRYSTALHD_DECODER 0 ++#define CONFIG_H264_V4L2M2M_DECODER 0 ++#define CONFIG_H264_MEDIACODEC_DECODER 0 ++#define CONFIG_H264_MMAL_DECODER 0 ++#define CONFIG_H264_QSV_DECODER 0 ++#define CONFIG_H264_RKMPP_DECODER 0 ++#define CONFIG_HAP_DECODER 0 ++#define CONFIG_HEVC_DECODER 0 ++#define CONFIG_HEVC_QSV_DECODER 0 ++#define CONFIG_HEVC_RKMPP_DECODER 0 ++#define CONFIG_HEVC_V4L2M2M_DECODER 0 ++#define CONFIG_HNM4_VIDEO_DECODER 0 ++#define CONFIG_HQ_HQA_DECODER 0 ++#define CONFIG_HQX_DECODER 0 ++#define CONFIG_HUFFYUV_DECODER 0 ++#define CONFIG_HYMT_DECODER 0 ++#define CONFIG_IDCIN_DECODER 0 ++#define CONFIG_IFF_ILBM_DECODER 0 ++#define CONFIG_IMM4_DECODER 0 ++#define CONFIG_IMM5_DECODER 0 ++#define CONFIG_INDEO2_DECODER 0 ++#define CONFIG_INDEO3_DECODER 0 ++#define CONFIG_INDEO4_DECODER 0 ++#define CONFIG_INDEO5_DECODER 0 ++#define CONFIG_INTERPLAY_VIDEO_DECODER 0 ++#define CONFIG_JPEG2000_DECODER 0 ++#define CONFIG_JPEGLS_DECODER 0 ++#define CONFIG_JV_DECODER 0 ++#define CONFIG_KGV1_DECODER 0 ++#define CONFIG_KMVC_DECODER 0 ++#define CONFIG_LAGARITH_DECODER 0 ++#define CONFIG_LOCO_DECODER 0 ++#define CONFIG_LSCR_DECODER 0 ++#define CONFIG_M101_DECODER 0 ++#define CONFIG_MAGICYUV_DECODER 0 ++#define CONFIG_MDEC_DECODER 0 ++#define CONFIG_MIMIC_DECODER 0 ++#define CONFIG_MJPEG_DECODER 0 ++#define CONFIG_MJPEGB_DECODER 0 ++#define CONFIG_MMVIDEO_DECODER 0 ++#define CONFIG_MOTIONPIXELS_DECODER 0 ++#define CONFIG_MPEG1VIDEO_DECODER 0 ++#define CONFIG_MPEG2VIDEO_DECODER 0 ++#define CONFIG_MPEG4_DECODER 1 ++#define CONFIG_MPEG4_CRYSTALHD_DECODER 0 ++#define CONFIG_MPEG4_V4L2M2M_DECODER 0 ++#define CONFIG_MPEG4_MMAL_DECODER 0 ++#define CONFIG_MPEGVIDEO_DECODER 0 ++#define CONFIG_MPEG1_V4L2M2M_DECODER 0 ++#define CONFIG_MPEG2_MMAL_DECODER 0 ++#define CONFIG_MPEG2_CRYSTALHD_DECODER 0 ++#define CONFIG_MPEG2_V4L2M2M_DECODER 0 ++#define CONFIG_MPEG2_QSV_DECODER 0 ++#define CONFIG_MPEG2_MEDIACODEC_DECODER 0 ++#define CONFIG_MSA1_DECODER 0 ++#define CONFIG_MSCC_DECODER 0 
++#define CONFIG_MSMPEG4V1_DECODER 0 ++#define CONFIG_MSMPEG4V2_DECODER 0 ++#define CONFIG_MSMPEG4V3_DECODER 0 ++#define CONFIG_MSMPEG4_CRYSTALHD_DECODER 0 ++#define CONFIG_MSRLE_DECODER 0 ++#define CONFIG_MSS1_DECODER 0 ++#define CONFIG_MSS2_DECODER 0 ++#define CONFIG_MSVIDEO1_DECODER 0 ++#define CONFIG_MSZH_DECODER 0 ++#define CONFIG_MTS2_DECODER 0 ++#define CONFIG_MV30_DECODER 0 ++#define CONFIG_MVC1_DECODER 0 ++#define CONFIG_MVC2_DECODER 0 ++#define CONFIG_MVDV_DECODER 0 ++#define CONFIG_MVHA_DECODER 0 ++#define CONFIG_MWSC_DECODER 0 ++#define CONFIG_MXPEG_DECODER 0 ++#define CONFIG_NUV_DECODER 0 ++#define CONFIG_PAF_VIDEO_DECODER 0 ++#define CONFIG_PAM_DECODER 0 ++#define CONFIG_PBM_DECODER 0 ++#define CONFIG_PCX_DECODER 0 ++#define CONFIG_PGM_DECODER 0 ++#define CONFIG_PGMYUV_DECODER 0 ++#define CONFIG_PICTOR_DECODER 0 ++#define CONFIG_PIXLET_DECODER 0 ++#define CONFIG_PNG_DECODER 0 ++#define CONFIG_PPM_DECODER 0 ++#define CONFIG_PRORES_DECODER 0 ++#define CONFIG_PROSUMER_DECODER 0 ++#define CONFIG_PSD_DECODER 0 ++#define CONFIG_PTX_DECODER 0 ++#define CONFIG_QDRAW_DECODER 0 ++#define CONFIG_QPEG_DECODER 0 ++#define CONFIG_QTRLE_DECODER 0 ++#define CONFIG_R10K_DECODER 0 ++#define CONFIG_R210_DECODER 0 ++#define CONFIG_RASC_DECODER 0 ++#define CONFIG_RAWVIDEO_DECODER 0 ++#define CONFIG_RL2_DECODER 0 ++#define CONFIG_ROQ_DECODER 0 ++#define CONFIG_RPZA_DECODER 0 ++#define CONFIG_RSCC_DECODER 0 ++#define CONFIG_RV10_DECODER 0 ++#define CONFIG_RV20_DECODER 0 ++#define CONFIG_RV30_DECODER 0 ++#define CONFIG_RV40_DECODER 0 ++#define CONFIG_S302M_DECODER 0 ++#define CONFIG_SANM_DECODER 0 ++#define CONFIG_SCPR_DECODER 0 ++#define CONFIG_SCREENPRESSO_DECODER 0 ++#define CONFIG_SGI_DECODER 0 ++#define CONFIG_SGIRLE_DECODER 0 ++#define CONFIG_SHEERVIDEO_DECODER 0 ++#define CONFIG_SMACKER_DECODER 0 ++#define CONFIG_SMC_DECODER 0 ++#define CONFIG_SMVJPEG_DECODER 0 ++#define CONFIG_SNOW_DECODER 0 ++#define CONFIG_SP5X_DECODER 0 ++#define CONFIG_SPEEDHQ_DECODER 0 ++#define CONFIG_SRGC_DECODER 0 ++#define CONFIG_SUNRAST_DECODER 0 ++#define CONFIG_SVQ1_DECODER 0 ++#define CONFIG_SVQ3_DECODER 0 ++#define CONFIG_TARGA_DECODER 0 ++#define CONFIG_TARGA_Y216_DECODER 0 ++#define CONFIG_TDSC_DECODER 0 ++#define CONFIG_THEORA_DECODER 1 ++#define CONFIG_THP_DECODER 0 ++#define CONFIG_TIERTEXSEQVIDEO_DECODER 0 ++#define CONFIG_TIFF_DECODER 0 ++#define CONFIG_TMV_DECODER 0 ++#define CONFIG_TRUEMOTION1_DECODER 0 ++#define CONFIG_TRUEMOTION2_DECODER 0 ++#define CONFIG_TRUEMOTION2RT_DECODER 0 ++#define CONFIG_TSCC_DECODER 0 ++#define CONFIG_TSCC2_DECODER 0 ++#define CONFIG_TXD_DECODER 0 ++#define CONFIG_ULTI_DECODER 0 ++#define CONFIG_UTVIDEO_DECODER 0 ++#define CONFIG_V210_DECODER 0 ++#define CONFIG_V210X_DECODER 0 ++#define CONFIG_V308_DECODER 0 ++#define CONFIG_V408_DECODER 0 ++#define CONFIG_V410_DECODER 0 ++#define CONFIG_VB_DECODER 0 ++#define CONFIG_VBLE_DECODER 0 ++#define CONFIG_VC1_DECODER 0 ++#define CONFIG_VC1_CRYSTALHD_DECODER 0 ++#define CONFIG_VC1IMAGE_DECODER 0 ++#define CONFIG_VC1_MMAL_DECODER 0 ++#define CONFIG_VC1_QSV_DECODER 0 ++#define CONFIG_VC1_V4L2M2M_DECODER 0 ++#define CONFIG_VCR1_DECODER 0 ++#define CONFIG_VMDVIDEO_DECODER 0 ++#define CONFIG_VMNC_DECODER 0 ++#define CONFIG_VP3_DECODER 1 ++#define CONFIG_VP4_DECODER 0 ++#define CONFIG_VP5_DECODER 0 ++#define CONFIG_VP6_DECODER 0 ++#define CONFIG_VP6A_DECODER 0 ++#define CONFIG_VP6F_DECODER 0 ++#define CONFIG_VP7_DECODER 0 ++#define CONFIG_VP8_DECODER 1 ++#define CONFIG_VP8_RKMPP_DECODER 0 ++#define CONFIG_VP8_V4L2M2M_DECODER 0 ++#define 
CONFIG_VP9_DECODER 0 ++#define CONFIG_VP9_RKMPP_DECODER 0 ++#define CONFIG_VP9_V4L2M2M_DECODER 0 ++#define CONFIG_VQA_DECODER 0 ++#define CONFIG_WEBP_DECODER 0 ++#define CONFIG_WCMV_DECODER 0 ++#define CONFIG_WRAPPED_AVFRAME_DECODER 0 ++#define CONFIG_WMV1_DECODER 0 ++#define CONFIG_WMV2_DECODER 0 ++#define CONFIG_WMV3_DECODER 0 ++#define CONFIG_WMV3_CRYSTALHD_DECODER 0 ++#define CONFIG_WMV3IMAGE_DECODER 0 ++#define CONFIG_WNV1_DECODER 0 ++#define CONFIG_XAN_WC3_DECODER 0 ++#define CONFIG_XAN_WC4_DECODER 0 ++#define CONFIG_XBM_DECODER 0 ++#define CONFIG_XFACE_DECODER 0 ++#define CONFIG_XL_DECODER 0 ++#define CONFIG_XPM_DECODER 0 ++#define CONFIG_XWD_DECODER 0 ++#define CONFIG_Y41P_DECODER 0 ++#define CONFIG_YLC_DECODER 0 ++#define CONFIG_YOP_DECODER 0 ++#define CONFIG_YUV4_DECODER 0 ++#define CONFIG_ZERO12V_DECODER 0 ++#define CONFIG_ZEROCODEC_DECODER 0 ++#define CONFIG_ZLIB_DECODER 0 ++#define CONFIG_ZMBV_DECODER 0 ++#define CONFIG_AAC_DECODER 1 ++#define CONFIG_AAC_FIXED_DECODER 0 ++#define CONFIG_AAC_LATM_DECODER 0 ++#define CONFIG_AC3_DECODER 0 ++#define CONFIG_AC3_FIXED_DECODER 0 ++#define CONFIG_ACELP_KELVIN_DECODER 0 ++#define CONFIG_ALAC_DECODER 0 ++#define CONFIG_ALS_DECODER 0 ++#define CONFIG_AMRNB_DECODER 1 ++#define CONFIG_AMRWB_DECODER 1 ++#define CONFIG_APE_DECODER 0 ++#define CONFIG_APTX_DECODER 0 ++#define CONFIG_APTX_HD_DECODER 0 ++#define CONFIG_ATRAC1_DECODER 0 ++#define CONFIG_ATRAC3_DECODER 0 ++#define CONFIG_ATRAC3AL_DECODER 0 ++#define CONFIG_ATRAC3P_DECODER 0 ++#define CONFIG_ATRAC3PAL_DECODER 0 ++#define CONFIG_ATRAC9_DECODER 0 ++#define CONFIG_BINKAUDIO_DCT_DECODER 0 ++#define CONFIG_BINKAUDIO_RDFT_DECODER 0 ++#define CONFIG_BMV_AUDIO_DECODER 0 ++#define CONFIG_COOK_DECODER 0 ++#define CONFIG_DCA_DECODER 0 ++#define CONFIG_DOLBY_E_DECODER 0 ++#define CONFIG_DSD_LSBF_DECODER 0 ++#define CONFIG_DSD_MSBF_DECODER 0 ++#define CONFIG_DSD_LSBF_PLANAR_DECODER 0 ++#define CONFIG_DSD_MSBF_PLANAR_DECODER 0 ++#define CONFIG_DSICINAUDIO_DECODER 0 ++#define CONFIG_DSS_SP_DECODER 0 ++#define CONFIG_DST_DECODER 0 ++#define CONFIG_EAC3_DECODER 0 ++#define CONFIG_EVRC_DECODER 0 ++#define CONFIG_FFWAVESYNTH_DECODER 0 ++#define CONFIG_FLAC_DECODER 1 ++#define CONFIG_G723_1_DECODER 0 ++#define CONFIG_G729_DECODER 0 ++#define CONFIG_GSM_DECODER 0 ++#define CONFIG_GSM_MS_DECODER 1 ++#define CONFIG_HCA_DECODER 0 ++#define CONFIG_HCOM_DECODER 0 ++#define CONFIG_IAC_DECODER 0 ++#define CONFIG_ILBC_DECODER 0 ++#define CONFIG_IMC_DECODER 0 ++#define CONFIG_INTERPLAY_ACM_DECODER 0 ++#define CONFIG_MACE3_DECODER 0 ++#define CONFIG_MACE6_DECODER 0 ++#define CONFIG_METASOUND_DECODER 0 ++#define CONFIG_MLP_DECODER 0 ++#define CONFIG_MP1_DECODER 0 ++#define CONFIG_MP1FLOAT_DECODER 0 ++#define CONFIG_MP2_DECODER 0 ++#define CONFIG_MP2FLOAT_DECODER 0 ++#define CONFIG_MP3FLOAT_DECODER 0 ++#define CONFIG_MP3_DECODER 1 ++#define CONFIG_MP3ADUFLOAT_DECODER 0 ++#define CONFIG_MP3ADU_DECODER 0 ++#define CONFIG_MP3ON4FLOAT_DECODER 0 ++#define CONFIG_MP3ON4_DECODER 0 ++#define CONFIG_MPC7_DECODER 0 ++#define CONFIG_MPC8_DECODER 0 ++#define CONFIG_NELLYMOSER_DECODER 0 ++#define CONFIG_ON2AVC_DECODER 0 ++#define CONFIG_OPUS_DECODER 0 ++#define CONFIG_PAF_AUDIO_DECODER 0 ++#define CONFIG_QCELP_DECODER 0 ++#define CONFIG_QDM2_DECODER 0 ++#define CONFIG_QDMC_DECODER 0 ++#define CONFIG_RA_144_DECODER 0 ++#define CONFIG_RA_288_DECODER 0 ++#define CONFIG_RALF_DECODER 0 ++#define CONFIG_SBC_DECODER 0 ++#define CONFIG_SHORTEN_DECODER 0 ++#define CONFIG_SIPR_DECODER 0 ++#define CONFIG_SIREN_DECODER 0 ++#define 
CONFIG_SMACKAUD_DECODER 0 ++#define CONFIG_SONIC_DECODER 0 ++#define CONFIG_TAK_DECODER 0 ++#define CONFIG_TRUEHD_DECODER 0 ++#define CONFIG_TRUESPEECH_DECODER 0 ++#define CONFIG_TTA_DECODER 0 ++#define CONFIG_TWINVQ_DECODER 0 ++#define CONFIG_VMDAUDIO_DECODER 0 ++#define CONFIG_VORBIS_DECODER 1 ++#define CONFIG_WAVPACK_DECODER 0 ++#define CONFIG_WMALOSSLESS_DECODER 0 ++#define CONFIG_WMAPRO_DECODER 0 ++#define CONFIG_WMAV1_DECODER 0 ++#define CONFIG_WMAV2_DECODER 0 ++#define CONFIG_WMAVOICE_DECODER 0 ++#define CONFIG_WS_SND1_DECODER 0 ++#define CONFIG_XMA1_DECODER 0 ++#define CONFIG_XMA2_DECODER 0 ++#define CONFIG_PCM_ALAW_DECODER 1 ++#define CONFIG_PCM_BLURAY_DECODER 0 ++#define CONFIG_PCM_DVD_DECODER 0 ++#define CONFIG_PCM_F16LE_DECODER 0 ++#define CONFIG_PCM_F24LE_DECODER 0 ++#define CONFIG_PCM_F32BE_DECODER 0 ++#define CONFIG_PCM_F32LE_DECODER 1 ++#define CONFIG_PCM_F64BE_DECODER 0 ++#define CONFIG_PCM_F64LE_DECODER 0 ++#define CONFIG_PCM_LXF_DECODER 0 ++#define CONFIG_PCM_MULAW_DECODER 1 ++#define CONFIG_PCM_S8_DECODER 0 ++#define CONFIG_PCM_S8_PLANAR_DECODER 0 ++#define CONFIG_PCM_S16BE_DECODER 1 ++#define CONFIG_PCM_S16BE_PLANAR_DECODER 0 ++#define CONFIG_PCM_S16LE_DECODER 1 ++#define CONFIG_PCM_S16LE_PLANAR_DECODER 0 ++#define CONFIG_PCM_S24BE_DECODER 1 ++#define CONFIG_PCM_S24DAUD_DECODER 0 ++#define CONFIG_PCM_S24LE_DECODER 1 ++#define CONFIG_PCM_S24LE_PLANAR_DECODER 0 ++#define CONFIG_PCM_S32BE_DECODER 0 ++#define CONFIG_PCM_S32LE_DECODER 1 ++#define CONFIG_PCM_S32LE_PLANAR_DECODER 0 ++#define CONFIG_PCM_S64BE_DECODER 0 ++#define CONFIG_PCM_S64LE_DECODER 0 ++#define CONFIG_PCM_U8_DECODER 1 ++#define CONFIG_PCM_U16BE_DECODER 0 ++#define CONFIG_PCM_U16LE_DECODER 0 ++#define CONFIG_PCM_U24BE_DECODER 0 ++#define CONFIG_PCM_U24LE_DECODER 0 ++#define CONFIG_PCM_U32BE_DECODER 0 ++#define CONFIG_PCM_U32LE_DECODER 0 ++#define CONFIG_PCM_VIDC_DECODER 0 ++#define CONFIG_DERF_DPCM_DECODER 0 ++#define CONFIG_GREMLIN_DPCM_DECODER 0 ++#define CONFIG_INTERPLAY_DPCM_DECODER 0 ++#define CONFIG_ROQ_DPCM_DECODER 0 ++#define CONFIG_SDX2_DPCM_DECODER 0 ++#define CONFIG_SOL_DPCM_DECODER 0 ++#define CONFIG_XAN_DPCM_DECODER 0 ++#define CONFIG_ADPCM_4XM_DECODER 0 ++#define CONFIG_ADPCM_ADX_DECODER 0 ++#define CONFIG_ADPCM_AFC_DECODER 0 ++#define CONFIG_ADPCM_AGM_DECODER 0 ++#define CONFIG_ADPCM_AICA_DECODER 0 ++#define CONFIG_ADPCM_ARGO_DECODER 0 ++#define CONFIG_ADPCM_CT_DECODER 0 ++#define CONFIG_ADPCM_DTK_DECODER 0 ++#define CONFIG_ADPCM_EA_DECODER 0 ++#define CONFIG_ADPCM_EA_MAXIS_XA_DECODER 0 ++#define CONFIG_ADPCM_EA_R1_DECODER 0 ++#define CONFIG_ADPCM_EA_R2_DECODER 0 ++#define CONFIG_ADPCM_EA_R3_DECODER 0 ++#define CONFIG_ADPCM_EA_XAS_DECODER 0 ++#define CONFIG_ADPCM_G722_DECODER 0 ++#define CONFIG_ADPCM_G726_DECODER 0 ++#define CONFIG_ADPCM_G726LE_DECODER 0 ++#define CONFIG_ADPCM_IMA_AMV_DECODER 0 ++#define CONFIG_ADPCM_IMA_ALP_DECODER 0 ++#define CONFIG_ADPCM_IMA_APC_DECODER 0 ++#define CONFIG_ADPCM_IMA_APM_DECODER 0 ++#define CONFIG_ADPCM_IMA_CUNNING_DECODER 0 ++#define CONFIG_ADPCM_IMA_DAT4_DECODER 0 ++#define CONFIG_ADPCM_IMA_DK3_DECODER 0 ++#define CONFIG_ADPCM_IMA_DK4_DECODER 0 ++#define CONFIG_ADPCM_IMA_EA_EACS_DECODER 0 ++#define CONFIG_ADPCM_IMA_EA_SEAD_DECODER 0 ++#define CONFIG_ADPCM_IMA_ISS_DECODER 0 ++#define CONFIG_ADPCM_IMA_MTF_DECODER 0 ++#define CONFIG_ADPCM_IMA_OKI_DECODER 0 ++#define CONFIG_ADPCM_IMA_QT_DECODER 0 ++#define CONFIG_ADPCM_IMA_RAD_DECODER 0 ++#define CONFIG_ADPCM_IMA_SSI_DECODER 0 ++#define CONFIG_ADPCM_IMA_SMJPEG_DECODER 0 ++#define CONFIG_ADPCM_IMA_WAV_DECODER 0 
++#define CONFIG_ADPCM_IMA_WS_DECODER 0 ++#define CONFIG_ADPCM_MS_DECODER 0 ++#define CONFIG_ADPCM_MTAF_DECODER 0 ++#define CONFIG_ADPCM_PSX_DECODER 0 ++#define CONFIG_ADPCM_SBPRO_2_DECODER 0 ++#define CONFIG_ADPCM_SBPRO_3_DECODER 0 ++#define CONFIG_ADPCM_SBPRO_4_DECODER 0 ++#define CONFIG_ADPCM_SWF_DECODER 0 ++#define CONFIG_ADPCM_THP_DECODER 0 ++#define CONFIG_ADPCM_THP_LE_DECODER 0 ++#define CONFIG_ADPCM_VIMA_DECODER 0 ++#define CONFIG_ADPCM_XA_DECODER 0 ++#define CONFIG_ADPCM_YAMAHA_DECODER 0 ++#define CONFIG_ADPCM_ZORK_DECODER 0 ++#define CONFIG_SSA_DECODER 0 ++#define CONFIG_ASS_DECODER 0 ++#define CONFIG_CCAPTION_DECODER 0 ++#define CONFIG_DVBSUB_DECODER 0 ++#define CONFIG_DVDSUB_DECODER 0 ++#define CONFIG_JACOSUB_DECODER 0 ++#define CONFIG_MICRODVD_DECODER 0 ++#define CONFIG_MOVTEXT_DECODER 0 ++#define CONFIG_MPL2_DECODER 0 ++#define CONFIG_PGSSUB_DECODER 0 ++#define CONFIG_PJS_DECODER 0 ++#define CONFIG_REALTEXT_DECODER 0 ++#define CONFIG_SAMI_DECODER 0 ++#define CONFIG_SRT_DECODER 0 ++#define CONFIG_STL_DECODER 0 ++#define CONFIG_SUBRIP_DECODER 0 ++#define CONFIG_SUBVIEWER_DECODER 0 ++#define CONFIG_SUBVIEWER1_DECODER 0 ++#define CONFIG_TEXT_DECODER 0 ++#define CONFIG_VPLAYER_DECODER 0 ++#define CONFIG_WEBVTT_DECODER 0 ++#define CONFIG_XSUB_DECODER 0 ++#define CONFIG_AAC_AT_DECODER 0 ++#define CONFIG_AC3_AT_DECODER 0 ++#define CONFIG_ADPCM_IMA_QT_AT_DECODER 0 ++#define CONFIG_ALAC_AT_DECODER 0 ++#define CONFIG_AMR_NB_AT_DECODER 0 ++#define CONFIG_EAC3_AT_DECODER 0 ++#define CONFIG_GSM_MS_AT_DECODER 0 ++#define CONFIG_ILBC_AT_DECODER 0 ++#define CONFIG_MP1_AT_DECODER 0 ++#define CONFIG_MP2_AT_DECODER 0 ++#define CONFIG_MP3_AT_DECODER 0 ++#define CONFIG_PCM_ALAW_AT_DECODER 0 ++#define CONFIG_PCM_MULAW_AT_DECODER 0 ++#define CONFIG_QDMC_AT_DECODER 0 ++#define CONFIG_QDM2_AT_DECODER 0 ++#define CONFIG_LIBARIBB24_DECODER 0 ++#define CONFIG_LIBCELT_DECODER 0 ++#define CONFIG_LIBCODEC2_DECODER 0 ++#define CONFIG_LIBDAV1D_DECODER 0 ++#define CONFIG_LIBDAVS2_DECODER 0 ++#define CONFIG_LIBFDK_AAC_DECODER 0 ++#define CONFIG_LIBGSM_DECODER 0 ++#define CONFIG_LIBGSM_MS_DECODER 0 ++#define CONFIG_LIBILBC_DECODER 0 ++#define CONFIG_LIBOPENCORE_AMRNB_DECODER 0 ++#define CONFIG_LIBOPENCORE_AMRWB_DECODER 0 ++#define CONFIG_LIBOPENJPEG_DECODER 0 ++#define CONFIG_LIBOPUS_DECODER 1 ++#define CONFIG_LIBRSVG_DECODER 0 ++#define CONFIG_LIBSPEEX_DECODER 0 ++#define CONFIG_LIBVORBIS_DECODER 0 ++#define CONFIG_LIBVPX_VP8_DECODER 0 ++#define CONFIG_LIBVPX_VP9_DECODER 0 ++#define CONFIG_LIBZVBI_TELETEXT_DECODER 0 ++#define CONFIG_BINTEXT_DECODER 0 ++#define CONFIG_XBIN_DECODER 0 ++#define CONFIG_IDF_DECODER 0 ++#define CONFIG_LIBAOM_AV1_DECODER 0 ++#define CONFIG_LIBOPENH264_DECODER 0 ++#define CONFIG_H264_CUVID_DECODER 0 ++#define CONFIG_HEVC_CUVID_DECODER 0 ++#define CONFIG_HEVC_MEDIACODEC_DECODER 0 ++#define CONFIG_MJPEG_CUVID_DECODER 0 ++#define CONFIG_MJPEG_QSV_DECODER 0 ++#define CONFIG_MPEG1_CUVID_DECODER 0 ++#define CONFIG_MPEG2_CUVID_DECODER 0 ++#define CONFIG_MPEG4_CUVID_DECODER 0 ++#define CONFIG_MPEG4_MEDIACODEC_DECODER 0 ++#define CONFIG_VC1_CUVID_DECODER 0 ++#define CONFIG_VP8_CUVID_DECODER 0 ++#define CONFIG_VP8_MEDIACODEC_DECODER 0 ++#define CONFIG_VP8_QSV_DECODER 0 ++#define CONFIG_VP9_CUVID_DECODER 0 ++#define CONFIG_VP9_MEDIACODEC_DECODER 0 ++#define CONFIG_VP9_QSV_DECODER 0 ++#define CONFIG_A64MULTI_ENCODER 0 ++#define CONFIG_A64MULTI5_ENCODER 0 ++#define CONFIG_ALIAS_PIX_ENCODER 0 ++#define CONFIG_AMV_ENCODER 0 ++#define CONFIG_APNG_ENCODER 0 ++#define CONFIG_ASV1_ENCODER 0 ++#define 
CONFIG_ASV2_ENCODER 0 ++#define CONFIG_AVRP_ENCODER 0 ++#define CONFIG_AVUI_ENCODER 0 ++#define CONFIG_AYUV_ENCODER 0 ++#define CONFIG_BMP_ENCODER 0 ++#define CONFIG_CINEPAK_ENCODER 0 ++#define CONFIG_CLJR_ENCODER 0 ++#define CONFIG_COMFORTNOISE_ENCODER 0 ++#define CONFIG_DNXHD_ENCODER 0 ++#define CONFIG_DPX_ENCODER 0 ++#define CONFIG_DVVIDEO_ENCODER 0 ++#define CONFIG_FFV1_ENCODER 0 ++#define CONFIG_FFVHUFF_ENCODER 0 ++#define CONFIG_FITS_ENCODER 0 ++#define CONFIG_FLASHSV_ENCODER 0 ++#define CONFIG_FLASHSV2_ENCODER 0 ++#define CONFIG_FLV_ENCODER 0 ++#define CONFIG_GIF_ENCODER 0 ++#define CONFIG_H261_ENCODER 0 ++#define CONFIG_H263_ENCODER 0 ++#define CONFIG_H263P_ENCODER 0 ++#define CONFIG_HAP_ENCODER 0 ++#define CONFIG_HUFFYUV_ENCODER 0 ++#define CONFIG_JPEG2000_ENCODER 0 ++#define CONFIG_JPEGLS_ENCODER 0 ++#define CONFIG_LJPEG_ENCODER 0 ++#define CONFIG_MAGICYUV_ENCODER 0 ++#define CONFIG_MJPEG_ENCODER 0 ++#define CONFIG_MPEG1VIDEO_ENCODER 0 ++#define CONFIG_MPEG2VIDEO_ENCODER 0 ++#define CONFIG_MPEG4_ENCODER 0 ++#define CONFIG_MSMPEG4V2_ENCODER 0 ++#define CONFIG_MSMPEG4V3_ENCODER 0 ++#define CONFIG_MSVIDEO1_ENCODER 0 ++#define CONFIG_PAM_ENCODER 0 ++#define CONFIG_PBM_ENCODER 0 ++#define CONFIG_PCX_ENCODER 0 ++#define CONFIG_PGM_ENCODER 0 ++#define CONFIG_PGMYUV_ENCODER 0 ++#define CONFIG_PNG_ENCODER 0 ++#define CONFIG_PPM_ENCODER 0 ++#define CONFIG_PRORES_ENCODER 0 ++#define CONFIG_PRORES_AW_ENCODER 0 ++#define CONFIG_PRORES_KS_ENCODER 0 ++#define CONFIG_QTRLE_ENCODER 0 ++#define CONFIG_R10K_ENCODER 0 ++#define CONFIG_R210_ENCODER 0 ++#define CONFIG_RAWVIDEO_ENCODER 0 ++#define CONFIG_ROQ_ENCODER 0 ++#define CONFIG_RV10_ENCODER 0 ++#define CONFIG_RV20_ENCODER 0 ++#define CONFIG_S302M_ENCODER 0 ++#define CONFIG_SGI_ENCODER 0 ++#define CONFIG_SNOW_ENCODER 0 ++#define CONFIG_SUNRAST_ENCODER 0 ++#define CONFIG_SVQ1_ENCODER 0 ++#define CONFIG_TARGA_ENCODER 0 ++#define CONFIG_TIFF_ENCODER 0 ++#define CONFIG_UTVIDEO_ENCODER 0 ++#define CONFIG_V210_ENCODER 0 ++#define CONFIG_V308_ENCODER 0 ++#define CONFIG_V408_ENCODER 0 ++#define CONFIG_V410_ENCODER 0 ++#define CONFIG_VC2_ENCODER 0 ++#define CONFIG_WRAPPED_AVFRAME_ENCODER 0 ++#define CONFIG_WMV1_ENCODER 0 ++#define CONFIG_WMV2_ENCODER 0 ++#define CONFIG_XBM_ENCODER 0 ++#define CONFIG_XFACE_ENCODER 0 ++#define CONFIG_XWD_ENCODER 0 ++#define CONFIG_Y41P_ENCODER 0 ++#define CONFIG_YUV4_ENCODER 0 ++#define CONFIG_ZLIB_ENCODER 0 ++#define CONFIG_ZMBV_ENCODER 0 ++#define CONFIG_AAC_ENCODER 0 ++#define CONFIG_AC3_ENCODER 0 ++#define CONFIG_AC3_FIXED_ENCODER 0 ++#define CONFIG_ALAC_ENCODER 0 ++#define CONFIG_APTX_ENCODER 0 ++#define CONFIG_APTX_HD_ENCODER 0 ++#define CONFIG_DCA_ENCODER 0 ++#define CONFIG_EAC3_ENCODER 0 ++#define CONFIG_FLAC_ENCODER 0 ++#define CONFIG_G723_1_ENCODER 0 ++#define CONFIG_MLP_ENCODER 0 ++#define CONFIG_MP2_ENCODER 0 ++#define CONFIG_MP2FIXED_ENCODER 0 ++#define CONFIG_NELLYMOSER_ENCODER 0 ++#define CONFIG_OPUS_ENCODER 0 ++#define CONFIG_RA_144_ENCODER 0 ++#define CONFIG_SBC_ENCODER 0 ++#define CONFIG_SONIC_ENCODER 0 ++#define CONFIG_SONIC_LS_ENCODER 0 ++#define CONFIG_TRUEHD_ENCODER 0 ++#define CONFIG_TTA_ENCODER 0 ++#define CONFIG_VORBIS_ENCODER 0 ++#define CONFIG_WAVPACK_ENCODER 0 ++#define CONFIG_WMAV1_ENCODER 0 ++#define CONFIG_WMAV2_ENCODER 0 ++#define CONFIG_PCM_ALAW_ENCODER 0 ++#define CONFIG_PCM_DVD_ENCODER 0 ++#define CONFIG_PCM_F32BE_ENCODER 0 ++#define CONFIG_PCM_F32LE_ENCODER 0 ++#define CONFIG_PCM_F64BE_ENCODER 0 ++#define CONFIG_PCM_F64LE_ENCODER 0 ++#define CONFIG_PCM_MULAW_ENCODER 0 ++#define 
CONFIG_PCM_S8_ENCODER 0 ++#define CONFIG_PCM_S8_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S16BE_ENCODER 0 ++#define CONFIG_PCM_S16BE_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S16LE_ENCODER 0 ++#define CONFIG_PCM_S16LE_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S24BE_ENCODER 0 ++#define CONFIG_PCM_S24DAUD_ENCODER 0 ++#define CONFIG_PCM_S24LE_ENCODER 0 ++#define CONFIG_PCM_S24LE_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S32BE_ENCODER 0 ++#define CONFIG_PCM_S32LE_ENCODER 0 ++#define CONFIG_PCM_S32LE_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S64BE_ENCODER 0 ++#define CONFIG_PCM_S64LE_ENCODER 0 ++#define CONFIG_PCM_U8_ENCODER 0 ++#define CONFIG_PCM_U16BE_ENCODER 0 ++#define CONFIG_PCM_U16LE_ENCODER 0 ++#define CONFIG_PCM_U24BE_ENCODER 0 ++#define CONFIG_PCM_U24LE_ENCODER 0 ++#define CONFIG_PCM_U32BE_ENCODER 0 ++#define CONFIG_PCM_U32LE_ENCODER 0 ++#define CONFIG_PCM_VIDC_ENCODER 0 ++#define CONFIG_ROQ_DPCM_ENCODER 0 ++#define CONFIG_ADPCM_ADX_ENCODER 0 ++#define CONFIG_ADPCM_G722_ENCODER 0 ++#define CONFIG_ADPCM_G726_ENCODER 0 ++#define CONFIG_ADPCM_G726LE_ENCODER 0 ++#define CONFIG_ADPCM_IMA_QT_ENCODER 0 ++#define CONFIG_ADPCM_IMA_WAV_ENCODER 0 ++#define CONFIG_ADPCM_MS_ENCODER 0 ++#define CONFIG_ADPCM_SWF_ENCODER 0 ++#define CONFIG_ADPCM_YAMAHA_ENCODER 0 ++#define CONFIG_SSA_ENCODER 0 ++#define CONFIG_ASS_ENCODER 0 ++#define CONFIG_DVBSUB_ENCODER 0 ++#define CONFIG_DVDSUB_ENCODER 0 ++#define CONFIG_MOVTEXT_ENCODER 0 ++#define CONFIG_SRT_ENCODER 0 ++#define CONFIG_SUBRIP_ENCODER 0 ++#define CONFIG_TEXT_ENCODER 0 ++#define CONFIG_WEBVTT_ENCODER 0 ++#define CONFIG_XSUB_ENCODER 0 ++#define CONFIG_AAC_AT_ENCODER 0 ++#define CONFIG_ALAC_AT_ENCODER 0 ++#define CONFIG_ILBC_AT_ENCODER 0 ++#define CONFIG_PCM_ALAW_AT_ENCODER 0 ++#define CONFIG_PCM_MULAW_AT_ENCODER 0 ++#define CONFIG_LIBAOM_AV1_ENCODER 0 ++#define CONFIG_LIBCODEC2_ENCODER 0 ++#define CONFIG_LIBFDK_AAC_ENCODER 0 ++#define CONFIG_LIBGSM_ENCODER 0 ++#define CONFIG_LIBGSM_MS_ENCODER 0 ++#define CONFIG_LIBILBC_ENCODER 0 ++#define CONFIG_LIBMP3LAME_ENCODER 0 ++#define CONFIG_LIBOPENCORE_AMRNB_ENCODER 0 ++#define CONFIG_LIBOPENJPEG_ENCODER 0 ++#define CONFIG_LIBOPUS_ENCODER 0 ++#define CONFIG_LIBRAV1E_ENCODER 0 ++#define CONFIG_LIBSHINE_ENCODER 0 ++#define CONFIG_LIBSPEEX_ENCODER 0 ++#define CONFIG_LIBTHEORA_ENCODER 0 ++#define CONFIG_LIBTWOLAME_ENCODER 0 ++#define CONFIG_LIBVO_AMRWBENC_ENCODER 0 ++#define CONFIG_LIBVORBIS_ENCODER 0 ++#define CONFIG_LIBVPX_VP8_ENCODER 0 ++#define CONFIG_LIBVPX_VP9_ENCODER 0 ++#define CONFIG_LIBWAVPACK_ENCODER 0 ++#define CONFIG_LIBWEBP_ANIM_ENCODER 0 ++#define CONFIG_LIBWEBP_ENCODER 0 ++#define CONFIG_LIBX262_ENCODER 0 ++#define CONFIG_LIBX264_ENCODER 0 ++#define CONFIG_LIBX264RGB_ENCODER 0 ++#define CONFIG_LIBX265_ENCODER 0 ++#define CONFIG_LIBXAVS_ENCODER 0 ++#define CONFIG_LIBXAVS2_ENCODER 0 ++#define CONFIG_LIBXVID_ENCODER 0 ++#define CONFIG_H263_V4L2M2M_ENCODER 0 ++#define CONFIG_LIBOPENH264_ENCODER 0 ++#define CONFIG_H264_AMF_ENCODER 0 ++#define CONFIG_H264_NVENC_ENCODER 0 ++#define CONFIG_H264_OMX_ENCODER 0 ++#define CONFIG_H264_QSV_ENCODER 0 ++#define CONFIG_H264_V4L2M2M_ENCODER 0 ++#define CONFIG_H264_VAAPI_ENCODER 0 ++#define CONFIG_H264_VIDEOTOOLBOX_ENCODER 0 ++#define CONFIG_NVENC_ENCODER 0 ++#define CONFIG_NVENC_H264_ENCODER 0 ++#define CONFIG_NVENC_HEVC_ENCODER 0 ++#define CONFIG_HEVC_AMF_ENCODER 0 ++#define CONFIG_HEVC_NVENC_ENCODER 0 ++#define CONFIG_HEVC_QSV_ENCODER 0 ++#define CONFIG_HEVC_V4L2M2M_ENCODER 0 ++#define CONFIG_HEVC_VAAPI_ENCODER 0 ++#define CONFIG_HEVC_VIDEOTOOLBOX_ENCODER 0 ++#define 
CONFIG_LIBKVAZAAR_ENCODER 0 ++#define CONFIG_MJPEG_QSV_ENCODER 0 ++#define CONFIG_MJPEG_VAAPI_ENCODER 0 ++#define CONFIG_MPEG2_QSV_ENCODER 0 ++#define CONFIG_MPEG2_VAAPI_ENCODER 0 ++#define CONFIG_MPEG4_OMX_ENCODER 0 ++#define CONFIG_MPEG4_V4L2M2M_ENCODER 0 ++#define CONFIG_VP8_V4L2M2M_ENCODER 0 ++#define CONFIG_VP8_VAAPI_ENCODER 0 ++#define CONFIG_VP9_VAAPI_ENCODER 0 ++#define CONFIG_VP9_QSV_ENCODER 0 ++#define CONFIG_H263_VAAPI_HWACCEL 0 ++#define CONFIG_H263_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_H264_D3D11VA_HWACCEL 0 ++#define CONFIG_H264_D3D11VA2_HWACCEL 0 ++#define CONFIG_H264_DXVA2_HWACCEL 0 ++#define CONFIG_H264_NVDEC_HWACCEL 0 ++#define CONFIG_H264_VAAPI_HWACCEL 0 ++#define CONFIG_H264_VDPAU_HWACCEL 0 ++#define CONFIG_H264_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_HEVC_D3D11VA_HWACCEL 0 ++#define CONFIG_HEVC_D3D11VA2_HWACCEL 0 ++#define CONFIG_HEVC_DXVA2_HWACCEL 0 ++#define CONFIG_HEVC_NVDEC_HWACCEL 0 ++#define CONFIG_HEVC_VAAPI_HWACCEL 0 ++#define CONFIG_HEVC_VDPAU_HWACCEL 0 ++#define CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_MJPEG_NVDEC_HWACCEL 0 ++#define CONFIG_MJPEG_VAAPI_HWACCEL 0 ++#define CONFIG_MPEG1_NVDEC_HWACCEL 0 ++#define CONFIG_MPEG1_VDPAU_HWACCEL 0 ++#define CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_MPEG1_XVMC_HWACCEL 0 ++#define CONFIG_MPEG2_D3D11VA_HWACCEL 0 ++#define CONFIG_MPEG2_D3D11VA2_HWACCEL 0 ++#define CONFIG_MPEG2_NVDEC_HWACCEL 0 ++#define CONFIG_MPEG2_DXVA2_HWACCEL 0 ++#define CONFIG_MPEG2_VAAPI_HWACCEL 0 ++#define CONFIG_MPEG2_VDPAU_HWACCEL 0 ++#define CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_MPEG2_XVMC_HWACCEL 0 ++#define CONFIG_MPEG4_NVDEC_HWACCEL 0 ++#define CONFIG_MPEG4_VAAPI_HWACCEL 0 ++#define CONFIG_MPEG4_VDPAU_HWACCEL 0 ++#define CONFIG_MPEG4_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_VC1_D3D11VA_HWACCEL 0 ++#define CONFIG_VC1_D3D11VA2_HWACCEL 0 ++#define CONFIG_VC1_DXVA2_HWACCEL 0 ++#define CONFIG_VC1_NVDEC_HWACCEL 0 ++#define CONFIG_VC1_VAAPI_HWACCEL 0 ++#define CONFIG_VC1_VDPAU_HWACCEL 0 ++#define CONFIG_VP8_NVDEC_HWACCEL 0 ++#define CONFIG_VP8_VAAPI_HWACCEL 0 ++#define CONFIG_VP9_D3D11VA_HWACCEL 0 ++#define CONFIG_VP9_D3D11VA2_HWACCEL 0 ++#define CONFIG_VP9_DXVA2_HWACCEL 0 ++#define CONFIG_VP9_NVDEC_HWACCEL 0 ++#define CONFIG_VP9_VAAPI_HWACCEL 0 ++#define CONFIG_VP9_VDPAU_HWACCEL 0 ++#define CONFIG_WMV3_D3D11VA_HWACCEL 0 ++#define CONFIG_WMV3_D3D11VA2_HWACCEL 0 ++#define CONFIG_WMV3_DXVA2_HWACCEL 0 ++#define CONFIG_WMV3_NVDEC_HWACCEL 0 ++#define CONFIG_WMV3_VAAPI_HWACCEL 0 ++#define CONFIG_WMV3_VDPAU_HWACCEL 0 ++#define CONFIG_AAC_PARSER 1 ++#define CONFIG_AAC_LATM_PARSER 0 ++#define CONFIG_AC3_PARSER 0 ++#define CONFIG_ADX_PARSER 0 ++#define CONFIG_AV1_PARSER 0 ++#define CONFIG_AVS2_PARSER 0 ++#define CONFIG_BMP_PARSER 0 ++#define CONFIG_CAVSVIDEO_PARSER 0 ++#define CONFIG_COOK_PARSER 0 ++#define CONFIG_DCA_PARSER 0 ++#define CONFIG_DIRAC_PARSER 0 ++#define CONFIG_DNXHD_PARSER 0 ++#define CONFIG_DPX_PARSER 0 ++#define CONFIG_DVAUDIO_PARSER 0 ++#define CONFIG_DVBSUB_PARSER 0 ++#define CONFIG_DVDSUB_PARSER 0 ++#define CONFIG_DVD_NAV_PARSER 0 ++#define CONFIG_FLAC_PARSER 1 ++#define CONFIG_G723_1_PARSER 0 ++#define CONFIG_G729_PARSER 0 ++#define CONFIG_GIF_PARSER 0 ++#define CONFIG_GSM_PARSER 1 ++#define CONFIG_H261_PARSER 0 ++#define CONFIG_H263_PARSER 1 ++#define CONFIG_H264_PARSER 1 ++#define CONFIG_HEVC_PARSER 0 ++#define CONFIG_MJPEG_PARSER 0 ++#define CONFIG_MLP_PARSER 0 ++#define CONFIG_MPEG4VIDEO_PARSER 1 ++#define CONFIG_MPEGAUDIO_PARSER 1 ++#define CONFIG_MPEGVIDEO_PARSER 0 ++#define CONFIG_OPUS_PARSER 1 
++#define CONFIG_PNG_PARSER 0 ++#define CONFIG_PNM_PARSER 0 ++#define CONFIG_RV30_PARSER 0 ++#define CONFIG_RV40_PARSER 0 ++#define CONFIG_SBC_PARSER 0 ++#define CONFIG_SIPR_PARSER 0 ++#define CONFIG_TAK_PARSER 0 ++#define CONFIG_VC1_PARSER 0 ++#define CONFIG_VORBIS_PARSER 1 ++#define CONFIG_VP3_PARSER 1 ++#define CONFIG_VP8_PARSER 1 ++#define CONFIG_VP9_PARSER 1 ++#define CONFIG_WEBP_PARSER 0 ++#define CONFIG_XMA_PARSER 0 ++#define CONFIG_ALSA_INDEV 0 ++#define CONFIG_ANDROID_CAMERA_INDEV 0 ++#define CONFIG_AVFOUNDATION_INDEV 0 ++#define CONFIG_BKTR_INDEV 0 ++#define CONFIG_DECKLINK_INDEV 0 ++#define CONFIG_DSHOW_INDEV 0 ++#define CONFIG_FBDEV_INDEV 0 ++#define CONFIG_GDIGRAB_INDEV 0 ++#define CONFIG_IEC61883_INDEV 0 ++#define CONFIG_JACK_INDEV 0 ++#define CONFIG_KMSGRAB_INDEV 0 ++#define CONFIG_LAVFI_INDEV 0 ++#define CONFIG_OPENAL_INDEV 0 ++#define CONFIG_OSS_INDEV 0 ++#define CONFIG_PULSE_INDEV 0 ++#define CONFIG_SNDIO_INDEV 0 ++#define CONFIG_V4L2_INDEV 0 ++#define CONFIG_VFWCAP_INDEV 0 ++#define CONFIG_XCBGRAB_INDEV 0 ++#define CONFIG_LIBCDIO_INDEV 0 ++#define CONFIG_LIBDC1394_INDEV 0 ++#define CONFIG_ALSA_OUTDEV 0 ++#define CONFIG_CACA_OUTDEV 0 ++#define CONFIG_DECKLINK_OUTDEV 0 ++#define CONFIG_FBDEV_OUTDEV 0 ++#define CONFIG_OPENGL_OUTDEV 0 ++#define CONFIG_OSS_OUTDEV 0 ++#define CONFIG_PULSE_OUTDEV 0 ++#define CONFIG_SDL2_OUTDEV 0 ++#define CONFIG_SNDIO_OUTDEV 0 ++#define CONFIG_V4L2_OUTDEV 0 ++#define CONFIG_XV_OUTDEV 0 ++#define CONFIG_ABENCH_FILTER 0 ++#define CONFIG_ACOMPRESSOR_FILTER 0 ++#define CONFIG_ACONTRAST_FILTER 0 ++#define CONFIG_ACOPY_FILTER 0 ++#define CONFIG_ACUE_FILTER 0 ++#define CONFIG_ACROSSFADE_FILTER 0 ++#define CONFIG_ACROSSOVER_FILTER 0 ++#define CONFIG_ACRUSHER_FILTER 0 ++#define CONFIG_ADECLICK_FILTER 0 ++#define CONFIG_ADECLIP_FILTER 0 ++#define CONFIG_ADELAY_FILTER 0 ++#define CONFIG_ADERIVATIVE_FILTER 0 ++#define CONFIG_AECHO_FILTER 0 ++#define CONFIG_AEMPHASIS_FILTER 0 ++#define CONFIG_AEVAL_FILTER 0 ++#define CONFIG_AFADE_FILTER 0 ++#define CONFIG_AFFTDN_FILTER 0 ++#define CONFIG_AFFTFILT_FILTER 0 ++#define CONFIG_AFIR_FILTER 0 ++#define CONFIG_AFORMAT_FILTER 0 ++#define CONFIG_AGATE_FILTER 0 ++#define CONFIG_AIIR_FILTER 0 ++#define CONFIG_AINTEGRAL_FILTER 0 ++#define CONFIG_AINTERLEAVE_FILTER 0 ++#define CONFIG_ALIMITER_FILTER 0 ++#define CONFIG_ALLPASS_FILTER 0 ++#define CONFIG_ALOOP_FILTER 0 ++#define CONFIG_AMERGE_FILTER 0 ++#define CONFIG_AMETADATA_FILTER 0 ++#define CONFIG_AMIX_FILTER 0 ++#define CONFIG_AMULTIPLY_FILTER 0 ++#define CONFIG_ANEQUALIZER_FILTER 0 ++#define CONFIG_ANLMDN_FILTER 0 ++#define CONFIG_ANLMS_FILTER 0 ++#define CONFIG_ANULL_FILTER 0 ++#define CONFIG_APAD_FILTER 0 ++#define CONFIG_APERMS_FILTER 0 ++#define CONFIG_APHASER_FILTER 0 ++#define CONFIG_APULSATOR_FILTER 0 ++#define CONFIG_AREALTIME_FILTER 0 ++#define CONFIG_ARESAMPLE_FILTER 0 ++#define CONFIG_AREVERSE_FILTER 0 ++#define CONFIG_ARNNDN_FILTER 0 ++#define CONFIG_ASELECT_FILTER 0 ++#define CONFIG_ASENDCMD_FILTER 0 ++#define CONFIG_ASETNSAMPLES_FILTER 0 ++#define CONFIG_ASETPTS_FILTER 0 ++#define CONFIG_ASETRATE_FILTER 0 ++#define CONFIG_ASETTB_FILTER 0 ++#define CONFIG_ASHOWINFO_FILTER 0 ++#define CONFIG_ASIDEDATA_FILTER 0 ++#define CONFIG_ASOFTCLIP_FILTER 0 ++#define CONFIG_ASPLIT_FILTER 0 ++#define CONFIG_ASR_FILTER 0 ++#define CONFIG_ASTATS_FILTER 0 ++#define CONFIG_ASTREAMSELECT_FILTER 0 ++#define CONFIG_ATEMPO_FILTER 0 ++#define CONFIG_ATRIM_FILTER 0 ++#define CONFIG_AXCORRELATE_FILTER 0 ++#define CONFIG_AZMQ_FILTER 0 ++#define CONFIG_BANDPASS_FILTER 0 ++#define 
CONFIG_BANDREJECT_FILTER 0 ++#define CONFIG_BASS_FILTER 0 ++#define CONFIG_BIQUAD_FILTER 0 ++#define CONFIG_BS2B_FILTER 0 ++#define CONFIG_CHROMABER_VULKAN_FILTER 0 ++#define CONFIG_CHANNELMAP_FILTER 0 ++#define CONFIG_CHANNELSPLIT_FILTER 0 ++#define CONFIG_CHORUS_FILTER 0 ++#define CONFIG_COMPAND_FILTER 0 ++#define CONFIG_COMPENSATIONDELAY_FILTER 0 ++#define CONFIG_CROSSFEED_FILTER 0 ++#define CONFIG_CRYSTALIZER_FILTER 0 ++#define CONFIG_DCSHIFT_FILTER 0 ++#define CONFIG_DEESSER_FILTER 0 ++#define CONFIG_DRMETER_FILTER 0 ++#define CONFIG_DYNAUDNORM_FILTER 0 ++#define CONFIG_EARWAX_FILTER 0 ++#define CONFIG_EBUR128_FILTER 0 ++#define CONFIG_EQUALIZER_FILTER 0 ++#define CONFIG_EXTRASTEREO_FILTER 0 ++#define CONFIG_FIREQUALIZER_FILTER 0 ++#define CONFIG_FLANGER_FILTER 0 ++#define CONFIG_HAAS_FILTER 0 ++#define CONFIG_HDCD_FILTER 0 ++#define CONFIG_HEADPHONE_FILTER 0 ++#define CONFIG_HIGHPASS_FILTER 0 ++#define CONFIG_HIGHSHELF_FILTER 0 ++#define CONFIG_JOIN_FILTER 0 ++#define CONFIG_LADSPA_FILTER 0 ++#define CONFIG_LOUDNORM_FILTER 0 ++#define CONFIG_LOWPASS_FILTER 0 ++#define CONFIG_LOWSHELF_FILTER 0 ++#define CONFIG_LV2_FILTER 0 ++#define CONFIG_MCOMPAND_FILTER 0 ++#define CONFIG_PAN_FILTER 0 ++#define CONFIG_REPLAYGAIN_FILTER 0 ++#define CONFIG_RESAMPLE_FILTER 0 ++#define CONFIG_RUBBERBAND_FILTER 0 ++#define CONFIG_SIDECHAINCOMPRESS_FILTER 0 ++#define CONFIG_SIDECHAINGATE_FILTER 0 ++#define CONFIG_SILENCEDETECT_FILTER 0 ++#define CONFIG_SILENCEREMOVE_FILTER 0 ++#define CONFIG_SOFALIZER_FILTER 0 ++#define CONFIG_STEREOTOOLS_FILTER 0 ++#define CONFIG_STEREOWIDEN_FILTER 0 ++#define CONFIG_SUPEREQUALIZER_FILTER 0 ++#define CONFIG_SURROUND_FILTER 0 ++#define CONFIG_TREBLE_FILTER 0 ++#define CONFIG_TREMOLO_FILTER 0 ++#define CONFIG_VIBRATO_FILTER 0 ++#define CONFIG_VOLUME_FILTER 0 ++#define CONFIG_VOLUMEDETECT_FILTER 0 ++#define CONFIG_AEVALSRC_FILTER 0 ++#define CONFIG_AFIRSRC_FILTER 0 ++#define CONFIG_ANOISESRC_FILTER 0 ++#define CONFIG_ANULLSRC_FILTER 0 ++#define CONFIG_FLITE_FILTER 0 ++#define CONFIG_HILBERT_FILTER 0 ++#define CONFIG_SINC_FILTER 0 ++#define CONFIG_SINE_FILTER 0 ++#define CONFIG_ANULLSINK_FILTER 0 ++#define CONFIG_ADDROI_FILTER 0 ++#define CONFIG_ALPHAEXTRACT_FILTER 0 ++#define CONFIG_ALPHAMERGE_FILTER 0 ++#define CONFIG_AMPLIFY_FILTER 0 ++#define CONFIG_ASS_FILTER 0 ++#define CONFIG_ATADENOISE_FILTER 0 ++#define CONFIG_AVGBLUR_FILTER 0 ++#define CONFIG_AVGBLUR_OPENCL_FILTER 0 ++#define CONFIG_AVGBLUR_VULKAN_FILTER 0 ++#define CONFIG_BBOX_FILTER 0 ++#define CONFIG_BENCH_FILTER 0 ++#define CONFIG_BILATERAL_FILTER 0 ++#define CONFIG_BITPLANENOISE_FILTER 0 ++#define CONFIG_BLACKDETECT_FILTER 0 ++#define CONFIG_BLACKFRAME_FILTER 0 ++#define CONFIG_BLEND_FILTER 0 ++#define CONFIG_BM3D_FILTER 0 ++#define CONFIG_BOXBLUR_FILTER 0 ++#define CONFIG_BOXBLUR_OPENCL_FILTER 0 ++#define CONFIG_BWDIF_FILTER 0 ++#define CONFIG_CAS_FILTER 0 ++#define CONFIG_CHROMAHOLD_FILTER 0 ++#define CONFIG_CHROMAKEY_FILTER 0 ++#define CONFIG_CHROMASHIFT_FILTER 0 ++#define CONFIG_CIESCOPE_FILTER 0 ++#define CONFIG_CODECVIEW_FILTER 0 ++#define CONFIG_COLORBALANCE_FILTER 0 ++#define CONFIG_COLORCHANNELMIXER_FILTER 0 ++#define CONFIG_COLORKEY_FILTER 0 ++#define CONFIG_COLORKEY_OPENCL_FILTER 0 ++#define CONFIG_COLORHOLD_FILTER 0 ++#define CONFIG_COLORLEVELS_FILTER 0 ++#define CONFIG_COLORMATRIX_FILTER 0 ++#define CONFIG_COLORSPACE_FILTER 0 ++#define CONFIG_CONVOLUTION_FILTER 0 ++#define CONFIG_CONVOLUTION_OPENCL_FILTER 0 ++#define CONFIG_CONVOLVE_FILTER 0 ++#define CONFIG_COPY_FILTER 0 ++#define 
CONFIG_COREIMAGE_FILTER 0 ++#define CONFIG_COVER_RECT_FILTER 0 ++#define CONFIG_CROP_FILTER 0 ++#define CONFIG_CROPDETECT_FILTER 0 ++#define CONFIG_CUE_FILTER 0 ++#define CONFIG_CURVES_FILTER 0 ++#define CONFIG_DATASCOPE_FILTER 0 ++#define CONFIG_DCTDNOIZ_FILTER 0 ++#define CONFIG_DEBAND_FILTER 0 ++#define CONFIG_DEBLOCK_FILTER 0 ++#define CONFIG_DECIMATE_FILTER 0 ++#define CONFIG_DECONVOLVE_FILTER 0 ++#define CONFIG_DEDOT_FILTER 0 ++#define CONFIG_DEFLATE_FILTER 0 ++#define CONFIG_DEFLICKER_FILTER 0 ++#define CONFIG_DEINTERLACE_QSV_FILTER 0 ++#define CONFIG_DEINTERLACE_VAAPI_FILTER 0 ++#define CONFIG_DEJUDDER_FILTER 0 ++#define CONFIG_DELOGO_FILTER 0 ++#define CONFIG_DENOISE_VAAPI_FILTER 0 ++#define CONFIG_DERAIN_FILTER 0 ++#define CONFIG_DESHAKE_FILTER 0 ++#define CONFIG_DESHAKE_OPENCL_FILTER 0 ++#define CONFIG_DESPILL_FILTER 0 ++#define CONFIG_DETELECINE_FILTER 0 ++#define CONFIG_DILATION_FILTER 0 ++#define CONFIG_DILATION_OPENCL_FILTER 0 ++#define CONFIG_DISPLACE_FILTER 0 ++#define CONFIG_DNN_PROCESSING_FILTER 0 ++#define CONFIG_DOUBLEWEAVE_FILTER 0 ++#define CONFIG_DRAWBOX_FILTER 0 ++#define CONFIG_DRAWGRAPH_FILTER 0 ++#define CONFIG_DRAWGRID_FILTER 0 ++#define CONFIG_DRAWTEXT_FILTER 0 ++#define CONFIG_EDGEDETECT_FILTER 0 ++#define CONFIG_ELBG_FILTER 0 ++#define CONFIG_ENTROPY_FILTER 0 ++#define CONFIG_EQ_FILTER 0 ++#define CONFIG_EROSION_FILTER 0 ++#define CONFIG_EROSION_OPENCL_FILTER 0 ++#define CONFIG_EXTRACTPLANES_FILTER 0 ++#define CONFIG_FADE_FILTER 0 ++#define CONFIG_FFTDNOIZ_FILTER 0 ++#define CONFIG_FFTFILT_FILTER 0 ++#define CONFIG_FIELD_FILTER 0 ++#define CONFIG_FIELDHINT_FILTER 0 ++#define CONFIG_FIELDMATCH_FILTER 0 ++#define CONFIG_FIELDORDER_FILTER 0 ++#define CONFIG_FILLBORDERS_FILTER 0 ++#define CONFIG_FIND_RECT_FILTER 0 ++#define CONFIG_FLOODFILL_FILTER 0 ++#define CONFIG_FORMAT_FILTER 0 ++#define CONFIG_FPS_FILTER 0 ++#define CONFIG_FRAMEPACK_FILTER 0 ++#define CONFIG_FRAMERATE_FILTER 0 ++#define CONFIG_FRAMESTEP_FILTER 0 ++#define CONFIG_FREEZEDETECT_FILTER 0 ++#define CONFIG_FREEZEFRAMES_FILTER 0 ++#define CONFIG_FREI0R_FILTER 0 ++#define CONFIG_FSPP_FILTER 0 ++#define CONFIG_GBLUR_FILTER 0 ++#define CONFIG_GEQ_FILTER 0 ++#define CONFIG_GRADFUN_FILTER 0 ++#define CONFIG_GRAPHMONITOR_FILTER 0 ++#define CONFIG_GREYEDGE_FILTER 0 ++#define CONFIG_HALDCLUT_FILTER 0 ++#define CONFIG_HFLIP_FILTER 0 ++#define CONFIG_HISTEQ_FILTER 0 ++#define CONFIG_HISTOGRAM_FILTER 0 ++#define CONFIG_HQDN3D_FILTER 0 ++#define CONFIG_HQX_FILTER 0 ++#define CONFIG_HSTACK_FILTER 0 ++#define CONFIG_HUE_FILTER 0 ++#define CONFIG_HWDOWNLOAD_FILTER 0 ++#define CONFIG_HWMAP_FILTER 0 ++#define CONFIG_HWUPLOAD_FILTER 0 ++#define CONFIG_HWUPLOAD_CUDA_FILTER 0 ++#define CONFIG_HYSTERESIS_FILTER 0 ++#define CONFIG_IDET_FILTER 0 ++#define CONFIG_IL_FILTER 0 ++#define CONFIG_INFLATE_FILTER 0 ++#define CONFIG_INTERLACE_FILTER 0 ++#define CONFIG_INTERLEAVE_FILTER 0 ++#define CONFIG_KERNDEINT_FILTER 0 ++#define CONFIG_LAGFUN_FILTER 0 ++#define CONFIG_LENSCORRECTION_FILTER 0 ++#define CONFIG_LENSFUN_FILTER 0 ++#define CONFIG_LIBVMAF_FILTER 0 ++#define CONFIG_LIMITER_FILTER 0 ++#define CONFIG_LOOP_FILTER 0 ++#define CONFIG_LUMAKEY_FILTER 0 ++#define CONFIG_LUT_FILTER 0 ++#define CONFIG_LUT1D_FILTER 0 ++#define CONFIG_LUT2_FILTER 0 ++#define CONFIG_LUT3D_FILTER 0 ++#define CONFIG_LUTRGB_FILTER 0 ++#define CONFIG_LUTYUV_FILTER 0 ++#define CONFIG_MASKEDCLAMP_FILTER 0 ++#define CONFIG_MASKEDMAX_FILTER 0 ++#define CONFIG_MASKEDMERGE_FILTER 0 ++#define CONFIG_MASKEDMIN_FILTER 0 ++#define 
CONFIG_MASKEDTHRESHOLD_FILTER 0 ++#define CONFIG_MASKFUN_FILTER 0 ++#define CONFIG_MCDEINT_FILTER 0 ++#define CONFIG_MEDIAN_FILTER 0 ++#define CONFIG_MERGEPLANES_FILTER 0 ++#define CONFIG_MESTIMATE_FILTER 0 ++#define CONFIG_METADATA_FILTER 0 ++#define CONFIG_MIDEQUALIZER_FILTER 0 ++#define CONFIG_MINTERPOLATE_FILTER 0 ++#define CONFIG_MIX_FILTER 0 ++#define CONFIG_MPDECIMATE_FILTER 0 ++#define CONFIG_NEGATE_FILTER 0 ++#define CONFIG_NLMEANS_FILTER 0 ++#define CONFIG_NLMEANS_OPENCL_FILTER 0 ++#define CONFIG_NNEDI_FILTER 0 ++#define CONFIG_NOFORMAT_FILTER 0 ++#define CONFIG_NOISE_FILTER 0 ++#define CONFIG_NORMALIZE_FILTER 0 ++#define CONFIG_NULL_FILTER 0 ++#define CONFIG_OCR_FILTER 0 ++#define CONFIG_OCV_FILTER 0 ++#define CONFIG_OSCILLOSCOPE_FILTER 0 ++#define CONFIG_OVERLAY_FILTER 0 ++#define CONFIG_OVERLAY_OPENCL_FILTER 0 ++#define CONFIG_OVERLAY_QSV_FILTER 0 ++#define CONFIG_OVERLAY_VULKAN_FILTER 0 ++#define CONFIG_OVERLAY_CUDA_FILTER 0 ++#define CONFIG_OWDENOISE_FILTER 0 ++#define CONFIG_PAD_FILTER 0 ++#define CONFIG_PAD_OPENCL_FILTER 0 ++#define CONFIG_PALETTEGEN_FILTER 0 ++#define CONFIG_PALETTEUSE_FILTER 0 ++#define CONFIG_PERMS_FILTER 0 ++#define CONFIG_PERSPECTIVE_FILTER 0 ++#define CONFIG_PHASE_FILTER 0 ++#define CONFIG_PHOTOSENSITIVITY_FILTER 0 ++#define CONFIG_PIXDESCTEST_FILTER 0 ++#define CONFIG_PIXSCOPE_FILTER 0 ++#define CONFIG_PP_FILTER 0 ++#define CONFIG_PP7_FILTER 0 ++#define CONFIG_PREMULTIPLY_FILTER 0 ++#define CONFIG_PREWITT_FILTER 0 ++#define CONFIG_PREWITT_OPENCL_FILTER 0 ++#define CONFIG_PROCAMP_VAAPI_FILTER 0 ++#define CONFIG_PROGRAM_OPENCL_FILTER 0 ++#define CONFIG_PSEUDOCOLOR_FILTER 0 ++#define CONFIG_PSNR_FILTER 0 ++#define CONFIG_PULLUP_FILTER 0 ++#define CONFIG_QP_FILTER 0 ++#define CONFIG_RANDOM_FILTER 0 ++#define CONFIG_READEIA608_FILTER 0 ++#define CONFIG_READVITC_FILTER 0 ++#define CONFIG_REALTIME_FILTER 0 ++#define CONFIG_REMAP_FILTER 0 ++#define CONFIG_REMOVEGRAIN_FILTER 0 ++#define CONFIG_REMOVELOGO_FILTER 0 ++#define CONFIG_REPEATFIELDS_FILTER 0 ++#define CONFIG_REVERSE_FILTER 0 ++#define CONFIG_RGBASHIFT_FILTER 0 ++#define CONFIG_ROBERTS_FILTER 0 ++#define CONFIG_ROBERTS_OPENCL_FILTER 0 ++#define CONFIG_ROTATE_FILTER 0 ++#define CONFIG_SAB_FILTER 0 ++#define CONFIG_SCALE_FILTER 0 ++#define CONFIG_SCALE_CUDA_FILTER 0 ++#define CONFIG_SCALE_NPP_FILTER 0 ++#define CONFIG_SCALE_QSV_FILTER 0 ++#define CONFIG_SCALE_VAAPI_FILTER 0 ++#define CONFIG_SCALE_VULKAN_FILTER 0 ++#define CONFIG_SCALE2REF_FILTER 0 ++#define CONFIG_SCROLL_FILTER 0 ++#define CONFIG_SELECT_FILTER 0 ++#define CONFIG_SELECTIVECOLOR_FILTER 0 ++#define CONFIG_SENDCMD_FILTER 0 ++#define CONFIG_SEPARATEFIELDS_FILTER 0 ++#define CONFIG_SETDAR_FILTER 0 ++#define CONFIG_SETFIELD_FILTER 0 ++#define CONFIG_SETPARAMS_FILTER 0 ++#define CONFIG_SETPTS_FILTER 0 ++#define CONFIG_SETRANGE_FILTER 0 ++#define CONFIG_SETSAR_FILTER 0 ++#define CONFIG_SETTB_FILTER 0 ++#define CONFIG_SHARPNESS_VAAPI_FILTER 0 ++#define CONFIG_SHOWINFO_FILTER 0 ++#define CONFIG_SHOWPALETTE_FILTER 0 ++#define CONFIG_SHUFFLEFRAMES_FILTER 0 ++#define CONFIG_SHUFFLEPLANES_FILTER 0 ++#define CONFIG_SIDEDATA_FILTER 0 ++#define CONFIG_SIGNALSTATS_FILTER 0 ++#define CONFIG_SIGNATURE_FILTER 0 ++#define CONFIG_SMARTBLUR_FILTER 0 ++#define CONFIG_SOBEL_FILTER 0 ++#define CONFIG_SOBEL_OPENCL_FILTER 0 ++#define CONFIG_SPLIT_FILTER 0 ++#define CONFIG_SPP_FILTER 0 ++#define CONFIG_SR_FILTER 0 ++#define CONFIG_SSIM_FILTER 0 ++#define CONFIG_STEREO3D_FILTER 0 ++#define CONFIG_STREAMSELECT_FILTER 0 ++#define CONFIG_SUBTITLES_FILTER 0 ++#define 
CONFIG_SUPER2XSAI_FILTER 0 ++#define CONFIG_SWAPRECT_FILTER 0 ++#define CONFIG_SWAPUV_FILTER 0 ++#define CONFIG_TBLEND_FILTER 0 ++#define CONFIG_TELECINE_FILTER 0 ++#define CONFIG_THISTOGRAM_FILTER 0 ++#define CONFIG_THRESHOLD_FILTER 0 ++#define CONFIG_THUMBNAIL_FILTER 0 ++#define CONFIG_THUMBNAIL_CUDA_FILTER 0 ++#define CONFIG_TILE_FILTER 0 ++#define CONFIG_TINTERLACE_FILTER 0 ++#define CONFIG_TLUT2_FILTER 0 ++#define CONFIG_TMEDIAN_FILTER 0 ++#define CONFIG_TMIX_FILTER 0 ++#define CONFIG_TONEMAP_FILTER 0 ++#define CONFIG_TONEMAP_OPENCL_FILTER 0 ++#define CONFIG_TONEMAP_VAAPI_FILTER 0 ++#define CONFIG_TPAD_FILTER 0 ++#define CONFIG_TRANSPOSE_FILTER 0 ++#define CONFIG_TRANSPOSE_NPP_FILTER 0 ++#define CONFIG_TRANSPOSE_OPENCL_FILTER 0 ++#define CONFIG_TRANSPOSE_VAAPI_FILTER 0 ++#define CONFIG_TRIM_FILTER 0 ++#define CONFIG_UNPREMULTIPLY_FILTER 0 ++#define CONFIG_UNSHARP_FILTER 0 ++#define CONFIG_UNSHARP_OPENCL_FILTER 0 ++#define CONFIG_USPP_FILTER 0 ++#define CONFIG_V360_FILTER 0 ++#define CONFIG_VAGUEDENOISER_FILTER 0 ++#define CONFIG_VECTORSCOPE_FILTER 0 ++#define CONFIG_VFLIP_FILTER 0 ++#define CONFIG_VFRDET_FILTER 0 ++#define CONFIG_VIBRANCE_FILTER 0 ++#define CONFIG_VIDSTABDETECT_FILTER 0 ++#define CONFIG_VIDSTABTRANSFORM_FILTER 0 ++#define CONFIG_VIGNETTE_FILTER 0 ++#define CONFIG_VMAFMOTION_FILTER 0 ++#define CONFIG_VPP_QSV_FILTER 0 ++#define CONFIG_VSTACK_FILTER 0 ++#define CONFIG_W3FDIF_FILTER 0 ++#define CONFIG_WAVEFORM_FILTER 0 ++#define CONFIG_WEAVE_FILTER 0 ++#define CONFIG_XBR_FILTER 0 ++#define CONFIG_XFADE_FILTER 0 ++#define CONFIG_XFADE_OPENCL_FILTER 0 ++#define CONFIG_XMEDIAN_FILTER 0 ++#define CONFIG_XSTACK_FILTER 0 ++#define CONFIG_YADIF_FILTER 0 ++#define CONFIG_YADIF_CUDA_FILTER 0 ++#define CONFIG_YAEPBLUR_FILTER 0 ++#define CONFIG_ZMQ_FILTER 0 ++#define CONFIG_ZOOMPAN_FILTER 0 ++#define CONFIG_ZSCALE_FILTER 0 ++#define CONFIG_ALLRGB_FILTER 0 ++#define CONFIG_ALLYUV_FILTER 0 ++#define CONFIG_CELLAUTO_FILTER 0 ++#define CONFIG_COLOR_FILTER 0 ++#define CONFIG_COREIMAGESRC_FILTER 0 ++#define CONFIG_FREI0R_SRC_FILTER 0 ++#define CONFIG_HALDCLUTSRC_FILTER 0 ++#define CONFIG_LIFE_FILTER 0 ++#define CONFIG_MANDELBROT_FILTER 0 ++#define CONFIG_MPTESTSRC_FILTER 0 ++#define CONFIG_NULLSRC_FILTER 0 ++#define CONFIG_OPENCLSRC_FILTER 0 ++#define CONFIG_PAL75BARS_FILTER 0 ++#define CONFIG_PAL100BARS_FILTER 0 ++#define CONFIG_RGBTESTSRC_FILTER 0 ++#define CONFIG_SIERPINSKI_FILTER 0 ++#define CONFIG_SMPTEBARS_FILTER 0 ++#define CONFIG_SMPTEHDBARS_FILTER 0 ++#define CONFIG_TESTSRC_FILTER 0 ++#define CONFIG_TESTSRC2_FILTER 0 ++#define CONFIG_YUVTESTSRC_FILTER 0 ++#define CONFIG_NULLSINK_FILTER 0 ++#define CONFIG_ABITSCOPE_FILTER 0 ++#define CONFIG_ADRAWGRAPH_FILTER 0 ++#define CONFIG_AGRAPHMONITOR_FILTER 0 ++#define CONFIG_AHISTOGRAM_FILTER 0 ++#define CONFIG_APHASEMETER_FILTER 0 ++#define CONFIG_AVECTORSCOPE_FILTER 0 ++#define CONFIG_CONCAT_FILTER 0 ++#define CONFIG_SHOWCQT_FILTER 0 ++#define CONFIG_SHOWFREQS_FILTER 0 ++#define CONFIG_SHOWSPATIAL_FILTER 0 ++#define CONFIG_SHOWSPECTRUM_FILTER 0 ++#define CONFIG_SHOWSPECTRUMPIC_FILTER 0 ++#define CONFIG_SHOWVOLUME_FILTER 0 ++#define CONFIG_SHOWWAVES_FILTER 0 ++#define CONFIG_SHOWWAVESPIC_FILTER 0 ++#define CONFIG_SPECTRUMSYNTH_FILTER 0 ++#define CONFIG_AMOVIE_FILTER 0 ++#define CONFIG_MOVIE_FILTER 0 ++#define CONFIG_AFIFO_FILTER 0 ++#define CONFIG_FIFO_FILTER 0 ++#define CONFIG_AA_DEMUXER 0 ++#define CONFIG_AAC_DEMUXER 1 ++#define CONFIG_AC3_DEMUXER 0 ++#define CONFIG_ACM_DEMUXER 0 ++#define CONFIG_ACT_DEMUXER 0 ++#define 
CONFIG_ADF_DEMUXER 0 ++#define CONFIG_ADP_DEMUXER 0 ++#define CONFIG_ADS_DEMUXER 0 ++#define CONFIG_ADX_DEMUXER 0 ++#define CONFIG_AEA_DEMUXER 0 ++#define CONFIG_AFC_DEMUXER 0 ++#define CONFIG_AIFF_DEMUXER 0 ++#define CONFIG_AIX_DEMUXER 0 ++#define CONFIG_ALP_DEMUXER 0 ++#define CONFIG_AMR_DEMUXER 1 ++#define CONFIG_AMRNB_DEMUXER 0 ++#define CONFIG_AMRWB_DEMUXER 0 ++#define CONFIG_ANM_DEMUXER 0 ++#define CONFIG_APC_DEMUXER 0 ++#define CONFIG_APE_DEMUXER 0 ++#define CONFIG_APM_DEMUXER 0 ++#define CONFIG_APNG_DEMUXER 0 ++#define CONFIG_APTX_DEMUXER 0 ++#define CONFIG_APTX_HD_DEMUXER 0 ++#define CONFIG_AQTITLE_DEMUXER 0 ++#define CONFIG_ARGO_ASF_DEMUXER 0 ++#define CONFIG_ASF_DEMUXER 0 ++#define CONFIG_ASF_O_DEMUXER 0 ++#define CONFIG_ASS_DEMUXER 0 ++#define CONFIG_AST_DEMUXER 0 ++#define CONFIG_AU_DEMUXER 0 ++#define CONFIG_AV1_DEMUXER 0 ++#define CONFIG_AVI_DEMUXER 1 ++#define CONFIG_AVISYNTH_DEMUXER 0 ++#define CONFIG_AVR_DEMUXER 0 ++#define CONFIG_AVS_DEMUXER 0 ++#define CONFIG_AVS2_DEMUXER 0 ++#define CONFIG_BETHSOFTVID_DEMUXER 0 ++#define CONFIG_BFI_DEMUXER 0 ++#define CONFIG_BINTEXT_DEMUXER 0 ++#define CONFIG_BINK_DEMUXER 0 ++#define CONFIG_BIT_DEMUXER 0 ++#define CONFIG_BMV_DEMUXER 0 ++#define CONFIG_BFSTM_DEMUXER 0 ++#define CONFIG_BRSTM_DEMUXER 0 ++#define CONFIG_BOA_DEMUXER 0 ++#define CONFIG_C93_DEMUXER 0 ++#define CONFIG_CAF_DEMUXER 0 ++#define CONFIG_CAVSVIDEO_DEMUXER 0 ++#define CONFIG_CDG_DEMUXER 0 ++#define CONFIG_CDXL_DEMUXER 0 ++#define CONFIG_CINE_DEMUXER 0 ++#define CONFIG_CODEC2_DEMUXER 0 ++#define CONFIG_CODEC2RAW_DEMUXER 0 ++#define CONFIG_CONCAT_DEMUXER 0 ++#define CONFIG_DASH_DEMUXER 0 ++#define CONFIG_DATA_DEMUXER 0 ++#define CONFIG_DAUD_DEMUXER 0 ++#define CONFIG_DCSTR_DEMUXER 0 ++#define CONFIG_DERF_DEMUXER 0 ++#define CONFIG_DFA_DEMUXER 0 ++#define CONFIG_DHAV_DEMUXER 0 ++#define CONFIG_DIRAC_DEMUXER 0 ++#define CONFIG_DNXHD_DEMUXER 0 ++#define CONFIG_DSF_DEMUXER 0 ++#define CONFIG_DSICIN_DEMUXER 0 ++#define CONFIG_DSS_DEMUXER 0 ++#define CONFIG_DTS_DEMUXER 0 ++#define CONFIG_DTSHD_DEMUXER 0 ++#define CONFIG_DV_DEMUXER 0 ++#define CONFIG_DVBSUB_DEMUXER 0 ++#define CONFIG_DVBTXT_DEMUXER 0 ++#define CONFIG_DXA_DEMUXER 0 ++#define CONFIG_EA_DEMUXER 0 ++#define CONFIG_EA_CDATA_DEMUXER 0 ++#define CONFIG_EAC3_DEMUXER 0 ++#define CONFIG_EPAF_DEMUXER 0 ++#define CONFIG_FFMETADATA_DEMUXER 0 ++#define CONFIG_FILMSTRIP_DEMUXER 0 ++#define CONFIG_FITS_DEMUXER 0 ++#define CONFIG_FLAC_DEMUXER 1 ++#define CONFIG_FLIC_DEMUXER 0 ++#define CONFIG_FLV_DEMUXER 0 ++#define CONFIG_LIVE_FLV_DEMUXER 0 ++#define CONFIG_FOURXM_DEMUXER 0 ++#define CONFIG_FRM_DEMUXER 0 ++#define CONFIG_FSB_DEMUXER 0 ++#define CONFIG_FWSE_DEMUXER 0 ++#define CONFIG_G722_DEMUXER 0 ++#define CONFIG_G723_1_DEMUXER 0 ++#define CONFIG_G726_DEMUXER 0 ++#define CONFIG_G726LE_DEMUXER 0 ++#define CONFIG_G729_DEMUXER 0 ++#define CONFIG_GDV_DEMUXER 0 ++#define CONFIG_GENH_DEMUXER 0 ++#define CONFIG_GIF_DEMUXER 0 ++#define CONFIG_GSM_DEMUXER 0 ++#define CONFIG_GXF_DEMUXER 0 ++#define CONFIG_H261_DEMUXER 0 ++#define CONFIG_H263_DEMUXER 0 ++#define CONFIG_H264_DEMUXER 0 ++#define CONFIG_HCA_DEMUXER 0 ++#define CONFIG_HCOM_DEMUXER 0 ++#define CONFIG_HEVC_DEMUXER 0 ++#define CONFIG_HLS_DEMUXER 0 ++#define CONFIG_HNM_DEMUXER 0 ++#define CONFIG_ICO_DEMUXER 0 ++#define CONFIG_IDCIN_DEMUXER 0 ++#define CONFIG_IDF_DEMUXER 0 ++#define CONFIG_IFF_DEMUXER 0 ++#define CONFIG_IFV_DEMUXER 0 ++#define CONFIG_ILBC_DEMUXER 0 ++#define CONFIG_IMAGE2_DEMUXER 0 ++#define CONFIG_IMAGE2PIPE_DEMUXER 0 ++#define CONFIG_IMAGE2_ALIAS_PIX_DEMUXER 
0 ++#define CONFIG_IMAGE2_BRENDER_PIX_DEMUXER 0 ++#define CONFIG_INGENIENT_DEMUXER 0 ++#define CONFIG_IPMOVIE_DEMUXER 0 ++#define CONFIG_IRCAM_DEMUXER 0 ++#define CONFIG_ISS_DEMUXER 0 ++#define CONFIG_IV8_DEMUXER 0 ++#define CONFIG_IVF_DEMUXER 0 ++#define CONFIG_IVR_DEMUXER 0 ++#define CONFIG_JACOSUB_DEMUXER 0 ++#define CONFIG_JV_DEMUXER 0 ++#define CONFIG_KUX_DEMUXER 0 ++#define CONFIG_KVAG_DEMUXER 0 ++#define CONFIG_LMLM4_DEMUXER 0 ++#define CONFIG_LOAS_DEMUXER 0 ++#define CONFIG_LRC_DEMUXER 0 ++#define CONFIG_LVF_DEMUXER 0 ++#define CONFIG_LXF_DEMUXER 0 ++#define CONFIG_M4V_DEMUXER 0 ++#define CONFIG_MATROSKA_DEMUXER 1 ++#define CONFIG_MGSTS_DEMUXER 0 ++#define CONFIG_MICRODVD_DEMUXER 0 ++#define CONFIG_MJPEG_DEMUXER 0 ++#define CONFIG_MJPEG_2000_DEMUXER 0 ++#define CONFIG_MLP_DEMUXER 0 ++#define CONFIG_MLV_DEMUXER 0 ++#define CONFIG_MM_DEMUXER 0 ++#define CONFIG_MMF_DEMUXER 0 ++#define CONFIG_MOV_DEMUXER 1 ++#define CONFIG_MP3_DEMUXER 1 ++#define CONFIG_MPC_DEMUXER 0 ++#define CONFIG_MPC8_DEMUXER 0 ++#define CONFIG_MPEGPS_DEMUXER 0 ++#define CONFIG_MPEGTS_DEMUXER 0 ++#define CONFIG_MPEGTSRAW_DEMUXER 0 ++#define CONFIG_MPEGVIDEO_DEMUXER 0 ++#define CONFIG_MPJPEG_DEMUXER 0 ++#define CONFIG_MPL2_DEMUXER 0 ++#define CONFIG_MPSUB_DEMUXER 0 ++#define CONFIG_MSF_DEMUXER 0 ++#define CONFIG_MSNWC_TCP_DEMUXER 0 ++#define CONFIG_MTAF_DEMUXER 0 ++#define CONFIG_MTV_DEMUXER 0 ++#define CONFIG_MUSX_DEMUXER 0 ++#define CONFIG_MV_DEMUXER 0 ++#define CONFIG_MVI_DEMUXER 0 ++#define CONFIG_MXF_DEMUXER 0 ++#define CONFIG_MXG_DEMUXER 0 ++#define CONFIG_NC_DEMUXER 0 ++#define CONFIG_NISTSPHERE_DEMUXER 0 ++#define CONFIG_NSP_DEMUXER 0 ++#define CONFIG_NSV_DEMUXER 0 ++#define CONFIG_NUT_DEMUXER 0 ++#define CONFIG_NUV_DEMUXER 0 ++#define CONFIG_OGG_DEMUXER 1 ++#define CONFIG_OMA_DEMUXER 0 ++#define CONFIG_PAF_DEMUXER 0 ++#define CONFIG_PCM_ALAW_DEMUXER 0 ++#define CONFIG_PCM_MULAW_DEMUXER 0 ++#define CONFIG_PCM_VIDC_DEMUXER 0 ++#define CONFIG_PCM_F64BE_DEMUXER 0 ++#define CONFIG_PCM_F64LE_DEMUXER 0 ++#define CONFIG_PCM_F32BE_DEMUXER 0 ++#define CONFIG_PCM_F32LE_DEMUXER 0 ++#define CONFIG_PCM_S32BE_DEMUXER 0 ++#define CONFIG_PCM_S32LE_DEMUXER 0 ++#define CONFIG_PCM_S24BE_DEMUXER 0 ++#define CONFIG_PCM_S24LE_DEMUXER 0 ++#define CONFIG_PCM_S16BE_DEMUXER 0 ++#define CONFIG_PCM_S16LE_DEMUXER 0 ++#define CONFIG_PCM_S8_DEMUXER 0 ++#define CONFIG_PCM_U32BE_DEMUXER 0 ++#define CONFIG_PCM_U32LE_DEMUXER 0 ++#define CONFIG_PCM_U24BE_DEMUXER 0 ++#define CONFIG_PCM_U24LE_DEMUXER 0 ++#define CONFIG_PCM_U16BE_DEMUXER 0 ++#define CONFIG_PCM_U16LE_DEMUXER 0 ++#define CONFIG_PCM_U8_DEMUXER 0 ++#define CONFIG_PJS_DEMUXER 0 ++#define CONFIG_PMP_DEMUXER 0 ++#define CONFIG_PVA_DEMUXER 0 ++#define CONFIG_PVF_DEMUXER 0 ++#define CONFIG_QCP_DEMUXER 0 ++#define CONFIG_R3D_DEMUXER 0 ++#define CONFIG_RAWVIDEO_DEMUXER 0 ++#define CONFIG_REALTEXT_DEMUXER 0 ++#define CONFIG_REDSPARK_DEMUXER 0 ++#define CONFIG_RL2_DEMUXER 0 ++#define CONFIG_RM_DEMUXER 0 ++#define CONFIG_ROQ_DEMUXER 0 ++#define CONFIG_RPL_DEMUXER 0 ++#define CONFIG_RSD_DEMUXER 0 ++#define CONFIG_RSO_DEMUXER 0 ++#define CONFIG_RTP_DEMUXER 0 ++#define CONFIG_RTSP_DEMUXER 0 ++#define CONFIG_S337M_DEMUXER 0 ++#define CONFIG_SAMI_DEMUXER 0 ++#define CONFIG_SAP_DEMUXER 0 ++#define CONFIG_SBC_DEMUXER 0 ++#define CONFIG_SBG_DEMUXER 0 ++#define CONFIG_SCC_DEMUXER 0 ++#define CONFIG_SDP_DEMUXER 0 ++#define CONFIG_SDR2_DEMUXER 0 ++#define CONFIG_SDS_DEMUXER 0 ++#define CONFIG_SDX_DEMUXER 0 ++#define CONFIG_SEGAFILM_DEMUXER 0 ++#define CONFIG_SER_DEMUXER 0 ++#define CONFIG_SHORTEN_DEMUXER 
0 ++#define CONFIG_SIFF_DEMUXER 0 ++#define CONFIG_SLN_DEMUXER 0 ++#define CONFIG_SMACKER_DEMUXER 0 ++#define CONFIG_SMJPEG_DEMUXER 0 ++#define CONFIG_SMUSH_DEMUXER 0 ++#define CONFIG_SOL_DEMUXER 0 ++#define CONFIG_SOX_DEMUXER 0 ++#define CONFIG_SPDIF_DEMUXER 0 ++#define CONFIG_SRT_DEMUXER 0 ++#define CONFIG_STR_DEMUXER 0 ++#define CONFIG_STL_DEMUXER 0 ++#define CONFIG_SUBVIEWER1_DEMUXER 0 ++#define CONFIG_SUBVIEWER_DEMUXER 0 ++#define CONFIG_SUP_DEMUXER 0 ++#define CONFIG_SVAG_DEMUXER 0 ++#define CONFIG_SWF_DEMUXER 0 ++#define CONFIG_TAK_DEMUXER 0 ++#define CONFIG_TEDCAPTIONS_DEMUXER 0 ++#define CONFIG_THP_DEMUXER 0 ++#define CONFIG_THREEDOSTR_DEMUXER 0 ++#define CONFIG_TIERTEXSEQ_DEMUXER 0 ++#define CONFIG_TMV_DEMUXER 0 ++#define CONFIG_TRUEHD_DEMUXER 0 ++#define CONFIG_TTA_DEMUXER 0 ++#define CONFIG_TXD_DEMUXER 0 ++#define CONFIG_TTY_DEMUXER 0 ++#define CONFIG_TY_DEMUXER 0 ++#define CONFIG_V210_DEMUXER 0 ++#define CONFIG_V210X_DEMUXER 0 ++#define CONFIG_VAG_DEMUXER 0 ++#define CONFIG_VC1_DEMUXER 0 ++#define CONFIG_VC1T_DEMUXER 0 ++#define CONFIG_VIVIDAS_DEMUXER 0 ++#define CONFIG_VIVO_DEMUXER 0 ++#define CONFIG_VMD_DEMUXER 0 ++#define CONFIG_VOBSUB_DEMUXER 0 ++#define CONFIG_VOC_DEMUXER 0 ++#define CONFIG_VPK_DEMUXER 0 ++#define CONFIG_VPLAYER_DEMUXER 0 ++#define CONFIG_VQF_DEMUXER 0 ++#define CONFIG_W64_DEMUXER 0 ++#define CONFIG_WAV_DEMUXER 1 ++#define CONFIG_WC3_DEMUXER 0 ++#define CONFIG_WEBM_DASH_MANIFEST_DEMUXER 0 ++#define CONFIG_WEBVTT_DEMUXER 0 ++#define CONFIG_WSAUD_DEMUXER 0 ++#define CONFIG_WSD_DEMUXER 0 ++#define CONFIG_WSVQA_DEMUXER 0 ++#define CONFIG_WTV_DEMUXER 0 ++#define CONFIG_WVE_DEMUXER 0 ++#define CONFIG_WV_DEMUXER 0 ++#define CONFIG_XA_DEMUXER 0 ++#define CONFIG_XBIN_DEMUXER 0 ++#define CONFIG_XMV_DEMUXER 0 ++#define CONFIG_XVAG_DEMUXER 0 ++#define CONFIG_XWMA_DEMUXER 0 ++#define CONFIG_YOP_DEMUXER 0 ++#define CONFIG_YUV4MPEGPIPE_DEMUXER 0 ++#define CONFIG_IMAGE_BMP_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_DDS_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_DPX_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_EXR_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_GIF_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_J2K_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_JPEG_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_JPEGLS_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PAM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PBM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PCX_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PGMYUV_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PGM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PICTOR_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PNG_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PPM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PSD_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_QDRAW_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_SGI_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_SVG_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_SUNRAST_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_TIFF_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_WEBP_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_XPM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_XWD_PIPE_DEMUXER 0 ++#define CONFIG_LIBGME_DEMUXER 0 ++#define CONFIG_LIBMODPLUG_DEMUXER 0 ++#define CONFIG_LIBOPENMPT_DEMUXER 0 ++#define CONFIG_VAPOURSYNTH_DEMUXER 0 ++#define CONFIG_A64_MUXER 0 ++#define CONFIG_AC3_MUXER 0 ++#define CONFIG_ADTS_MUXER 0 ++#define CONFIG_ADX_MUXER 0 ++#define CONFIG_AIFF_MUXER 0 ++#define CONFIG_AMR_MUXER 0 ++#define CONFIG_APNG_MUXER 0 ++#define CONFIG_APTX_MUXER 0 ++#define CONFIG_APTX_HD_MUXER 0 ++#define CONFIG_ASF_MUXER 0 ++#define CONFIG_ASS_MUXER 0 ++#define CONFIG_AST_MUXER 0 ++#define CONFIG_ASF_STREAM_MUXER 0 ++#define CONFIG_AU_MUXER 0 ++#define 
CONFIG_AVI_MUXER 0 ++#define CONFIG_AVM2_MUXER 0 ++#define CONFIG_AVS2_MUXER 0 ++#define CONFIG_BIT_MUXER 0 ++#define CONFIG_CAF_MUXER 0 ++#define CONFIG_CAVSVIDEO_MUXER 0 ++#define CONFIG_CODEC2_MUXER 0 ++#define CONFIG_CODEC2RAW_MUXER 0 ++#define CONFIG_CRC_MUXER 0 ++#define CONFIG_DASH_MUXER 0 ++#define CONFIG_DATA_MUXER 0 ++#define CONFIG_DAUD_MUXER 0 ++#define CONFIG_DIRAC_MUXER 0 ++#define CONFIG_DNXHD_MUXER 0 ++#define CONFIG_DTS_MUXER 0 ++#define CONFIG_DV_MUXER 0 ++#define CONFIG_EAC3_MUXER 0 ++#define CONFIG_F4V_MUXER 0 ++#define CONFIG_FFMETADATA_MUXER 0 ++#define CONFIG_FIFO_MUXER 0 ++#define CONFIG_FIFO_TEST_MUXER 0 ++#define CONFIG_FILMSTRIP_MUXER 0 ++#define CONFIG_FITS_MUXER 0 ++#define CONFIG_FLAC_MUXER 0 ++#define CONFIG_FLV_MUXER 0 ++#define CONFIG_FRAMECRC_MUXER 0 ++#define CONFIG_FRAMEHASH_MUXER 0 ++#define CONFIG_FRAMEMD5_MUXER 0 ++#define CONFIG_G722_MUXER 0 ++#define CONFIG_G723_1_MUXER 0 ++#define CONFIG_G726_MUXER 0 ++#define CONFIG_G726LE_MUXER 0 ++#define CONFIG_GIF_MUXER 0 ++#define CONFIG_GSM_MUXER 0 ++#define CONFIG_GXF_MUXER 0 ++#define CONFIG_H261_MUXER 0 ++#define CONFIG_H263_MUXER 0 ++#define CONFIG_H264_MUXER 0 ++#define CONFIG_HASH_MUXER 0 ++#define CONFIG_HDS_MUXER 0 ++#define CONFIG_HEVC_MUXER 0 ++#define CONFIG_HLS_MUXER 0 ++#define CONFIG_ICO_MUXER 0 ++#define CONFIG_ILBC_MUXER 0 ++#define CONFIG_IMAGE2_MUXER 0 ++#define CONFIG_IMAGE2PIPE_MUXER 0 ++#define CONFIG_IPOD_MUXER 0 ++#define CONFIG_IRCAM_MUXER 0 ++#define CONFIG_ISMV_MUXER 0 ++#define CONFIG_IVF_MUXER 0 ++#define CONFIG_JACOSUB_MUXER 0 ++#define CONFIG_LATM_MUXER 0 ++#define CONFIG_LRC_MUXER 0 ++#define CONFIG_M4V_MUXER 0 ++#define CONFIG_MD5_MUXER 0 ++#define CONFIG_MATROSKA_MUXER 0 ++#define CONFIG_MATROSKA_AUDIO_MUXER 0 ++#define CONFIG_MICRODVD_MUXER 0 ++#define CONFIG_MJPEG_MUXER 0 ++#define CONFIG_MLP_MUXER 0 ++#define CONFIG_MMF_MUXER 0 ++#define CONFIG_MOV_MUXER 0 ++#define CONFIG_MP2_MUXER 0 ++#define CONFIG_MP3_MUXER 0 ++#define CONFIG_MP4_MUXER 0 ++#define CONFIG_MPEG1SYSTEM_MUXER 0 ++#define CONFIG_MPEG1VCD_MUXER 0 ++#define CONFIG_MPEG1VIDEO_MUXER 0 ++#define CONFIG_MPEG2DVD_MUXER 0 ++#define CONFIG_MPEG2SVCD_MUXER 0 ++#define CONFIG_MPEG2VIDEO_MUXER 0 ++#define CONFIG_MPEG2VOB_MUXER 0 ++#define CONFIG_MPEGTS_MUXER 0 ++#define CONFIG_MPJPEG_MUXER 0 ++#define CONFIG_MXF_MUXER 0 ++#define CONFIG_MXF_D10_MUXER 0 ++#define CONFIG_MXF_OPATOM_MUXER 0 ++#define CONFIG_NULL_MUXER 0 ++#define CONFIG_NUT_MUXER 0 ++#define CONFIG_OGA_MUXER 0 ++#define CONFIG_OGG_MUXER 0 ++#define CONFIG_OGV_MUXER 0 ++#define CONFIG_OMA_MUXER 0 ++#define CONFIG_OPUS_MUXER 0 ++#define CONFIG_PCM_ALAW_MUXER 0 ++#define CONFIG_PCM_MULAW_MUXER 0 ++#define CONFIG_PCM_VIDC_MUXER 0 ++#define CONFIG_PCM_F64BE_MUXER 0 ++#define CONFIG_PCM_F64LE_MUXER 0 ++#define CONFIG_PCM_F32BE_MUXER 0 ++#define CONFIG_PCM_F32LE_MUXER 0 ++#define CONFIG_PCM_S32BE_MUXER 0 ++#define CONFIG_PCM_S32LE_MUXER 0 ++#define CONFIG_PCM_S24BE_MUXER 0 ++#define CONFIG_PCM_S24LE_MUXER 0 ++#define CONFIG_PCM_S16BE_MUXER 0 ++#define CONFIG_PCM_S16LE_MUXER 0 ++#define CONFIG_PCM_S8_MUXER 0 ++#define CONFIG_PCM_U32BE_MUXER 0 ++#define CONFIG_PCM_U32LE_MUXER 0 ++#define CONFIG_PCM_U24BE_MUXER 0 ++#define CONFIG_PCM_U24LE_MUXER 0 ++#define CONFIG_PCM_U16BE_MUXER 0 ++#define CONFIG_PCM_U16LE_MUXER 0 ++#define CONFIG_PCM_U8_MUXER 0 ++#define CONFIG_PSP_MUXER 0 ++#define CONFIG_RAWVIDEO_MUXER 0 ++#define CONFIG_RM_MUXER 0 ++#define CONFIG_ROQ_MUXER 0 ++#define CONFIG_RSO_MUXER 0 ++#define CONFIG_RTP_MUXER 0 ++#define CONFIG_RTP_MPEGTS_MUXER 0 
++#define CONFIG_RTSP_MUXER 0 ++#define CONFIG_SAP_MUXER 0 ++#define CONFIG_SBC_MUXER 0 ++#define CONFIG_SCC_MUXER 0 ++#define CONFIG_SEGAFILM_MUXER 0 ++#define CONFIG_SEGMENT_MUXER 0 ++#define CONFIG_STREAM_SEGMENT_MUXER 0 ++#define CONFIG_SINGLEJPEG_MUXER 0 ++#define CONFIG_SMJPEG_MUXER 0 ++#define CONFIG_SMOOTHSTREAMING_MUXER 0 ++#define CONFIG_SOX_MUXER 0 ++#define CONFIG_SPX_MUXER 0 ++#define CONFIG_SPDIF_MUXER 0 ++#define CONFIG_SRT_MUXER 0 ++#define CONFIG_STREAMHASH_MUXER 0 ++#define CONFIG_SUP_MUXER 0 ++#define CONFIG_SWF_MUXER 0 ++#define CONFIG_TEE_MUXER 0 ++#define CONFIG_TG2_MUXER 0 ++#define CONFIG_TGP_MUXER 0 ++#define CONFIG_MKVTIMESTAMP_V2_MUXER 0 ++#define CONFIG_TRUEHD_MUXER 0 ++#define CONFIG_TTA_MUXER 0 ++#define CONFIG_UNCODEDFRAMECRC_MUXER 0 ++#define CONFIG_VC1_MUXER 0 ++#define CONFIG_VC1T_MUXER 0 ++#define CONFIG_VOC_MUXER 0 ++#define CONFIG_W64_MUXER 0 ++#define CONFIG_WAV_MUXER 0 ++#define CONFIG_WEBM_MUXER 0 ++#define CONFIG_WEBM_DASH_MANIFEST_MUXER 0 ++#define CONFIG_WEBM_CHUNK_MUXER 0 ++#define CONFIG_WEBP_MUXER 0 ++#define CONFIG_WEBVTT_MUXER 0 ++#define CONFIG_WTV_MUXER 0 ++#define CONFIG_WV_MUXER 0 ++#define CONFIG_YUV4MPEGPIPE_MUXER 0 ++#define CONFIG_CHROMAPRINT_MUXER 0 ++#define CONFIG_ASYNC_PROTOCOL 0 ++#define CONFIG_BLURAY_PROTOCOL 0 ++#define CONFIG_CACHE_PROTOCOL 0 ++#define CONFIG_CONCAT_PROTOCOL 0 ++#define CONFIG_CRYPTO_PROTOCOL 0 ++#define CONFIG_DATA_PROTOCOL 0 ++#define CONFIG_FFRTMPCRYPT_PROTOCOL 0 ++#define CONFIG_FFRTMPHTTP_PROTOCOL 0 ++#define CONFIG_FILE_PROTOCOL 0 ++#define CONFIG_FTP_PROTOCOL 0 ++#define CONFIG_GOPHER_PROTOCOL 0 ++#define CONFIG_HLS_PROTOCOL 0 ++#define CONFIG_HTTP_PROTOCOL 0 ++#define CONFIG_HTTPPROXY_PROTOCOL 0 ++#define CONFIG_HTTPS_PROTOCOL 0 ++#define CONFIG_ICECAST_PROTOCOL 0 ++#define CONFIG_MMSH_PROTOCOL 0 ++#define CONFIG_MMST_PROTOCOL 0 ++#define CONFIG_MD5_PROTOCOL 0 ++#define CONFIG_PIPE_PROTOCOL 0 ++#define CONFIG_PROMPEG_PROTOCOL 0 ++#define CONFIG_RTMP_PROTOCOL 0 ++#define CONFIG_RTMPE_PROTOCOL 0 ++#define CONFIG_RTMPS_PROTOCOL 0 ++#define CONFIG_RTMPT_PROTOCOL 0 ++#define CONFIG_RTMPTE_PROTOCOL 0 ++#define CONFIG_RTMPTS_PROTOCOL 0 ++#define CONFIG_RTP_PROTOCOL 0 ++#define CONFIG_SCTP_PROTOCOL 0 ++#define CONFIG_SRTP_PROTOCOL 0 ++#define CONFIG_SUBFILE_PROTOCOL 0 ++#define CONFIG_TEE_PROTOCOL 0 ++#define CONFIG_TCP_PROTOCOL 0 ++#define CONFIG_TLS_PROTOCOL 0 ++#define CONFIG_UDP_PROTOCOL 0 ++#define CONFIG_UDPLITE_PROTOCOL 0 ++#define CONFIG_UNIX_PROTOCOL 0 ++#define CONFIG_LIBAMQP_PROTOCOL 0 ++#define CONFIG_LIBRTMP_PROTOCOL 0 ++#define CONFIG_LIBRTMPE_PROTOCOL 0 ++#define CONFIG_LIBRTMPS_PROTOCOL 0 ++#define CONFIG_LIBRTMPT_PROTOCOL 0 ++#define CONFIG_LIBRTMPTE_PROTOCOL 0 ++#define CONFIG_LIBSRT_PROTOCOL 0 ++#define CONFIG_LIBSSH_PROTOCOL 0 ++#define CONFIG_LIBSMBCLIENT_PROTOCOL 0 ++#define CONFIG_LIBZMQ_PROTOCOL 0 ++#endif /* FFMPEG_CONFIG_H */ +diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavcodec/bsf_list.c b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavcodec/bsf_list.c +new file mode 100644 +index 000000000..d31ece942 +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavcodec/bsf_list.c +@@ -0,0 +1,3 @@ ++static const AVBitStreamFilter * const bitstream_filters[] = { ++ &ff_null_bsf, ++ NULL }; +diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavcodec/codec_list.c 
b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavcodec/codec_list.c +new file mode 100644 +index 000000000..8f4b18388 +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavcodec/codec_list.c +@@ -0,0 +1,25 @@ ++static const AVCodec * const codec_list[] = { ++ &ff_h263_decoder, ++ &ff_h264_decoder, ++ &ff_mpeg4_decoder, ++ &ff_theora_decoder, ++ &ff_vp3_decoder, ++ &ff_vp8_decoder, ++ &ff_aac_decoder, ++ &ff_amrnb_decoder, ++ &ff_amrwb_decoder, ++ &ff_flac_decoder, ++ &ff_gsm_ms_decoder, ++ &ff_mp3_decoder, ++ &ff_vorbis_decoder, ++ &ff_pcm_alaw_decoder, ++ &ff_pcm_f32le_decoder, ++ &ff_pcm_mulaw_decoder, ++ &ff_pcm_s16be_decoder, ++ &ff_pcm_s16le_decoder, ++ &ff_pcm_s24be_decoder, ++ &ff_pcm_s24le_decoder, ++ &ff_pcm_s32le_decoder, ++ &ff_pcm_u8_decoder, ++ &ff_libopus_decoder, ++ NULL }; +diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavcodec/parser_list.c b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavcodec/parser_list.c +new file mode 100644 +index 000000000..48dcf4122 +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavcodec/parser_list.c +@@ -0,0 +1,14 @@ ++static const AVCodecParser * const parser_list[] = { ++ &ff_aac_parser, ++ &ff_flac_parser, ++ &ff_gsm_parser, ++ &ff_h263_parser, ++ &ff_h264_parser, ++ &ff_mpeg4video_parser, ++ &ff_mpegaudio_parser, ++ &ff_opus_parser, ++ &ff_vorbis_parser, ++ &ff_vp3_parser, ++ &ff_vp8_parser, ++ &ff_vp9_parser, ++ NULL }; +diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavformat/demuxer_list.c b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavformat/demuxer_list.c +new file mode 100644 +index 000000000..0c96cf1ff +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavformat/demuxer_list.c +@@ -0,0 +1,11 @@ ++static const AVInputFormat * const demuxer_list[] = { ++ &ff_aac_demuxer, ++ &ff_amr_demuxer, ++ &ff_avi_demuxer, ++ &ff_flac_demuxer, ++ &ff_matroska_demuxer, ++ &ff_mov_demuxer, ++ &ff_mp3_demuxer, ++ &ff_ogg_demuxer, ++ &ff_wav_demuxer, ++ NULL }; +diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavformat/muxer_list.c b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavformat/muxer_list.c +new file mode 100644 +index 000000000..f36d9499c +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavformat/muxer_list.c +@@ -0,0 +1,2 @@ ++static const AVOutputFormat * const muxer_list[] = { ++ NULL }; +diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavformat/protocol_list.c b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavformat/protocol_list.c +new file mode 100644 +index 000000000..247e1e4c3 +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavformat/protocol_list.c +@@ -0,0 +1,2 @@ ++static const URLProtocol * const url_protocols[] = { ++ NULL }; +diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavutil/avconfig.h b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavutil/avconfig.h +new file mode 100644 +index 000000000..8558b3502 +--- /dev/null ++++ 
b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavutil/avconfig.h +@@ -0,0 +1,6 @@ ++/* Generated by ffmpeg configure */ ++#ifndef AVUTIL_AVCONFIG_H ++#define AVUTIL_AVCONFIG_H ++#define AV_HAVE_BIGENDIAN 0 ++#define AV_HAVE_FAST_UNALIGNED 0 ++#endif /* AVUTIL_AVCONFIG_H */ +diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavutil/ffversion.h b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavutil/ffversion.h +new file mode 100644 +index 000000000..31e5b5036 +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/ChromeOS/linux/la64/libavutil/ffversion.h +@@ -0,0 +1,5 @@ ++/* Automatically generated by version.sh, do not manually edit! */ ++#ifndef AVUTIL_FFVERSION_H ++#define AVUTIL_FFVERSION_H ++#define FFMPEG_VERSION "git-2020-06-16-23b2a15c25" ++#endif /* AVUTIL_FFVERSION_H */ +diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/config.h b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/config.h +new file mode 100644 +index 000000000..87f5fcb36 +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/config.h +@@ -0,0 +1,2589 @@ ++/* Automatically generated by configure - do not modify! */ ++#ifndef FFMPEG_CONFIG_H ++#define FFMPEG_CONFIG_H ++/* #define FFMPEG_CONFIGURATION "--disable-everything --disable-all --disable-doc --disable-htmlpages --disable-manpages --disable-podpages --disable-txtpages --disable-static --enable-avcodec --enable-avformat --enable-avutil --enable-fft --enable-rdft --enable-static --enable-libopus --disable-debug --disable-bzlib --disable-error-resilience --disable-iconv --disable-lzo --disable-network --disable-schannel --disable-sdl2 --disable-symver --disable-xlib --disable-zlib --disable-securetransport --disable-faan --disable-alsa --disable-autodetect --enable-decoder='vorbis,libopus,flac' --enable-decoder='pcm_u8,pcm_s16le,pcm_s24le,pcm_s32le,pcm_f32le,mp3' --enable-decoder='pcm_s16be,pcm_s24be,pcm_mulaw,pcm_alaw' --enable-demuxer='ogg,matroska,wav,flac,mp3,mov' --enable-parser='opus,vorbis,flac,mpegaudio,vp9' --extra-cflags=-I/mnt/chromium/src/third_party/opus/src/include --disable-linux-perf --x86asmexe=nasm --optflags='\"-O2\"' --enable-decoder='theora,vp8' --enable-parser='vp3,vp8' --target-os=linux --enable-pic --cc=clang --cxx=clang++ --ld=clang" -- elide long configuration string from binary */ ++#define FFMPEG_LICENSE "LGPL version 2.1 or later" ++#define CONFIG_THIS_YEAR 2020 ++#define FFMPEG_DATADIR "/usr/local/share/ffmpeg" ++#define AVCONV_DATADIR "/usr/local/share/ffmpeg" ++#define CC_IDENT "clang version 8.0.1" ++#define av_restrict restrict ++#define EXTERN_PREFIX "" ++#define EXTERN_ASM ++#define BUILDSUF "" ++#define SLIBSUF ".so" ++#define HAVE_MMX2 HAVE_MMXEXT ++#define SWS_MAX_FILTER_SIZE 256 ++#define ARCH_AARCH64 0 ++#define ARCH_ALPHA 0 ++#define ARCH_ARM 0 ++#define ARCH_AVR32 0 ++#define ARCH_AVR32_AP 0 ++#define ARCH_AVR32_UC 0 ++#define ARCH_BFIN 0 ++#define ARCH_IA64 0 ++#define ARCH_M68K 0 ++#define ARCH_MIPS 0 ++#define ARCH_MIPS64 0 ++#define ARCH_PARISC 0 ++#define ARCH_PPC 0 ++#define ARCH_PPC64 0 ++#define ARCH_S390 0 ++#define ARCH_SH4 0 ++#define ARCH_SPARC 0 ++#define ARCH_SPARC64 0 ++#define ARCH_TILEGX 0 ++#define ARCH_TILEPRO 0 ++#define ARCH_TOMI 0 ++#define ARCH_X86 0 ++#define ARCH_X86_32 0 ++#define ARCH_X86_64 0 ++#define HAVE_ARMV5TE 0 ++#define HAVE_ARMV6 0 ++#define 
HAVE_ARMV6T2 0 ++#define HAVE_ARMV8 0 ++#define HAVE_NEON 0 ++#define HAVE_VFP 0 ++#define HAVE_VFPV3 0 ++#define HAVE_SETEND 0 ++#define HAVE_ALTIVEC 0 ++#define HAVE_DCBZL 0 ++#define HAVE_LDBRX 0 ++#define HAVE_POWER8 0 ++#define HAVE_PPC4XX 0 ++#define HAVE_VSX 0 ++#define HAVE_AESNI 0 ++#define HAVE_AMD3DNOW 0 ++#define HAVE_AMD3DNOWEXT 0 ++#define HAVE_AVX 0 ++#define HAVE_AVX2 0 ++#define HAVE_AVX512 0 ++#define HAVE_FMA3 0 ++#define HAVE_FMA4 0 ++#define HAVE_MMX 0 ++#define HAVE_MMXEXT 0 ++#define HAVE_SSE 0 ++#define HAVE_SSE2 0 ++#define HAVE_SSE3 0 ++#define HAVE_SSE4 0 ++#define HAVE_SSE42 0 ++#define HAVE_SSSE3 0 ++#define HAVE_XOP 0 ++#define HAVE_CPUNOP 0 ++#define HAVE_I686 0 ++#define HAVE_MIPSFPU 0 ++#define HAVE_MIPS32R2 0 ++#define HAVE_MIPS32R5 0 ++#define HAVE_MIPS64R2 0 ++#define HAVE_MIPS32R6 0 ++#define HAVE_MIPS64R6 0 ++#define HAVE_MIPSDSP 0 ++#define HAVE_MIPSDSPR2 0 ++#define HAVE_MSA 0 ++#define HAVE_MSA2 0 ++#define HAVE_LOONGSON2 0 ++#define HAVE_LOONGSON3 0 ++#define HAVE_MMI 0 ++#define HAVE_ARMV5TE_EXTERNAL 0 ++#define HAVE_ARMV6_EXTERNAL 0 ++#define HAVE_ARMV6T2_EXTERNAL 0 ++#define HAVE_ARMV8_EXTERNAL 0 ++#define HAVE_NEON_EXTERNAL 0 ++#define HAVE_VFP_EXTERNAL 0 ++#define HAVE_VFPV3_EXTERNAL 0 ++#define HAVE_SETEND_EXTERNAL 0 ++#define HAVE_ALTIVEC_EXTERNAL 0 ++#define HAVE_DCBZL_EXTERNAL 0 ++#define HAVE_LDBRX_EXTERNAL 0 ++#define HAVE_POWER8_EXTERNAL 0 ++#define HAVE_PPC4XX_EXTERNAL 0 ++#define HAVE_VSX_EXTERNAL 0 ++#define HAVE_AESNI_EXTERNAL 0 ++#define HAVE_AMD3DNOW_EXTERNAL 0 ++#define HAVE_AMD3DNOWEXT_EXTERNAL 0 ++#define HAVE_AVX_EXTERNAL 0 ++#define HAVE_AVX2_EXTERNAL 0 ++#define HAVE_AVX512_EXTERNAL 0 ++#define HAVE_FMA3_EXTERNAL 0 ++#define HAVE_FMA4_EXTERNAL 0 ++#define HAVE_MMX_EXTERNAL 0 ++#define HAVE_MMXEXT_EXTERNAL 0 ++#define HAVE_SSE_EXTERNAL 0 ++#define HAVE_SSE2_EXTERNAL 0 ++#define HAVE_SSE3_EXTERNAL 0 ++#define HAVE_SSE4_EXTERNAL 0 ++#define HAVE_SSE42_EXTERNAL 0 ++#define HAVE_SSSE3_EXTERNAL 0 ++#define HAVE_XOP_EXTERNAL 0 ++#define HAVE_CPUNOP_EXTERNAL 0 ++#define HAVE_I686_EXTERNAL 0 ++#define HAVE_MIPSFPU_EXTERNAL 0 ++#define HAVE_MIPS32R2_EXTERNAL 0 ++#define HAVE_MIPS32R5_EXTERNAL 0 ++#define HAVE_MIPS64R2_EXTERNAL 0 ++#define HAVE_MIPS32R6_EXTERNAL 0 ++#define HAVE_MIPS64R6_EXTERNAL 0 ++#define HAVE_MIPSDSP_EXTERNAL 0 ++#define HAVE_MIPSDSPR2_EXTERNAL 0 ++#define HAVE_MSA_EXTERNAL 0 ++#define HAVE_MSA2_EXTERNAL 0 ++#define HAVE_LOONGSON2_EXTERNAL 0 ++#define HAVE_LOONGSON3_EXTERNAL 0 ++#define HAVE_MMI_EXTERNAL 0 ++#define HAVE_ARMV5TE_INLINE 0 ++#define HAVE_ARMV6_INLINE 0 ++#define HAVE_ARMV6T2_INLINE 0 ++#define HAVE_ARMV8_INLINE 0 ++#define HAVE_NEON_INLINE 0 ++#define HAVE_VFP_INLINE 0 ++#define HAVE_VFPV3_INLINE 0 ++#define HAVE_SETEND_INLINE 0 ++#define HAVE_ALTIVEC_INLINE 0 ++#define HAVE_DCBZL_INLINE 0 ++#define HAVE_LDBRX_INLINE 0 ++#define HAVE_POWER8_INLINE 0 ++#define HAVE_PPC4XX_INLINE 0 ++#define HAVE_VSX_INLINE 0 ++#define HAVE_AESNI_INLINE 0 ++#define HAVE_AMD3DNOW_INLINE 0 ++#define HAVE_AMD3DNOWEXT_INLINE 0 ++#define HAVE_AVX_INLINE 0 ++#define HAVE_AVX2_INLINE 0 ++#define HAVE_AVX512_INLINE 0 ++#define HAVE_FMA3_INLINE 0 ++#define HAVE_FMA4_INLINE 0 ++#define HAVE_MMX_INLINE 0 ++#define HAVE_MMXEXT_INLINE 0 ++#define HAVE_SSE_INLINE 0 ++#define HAVE_SSE2_INLINE 0 ++#define HAVE_SSE3_INLINE 0 ++#define HAVE_SSE4_INLINE 0 ++#define HAVE_SSE42_INLINE 0 ++#define HAVE_SSSE3_INLINE 0 ++#define HAVE_XOP_INLINE 0 ++#define HAVE_CPUNOP_INLINE 0 ++#define HAVE_I686_INLINE 0 ++#define HAVE_MIPSFPU_INLINE 0 
++#define HAVE_MIPS32R2_INLINE 0 ++#define HAVE_MIPS32R5_INLINE 0 ++#define HAVE_MIPS64R2_INLINE 0 ++#define HAVE_MIPS32R6_INLINE 0 ++#define HAVE_MIPS64R6_INLINE 0 ++#define HAVE_MIPSDSP_INLINE 0 ++#define HAVE_MIPSDSPR2_INLINE 0 ++#define HAVE_MSA_INLINE 0 ++#define HAVE_MSA2_INLINE 0 ++#define HAVE_LOONGSON2_INLINE 0 ++#define HAVE_LOONGSON3_INLINE 0 ++#define HAVE_MMI_INLINE 0 ++#define HAVE_ALIGNED_STACK 0 ++#define HAVE_FAST_64BIT 0 ++#define HAVE_FAST_CLZ 0 ++#define HAVE_FAST_CMOV 0 ++#define HAVE_LOCAL_ALIGNED 0 ++#define HAVE_SIMD_ALIGN_16 0 ++#define HAVE_SIMD_ALIGN_32 0 ++#define HAVE_SIMD_ALIGN_64 0 ++#define HAVE_ATOMIC_CAS_PTR 0 ++#define HAVE_MACHINE_RW_BARRIER 0 ++#define HAVE_MEMORYBARRIER 0 ++#define HAVE_MM_EMPTY 0 ++#define HAVE_RDTSC 0 ++#define HAVE_SEM_TIMEDWAIT 1 ++#define HAVE_SYNC_VAL_COMPARE_AND_SWAP 1 ++#define HAVE_CABS 0 ++#define HAVE_CEXP 0 ++#define HAVE_INLINE_ASM 1 ++#define HAVE_SYMVER 0 ++#define HAVE_X86ASM 0 ++#define HAVE_BIGENDIAN 0 ++#define HAVE_FAST_UNALIGNED 0 ++#define HAVE_ARPA_INET_H 0 ++#define HAVE_ASM_TYPES_H 1 ++#define HAVE_CDIO_PARANOIA_H 0 ++#define HAVE_CDIO_PARANOIA_PARANOIA_H 0 ++#define HAVE_CUDA_H 0 ++#define HAVE_DISPATCH_DISPATCH_H 0 ++#define HAVE_DEV_BKTR_IOCTL_BT848_H 0 ++#define HAVE_DEV_BKTR_IOCTL_METEOR_H 0 ++#define HAVE_DEV_IC_BT8XX_H 0 ++#define HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H 0 ++#define HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H 0 ++#define HAVE_DIRECT_H 0 ++#define HAVE_DIRENT_H 1 ++#define HAVE_DXGIDEBUG_H 0 ++#define HAVE_DXVA_H 0 ++#define HAVE_ES2_GL_H 0 ++#define HAVE_GSM_H 0 ++#define HAVE_IO_H 0 ++#define HAVE_LINUX_PERF_EVENT_H 1 ++#define HAVE_MACHINE_IOCTL_BT848_H 0 ++#define HAVE_MACHINE_IOCTL_METEOR_H 0 ++#define HAVE_MALLOC_H 1 ++#define HAVE_OPENCV2_CORE_CORE_C_H 0 ++#define HAVE_OPENGL_GL3_H 0 ++#define HAVE_POLL_H 1 ++#define HAVE_SYS_PARAM_H 1 ++#define HAVE_SYS_RESOURCE_H 1 ++#define HAVE_SYS_SELECT_H 1 ++#define HAVE_SYS_SOUNDCARD_H 1 ++#define HAVE_SYS_TIME_H 1 ++#define HAVE_SYS_UN_H 1 ++#define HAVE_SYS_VIDEOIO_H 0 ++#define HAVE_TERMIOS_H 1 ++#define HAVE_UDPLITE_H 0 ++#define HAVE_UNISTD_H 1 ++#define HAVE_VALGRIND_VALGRIND_H 0 /* #define HAVE_VALGRIND_VALGRIND_H 0 -- forced to 0. 
See https://crbug.com/590440 */ ++#define HAVE_WINDOWS_H 0 ++#define HAVE_WINSOCK2_H 0 ++#define HAVE_INTRINSICS_NEON 0 ++#define HAVE_ATANF 1 ++#define HAVE_ATAN2F 1 ++#define HAVE_CBRT 1 ++#define HAVE_CBRTF 1 ++#define HAVE_COPYSIGN 1 ++#define HAVE_COSF 1 ++#define HAVE_ERF 1 ++#define HAVE_EXP2 1 ++#define HAVE_EXP2F 1 ++#define HAVE_EXPF 1 ++#define HAVE_HYPOT 1 ++#define HAVE_ISFINITE 1 ++#define HAVE_ISINF 1 ++#define HAVE_ISNAN 1 ++#define HAVE_LDEXPF 1 ++#define HAVE_LLRINT 1 ++#define HAVE_LLRINTF 1 ++#define HAVE_LOG2 1 ++#define HAVE_LOG2F 1 ++#define HAVE_LOG10F 1 ++#define HAVE_LRINT 1 ++#define HAVE_LRINTF 1 ++#define HAVE_POWF 1 ++#define HAVE_RINT 1 ++#define HAVE_ROUND 1 ++#define HAVE_ROUNDF 1 ++#define HAVE_SINF 1 ++#define HAVE_TRUNC 1 ++#define HAVE_TRUNCF 1 ++#define HAVE_DOS_PATHS 0 ++#define HAVE_LIBC_MSVCRT 0 ++#define HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS 0 ++#define HAVE_SECTION_DATA_REL_RO 1 ++#define HAVE_THREADS 1 ++#define HAVE_UWP 0 ++#define HAVE_WINRT 0 ++#define HAVE_ACCESS 1 ++#define HAVE_ALIGNED_MALLOC 0 ++#define HAVE_ARC4RANDOM 0 ++#define HAVE_CLOCK_GETTIME 1 ++#define HAVE_CLOSESOCKET 0 ++#define HAVE_COMMANDLINETOARGVW 0 ++#define HAVE_FCNTL 1 ++#define HAVE_GETADDRINFO 0 ++#define HAVE_GETHRTIME 0 ++#define HAVE_GETOPT 1 ++#define HAVE_GETMODULEHANDLE 0 ++#define HAVE_GETPROCESSAFFINITYMASK 0 ++#define HAVE_GETPROCESSMEMORYINFO 0 ++#define HAVE_GETPROCESSTIMES 0 ++#define HAVE_GETRUSAGE 1 ++#define HAVE_GETSTDHANDLE 0 ++#define HAVE_GETSYSTEMTIMEASFILETIME 0 ++#define HAVE_GETTIMEOFDAY 1 ++#define HAVE_GLOB 1 ++#define HAVE_GLXGETPROCADDRESS 0 ++#define HAVE_GMTIME_R 1 ++#define HAVE_INET_ATON 0 ++#define HAVE_ISATTY 1 ++#define HAVE_KBHIT 0 ++#define HAVE_LOCALTIME_R 1 ++#define HAVE_LSTAT 1 ++#define HAVE_LZO1X_999_COMPRESS 0 ++#define HAVE_MACH_ABSOLUTE_TIME 0 ++#define HAVE_MAPVIEWOFFILE 0 ++#define HAVE_MEMALIGN 1 ++#define HAVE_MKSTEMP 1 ++#define HAVE_MMAP 1 ++#define HAVE_MPROTECT 1 ++#define HAVE_NANOSLEEP 1 ++#define HAVE_PEEKNAMEDPIPE 0 ++#define HAVE_POSIX_MEMALIGN 1 ++#define HAVE_PTHREAD_CANCEL 1 ++#define HAVE_SCHED_GETAFFINITY 1 ++#define HAVE_SECITEMIMPORT 0 ++#define HAVE_SETCONSOLETEXTATTRIBUTE 0 ++#define HAVE_SETCONSOLECTRLHANDLER 0 ++#define HAVE_SETDLLDIRECTORY 0 ++#define HAVE_SETMODE 0 ++#define HAVE_SETRLIMIT 1 ++#define HAVE_SLEEP 0 ++#define HAVE_STRERROR_R 1 ++#define HAVE_SYSCONF 1 ++#define HAVE_SYSCTL 0 /* #define HAVE_SYSCTL 1 -- forced to 0 for Fuchsia */ ++#define HAVE_USLEEP 1 ++#define HAVE_UTGETOSTYPEFROMSTRING 0 ++#define HAVE_VIRTUALALLOC 0 ++#define HAVE_WGLGETPROCADDRESS 0 ++#define HAVE_BCRYPT 0 ++#define HAVE_VAAPI_DRM 0 ++#define HAVE_VAAPI_X11 0 ++#define HAVE_VDPAU_X11 0 ++#define HAVE_PTHREADS 1 ++#define HAVE_OS2THREADS 0 ++#define HAVE_W32THREADS 0 ++#define HAVE_AS_ARCH_DIRECTIVE 0 ++#define HAVE_AS_DN_DIRECTIVE 0 ++#define HAVE_AS_FPU_DIRECTIVE 0 ++#define HAVE_AS_FUNC 0 ++#define HAVE_AS_OBJECT_ARCH 0 ++#define HAVE_ASM_MOD_Q 0 ++#define HAVE_BLOCKS_EXTENSION 0 ++#define HAVE_EBP_AVAILABLE 0 ++#define HAVE_EBX_AVAILABLE 0 ++#define HAVE_GNU_AS 0 ++#define HAVE_GNU_WINDRES 0 ++#define HAVE_IBM_ASM 0 ++#define HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS 0 ++#define HAVE_INLINE_ASM_LABELS 1 ++#define HAVE_INLINE_ASM_NONLOCAL_LABELS 1 ++#define HAVE_PRAGMA_DEPRECATED 1 ++#define HAVE_RSYNC_CONTIMEOUT 0 ++#define HAVE_SYMVER_ASM_LABEL 1 ++#define HAVE_SYMVER_GNU_ASM 1 ++#define HAVE_VFP_ARGS 0 ++#define HAVE_XFORM_ASM 0 ++#define HAVE_XMM_CLOBBERS 0 ++#define HAVE_KCMVIDEOCODECTYPE_HEVC 0 ++#define 
HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE 0 ++#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ 0 ++#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG 0 ++#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR 0 ++#define HAVE_SOCKLEN_T 0 ++#define HAVE_STRUCT_ADDRINFO 0 ++#define HAVE_STRUCT_GROUP_SOURCE_REQ 0 ++#define HAVE_STRUCT_IP_MREQ_SOURCE 0 ++#define HAVE_STRUCT_IPV6_MREQ 0 ++#define HAVE_STRUCT_MSGHDR_MSG_FLAGS 0 ++#define HAVE_STRUCT_POLLFD 0 ++#define HAVE_STRUCT_RUSAGE_RU_MAXRSS 1 ++#define HAVE_STRUCT_SCTP_EVENT_SUBSCRIBE 0 ++#define HAVE_STRUCT_SOCKADDR_IN6 0 ++#define HAVE_STRUCT_SOCKADDR_SA_LEN 0 ++#define HAVE_STRUCT_SOCKADDR_STORAGE 0 ++#define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 1 ++#define HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE 0 ++#define HAVE_MAKEINFO 1 ++#define HAVE_MAKEINFO_HTML 1 ++#define HAVE_OPENCL_D3D11 0 ++#define HAVE_OPENCL_DRM_ARM 0 ++#define HAVE_OPENCL_DRM_BEIGNET 0 ++#define HAVE_OPENCL_DXVA2 0 ++#define HAVE_OPENCL_VAAPI_BEIGNET 0 ++#define HAVE_OPENCL_VAAPI_INTEL_MEDIA 0 ++#define HAVE_PERL 1 ++#define HAVE_POD2MAN 1 ++#define HAVE_TEXI2HTML 0 ++#define CONFIG_DOC 0 ++#define CONFIG_HTMLPAGES 0 ++#define CONFIG_MANPAGES 0 ++#define CONFIG_PODPAGES 0 ++#define CONFIG_TXTPAGES 0 ++#define CONFIG_AVIO_LIST_DIR_EXAMPLE 1 ++#define CONFIG_AVIO_READING_EXAMPLE 1 ++#define CONFIG_DECODE_AUDIO_EXAMPLE 1 ++#define CONFIG_DECODE_VIDEO_EXAMPLE 1 ++#define CONFIG_DEMUXING_DECODING_EXAMPLE 1 ++#define CONFIG_ENCODE_AUDIO_EXAMPLE 1 ++#define CONFIG_ENCODE_VIDEO_EXAMPLE 1 ++#define CONFIG_EXTRACT_MVS_EXAMPLE 1 ++#define CONFIG_FILTER_AUDIO_EXAMPLE 0 ++#define CONFIG_FILTERING_AUDIO_EXAMPLE 0 ++#define CONFIG_FILTERING_VIDEO_EXAMPLE 0 ++#define CONFIG_HTTP_MULTICLIENT_EXAMPLE 1 ++#define CONFIG_HW_DECODE_EXAMPLE 1 ++#define CONFIG_METADATA_EXAMPLE 1 ++#define CONFIG_MUXING_EXAMPLE 0 ++#define CONFIG_QSVDEC_EXAMPLE 0 ++#define CONFIG_REMUXING_EXAMPLE 1 ++#define CONFIG_RESAMPLING_AUDIO_EXAMPLE 0 ++#define CONFIG_SCALING_VIDEO_EXAMPLE 0 ++#define CONFIG_TRANSCODE_AAC_EXAMPLE 0 ++#define CONFIG_TRANSCODING_EXAMPLE 0 ++#define CONFIG_VAAPI_ENCODE_EXAMPLE 0 ++#define CONFIG_VAAPI_TRANSCODE_EXAMPLE 0 ++#define CONFIG_AVISYNTH 0 ++#define CONFIG_FREI0R 0 ++#define CONFIG_LIBCDIO 0 ++#define CONFIG_LIBDAVS2 0 ++#define CONFIG_LIBRUBBERBAND 0 ++#define CONFIG_LIBVIDSTAB 0 ++#define CONFIG_LIBX264 0 ++#define CONFIG_LIBX265 0 ++#define CONFIG_LIBXAVS 0 ++#define CONFIG_LIBXAVS2 0 ++#define CONFIG_LIBXVID 0 ++#define CONFIG_DECKLINK 0 ++#define CONFIG_LIBFDK_AAC 0 ++#define CONFIG_OPENSSL 0 ++#define CONFIG_LIBTLS 0 ++#define CONFIG_GMP 0 ++#define CONFIG_LIBARIBB24 0 ++#define CONFIG_LIBLENSFUN 0 ++#define CONFIG_LIBOPENCORE_AMRNB 0 ++#define CONFIG_LIBOPENCORE_AMRWB 0 ++#define CONFIG_LIBVMAF 0 ++#define CONFIG_LIBVO_AMRWBENC 0 ++#define CONFIG_MBEDTLS 0 ++#define CONFIG_RKMPP 0 ++#define CONFIG_LIBSMBCLIENT 0 ++#define CONFIG_CHROMAPRINT 0 ++#define CONFIG_GCRYPT 0 ++#define CONFIG_GNUTLS 0 ++#define CONFIG_JNI 0 ++#define CONFIG_LADSPA 0 ++#define CONFIG_LIBAOM 0 ++#define CONFIG_LIBASS 0 ++#define CONFIG_LIBBLURAY 0 ++#define CONFIG_LIBBS2B 0 ++#define CONFIG_LIBCACA 0 ++#define CONFIG_LIBCELT 0 ++#define CONFIG_LIBCODEC2 0 ++#define CONFIG_LIBDAV1D 0 ++#define CONFIG_LIBDC1394 0 ++#define CONFIG_LIBDRM 0 ++#define CONFIG_LIBFLITE 0 ++#define CONFIG_LIBFONTCONFIG 0 ++#define CONFIG_LIBFREETYPE 0 ++#define CONFIG_LIBFRIBIDI 0 ++#define CONFIG_LIBGLSLANG 0 ++#define CONFIG_LIBGME 0 ++#define CONFIG_LIBGSM 0 ++#define CONFIG_LIBIEC61883 0 ++#define 
CONFIG_LIBILBC 0 ++#define CONFIG_LIBJACK 0 ++#define CONFIG_LIBKLVANC 0 ++#define CONFIG_LIBKVAZAAR 0 ++#define CONFIG_LIBMODPLUG 0 ++#define CONFIG_LIBMP3LAME 0 ++#define CONFIG_LIBMYSOFA 0 ++#define CONFIG_LIBOPENCV 0 ++#define CONFIG_LIBOPENH264 0 ++#define CONFIG_LIBOPENJPEG 0 ++#define CONFIG_LIBOPENMPT 0 ++#define CONFIG_LIBOPUS 1 ++#define CONFIG_LIBPULSE 0 ++#define CONFIG_LIBRABBITMQ 0 ++#define CONFIG_LIBRAV1E 0 ++#define CONFIG_LIBRSVG 0 ++#define CONFIG_LIBRTMP 0 ++#define CONFIG_LIBSHINE 0 ++#define CONFIG_LIBSMBCLIENT 0 ++#define CONFIG_LIBSNAPPY 0 ++#define CONFIG_LIBSOXR 0 ++#define CONFIG_LIBSPEEX 0 ++#define CONFIG_LIBSRT 0 ++#define CONFIG_LIBSSH 0 ++#define CONFIG_LIBTENSORFLOW 0 ++#define CONFIG_LIBTESSERACT 0 ++#define CONFIG_LIBTHEORA 0 ++#define CONFIG_LIBTWOLAME 0 ++#define CONFIG_LIBV4L2 0 ++#define CONFIG_LIBVORBIS 0 ++#define CONFIG_LIBVPX 0 ++#define CONFIG_LIBWAVPACK 0 ++#define CONFIG_LIBWEBP 0 ++#define CONFIG_LIBXML2 0 ++#define CONFIG_LIBZIMG 0 ++#define CONFIG_LIBZMQ 0 ++#define CONFIG_LIBZVBI 0 ++#define CONFIG_LV2 0 ++#define CONFIG_MEDIACODEC 0 ++#define CONFIG_OPENAL 0 ++#define CONFIG_OPENGL 0 ++#define CONFIG_POCKETSPHINX 0 ++#define CONFIG_VAPOURSYNTH 0 ++#define CONFIG_ALSA 0 ++#define CONFIG_APPKIT 0 ++#define CONFIG_AVFOUNDATION 0 ++#define CONFIG_BZLIB 0 ++#define CONFIG_COREIMAGE 0 ++#define CONFIG_ICONV 0 ++#define CONFIG_LIBXCB 0 ++#define CONFIG_LIBXCB_SHM 0 ++#define CONFIG_LIBXCB_SHAPE 0 ++#define CONFIG_LIBXCB_XFIXES 0 ++#define CONFIG_LZMA 0 ++#define CONFIG_SCHANNEL 0 ++#define CONFIG_SDL2 0 ++#define CONFIG_SECURETRANSPORT 0 ++#define CONFIG_SNDIO 0 ++#define CONFIG_XLIB 0 ++#define CONFIG_ZLIB 0 ++#define CONFIG_CUDA_NVCC 0 ++#define CONFIG_CUDA_SDK 0 ++#define CONFIG_LIBNPP 0 ++#define CONFIG_LIBMFX 0 ++#define CONFIG_MMAL 0 ++#define CONFIG_OMX 0 ++#define CONFIG_OPENCL 0 ++#define CONFIG_VULKAN 0 ++#define CONFIG_AMF 0 ++#define CONFIG_AUDIOTOOLBOX 0 ++#define CONFIG_CRYSTALHD 0 ++#define CONFIG_CUDA 0 ++#define CONFIG_CUDA_LLVM 0 ++#define CONFIG_CUVID 0 ++#define CONFIG_D3D11VA 0 ++#define CONFIG_DXVA2 0 ++#define CONFIG_FFNVCODEC 0 ++#define CONFIG_NVDEC 0 ++#define CONFIG_NVENC 0 ++#define CONFIG_VAAPI 0 ++#define CONFIG_VDPAU 0 ++#define CONFIG_VIDEOTOOLBOX 0 ++#define CONFIG_V4L2_M2M 0 ++#define CONFIG_XVMC 0 ++#define CONFIG_FTRAPV 0 ++#define CONFIG_GRAY 0 ++#define CONFIG_HARDCODED_TABLES 0 ++#define CONFIG_OMX_RPI 0 ++#define CONFIG_RUNTIME_CPUDETECT 1 ++#define CONFIG_SAFE_BITSTREAM_READER 1 ++#define CONFIG_SHARED 0 ++#define CONFIG_SMALL 0 ++#define CONFIG_STATIC 1 ++#define CONFIG_SWSCALE_ALPHA 1 ++#define CONFIG_GPL 0 ++#define CONFIG_NONFREE 0 ++#define CONFIG_VERSION3 0 ++#define CONFIG_AVDEVICE 0 ++#define CONFIG_AVFILTER 0 ++#define CONFIG_SWSCALE 0 ++#define CONFIG_POSTPROC 0 ++#define CONFIG_AVFORMAT 1 ++#define CONFIG_AVCODEC 1 ++#define CONFIG_SWRESAMPLE 0 ++#define CONFIG_AVRESAMPLE 0 ++#define CONFIG_AVUTIL 1 ++#define CONFIG_FFPLAY 0 ++#define CONFIG_FFPROBE 0 ++#define CONFIG_FFMPEG 0 ++#define CONFIG_DCT 1 ++#define CONFIG_DWT 0 ++#define CONFIG_ERROR_RESILIENCE 0 ++#define CONFIG_FAAN 0 ++#define CONFIG_FAST_UNALIGNED 0 ++#define CONFIG_FFT 1 ++#define CONFIG_LSP 0 ++#define CONFIG_LZO 0 ++#define CONFIG_MDCT 1 ++#define CONFIG_PIXELUTILS 0 ++#define CONFIG_NETWORK 0 ++#define CONFIG_RDFT 1 ++#define CONFIG_AUTODETECT 0 ++#define CONFIG_FONTCONFIG 0 ++#define CONFIG_LARGE_TESTS 1 ++#define CONFIG_LINUX_PERF 0 ++#define CONFIG_MEMORY_POISONING 0 ++#define CONFIG_NEON_CLOBBER_TEST 0 ++#define 
CONFIG_OSSFUZZ 0 ++#define CONFIG_PIC 1 ++#define CONFIG_THUMB 0 ++#define CONFIG_VALGRIND_BACKTRACE 0 ++#define CONFIG_XMM_CLOBBER_TEST 0 ++#define CONFIG_BSFS 1 ++#define CONFIG_DECODERS 1 ++#define CONFIG_ENCODERS 0 ++#define CONFIG_HWACCELS 0 ++#define CONFIG_PARSERS 1 ++#define CONFIG_INDEVS 0 ++#define CONFIG_OUTDEVS 0 ++#define CONFIG_FILTERS 0 ++#define CONFIG_DEMUXERS 1 ++#define CONFIG_MUXERS 0 ++#define CONFIG_PROTOCOLS 0 ++#define CONFIG_AANDCTTABLES 0 ++#define CONFIG_AC3DSP 0 ++#define CONFIG_ADTS_HEADER 0 ++#define CONFIG_AUDIO_FRAME_QUEUE 0 ++#define CONFIG_AUDIODSP 0 ++#define CONFIG_BLOCKDSP 0 ++#define CONFIG_BSWAPDSP 0 ++#define CONFIG_CABAC 0 ++#define CONFIG_CBS 0 ++#define CONFIG_CBS_AV1 0 ++#define CONFIG_CBS_H264 0 ++#define CONFIG_CBS_H265 0 ++#define CONFIG_CBS_JPEG 0 ++#define CONFIG_CBS_MPEG2 0 ++#define CONFIG_CBS_VP9 0 ++#define CONFIG_DIRAC_PARSE 1 ++#define CONFIG_DNN 0 ++#define CONFIG_DVPROFILE 0 ++#define CONFIG_EXIF 0 ++#define CONFIG_FAANDCT 0 ++#define CONFIG_FAANIDCT 0 ++#define CONFIG_FDCTDSP 0 ++#define CONFIG_FLACDSP 1 ++#define CONFIG_FMTCONVERT 0 ++#define CONFIG_FRAME_THREAD_ENCODER 0 ++#define CONFIG_G722DSP 0 ++#define CONFIG_GOLOMB 1 ++#define CONFIG_GPLV3 0 ++#define CONFIG_H263DSP 0 ++#define CONFIG_H264CHROMA 0 ++#define CONFIG_H264DSP 0 ++#define CONFIG_H264PARSE 0 ++#define CONFIG_H264PRED 1 ++#define CONFIG_H264QPEL 0 ++#define CONFIG_HEVCPARSE 0 ++#define CONFIG_HPELDSP 1 ++#define CONFIG_HUFFMAN 0 ++#define CONFIG_HUFFYUVDSP 0 ++#define CONFIG_HUFFYUVENCDSP 0 ++#define CONFIG_IDCTDSP 0 ++#define CONFIG_IIRFILTER 0 ++#define CONFIG_MDCT15 0 ++#define CONFIG_INTRAX8 0 ++#define CONFIG_ISO_MEDIA 1 ++#define CONFIG_IVIDSP 0 ++#define CONFIG_JPEGTABLES 0 ++#define CONFIG_LGPLV3 0 ++#define CONFIG_LIBX262 0 ++#define CONFIG_LLAUDDSP 0 ++#define CONFIG_LLVIDDSP 0 ++#define CONFIG_LLVIDENCDSP 0 ++#define CONFIG_LPC 0 ++#define CONFIG_LZF 0 ++#define CONFIG_ME_CMP 0 ++#define CONFIG_MPEG_ER 0 ++#define CONFIG_MPEGAUDIO 1 ++#define CONFIG_MPEGAUDIODSP 1 ++#define CONFIG_MPEGAUDIOHEADER 1 ++#define CONFIG_MPEGVIDEO 0 ++#define CONFIG_MPEGVIDEOENC 0 ++#define CONFIG_MSS34DSP 0 ++#define CONFIG_PIXBLOCKDSP 0 ++#define CONFIG_QPELDSP 0 ++#define CONFIG_QSV 0 ++#define CONFIG_QSVDEC 0 ++#define CONFIG_QSVENC 0 ++#define CONFIG_QSVVPP 0 ++#define CONFIG_RANGECODER 0 ++#define CONFIG_RIFFDEC 1 ++#define CONFIG_RIFFENC 0 ++#define CONFIG_RTPDEC 0 ++#define CONFIG_RTPENC_CHAIN 0 ++#define CONFIG_RV34DSP 0 ++#define CONFIG_SCENE_SAD 0 ++#define CONFIG_SINEWIN 0 ++#define CONFIG_SNAPPY 0 ++#define CONFIG_SRTP 0 ++#define CONFIG_STARTCODE 0 ++#define CONFIG_TEXTUREDSP 0 ++#define CONFIG_TEXTUREDSPENC 0 ++#define CONFIG_TPELDSP 0 ++#define CONFIG_VAAPI_1 0 ++#define CONFIG_VAAPI_ENCODE 0 ++#define CONFIG_VC1DSP 0 ++#define CONFIG_VIDEODSP 1 ++#define CONFIG_VP3DSP 1 ++#define CONFIG_VP56DSP 0 ++#define CONFIG_VP8DSP 1 ++#define CONFIG_WMA_FREQS 0 ++#define CONFIG_WMV2DSP 0 ++#define CONFIG_AAC_ADTSTOASC_BSF 0 ++#define CONFIG_AV1_FRAME_MERGE_BSF 0 ++#define CONFIG_AV1_FRAME_SPLIT_BSF 0 ++#define CONFIG_AV1_METADATA_BSF 0 ++#define CONFIG_CHOMP_BSF 0 ++#define CONFIG_DUMP_EXTRADATA_BSF 0 ++#define CONFIG_DCA_CORE_BSF 0 ++#define CONFIG_EAC3_CORE_BSF 0 ++#define CONFIG_EXTRACT_EXTRADATA_BSF 0 ++#define CONFIG_FILTER_UNITS_BSF 0 ++#define CONFIG_H264_METADATA_BSF 0 ++#define CONFIG_H264_MP4TOANNEXB_BSF 0 ++#define CONFIG_H264_REDUNDANT_PPS_BSF 0 ++#define CONFIG_HAPQA_EXTRACT_BSF 0 ++#define CONFIG_HEVC_METADATA_BSF 0 ++#define CONFIG_HEVC_MP4TOANNEXB_BSF 0 
++#define CONFIG_IMX_DUMP_HEADER_BSF 0 ++#define CONFIG_MJPEG2JPEG_BSF 0 ++#define CONFIG_MJPEGA_DUMP_HEADER_BSF 0 ++#define CONFIG_MP3_HEADER_DECOMPRESS_BSF 0 ++#define CONFIG_MPEG2_METADATA_BSF 0 ++#define CONFIG_MPEG4_UNPACK_BFRAMES_BSF 0 ++#define CONFIG_MOV2TEXTSUB_BSF 0 ++#define CONFIG_NOISE_BSF 0 ++#define CONFIG_NULL_BSF 1 ++#define CONFIG_PRORES_METADATA_BSF 0 ++#define CONFIG_REMOVE_EXTRADATA_BSF 0 ++#define CONFIG_TEXT2MOVSUB_BSF 0 ++#define CONFIG_TRACE_HEADERS_BSF 0 ++#define CONFIG_TRUEHD_CORE_BSF 0 ++#define CONFIG_VP9_METADATA_BSF 0 ++#define CONFIG_VP9_RAW_REORDER_BSF 0 ++#define CONFIG_VP9_SUPERFRAME_BSF 0 ++#define CONFIG_VP9_SUPERFRAME_SPLIT_BSF 0 ++#define CONFIG_AASC_DECODER 0 ++#define CONFIG_AIC_DECODER 0 ++#define CONFIG_ALIAS_PIX_DECODER 0 ++#define CONFIG_AGM_DECODER 0 ++#define CONFIG_AMV_DECODER 0 ++#define CONFIG_ANM_DECODER 0 ++#define CONFIG_ANSI_DECODER 0 ++#define CONFIG_APNG_DECODER 0 ++#define CONFIG_ARBC_DECODER 0 ++#define CONFIG_ASV1_DECODER 0 ++#define CONFIG_ASV2_DECODER 0 ++#define CONFIG_AURA_DECODER 0 ++#define CONFIG_AURA2_DECODER 0 ++#define CONFIG_AVRP_DECODER 0 ++#define CONFIG_AVRN_DECODER 0 ++#define CONFIG_AVS_DECODER 0 ++#define CONFIG_AVUI_DECODER 0 ++#define CONFIG_AYUV_DECODER 0 ++#define CONFIG_BETHSOFTVID_DECODER 0 ++#define CONFIG_BFI_DECODER 0 ++#define CONFIG_BINK_DECODER 0 ++#define CONFIG_BITPACKED_DECODER 0 ++#define CONFIG_BMP_DECODER 0 ++#define CONFIG_BMV_VIDEO_DECODER 0 ++#define CONFIG_BRENDER_PIX_DECODER 0 ++#define CONFIG_C93_DECODER 0 ++#define CONFIG_CAVS_DECODER 0 ++#define CONFIG_CDGRAPHICS_DECODER 0 ++#define CONFIG_CDTOONS_DECODER 0 ++#define CONFIG_CDXL_DECODER 0 ++#define CONFIG_CFHD_DECODER 0 ++#define CONFIG_CINEPAK_DECODER 0 ++#define CONFIG_CLEARVIDEO_DECODER 0 ++#define CONFIG_CLJR_DECODER 0 ++#define CONFIG_CLLC_DECODER 0 ++#define CONFIG_COMFORTNOISE_DECODER 0 ++#define CONFIG_CPIA_DECODER 0 ++#define CONFIG_CSCD_DECODER 0 ++#define CONFIG_CYUV_DECODER 0 ++#define CONFIG_DDS_DECODER 0 ++#define CONFIG_DFA_DECODER 0 ++#define CONFIG_DIRAC_DECODER 0 ++#define CONFIG_DNXHD_DECODER 0 ++#define CONFIG_DPX_DECODER 0 ++#define CONFIG_DSICINVIDEO_DECODER 0 ++#define CONFIG_DVAUDIO_DECODER 0 ++#define CONFIG_DVVIDEO_DECODER 0 ++#define CONFIG_DXA_DECODER 0 ++#define CONFIG_DXTORY_DECODER 0 ++#define CONFIG_DXV_DECODER 0 ++#define CONFIG_EACMV_DECODER 0 ++#define CONFIG_EAMAD_DECODER 0 ++#define CONFIG_EATGQ_DECODER 0 ++#define CONFIG_EATGV_DECODER 0 ++#define CONFIG_EATQI_DECODER 0 ++#define CONFIG_EIGHTBPS_DECODER 0 ++#define CONFIG_EIGHTSVX_EXP_DECODER 0 ++#define CONFIG_EIGHTSVX_FIB_DECODER 0 ++#define CONFIG_ESCAPE124_DECODER 0 ++#define CONFIG_ESCAPE130_DECODER 0 ++#define CONFIG_EXR_DECODER 0 ++#define CONFIG_FFV1_DECODER 0 ++#define CONFIG_FFVHUFF_DECODER 0 ++#define CONFIG_FIC_DECODER 0 ++#define CONFIG_FITS_DECODER 0 ++#define CONFIG_FLASHSV_DECODER 0 ++#define CONFIG_FLASHSV2_DECODER 0 ++#define CONFIG_FLIC_DECODER 0 ++#define CONFIG_FLV_DECODER 0 ++#define CONFIG_FMVC_DECODER 0 ++#define CONFIG_FOURXM_DECODER 0 ++#define CONFIG_FRAPS_DECODER 0 ++#define CONFIG_FRWU_DECODER 0 ++#define CONFIG_G2M_DECODER 0 ++#define CONFIG_GDV_DECODER 0 ++#define CONFIG_GIF_DECODER 0 ++#define CONFIG_H261_DECODER 0 ++#define CONFIG_H263_DECODER 0 ++#define CONFIG_H263I_DECODER 0 ++#define CONFIG_H263P_DECODER 0 ++#define CONFIG_H263_V4L2M2M_DECODER 0 ++#define CONFIG_H264_DECODER 0 ++#define CONFIG_H264_CRYSTALHD_DECODER 0 ++#define CONFIG_H264_V4L2M2M_DECODER 0 ++#define CONFIG_H264_MEDIACODEC_DECODER 0 ++#define 
CONFIG_H264_MMAL_DECODER 0 ++#define CONFIG_H264_QSV_DECODER 0 ++#define CONFIG_H264_RKMPP_DECODER 0 ++#define CONFIG_HAP_DECODER 0 ++#define CONFIG_HEVC_DECODER 0 ++#define CONFIG_HEVC_QSV_DECODER 0 ++#define CONFIG_HEVC_RKMPP_DECODER 0 ++#define CONFIG_HEVC_V4L2M2M_DECODER 0 ++#define CONFIG_HNM4_VIDEO_DECODER 0 ++#define CONFIG_HQ_HQA_DECODER 0 ++#define CONFIG_HQX_DECODER 0 ++#define CONFIG_HUFFYUV_DECODER 0 ++#define CONFIG_HYMT_DECODER 0 ++#define CONFIG_IDCIN_DECODER 0 ++#define CONFIG_IFF_ILBM_DECODER 0 ++#define CONFIG_IMM4_DECODER 0 ++#define CONFIG_IMM5_DECODER 0 ++#define CONFIG_INDEO2_DECODER 0 ++#define CONFIG_INDEO3_DECODER 0 ++#define CONFIG_INDEO4_DECODER 0 ++#define CONFIG_INDEO5_DECODER 0 ++#define CONFIG_INTERPLAY_VIDEO_DECODER 0 ++#define CONFIG_JPEG2000_DECODER 0 ++#define CONFIG_JPEGLS_DECODER 0 ++#define CONFIG_JV_DECODER 0 ++#define CONFIG_KGV1_DECODER 0 ++#define CONFIG_KMVC_DECODER 0 ++#define CONFIG_LAGARITH_DECODER 0 ++#define CONFIG_LOCO_DECODER 0 ++#define CONFIG_LSCR_DECODER 0 ++#define CONFIG_M101_DECODER 0 ++#define CONFIG_MAGICYUV_DECODER 0 ++#define CONFIG_MDEC_DECODER 0 ++#define CONFIG_MIMIC_DECODER 0 ++#define CONFIG_MJPEG_DECODER 0 ++#define CONFIG_MJPEGB_DECODER 0 ++#define CONFIG_MMVIDEO_DECODER 0 ++#define CONFIG_MOTIONPIXELS_DECODER 0 ++#define CONFIG_MPEG1VIDEO_DECODER 0 ++#define CONFIG_MPEG2VIDEO_DECODER 0 ++#define CONFIG_MPEG4_DECODER 0 ++#define CONFIG_MPEG4_CRYSTALHD_DECODER 0 ++#define CONFIG_MPEG4_V4L2M2M_DECODER 0 ++#define CONFIG_MPEG4_MMAL_DECODER 0 ++#define CONFIG_MPEGVIDEO_DECODER 0 ++#define CONFIG_MPEG1_V4L2M2M_DECODER 0 ++#define CONFIG_MPEG2_MMAL_DECODER 0 ++#define CONFIG_MPEG2_CRYSTALHD_DECODER 0 ++#define CONFIG_MPEG2_V4L2M2M_DECODER 0 ++#define CONFIG_MPEG2_QSV_DECODER 0 ++#define CONFIG_MPEG2_MEDIACODEC_DECODER 0 ++#define CONFIG_MSA1_DECODER 0 ++#define CONFIG_MSCC_DECODER 0 ++#define CONFIG_MSMPEG4V1_DECODER 0 ++#define CONFIG_MSMPEG4V2_DECODER 0 ++#define CONFIG_MSMPEG4V3_DECODER 0 ++#define CONFIG_MSMPEG4_CRYSTALHD_DECODER 0 ++#define CONFIG_MSRLE_DECODER 0 ++#define CONFIG_MSS1_DECODER 0 ++#define CONFIG_MSS2_DECODER 0 ++#define CONFIG_MSVIDEO1_DECODER 0 ++#define CONFIG_MSZH_DECODER 0 ++#define CONFIG_MTS2_DECODER 0 ++#define CONFIG_MV30_DECODER 0 ++#define CONFIG_MVC1_DECODER 0 ++#define CONFIG_MVC2_DECODER 0 ++#define CONFIG_MVDV_DECODER 0 ++#define CONFIG_MVHA_DECODER 0 ++#define CONFIG_MWSC_DECODER 0 ++#define CONFIG_MXPEG_DECODER 0 ++#define CONFIG_NUV_DECODER 0 ++#define CONFIG_PAF_VIDEO_DECODER 0 ++#define CONFIG_PAM_DECODER 0 ++#define CONFIG_PBM_DECODER 0 ++#define CONFIG_PCX_DECODER 0 ++#define CONFIG_PGM_DECODER 0 ++#define CONFIG_PGMYUV_DECODER 0 ++#define CONFIG_PICTOR_DECODER 0 ++#define CONFIG_PIXLET_DECODER 0 ++#define CONFIG_PNG_DECODER 0 ++#define CONFIG_PPM_DECODER 0 ++#define CONFIG_PRORES_DECODER 0 ++#define CONFIG_PROSUMER_DECODER 0 ++#define CONFIG_PSD_DECODER 0 ++#define CONFIG_PTX_DECODER 0 ++#define CONFIG_QDRAW_DECODER 0 ++#define CONFIG_QPEG_DECODER 0 ++#define CONFIG_QTRLE_DECODER 0 ++#define CONFIG_R10K_DECODER 0 ++#define CONFIG_R210_DECODER 0 ++#define CONFIG_RASC_DECODER 0 ++#define CONFIG_RAWVIDEO_DECODER 0 ++#define CONFIG_RL2_DECODER 0 ++#define CONFIG_ROQ_DECODER 0 ++#define CONFIG_RPZA_DECODER 0 ++#define CONFIG_RSCC_DECODER 0 ++#define CONFIG_RV10_DECODER 0 ++#define CONFIG_RV20_DECODER 0 ++#define CONFIG_RV30_DECODER 0 ++#define CONFIG_RV40_DECODER 0 ++#define CONFIG_S302M_DECODER 0 ++#define CONFIG_SANM_DECODER 0 ++#define CONFIG_SCPR_DECODER 0 ++#define 
CONFIG_SCREENPRESSO_DECODER 0 ++#define CONFIG_SGI_DECODER 0 ++#define CONFIG_SGIRLE_DECODER 0 ++#define CONFIG_SHEERVIDEO_DECODER 0 ++#define CONFIG_SMACKER_DECODER 0 ++#define CONFIG_SMC_DECODER 0 ++#define CONFIG_SMVJPEG_DECODER 0 ++#define CONFIG_SNOW_DECODER 0 ++#define CONFIG_SP5X_DECODER 0 ++#define CONFIG_SPEEDHQ_DECODER 0 ++#define CONFIG_SRGC_DECODER 0 ++#define CONFIG_SUNRAST_DECODER 0 ++#define CONFIG_SVQ1_DECODER 0 ++#define CONFIG_SVQ3_DECODER 0 ++#define CONFIG_TARGA_DECODER 0 ++#define CONFIG_TARGA_Y216_DECODER 0 ++#define CONFIG_TDSC_DECODER 0 ++#define CONFIG_THEORA_DECODER 1 ++#define CONFIG_THP_DECODER 0 ++#define CONFIG_TIERTEXSEQVIDEO_DECODER 0 ++#define CONFIG_TIFF_DECODER 0 ++#define CONFIG_TMV_DECODER 0 ++#define CONFIG_TRUEMOTION1_DECODER 0 ++#define CONFIG_TRUEMOTION2_DECODER 0 ++#define CONFIG_TRUEMOTION2RT_DECODER 0 ++#define CONFIG_TSCC_DECODER 0 ++#define CONFIG_TSCC2_DECODER 0 ++#define CONFIG_TXD_DECODER 0 ++#define CONFIG_ULTI_DECODER 0 ++#define CONFIG_UTVIDEO_DECODER 0 ++#define CONFIG_V210_DECODER 0 ++#define CONFIG_V210X_DECODER 0 ++#define CONFIG_V308_DECODER 0 ++#define CONFIG_V408_DECODER 0 ++#define CONFIG_V410_DECODER 0 ++#define CONFIG_VB_DECODER 0 ++#define CONFIG_VBLE_DECODER 0 ++#define CONFIG_VC1_DECODER 0 ++#define CONFIG_VC1_CRYSTALHD_DECODER 0 ++#define CONFIG_VC1IMAGE_DECODER 0 ++#define CONFIG_VC1_MMAL_DECODER 0 ++#define CONFIG_VC1_QSV_DECODER 0 ++#define CONFIG_VC1_V4L2M2M_DECODER 0 ++#define CONFIG_VCR1_DECODER 0 ++#define CONFIG_VMDVIDEO_DECODER 0 ++#define CONFIG_VMNC_DECODER 0 ++#define CONFIG_VP3_DECODER 1 ++#define CONFIG_VP4_DECODER 0 ++#define CONFIG_VP5_DECODER 0 ++#define CONFIG_VP6_DECODER 0 ++#define CONFIG_VP6A_DECODER 0 ++#define CONFIG_VP6F_DECODER 0 ++#define CONFIG_VP7_DECODER 0 ++#define CONFIG_VP8_DECODER 1 ++#define CONFIG_VP8_RKMPP_DECODER 0 ++#define CONFIG_VP8_V4L2M2M_DECODER 0 ++#define CONFIG_VP9_DECODER 0 ++#define CONFIG_VP9_RKMPP_DECODER 0 ++#define CONFIG_VP9_V4L2M2M_DECODER 0 ++#define CONFIG_VQA_DECODER 0 ++#define CONFIG_WEBP_DECODER 0 ++#define CONFIG_WCMV_DECODER 0 ++#define CONFIG_WRAPPED_AVFRAME_DECODER 0 ++#define CONFIG_WMV1_DECODER 0 ++#define CONFIG_WMV2_DECODER 0 ++#define CONFIG_WMV3_DECODER 0 ++#define CONFIG_WMV3_CRYSTALHD_DECODER 0 ++#define CONFIG_WMV3IMAGE_DECODER 0 ++#define CONFIG_WNV1_DECODER 0 ++#define CONFIG_XAN_WC3_DECODER 0 ++#define CONFIG_XAN_WC4_DECODER 0 ++#define CONFIG_XBM_DECODER 0 ++#define CONFIG_XFACE_DECODER 0 ++#define CONFIG_XL_DECODER 0 ++#define CONFIG_XPM_DECODER 0 ++#define CONFIG_XWD_DECODER 0 ++#define CONFIG_Y41P_DECODER 0 ++#define CONFIG_YLC_DECODER 0 ++#define CONFIG_YOP_DECODER 0 ++#define CONFIG_YUV4_DECODER 0 ++#define CONFIG_ZERO12V_DECODER 0 ++#define CONFIG_ZEROCODEC_DECODER 0 ++#define CONFIG_ZLIB_DECODER 0 ++#define CONFIG_ZMBV_DECODER 0 ++#define CONFIG_AAC_DECODER 0 ++#define CONFIG_AAC_FIXED_DECODER 0 ++#define CONFIG_AAC_LATM_DECODER 0 ++#define CONFIG_AC3_DECODER 0 ++#define CONFIG_AC3_FIXED_DECODER 0 ++#define CONFIG_ACELP_KELVIN_DECODER 0 ++#define CONFIG_ALAC_DECODER 0 ++#define CONFIG_ALS_DECODER 0 ++#define CONFIG_AMRNB_DECODER 0 ++#define CONFIG_AMRWB_DECODER 0 ++#define CONFIG_APE_DECODER 0 ++#define CONFIG_APTX_DECODER 0 ++#define CONFIG_APTX_HD_DECODER 0 ++#define CONFIG_ATRAC1_DECODER 0 ++#define CONFIG_ATRAC3_DECODER 0 ++#define CONFIG_ATRAC3AL_DECODER 0 ++#define CONFIG_ATRAC3P_DECODER 0 ++#define CONFIG_ATRAC3PAL_DECODER 0 ++#define CONFIG_ATRAC9_DECODER 0 ++#define CONFIG_BINKAUDIO_DCT_DECODER 0 ++#define 
CONFIG_BINKAUDIO_RDFT_DECODER 0 ++#define CONFIG_BMV_AUDIO_DECODER 0 ++#define CONFIG_COOK_DECODER 0 ++#define CONFIG_DCA_DECODER 0 ++#define CONFIG_DOLBY_E_DECODER 0 ++#define CONFIG_DSD_LSBF_DECODER 0 ++#define CONFIG_DSD_MSBF_DECODER 0 ++#define CONFIG_DSD_LSBF_PLANAR_DECODER 0 ++#define CONFIG_DSD_MSBF_PLANAR_DECODER 0 ++#define CONFIG_DSICINAUDIO_DECODER 0 ++#define CONFIG_DSS_SP_DECODER 0 ++#define CONFIG_DST_DECODER 0 ++#define CONFIG_EAC3_DECODER 0 ++#define CONFIG_EVRC_DECODER 0 ++#define CONFIG_FFWAVESYNTH_DECODER 0 ++#define CONFIG_FLAC_DECODER 1 ++#define CONFIG_G723_1_DECODER 0 ++#define CONFIG_G729_DECODER 0 ++#define CONFIG_GSM_DECODER 0 ++#define CONFIG_GSM_MS_DECODER 0 ++#define CONFIG_HCA_DECODER 0 ++#define CONFIG_HCOM_DECODER 0 ++#define CONFIG_IAC_DECODER 0 ++#define CONFIG_ILBC_DECODER 0 ++#define CONFIG_IMC_DECODER 0 ++#define CONFIG_INTERPLAY_ACM_DECODER 0 ++#define CONFIG_MACE3_DECODER 0 ++#define CONFIG_MACE6_DECODER 0 ++#define CONFIG_METASOUND_DECODER 0 ++#define CONFIG_MLP_DECODER 0 ++#define CONFIG_MP1_DECODER 0 ++#define CONFIG_MP1FLOAT_DECODER 0 ++#define CONFIG_MP2_DECODER 0 ++#define CONFIG_MP2FLOAT_DECODER 0 ++#define CONFIG_MP3FLOAT_DECODER 0 ++#define CONFIG_MP3_DECODER 1 ++#define CONFIG_MP3ADUFLOAT_DECODER 0 ++#define CONFIG_MP3ADU_DECODER 0 ++#define CONFIG_MP3ON4FLOAT_DECODER 0 ++#define CONFIG_MP3ON4_DECODER 0 ++#define CONFIG_MPC7_DECODER 0 ++#define CONFIG_MPC8_DECODER 0 ++#define CONFIG_NELLYMOSER_DECODER 0 ++#define CONFIG_ON2AVC_DECODER 0 ++#define CONFIG_OPUS_DECODER 0 ++#define CONFIG_PAF_AUDIO_DECODER 0 ++#define CONFIG_QCELP_DECODER 0 ++#define CONFIG_QDM2_DECODER 0 ++#define CONFIG_QDMC_DECODER 0 ++#define CONFIG_RA_144_DECODER 0 ++#define CONFIG_RA_288_DECODER 0 ++#define CONFIG_RALF_DECODER 0 ++#define CONFIG_SBC_DECODER 0 ++#define CONFIG_SHORTEN_DECODER 0 ++#define CONFIG_SIPR_DECODER 0 ++#define CONFIG_SIREN_DECODER 0 ++#define CONFIG_SMACKAUD_DECODER 0 ++#define CONFIG_SONIC_DECODER 0 ++#define CONFIG_TAK_DECODER 0 ++#define CONFIG_TRUEHD_DECODER 0 ++#define CONFIG_TRUESPEECH_DECODER 0 ++#define CONFIG_TTA_DECODER 0 ++#define CONFIG_TWINVQ_DECODER 0 ++#define CONFIG_VMDAUDIO_DECODER 0 ++#define CONFIG_VORBIS_DECODER 1 ++#define CONFIG_WAVPACK_DECODER 0 ++#define CONFIG_WMALOSSLESS_DECODER 0 ++#define CONFIG_WMAPRO_DECODER 0 ++#define CONFIG_WMAV1_DECODER 0 ++#define CONFIG_WMAV2_DECODER 0 ++#define CONFIG_WMAVOICE_DECODER 0 ++#define CONFIG_WS_SND1_DECODER 0 ++#define CONFIG_XMA1_DECODER 0 ++#define CONFIG_XMA2_DECODER 0 ++#define CONFIG_PCM_ALAW_DECODER 1 ++#define CONFIG_PCM_BLURAY_DECODER 0 ++#define CONFIG_PCM_DVD_DECODER 0 ++#define CONFIG_PCM_F16LE_DECODER 0 ++#define CONFIG_PCM_F24LE_DECODER 0 ++#define CONFIG_PCM_F32BE_DECODER 0 ++#define CONFIG_PCM_F32LE_DECODER 1 ++#define CONFIG_PCM_F64BE_DECODER 0 ++#define CONFIG_PCM_F64LE_DECODER 0 ++#define CONFIG_PCM_LXF_DECODER 0 ++#define CONFIG_PCM_MULAW_DECODER 1 ++#define CONFIG_PCM_S8_DECODER 0 ++#define CONFIG_PCM_S8_PLANAR_DECODER 0 ++#define CONFIG_PCM_S16BE_DECODER 1 ++#define CONFIG_PCM_S16BE_PLANAR_DECODER 0 ++#define CONFIG_PCM_S16LE_DECODER 1 ++#define CONFIG_PCM_S16LE_PLANAR_DECODER 0 ++#define CONFIG_PCM_S24BE_DECODER 1 ++#define CONFIG_PCM_S24DAUD_DECODER 0 ++#define CONFIG_PCM_S24LE_DECODER 1 ++#define CONFIG_PCM_S24LE_PLANAR_DECODER 0 ++#define CONFIG_PCM_S32BE_DECODER 0 ++#define CONFIG_PCM_S32LE_DECODER 1 ++#define CONFIG_PCM_S32LE_PLANAR_DECODER 0 ++#define CONFIG_PCM_S64BE_DECODER 0 ++#define CONFIG_PCM_S64LE_DECODER 0 ++#define CONFIG_PCM_U8_DECODER 1 
++#define CONFIG_PCM_U16BE_DECODER 0 ++#define CONFIG_PCM_U16LE_DECODER 0 ++#define CONFIG_PCM_U24BE_DECODER 0 ++#define CONFIG_PCM_U24LE_DECODER 0 ++#define CONFIG_PCM_U32BE_DECODER 0 ++#define CONFIG_PCM_U32LE_DECODER 0 ++#define CONFIG_PCM_VIDC_DECODER 0 ++#define CONFIG_DERF_DPCM_DECODER 0 ++#define CONFIG_GREMLIN_DPCM_DECODER 0 ++#define CONFIG_INTERPLAY_DPCM_DECODER 0 ++#define CONFIG_ROQ_DPCM_DECODER 0 ++#define CONFIG_SDX2_DPCM_DECODER 0 ++#define CONFIG_SOL_DPCM_DECODER 0 ++#define CONFIG_XAN_DPCM_DECODER 0 ++#define CONFIG_ADPCM_4XM_DECODER 0 ++#define CONFIG_ADPCM_ADX_DECODER 0 ++#define CONFIG_ADPCM_AFC_DECODER 0 ++#define CONFIG_ADPCM_AGM_DECODER 0 ++#define CONFIG_ADPCM_AICA_DECODER 0 ++#define CONFIG_ADPCM_ARGO_DECODER 0 ++#define CONFIG_ADPCM_CT_DECODER 0 ++#define CONFIG_ADPCM_DTK_DECODER 0 ++#define CONFIG_ADPCM_EA_DECODER 0 ++#define CONFIG_ADPCM_EA_MAXIS_XA_DECODER 0 ++#define CONFIG_ADPCM_EA_R1_DECODER 0 ++#define CONFIG_ADPCM_EA_R2_DECODER 0 ++#define CONFIG_ADPCM_EA_R3_DECODER 0 ++#define CONFIG_ADPCM_EA_XAS_DECODER 0 ++#define CONFIG_ADPCM_G722_DECODER 0 ++#define CONFIG_ADPCM_G726_DECODER 0 ++#define CONFIG_ADPCM_G726LE_DECODER 0 ++#define CONFIG_ADPCM_IMA_AMV_DECODER 0 ++#define CONFIG_ADPCM_IMA_ALP_DECODER 0 ++#define CONFIG_ADPCM_IMA_APC_DECODER 0 ++#define CONFIG_ADPCM_IMA_APM_DECODER 0 ++#define CONFIG_ADPCM_IMA_CUNNING_DECODER 0 ++#define CONFIG_ADPCM_IMA_DAT4_DECODER 0 ++#define CONFIG_ADPCM_IMA_DK3_DECODER 0 ++#define CONFIG_ADPCM_IMA_DK4_DECODER 0 ++#define CONFIG_ADPCM_IMA_EA_EACS_DECODER 0 ++#define CONFIG_ADPCM_IMA_EA_SEAD_DECODER 0 ++#define CONFIG_ADPCM_IMA_ISS_DECODER 0 ++#define CONFIG_ADPCM_IMA_MTF_DECODER 0 ++#define CONFIG_ADPCM_IMA_OKI_DECODER 0 ++#define CONFIG_ADPCM_IMA_QT_DECODER 0 ++#define CONFIG_ADPCM_IMA_RAD_DECODER 0 ++#define CONFIG_ADPCM_IMA_SSI_DECODER 0 ++#define CONFIG_ADPCM_IMA_SMJPEG_DECODER 0 ++#define CONFIG_ADPCM_IMA_WAV_DECODER 0 ++#define CONFIG_ADPCM_IMA_WS_DECODER 0 ++#define CONFIG_ADPCM_MS_DECODER 0 ++#define CONFIG_ADPCM_MTAF_DECODER 0 ++#define CONFIG_ADPCM_PSX_DECODER 0 ++#define CONFIG_ADPCM_SBPRO_2_DECODER 0 ++#define CONFIG_ADPCM_SBPRO_3_DECODER 0 ++#define CONFIG_ADPCM_SBPRO_4_DECODER 0 ++#define CONFIG_ADPCM_SWF_DECODER 0 ++#define CONFIG_ADPCM_THP_DECODER 0 ++#define CONFIG_ADPCM_THP_LE_DECODER 0 ++#define CONFIG_ADPCM_VIMA_DECODER 0 ++#define CONFIG_ADPCM_XA_DECODER 0 ++#define CONFIG_ADPCM_YAMAHA_DECODER 0 ++#define CONFIG_ADPCM_ZORK_DECODER 0 ++#define CONFIG_SSA_DECODER 0 ++#define CONFIG_ASS_DECODER 0 ++#define CONFIG_CCAPTION_DECODER 0 ++#define CONFIG_DVBSUB_DECODER 0 ++#define CONFIG_DVDSUB_DECODER 0 ++#define CONFIG_JACOSUB_DECODER 0 ++#define CONFIG_MICRODVD_DECODER 0 ++#define CONFIG_MOVTEXT_DECODER 0 ++#define CONFIG_MPL2_DECODER 0 ++#define CONFIG_PGSSUB_DECODER 0 ++#define CONFIG_PJS_DECODER 0 ++#define CONFIG_REALTEXT_DECODER 0 ++#define CONFIG_SAMI_DECODER 0 ++#define CONFIG_SRT_DECODER 0 ++#define CONFIG_STL_DECODER 0 ++#define CONFIG_SUBRIP_DECODER 0 ++#define CONFIG_SUBVIEWER_DECODER 0 ++#define CONFIG_SUBVIEWER1_DECODER 0 ++#define CONFIG_TEXT_DECODER 0 ++#define CONFIG_VPLAYER_DECODER 0 ++#define CONFIG_WEBVTT_DECODER 0 ++#define CONFIG_XSUB_DECODER 0 ++#define CONFIG_AAC_AT_DECODER 0 ++#define CONFIG_AC3_AT_DECODER 0 ++#define CONFIG_ADPCM_IMA_QT_AT_DECODER 0 ++#define CONFIG_ALAC_AT_DECODER 0 ++#define CONFIG_AMR_NB_AT_DECODER 0 ++#define CONFIG_EAC3_AT_DECODER 0 ++#define CONFIG_GSM_MS_AT_DECODER 0 ++#define CONFIG_ILBC_AT_DECODER 0 ++#define CONFIG_MP1_AT_DECODER 0 ++#define 
CONFIG_MP2_AT_DECODER 0 ++#define CONFIG_MP3_AT_DECODER 0 ++#define CONFIG_PCM_ALAW_AT_DECODER 0 ++#define CONFIG_PCM_MULAW_AT_DECODER 0 ++#define CONFIG_QDMC_AT_DECODER 0 ++#define CONFIG_QDM2_AT_DECODER 0 ++#define CONFIG_LIBARIBB24_DECODER 0 ++#define CONFIG_LIBCELT_DECODER 0 ++#define CONFIG_LIBCODEC2_DECODER 0 ++#define CONFIG_LIBDAV1D_DECODER 0 ++#define CONFIG_LIBDAVS2_DECODER 0 ++#define CONFIG_LIBFDK_AAC_DECODER 0 ++#define CONFIG_LIBGSM_DECODER 0 ++#define CONFIG_LIBGSM_MS_DECODER 0 ++#define CONFIG_LIBILBC_DECODER 0 ++#define CONFIG_LIBOPENCORE_AMRNB_DECODER 0 ++#define CONFIG_LIBOPENCORE_AMRWB_DECODER 0 ++#define CONFIG_LIBOPENJPEG_DECODER 0 ++#define CONFIG_LIBOPUS_DECODER 1 ++#define CONFIG_LIBRSVG_DECODER 0 ++#define CONFIG_LIBSPEEX_DECODER 0 ++#define CONFIG_LIBVORBIS_DECODER 0 ++#define CONFIG_LIBVPX_VP8_DECODER 0 ++#define CONFIG_LIBVPX_VP9_DECODER 0 ++#define CONFIG_LIBZVBI_TELETEXT_DECODER 0 ++#define CONFIG_BINTEXT_DECODER 0 ++#define CONFIG_XBIN_DECODER 0 ++#define CONFIG_IDF_DECODER 0 ++#define CONFIG_LIBAOM_AV1_DECODER 0 ++#define CONFIG_LIBOPENH264_DECODER 0 ++#define CONFIG_H264_CUVID_DECODER 0 ++#define CONFIG_HEVC_CUVID_DECODER 0 ++#define CONFIG_HEVC_MEDIACODEC_DECODER 0 ++#define CONFIG_MJPEG_CUVID_DECODER 0 ++#define CONFIG_MJPEG_QSV_DECODER 0 ++#define CONFIG_MPEG1_CUVID_DECODER 0 ++#define CONFIG_MPEG2_CUVID_DECODER 0 ++#define CONFIG_MPEG4_CUVID_DECODER 0 ++#define CONFIG_MPEG4_MEDIACODEC_DECODER 0 ++#define CONFIG_VC1_CUVID_DECODER 0 ++#define CONFIG_VP8_CUVID_DECODER 0 ++#define CONFIG_VP8_MEDIACODEC_DECODER 0 ++#define CONFIG_VP8_QSV_DECODER 0 ++#define CONFIG_VP9_CUVID_DECODER 0 ++#define CONFIG_VP9_MEDIACODEC_DECODER 0 ++#define CONFIG_VP9_QSV_DECODER 0 ++#define CONFIG_A64MULTI_ENCODER 0 ++#define CONFIG_A64MULTI5_ENCODER 0 ++#define CONFIG_ALIAS_PIX_ENCODER 0 ++#define CONFIG_AMV_ENCODER 0 ++#define CONFIG_APNG_ENCODER 0 ++#define CONFIG_ASV1_ENCODER 0 ++#define CONFIG_ASV2_ENCODER 0 ++#define CONFIG_AVRP_ENCODER 0 ++#define CONFIG_AVUI_ENCODER 0 ++#define CONFIG_AYUV_ENCODER 0 ++#define CONFIG_BMP_ENCODER 0 ++#define CONFIG_CINEPAK_ENCODER 0 ++#define CONFIG_CLJR_ENCODER 0 ++#define CONFIG_COMFORTNOISE_ENCODER 0 ++#define CONFIG_DNXHD_ENCODER 0 ++#define CONFIG_DPX_ENCODER 0 ++#define CONFIG_DVVIDEO_ENCODER 0 ++#define CONFIG_FFV1_ENCODER 0 ++#define CONFIG_FFVHUFF_ENCODER 0 ++#define CONFIG_FITS_ENCODER 0 ++#define CONFIG_FLASHSV_ENCODER 0 ++#define CONFIG_FLASHSV2_ENCODER 0 ++#define CONFIG_FLV_ENCODER 0 ++#define CONFIG_GIF_ENCODER 0 ++#define CONFIG_H261_ENCODER 0 ++#define CONFIG_H263_ENCODER 0 ++#define CONFIG_H263P_ENCODER 0 ++#define CONFIG_HAP_ENCODER 0 ++#define CONFIG_HUFFYUV_ENCODER 0 ++#define CONFIG_JPEG2000_ENCODER 0 ++#define CONFIG_JPEGLS_ENCODER 0 ++#define CONFIG_LJPEG_ENCODER 0 ++#define CONFIG_MAGICYUV_ENCODER 0 ++#define CONFIG_MJPEG_ENCODER 0 ++#define CONFIG_MPEG1VIDEO_ENCODER 0 ++#define CONFIG_MPEG2VIDEO_ENCODER 0 ++#define CONFIG_MPEG4_ENCODER 0 ++#define CONFIG_MSMPEG4V2_ENCODER 0 ++#define CONFIG_MSMPEG4V3_ENCODER 0 ++#define CONFIG_MSVIDEO1_ENCODER 0 ++#define CONFIG_PAM_ENCODER 0 ++#define CONFIG_PBM_ENCODER 0 ++#define CONFIG_PCX_ENCODER 0 ++#define CONFIG_PGM_ENCODER 0 ++#define CONFIG_PGMYUV_ENCODER 0 ++#define CONFIG_PNG_ENCODER 0 ++#define CONFIG_PPM_ENCODER 0 ++#define CONFIG_PRORES_ENCODER 0 ++#define CONFIG_PRORES_AW_ENCODER 0 ++#define CONFIG_PRORES_KS_ENCODER 0 ++#define CONFIG_QTRLE_ENCODER 0 ++#define CONFIG_R10K_ENCODER 0 ++#define CONFIG_R210_ENCODER 0 ++#define CONFIG_RAWVIDEO_ENCODER 0 ++#define 
CONFIG_ROQ_ENCODER 0 ++#define CONFIG_RV10_ENCODER 0 ++#define CONFIG_RV20_ENCODER 0 ++#define CONFIG_S302M_ENCODER 0 ++#define CONFIG_SGI_ENCODER 0 ++#define CONFIG_SNOW_ENCODER 0 ++#define CONFIG_SUNRAST_ENCODER 0 ++#define CONFIG_SVQ1_ENCODER 0 ++#define CONFIG_TARGA_ENCODER 0 ++#define CONFIG_TIFF_ENCODER 0 ++#define CONFIG_UTVIDEO_ENCODER 0 ++#define CONFIG_V210_ENCODER 0 ++#define CONFIG_V308_ENCODER 0 ++#define CONFIG_V408_ENCODER 0 ++#define CONFIG_V410_ENCODER 0 ++#define CONFIG_VC2_ENCODER 0 ++#define CONFIG_WRAPPED_AVFRAME_ENCODER 0 ++#define CONFIG_WMV1_ENCODER 0 ++#define CONFIG_WMV2_ENCODER 0 ++#define CONFIG_XBM_ENCODER 0 ++#define CONFIG_XFACE_ENCODER 0 ++#define CONFIG_XWD_ENCODER 0 ++#define CONFIG_Y41P_ENCODER 0 ++#define CONFIG_YUV4_ENCODER 0 ++#define CONFIG_ZLIB_ENCODER 0 ++#define CONFIG_ZMBV_ENCODER 0 ++#define CONFIG_AAC_ENCODER 0 ++#define CONFIG_AC3_ENCODER 0 ++#define CONFIG_AC3_FIXED_ENCODER 0 ++#define CONFIG_ALAC_ENCODER 0 ++#define CONFIG_APTX_ENCODER 0 ++#define CONFIG_APTX_HD_ENCODER 0 ++#define CONFIG_DCA_ENCODER 0 ++#define CONFIG_EAC3_ENCODER 0 ++#define CONFIG_FLAC_ENCODER 0 ++#define CONFIG_G723_1_ENCODER 0 ++#define CONFIG_MLP_ENCODER 0 ++#define CONFIG_MP2_ENCODER 0 ++#define CONFIG_MP2FIXED_ENCODER 0 ++#define CONFIG_NELLYMOSER_ENCODER 0 ++#define CONFIG_OPUS_ENCODER 0 ++#define CONFIG_RA_144_ENCODER 0 ++#define CONFIG_SBC_ENCODER 0 ++#define CONFIG_SONIC_ENCODER 0 ++#define CONFIG_SONIC_LS_ENCODER 0 ++#define CONFIG_TRUEHD_ENCODER 0 ++#define CONFIG_TTA_ENCODER 0 ++#define CONFIG_VORBIS_ENCODER 0 ++#define CONFIG_WAVPACK_ENCODER 0 ++#define CONFIG_WMAV1_ENCODER 0 ++#define CONFIG_WMAV2_ENCODER 0 ++#define CONFIG_PCM_ALAW_ENCODER 0 ++#define CONFIG_PCM_DVD_ENCODER 0 ++#define CONFIG_PCM_F32BE_ENCODER 0 ++#define CONFIG_PCM_F32LE_ENCODER 0 ++#define CONFIG_PCM_F64BE_ENCODER 0 ++#define CONFIG_PCM_F64LE_ENCODER 0 ++#define CONFIG_PCM_MULAW_ENCODER 0 ++#define CONFIG_PCM_S8_ENCODER 0 ++#define CONFIG_PCM_S8_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S16BE_ENCODER 0 ++#define CONFIG_PCM_S16BE_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S16LE_ENCODER 0 ++#define CONFIG_PCM_S16LE_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S24BE_ENCODER 0 ++#define CONFIG_PCM_S24DAUD_ENCODER 0 ++#define CONFIG_PCM_S24LE_ENCODER 0 ++#define CONFIG_PCM_S24LE_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S32BE_ENCODER 0 ++#define CONFIG_PCM_S32LE_ENCODER 0 ++#define CONFIG_PCM_S32LE_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S64BE_ENCODER 0 ++#define CONFIG_PCM_S64LE_ENCODER 0 ++#define CONFIG_PCM_U8_ENCODER 0 ++#define CONFIG_PCM_U16BE_ENCODER 0 ++#define CONFIG_PCM_U16LE_ENCODER 0 ++#define CONFIG_PCM_U24BE_ENCODER 0 ++#define CONFIG_PCM_U24LE_ENCODER 0 ++#define CONFIG_PCM_U32BE_ENCODER 0 ++#define CONFIG_PCM_U32LE_ENCODER 0 ++#define CONFIG_PCM_VIDC_ENCODER 0 ++#define CONFIG_ROQ_DPCM_ENCODER 0 ++#define CONFIG_ADPCM_ADX_ENCODER 0 ++#define CONFIG_ADPCM_G722_ENCODER 0 ++#define CONFIG_ADPCM_G726_ENCODER 0 ++#define CONFIG_ADPCM_G726LE_ENCODER 0 ++#define CONFIG_ADPCM_IMA_QT_ENCODER 0 ++#define CONFIG_ADPCM_IMA_WAV_ENCODER 0 ++#define CONFIG_ADPCM_MS_ENCODER 0 ++#define CONFIG_ADPCM_SWF_ENCODER 0 ++#define CONFIG_ADPCM_YAMAHA_ENCODER 0 ++#define CONFIG_SSA_ENCODER 0 ++#define CONFIG_ASS_ENCODER 0 ++#define CONFIG_DVBSUB_ENCODER 0 ++#define CONFIG_DVDSUB_ENCODER 0 ++#define CONFIG_MOVTEXT_ENCODER 0 ++#define CONFIG_SRT_ENCODER 0 ++#define CONFIG_SUBRIP_ENCODER 0 ++#define CONFIG_TEXT_ENCODER 0 ++#define CONFIG_WEBVTT_ENCODER 0 ++#define CONFIG_XSUB_ENCODER 0 ++#define CONFIG_AAC_AT_ENCODER 0 ++#define 
CONFIG_ALAC_AT_ENCODER 0 ++#define CONFIG_ILBC_AT_ENCODER 0 ++#define CONFIG_PCM_ALAW_AT_ENCODER 0 ++#define CONFIG_PCM_MULAW_AT_ENCODER 0 ++#define CONFIG_LIBAOM_AV1_ENCODER 0 ++#define CONFIG_LIBCODEC2_ENCODER 0 ++#define CONFIG_LIBFDK_AAC_ENCODER 0 ++#define CONFIG_LIBGSM_ENCODER 0 ++#define CONFIG_LIBGSM_MS_ENCODER 0 ++#define CONFIG_LIBILBC_ENCODER 0 ++#define CONFIG_LIBMP3LAME_ENCODER 0 ++#define CONFIG_LIBOPENCORE_AMRNB_ENCODER 0 ++#define CONFIG_LIBOPENJPEG_ENCODER 0 ++#define CONFIG_LIBOPUS_ENCODER 0 ++#define CONFIG_LIBRAV1E_ENCODER 0 ++#define CONFIG_LIBSHINE_ENCODER 0 ++#define CONFIG_LIBSPEEX_ENCODER 0 ++#define CONFIG_LIBTHEORA_ENCODER 0 ++#define CONFIG_LIBTWOLAME_ENCODER 0 ++#define CONFIG_LIBVO_AMRWBENC_ENCODER 0 ++#define CONFIG_LIBVORBIS_ENCODER 0 ++#define CONFIG_LIBVPX_VP8_ENCODER 0 ++#define CONFIG_LIBVPX_VP9_ENCODER 0 ++#define CONFIG_LIBWAVPACK_ENCODER 0 ++#define CONFIG_LIBWEBP_ANIM_ENCODER 0 ++#define CONFIG_LIBWEBP_ENCODER 0 ++#define CONFIG_LIBX262_ENCODER 0 ++#define CONFIG_LIBX264_ENCODER 0 ++#define CONFIG_LIBX264RGB_ENCODER 0 ++#define CONFIG_LIBX265_ENCODER 0 ++#define CONFIG_LIBXAVS_ENCODER 0 ++#define CONFIG_LIBXAVS2_ENCODER 0 ++#define CONFIG_LIBXVID_ENCODER 0 ++#define CONFIG_H263_V4L2M2M_ENCODER 0 ++#define CONFIG_LIBOPENH264_ENCODER 0 ++#define CONFIG_H264_AMF_ENCODER 0 ++#define CONFIG_H264_NVENC_ENCODER 0 ++#define CONFIG_H264_OMX_ENCODER 0 ++#define CONFIG_H264_QSV_ENCODER 0 ++#define CONFIG_H264_V4L2M2M_ENCODER 0 ++#define CONFIG_H264_VAAPI_ENCODER 0 ++#define CONFIG_H264_VIDEOTOOLBOX_ENCODER 0 ++#define CONFIG_NVENC_ENCODER 0 ++#define CONFIG_NVENC_H264_ENCODER 0 ++#define CONFIG_NVENC_HEVC_ENCODER 0 ++#define CONFIG_HEVC_AMF_ENCODER 0 ++#define CONFIG_HEVC_NVENC_ENCODER 0 ++#define CONFIG_HEVC_QSV_ENCODER 0 ++#define CONFIG_HEVC_V4L2M2M_ENCODER 0 ++#define CONFIG_HEVC_VAAPI_ENCODER 0 ++#define CONFIG_HEVC_VIDEOTOOLBOX_ENCODER 0 ++#define CONFIG_LIBKVAZAAR_ENCODER 0 ++#define CONFIG_MJPEG_QSV_ENCODER 0 ++#define CONFIG_MJPEG_VAAPI_ENCODER 0 ++#define CONFIG_MPEG2_QSV_ENCODER 0 ++#define CONFIG_MPEG2_VAAPI_ENCODER 0 ++#define CONFIG_MPEG4_OMX_ENCODER 0 ++#define CONFIG_MPEG4_V4L2M2M_ENCODER 0 ++#define CONFIG_VP8_V4L2M2M_ENCODER 0 ++#define CONFIG_VP8_VAAPI_ENCODER 0 ++#define CONFIG_VP9_VAAPI_ENCODER 0 ++#define CONFIG_VP9_QSV_ENCODER 0 ++#define CONFIG_H263_VAAPI_HWACCEL 0 ++#define CONFIG_H263_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_H264_D3D11VA_HWACCEL 0 ++#define CONFIG_H264_D3D11VA2_HWACCEL 0 ++#define CONFIG_H264_DXVA2_HWACCEL 0 ++#define CONFIG_H264_NVDEC_HWACCEL 0 ++#define CONFIG_H264_VAAPI_HWACCEL 0 ++#define CONFIG_H264_VDPAU_HWACCEL 0 ++#define CONFIG_H264_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_HEVC_D3D11VA_HWACCEL 0 ++#define CONFIG_HEVC_D3D11VA2_HWACCEL 0 ++#define CONFIG_HEVC_DXVA2_HWACCEL 0 ++#define CONFIG_HEVC_NVDEC_HWACCEL 0 ++#define CONFIG_HEVC_VAAPI_HWACCEL 0 ++#define CONFIG_HEVC_VDPAU_HWACCEL 0 ++#define CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_MJPEG_NVDEC_HWACCEL 0 ++#define CONFIG_MJPEG_VAAPI_HWACCEL 0 ++#define CONFIG_MPEG1_NVDEC_HWACCEL 0 ++#define CONFIG_MPEG1_VDPAU_HWACCEL 0 ++#define CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_MPEG1_XVMC_HWACCEL 0 ++#define CONFIG_MPEG2_D3D11VA_HWACCEL 0 ++#define CONFIG_MPEG2_D3D11VA2_HWACCEL 0 ++#define CONFIG_MPEG2_NVDEC_HWACCEL 0 ++#define CONFIG_MPEG2_DXVA2_HWACCEL 0 ++#define CONFIG_MPEG2_VAAPI_HWACCEL 0 ++#define CONFIG_MPEG2_VDPAU_HWACCEL 0 ++#define CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_MPEG2_XVMC_HWACCEL 0 ++#define 
CONFIG_MPEG4_NVDEC_HWACCEL 0 ++#define CONFIG_MPEG4_VAAPI_HWACCEL 0 ++#define CONFIG_MPEG4_VDPAU_HWACCEL 0 ++#define CONFIG_MPEG4_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_VC1_D3D11VA_HWACCEL 0 ++#define CONFIG_VC1_D3D11VA2_HWACCEL 0 ++#define CONFIG_VC1_DXVA2_HWACCEL 0 ++#define CONFIG_VC1_NVDEC_HWACCEL 0 ++#define CONFIG_VC1_VAAPI_HWACCEL 0 ++#define CONFIG_VC1_VDPAU_HWACCEL 0 ++#define CONFIG_VP8_NVDEC_HWACCEL 0 ++#define CONFIG_VP8_VAAPI_HWACCEL 0 ++#define CONFIG_VP9_D3D11VA_HWACCEL 0 ++#define CONFIG_VP9_D3D11VA2_HWACCEL 0 ++#define CONFIG_VP9_DXVA2_HWACCEL 0 ++#define CONFIG_VP9_NVDEC_HWACCEL 0 ++#define CONFIG_VP9_VAAPI_HWACCEL 0 ++#define CONFIG_VP9_VDPAU_HWACCEL 0 ++#define CONFIG_WMV3_D3D11VA_HWACCEL 0 ++#define CONFIG_WMV3_D3D11VA2_HWACCEL 0 ++#define CONFIG_WMV3_DXVA2_HWACCEL 0 ++#define CONFIG_WMV3_NVDEC_HWACCEL 0 ++#define CONFIG_WMV3_VAAPI_HWACCEL 0 ++#define CONFIG_WMV3_VDPAU_HWACCEL 0 ++#define CONFIG_AAC_PARSER 0 ++#define CONFIG_AAC_LATM_PARSER 0 ++#define CONFIG_AC3_PARSER 0 ++#define CONFIG_ADX_PARSER 0 ++#define CONFIG_AV1_PARSER 0 ++#define CONFIG_AVS2_PARSER 0 ++#define CONFIG_BMP_PARSER 0 ++#define CONFIG_CAVSVIDEO_PARSER 0 ++#define CONFIG_COOK_PARSER 0 ++#define CONFIG_DCA_PARSER 0 ++#define CONFIG_DIRAC_PARSER 0 ++#define CONFIG_DNXHD_PARSER 0 ++#define CONFIG_DPX_PARSER 0 ++#define CONFIG_DVAUDIO_PARSER 0 ++#define CONFIG_DVBSUB_PARSER 0 ++#define CONFIG_DVDSUB_PARSER 0 ++#define CONFIG_DVD_NAV_PARSER 0 ++#define CONFIG_FLAC_PARSER 1 ++#define CONFIG_G723_1_PARSER 0 ++#define CONFIG_G729_PARSER 0 ++#define CONFIG_GIF_PARSER 0 ++#define CONFIG_GSM_PARSER 0 ++#define CONFIG_H261_PARSER 0 ++#define CONFIG_H263_PARSER 0 ++#define CONFIG_H264_PARSER 0 ++#define CONFIG_HEVC_PARSER 0 ++#define CONFIG_MJPEG_PARSER 0 ++#define CONFIG_MLP_PARSER 0 ++#define CONFIG_MPEG4VIDEO_PARSER 0 ++#define CONFIG_MPEGAUDIO_PARSER 1 ++#define CONFIG_MPEGVIDEO_PARSER 0 ++#define CONFIG_OPUS_PARSER 1 ++#define CONFIG_PNG_PARSER 0 ++#define CONFIG_PNM_PARSER 0 ++#define CONFIG_RV30_PARSER 0 ++#define CONFIG_RV40_PARSER 0 ++#define CONFIG_SBC_PARSER 0 ++#define CONFIG_SIPR_PARSER 0 ++#define CONFIG_TAK_PARSER 0 ++#define CONFIG_VC1_PARSER 0 ++#define CONFIG_VORBIS_PARSER 1 ++#define CONFIG_VP3_PARSER 1 ++#define CONFIG_VP8_PARSER 1 ++#define CONFIG_VP9_PARSER 1 ++#define CONFIG_WEBP_PARSER 0 ++#define CONFIG_XMA_PARSER 0 ++#define CONFIG_ALSA_INDEV 0 ++#define CONFIG_ANDROID_CAMERA_INDEV 0 ++#define CONFIG_AVFOUNDATION_INDEV 0 ++#define CONFIG_BKTR_INDEV 0 ++#define CONFIG_DECKLINK_INDEV 0 ++#define CONFIG_DSHOW_INDEV 0 ++#define CONFIG_FBDEV_INDEV 0 ++#define CONFIG_GDIGRAB_INDEV 0 ++#define CONFIG_IEC61883_INDEV 0 ++#define CONFIG_JACK_INDEV 0 ++#define CONFIG_KMSGRAB_INDEV 0 ++#define CONFIG_LAVFI_INDEV 0 ++#define CONFIG_OPENAL_INDEV 0 ++#define CONFIG_OSS_INDEV 0 ++#define CONFIG_PULSE_INDEV 0 ++#define CONFIG_SNDIO_INDEV 0 ++#define CONFIG_V4L2_INDEV 0 ++#define CONFIG_VFWCAP_INDEV 0 ++#define CONFIG_XCBGRAB_INDEV 0 ++#define CONFIG_LIBCDIO_INDEV 0 ++#define CONFIG_LIBDC1394_INDEV 0 ++#define CONFIG_ALSA_OUTDEV 0 ++#define CONFIG_CACA_OUTDEV 0 ++#define CONFIG_DECKLINK_OUTDEV 0 ++#define CONFIG_FBDEV_OUTDEV 0 ++#define CONFIG_OPENGL_OUTDEV 0 ++#define CONFIG_OSS_OUTDEV 0 ++#define CONFIG_PULSE_OUTDEV 0 ++#define CONFIG_SDL2_OUTDEV 0 ++#define CONFIG_SNDIO_OUTDEV 0 ++#define CONFIG_V4L2_OUTDEV 0 ++#define CONFIG_XV_OUTDEV 0 ++#define CONFIG_ABENCH_FILTER 0 ++#define CONFIG_ACOMPRESSOR_FILTER 0 ++#define CONFIG_ACONTRAST_FILTER 0 ++#define CONFIG_ACOPY_FILTER 0 ++#define 
CONFIG_ACUE_FILTER 0 ++#define CONFIG_ACROSSFADE_FILTER 0 ++#define CONFIG_ACROSSOVER_FILTER 0 ++#define CONFIG_ACRUSHER_FILTER 0 ++#define CONFIG_ADECLICK_FILTER 0 ++#define CONFIG_ADECLIP_FILTER 0 ++#define CONFIG_ADELAY_FILTER 0 ++#define CONFIG_ADERIVATIVE_FILTER 0 ++#define CONFIG_AECHO_FILTER 0 ++#define CONFIG_AEMPHASIS_FILTER 0 ++#define CONFIG_AEVAL_FILTER 0 ++#define CONFIG_AFADE_FILTER 0 ++#define CONFIG_AFFTDN_FILTER 0 ++#define CONFIG_AFFTFILT_FILTER 0 ++#define CONFIG_AFIR_FILTER 0 ++#define CONFIG_AFORMAT_FILTER 0 ++#define CONFIG_AGATE_FILTER 0 ++#define CONFIG_AIIR_FILTER 0 ++#define CONFIG_AINTEGRAL_FILTER 0 ++#define CONFIG_AINTERLEAVE_FILTER 0 ++#define CONFIG_ALIMITER_FILTER 0 ++#define CONFIG_ALLPASS_FILTER 0 ++#define CONFIG_ALOOP_FILTER 0 ++#define CONFIG_AMERGE_FILTER 0 ++#define CONFIG_AMETADATA_FILTER 0 ++#define CONFIG_AMIX_FILTER 0 ++#define CONFIG_AMULTIPLY_FILTER 0 ++#define CONFIG_ANEQUALIZER_FILTER 0 ++#define CONFIG_ANLMDN_FILTER 0 ++#define CONFIG_ANLMS_FILTER 0 ++#define CONFIG_ANULL_FILTER 0 ++#define CONFIG_APAD_FILTER 0 ++#define CONFIG_APERMS_FILTER 0 ++#define CONFIG_APHASER_FILTER 0 ++#define CONFIG_APULSATOR_FILTER 0 ++#define CONFIG_AREALTIME_FILTER 0 ++#define CONFIG_ARESAMPLE_FILTER 0 ++#define CONFIG_AREVERSE_FILTER 0 ++#define CONFIG_ARNNDN_FILTER 0 ++#define CONFIG_ASELECT_FILTER 0 ++#define CONFIG_ASENDCMD_FILTER 0 ++#define CONFIG_ASETNSAMPLES_FILTER 0 ++#define CONFIG_ASETPTS_FILTER 0 ++#define CONFIG_ASETRATE_FILTER 0 ++#define CONFIG_ASETTB_FILTER 0 ++#define CONFIG_ASHOWINFO_FILTER 0 ++#define CONFIG_ASIDEDATA_FILTER 0 ++#define CONFIG_ASOFTCLIP_FILTER 0 ++#define CONFIG_ASPLIT_FILTER 0 ++#define CONFIG_ASR_FILTER 0 ++#define CONFIG_ASTATS_FILTER 0 ++#define CONFIG_ASTREAMSELECT_FILTER 0 ++#define CONFIG_ATEMPO_FILTER 0 ++#define CONFIG_ATRIM_FILTER 0 ++#define CONFIG_AXCORRELATE_FILTER 0 ++#define CONFIG_AZMQ_FILTER 0 ++#define CONFIG_BANDPASS_FILTER 0 ++#define CONFIG_BANDREJECT_FILTER 0 ++#define CONFIG_BASS_FILTER 0 ++#define CONFIG_BIQUAD_FILTER 0 ++#define CONFIG_BS2B_FILTER 0 ++#define CONFIG_CHROMABER_VULKAN_FILTER 0 ++#define CONFIG_CHANNELMAP_FILTER 0 ++#define CONFIG_CHANNELSPLIT_FILTER 0 ++#define CONFIG_CHORUS_FILTER 0 ++#define CONFIG_COMPAND_FILTER 0 ++#define CONFIG_COMPENSATIONDELAY_FILTER 0 ++#define CONFIG_CROSSFEED_FILTER 0 ++#define CONFIG_CRYSTALIZER_FILTER 0 ++#define CONFIG_DCSHIFT_FILTER 0 ++#define CONFIG_DEESSER_FILTER 0 ++#define CONFIG_DRMETER_FILTER 0 ++#define CONFIG_DYNAUDNORM_FILTER 0 ++#define CONFIG_EARWAX_FILTER 0 ++#define CONFIG_EBUR128_FILTER 0 ++#define CONFIG_EQUALIZER_FILTER 0 ++#define CONFIG_EXTRASTEREO_FILTER 0 ++#define CONFIG_FIREQUALIZER_FILTER 0 ++#define CONFIG_FLANGER_FILTER 0 ++#define CONFIG_HAAS_FILTER 0 ++#define CONFIG_HDCD_FILTER 0 ++#define CONFIG_HEADPHONE_FILTER 0 ++#define CONFIG_HIGHPASS_FILTER 0 ++#define CONFIG_HIGHSHELF_FILTER 0 ++#define CONFIG_JOIN_FILTER 0 ++#define CONFIG_LADSPA_FILTER 0 ++#define CONFIG_LOUDNORM_FILTER 0 ++#define CONFIG_LOWPASS_FILTER 0 ++#define CONFIG_LOWSHELF_FILTER 0 ++#define CONFIG_LV2_FILTER 0 ++#define CONFIG_MCOMPAND_FILTER 0 ++#define CONFIG_PAN_FILTER 0 ++#define CONFIG_REPLAYGAIN_FILTER 0 ++#define CONFIG_RESAMPLE_FILTER 0 ++#define CONFIG_RUBBERBAND_FILTER 0 ++#define CONFIG_SIDECHAINCOMPRESS_FILTER 0 ++#define CONFIG_SIDECHAINGATE_FILTER 0 ++#define CONFIG_SILENCEDETECT_FILTER 0 ++#define CONFIG_SILENCEREMOVE_FILTER 0 ++#define CONFIG_SOFALIZER_FILTER 0 ++#define CONFIG_STEREOTOOLS_FILTER 0 ++#define CONFIG_STEREOWIDEN_FILTER 0 
++#define CONFIG_SUPEREQUALIZER_FILTER 0 ++#define CONFIG_SURROUND_FILTER 0 ++#define CONFIG_TREBLE_FILTER 0 ++#define CONFIG_TREMOLO_FILTER 0 ++#define CONFIG_VIBRATO_FILTER 0 ++#define CONFIG_VOLUME_FILTER 0 ++#define CONFIG_VOLUMEDETECT_FILTER 0 ++#define CONFIG_AEVALSRC_FILTER 0 ++#define CONFIG_AFIRSRC_FILTER 0 ++#define CONFIG_ANOISESRC_FILTER 0 ++#define CONFIG_ANULLSRC_FILTER 0 ++#define CONFIG_FLITE_FILTER 0 ++#define CONFIG_HILBERT_FILTER 0 ++#define CONFIG_SINC_FILTER 0 ++#define CONFIG_SINE_FILTER 0 ++#define CONFIG_ANULLSINK_FILTER 0 ++#define CONFIG_ADDROI_FILTER 0 ++#define CONFIG_ALPHAEXTRACT_FILTER 0 ++#define CONFIG_ALPHAMERGE_FILTER 0 ++#define CONFIG_AMPLIFY_FILTER 0 ++#define CONFIG_ASS_FILTER 0 ++#define CONFIG_ATADENOISE_FILTER 0 ++#define CONFIG_AVGBLUR_FILTER 0 ++#define CONFIG_AVGBLUR_OPENCL_FILTER 0 ++#define CONFIG_AVGBLUR_VULKAN_FILTER 0 ++#define CONFIG_BBOX_FILTER 0 ++#define CONFIG_BENCH_FILTER 0 ++#define CONFIG_BILATERAL_FILTER 0 ++#define CONFIG_BITPLANENOISE_FILTER 0 ++#define CONFIG_BLACKDETECT_FILTER 0 ++#define CONFIG_BLACKFRAME_FILTER 0 ++#define CONFIG_BLEND_FILTER 0 ++#define CONFIG_BM3D_FILTER 0 ++#define CONFIG_BOXBLUR_FILTER 0 ++#define CONFIG_BOXBLUR_OPENCL_FILTER 0 ++#define CONFIG_BWDIF_FILTER 0 ++#define CONFIG_CAS_FILTER 0 ++#define CONFIG_CHROMAHOLD_FILTER 0 ++#define CONFIG_CHROMAKEY_FILTER 0 ++#define CONFIG_CHROMASHIFT_FILTER 0 ++#define CONFIG_CIESCOPE_FILTER 0 ++#define CONFIG_CODECVIEW_FILTER 0 ++#define CONFIG_COLORBALANCE_FILTER 0 ++#define CONFIG_COLORCHANNELMIXER_FILTER 0 ++#define CONFIG_COLORKEY_FILTER 0 ++#define CONFIG_COLORKEY_OPENCL_FILTER 0 ++#define CONFIG_COLORHOLD_FILTER 0 ++#define CONFIG_COLORLEVELS_FILTER 0 ++#define CONFIG_COLORMATRIX_FILTER 0 ++#define CONFIG_COLORSPACE_FILTER 0 ++#define CONFIG_CONVOLUTION_FILTER 0 ++#define CONFIG_CONVOLUTION_OPENCL_FILTER 0 ++#define CONFIG_CONVOLVE_FILTER 0 ++#define CONFIG_COPY_FILTER 0 ++#define CONFIG_COREIMAGE_FILTER 0 ++#define CONFIG_COVER_RECT_FILTER 0 ++#define CONFIG_CROP_FILTER 0 ++#define CONFIG_CROPDETECT_FILTER 0 ++#define CONFIG_CUE_FILTER 0 ++#define CONFIG_CURVES_FILTER 0 ++#define CONFIG_DATASCOPE_FILTER 0 ++#define CONFIG_DCTDNOIZ_FILTER 0 ++#define CONFIG_DEBAND_FILTER 0 ++#define CONFIG_DEBLOCK_FILTER 0 ++#define CONFIG_DECIMATE_FILTER 0 ++#define CONFIG_DECONVOLVE_FILTER 0 ++#define CONFIG_DEDOT_FILTER 0 ++#define CONFIG_DEFLATE_FILTER 0 ++#define CONFIG_DEFLICKER_FILTER 0 ++#define CONFIG_DEINTERLACE_QSV_FILTER 0 ++#define CONFIG_DEINTERLACE_VAAPI_FILTER 0 ++#define CONFIG_DEJUDDER_FILTER 0 ++#define CONFIG_DELOGO_FILTER 0 ++#define CONFIG_DENOISE_VAAPI_FILTER 0 ++#define CONFIG_DERAIN_FILTER 0 ++#define CONFIG_DESHAKE_FILTER 0 ++#define CONFIG_DESHAKE_OPENCL_FILTER 0 ++#define CONFIG_DESPILL_FILTER 0 ++#define CONFIG_DETELECINE_FILTER 0 ++#define CONFIG_DILATION_FILTER 0 ++#define CONFIG_DILATION_OPENCL_FILTER 0 ++#define CONFIG_DISPLACE_FILTER 0 ++#define CONFIG_DNN_PROCESSING_FILTER 0 ++#define CONFIG_DOUBLEWEAVE_FILTER 0 ++#define CONFIG_DRAWBOX_FILTER 0 ++#define CONFIG_DRAWGRAPH_FILTER 0 ++#define CONFIG_DRAWGRID_FILTER 0 ++#define CONFIG_DRAWTEXT_FILTER 0 ++#define CONFIG_EDGEDETECT_FILTER 0 ++#define CONFIG_ELBG_FILTER 0 ++#define CONFIG_ENTROPY_FILTER 0 ++#define CONFIG_EQ_FILTER 0 ++#define CONFIG_EROSION_FILTER 0 ++#define CONFIG_EROSION_OPENCL_FILTER 0 ++#define CONFIG_EXTRACTPLANES_FILTER 0 ++#define CONFIG_FADE_FILTER 0 ++#define CONFIG_FFTDNOIZ_FILTER 0 ++#define CONFIG_FFTFILT_FILTER 0 ++#define CONFIG_FIELD_FILTER 0 ++#define 
CONFIG_FIELDHINT_FILTER 0 ++#define CONFIG_FIELDMATCH_FILTER 0 ++#define CONFIG_FIELDORDER_FILTER 0 ++#define CONFIG_FILLBORDERS_FILTER 0 ++#define CONFIG_FIND_RECT_FILTER 0 ++#define CONFIG_FLOODFILL_FILTER 0 ++#define CONFIG_FORMAT_FILTER 0 ++#define CONFIG_FPS_FILTER 0 ++#define CONFIG_FRAMEPACK_FILTER 0 ++#define CONFIG_FRAMERATE_FILTER 0 ++#define CONFIG_FRAMESTEP_FILTER 0 ++#define CONFIG_FREEZEDETECT_FILTER 0 ++#define CONFIG_FREEZEFRAMES_FILTER 0 ++#define CONFIG_FREI0R_FILTER 0 ++#define CONFIG_FSPP_FILTER 0 ++#define CONFIG_GBLUR_FILTER 0 ++#define CONFIG_GEQ_FILTER 0 ++#define CONFIG_GRADFUN_FILTER 0 ++#define CONFIG_GRAPHMONITOR_FILTER 0 ++#define CONFIG_GREYEDGE_FILTER 0 ++#define CONFIG_HALDCLUT_FILTER 0 ++#define CONFIG_HFLIP_FILTER 0 ++#define CONFIG_HISTEQ_FILTER 0 ++#define CONFIG_HISTOGRAM_FILTER 0 ++#define CONFIG_HQDN3D_FILTER 0 ++#define CONFIG_HQX_FILTER 0 ++#define CONFIG_HSTACK_FILTER 0 ++#define CONFIG_HUE_FILTER 0 ++#define CONFIG_HWDOWNLOAD_FILTER 0 ++#define CONFIG_HWMAP_FILTER 0 ++#define CONFIG_HWUPLOAD_FILTER 0 ++#define CONFIG_HWUPLOAD_CUDA_FILTER 0 ++#define CONFIG_HYSTERESIS_FILTER 0 ++#define CONFIG_IDET_FILTER 0 ++#define CONFIG_IL_FILTER 0 ++#define CONFIG_INFLATE_FILTER 0 ++#define CONFIG_INTERLACE_FILTER 0 ++#define CONFIG_INTERLEAVE_FILTER 0 ++#define CONFIG_KERNDEINT_FILTER 0 ++#define CONFIG_LAGFUN_FILTER 0 ++#define CONFIG_LENSCORRECTION_FILTER 0 ++#define CONFIG_LENSFUN_FILTER 0 ++#define CONFIG_LIBVMAF_FILTER 0 ++#define CONFIG_LIMITER_FILTER 0 ++#define CONFIG_LOOP_FILTER 0 ++#define CONFIG_LUMAKEY_FILTER 0 ++#define CONFIG_LUT_FILTER 0 ++#define CONFIG_LUT1D_FILTER 0 ++#define CONFIG_LUT2_FILTER 0 ++#define CONFIG_LUT3D_FILTER 0 ++#define CONFIG_LUTRGB_FILTER 0 ++#define CONFIG_LUTYUV_FILTER 0 ++#define CONFIG_MASKEDCLAMP_FILTER 0 ++#define CONFIG_MASKEDMAX_FILTER 0 ++#define CONFIG_MASKEDMERGE_FILTER 0 ++#define CONFIG_MASKEDMIN_FILTER 0 ++#define CONFIG_MASKEDTHRESHOLD_FILTER 0 ++#define CONFIG_MASKFUN_FILTER 0 ++#define CONFIG_MCDEINT_FILTER 0 ++#define CONFIG_MEDIAN_FILTER 0 ++#define CONFIG_MERGEPLANES_FILTER 0 ++#define CONFIG_MESTIMATE_FILTER 0 ++#define CONFIG_METADATA_FILTER 0 ++#define CONFIG_MIDEQUALIZER_FILTER 0 ++#define CONFIG_MINTERPOLATE_FILTER 0 ++#define CONFIG_MIX_FILTER 0 ++#define CONFIG_MPDECIMATE_FILTER 0 ++#define CONFIG_NEGATE_FILTER 0 ++#define CONFIG_NLMEANS_FILTER 0 ++#define CONFIG_NLMEANS_OPENCL_FILTER 0 ++#define CONFIG_NNEDI_FILTER 0 ++#define CONFIG_NOFORMAT_FILTER 0 ++#define CONFIG_NOISE_FILTER 0 ++#define CONFIG_NORMALIZE_FILTER 0 ++#define CONFIG_NULL_FILTER 0 ++#define CONFIG_OCR_FILTER 0 ++#define CONFIG_OCV_FILTER 0 ++#define CONFIG_OSCILLOSCOPE_FILTER 0 ++#define CONFIG_OVERLAY_FILTER 0 ++#define CONFIG_OVERLAY_OPENCL_FILTER 0 ++#define CONFIG_OVERLAY_QSV_FILTER 0 ++#define CONFIG_OVERLAY_VULKAN_FILTER 0 ++#define CONFIG_OVERLAY_CUDA_FILTER 0 ++#define CONFIG_OWDENOISE_FILTER 0 ++#define CONFIG_PAD_FILTER 0 ++#define CONFIG_PAD_OPENCL_FILTER 0 ++#define CONFIG_PALETTEGEN_FILTER 0 ++#define CONFIG_PALETTEUSE_FILTER 0 ++#define CONFIG_PERMS_FILTER 0 ++#define CONFIG_PERSPECTIVE_FILTER 0 ++#define CONFIG_PHASE_FILTER 0 ++#define CONFIG_PHOTOSENSITIVITY_FILTER 0 ++#define CONFIG_PIXDESCTEST_FILTER 0 ++#define CONFIG_PIXSCOPE_FILTER 0 ++#define CONFIG_PP_FILTER 0 ++#define CONFIG_PP7_FILTER 0 ++#define CONFIG_PREMULTIPLY_FILTER 0 ++#define CONFIG_PREWITT_FILTER 0 ++#define CONFIG_PREWITT_OPENCL_FILTER 0 ++#define CONFIG_PROCAMP_VAAPI_FILTER 0 ++#define CONFIG_PROGRAM_OPENCL_FILTER 0 ++#define 
CONFIG_PSEUDOCOLOR_FILTER 0 ++#define CONFIG_PSNR_FILTER 0 ++#define CONFIG_PULLUP_FILTER 0 ++#define CONFIG_QP_FILTER 0 ++#define CONFIG_RANDOM_FILTER 0 ++#define CONFIG_READEIA608_FILTER 0 ++#define CONFIG_READVITC_FILTER 0 ++#define CONFIG_REALTIME_FILTER 0 ++#define CONFIG_REMAP_FILTER 0 ++#define CONFIG_REMOVEGRAIN_FILTER 0 ++#define CONFIG_REMOVELOGO_FILTER 0 ++#define CONFIG_REPEATFIELDS_FILTER 0 ++#define CONFIG_REVERSE_FILTER 0 ++#define CONFIG_RGBASHIFT_FILTER 0 ++#define CONFIG_ROBERTS_FILTER 0 ++#define CONFIG_ROBERTS_OPENCL_FILTER 0 ++#define CONFIG_ROTATE_FILTER 0 ++#define CONFIG_SAB_FILTER 0 ++#define CONFIG_SCALE_FILTER 0 ++#define CONFIG_SCALE_CUDA_FILTER 0 ++#define CONFIG_SCALE_NPP_FILTER 0 ++#define CONFIG_SCALE_QSV_FILTER 0 ++#define CONFIG_SCALE_VAAPI_FILTER 0 ++#define CONFIG_SCALE_VULKAN_FILTER 0 ++#define CONFIG_SCALE2REF_FILTER 0 ++#define CONFIG_SCROLL_FILTER 0 ++#define CONFIG_SELECT_FILTER 0 ++#define CONFIG_SELECTIVECOLOR_FILTER 0 ++#define CONFIG_SENDCMD_FILTER 0 ++#define CONFIG_SEPARATEFIELDS_FILTER 0 ++#define CONFIG_SETDAR_FILTER 0 ++#define CONFIG_SETFIELD_FILTER 0 ++#define CONFIG_SETPARAMS_FILTER 0 ++#define CONFIG_SETPTS_FILTER 0 ++#define CONFIG_SETRANGE_FILTER 0 ++#define CONFIG_SETSAR_FILTER 0 ++#define CONFIG_SETTB_FILTER 0 ++#define CONFIG_SHARPNESS_VAAPI_FILTER 0 ++#define CONFIG_SHOWINFO_FILTER 0 ++#define CONFIG_SHOWPALETTE_FILTER 0 ++#define CONFIG_SHUFFLEFRAMES_FILTER 0 ++#define CONFIG_SHUFFLEPLANES_FILTER 0 ++#define CONFIG_SIDEDATA_FILTER 0 ++#define CONFIG_SIGNALSTATS_FILTER 0 ++#define CONFIG_SIGNATURE_FILTER 0 ++#define CONFIG_SMARTBLUR_FILTER 0 ++#define CONFIG_SOBEL_FILTER 0 ++#define CONFIG_SOBEL_OPENCL_FILTER 0 ++#define CONFIG_SPLIT_FILTER 0 ++#define CONFIG_SPP_FILTER 0 ++#define CONFIG_SR_FILTER 0 ++#define CONFIG_SSIM_FILTER 0 ++#define CONFIG_STEREO3D_FILTER 0 ++#define CONFIG_STREAMSELECT_FILTER 0 ++#define CONFIG_SUBTITLES_FILTER 0 ++#define CONFIG_SUPER2XSAI_FILTER 0 ++#define CONFIG_SWAPRECT_FILTER 0 ++#define CONFIG_SWAPUV_FILTER 0 ++#define CONFIG_TBLEND_FILTER 0 ++#define CONFIG_TELECINE_FILTER 0 ++#define CONFIG_THISTOGRAM_FILTER 0 ++#define CONFIG_THRESHOLD_FILTER 0 ++#define CONFIG_THUMBNAIL_FILTER 0 ++#define CONFIG_THUMBNAIL_CUDA_FILTER 0 ++#define CONFIG_TILE_FILTER 0 ++#define CONFIG_TINTERLACE_FILTER 0 ++#define CONFIG_TLUT2_FILTER 0 ++#define CONFIG_TMEDIAN_FILTER 0 ++#define CONFIG_TMIX_FILTER 0 ++#define CONFIG_TONEMAP_FILTER 0 ++#define CONFIG_TONEMAP_OPENCL_FILTER 0 ++#define CONFIG_TONEMAP_VAAPI_FILTER 0 ++#define CONFIG_TPAD_FILTER 0 ++#define CONFIG_TRANSPOSE_FILTER 0 ++#define CONFIG_TRANSPOSE_NPP_FILTER 0 ++#define CONFIG_TRANSPOSE_OPENCL_FILTER 0 ++#define CONFIG_TRANSPOSE_VAAPI_FILTER 0 ++#define CONFIG_TRIM_FILTER 0 ++#define CONFIG_UNPREMULTIPLY_FILTER 0 ++#define CONFIG_UNSHARP_FILTER 0 ++#define CONFIG_UNSHARP_OPENCL_FILTER 0 ++#define CONFIG_USPP_FILTER 0 ++#define CONFIG_V360_FILTER 0 ++#define CONFIG_VAGUEDENOISER_FILTER 0 ++#define CONFIG_VECTORSCOPE_FILTER 0 ++#define CONFIG_VFLIP_FILTER 0 ++#define CONFIG_VFRDET_FILTER 0 ++#define CONFIG_VIBRANCE_FILTER 0 ++#define CONFIG_VIDSTABDETECT_FILTER 0 ++#define CONFIG_VIDSTABTRANSFORM_FILTER 0 ++#define CONFIG_VIGNETTE_FILTER 0 ++#define CONFIG_VMAFMOTION_FILTER 0 ++#define CONFIG_VPP_QSV_FILTER 0 ++#define CONFIG_VSTACK_FILTER 0 ++#define CONFIG_W3FDIF_FILTER 0 ++#define CONFIG_WAVEFORM_FILTER 0 ++#define CONFIG_WEAVE_FILTER 0 ++#define CONFIG_XBR_FILTER 0 ++#define CONFIG_XFADE_FILTER 0 ++#define CONFIG_XFADE_OPENCL_FILTER 0 ++#define 
CONFIG_XMEDIAN_FILTER 0 ++#define CONFIG_XSTACK_FILTER 0 ++#define CONFIG_YADIF_FILTER 0 ++#define CONFIG_YADIF_CUDA_FILTER 0 ++#define CONFIG_YAEPBLUR_FILTER 0 ++#define CONFIG_ZMQ_FILTER 0 ++#define CONFIG_ZOOMPAN_FILTER 0 ++#define CONFIG_ZSCALE_FILTER 0 ++#define CONFIG_ALLRGB_FILTER 0 ++#define CONFIG_ALLYUV_FILTER 0 ++#define CONFIG_CELLAUTO_FILTER 0 ++#define CONFIG_COLOR_FILTER 0 ++#define CONFIG_COREIMAGESRC_FILTER 0 ++#define CONFIG_FREI0R_SRC_FILTER 0 ++#define CONFIG_HALDCLUTSRC_FILTER 0 ++#define CONFIG_LIFE_FILTER 0 ++#define CONFIG_MANDELBROT_FILTER 0 ++#define CONFIG_MPTESTSRC_FILTER 0 ++#define CONFIG_NULLSRC_FILTER 0 ++#define CONFIG_OPENCLSRC_FILTER 0 ++#define CONFIG_PAL75BARS_FILTER 0 ++#define CONFIG_PAL100BARS_FILTER 0 ++#define CONFIG_RGBTESTSRC_FILTER 0 ++#define CONFIG_SIERPINSKI_FILTER 0 ++#define CONFIG_SMPTEBARS_FILTER 0 ++#define CONFIG_SMPTEHDBARS_FILTER 0 ++#define CONFIG_TESTSRC_FILTER 0 ++#define CONFIG_TESTSRC2_FILTER 0 ++#define CONFIG_YUVTESTSRC_FILTER 0 ++#define CONFIG_NULLSINK_FILTER 0 ++#define CONFIG_ABITSCOPE_FILTER 0 ++#define CONFIG_ADRAWGRAPH_FILTER 0 ++#define CONFIG_AGRAPHMONITOR_FILTER 0 ++#define CONFIG_AHISTOGRAM_FILTER 0 ++#define CONFIG_APHASEMETER_FILTER 0 ++#define CONFIG_AVECTORSCOPE_FILTER 0 ++#define CONFIG_CONCAT_FILTER 0 ++#define CONFIG_SHOWCQT_FILTER 0 ++#define CONFIG_SHOWFREQS_FILTER 0 ++#define CONFIG_SHOWSPATIAL_FILTER 0 ++#define CONFIG_SHOWSPECTRUM_FILTER 0 ++#define CONFIG_SHOWSPECTRUMPIC_FILTER 0 ++#define CONFIG_SHOWVOLUME_FILTER 0 ++#define CONFIG_SHOWWAVES_FILTER 0 ++#define CONFIG_SHOWWAVESPIC_FILTER 0 ++#define CONFIG_SPECTRUMSYNTH_FILTER 0 ++#define CONFIG_AMOVIE_FILTER 0 ++#define CONFIG_MOVIE_FILTER 0 ++#define CONFIG_AFIFO_FILTER 0 ++#define CONFIG_FIFO_FILTER 0 ++#define CONFIG_AA_DEMUXER 0 ++#define CONFIG_AAC_DEMUXER 0 ++#define CONFIG_AC3_DEMUXER 0 ++#define CONFIG_ACM_DEMUXER 0 ++#define CONFIG_ACT_DEMUXER 0 ++#define CONFIG_ADF_DEMUXER 0 ++#define CONFIG_ADP_DEMUXER 0 ++#define CONFIG_ADS_DEMUXER 0 ++#define CONFIG_ADX_DEMUXER 0 ++#define CONFIG_AEA_DEMUXER 0 ++#define CONFIG_AFC_DEMUXER 0 ++#define CONFIG_AIFF_DEMUXER 0 ++#define CONFIG_AIX_DEMUXER 0 ++#define CONFIG_ALP_DEMUXER 0 ++#define CONFIG_AMR_DEMUXER 0 ++#define CONFIG_AMRNB_DEMUXER 0 ++#define CONFIG_AMRWB_DEMUXER 0 ++#define CONFIG_ANM_DEMUXER 0 ++#define CONFIG_APC_DEMUXER 0 ++#define CONFIG_APE_DEMUXER 0 ++#define CONFIG_APM_DEMUXER 0 ++#define CONFIG_APNG_DEMUXER 0 ++#define CONFIG_APTX_DEMUXER 0 ++#define CONFIG_APTX_HD_DEMUXER 0 ++#define CONFIG_AQTITLE_DEMUXER 0 ++#define CONFIG_ARGO_ASF_DEMUXER 0 ++#define CONFIG_ASF_DEMUXER 0 ++#define CONFIG_ASF_O_DEMUXER 0 ++#define CONFIG_ASS_DEMUXER 0 ++#define CONFIG_AST_DEMUXER 0 ++#define CONFIG_AU_DEMUXER 0 ++#define CONFIG_AV1_DEMUXER 0 ++#define CONFIG_AVI_DEMUXER 0 ++#define CONFIG_AVISYNTH_DEMUXER 0 ++#define CONFIG_AVR_DEMUXER 0 ++#define CONFIG_AVS_DEMUXER 0 ++#define CONFIG_AVS2_DEMUXER 0 ++#define CONFIG_BETHSOFTVID_DEMUXER 0 ++#define CONFIG_BFI_DEMUXER 0 ++#define CONFIG_BINTEXT_DEMUXER 0 ++#define CONFIG_BINK_DEMUXER 0 ++#define CONFIG_BIT_DEMUXER 0 ++#define CONFIG_BMV_DEMUXER 0 ++#define CONFIG_BFSTM_DEMUXER 0 ++#define CONFIG_BRSTM_DEMUXER 0 ++#define CONFIG_BOA_DEMUXER 0 ++#define CONFIG_C93_DEMUXER 0 ++#define CONFIG_CAF_DEMUXER 0 ++#define CONFIG_CAVSVIDEO_DEMUXER 0 ++#define CONFIG_CDG_DEMUXER 0 ++#define CONFIG_CDXL_DEMUXER 0 ++#define CONFIG_CINE_DEMUXER 0 ++#define CONFIG_CODEC2_DEMUXER 0 ++#define CONFIG_CODEC2RAW_DEMUXER 0 ++#define CONFIG_CONCAT_DEMUXER 0 ++#define 
CONFIG_DASH_DEMUXER 0 ++#define CONFIG_DATA_DEMUXER 0 ++#define CONFIG_DAUD_DEMUXER 0 ++#define CONFIG_DCSTR_DEMUXER 0 ++#define CONFIG_DERF_DEMUXER 0 ++#define CONFIG_DFA_DEMUXER 0 ++#define CONFIG_DHAV_DEMUXER 0 ++#define CONFIG_DIRAC_DEMUXER 0 ++#define CONFIG_DNXHD_DEMUXER 0 ++#define CONFIG_DSF_DEMUXER 0 ++#define CONFIG_DSICIN_DEMUXER 0 ++#define CONFIG_DSS_DEMUXER 0 ++#define CONFIG_DTS_DEMUXER 0 ++#define CONFIG_DTSHD_DEMUXER 0 ++#define CONFIG_DV_DEMUXER 0 ++#define CONFIG_DVBSUB_DEMUXER 0 ++#define CONFIG_DVBTXT_DEMUXER 0 ++#define CONFIG_DXA_DEMUXER 0 ++#define CONFIG_EA_DEMUXER 0 ++#define CONFIG_EA_CDATA_DEMUXER 0 ++#define CONFIG_EAC3_DEMUXER 0 ++#define CONFIG_EPAF_DEMUXER 0 ++#define CONFIG_FFMETADATA_DEMUXER 0 ++#define CONFIG_FILMSTRIP_DEMUXER 0 ++#define CONFIG_FITS_DEMUXER 0 ++#define CONFIG_FLAC_DEMUXER 1 ++#define CONFIG_FLIC_DEMUXER 0 ++#define CONFIG_FLV_DEMUXER 0 ++#define CONFIG_LIVE_FLV_DEMUXER 0 ++#define CONFIG_FOURXM_DEMUXER 0 ++#define CONFIG_FRM_DEMUXER 0 ++#define CONFIG_FSB_DEMUXER 0 ++#define CONFIG_FWSE_DEMUXER 0 ++#define CONFIG_G722_DEMUXER 0 ++#define CONFIG_G723_1_DEMUXER 0 ++#define CONFIG_G726_DEMUXER 0 ++#define CONFIG_G726LE_DEMUXER 0 ++#define CONFIG_G729_DEMUXER 0 ++#define CONFIG_GDV_DEMUXER 0 ++#define CONFIG_GENH_DEMUXER 0 ++#define CONFIG_GIF_DEMUXER 0 ++#define CONFIG_GSM_DEMUXER 0 ++#define CONFIG_GXF_DEMUXER 0 ++#define CONFIG_H261_DEMUXER 0 ++#define CONFIG_H263_DEMUXER 0 ++#define CONFIG_H264_DEMUXER 0 ++#define CONFIG_HCA_DEMUXER 0 ++#define CONFIG_HCOM_DEMUXER 0 ++#define CONFIG_HEVC_DEMUXER 0 ++#define CONFIG_HLS_DEMUXER 0 ++#define CONFIG_HNM_DEMUXER 0 ++#define CONFIG_ICO_DEMUXER 0 ++#define CONFIG_IDCIN_DEMUXER 0 ++#define CONFIG_IDF_DEMUXER 0 ++#define CONFIG_IFF_DEMUXER 0 ++#define CONFIG_IFV_DEMUXER 0 ++#define CONFIG_ILBC_DEMUXER 0 ++#define CONFIG_IMAGE2_DEMUXER 0 ++#define CONFIG_IMAGE2PIPE_DEMUXER 0 ++#define CONFIG_IMAGE2_ALIAS_PIX_DEMUXER 0 ++#define CONFIG_IMAGE2_BRENDER_PIX_DEMUXER 0 ++#define CONFIG_INGENIENT_DEMUXER 0 ++#define CONFIG_IPMOVIE_DEMUXER 0 ++#define CONFIG_IRCAM_DEMUXER 0 ++#define CONFIG_ISS_DEMUXER 0 ++#define CONFIG_IV8_DEMUXER 0 ++#define CONFIG_IVF_DEMUXER 0 ++#define CONFIG_IVR_DEMUXER 0 ++#define CONFIG_JACOSUB_DEMUXER 0 ++#define CONFIG_JV_DEMUXER 0 ++#define CONFIG_KUX_DEMUXER 0 ++#define CONFIG_KVAG_DEMUXER 0 ++#define CONFIG_LMLM4_DEMUXER 0 ++#define CONFIG_LOAS_DEMUXER 0 ++#define CONFIG_LRC_DEMUXER 0 ++#define CONFIG_LVF_DEMUXER 0 ++#define CONFIG_LXF_DEMUXER 0 ++#define CONFIG_M4V_DEMUXER 0 ++#define CONFIG_MATROSKA_DEMUXER 1 ++#define CONFIG_MGSTS_DEMUXER 0 ++#define CONFIG_MICRODVD_DEMUXER 0 ++#define CONFIG_MJPEG_DEMUXER 0 ++#define CONFIG_MJPEG_2000_DEMUXER 0 ++#define CONFIG_MLP_DEMUXER 0 ++#define CONFIG_MLV_DEMUXER 0 ++#define CONFIG_MM_DEMUXER 0 ++#define CONFIG_MMF_DEMUXER 0 ++#define CONFIG_MOV_DEMUXER 1 ++#define CONFIG_MP3_DEMUXER 1 ++#define CONFIG_MPC_DEMUXER 0 ++#define CONFIG_MPC8_DEMUXER 0 ++#define CONFIG_MPEGPS_DEMUXER 0 ++#define CONFIG_MPEGTS_DEMUXER 0 ++#define CONFIG_MPEGTSRAW_DEMUXER 0 ++#define CONFIG_MPEGVIDEO_DEMUXER 0 ++#define CONFIG_MPJPEG_DEMUXER 0 ++#define CONFIG_MPL2_DEMUXER 0 ++#define CONFIG_MPSUB_DEMUXER 0 ++#define CONFIG_MSF_DEMUXER 0 ++#define CONFIG_MSNWC_TCP_DEMUXER 0 ++#define CONFIG_MTAF_DEMUXER 0 ++#define CONFIG_MTV_DEMUXER 0 ++#define CONFIG_MUSX_DEMUXER 0 ++#define CONFIG_MV_DEMUXER 0 ++#define CONFIG_MVI_DEMUXER 0 ++#define CONFIG_MXF_DEMUXER 0 ++#define CONFIG_MXG_DEMUXER 0 ++#define CONFIG_NC_DEMUXER 0 ++#define CONFIG_NISTSPHERE_DEMUXER 0 
++#define CONFIG_NSP_DEMUXER 0 ++#define CONFIG_NSV_DEMUXER 0 ++#define CONFIG_NUT_DEMUXER 0 ++#define CONFIG_NUV_DEMUXER 0 ++#define CONFIG_OGG_DEMUXER 1 ++#define CONFIG_OMA_DEMUXER 0 ++#define CONFIG_PAF_DEMUXER 0 ++#define CONFIG_PCM_ALAW_DEMUXER 0 ++#define CONFIG_PCM_MULAW_DEMUXER 0 ++#define CONFIG_PCM_VIDC_DEMUXER 0 ++#define CONFIG_PCM_F64BE_DEMUXER 0 ++#define CONFIG_PCM_F64LE_DEMUXER 0 ++#define CONFIG_PCM_F32BE_DEMUXER 0 ++#define CONFIG_PCM_F32LE_DEMUXER 0 ++#define CONFIG_PCM_S32BE_DEMUXER 0 ++#define CONFIG_PCM_S32LE_DEMUXER 0 ++#define CONFIG_PCM_S24BE_DEMUXER 0 ++#define CONFIG_PCM_S24LE_DEMUXER 0 ++#define CONFIG_PCM_S16BE_DEMUXER 0 ++#define CONFIG_PCM_S16LE_DEMUXER 0 ++#define CONFIG_PCM_S8_DEMUXER 0 ++#define CONFIG_PCM_U32BE_DEMUXER 0 ++#define CONFIG_PCM_U32LE_DEMUXER 0 ++#define CONFIG_PCM_U24BE_DEMUXER 0 ++#define CONFIG_PCM_U24LE_DEMUXER 0 ++#define CONFIG_PCM_U16BE_DEMUXER 0 ++#define CONFIG_PCM_U16LE_DEMUXER 0 ++#define CONFIG_PCM_U8_DEMUXER 0 ++#define CONFIG_PJS_DEMUXER 0 ++#define CONFIG_PMP_DEMUXER 0 ++#define CONFIG_PVA_DEMUXER 0 ++#define CONFIG_PVF_DEMUXER 0 ++#define CONFIG_QCP_DEMUXER 0 ++#define CONFIG_R3D_DEMUXER 0 ++#define CONFIG_RAWVIDEO_DEMUXER 0 ++#define CONFIG_REALTEXT_DEMUXER 0 ++#define CONFIG_REDSPARK_DEMUXER 0 ++#define CONFIG_RL2_DEMUXER 0 ++#define CONFIG_RM_DEMUXER 0 ++#define CONFIG_ROQ_DEMUXER 0 ++#define CONFIG_RPL_DEMUXER 0 ++#define CONFIG_RSD_DEMUXER 0 ++#define CONFIG_RSO_DEMUXER 0 ++#define CONFIG_RTP_DEMUXER 0 ++#define CONFIG_RTSP_DEMUXER 0 ++#define CONFIG_S337M_DEMUXER 0 ++#define CONFIG_SAMI_DEMUXER 0 ++#define CONFIG_SAP_DEMUXER 0 ++#define CONFIG_SBC_DEMUXER 0 ++#define CONFIG_SBG_DEMUXER 0 ++#define CONFIG_SCC_DEMUXER 0 ++#define CONFIG_SDP_DEMUXER 0 ++#define CONFIG_SDR2_DEMUXER 0 ++#define CONFIG_SDS_DEMUXER 0 ++#define CONFIG_SDX_DEMUXER 0 ++#define CONFIG_SEGAFILM_DEMUXER 0 ++#define CONFIG_SER_DEMUXER 0 ++#define CONFIG_SHORTEN_DEMUXER 0 ++#define CONFIG_SIFF_DEMUXER 0 ++#define CONFIG_SLN_DEMUXER 0 ++#define CONFIG_SMACKER_DEMUXER 0 ++#define CONFIG_SMJPEG_DEMUXER 0 ++#define CONFIG_SMUSH_DEMUXER 0 ++#define CONFIG_SOL_DEMUXER 0 ++#define CONFIG_SOX_DEMUXER 0 ++#define CONFIG_SPDIF_DEMUXER 0 ++#define CONFIG_SRT_DEMUXER 0 ++#define CONFIG_STR_DEMUXER 0 ++#define CONFIG_STL_DEMUXER 0 ++#define CONFIG_SUBVIEWER1_DEMUXER 0 ++#define CONFIG_SUBVIEWER_DEMUXER 0 ++#define CONFIG_SUP_DEMUXER 0 ++#define CONFIG_SVAG_DEMUXER 0 ++#define CONFIG_SWF_DEMUXER 0 ++#define CONFIG_TAK_DEMUXER 0 ++#define CONFIG_TEDCAPTIONS_DEMUXER 0 ++#define CONFIG_THP_DEMUXER 0 ++#define CONFIG_THREEDOSTR_DEMUXER 0 ++#define CONFIG_TIERTEXSEQ_DEMUXER 0 ++#define CONFIG_TMV_DEMUXER 0 ++#define CONFIG_TRUEHD_DEMUXER 0 ++#define CONFIG_TTA_DEMUXER 0 ++#define CONFIG_TXD_DEMUXER 0 ++#define CONFIG_TTY_DEMUXER 0 ++#define CONFIG_TY_DEMUXER 0 ++#define CONFIG_V210_DEMUXER 0 ++#define CONFIG_V210X_DEMUXER 0 ++#define CONFIG_VAG_DEMUXER 0 ++#define CONFIG_VC1_DEMUXER 0 ++#define CONFIG_VC1T_DEMUXER 0 ++#define CONFIG_VIVIDAS_DEMUXER 0 ++#define CONFIG_VIVO_DEMUXER 0 ++#define CONFIG_VMD_DEMUXER 0 ++#define CONFIG_VOBSUB_DEMUXER 0 ++#define CONFIG_VOC_DEMUXER 0 ++#define CONFIG_VPK_DEMUXER 0 ++#define CONFIG_VPLAYER_DEMUXER 0 ++#define CONFIG_VQF_DEMUXER 0 ++#define CONFIG_W64_DEMUXER 0 ++#define CONFIG_WAV_DEMUXER 1 ++#define CONFIG_WC3_DEMUXER 0 ++#define CONFIG_WEBM_DASH_MANIFEST_DEMUXER 0 ++#define CONFIG_WEBVTT_DEMUXER 0 ++#define CONFIG_WSAUD_DEMUXER 0 ++#define CONFIG_WSD_DEMUXER 0 ++#define CONFIG_WSVQA_DEMUXER 0 ++#define CONFIG_WTV_DEMUXER 0 
++#define CONFIG_WVE_DEMUXER 0 ++#define CONFIG_WV_DEMUXER 0 ++#define CONFIG_XA_DEMUXER 0 ++#define CONFIG_XBIN_DEMUXER 0 ++#define CONFIG_XMV_DEMUXER 0 ++#define CONFIG_XVAG_DEMUXER 0 ++#define CONFIG_XWMA_DEMUXER 0 ++#define CONFIG_YOP_DEMUXER 0 ++#define CONFIG_YUV4MPEGPIPE_DEMUXER 0 ++#define CONFIG_IMAGE_BMP_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_DDS_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_DPX_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_EXR_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_GIF_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_J2K_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_JPEG_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_JPEGLS_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PAM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PBM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PCX_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PGMYUV_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PGM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PICTOR_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PNG_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PPM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PSD_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_QDRAW_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_SGI_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_SVG_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_SUNRAST_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_TIFF_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_WEBP_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_XPM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_XWD_PIPE_DEMUXER 0 ++#define CONFIG_LIBGME_DEMUXER 0 ++#define CONFIG_LIBMODPLUG_DEMUXER 0 ++#define CONFIG_LIBOPENMPT_DEMUXER 0 ++#define CONFIG_VAPOURSYNTH_DEMUXER 0 ++#define CONFIG_A64_MUXER 0 ++#define CONFIG_AC3_MUXER 0 ++#define CONFIG_ADTS_MUXER 0 ++#define CONFIG_ADX_MUXER 0 ++#define CONFIG_AIFF_MUXER 0 ++#define CONFIG_AMR_MUXER 0 ++#define CONFIG_APNG_MUXER 0 ++#define CONFIG_APTX_MUXER 0 ++#define CONFIG_APTX_HD_MUXER 0 ++#define CONFIG_ASF_MUXER 0 ++#define CONFIG_ASS_MUXER 0 ++#define CONFIG_AST_MUXER 0 ++#define CONFIG_ASF_STREAM_MUXER 0 ++#define CONFIG_AU_MUXER 0 ++#define CONFIG_AVI_MUXER 0 ++#define CONFIG_AVM2_MUXER 0 ++#define CONFIG_AVS2_MUXER 0 ++#define CONFIG_BIT_MUXER 0 ++#define CONFIG_CAF_MUXER 0 ++#define CONFIG_CAVSVIDEO_MUXER 0 ++#define CONFIG_CODEC2_MUXER 0 ++#define CONFIG_CODEC2RAW_MUXER 0 ++#define CONFIG_CRC_MUXER 0 ++#define CONFIG_DASH_MUXER 0 ++#define CONFIG_DATA_MUXER 0 ++#define CONFIG_DAUD_MUXER 0 ++#define CONFIG_DIRAC_MUXER 0 ++#define CONFIG_DNXHD_MUXER 0 ++#define CONFIG_DTS_MUXER 0 ++#define CONFIG_DV_MUXER 0 ++#define CONFIG_EAC3_MUXER 0 ++#define CONFIG_F4V_MUXER 0 ++#define CONFIG_FFMETADATA_MUXER 0 ++#define CONFIG_FIFO_MUXER 0 ++#define CONFIG_FIFO_TEST_MUXER 0 ++#define CONFIG_FILMSTRIP_MUXER 0 ++#define CONFIG_FITS_MUXER 0 ++#define CONFIG_FLAC_MUXER 0 ++#define CONFIG_FLV_MUXER 0 ++#define CONFIG_FRAMECRC_MUXER 0 ++#define CONFIG_FRAMEHASH_MUXER 0 ++#define CONFIG_FRAMEMD5_MUXER 0 ++#define CONFIG_G722_MUXER 0 ++#define CONFIG_G723_1_MUXER 0 ++#define CONFIG_G726_MUXER 0 ++#define CONFIG_G726LE_MUXER 0 ++#define CONFIG_GIF_MUXER 0 ++#define CONFIG_GSM_MUXER 0 ++#define CONFIG_GXF_MUXER 0 ++#define CONFIG_H261_MUXER 0 ++#define CONFIG_H263_MUXER 0 ++#define CONFIG_H264_MUXER 0 ++#define CONFIG_HASH_MUXER 0 ++#define CONFIG_HDS_MUXER 0 ++#define CONFIG_HEVC_MUXER 0 ++#define CONFIG_HLS_MUXER 0 ++#define CONFIG_ICO_MUXER 0 ++#define CONFIG_ILBC_MUXER 0 ++#define CONFIG_IMAGE2_MUXER 0 ++#define CONFIG_IMAGE2PIPE_MUXER 0 ++#define CONFIG_IPOD_MUXER 0 ++#define CONFIG_IRCAM_MUXER 0 ++#define CONFIG_ISMV_MUXER 0 ++#define CONFIG_IVF_MUXER 0 ++#define CONFIG_JACOSUB_MUXER 0 ++#define CONFIG_LATM_MUXER 0 ++#define 
CONFIG_LRC_MUXER 0 ++#define CONFIG_M4V_MUXER 0 ++#define CONFIG_MD5_MUXER 0 ++#define CONFIG_MATROSKA_MUXER 0 ++#define CONFIG_MATROSKA_AUDIO_MUXER 0 ++#define CONFIG_MICRODVD_MUXER 0 ++#define CONFIG_MJPEG_MUXER 0 ++#define CONFIG_MLP_MUXER 0 ++#define CONFIG_MMF_MUXER 0 ++#define CONFIG_MOV_MUXER 0 ++#define CONFIG_MP2_MUXER 0 ++#define CONFIG_MP3_MUXER 0 ++#define CONFIG_MP4_MUXER 0 ++#define CONFIG_MPEG1SYSTEM_MUXER 0 ++#define CONFIG_MPEG1VCD_MUXER 0 ++#define CONFIG_MPEG1VIDEO_MUXER 0 ++#define CONFIG_MPEG2DVD_MUXER 0 ++#define CONFIG_MPEG2SVCD_MUXER 0 ++#define CONFIG_MPEG2VIDEO_MUXER 0 ++#define CONFIG_MPEG2VOB_MUXER 0 ++#define CONFIG_MPEGTS_MUXER 0 ++#define CONFIG_MPJPEG_MUXER 0 ++#define CONFIG_MXF_MUXER 0 ++#define CONFIG_MXF_D10_MUXER 0 ++#define CONFIG_MXF_OPATOM_MUXER 0 ++#define CONFIG_NULL_MUXER 0 ++#define CONFIG_NUT_MUXER 0 ++#define CONFIG_OGA_MUXER 0 ++#define CONFIG_OGG_MUXER 0 ++#define CONFIG_OGV_MUXER 0 ++#define CONFIG_OMA_MUXER 0 ++#define CONFIG_OPUS_MUXER 0 ++#define CONFIG_PCM_ALAW_MUXER 0 ++#define CONFIG_PCM_MULAW_MUXER 0 ++#define CONFIG_PCM_VIDC_MUXER 0 ++#define CONFIG_PCM_F64BE_MUXER 0 ++#define CONFIG_PCM_F64LE_MUXER 0 ++#define CONFIG_PCM_F32BE_MUXER 0 ++#define CONFIG_PCM_F32LE_MUXER 0 ++#define CONFIG_PCM_S32BE_MUXER 0 ++#define CONFIG_PCM_S32LE_MUXER 0 ++#define CONFIG_PCM_S24BE_MUXER 0 ++#define CONFIG_PCM_S24LE_MUXER 0 ++#define CONFIG_PCM_S16BE_MUXER 0 ++#define CONFIG_PCM_S16LE_MUXER 0 ++#define CONFIG_PCM_S8_MUXER 0 ++#define CONFIG_PCM_U32BE_MUXER 0 ++#define CONFIG_PCM_U32LE_MUXER 0 ++#define CONFIG_PCM_U24BE_MUXER 0 ++#define CONFIG_PCM_U24LE_MUXER 0 ++#define CONFIG_PCM_U16BE_MUXER 0 ++#define CONFIG_PCM_U16LE_MUXER 0 ++#define CONFIG_PCM_U8_MUXER 0 ++#define CONFIG_PSP_MUXER 0 ++#define CONFIG_RAWVIDEO_MUXER 0 ++#define CONFIG_RM_MUXER 0 ++#define CONFIG_ROQ_MUXER 0 ++#define CONFIG_RSO_MUXER 0 ++#define CONFIG_RTP_MUXER 0 ++#define CONFIG_RTP_MPEGTS_MUXER 0 ++#define CONFIG_RTSP_MUXER 0 ++#define CONFIG_SAP_MUXER 0 ++#define CONFIG_SBC_MUXER 0 ++#define CONFIG_SCC_MUXER 0 ++#define CONFIG_SEGAFILM_MUXER 0 ++#define CONFIG_SEGMENT_MUXER 0 ++#define CONFIG_STREAM_SEGMENT_MUXER 0 ++#define CONFIG_SINGLEJPEG_MUXER 0 ++#define CONFIG_SMJPEG_MUXER 0 ++#define CONFIG_SMOOTHSTREAMING_MUXER 0 ++#define CONFIG_SOX_MUXER 0 ++#define CONFIG_SPX_MUXER 0 ++#define CONFIG_SPDIF_MUXER 0 ++#define CONFIG_SRT_MUXER 0 ++#define CONFIG_STREAMHASH_MUXER 0 ++#define CONFIG_SUP_MUXER 0 ++#define CONFIG_SWF_MUXER 0 ++#define CONFIG_TEE_MUXER 0 ++#define CONFIG_TG2_MUXER 0 ++#define CONFIG_TGP_MUXER 0 ++#define CONFIG_MKVTIMESTAMP_V2_MUXER 0 ++#define CONFIG_TRUEHD_MUXER 0 ++#define CONFIG_TTA_MUXER 0 ++#define CONFIG_UNCODEDFRAMECRC_MUXER 0 ++#define CONFIG_VC1_MUXER 0 ++#define CONFIG_VC1T_MUXER 0 ++#define CONFIG_VOC_MUXER 0 ++#define CONFIG_W64_MUXER 0 ++#define CONFIG_WAV_MUXER 0 ++#define CONFIG_WEBM_MUXER 0 ++#define CONFIG_WEBM_DASH_MANIFEST_MUXER 0 ++#define CONFIG_WEBM_CHUNK_MUXER 0 ++#define CONFIG_WEBP_MUXER 0 ++#define CONFIG_WEBVTT_MUXER 0 ++#define CONFIG_WTV_MUXER 0 ++#define CONFIG_WV_MUXER 0 ++#define CONFIG_YUV4MPEGPIPE_MUXER 0 ++#define CONFIG_CHROMAPRINT_MUXER 0 ++#define CONFIG_ASYNC_PROTOCOL 0 ++#define CONFIG_BLURAY_PROTOCOL 0 ++#define CONFIG_CACHE_PROTOCOL 0 ++#define CONFIG_CONCAT_PROTOCOL 0 ++#define CONFIG_CRYPTO_PROTOCOL 0 ++#define CONFIG_DATA_PROTOCOL 0 ++#define CONFIG_FFRTMPCRYPT_PROTOCOL 0 ++#define CONFIG_FFRTMPHTTP_PROTOCOL 0 ++#define CONFIG_FILE_PROTOCOL 0 ++#define CONFIG_FTP_PROTOCOL 0 ++#define CONFIG_GOPHER_PROTOCOL 0 
++#define CONFIG_HLS_PROTOCOL 0
++#define CONFIG_HTTP_PROTOCOL 0
++#define CONFIG_HTTPPROXY_PROTOCOL 0
++#define CONFIG_HTTPS_PROTOCOL 0
++#define CONFIG_ICECAST_PROTOCOL 0
++#define CONFIG_MMSH_PROTOCOL 0
++#define CONFIG_MMST_PROTOCOL 0
++#define CONFIG_MD5_PROTOCOL 0
++#define CONFIG_PIPE_PROTOCOL 0
++#define CONFIG_PROMPEG_PROTOCOL 0
++#define CONFIG_RTMP_PROTOCOL 0
++#define CONFIG_RTMPE_PROTOCOL 0
++#define CONFIG_RTMPS_PROTOCOL 0
++#define CONFIG_RTMPT_PROTOCOL 0
++#define CONFIG_RTMPTE_PROTOCOL 0
++#define CONFIG_RTMPTS_PROTOCOL 0
++#define CONFIG_RTP_PROTOCOL 0
++#define CONFIG_SCTP_PROTOCOL 0
++#define CONFIG_SRTP_PROTOCOL 0
++#define CONFIG_SUBFILE_PROTOCOL 0
++#define CONFIG_TEE_PROTOCOL 0
++#define CONFIG_TCP_PROTOCOL 0
++#define CONFIG_TLS_PROTOCOL 0
++#define CONFIG_UDP_PROTOCOL 0
++#define CONFIG_UDPLITE_PROTOCOL 0
++#define CONFIG_UNIX_PROTOCOL 0
++#define CONFIG_LIBAMQP_PROTOCOL 0
++#define CONFIG_LIBRTMP_PROTOCOL 0
++#define CONFIG_LIBRTMPE_PROTOCOL 0
++#define CONFIG_LIBRTMPS_PROTOCOL 0
++#define CONFIG_LIBRTMPT_PROTOCOL 0
++#define CONFIG_LIBRTMPTE_PROTOCOL 0
++#define CONFIG_LIBSRT_PROTOCOL 0
++#define CONFIG_LIBSSH_PROTOCOL 0
++#define CONFIG_LIBSMBCLIENT_PROTOCOL 0
++#define CONFIG_LIBZMQ_PROTOCOL 0
++#endif /* FFMPEG_CONFIG_H */
+diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavcodec/bsf_list.c b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavcodec/bsf_list.c
+new file mode 100644
+index 000000000..d31ece942
+--- /dev/null
++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavcodec/bsf_list.c
+@@ -0,0 +1,3 @@
++static const AVBitStreamFilter * const bitstream_filters[] = {
++ &ff_null_bsf,
++ NULL };
+diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavcodec/codec_list.c b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavcodec/codec_list.c
+new file mode 100644
+index 000000000..9407bd277
+--- /dev/null
++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavcodec/codec_list.c
+@@ -0,0 +1,18 @@
++static const AVCodec * const codec_list[] = {
++ &ff_theora_decoder,
++ &ff_vp3_decoder,
++ &ff_vp8_decoder,
++ &ff_flac_decoder,
++ &ff_mp3_decoder,
++ &ff_vorbis_decoder,
++ &ff_pcm_alaw_decoder,
++ &ff_pcm_f32le_decoder,
++ &ff_pcm_mulaw_decoder,
++ &ff_pcm_s16be_decoder,
++ &ff_pcm_s16le_decoder,
++ &ff_pcm_s24be_decoder,
++ &ff_pcm_s24le_decoder,
++ &ff_pcm_s32le_decoder,
++ &ff_pcm_u8_decoder,
++ &ff_libopus_decoder,
++ NULL };
+diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavcodec/parser_list.c b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavcodec/parser_list.c
+new file mode 100644
+index 000000000..f81fbe8bb
+--- /dev/null
++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavcodec/parser_list.c
+@@ -0,0 +1,9 @@
++static const AVCodecParser * const parser_list[] = {
++ &ff_flac_parser,
++ &ff_mpegaudio_parser,
++ &ff_opus_parser,
++ &ff_vorbis_parser,
++ &ff_vp3_parser,
++ &ff_vp8_parser,
++ &ff_vp9_parser,
++ NULL };
+diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavformat/demuxer_list.c b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavformat/demuxer_list.c
+new file mode 100644
+index 000000000..1908ba19e
+--- /dev/null
++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavformat/demuxer_list.c
+@@ -0,0 +1,8 @@
++static const AVInputFormat * const demuxer_list[] = {
++ &ff_flac_demuxer,
++ &ff_matroska_demuxer,
++ &ff_mov_demuxer,
++ &ff_mp3_demuxer,
++ &ff_ogg_demuxer,
++ &ff_wav_demuxer,
++ NULL };
+diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavformat/muxer_list.c b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavformat/muxer_list.c
+new file mode 100644
+index 000000000..f36d9499c
+--- /dev/null
++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavformat/muxer_list.c
+@@ -0,0 +1,2 @@
++static const AVOutputFormat * const muxer_list[] = {
++ NULL };
+diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavformat/protocol_list.c b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavformat/protocol_list.c
+new file mode 100644
+index 000000000..247e1e4c3
+--- /dev/null
++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavformat/protocol_list.c
+@@ -0,0 +1,2 @@
++static const URLProtocol * const url_protocols[] = {
++ NULL };
+diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavutil/avconfig.h b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavutil/avconfig.h
+new file mode 100644
+index 000000000..8558b3502
+--- /dev/null
++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavutil/avconfig.h
+@@ -0,0 +1,6 @@
++/* Generated by ffmpeg configure */
++#ifndef AVUTIL_AVCONFIG_H
++#define AVUTIL_AVCONFIG_H
++#define AV_HAVE_BIGENDIAN 0
++#define AV_HAVE_FAST_UNALIGNED 0
++#endif /* AVUTIL_AVCONFIG_H */
+diff --git a/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavutil/ffversion.h b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavutil/ffversion.h
+new file mode 100644
+index 000000000..31e5b5036
+--- /dev/null
++++ b/src/3rdparty/chromium/third_party/ffmpeg/chromium/config/Chromium/linux/la64/libavutil/ffversion.h
+@@ -0,0 +1,5 @@
++/* Automatically generated by version.sh, do not manually edit! */
++#ifndef AVUTIL_FFVERSION_H
++#define AVUTIL_FFVERSION_H
++#define FFMPEG_VERSION "git-2020-06-16-23b2a15c25"
++#endif /* AVUTIL_FFVERSION_H */
+--
+2.20.1
+
diff --git a/0006-fix-third_party-for-loongarch64-add-files-for-la64.patch b/0006-fix-third_party-for-loongarch64-add-files-for-la64.patch
new file mode 100644
index 0000000..70622c0
--- /dev/null
+++ b/0006-fix-third_party-for-loongarch64-add-files-for-la64.patch
@@ -0,0 +1,6055 @@
+From 4f0a715b9cf52b57199b551bbce82e22377f0520 Mon Sep 17 00:00:00 2001
+From: CaoHuixiong
+Date: Wed, 13 Jan 2021 07:54:25 +0000
+Subject: [PATCH 06/13] fix third_party for loongarch64 add files for la64
+
+---
+ .../system_headers/la64_linux_syscalls.h | 1120 +++++
+ .../platform/heap/asm/SaveRegisters_la64.S | 41 +
+ .../source/config/linux/la64/vp8_rtcd.h | 357 ++
+ .../source/config/linux/la64/vp9_rtcd.h | 275 ++
+ .../source/config/linux/la64/vpx_config.asm | 98 +
+ .../source/config/linux/la64/vpx_config.c | 10 +
+ .../source/config/linux/la64/vpx_config.h | 107 +
+ .../source/config/linux/la64/vpx_dsp_rtcd.h | 3868 +++++++++++++++++
+ .../source/config/linux/la64/vpx_scale_rtcd.h | 96 +
+ 9 files changed, 5972 insertions(+)
+ create mode 100644 src/3rdparty/chromium/sandbox/linux/system_headers/la64_linux_syscalls.h
+ create mode 100644 src/3rdparty/chromium/third_party/blink/renderer/platform/heap/asm/SaveRegisters_la64.S
+ create mode 100644 src/3rdparty/chromium/third_party/libvpx/source/config/linux/la64/vp8_rtcd.h
+ create mode 100644 src/3rdparty/chromium/third_party/libvpx/source/config/linux/la64/vp9_rtcd.h
+ create mode 100644 src/3rdparty/chromium/third_party/libvpx/source/config/linux/la64/vpx_config.asm
+ create mode 100644 src/3rdparty/chromium/third_party/libvpx/source/config/linux/la64/vpx_config.c
+ create mode 100644 src/3rdparty/chromium/third_party/libvpx/source/config/linux/la64/vpx_config.h
+ create mode 100644 src/3rdparty/chromium/third_party/libvpx/source/config/linux/la64/vpx_dsp_rtcd.h
+ create mode 100644 src/3rdparty/chromium/third_party/libvpx/source/config/linux/la64/vpx_scale_rtcd.h
+
+diff --git a/src/3rdparty/chromium/sandbox/linux/system_headers/la64_linux_syscalls.h b/src/3rdparty/chromium/sandbox/linux/system_headers/la64_linux_syscalls.h
+new file mode 100644
+index 000000000..12159cf61
+--- /dev/null
++++ b/src/3rdparty/chromium/sandbox/linux/system_headers/la64_linux_syscalls.h
+@@ -0,0 +1,1120 @@
++// Copyright 2021 The Chromium Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style license that can be
++// found in the LICENSE file.
++ ++#ifndef SANDBOX_LINUX_SYSTEM_HEADERS_LA64_LINUX_SYSCALLS_H_ ++#define SANDBOX_LINUX_SYSTEM_HEADERS_LA64_LINUX_SYSCALLS_H_ ++ ++#include ++ ++#if !defined(__NR_io_setup) ++#define __NR_io_setup 0 ++#endif ++ ++#if !defined(__NR_io_destroy) ++#define __NR_io_destroy 1 ++#endif ++ ++#if !defined(__NR_io_submit) ++#define __NR_io_submit 2 ++#endif ++ ++#if !defined(__NR_io_cancel) ++#define __NR_io_cancel 3 ++#endif ++ ++#if !defined(__NR_io_getevents) ++#define __NR_io_getevents 4 ++#endif ++ ++#if !defined(__NR_setxattr) ++#define __NR_setxattr 5 ++#endif ++ ++#if !defined(__NR_lsetxattr) ++#define __NR_lsetxattr 6 ++#endif ++ ++#if !defined(__NR_fsetxattr) ++#define __NR_fsetxattr 7 ++#endif ++ ++#if !defined(__NR_getxattr) ++#define __NR_getxattr 8 ++#endif ++ ++#if !defined(__NR_lgetxattr) ++#define __NR_lgetxattr 9 ++#endif ++ ++#if !defined(__NR_fgetxattr) ++#define __NR_fgetxattr 10 ++#endif ++ ++#if !defined(__NR_listxattr) ++#define __NR_listxattr 11 ++#endif ++ ++#if !defined(__NR_llistxattr) ++#define __NR_llistxattr 12 ++#endif ++ ++#if !defined(__NR_flistxattr) ++#define __NR_flistxattr 13 ++#endif ++ ++#if !defined(__NR_removexattr) ++#define __NR_removexattr 14 ++#endif ++ ++#if !defined(__NR_lremovexattr) ++#define __NR_lremovexattr 15 ++#endif ++ ++#if !defined(__NR_fremovexattr) ++#define __NR_fremovexattr 16 ++#endif ++ ++#if !defined(__NR_getcwd) ++#define __NR_getcwd 17 ++#endif ++ ++#if !defined(__NR_lookup_dcookie) ++#define __NR_lookup_dcookie 18 ++#endif ++ ++#if !defined(__NR_eventfd2) ++#define __NR_eventfd2 19 ++#endif ++ ++#if !defined(__NR_epoll_create1) ++#define __NR_epoll_create1 20 ++#endif ++ ++#if !defined(__NR_epoll_ctl) ++#define __NR_epoll_ctl 21 ++#endif ++ ++#if !defined(__NR_epoll_pwait) ++#define __NR_epoll_pwait 22 ++#endif ++ ++#if !defined(__NR_dup) ++#define __NR_dup 23 ++#endif ++ ++#if !defined(__NR_dup3) ++#define __NR_dup3 24 ++#endif ++ ++#if !defined(__NR_fcntl) ++#define __NR_fcntl 25 ++#endif ++ ++#if !defined(__NR_inotify_init1) ++#define __NR_inotify_init1 26 ++#endif ++ ++#if !defined(__NR_inotify_add_watch) ++#define __NR_inotify_add_watch 27 ++#endif ++ ++#if !defined(__NR_inotify_rm_watch) ++#define __NR_inotify_rm_watch 28 ++#endif ++ ++#if !defined(__NR_ioctl) ++#define __NR_ioctl 29 ++#endif ++ ++#if !defined(__NR_ioprio_set) ++#define __NR_ioprio_set 30 ++#endif ++ ++#if !defined(__NR_ioprio_get) ++#define __NR_ioprio_get 31 ++#endif ++ ++#if !defined(__NR_flock) ++#define __NR_flock 32 ++#endif ++ ++#if !defined(__NR_mknodat) ++#define __NR_mknodat 33 ++#endif ++ ++#if !defined(__NR_mkdirat) ++#define __NR_mkdirat 34 ++#endif ++ ++#if !defined(__NR_unlinkat) ++#define __NR_unlinkat 35 ++#endif ++ ++#if !defined(__NR_symlinkat) ++#define __NR_symlinkat 36 ++#endif ++ ++#if !defined(__NR_linkat) ++#define __NR_linkat 37 ++#endif ++ ++#if !defined(__NR_renameat) ++#define __NR_renameat 38 ++#endif ++ ++#if !defined(__NR_umount2) ++#define __NR_umount2 39 ++#endif ++ ++#if !defined(__NR_mount) ++#define __NR_mount 40 ++#endif ++ ++#if !defined(__NR_pivot_root) ++#define __NR_pivot_root 41 ++#endif ++ ++#if !defined(__NR_nfsservctl) ++#define __NR_nfsservctl 42 ++#endif ++ ++#if !defined(__NR_statfs) ++#define __NR_statfs 43 ++#endif ++ ++#if !defined(__NR_fstatfs) ++#define __NR_fstatfs 44 ++#endif ++ ++#if !defined(__NR_truncate) ++#define __NR_truncate 45 ++#endif ++ ++#if !defined(__NR_ftruncate) ++#define __NR_ftruncate 46 ++#endif ++ ++#if !defined(__NR_fallocate) ++#define __NR_fallocate 47 ++#endif ++ ++#if 
!defined(__NR_faccessat) ++#define __NR_faccessat 48 ++#endif ++ ++#if !defined(__NR_chdir) ++#define __NR_chdir 49 ++#endif ++ ++#if !defined(__NR_fchdir) ++#define __NR_fchdir 50 ++#endif ++ ++#if !defined(__NR_chroot) ++#define __NR_chroot 51 ++#endif ++ ++#if !defined(__NR_fchmod) ++#define __NR_fchmod 52 ++#endif ++ ++#if !defined(__NR_fchmodat) ++#define __NR_fchmodat 53 ++#endif ++ ++#if !defined(__NR_fchownat) ++#define __NR_fchownat 54 ++#endif ++ ++#if !defined(__NR_fchown) ++#define __NR_fchown 55 ++#endif ++ ++#if !defined(__NR_openat) ++#define __NR_openat 56 ++#endif ++ ++#if !defined(__NR_close) ++#define __NR_close 57 ++#endif ++ ++#if !defined(__NR_vhangup) ++#define __NR_vhangup 58 ++#endif ++ ++#if !defined(__NR_pipe2) ++#define __NR_pipe2 59 ++#endif ++ ++#if !defined(__NR_quotactl) ++#define __NR_quotactl 60 ++#endif ++ ++#if !defined(__NR_getdents64) ++#define __NR_getdents64 61 ++#endif ++ ++#if !defined(__NR_lseek) ++#define __NR_lseek 62 ++#endif ++ ++#if !defined(__NR_read) ++#define __NR_read 63 ++#endif ++ ++#if !defined(__NR_write) ++#define __NR_write 64 ++#endif ++ ++#if !defined(__NR_readv) ++#define __NR_readv 65 ++#endif ++ ++#if !defined(__NR_writev) ++#define __NR_writev 66 ++#endif ++ ++#if !defined(__NR_pread64) ++#define __NR_pread64 67 ++#endif ++ ++#if !defined(__NR_pwrite64) ++#define __NR_pwrite64 68 ++#endif ++ ++#if !defined(__NR_preadv) ++#define __NR_preadv 69 ++#endif ++ ++#if !defined(__NR_pwritev) ++#define __NR_pwritev 70 ++#endif ++ ++#if !defined(__NR_sendfile) ++#define __NR_sendfile 71 ++#endif ++ ++#if !defined(__NR_pselect6) ++#define __NR_pselect6 72 ++#endif ++ ++#if !defined(__NR_ppoll) ++#define __NR_ppoll 73 ++#endif ++ ++#if !defined(__NR_signalfd4) ++#define __NR_signalfd4 74 ++#endif ++ ++#if !defined(__NR_vmsplice) ++#define __NR_vmsplice 75 ++#endif ++ ++#if !defined(__NR_splice) ++#define __NR_splice 76 ++#endif ++ ++#if !defined(__NR_tee) ++#define __NR_tee 77 ++#endif ++ ++#if !defined(__NR_readlinkat) ++#define __NR_readlinkat 78 ++#endif ++ ++// __NR3264_fstatat is not declared on old system ++#define __NR_newfstatat 79 ++ ++// __NR3264_fstat is not declared on old system ++#define __NR_fstat 80 ++ ++#if !defined(__NR_sync) ++#define __NR_sync 81 ++#endif ++ ++#if !defined(__NR_fsync) ++#define __NR_fsync 82 ++#endif ++ ++#if !defined(__NR_fdatasync) ++#define __NR_fdatasync 83 ++#endif ++ ++#if !defined(__NR_sync_file_range) ++#define __NR_sync_file_range 84 ++#endif ++ ++#if !defined(__NR_timerfd_create) ++#define __NR_timerfd_create 85 ++#endif ++ ++#if !defined(__NR_timerfd_settime) ++#define __NR_timerfd_settime 86 ++#endif ++ ++#if !defined(__NR_timerfd_gettime) ++#define __NR_timerfd_gettime 87 ++#endif ++ ++#if !defined(__NR_utimensat) ++#define __NR_utimensat 88 ++#endif ++ ++#if !defined(__NR_acct) ++#define __NR_acct 89 ++#endif ++ ++#if !defined(__NR_capget) ++#define __NR_capget 90 ++#endif ++ ++#if !defined(__NR_capset) ++#define __NR_capset 91 ++#endif ++ ++#if !defined(__NR_personality) ++#define __NR_personality 92 ++#endif ++ ++#if !defined(__NR_exit) ++#define __NR_exit 93 ++#endif ++ ++#if !defined(__NR_exit_group) ++#define __NR_exit_group 94 ++#endif ++ ++#if !defined(__NR_waitid) ++#define __NR_waitid 95 ++#endif ++ ++#if !defined(__NR_set_tid_address) ++#define __NR_set_tid_address 96 ++#endif ++ ++#if !defined(__NR_unshare) ++#define __NR_unshare 97 ++#endif ++ ++#if !defined(__NR_futex) ++#define __NR_futex 98 ++#endif ++ ++#if !defined(__NR_set_robust_list) ++#define __NR_set_robust_list 99 
++#endif ++ ++#if !defined(__NR_get_robust_list) ++#define __NR_get_robust_list 100 ++#endif ++ ++#if !defined(__NR_nanosleep) ++#define __NR_nanosleep 101 ++#endif ++ ++#if !defined(__NR_getitimer) ++#define __NR_getitimer 102 ++#endif ++ ++#if !defined(__NR_setitimer) ++#define __NR_setitimer 103 ++#endif ++ ++#if !defined(__NR_kexec_load) ++#define __NR_kexec_load 104 ++#endif ++ ++#if !defined(__NR_init_module) ++#define __NR_init_module 105 ++#endif ++ ++#if !defined(__NR_delete_module) ++#define __NR_delete_module 106 ++#endif ++ ++#if !defined(__NR_timer_create) ++#define __NR_timer_create 107 ++#endif ++ ++#if !defined(__NR_timer_gettime) ++#define __NR_timer_gettime 108 ++#endif ++ ++#if !defined(__NR_timer_getoverrun) ++#define __NR_timer_getoverrun 109 ++#endif ++ ++#if !defined(__NR_timer_settime) ++#define __NR_timer_settime 110 ++#endif ++ ++#if !defined(__NR_timer_delete) ++#define __NR_timer_delete 111 ++#endif ++ ++#if !defined(__NR_clock_settime) ++#define __NR_clock_settime 112 ++#endif ++ ++#if !defined(__NR_clock_gettime) ++#define __NR_clock_gettime 113 ++#endif ++ ++#if !defined(__NR_clock_getres) ++#define __NR_clock_getres 114 ++#endif ++ ++#if !defined(__NR_clock_nanosleep) ++#define __NR_clock_nanosleep 115 ++#endif ++ ++#if !defined(__NR_syslog) ++#define __NR_syslog 116 ++#endif ++ ++#if !defined(__NR_ptrace) ++#define __NR_ptrace 117 ++#endif ++ ++#if !defined(__NR_sched_setparam) ++#define __NR_sched_setparam 118 ++#endif ++ ++#if !defined(__NR_sched_setscheduler) ++#define __NR_sched_setscheduler 119 ++#endif ++ ++#if !defined(__NR_sched_getscheduler) ++#define __NR_sched_getscheduler 120 ++#endif ++ ++#if !defined(__NR_sched_getparam) ++#define __NR_sched_getparam 121 ++#endif ++ ++#if !defined(__NR_sched_setaffinity) ++#define __NR_sched_setaffinity 122 ++#endif ++ ++#if !defined(__NR_sched_getaffinity) ++#define __NR_sched_getaffinity 123 ++#endif ++ ++#if !defined(__NR_sched_yield) ++#define __NR_sched_yield 124 ++#endif ++ ++#if !defined(__NR_sched_get_priority_max) ++#define __NR_sched_get_priority_max 125 ++#endif ++ ++#if !defined(__NR_sched_get_priority_min) ++#define __NR_sched_get_priority_min 126 ++#endif ++ ++#if !defined(__NR_sched_rr_get_interval) ++#define __NR_sched_rr_get_interval 127 ++#endif ++ ++#if !defined(__NR_restart_syscall) ++#define __NR_restart_syscall 128 ++#endif ++ ++#if !defined(__NR_kill) ++#define __NR_kill 129 ++#endif ++ ++#if !defined(__NR_tkill) ++#define __NR_tkill 130 ++#endif ++ ++#if !defined(__NR_tgkill) ++#define __NR_tgkill 131 ++#endif ++ ++#if !defined(__NR_sigaltstack) ++#define __NR_sigaltstack 132 ++#endif ++ ++#if !defined(__NR_rt_sigsuspend) ++#define __NR_rt_sigsuspend 133 ++#endif ++ ++#if !defined(__NR_rt_sigaction) ++#define __NR_rt_sigaction 134 ++#endif ++ ++#if !defined(__NR_rt_sigprocmask) ++#define __NR_rt_sigprocmask 135 ++#endif ++ ++#if !defined(__NR_rt_sigpending) ++#define __NR_rt_sigpending 136 ++#endif ++ ++#if !defined(__NR_rt_sigtimedwait) ++#define __NR_rt_sigtimedwait 137 ++#endif ++ ++#if !defined(__NR_rt_sigqueueinfo) ++#define __NR_rt_sigqueueinfo 138 ++#endif ++ ++#if !defined(__NR_rt_sigreturn) ++#define __NR_rt_sigreturn 139 ++#endif ++ ++#if !defined(__NR_setpriority) ++#define __NR_setpriority 140 ++#endif ++ ++#if !defined(__NR_getpriority) ++#define __NR_getpriority 141 ++#endif ++ ++#if !defined(__NR_reboot) ++#define __NR_reboot 142 ++#endif ++ ++#if !defined(__NR_setregid) ++#define __NR_setregid 143 ++#endif ++ ++#if !defined(__NR_setgid) ++#define __NR_setgid 144 ++#endif 
++ ++#if !defined(__NR_setreuid) ++#define __NR_setreuid 145 ++#endif ++ ++#if !defined(__NR_setuid) ++#define __NR_setuid 146 ++#endif ++ ++#if !defined(__NR_setresuid) ++#define __NR_setresuid 147 ++#endif ++ ++#if !defined(__NR_getresuid) ++#define __NR_getresuid 148 ++#endif ++ ++#if !defined(__NR_setresgid) ++#define __NR_setresgid 149 ++#endif ++ ++#if !defined(__NR_getresgid) ++#define __NR_getresgid 150 ++#endif ++ ++#if !defined(__NR_setfsuid) ++#define __NR_setfsuid 151 ++#endif ++ ++#if !defined(__NR_setfsgid) ++#define __NR_setfsgid 152 ++#endif ++ ++#if !defined(__NR_times) ++#define __NR_times 153 ++#endif ++ ++#if !defined(__NR_setpgid) ++#define __NR_setpgid 154 ++#endif ++ ++#if !defined(__NR_getpgid) ++#define __NR_getpgid 155 ++#endif ++ ++#if !defined(__NR_getsid) ++#define __NR_getsid 156 ++#endif ++ ++#if !defined(__NR_setsid) ++#define __NR_setsid 157 ++#endif ++ ++#if !defined(__NR_getgroups) ++#define __NR_getgroups 158 ++#endif ++ ++#if !defined(__NR_setgroups) ++#define __NR_setgroups 159 ++#endif ++ ++#if !defined(__NR_uname) ++#define __NR_uname 160 ++#endif ++ ++#if !defined(__NR_sethostname) ++#define __NR_sethostname 161 ++#endif ++ ++#if !defined(__NR_setdomainname) ++#define __NR_setdomainname 162 ++#endif ++ ++#if !defined(__NR_getrlimit) ++#define __NR_getrlimit 163 ++#endif ++ ++#if !defined(__NR_setrlimit) ++#define __NR_setrlimit 164 ++#endif ++ ++#if !defined(__NR_getrusage) ++#define __NR_getrusage 165 ++#endif ++ ++#if !defined(__NR_umask) ++#define __NR_umask 166 ++#endif ++ ++#if !defined(__NR_prctl) ++#define __NR_prctl 167 ++#endif ++ ++#if !defined(__NR_getcpu) ++#define __NR_getcpu 168 ++#endif ++ ++#if !defined(__NR_gettimeofday) ++#define __NR_gettimeofday 169 ++#endif ++ ++#if !defined(__NR_settimeofday) ++#define __NR_settimeofday 170 ++#endif ++ ++#if !defined(__NR_adjtimex) ++#define __NR_adjtimex 171 ++#endif ++ ++#if !defined(__NR_getpid) ++#define __NR_getpid 172 ++#endif ++ ++#if !defined(__NR_getppid) ++#define __NR_getppid 173 ++#endif ++ ++#if !defined(__NR_getuid) ++#define __NR_getuid 174 ++#endif ++ ++#if !defined(__NR_geteuid) ++#define __NR_geteuid 175 ++#endif ++ ++#if !defined(__NR_getgid) ++#define __NR_getgid 176 ++#endif ++ ++#if !defined(__NR_getegid) ++#define __NR_getegid 177 ++#endif ++ ++#if !defined(__NR_gettid) ++#define __NR_gettid 178 ++#endif ++ ++#if !defined(__NR_sysinfo) ++#define __NR_sysinfo 179 ++#endif ++ ++#if !defined(__NR_mq_open) ++#define __NR_mq_open 180 ++#endif ++ ++#if !defined(__NR_mq_unlink) ++#define __NR_mq_unlink 181 ++#endif ++ ++#if !defined(__NR_mq_timedsend) ++#define __NR_mq_timedsend 182 ++#endif ++ ++#if !defined(__NR_mq_timedreceive) ++#define __NR_mq_timedreceive 183 ++#endif ++ ++#if !defined(__NR_mq_notify) ++#define __NR_mq_notify 184 ++#endif ++ ++#if !defined(__NR_mq_getsetattr) ++#define __NR_mq_getsetattr 185 ++#endif ++ ++#if !defined(__NR_msgget) ++#define __NR_msgget 186 ++#endif ++ ++#if !defined(__NR_msgctl) ++#define __NR_msgctl 187 ++#endif ++ ++#if !defined(__NR_msgrcv) ++#define __NR_msgrcv 188 ++#endif ++ ++#if !defined(__NR_msgsnd) ++#define __NR_msgsnd 189 ++#endif ++ ++#if !defined(__NR_semget) ++#define __NR_semget 190 ++#endif ++ ++#if !defined(__NR_semctl) ++#define __NR_semctl 191 ++#endif ++ ++#if !defined(__NR_semtimedop) ++#define __NR_semtimedop 192 ++#endif ++ ++#if !defined(__NR_semop) ++#define __NR_semop 193 ++#endif ++ ++#if !defined(__NR_shmget) ++#define __NR_shmget 194 ++#endif ++ ++#if !defined(__NR_shmctl) ++#define __NR_shmctl 195 ++#endif ++ 
++#if !defined(__NR_shmat) ++#define __NR_shmat 196 ++#endif ++ ++#if !defined(__NR_shmdt) ++#define __NR_shmdt 197 ++#endif ++ ++#if !defined(__NR_socket) ++#define __NR_socket 198 ++#endif ++ ++#if !defined(__NR_socketpair) ++#define __NR_socketpair 199 ++#endif ++ ++#if !defined(__NR_bind) ++#define __NR_bind 200 ++#endif ++ ++#if !defined(__NR_listen) ++#define __NR_listen 201 ++#endif ++ ++#if !defined(__NR_accept) ++#define __NR_accept 202 ++#endif ++ ++#if !defined(__NR_connect) ++#define __NR_connect 203 ++#endif ++ ++#if !defined(__NR_getsockname) ++#define __NR_getsockname 204 ++#endif ++ ++#if !defined(__NR_getpeername) ++#define __NR_getpeername 205 ++#endif ++ ++#if !defined(__NR_sendto) ++#define __NR_sendto 206 ++#endif ++ ++#if !defined(__NR_recvfrom) ++#define __NR_recvfrom 207 ++#endif ++ ++#if !defined(__NR_setsockopt) ++#define __NR_setsockopt 208 ++#endif ++ ++#if !defined(__NR_getsockopt) ++#define __NR_getsockopt 209 ++#endif ++ ++#if !defined(__NR_shutdown) ++#define __NR_shutdown 210 ++#endif ++ ++#if !defined(__NR_sendmsg) ++#define __NR_sendmsg 211 ++#endif ++ ++#if !defined(__NR_recvmsg) ++#define __NR_recvmsg 212 ++#endif ++ ++#if !defined(__NR_readahead) ++#define __NR_readahead 213 ++#endif ++ ++#if !defined(__NR_brk) ++#define __NR_brk 214 ++#endif ++ ++#if !defined(__NR_munmap) ++#define __NR_munmap 215 ++#endif ++ ++#if !defined(__NR_mremap) ++#define __NR_mremap 216 ++#endif ++ ++#if !defined(__NR_add_key) ++#define __NR_add_key 217 ++#endif ++ ++#if !defined(__NR_request_key) ++#define __NR_request_key 218 ++#endif ++ ++#if !defined(__NR_keyctl) ++#define __NR_keyctl 219 ++#endif ++ ++#if !defined(__NR_clone) ++#define __NR_clone 220 ++#endif ++ ++#if !defined(__NR_execve) ++#define __NR_execve 221 ++#endif ++ ++#if !defined(__NR_mmap) ++#define __NR_mmap 222 ++#endif ++ ++#if !defined(__NR_fadvise64) ++#define __NR_fadvise64 223 ++#endif ++ ++#if !defined(__NR_swapon) ++#define __NR_swapon 224 ++#endif ++ ++#if !defined(__NR_swapoff) ++#define __NR_swapoff 225 ++#endif ++ ++#if !defined(__NR_mprotect) ++#define __NR_mprotect 226 ++#endif ++ ++#if !defined(__NR_msync) ++#define __NR_msync 227 ++#endif ++ ++#if !defined(__NR_mlock) ++#define __NR_mlock 228 ++#endif ++ ++#if !defined(__NR_munlock) ++#define __NR_munlock 229 ++#endif ++ ++#if !defined(__NR_mlockall) ++#define __NR_mlockall 230 ++#endif ++ ++#if !defined(__NR_munlockall) ++#define __NR_munlockall 231 ++#endif ++ ++#if !defined(__NR_mincore) ++#define __NR_mincore 232 ++#endif ++ ++#if !defined(__NR_madvise) ++#define __NR_madvise 233 ++#endif ++ ++#if !defined(__NR_remap_file_pages) ++#define __NR_remap_file_pages 234 ++#endif ++ ++#if !defined(__NR_mbind) ++#define __NR_mbind 235 ++#endif ++ ++#if !defined(__NR_get_mempolicy) ++#define __NR_get_mempolicy 236 ++#endif ++ ++#if !defined(__NR_set_mempolicy) ++#define __NR_set_mempolicy 237 ++#endif ++ ++#if !defined(__NR_migrate_pages) ++#define __NR_migrate_pages 238 ++#endif ++ ++#if !defined(__NR_move_pages) ++#define __NR_move_pages 239 ++#endif ++ ++#if !defined(__NR_rt_tgsigqueueinfo) ++#define __NR_rt_tgsigqueueinfo 240 ++#endif ++ ++#if !defined(__NR_perf_event_open) ++#define __NR_perf_event_open 241 ++#endif ++ ++#if !defined(__NR_accept4) ++#define __NR_accept4 242 ++#endif ++ ++#if !defined(__NR_recvmmsg) ++#define __NR_recvmmsg 243 ++#endif ++ ++#if !defined(__NR_wait4) ++#define __NR_wait4 260 ++#endif ++ ++#if !defined(__NR_prlimit64) ++#define __NR_prlimit64 261 ++#endif ++ ++#if !defined(__NR_fanotify_init) ++#define 
__NR_fanotify_init 262 ++#endif ++ ++#if !defined(__NR_fanotify_mark) ++#define __NR_fanotify_mark 263 ++#endif ++ ++#if !defined(__NR_name_to_handle_at) ++#define __NR_name_to_handle_at 264 ++#endif ++ ++#if !defined(__NR_open_by_handle_at) ++#define __NR_open_by_handle_at 265 ++#endif ++ ++#if !defined(__NR_clock_adjtime) ++#define __NR_clock_adjtime 266 ++#endif ++ ++#if !defined(__NR_syncfs) ++#define __NR_syncfs 267 ++#endif ++ ++#if !defined(__NR_setns) ++#define __NR_setns 268 ++#endif ++ ++#if !defined(__NR_sendmmsg) ++#define __NR_sendmmsg 269 ++#endif ++ ++#if !defined(__NR_process_vm_readv) ++#define __NR_process_vm_readv 270 ++#endif ++ ++#if !defined(__NR_process_vm_writev) ++#define __NR_process_vm_writev 271 ++#endif ++ ++#if !defined(__NR_kcmp) ++#define __NR_kcmp 272 ++#endif ++ ++#if !defined(__NR_finit_module) ++#define __NR_finit_module 273 ++#endif ++ ++#if !defined(__NR_sched_setattr) ++#define __NR_sched_setattr 274 ++#endif ++ ++#if !defined(__NR_sched_getattr) ++#define __NR_sched_getattr 275 ++#endif ++ ++#if !defined(__NR_renameat2) ++#define __NR_renameat2 276 ++#endif ++ ++#if !defined(__NR_seccomp) ++#define __NR_seccomp 277 ++#endif ++ ++#if !defined(__NR_getrandom) ++#define __NR_getrandom 278 ++#endif ++ ++#if !defined(__NR_memfd_create) ++#define __NR_memfd_create 279 ++#endif ++ ++#if !defined(__NR_bpf) ++#define __NR_bpf 280 ++#endif ++ ++#if !defined(__NR_execveat) ++#define __NR_execveat 281 ++#endif ++ ++#if !defined(__NR_userfaultfd) ++#define __NR_userfaultfd 282 ++#endif ++ ++#if !defined(__NR_membarrier) ++#define __NR_membarrier 283 ++#endif ++ ++#if !defined(__NR_mlock2) ++#define __NR_mlock2 284 ++#endif ++ ++#if !defined(__NR_copy_file_range) ++#define __NR_copy_file_range 285 ++#endif ++ ++#if !defined(__NR_preadv2) ++#define __NR_preadv2 286 ++#endif ++ ++#if !defined(__NR_pwritev2) ++#define __NR_pwritev2 287 ++#endif ++ ++#if !defined(__NR_pkey_mprotect) ++#define __NR_pkey_mprotect 288 ++#endif ++ ++#if !defined(__NR_pkey_alloc) ++#define __NR_pkey_alloc 289 ++#endif ++ ++#if !defined(__NR_pkey_free) ++#define __NR_pkey_free 290 ++#endif ++ ++#if !defined(__NR_statx) ++#define __NR_statx 291 ++#endif ++ ++#if !defined(__NR_io_pgetevents) ++#define __NR_io_pgetevents 292 ++#endif ++ ++#if !defined(__NR_rseq) ++#define __NR_rseq 293 ++#endif ++ ++#endif // SANDBOX_LINUX_SYSTEM_HEADERS_LA64_LINUX_SYSCALLS_H_ +diff --git a/src/3rdparty/chromium/third_party/blink/renderer/platform/heap/asm/SaveRegisters_la64.S b/src/3rdparty/chromium/third_party/blink/renderer/platform/heap/asm/SaveRegisters_la64.S +new file mode 100644 +index 000000000..880201671 +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/blink/renderer/platform/heap/asm/SaveRegisters_la64.S +@@ -0,0 +1,41 @@ ++// Copyright 2014 The Chromium Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++/* ++ * typedef void (*PushAllRegistersCallback)(ThreadState*, intptr_t*); ++ * extern "C" void PushAllRegisters(ThreadState*, PushAllRegistersCallback) ++ */ ++ ++.type PushAllRegisters, %function ++.global PushAllRegisters ++.hidden PushAllRegisters ++PushAllRegisters: ++ // Push all callee-saves registers to get them ++ // on the stack for conservative stack scanning. ++ // Reserve space for callee-saved registers and return address. ++ addi.d $sp,$sp,-80 ++ // Save the callee-saved registers and the return address.
++ st.d $s0, $sp, 0 ++ st.d $s1, $sp, 8 ++ st.d $s2, $sp, 16 ++ st.d $s3, $sp, 24 ++ st.d $s4, $sp, 32 ++ st.d $s5, $sp, 40 ++ st.d $s6, $sp, 48 ++ st.d $s7, $sp, 56 ++ st.d $ra, $sp, 64 ++ // Note: the callee-saved floating point registers do not need to be ++ // copied to the stack, because fp registers never hold heap pointers ++ // and so do not need to be kept visible to the garbage collector. ++ // Pass the first argument untouched in a0 and the ++ // stack pointer to the callback. ++ move $t7,$a1 ++ move $a1,$sp ++ jirl $ra, $t7, 0 ++ // Restore return address, adjust stack and return. ++ // Note: the copied registers do not need to be reloaded here, ++ // because they were preserved by the called routine. ++ ld.d $ra, $sp, 64 ++ addi.d $sp, $sp, 80 ++ jirl $zero, $ra, 0 +diff --git a/src/3rdparty/chromium/third_party/libvpx/source/config/linux/la64/vp8_rtcd.h b/src/3rdparty/chromium/third_party/libvpx/source/config/linux/la64/vp8_rtcd.h +new file mode 100644 +index 000000000..aa475b55f +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/libvpx/source/config/linux/la64/vp8_rtcd.h +@@ -0,0 +1,357 @@ ++// This file is generated. Do not edit. ++#ifndef VP8_RTCD_H_ ++#define VP8_RTCD_H_ ++ ++#ifdef RTCD_C ++#define RTCD_EXTERN ++#else ++#define RTCD_EXTERN extern ++#endif ++ ++/* ++ * VP8 ++ */ ++ ++struct blockd; ++struct macroblockd; ++struct loop_filter_info; ++ ++/* Encoder forward decls */ ++struct block; ++struct macroblock; ++struct variance_vtable; ++union int_mv; ++struct yv12_buffer_config; ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++void vp8_bilinear_predict16x16_c(unsigned char* src_ptr, ++ int src_pixels_per_line, ++ int xoffset, ++ int yoffset, ++ unsigned char* dst_ptr, ++ int dst_pitch); ++#define vp8_bilinear_predict16x16 vp8_bilinear_predict16x16_c ++ ++void vp8_bilinear_predict4x4_c(unsigned char* src_ptr, ++ int src_pixels_per_line, ++ int xoffset, ++ int yoffset, ++ unsigned char* dst_ptr, ++ int dst_pitch); ++#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_c ++ ++void vp8_bilinear_predict8x4_c(unsigned char* src_ptr, ++ int src_pixels_per_line, ++ int xoffset, ++ int yoffset, ++ unsigned char* dst_ptr, ++ int dst_pitch); ++#define vp8_bilinear_predict8x4 vp8_bilinear_predict8x4_c ++ ++void vp8_bilinear_predict8x8_c(unsigned char* src_ptr, ++ int src_pixels_per_line, ++ int xoffset, ++ int yoffset, ++ unsigned char* dst_ptr, ++ int dst_pitch); ++#define vp8_bilinear_predict8x8 vp8_bilinear_predict8x8_c ++ ++void vp8_blend_b_c(unsigned char* y, ++ unsigned char* u, ++ unsigned char* v, ++ int y_1, ++ int u_1, ++ int v_1, ++ int alpha, ++ int stride); ++#define vp8_blend_b vp8_blend_b_c ++ ++void vp8_blend_mb_inner_c(unsigned char* y, ++ unsigned char* u, ++ unsigned char* v, ++ int y_1, ++ int u_1, ++ int v_1, ++ int alpha, ++ int stride); ++#define vp8_blend_mb_inner vp8_blend_mb_inner_c ++ ++void vp8_blend_mb_outer_c(unsigned char* y, ++ unsigned char* u, ++ unsigned char* v, ++ int y_1, ++ int u_1, ++ int v_1, ++ int alpha, ++ int stride); ++#define vp8_blend_mb_outer vp8_blend_mb_outer_c ++ ++int vp8_block_error_c(short* coeff, short* dqcoeff); ++#define vp8_block_error vp8_block_error_c ++ ++void vp8_copy32xn_c(const unsigned char* src_ptr, ++ int src_stride, ++ unsigned char* dst_ptr, ++ int dst_stride, ++ int height); ++#define vp8_copy32xn vp8_copy32xn_c ++ ++void vp8_copy_mem16x16_c(unsigned char* src, ++ int src_stride, ++ unsigned char* dst, ++ int dst_stride); ++#define vp8_copy_mem16x16 vp8_copy_mem16x16_c ++ ++void 
vp8_copy_mem8x4_c(unsigned char* src, ++ int src_stride, ++ unsigned char* dst, ++ int dst_stride); ++#define vp8_copy_mem8x4 vp8_copy_mem8x4_c ++ ++void vp8_copy_mem8x8_c(unsigned char* src, ++ int src_stride, ++ unsigned char* dst, ++ int dst_stride); ++#define vp8_copy_mem8x8 vp8_copy_mem8x8_c ++ ++void vp8_dc_only_idct_add_c(short input_dc, ++ unsigned char* pred_ptr, ++ int pred_stride, ++ unsigned char* dst_ptr, ++ int dst_stride); ++#define vp8_dc_only_idct_add vp8_dc_only_idct_add_c ++ ++int vp8_denoiser_filter_c(unsigned char* mc_running_avg_y, ++ int mc_avg_y_stride, ++ unsigned char* running_avg_y, ++ int avg_y_stride, ++ unsigned char* sig, ++ int sig_stride, ++ unsigned int motion_magnitude, ++ int increase_denoising); ++#define vp8_denoiser_filter vp8_denoiser_filter_c ++ ++int vp8_denoiser_filter_uv_c(unsigned char* mc_running_avg, ++ int mc_avg_stride, ++ unsigned char* running_avg, ++ int avg_stride, ++ unsigned char* sig, ++ int sig_stride, ++ unsigned int motion_magnitude, ++ int increase_denoising); ++#define vp8_denoiser_filter_uv vp8_denoiser_filter_uv_c ++ ++void vp8_dequant_idct_add_c(short* input, ++ short* dq, ++ unsigned char* dest, ++ int stride); ++#define vp8_dequant_idct_add vp8_dequant_idct_add_c ++ ++void vp8_dequant_idct_add_uv_block_c(short* q, ++ short* dq, ++ unsigned char* dst_u, ++ unsigned char* dst_v, ++ int stride, ++ char* eobs); ++#define vp8_dequant_idct_add_uv_block vp8_dequant_idct_add_uv_block_c ++ ++void vp8_dequant_idct_add_y_block_c(short* q, ++ short* dq, ++ unsigned char* dst, ++ int stride, ++ char* eobs); ++#define vp8_dequant_idct_add_y_block vp8_dequant_idct_add_y_block_c ++ ++void vp8_dequantize_b_c(struct blockd*, short* DQC); ++#define vp8_dequantize_b vp8_dequantize_b_c ++ ++int vp8_diamond_search_sad_c(struct macroblock* x, ++ struct block* b, ++ struct blockd* d, ++ union int_mv* ref_mv, ++ union int_mv* best_mv, ++ int search_param, ++ int sad_per_bit, ++ int* num00, ++ struct variance_vtable* fn_ptr, ++ int* mvcost[2], ++ union int_mv* center_mv); ++#define vp8_diamond_search_sad vp8_diamond_search_sad_c ++ ++void vp8_fast_quantize_b_c(struct block*, struct blockd*); ++#define vp8_fast_quantize_b vp8_fast_quantize_b_c ++ ++void vp8_filter_by_weight16x16_c(unsigned char* src, ++ int src_stride, ++ unsigned char* dst, ++ int dst_stride, ++ int src_weight); ++#define vp8_filter_by_weight16x16 vp8_filter_by_weight16x16_c ++ ++void vp8_filter_by_weight4x4_c(unsigned char* src, ++ int src_stride, ++ unsigned char* dst, ++ int dst_stride, ++ int src_weight); ++#define vp8_filter_by_weight4x4 vp8_filter_by_weight4x4_c ++ ++void vp8_filter_by_weight8x8_c(unsigned char* src, ++ int src_stride, ++ unsigned char* dst, ++ int dst_stride, ++ int src_weight); ++#define vp8_filter_by_weight8x8 vp8_filter_by_weight8x8_c ++ ++int vp8_full_search_sad_c(struct macroblock* x, ++ struct block* b, ++ struct blockd* d, ++ union int_mv* ref_mv, ++ int sad_per_bit, ++ int distance, ++ struct variance_vtable* fn_ptr, ++ int* mvcost[2], ++ union int_mv* center_mv); ++#define vp8_full_search_sad vp8_full_search_sad_c ++ ++void vp8_loop_filter_bh_c(unsigned char* y_ptr, ++ unsigned char* u_ptr, ++ unsigned char* v_ptr, ++ int y_stride, ++ int uv_stride, ++ struct loop_filter_info* lfi); ++#define vp8_loop_filter_bh vp8_loop_filter_bh_c ++ ++void vp8_loop_filter_bv_c(unsigned char* y_ptr, ++ unsigned char* u_ptr, ++ unsigned char* v_ptr, ++ int y_stride, ++ int uv_stride, ++ struct loop_filter_info* lfi); ++#define vp8_loop_filter_bv vp8_loop_filter_bv_c 
++ ++void vp8_loop_filter_mbh_c(unsigned char* y_ptr, ++ unsigned char* u_ptr, ++ unsigned char* v_ptr, ++ int y_stride, ++ int uv_stride, ++ struct loop_filter_info* lfi); ++#define vp8_loop_filter_mbh vp8_loop_filter_mbh_c ++ ++void vp8_loop_filter_mbv_c(unsigned char* y_ptr, ++ unsigned char* u_ptr, ++ unsigned char* v_ptr, ++ int y_stride, ++ int uv_stride, ++ struct loop_filter_info* lfi); ++#define vp8_loop_filter_mbv vp8_loop_filter_mbv_c ++ ++void vp8_loop_filter_bhs_c(unsigned char* y_ptr, ++ int y_stride, ++ const unsigned char* blimit); ++#define vp8_loop_filter_simple_bh vp8_loop_filter_bhs_c ++ ++void vp8_loop_filter_bvs_c(unsigned char* y_ptr, ++ int y_stride, ++ const unsigned char* blimit); ++#define vp8_loop_filter_simple_bv vp8_loop_filter_bvs_c ++ ++void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y_ptr, ++ int y_stride, ++ const unsigned char* blimit); ++#define vp8_loop_filter_simple_mbh vp8_loop_filter_simple_horizontal_edge_c ++ ++void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y_ptr, ++ int y_stride, ++ const unsigned char* blimit); ++#define vp8_loop_filter_simple_mbv vp8_loop_filter_simple_vertical_edge_c ++ ++int vp8_mbblock_error_c(struct macroblock* mb, int dc); ++#define vp8_mbblock_error vp8_mbblock_error_c ++ ++int vp8_mbuverror_c(struct macroblock* mb); ++#define vp8_mbuverror vp8_mbuverror_c ++ ++int vp8_refining_search_sad_c(struct macroblock* x, ++ struct block* b, ++ struct blockd* d, ++ union int_mv* ref_mv, ++ int error_per_bit, ++ int search_range, ++ struct variance_vtable* fn_ptr, ++ int* mvcost[2], ++ union int_mv* center_mv); ++#define vp8_refining_search_sad vp8_refining_search_sad_c ++ ++void vp8_regular_quantize_b_c(struct block*, struct blockd*); ++#define vp8_regular_quantize_b vp8_regular_quantize_b_c ++ ++void vp8_short_fdct4x4_c(short* input, short* output, int pitch); ++#define vp8_short_fdct4x4 vp8_short_fdct4x4_c ++ ++void vp8_short_fdct8x4_c(short* input, short* output, int pitch); ++#define vp8_short_fdct8x4 vp8_short_fdct8x4_c ++ ++void vp8_short_idct4x4llm_c(short* input, ++ unsigned char* pred_ptr, ++ int pred_stride, ++ unsigned char* dst_ptr, ++ int dst_stride); ++#define vp8_short_idct4x4llm vp8_short_idct4x4llm_c ++ ++void vp8_short_inv_walsh4x4_c(short* input, short* mb_dqcoeff); ++#define vp8_short_inv_walsh4x4 vp8_short_inv_walsh4x4_c ++ ++void vp8_short_inv_walsh4x4_1_c(short* input, short* mb_dqcoeff); ++#define vp8_short_inv_walsh4x4_1 vp8_short_inv_walsh4x4_1_c ++ ++void vp8_short_walsh4x4_c(short* input, short* output, int pitch); ++#define vp8_short_walsh4x4 vp8_short_walsh4x4_c ++ ++void vp8_sixtap_predict16x16_c(unsigned char* src_ptr, ++ int src_pixels_per_line, ++ int xoffset, ++ int yoffset, ++ unsigned char* dst_ptr, ++ int dst_pitch); ++#define vp8_sixtap_predict16x16 vp8_sixtap_predict16x16_c ++ ++void vp8_sixtap_predict4x4_c(unsigned char* src_ptr, ++ int src_pixels_per_line, ++ int xoffset, ++ int yoffset, ++ unsigned char* dst_ptr, ++ int dst_pitch); ++#define vp8_sixtap_predict4x4 vp8_sixtap_predict4x4_c ++ ++void vp8_sixtap_predict8x4_c(unsigned char* src_ptr, ++ int src_pixels_per_line, ++ int xoffset, ++ int yoffset, ++ unsigned char* dst_ptr, ++ int dst_pitch); ++#define vp8_sixtap_predict8x4 vp8_sixtap_predict8x4_c ++ ++void vp8_sixtap_predict8x8_c(unsigned char* src_ptr, ++ int src_pixels_per_line, ++ int xoffset, ++ int yoffset, ++ unsigned char* dst_ptr, ++ int dst_pitch); ++#define vp8_sixtap_predict8x8 vp8_sixtap_predict8x8_c ++ ++void vp8_rtcd(void); ++ ++#include 
"vpx_config.h" ++ ++#ifdef RTCD_C ++static void setup_rtcd_internal(void) {} ++#endif ++ ++#ifdef __cplusplus ++} // extern "C" ++#endif ++ ++#endif +diff --git a/src/3rdparty/chromium/third_party/libvpx/source/config/linux/la64/vp9_rtcd.h b/src/3rdparty/chromium/third_party/libvpx/source/config/linux/la64/vp9_rtcd.h +new file mode 100644 +index 000000000..009139314 +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/libvpx/source/config/linux/la64/vp9_rtcd.h +@@ -0,0 +1,275 @@ ++// This file is generated. Do not edit. ++#ifndef VP9_RTCD_H_ ++#define VP9_RTCD_H_ ++ ++#ifdef RTCD_C ++#define RTCD_EXTERN ++#else ++#define RTCD_EXTERN extern ++#endif ++ ++/* ++ * VP9 ++ */ ++ ++#include "vp9/common/vp9_common.h" ++#include "vp9/common/vp9_enums.h" ++#include "vp9/common/vp9_filter.h" ++#include "vpx/vpx_integer.h" ++ ++struct macroblockd; ++ ++/* Encoder forward decls */ ++struct macroblock; ++struct vp9_variance_vtable; ++struct search_site_config; ++struct mv; ++union int_mv; ++struct yv12_buffer_config; ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++int64_t vp9_block_error_c(const tran_low_t* coeff, ++ const tran_low_t* dqcoeff, ++ intptr_t block_size, ++ int64_t* ssz); ++#define vp9_block_error vp9_block_error_c ++ ++int64_t vp9_block_error_fp_c(const tran_low_t* coeff, ++ const tran_low_t* dqcoeff, ++ int block_size); ++#define vp9_block_error_fp vp9_block_error_fp_c ++ ++int vp9_denoiser_filter_c(const uint8_t* sig, ++ int sig_stride, ++ const uint8_t* mc_avg, ++ int mc_avg_stride, ++ uint8_t* avg, ++ int avg_stride, ++ int increase_denoising, ++ BLOCK_SIZE bs, ++ int motion_magnitude); ++#define vp9_denoiser_filter vp9_denoiser_filter_c ++ ++int vp9_diamond_search_sad_c(const struct macroblock* x, ++ const struct search_site_config* cfg, ++ struct mv* ref_mv, ++ struct mv* best_mv, ++ int search_param, ++ int sad_per_bit, ++ int* num00, ++ const struct vp9_variance_vtable* fn_ptr, ++ const struct mv* center_mv); ++#define vp9_diamond_search_sad vp9_diamond_search_sad_c ++ ++void vp9_fht16x16_c(const int16_t* input, ++ tran_low_t* output, ++ int stride, ++ int tx_type); ++#define vp9_fht16x16 vp9_fht16x16_c ++ ++void vp9_fht4x4_c(const int16_t* input, ++ tran_low_t* output, ++ int stride, ++ int tx_type); ++#define vp9_fht4x4 vp9_fht4x4_c ++ ++void vp9_fht8x8_c(const int16_t* input, ++ tran_low_t* output, ++ int stride, ++ int tx_type); ++#define vp9_fht8x8 vp9_fht8x8_c ++ ++void vp9_filter_by_weight16x16_c(const uint8_t* src, ++ int src_stride, ++ uint8_t* dst, ++ int dst_stride, ++ int src_weight); ++#define vp9_filter_by_weight16x16 vp9_filter_by_weight16x16_c ++ ++void vp9_filter_by_weight8x8_c(const uint8_t* src, ++ int src_stride, ++ uint8_t* dst, ++ int dst_stride, ++ int src_weight); ++#define vp9_filter_by_weight8x8 vp9_filter_by_weight8x8_c ++ ++void vp9_fwht4x4_c(const int16_t* input, tran_low_t* output, int stride); ++#define vp9_fwht4x4 vp9_fwht4x4_c ++ ++int64_t vp9_highbd_block_error_c(const tran_low_t* coeff, ++ const tran_low_t* dqcoeff, ++ intptr_t block_size, ++ int64_t* ssz, ++ int bd); ++#define vp9_highbd_block_error vp9_highbd_block_error_c ++ ++void vp9_highbd_fht16x16_c(const int16_t* input, ++ tran_low_t* output, ++ int stride, ++ int tx_type); ++#define vp9_highbd_fht16x16 vp9_highbd_fht16x16_c ++ ++void vp9_highbd_fht4x4_c(const int16_t* input, ++ tran_low_t* output, ++ int stride, ++ int tx_type); ++#define vp9_highbd_fht4x4 vp9_highbd_fht4x4_c ++ ++void vp9_highbd_fht8x8_c(const int16_t* input, ++ tran_low_t* output, ++ int stride, ++ int 
tx_type); ++#define vp9_highbd_fht8x8 vp9_highbd_fht8x8_c ++ ++void vp9_highbd_fwht4x4_c(const int16_t* input, tran_low_t* output, int stride); ++#define vp9_highbd_fwht4x4 vp9_highbd_fwht4x4_c ++ ++void vp9_highbd_iht16x16_256_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int tx_type, ++ int bd); ++#define vp9_highbd_iht16x16_256_add vp9_highbd_iht16x16_256_add_c ++ ++void vp9_highbd_iht4x4_16_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int tx_type, ++ int bd); ++#define vp9_highbd_iht4x4_16_add vp9_highbd_iht4x4_16_add_c ++ ++void vp9_highbd_iht8x8_64_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int tx_type, ++ int bd); ++#define vp9_highbd_iht8x8_64_add vp9_highbd_iht8x8_64_add_c ++ ++void vp9_highbd_mbpost_proc_across_ip_c(uint16_t* src, ++ int pitch, ++ int rows, ++ int cols, ++ int flimit); ++#define vp9_highbd_mbpost_proc_across_ip vp9_highbd_mbpost_proc_across_ip_c ++ ++void vp9_highbd_mbpost_proc_down_c(uint16_t* dst, ++ int pitch, ++ int rows, ++ int cols, ++ int flimit); ++#define vp9_highbd_mbpost_proc_down vp9_highbd_mbpost_proc_down_c ++ ++void vp9_highbd_post_proc_down_and_across_c(const uint16_t* src_ptr, ++ uint16_t* dst_ptr, ++ int src_pixels_per_line, ++ int dst_pixels_per_line, ++ int rows, ++ int cols, ++ int flimit); ++#define vp9_highbd_post_proc_down_and_across \ ++ vp9_highbd_post_proc_down_and_across_c ++ ++void vp9_highbd_quantize_fp_c(const tran_low_t* coeff_ptr, ++ intptr_t n_coeffs, ++ int skip_block, ++ const int16_t* round_ptr, ++ const int16_t* quant_ptr, ++ tran_low_t* qcoeff_ptr, ++ tran_low_t* dqcoeff_ptr, ++ const int16_t* dequant_ptr, ++ uint16_t* eob_ptr, ++ const int16_t* scan, ++ const int16_t* iscan); ++#define vp9_highbd_quantize_fp vp9_highbd_quantize_fp_c ++ ++void vp9_highbd_quantize_fp_32x32_c(const tran_low_t* coeff_ptr, ++ intptr_t n_coeffs, ++ int skip_block, ++ const int16_t* round_ptr, ++ const int16_t* quant_ptr, ++ tran_low_t* qcoeff_ptr, ++ tran_low_t* dqcoeff_ptr, ++ const int16_t* dequant_ptr, ++ uint16_t* eob_ptr, ++ const int16_t* scan, ++ const int16_t* iscan); ++#define vp9_highbd_quantize_fp_32x32 vp9_highbd_quantize_fp_32x32_c ++ ++void vp9_highbd_temporal_filter_apply_c(const uint8_t* frame1, ++ unsigned int stride, ++ const uint8_t* frame2, ++ unsigned int block_width, ++ unsigned int block_height, ++ int strength, ++ int* blk_fw, ++ int use_32x32, ++ uint32_t* accumulator, ++ uint16_t* count); ++#define vp9_highbd_temporal_filter_apply vp9_highbd_temporal_filter_apply_c ++ ++void vp9_iht16x16_256_add_c(const tran_low_t* input, ++ uint8_t* dest, ++ int stride, ++ int tx_type); ++#define vp9_iht16x16_256_add vp9_iht16x16_256_add_c ++ ++void vp9_iht4x4_16_add_c(const tran_low_t* input, ++ uint8_t* dest, ++ int stride, ++ int tx_type); ++#define vp9_iht4x4_16_add vp9_iht4x4_16_add_c ++ ++void vp9_iht8x8_64_add_c(const tran_low_t* input, ++ uint8_t* dest, ++ int stride, ++ int tx_type); ++#define vp9_iht8x8_64_add vp9_iht8x8_64_add_c ++ ++void vp9_quantize_fp_c(const tran_low_t* coeff_ptr, ++ intptr_t n_coeffs, ++ int skip_block, ++ const int16_t* round_ptr, ++ const int16_t* quant_ptr, ++ tran_low_t* qcoeff_ptr, ++ tran_low_t* dqcoeff_ptr, ++ const int16_t* dequant_ptr, ++ uint16_t* eob_ptr, ++ const int16_t* scan, ++ const int16_t* iscan); ++#define vp9_quantize_fp vp9_quantize_fp_c ++ ++void vp9_quantize_fp_32x32_c(const tran_low_t* coeff_ptr, ++ intptr_t n_coeffs, ++ int skip_block, ++ const int16_t* round_ptr, ++ const int16_t* quant_ptr, ++ tran_low_t* 
qcoeff_ptr, ++ tran_low_t* dqcoeff_ptr, ++ const int16_t* dequant_ptr, ++ uint16_t* eob_ptr, ++ const int16_t* scan, ++ const int16_t* iscan); ++#define vp9_quantize_fp_32x32 vp9_quantize_fp_32x32_c ++ ++void vp9_scale_and_extend_frame_c(const struct yv12_buffer_config* src, ++ struct yv12_buffer_config* dst, ++ INTERP_FILTER filter_type, ++ int phase_scaler); ++#define vp9_scale_and_extend_frame vp9_scale_and_extend_frame_c ++ ++void vp9_rtcd(void); ++ ++#include "vpx_config.h" ++ ++#ifdef RTCD_C ++static void setup_rtcd_internal(void) {} ++#endif ++ ++#ifdef __cplusplus ++} // extern "C" ++#endif ++ ++#endif +diff --git a/src/3rdparty/chromium/third_party/libvpx/source/config/linux/la64/vpx_config.asm b/src/3rdparty/chromium/third_party/libvpx/source/config/linux/la64/vpx_config.asm +new file mode 100644 +index 000000000..00712e52b +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/libvpx/source/config/linux/la64/vpx_config.asm +@@ -0,0 +1,98 @@ ++@ This file was created from a .asm file ++@ using the ads2gas.pl script. ++ .syntax unified ++.equ VPX_ARCH_ARM , 0 ++.equ ARCH_ARM , 0 ++.equ VPX_ARCH_MIPS , 0 ++.equ ARCH_MIPS , 0 ++.equ VPX_ARCH_X86 , 0 ++.equ ARCH_X86 , 0 ++.equ VPX_ARCH_X86_64 , 0 ++.equ ARCH_X86_64 , 0 ++.equ VPX_ARCH_PPC , 0 ++.equ ARCH_PPC , 0 ++.equ HAVE_NEON , 0 ++.equ HAVE_NEON_ASM , 0 ++.equ HAVE_MIPS32 , 0 ++.equ HAVE_DSPR2 , 0 ++.equ HAVE_MSA , 0 ++.equ HAVE_MIPS64 , 0 ++.equ HAVE_MMX , 0 ++.equ HAVE_SSE , 0 ++.equ HAVE_SSE2 , 0 ++.equ HAVE_SSE3 , 0 ++.equ HAVE_SSSE3 , 0 ++.equ HAVE_SSE4_1 , 0 ++.equ HAVE_AVX , 0 ++.equ HAVE_AVX2 , 0 ++.equ HAVE_AVX512 , 0 ++.equ HAVE_VSX , 0 ++.equ HAVE_MMI , 0 ++.equ HAVE_VPX_PORTS , 1 ++.equ HAVE_PTHREAD_H , 1 ++.equ HAVE_UNISTD_H , 0 ++.equ CONFIG_DEPENDENCY_TRACKING , 1 ++.equ CONFIG_EXTERNAL_BUILD , 1 ++.equ CONFIG_INSTALL_DOCS , 0 ++.equ CONFIG_INSTALL_BINS , 1 ++.equ CONFIG_INSTALL_LIBS , 1 ++.equ CONFIG_INSTALL_SRCS , 0 ++.equ CONFIG_DEBUG , 0 ++.equ CONFIG_GPROF , 0 ++.equ CONFIG_GCOV , 0 ++.equ CONFIG_RVCT , 0 ++.equ CONFIG_GCC , 1 ++.equ CONFIG_MSVS , 0 ++.equ CONFIG_PIC , 0 ++.equ CONFIG_BIG_ENDIAN , 0 ++.equ CONFIG_CODEC_SRCS , 0 ++.equ CONFIG_DEBUG_LIBS , 0 ++.equ CONFIG_DEQUANT_TOKENS , 0 ++.equ CONFIG_DC_RECON , 0 ++.equ CONFIG_RUNTIME_CPU_DETECT , 0 ++.equ CONFIG_POSTPROC , 1 ++.equ CONFIG_VP9_POSTPROC , 1 ++.equ CONFIG_MULTITHREAD , 1 ++.equ CONFIG_INTERNAL_STATS , 0 ++.equ CONFIG_VP8_ENCODER , 1 ++.equ CONFIG_VP8_DECODER , 1 ++.equ CONFIG_VP9_ENCODER , 1 ++.equ CONFIG_VP9_DECODER , 1 ++.equ CONFIG_VP8 , 1 ++.equ CONFIG_VP9 , 1 ++.equ CONFIG_ENCODERS , 1 ++.equ CONFIG_DECODERS , 1 ++.equ CONFIG_STATIC_MSVCRT , 0 ++.equ CONFIG_SPATIAL_RESAMPLING , 1 ++.equ CONFIG_REALTIME_ONLY , 1 ++.equ CONFIG_ONTHEFLY_BITPACKING , 0 ++.equ CONFIG_ERROR_CONCEALMENT , 0 ++.equ CONFIG_SHARED , 0 ++.equ CONFIG_STATIC , 1 ++.equ CONFIG_SMALL , 0 ++.equ CONFIG_POSTPROC_VISUALIZER , 0 ++.equ CONFIG_OS_SUPPORT , 1 ++.equ CONFIG_UNIT_TESTS , 1 ++.equ CONFIG_WEBM_IO , 1 ++.equ CONFIG_LIBYUV , 0 ++.equ CONFIG_DECODE_PERF_TESTS , 0 ++.equ CONFIG_ENCODE_PERF_TESTS , 0 ++.equ CONFIG_MULTI_RES_ENCODING , 1 ++.equ CONFIG_TEMPORAL_DENOISING , 1 ++.equ CONFIG_VP9_TEMPORAL_DENOISING , 1 ++.equ CONFIG_CONSISTENT_RECODE , 0 ++.equ CONFIG_COEFFICIENT_RANGE_CHECKING , 0 ++.equ CONFIG_VP9_HIGHBITDEPTH , 1 ++.equ CONFIG_BETTER_HW_COMPATIBILITY , 0 ++.equ CONFIG_EXPERIMENTAL , 0 ++.equ CONFIG_SIZE_LIMIT , 1 ++.equ CONFIG_ALWAYS_ADJUST_BPM , 0 ++.equ CONFIG_BITSTREAM_DEBUG , 0 ++.equ CONFIG_MISMATCH_DEBUG , 0 ++.equ CONFIG_FP_MB_STATS , 0 ++.equ 
CONFIG_EMULATE_HARDWARE , 0 ++.equ CONFIG_NON_GREEDY_MV , 0 ++.equ CONFIG_RATE_CTRL , 0 ++.equ DECODE_WIDTH_LIMIT , 16384 ++.equ DECODE_HEIGHT_LIMIT , 16384 ++ .section .note.GNU-stack,"",%progbits +diff --git a/src/3rdparty/chromium/third_party/libvpx/source/config/linux/la64/vpx_config.c b/src/3rdparty/chromium/third_party/libvpx/source/config/linux/la64/vpx_config.c +new file mode 100644 +index 000000000..8aad25ff1 +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/libvpx/source/config/linux/la64/vpx_config.c +@@ -0,0 +1,10 @@ ++/* Copyright (c) 2011 The WebM project authors. All Rights Reserved. */ ++/* */ ++/* Use of this source code is governed by a BSD-style license */ ++/* that can be found in the LICENSE file in the root of the source */ ++/* tree. An additional intellectual property rights grant can be found */ ++/* in the file PATENTS. All contributing project authors may */ ++/* be found in the AUTHORS file in the root of the source tree. */ ++#include "vpx/vpx_codec.h" ++static const char* const cfg = "--target=generic-gnu --enable-vp9-highbitdepth --enable-external-build --enable-postproc --enable-multi-res-encoding --enable-temporal-denoising --enable-vp9-temporal-denoising --enable-vp9-postproc --size-limit=16384x16384 --enable-realtime-only --disable-install-docs --disable-libyuv"; ++const char *vpx_codec_build_config(void) {return cfg;} +diff --git a/src/3rdparty/chromium/third_party/libvpx/source/config/linux/la64/vpx_config.h b/src/3rdparty/chromium/third_party/libvpx/source/config/linux/la64/vpx_config.h +new file mode 100644 +index 000000000..fddb76bd2 +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/libvpx/source/config/linux/la64/vpx_config.h +@@ -0,0 +1,107 @@ ++/* Copyright (c) 2011 The WebM project authors. All Rights Reserved. */ ++/* */ ++/* Use of this source code is governed by a BSD-style license */ ++/* that can be found in the LICENSE file in the root of the source */ ++/* tree. An additional intellectual property rights grant can be found */ ++/* in the file PATENTS. All contributing project authors may */ ++/* be found in the AUTHORS file in the root of the source tree. */ ++/* This file automatically generated by configure. Do not edit! 
*/ ++#ifndef VPX_CONFIG_H ++#define VPX_CONFIG_H ++#define RESTRICT ++#define INLINE inline ++#define VPX_ARCH_ARM 0 ++#define ARCH_ARM 0 ++#define VPX_ARCH_MIPS 0 ++#define ARCH_MIPS 0 ++#define VPX_ARCH_X86 0 ++#define ARCH_X86 0 ++#define VPX_ARCH_X86_64 0 ++#define ARCH_X86_64 0 ++#define VPX_ARCH_PPC 0 ++#define ARCH_PPC 0 ++#define HAVE_NEON 0 ++#define HAVE_NEON_ASM 0 ++#define HAVE_MIPS32 0 ++#define HAVE_DSPR2 0 ++#define HAVE_MSA 0 ++#define HAVE_MIPS64 0 ++#define HAVE_MMX 0 ++#define HAVE_SSE 0 ++#define HAVE_SSE2 0 ++#define HAVE_SSE3 0 ++#define HAVE_SSSE3 0 ++#define HAVE_SSE4_1 0 ++#define HAVE_AVX 0 ++#define HAVE_AVX2 0 ++#define HAVE_AVX512 0 ++#define HAVE_VSX 0 ++#define HAVE_MMI 0 ++#define HAVE_VPX_PORTS 1 ++#define HAVE_PTHREAD_H 1 ++#define HAVE_UNISTD_H 0 ++#define CONFIG_DEPENDENCY_TRACKING 1 ++#define CONFIG_EXTERNAL_BUILD 1 ++#define CONFIG_INSTALL_DOCS 0 ++#define CONFIG_INSTALL_BINS 1 ++#define CONFIG_INSTALL_LIBS 1 ++#define CONFIG_INSTALL_SRCS 0 ++#define CONFIG_DEBUG 0 ++#define CONFIG_GPROF 0 ++#define CONFIG_GCOV 0 ++#define CONFIG_RVCT 0 ++#define CONFIG_GCC 1 ++#define CONFIG_MSVS 0 ++#define CONFIG_PIC 0 ++#define CONFIG_BIG_ENDIAN 0 ++#define CONFIG_CODEC_SRCS 0 ++#define CONFIG_DEBUG_LIBS 0 ++#define CONFIG_DEQUANT_TOKENS 0 ++#define CONFIG_DC_RECON 0 ++#define CONFIG_RUNTIME_CPU_DETECT 0 ++#define CONFIG_POSTPROC 1 ++#define CONFIG_VP9_POSTPROC 1 ++#define CONFIG_MULTITHREAD 1 ++#define CONFIG_INTERNAL_STATS 0 ++#define CONFIG_VP8_ENCODER 1 ++#define CONFIG_VP8_DECODER 1 ++#define CONFIG_VP9_ENCODER 1 ++#define CONFIG_VP9_DECODER 1 ++#define CONFIG_VP8 1 ++#define CONFIG_VP9 1 ++#define CONFIG_ENCODERS 1 ++#define CONFIG_DECODERS 1 ++#define CONFIG_STATIC_MSVCRT 0 ++#define CONFIG_SPATIAL_RESAMPLING 1 ++#define CONFIG_REALTIME_ONLY 1 ++#define CONFIG_ONTHEFLY_BITPACKING 0 ++#define CONFIG_ERROR_CONCEALMENT 0 ++#define CONFIG_SHARED 0 ++#define CONFIG_STATIC 1 ++#define CONFIG_SMALL 0 ++#define CONFIG_POSTPROC_VISUALIZER 0 ++#define CONFIG_OS_SUPPORT 1 ++#define CONFIG_UNIT_TESTS 1 ++#define CONFIG_WEBM_IO 1 ++#define CONFIG_LIBYUV 0 ++#define CONFIG_DECODE_PERF_TESTS 0 ++#define CONFIG_ENCODE_PERF_TESTS 0 ++#define CONFIG_MULTI_RES_ENCODING 1 ++#define CONFIG_TEMPORAL_DENOISING 1 ++#define CONFIG_VP9_TEMPORAL_DENOISING 1 ++#define CONFIG_CONSISTENT_RECODE 0 ++#define CONFIG_COEFFICIENT_RANGE_CHECKING 0 ++#define CONFIG_VP9_HIGHBITDEPTH 1 ++#define CONFIG_BETTER_HW_COMPATIBILITY 0 ++#define CONFIG_EXPERIMENTAL 0 ++#define CONFIG_SIZE_LIMIT 1 ++#define CONFIG_ALWAYS_ADJUST_BPM 0 ++#define CONFIG_BITSTREAM_DEBUG 0 ++#define CONFIG_MISMATCH_DEBUG 0 ++#define CONFIG_FP_MB_STATS 0 ++#define CONFIG_EMULATE_HARDWARE 0 ++#define CONFIG_NON_GREEDY_MV 0 ++#define CONFIG_RATE_CTRL 0 ++#define DECODE_WIDTH_LIMIT 16384 ++#define DECODE_HEIGHT_LIMIT 16384 ++#endif /* VPX_CONFIG_H */ +diff --git a/src/3rdparty/chromium/third_party/libvpx/source/config/linux/la64/vpx_dsp_rtcd.h b/src/3rdparty/chromium/third_party/libvpx/source/config/linux/la64/vpx_dsp_rtcd.h +new file mode 100644 +index 000000000..8ba4d8805 +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/libvpx/source/config/linux/la64/vpx_dsp_rtcd.h +@@ -0,0 +1,3868 @@ ++// This file is generated. Do not edit. 
++#ifndef VPX_DSP_RTCD_H_ ++#define VPX_DSP_RTCD_H_ ++ ++#ifdef RTCD_C ++#define RTCD_EXTERN ++#else ++#define RTCD_EXTERN extern ++#endif ++ ++/* ++ * DSP ++ */ ++ ++#include "vpx/vpx_integer.h" ++#include "vpx_dsp/vpx_dsp_common.h" ++#include "vpx_dsp/vpx_filter.h" ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++unsigned int vpx_avg_4x4_c(const uint8_t*, int p); ++#define vpx_avg_4x4 vpx_avg_4x4_c ++ ++unsigned int vpx_avg_8x8_c(const uint8_t*, int p); ++#define vpx_avg_8x8 vpx_avg_8x8_c ++ ++void vpx_comp_avg_pred_c(uint8_t* comp_pred, ++ const uint8_t* pred, ++ int width, ++ int height, ++ const uint8_t* ref, ++ int ref_stride); ++#define vpx_comp_avg_pred vpx_comp_avg_pred_c ++ ++void vpx_convolve8_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_convolve8 vpx_convolve8_c ++ ++void vpx_convolve8_avg_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_convolve8_avg vpx_convolve8_avg_c ++ ++void vpx_convolve8_avg_horiz_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_convolve8_avg_horiz vpx_convolve8_avg_horiz_c ++ ++void vpx_convolve8_avg_vert_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_convolve8_avg_vert vpx_convolve8_avg_vert_c ++ ++void vpx_convolve8_horiz_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_convolve8_horiz vpx_convolve8_horiz_c ++ ++void vpx_convolve8_vert_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_convolve8_vert vpx_convolve8_vert_c ++ ++void vpx_convolve_avg_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_convolve_avg vpx_convolve_avg_c ++ ++void vpx_convolve_copy_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_convolve_copy vpx_convolve_copy_c ++ ++void vpx_d117_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d117_predictor_16x16 vpx_d117_predictor_16x16_c ++ ++void vpx_d117_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d117_predictor_32x32 vpx_d117_predictor_32x32_c ++ ++void vpx_d117_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d117_predictor_4x4 vpx_d117_predictor_4x4_c ++ ++void 
vpx_d117_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d117_predictor_8x8 vpx_d117_predictor_8x8_c ++ ++void vpx_d135_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d135_predictor_16x16 vpx_d135_predictor_16x16_c ++ ++void vpx_d135_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d135_predictor_32x32 vpx_d135_predictor_32x32_c ++ ++void vpx_d135_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d135_predictor_4x4 vpx_d135_predictor_4x4_c ++ ++void vpx_d135_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d135_predictor_8x8 vpx_d135_predictor_8x8_c ++ ++void vpx_d153_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d153_predictor_16x16 vpx_d153_predictor_16x16_c ++ ++void vpx_d153_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d153_predictor_32x32 vpx_d153_predictor_32x32_c ++ ++void vpx_d153_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d153_predictor_4x4 vpx_d153_predictor_4x4_c ++ ++void vpx_d153_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d153_predictor_8x8 vpx_d153_predictor_8x8_c ++ ++void vpx_d207_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d207_predictor_16x16 vpx_d207_predictor_16x16_c ++ ++void vpx_d207_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d207_predictor_32x32 vpx_d207_predictor_32x32_c ++ ++void vpx_d207_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d207_predictor_4x4 vpx_d207_predictor_4x4_c ++ ++void vpx_d207_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d207_predictor_8x8 vpx_d207_predictor_8x8_c ++ ++void vpx_d45_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d45_predictor_16x16 vpx_d45_predictor_16x16_c ++ ++void vpx_d45_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d45_predictor_32x32 vpx_d45_predictor_32x32_c ++ ++void vpx_d45_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d45_predictor_4x4 vpx_d45_predictor_4x4_c ++ ++void vpx_d45_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d45_predictor_8x8 vpx_d45_predictor_8x8_c ++ ++void vpx_d45e_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d45e_predictor_4x4 vpx_d45e_predictor_4x4_c ++ ++void vpx_d63_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d63_predictor_16x16 vpx_d63_predictor_16x16_c ++ ++void vpx_d63_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define 
vpx_d63_predictor_32x32 vpx_d63_predictor_32x32_c ++ ++void vpx_d63_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d63_predictor_4x4 vpx_d63_predictor_4x4_c ++ ++void vpx_d63_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d63_predictor_8x8 vpx_d63_predictor_8x8_c ++ ++void vpx_d63e_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d63e_predictor_4x4 vpx_d63e_predictor_4x4_c ++ ++void vpx_dc_128_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_128_predictor_16x16 vpx_dc_128_predictor_16x16_c ++ ++void vpx_dc_128_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_128_predictor_32x32 vpx_dc_128_predictor_32x32_c ++ ++void vpx_dc_128_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_128_predictor_4x4 vpx_dc_128_predictor_4x4_c ++ ++void vpx_dc_128_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_128_predictor_8x8 vpx_dc_128_predictor_8x8_c ++ ++void vpx_dc_left_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_left_predictor_16x16 vpx_dc_left_predictor_16x16_c ++ ++void vpx_dc_left_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_left_predictor_32x32 vpx_dc_left_predictor_32x32_c ++ ++void vpx_dc_left_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_left_predictor_4x4 vpx_dc_left_predictor_4x4_c ++ ++void vpx_dc_left_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_left_predictor_8x8 vpx_dc_left_predictor_8x8_c ++ ++void vpx_dc_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_predictor_16x16 vpx_dc_predictor_16x16_c ++ ++void vpx_dc_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_predictor_32x32 vpx_dc_predictor_32x32_c ++ ++void vpx_dc_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_predictor_4x4 vpx_dc_predictor_4x4_c ++ ++void vpx_dc_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_predictor_8x8 vpx_dc_predictor_8x8_c ++ ++void vpx_dc_top_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_top_predictor_16x16 vpx_dc_top_predictor_16x16_c ++ ++void vpx_dc_top_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_top_predictor_32x32 vpx_dc_top_predictor_32x32_c ++ ++void vpx_dc_top_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_top_predictor_4x4 vpx_dc_top_predictor_4x4_c ++ ++void vpx_dc_top_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_top_predictor_8x8 vpx_dc_top_predictor_8x8_c ++ ++void 
vpx_fdct16x16_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_fdct16x16 vpx_fdct16x16_c ++ ++void vpx_fdct16x16_1_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_fdct16x16_1 vpx_fdct16x16_1_c ++ ++void vpx_fdct32x32_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_fdct32x32 vpx_fdct32x32_c ++ ++void vpx_fdct32x32_1_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_fdct32x32_1 vpx_fdct32x32_1_c ++ ++void vpx_fdct32x32_rd_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_fdct32x32_rd vpx_fdct32x32_rd_c ++ ++void vpx_fdct4x4_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_fdct4x4 vpx_fdct4x4_c ++ ++void vpx_fdct4x4_1_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_fdct4x4_1 vpx_fdct4x4_1_c ++ ++void vpx_fdct8x8_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_fdct8x8 vpx_fdct8x8_c ++ ++void vpx_fdct8x8_1_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_fdct8x8_1 vpx_fdct8x8_1_c ++ ++void vpx_get16x16var_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse, ++ int* sum); ++#define vpx_get16x16var vpx_get16x16var_c ++ ++unsigned int vpx_get4x4sse_cs_c(const unsigned char* src_ptr, ++ int src_stride, ++ const unsigned char* ref_ptr, ++ int ref_stride); ++#define vpx_get4x4sse_cs vpx_get4x4sse_cs_c ++ ++void vpx_get8x8var_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse, ++ int* sum); ++#define vpx_get8x8var vpx_get8x8var_c ++ ++unsigned int vpx_get_mb_ss_c(const int16_t*); ++#define vpx_get_mb_ss vpx_get_mb_ss_c ++ ++void vpx_h_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_h_predictor_16x16 vpx_h_predictor_16x16_c ++ ++void vpx_h_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_h_predictor_32x32 vpx_h_predictor_32x32_c ++ ++void vpx_h_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_h_predictor_4x4 vpx_h_predictor_4x4_c ++ ++void vpx_h_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_h_predictor_8x8 vpx_h_predictor_8x8_c ++ ++void vpx_hadamard_16x16_c(const int16_t* src_diff, ++ ptrdiff_t src_stride, ++ tran_low_t* coeff); ++#define vpx_hadamard_16x16 vpx_hadamard_16x16_c ++ ++void vpx_hadamard_32x32_c(const int16_t* src_diff, ++ ptrdiff_t src_stride, ++ tran_low_t* coeff); ++#define vpx_hadamard_32x32 vpx_hadamard_32x32_c ++ ++void vpx_hadamard_8x8_c(const int16_t* src_diff, ++ ptrdiff_t src_stride, ++ tran_low_t* coeff); ++#define vpx_hadamard_8x8 vpx_hadamard_8x8_c ++ ++void vpx_he_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_he_predictor_4x4 vpx_he_predictor_4x4_c ++ ++void vpx_highbd_10_get16x16var_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse, ++ int* sum); ++#define vpx_highbd_10_get16x16var vpx_highbd_10_get16x16var_c ++ ++void vpx_highbd_10_get8x8var_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse, ++ int* sum); ++#define vpx_highbd_10_get8x8var vpx_highbd_10_get8x8var_c ++ ++unsigned int 
vpx_highbd_10_mse16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_mse16x16 vpx_highbd_10_mse16x16_c ++ ++unsigned int vpx_highbd_10_mse16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_mse16x8 vpx_highbd_10_mse16x8_c ++ ++unsigned int vpx_highbd_10_mse8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_mse8x16 vpx_highbd_10_mse8x16_c ++ ++unsigned int vpx_highbd_10_mse8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_mse8x8 vpx_highbd_10_mse8x8_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance16x16 \ ++ vpx_highbd_10_sub_pixel_avg_variance16x16_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance16x32 \ ++ vpx_highbd_10_sub_pixel_avg_variance16x32_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance16x8 \ ++ vpx_highbd_10_sub_pixel_avg_variance16x8_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance32x16 \ ++ vpx_highbd_10_sub_pixel_avg_variance32x16_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance32x32 \ ++ vpx_highbd_10_sub_pixel_avg_variance32x32_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance32x64 \ ++ vpx_highbd_10_sub_pixel_avg_variance32x64_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance4x4 \ ++ vpx_highbd_10_sub_pixel_avg_variance4x4_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance4x8 \ ++ vpx_highbd_10_sub_pixel_avg_variance4x8_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_c( ++ const 
uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance64x32 \ ++ vpx_highbd_10_sub_pixel_avg_variance64x32_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance64x64 \ ++ vpx_highbd_10_sub_pixel_avg_variance64x64_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance8x16 \ ++ vpx_highbd_10_sub_pixel_avg_variance8x16_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance8x4 \ ++ vpx_highbd_10_sub_pixel_avg_variance8x4_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance8x8 \ ++ vpx_highbd_10_sub_pixel_avg_variance8x8_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance16x16 \ ++ vpx_highbd_10_sub_pixel_variance16x16_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance16x32 \ ++ vpx_highbd_10_sub_pixel_variance16x32_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance16x8 \ ++ vpx_highbd_10_sub_pixel_variance16x8_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance32x16 \ ++ vpx_highbd_10_sub_pixel_variance32x16_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance32x32 \ ++ vpx_highbd_10_sub_pixel_variance32x32_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance32x64 \ ++ vpx_highbd_10_sub_pixel_variance32x64_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define 
vpx_highbd_10_sub_pixel_variance4x4 \ ++ vpx_highbd_10_sub_pixel_variance4x4_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance4x8 \ ++ vpx_highbd_10_sub_pixel_variance4x8_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance64x32 \ ++ vpx_highbd_10_sub_pixel_variance64x32_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance64x64 \ ++ vpx_highbd_10_sub_pixel_variance64x64_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance8x16 \ ++ vpx_highbd_10_sub_pixel_variance8x16_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance8x4 \ ++ vpx_highbd_10_sub_pixel_variance8x4_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance8x8 \ ++ vpx_highbd_10_sub_pixel_variance8x8_c ++ ++unsigned int vpx_highbd_10_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance16x16 vpx_highbd_10_variance16x16_c ++ ++unsigned int vpx_highbd_10_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance16x32 vpx_highbd_10_variance16x32_c ++ ++unsigned int vpx_highbd_10_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance16x8 vpx_highbd_10_variance16x8_c ++ ++unsigned int vpx_highbd_10_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance32x16 vpx_highbd_10_variance32x16_c ++ ++unsigned int vpx_highbd_10_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance32x32 vpx_highbd_10_variance32x32_c ++ ++unsigned int vpx_highbd_10_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance32x64 vpx_highbd_10_variance32x64_c ++ ++unsigned int vpx_highbd_10_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance4x4 vpx_highbd_10_variance4x4_c ++ ++unsigned int vpx_highbd_10_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define 
vpx_highbd_10_variance4x8 vpx_highbd_10_variance4x8_c ++ ++unsigned int vpx_highbd_10_variance64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance64x32 vpx_highbd_10_variance64x32_c ++ ++unsigned int vpx_highbd_10_variance64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance64x64 vpx_highbd_10_variance64x64_c ++ ++unsigned int vpx_highbd_10_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance8x16 vpx_highbd_10_variance8x16_c ++ ++unsigned int vpx_highbd_10_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance8x4 vpx_highbd_10_variance8x4_c ++ ++unsigned int vpx_highbd_10_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance8x8 vpx_highbd_10_variance8x8_c ++ ++void vpx_highbd_12_get16x16var_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse, ++ int* sum); ++#define vpx_highbd_12_get16x16var vpx_highbd_12_get16x16var_c ++ ++void vpx_highbd_12_get8x8var_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse, ++ int* sum); ++#define vpx_highbd_12_get8x8var vpx_highbd_12_get8x8var_c ++ ++unsigned int vpx_highbd_12_mse16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_mse16x16 vpx_highbd_12_mse16x16_c ++ ++unsigned int vpx_highbd_12_mse16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_mse16x8 vpx_highbd_12_mse16x8_c ++ ++unsigned int vpx_highbd_12_mse8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_mse8x16 vpx_highbd_12_mse8x16_c ++ ++unsigned int vpx_highbd_12_mse8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_mse8x8 vpx_highbd_12_mse8x8_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance16x16 \ ++ vpx_highbd_12_sub_pixel_avg_variance16x16_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance16x32 \ ++ vpx_highbd_12_sub_pixel_avg_variance16x32_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance16x8 \ ++ vpx_highbd_12_sub_pixel_avg_variance16x8_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_c( ++ const uint8_t* src_ptr, 
++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance32x16 \ ++ vpx_highbd_12_sub_pixel_avg_variance32x16_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance32x32 \ ++ vpx_highbd_12_sub_pixel_avg_variance32x32_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance32x64 \ ++ vpx_highbd_12_sub_pixel_avg_variance32x64_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance4x4 \ ++ vpx_highbd_12_sub_pixel_avg_variance4x4_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance4x8 \ ++ vpx_highbd_12_sub_pixel_avg_variance4x8_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance64x32 \ ++ vpx_highbd_12_sub_pixel_avg_variance64x32_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance64x64 \ ++ vpx_highbd_12_sub_pixel_avg_variance64x64_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance8x16 \ ++ vpx_highbd_12_sub_pixel_avg_variance8x16_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance8x4 \ ++ vpx_highbd_12_sub_pixel_avg_variance8x4_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance8x8 \ ++ vpx_highbd_12_sub_pixel_avg_variance8x8_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance16x16 \ ++ vpx_highbd_12_sub_pixel_variance16x16_c ++ ++uint32_t 
vpx_highbd_12_sub_pixel_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance16x32 \ ++ vpx_highbd_12_sub_pixel_variance16x32_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance16x8 \ ++ vpx_highbd_12_sub_pixel_variance16x8_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance32x16 \ ++ vpx_highbd_12_sub_pixel_variance32x16_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance32x32 \ ++ vpx_highbd_12_sub_pixel_variance32x32_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance32x64 \ ++ vpx_highbd_12_sub_pixel_variance32x64_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance4x4 \ ++ vpx_highbd_12_sub_pixel_variance4x4_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance4x8 \ ++ vpx_highbd_12_sub_pixel_variance4x8_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance64x32 \ ++ vpx_highbd_12_sub_pixel_variance64x32_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance64x64 \ ++ vpx_highbd_12_sub_pixel_variance64x64_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance8x16 \ ++ vpx_highbd_12_sub_pixel_variance8x16_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance8x4 \ ++ vpx_highbd_12_sub_pixel_variance8x4_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance8x8 \ ++ vpx_highbd_12_sub_pixel_variance8x8_c ++ ++unsigned int vpx_highbd_12_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int 
ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance16x16 vpx_highbd_12_variance16x16_c ++ ++unsigned int vpx_highbd_12_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance16x32 vpx_highbd_12_variance16x32_c ++ ++unsigned int vpx_highbd_12_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance16x8 vpx_highbd_12_variance16x8_c ++ ++unsigned int vpx_highbd_12_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance32x16 vpx_highbd_12_variance32x16_c ++ ++unsigned int vpx_highbd_12_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance32x32 vpx_highbd_12_variance32x32_c ++ ++unsigned int vpx_highbd_12_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance32x64 vpx_highbd_12_variance32x64_c ++ ++unsigned int vpx_highbd_12_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance4x4 vpx_highbd_12_variance4x4_c ++ ++unsigned int vpx_highbd_12_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance4x8 vpx_highbd_12_variance4x8_c ++ ++unsigned int vpx_highbd_12_variance64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance64x32 vpx_highbd_12_variance64x32_c ++ ++unsigned int vpx_highbd_12_variance64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance64x64 vpx_highbd_12_variance64x64_c ++ ++unsigned int vpx_highbd_12_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance8x16 vpx_highbd_12_variance8x16_c ++ ++unsigned int vpx_highbd_12_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance8x4 vpx_highbd_12_variance8x4_c ++ ++unsigned int vpx_highbd_12_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance8x8 vpx_highbd_12_variance8x8_c ++ ++void vpx_highbd_8_get16x16var_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse, ++ int* sum); ++#define vpx_highbd_8_get16x16var vpx_highbd_8_get16x16var_c ++ ++void vpx_highbd_8_get8x8var_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse, ++ int* sum); ++#define vpx_highbd_8_get8x8var vpx_highbd_8_get8x8var_c ++ ++unsigned int vpx_highbd_8_mse16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_mse16x16 vpx_highbd_8_mse16x16_c ++ ++unsigned int vpx_highbd_8_mse16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const 
uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_mse16x8 vpx_highbd_8_mse16x8_c ++ ++unsigned int vpx_highbd_8_mse8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_mse8x16 vpx_highbd_8_mse8x16_c ++ ++unsigned int vpx_highbd_8_mse8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_mse8x8 vpx_highbd_8_mse8x8_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance16x16 \ ++ vpx_highbd_8_sub_pixel_avg_variance16x16_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance16x32 \ ++ vpx_highbd_8_sub_pixel_avg_variance16x32_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance16x8 \ ++ vpx_highbd_8_sub_pixel_avg_variance16x8_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance32x16 \ ++ vpx_highbd_8_sub_pixel_avg_variance32x16_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance32x32 \ ++ vpx_highbd_8_sub_pixel_avg_variance32x32_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance32x64 \ ++ vpx_highbd_8_sub_pixel_avg_variance32x64_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance4x4 \ ++ vpx_highbd_8_sub_pixel_avg_variance4x4_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance4x8 \ ++ vpx_highbd_8_sub_pixel_avg_variance4x8_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance64x32 \ ++ vpx_highbd_8_sub_pixel_avg_variance64x32_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr, ++ 
int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance64x64 \ ++ vpx_highbd_8_sub_pixel_avg_variance64x64_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance8x16 \ ++ vpx_highbd_8_sub_pixel_avg_variance8x16_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance8x4 \ ++ vpx_highbd_8_sub_pixel_avg_variance8x4_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance8x8 \ ++ vpx_highbd_8_sub_pixel_avg_variance8x8_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance16x16 \ ++ vpx_highbd_8_sub_pixel_variance16x16_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance16x32 \ ++ vpx_highbd_8_sub_pixel_variance16x32_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance16x8 \ ++ vpx_highbd_8_sub_pixel_variance16x8_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance32x16 \ ++ vpx_highbd_8_sub_pixel_variance32x16_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance32x32 \ ++ vpx_highbd_8_sub_pixel_variance32x32_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance32x64 \ ++ vpx_highbd_8_sub_pixel_variance32x64_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance4x4 vpx_highbd_8_sub_pixel_variance4x4_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance4x8 vpx_highbd_8_sub_pixel_variance4x8_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance64x32_c(const uint8_t* 
src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance64x32 \ ++ vpx_highbd_8_sub_pixel_variance64x32_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance64x64 \ ++ vpx_highbd_8_sub_pixel_variance64x64_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance8x16 \ ++ vpx_highbd_8_sub_pixel_variance8x16_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance8x4 vpx_highbd_8_sub_pixel_variance8x4_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance8x8 vpx_highbd_8_sub_pixel_variance8x8_c ++ ++unsigned int vpx_highbd_8_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance16x16 vpx_highbd_8_variance16x16_c ++ ++unsigned int vpx_highbd_8_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance16x32 vpx_highbd_8_variance16x32_c ++ ++unsigned int vpx_highbd_8_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance16x8 vpx_highbd_8_variance16x8_c ++ ++unsigned int vpx_highbd_8_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance32x16 vpx_highbd_8_variance32x16_c ++ ++unsigned int vpx_highbd_8_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance32x32 vpx_highbd_8_variance32x32_c ++ ++unsigned int vpx_highbd_8_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance32x64 vpx_highbd_8_variance32x64_c ++ ++unsigned int vpx_highbd_8_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance4x4 vpx_highbd_8_variance4x4_c ++ ++unsigned int vpx_highbd_8_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance4x8 vpx_highbd_8_variance4x8_c ++ ++unsigned int vpx_highbd_8_variance64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance64x32 vpx_highbd_8_variance64x32_c ++ ++unsigned int vpx_highbd_8_variance64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance64x64 
vpx_highbd_8_variance64x64_c ++ ++unsigned int vpx_highbd_8_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance8x16 vpx_highbd_8_variance8x16_c ++ ++unsigned int vpx_highbd_8_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance8x4 vpx_highbd_8_variance8x4_c ++ ++unsigned int vpx_highbd_8_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance8x8 vpx_highbd_8_variance8x8_c ++ ++unsigned int vpx_highbd_avg_4x4_c(const uint8_t* s8, int p); ++#define vpx_highbd_avg_4x4 vpx_highbd_avg_4x4_c ++ ++unsigned int vpx_highbd_avg_8x8_c(const uint8_t* s8, int p); ++#define vpx_highbd_avg_8x8 vpx_highbd_avg_8x8_c ++ ++void vpx_highbd_comp_avg_pred_c(uint16_t* comp_pred, ++ const uint16_t* pred, ++ int width, ++ int height, ++ const uint16_t* ref, ++ int ref_stride); ++#define vpx_highbd_comp_avg_pred vpx_highbd_comp_avg_pred_c ++ ++void vpx_highbd_convolve8_c(const uint16_t* src, ++ ptrdiff_t src_stride, ++ uint16_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h, ++ int bd); ++#define vpx_highbd_convolve8 vpx_highbd_convolve8_c ++ ++void vpx_highbd_convolve8_avg_c(const uint16_t* src, ++ ptrdiff_t src_stride, ++ uint16_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h, ++ int bd); ++#define vpx_highbd_convolve8_avg vpx_highbd_convolve8_avg_c ++ ++void vpx_highbd_convolve8_avg_horiz_c(const uint16_t* src, ++ ptrdiff_t src_stride, ++ uint16_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h, ++ int bd); ++#define vpx_highbd_convolve8_avg_horiz vpx_highbd_convolve8_avg_horiz_c ++ ++void vpx_highbd_convolve8_avg_vert_c(const uint16_t* src, ++ ptrdiff_t src_stride, ++ uint16_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h, ++ int bd); ++#define vpx_highbd_convolve8_avg_vert vpx_highbd_convolve8_avg_vert_c ++ ++void vpx_highbd_convolve8_horiz_c(const uint16_t* src, ++ ptrdiff_t src_stride, ++ uint16_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h, ++ int bd); ++#define vpx_highbd_convolve8_horiz vpx_highbd_convolve8_horiz_c ++ ++void vpx_highbd_convolve8_vert_c(const uint16_t* src, ++ ptrdiff_t src_stride, ++ uint16_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h, ++ int bd); ++#define vpx_highbd_convolve8_vert vpx_highbd_convolve8_vert_c ++ ++void vpx_highbd_convolve_avg_c(const uint16_t* src, ++ ptrdiff_t src_stride, ++ uint16_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h, ++ int bd); ++#define vpx_highbd_convolve_avg vpx_highbd_convolve_avg_c ++ ++void vpx_highbd_convolve_copy_c(const uint16_t* src, ++ ptrdiff_t src_stride, ++ uint16_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int 
x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h, ++ int bd); ++#define vpx_highbd_convolve_copy vpx_highbd_convolve_copy_c ++ ++void vpx_highbd_d117_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d117_predictor_16x16 vpx_highbd_d117_predictor_16x16_c ++ ++void vpx_highbd_d117_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d117_predictor_32x32 vpx_highbd_d117_predictor_32x32_c ++ ++void vpx_highbd_d117_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d117_predictor_4x4 vpx_highbd_d117_predictor_4x4_c ++ ++void vpx_highbd_d117_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d117_predictor_8x8 vpx_highbd_d117_predictor_8x8_c ++ ++void vpx_highbd_d135_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d135_predictor_16x16 vpx_highbd_d135_predictor_16x16_c ++ ++void vpx_highbd_d135_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d135_predictor_32x32 vpx_highbd_d135_predictor_32x32_c ++ ++void vpx_highbd_d135_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d135_predictor_4x4 vpx_highbd_d135_predictor_4x4_c ++ ++void vpx_highbd_d135_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d135_predictor_8x8 vpx_highbd_d135_predictor_8x8_c ++ ++void vpx_highbd_d153_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d153_predictor_16x16 vpx_highbd_d153_predictor_16x16_c ++ ++void vpx_highbd_d153_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d153_predictor_32x32 vpx_highbd_d153_predictor_32x32_c ++ ++void vpx_highbd_d153_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d153_predictor_4x4 vpx_highbd_d153_predictor_4x4_c ++ ++void vpx_highbd_d153_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d153_predictor_8x8 vpx_highbd_d153_predictor_8x8_c ++ ++void vpx_highbd_d207_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d207_predictor_16x16 vpx_highbd_d207_predictor_16x16_c ++ ++void vpx_highbd_d207_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d207_predictor_32x32 vpx_highbd_d207_predictor_32x32_c ++ ++void vpx_highbd_d207_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d207_predictor_4x4 vpx_highbd_d207_predictor_4x4_c ++ ++void vpx_highbd_d207_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define 
vpx_highbd_d207_predictor_8x8 vpx_highbd_d207_predictor_8x8_c ++ ++void vpx_highbd_d45_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d45_predictor_16x16 vpx_highbd_d45_predictor_16x16_c ++ ++void vpx_highbd_d45_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d45_predictor_32x32 vpx_highbd_d45_predictor_32x32_c ++ ++void vpx_highbd_d45_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d45_predictor_4x4 vpx_highbd_d45_predictor_4x4_c ++ ++void vpx_highbd_d45_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d45_predictor_8x8 vpx_highbd_d45_predictor_8x8_c ++ ++void vpx_highbd_d63_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d63_predictor_16x16 vpx_highbd_d63_predictor_16x16_c ++ ++void vpx_highbd_d63_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d63_predictor_32x32 vpx_highbd_d63_predictor_32x32_c ++ ++void vpx_highbd_d63_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d63_predictor_4x4 vpx_highbd_d63_predictor_4x4_c ++ ++void vpx_highbd_d63_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d63_predictor_8x8 vpx_highbd_d63_predictor_8x8_c ++ ++void vpx_highbd_dc_128_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_128_predictor_16x16 vpx_highbd_dc_128_predictor_16x16_c ++ ++void vpx_highbd_dc_128_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_128_predictor_32x32 vpx_highbd_dc_128_predictor_32x32_c ++ ++void vpx_highbd_dc_128_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_128_predictor_4x4 vpx_highbd_dc_128_predictor_4x4_c ++ ++void vpx_highbd_dc_128_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_128_predictor_8x8 vpx_highbd_dc_128_predictor_8x8_c ++ ++void vpx_highbd_dc_left_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_left_predictor_16x16 vpx_highbd_dc_left_predictor_16x16_c ++ ++void vpx_highbd_dc_left_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_left_predictor_32x32 vpx_highbd_dc_left_predictor_32x32_c ++ ++void vpx_highbd_dc_left_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_left_predictor_4x4 vpx_highbd_dc_left_predictor_4x4_c ++ ++void vpx_highbd_dc_left_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_left_predictor_8x8 vpx_highbd_dc_left_predictor_8x8_c 
++ ++void vpx_highbd_dc_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_predictor_16x16 vpx_highbd_dc_predictor_16x16_c ++ ++void vpx_highbd_dc_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_predictor_32x32 vpx_highbd_dc_predictor_32x32_c ++ ++void vpx_highbd_dc_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_predictor_4x4 vpx_highbd_dc_predictor_4x4_c ++ ++void vpx_highbd_dc_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_predictor_8x8 vpx_highbd_dc_predictor_8x8_c ++ ++void vpx_highbd_dc_top_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_top_predictor_16x16 vpx_highbd_dc_top_predictor_16x16_c ++ ++void vpx_highbd_dc_top_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_top_predictor_32x32 vpx_highbd_dc_top_predictor_32x32_c ++ ++void vpx_highbd_dc_top_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_top_predictor_4x4 vpx_highbd_dc_top_predictor_4x4_c ++ ++void vpx_highbd_dc_top_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_top_predictor_8x8 vpx_highbd_dc_top_predictor_8x8_c ++ ++void vpx_highbd_fdct16x16_c(const int16_t* input, ++ tran_low_t* output, ++ int stride); ++#define vpx_highbd_fdct16x16 vpx_highbd_fdct16x16_c ++ ++void vpx_highbd_fdct16x16_1_c(const int16_t* input, ++ tran_low_t* output, ++ int stride); ++#define vpx_highbd_fdct16x16_1 vpx_highbd_fdct16x16_1_c ++ ++void vpx_highbd_fdct32x32_c(const int16_t* input, ++ tran_low_t* output, ++ int stride); ++#define vpx_highbd_fdct32x32 vpx_highbd_fdct32x32_c ++ ++void vpx_highbd_fdct32x32_1_c(const int16_t* input, ++ tran_low_t* output, ++ int stride); ++#define vpx_highbd_fdct32x32_1 vpx_highbd_fdct32x32_1_c ++ ++void vpx_highbd_fdct32x32_rd_c(const int16_t* input, ++ tran_low_t* output, ++ int stride); ++#define vpx_highbd_fdct32x32_rd vpx_highbd_fdct32x32_rd_c ++ ++void vpx_highbd_fdct4x4_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_highbd_fdct4x4 vpx_highbd_fdct4x4_c ++ ++void vpx_highbd_fdct8x8_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_highbd_fdct8x8 vpx_highbd_fdct8x8_c ++ ++void vpx_highbd_fdct8x8_1_c(const int16_t* input, ++ tran_low_t* output, ++ int stride); ++#define vpx_highbd_fdct8x8_1 vpx_highbd_fdct8x8_1_c ++ ++void vpx_highbd_h_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_h_predictor_16x16 vpx_highbd_h_predictor_16x16_c ++ ++void vpx_highbd_h_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_h_predictor_32x32 vpx_highbd_h_predictor_32x32_c ++ ++void vpx_highbd_h_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_h_predictor_4x4 vpx_highbd_h_predictor_4x4_c ++ ++void 
vpx_highbd_h_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_h_predictor_8x8 vpx_highbd_h_predictor_8x8_c ++ ++void vpx_highbd_hadamard_16x16_c(const int16_t* src_diff, ++ ptrdiff_t src_stride, ++ tran_low_t* coeff); ++#define vpx_highbd_hadamard_16x16 vpx_highbd_hadamard_16x16_c ++ ++void vpx_highbd_hadamard_32x32_c(const int16_t* src_diff, ++ ptrdiff_t src_stride, ++ tran_low_t* coeff); ++#define vpx_highbd_hadamard_32x32 vpx_highbd_hadamard_32x32_c ++ ++void vpx_highbd_hadamard_8x8_c(const int16_t* src_diff, ++ ptrdiff_t src_stride, ++ tran_low_t* coeff); ++#define vpx_highbd_hadamard_8x8 vpx_highbd_hadamard_8x8_c ++ ++void vpx_highbd_idct16x16_10_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct16x16_10_add vpx_highbd_idct16x16_10_add_c ++ ++void vpx_highbd_idct16x16_1_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct16x16_1_add vpx_highbd_idct16x16_1_add_c ++ ++void vpx_highbd_idct16x16_256_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct16x16_256_add vpx_highbd_idct16x16_256_add_c ++ ++void vpx_highbd_idct16x16_38_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct16x16_38_add vpx_highbd_idct16x16_38_add_c ++ ++void vpx_highbd_idct32x32_1024_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct32x32_1024_add vpx_highbd_idct32x32_1024_add_c ++ ++void vpx_highbd_idct32x32_135_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct32x32_135_add vpx_highbd_idct32x32_135_add_c ++ ++void vpx_highbd_idct32x32_1_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct32x32_1_add vpx_highbd_idct32x32_1_add_c ++ ++void vpx_highbd_idct32x32_34_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct32x32_34_add vpx_highbd_idct32x32_34_add_c ++ ++void vpx_highbd_idct4x4_16_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct4x4_16_add vpx_highbd_idct4x4_16_add_c ++ ++void vpx_highbd_idct4x4_1_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct4x4_1_add vpx_highbd_idct4x4_1_add_c ++ ++void vpx_highbd_idct8x8_12_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct8x8_12_add vpx_highbd_idct8x8_12_add_c ++ ++void vpx_highbd_idct8x8_1_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct8x8_1_add vpx_highbd_idct8x8_1_add_c ++ ++void vpx_highbd_idct8x8_64_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct8x8_64_add vpx_highbd_idct8x8_64_add_c ++ ++void vpx_highbd_iwht4x4_16_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_iwht4x4_16_add vpx_highbd_iwht4x4_16_add_c ++ ++void vpx_highbd_iwht4x4_1_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_iwht4x4_1_add vpx_highbd_iwht4x4_1_add_c ++ ++void vpx_highbd_lpf_horizontal_16_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh, ++ int bd); ++#define vpx_highbd_lpf_horizontal_16 
vpx_highbd_lpf_horizontal_16_c ++ ++void vpx_highbd_lpf_horizontal_16_dual_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh, ++ int bd); ++#define vpx_highbd_lpf_horizontal_16_dual vpx_highbd_lpf_horizontal_16_dual_c ++ ++void vpx_highbd_lpf_horizontal_4_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh, ++ int bd); ++#define vpx_highbd_lpf_horizontal_4 vpx_highbd_lpf_horizontal_4_c ++ ++void vpx_highbd_lpf_horizontal_4_dual_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit0, ++ const uint8_t* limit0, ++ const uint8_t* thresh0, ++ const uint8_t* blimit1, ++ const uint8_t* limit1, ++ const uint8_t* thresh1, ++ int bd); ++#define vpx_highbd_lpf_horizontal_4_dual vpx_highbd_lpf_horizontal_4_dual_c ++ ++void vpx_highbd_lpf_horizontal_8_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh, ++ int bd); ++#define vpx_highbd_lpf_horizontal_8 vpx_highbd_lpf_horizontal_8_c ++ ++void vpx_highbd_lpf_horizontal_8_dual_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit0, ++ const uint8_t* limit0, ++ const uint8_t* thresh0, ++ const uint8_t* blimit1, ++ const uint8_t* limit1, ++ const uint8_t* thresh1, ++ int bd); ++#define vpx_highbd_lpf_horizontal_8_dual vpx_highbd_lpf_horizontal_8_dual_c ++ ++void vpx_highbd_lpf_vertical_16_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh, ++ int bd); ++#define vpx_highbd_lpf_vertical_16 vpx_highbd_lpf_vertical_16_c ++ ++void vpx_highbd_lpf_vertical_16_dual_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh, ++ int bd); ++#define vpx_highbd_lpf_vertical_16_dual vpx_highbd_lpf_vertical_16_dual_c ++ ++void vpx_highbd_lpf_vertical_4_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh, ++ int bd); ++#define vpx_highbd_lpf_vertical_4 vpx_highbd_lpf_vertical_4_c ++ ++void vpx_highbd_lpf_vertical_4_dual_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit0, ++ const uint8_t* limit0, ++ const uint8_t* thresh0, ++ const uint8_t* blimit1, ++ const uint8_t* limit1, ++ const uint8_t* thresh1, ++ int bd); ++#define vpx_highbd_lpf_vertical_4_dual vpx_highbd_lpf_vertical_4_dual_c ++ ++void vpx_highbd_lpf_vertical_8_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh, ++ int bd); ++#define vpx_highbd_lpf_vertical_8 vpx_highbd_lpf_vertical_8_c ++ ++void vpx_highbd_lpf_vertical_8_dual_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit0, ++ const uint8_t* limit0, ++ const uint8_t* thresh0, ++ const uint8_t* blimit1, ++ const uint8_t* limit1, ++ const uint8_t* thresh1, ++ int bd); ++#define vpx_highbd_lpf_vertical_8_dual vpx_highbd_lpf_vertical_8_dual_c ++ ++void vpx_highbd_minmax_8x8_c(const uint8_t* s8, ++ int p, ++ const uint8_t* d8, ++ int dp, ++ int* min, ++ int* max); ++#define vpx_highbd_minmax_8x8 vpx_highbd_minmax_8x8_c ++ ++void vpx_highbd_quantize_b_c(const tran_low_t* coeff_ptr, ++ intptr_t n_coeffs, ++ int skip_block, ++ const int16_t* zbin_ptr, ++ const int16_t* round_ptr, ++ const int16_t* quant_ptr, ++ const int16_t* quant_shift_ptr, ++ tran_low_t* qcoeff_ptr, ++ tran_low_t* dqcoeff_ptr, ++ const int16_t* dequant_ptr, ++ uint16_t* eob_ptr, ++ const int16_t* scan, ++ const int16_t* iscan); ++#define vpx_highbd_quantize_b vpx_highbd_quantize_b_c ++ ++void vpx_highbd_quantize_b_32x32_c(const 
tran_low_t* coeff_ptr, ++ intptr_t n_coeffs, ++ int skip_block, ++ const int16_t* zbin_ptr, ++ const int16_t* round_ptr, ++ const int16_t* quant_ptr, ++ const int16_t* quant_shift_ptr, ++ tran_low_t* qcoeff_ptr, ++ tran_low_t* dqcoeff_ptr, ++ const int16_t* dequant_ptr, ++ uint16_t* eob_ptr, ++ const int16_t* scan, ++ const int16_t* iscan); ++#define vpx_highbd_quantize_b_32x32 vpx_highbd_quantize_b_32x32_c ++ ++unsigned int vpx_highbd_sad16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad16x16 vpx_highbd_sad16x16_c ++ ++unsigned int vpx_highbd_sad16x16_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad16x16_avg vpx_highbd_sad16x16_avg_c ++ ++void vpx_highbd_sad16x16x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_highbd_sad16x16x4d vpx_highbd_sad16x16x4d_c ++ ++unsigned int vpx_highbd_sad16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad16x32 vpx_highbd_sad16x32_c ++ ++unsigned int vpx_highbd_sad16x32_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad16x32_avg vpx_highbd_sad16x32_avg_c ++ ++void vpx_highbd_sad16x32x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_highbd_sad16x32x4d vpx_highbd_sad16x32x4d_c ++ ++unsigned int vpx_highbd_sad16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad16x8 vpx_highbd_sad16x8_c ++ ++unsigned int vpx_highbd_sad16x8_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad16x8_avg vpx_highbd_sad16x8_avg_c ++ ++void vpx_highbd_sad16x8x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_highbd_sad16x8x4d vpx_highbd_sad16x8x4d_c ++ ++unsigned int vpx_highbd_sad32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad32x16 vpx_highbd_sad32x16_c ++ ++unsigned int vpx_highbd_sad32x16_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad32x16_avg vpx_highbd_sad32x16_avg_c ++ ++void vpx_highbd_sad32x16x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_highbd_sad32x16x4d vpx_highbd_sad32x16x4d_c ++ ++unsigned int vpx_highbd_sad32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad32x32 vpx_highbd_sad32x32_c ++ ++unsigned int vpx_highbd_sad32x32_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad32x32_avg vpx_highbd_sad32x32_avg_c ++ ++void vpx_highbd_sad32x32x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_highbd_sad32x32x4d vpx_highbd_sad32x32x4d_c ++ ++unsigned int 
vpx_highbd_sad32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad32x64 vpx_highbd_sad32x64_c ++ ++unsigned int vpx_highbd_sad32x64_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad32x64_avg vpx_highbd_sad32x64_avg_c ++ ++void vpx_highbd_sad32x64x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_highbd_sad32x64x4d vpx_highbd_sad32x64x4d_c ++ ++unsigned int vpx_highbd_sad4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad4x4 vpx_highbd_sad4x4_c ++ ++unsigned int vpx_highbd_sad4x4_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad4x4_avg vpx_highbd_sad4x4_avg_c ++ ++void vpx_highbd_sad4x4x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_highbd_sad4x4x4d vpx_highbd_sad4x4x4d_c ++ ++unsigned int vpx_highbd_sad4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad4x8 vpx_highbd_sad4x8_c ++ ++unsigned int vpx_highbd_sad4x8_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad4x8_avg vpx_highbd_sad4x8_avg_c ++ ++void vpx_highbd_sad4x8x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_highbd_sad4x8x4d vpx_highbd_sad4x8x4d_c ++ ++unsigned int vpx_highbd_sad64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad64x32 vpx_highbd_sad64x32_c ++ ++unsigned int vpx_highbd_sad64x32_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad64x32_avg vpx_highbd_sad64x32_avg_c ++ ++void vpx_highbd_sad64x32x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_highbd_sad64x32x4d vpx_highbd_sad64x32x4d_c ++ ++unsigned int vpx_highbd_sad64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad64x64 vpx_highbd_sad64x64_c ++ ++unsigned int vpx_highbd_sad64x64_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad64x64_avg vpx_highbd_sad64x64_avg_c ++ ++void vpx_highbd_sad64x64x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_highbd_sad64x64x4d vpx_highbd_sad64x64x4d_c ++ ++unsigned int vpx_highbd_sad8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad8x16 vpx_highbd_sad8x16_c ++ ++unsigned int vpx_highbd_sad8x16_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad8x16_avg vpx_highbd_sad8x16_avg_c ++ ++void vpx_highbd_sad8x16x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const 
uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_highbd_sad8x16x4d vpx_highbd_sad8x16x4d_c ++ ++unsigned int vpx_highbd_sad8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad8x4 vpx_highbd_sad8x4_c ++ ++unsigned int vpx_highbd_sad8x4_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad8x4_avg vpx_highbd_sad8x4_avg_c ++ ++void vpx_highbd_sad8x4x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_highbd_sad8x4x4d vpx_highbd_sad8x4x4d_c ++ ++unsigned int vpx_highbd_sad8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad8x8 vpx_highbd_sad8x8_c ++ ++unsigned int vpx_highbd_sad8x8_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad8x8_avg vpx_highbd_sad8x8_avg_c ++ ++void vpx_highbd_sad8x8x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_highbd_sad8x8x4d vpx_highbd_sad8x8x4d_c ++ ++int vpx_highbd_satd_c(const tran_low_t* coeff, int length); ++#define vpx_highbd_satd vpx_highbd_satd_c ++ ++void vpx_highbd_subtract_block_c(int rows, ++ int cols, ++ int16_t* diff_ptr, ++ ptrdiff_t diff_stride, ++ const uint8_t* src8_ptr, ++ ptrdiff_t src_stride, ++ const uint8_t* pred8_ptr, ++ ptrdiff_t pred_stride, ++ int bd); ++#define vpx_highbd_subtract_block vpx_highbd_subtract_block_c ++ ++void vpx_highbd_tm_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_tm_predictor_16x16 vpx_highbd_tm_predictor_16x16_c ++ ++void vpx_highbd_tm_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_tm_predictor_32x32 vpx_highbd_tm_predictor_32x32_c ++ ++void vpx_highbd_tm_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_tm_predictor_4x4 vpx_highbd_tm_predictor_4x4_c ++ ++void vpx_highbd_tm_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_tm_predictor_8x8 vpx_highbd_tm_predictor_8x8_c ++ ++void vpx_highbd_v_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_v_predictor_16x16 vpx_highbd_v_predictor_16x16_c ++ ++void vpx_highbd_v_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_v_predictor_32x32 vpx_highbd_v_predictor_32x32_c ++ ++void vpx_highbd_v_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_v_predictor_4x4 vpx_highbd_v_predictor_4x4_c ++ ++void vpx_highbd_v_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_v_predictor_8x8 vpx_highbd_v_predictor_8x8_c ++ ++void vpx_idct16x16_10_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct16x16_10_add vpx_idct16x16_10_add_c ++ 
++void vpx_idct16x16_1_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct16x16_1_add vpx_idct16x16_1_add_c ++ ++void vpx_idct16x16_256_add_c(const tran_low_t* input, ++ uint8_t* dest, ++ int stride); ++#define vpx_idct16x16_256_add vpx_idct16x16_256_add_c ++ ++void vpx_idct16x16_38_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct16x16_38_add vpx_idct16x16_38_add_c ++ ++void vpx_idct32x32_1024_add_c(const tran_low_t* input, ++ uint8_t* dest, ++ int stride); ++#define vpx_idct32x32_1024_add vpx_idct32x32_1024_add_c ++ ++void vpx_idct32x32_135_add_c(const tran_low_t* input, ++ uint8_t* dest, ++ int stride); ++#define vpx_idct32x32_135_add vpx_idct32x32_135_add_c ++ ++void vpx_idct32x32_1_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct32x32_1_add vpx_idct32x32_1_add_c ++ ++void vpx_idct32x32_34_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct32x32_34_add vpx_idct32x32_34_add_c ++ ++void vpx_idct4x4_16_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct4x4_16_add vpx_idct4x4_16_add_c ++ ++void vpx_idct4x4_1_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct4x4_1_add vpx_idct4x4_1_add_c ++ ++void vpx_idct8x8_12_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct8x8_12_add vpx_idct8x8_12_add_c ++ ++void vpx_idct8x8_1_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct8x8_1_add vpx_idct8x8_1_add_c ++ ++void vpx_idct8x8_64_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct8x8_64_add vpx_idct8x8_64_add_c ++ ++int16_t vpx_int_pro_col_c(const uint8_t* ref, const int width); ++#define vpx_int_pro_col vpx_int_pro_col_c ++ ++void vpx_int_pro_row_c(int16_t* hbuf, ++ const uint8_t* ref, ++ const int ref_stride, ++ const int height); ++#define vpx_int_pro_row vpx_int_pro_row_c ++ ++void vpx_iwht4x4_16_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_iwht4x4_16_add vpx_iwht4x4_16_add_c ++ ++void vpx_iwht4x4_1_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_iwht4x4_1_add vpx_iwht4x4_1_add_c ++ ++void vpx_lpf_horizontal_16_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh); ++#define vpx_lpf_horizontal_16 vpx_lpf_horizontal_16_c ++ ++void vpx_lpf_horizontal_16_dual_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh); ++#define vpx_lpf_horizontal_16_dual vpx_lpf_horizontal_16_dual_c ++ ++void vpx_lpf_horizontal_4_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh); ++#define vpx_lpf_horizontal_4 vpx_lpf_horizontal_4_c ++ ++void vpx_lpf_horizontal_4_dual_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit0, ++ const uint8_t* limit0, ++ const uint8_t* thresh0, ++ const uint8_t* blimit1, ++ const uint8_t* limit1, ++ const uint8_t* thresh1); ++#define vpx_lpf_horizontal_4_dual vpx_lpf_horizontal_4_dual_c ++ ++void vpx_lpf_horizontal_8_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh); ++#define vpx_lpf_horizontal_8 vpx_lpf_horizontal_8_c ++ ++void vpx_lpf_horizontal_8_dual_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit0, ++ const uint8_t* limit0, ++ const uint8_t* thresh0, ++ const uint8_t* blimit1, ++ const uint8_t* limit1, ++ const uint8_t* thresh1); ++#define vpx_lpf_horizontal_8_dual 
vpx_lpf_horizontal_8_dual_c ++ ++void vpx_lpf_vertical_16_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh); ++#define vpx_lpf_vertical_16 vpx_lpf_vertical_16_c ++ ++void vpx_lpf_vertical_16_dual_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh); ++#define vpx_lpf_vertical_16_dual vpx_lpf_vertical_16_dual_c ++ ++void vpx_lpf_vertical_4_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh); ++#define vpx_lpf_vertical_4 vpx_lpf_vertical_4_c ++ ++void vpx_lpf_vertical_4_dual_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit0, ++ const uint8_t* limit0, ++ const uint8_t* thresh0, ++ const uint8_t* blimit1, ++ const uint8_t* limit1, ++ const uint8_t* thresh1); ++#define vpx_lpf_vertical_4_dual vpx_lpf_vertical_4_dual_c ++ ++void vpx_lpf_vertical_8_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh); ++#define vpx_lpf_vertical_8 vpx_lpf_vertical_8_c ++ ++void vpx_lpf_vertical_8_dual_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit0, ++ const uint8_t* limit0, ++ const uint8_t* thresh0, ++ const uint8_t* blimit1, ++ const uint8_t* limit1, ++ const uint8_t* thresh1); ++#define vpx_lpf_vertical_8_dual vpx_lpf_vertical_8_dual_c ++ ++void vpx_mbpost_proc_across_ip_c(unsigned char* src, ++ int pitch, ++ int rows, ++ int cols, ++ int flimit); ++#define vpx_mbpost_proc_across_ip vpx_mbpost_proc_across_ip_c ++ ++void vpx_mbpost_proc_down_c(unsigned char* dst, ++ int pitch, ++ int rows, ++ int cols, ++ int flimit); ++#define vpx_mbpost_proc_down vpx_mbpost_proc_down_c ++ ++void vpx_minmax_8x8_c(const uint8_t* s, ++ int p, ++ const uint8_t* d, ++ int dp, ++ int* min, ++ int* max); ++#define vpx_minmax_8x8 vpx_minmax_8x8_c ++ ++unsigned int vpx_mse16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_mse16x16 vpx_mse16x16_c ++ ++unsigned int vpx_mse16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_mse16x8 vpx_mse16x8_c ++ ++unsigned int vpx_mse8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_mse8x16 vpx_mse8x16_c ++ ++unsigned int vpx_mse8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_mse8x8 vpx_mse8x8_c ++ ++void vpx_plane_add_noise_c(uint8_t* start, ++ const int8_t* noise, ++ int blackclamp, ++ int whiteclamp, ++ int width, ++ int height, ++ int pitch); ++#define vpx_plane_add_noise vpx_plane_add_noise_c ++ ++void vpx_post_proc_down_and_across_mb_row_c(unsigned char* src, ++ unsigned char* dst, ++ int src_pitch, ++ int dst_pitch, ++ int cols, ++ unsigned char* flimits, ++ int size); ++#define vpx_post_proc_down_and_across_mb_row \ ++ vpx_post_proc_down_and_across_mb_row_c ++ ++void vpx_quantize_b_c(const tran_low_t* coeff_ptr, ++ intptr_t n_coeffs, ++ int skip_block, ++ const int16_t* zbin_ptr, ++ const int16_t* round_ptr, ++ const int16_t* quant_ptr, ++ const int16_t* quant_shift_ptr, ++ tran_low_t* qcoeff_ptr, ++ tran_low_t* dqcoeff_ptr, ++ const int16_t* dequant_ptr, ++ uint16_t* eob_ptr, ++ const int16_t* scan, ++ const int16_t* iscan); ++#define vpx_quantize_b vpx_quantize_b_c ++ ++void vpx_quantize_b_32x32_c(const tran_low_t* coeff_ptr, ++ intptr_t n_coeffs, 
++ int skip_block, ++ const int16_t* zbin_ptr, ++ const int16_t* round_ptr, ++ const int16_t* quant_ptr, ++ const int16_t* quant_shift_ptr, ++ tran_low_t* qcoeff_ptr, ++ tran_low_t* dqcoeff_ptr, ++ const int16_t* dequant_ptr, ++ uint16_t* eob_ptr, ++ const int16_t* scan, ++ const int16_t* iscan); ++#define vpx_quantize_b_32x32 vpx_quantize_b_32x32_c ++ ++unsigned int vpx_sad16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad16x16 vpx_sad16x16_c ++ ++unsigned int vpx_sad16x16_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad16x16_avg vpx_sad16x16_avg_c ++ ++void vpx_sad16x16x3_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad16x16x3 vpx_sad16x16x3_c ++ ++void vpx_sad16x16x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad16x16x4d vpx_sad16x16x4d_c ++ ++void vpx_sad16x16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad16x16x8 vpx_sad16x16x8_c ++ ++unsigned int vpx_sad16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad16x32 vpx_sad16x32_c ++ ++unsigned int vpx_sad16x32_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad16x32_avg vpx_sad16x32_avg_c ++ ++void vpx_sad16x32x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad16x32x4d vpx_sad16x32x4d_c ++ ++unsigned int vpx_sad16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad16x8 vpx_sad16x8_c ++ ++unsigned int vpx_sad16x8_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad16x8_avg vpx_sad16x8_avg_c ++ ++void vpx_sad16x8x3_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad16x8x3 vpx_sad16x8x3_c ++ ++void vpx_sad16x8x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad16x8x4d vpx_sad16x8x4d_c ++ ++void vpx_sad16x8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad16x8x8 vpx_sad16x8x8_c ++ ++unsigned int vpx_sad32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad32x16 vpx_sad32x16_c ++ ++unsigned int vpx_sad32x16_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad32x16_avg vpx_sad32x16_avg_c ++ ++void vpx_sad32x16x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad32x16x4d vpx_sad32x16x4d_c ++ ++unsigned int vpx_sad32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad32x32 vpx_sad32x32_c ++ ++unsigned int vpx_sad32x32_avg_c(const uint8_t* src_ptr, ++ int src_stride, 
++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad32x32_avg vpx_sad32x32_avg_c ++ ++void vpx_sad32x32x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad32x32x4d vpx_sad32x32x4d_c ++ ++void vpx_sad32x32x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad32x32x8 vpx_sad32x32x8_c ++ ++unsigned int vpx_sad32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad32x64 vpx_sad32x64_c ++ ++unsigned int vpx_sad32x64_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad32x64_avg vpx_sad32x64_avg_c ++ ++void vpx_sad32x64x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad32x64x4d vpx_sad32x64x4d_c ++ ++unsigned int vpx_sad4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad4x4 vpx_sad4x4_c ++ ++unsigned int vpx_sad4x4_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad4x4_avg vpx_sad4x4_avg_c ++ ++void vpx_sad4x4x3_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad4x4x3 vpx_sad4x4x3_c ++ ++void vpx_sad4x4x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad4x4x4d vpx_sad4x4x4d_c ++ ++void vpx_sad4x4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad4x4x8 vpx_sad4x4x8_c ++ ++unsigned int vpx_sad4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad4x8 vpx_sad4x8_c ++ ++unsigned int vpx_sad4x8_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad4x8_avg vpx_sad4x8_avg_c ++ ++void vpx_sad4x8x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad4x8x4d vpx_sad4x8x4d_c ++ ++unsigned int vpx_sad64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad64x32 vpx_sad64x32_c ++ ++unsigned int vpx_sad64x32_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad64x32_avg vpx_sad64x32_avg_c ++ ++void vpx_sad64x32x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad64x32x4d vpx_sad64x32x4d_c ++ ++unsigned int vpx_sad64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad64x64 vpx_sad64x64_c ++ ++unsigned int vpx_sad64x64_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad64x64_avg vpx_sad64x64_avg_c ++ ++void vpx_sad64x64x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int 
ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad64x64x4d vpx_sad64x64x4d_c ++ ++unsigned int vpx_sad8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad8x16 vpx_sad8x16_c ++ ++unsigned int vpx_sad8x16_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad8x16_avg vpx_sad8x16_avg_c ++ ++void vpx_sad8x16x3_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad8x16x3 vpx_sad8x16x3_c ++ ++void vpx_sad8x16x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad8x16x4d vpx_sad8x16x4d_c ++ ++void vpx_sad8x16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad8x16x8 vpx_sad8x16x8_c ++ ++unsigned int vpx_sad8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad8x4 vpx_sad8x4_c ++ ++unsigned int vpx_sad8x4_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad8x4_avg vpx_sad8x4_avg_c ++ ++void vpx_sad8x4x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad8x4x4d vpx_sad8x4x4d_c ++ ++unsigned int vpx_sad8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad8x8 vpx_sad8x8_c ++ ++unsigned int vpx_sad8x8_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad8x8_avg vpx_sad8x8_avg_c ++ ++void vpx_sad8x8x3_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad8x8x3 vpx_sad8x8x3_c ++ ++void vpx_sad8x8x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[], ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad8x8x4d vpx_sad8x8x4d_c ++ ++void vpx_sad8x8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sad_array); ++#define vpx_sad8x8x8 vpx_sad8x8x8_c ++ ++int vpx_satd_c(const tran_low_t* coeff, int length); ++#define vpx_satd vpx_satd_c ++ ++void vpx_scaled_2d_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_scaled_2d vpx_scaled_2d_c ++ ++void vpx_scaled_avg_2d_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_scaled_avg_2d vpx_scaled_avg_2d_c ++ ++void vpx_scaled_avg_horiz_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_scaled_avg_horiz vpx_scaled_avg_horiz_c ++ ++void vpx_scaled_avg_vert_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int 
y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_scaled_avg_vert vpx_scaled_avg_vert_c ++ ++void vpx_scaled_horiz_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_scaled_horiz vpx_scaled_horiz_c ++ ++void vpx_scaled_vert_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_scaled_vert vpx_scaled_vert_c ++ ++uint32_t vpx_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance16x16 vpx_sub_pixel_avg_variance16x16_c ++ ++uint32_t vpx_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance16x32 vpx_sub_pixel_avg_variance16x32_c ++ ++uint32_t vpx_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance16x8 vpx_sub_pixel_avg_variance16x8_c ++ ++uint32_t vpx_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance32x16 vpx_sub_pixel_avg_variance32x16_c ++ ++uint32_t vpx_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance32x32 vpx_sub_pixel_avg_variance32x32_c ++ ++uint32_t vpx_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance32x64 vpx_sub_pixel_avg_variance32x64_c ++ ++uint32_t vpx_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance4x4 vpx_sub_pixel_avg_variance4x4_c ++ ++uint32_t vpx_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance4x8 vpx_sub_pixel_avg_variance4x8_c ++ ++uint32_t vpx_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance64x32 vpx_sub_pixel_avg_variance64x32_c ++ ++uint32_t vpx_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define 
vpx_sub_pixel_avg_variance64x64 vpx_sub_pixel_avg_variance64x64_c ++ ++uint32_t vpx_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance8x16 vpx_sub_pixel_avg_variance8x16_c ++ ++uint32_t vpx_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance8x4 vpx_sub_pixel_avg_variance8x4_c ++ ++uint32_t vpx_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance8x8 vpx_sub_pixel_avg_variance8x8_c ++ ++uint32_t vpx_sub_pixel_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance16x16 vpx_sub_pixel_variance16x16_c ++ ++uint32_t vpx_sub_pixel_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance16x32 vpx_sub_pixel_variance16x32_c ++ ++uint32_t vpx_sub_pixel_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance16x8 vpx_sub_pixel_variance16x8_c ++ ++uint32_t vpx_sub_pixel_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance32x16 vpx_sub_pixel_variance32x16_c ++ ++uint32_t vpx_sub_pixel_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance32x32 vpx_sub_pixel_variance32x32_c ++ ++uint32_t vpx_sub_pixel_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance32x64 vpx_sub_pixel_variance32x64_c ++ ++uint32_t vpx_sub_pixel_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance4x4 vpx_sub_pixel_variance4x4_c ++ ++uint32_t vpx_sub_pixel_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance4x8 vpx_sub_pixel_variance4x8_c ++ ++uint32_t vpx_sub_pixel_variance64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance64x32 vpx_sub_pixel_variance64x32_c ++ ++uint32_t vpx_sub_pixel_variance64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance64x64 vpx_sub_pixel_variance64x64_c ++ ++uint32_t vpx_sub_pixel_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, 
++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance8x16 vpx_sub_pixel_variance8x16_c ++ ++uint32_t vpx_sub_pixel_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance8x4 vpx_sub_pixel_variance8x4_c ++ ++uint32_t vpx_sub_pixel_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance8x8 vpx_sub_pixel_variance8x8_c ++ ++void vpx_subtract_block_c(int rows, ++ int cols, ++ int16_t* diff_ptr, ++ ptrdiff_t diff_stride, ++ const uint8_t* src_ptr, ++ ptrdiff_t src_stride, ++ const uint8_t* pred_ptr, ++ ptrdiff_t pred_stride); ++#define vpx_subtract_block vpx_subtract_block_c ++ ++uint64_t vpx_sum_squares_2d_i16_c(const int16_t* src, int stride, int size); ++#define vpx_sum_squares_2d_i16 vpx_sum_squares_2d_i16_c ++ ++void vpx_tm_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_tm_predictor_16x16 vpx_tm_predictor_16x16_c ++ ++void vpx_tm_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_tm_predictor_32x32 vpx_tm_predictor_32x32_c ++ ++void vpx_tm_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_tm_predictor_4x4 vpx_tm_predictor_4x4_c ++ ++void vpx_tm_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_tm_predictor_8x8 vpx_tm_predictor_8x8_c ++ ++void vpx_v_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_v_predictor_16x16 vpx_v_predictor_16x16_c ++ ++void vpx_v_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_v_predictor_32x32 vpx_v_predictor_32x32_c ++ ++void vpx_v_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_v_predictor_4x4 vpx_v_predictor_4x4_c ++ ++void vpx_v_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_v_predictor_8x8 vpx_v_predictor_8x8_c ++ ++unsigned int vpx_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance16x16 vpx_variance16x16_c ++ ++unsigned int vpx_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance16x32 vpx_variance16x32_c ++ ++unsigned int vpx_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance16x8 vpx_variance16x8_c ++ ++unsigned int vpx_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance32x16 vpx_variance32x16_c ++ ++unsigned int vpx_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance32x32 vpx_variance32x32_c ++ ++unsigned int vpx_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int 
ref_stride, ++ unsigned int* sse); ++#define vpx_variance32x64 vpx_variance32x64_c ++ ++unsigned int vpx_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance4x4 vpx_variance4x4_c ++ ++unsigned int vpx_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance4x8 vpx_variance4x8_c ++ ++unsigned int vpx_variance64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance64x32 vpx_variance64x32_c ++ ++unsigned int vpx_variance64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance64x64 vpx_variance64x64_c ++ ++unsigned int vpx_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance8x16 vpx_variance8x16_c ++ ++unsigned int vpx_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance8x4 vpx_variance8x4_c ++ ++unsigned int vpx_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance8x8 vpx_variance8x8_c ++ ++void vpx_ve_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_ve_predictor_4x4 vpx_ve_predictor_4x4_c ++ ++int vpx_vector_var_c(const int16_t* ref, const int16_t* src, const int bwl); ++#define vpx_vector_var vpx_vector_var_c ++ ++void vpx_dsp_rtcd(void); ++ ++#include "vpx_config.h" ++ ++#ifdef RTCD_C ++static void setup_rtcd_internal(void) {} ++#endif ++ ++#ifdef __cplusplus ++} // extern "C" ++#endif ++ ++#endif +diff --git a/src/3rdparty/chromium/third_party/libvpx/source/config/linux/la64/vpx_scale_rtcd.h b/src/3rdparty/chromium/third_party/libvpx/source/config/linux/la64/vpx_scale_rtcd.h +new file mode 100644 +index 000000000..c5196db4d +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/libvpx/source/config/linux/la64/vpx_scale_rtcd.h +@@ -0,0 +1,96 @@ ++// This file is generated. Do not edit. 
++#ifndef VPX_SCALE_RTCD_H_ ++#define VPX_SCALE_RTCD_H_ ++ ++#ifdef RTCD_C ++#define RTCD_EXTERN ++#else ++#define RTCD_EXTERN extern ++#endif ++ ++struct yv12_buffer_config; ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++void vp8_horizontal_line_2_1_scale_c(const unsigned char* source, ++ unsigned int source_width, ++ unsigned char* dest, ++ unsigned int dest_width); ++#define vp8_horizontal_line_2_1_scale vp8_horizontal_line_2_1_scale_c ++ ++void vp8_horizontal_line_5_3_scale_c(const unsigned char* source, ++ unsigned int source_width, ++ unsigned char* dest, ++ unsigned int dest_width); ++#define vp8_horizontal_line_5_3_scale vp8_horizontal_line_5_3_scale_c ++ ++void vp8_horizontal_line_5_4_scale_c(const unsigned char* source, ++ unsigned int source_width, ++ unsigned char* dest, ++ unsigned int dest_width); ++#define vp8_horizontal_line_5_4_scale vp8_horizontal_line_5_4_scale_c ++ ++void vp8_vertical_band_2_1_scale_c(unsigned char* source, ++ unsigned int src_pitch, ++ unsigned char* dest, ++ unsigned int dest_pitch, ++ unsigned int dest_width); ++#define vp8_vertical_band_2_1_scale vp8_vertical_band_2_1_scale_c ++ ++void vp8_vertical_band_2_1_scale_i_c(unsigned char* source, ++ unsigned int src_pitch, ++ unsigned char* dest, ++ unsigned int dest_pitch, ++ unsigned int dest_width); ++#define vp8_vertical_band_2_1_scale_i vp8_vertical_band_2_1_scale_i_c ++ ++void vp8_vertical_band_5_3_scale_c(unsigned char* source, ++ unsigned int src_pitch, ++ unsigned char* dest, ++ unsigned int dest_pitch, ++ unsigned int dest_width); ++#define vp8_vertical_band_5_3_scale vp8_vertical_band_5_3_scale_c ++ ++void vp8_vertical_band_5_4_scale_c(unsigned char* source, ++ unsigned int src_pitch, ++ unsigned char* dest, ++ unsigned int dest_pitch, ++ unsigned int dest_width); ++#define vp8_vertical_band_5_4_scale vp8_vertical_band_5_4_scale_c ++ ++void vp8_yv12_copy_frame_c(const struct yv12_buffer_config* src_ybc, ++ struct yv12_buffer_config* dst_ybc); ++#define vp8_yv12_copy_frame vp8_yv12_copy_frame_c ++ ++void vp8_yv12_extend_frame_borders_c(struct yv12_buffer_config* ybf); ++#define vp8_yv12_extend_frame_borders vp8_yv12_extend_frame_borders_c ++ ++void vpx_extend_frame_borders_c(struct yv12_buffer_config* ybf); ++#define vpx_extend_frame_borders vpx_extend_frame_borders_c ++ ++void vpx_extend_frame_inner_borders_c(struct yv12_buffer_config* ybf); ++#define vpx_extend_frame_inner_borders vpx_extend_frame_inner_borders_c ++ ++void vpx_yv12_copy_frame_c(const struct yv12_buffer_config* src_ybc, ++ struct yv12_buffer_config* dst_ybc); ++#define vpx_yv12_copy_frame vpx_yv12_copy_frame_c ++ ++void vpx_yv12_copy_y_c(const struct yv12_buffer_config* src_ybc, ++ struct yv12_buffer_config* dst_ybc); ++#define vpx_yv12_copy_y vpx_yv12_copy_y_c ++ ++void vpx_scale_rtcd(void); ++ ++#include "vpx_config.h" ++ ++#ifdef RTCD_C ++static void setup_rtcd_internal(void) {} ++#endif ++ ++#ifdef __cplusplus ++} // extern "C" ++#endif ++ ++#endif +-- +2.20.1 + diff --git a/0007-port-icu-for-loongarch64.patch b/0007-port-icu-for-loongarch64.patch new file mode 100644 index 0000000..dbb7267 --- /dev/null +++ b/0007-port-icu-for-loongarch64.patch @@ -0,0 +1,25 @@ +From 0fc5b2512a913c9bb702e69262efc4bd72615598 Mon Sep 17 00:00:00 2001 +From: CaoHuixiong +Date: Wed, 13 Jan 2021 07:57:17 +0000 +Subject: [PATCH 07/13] port icu for loongarch64 + +--- + .../third_party/icu/source/i18n/double-conversion-utils.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git 
a/src/3rdparty/chromium/third_party/icu/source/i18n/double-conversion-utils.h b/src/3rdparty/chromium/third_party/icu/source/i18n/double-conversion-utils.h +index c5439ca15..ed6fb5463 100644 +--- a/src/3rdparty/chromium/third_party/icu/source/i18n/double-conversion-utils.h ++++ b/src/3rdparty/chromium/third_party/icu/source/i18n/double-conversion-utils.h +@@ -103,7 +103,7 @@ int main(int argc, char** argv) { + #if defined(_M_X64) || defined(__x86_64__) || \ + defined(__ARMEL__) || defined(__avr32__) || defined(_M_ARM) || defined(_M_ARM64) || \ + defined(__hppa__) || defined(__ia64__) || \ +- defined(__mips__) || \ ++ defined(__mips__) || defined(__loongarch__) || \ + defined(__powerpc__) || defined(__ppc__) || defined(__ppc64__) || \ + defined(_POWER) || defined(_ARCH_PPC) || defined(_ARCH_PPC64) || \ + defined(__sparc__) || defined(__sparc) || defined(__s390__) || \ +-- +2.20.1 + diff --git a/0008-port-lss-for-loongarch64.patch b/0008-port-lss-for-loongarch64.patch new file mode 100644 index 0000000..21a49cd --- /dev/null +++ b/0008-port-lss-for-loongarch64.patch @@ -0,0 +1,295 @@ +From ecc38374acf668abf9586d9b162fec22350d771b Mon Sep 17 00:00:00 2001 +From: CaoHuixiong +Date: Wed, 13 Jan 2021 07:58:34 +0000 +Subject: [PATCH 08/13] port lss for loongarch64 + +--- + .../third_party/lss/linux_syscall_support.h | 160 +++++++++++++++--- + 1 file changed, 141 insertions(+), 19 deletions(-) + +diff --git a/src/3rdparty/chromium/third_party/lss/linux_syscall_support.h b/src/3rdparty/chromium/third_party/lss/linux_syscall_support.h +index d2baee9d2..fed8f2ed2 100644 +--- a/src/3rdparty/chromium/third_party/lss/linux_syscall_support.h ++++ b/src/3rdparty/chromium/third_party/lss/linux_syscall_support.h +@@ -88,7 +88,7 @@ + */ + #if (defined(__i386__) || defined(__x86_64__) || defined(__ARM_ARCH_3__) || \ + defined(__mips__) || defined(__PPC__) || defined(__ARM_EABI__) || \ +- defined(__aarch64__) || defined(__s390__)) \ ++ defined(__aarch64__) || defined(__s390__) || defined(__loongarch64)) \ + && (defined(__linux) || defined(__ANDROID__)) + + #ifndef SYS_CPLUSPLUS +@@ -299,7 +299,7 @@ struct kernel_old_sigaction { + } __attribute__((packed,aligned(4))); + #elif (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) + #define kernel_old_sigaction kernel_sigaction +-#elif defined(__aarch64__) ++#elif defined(__aarch64__) || defined(__loongarch64) + // No kernel_old_sigaction defined for arm64. + #endif + +@@ -312,7 +312,7 @@ struct kernel_old_sigaction { + * actual number of signals is obviously the same, but the constants + * differ by one. 
+ */ +-#ifdef __mips__ ++#if defined(__mips__) || defined(__loongarch64) + #define KERNEL_NSIG 128 + #else + #define KERNEL_NSIG 64 +@@ -517,7 +517,7 @@ struct kernel_stat { + int st_blocks; + int st_pad4[14]; + }; +-#elif defined(__aarch64__) ++#elif defined(__aarch64__) || defined(__loongarch64) + struct kernel_stat { + unsigned long st_dev; + unsigned long st_ino; +@@ -604,7 +604,7 @@ struct kernel_statfs64 { + unsigned long f_spare[6]; + }; + #endif +-#elif defined(__s390__) ++#elif defined(__s390__) || defined(__loongarch64) + /* See also arch/s390/include/asm/compat.h */ + struct kernel_statfs64 { + unsigned int f_type; +@@ -668,7 +668,7 @@ struct kernel_statfs { + uint64_t f_frsize; + uint64_t f_spare[5]; + }; +-#elif defined(__s390__) ++#elif defined(__s390__) || defined(__loongarch64) + struct kernel_statfs { + unsigned int f_type; + unsigned int f_bsize; +@@ -1057,7 +1057,7 @@ struct kernel_statfs { + #define __NR_getcpu (__NR_SYSCALL_BASE + 345) + #endif + /* End of ARM 3/EABI definitions */ +-#elif defined(__aarch64__) ++#elif defined(__aarch64__) || defined(__loongarch64) + #ifndef __NR_setxattr + #define __NR_setxattr 5 + #endif +@@ -1860,7 +1860,8 @@ struct kernel_statfs { + + #undef LSS_RETURN + #if (defined(__i386__) || defined(__x86_64__) || defined(__ARM_ARCH_3__) \ +- || defined(__ARM_EABI__) || defined(__aarch64__) || defined(__s390__)) ++ || defined(__ARM_EABI__) || defined(__aarch64__) || defined(__s390__) \ ++ || defined(__loongarch64)) + /* Failing system calls return a negative result in the range of + * -1..-4095. These are "errno" values with the sign inverted. + */ +@@ -1960,7 +1961,7 @@ struct kernel_statfs { + LSS_ENTRYPOINT \ + "pop %%ebx" \ + args \ +- : "esp", "memory"); \ ++ : "memory"); \ + LSS_RETURN(type,__res) + #undef _syscall0 + #define _syscall0(type,name) \ +@@ -2017,7 +2018,7 @@ struct kernel_statfs { + : "i" (__NR_##name), "ri" ((long)(arg1)), \ + "c" ((long)(arg2)), "d" ((long)(arg3)), \ + "S" ((long)(arg4)), "D" ((long)(arg5)) \ +- : "esp", "memory"); \ ++ : "memory"); \ + LSS_RETURN(type,__res); \ + } + #undef _syscall6 +@@ -2039,7 +2040,7 @@ struct kernel_statfs { + : "i" (__NR_##name), "0" ((long)(&__s)), \ + "c" ((long)(arg2)), "d" ((long)(arg3)), \ + "S" ((long)(arg4)), "D" ((long)(arg5)) \ +- : "esp", "memory"); \ ++ : "memory"); \ + LSS_RETURN(type,__res); \ + } + LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack, +@@ -2125,7 +2126,7 @@ struct kernel_statfs { + : "0"(-EINVAL), "i"(__NR_clone), + "m"(fn), "m"(child_stack), "m"(flags), "m"(arg), + "m"(parent_tidptr), "m"(newtls), "m"(child_tidptr) +- : "esp", "memory", "ecx", "edx", "esi", "edi"); ++ : "memory", "ecx", "edx", "esi", "edi"); + LSS_RETURN(int, __res); + } + +@@ -2813,6 +2814,126 @@ struct kernel_statfs { + } + LSS_RETURN(int, __res); + } ++ #elif defined(__loongarch64) ++ /* Most definitions of _syscallX() neglect to mark "memory" as being ++ * clobbered. This causes problems with compilers, that do a better job ++ * at optimizing across __asm__ calls. ++ * So, we just have to redefine all of the _syscallX() macros. ++ */ ++ #undef LSS_REG ++ #define LSS_REG(ar,a) register int64_t __a##ar __asm__("a"#ar) = (int64_t)a ++ #undef LSS_BODY ++ #define LSS_BODY(type,name,args...) 
\ ++ register int64_t __res_a0 __asm__("a0"); \ ++ int64_t __res; \ ++ __asm__ __volatile__ ("li $a7, %1\n" \ ++ "syscall 0x0\n" \ ++ : "=r"(__res_a0) \ ++ : "i"(__NR_##name) , ## args \ ++ : "$a7", "memory"); \ ++ __res = __res_a0; \ ++ LSS_RETURN(type, __res) ++ #undef _syscall0 ++ #define _syscall0(type, name) \ ++ type LSS_NAME(name)(void) { \ ++ LSS_BODY(type, name); \ ++ } ++ #undef _syscall1 ++ #define _syscall1(type, name, type1, arg1) \ ++ type LSS_NAME(name)(type1 arg1) { \ ++ LSS_REG(0, arg1); LSS_BODY(type, name, "r"(__a0)); \ ++ } ++ #undef _syscall2 ++ #define _syscall2(type, name, type1, arg1, type2, arg2) \ ++ type LSS_NAME(name)(type1 arg1, type2 arg2) { \ ++ LSS_REG(0, arg1); LSS_REG(1, arg2); \ ++ LSS_BODY(type, name, "r"(__a0), "r"(__a1)); \ ++ } ++ #undef _syscall3 ++ #define _syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \ ++ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3) { \ ++ LSS_REG(0, arg1); LSS_REG(1, arg2); LSS_REG(2, arg3); \ ++ LSS_BODY(type, name, "r"(__a0), "r"(__a1), "r"(__a2)); \ ++ } ++ #undef _syscall4 ++ #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ ++ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \ ++ LSS_REG(0, arg1); LSS_REG(1, arg2); LSS_REG(2, arg3); \ ++ LSS_REG(3, arg4); \ ++ LSS_BODY(type, name, "r"(__a0), "r"(__a1), "r"(__a2), "r"(__a3)); \ ++ } ++ #undef _syscall5 ++ #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ ++ type5,arg5) \ ++ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ ++ type5 arg5) { \ ++ LSS_REG(0, arg1); LSS_REG(1, arg2); LSS_REG(2, arg3); \ ++ LSS_REG(3, arg4); LSS_REG(4, arg5); \ ++ LSS_BODY(type, name, "r"(__a0), "r"(__a1), "r"(__a2), "r"(__a3), \ ++ "r"(__a4)); \ ++ } ++ #undef _syscall6 ++ #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ ++ type5,arg5,type6,arg6) \ ++ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ ++ type5 arg5, type6 arg6) { \ ++ LSS_REG(0, arg1); LSS_REG(1, arg2); LSS_REG(2, arg3); \ ++ LSS_REG(3, arg4); LSS_REG(4, arg5); LSS_REG(5, arg6); \ ++ LSS_BODY(type, name, "r"(__a0), "r"(__a1), "r"(__a2), "r"(__a3), \ ++ "r"(__a4), "r"(__a5)); \ ++ } ++ ++ LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack, ++ int flags, void *arg, int *parent_tidptr, ++ void *newtls, int *child_tidptr) { ++ int64_t __res; ++ { ++ register uint64_t __flags __asm__("a0") = flags; ++ register void *__stack __asm__("a1") = child_stack; ++ register void *__ptid __asm__("a2") = parent_tidptr; ++ register void *__tls __asm__("a3") = newtls; ++ register int *__ctid __asm__("a4") = child_tidptr; ++ __asm__ __volatile__(/* Push "arg" and "fn" onto the stack that will be ++ * used by the child. ++ */ ++ "sub.d $%2, 16\n" ++ "st.d %1, %2, 8\n" ++ "st.d %4, %2, 0\n" ++ ++ /* %a0 = syscall(%a0 = flags, ++ * %a1 = child_stack, ++ * %a2 = parent_tidptr, ++ * %a3 = newtls, ++ * %a4 = child_tidptr) ++ */ ++ "li a7, %8\n" ++ "syscall 0x0\n" ++ ++ /* if (%a0 != 0) ++ * return %a0; ++ */ ++ "bnz a0, 1f\n" ++ ++ /* In the child, now. Call "fn(arg)". ++ */ ++ "ld.d a0, $sp, 0\n" ++ "ld.d a1, $sp, 8\n" ++ "add.d $sp, 16\n" ++ "bl $a1\n" ++ ++ /* Call _exit(%a0). 
++ */ ++ "li $a7, %9\n" ++ "syscall 0x0\n" ++ "1:\n" ++ : "=r" (__res) ++ : "r"(fn), "r"(__stack), "r"(__flags), "r"(arg), ++ "r"(__ptid), "r"(__tls), "r"(__ctid), ++ "i"(__NR_clone), "i"(__NR_exit) ++ : "cc", "a7", "memory"); ++ } ++ LSS_RETURN(int, __res); ++ } + #elif defined(__mips__) + #undef LSS_REG + #define LSS_REG(r,a) register unsigned long __r##r __asm__("$"#r) = \ +@@ -3396,9 +3517,10 @@ struct kernel_statfs { + LSS_INLINE _syscall2(int, ftruncate, int, f, + off_t, l) + #endif +- LSS_INLINE _syscall4(int, futex, int*, a, +- int, o, int, v, +- struct kernel_timespec*, t) ++ LSS_INLINE _syscall6(int, futex, int*, u, ++ int, o, int, v, ++ struct kernel_timespec*, t, ++ int*, u2, int, v2) + LSS_INLINE _syscall3(int, getdents, int, f, + struct kernel_dirent*, d, int, c) + LSS_INLINE _syscall3(int, getdents64, int, f, +@@ -4156,7 +4278,7 @@ struct kernel_statfs { + LSS_SC_BODY(4, int, 8, d, type, protocol, sv); + } + #endif +- #if defined(__ARM_EABI__) || defined (__aarch64__) ++ #if defined(__ARM_EABI__) || defined (__aarch64__) || defined (__loongarch64) + LSS_INLINE _syscall3(ssize_t, recvmsg, int, s, struct kernel_msghdr*, msg, + int, flags) + LSS_INLINE _syscall3(ssize_t, sendmsg, int, s, const struct kernel_msghdr*, +@@ -4478,7 +4600,7 @@ struct kernel_statfs { + // TODO: define this in an arch-independant way instead of inlining the clone + // syscall body. + +-# if defined(__aarch64__) ++# if defined(__aarch64__) || defined(__loongarch64) + LSS_INLINE pid_t LSS_NAME(fork)(void) { + // No fork syscall on aarch64 - implement by means of the clone syscall. + // Note that this does not reset glibc's cached view of the PID/TID, so +@@ -4494,8 +4616,8 @@ struct kernel_statfs { + LSS_REG(2, parent_tidptr); + LSS_REG(3, newtls); + LSS_REG(4, child_tidptr); +- LSS_BODY(pid_t, clone, "r"(__r0), "r"(__r1), "r"(__r2), "r"(__r3), +- "r"(__r4)); ++ LSS_BODY(pid_t, clone, "r"(__a0), "r"(__a1), "r"(__a2), "r"(__a3), ++ "r"(__a4)); + } + # elif defined(__x86_64__) + LSS_INLINE pid_t LSS_NAME(fork)(void) { +-- +2.20.1 + diff --git a/0009-port-pdfium-for-loongarch64.patch b/0009-port-pdfium-for-loongarch64.patch new file mode 100644 index 0000000..7b820b2 --- /dev/null +++ b/0009-port-pdfium-for-loongarch64.patch @@ -0,0 +1,45 @@ +From 25c0bc08ea4d22fda5e6796bc6dfab16ac02eace Mon Sep 17 00:00:00 2001 +From: CaoHuixiong +Date: Wed, 13 Jan 2021 07:59:57 +0000 +Subject: [PATCH 09/13] port pdfium for loongarch64 + +--- + .../allocator/partition_allocator/page_allocator_constants.h | 4 ++-- + .../allocator/partition_allocator/partition_alloc_constants.h | 2 +- + 2 files changed, 3 insertions(+), 3 deletions(-) + +Index: qtwebengine-everywhere-src-5.15.2/src/3rdparty/chromium/third_party/pdfium/third_party/base/allocator/partition_allocator/page_allocator_constants.h +=================================================================== +--- qtwebengine-everywhere-src-5.15.2.orig/src/3rdparty/chromium/third_party/pdfium/third_party/base/allocator/partition_allocator/page_allocator_constants.h ++++ qtwebengine-everywhere-src-5.15.2/src/3rdparty/chromium/third_party/pdfium/third_party/base/allocator/partition_allocator/page_allocator_constants.h +@@ -13,7 +13,7 @@ namespace pdfium { + namespace base { + #if defined(OS_WIN) || defined(ARCH_CPU_PPC64) + static constexpr size_t kPageAllocationGranularityShift = 16; // 64KB +-#elif defined(_MIPS_ARCH_LOONGSON) ++#elif defined(_MIPS_ARCH_LOONGSON) || defined(__loongarch__) + static constexpr size_t kPageAllocationGranularityShift = 14; // 16KB + #else + 
static constexpr size_t kPageAllocationGranularityShift = 12; // 4KB +@@ -25,7 +25,7 @@ static constexpr size_t kPageAllocationG + static constexpr size_t kPageAllocationGranularityBaseMask = + ~kPageAllocationGranularityOffsetMask; + +-#if defined(_MIPS_ARCH_LOONGSON) ++#if defined(_MIPS_ARCH_LOONGSON) || defined(__loongarch__) + static constexpr size_t kSystemPageSize = 16384; + #elif defined(ARCH_CPU_PPC64) + // Modern ppc64 systems support 4KB and 64KB page sizes. +Index: qtwebengine-everywhere-src-5.15.2/src/3rdparty/chromium/third_party/pdfium/third_party/base/allocator/partition_allocator/partition_alloc_constants.h +=================================================================== +--- qtwebengine-everywhere-src-5.15.2.orig/src/3rdparty/chromium/third_party/pdfium/third_party/base/allocator/partition_allocator/partition_alloc_constants.h ++++ qtwebengine-everywhere-src-5.15.2/src/3rdparty/chromium/third_party/pdfium/third_party/base/allocator/partition_allocator/partition_alloc_constants.h +@@ -35,7 +35,7 @@ static const size_t kBucketShift = (kAll + // other constant values, we pack _all_ `PartitionRootGeneric::Alloc` sizes + // perfectly up against the end of a system page. + +-#if defined(_MIPS_ARCH_LOONGSON) ++#if defined(_MIPS_ARCH_LOONGSON) || defined(__loongarch__) + static const size_t kPartitionPageShift = 16; // 64 KiB + #elif defined(ARCH_CPU_PPC64) + static const size_t kPartitionPageShift = 18; // 256 KiB diff --git a/0010-port-swiftshader-for-loongarch64.patch b/0010-port-swiftshader-for-loongarch64.patch new file mode 100644 index 0000000..2fa1fb7 --- /dev/null +++ b/0010-port-swiftshader-for-loongarch64.patch @@ -0,0 +1,318 @@ +From 8a0854463aec4ea5b3f1b1628c74e3781912d0c9 Mon Sep 17 00:00:00 2001 +From: CaoHuixiong +Date: Wed, 13 Jan 2021 08:04:42 +0000 +Subject: [PATCH 10/13] port swiftshader for loongarch64 + +--- + .../swiftshader/src/Reactor/BUILD.gn | 2 +- + .../src/Reactor/SubzeroReactor.cpp | 3 + + .../third_party/marl/src/osfiber_asm_la64.S | 86 ++++++++++++ + .../third_party/marl/src/osfiber_asm_la64.h | 126 ++++++++++++++++++ + .../third_party/marl/src/osfiber_la64.c | 35 +++++ + 5 files changed, 251 insertions(+), 1 deletion(-) + create mode 100644 src/3rdparty/chromium/third_party/swiftshader/third_party/marl/src/osfiber_asm_la64.S + create mode 100644 src/3rdparty/chromium/third_party/swiftshader/third_party/marl/src/osfiber_asm_la64.h + create mode 100644 src/3rdparty/chromium/third_party/swiftshader/third_party/marl/src/osfiber_la64.c + +diff --git a/src/3rdparty/chromium/third_party/swiftshader/src/Reactor/BUILD.gn b/src/3rdparty/chromium/third_party/swiftshader/src/Reactor/BUILD.gn +index 1154dba42..7d0e33cc2 100644 +--- a/src/3rdparty/chromium/third_party/swiftshader/src/Reactor/BUILD.gn ++++ b/src/3rdparty/chromium/third_party/swiftshader/src/Reactor/BUILD.gn +@@ -18,7 +18,7 @@ declare_args() { + # Subzero produces smaller binaries, but doesn't support ARM64, MIPS64, and + # PPC64. 
+ use_swiftshader_with_subzero = +- current_cpu != "arm64" && current_cpu != "mips64el" && current_cpu != "ppc64" ++ current_cpu != "arm64" && current_cpu != "mips64el" && current_cpu != "ppc64" && current_cpu != "la64" + supports_llvm = is_linux || is_fuchsia || is_win || is_android || is_mac + } + +diff --git a/src/3rdparty/chromium/third_party/swiftshader/src/Reactor/SubzeroReactor.cpp b/src/3rdparty/chromium/third_party/swiftshader/src/Reactor/SubzeroReactor.cpp +index 522b56687..f34d37b73 100644 +--- a/src/3rdparty/chromium/third_party/swiftshader/src/Reactor/SubzeroReactor.cpp ++++ b/src/3rdparty/chromium/third_party/swiftshader/src/Reactor/SubzeroReactor.cpp +@@ -324,6 +324,8 @@ private: + return false; + #elif defined(__mips__) + return false; ++#elif defined(__loongarch__) ++ return false; + #else + # error "Unknown architecture" + #endif +@@ -665,6 +667,7 @@ std::vector loadImage(uint8_t *const elfImage, const std::vectore_machine == EM_AARCH64); + #elif defined(__mips__) + ASSERT(sizeof(void *) == 4 && elfHeader->e_machine == EM_MIPS); ++#elif defined(__loongarch__) + #else + # error "Unsupported platform" + #endif +diff --git a/src/3rdparty/chromium/third_party/swiftshader/third_party/marl/src/osfiber_asm_la64.S b/src/3rdparty/chromium/third_party/swiftshader/third_party/marl/src/osfiber_asm_la64.S +new file mode 100644 +index 000000000..a41e0be09 +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/swiftshader/third_party/marl/src/osfiber_asm_la64.S +@@ -0,0 +1,86 @@ ++// Copyright 2020 The Marl Authors. ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// https://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. 
++ ++#if defined(__loongarch__) ++ ++#define MARL_BUILD_ASM 1 ++#include "osfiber_asm_la64.h" ++ ++// void marl_fiber_swap(marl_fiber_context* from, const marl_fiber_context* to) ++// a0: from ++// v0: to ++.text ++.global MARL_ASM_SYMBOL(marl_fiber_swap) ++.align 4 ++MARL_ASM_SYMBOL(marl_fiber_swap): ++ ++ // Save context 'from' ++ ++ // Store callee-preserved registers ++ sd $s0, MARL_REG_s0($a0) ++ sd $s1, MARL_REG_s1($a0) ++ sd $s2, MARL_REG_s2($a0) ++ sd $s3, MARL_REG_s3($a0) ++ sd $s4, MARL_REG_s4($a0) ++ sd $s5, MARL_REG_s5($a0) ++ sd $s6, MARL_REG_s6($a0) ++ sd $s7, MARL_REG_s7($a0) ++ ++ s.d $f24, MARL_REG_f24($a0) ++ s.d $f25, MARL_REG_f25($a0) ++ s.d $f26, MARL_REG_f26($a0) ++ s.d $f27, MARL_REG_f27($a0) ++ s.d $f28, MARL_REG_f28($a0) ++ s.d $f29, MARL_REG_f29($a0) ++ s.d $f31, MARL_REG_f30($a0) ++ s.d $f31, MARL_REG_f31($a0) ++ ++ sd $gp, MARL_REG_gp($a0) ++ sd $sp, MARL_REG_sp($a0) ++ sd $fp, MARL_REG_fp($a0) ++ sd $ra, MARL_REG_ra($a0) ++ ++ move $v0, $a1 // Function have no return, so safe to touch v0 ++ ++ // Recover callee-preserved registers ++ ld $s0, MARL_REG_s0($v0) ++ ld $s1, MARL_REG_s1($v0) ++ ld $s2, MARL_REG_s2($v0) ++ ld $s3, MARL_REG_s3($v0) ++ ld $s4, MARL_REG_s4($v0) ++ ld $s5, MARL_REG_s5($v0) ++ ld $s6, MARL_REG_s6($v0) ++ ld $s7, MARL_REG_s7($v0) ++ ++ l.d $f24, MARL_REG_f24($v0) ++ l.d $f25, MARL_REG_f25($v0) ++ l.d $f26, MARL_REG_f26($v0) ++ l.d $f27, MARL_REG_f27($v0) ++ l.d $f28, MARL_REG_f28($v0) ++ l.d $f29, MARL_REG_f29($v0) ++ l.d $f31, MARL_REG_f30($v0) ++ l.d $f31, MARL_REG_f31($v0) ++ ++ ld $gp, MARL_REG_gp($v0) ++ ld $sp, MARL_REG_sp($v0) ++ ld $fp, MARL_REG_fp($v0) ++ ld $ra, MARL_REG_ra($v0) ++ ++ // Recover arguments ++ ld $a0, MARL_REG_a0($v0) ++ ld $a1, MARL_REG_a1($v0) ++ ++ jr $ra ++ ++#endif // defined(__loongarch__) +diff --git a/src/3rdparty/chromium/third_party/swiftshader/third_party/marl/src/osfiber_asm_la64.h b/src/3rdparty/chromium/third_party/swiftshader/third_party/marl/src/osfiber_asm_la64.h +new file mode 100644 +index 000000000..e444e1c78 +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/swiftshader/third_party/marl/src/osfiber_asm_la64.h +@@ -0,0 +1,126 @@ ++// Copyright 2020 The Marl Authors. ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// https://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. 
++ ++#define MARL_REG_a0 0x00 ++#define MARL_REG_a1 0x08 ++#define MARL_REG_s0 0x10 ++#define MARL_REG_s1 0x18 ++#define MARL_REG_s2 0x20 ++#define MARL_REG_s3 0x28 ++#define MARL_REG_s4 0x30 ++#define MARL_REG_s5 0x38 ++#define MARL_REG_s6 0x40 ++#define MARL_REG_s7 0x48 ++#define MARL_REG_f24 0x50 ++#define MARL_REG_f25 0x58 ++#define MARL_REG_f26 0x60 ++#define MARL_REG_f27 0x68 ++#define MARL_REG_f28 0x70 ++#define MARL_REG_f29 0x78 ++#define MARL_REG_f30 0x80 ++#define MARL_REG_f31 0x88 ++#define MARL_REG_gp 0x90 ++#define MARL_REG_sp 0x98 ++#define MARL_REG_fp 0xa0 ++#define MARL_REG_ra 0xa8 ++ ++#if defined(__APPLE__) ++#define MARL_ASM_SYMBOL(x) _##x ++#else ++#define MARL_ASM_SYMBOL(x) x ++#endif ++ ++#ifndef MARL_BUILD_ASM ++ ++#include ++ ++struct marl_fiber_context { ++ // parameter registers (First two) ++ uintptr_t a0; ++ uintptr_t a1; ++ ++ // callee-saved registers ++ uintptr_t s0; ++ uintptr_t s1; ++ uintptr_t s2; ++ uintptr_t s3; ++ uintptr_t s4; ++ uintptr_t s5; ++ uintptr_t s6; ++ uintptr_t s7; ++ ++ uintptr_t f24; ++ uintptr_t f25; ++ uintptr_t f26; ++ uintptr_t f27; ++ uintptr_t f28; ++ uintptr_t f29; ++ uintptr_t f30; ++ uintptr_t f31; ++ ++ uintptr_t gp; ++ uintptr_t sp; ++ uintptr_t fp; ++ uintptr_t ra; ++}; ++ ++#ifdef __cplusplus ++#include ++static_assert(offsetof(marl_fiber_context, a0) == MARL_REG_a0, ++ "Bad register offset"); ++static_assert(offsetof(marl_fiber_context, a1) == MARL_REG_a1, ++ "Bad register offset"); ++static_assert(offsetof(marl_fiber_context, s0) == MARL_REG_s0, ++ "Bad register offset"); ++static_assert(offsetof(marl_fiber_context, s1) == MARL_REG_s1, ++ "Bad register offset"); ++static_assert(offsetof(marl_fiber_context, s2) == MARL_REG_s2, ++ "Bad register offset"); ++static_assert(offsetof(marl_fiber_context, s3) == MARL_REG_s3, ++ "Bad register offset"); ++static_assert(offsetof(marl_fiber_context, s4) == MARL_REG_s4, ++ "Bad register offset"); ++static_assert(offsetof(marl_fiber_context, s5) == MARL_REG_s5, ++ "Bad register offset"); ++static_assert(offsetof(marl_fiber_context, s6) == MARL_REG_s6, ++ "Bad register offset"); ++static_assert(offsetof(marl_fiber_context, s7) == MARL_REG_s7, ++ "Bad register offset"); ++static_assert(offsetof(marl_fiber_context, f24) == MARL_REG_f24, ++ "Bad register offset"); ++static_assert(offsetof(marl_fiber_context, f25) == MARL_REG_f25, ++ "Bad register offset"); ++static_assert(offsetof(marl_fiber_context, f26) == MARL_REG_f26, ++ "Bad register offset"); ++static_assert(offsetof(marl_fiber_context, f27) == MARL_REG_f27, ++ "Bad register offset"); ++static_assert(offsetof(marl_fiber_context, f28) == MARL_REG_f28, ++ "Bad register offset"); ++static_assert(offsetof(marl_fiber_context, f29) == MARL_REG_f29, ++ "Bad register offset"); ++static_assert(offsetof(marl_fiber_context, f30) == MARL_REG_f30, ++ "Bad register offset"); ++static_assert(offsetof(marl_fiber_context, f31) == MARL_REG_f31, ++ "Bad register offset"); ++static_assert(offsetof(marl_fiber_context, gp) == MARL_REG_gp, ++ "Bad register offset"); ++static_assert(offsetof(marl_fiber_context, sp) == MARL_REG_sp, ++ "Bad register offset"); ++static_assert(offsetof(marl_fiber_context, fp) == MARL_REG_fp, ++ "Bad register offset"); ++static_assert(offsetof(marl_fiber_context, ra) == MARL_REG_ra, ++ "Bad register offset"); ++#endif // __cplusplus ++ ++#endif // MARL_BUILD_ASM +diff --git a/src/3rdparty/chromium/third_party/swiftshader/third_party/marl/src/osfiber_la64.c 
b/src/3rdparty/chromium/third_party/swiftshader/third_party/marl/src/osfiber_la64.c +new file mode 100644 +index 000000000..4838f9f09 +--- /dev/null ++++ b/src/3rdparty/chromium/third_party/swiftshader/third_party/marl/src/osfiber_la64.c +@@ -0,0 +1,35 @@ ++// Copyright 2020 The Marl Authors. ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// https://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++#if defined(__loongarch__) ++ ++#include "osfiber_asm_la64.h" ++ ++void marl_fiber_trampoline(void (*target)(void*), void* arg) { ++ target(arg); ++} ++ ++void marl_fiber_set_target(struct marl_fiber_context* ctx, ++ void* stack, ++ uint32_t stack_size, ++ void (*target)(void*), ++ void* arg) { ++ uintptr_t* stack_top = (uintptr_t*)((uint8_t*)(stack) + stack_size); ++ ctx->ra = (uintptr_t)&marl_fiber_trampoline; ++ ctx->a0 = (uintptr_t)target; ++ ctx->a1 = (uintptr_t)arg; ++ ctx->sp = ((uintptr_t)stack_top) & ~(uintptr_t)15; ++} ++ ++#endif // defined(__loongarch__) +-- +2.20.1 + diff --git a/0011-port-webrtc-for-loongarch64.patch b/0011-port-webrtc-for-loongarch64.patch new file mode 100644 index 0000000..d02a9fb --- /dev/null +++ b/0011-port-webrtc-for-loongarch64.patch @@ -0,0 +1,274 @@ +From 186c03b0beb851ca6decead032b57da6db6edf6f Mon Sep 17 00:00:00 2001 +From: CaoHuixiong +Date: Wed, 13 Jan 2021 08:07:49 +0000 +Subject: [PATCH 11/13] port webrtc for loongarch64 + +--- + .../modules/desktop_capture/differ_block.cc | 5 +- + .../codecs/vp8/libvpx_interface.cc | 52 ++++++++++++++++++- + .../codecs/vp8/libvpx_vp8_decoder.cc | 10 +++- + .../codecs/vp8/libvpx_vp8_encoder.cc | 9 ++++ + .../third_party/webrtc/rtc_base/system/arch.h | 2 + + 5 files changed, 74 insertions(+), 4 deletions(-) + +Index: qtwebengine-everywhere-src-5.15.2/src/3rdparty/chromium/third_party/webrtc/modules/desktop_capture/differ_block.cc +=================================================================== +--- qtwebengine-everywhere-src-5.15.2.orig/src/3rdparty/chromium/third_party/webrtc/modules/desktop_capture/differ_block.cc ++++ qtwebengine-everywhere-src-5.15.2/src/3rdparty/chromium/third_party/webrtc/modules/desktop_capture/differ_block.cc +@@ -29,7 +29,7 @@ bool VectorDifference_C(const uint8_t* i + bool VectorDifference(const uint8_t* image1, const uint8_t* image2) { + static bool (*diff_proc)(const uint8_t*, const uint8_t*) = nullptr; + +- if (!diff_proc) { ++/* if (!diff_proc) { + #if defined(WEBRTC_ARCH_ARM_FAMILY) || defined(WEBRTC_ARCH_MIPS_FAMILY) + // For ARM and MIPS processors, always use C version. + // TODO(hclam): Implement a NEON version. 
+@@ -47,7 +47,8 @@ bool VectorDifference(const uint8_t* ima + #endif + } + +- return diff_proc(image1, image2); ++ return diff_proc(image1, image2);*/ ++ return false; + } + + bool BlockDifference(const uint8_t* image1, +Index: qtwebengine-everywhere-src-5.15.2/src/3rdparty/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/libvpx_interface.cc +=================================================================== +--- qtwebengine-everywhere-src-5.15.2.orig/src/3rdparty/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/libvpx_interface.cc ++++ qtwebengine-everywhere-src-5.15.2/src/3rdparty/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/libvpx_interface.cc +@@ -26,6 +26,9 @@ class LibvpxVp8Facade : public LibvpxInt + unsigned int d_w, + unsigned int d_h, + unsigned int align) const override { ++#if defined(__loongarch__) ++ return NULL; ++#endif + return ::vpx_img_alloc(img, fmt, d_w, d_h, align); + } + +@@ -35,20 +38,34 @@ class LibvpxVp8Facade : public LibvpxInt + unsigned int d_h, + unsigned int stride_align, + unsigned char* img_data) const override { ++#if defined(__loongarch__) ++ return NULL; ++#endif + return ::vpx_img_wrap(img, fmt, d_w, d_h, stride_align, img_data); + } + +- void img_free(vpx_image_t* img) const override { ::vpx_img_free(img); } ++ void img_free(vpx_image_t* img) const override { ++#if defined(__loongarch__) ++ return ; ++#endif ++ ::vpx_img_free(img); ++ } + + vpx_codec_err_t codec_enc_config_set( + vpx_codec_ctx_t* ctx, + const vpx_codec_enc_cfg_t* cfg) const override { ++#if defined(__loongarch__) ++ return VPX_CODEC_ERROR; ++#endif + return ::vpx_codec_enc_config_set(ctx, cfg); + } + + vpx_codec_err_t codec_enc_config_default(vpx_codec_iface_t* iface, + vpx_codec_enc_cfg_t* cfg, + unsigned int usage) const override { ++#if defined(__loongarch__) ++ return VPX_CODEC_ERROR; ++#endif + return ::vpx_codec_enc_config_default(iface, cfg, usage); + } + +@@ -56,6 +73,9 @@ class LibvpxVp8Facade : public LibvpxInt + vpx_codec_iface_t* iface, + const vpx_codec_enc_cfg_t* cfg, + vpx_codec_flags_t flags) const override { ++#if defined(__loongarch__) ++ return VPX_CODEC_ERROR; ++#endif + return ::vpx_codec_enc_init(ctx, iface, cfg, flags); + } + +@@ -65,10 +85,16 @@ class LibvpxVp8Facade : public LibvpxInt + int num_enc, + vpx_codec_flags_t flags, + vpx_rational_t* dsf) const override { ++#if defined(__loongarch__) ++ return VPX_CODEC_ERROR; ++#endif + return ::vpx_codec_enc_init_multi(ctx, iface, cfg, num_enc, flags, dsf); + } + + vpx_codec_err_t codec_destroy(vpx_codec_ctx_t* ctx) const override { ++#if defined(__loongarch__) ++ return VPX_CODEC_ERROR; ++#endif + return ::vpx_codec_destroy(ctx); + } + +@@ -78,6 +104,9 @@ class LibvpxVp8Facade : public LibvpxInt + vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx, + vp8e_enc_control_id ctrl_id, + uint32_t param) const override { ++#if defined(__loongarch__) ++ return VPX_CODEC_ERROR; ++#endif + // We need an explicit call for each type since vpx_codec_control is a + // macro that gets expanded into another call based on the parameter name. 
+ switch (ctrl_id) { +@@ -113,6 +142,9 @@ class LibvpxVp8Facade : public LibvpxInt + vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx, + vp8e_enc_control_id ctrl_id, + int param) const override { ++#if defined(__loongarch__) ++ return VPX_CODEC_ERROR; ++#endif + switch (ctrl_id) { + case VP8E_SET_FRAME_FLAGS: + return vpx_codec_control(ctx, VP8E_SET_FRAME_FLAGS, param); +@@ -134,6 +166,9 @@ class LibvpxVp8Facade : public LibvpxInt + vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx, + vp8e_enc_control_id ctrl_id, + int* param) const override { ++#if defined(__loongarch__) ++ return VPX_CODEC_ERROR; ++#endif + switch (ctrl_id) { + case VP8E_GET_LAST_QUANTIZER: + return vpx_codec_control(ctx, VP8E_GET_LAST_QUANTIZER, param); +@@ -148,6 +183,9 @@ class LibvpxVp8Facade : public LibvpxInt + vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx, + vp8e_enc_control_id ctrl_id, + vpx_roi_map* param) const override { ++#if defined(__loongarch__) ++ return VPX_CODEC_ERROR; ++#endif + switch (ctrl_id) { + case VP8E_SET_ROI_MAP: + return vpx_codec_control(ctx, VP8E_SET_ROI_MAP, param); +@@ -160,6 +198,9 @@ class LibvpxVp8Facade : public LibvpxInt + vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx, + vp8e_enc_control_id ctrl_id, + vpx_active_map* param) const override { ++#if defined(__loongarch__) ++ return VPX_CODEC_ERROR; ++#endif + switch (ctrl_id) { + case VP8E_SET_ACTIVEMAP: + return vpx_codec_control(ctx, VP8E_SET_ACTIVEMAP, param); +@@ -172,6 +213,9 @@ class LibvpxVp8Facade : public LibvpxInt + vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx, + vp8e_enc_control_id ctrl_id, + vpx_scaling_mode* param) const override { ++#if defined(__loongarch__) ++ return VPX_CODEC_ERROR; ++#endif + switch (ctrl_id) { + case VP8E_SET_SCALEMODE: + return vpx_codec_control(ctx, VP8E_SET_SCALEMODE, param); +@@ -187,12 +231,18 @@ class LibvpxVp8Facade : public LibvpxInt + uint64_t duration, + vpx_enc_frame_flags_t flags, + uint64_t deadline) const override { ++#if defined(__loongarch__) ++ return VPX_CODEC_ERROR; ++#endif + return ::vpx_codec_encode(ctx, img, pts, duration, flags, deadline); + } + + const vpx_codec_cx_pkt_t* codec_get_cx_data( + vpx_codec_ctx_t* ctx, + vpx_codec_iter_t* iter) const override { ++#if defined(__loongarch__) ++ return NULL; ++#endif + return ::vpx_codec_get_cx_data(ctx, iter); + } + }; +Index: qtwebengine-everywhere-src-5.15.2/src/3rdparty/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc +=================================================================== +--- qtwebengine-everywhere-src-5.15.2.orig/src/3rdparty/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc ++++ qtwebengine-everywhere-src-5.15.2/src/3rdparty/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc +@@ -118,6 +118,9 @@ LibvpxVp8Decoder::~LibvpxVp8Decoder() { + } + + int LibvpxVp8Decoder::InitDecode(const VideoCodec* inst, int number_of_cores) { ++#if defined(__loongarch__) ++ return WEBRTC_VIDEO_CODEC_OK; ++#endif + int ret_val = Release(); + if (ret_val < 0) { + return ret_val; +@@ -160,6 +163,9 @@ int LibvpxVp8Decoder::InitDecode(const V + int LibvpxVp8Decoder::Decode(const EncodedImage& input_image, + bool missing_frames, + int64_t /*render_time_ms*/) { ++#if defined(__loongarch__) ++ return WEBRTC_VIDEO_CODEC_UNINITIALIZED; ++#endif + if (!inited_) { + return WEBRTC_VIDEO_CODEC_UNINITIALIZED; + } +@@ -342,7 +348,9 @@ int LibvpxVp8Decoder::RegisterDecodeComp + + int LibvpxVp8Decoder::Release() { + int ret_val = 
WEBRTC_VIDEO_CODEC_OK; +- ++#if defined(__loongarch__) ++ return ret_val; ++#endif + if (decoder_ != NULL) { + if (inited_) { + if (vpx_codec_destroy(decoder_)) { +Index: qtwebengine-everywhere-src-5.15.2/src/3rdparty/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc +=================================================================== +--- qtwebengine-everywhere-src-5.15.2.orig/src/3rdparty/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc ++++ qtwebengine-everywhere-src-5.15.2/src/3rdparty/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc +@@ -452,6 +452,9 @@ void LibvpxVp8Encoder::SetFecControllerO + // TODO(eladalon): s/inst/codec_settings/g. + int LibvpxVp8Encoder::InitEncode(const VideoCodec* inst, + const VideoEncoder::Settings& settings) { ++#if defined(__loongarch__) ++ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; ++#endif + if (inst == NULL) { + return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; + } +@@ -793,6 +796,9 @@ int LibvpxVp8Encoder::NumberOfThreads(in + } + + int LibvpxVp8Encoder::InitAndSetControlSettings() { ++#if defined(__loongarch__) ++ return WEBRTC_VIDEO_CODEC_UNINITIALIZED; ++#endif + vpx_codec_flags_t flags = 0; + flags |= VPX_CODEC_USE_OUTPUT_PARTITION; + +@@ -943,6 +949,9 @@ int LibvpxVp8Encoder::Encode(const Video + const std::vector* frame_types) { + RTC_DCHECK_EQ(frame.width(), codec_.width); + RTC_DCHECK_EQ(frame.height(), codec_.height); ++#if defined(__loongarch__) ++ return WEBRTC_VIDEO_CODEC_UNINITIALIZED; ++#endif + + if (!inited_) + return WEBRTC_VIDEO_CODEC_UNINITIALIZED; +Index: qtwebengine-everywhere-src-5.15.2/src/3rdparty/chromium/third_party/webrtc/rtc_base/system/arch.h +=================================================================== +--- qtwebengine-everywhere-src-5.15.2.orig/src/3rdparty/chromium/third_party/webrtc/rtc_base/system/arch.h ++++ qtwebengine-everywhere-src-5.15.2/src/3rdparty/chromium/third_party/webrtc/rtc_base/system/arch.h +@@ -38,6 +38,8 @@ + #define WEBRTC_ARCH_LITTLE_ENDIAN + #elif defined(__MIPSEL__) + #define WEBRTC_ARCH_MIPS_FAMILY ++#elif defined(__loongarch__) ++#define WEBRTC_ARCH_LOONGARCH_FAMILY + #if defined(__LP64__) + #define WEBRTC_ARCH_64_BITS + #else diff --git a/0012-port-v8-for-loongarch64.patch b/0012-port-v8-for-loongarch64.patch new file mode 100644 index 0000000..ee0c1d0 --- /dev/null +++ b/0012-port-v8-for-loongarch64.patch @@ -0,0 +1,45399 @@ +From a18ead4450b5043e69f2eae92e9f100a29249524 Mon Sep 17 00:00:00 2001 +From: Zhao Jiazhong +Date: Wed, 27 Jan 2021 10:50:38 +0800 +Subject: [PATCH] [LA64] Support LoongArch64 in v8.3 + +--- + BUILD.gn | 42 + + gni/snapshot_toolchain.gni | 3 +- + src/base/build_config.h | 13 +- + src/base/platform/platform-posix.cc | 6 + + src/builtins/builtins.cc | 2 +- + src/builtins/la64/builtins-la64.cc | 3173 +++++++++ + src/codegen/assembler-arch.h | 2 + + src/codegen/assembler-inl.h | 2 + + src/codegen/constants-arch.h | 2 + + src/codegen/cpu-features.h | 3 + + src/codegen/external-reference.cc | 2 + + src/codegen/interface-descriptors.cc | 6 +- + src/codegen/la64/assembler-la64-inl.h | 268 + + src/codegen/la64/assembler-la64.cc | 2856 ++++++++ + src/codegen/la64/assembler-la64.h | 1171 ++++ + src/codegen/la64/constants-la64.cc | 100 + + src/codegen/la64/constants-la64.h | 1479 +++++ + src/codegen/la64/cpu-la64.cc | 38 + + .../la64/interface-descriptors-la64.cc | 332 + + src/codegen/la64/macro-assembler-la64.cc | 3992 ++++++++++++ + src/codegen/la64/macro-assembler-la64.h | 1084 +++ + 
src/codegen/la64/register-la64.h | 328 + + src/codegen/macro-assembler.h | 3 + + src/codegen/mips64/assembler-mips64.cc | 2 +- + src/codegen/mips64/assembler-mips64.h | 1 + + src/codegen/register-arch.h | 2 + + src/codegen/register-configuration.cc | 2 + + src/codegen/reloc-info.cc | 3 +- + src/common/globals.h | 3 + + src/compiler/backend/instruction-codes.h | 2 + + src/compiler/backend/instruction-selector.cc | 5 +- + .../backend/la64/code-generator-la64.cc | 2847 ++++++++ + .../backend/la64/instruction-codes-la64.h | 412 ++ + .../la64/instruction-scheduler-la64.cc | 1534 +++++ + .../backend/la64/instruction-selector-la64.cc | 3096 +++++++++ + src/compiler/c-linkage.cc | 19 +- + src/debug/debug-evaluate.cc | 2 +- + src/debug/la64/debug-la64.cc | 56 + + src/deoptimizer/la64/deoptimizer-la64.cc | 241 + + src/diagnostics/gdb-jit.cc | 2 + + src/diagnostics/la64/disasm-la64.cc | 1841 ++++++ + src/diagnostics/perf-jit.h | 3 + + src/execution/frame-constants.h | 2 + + src/execution/la64/frame-constants-la64.cc | 32 + + src/execution/la64/frame-constants-la64.h | 75 + + src/execution/la64/simulator-la64.cc | 5804 +++++++++++++++++ + src/execution/la64/simulator-la64.h | 646 ++ + src/execution/mips64/simulator-mips64.cc | 288 + + src/execution/simulator-base.h | 2 +- + src/execution/simulator.h | 2 + + src/flags/flag-definitions.h | 2 +- + src/heap/base/asm/la64/push_registers_asm.cc | 48 + + src/interpreter/interpreter-assembler.cc | 2 +- + src/libsampler/sampler.cc | 4 + + src/logging/log.cc | 2 + + src/objects/backing-store.cc | 2 +- + src/objects/code.h | 2 + + src/profiler/tick-sample.cc | 2 +- + .../la64/regexp-macro-assembler-la64.cc | 1286 ++++ + src/regexp/la64/regexp-macro-assembler-la64.h | 216 + + src/regexp/regexp-macro-assembler-arch.h | 2 + + src/regexp/regexp-macro-assembler-tracer.cc | 4 +- + src/regexp/regexp-macro-assembler.h | 1 + + src/regexp/regexp.cc | 3 + + src/runtime/runtime-atomics.cc | 3 +- + src/snapshot/deserializer.h | 5 +- + .../baseline/la64/liftoff-assembler-la64.h | 1503 +++++ + src/wasm/baseline/liftoff-assembler-defs.h | 10 +- + src/wasm/baseline/liftoff-assembler.h | 2 + + src/wasm/jump-table-assembler.cc | 31 + + src/wasm/jump-table-assembler.h | 6 + + src/wasm/wasm-linkage.h | 9 + + test/cctest/BUILD.gn | 9 +- + test/cctest/test-assembler-la64.cc | 5127 +++++++++++++++ + test/cctest/test-disasm-la64.cc | 966 +++ + test/cctest/test-icache.cc | 4 + + test/cctest/test-macro-assembler-la64.cc | 2894 ++++++++ + test/cctest/test-platform.cc | 2 + + test/cctest/test-regexp.cc | 2 + + test/cctest/wasm/test-jump-table-assembler.cc | 5 + + test/mjsunit/mjsunit.status | 12 + + tools/dev/gm.py | 4 +- + 82 files changed, 43974 insertions(+), 27 deletions(-) + create mode 100644 src/builtins/la64/builtins-la64.cc + create mode 100644 src/codegen/la64/assembler-la64-inl.h + create mode 100644 src/codegen/la64/assembler-la64.cc + create mode 100644 src/codegen/la64/assembler-la64.h + create mode 100644 src/codegen/la64/constants-la64.cc + create mode 100644 src/codegen/la64/constants-la64.h + create mode 100644 src/codegen/la64/cpu-la64.cc + create mode 100644 src/codegen/la64/interface-descriptors-la64.cc + create mode 100644 src/codegen/la64/macro-assembler-la64.cc + create mode 100644 src/codegen/la64/macro-assembler-la64.h + create mode 100644 src/codegen/la64/register-la64.h + create mode 100644 src/compiler/backend/la64/code-generator-la64.cc + create mode 100644 src/compiler/backend/la64/instruction-codes-la64.h + create mode 100644 
src/compiler/backend/la64/instruction-scheduler-la64.cc + create mode 100644 src/compiler/backend/la64/instruction-selector-la64.cc + create mode 100644 src/debug/la64/debug-la64.cc + create mode 100644 src/deoptimizer/la64/deoptimizer-la64.cc + create mode 100644 src/diagnostics/la64/disasm-la64.cc + create mode 100644 src/execution/la64/frame-constants-la64.cc + create mode 100644 src/execution/la64/frame-constants-la64.h + create mode 100644 src/execution/la64/simulator-la64.cc + create mode 100644 src/execution/la64/simulator-la64.h + create mode 100644 src/heap/base/asm/la64/push_registers_asm.cc + create mode 100644 src/regexp/la64/regexp-macro-assembler-la64.cc + create mode 100644 src/regexp/la64/regexp-macro-assembler-la64.h + create mode 100644 src/wasm/baseline/la64/liftoff-assembler-la64.h + create mode 100644 test/cctest/test-assembler-la64.cc + create mode 100644 test/cctest/test-disasm-la64.cc + create mode 100644 test/cctest/test-macro-assembler-la64.cc + +diff --git a/src/3rdparty/chromium/v8/BUILD.gn b/src/3rdparty/chromium/v8/BUILD.gn +index b2dde3f9d7..ffe2fe29c5 100644 +--- a/src/3rdparty/chromium/v8/BUILD.gn ++++ b/src/3rdparty/chromium/v8/BUILD.gn +@@ -665,6 +665,16 @@ config("toolchain") { + cflags += [ "-march=z196" ] + } + } ++ ++ # la64 simulators. ++ if (target_is_simulator && v8_current_cpu == "la64") { ++ defines += [ "_LA64_TARGET_SIMULATOR" ] ++ } ++ ++ if (v8_current_cpu == "la64") { ++ defines += [ "V8_TARGET_ARCH_LA64" ] ++ } ++ + if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") { + if (v8_current_cpu == "ppc") { + defines += [ "V8_TARGET_ARCH_PPC" ] +@@ -1674,6 +1684,11 @@ v8_source_set("v8_initializers") { + ### gcmole(arch:mips64el) ### + "src/builtins/mips64/builtins-mips64.cc", + ] ++ } else if (v8_current_cpu == "la64") { ++ sources += [ ++ ### gcmole(arch:la64) ### ++ "src/builtins/la64/builtins-la64.cc", ++ ] + } else if (v8_current_cpu == "ppc") { + sources += [ + ### gcmole(arch:ppc) ### +@@ -3319,6 +3334,33 @@ v8_source_set("v8_base_without_compiler") { + "src/regexp/mips64/regexp-macro-assembler-mips64.h", + "src/wasm/baseline/mips64/liftoff-assembler-mips64.h", + ] ++ } else if (v8_current_cpu == "la64") { ++ sources += [ ### gcmole(arch:la64) ### ++ "src/codegen/la64/assembler-la64-inl.h", ++ "src/codegen/la64/assembler-la64.cc", ++ "src/codegen/la64/assembler-la64.h", ++ "src/codegen/la64/constants-la64.cc", ++ "src/codegen/la64/constants-la64.h", ++ "src/codegen/la64/cpu-la64.cc", ++ "src/codegen/la64/interface-descriptors-la64.cc", ++ "src/codegen/la64/macro-assembler-la64.cc", ++ "src/codegen/la64/macro-assembler-la64.h", ++ "src/codegen/la64/register-la64.h", ++ "src/compiler/backend/la64/code-generator-la64.cc", ++ "src/compiler/backend/la64/instruction-codes-la64.h", ++ "src/compiler/backend/la64/instruction-scheduler-la64.cc", ++ "src/compiler/backend/la64/instruction-selector-la64.cc", ++ "src/debug/la64/debug-la64.cc", ++ "src/deoptimizer/la64/deoptimizer-la64.cc", ++ "src/diagnostics/la64/disasm-la64.cc", ++ "src/execution/la64/frame-constants-la64.cc", ++ "src/execution/la64/frame-constants-la64.h", ++ "src/execution/la64/simulator-la64.cc", ++ "src/execution/la64/simulator-la64.h", ++ "src/regexp/la64/regexp-macro-assembler-la64.cc", ++ "src/regexp/la64/regexp-macro-assembler-la64.h", ++ "src/wasm/baseline/la64/liftoff-assembler-la64.h", ++ ] + } else if (v8_current_cpu == "ppc") { + sources += [ ### gcmole(arch:ppc) ### + "src/codegen/ppc/assembler-ppc-inl.h", +diff --git 
a/src/3rdparty/chromium/v8/gni/snapshot_toolchain.gni b/src/3rdparty/chromium/v8/gni/snapshot_toolchain.gni +index b5fb1823b3..8ada11d790 100644 +--- a/src/3rdparty/chromium/v8/gni/snapshot_toolchain.gni ++++ b/src/3rdparty/chromium/v8/gni/snapshot_toolchain.gni +@@ -79,7 +79,8 @@ if (v8_snapshot_toolchain == "") { + + if (v8_current_cpu == "x64" || v8_current_cpu == "x86") { + _cpus = v8_current_cpu +- } else if (v8_current_cpu == "arm64" || v8_current_cpu == "mips64el") { ++ } else if (v8_current_cpu == "arm64" || v8_current_cpu == "mips64el" || ++ v8_current_cpu == "la64") { + if (is_win && v8_current_cpu == "arm64") { + # set _cpus to blank for Windows ARM64 so host_toolchain could be + # selected as snapshot toolchain later. +diff --git a/src/3rdparty/chromium/v8/src/base/build_config.h b/src/3rdparty/chromium/v8/src/base/build_config.h +index 8d142c456c..a9a8a07b12 100644 +--- a/src/3rdparty/chromium/v8/src/base/build_config.h ++++ b/src/3rdparty/chromium/v8/src/base/build_config.h +@@ -33,6 +33,9 @@ + #elif defined(__MIPSEB__) || defined(__MIPSEL__) + #define V8_HOST_ARCH_MIPS 1 + #define V8_HOST_ARCH_32_BIT 1 ++#elif defined(__loongarch64) ++#define V8_HOST_ARCH_LA64 1 ++#define V8_HOST_ARCH_64_BIT 1 + #elif defined(__PPC64__) || defined(_ARCH_PPC64) + #define V8_HOST_ARCH_PPC64 1 + #define V8_HOST_ARCH_64_BIT 1 +@@ -77,7 +80,8 @@ + // environment as presented by the compiler. + #if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && \ + !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64 && \ +- !V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 ++ !V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 && \ ++ !V8_TARGET_ARCH_LA64 + #if defined(_M_X64) || defined(__x86_64__) + #define V8_TARGET_ARCH_X64 1 + #elif defined(_M_IX86) || defined(__i386__) +@@ -118,6 +122,8 @@ + #define V8_TARGET_ARCH_32_BIT 1 + #elif V8_TARGET_ARCH_MIPS64 + #define V8_TARGET_ARCH_64_BIT 1 ++#elif V8_TARGET_ARCH_LA64 ++#define V8_TARGET_ARCH_64_BIT 1 + #elif V8_TARGET_ARCH_PPC + #define V8_TARGET_ARCH_32_BIT 1 + #elif V8_TARGET_ARCH_PPC64 +@@ -156,6 +162,9 @@ + #if (V8_TARGET_ARCH_MIPS64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_MIPS64)) + #error Target architecture mips64 is only supported on mips64 and x64 host + #endif ++#if (V8_TARGET_ARCH_LA64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_LA64)) ++#error Target architecture la64 is only supported on la64 and x64 host ++#endif + + // Determine architecture endianness. + #if V8_TARGET_ARCH_IA32 +@@ -166,6 +175,8 @@ + #define V8_TARGET_LITTLE_ENDIAN 1 + #elif V8_TARGET_ARCH_ARM64 + #define V8_TARGET_LITTLE_ENDIAN 1 ++#elif V8_TARGET_ARCH_LA64 ++#define V8_TARGET_LITTLE_ENDIAN 1 + #elif V8_TARGET_ARCH_MIPS + #if defined(__MIPSEB__) + #define V8_TARGET_BIG_ENDIAN 1 +diff --git a/src/3rdparty/chromium/v8/src/base/platform/platform-posix.cc b/src/3rdparty/chromium/v8/src/base/platform/platform-posix.cc +index 1e600c7891..f0e95f6443 100644 +--- a/src/3rdparty/chromium/v8/src/base/platform/platform-posix.cc ++++ b/src/3rdparty/chromium/v8/src/base/platform/platform-posix.cc +@@ -297,6 +297,10 @@ void* OS::GetRandomMmapAddr() { + // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel chance + // to fulfill request. + raw_addr &= uint64_t{0xFFFFFF0000}; ++#elif V8_TARGET_ARCH_LA64 ++ // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel chance ++ // to fulfill request. 
++ raw_addr &= uint64_t{0xFFFFFF0000}; + #else + raw_addr &= 0x3FFFF000; + +@@ -470,6 +474,8 @@ void OS::DebugBreak() { + asm("break"); + #elif V8_HOST_ARCH_MIPS64 + asm("break"); ++#elif V8_HOST_ARCH_LA64 ++ asm("break 0"); + #elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64 + asm("twge 2,2"); + #elif V8_HOST_ARCH_IA32 +diff --git a/src/3rdparty/chromium/v8/src/builtins/builtins.cc b/src/3rdparty/chromium/v8/src/builtins/builtins.cc +index 34f7ddc18a..72f28d08e9 100644 +--- a/src/3rdparty/chromium/v8/src/builtins/builtins.cc ++++ b/src/3rdparty/chromium/v8/src/builtins/builtins.cc +@@ -466,7 +466,7 @@ bool Builtins::CodeObjectIsExecutable(int builtin_index) { + case Builtins::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit: + return true; + default: +-#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 ++#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LA64 + // TODO(Loongson): Move non-JS linkage builtins code objects into RO_SPACE + // caused MIPS platform to crash, and we need some time to handle it. Now + // disable this change temporarily on MIPS platform. +diff --git a/src/3rdparty/chromium/v8/src/builtins/la64/builtins-la64.cc b/src/3rdparty/chromium/v8/src/builtins/la64/builtins-la64.cc +new file mode 100644 +index 0000000000..cdfb9abed1 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/builtins/la64/builtins-la64.cc +@@ -0,0 +1,3173 @@ ++// Copyright 2012 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#if V8_TARGET_ARCH_LA64 ++ ++#include "src/api/api-arguments.h" ++#include "src/codegen/code-factory.h" ++#include "src/debug/debug.h" ++#include "src/deoptimizer/deoptimizer.h" ++#include "src/execution/frame-constants.h" ++#include "src/execution/frames.h" ++#include "src/logging/counters.h" ++// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop. ++#include "src/codegen/la64/constants-la64.h" ++#include "src/codegen/macro-assembler-inl.h" ++#include "src/codegen/register-configuration.h" ++#include "src/heap/heap-inl.h" ++#include "src/objects/cell.h" ++#include "src/objects/foreign.h" ++#include "src/objects/heap-number.h" ++#include "src/objects/js-generator.h" ++#include "src/objects/objects-inl.h" ++#include "src/objects/smi.h" ++#include "src/runtime/runtime.h" ++#include "src/wasm/wasm-linkage.h" ++#include "src/wasm/wasm-objects.h" ++ ++namespace v8 { ++namespace internal { ++ ++#define __ ACCESS_MASM(masm) ++ ++void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) { ++ __ li(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address)); ++ __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame), ++ RelocInfo::CODE_TARGET); ++} ++ ++static void GenerateTailCallToReturnedCode(MacroAssembler* masm, ++ Runtime::FunctionId function_id) { ++ // ----------- S t a t e ------------- ++ // -- a1 : target function (preserved for callee) ++ // -- a3 : new target (preserved for callee) ++ // ----------------------------------- ++ { ++ FrameScope scope(masm, StackFrame::INTERNAL); ++ // Push a copy of the function onto the stack. ++ // Push a copy of the target function and the new target. ++ __ Push(a1, a3, a1); ++ ++ __ CallRuntime(function_id, 1); ++ // Restore target function and new target. 
++ __ Pop(a1, a3); ++ } ++ ++ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); ++ __ Add_d(a2, a0, Operand(Code::kHeaderSize - kHeapObjectTag)); ++ __ Jump(a2); ++} ++ ++namespace { ++ ++enum StackLimitKind { kInterruptStackLimit, kRealStackLimit }; ++ ++void LoadStackLimit(MacroAssembler* masm, Register destination, ++ StackLimitKind kind) { ++ DCHECK(masm->root_array_available()); ++ Isolate* isolate = masm->isolate(); ++ ExternalReference limit = ++ kind == StackLimitKind::kRealStackLimit ++ ? ExternalReference::address_of_real_jslimit(isolate) ++ : ExternalReference::address_of_jslimit(isolate); ++ DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit)); ++ ++ intptr_t offset = ++ TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit); ++ CHECK(is_int32(offset)); ++ __ Ld_d(destination, MemOperand(kRootRegister, static_cast(offset))); ++} ++ ++void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0 : number of arguments ++ // -- a1 : constructor function ++ // -- a3 : new target ++ // -- cp : context ++ // -- ra : return address ++ // -- sp[...]: constructor arguments ++ // ----------------------------------- ++ ++ // Enter a construct frame. ++ { ++ FrameScope scope(masm, StackFrame::CONSTRUCT); ++ ++ // Preserve the incoming parameters on the stack. ++ __ SmiTag(a0); ++ __ Push(cp, a0); ++ __ SmiUntag(a0); ++ ++ // The receiver for the builtin/api call. ++ __ PushRoot(RootIndex::kTheHoleValue); ++ ++ // Set up pointer to last argument. ++ __ Add_d(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset)); ++ ++ // Copy arguments and receiver to the expression stack. ++ Label loop, entry; ++ __ mov(t3, a0); ++ // ----------- S t a t e ------------- ++ // -- a0: number of arguments (untagged) ++ // -- a3: new target ++ // -- t2: pointer to last argument ++ // -- t3: counter ++ // -- sp[0*kPointerSize]: the hole (receiver) ++ // -- sp[1*kPointerSize]: number of arguments (tagged) ++ // -- sp[2*kPointerSize]: context ++ // ----------------------------------- ++ __ jmp(&entry); ++ __ bind(&loop); ++ __ Alsl_d(t0, t3, t2, kPointerSizeLog2, t7); ++ __ Ld_d(t1, MemOperand(t0, 0)); ++ __ push(t1); ++ __ bind(&entry); ++ __ Add_d(t3, t3, Operand(-1)); ++ __ Branch(&loop, greater_equal, t3, Operand(zero_reg)); ++ ++ // Call the function. ++ // a0: number of arguments (untagged) ++ // a1: constructor function ++ // a3: new target ++ __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION); ++ ++ // Restore context from the frame. ++ __ Ld_d(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); ++ // Restore smi-tagged arguments count from the frame. ++ __ Ld_d(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset)); ++ // Leave construct frame. ++ } ++ ++ // Remove caller arguments from the stack and return. ++ __ SmiScale(a4, a1, kPointerSizeLog2); ++ __ Add_d(sp, sp, a4); ++ __ Add_d(sp, sp, kPointerSize); ++ __ Ret(); ++} ++ ++static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args, ++ Register scratch1, Register scratch2, ++ Label* stack_overflow) { ++ // Check the stack for overflow. We are not trying to catch ++ // interruptions (e.g. debug break and preemption) here, so the "real stack ++ // limit" is checked. ++ LoadStackLimit(masm, scratch1, StackLimitKind::kRealStackLimit); ++ // Make scratch1 the space we have left. The stack might already be overflowed ++ // here which will cause scratch1 to become negative. 
++ __ sub_d(scratch1, sp, scratch1); ++ // Check if the arguments will overflow the stack. ++ __ slli_d(scratch2, num_args, kPointerSizeLog2); ++ // Signed comparison. ++ __ Branch(stack_overflow, le, scratch1, Operand(scratch2)); ++} ++ ++} // namespace ++ ++// The construct stub for ES5 constructor functions and ES6 class constructors. ++void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0: number of arguments (untagged) ++ // -- a1: constructor function ++ // -- a3: new target ++ // -- cp: context ++ // -- ra: return address ++ // -- sp[...]: constructor arguments ++ // ----------------------------------- ++ ++ // Enter a construct frame. ++ { ++ FrameScope scope(masm, StackFrame::CONSTRUCT); ++ Label post_instantiation_deopt_entry, not_create_implicit_receiver; ++ ++ // Preserve the incoming parameters on the stack. ++ __ SmiTag(a0); ++ __ Push(cp, a0, a1); ++ __ PushRoot(RootIndex::kTheHoleValue); ++ __ Push(a3); ++ ++ // ----------- S t a t e ------------- ++ // -- sp[0*kPointerSize]: new target ++ // -- sp[1*kPointerSize]: padding ++ // -- a1 and sp[2*kPointerSize]: constructor function ++ // -- sp[3*kPointerSize]: number of arguments (tagged) ++ // -- sp[4*kPointerSize]: context ++ // ----------------------------------- ++ ++ __ Ld_d(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); ++ __ Ld_wu(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset)); ++ __ DecodeField(t2); ++ __ JumpIfIsInRange(t2, kDefaultDerivedConstructor, kDerivedConstructor, ++ ¬_create_implicit_receiver); ++ ++ // If not derived class constructor: Allocate the new receiver object. ++ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, ++ t2, t3); ++ __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), ++ RelocInfo::CODE_TARGET); ++ __ Branch(&post_instantiation_deopt_entry); ++ ++ // Else: use TheHoleValue as receiver for constructor call ++ __ bind(¬_create_implicit_receiver); ++ __ LoadRoot(a0, RootIndex::kTheHoleValue); ++ ++ // ----------- S t a t e ------------- ++ // -- a0: receiver ++ // -- Slot 4 / sp[0*kPointerSize]: new target ++ // -- Slot 3 / sp[1*kPointerSize]: padding ++ // -- Slot 2 / sp[2*kPointerSize]: constructor function ++ // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged) ++ // -- Slot 0 / sp[4*kPointerSize]: context ++ // ----------------------------------- ++ // Deoptimizer enters here. ++ masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset( ++ masm->pc_offset()); ++ __ bind(&post_instantiation_deopt_entry); ++ ++ // Restore new target. ++ __ Pop(a3); ++ // Push the allocated receiver to the stack. We need two copies ++ // because we may have to return the original one and the calling ++ // conventions dictate that the called function pops the receiver. ++ __ Push(a0, a0); ++ ++ // ----------- S t a t e ------------- ++ // -- r3: new target ++ // -- sp[0*kPointerSize]: implicit receiver ++ // -- sp[1*kPointerSize]: implicit receiver ++ // -- sp[2*kPointerSize]: padding ++ // -- sp[3*kPointerSize]: constructor function ++ // -- sp[4*kPointerSize]: number of arguments (tagged) ++ // -- sp[5*kPointerSize]: context ++ // ----------------------------------- ++ ++ // Restore constructor function and argument count. ++ __ Ld_d(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset)); ++ __ Ld_d(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset)); ++ __ SmiUntag(a0); ++ ++ // Set up pointer to last argument. 
++ __ Add_d(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset)); ++ ++ Label enough_stack_space, stack_overflow; ++ Generate_StackOverflowCheck(masm, a0, t0, t1, &stack_overflow); ++ __ Branch(&enough_stack_space); ++ ++ __ bind(&stack_overflow); ++ // Restore the context from the frame. ++ __ Ld_d(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); ++ __ CallRuntime(Runtime::kThrowStackOverflow); ++ // Unreachable code. ++ __ break_(0xCC); ++ ++ __ bind(&enough_stack_space); ++ ++ // Copy arguments and receiver to the expression stack. ++ Label loop, entry; ++ __ mov(t3, a0); ++ // ----------- S t a t e ------------- ++ // -- a0: number of arguments (untagged) ++ // -- a3: new target ++ // -- t2: pointer to last argument ++ // -- t3: counter ++ // -- sp[0*kPointerSize]: implicit receiver ++ // -- sp[1*kPointerSize]: implicit receiver ++ // -- sp[2*kPointerSize]: padding ++ // -- a1 and sp[3*kPointerSize]: constructor function ++ // -- sp[4*kPointerSize]: number of arguments (tagged) ++ // -- sp[5*kPointerSize]: context ++ // ----------------------------------- ++ __ jmp(&entry); ++ __ bind(&loop); ++ __ Alsl_d(t0, t3, t2, kPointerSizeLog2, t7); ++ __ Ld_d(t1, MemOperand(t0, 0)); ++ __ push(t1); ++ __ bind(&entry); ++ __ Add_d(t3, t3, Operand(-1)); ++ __ Branch(&loop, greater_equal, t3, Operand(zero_reg)); ++ ++ // Call the function. ++ __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION); ++ ++ // ----------- S t a t e ------------- ++ // -- t5: constructor result ++ // -- sp[0*kPointerSize]: implicit receiver ++ // -- sp[1*kPointerSize]: padding ++ // -- sp[2*kPointerSize]: constructor function ++ // -- sp[3*kPointerSize]: number of arguments ++ // -- sp[4*kPointerSize]: context ++ // ----------------------------------- ++ ++ // Store offset of return address for deoptimizer. ++ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset( ++ masm->pc_offset()); ++ ++ // Restore the context from the frame. ++ __ Ld_d(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); ++ ++ // If the result is an object (in the ECMA sense), we should get rid ++ // of the receiver and use the result; see ECMA-262 section 13.2.2-7 ++ // on page 74. ++ Label use_receiver, do_throw, leave_frame; ++ ++ // If the result is undefined, we jump out to using the implicit receiver. ++ __ JumpIfRoot(a0, RootIndex::kUndefinedValue, &use_receiver); ++ ++ // Otherwise we do a smi check and fall through to check if the return value ++ // is a valid receiver. ++ ++ // If the result is a smi, it is *not* an object in the ECMA sense. ++ __ JumpIfSmi(a0, &use_receiver); ++ ++ // If the type of the result (stored in its map) is less than ++ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense. ++ __ GetObjectType(a0, t2, t2); ++ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); ++ __ Branch(&leave_frame, greater_equal, t2, Operand(FIRST_JS_RECEIVER_TYPE)); ++ __ Branch(&use_receiver); ++ ++ __ bind(&do_throw); ++ __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject); ++ ++ // Throw away the result of the constructor invocation and use the ++ // on-stack receiver as the result. ++ __ bind(&use_receiver); ++ __ Ld_d(a0, MemOperand(sp, 0 * kPointerSize)); ++ __ JumpIfRoot(a0, RootIndex::kTheHoleValue, &do_throw); ++ ++ __ bind(&leave_frame); ++ // Restore smi-tagged arguments count from the frame. ++ __ Ld_d(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset)); ++ // Leave construct frame. ++ } ++ // Remove caller arguments from the stack and return. 
++ __ SmiScale(a4, a1, kPointerSizeLog2); ++ __ Add_d(sp, sp, a4); ++ __ Add_d(sp, sp, kPointerSize); ++ __ Ret(); ++} ++ ++void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) { ++ Generate_JSBuiltinsConstructStubHelper(masm); ++} ++ ++static void GetSharedFunctionInfoBytecode(MacroAssembler* masm, ++ Register sfi_data, ++ Register scratch1) { ++ Label done; ++ ++ __ GetObjectType(sfi_data, scratch1, scratch1); ++ __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE)); ++ __ Ld_d(sfi_data, ++ FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset)); ++ ++ __ bind(&done); ++} ++ ++// static ++void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0 : the value to pass to the generator ++ // -- a1 : the JSGeneratorObject to resume ++ // -- ra : return address ++ // ----------------------------------- ++ __ AssertGeneratorObject(a1); ++ ++ // Store input value into generator object. ++ __ St_d(a0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset)); ++ __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, a0, a3, ++ kRAHasNotBeenSaved, kDontSaveFPRegs); ++ ++ // Load suspended function and context. ++ __ Ld_d(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); ++ __ Ld_d(cp, FieldMemOperand(a4, JSFunction::kContextOffset)); ++ ++ // Flood function if we are stepping. ++ Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator; ++ Label stepping_prepared; ++ ExternalReference debug_hook = ++ ExternalReference::debug_hook_on_function_call_address(masm->isolate()); ++ __ li(a5, debug_hook); ++ __ Ld_b(a5, MemOperand(a5, 0)); ++ __ Branch(&prepare_step_in_if_stepping, ne, a5, Operand(zero_reg)); ++ ++ // Flood function if we need to continue stepping in the suspended generator. ++ ExternalReference debug_suspended_generator = ++ ExternalReference::debug_suspended_generator_address(masm->isolate()); ++ __ li(a5, debug_suspended_generator); ++ __ Ld_d(a5, MemOperand(a5, 0)); ++ __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(a5)); ++ __ bind(&stepping_prepared); ++ ++ // Check the stack for overflow. We are not trying to catch interruptions ++ // (i.e. debug break and preemption) here, so check the "real stack limit". ++ Label stack_overflow; ++ LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit); ++ __ Branch(&stack_overflow, lo, sp, Operand(kScratchReg)); ++ ++ // Push receiver. ++ __ Ld_d(a5, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset)); ++ __ Push(a5); ++ ++ // ----------- S t a t e ------------- ++ // -- a1 : the JSGeneratorObject to resume ++ // -- a4 : generator function ++ // -- cp : generator context ++ // -- ra : return address ++ // -- sp[0] : generator receiver ++ // ----------------------------------- ++ ++ // Push holes for arguments to generator function. Since the parser forced ++ // context allocation for any variables in generators, the actual argument ++ // values have already been copied into the context and these dummy values ++ // will never be used. 
++ __ Ld_d(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset)); ++ __ Ld_hu( ++ a3, FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset)); ++ __ Ld_d(t1, FieldMemOperand( ++ a1, JSGeneratorObject::kParametersAndRegistersOffset)); ++ { ++ Label done_loop, loop; ++ __ Move(t2, zero_reg); ++ __ bind(&loop); ++ __ Sub_d(a3, a3, Operand(1)); ++ __ Branch(&done_loop, lt, a3, Operand(zero_reg)); ++ __ Alsl_d(kScratchReg, t2, t1, kPointerSizeLog2, t7); ++ __ Ld_d(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize)); ++ __ Push(kScratchReg); ++ __ Add_d(t2, t2, Operand(1)); ++ __ Branch(&loop); ++ __ bind(&done_loop); ++ } ++ ++ // Underlying function needs to have bytecode available. ++ if (FLAG_debug_code) { ++ __ Ld_d(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset)); ++ __ Ld_d(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset)); ++ GetSharedFunctionInfoBytecode(masm, a3, t5); ++ __ GetObjectType(a3, a3, a3); ++ __ Assert(eq, AbortReason::kMissingBytecodeArray, a3, ++ Operand(BYTECODE_ARRAY_TYPE)); ++ } ++ ++ // Resume (Ignition/TurboFan) generator object. ++ { ++ __ Ld_d(a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset)); ++ __ Ld_hu(a0, FieldMemOperand( ++ a0, SharedFunctionInfo::kFormalParameterCountOffset)); ++ // We abuse new.target both to indicate that this is a resume call and to ++ // pass in the generator object. In ordinary calls, new.target is always ++ // undefined because generator functions are non-constructable. ++ __ Move(a3, a1); ++ __ Move(a1, a4); ++ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); ++ __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kCodeOffset)); ++ __ Add_d(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag)); ++ __ Jump(a2); ++ } ++ ++ __ bind(&prepare_step_in_if_stepping); ++ { ++ FrameScope scope(masm, StackFrame::INTERNAL); ++ __ Push(a1, a4); ++ // Push hole as receiver since we do not use it for stepping. ++ __ PushRoot(RootIndex::kTheHoleValue); ++ __ CallRuntime(Runtime::kDebugOnFunctionCall); ++ __ Pop(a1); ++ } ++ __ Ld_d(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); ++ __ Branch(&stepping_prepared); ++ ++ __ bind(&prepare_step_in_suspended_generator); ++ { ++ FrameScope scope(masm, StackFrame::INTERNAL); ++ __ Push(a1); ++ __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator); ++ __ Pop(a1); ++ } ++ __ Ld_d(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); ++ __ Branch(&stepping_prepared); ++ ++ __ bind(&stack_overflow); ++ { ++ FrameScope scope(masm, StackFrame::INTERNAL); ++ __ CallRuntime(Runtime::kThrowStackOverflow); ++ __ break_(0xCC); // This should be unreachable. ++ } ++} ++ ++void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) { ++ FrameScope scope(masm, StackFrame::INTERNAL); ++ __ Push(a1); ++ __ CallRuntime(Runtime::kThrowConstructedNonConstructable); ++} ++ ++// Clobbers scratch1 and scratch2; preserves all other registers. ++static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc, ++ Register scratch1, Register scratch2) { ++ // Check the stack for overflow. We are not trying to catch ++ // interruptions (e.g. debug break and preemption) here, so the "real stack ++ // limit" is checked. ++ Label okay; ++ LoadStackLimit(masm, scratch1, StackLimitKind::kRealStackLimit); ++ // Make a2 the space we have left. The stack might already be overflowed ++ // here which will cause r2 to become negative. 
++ __ sub_d(scratch1, sp, scratch1); ++ // Check if the arguments will overflow the stack. ++ __ slli_d(scratch2, argc, kPointerSizeLog2); ++ __ Branch(&okay, gt, scratch1, Operand(scratch2)); // Signed comparison. ++ ++ // Out of stack space. ++ __ CallRuntime(Runtime::kThrowStackOverflow); ++ ++ __ bind(&okay); ++} ++ ++namespace { ++ ++// Called with the native C calling convention. The corresponding function ++// signature is either: ++// ++// using JSEntryFunction = GeneratedCode; ++// or ++// using JSEntryFunction = GeneratedCode; ++void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, ++ Builtins::Name entry_trampoline) { ++ Label invoke, handler_entry, exit; ++ ++ { ++ NoRootArrayScope no_root_array(masm); ++ ++ // TODO(plind): unify the ABI description here. ++ // Registers: ++ // either ++ // a0: root register value ++ // a1: entry address ++ // a2: function ++ // a3: receiver ++ // a4: argc ++ // a5: argv ++ // or ++ // a0: root register value ++ // a1: microtask_queue ++ // ++ // Stack: ++ // 0 arg slots on mips64 (4 args slots on mips) ++ ++ // Save callee saved registers on the stack. ++ __ MultiPush(kCalleeSaved | ra.bit()); ++ ++ // Save callee-saved FPU registers. ++ __ MultiPushFPU(kCalleeSavedFPU); ++ // Set up the reserved register for 0.0. ++ __ Move(kDoubleRegZero, 0.0); ++ ++ // Initialize the root register. ++ // C calling convention. The first argument is passed in a0. ++ __ mov(kRootRegister, a0); ++ } ++ ++ // a1: entry address ++ // a2: function ++ // a3: receiver ++ // a4: argc ++ // a5: argv ++ ++ // We build an EntryFrame. ++ __ li(s1, Operand(-1)); // Push a bad frame pointer to fail if it is used. ++ __ li(s2, Operand(StackFrame::TypeToMarker(type))); ++ __ li(s3, Operand(StackFrame::TypeToMarker(type))); ++ ExternalReference c_entry_fp = ExternalReference::Create( ++ IsolateAddressId::kCEntryFPAddress, masm->isolate()); ++ __ li(s4, c_entry_fp); ++ __ Ld_d(s4, MemOperand(s4, 0)); ++ __ Push(s1, s2, s3, s4); ++ // Set up frame pointer for the frame to be pushed. ++ __ addi_d(fp, sp, -EntryFrameConstants::kCallerFPOffset); ++ ++ // Registers: ++ // either ++ // a1: entry address ++ // a2: function ++ // a3: receiver ++ // a4: argc ++ // a5: argv ++ // or ++ // a1: microtask_queue ++ // ++ // Stack: ++ // caller fp | ++ // function slot | entry frame ++ // context slot | ++ // bad fp (0xFF...F) | ++ // callee saved registers + ra ++ // [ O32: 4 args slots] ++ // args ++ ++ // If this is the outermost JS call, set js_entry_sp value. ++ Label non_outermost_js; ++ ExternalReference js_entry_sp = ExternalReference::Create( ++ IsolateAddressId::kJSEntrySPAddress, masm->isolate()); ++ __ li(s1, js_entry_sp); ++ __ Ld_d(s2, MemOperand(s1, 0)); ++ __ Branch(&non_outermost_js, ne, s2, Operand(zero_reg)); ++ __ St_d(fp, MemOperand(s1, 0)); ++ __ li(s3, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME)); ++ Label cont; ++ __ b(&cont); ++ __ nop(); // Branch delay slot nop. ++ __ bind(&non_outermost_js); ++ __ li(s3, Operand(StackFrame::INNER_JSENTRY_FRAME)); ++ __ bind(&cont); ++ __ push(s3); ++ ++ // Jump to a faked try block that does the invoke, with a faked catch ++ // block that sets the pending exception. ++ __ jmp(&invoke); ++ __ bind(&handler_entry); ++ ++ // Store the current pc as the handler offset. It's used later to create the ++ // handler table. 
++ masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos()); ++ ++ // Caught exception: Store result (exception) in the pending exception ++ // field in the JSEnv and return a failure sentinel. Coming in here the ++ // fp will be invalid because the PushStackHandler below sets it to 0 to ++ // signal the existence of the JSEntry frame. ++ __ li(s1, ExternalReference::Create( ++ IsolateAddressId::kPendingExceptionAddress, masm->isolate())); ++ __ St_d(a0, ++ MemOperand(s1, 0)); // We come back from 'invoke'. result is in a0. ++ __ LoadRoot(a0, RootIndex::kException); ++ __ b(&exit); // b exposes branch delay slot. ++ __ nop(); // Branch delay slot nop. ++ ++ // Invoke: Link this frame into the handler chain. ++ __ bind(&invoke); ++ __ PushStackHandler(); ++ // If an exception not caught by another handler occurs, this handler ++ // returns control to the code after the bal(&invoke) above, which ++ // restores all kCalleeSaved registers (including cp and fp) to their ++ // saved values before returning a failure to C. ++ // ++ // Registers: ++ // either ++ // a0: root register value ++ // a1: entry address ++ // a2: function ++ // a3: receiver ++ // a4: argc ++ // a5: argv ++ // or ++ // a0: root register value ++ // a1: microtask_queue ++ // ++ // Stack: ++ // handler frame ++ // entry frame ++ // callee saved registers + ra ++ // [ O32: 4 args slots] ++ // args ++ // ++ // Invoke the function by calling through JS entry trampoline builtin and ++ // pop the faked function when we return. ++ ++ Handle trampoline_code = ++ masm->isolate()->builtins()->builtin_handle(entry_trampoline); ++ __ Call(trampoline_code, RelocInfo::CODE_TARGET); ++ ++ // Unlink this frame from the handler chain. ++ __ PopStackHandler(); ++ ++ __ bind(&exit); // a0 holds result ++ // Check if the current stack frame is marked as the outermost JS frame. ++ Label non_outermost_js_2; ++ __ pop(a5); ++ __ Branch(&non_outermost_js_2, ne, a5, ++ Operand(StackFrame::OUTERMOST_JSENTRY_FRAME)); ++ __ li(a5, js_entry_sp); ++ __ St_d(zero_reg, MemOperand(a5, 0)); ++ __ bind(&non_outermost_js_2); ++ ++ // Restore the top frame descriptors from the stack. ++ __ pop(a5); ++ __ li(a4, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, ++ masm->isolate())); ++ __ St_d(a5, MemOperand(a4, 0)); ++ ++ // Reset the stack to the callee saved registers. ++ __ addi_d(sp, sp, -EntryFrameConstants::kCallerFPOffset); ++ ++ // Restore callee-saved fpu registers. ++ __ MultiPopFPU(kCalleeSavedFPU); ++ ++ // Restore callee saved registers from the stack. ++ __ MultiPop(kCalleeSaved | ra.bit()); ++ // Return. ++ __ Jump(ra); ++} ++ ++} // namespace ++ ++void Builtins::Generate_JSEntry(MacroAssembler* masm) { ++ Generate_JSEntryVariant(masm, StackFrame::ENTRY, ++ Builtins::kJSEntryTrampoline); ++} ++ ++void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) { ++ Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY, ++ Builtins::kJSConstructEntryTrampoline); ++} ++ ++void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) { ++ Generate_JSEntryVariant(masm, StackFrame::ENTRY, ++ Builtins::kRunMicrotasksTrampoline); ++} ++ ++static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, ++ bool is_construct) { ++ // ----------- S t a t e ------------- ++ // -- a1: new.target ++ // -- a2: function ++ // -- a3: receiver_pointer ++ // -- a4: argc ++ // -- a5: argv ++ // ----------------------------------- ++ ++ // Enter an internal frame. 
++ { ++ FrameScope scope(masm, StackFrame::INTERNAL); ++ ++ // Setup the context (we need to use the caller context from the isolate). ++ ExternalReference context_address = ExternalReference::Create( ++ IsolateAddressId::kContextAddress, masm->isolate()); ++ __ li(cp, context_address); ++ __ Ld_d(cp, MemOperand(cp, 0)); ++ ++ // Push the function and the receiver onto the stack. ++ __ Push(a2, a3); ++ ++ // Check if we have enough stack space to push all arguments. ++ // Clobbers a0 and a3. ++ Generate_CheckStackOverflow(masm, a4, t5, a3); ++ ++ // Setup new.target, function and argc. ++ __ mov(a3, a1); ++ __ mov(a1, a2); ++ __ mov(a0, a4); ++ ++ // a0: argc ++ // a1: function ++ // a3: new.target ++ // a5: argv ++ ++ // Copy arguments to the stack in a loop. ++ // a3: argc ++ // a5: argv, i.e. points to first arg ++ Label loop, entry; ++ __ Alsl_d(s1, a4, a5, kPointerSizeLog2, t7); ++ __ b(&entry); ++ __ nop(); // Branch delay slot nop. ++ // s1 points past last arg. ++ __ bind(&loop); ++ __ Ld_d(s2, MemOperand(a5, 0)); // Read next parameter. ++ __ addi_d(a5, a5, kPointerSize); ++ __ Ld_d(s2, MemOperand(s2, 0)); // Dereference handle. ++ __ push(s2); // Push parameter. ++ __ bind(&entry); ++ __ Branch(&loop, ne, a5, Operand(s1)); ++ ++ // a0: argc ++ // a1: function ++ // a3: new.target ++ ++ // Initialize all JavaScript callee-saved registers, since they will be seen ++ // by the garbage collector as part of handlers. ++ __ LoadRoot(a4, RootIndex::kUndefinedValue); ++ __ mov(a5, a4); ++ __ mov(s1, a4); ++ __ mov(s2, a4); ++ __ mov(s3, a4); ++ __ mov(s4, a4); ++ __ mov(s5, a4); ++ // s6 holds the root address. Do not clobber. ++ // s7 is cp. Do not init. ++ ++ // Invoke the code. ++ Handle builtin = is_construct ++ ? BUILTIN_CODE(masm->isolate(), Construct) ++ : masm->isolate()->builtins()->Call(); ++ __ Call(builtin, RelocInfo::CODE_TARGET); ++ ++ // Leave internal frame. ++ } ++ __ Jump(ra); ++} ++ ++void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) { ++ Generate_JSEntryTrampolineHelper(masm, false); ++} ++ ++void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) { ++ Generate_JSEntryTrampolineHelper(masm, true); ++} ++ ++void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) { ++ // a1: microtask_queue ++ __ mov(RunMicrotasksDescriptor::MicrotaskQueueRegister(), a1); ++ __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET); ++} ++ ++static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm, ++ Register optimized_code, ++ Register closure, ++ Register scratch1, ++ Register scratch2) { ++ // Store code entry in the closure. ++ __ St_d(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset)); ++ __ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below. ++ __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2, ++ kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, ++ OMIT_SMI_CHECK); ++} ++ ++static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) { ++ Register args_count = scratch; ++ ++ // Get the arguments + receiver count. ++ __ Ld_d(args_count, ++ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); ++ __ Ld_w(t0, FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset)); ++ ++ // Leave the frame (also dropping the register file). ++ __ LeaveFrame(StackFrame::INTERPRETED); ++ ++ // Drop receiver + arguments. 
++ __ Add_d(sp, sp, args_count); ++} ++ ++// Tail-call |function_id| if |smi_entry| == |marker| ++static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm, ++ Register smi_entry, ++ OptimizationMarker marker, ++ Runtime::FunctionId function_id) { ++ Label no_match; ++ __ Branch(&no_match, ne, smi_entry, Operand(Smi::FromEnum(marker))); ++ GenerateTailCallToReturnedCode(masm, function_id); ++ __ bind(&no_match); ++} ++ ++static void TailCallOptimizedCodeSlot(MacroAssembler* masm, ++ Register optimized_code_entry, ++ Register scratch1, Register scratch2) { ++ // ----------- S t a t e ------------- ++ // -- a3 : new target (preserved for callee if needed, and caller) ++ // -- a1 : target function (preserved for callee if needed, and caller) ++ // ----------------------------------- ++ DCHECK(!AreAliased(optimized_code_entry, a1, a3, scratch1, scratch2)); ++ ++ Register closure = a1; ++ ++ // Check if the optimized code is marked for deopt. If it is, call the ++ // runtime to clear it. ++ Label found_deoptimized_code; ++ __ Ld_d(a5, FieldMemOperand(optimized_code_entry, ++ Code::kCodeDataContainerOffset)); ++ __ Ld_w(a5, FieldMemOperand(a5, CodeDataContainer::kKindSpecificFlagsOffset)); ++ __ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit)); ++ __ Branch(&found_deoptimized_code, ne, a5, Operand(zero_reg)); ++ ++ // Optimized code is good, get it into the closure and link the closure into ++ // the optimized functions list, then tail call the optimized code. ++ // The feedback vector is no longer used, so re-use it as a scratch ++ // register. ++ ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, ++ scratch1, scratch2); ++ ++ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); ++ __ Add_d(a2, optimized_code_entry, ++ Operand(Code::kHeaderSize - kHeapObjectTag)); ++ __ Jump(a2); ++ ++ // Optimized code slot contains deoptimized code, evict it and re-enter the ++ // closure's code. ++ __ bind(&found_deoptimized_code); ++ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot); ++} ++ ++static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, ++ Register optimization_marker) { ++ // ----------- S t a t e ------------- ++ // -- a3 : new target (preserved for callee if needed, and caller) ++ // -- a1 : target function (preserved for callee if needed, and caller) ++ // -- feedback vector (preserved for caller if needed) ++ // -- optimization_marker : a Smi containing a non-zero optimization marker. ++ // ----------------------------------- ++ DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker)); ++ ++ // TODO(v8:8394): The logging of first execution will break if ++ // feedback vectors are not allocated. We need to find a different way of ++ // logging these events if required. ++ TailCallRuntimeIfMarkerEquals(masm, optimization_marker, ++ OptimizationMarker::kLogFirstExecution, ++ Runtime::kFunctionFirstExecution); ++ TailCallRuntimeIfMarkerEquals(masm, optimization_marker, ++ OptimizationMarker::kCompileOptimized, ++ Runtime::kCompileOptimized_NotConcurrent); ++ TailCallRuntimeIfMarkerEquals(masm, optimization_marker, ++ OptimizationMarker::kCompileOptimizedConcurrent, ++ Runtime::kCompileOptimized_Concurrent); ++ ++ // Otherwise, the marker is InOptimizationQueue, so fall through hoping ++ // that an interrupt will eventually update the slot with optimized code. 
++ if (FLAG_debug_code) { ++ __ Assert(eq, AbortReason::kExpectedOptimizationSentinel, ++ optimization_marker, ++ Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue))); ++ } ++} ++ ++// Advance the current bytecode offset. This simulates what all bytecode ++// handlers do upon completion of the underlying operation. Will bail out to a ++// label if the bytecode (without prefix) is a return bytecode. ++static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, ++ Register bytecode_array, ++ Register bytecode_offset, ++ Register bytecode, Register scratch1, ++ Register scratch2, Label* if_return) { ++ Register bytecode_size_table = scratch1; ++ DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table, ++ bytecode)); ++ ++ __ li(bytecode_size_table, ExternalReference::bytecode_size_table_address()); ++ ++ // Check if the bytecode is a Wide or ExtraWide prefix bytecode. ++ Label process_bytecode, extra_wide; ++ STATIC_ASSERT(0 == static_cast(interpreter::Bytecode::kWide)); ++ STATIC_ASSERT(1 == static_cast(interpreter::Bytecode::kExtraWide)); ++ STATIC_ASSERT(2 == static_cast(interpreter::Bytecode::kDebugBreakWide)); ++ STATIC_ASSERT(3 == ++ static_cast(interpreter::Bytecode::kDebugBreakExtraWide)); ++ __ Branch(&process_bytecode, hi, bytecode, Operand(3)); ++ __ And(scratch2, bytecode, Operand(1)); ++ __ Branch(&extra_wide, ne, scratch2, Operand(zero_reg)); ++ ++ // Load the next bytecode and update table to the wide scaled table. ++ __ Add_d(bytecode_offset, bytecode_offset, Operand(1)); ++ __ Add_d(scratch2, bytecode_array, bytecode_offset); ++ __ Ld_bu(bytecode, MemOperand(scratch2, 0)); ++ __ Add_d(bytecode_size_table, bytecode_size_table, ++ Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount)); ++ __ jmp(&process_bytecode); ++ ++ __ bind(&extra_wide); ++ // Load the next bytecode and update table to the extra wide scaled table. ++ __ Add_d(bytecode_offset, bytecode_offset, Operand(1)); ++ __ Add_d(scratch2, bytecode_array, bytecode_offset); ++ __ Ld_bu(bytecode, MemOperand(scratch2, 0)); ++ __ Add_d(bytecode_size_table, bytecode_size_table, ++ Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount)); ++ ++ __ bind(&process_bytecode); ++ ++// Bailout to the return label if this is a return bytecode. ++#define JUMP_IF_EQUAL(NAME) \ ++ __ Branch(if_return, eq, bytecode, \ ++ Operand(static_cast(interpreter::Bytecode::k##NAME))); ++ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL) ++#undef JUMP_IF_EQUAL ++ ++ // Otherwise, load the size of the current bytecode and advance the offset. ++ __ Alsl_d(scratch2, bytecode, bytecode_size_table, 2, t7); ++ __ Ld_w(scratch2, MemOperand(scratch2, 0)); ++ __ Add_d(bytecode_offset, bytecode_offset, scratch2); ++} ++ ++// Generate code for entering a JS function with the interpreter. ++// On entry to the function the receiver and arguments have been pushed on the ++// stack left to right. The actual argument count matches the formal parameter ++// count expected by the function. ++// ++// The live registers are: ++// o a1: the JS function object being called. ++// o a3: the incoming new target or generator object ++// o cp: our context ++// o fp: the caller's frame pointer ++// o sp: stack pointer ++// o ra: return address ++// ++// The function builds an interpreter frame. See InterpreterFrameConstants in ++// frames.h for its layout. 
++void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { ++ Register closure = a1; ++ Register feedback_vector = a2; ++ ++ // Get the bytecode array from the function object and load it into ++ // kInterpreterBytecodeArrayRegister. ++ __ Ld_d(t5, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); ++ __ Ld_d(kInterpreterBytecodeArrayRegister, ++ FieldMemOperand(t5, SharedFunctionInfo::kFunctionDataOffset)); ++ GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, a4); ++ ++ // The bytecode array could have been flushed from the shared function info, ++ // if so, call into CompileLazy. ++ Label compile_lazy; ++ __ GetObjectType(kInterpreterBytecodeArrayRegister, t5, t5); ++ __ Branch(&compile_lazy, ne, t5, Operand(BYTECODE_ARRAY_TYPE)); ++ ++ // Load the feedback vector from the closure. ++ __ Ld_d(feedback_vector, ++ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); ++ __ Ld_d(feedback_vector, ++ FieldMemOperand(feedback_vector, Cell::kValueOffset)); ++ ++ Label push_stack_frame; ++ // Check if feedback vector is valid. If valid, check for optimized code ++ // and update invocation count. Otherwise, setup the stack frame. ++ __ Ld_d(a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset)); ++ __ Ld_hu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset)); ++ __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE)); ++ ++ // Read off the optimized code slot in the feedback vector, and if there ++ // is optimized code or an optimization marker, call that instead. ++ Register optimized_code_entry = a4; ++ __ Ld_d(optimized_code_entry, ++ FieldMemOperand(feedback_vector, ++ FeedbackVector::kOptimizedCodeWeakOrSmiOffset)); ++ ++ // Check if the optimized code slot is not empty. ++ Label optimized_code_slot_not_empty; ++ ++ __ Branch(&optimized_code_slot_not_empty, ne, optimized_code_entry, ++ Operand(Smi::FromEnum(OptimizationMarker::kNone))); ++ ++ Label not_optimized; ++ __ bind(¬_optimized); ++ ++ // Increment invocation count for the function. ++ __ Ld_w(a4, FieldMemOperand(feedback_vector, ++ FeedbackVector::kInvocationCountOffset)); ++ __ Add_w(a4, a4, Operand(1)); ++ __ St_w(a4, FieldMemOperand(feedback_vector, ++ FeedbackVector::kInvocationCountOffset)); ++ ++ // Open a frame scope to indicate that there is a frame on the stack. The ++ // MANUAL indicates that the scope shouldn't actually generate code to set up ++ // the frame (that is done below). ++ __ bind(&push_stack_frame); ++ FrameScope frame_scope(masm, StackFrame::MANUAL); ++ __ PushStandardFrame(closure); ++ ++ // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are ++ // 8-bit fields next to each other, so we could just optimize by writing a ++ // 16-bit. These static asserts guard our assumption is valid. ++ STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == ++ BytecodeArray::kOsrNestingLevelOffset + kCharSize); ++ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); ++ __ St_h(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister, ++ BytecodeArray::kOsrNestingLevelOffset)); ++ ++ // Load initial bytecode offset. ++ __ li(kInterpreterBytecodeOffsetRegister, ++ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); ++ ++ // Push bytecode array and Smi tagged bytecode array offset. ++ __ SmiTag(a4, kInterpreterBytecodeOffsetRegister); ++ __ Push(kInterpreterBytecodeArrayRegister, a4); ++ ++ // Allocate the local and temporary register file on the stack. 
++ Label stack_overflow; ++ { ++ // Load frame size (word) from the BytecodeArray object. ++ __ Ld_w(a4, FieldMemOperand(kInterpreterBytecodeArrayRegister, ++ BytecodeArray::kFrameSizeOffset)); ++ ++ // Do a stack check to ensure we don't go over the limit. ++ __ Sub_d(a5, sp, Operand(a4)); ++ LoadStackLimit(masm, a2, StackLimitKind::kRealStackLimit); ++ __ Branch(&stack_overflow, lo, a5, Operand(a2)); ++ ++ // If ok, push undefined as the initial value for all register file entries. ++ Label loop_header; ++ Label loop_check; ++ __ LoadRoot(a5, RootIndex::kUndefinedValue); ++ __ Branch(&loop_check); ++ __ bind(&loop_header); ++ // TODO(rmcilroy): Consider doing more than one push per loop iteration. ++ __ push(a5); ++ // Continue loop if not done. ++ __ bind(&loop_check); ++ __ Sub_d(a4, a4, Operand(kPointerSize)); ++ __ Branch(&loop_header, ge, a4, Operand(zero_reg)); ++ } ++ ++ // If the bytecode array has a valid incoming new target or generator object ++ // register, initialize it with incoming value which was passed in r3. ++ Label no_incoming_new_target_or_generator_register; ++ __ Ld_w(a5, FieldMemOperand( ++ kInterpreterBytecodeArrayRegister, ++ BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset)); ++ __ Branch(&no_incoming_new_target_or_generator_register, eq, a5, ++ Operand(zero_reg)); ++ __ Alsl_d(a5, a5, fp, kPointerSizeLog2, t7); ++ __ St_d(a3, MemOperand(a5, 0)); ++ __ bind(&no_incoming_new_target_or_generator_register); ++ ++ // Perform interrupt stack check. ++ // TODO(solanes): Merge with the real stack limit check above. ++ Label stack_check_interrupt, after_stack_check_interrupt; ++ LoadStackLimit(masm, a5, StackLimitKind::kInterruptStackLimit); ++ __ Branch(&stack_check_interrupt, lo, sp, Operand(a5)); ++ __ bind(&after_stack_check_interrupt); ++ ++ // Load accumulator as undefined. ++ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); ++ ++ // Load the dispatch table into a register and dispatch to the bytecode ++ // handler at the current bytecode offset. ++ Label do_dispatch; ++ __ bind(&do_dispatch); ++ __ li(kInterpreterDispatchTableRegister, ++ ExternalReference::interpreter_dispatch_table_address(masm->isolate())); ++ __ Add_d(t5, kInterpreterBytecodeArrayRegister, ++ kInterpreterBytecodeOffsetRegister); ++ __ Ld_bu(a7, MemOperand(t5, 0)); ++ __ Alsl_d(kScratchReg, a7, kInterpreterDispatchTableRegister, ++ kPointerSizeLog2, t7); ++ __ Ld_d(kJavaScriptCallCodeStartRegister, MemOperand(kScratchReg, 0)); ++ __ Call(kJavaScriptCallCodeStartRegister); ++ masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset()); ++ ++ // Any returns to the entry trampoline are either due to the return bytecode ++ // or the interpreter tail calling a builtin and then a dispatch. ++ ++ // Get bytecode array and bytecode offset from the stack frame. ++ __ Ld_d(kInterpreterBytecodeArrayRegister, ++ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); ++ __ Ld_d(kInterpreterBytecodeOffsetRegister, ++ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); ++ __ SmiUntag(kInterpreterBytecodeOffsetRegister); ++ ++ // Either return, or advance to the next bytecode and dispatch. 
++ Label do_return; ++ __ Add_d(a1, kInterpreterBytecodeArrayRegister, ++ kInterpreterBytecodeOffsetRegister); ++ __ Ld_bu(a1, MemOperand(a1, 0)); ++ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister, ++ kInterpreterBytecodeOffsetRegister, a1, a2, a3, ++ &do_return); ++ __ jmp(&do_dispatch); ++ ++ __ bind(&do_return); ++ // The return value is in a0. ++ LeaveInterpreterFrame(masm, t0); ++ __ Jump(ra); ++ ++ __ bind(&stack_check_interrupt); ++ // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset ++ // for the call to the StackGuard. ++ __ li(kInterpreterBytecodeOffsetRegister, ++ Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag + ++ kFunctionEntryBytecodeOffset))); ++ __ St_d(kInterpreterBytecodeOffsetRegister, ++ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); ++ __ CallRuntime(Runtime::kStackGuard); ++ ++ // After the call, restore the bytecode array, bytecode offset and accumulator ++ // registers again. Also, restore the bytecode offset in the stack to its ++ // previous value. ++ __ Ld_d(kInterpreterBytecodeArrayRegister, ++ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); ++ __ li(kInterpreterBytecodeOffsetRegister, ++ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); ++ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); ++ ++ __ SmiTag(a5, kInterpreterBytecodeOffsetRegister); ++ __ St_d(a5, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); ++ ++ __ jmp(&after_stack_check_interrupt); ++ ++ __ bind(&optimized_code_slot_not_empty); ++ Label maybe_has_optimized_code; ++ // Check if optimized code marker is actually a weak reference to the ++ // optimized code as opposed to an optimization marker. ++ __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code, t7); ++ MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry); ++ // Fall through if there's no runnable optimized code. ++ __ jmp(¬_optimized); ++ ++ __ bind(&maybe_has_optimized_code); ++ // Load code entry from the weak reference, if it was cleared, resume ++ // execution of unoptimized code. ++ __ LoadWeakValue(optimized_code_entry, optimized_code_entry, ¬_optimized); ++ TailCallOptimizedCodeSlot(masm, optimized_code_entry, t3, a5); ++ ++ __ bind(&compile_lazy); ++ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); ++ // Unreachable code. ++ __ break_(0xCC); ++ ++ __ bind(&stack_overflow); ++ __ CallRuntime(Runtime::kThrowStackOverflow); ++ // Unreachable code. ++ __ break_(0xCC); ++} ++ ++static void Generate_InterpreterPushArgs(MacroAssembler* masm, ++ Register num_args, Register index, ++ Register scratch, Register scratch2) { ++ // Find the address of the last argument. ++ __ mov(scratch2, num_args); ++ __ slli_d(scratch2, scratch2, kPointerSizeLog2); ++ __ Sub_d(scratch2, index, Operand(scratch2)); ++ ++ // Push the arguments. 
++ Label loop_header, loop_check; ++ __ Branch(&loop_check); ++ __ bind(&loop_header); ++ __ Ld_d(scratch, MemOperand(index, 0)); ++ __ Add_d(index, index, Operand(-kPointerSize)); ++ __ push(scratch); ++ __ bind(&loop_check); ++ __ Branch(&loop_header, hi, index, Operand(scratch2)); ++} ++ ++// static ++void Builtins::Generate_InterpreterPushArgsThenCallImpl( ++ MacroAssembler* masm, ConvertReceiverMode receiver_mode, ++ InterpreterPushArgsMode mode) { ++ DCHECK(mode != InterpreterPushArgsMode::kArrayFunction); ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a2 : the address of the first argument to be pushed. Subsequent ++ // arguments should be consecutive above this, in the same order as ++ // they are to be pushed onto the stack. ++ // -- a1 : the target to call (can be any Object). ++ // ----------------------------------- ++ Label stack_overflow; ++ ++ __ Add_d(a3, a0, Operand(1)); // Add one for receiver. ++ ++ // Push "undefined" as the receiver arg if we need to. ++ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { ++ __ PushRoot(RootIndex::kUndefinedValue); ++ __ Sub_d(a3, a3, Operand(1)); // Subtract one for receiver. ++ } ++ ++ Generate_StackOverflowCheck(masm, a3, a4, t0, &stack_overflow); ++ ++ // This function modifies a2, t0 and a4. ++ Generate_InterpreterPushArgs(masm, a3, a2, a4, t0); ++ ++ if (mode == InterpreterPushArgsMode::kWithFinalSpread) { ++ __ Pop(a2); // Pass the spread in a register ++ __ Sub_d(a0, a0, Operand(1)); // Subtract one for spread ++ } ++ ++ // Call the target. ++ if (mode == InterpreterPushArgsMode::kWithFinalSpread) { ++ __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread), ++ RelocInfo::CODE_TARGET); ++ } else { ++ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny), ++ RelocInfo::CODE_TARGET); ++ } ++ ++ __ bind(&stack_overflow); ++ { ++ __ TailCallRuntime(Runtime::kThrowStackOverflow); ++ // Unreachable code. ++ __ break_(0xCC); ++ } ++} ++ ++// static ++void Builtins::Generate_InterpreterPushArgsThenConstructImpl( ++ MacroAssembler* masm, InterpreterPushArgsMode mode) { ++ // ----------- S t a t e ------------- ++ // -- a0 : argument count (not including receiver) ++ // -- a3 : new target ++ // -- a1 : constructor to call ++ // -- a2 : allocation site feedback if available, undefined otherwise. ++ // -- a4 : address of the first argument ++ // ----------------------------------- ++ Label stack_overflow; ++ ++ // Push a slot for the receiver. ++ __ push(zero_reg); ++ ++ Generate_StackOverflowCheck(masm, a0, a5, t0, &stack_overflow); ++ ++ // This function modifies t0, a4 and a5. ++ Generate_InterpreterPushArgs(masm, a0, a4, a5, t0); ++ ++ if (mode == InterpreterPushArgsMode::kWithFinalSpread) { ++ __ Pop(a2); // Pass the spread in a register ++ __ Sub_d(a0, a0, Operand(1)); // Subtract one for spread ++ } else { ++ __ AssertUndefinedOrAllocationSite(a2, t0); ++ } ++ ++ if (mode == InterpreterPushArgsMode::kArrayFunction) { ++ __ AssertFunction(a1); ++ ++ // Tail call to the function-specific construct stub (still in the caller ++ // context at this point). ++ __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl), ++ RelocInfo::CODE_TARGET); ++ } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) { ++ // Call the constructor with a0, a1, and a3 unmodified. 
++ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread), ++ RelocInfo::CODE_TARGET); ++ } else { ++ DCHECK_EQ(InterpreterPushArgsMode::kOther, mode); ++ // Call the constructor with a0, a1, and a3 unmodified. ++ __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET); ++ } ++ ++ __ bind(&stack_overflow); ++ { ++ __ TailCallRuntime(Runtime::kThrowStackOverflow); ++ // Unreachable code. ++ __ break_(0xCC); ++ } ++} ++ ++static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { ++ // Set the return address to the correct point in the interpreter entry ++ // trampoline. ++ Label builtin_trampoline, trampoline_loaded; ++ Smi interpreter_entry_return_pc_offset( ++ masm->isolate()->heap()->interpreter_entry_return_pc_offset()); ++ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero()); ++ ++ // If the SFI function_data is an InterpreterData, the function will have a ++ // custom copy of the interpreter entry trampoline for profiling. If so, ++ // get the custom trampoline, otherwise grab the entry address of the global ++ // trampoline. ++ __ Ld_d(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); ++ __ Ld_d(t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset)); ++ __ Ld_d(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset)); ++ __ GetObjectType(t0, kInterpreterDispatchTableRegister, ++ kInterpreterDispatchTableRegister); ++ __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister, ++ Operand(INTERPRETER_DATA_TYPE)); ++ ++ __ Ld_d(t0, ++ FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset)); ++ __ Add_d(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag)); ++ __ Branch(&trampoline_loaded); ++ ++ __ bind(&builtin_trampoline); ++ __ li(t0, ExternalReference:: ++ address_of_interpreter_entry_trampoline_instruction_start( ++ masm->isolate())); ++ __ Ld_d(t0, MemOperand(t0, 0)); ++ ++ __ bind(&trampoline_loaded); ++ __ Add_d(ra, t0, Operand(interpreter_entry_return_pc_offset.value())); ++ ++ // Initialize the dispatch table register. ++ __ li(kInterpreterDispatchTableRegister, ++ ExternalReference::interpreter_dispatch_table_address(masm->isolate())); ++ ++ // Get the bytecode array pointer from the frame. ++ __ Ld_d(kInterpreterBytecodeArrayRegister, ++ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); ++ ++ if (FLAG_debug_code) { ++ // Check function data field is actually a BytecodeArray object. ++ __ SmiTst(kInterpreterBytecodeArrayRegister, kScratchReg); ++ __ Assert(ne, ++ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, ++ kScratchReg, Operand(zero_reg)); ++ __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1); ++ __ Assert(eq, ++ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, ++ a1, Operand(BYTECODE_ARRAY_TYPE)); ++ } ++ ++ // Get the target bytecode offset from the frame. ++ __ SmiUntag(kInterpreterBytecodeOffsetRegister, ++ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); ++ ++ if (FLAG_debug_code) { ++ Label okay; ++ __ Branch(&okay, ge, kInterpreterBytecodeOffsetRegister, ++ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); ++ // Unreachable code. ++ __ break_(0xCC); ++ __ bind(&okay); ++ } ++ ++ // Dispatch to the target bytecode. 
++ __ Add_d(a1, kInterpreterBytecodeArrayRegister, ++ kInterpreterBytecodeOffsetRegister); ++ __ Ld_bu(a7, MemOperand(a1, 0)); ++ __ Alsl_d(a1, a7, kInterpreterDispatchTableRegister, kPointerSizeLog2, t7); ++ __ Ld_d(kJavaScriptCallCodeStartRegister, MemOperand(a1, 0)); ++ __ Jump(kJavaScriptCallCodeStartRegister); ++} ++ ++void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { ++ // Advance the current bytecode offset stored within the given interpreter ++ // stack frame. This simulates what all bytecode handlers do upon completion ++ // of the underlying operation. ++ __ Ld_d(kInterpreterBytecodeArrayRegister, ++ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); ++ __ Ld_d(kInterpreterBytecodeOffsetRegister, ++ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); ++ __ SmiUntag(kInterpreterBytecodeOffsetRegister); ++ ++ Label enter_bytecode, function_entry_bytecode; ++ __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister, ++ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag + ++ kFunctionEntryBytecodeOffset)); ++ ++ // Load the current bytecode. ++ __ Add_d(a1, kInterpreterBytecodeArrayRegister, ++ kInterpreterBytecodeOffsetRegister); ++ __ Ld_bu(a1, MemOperand(a1, 0)); ++ ++ // Advance to the next bytecode. ++ Label if_return; ++ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister, ++ kInterpreterBytecodeOffsetRegister, a1, a2, a3, ++ &if_return); ++ ++ __ bind(&enter_bytecode); ++ // Convert new bytecode offset to a Smi and save in the stackframe. ++ __ SmiTag(a2, kInterpreterBytecodeOffsetRegister); ++ __ St_d(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); ++ ++ Generate_InterpreterEnterBytecode(masm); ++ ++ __ bind(&function_entry_bytecode); ++ // If the code deoptimizes during the implicit function entry stack interrupt ++ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is ++ // not a valid bytecode offset. Detect this case and advance to the first ++ // actual bytecode. ++ __ li(kInterpreterBytecodeOffsetRegister, ++ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); ++ __ Branch(&enter_bytecode); ++ ++ // We should never take the if_return path. ++ __ bind(&if_return); ++ __ Abort(AbortReason::kInvalidBytecodeAdvance); ++} ++ ++void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) { ++ Generate_InterpreterEnterBytecode(masm); ++} ++ ++namespace { ++void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, ++ bool java_script_builtin, ++ bool with_result) { ++ const RegisterConfiguration* config(RegisterConfiguration::Default()); ++ int allocatable_register_count = config->num_allocatable_general_registers(); ++ if (with_result) { ++ // Overwrite the hole inserted by the deoptimizer with the return value from ++ // the LAZY deopt point. ++ __ St_d(a0, ++ MemOperand( ++ sp, config->num_allocatable_general_registers() * kPointerSize + ++ BuiltinContinuationFrameConstants::kFixedFrameSize)); ++ } ++ for (int i = allocatable_register_count - 1; i >= 0; --i) { ++ int code = config->GetAllocatableGeneralCode(i); ++ __ Pop(Register::from_code(code)); ++ if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) { ++ __ SmiUntag(Register::from_code(code)); ++ } ++ } ++ __ Ld_d( ++ fp, ++ MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); ++ // Load builtin index (stored as a Smi) and use it to get the builtin start ++ // address from the builtins table. 
++ __ Pop(t0); ++ __ Add_d(sp, sp, ++ Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); ++ __ Pop(ra); ++ __ LoadEntryFromBuiltinIndex(t0); ++ __ Jump(t0); ++} ++} // namespace ++ ++void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) { ++ Generate_ContinueToBuiltinHelper(masm, false, false); ++} ++ ++void Builtins::Generate_ContinueToCodeStubBuiltinWithResult( ++ MacroAssembler* masm) { ++ Generate_ContinueToBuiltinHelper(masm, false, true); ++} ++ ++void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) { ++ Generate_ContinueToBuiltinHelper(masm, true, false); ++} ++ ++void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult( ++ MacroAssembler* masm) { ++ Generate_ContinueToBuiltinHelper(masm, true, true); ++} ++ ++void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { ++ { ++ FrameScope scope(masm, StackFrame::INTERNAL); ++ __ CallRuntime(Runtime::kNotifyDeoptimized); ++ } ++ ++ DCHECK_EQ(kInterpreterAccumulatorRegister.code(), a0.code()); ++ __ Ld_d(a0, MemOperand(sp, 0 * kPointerSize)); ++ __ Add_d(sp, sp, Operand(1 * kPointerSize)); // Remove state. ++ __ Ret(); ++} ++ ++void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { ++ { ++ FrameScope scope(masm, StackFrame::INTERNAL); ++ __ CallRuntime(Runtime::kCompileForOnStackReplacement); ++ } ++ ++ // If the code object is null, just return to the caller. ++ __ Ret(eq, a0, Operand(Smi::zero())); ++ ++ // Drop the handler frame that is be sitting on top of the actual ++ // JavaScript frame. This is the case then OSR is triggered from bytecode. ++ __ LeaveFrame(StackFrame::STUB); ++ ++ // Load deoptimization data from the code object. ++ // = [#deoptimization_data_offset] ++ __ Ld_d(a1, MemOperand(a0, Code::kDeoptimizationDataOffset - kHeapObjectTag)); ++ ++ // Load the OSR entrypoint offset from the deoptimization data. ++ // = [#header_size + #osr_pc_offset] ++ __ SmiUntag(a1, MemOperand(a1, FixedArray::OffsetOfElementAt( ++ DeoptimizationData::kOsrPcOffsetIndex) - ++ kHeapObjectTag)); ++ ++ // Compute the target address = code_obj + header_size + osr_offset ++ // = + #header_size + ++ __ Add_d(a0, a0, a1); ++ __ addi_d(ra, a0, Code::kHeaderSize - kHeapObjectTag); ++ ++ // And "return" to the OSR entry point of the function. ++ __ Ret(); ++} ++ ++// static ++void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0 : argc ++ // -- sp[0] : argArray ++ // -- sp[4] : thisArg ++ // -- sp[8] : receiver ++ // ----------------------------------- ++ ++ Register argc = a0; ++ Register arg_array = a2; ++ Register receiver = a1; ++ Register this_arg = a5; ++ Register undefined_value = a3; ++ Register scratch = a4; ++ ++ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue); ++ ++ // 1. Load receiver into a1, argArray into a2 (if present), remove all ++ // arguments from the stack (including the receiver), and push thisArg (if ++ // present) instead. ++ { ++ // Claim (2 - argc) dummy arguments form the stack, to put the stack in a ++ // consistent state for a simple pop operation. 
++ ++ __ Sub_d(sp, sp, Operand(2 * kPointerSize)); ++ __ Alsl_d(sp, argc, sp, kPointerSizeLog2, t7); ++ __ mov(scratch, argc); ++ __ Pop(this_arg, arg_array); // Overwrite argc ++ __ Movz(arg_array, undefined_value, scratch); // if argc == 0 ++ __ Movz(this_arg, undefined_value, scratch); // if argc == 0 ++ __ Sub_d(scratch, scratch, Operand(1)); ++ __ Movz(arg_array, undefined_value, scratch); // if argc == 1 ++ __ Ld_d(receiver, MemOperand(sp, 0)); ++ __ St_d(this_arg, MemOperand(sp, 0)); ++ } ++ ++ // ----------- S t a t e ------------- ++ // -- a2 : argArray ++ // -- a1 : receiver ++ // -- a3 : undefined root value ++ // -- sp[0] : thisArg ++ // ----------------------------------- ++ ++ // 2. We don't need to check explicitly for callable receiver here, ++ // since that's the first thing the Call/CallWithArrayLike builtins ++ // will do. ++ ++ // 3. Tail call with no arguments if argArray is null or undefined. ++ Label no_arguments; ++ __ JumpIfRoot(arg_array, RootIndex::kNullValue, &no_arguments); ++ __ Branch(&no_arguments, eq, arg_array, Operand(undefined_value)); ++ ++ // 4a. Apply the receiver to the given argArray. ++ __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike), ++ RelocInfo::CODE_TARGET); ++ ++ // 4b. The argArray is either null or undefined, so we tail call without any ++ // arguments to the receiver. ++ __ bind(&no_arguments); ++ { ++ __ mov(a0, zero_reg); ++ DCHECK(receiver == a1); ++ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET); ++ } ++} ++ ++// static ++void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { ++ // 1. Make sure we have at least one argument. ++ // a0: actual number of arguments ++ { ++ Label done; ++ __ Branch(&done, ne, a0, Operand(zero_reg)); ++ __ PushRoot(RootIndex::kUndefinedValue); ++ __ Add_d(a0, a0, Operand(1)); ++ __ bind(&done); ++ } ++ ++ // 2. Get the function to call (passed as receiver) from the stack. ++ // a0: actual number of arguments ++ __ Alsl_d(kScratchReg, a0, sp, kPointerSizeLog2, t7); ++ __ Ld_d(a1, MemOperand(kScratchReg, 0)); ++ ++ // 3. Shift arguments and return address one slot down on the stack ++ // (overwriting the original receiver). Adjust argument count to make ++ // the original first argument the new receiver. ++ // a0: actual number of arguments ++ // a1: function ++ { ++ Label loop; ++ // Calculate the copy start address (destination). Copy end address is sp. ++ __ Alsl_d(a2, a0, sp, kPointerSizeLog2, t7); ++ ++ __ bind(&loop); ++ __ Ld_d(kScratchReg, MemOperand(a2, -kPointerSize)); ++ __ St_d(kScratchReg, MemOperand(a2, 0)); ++ __ Sub_d(a2, a2, Operand(kPointerSize)); ++ __ Branch(&loop, ne, a2, Operand(sp)); ++ // Adjust the actual number of arguments and remove the top element ++ // (which is a copy of the last argument). ++ __ Sub_d(a0, a0, Operand(1)); ++ __ Pop(); ++ } ++ ++ // 4. Call the callable. ++ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET); ++} ++ ++void Builtins::Generate_ReflectApply(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0 : argc ++ // -- sp[0] : argumentsList (if argc ==3) ++ // -- sp[4] : thisArgument (if argc >=2) ++ // -- sp[8] : target (if argc >=1) ++ // -- sp[12] : receiver ++ // ----------------------------------- ++ ++ Register argc = a0; ++ Register arguments_list = a2; ++ Register target = a1; ++ Register this_argument = a5; ++ Register undefined_value = a3; ++ Register scratch = a4; ++ ++ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue); ++ ++ // 1. 
Load target into a1 (if present), argumentsList into a2 (if present), ++ // remove all arguments from the stack (including the receiver), and push ++ // thisArgument (if present) instead. ++ { ++ // Claim (3 - argc) dummy arguments form the stack, to put the stack in a ++ // consistent state for a simple pop operation. ++ ++ __ Sub_d(sp, sp, Operand(3 * kPointerSize)); ++ __ Alsl_d(sp, argc, sp, kPointerSizeLog2, t7); ++ __ mov(scratch, argc); ++ __ Pop(target, this_argument, arguments_list); ++ __ Movz(arguments_list, undefined_value, scratch); // if argc == 0 ++ __ Movz(this_argument, undefined_value, scratch); // if argc == 0 ++ __ Movz(target, undefined_value, scratch); // if argc == 0 ++ __ Sub_d(scratch, scratch, Operand(1)); ++ __ Movz(arguments_list, undefined_value, scratch); // if argc == 1 ++ __ Movz(this_argument, undefined_value, scratch); // if argc == 1 ++ __ Sub_d(scratch, scratch, Operand(1)); ++ __ Movz(arguments_list, undefined_value, scratch); // if argc == 2 ++ ++ __ St_d(this_argument, MemOperand(sp, 0)); // Overwrite receiver ++ } ++ ++ // ----------- S t a t e ------------- ++ // -- a2 : argumentsList ++ // -- a1 : target ++ // -- a3 : undefined root value ++ // -- sp[0] : thisArgument ++ // ----------------------------------- ++ ++ // 2. We don't need to check explicitly for callable target here, ++ // since that's the first thing the Call/CallWithArrayLike builtins ++ // will do. ++ ++ // 3. Apply the target to the given argumentsList. ++ __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike), ++ RelocInfo::CODE_TARGET); ++} ++ ++void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0 : argc ++ // -- sp[0] : new.target (optional) (dummy value if argc <= 2) ++ // -- sp[4] : argumentsList (dummy value if argc <= 1) ++ // -- sp[8] : target (dummy value if argc == 0) ++ // -- sp[12] : receiver ++ // ----------------------------------- ++ Register argc = a0; ++ Register arguments_list = a2; ++ Register target = a1; ++ Register new_target = a3; ++ Register undefined_value = a4; ++ Register scratch = a5; ++ ++ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue); ++ ++ // 1. Load target into a1 (if present), argumentsList into a2 (if present), ++ // new.target into a3 (if present, otherwise use target), remove all ++ // arguments from the stack (including the receiver), and push thisArgument ++ // (if present) instead. ++ { ++ // Claim (3 - argc) dummy arguments form the stack, to put the stack in a ++ // consistent state for a simple pop operation. ++ ++ __ Sub_d(sp, sp, Operand(3 * kPointerSize)); ++ __ Alsl_d(sp, argc, sp, kPointerSizeLog2, t7); ++ __ mov(scratch, argc); ++ __ Pop(target, arguments_list, new_target); ++ __ Movz(arguments_list, undefined_value, scratch); // if argc == 0 ++ __ Movz(new_target, undefined_value, scratch); // if argc == 0 ++ __ Movz(target, undefined_value, scratch); // if argc == 0 ++ __ Sub_d(scratch, scratch, Operand(1)); ++ __ Movz(arguments_list, undefined_value, scratch); // if argc == 1 ++ __ Movz(new_target, target, scratch); // if argc == 1 ++ __ Sub_d(scratch, scratch, Operand(1)); ++ __ Movz(new_target, target, scratch); // if argc == 2 ++ ++ __ St_d(undefined_value, MemOperand(sp, 0)); // Overwrite receiver ++ } ++ ++ // ----------- S t a t e ------------- ++ // -- a2 : argumentsList ++ // -- a1 : target ++ // -- a3 : new.target ++ // -- sp[0] : receiver (undefined) ++ // ----------------------------------- ++ ++ // 2. 
We don't need to check explicitly for constructor target here, ++ // since that's the first thing the Construct/ConstructWithArrayLike ++ // builtins will do. ++ ++ // 3. We don't need to check explicitly for constructor new.target here, ++ // since that's the second thing the Construct/ConstructWithArrayLike ++ // builtins will do. ++ ++ // 4. Construct the target with the given new.target and argumentsList. ++ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike), ++ RelocInfo::CODE_TARGET); ++} ++ ++static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) { ++ __ SmiTag(a0); ++ __ li(a4, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); ++ __ Push(ra, fp, a4, a1, a0); ++ __ Push(Smi::zero()); // Padding. ++ __ Add_d(fp, sp, ++ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp)); ++} ++ ++static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0 : result being passed through ++ // ----------------------------------- ++ // Get the number of arguments passed (as a smi), tear down the frame and ++ // then tear down the parameters. ++ __ Ld_d(a1, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset)); ++ __ mov(sp, fp); ++ __ Pop(ra, fp); ++ __ SmiScale(a4, a1, kPointerSizeLog2); ++ __ Add_d(sp, sp, a4); ++ // Adjust for the receiver. ++ __ Add_d(sp, sp, Operand(kPointerSize)); ++} ++ ++// static ++void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, ++ Handle code) { ++ // ----------- S t a t e ------------- ++ // -- a1 : target ++ // -- a0 : number of parameters on the stack (not including the receiver) ++ // -- a2 : arguments list (a FixedArray) ++ // -- a4 : len (number of elements to push from args) ++ // -- a3 : new.target (for [[Construct]]) ++ // ----------------------------------- ++ if (masm->emit_debug_code()) { ++ // Allow a2 to be a FixedArray, or a FixedDoubleArray if a4 == 0. ++ Label ok, fail; ++ __ AssertNotSmi(a2); ++ __ GetObjectType(a2, t8, t8); ++ __ Branch(&ok, eq, t8, Operand(FIXED_ARRAY_TYPE)); ++ __ Branch(&fail, ne, t8, Operand(FIXED_DOUBLE_ARRAY_TYPE)); ++ __ Branch(&ok, eq, a4, Operand(zero_reg)); ++ // Fall through. ++ __ bind(&fail); ++ __ Abort(AbortReason::kOperandIsNotAFixedArray); ++ ++ __ bind(&ok); ++ } ++ ++ Register args = a2; ++ Register len = a4; ++ ++ // Check for stack overflow. ++ Label stack_overflow; ++ Generate_StackOverflowCheck(masm, len, kScratchReg, a5, &stack_overflow); ++ ++ // Push arguments onto the stack (thisArgument is already on the stack). ++ { ++ Label done, push, loop; ++ Register src = a6; ++ Register scratch = len; ++ ++ __ addi_d(src, args, FixedArray::kHeaderSize - kHeapObjectTag); ++ __ Add_d(a0, a0, len); // The 'len' argument for Call() or Construct(). ++ __ Branch(&done, eq, len, Operand(zero_reg)); ++ __ slli_d(scratch, len, kPointerSizeLog2); ++ __ Sub_d(scratch, sp, Operand(scratch)); ++ __ LoadRoot(t1, RootIndex::kTheHoleValue); ++ __ bind(&loop); ++ __ Ld_d(a5, MemOperand(src, 0)); ++ __ Branch(&push, ne, a5, Operand(t1)); ++ __ LoadRoot(a5, RootIndex::kUndefinedValue); ++ __ bind(&push); ++ __ addi_d(src, src, kPointerSize); ++ __ Push(a5); ++ __ Branch(&loop, ne, scratch, Operand(sp)); ++ __ bind(&done); ++ } ++ ++ // Tail-call to the actual Call or Construct builtin. 
++ __ Jump(code, RelocInfo::CODE_TARGET); ++ ++ __ bind(&stack_overflow); ++ __ TailCallRuntime(Runtime::kThrowStackOverflow); ++} ++ ++// static ++void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, ++ CallOrConstructMode mode, ++ Handle code) { ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a3 : the new.target (for [[Construct]] calls) ++ // -- a1 : the target to call (can be any Object) ++ // -- a2 : start index (to support rest parameters) ++ // ----------------------------------- ++ ++ // Check if new.target has a [[Construct]] internal method. ++ if (mode == CallOrConstructMode::kConstruct) { ++ Label new_target_constructor, new_target_not_constructor; ++ __ JumpIfSmi(a3, &new_target_not_constructor); ++ __ Ld_d(t1, FieldMemOperand(a3, HeapObject::kMapOffset)); ++ __ Ld_bu(t1, FieldMemOperand(t1, Map::kBitFieldOffset)); ++ __ And(t1, t1, Operand(Map::Bits1::IsConstructorBit::kMask)); ++ __ Branch(&new_target_constructor, ne, t1, Operand(zero_reg)); ++ __ bind(&new_target_not_constructor); ++ { ++ FrameScope scope(masm, StackFrame::MANUAL); ++ __ EnterFrame(StackFrame::INTERNAL); ++ __ Push(a3); ++ __ CallRuntime(Runtime::kThrowNotConstructor); ++ } ++ __ bind(&new_target_constructor); ++ } ++ ++ // Check if we have an arguments adaptor frame below the function frame. ++ Label arguments_adaptor, arguments_done; ++ __ Ld_d(a6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); ++ __ Ld_d(a7, MemOperand(a6, CommonFrameConstants::kContextOrFrameTypeOffset)); ++ __ Branch(&arguments_adaptor, eq, a7, ++ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); ++ { ++ __ Ld_d(a7, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); ++ __ Ld_d(a7, FieldMemOperand(a7, JSFunction::kSharedFunctionInfoOffset)); ++ __ Ld_hu(a7, FieldMemOperand( ++ a7, SharedFunctionInfo::kFormalParameterCountOffset)); ++ __ mov(a6, fp); ++ } ++ __ Branch(&arguments_done); ++ __ bind(&arguments_adaptor); ++ { ++ // Just get the length from the ArgumentsAdaptorFrame. ++ __ SmiUntag(a7, ++ MemOperand(a6, ArgumentsAdaptorFrameConstants::kLengthOffset)); ++ } ++ __ bind(&arguments_done); ++ ++ Label stack_done, stack_overflow; ++ __ Sub_w(a7, a7, a2); ++ __ Branch(&stack_done, le, a7, Operand(zero_reg)); ++ { ++ // Check for stack overflow. ++ Generate_StackOverflowCheck(masm, a7, a4, a5, &stack_overflow); ++ ++ // Forward the arguments from the caller frame. ++ { ++ Label loop; ++ __ Add_d(a0, a0, a7); ++ __ bind(&loop); ++ { ++ __ Alsl_d(kScratchReg, a7, a6, kPointerSizeLog2, t7); ++ __ Ld_d(kScratchReg, MemOperand(kScratchReg, 1 * kPointerSize)); ++ __ push(kScratchReg); ++ __ Sub_w(a7, a7, Operand(1)); ++ __ Branch(&loop, ne, a7, Operand(zero_reg)); ++ } ++ } ++ } ++ __ Branch(&stack_done); ++ __ bind(&stack_overflow); ++ __ TailCallRuntime(Runtime::kThrowStackOverflow); ++ __ bind(&stack_done); ++ ++ // Tail-call to the {code} handler. ++ __ Jump(code, RelocInfo::CODE_TARGET); ++} ++ ++// static ++void Builtins::Generate_CallFunction(MacroAssembler* masm, ++ ConvertReceiverMode mode) { ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a1 : the function to call (checked to be a JSFunction) ++ // ----------------------------------- ++ __ AssertFunction(a1); ++ ++ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) ++ // Check that function is not a "classConstructor". 
++ Label class_constructor; ++ __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); ++ __ Ld_wu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset)); ++ __ And(kScratchReg, a3, ++ Operand(SharedFunctionInfo::IsClassConstructorBit::kMask)); ++ __ Branch(&class_constructor, ne, kScratchReg, Operand(zero_reg)); ++ ++ // Enter the context of the function; ToObject has to run in the function ++ // context, and we also need to take the global proxy from the function ++ // context in case of conversion. ++ __ Ld_d(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); ++ // We need to convert the receiver for non-native sloppy mode functions. ++ Label done_convert; ++ __ Ld_wu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset)); ++ __ And(kScratchReg, a3, ++ Operand(SharedFunctionInfo::IsNativeBit::kMask | ++ SharedFunctionInfo::IsStrictBit::kMask)); ++ __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg)); ++ { ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a1 : the function to call (checked to be a JSFunction) ++ // -- a2 : the shared function info. ++ // -- cp : the function context. ++ // ----------------------------------- ++ ++ if (mode == ConvertReceiverMode::kNullOrUndefined) { ++ // Patch receiver to global proxy. ++ __ LoadGlobalProxy(a3); ++ } else { ++ Label convert_to_object, convert_receiver; ++ __ Alsl_d(kScratchReg, a0, sp, kPointerSizeLog2, t7); ++ __ Ld_d(a3, MemOperand(kScratchReg, 0)); ++ __ JumpIfSmi(a3, &convert_to_object); ++ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); ++ __ GetObjectType(a3, a4, a4); ++ __ Branch(&done_convert, hs, a4, Operand(FIRST_JS_RECEIVER_TYPE)); ++ if (mode != ConvertReceiverMode::kNotNullOrUndefined) { ++ Label convert_global_proxy; ++ __ JumpIfRoot(a3, RootIndex::kUndefinedValue, &convert_global_proxy); ++ __ JumpIfNotRoot(a3, RootIndex::kNullValue, &convert_to_object); ++ __ bind(&convert_global_proxy); ++ { ++ // Patch receiver to global proxy. ++ __ LoadGlobalProxy(a3); ++ } ++ __ Branch(&convert_receiver); ++ } ++ __ bind(&convert_to_object); ++ { ++ // Convert receiver using ToObject. ++ // TODO(bmeurer): Inline the allocation here to avoid building the frame ++ // in the fast case? (fall back to AllocateInNewSpace?) ++ FrameScope scope(masm, StackFrame::INTERNAL); ++ __ SmiTag(a0); ++ __ Push(a0, a1); ++ __ mov(a0, a3); ++ __ Push(cp); ++ __ Call(BUILTIN_CODE(masm->isolate(), ToObject), ++ RelocInfo::CODE_TARGET); ++ __ Pop(cp); ++ __ mov(a3, a0); ++ __ Pop(a0, a1); ++ __ SmiUntag(a0); ++ } ++ __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); ++ __ bind(&convert_receiver); ++ } ++ __ Alsl_d(kScratchReg, a0, sp, kPointerSizeLog2, t7); ++ __ St_d(a3, MemOperand(kScratchReg, 0)); ++ } ++ __ bind(&done_convert); ++ ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a1 : the function to call (checked to be a JSFunction) ++ // -- a2 : the shared function info. ++ // -- cp : the function context. ++ // ----------------------------------- ++ ++ __ Ld_hu( ++ a2, FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset)); ++ __ InvokeFunctionCode(a1, no_reg, a2, a0, JUMP_FUNCTION); ++ ++ // The function is a "classConstructor", need to raise an exception. 
++ __ bind(&class_constructor); ++ { ++ FrameScope frame(masm, StackFrame::INTERNAL); ++ __ Push(a1); ++ __ CallRuntime(Runtime::kThrowConstructorNonCallableError); ++ } ++} ++ ++// static ++void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a1 : the function to call (checked to be a JSBoundFunction) ++ // ----------------------------------- ++ __ AssertBoundFunction(a1); ++ ++ // Patch the receiver to [[BoundThis]]. ++ { ++ __ Ld_d(kScratchReg, ++ FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset)); ++ __ Alsl_d(a4, a0, sp, kPointerSizeLog2, t7); ++ __ St_d(kScratchReg, MemOperand(a4, 0)); ++ } ++ ++ // Load [[BoundArguments]] into a2 and length of that into a4. ++ __ Ld_d(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset)); ++ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset)); ++ ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a1 : the function to call (checked to be a JSBoundFunction) ++ // -- a2 : the [[BoundArguments]] (implemented as FixedArray) ++ // -- a4 : the number of [[BoundArguments]] ++ // ----------------------------------- ++ ++ // Reserve stack space for the [[BoundArguments]]. ++ { ++ Label done; ++ __ slli_d(a5, a4, kPointerSizeLog2); ++ __ Sub_d(sp, sp, Operand(a5)); ++ // Check the stack for overflow. We are not trying to catch interruptions ++ // (i.e. debug break and preemption) here, so check the "real stack limit". ++ LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit); ++ __ Branch(&done, hs, sp, Operand(kScratchReg)); ++ // Restore the stack pointer. ++ __ Add_d(sp, sp, Operand(a5)); ++ { ++ FrameScope scope(masm, StackFrame::MANUAL); ++ __ EnterFrame(StackFrame::INTERNAL); ++ __ CallRuntime(Runtime::kThrowStackOverflow); ++ } ++ __ bind(&done); ++ } ++ ++ // Relocate arguments down the stack. ++ { ++ Label loop, done_loop; ++ __ mov(a5, zero_reg); ++ __ bind(&loop); ++ __ Branch(&done_loop, gt, a5, Operand(a0)); ++ __ Alsl_d(a6, a4, sp, kPointerSizeLog2, t7); ++ __ Ld_d(kScratchReg, MemOperand(a6, 0)); ++ __ Alsl_d(a6, a5, sp, kPointerSizeLog2, t7); ++ __ St_d(kScratchReg, MemOperand(a6, 0)); ++ __ Add_d(a4, a4, Operand(1)); ++ __ Add_d(a5, a5, Operand(1)); ++ __ Branch(&loop); ++ __ bind(&done_loop); ++ } ++ ++ // Copy [[BoundArguments]] to the stack (below the arguments). ++ { ++ Label loop, done_loop; ++ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset)); ++ __ Add_d(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); ++ __ bind(&loop); ++ __ Sub_d(a4, a4, Operand(1)); ++ __ Branch(&done_loop, lt, a4, Operand(zero_reg)); ++ __ Alsl_d(a5, a4, a2, kPointerSizeLog2, t7); ++ __ Ld_d(kScratchReg, MemOperand(a5, 0)); ++ __ Alsl_d(a5, a0, sp, kPointerSizeLog2, t7); ++ __ St_d(kScratchReg, MemOperand(a5, 0)); ++ __ Add_d(a0, a0, Operand(1)); ++ __ Branch(&loop); ++ __ bind(&done_loop); ++ } ++ ++ // Call the [[BoundTargetFunction]] via the Call builtin. ++ __ Ld_d(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset)); ++ __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny), ++ RelocInfo::CODE_TARGET); ++} ++ ++// static ++void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a1 : the target to call (can be any Object). 
++ // ----------------------------------- ++ ++ Label non_callable, non_smi; ++ __ JumpIfSmi(a1, &non_callable); ++ __ bind(&non_smi); ++ __ GetObjectType(a1, t1, t2); ++ __ Jump(masm->isolate()->builtins()->CallFunction(mode), ++ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE)); ++ __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction), ++ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE)); ++ ++ // Check if target has a [[Call]] internal method. ++ __ Ld_bu(t1, FieldMemOperand(t1, Map::kBitFieldOffset)); ++ __ And(t1, t1, Operand(Map::Bits1::IsCallableBit::kMask)); ++ __ Branch(&non_callable, eq, t1, Operand(zero_reg)); ++ ++ __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq, ++ t2, Operand(JS_PROXY_TYPE)); ++ ++ // 2. Call to something else, which might have a [[Call]] internal method (if ++ // not we raise an exception). ++ // Overwrite the original receiver with the (original) target. ++ __ Alsl_d(kScratchReg, a0, sp, kPointerSizeLog2, t7); ++ __ St_d(a1, MemOperand(kScratchReg, 0)); ++ // Let the "call_as_function_delegate" take care of the rest. ++ __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1); ++ __ Jump(masm->isolate()->builtins()->CallFunction( ++ ConvertReceiverMode::kNotNullOrUndefined), ++ RelocInfo::CODE_TARGET); ++ ++ // 3. Call to something that is not callable. ++ __ bind(&non_callable); ++ { ++ FrameScope scope(masm, StackFrame::INTERNAL); ++ __ Push(a1); ++ __ CallRuntime(Runtime::kThrowCalledNonCallable); ++ } ++} ++ ++void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a1 : the constructor to call (checked to be a JSFunction) ++ // -- a3 : the new target (checked to be a constructor) ++ // ----------------------------------- ++ __ AssertConstructor(a1); ++ __ AssertFunction(a1); ++ ++ // Calling convention for function specific ConstructStubs require ++ // a2 to contain either an AllocationSite or undefined. ++ __ LoadRoot(a2, RootIndex::kUndefinedValue); ++ ++ Label call_generic_stub; ++ ++ // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric. ++ __ Ld_d(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); ++ __ Ld_wu(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset)); ++ __ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask)); ++ __ Branch(&call_generic_stub, eq, a4, Operand(zero_reg)); ++ ++ __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub), ++ RelocInfo::CODE_TARGET); ++ ++ __ bind(&call_generic_stub); ++ __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric), ++ RelocInfo::CODE_TARGET); ++} ++ ++// static ++void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a1 : the function to call (checked to be a JSBoundFunction) ++ // -- a3 : the new target (checked to be a constructor) ++ // ----------------------------------- ++ __ AssertConstructor(a1); ++ __ AssertBoundFunction(a1); ++ ++ // Load [[BoundArguments]] into a2 and length of that into a4. 
++ __ Ld_d(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset)); ++ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset)); ++ ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a1 : the function to call (checked to be a JSBoundFunction) ++ // -- a2 : the [[BoundArguments]] (implemented as FixedArray) ++ // -- a3 : the new target (checked to be a constructor) ++ // -- a4 : the number of [[BoundArguments]] ++ // ----------------------------------- ++ ++ // Reserve stack space for the [[BoundArguments]]. ++ { ++ Label done; ++ __ slli_d(a5, a4, kPointerSizeLog2); ++ __ Sub_d(sp, sp, Operand(a5)); ++ // Check the stack for overflow. We are not trying to catch interruptions ++ // (i.e. debug break and preemption) here, so check the "real stack limit". ++ LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit); ++ __ Branch(&done, hs, sp, Operand(kScratchReg)); ++ // Restore the stack pointer. ++ __ Add_d(sp, sp, Operand(a5)); ++ { ++ FrameScope scope(masm, StackFrame::MANUAL); ++ __ EnterFrame(StackFrame::INTERNAL); ++ __ CallRuntime(Runtime::kThrowStackOverflow); ++ } ++ __ bind(&done); ++ } ++ ++ // Relocate arguments down the stack. ++ { ++ Label loop, done_loop; ++ __ mov(a5, zero_reg); ++ __ bind(&loop); ++ __ Branch(&done_loop, ge, a5, Operand(a0)); ++ __ Alsl_d(a6, a4, sp, kPointerSizeLog2, t7); ++ __ Ld_d(kScratchReg, MemOperand(a6, 0)); ++ __ Alsl_d(a6, a5, sp, kPointerSizeLog2, t7); ++ __ St_d(kScratchReg, MemOperand(a6, 0)); ++ __ Add_d(a4, a4, Operand(1)); ++ __ Add_d(a5, a5, Operand(1)); ++ __ Branch(&loop); ++ __ bind(&done_loop); ++ } ++ ++ // Copy [[BoundArguments]] to the stack (below the arguments). ++ { ++ Label loop, done_loop; ++ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset)); ++ __ Add_d(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); ++ __ bind(&loop); ++ __ Sub_d(a4, a4, Operand(1)); ++ __ Branch(&done_loop, lt, a4, Operand(zero_reg)); ++ __ Alsl_d(a5, a4, a2, kPointerSizeLog2, t7); ++ __ Ld_d(kScratchReg, MemOperand(a5, 0)); ++ __ Alsl_d(a5, a0, sp, kPointerSizeLog2, t7); ++ __ St_d(kScratchReg, MemOperand(a5, 0)); ++ __ Add_d(a0, a0, Operand(1)); ++ __ Branch(&loop); ++ __ bind(&done_loop); ++ } ++ ++ // Patch new.target to [[BoundTargetFunction]] if new.target equals target. ++ { ++ Label skip_load; ++ __ Branch(&skip_load, ne, a1, Operand(a3)); ++ __ Ld_d(a3, ++ FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset)); ++ __ bind(&skip_load); ++ } ++ ++ // Construct the [[BoundTargetFunction]] via the Construct builtin. ++ __ Ld_d(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset)); ++ __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET); ++} ++ ++// static ++void Builtins::Generate_Construct(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a1 : the constructor to call (can be any Object) ++ // -- a3 : the new target (either the same as the constructor or ++ // the JSFunction on which new was invoked initially) ++ // ----------------------------------- ++ ++ // Check if target is a Smi. ++ Label non_constructor, non_proxy; ++ __ JumpIfSmi(a1, &non_constructor); ++ ++ // Check if target has a [[Construct]] internal method. 
++ __ Ld_d(t1, FieldMemOperand(a1, HeapObject::kMapOffset)); ++ __ Ld_bu(t3, FieldMemOperand(t1, Map::kBitFieldOffset)); ++ __ And(t3, t3, Operand(Map::Bits1::IsConstructorBit::kMask)); ++ __ Branch(&non_constructor, eq, t3, Operand(zero_reg)); ++ ++ // Dispatch based on instance type. ++ __ Ld_hu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset)); ++ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction), ++ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE)); ++ ++ // Only dispatch to bound functions after checking whether they are ++ // constructors. ++ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction), ++ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE)); ++ ++ // Only dispatch to proxies after checking whether they are constructors. ++ __ Branch(&non_proxy, ne, t2, Operand(JS_PROXY_TYPE)); ++ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy), ++ RelocInfo::CODE_TARGET); ++ ++ // Called Construct on an exotic Object with a [[Construct]] internal method. ++ __ bind(&non_proxy); ++ { ++ // Overwrite the original receiver with the (original) target. ++ __ Alsl_d(kScratchReg, a0, sp, kPointerSizeLog2, t7); ++ __ St_d(a1, MemOperand(kScratchReg, 0)); ++ // Let the "call_as_constructor_delegate" take care of the rest. ++ __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1); ++ __ Jump(masm->isolate()->builtins()->CallFunction(), ++ RelocInfo::CODE_TARGET); ++ } ++ ++ // Called Construct on an Object that doesn't have a [[Construct]] internal ++ // method. ++ __ bind(&non_constructor); ++ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable), ++ RelocInfo::CODE_TARGET); ++} ++ ++void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { ++ // State setup as expected by MacroAssembler::InvokePrologue. ++ // ----------- S t a t e ------------- ++ // -- a0: actual arguments count ++ // -- a1: function (passed through to callee) ++ // -- a2: expected arguments count ++ // -- a3: new target (passed through to callee) ++ // ----------------------------------- ++ ++ Label invoke, dont_adapt_arguments, stack_overflow; ++ ++ Label enough, too_few; ++ __ Branch(&dont_adapt_arguments, eq, a2, ++ Operand(kDontAdaptArgumentsSentinel)); ++ // We use Uless as the number of argument should always be greater than 0. ++ __ Branch(&too_few, Uless, a0, Operand(a2)); ++ ++ { // Enough parameters: actual >= expected. ++ // a0: actual number of arguments as a smi ++ // a1: function ++ // a2: expected number of arguments ++ // a3: new target (passed through to callee) ++ __ bind(&enough); ++ EnterArgumentsAdaptorFrame(masm); ++ Generate_StackOverflowCheck(masm, a2, a5, kScratchReg, &stack_overflow); ++ ++ // Calculate copy start address into a0 and copy end address into a4. ++ __ SmiScale(a0, a0, kPointerSizeLog2); ++ __ Add_d(a0, fp, a0); ++ // Adjust for return address and receiver. ++ __ Add_d(a0, a0, Operand(2 * kPointerSize)); ++ // Compute copy end address. ++ __ slli_d(a4, a2, kPointerSizeLog2); ++ __ sub_d(a4, a0, a4); ++ ++ // Copy the arguments (including the receiver) to the new stack frame. ++ // a0: copy start address ++ // a1: function ++ // a2: expected number of arguments ++ // a3: new target (passed through to callee) ++ // a4: copy end address ++ ++ Label copy; ++ __ bind(©); ++ __ Ld_d(a5, MemOperand(a0, 0)); ++ __ push(a5); ++ __ addi_d(a0, a0, -kPointerSize); ++ __ Branch(©, ge, a0, Operand(a4)); ++ ++ __ jmp(&invoke); ++ } ++ ++ { // Too few parameters: Actual < expected. 
++ __ bind(&too_few); ++ EnterArgumentsAdaptorFrame(masm); ++ Generate_StackOverflowCheck(masm, a2, a5, kScratchReg, &stack_overflow); ++ ++ // Calculate copy start address into a0 and copy end address into a7. ++ // a0: actual number of arguments as a smi ++ // a1: function ++ // a2: expected number of arguments ++ // a3: new target (passed through to callee) ++ __ SmiScale(a0, a0, kPointerSizeLog2); ++ __ Add_d(a0, fp, a0); ++ // Adjust for return address and receiver. ++ __ Add_d(a0, a0, Operand(2 * kPointerSize)); ++ // Compute copy end address. Also adjust for return address. ++ __ Add_d(a7, fp, kPointerSize); ++ ++ // Copy the arguments (including the receiver) to the new stack frame. ++ // a0: copy start address ++ // a1: function ++ // a2: expected number of arguments ++ // a3: new target (passed through to callee) ++ // a7: copy end address ++ Label copy; ++ __ bind(©); ++ __ Ld_d(a4, ++ MemOperand(a0, 0)); // Adjusted above for return addr and receiver. ++ __ Sub_d(sp, sp, kPointerSize); ++ __ Sub_d(a0, a0, kPointerSize); ++ __ St_d(a4, MemOperand(sp, 0)); ++ __ Branch(©, ne, a0, Operand(a7)); ++ ++ // Fill the remaining expected arguments with undefined. ++ // a1: function ++ // a2: expected number of arguments ++ // a3: new target (passed through to callee) ++ __ LoadRoot(a5, RootIndex::kUndefinedValue); ++ __ slli_d(a6, a2, kPointerSizeLog2); ++ __ Sub_d(a4, fp, Operand(a6)); ++ // Adjust for frame. ++ __ Sub_d(a4, a4, ++ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp + ++ kPointerSize)); ++ ++ Label fill; ++ __ bind(&fill); ++ __ Sub_d(sp, sp, kPointerSize); ++ __ St_d(a5, MemOperand(sp, 0)); ++ __ Branch(&fill, ne, sp, Operand(a4)); ++ } ++ ++ // Call the entry point. ++ __ bind(&invoke); ++ __ mov(a0, a2); ++ // a0 : expected number of arguments ++ // a1 : function (passed through to callee) ++ // a3: new target (passed through to callee) ++ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); ++ __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kCodeOffset)); ++ __ Add_d(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag)); ++ __ Call(a2); ++ ++ // Store offset of return address for deoptimizer. ++ masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset()); ++ ++ // Exit frame and return. ++ LeaveArgumentsAdaptorFrame(masm); ++ __ Ret(); ++ ++ // ------------------------------------------- ++ // Don't adapt arguments. ++ // ------------------------------------------- ++ __ bind(&dont_adapt_arguments); ++ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); ++ __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kCodeOffset)); ++ __ Add_d(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag)); ++ __ Jump(a2); ++ ++ __ bind(&stack_overflow); ++ { ++ FrameScope frame(masm, StackFrame::MANUAL); ++ __ CallRuntime(Runtime::kThrowStackOverflow); ++ __ break_(0xCC); ++ } ++} ++ ++void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { ++ // The function index was put in t0 by the jump table trampoline. ++ // Convert to Smi for the runtime call ++ __ SmiTag(kWasmCompileLazyFuncIndexRegister); ++ { ++ HardAbortScope hard_abort(masm); // Avoid calls to Abort. ++ FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY); ++ ++ // Save all parameter registers (see wasm-linkage.cc). They might be ++ // overwritten in the runtime call below. We don't have any callee-saved ++ // registers in wasm, so no need to store anything else. 
++ constexpr RegList gp_regs = Register::ListOf(a0, a2, a3, a4, a5, a6, a7); ++ constexpr RegList fp_regs = ++ DoubleRegister::ListOf(f2, f4, f6, f8, f10, f12, f14); ++ __ MultiPush(gp_regs); ++ __ MultiPushFPU(fp_regs); ++ ++ // Pass instance and function index as an explicit arguments to the runtime ++ // function. ++ __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister); ++ // Initialize the JavaScript context with 0. CEntry will use it to ++ // set the current context on the isolate. ++ __ Move(kContextRegister, Smi::zero()); ++ __ CallRuntime(Runtime::kWasmCompileLazy, 2); ++ __ mov(t8, a0); ++ ++ // Restore registers. ++ __ MultiPopFPU(fp_regs); ++ __ MultiPop(gp_regs); ++ } ++ // Finally, jump to the entrypoint. ++ __ Jump(t8); ++} ++ ++void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) { ++ HardAbortScope hard_abort(masm); // Avoid calls to Abort. ++ { ++ FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK); ++ ++ // Save all parameter registers. They might hold live values, we restore ++ // them after the runtime call. ++ __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs); ++ __ MultiPushFPU(WasmDebugBreakFrameConstants::kPushedFpRegs); ++ ++ // Initialize the JavaScript context with 0. CEntry will use it to ++ // set the current context on the isolate. ++ __ Move(cp, Smi::zero()); ++ __ CallRuntime(Runtime::kWasmDebugBreak, 0); ++ ++ // Restore registers. ++ __ MultiPopFPU(WasmDebugBreakFrameConstants::kPushedFpRegs); ++ __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs); ++ } ++ __ Ret(); ++} ++ ++void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, ++ SaveFPRegsMode save_doubles, ArgvMode argv_mode, ++ bool builtin_exit_frame) { ++ // Called from JavaScript; parameters are on stack as if calling JS function ++ // a0: number of arguments including receiver ++ // a1: pointer to builtin function ++ // fp: frame pointer (restored after C call) ++ // sp: stack pointer (restored as callee's sp after C call) ++ // cp: current context (C callee-saved) ++ // ++ // If argv_mode == kArgvInRegister: ++ // a2: pointer to the first argument ++ ++ if (argv_mode == kArgvInRegister) { ++ // Move argv into the correct register. ++ __ mov(s1, a2); ++ } else { ++ // Compute the argv pointer in a callee-saved register. ++ __ Alsl_d(s1, a0, sp, kPointerSizeLog2, t7); ++ __ Sub_d(s1, s1, kPointerSize); ++ } ++ ++ // Enter the exit frame that transitions from JavaScript to C++. ++ FrameScope scope(masm, StackFrame::MANUAL); ++ __ EnterExitFrame( ++ save_doubles == kSaveFPRegs, 0, ++ builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT); ++ ++ // s0: number of arguments including receiver (C callee-saved) ++ // s1: pointer to first argument (C callee-saved) ++ // s2: pointer to builtin function (C callee-saved) ++ ++ // Prepare arguments for C routine. ++ // a0 = argc ++ __ mov(s0, a0); ++ __ mov(s2, a1); ++ ++ // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We ++ // also need to reserve the 4 argument slots on the stack. ++ ++ __ AssertStackIsAligned(); ++ ++ // a0 = argc, a1 = argv, a2 = isolate ++ __ li(a2, ExternalReference::isolate_address(masm->isolate())); ++ __ mov(a1, s1); ++ ++ __ StoreReturnAddressAndCall(s2); ++ ++ // Result returned in a0 or a1:a0 - do not destroy these registers! ++ ++ // Check result for exception sentinel. 
++ Label exception_returned; ++ __ LoadRoot(a4, RootIndex::kException); ++ __ Branch(&exception_returned, eq, a4, Operand(a0)); ++ ++ // Check that there is no pending exception, otherwise we ++ // should have returned the exception sentinel. ++ if (FLAG_debug_code) { ++ Label okay; ++ ExternalReference pending_exception_address = ExternalReference::Create( ++ IsolateAddressId::kPendingExceptionAddress, masm->isolate()); ++ __ li(a2, pending_exception_address); ++ __ Ld_d(a2, MemOperand(a2, 0)); ++ __ LoadRoot(a4, RootIndex::kTheHoleValue); ++ // Cannot use check here as it attempts to generate call into runtime. ++ __ Branch(&okay, eq, a4, Operand(a2)); ++ __ stop(); ++ __ bind(&okay); ++ } ++ ++ // Exit C frame and return. ++ // a0:a1: result ++ // sp: stack pointer ++ // fp: frame pointer ++ Register argc = argv_mode == kArgvInRegister ++ // We don't want to pop arguments so set argc to no_reg. ++ ? no_reg ++ // s0: still holds argc (callee-saved). ++ : s0; ++ __ LeaveExitFrame(save_doubles == kSaveFPRegs, argc, EMIT_RETURN); ++ ++ // Handling of exception. ++ __ bind(&exception_returned); ++ ++ ExternalReference pending_handler_context_address = ExternalReference::Create( ++ IsolateAddressId::kPendingHandlerContextAddress, masm->isolate()); ++ ExternalReference pending_handler_entrypoint_address = ++ ExternalReference::Create( ++ IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate()); ++ ExternalReference pending_handler_fp_address = ExternalReference::Create( ++ IsolateAddressId::kPendingHandlerFPAddress, masm->isolate()); ++ ExternalReference pending_handler_sp_address = ExternalReference::Create( ++ IsolateAddressId::kPendingHandlerSPAddress, masm->isolate()); ++ ++ // Ask the runtime for help to determine the handler. This will set a0 to ++ // contain the current pending exception, don't clobber it. ++ ExternalReference find_handler = ++ ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler); ++ { ++ FrameScope scope(masm, StackFrame::MANUAL); ++ __ PrepareCallCFunction(3, 0, a0); ++ __ mov(a0, zero_reg); ++ __ mov(a1, zero_reg); ++ __ li(a2, ExternalReference::isolate_address(masm->isolate())); ++ __ CallCFunction(find_handler, 3); ++ } ++ ++ // Retrieve the handler context, SP and FP. ++ __ li(cp, pending_handler_context_address); ++ __ Ld_d(cp, MemOperand(cp, 0)); ++ __ li(sp, pending_handler_sp_address); ++ __ Ld_d(sp, MemOperand(sp, 0)); ++ __ li(fp, pending_handler_fp_address); ++ __ Ld_d(fp, MemOperand(fp, 0)); ++ ++ // If the handler is a JS frame, restore the context to the frame. Note that ++ // the context will be set to (cp == 0) for non-JS frames. ++ Label zero; ++ __ Branch(&zero, eq, cp, Operand(zero_reg)); ++ __ St_d(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); ++ __ bind(&zero); ++ ++ // Reset the masking register. This is done independent of the underlying ++ // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work ++ // with both configurations. It is safe to always do this, because the ++ // underlying register is caller-saved and can be arbitrarily clobbered. ++ __ ResetSpeculationPoisonRegister(); ++ ++ // Compute the handler entry address and jump to it. 
++ __ li(t7, pending_handler_entrypoint_address); ++ __ Ld_d(t7, MemOperand(t7, 0)); ++ __ Jump(t7); ++} ++ ++void Builtins::Generate_DoubleToI(MacroAssembler* masm) { ++ Label done; ++ Register result_reg = t0; ++ ++ Register scratch = GetRegisterThatIsNotOneOf(result_reg); ++ Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch); ++ Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2); ++ DoubleRegister double_scratch = kScratchDoubleReg; ++ ++ // Account for saved regs. ++ const int kArgumentOffset = 4 * kPointerSize; ++ ++ __ Push(result_reg); ++ __ Push(scratch, scratch2, scratch3); ++ ++ // Load double input. ++ __ Fld_d(double_scratch, MemOperand(sp, kArgumentOffset)); ++ ++ // Clear cumulative exception flags and save the FCSR. ++ // __ movfcsr2gr(scratch2, FCSR); ++ // __ movgr2fcsr(FCSR, zero_reg); ++ ++ // Try a conversion to a signed integer. ++ __ ftintrz_w_d(double_scratch, double_scratch); ++ // Move the converted value into the result register. ++ __ movfr2gr_s(scratch3, double_scratch); ++ ++ // Retrieve and restore the FCSR. ++ __ movfcsr2gr(scratch); // __ cfc1(scratch, FCSR); ++ // __ ctc1(scratch2, FCSR); ++ ++ // Check for overflow and NaNs. ++ __ And( ++ scratch, scratch, ++ kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask); ++ // If we had no exceptions then set result_reg and we are done. ++ Label error; ++ __ Branch(&error, ne, scratch, Operand(zero_reg)); ++ __ Move(result_reg, scratch3); ++ __ Branch(&done); ++ __ bind(&error); ++ ++ // Load the double value and perform a manual truncation. ++ Register input_high = scratch2; ++ Register input_low = scratch3; ++ ++ __ Ld_w(input_low, ++ MemOperand(sp, kArgumentOffset + Register::kMantissaOffset)); ++ __ Ld_w(input_high, ++ MemOperand(sp, kArgumentOffset + Register::kExponentOffset)); ++ ++ Label normal_exponent; ++ // Extract the biased exponent in result. ++ __ bstrpick_w(result_reg, input_high, ++ HeapNumber::kExponentShift + HeapNumber::kExponentBits - 1, ++ HeapNumber::kExponentShift); ++ ++ // Check for Infinity and NaNs, which should return 0. ++ __ Sub_w(scratch, result_reg, HeapNumber::kExponentMask); ++ __ Movz(result_reg, zero_reg, scratch); ++ __ Branch(&done, eq, scratch, Operand(zero_reg)); ++ ++ // Express exponent as delta to (number of mantissa bits + 31). ++ __ Sub_w(result_reg, result_reg, ++ Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31)); ++ ++ // If the delta is strictly positive, all bits would be shifted away, ++ // which means that we can return 0. ++ __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg)); ++ __ mov(result_reg, zero_reg); ++ __ Branch(&done); ++ ++ __ bind(&normal_exponent); ++ const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1; ++ // Calculate shift. ++ __ Add_w(scratch, result_reg, ++ Operand(kShiftBase + HeapNumber::kMantissaBits)); ++ ++ // Save the sign. ++ Register sign = result_reg; ++ result_reg = no_reg; ++ __ And(sign, input_high, Operand(HeapNumber::kSignMask)); ++ ++ // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need ++ // to check for this specific case. ++ Label high_shift_needed, high_shift_done; ++ __ Branch(&high_shift_needed, lt, scratch, Operand(32)); ++ __ mov(input_high, zero_reg); ++ __ Branch(&high_shift_done); ++ __ bind(&high_shift_needed); ++ ++ // Set the implicit 1 before the mantissa part in input_high. 
++ __ Or(input_high, input_high, ++ Operand(1 << HeapNumber::kMantissaBitsInTopWord)); ++ // Shift the mantissa bits to the correct position. ++ // We don't need to clear non-mantissa bits as they will be shifted away. ++ // If they weren't, it would mean that the answer is in the 32bit range. ++ __ sll_w(input_high, input_high, scratch); ++ ++ __ bind(&high_shift_done); ++ ++ // Replace the shifted bits with bits from the lower mantissa word. ++ Label pos_shift, shift_done; ++ __ li(kScratchReg, 32); ++ __ sub_w(scratch, kScratchReg, scratch); ++ __ Branch(&pos_shift, ge, scratch, Operand(zero_reg)); ++ ++ // Negate scratch. ++ __ Sub_w(scratch, zero_reg, scratch); ++ __ sll_w(input_low, input_low, scratch); ++ __ Branch(&shift_done); ++ ++ __ bind(&pos_shift); ++ __ srl_w(input_low, input_low, scratch); ++ ++ __ bind(&shift_done); ++ __ Or(input_high, input_high, Operand(input_low)); ++ // Restore sign if necessary. ++ __ mov(scratch, sign); ++ result_reg = sign; ++ sign = no_reg; ++ __ Sub_w(result_reg, zero_reg, input_high); ++ __ Movz(result_reg, input_high, scratch); ++ ++ __ bind(&done); ++ ++ __ St_d(result_reg, MemOperand(sp, kArgumentOffset)); ++ __ Pop(scratch, scratch2, scratch3); ++ __ Pop(result_reg); ++ __ Ret(); ++} ++ ++namespace { ++ ++int AddressOffset(ExternalReference ref0, ExternalReference ref1) { ++ int64_t offset = (ref0.address() - ref1.address()); ++ DCHECK(static_cast(offset) == offset); ++ return static_cast(offset); ++} ++ ++// Calls an API function. Allocates HandleScope, extracts returned value ++// from handle and propagates exceptions. Restores context. stack_space ++// - space to be unwound on exit (includes the call JS arguments space and ++// the additional space allocated for the fast call). ++void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, ++ ExternalReference thunk_ref, int stack_space, ++ MemOperand* stack_space_operand, ++ MemOperand return_value_operand) { ++ Isolate* isolate = masm->isolate(); ++ ExternalReference next_address = ++ ExternalReference::handle_scope_next_address(isolate); ++ const int kNextOffset = 0; ++ const int kLimitOffset = AddressOffset( ++ ExternalReference::handle_scope_limit_address(isolate), next_address); ++ const int kLevelOffset = AddressOffset( ++ ExternalReference::handle_scope_level_address(isolate), next_address); ++ ++ DCHECK(function_address == a1 || function_address == a2); ++ ++ Label profiler_enabled, end_profiler_check; ++ __ li(t7, ExternalReference::is_profiling_address(isolate)); ++ __ Ld_b(t7, MemOperand(t7, 0)); ++ __ Branch(&profiler_enabled, ne, t7, Operand(zero_reg)); ++ __ li(t7, ExternalReference::address_of_runtime_stats_flag()); ++ __ Ld_w(t7, MemOperand(t7, 0)); ++ __ Branch(&profiler_enabled, ne, t7, Operand(zero_reg)); ++ { ++ // Call the api function directly. ++ __ mov(t7, function_address); ++ __ Branch(&end_profiler_check); ++ } ++ ++ __ bind(&profiler_enabled); ++ { ++ // Additional parameter is the address of the actual callback. ++ __ li(t7, thunk_ref); ++ } ++ __ bind(&end_profiler_check); ++ ++ // Allocate HandleScope in callee-save registers. 
++ __ li(s5, next_address); ++ __ Ld_d(s0, MemOperand(s5, kNextOffset)); ++ __ Ld_d(s1, MemOperand(s5, kLimitOffset)); ++ __ Ld_w(s2, MemOperand(s5, kLevelOffset)); ++ __ Add_w(s2, s2, Operand(1)); ++ __ St_w(s2, MemOperand(s5, kLevelOffset)); ++ ++ __ StoreReturnAddressAndCall(t7); ++ ++ Label promote_scheduled_exception; ++ Label delete_allocated_handles; ++ Label leave_exit_frame; ++ Label return_value_loaded; ++ ++ // Load value from ReturnValue. ++ __ Ld_d(a0, return_value_operand); ++ __ bind(&return_value_loaded); ++ ++ // No more valid handles (the result handle was the last one). Restore ++ // previous handle scope. ++ __ St_d(s0, MemOperand(s5, kNextOffset)); ++ if (__ emit_debug_code()) { ++ __ Ld_w(a1, MemOperand(s5, kLevelOffset)); ++ __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1, ++ Operand(s2)); ++ } ++ __ Sub_w(s2, s2, Operand(1)); ++ __ St_w(s2, MemOperand(s5, kLevelOffset)); ++ __ Ld_d(kScratchReg, MemOperand(s5, kLimitOffset)); ++ __ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg)); ++ ++ // Leave the API exit frame. ++ __ bind(&leave_exit_frame); ++ ++ if (stack_space_operand == nullptr) { ++ DCHECK_NE(stack_space, 0); ++ __ li(s0, Operand(stack_space)); ++ } else { ++ DCHECK_EQ(stack_space, 0); ++ STATIC_ASSERT(kCArgSlotCount == 0); ++ __ Ld_d(s0, *stack_space_operand); ++ } ++ ++ static constexpr bool kDontSaveDoubles = false; ++ static constexpr bool kRegisterContainsSlotCount = false; ++ __ LeaveExitFrame(kDontSaveDoubles, s0, NO_EMIT_RETURN, ++ kRegisterContainsSlotCount); ++ ++ // Check if the function scheduled an exception. ++ __ LoadRoot(a4, RootIndex::kTheHoleValue); ++ __ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate)); ++ __ Ld_d(a5, MemOperand(kScratchReg, 0)); ++ __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5)); ++ ++ __ Ret(); ++ ++ // Re-throw by promoting a scheduled exception. ++ __ bind(&promote_scheduled_exception); ++ __ TailCallRuntime(Runtime::kPromoteScheduledException); ++ ++ // HandleScope limit has changed. Delete allocated extensions. ++ __ bind(&delete_allocated_handles); ++ __ St_d(s1, MemOperand(s5, kLimitOffset)); ++ __ mov(s0, a0); ++ __ PrepareCallCFunction(1, s1); ++ __ li(a0, ExternalReference::isolate_address(isolate)); ++ __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1); ++ __ mov(a0, s0); ++ __ jmp(&leave_exit_frame); ++} ++ ++} // namespace ++ ++void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- cp : context ++ // -- a1 : api function address ++ // -- a2 : arguments count (not including the receiver) ++ // -- a3 : call data ++ // -- a0 : holder ++ // -- ++ // -- sp[0] : last argument ++ // -- ... ++ // -- sp[(argc - 1) * 8] : first argument ++ // -- sp[(argc + 0) * 8] : receiver ++ // ----------------------------------- ++ ++ Register api_function_address = a1; ++ Register argc = a2; ++ Register call_data = a3; ++ Register holder = a0; ++ Register scratch = t0; ++ Register base = t1; // For addressing MemOperands on the stack. 
++ ++ DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch, ++ base)); ++ ++ using FCA = FunctionCallbackArguments; ++ ++ STATIC_ASSERT(FCA::kArgsLength == 6); ++ STATIC_ASSERT(FCA::kNewTargetIndex == 5); ++ STATIC_ASSERT(FCA::kDataIndex == 4); ++ STATIC_ASSERT(FCA::kReturnValueOffset == 3); ++ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2); ++ STATIC_ASSERT(FCA::kIsolateIndex == 1); ++ STATIC_ASSERT(FCA::kHolderIndex == 0); ++ ++ // Set up FunctionCallbackInfo's implicit_args on the stack as follows: ++ // ++ // Target state: ++ // sp[0 * kPointerSize]: kHolder ++ // sp[1 * kPointerSize]: kIsolate ++ // sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue) ++ // sp[3 * kPointerSize]: undefined (kReturnValue) ++ // sp[4 * kPointerSize]: kData ++ // sp[5 * kPointerSize]: undefined (kNewTarget) ++ ++ // Set up the base register for addressing through MemOperands. It will point ++ // at the receiver (located at sp + argc * kPointerSize). ++ __ Alsl_d(base, argc, sp, kPointerSizeLog2, t7); ++ ++ // Reserve space on the stack. ++ __ Sub_d(sp, sp, Operand(FCA::kArgsLength * kPointerSize)); ++ ++ // kHolder. ++ __ St_d(holder, MemOperand(sp, 0 * kPointerSize)); ++ ++ // kIsolate. ++ __ li(scratch, ExternalReference::isolate_address(masm->isolate())); ++ __ St_d(scratch, MemOperand(sp, 1 * kPointerSize)); ++ ++ // kReturnValueDefaultValue and kReturnValue. ++ __ LoadRoot(scratch, RootIndex::kUndefinedValue); ++ __ St_d(scratch, MemOperand(sp, 2 * kPointerSize)); ++ __ St_d(scratch, MemOperand(sp, 3 * kPointerSize)); ++ ++ // kData. ++ __ St_d(call_data, MemOperand(sp, 4 * kPointerSize)); ++ ++ // kNewTarget. ++ __ St_d(scratch, MemOperand(sp, 5 * kPointerSize)); ++ ++ // Keep a pointer to kHolder (= implicit_args) in a scratch register. ++ // We use it below to set up the FunctionCallbackInfo object. ++ __ mov(scratch, sp); ++ ++ // Allocate the v8::Arguments structure in the arguments' space since ++ // it's not controlled by GC. ++ static constexpr int kApiStackSpace = 4; ++ static constexpr bool kDontSaveDoubles = false; ++ FrameScope frame_scope(masm, StackFrame::MANUAL); ++ __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace); ++ ++ // EnterExitFrame may align the sp. ++ ++ // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above). ++ // Arguments are after the return address (pushed by EnterExitFrame()). ++ __ St_d(scratch, MemOperand(sp, 1 * kPointerSize)); ++ ++ // FunctionCallbackInfo::values_ (points at the first varargs argument passed ++ // on the stack). ++ __ Sub_d(scratch, base, Operand(1 * kPointerSize)); ++ __ St_d(scratch, MemOperand(sp, 2 * kPointerSize)); ++ ++ // FunctionCallbackInfo::length_. ++ // Stored as int field, 32-bit integers within struct on stack always left ++ // justified by n64 ABI. ++ __ St_w(argc, MemOperand(sp, 3 * kPointerSize)); ++ ++ // We also store the number of bytes to drop from the stack after returning ++ // from the API function here. ++ // Note: Unlike on other architectures, this stores the number of slots to ++ // drop, not the number of bytes. ++ __ Add_d(scratch, argc, Operand(FCA::kArgsLength + 1 /* receiver */)); ++ __ St_d(scratch, MemOperand(sp, 4 * kPointerSize)); ++ ++ // v8::InvocationCallback's argument. ++ DCHECK(!AreAliased(api_function_address, scratch, a0)); ++ __ Add_d(a0, sp, Operand(1 * kPointerSize)); ++ ++ ExternalReference thunk_ref = ExternalReference::invoke_function_callback(); ++ ++ // There are two stack slots above the arguments we constructed on the stack. 
++ // TODO(jgruber): Document what these arguments are. ++ static constexpr int kStackSlotsAboveFCA = 2; ++ MemOperand return_value_operand( ++ fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize); ++ ++ static constexpr int kUseStackSpaceOperand = 0; ++ MemOperand stack_space_operand(sp, 4 * kPointerSize); ++ ++ AllowExternalCallThatCantCauseGC scope(masm); ++ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, ++ kUseStackSpaceOperand, &stack_space_operand, ++ return_value_operand); ++} ++ ++void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { ++ // Build v8::PropertyCallbackInfo::args_ array on the stack and push property ++ // name below the exit frame to make GC aware of them. ++ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0); ++ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1); ++ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2); ++ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3); ++ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4); ++ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5); ++ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6); ++ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7); ++ ++ Register receiver = ApiGetterDescriptor::ReceiverRegister(); ++ Register holder = ApiGetterDescriptor::HolderRegister(); ++ Register callback = ApiGetterDescriptor::CallbackRegister(); ++ Register scratch = a4; ++ DCHECK(!AreAliased(receiver, holder, callback, scratch)); ++ ++ Register api_function_address = a2; ++ ++ // Here and below +1 is for name() pushed after the args_ array. ++ using PCA = PropertyCallbackArguments; ++ __ Sub_d(sp, sp, (PCA::kArgsLength + 1) * kPointerSize); ++ __ St_d(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize)); ++ __ Ld_d(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset)); ++ __ St_d(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize)); ++ __ LoadRoot(scratch, RootIndex::kUndefinedValue); ++ __ St_d(scratch, ++ MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize)); ++ __ St_d(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) * ++ kPointerSize)); ++ __ li(scratch, ExternalReference::isolate_address(masm->isolate())); ++ __ St_d(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize)); ++ __ St_d(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize)); ++ // should_throw_on_error -> false ++ DCHECK_EQ(0, Smi::zero().ptr()); ++ __ St_d(zero_reg, ++ MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize)); ++ __ Ld_d(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset)); ++ __ St_d(scratch, MemOperand(sp, 0 * kPointerSize)); ++ ++ // v8::PropertyCallbackInfo::args_ array and name handle. ++ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1; ++ ++ // Load address of v8::PropertyAccessorInfo::args_ array and name handle. ++ __ mov(a0, sp); // a0 = Handle ++ __ Add_d(a1, a0, Operand(1 * kPointerSize)); // a1 = v8::PCI::args_ ++ ++ const int kApiStackSpace = 1; ++ FrameScope frame_scope(masm, StackFrame::MANUAL); ++ __ EnterExitFrame(false, kApiStackSpace); ++ ++ // Create v8::PropertyCallbackInfo object on the stack and initialize ++ // it's args_ field. 
++ __ St_d(a1, MemOperand(sp, 1 * kPointerSize)); ++ __ Add_d(a1, sp, Operand(1 * kPointerSize)); ++ // a1 = v8::PropertyCallbackInfo& ++ ++ ExternalReference thunk_ref = ++ ExternalReference::invoke_accessor_getter_callback(); ++ ++ __ Ld_d(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset)); ++ __ Ld_d(api_function_address, ++ FieldMemOperand(scratch, Foreign::kForeignAddressOffset)); ++ ++ // +3 is to skip prolog, return address and name handle. ++ MemOperand return_value_operand( ++ fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize); ++ MemOperand* const kUseStackSpaceConstant = nullptr; ++ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, ++ kStackUnwindSpace, kUseStackSpaceConstant, ++ return_value_operand); ++} ++ ++void Builtins::Generate_DirectCEntry(MacroAssembler* masm) { ++ // The sole purpose of DirectCEntry is for movable callers (e.g. any general ++ // purpose Code object) to be able to call into C functions that may trigger ++ // GC and thus move the caller. ++ // ++ // DirectCEntry places the return address on the stack (updated by the GC), ++ // making the call GC safe. The irregexp backend relies on this. ++ ++ // Make place for arguments to fit C calling convention. Callers use ++ // EnterExitFrame/LeaveExitFrame so they handle stack restoring and we don't ++ // have to do that here. Any caller must drop kCArgsSlotsSize stack space ++ // after the call. ++ __ addi_d(sp, sp, -kCArgsSlotsSize); ++ ++ __ St_d(ra, MemOperand(sp, kCArgsSlotsSize)); // Store the return address. ++ __ Call(t7); // Call the C++ function. ++ __ Ld_d(t7, MemOperand(sp, kCArgsSlotsSize)); // Return to calling code. ++ ++ if (FLAG_debug_code && FLAG_enable_slow_asserts) { ++ // In case of an error the return address may point to a memory area ++ // filled with kZapValue by the GC. Dereference the address and check for ++ // this. 
++    __ Ld_d(a4, MemOperand(t7, 0));
++    __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, a4,
++              Operand(reinterpret_cast<uint64_t>(kZapValue)));
++  }
++
++  __ Jump(t7);
++}
++
++#undef __
++
++}  // namespace internal
++}  // namespace v8
++
++#endif  // V8_TARGET_ARCH_LA64
+diff --git a/src/3rdparty/chromium/v8/src/codegen/assembler-arch.h b/src/3rdparty/chromium/v8/src/codegen/assembler-arch.h
+index d56b372504..6d5ad8bbf1 100644
+--- a/src/3rdparty/chromium/v8/src/codegen/assembler-arch.h
++++ b/src/3rdparty/chromium/v8/src/codegen/assembler-arch.h
+@@ -21,6 +21,8 @@
+ #include "src/codegen/mips/assembler-mips.h"
+ #elif V8_TARGET_ARCH_MIPS64
+ #include "src/codegen/mips64/assembler-mips64.h"
++#elif V8_TARGET_ARCH_LA64
++#include "src/codegen/la64/assembler-la64.h"
+ #elif V8_TARGET_ARCH_S390
+ #include "src/codegen/s390/assembler-s390.h"
+ #else
+diff --git a/src/3rdparty/chromium/v8/src/codegen/assembler-inl.h b/src/3rdparty/chromium/v8/src/codegen/assembler-inl.h
+index 8c81315d50..304eed44f0 100644
+--- a/src/3rdparty/chromium/v8/src/codegen/assembler-inl.h
++++ b/src/3rdparty/chromium/v8/src/codegen/assembler-inl.h
+@@ -21,6 +21,8 @@
+ #include "src/codegen/mips/assembler-mips-inl.h"
+ #elif V8_TARGET_ARCH_MIPS64
+ #include "src/codegen/mips64/assembler-mips64-inl.h"
++#elif V8_TARGET_ARCH_LA64
++#include "src/codegen/la64/assembler-la64-inl.h"
+ #elif V8_TARGET_ARCH_S390
+ #include "src/codegen/s390/assembler-s390-inl.h"
+ #else
+diff --git a/src/3rdparty/chromium/v8/src/codegen/constants-arch.h b/src/3rdparty/chromium/v8/src/codegen/constants-arch.h
+index 7a222c960f..701c3c08a9 100644
+--- a/src/3rdparty/chromium/v8/src/codegen/constants-arch.h
++++ b/src/3rdparty/chromium/v8/src/codegen/constants-arch.h
+@@ -15,6 +15,8 @@
+ #include "src/codegen/mips/constants-mips.h"  // NOLINT
+ #elif V8_TARGET_ARCH_MIPS64
+ #include "src/codegen/mips64/constants-mips64.h"  // NOLINT
++#elif V8_TARGET_ARCH_LA64
++#include "src/codegen/la64/constants-la64.h"  // NOLINT
+ #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+ #include "src/codegen/ppc/constants-ppc.h"  // NOLINT
+ #elif V8_TARGET_ARCH_S390
+diff --git a/src/3rdparty/chromium/v8/src/codegen/cpu-features.h b/src/3rdparty/chromium/v8/src/codegen/cpu-features.h
+index 14c94ebae9..d0bb89367e 100644
+--- a/src/3rdparty/chromium/v8/src/codegen/cpu-features.h
++++ b/src/3rdparty/chromium/v8/src/codegen/cpu-features.h
+@@ -47,6 +47,9 @@ enum CpuFeature {
+   MIPSr6,
+   MIPS_SIMD,  // MSA instructions
+
++#elif V8_TARGET_ARCH_LA64
++  FPU,  // TODO
++
+ #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+   FPU,
+   FPR_GPR_MOV,
+diff --git a/src/3rdparty/chromium/v8/src/codegen/external-reference.cc b/src/3rdparty/chromium/v8/src/codegen/external-reference.cc
+index 7a42e40461..3bf4edef3b 100644
+--- a/src/3rdparty/chromium/v8/src/codegen/external-reference.cc
++++ b/src/3rdparty/chromium/v8/src/codegen/external-reference.cc
+@@ -472,6 +472,8 @@ ExternalReference ExternalReference::invoke_accessor_getter_callback() {
+ #define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState
+ #elif V8_TARGET_ARCH_MIPS64
+ #define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState
++#elif V8_TARGET_ARCH_LA64
++#define re_stack_check_func RegExpMacroAssemblerLA64::CheckStackGuardState
+ #elif V8_TARGET_ARCH_S390
+ #define re_stack_check_func RegExpMacroAssemblerS390::CheckStackGuardState
+ #else
+diff --git a/src/3rdparty/chromium/v8/src/codegen/interface-descriptors.cc b/src/3rdparty/chromium/v8/src/codegen/interface-descriptors.cc
+index 
42b45c0f33..d0b2bfe1e7 100644 +--- a/src/3rdparty/chromium/v8/src/codegen/interface-descriptors.cc ++++ b/src/3rdparty/chromium/v8/src/codegen/interface-descriptors.cc +@@ -128,7 +128,8 @@ const char* CallInterfaceDescriptor::DebugName() const { + return ""; + } + +-#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64) ++#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64) && \ ++ !defined(V8_TARGET_ARCH_LA64) + bool CallInterfaceDescriptor::IsValidFloatParameterRegister(Register reg) { + return true; + } +@@ -412,7 +413,8 @@ void WasmAtomicNotifyDescriptor::InitializePlatformSpecific( + DefaultInitializePlatformSpecific(data, kParameterCount); + } + +-#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64) ++#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64) && \ ++ !defined(V8_TARGET_ARCH_LA64) + void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + DefaultInitializePlatformSpecific(data, kParameterCount); +diff --git a/src/3rdparty/chromium/v8/src/codegen/la64/assembler-la64-inl.h b/src/3rdparty/chromium/v8/src/codegen/la64/assembler-la64-inl.h +new file mode 100644 +index 0000000000..e2ead3948c +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/codegen/la64/assembler-la64-inl.h +@@ -0,0 +1,268 @@ ++// Copyright (c) 1994-2006 Sun Microsystems Inc. ++// All Rights Reserved. ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// - Redistributions of source code must retain the above copyright notice, ++// this list of conditions and the following disclaimer. ++// ++// - Redistribution in binary form must reproduce the above copyright ++// notice, this list of conditions and the following disclaimer in the ++// documentation and/or other materials provided with the distribution. ++// ++// - Neither the name of Sun Microsystems or the names of contributors may ++// be used to endorse or promote products derived from this software without ++// specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS ++// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, ++// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ++// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR ++// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ++// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ++// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ++// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ++// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ++// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++// The original source code covered by the above license above has been ++// modified significantly by Google Inc. ++// Copyright 2012 the V8 project authors. All rights reserved. 
++ ++#ifndef V8_CODEGEN_LA64_ASSEMBLER_LA64_INL_H_ ++#define V8_CODEGEN_LA64_ASSEMBLER_LA64_INL_H_ ++ ++#include "src/codegen/la64/assembler-la64.h" ++ ++#include "src/codegen/assembler.h" ++#include "src/debug/debug.h" ++#include "src/objects/objects-inl.h" ++ ++namespace v8 { ++namespace internal { ++ ++bool CpuFeatures::SupportsOptimizer() { return IsSupported(FPU); } ++ ++bool CpuFeatures::SupportsWasmSimd128() { return false; } ++ ++// ----------------------------------------------------------------------------- ++// Operand and MemOperand. ++ ++bool Operand::is_reg() const { return rm_.is_valid(); } ++ ++int64_t Operand::immediate() const { ++ DCHECK(!is_reg()); ++ DCHECK(!IsHeapObjectRequest()); ++ return value_.immediate; ++} ++ ++// ----------------------------------------------------------------------------- ++// RelocInfo. ++ ++void RelocInfo::apply(intptr_t delta) { ++ if (IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)) { ++ // Absolute code pointer inside code object moves with the code object. ++ Assembler::RelocateInternalReference(rmode_, pc_, delta); ++ } ++} ++ ++Address RelocInfo::target_address() { ++ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_)); ++ return Assembler::target_address_at(pc_, constant_pool_); ++} ++ ++Address RelocInfo::target_address_address() { ++ DCHECK(HasTargetAddressAddress()); ++ // Read the address of the word containing the target_address in an ++ // instruction stream. ++ // The only architecture-independent user of this function is the serializer. ++ // The serializer uses it to find out how many raw bytes of instruction to ++ // output before the next target. ++ // For an instruction like LUI/ORI where the target bits are mixed into the ++ // instruction bits, the size of the target will be zero, indicating that the ++ // serializer should not step forward in memory after a target is resolved ++ // and written. In this case the target_address_address function should ++ // return the end of the instructions to be patched, allowing the ++ // deserializer to deserialize the instructions as raw bytes and put them in ++ // place, ready to be patched with the target. After jump optimization, ++ // that is the address of the instruction that follows J/JAL/JR/JALR ++ // instruction. ++ return pc_ + Assembler::kInstructionsFor64BitConstant * kInstrSize; ++} ++ ++Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); } ++ ++int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; } ++ ++void Assembler::deserialization_set_special_target_at( ++ Address instruction_payload, Code code, Address target) { ++ set_target_address_at(instruction_payload, ++ !code.is_null() ? code.constant_pool() : kNullAddress, ++ target); ++} ++ ++int Assembler::deserialization_special_target_size( ++ Address instruction_payload) { ++ return kSpecialTargetSize; ++} ++ ++void Assembler::set_target_internal_reference_encoded_at(Address pc, ++ Address target) { ++ // TODO, see AssembleJumpTable, la64 does not generate internal reference? ++ abort(); ++} ++ ++void Assembler::deserialization_set_target_internal_reference_at( ++ Address pc, Address target, RelocInfo::Mode mode) { ++ if (mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) { ++ DCHECK(IsJ(instr_at(pc))); ++ set_target_internal_reference_encoded_at(pc, target); ++ } else { ++ DCHECK(mode == RelocInfo::INTERNAL_REFERENCE); ++ Memory
(pc) = target;
++  }
++}
++
++HeapObject RelocInfo::target_object() {
++  DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
++  return HeapObject::cast(
++      Object(Assembler::target_address_at(pc_, constant_pool_)));
++}
++
++HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
++  return target_object();
++}
++
++Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
++  DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
++  return Handle<HeapObject>(reinterpret_cast<Address*>(
++      Assembler::target_address_at(pc_, constant_pool_)));
++}
++
++void RelocInfo::set_target_object(Heap* heap, HeapObject target,
++                                  WriteBarrierMode write_barrier_mode,
++                                  ICacheFlushMode icache_flush_mode) {
++  DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
++  Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
++                                   icache_flush_mode);
++  if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
++      !FLAG_disable_write_barriers) {
++    WriteBarrierForCode(host(), this, target);
++  }
++}
++
++Address RelocInfo::target_external_reference() {
++  DCHECK(rmode_ == EXTERNAL_REFERENCE);
++  return Assembler::target_address_at(pc_, constant_pool_);
++}
++
++void RelocInfo::set_target_external_reference(
++    Address target, ICacheFlushMode icache_flush_mode) {
++  DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
++  Assembler::set_target_address_at(pc_, constant_pool_, target,
++                                   icache_flush_mode);
++}
++
++Address RelocInfo::target_internal_reference() {
++  if (rmode_ == INTERNAL_REFERENCE) {
++    return Memory<Address>(pc_);
++  } else {
++    UNREACHABLE();
++  }
++}
++
++Address RelocInfo::target_internal_reference_address() {
++  DCHECK(rmode_ == INTERNAL_REFERENCE || rmode_ == INTERNAL_REFERENCE_ENCODED);
++  return pc_;
++}
++
++Address RelocInfo::target_runtime_entry(Assembler* origin) {
++  DCHECK(IsRuntimeEntry(rmode_));
++  return target_address();
++}
++
++void RelocInfo::set_target_runtime_entry(Address target,
++                                         WriteBarrierMode write_barrier_mode,
++                                         ICacheFlushMode icache_flush_mode) {
++  DCHECK(IsRuntimeEntry(rmode_));
++  if (target_address() != target)
++    set_target_address(target, write_barrier_mode, icache_flush_mode);
++}
++
++Address RelocInfo::target_off_heap_target() {
++  DCHECK(IsOffHeapTarget(rmode_));
++  return Assembler::target_address_at(pc_, constant_pool_);
++}
++
++void RelocInfo::WipeOut() {
++  DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
++         IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
++         IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_) ||
++         IsOffHeapTarget(rmode_));
++  if (IsInternalReference(rmode_)) {
++    Memory<Address>
(pc_) = kNullAddress; ++ } else if (IsInternalReferenceEncoded(rmode_)) { ++ Assembler::set_target_internal_reference_encoded_at(pc_, kNullAddress); ++ } else { ++ Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress); ++ } ++} ++ ++// ----------------------------------------------------------------------------- ++// Assembler. ++ ++void Assembler::CheckBuffer() { ++ if (buffer_space() <= kGap) { ++ GrowBuffer(); ++ } ++} ++ ++void Assembler::EmitHelper(Instr x) { ++ *reinterpret_cast(pc_) = x; ++ pc_ += kInstrSize; ++ CheckTrampolinePoolQuick(); ++} ++ ++template <> ++inline void Assembler::EmitHelper(uint8_t x); ++ ++template ++void Assembler::EmitHelper(T x) { ++ *reinterpret_cast(pc_) = x; ++ pc_ += sizeof(x); ++ CheckTrampolinePoolQuick(); ++} ++ ++template <> ++void Assembler::EmitHelper(uint8_t x) { ++ *reinterpret_cast(pc_) = x; ++ pc_ += sizeof(x); ++ if (reinterpret_cast(pc_) % kInstrSize == 0) { ++ CheckTrampolinePoolQuick(); ++ } ++} ++ ++void Assembler::emit(Instr x) { ++ if (!is_buffer_growth_blocked()) { ++ CheckBuffer(); ++ } ++ EmitHelper(x); ++} ++ ++void Assembler::emit(uint64_t data) { ++ // CheckForEmitInForbiddenSlot(); ++ if (!is_buffer_growth_blocked()) { ++ CheckBuffer(); ++ } ++ EmitHelper(data); ++} ++ ++EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); } ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_CODEGEN_LA64_ASSEMBLER_LA64_INL_H_ +diff --git a/src/3rdparty/chromium/v8/src/codegen/la64/assembler-la64.cc b/src/3rdparty/chromium/v8/src/codegen/la64/assembler-la64.cc +new file mode 100644 +index 0000000000..0272caeaaf +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/codegen/la64/assembler-la64.cc +@@ -0,0 +1,2856 @@ ++// Copyright (c) 1994-2006 Sun Microsystems Inc. ++// All Rights Reserved. ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// - Redistributions of source code must retain the above copyright notice, ++// this list of conditions and the following disclaimer. ++// ++// - Redistribution in binary form must reproduce the above copyright ++// notice, this list of conditions and the following disclaimer in the ++// documentation and/or other materials provided with the distribution. ++// ++// - Neither the name of Sun Microsystems or the names of contributors may ++// be used to endorse or promote products derived from this software without ++// specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS ++// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, ++// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ++// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR ++// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ++// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ++// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ++// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ++// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ++// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++// The original source code covered by the above license above has been ++// modified significantly by Google Inc. ++// Copyright 2012 the V8 project authors. All rights reserved. 
++ ++#include "src/codegen/la64/assembler-la64.h" ++ ++#if V8_TARGET_ARCH_LA64 ++ ++#include "src/base/cpu.h" ++#include "src/codegen/la64/assembler-la64-inl.h" ++#include "src/codegen/safepoint-table.h" ++#include "src/codegen/string-constants.h" ++#include "src/deoptimizer/deoptimizer.h" ++#include "src/objects/heap-number-inl.h" ++ ++namespace v8 { ++namespace internal { ++ ++void CpuFeatures::ProbeImpl(bool cross_compile) { ++ supported_ |= 1u << FPU; ++ ++ // Only use statically determined features for cross compile (snapshot). ++ if (cross_compile) return; ++ ++#if defined(_loongisa_vec) ++ supported_ |= 0u; ++#endif ++ // If the compiler is allowed to use fpu then we can use fpu too in our ++ // code generation. ++#ifdef __loongarch__ ++ // Probe for additional features at runtime. ++ base::CPU cpu; ++ supported_ |= 0u; ++#endif ++} ++ ++void CpuFeatures::PrintTarget() {} ++void CpuFeatures::PrintFeatures() {} ++ ++int ToNumber(Register reg) { ++ DCHECK(reg.is_valid()); ++ const int kNumbers[] = { ++ 0, // zero_reg ++ 1, // r1 ra ++ 2, // r2 gp ++ 3, // r3 sp ++ 4, // a0 v0 ++ 5, // a1 v1 ++ 6, // a2 ++ 7, // a3 ++ 8, // a4 ++ 9, // a5 ++ 10, // a6 ++ 11, // a7 ++ 12, // t0 ++ 13, // t1 ++ 14, // t2 ++ 15, // t3 ++ 16, // t4 ++ 17, // t5 ++ 18, // t6 ++ 19, // t7 ++ 20, // t8 ++ 21, // tp ++ 22, // fp ++ 23, // s0 ++ 24, // s1 ++ 25, // s2 ++ 26, // s3 ++ 27, // s4 ++ 28, // s5 ++ 29, // s6 ++ 30, // s7 ++ 31, // s8 ++ }; ++ return kNumbers[reg.code()]; ++} ++ ++Register ToRegister(int num) { ++ DCHECK(num >= 0 && num < kNumRegisters); ++ const Register kRegisters[] = { ++ zero_reg, ra, gp, sp, a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, t3, ++ t4, t5, t6, t7, t8, tp, fp, s0, s1, s2, s3, s4, s5, s6, s7, s8}; ++ return kRegisters[num]; ++} ++ ++// ----------------------------------------------------------------------------- ++// Implementation of RelocInfo. ++ ++const int RelocInfo::kApplyMask = ++ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) | ++ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED); ++ ++bool RelocInfo::IsCodedSpecially() { ++ // The deserializer needs to know whether a pointer is specially coded. Being ++ // specially coded on loongisa means that it is a lui/ori instruction, and ++ // that is always the case inside code objects. ++ return true; ++} ++ ++bool RelocInfo::IsInConstantPool() { return false; } ++ ++uint32_t RelocInfo::wasm_call_tag() const { ++ DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL); ++ return static_cast( ++ Assembler::target_address_at(pc_, constant_pool_)); ++} ++ ++// ----------------------------------------------------------------------------- ++// Implementation of Operand and MemOperand. ++// See assembler-la64-inl.h for inlined constructors. 
++
++Operand::Operand(Handle<HeapObject> handle)
++    : rm_(no_reg), rmode_(RelocInfo::FULL_EMBEDDED_OBJECT) {
++  value_.immediate = static_cast<intptr_t>(handle.address());
++}
++
++Operand Operand::EmbeddedNumber(double value) {
++  int32_t smi;
++  if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
++  Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
++  result.is_heap_object_request_ = true;
++  result.value_.heap_object_request = HeapObjectRequest(value);
++  return result;
++}
++
++Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
++  Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
++  result.is_heap_object_request_ = true;
++  result.value_.heap_object_request = HeapObjectRequest(str);
++  return result;
++}
++
++MemOperand::MemOperand(Register base, int32_t offset)
++    : base_(base), index_(no_reg), offset_(offset) {}
++
++MemOperand::MemOperand(Register base, Register index)
++    : base_(base), index_(index), offset_(0) {}
++
++void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
++  DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
++  for (auto& request : heap_object_requests_) {
++    Handle<HeapObject> object;
++    switch (request.kind()) {
++      case HeapObjectRequest::kHeapNumber:
++        object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
++            request.heap_number());
++        break;
++      case HeapObjectRequest::kStringConstant:
++        const StringConstantBase* str = request.string();
++        CHECK_NOT_NULL(str);
++        object = str->AllocateStringConstant(isolate);
++        break;
++    }
++    Address pc = reinterpret_cast<Address>
(buffer_start_) + request.offset(); ++ set_target_value_at(pc, reinterpret_cast(object.location())); ++ } ++} ++ ++// ----------------------------------------------------------------------------- ++// Specific instructions, constants, and masks. ++ ++// addi_d(sp, sp, 8) aka Pop() operation or part of Pop(r) ++// operations as post-increment of sp. ++const Instr kPopInstruction = ADDI_D | (kPointerSize & kImm12Mask) << kRkShift | ++ (sp.code() << kRjShift) | sp.code(); // NOLINT ++// addi_d(sp, sp, -8) part of Push(r) operation as pre-decrement of sp. ++const Instr kPushInstruction = ADDI_D | ++ (-kPointerSize & kImm12Mask) << kRkShift | ++ (sp.code() << kRjShift) | sp.code(); // NOLINT ++// St_d(r, MemOperand(sp, 0)) ++const Instr kPushRegPattern = ST_D | (sp.code() << kRjShift); // NOLINT ++// Ld_d(r, MemOperand(sp, 0)) ++const Instr kPopRegPattern = LD_D | (sp.code() << kRjShift); // NOLINT ++ ++Assembler::Assembler(const AssemblerOptions& options, ++ std::unique_ptr buffer) ++ : AssemblerBase(options, std::move(buffer)), ++ scratch_register_list_(t7.bit() | t6.bit()) { ++ reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_); ++ ++ last_trampoline_pool_end_ = 0; ++ no_trampoline_pool_before_ = 0; ++ trampoline_pool_blocked_nesting_ = 0; ++ // We leave space (16 * kTrampolineSlotsSize) ++ // for BlockTrampolinePoolScope buffer. ++ next_buffer_check_ = FLAG_force_long_branches ++ ? kMaxInt ++ : kMax16BranchOffset - kTrampolineSlotsSize * 16; ++ internal_trampoline_exception_ = false; ++ last_bound_pos_ = 0; ++ ++ trampoline_emitted_ = FLAG_force_long_branches; // TODO remove this ++ unbound_labels_count_ = 0; ++ block_buffer_growth_ = false; ++} ++ ++void Assembler::GetCode(Isolate* isolate, CodeDesc* desc, ++ SafepointTableBuilder* safepoint_table_builder, ++ int handler_table_offset) { ++ // EmitForbiddenSlotInstruction(); // TODO why? ++ ++ int code_comments_size = WriteCodeComments(); ++ ++ DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap. ++ ++ AllocateAndInstallRequestedHeapObjects(isolate); ++ ++ // Set up code descriptor. ++ // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to ++ // this point to make CodeDesc initialization less fiddly. ++ ++ static constexpr int kConstantPoolSize = 0; ++ const int instruction_size = pc_offset(); ++ const int code_comments_offset = instruction_size - code_comments_size; ++ const int constant_pool_offset = code_comments_offset - kConstantPoolSize; ++ const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable) ++ ? constant_pool_offset ++ : handler_table_offset; ++ const int safepoint_table_offset = ++ (safepoint_table_builder == kNoSafepointTable) ++ ? handler_table_offset2 ++ : safepoint_table_builder->GetCodeOffset(); ++ const int reloc_info_offset = ++ static_cast(reloc_info_writer.pos() - buffer_->start()); ++ CodeDesc::Initialize(desc, this, safepoint_table_offset, ++ handler_table_offset2, constant_pool_offset, ++ code_comments_offset, reloc_info_offset); ++} ++ ++void Assembler::Align(int m) { ++ DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m)); ++ while ((pc_offset() & (m - 1)) != 0) { ++ nop(); ++ } ++} ++ ++void Assembler::CodeTargetAlign() { ++ // No advantage to aligning branch/call targets to more than ++ // single instruction, that I am aware of. 
++ Align(4); ++} ++ ++Register Assembler::GetRkReg(Instr instr) { ++ return Register::from_code((instr & kRkFieldMask) >> kRkShift); ++} ++ ++Register Assembler::GetRjReg(Instr instr) { ++ return Register::from_code((instr & kRjFieldMask) >> kRjShift); ++} ++ ++Register Assembler::GetRdReg(Instr instr) { ++ return Register::from_code((instr & kRdFieldMask) >> kRdShift); ++} ++ ++uint32_t Assembler::GetRk(Instr instr) { ++ return (instr & kRkFieldMask) >> kRkShift; ++} ++ ++uint32_t Assembler::GetRkField(Instr instr) { return instr & kRkFieldMask; } ++ ++uint32_t Assembler::GetRj(Instr instr) { ++ return (instr & kRjFieldMask) >> kRjShift; ++} ++ ++uint32_t Assembler::GetRjField(Instr instr) { return instr & kRjFieldMask; } ++ ++uint32_t Assembler::GetRd(Instr instr) { ++ return (instr & kRdFieldMask) >> kRdShift; ++} ++ ++uint32_t Assembler::GetRdField(Instr instr) { return instr & kRdFieldMask; } ++ ++uint32_t Assembler::GetSa2(Instr instr) { ++ return (instr & kSa2FieldMask) >> kSaShift; ++} ++ ++uint32_t Assembler::GetSa2Field(Instr instr) { return instr & kSa2FieldMask; } ++ ++uint32_t Assembler::GetSa3(Instr instr) { ++ return (instr & kSa3FieldMask) >> kSaShift; ++} ++ ++uint32_t Assembler::GetSa3Field(Instr instr) { return instr & kSa3FieldMask; } ++ ++bool Assembler::IsPop(Instr instr) { ++ return (instr & 0xffc003e0) == kPopRegPattern; ++} ++ ++bool Assembler::IsPush(Instr instr) { ++ return (instr & 0xffc003e0) == kPushRegPattern; ++} ++ ++// Labels refer to positions in the (to be) generated code. ++// There are bound, linked, and unused labels. ++// ++// Bound labels refer to known positions in the already ++// generated code. pos() is the position the label refers to. ++// ++// Linked labels refer to unknown positions in the code ++// to be generated; pos() is the position of the last ++// instruction using the label. ++ ++// The link chain is terminated by a value in the instruction of -1, ++// which is an otherwise illegal value (branch -1 is inf loop). ++// The instruction 16-bit offset field addresses 32-bit words, but in ++// code is conv to an 18-bit value addressing bytes, hence the -4 value. ++ ++const int kEndOfChain = 0; ++// Determines the end of the Jump chain (a subset of the label link chain). ++const int kEndOfJumpChain = 0; ++ ++bool Assembler::IsBranch(Instr instr) { ++ uint32_t opcode = (instr >> 26) << 26; ++ // Checks if the instruction is a branch. ++ bool isBranch = opcode == BEQZ || opcode == BNEZ || opcode == BCZ || ++ opcode == B || opcode == BL || opcode == BEQ || ++ opcode == BNE || opcode == BLT || opcode == BGE || ++ opcode == BLTU || opcode == BGEU; ++ return isBranch; ++} ++ ++bool Assembler::IsB(Instr instr) { ++ uint32_t opcode = (instr >> 26) << 26; ++ // Checks if the instruction is a b. ++ bool isBranch = opcode == B || opcode == BL; ++ return isBranch; ++} ++ ++bool Assembler::IsBz(Instr instr) { ++ uint32_t opcode = (instr >> 26) << 26; ++ // Checks if the instruction is a branch. ++ bool isBranch = opcode == BEQZ || opcode == BNEZ || opcode == BCZ; ++ return isBranch; ++} ++ ++bool Assembler::IsEmittedConstant(Instr instr) { ++ // Add GetLabelConst function? ++ uint32_t label_constant = instr & ~kImm16Mask; ++ return label_constant == 0; // Emitted label const in reg-exp engine. ++} ++ ++bool Assembler::IsJ(Instr instr) { ++ uint32_t opcode = (instr >> 26) << 26; ++ // Checks if the instruction is a jump. 
++ return opcode == JIRL; ++} ++ ++bool Assembler::IsLu12i_w(Instr instr) { ++ uint32_t opcode = (instr >> 25) << 25; ++ return opcode == LU12I_W; ++} ++ ++bool Assembler::IsOri(Instr instr) { ++ uint32_t opcode = (instr >> 22) << 22; ++ return opcode == ORI; ++} ++ ++bool Assembler::IsLu32i_d(Instr instr) { ++ uint32_t opcode = (instr >> 25) << 25; ++ return opcode == LU32I_D; ++} ++ ++bool Assembler::IsLu52i_d(Instr instr) { ++ uint32_t opcode = (instr >> 22) << 22; ++ return opcode == LU52I_D; ++} ++ ++bool Assembler::IsMov(Instr instr, Register rd, Register rj) { ++ // Checks if the instruction is a OR with zero_reg argument (aka MOV). ++ Instr instr1 = ++ OR | zero_reg.code() << kRkShift | rj.code() << kRjShift | rd.code(); ++ return instr == instr1; ++} ++ ++bool Assembler::IsPcAddi(Instr instr, Register rd, int32_t si20) { ++ DCHECK(is_int20(si20)); ++ Instr instr1 = PCADDI | (si20 & 0xfffff) << kRjShift | rd.code(); ++ return instr == instr1; ++} ++ ++bool Assembler::IsNop(Instr instr, unsigned int type) { ++ // See Assembler::nop(type). ++ DCHECK_LT(type, 32); ++ // Traditional loongisa nop == andi(zero_reg, zero_reg, 0) ++ // When marking non-zero type, use andi(zero_reg, t7, type) ++ // to avoid use of ssnop and ehb special encodings of the ++ // andi instruction. ++ ++ Register nop_rt_reg = (type == 0) ? zero_reg : t7; ++ Instr instr1 = ANDI | ((type & kImm12Mask) << kRkShift) | ++ (nop_rt_reg.code() << kRjShift); ++ ++ return instr == instr1; ++} ++ ++static inline int32_t GetOffsetOfBranch(Instr instr, ++ Assembler::OffsetSize bits) { ++ int32_t result = 0; ++ if (bits == 16) { ++ result = (instr << 6) >> 16; ++ } else if (bits == 21) { ++ uint32_t low16 = instr << 6; ++ low16 = low16 >> 16; ++ low16 &= 0xffff; ++ int32_t hi5 = (instr << 27) >> 11; ++ result = hi5 | low16; ++ } else { ++ uint32_t low16 = instr << 6; ++ low16 = low16 >> 16; ++ low16 &= 0xffff; ++ int32_t hi10 = (instr << 22) >> 6; ++ result = hi10 | low16; ++ DCHECK_EQ(bits, 26); ++ } ++ return result << 2; ++} ++ ++static Assembler::OffsetSize OffsetSizeInBits(Instr instr) { ++ if (Assembler::IsB(instr)) { ++ return Assembler::OffsetSize::kOffset26; ++ } else if (Assembler::IsBz(instr)) { ++ return Assembler::OffsetSize::kOffset21; ++ } else { ++ DCHECK(Assembler::IsBranch(instr)); ++ return Assembler::OffsetSize::kOffset16; ++ } ++} ++ ++static inline int32_t AddBranchOffset(int pos, Instr instr) { ++ Assembler::OffsetSize bits = OffsetSizeInBits(instr); ++ ++ int32_t imm = GetOffsetOfBranch(instr, bits); ++ ++ if (imm == kEndOfChain) { ++ // EndOfChain sentinel is returned directly, not relative to pc or pos. ++ return kEndOfChain; ++ } else { ++ // Handle the case that next branch position is 0. ++ // TODO: Define -4 as a constant ++ int32_t offset = pos + Assembler::kBranchPCOffset + imm; ++ return offset == 0 ? -4 : offset; ++ } ++} ++ ++int Assembler::target_at(int pos, bool is_internal) { ++ if (is_internal) { ++ int64_t* p = reinterpret_cast(buffer_start_ + pos); ++ int64_t address = *p; ++ if (address == kEndOfJumpChain) { ++ return kEndOfChain; ++ } else { ++ int64_t instr_address = reinterpret_cast(p); ++ DCHECK(instr_address - address < INT_MAX); ++ int delta = static_cast(instr_address - address); ++ DCHECK(pos > delta); ++ return pos - delta; ++ } ++ } ++ Instr instr = instr_at(pos); ++ ++ // TODO remove after remove label_at_put? ++ if ((instr & ~kImm16Mask) == 0) { ++ // Emitted label constant, not part of a branch. 
++ if (instr == 0) { ++ return kEndOfChain; ++ } else { ++ int32_t imm18 = ((instr & static_cast(kImm16Mask)) << 16) >> 14; ++ return (imm18 + pos); ++ } ++ } ++ ++ // Check we have a branch or jump instruction. ++ DCHECK(IsBranch(instr) || IsJ(instr) || IsLu12i_w(instr) || ++ IsPcAddi(instr, t8, 16)); ++ // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming ++ // the compiler uses arithmetic shifts for signed integers. ++ if (IsBranch(instr)) { ++ return AddBranchOffset(pos, instr); ++ } else if (IsPcAddi(instr, t8, 16)) { ++ // see BranchLong(Label* L) and BranchAndLinkLong ?? ++ int32_t imm32; ++ Instr instr_lu12i_w = instr_at(pos + 1 * kInstrSize); ++ Instr instr_ori = instr_at(pos + 2 * kInstrSize); ++ DCHECK(IsLu12i_w(instr_lu12i_w)); ++ // DCHECK(IsOri(instr_ori)); ++ imm32 = ((instr_lu12i_w >> 5) & 0xfffff) << 12; ++ imm32 |= ((instr_ori >> 10) & static_cast(kImm12Mask)); ++ if (imm32 == kEndOfJumpChain) { ++ // EndOfChain sentinel is returned directly, not relative to pc or pos. ++ return kEndOfChain; ++ } ++ return pos + imm32; ++ } else if (IsLu12i_w(instr)) { ++ abort(); ++ // TODO no used?? ++ /* Instr instr_lui = instr_at(pos + 0 * kInstrSize); ++ Instr instr_ori = instr_at(pos + 1 * kInstrSize); ++ Instr instr_ori2 = instr_at(pos + 3 * kInstrSize); ++ DCHECK(IsOri(instr_ori)); ++ DCHECK(IsOri(instr_ori2)); ++ ++ // TODO(plind) create named constants for shift values. ++ int64_t imm = static_cast(instr_lui & kImm16Mask) << 48; ++ imm |= static_cast(instr_ori & kImm16Mask) << 32; ++ imm |= static_cast(instr_ori2 & kImm16Mask) << 16; ++ // Sign extend address; ++ imm >>= 16; ++ ++ if (imm == kEndOfJumpChain) { ++ // EndOfChain sentinel is returned directly, not relative to pc or ++ pos. return kEndOfChain; } else { uint64_t instr_address = ++ reinterpret_cast(buffer_start_ + pos); DCHECK(instr_address - ++ imm < INT_MAX); int delta = static_cast(instr_address - imm); ++ DCHECK(pos > delta); ++ return pos - delta; ++ }*/ ++ } else { ++ DCHECK(IsJ(instr)); ++ // TODO not used??? ++ abort(); ++ } ++} ++ ++static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos, ++ Instr instr) { ++ int32_t bits = OffsetSizeInBits(instr); ++ int32_t imm = target_pos - pos; ++ DCHECK_EQ(imm & 3, 0); ++ imm >>= 2; ++ ++ DCHECK(is_intn(imm, bits)); ++ ++ if (bits == 16) { ++ const int32_t mask = ((1 << 16) - 1) << 10; ++ instr &= ~mask; ++ return instr | ((imm << 10) & mask); ++ } else if (bits == 21) { ++ const int32_t mask = 0x3fffc1f; ++ instr &= ~mask; ++ uint32_t low16 = (imm & kImm16Mask) << 10; ++ int32_t hi5 = (imm >> 16) & 0x1f; ++ return instr | low16 | hi5; ++ } else { ++ DCHECK_EQ(bits, 26); ++ const int32_t mask = 0x3ffffff; ++ instr &= ~mask; ++ uint32_t low16 = (imm & kImm16Mask) << 10; ++ int32_t hi10 = (imm >> 16) & 0x3ff; ++ return instr | low16 | hi10; ++ } ++} ++ ++void Assembler::target_at_put(int pos, int target_pos, bool is_internal) { ++ if (is_internal) { ++ uint64_t imm = reinterpret_cast(buffer_start_) + target_pos; ++ *reinterpret_cast(buffer_start_ + pos) = imm; ++ return; ++ } ++ Instr instr = instr_at(pos); ++ if ((instr & ~kImm16Mask) == 0) { ++ DCHECK(target_pos == kEndOfChain || target_pos >= 0); ++ // Emitted label constant, not part of a branch. ++ // Make label relative to Code pointer of generated Code object. 
++ instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag)); ++ return; ++ } ++ ++ if (IsBranch(instr)) { ++ instr = SetBranchOffset(pos, target_pos, instr); ++ instr_at_put(pos, instr); ++ } else if (0 == 1 /*IsLui(instr)*/) { ++ /* if (IsPcAddi(instr, t8, 16)) { ++ Instr instr_lui = instr_at(pos + 0 * kInstrSize); ++ Instr instr_ori = instr_at(pos + 2 * kInstrSize); ++ DCHECK(IsLui(instr_lui)); ++ DCHECK(IsOri(instr_ori)); ++ int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset); ++ DCHECK_EQ(imm & 3, 0); ++ if (is_int16(imm + Assembler::kLongBranchPCOffset - ++ Assembler::kBranchPCOffset)) { ++ // Optimize by converting to regular branch and link with 16-bit ++ // offset. ++ Instr instr_b = REGIMM | BGEZAL; // Branch and link. ++ instr_b = SetBranchOffset(pos, target_pos, instr_b); ++ // Correct ra register to point to one instruction after jalr from ++ // TurboAssembler::BranchAndLinkLong. ++ Instr instr_a = DADDIU | ra.code() << kRsShift | ra.code() << kRtShift ++ | kOptimizedBranchAndLinkLongReturnOffset; ++ ++ instr_at_put(pos, instr_b); ++ instr_at_put(pos + 1 * kInstrSize, instr_a); ++ } else { ++ instr_lui &= ~kImm16Mask; ++ instr_ori &= ~kImm16Mask; ++ ++ instr_at_put(pos + 0 * kInstrSize, ++ instr_lui | ((imm >> kLuiShift) & kImm16Mask)); ++ instr_at_put(pos + 2 * kInstrSize, instr_ori | (imm & kImm16Mask)); ++ } ++ } else { ++ Instr instr_lui = instr_at(pos + 0 * kInstrSize); ++ Instr instr_ori = instr_at(pos + 1 * kInstrSize); ++ Instr instr_ori2 = instr_at(pos + 3 * kInstrSize); ++ DCHECK(IsOri(instr_ori)); ++ DCHECK(IsOri(instr_ori2)); ++ ++ uint64_t imm = reinterpret_cast(buffer_start_) + target_pos; ++ DCHECK_EQ(imm & 3, 0); ++ ++ instr_lui &= ~kImm16Mask; ++ instr_ori &= ~kImm16Mask; ++ instr_ori2 &= ~kImm16Mask; ++ ++ instr_at_put(pos + 0 * kInstrSize, ++ instr_lui | ((imm >> 32) & kImm16Mask)); ++ instr_at_put(pos + 1 * kInstrSize, ++ instr_ori | ((imm >> 16) & kImm16Mask)); ++ instr_at_put(pos + 3 * kInstrSize, instr_ori2 | (imm & kImm16Mask)); ++ }*/ ++ } else if (IsPcAddi(instr, t8, 16)) { ++ abort(); /* ++ Instr instr_lu12i_w = instr_at(pos + 1 * kInstrSize); ++ Instr instr_ori = instr_at(pos + 2 * kInstrSize); ++ DCHECK(IsLu12i_w(instr_lu12i_w)); ++ //DCHECK(IsOri(instr_ori)); ++ ++ int32_t imm_short = target_pos - (pos + Assembler::kBranchPCOffset); ++ ++ if (is_int21(imm_short)) { ++ // Optimize by converting to regular branch with 21-bit ++ // offset ++ Instr instr_b = B; ++ instr_b = SetBranchOffset(pos, target_pos, instr_b); ++ ++ instr_at_put(pos, instr_b); ++ } else { ++ int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset); ++ DCHECK_EQ(imm & 3, 0); ++ ++ instr_lu12i_w &= 0xfe00001fu; // opcode:7 | bit20 | rd:5 ++ instr_ori &= 0xffc003ffu; // opcode:10 | bit12 | rj:5 | rd:5 ++ ++ instr_at_put(pos + 1 * kInstrSize, ++ instr_lu12i_w | (((imm >> 12) & 0xfffff) << 5)); ++ instr_at_put(pos + 2 * kInstrSize, instr_ori | ++ ((imm & 0xfff) << 10)); ++ }*/ ++ } else if (IsJ(instr)) { ++ /* ++ int32_t imm28 = target_pos - pos; ++ DCHECK_EQ(imm28 & 3, 0); ++ ++ uint32_t imm26 = static_cast(imm28 >> 2); ++ DCHECK(is_uint26(imm26)); ++ // Place 26-bit signed offset with markings. ++ // When code is committed it will be resolved to j/jal. ++ int32_t mark = IsJ(instr) ? 
kJRawMark : kJalRawMark; ++ instr_at_put(pos, mark | (imm26 & kImm26Mask));*/ ++ abort(); ++ } else { ++ /* int32_t imm28 = target_pos - pos; ++ DCHECK_EQ(imm28 & 3, 0); ++ ++ uint32_t imm26 = static_cast(imm28 >> 2); ++ DCHECK(is_uint26(imm26)); ++ // Place raw 26-bit signed offset. ++ // When code is committed it will be resolved to j/jal. ++ instr &= ~kImm26Mask; ++ instr_at_put(pos, instr | (imm26 & kImm26Mask));*/ ++ abort(); ++ } ++} ++ ++void Assembler::print(const Label* L) { ++ if (L->is_unused()) { ++ PrintF("unused label\n"); ++ } else if (L->is_bound()) { ++ PrintF("bound label to %d\n", L->pos()); ++ } else if (L->is_linked()) { ++ Label l; ++ l.link_to(L->pos()); ++ PrintF("unbound label"); ++ while (l.is_linked()) { ++ PrintF("@ %d ", l.pos()); ++ Instr instr = instr_at(l.pos()); ++ if ((instr & ~kImm16Mask) == 0) { ++ PrintF("value\n"); ++ } else { ++ PrintF("%d\n", instr); ++ } ++ next(&l, is_internal_reference(&l)); ++ } ++ } else { ++ PrintF("label in inconsistent state (pos = %d)\n", L->pos_); ++ } ++} ++ ++void Assembler::bind_to(Label* L, int pos) { ++ DCHECK(0 <= pos && pos <= pc_offset()); // Must have valid binding position. ++ int trampoline_pos = kInvalidSlotPos; ++ bool is_internal = false; ++ if (L->is_linked() && !trampoline_emitted_) { ++ unbound_labels_count_--; ++ if (!is_internal_reference(L)) { ++ next_buffer_check_ += kTrampolineSlotsSize; ++ } ++ } ++ ++ while (L->is_linked()) { ++ int fixup_pos = L->pos(); ++ int dist = pos - fixup_pos; ++ is_internal = is_internal_reference(L); ++ next(L, is_internal); // Call next before overwriting link with target at ++ // fixup_pos. ++ Instr instr = instr_at(fixup_pos); ++ if (is_internal) { ++ target_at_put(fixup_pos, pos, is_internal); ++ } else { ++ if (IsBranch(instr)) { ++ int branch_offset = BranchOffset(instr); ++ if (dist > branch_offset) { ++ if (trampoline_pos == kInvalidSlotPos) { ++ trampoline_pos = get_trampoline_entry(fixup_pos); ++ CHECK_NE(trampoline_pos, kInvalidSlotPos); ++ } ++ CHECK((trampoline_pos - fixup_pos) <= branch_offset); ++ target_at_put(fixup_pos, trampoline_pos, false); ++ fixup_pos = trampoline_pos; ++ } ++ target_at_put(fixup_pos, pos, false); ++ } else { ++ DCHECK(IsJ(instr) || IsLu12i_w(instr) || IsEmittedConstant(instr) || ++ IsPcAddi(instr, t8, 8)); ++ target_at_put(fixup_pos, pos, false); ++ } ++ } ++ } ++ L->bind_to(pos); ++ ++ // Keep track of the last bound label so we don't eliminate any instructions ++ // before a bound label. ++ if (pos > last_bound_pos_) last_bound_pos_ = pos; ++} ++ ++void Assembler::bind(Label* L) { ++ DCHECK(!L->is_bound()); // Label can only be bound once. 
++ bind_to(L, pc_offset()); ++} ++ ++void Assembler::next(Label* L, bool is_internal) { ++ DCHECK(L->is_linked()); ++ int link = target_at(L->pos(), is_internal); ++ if (link == kEndOfChain) { ++ L->Unuse(); ++ } else if (link == -4) { ++ // Next position is pc_offset == 0 ++ L->link_to(0); ++ } else { ++ DCHECK_GE(link, 0); ++ L->link_to(link); ++ } ++} ++ ++bool Assembler::is_near_c(Label* L) { ++ DCHECK(L->is_bound()); ++ return pc_offset() - L->pos() < kMax16BranchOffset - 4 * kInstrSize; ++} ++ ++bool Assembler::is_near(Label* L, OffsetSize bits) { ++ DCHECK(L->is_bound()); ++ return ((pc_offset() - L->pos()) < ++ (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize); ++} ++ ++bool Assembler::is_near_a(Label* L) { ++ DCHECK(L->is_bound()); ++ return pc_offset() - L->pos() <= kMax26BranchOffset - 4 * kInstrSize; ++} ++ ++int Assembler::BranchOffset(Instr instr) { ++ int bits = OffsetSize::kOffset16; ++ ++ uint32_t opcode = (instr >> 26) << 26; ++ switch (opcode) { ++ case B: ++ case BL: ++ bits = OffsetSize::kOffset26; ++ break; ++ case BNEZ: ++ case BEQZ: ++ case BCZ: ++ bits = OffsetSize::kOffset21; ++ break; ++ case BNE: ++ case BEQ: ++ case BLT: ++ case BGE: ++ case BLTU: ++ case BGEU: ++ case JIRL: ++ bits = OffsetSize::kOffset16; ++ break; ++ default: ++ break; ++ } ++ ++ return (1 << (bits + 2 - 1)) - 1; ++} ++ ++// We have to use a temporary register for things that can be relocated even ++// if they can be encoded in the LA's 16 bits of immediate-offset instruction ++// space. There is no guarantee that the relocated location can be similarly ++// encoded. ++bool Assembler::MustUseReg(RelocInfo::Mode rmode) { ++ return !RelocInfo::IsNone(rmode); ++} ++ ++void Assembler::GenB(Opcode opcode, Register rj, int32_t si21) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ DCHECK((BEQZ == opcode || BNEZ == opcode) && is_int21(si21) && rj.is_valid()); ++ Instr instr = opcode | (si21 & kImm16Mask) << kRkShift | ++ (rj.code() << kRjShift) | ((si21 & 0x1fffff) >> 16); ++ emit(instr); ++} ++ ++void Assembler::GenB(Opcode opcode, CFRegister cj, int32_t si21, bool isEq) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ DCHECK(BCZ == opcode && is_int21(si21)); ++ DCHECK(cj >= 0 && cj <= 7); ++ int32_t sc = (isEq ? 
cj : cj + 8); ++ Instr instr = opcode | (si21 & kImm16Mask) << kRkShift | (sc << kRjShift) | ++ ((si21 & 0x1fffff) >> 16); ++ emit(instr); ++} ++ ++void Assembler::GenB(Opcode opcode, int32_t si26) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ DCHECK((B == opcode || BL == opcode) && is_int26(si26)); ++ Instr instr = ++ opcode | ((si26 & kImm16Mask) << kRkShift) | ((si26 & kImm26Mask) >> 16); ++ emit(instr); ++} ++ ++void Assembler::GenBJ(Opcode opcode, Register rj, Register rd, int32_t si16) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ DCHECK(is_int16(si16)); ++ Instr instr = opcode | ((si16 & kImm16Mask) << kRkShift) | ++ (rj.code() << kRjShift) | rd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenCmp(Opcode opcode, FPUCondition cond, FPURegister fk, ++ FPURegister fj, CFRegister cd) { ++ DCHECK(opcode == FCMP_COND_S || opcode == FCMP_COND_D); ++ Instr instr = opcode | cond << kCondShift | (fk.code() << kFkShift) | ++ (fj.code() << kFjShift) | cd; ++ emit(instr); ++} ++ ++void Assembler::GenSel(Opcode opcode, CFRegister ca, FPURegister fk, ++ FPURegister fj, FPURegister rd) { ++ DCHECK((opcode == FSEL)); ++ Instr instr = opcode | ca << kCondShift | (fk.code() << kFkShift) | ++ (fj.code() << kFjShift) | rd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenRegister(Opcode opcode, Register rj, Register rd, ++ bool rjrd) { ++ Instr instr = 0; ++ if (rjrd) { ++ instr = opcode | (rj.code() << kRjShift) | rd.code(); ++ } else { ++ DCHECK(opcode == ASRTLE_D || opcode == ASRTGT_D); ++ instr = opcode | (rj.code() << kRkShift) | rd.code() << kRjShift; ++ } ++ emit(instr); ++} ++ ++void Assembler::GenRegister(Opcode opcode, FPURegister fj, FPURegister fd) { ++ Instr instr = opcode | (fj.code() << kFjShift) | fd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenRegister(Opcode opcode, Register rj, FPURegister fd) { ++ DCHECK((opcode == MOVGR2FR_W) || (opcode == MOVGR2FR_D) || ++ (opcode == MOVGR2FRH_W)); ++ Instr instr = opcode | (rj.code() << kRjShift) | fd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenRegister(Opcode opcode, FPURegister fj, Register rd) { ++ DCHECK((opcode == MOVFR2GR_S) || (opcode == MOVFR2GR_D) || ++ (opcode == MOVFRH2GR_S)); ++ Instr instr = opcode | (fj.code() << kFjShift) | rd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenRegister(Opcode opcode, Register rj, FPUControlRegister fd) { ++ DCHECK((opcode == MOVGR2FCSR)); ++ Instr instr = opcode | (rj.code() << kRjShift) | fd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenRegister(Opcode opcode, FPUControlRegister fj, Register rd) { ++ DCHECK((opcode == MOVFCSR2GR)); ++ Instr instr = opcode | (fj.code() << kFjShift) | rd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenRegister(Opcode opcode, FPURegister fj, CFRegister cd) { ++ DCHECK((opcode == MOVFR2CF)); ++ Instr instr = opcode | (fj.code() << kFjShift) | cd; ++ emit(instr); ++} ++ ++void Assembler::GenRegister(Opcode opcode, CFRegister cj, FPURegister fd) { ++ DCHECK((opcode == MOVCF2FR)); ++ Instr instr = opcode | cj << kFjShift | fd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenRegister(Opcode opcode, Register rj, CFRegister cd) { ++ DCHECK((opcode == MOVGR2CF)); ++ Instr instr = opcode | (rj.code() << kRjShift) | cd; ++ emit(instr); ++} ++ ++void Assembler::GenRegister(Opcode opcode, CFRegister cj, Register rd) { ++ DCHECK((opcode == MOVCF2GR)); ++ Instr instr = opcode | cj << kFjShift | rd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenRegister(Opcode opcode, Register rk, Register rj, ++ Register rd) { 
++ Instr instr = ++ opcode | (rk.code() << kRkShift) | (rj.code() << kRjShift) | rd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenRegister(Opcode opcode, FPURegister fk, FPURegister fj, ++ FPURegister fd) { ++ Instr instr = ++ opcode | (fk.code() << kFkShift) | (fj.code() << kFjShift) | fd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenRegister(Opcode opcode, FPURegister fa, FPURegister fk, ++ FPURegister fj, FPURegister fd) { ++ Instr instr = opcode | (fa.code() << kFaShift) | (fk.code() << kFkShift) | ++ (fj.code() << kFjShift) | fd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenRegister(Opcode opcode, Register rk, Register rj, ++ FPURegister fd) { ++ Instr instr = ++ opcode | (rk.code() << kRkShift) | (rj.code() << kRjShift) | fd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenImm(Opcode opcode, int32_t bit3, Register rk, Register rj, ++ Register rd) { ++ DCHECK(is_uint3(bit3)); ++ Instr instr = opcode | (bit3 & 0x7) << kSaShift | (rk.code() << kRkShift) | ++ (rj.code() << kRjShift) | rd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenImm(Opcode opcode, int32_t bit6m, int32_t bit6l, Register rj, ++ Register rd) { ++ DCHECK(is_uint6(bit6m) && is_uint6(bit6l)); ++ Instr instr = opcode | (bit6m & 0x3f) << 16 | (bit6l & 0x3f) << kRkShift | ++ (rj.code() << kRjShift) | rd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenImm(Opcode opcode, int32_t bit20, Register rd) { ++ // DCHECK(is_uint20(bit20) || is_int20(bit20)); ++ Instr instr = opcode | (bit20 & 0xfffff) << kRjShift | rd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenImm(Opcode opcode, int32_t bit15) { ++ DCHECK(is_uint15(bit15)); ++ Instr instr = opcode | (bit15 & 0x7fff); ++ emit(instr); ++} ++ ++void Assembler::GenImm(Opcode opcode, int32_t value, Register rj, Register rd, ++ int32_t value_bits) { ++ DCHECK(value_bits == 6 || value_bits == 12 || value_bits == 14 || ++ value_bits == 16); ++ uint32_t imm = value & 0x3f; ++ if (value_bits == 12) { ++ imm = value & kImm12Mask; ++ } else if (value_bits == 14) { ++ imm = value & 0x3fff; ++ } else if (value_bits == 16) { ++ imm = value & kImm16Mask; ++ } ++ Instr instr = opcode | imm << kRkShift | (rj.code() << kRjShift) | rd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenImm(Opcode opcode, int32_t bit12, Register rj, ++ FPURegister fd) { ++ DCHECK(is_int12(bit12)); ++ Instr instr = opcode | ((bit12 & kImm12Mask) << kRkShift) | ++ (rj.code() << kRjShift) | fd.code(); ++ emit(instr); ++} ++ ++// Returns the next free trampoline entry. ++int32_t Assembler::get_trampoline_entry(int32_t pos) { ++ int32_t trampoline_entry = kInvalidSlotPos; ++ if (!internal_trampoline_exception_) { ++ if (trampoline_.start() > pos) { ++ trampoline_entry = trampoline_.take_slot(); ++ } ++ ++ if (kInvalidSlotPos == trampoline_entry) { ++ internal_trampoline_exception_ = true; ++ } ++ } ++ return trampoline_entry; ++} ++ ++uint64_t Assembler::jump_address(Label* L) { ++ int64_t target_pos; ++ if (L->is_bound()) { ++ target_pos = L->pos(); ++ } else { ++ if (L->is_linked()) { ++ target_pos = L->pos(); // L's link. ++ L->link_to(pc_offset()); ++ } else { ++ L->link_to(pc_offset()); ++ return kEndOfJumpChain; ++ } ++ } ++ uint64_t imm = reinterpret_cast(buffer_start_) + target_pos; ++ DCHECK_EQ(imm & 3, 0); ++ ++ return imm; ++} ++ ++uint64_t Assembler::branch_long_offset(Label* L) { ++ int64_t target_pos; ++ ++ if (L->is_bound()) { ++ target_pos = L->pos(); ++ } else { ++ if (L->is_linked()) { ++ target_pos = L->pos(); // L's link. 
++ L->link_to(pc_offset()); ++ } else { ++ L->link_to(pc_offset()); ++ return kEndOfJumpChain; ++ } ++ } ++ int64_t offset = target_pos - (pc_offset() + kLongBranchPCOffset); ++ DCHECK_EQ(offset & 3, 0); ++ ++ return static_cast(offset); ++} ++ ++int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) { ++ int32_t target_pos; ++ ++ if (L->is_bound()) { ++ target_pos = L->pos(); ++ } else { ++ if (L->is_linked()) { ++ target_pos = L->pos(); ++ L->link_to(pc_offset()); ++ } else { ++ L->link_to(pc_offset()); ++ if (!trampoline_emitted_) { ++ unbound_labels_count_++; ++ next_buffer_check_ -= kTrampolineSlotsSize; ++ } ++ return kEndOfChain; ++ } ++ } ++ ++ int32_t offset = target_pos - (pc_offset() + kBranchPCOffset); ++ DCHECK(is_intn(offset, bits + 2)); ++ DCHECK_EQ(offset & 3, 0); ++ ++ return offset; ++} ++ ++void Assembler::label_at_put(Label* L, int at_offset) { ++ int target_pos; ++ if (L->is_bound()) { ++ target_pos = L->pos(); ++ instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag)); ++ } else { ++ if (L->is_linked()) { ++ target_pos = L->pos(); // L's link. ++ int32_t imm18 = target_pos - at_offset; ++ DCHECK_EQ(imm18 & 3, 0); ++ int32_t imm16 = imm18 >> 2; ++ DCHECK(is_int16(imm16)); ++ instr_at_put(at_offset, (imm16 & kImm16Mask)); ++ } else { ++ target_pos = kEndOfChain; ++ instr_at_put(at_offset, 0); ++ if (!trampoline_emitted_) { ++ unbound_labels_count_++; ++ next_buffer_check_ -= kTrampolineSlotsSize; ++ } ++ } ++ L->link_to(at_offset); ++ } ++ // TODO PushBackTrack() ++} ++ ++//------- Branch and jump instructions -------- ++ ++void Assembler::b(int32_t offset) { GenB(B, offset); } ++ ++void Assembler::bl(int32_t offset) { GenB(BL, offset); } ++ ++void Assembler::beq(Register rj, Register rd, int32_t offset) { ++ GenBJ(BEQ, rj, rd, offset); ++} ++ ++void Assembler::bne(Register rj, Register rd, int32_t offset) { ++ GenBJ(BNE, rj, rd, offset); ++} ++ ++void Assembler::blt(Register rj, Register rd, int32_t offset) { ++ GenBJ(BLT, rj, rd, offset); ++} ++ ++void Assembler::bge(Register rj, Register rd, int32_t offset) { ++ GenBJ(BGE, rj, rd, offset); ++} ++ ++void Assembler::bltu(Register rj, Register rd, int32_t offset) { ++ GenBJ(BLTU, rj, rd, offset); ++} ++ ++void Assembler::bgeu(Register rj, Register rd, int32_t offset) { ++ GenBJ(BGEU, rj, rd, offset); ++} ++ ++void Assembler::beqz(Register rj, int32_t offset) { GenB(BEQZ, rj, offset); } ++void Assembler::bnez(Register rj, int32_t offset) { GenB(BNEZ, rj, offset); } ++ ++void Assembler::jirl(Register rd, Register rj, int32_t offset) { ++ GenBJ(JIRL, rj, rd, offset); ++} ++ ++void Assembler::bceqz(CFRegister cj, int32_t si21) { ++ GenB(BCZ, cj, si21, true); ++} ++ ++void Assembler::bcnez(CFRegister cj, int32_t si21) { ++ GenB(BCZ, cj, si21, false); ++} ++ ++// -------Data-processing-instructions--------- ++ ++// Arithmetic. 
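The branch emitters above (b, bl, beq, beqz, bceqz and friends) hand their offsets to GenB/GenBJ, which scatter the word offset across two instruction fields; GetOffsetOfBranch and SetBranchOffset earlier in this file undo and redo that split. For the 26-bit forms, the low 16 bits of (offset >> 2) land in instruction bits 25:10 and the high 10 bits in bits 9:0. A self-contained round-trip check of that packing, derived from the code above rather than from any LoongArch manual (opcode bits omitted):

    // Illustrative round-trip of the 26-bit branch-offset packing used by GenB.
    #include <cassert>
    #include <cstdint>

    // Pack a byte offset (multiple of 4) the way GenB does.
    uint32_t Pack26(int32_t byte_offset) {
      int32_t imm = byte_offset >> 2;           // word offset
      uint32_t low16 = (imm & 0xffff) << 10;    // offs[15:0]  -> instr[25:10]
      uint32_t hi10 = (imm >> 16) & 0x3ff;      // offs[25:16] -> instr[9:0]
      return low16 | hi10;
    }

    // Unpack it the way GetOffsetOfBranch does for kOffset26.
    int32_t Unpack26(uint32_t instr) {
      uint32_t low16 = ((instr << 6) >> 16) & 0xffff;            // instr[25:10]
      int32_t hi10 = static_cast<int32_t>(instr << 22) >> 6;     // sign-extended instr[9:0]
      return (hi10 | static_cast<int32_t>(low16)) << 2;          // back to a byte offset
    }

    int main() {
      const int32_t tests[] = {-0x4000000, -4096, -4, 0, 4, 4096, 0x3fffffc};
      for (int32_t off : tests) {
        assert(Unpack26(Pack26(off)) == off);
      }
      return 0;
    }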
++void Assembler::add_w(Register rd, Register rj, Register rk) { ++ GenRegister(ADD_W, rk, rj, rd); ++} ++ ++void Assembler::add_d(Register rd, Register rj, Register rk) { ++ GenRegister(ADD_D, rk, rj, rd); ++} ++ ++void Assembler::sub_w(Register rd, Register rj, Register rk) { ++ GenRegister(SUB_W, rk, rj, rd); ++} ++ ++void Assembler::sub_d(Register rd, Register rj, Register rk) { ++ GenRegister(SUB_D, rk, rj, rd); ++} ++ ++void Assembler::addi_w(Register rd, Register rj, int32_t si12) { ++ GenImm(ADDI_W, si12, rj, rd, 12); ++} ++ ++void Assembler::addi_d(Register rd, Register rj, int32_t si12) { ++ GenImm(ADDI_D, si12, rj, rd, 12); ++} ++ ++void Assembler::addu16i_d(Register rd, Register rj, int32_t si16) { ++ GenImm(ADDU16I_D, si16, rj, rd, 16); ++} ++ ++void Assembler::alsl_w(Register rd, Register rj, Register rk, int32_t sa2) { ++ DCHECK(is_uint2(sa2 - 1)); ++ GenImm(ALSL_W, sa2 - 1, rk, rj, rd); ++} ++ ++void Assembler::alsl_wu(Register rd, Register rj, Register rk, int32_t sa2) { ++ DCHECK(is_uint2(sa2 - 1)); ++ GenImm(ALSL_WU, sa2 + 3, rk, rj, rd); ++} ++ ++void Assembler::alsl_d(Register rd, Register rj, Register rk, int32_t sa2) { ++ DCHECK(is_uint2(sa2 - 1)); ++ GenImm(ALSL_D, sa2 - 1, rk, rj, rd); ++} ++ ++void Assembler::lu12i_w(Register rd, int32_t si20) { ++ GenImm(LU12I_W, si20, rd); ++} ++ ++void Assembler::lu32i_d(Register rd, int32_t si20) { ++ GenImm(LU32I_D, si20, rd); ++} ++ ++void Assembler::lu52i_d(Register rd, Register rj, int32_t si12) { ++ GenImm(LU52I_D, si12, rj, rd, 12); ++} ++ ++void Assembler::slt(Register rd, Register rj, Register rk) { ++ GenRegister(SLT, rk, rj, rd); ++} ++ ++void Assembler::sltu(Register rd, Register rj, Register rk) { ++ GenRegister(SLTU, rk, rj, rd); ++} ++ ++void Assembler::slti(Register rd, Register rj, int32_t si12) { ++ GenImm(SLTI, si12, rj, rd, 12); ++} ++ ++void Assembler::sltui(Register rd, Register rj, int32_t si12) { ++ GenImm(SLTUI, si12, rj, rd, 12); ++} ++ ++void Assembler::pcaddi(Register rd, int32_t si20) { GenImm(PCADDI, si20, rd); } ++ ++void Assembler::pcaddu12i(Register rd, int32_t si20) { ++ GenImm(PCADDU12I, si20, rd); ++} ++ ++void Assembler::pcaddu18i(Register rd, int32_t si20) { ++ GenImm(PCADDU18I, si20, rd); ++} ++ ++void Assembler::pcalau12i(Register rd, int32_t si20) { ++ GenImm(PCALAU12I, si20, rd); ++} ++ ++void Assembler::and_(Register rd, Register rj, Register rk) { ++ GenRegister(AND, rk, rj, rd); ++} ++ ++void Assembler::or_(Register rd, Register rj, Register rk) { ++ GenRegister(OR, rk, rj, rd); ++} ++ ++void Assembler::xor_(Register rd, Register rj, Register rk) { ++ GenRegister(XOR, rk, rj, rd); ++} ++ ++void Assembler::nor(Register rd, Register rj, Register rk) { ++ GenRegister(NOR, rk, rj, rd); ++} ++ ++void Assembler::andn(Register rd, Register rj, Register rk) { ++ GenRegister(ANDN, rk, rj, rd); ++} ++ ++void Assembler::orn(Register rd, Register rj, Register rk) { ++ GenRegister(ORN, rk, rj, rd); ++} ++ ++void Assembler::andi(Register rd, Register rj, int32_t ui12) { ++ GenImm(ANDI, ui12, rj, rd, 12); ++} ++ ++void Assembler::ori(Register rd, Register rj, int32_t ui12) { ++ GenImm(ORI, ui12, rj, rd, 12); ++} ++ ++void Assembler::xori(Register rd, Register rj, int32_t ui12) { ++ GenImm(XORI, ui12, rj, rd, 12); ++} ++ ++void Assembler::mul_w(Register rd, Register rj, Register rk) { ++ GenRegister(MUL_W, rk, rj, rd); ++} ++ ++void Assembler::mulh_w(Register rd, Register rj, Register rk) { ++ GenRegister(MULH_W, rk, rj, rd); ++} ++ ++void Assembler::mulh_wu(Register rd, Register rj, Register rk) { 
++ GenRegister(MULH_WU, rk, rj, rd); ++} ++ ++void Assembler::mul_d(Register rd, Register rj, Register rk) { ++ GenRegister(MUL_D, rk, rj, rd); ++} ++ ++void Assembler::mulh_d(Register rd, Register rj, Register rk) { ++ GenRegister(MULH_D, rk, rj, rd); ++} ++ ++void Assembler::mulh_du(Register rd, Register rj, Register rk) { ++ GenRegister(MULH_DU, rk, rj, rd); ++} ++ ++void Assembler::mulw_d_w(Register rd, Register rj, Register rk) { ++ GenRegister(MULW_D_W, rk, rj, rd); ++} ++ ++void Assembler::mulw_d_wu(Register rd, Register rj, Register rk) { ++ GenRegister(MULW_D_WU, rk, rj, rd); ++} ++ ++void Assembler::div_w(Register rd, Register rj, Register rk) { ++ GenRegister(DIV_W, rk, rj, rd); ++} ++ ++void Assembler::mod_w(Register rd, Register rj, Register rk) { ++ GenRegister(MOD_W, rk, rj, rd); ++} ++ ++void Assembler::div_wu(Register rd, Register rj, Register rk) { ++ GenRegister(DIV_WU, rk, rj, rd); ++} ++ ++void Assembler::mod_wu(Register rd, Register rj, Register rk) { ++ GenRegister(MOD_WU, rk, rj, rd); ++} ++ ++void Assembler::div_d(Register rd, Register rj, Register rk) { ++ GenRegister(DIV_D, rk, rj, rd); ++} ++ ++void Assembler::mod_d(Register rd, Register rj, Register rk) { ++ GenRegister(MOD_D, rk, rj, rd); ++} ++ ++void Assembler::div_du(Register rd, Register rj, Register rk) { ++ GenRegister(DIV_DU, rk, rj, rd); ++} ++ ++void Assembler::mod_du(Register rd, Register rj, Register rk) { ++ GenRegister(MOD_DU, rk, rj, rd); ++} ++ ++// Shifts. ++void Assembler::sll_w(Register rd, Register rj, Register rk) { ++ GenRegister(SLL_W, rk, rj, rd); ++} ++ ++void Assembler::srl_w(Register rd, Register rj, Register rk) { ++ GenRegister(SRL_W, rk, rj, rd); ++} ++ ++void Assembler::sra_w(Register rd, Register rj, Register rk) { ++ GenRegister(SRA_W, rk, rj, rd); ++} ++ ++void Assembler::rotr_w(Register rd, Register rj, Register rk) { ++ GenRegister(ROTR_W, rk, rj, rd); ++} ++ ++void Assembler::slli_w(Register rd, Register rj, int32_t ui5) { ++ DCHECK(is_uint5(ui5)); ++ GenImm(SLLI_W, ui5 + 0x20, rj, rd, 6); ++} ++ ++void Assembler::srli_w(Register rd, Register rj, int32_t ui5) { ++ DCHECK(is_uint5(ui5)); ++ GenImm(SRLI_W, ui5 + 0x20, rj, rd, 6); ++} ++ ++void Assembler::srai_w(Register rd, Register rj, int32_t ui5) { ++ DCHECK(is_uint5(ui5)); ++ GenImm(SRAI_W, ui5 + 0x20, rj, rd, 6); ++} ++ ++void Assembler::rotri_w(Register rd, Register rj, int32_t ui5) { ++ DCHECK(is_uint5(ui5)); ++ GenImm(ROTRI_W, ui5 + 0x20, rj, rd, 6); ++} ++ ++void Assembler::sll_d(Register rd, Register rj, Register rk) { ++ GenRegister(SLL_D, rk, rj, rd); ++} ++ ++void Assembler::srl_d(Register rd, Register rj, Register rk) { ++ GenRegister(SRL_D, rk, rj, rd); ++} ++ ++void Assembler::sra_d(Register rd, Register rj, Register rk) { ++ GenRegister(SRA_D, rk, rj, rd); ++} ++ ++void Assembler::rotr_d(Register rd, Register rj, Register rk) { ++ GenRegister(ROTR_D, rk, rj, rd); ++} ++ ++void Assembler::slli_d(Register rd, Register rj, int32_t ui6) { ++ GenImm(SLLI_D, ui6, rj, rd, 6); ++} ++ ++void Assembler::srli_d(Register rd, Register rj, int32_t ui6) { ++ GenImm(SRLI_D, ui6, rj, rd, 6); ++} ++ ++void Assembler::srai_d(Register rd, Register rj, int32_t ui6) { ++ GenImm(SRAI_D, ui6, rj, rd, 6); ++} ++ ++void Assembler::rotri_d(Register rd, Register rj, int32_t ui6) { ++ GenImm(ROTRI_D, ui6, rj, rd, 6); ++} ++ ++// Bit twiddling. 
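A detail of the shift emitters above that is easy to misread: the 32-bit forms (slli_w, srli_w, srai_w, rotri_w) pass ui5 + 0x20 into GenImm's 6-bit field, so the 5-bit shift amount is stored with the bit above it forced to 1, while the 64-bit forms store the plain 6-bit amount. A tiny sketch of the resulting field values, assuming kRkShift is 10 as the GenImm layout above suggests (opcode and register bits omitted; this is an illustration, not the patch's code):

    #include <cstdint>
    #include <cstdio>

    constexpr int kRkShift = 10;  // assumption: the 6-bit immediate field sits at instr[15:10]

    uint32_t ShiftField_w(uint32_t ui5) { return ((ui5 + 0x20u) & 0x3fu) << kRkShift; }
    uint32_t ShiftField_d(uint32_t ui6) { return (ui6 & 0x3fu) << kRkShift; }

    int main() {
      // shift amount 3: W form stores 0b100011, D form stores 0b000011
      std::printf("w: %#x  d: %#x\n", static_cast<unsigned>(ShiftField_w(3)),
                  static_cast<unsigned>(ShiftField_d(3)));
      return 0;
    }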
++void Assembler::ext_w_b(Register rd, Register rj) { ++ GenRegister(EXT_W_B, rj, rd); ++} ++ ++void Assembler::ext_w_h(Register rd, Register rj) { ++ GenRegister(EXT_W_H, rj, rd); ++} ++ ++void Assembler::clo_w(Register rd, Register rj) { GenRegister(CLO_W, rj, rd); } ++ ++void Assembler::clz_w(Register rd, Register rj) { GenRegister(CLZ_W, rj, rd); } ++ ++void Assembler::cto_w(Register rd, Register rj) { GenRegister(CTO_W, rj, rd); } ++ ++void Assembler::ctz_w(Register rd, Register rj) { GenRegister(CTZ_W, rj, rd); } ++ ++void Assembler::clo_d(Register rd, Register rj) { GenRegister(CLO_D, rj, rd); } ++ ++void Assembler::clz_d(Register rd, Register rj) { GenRegister(CLZ_D, rj, rd); } ++ ++void Assembler::cto_d(Register rd, Register rj) { GenRegister(CTO_D, rj, rd); } ++ ++void Assembler::ctz_d(Register rd, Register rj) { GenRegister(CTZ_D, rj, rd); } ++ ++void Assembler::bytepick_w(Register rd, Register rj, Register rk, int32_t sa2) { ++ DCHECK(is_uint2(sa2)); ++ GenImm(BYTEPICK_W, sa2, rk, rj, rd); ++} ++ ++void Assembler::bytepick_d(Register rd, Register rj, Register rk, int32_t sa3) { ++ GenImm(BYTEPICK_D, sa3, rk, rj, rd); ++} ++ ++void Assembler::revb_2h(Register rd, Register rj) { ++ GenRegister(REVB_2H, rj, rd); ++} ++ ++void Assembler::revb_4h(Register rd, Register rj) { ++ GenRegister(REVB_4H, rj, rd); ++} ++ ++void Assembler::revb_2w(Register rd, Register rj) { ++ GenRegister(REVB_2W, rj, rd); ++} ++ ++void Assembler::revb_d(Register rd, Register rj) { ++ GenRegister(REVB_D, rj, rd); ++} ++ ++void Assembler::revh_2w(Register rd, Register rj) { ++ GenRegister(REVH_2W, rj, rd); ++} ++ ++void Assembler::revh_d(Register rd, Register rj) { ++ GenRegister(REVH_D, rj, rd); ++} ++ ++void Assembler::bitrev_4b(Register rd, Register rj) { ++ GenRegister(BITREV_4B, rj, rd); ++} ++ ++void Assembler::bitrev_8b(Register rd, Register rj) { ++ GenRegister(BITREV_8B, rj, rd); ++} ++ ++void Assembler::bitrev_w(Register rd, Register rj) { ++ GenRegister(BITREV_W, rj, rd); ++} ++ ++void Assembler::bitrev_d(Register rd, Register rj) { ++ GenRegister(BITREV_D, rj, rd); ++} ++ ++void Assembler::bstrins_w(Register rd, Register rj, int32_t msbw, ++ int32_t lsbw) { ++ DCHECK(is_uint5(msbw) && is_uint5(lsbw)); ++ GenImm(BSTR_W, msbw + 0x20, lsbw, rj, rd); ++} ++ ++void Assembler::bstrins_d(Register rd, Register rj, int32_t msbd, ++ int32_t lsbd) { ++ GenImm(BSTRINS_D, msbd, lsbd, rj, rd); ++} ++ ++void Assembler::bstrpick_w(Register rd, Register rj, int32_t msbw, ++ int32_t lsbw) { ++ DCHECK(is_uint5(msbw) && is_uint5(lsbw)); ++ GenImm(BSTR_W, msbw + 0x20, lsbw + 0x20, rj, rd); ++} ++ ++void Assembler::bstrpick_d(Register rd, Register rj, int32_t msbd, ++ int32_t lsbd) { ++ GenImm(BSTRPICK_D, msbd, lsbd, rj, rd); ++} ++ ++void Assembler::maskeqz(Register rd, Register rj, Register rk) { ++ GenRegister(MASKEQZ, rk, rj, rd); ++} ++ ++void Assembler::masknez(Register rd, Register rj, Register rk) { ++ GenRegister(MASKNEZ, rk, rj, rd); ++} ++ ++// Memory-instructions ++void Assembler::ld_b(Register rd, Register rj, int32_t si12) { ++ GenImm(LD_B, si12, rj, rd, 12); ++} ++ ++void Assembler::ld_h(Register rd, Register rj, int32_t si12) { ++ GenImm(LD_H, si12, rj, rd, 12); ++} ++ ++void Assembler::ld_w(Register rd, Register rj, int32_t si12) { ++ GenImm(LD_W, si12, rj, rd, 12); ++} ++ ++void Assembler::ld_d(Register rd, Register rj, int32_t si12) { ++ GenImm(LD_D, si12, rj, rd, 12); ++} ++ ++void Assembler::ld_bu(Register rd, Register rj, int32_t si12) { ++ GenImm(LD_BU, si12, rj, rd, 12); ++} ++ ++void 
Assembler::ld_hu(Register rd, Register rj, int32_t si12) { ++ GenImm(LD_HU, si12, rj, rd, 12); ++} ++ ++void Assembler::ld_wu(Register rd, Register rj, int32_t si12) { ++ GenImm(LD_WU, si12, rj, rd, 12); ++} ++ ++void Assembler::st_b(Register rd, Register rj, int32_t si12) { ++ GenImm(ST_B, si12, rj, rd, 12); ++} ++ ++void Assembler::st_h(Register rd, Register rj, int32_t si12) { ++ GenImm(ST_H, si12, rj, rd, 12); ++} ++ ++void Assembler::st_w(Register rd, Register rj, int32_t si12) { ++ GenImm(ST_W, si12, rj, rd, 12); ++} ++ ++void Assembler::st_d(Register rd, Register rj, int32_t si12) { ++ GenImm(ST_D, si12, rj, rd, 12); ++} ++ ++void Assembler::ldx_b(Register rd, Register rj, Register rk) { ++ GenRegister(LDX_B, rk, rj, rd); ++} ++ ++void Assembler::ldx_h(Register rd, Register rj, Register rk) { ++ GenRegister(LDX_H, rk, rj, rd); ++} ++ ++void Assembler::ldx_w(Register rd, Register rj, Register rk) { ++ GenRegister(LDX_W, rk, rj, rd); ++} ++ ++void Assembler::ldx_d(Register rd, Register rj, Register rk) { ++ GenRegister(LDX_D, rk, rj, rd); ++} ++ ++void Assembler::ldx_bu(Register rd, Register rj, Register rk) { ++ GenRegister(LDX_BU, rk, rj, rd); ++} ++ ++void Assembler::ldx_hu(Register rd, Register rj, Register rk) { ++ GenRegister(LDX_HU, rk, rj, rd); ++} ++ ++void Assembler::ldx_wu(Register rd, Register rj, Register rk) { ++ GenRegister(LDX_WU, rk, rj, rd); ++} ++ ++void Assembler::stx_b(Register rd, Register rj, Register rk) { ++ GenRegister(STX_B, rk, rj, rd); ++} ++ ++void Assembler::stx_h(Register rd, Register rj, Register rk) { ++ GenRegister(STX_H, rk, rj, rd); ++} ++ ++void Assembler::stx_w(Register rd, Register rj, Register rk) { ++ GenRegister(STX_W, rk, rj, rd); ++} ++ ++void Assembler::stx_d(Register rd, Register rj, Register rk) { ++ GenRegister(STX_D, rk, rj, rd); ++} ++ ++void Assembler::ldptr_w(Register rd, Register rj, int32_t si14) { ++ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0)); ++ GenImm(LDPTR_W, si14 >> 2, rj, rd, 14); ++} ++ ++void Assembler::ldptr_d(Register rd, Register rj, int32_t si14) { ++ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0)); ++ GenImm(LDPTR_D, si14 >> 2, rj, rd, 14); ++} ++ ++void Assembler::stptr_w(Register rd, Register rj, int32_t si14) { ++ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0)); ++ GenImm(STPTR_W, si14 >> 2, rj, rd, 14); ++} ++ ++void Assembler::stptr_d(Register rd, Register rj, int32_t si14) { ++ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0)); ++ GenImm(STPTR_D, si14 >> 2, rj, rd, 14); ++} ++ ++void Assembler::preld(int32_t hint, Register rj, int32_t si12) { ++ DCHECK(is_uint5(hint)); ++ GenImm(PRELD, si12, rj, Register::from_code(hint), 12); ++} ++ ++void Assembler::preldx(int32_t hint, Register rj, Register rk) { ++ DCHECK(is_uint5(hint)); ++ GenRegister(PRELDX, rk, rj, Register::from_code(hint)); ++} ++ ++void Assembler::ldgt_b(Register rd, Register rj, Register rk) { ++ GenRegister(LDGT_B, rk, rj, rd); ++} ++ ++void Assembler::ldgt_h(Register rd, Register rj, Register rk) { ++ GenRegister(LDGT_H, rk, rj, rd); ++} ++ ++void Assembler::ldgt_w(Register rd, Register rj, Register rk) { ++ GenRegister(LDGT_W, rk, rj, rd); ++} ++ ++void Assembler::ldgt_d(Register rd, Register rj, Register rk) { ++ GenRegister(LDGT_D, rk, rj, rd); ++} ++ ++void Assembler::ldle_b(Register rd, Register rj, Register rk) { ++ GenRegister(LDLE_B, rk, rj, rd); ++} ++ ++void Assembler::ldle_h(Register rd, Register rj, Register rk) { ++ GenRegister(LDLE_H, rk, rj, rd); ++} ++ ++void Assembler::ldle_w(Register rd, Register rj, Register rk) { ++ 
GenRegister(LDLE_W, rk, rj, rd); ++} ++ ++void Assembler::ldle_d(Register rd, Register rj, Register rk) { ++ GenRegister(LDLE_D, rk, rj, rd); ++} ++ ++void Assembler::stgt_b(Register rd, Register rj, Register rk) { ++ GenRegister(STGT_B, rk, rj, rd); ++} ++ ++void Assembler::stgt_h(Register rd, Register rj, Register rk) { ++ GenRegister(STGT_H, rk, rj, rd); ++} ++ ++void Assembler::stgt_w(Register rd, Register rj, Register rk) { ++ GenRegister(STGT_W, rk, rj, rd); ++} ++ ++void Assembler::stgt_d(Register rd, Register rj, Register rk) { ++ GenRegister(STGT_D, rk, rj, rd); ++} ++ ++void Assembler::stle_b(Register rd, Register rj, Register rk) { ++ GenRegister(STLE_B, rk, rj, rd); ++} ++ ++void Assembler::stle_h(Register rd, Register rj, Register rk) { ++ GenRegister(STLE_H, rk, rj, rd); ++} ++ ++void Assembler::stle_w(Register rd, Register rj, Register rk) { ++ GenRegister(STLE_W, rk, rj, rd); ++} ++ ++void Assembler::stle_d(Register rd, Register rj, Register rk) { ++ GenRegister(STLE_D, rk, rj, rd); ++} ++ ++void Assembler::amswap_w(Register rd, Register rk, Register rj) { ++ GenRegister(AMSWAP_W, rk, rj, rd); ++} ++ ++void Assembler::amswap_d(Register rd, Register rk, Register rj) { ++ GenRegister(AMSWAP_D, rk, rj, rd); ++} ++ ++void Assembler::amadd_w(Register rd, Register rk, Register rj) { ++ GenRegister(AMADD_W, rk, rj, rd); ++} ++ ++void Assembler::amadd_d(Register rd, Register rk, Register rj) { ++ GenRegister(AMADD_D, rk, rj, rd); ++} ++ ++void Assembler::amand_w(Register rd, Register rk, Register rj) { ++ GenRegister(AMAND_W, rk, rj, rd); ++} ++ ++void Assembler::amand_d(Register rd, Register rk, Register rj) { ++ GenRegister(AMAND_D, rk, rj, rd); ++} ++ ++void Assembler::amor_w(Register rd, Register rk, Register rj) { ++ GenRegister(AMOR_W, rk, rj, rd); ++} ++ ++void Assembler::amor_d(Register rd, Register rk, Register rj) { ++ GenRegister(AMOR_D, rk, rj, rd); ++} ++ ++void Assembler::amxor_w(Register rd, Register rk, Register rj) { ++ GenRegister(AMXOR_W, rk, rj, rd); ++} ++ ++void Assembler::amxor_d(Register rd, Register rk, Register rj) { ++ GenRegister(AMXOR_D, rk, rj, rd); ++} ++ ++void Assembler::ammax_w(Register rd, Register rk, Register rj) { ++ GenRegister(AMMAX_W, rk, rj, rd); ++} ++ ++void Assembler::ammax_d(Register rd, Register rk, Register rj) { ++ GenRegister(AMMAX_D, rk, rj, rd); ++} ++ ++void Assembler::ammin_w(Register rd, Register rk, Register rj) { ++ GenRegister(AMMIN_W, rk, rj, rd); ++} ++ ++void Assembler::ammin_d(Register rd, Register rk, Register rj) { ++ GenRegister(AMMIN_D, rk, rj, rd); ++} ++ ++void Assembler::ammax_wu(Register rd, Register rk, Register rj) { ++ GenRegister(AMMAX_WU, rk, rj, rd); ++} ++ ++void Assembler::ammax_du(Register rd, Register rk, Register rj) { ++ GenRegister(AMMAX_DU, rk, rj, rd); ++} ++ ++void Assembler::ammin_wu(Register rd, Register rk, Register rj) { ++ GenRegister(AMMIN_WU, rk, rj, rd); ++} ++ ++void Assembler::ammin_du(Register rd, Register rk, Register rj) { ++ GenRegister(AMMIN_DU, rk, rj, rd); ++} ++ ++void Assembler::amswap_db_w(Register rd, Register rk, Register rj) { ++ GenRegister(AMSWAP_DB_W, rk, rj, rd); ++} ++ ++void Assembler::amswap_db_d(Register rd, Register rk, Register rj) { ++ GenRegister(AMSWAP_DB_D, rk, rj, rd); ++} ++ ++void Assembler::amadd_db_w(Register rd, Register rk, Register rj) { ++ GenRegister(AMADD_DB_W, rk, rj, rd); ++} ++ ++void Assembler::amadd_db_d(Register rd, Register rk, Register rj) { ++ GenRegister(AMADD_DB_D, rk, rj, rd); ++} ++ ++void Assembler::amand_db_w(Register rd, Register rk, 
Register rj) { ++ GenRegister(AMAND_DB_W, rk, rj, rd); ++} ++ ++void Assembler::amand_db_d(Register rd, Register rk, Register rj) { ++ GenRegister(AMAND_DB_D, rk, rj, rd); ++} ++ ++void Assembler::amor_db_w(Register rd, Register rk, Register rj) { ++ GenRegister(AMOR_DB_W, rk, rj, rd); ++} ++ ++void Assembler::amor_db_d(Register rd, Register rk, Register rj) { ++ GenRegister(AMOR_DB_D, rk, rj, rd); ++} ++ ++void Assembler::amxor_db_w(Register rd, Register rk, Register rj) { ++ GenRegister(AMXOR_DB_W, rk, rj, rd); ++} ++ ++void Assembler::amxor_db_d(Register rd, Register rk, Register rj) { ++ GenRegister(AMXOR_DB_D, rk, rj, rd); ++} ++ ++void Assembler::ammax_db_w(Register rd, Register rk, Register rj) { ++ GenRegister(AMMAX_DB_W, rk, rj, rd); ++} ++ ++void Assembler::ammax_db_d(Register rd, Register rk, Register rj) { ++ GenRegister(AMMAX_DB_D, rk, rj, rd); ++} ++ ++void Assembler::ammin_db_w(Register rd, Register rk, Register rj) { ++ GenRegister(AMMIN_DB_W, rk, rj, rd); ++} ++ ++void Assembler::ammin_db_d(Register rd, Register rk, Register rj) { ++ GenRegister(AMMIN_DB_D, rk, rj, rd); ++} ++ ++void Assembler::ammax_db_wu(Register rd, Register rk, Register rj) { ++ GenRegister(AMMAX_DB_WU, rk, rj, rd); ++} ++ ++void Assembler::ammax_db_du(Register rd, Register rk, Register rj) { ++ GenRegister(AMMAX_DB_DU, rk, rj, rd); ++} ++ ++void Assembler::ammin_db_wu(Register rd, Register rk, Register rj) { ++ GenRegister(AMMIN_DB_WU, rk, rj, rd); ++} ++ ++void Assembler::ammin_db_du(Register rd, Register rk, Register rj) { ++ GenRegister(AMMIN_DB_DU, rk, rj, rd); ++} ++ ++void Assembler::ll_w(Register rd, Register rj, int32_t si14) { ++ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0)); ++ GenImm(LL_W, si14 >> 2, rj, rd, 14); ++} ++ ++void Assembler::ll_d(Register rd, Register rj, int32_t si14) { ++ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0)); ++ GenImm(LL_D, si14 >> 2, rj, rd, 14); ++} ++ ++void Assembler::sc_w(Register rd, Register rj, int32_t si14) { ++ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0)); ++ GenImm(SC_W, si14 >> 2, rj, rd, 14); ++} ++ ++void Assembler::sc_d(Register rd, Register rj, int32_t si14) { ++ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0)); ++ GenImm(SC_D, si14 >> 2, rj, rd, 14); ++} ++ ++void Assembler::dbar(int32_t hint) { GenImm(DBAR, hint); } ++ ++void Assembler::ibar(int32_t hint) { GenImm(IBAR, hint); } ++ ++void Assembler::crc_w_b_w(Register rd, Register rj, Register rk) { ++ GenRegister(CRC_W_B_W, rk, rj, rd); ++} ++ ++void Assembler::crc_w_h_w(Register rd, Register rj, Register rk) { ++ GenRegister(CRC_W_H_W, rk, rj, rd); ++} ++ ++void Assembler::crc_w_w_w(Register rd, Register rj, Register rk) { ++ GenRegister(CRC_W_W_W, rk, rj, rd); ++} ++ ++void Assembler::crc_w_d_w(Register rd, Register rj, Register rk) { ++ GenRegister(CRC_W_D_W, rk, rj, rd); ++} ++ ++void Assembler::crcc_w_b_w(Register rd, Register rj, Register rk) { ++ GenRegister(CRCC_W_B_W, rk, rj, rd); ++} ++ ++void Assembler::crcc_w_h_w(Register rd, Register rj, Register rk) { ++ GenRegister(CRCC_W_H_W, rk, rj, rd); ++} ++ ++void Assembler::crcc_w_w_w(Register rd, Register rj, Register rk) { ++ GenRegister(CRCC_W_W_W, rk, rj, rd); ++} ++ ++void Assembler::crcc_w_d_w(Register rd, Register rj, Register rk) { ++ GenRegister(CRCC_W_D_W, rk, rj, rd); ++} ++ ++void Assembler::syscall(int32_t code) { GenImm(SYSCALL, code); } ++ ++void Assembler::asrtle_d(Register rj, Register rk) { ++ GenRegister(ASRTLE_D, rk, rj, false); ++} ++ ++void Assembler::asrtgt_d(Register rj, Register rk) { ++ GenRegister(ASRTGT_D, rk, rj, 
false); ++} ++ ++void Assembler::rdtimel_w(Register rd, Register rj) { ++ GenRegister(RDTIMEL_W, rj, rd); ++} ++ ++void Assembler::rdtimeh_w(Register rd, Register rj) { ++ GenRegister(RDTIMEH_W, rj, rd); ++} ++ ++void Assembler::rdtime_d(Register rd, Register rj) { ++ GenRegister(RDTIME_D, rj, rd); ++} ++ ++void Assembler::cpucfg(Register rd, Register rj) { ++ GenRegister(CPUCFG_W, rj, rd); ++} ++ ++// Break / Trap instructions. ++void Assembler::break_(uint32_t code, bool break_as_stop) { ++ DCHECK( ++ (break_as_stop && code <= kMaxStopCode && code > kMaxWatchpointCode) || ++ (!break_as_stop && (code > kMaxStopCode || code <= kMaxWatchpointCode))); ++ GenImm(BREAK, code); ++} ++ ++void Assembler::stop(uint32_t code) { ++ DCHECK_GT(code, kMaxWatchpointCode); ++ DCHECK_LE(code, kMaxStopCode); ++#if defined(V8_HOST_ARCH_LA64) ++ break_(0x4321); ++#else // V8_HOST_ARCH_LA64 ++ break_(code, true); ++#endif ++} ++ ++void Assembler::fadd_s(FPURegister fd, FPURegister fj, FPURegister fk) { ++ GenRegister(FADD_S, fk, fj, fd); ++} ++ ++void Assembler::fadd_d(FPURegister fd, FPURegister fj, FPURegister fk) { ++ GenRegister(FADD_D, fk, fj, fd); ++} ++ ++void Assembler::fsub_s(FPURegister fd, FPURegister fj, FPURegister fk) { ++ GenRegister(FSUB_S, fk, fj, fd); ++} ++ ++void Assembler::fsub_d(FPURegister fd, FPURegister fj, FPURegister fk) { ++ GenRegister(FSUB_D, fk, fj, fd); ++} ++ ++void Assembler::fmul_s(FPURegister fd, FPURegister fj, FPURegister fk) { ++ GenRegister(FMUL_S, fk, fj, fd); ++} ++ ++void Assembler::fmul_d(FPURegister fd, FPURegister fj, FPURegister fk) { ++ GenRegister(FMUL_D, fk, fj, fd); ++} ++ ++void Assembler::fdiv_s(FPURegister fd, FPURegister fj, FPURegister fk) { ++ GenRegister(FDIV_S, fk, fj, fd); ++} ++ ++void Assembler::fdiv_d(FPURegister fd, FPURegister fj, FPURegister fk) { ++ GenRegister(FDIV_D, fk, fj, fd); ++} ++ ++void Assembler::fmadd_s(FPURegister fd, FPURegister fj, FPURegister fk, ++ FPURegister fa) { ++ GenRegister(FMADD_S, fa, fk, fj, fd); ++} ++ ++void Assembler::fmadd_d(FPURegister fd, FPURegister fj, FPURegister fk, ++ FPURegister fa) { ++ GenRegister(FMADD_D, fa, fk, fj, fd); ++} ++ ++void Assembler::fmsub_s(FPURegister fd, FPURegister fj, FPURegister fk, ++ FPURegister fa) { ++ GenRegister(FMSUB_S, fa, fk, fj, fd); ++} ++ ++void Assembler::fmsub_d(FPURegister fd, FPURegister fj, FPURegister fk, ++ FPURegister fa) { ++ GenRegister(FMSUB_D, fa, fk, fj, fd); ++} ++ ++void Assembler::fnmadd_s(FPURegister fd, FPURegister fj, FPURegister fk, ++ FPURegister fa) { ++ GenRegister(FNMADD_S, fa, fk, fj, fd); ++} ++ ++void Assembler::fnmadd_d(FPURegister fd, FPURegister fj, FPURegister fk, ++ FPURegister fa) { ++ GenRegister(FNMADD_D, fa, fk, fj, fd); ++} ++ ++void Assembler::fnmsub_s(FPURegister fd, FPURegister fj, FPURegister fk, ++ FPURegister fa) { ++ GenRegister(FNMSUB_S, fa, fk, fj, fd); ++} ++ ++void Assembler::fnmsub_d(FPURegister fd, FPURegister fj, FPURegister fk, ++ FPURegister fa) { ++ GenRegister(FNMSUB_D, fa, fk, fj, fd); ++} ++ ++void Assembler::fmax_s(FPURegister fd, FPURegister fj, FPURegister fk) { ++ GenRegister(FMAX_S, fk, fj, fd); ++} ++ ++void Assembler::fmax_d(FPURegister fd, FPURegister fj, FPURegister fk) { ++ GenRegister(FMAX_D, fk, fj, fd); ++} ++ ++void Assembler::fmin_s(FPURegister fd, FPURegister fj, FPURegister fk) { ++ GenRegister(FMIN_S, fk, fj, fd); ++} ++ ++void Assembler::fmin_d(FPURegister fd, FPURegister fj, FPURegister fk) { ++ GenRegister(FMIN_D, fk, fj, fd); ++} ++ ++void Assembler::fmaxa_s(FPURegister fd, FPURegister fj, 
FPURegister fk) { ++ GenRegister(FMAXA_S, fk, fj, fd); ++} ++ ++void Assembler::fmaxa_d(FPURegister fd, FPURegister fj, FPURegister fk) { ++ GenRegister(FMAXA_D, fk, fj, fd); ++} ++ ++void Assembler::fmina_s(FPURegister fd, FPURegister fj, FPURegister fk) { ++ GenRegister(FMINA_S, fk, fj, fd); ++} ++ ++void Assembler::fmina_d(FPURegister fd, FPURegister fj, FPURegister fk) { ++ GenRegister(FMINA_D, fk, fj, fd); ++} ++ ++void Assembler::fabs_s(FPURegister fd, FPURegister fj) { ++ GenRegister(FABS_S, fj, fd); ++} ++ ++void Assembler::fabs_d(FPURegister fd, FPURegister fj) { ++ GenRegister(FABS_D, fj, fd); ++} ++ ++void Assembler::fneg_s(FPURegister fd, FPURegister fj) { ++ GenRegister(FNEG_S, fj, fd); ++} ++ ++void Assembler::fneg_d(FPURegister fd, FPURegister fj) { ++ GenRegister(FNEG_D, fj, fd); ++} ++ ++void Assembler::fsqrt_s(FPURegister fd, FPURegister fj) { ++ GenRegister(FSQRT_S, fj, fd); ++} ++ ++void Assembler::fsqrt_d(FPURegister fd, FPURegister fj) { ++ GenRegister(FSQRT_D, fj, fd); ++} ++ ++void Assembler::frecip_s(FPURegister fd, FPURegister fj) { ++ GenRegister(FRECIP_S, fj, fd); ++} ++ ++void Assembler::frecip_d(FPURegister fd, FPURegister fj) { ++ GenRegister(FRECIP_D, fj, fd); ++} ++ ++void Assembler::frsqrt_s(FPURegister fd, FPURegister fj) { ++ GenRegister(FRSQRT_S, fj, fd); ++} ++ ++void Assembler::frsqrt_d(FPURegister fd, FPURegister fj) { ++ GenRegister(FRSQRT_D, fj, fd); ++} ++ ++void Assembler::fscaleb_s(FPURegister fd, FPURegister fj, FPURegister fk) { ++ GenRegister(FSCALEB_S, fk, fj, fd); ++} ++ ++void Assembler::fscaleb_d(FPURegister fd, FPURegister fj, FPURegister fk) { ++ GenRegister(FSCALEB_D, fk, fj, fd); ++} ++ ++void Assembler::flogb_s(FPURegister fd, FPURegister fj) { ++ GenRegister(FLOGB_S, fj, fd); ++} ++ ++void Assembler::flogb_d(FPURegister fd, FPURegister fj) { ++ GenRegister(FLOGB_D, fj, fd); ++} ++ ++void Assembler::fcopysign_s(FPURegister fd, FPURegister fj, FPURegister fk) { ++ GenRegister(FCOPYSIGN_S, fk, fj, fd); ++} ++ ++void Assembler::fcopysign_d(FPURegister fd, FPURegister fj, FPURegister fk) { ++ GenRegister(FCOPYSIGN_D, fk, fj, fd); ++} ++ ++void Assembler::fclass_s(FPURegister fd, FPURegister fj) { ++ GenRegister(FCLASS_S, fj, fd); ++} ++ ++void Assembler::fclass_d(FPURegister fd, FPURegister fj) { ++ GenRegister(FCLASS_D, fj, fd); ++} ++ ++void Assembler::fcmp_cond_s(FPUCondition cc, FPURegister fj, FPURegister fk, ++ CFRegister cd) { ++ GenCmp(FCMP_COND_S, cc, fk, fj, cd); ++} ++ ++void Assembler::fcmp_cond_d(FPUCondition cc, FPURegister fj, FPURegister fk, ++ CFRegister cd) { ++ GenCmp(FCMP_COND_D, cc, fk, fj, cd); ++} ++ ++void Assembler::fcvt_s_d(FPURegister fd, FPURegister fj) { ++ GenRegister(FCVT_S_D, fj, fd); ++} ++ ++void Assembler::fcvt_d_s(FPURegister fd, FPURegister fj) { ++ GenRegister(FCVT_D_S, fj, fd); ++} ++ ++void Assembler::ffint_s_w(FPURegister fd, FPURegister fj) { ++ GenRegister(FFINT_S_W, fj, fd); ++} ++ ++void Assembler::ffint_s_l(FPURegister fd, FPURegister fj) { ++ GenRegister(FFINT_S_L, fj, fd); ++} ++ ++void Assembler::ffint_d_w(FPURegister fd, FPURegister fj) { ++ GenRegister(FFINT_D_W, fj, fd); ++} ++ ++void Assembler::ffint_d_l(FPURegister fd, FPURegister fj) { ++ GenRegister(FFINT_D_L, fj, fd); ++} ++ ++void Assembler::ftint_w_s(FPURegister fd, FPURegister fj) { ++ GenRegister(FTINT_W_S, fj, fd); ++} ++ ++void Assembler::ftint_w_d(FPURegister fd, FPURegister fj) { ++ GenRegister(FTINT_W_D, fj, fd); ++} ++ ++void Assembler::ftint_l_s(FPURegister fd, FPURegister fj) { ++ GenRegister(FTINT_L_S, fj, fd); ++} ++ 
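The ftint* conversions that follow encode the rounding mode in their suffix: rm rounds toward minus infinity, rp toward plus infinity, rz toward zero, and rne to nearest even; the unsuffixed ftint_* forms follow the rounding mode currently set in FCSR. A minimal sketch of a truncating double-to-int64 conversion built from the emitters defined immediately below (the register names and the assembler object are illustrative, not taken from the patch):

    // Truncate the double in f0 toward zero, then move the 64-bit result to a GPR.
    assembler.ftintrz_l_d(f1, f0);  // f1 = (int64_t)trunc(f0), round-toward-zero variant
    assembler.movfr2gr_d(t0, f1);   // copy the 64-bit integer result into general register t0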
++void Assembler::ftint_l_d(FPURegister fd, FPURegister fj) { ++ GenRegister(FTINT_L_D, fj, fd); ++} ++ ++void Assembler::ftintrm_w_s(FPURegister fd, FPURegister fj) { ++ GenRegister(FTINTRM_W_S, fj, fd); ++} ++ ++void Assembler::ftintrm_w_d(FPURegister fd, FPURegister fj) { ++ GenRegister(FTINTRM_W_D, fj, fd); ++} ++ ++void Assembler::ftintrm_l_s(FPURegister fd, FPURegister fj) { ++ GenRegister(FTINTRM_L_S, fj, fd); ++} ++ ++void Assembler::ftintrm_l_d(FPURegister fd, FPURegister fj) { ++ GenRegister(FTINTRM_L_D, fj, fd); ++} ++ ++void Assembler::ftintrp_w_s(FPURegister fd, FPURegister fj) { ++ GenRegister(FTINTRP_W_S, fj, fd); ++} ++ ++void Assembler::ftintrp_w_d(FPURegister fd, FPURegister fj) { ++ GenRegister(FTINTRP_W_D, fj, fd); ++} ++ ++void Assembler::ftintrp_l_s(FPURegister fd, FPURegister fj) { ++ GenRegister(FTINTRP_L_S, fj, fd); ++} ++ ++void Assembler::ftintrp_l_d(FPURegister fd, FPURegister fj) { ++ GenRegister(FTINTRP_L_D, fj, fd); ++} ++ ++void Assembler::ftintrz_w_s(FPURegister fd, FPURegister fj) { ++ GenRegister(FTINTRZ_W_S, fj, fd); ++} ++ ++void Assembler::ftintrz_w_d(FPURegister fd, FPURegister fj) { ++ GenRegister(FTINTRZ_W_D, fj, fd); ++} ++ ++void Assembler::ftintrz_l_s(FPURegister fd, FPURegister fj) { ++ GenRegister(FTINTRZ_L_S, fj, fd); ++} ++ ++void Assembler::ftintrz_l_d(FPURegister fd, FPURegister fj) { ++ GenRegister(FTINTRZ_L_D, fj, fd); ++} ++ ++void Assembler::ftintrne_w_s(FPURegister fd, FPURegister fj) { ++ GenRegister(FTINTRNE_W_S, fj, fd); ++} ++ ++void Assembler::ftintrne_w_d(FPURegister fd, FPURegister fj) { ++ GenRegister(FTINTRNE_W_D, fj, fd); ++} ++ ++void Assembler::ftintrne_l_s(FPURegister fd, FPURegister fj) { ++ GenRegister(FTINTRNE_L_S, fj, fd); ++} ++ ++void Assembler::ftintrne_l_d(FPURegister fd, FPURegister fj) { ++ GenRegister(FTINTRNE_L_D, fj, fd); ++} ++ ++void Assembler::frint_s(FPURegister fd, FPURegister fj) { ++ GenRegister(FRINT_S, fj, fd); ++} ++ ++void Assembler::frint_d(FPURegister fd, FPURegister fj) { ++ GenRegister(FRINT_D, fj, fd); ++} ++ ++void Assembler::fmov_s(FPURegister fd, FPURegister fj) { ++ GenRegister(FMOV_S, fj, fd); ++} ++ ++void Assembler::fmov_d(FPURegister fd, FPURegister fj) { ++ GenRegister(FMOV_D, fj, fd); ++} ++ ++void Assembler::fsel(CFRegister ca, FPURegister fd, FPURegister fj, ++ FPURegister fk) { ++ GenSel(FSEL, ca, fk, fj, fd); ++} ++ ++void Assembler::movgr2fr_w(FPURegister fd, Register rj) { ++ GenRegister(MOVGR2FR_W, rj, fd); ++} ++ ++void Assembler::movgr2fr_d(FPURegister fd, Register rj) { ++ GenRegister(MOVGR2FR_D, rj, fd); ++} ++ ++void Assembler::movgr2frh_w(FPURegister fd, Register rj) { ++ GenRegister(MOVGR2FRH_W, rj, fd); ++} ++ ++void Assembler::movfr2gr_s(Register rd, FPURegister fj) { ++ GenRegister(MOVFR2GR_S, fj, rd); ++} ++ ++void Assembler::movfr2gr_d(Register rd, FPURegister fj) { ++ GenRegister(MOVFR2GR_D, fj, rd); ++} ++ ++void Assembler::movfrh2gr_s(Register rd, FPURegister fj) { ++ GenRegister(MOVFRH2GR_S, fj, rd); ++} ++ ++void Assembler::movgr2fcsr(Register rj) { GenRegister(MOVGR2FCSR, rj, FCSR); } ++ ++void Assembler::movfcsr2gr(Register rd) { GenRegister(MOVFCSR2GR, FCSR, rd); } ++ ++void Assembler::movfr2cf(CFRegister cd, FPURegister fj) { ++ GenRegister(MOVFR2CF, fj, cd); ++} ++ ++void Assembler::movcf2fr(FPURegister fd, CFRegister cj) { ++ GenRegister(MOVCF2FR, cj, fd); ++} ++ ++void Assembler::movgr2cf(CFRegister cd, Register rj) { ++ GenRegister(MOVGR2CF, rj, cd); ++} ++ ++void Assembler::movcf2gr(Register rd, CFRegister cj) { ++ GenRegister(MOVCF2GR, cj, rd); ++} ++ 
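The ll_*/sc_* and dbar emitters implemented earlier in this file are the building blocks for atomic read-modify-write sequences. A rough sketch of an atomic 64-bit increment assembled with them (register, label, and barrier-hint choices are illustrative only, not prescribed by the patch):

    // Atomically increment the 64-bit word at [a0].
    Label retry;
    assembler.bind(&retry);
    assembler.ll_d(t0, a0, 0);     // load-linked: t0 = *(int64_t*)a0
    assembler.addi_d(t0, t0, 1);   // compute the new value
    assembler.sc_d(t0, a0, 0);     // store-conditional: t0 becomes 1 on success, 0 on failure
    assembler.beqz(t0, &retry);    // conditional store failed; retry the whole sequence
    assembler.dbar(0);             // full barrier after the successful update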
++void Assembler::fld_s(FPURegister fd, Register rj, int32_t si12) { ++ GenImm(FLD_S, si12, rj, fd); ++} ++ ++void Assembler::fld_d(FPURegister fd, Register rj, int32_t si12) { ++ GenImm(FLD_D, si12, rj, fd); ++} ++ ++void Assembler::fst_s(FPURegister fd, Register rj, int32_t si12) { ++ GenImm(FST_S, si12, rj, fd); ++} ++ ++void Assembler::fst_d(FPURegister fd, Register rj, int32_t si12) { ++ GenImm(FST_D, si12, rj, fd); ++} ++ ++void Assembler::fldx_s(FPURegister fd, Register rj, Register rk) { ++ GenRegister(FLDX_S, rk, rj, fd); ++} ++ ++void Assembler::fldx_d(FPURegister fd, Register rj, Register rk) { ++ GenRegister(FLDX_D, rk, rj, fd); ++} ++ ++void Assembler::fstx_s(FPURegister fd, Register rj, Register rk) { ++ GenRegister(FSTX_S, rk, rj, fd); ++} ++ ++void Assembler::fstx_d(FPURegister fd, Register rj, Register rk) { ++ GenRegister(FSTX_D, rk, rj, fd); ++} ++ ++void Assembler::fldgt_s(FPURegister fd, Register rj, Register rk) { ++ GenRegister(FLDGT_S, rk, rj, fd); ++} ++ ++void Assembler::fldgt_d(FPURegister fd, Register rj, Register rk) { ++ GenRegister(FLDGT_D, rk, rj, fd); ++} ++ ++void Assembler::fldle_s(FPURegister fd, Register rj, Register rk) { ++ GenRegister(FLDLE_S, rk, rj, fd); ++} ++ ++void Assembler::fldle_d(FPURegister fd, Register rj, Register rk) { ++ GenRegister(FLDLE_D, rk, rj, fd); ++} ++ ++void Assembler::fstgt_s(FPURegister fd, Register rj, Register rk) { ++ GenRegister(FSTGT_S, rk, rj, fd); ++} ++ ++void Assembler::fstgt_d(FPURegister fd, Register rj, Register rk) { ++ GenRegister(FSTGT_D, rk, rj, fd); ++} ++ ++void Assembler::fstle_s(FPURegister fd, Register rj, Register rk) { ++ GenRegister(FSTLE_S, rk, rj, fd); ++} ++ ++void Assembler::fstle_d(FPURegister fd, Register rj, Register rk) { ++ GenRegister(FSTLE_D, rk, rj, fd); ++} ++ ++// ------------Memory-instructions------------- ++ ++/*void Assembler::AdjustBaseAndOffset(MemOperand* src, ++ OffsetAccessType access_type, ++ int second_access_add_to_offset) { ++ // TODO should be optimized. ++ // This method is used to adjust the base register and offset pair ++ // for a load/store when the offset doesn't fit into int12_t. ++ ++ bool doubleword_aligned = (src->offset() & (kDoubleSize - 1)) == 0; ++ bool two_accesses = static_cast(access_type) || !doubleword_aligned; ++ DCHECK_LE(second_access_add_to_offset, 7); // Must be <= 7. ++ ++ // is_int12 must be passed a signed value, hence the static cast below. ++ if (is_int12(src->offset()) && ++ (!two_accesses || is_int12(static_cast( ++ src->offset() + second_access_add_to_offset)))) { ++ // Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified ++ // value) fits into int16_t. ++ return; ++ } ++ ++ DCHECK(src->rm() != ++ at); // Must not overwrite the register 'base' while loading 'offset'. ++ ++#ifdef DEBUG ++ // Remember the "(mis)alignment" of 'offset', it will be checked at the end. ++ uint32_t misalignment = src->offset() & (kDoubleSize - 1); ++#endif ++ ++ // Do not load the whole 32-bit 'offset' if it can be represented as ++ // a sum of two 16-bit signed offsets. This can save an instruction or two. ++ // To simplify matters, only do this for a symmetric range of offsets from ++ // about -64KB to about +64KB, allowing further addition of 4 when accessing ++ // 64-bit variables with two 32-bit accesses. ++ constexpr int32_t kMinOffsetForSimpleAdjustment = ++ 0x7FF8; // Max int16_t that's a multiple of 8. 
++ constexpr int32_t kMaxOffsetForSimpleAdjustment = ++ 2 * kMinOffsetForSimpleAdjustment; ++ ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ if (0 <= src->offset() && src->offset() <= kMaxOffsetForSimpleAdjustment) { ++ daddiu(scratch, src->rm(), kMinOffsetForSimpleAdjustment); ++ src->offset_ -= kMinOffsetForSimpleAdjustment; ++ } else if (-kMaxOffsetForSimpleAdjustment <= src->offset() && ++ src->offset() < 0) { ++ daddiu(scratch, src->rm(), -kMinOffsetForSimpleAdjustment); ++ src->offset_ += kMinOffsetForSimpleAdjustment; ++ } else if (kArchVariant == kMips64r6) { ++ // On r6 take advantage of the daui instruction, e.g.: ++ // daui at, base, offset_high ++ // [dahi at, 1] // When `offset` is close to +2GB. ++ // lw reg_lo, offset_low(at) ++ // [lw reg_hi, (offset_low+4)(at)] // If misaligned 64-bit load. ++ // or when offset_low+4 overflows int16_t: ++ // daui at, base, offset_high ++ // daddiu at, at, 8 ++ // lw reg_lo, (offset_low-8)(at) ++ // lw reg_hi, (offset_low-4)(at) ++ int16_t offset_low = static_cast(src->offset()); ++ int32_t offset_low32 = offset_low; ++ int16_t offset_high = static_cast(src->offset() >> 16); ++ bool increment_hi16 = offset_low < 0; ++ bool overflow_hi16 = false; ++ ++ if (increment_hi16) { ++ offset_high++; ++ overflow_hi16 = (offset_high == -32768); ++ } ++ daui(scratch, src->rm(), static_cast(offset_high)); ++ ++ if (overflow_hi16) { ++ dahi(scratch, 1); ++ } ++ ++ if (two_accesses && !is_int16(static_cast( ++ offset_low32 + second_access_add_to_offset))) { ++ // Avoid overflow in the 16-bit offset of the load/store instruction when ++ // adding 4. ++ daddiu(scratch, scratch, kDoubleSize); ++ offset_low32 -= kDoubleSize; ++ } ++ ++ src->offset_ = offset_low32; ++ } else { ++ // Do not load the whole 32-bit 'offset' if it can be represented as ++ // a sum of three 16-bit signed offsets. This can save an instruction. ++ // To simplify matters, only do this for a symmetric range of offsets from ++ // about -96KB to about +96KB, allowing further addition of 4 when accessing ++ // 64-bit variables with two 32-bit accesses. ++ constexpr int32_t kMinOffsetForMediumAdjustment = ++ 2 * kMinOffsetForSimpleAdjustment; ++ constexpr int32_t kMaxOffsetForMediumAdjustment = ++ 3 * kMinOffsetForSimpleAdjustment; ++ if (0 <= src->offset() && src->offset() <= kMaxOffsetForMediumAdjustment) { ++ daddiu(scratch, src->rm(), kMinOffsetForMediumAdjustment / 2); ++ daddiu(scratch, scratch, kMinOffsetForMediumAdjustment / 2); ++ src->offset_ -= kMinOffsetForMediumAdjustment; ++ } else if (-kMaxOffsetForMediumAdjustment <= src->offset() && ++ src->offset() < 0) { ++ daddiu(scratch, src->rm(), -kMinOffsetForMediumAdjustment / 2); ++ daddiu(scratch, scratch, -kMinOffsetForMediumAdjustment / 2); ++ src->offset_ += kMinOffsetForMediumAdjustment; ++ } else { ++ // Now that all shorter options have been exhausted, load the full 32-bit ++ // offset. ++ int32_t loaded_offset = RoundDown(src->offset(), kDoubleSize); ++ lui(scratch, (loaded_offset >> kLuiShift) & kImm16Mask); ++ ori(scratch, scratch, loaded_offset & kImm16Mask); // Load 32-bit offset. 
++ daddu(scratch, scratch, src->rm()); ++ src->offset_ -= loaded_offset; ++ } ++ } ++ src->rm_ = scratch; ++ ++ DCHECK(is_int16(src->offset())); ++ if (two_accesses) { ++ DCHECK(is_int16( ++ static_cast(src->offset() + second_access_add_to_offset))); ++ } ++ DCHECK(misalignment == (src->offset() & (kDoubleSize - 1))); ++}*/ ++ ++void Assembler::AdjustBaseAndOffset(MemOperand* src) { ++ // is_int12 must be passed a signed value, hence the static cast below. ++ if ((!src->hasIndexReg() && is_int12(src->offset())) || src->hasIndexReg()) { ++ return; ++ } ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ if (is_uint12(static_cast(src->offset()))) { ++ ori(scratch, zero_reg, src->offset() & kImm12Mask); ++ } else { ++ lu12i_w(scratch, src->offset() >> 12 & 0xfffff); ++ if (src->offset() & kImm12Mask) { ++ ori(scratch, scratch, src->offset() & kImm12Mask); ++ } ++ } ++ src->index_ = scratch; ++ src->offset_ = 0; ++ // TODO can be optimized, for example 2 * [int12_min, int12_max] ++ // addi_d scratch base, offset/2 only on instr ++ // base = scratch ++ // offset = offset - offset / 2 ++} ++ ++int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc, ++ intptr_t pc_delta) { ++ if (RelocInfo::IsInternalReference(rmode)) { ++ int64_t* p = reinterpret_cast(pc); ++ if (*p == kEndOfJumpChain) { ++ return 0; // Number of instructions patched. ++ } ++ *p += pc_delta; ++ return 2; // Number of instructions patched. ++ } ++ abort(); ++ /* Instr instr = instr_at(pc); ++ DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode)); ++ if (IsLui(instr)) { ++ Instr instr_lui = instr_at(pc + 0 * kInstrSize); ++ Instr instr_ori = instr_at(pc + 1 * kInstrSize); ++ Instr instr_ori2 = instr_at(pc + 3 * kInstrSize); ++ DCHECK(IsOri(instr_ori)); ++ DCHECK(IsOri(instr_ori2)); ++ // TODO(plind): symbolic names for the shifts. ++ int64_t imm = (instr_lui & static_cast(kImm16Mask)) << 48; ++ imm |= (instr_ori & static_cast(kImm16Mask)) << 32; ++ imm |= (instr_ori2 & static_cast(kImm16Mask)) << 16; ++ // Sign extend address. ++ imm >>= 16; ++ ++ if (imm == kEndOfJumpChain) { ++ return 0; // Number of instructions patched. ++ } ++ imm += pc_delta; ++ DCHECK_EQ(imm & 3, 0); ++ ++ instr_lui &= ~kImm16Mask; ++ instr_ori &= ~kImm16Mask; ++ instr_ori2 &= ~kImm16Mask; ++ ++ instr_at_put(pc + 0 * kInstrSize, instr_lui | ((imm >> 32) & kImm16Mask)); ++ instr_at_put(pc + 1 * kInstrSize, instr_ori | (imm >> 16 & kImm16Mask)); ++ instr_at_put(pc + 3 * kInstrSize, instr_ori2 | (imm & kImm16Mask)); ++ return 4; // Number of instructions patched. ++ } else if (IsJ(instr) || IsJal(instr)) { ++ // Regular j/jal relocation. ++ uint32_t imm28 = (instr & static_cast(kImm26Mask)) << 2; ++ imm28 += pc_delta; ++ imm28 &= kImm28Mask; ++ instr &= ~kImm26Mask; ++ DCHECK_EQ(imm28 & 3, 0); ++ uint32_t imm26 = static_cast(imm28 >> 2); ++ instr_at_put(pc, instr | (imm26 & kImm26Mask)); ++ return 1; // Number of instructions patched. ++ } else { ++ DCHECK(((instr & kJumpRawMask) == kJRawMark) || ++ ((instr & kJumpRawMask) == kJalRawMark)); ++ // Unbox raw offset and emit j/jal. ++ int32_t imm28 = (instr & static_cast(kImm26Mask)) << 2; ++ // Sign extend 28-bit offset to 32-bit. ++ imm28 = (imm28 << 4) >> 4; ++ uint64_t target = ++ static_cast(imm28) + reinterpret_cast(pc); ++ target &= kImm28Mask; ++ DCHECK_EQ(imm28 & 3, 0); ++ uint32_t imm26 = static_cast(target >> 2); ++ // Check markings whether to emit j or jal. ++ uint32_t unbox = (instr & kJRawMark) ? 
J : JAL; ++ instr_at_put(pc, unbox | (imm26 & kImm26Mask)); ++ return 1; // Number of instructions patched. ++ }*/ ++} ++ ++void Assembler::GrowBuffer() { ++ // Compute new buffer size. ++ int old_size = buffer_->size(); ++ int new_size = std::min(2 * old_size, old_size + 1 * MB); ++ ++ // Some internal data structures overflow for very large buffers, ++ // they must ensure that kMaximalBufferSize is not too large. ++ if (new_size > kMaximalBufferSize) { ++ V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer"); ++ } ++ ++ // Set up new buffer. ++ std::unique_ptr new_buffer = buffer_->Grow(new_size); ++ DCHECK_EQ(new_size, new_buffer->size()); ++ byte* new_start = new_buffer->start(); ++ ++ // Copy the data. ++ intptr_t pc_delta = new_start - buffer_start_; ++ intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size); ++ size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos(); ++ MemMove(new_start, buffer_start_, pc_offset()); ++ MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(), ++ reloc_size); ++ ++ // Switch buffers. ++ buffer_ = std::move(new_buffer); ++ buffer_start_ = new_start; ++ pc_ += pc_delta; ++ reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, ++ reloc_info_writer.last_pc() + pc_delta); ++ ++ // Relocate runtime entries. ++ Vector instructions{buffer_start_, pc_offset()}; ++ Vector reloc_info{reloc_info_writer.pos(), reloc_size}; ++ for (RelocIterator it(instructions, reloc_info, 0); !it.done(); it.next()) { ++ RelocInfo::Mode rmode = it.rinfo()->rmode(); ++ if (rmode == RelocInfo::INTERNAL_REFERENCE) { ++ RelocateInternalReference(rmode, it.rinfo()->pc(), pc_delta); ++ } ++ } ++ DCHECK(!overflow()); ++} ++ ++void Assembler::db(uint8_t data) { ++ if (!is_buffer_growth_blocked()) { ++ CheckBuffer(); ++ } ++ *reinterpret_cast(pc_) = data; ++ pc_ += sizeof(uint8_t); ++} ++ ++void Assembler::dd(uint32_t data) { ++ if (!is_buffer_growth_blocked()) { ++ CheckBuffer(); ++ } ++ *reinterpret_cast(pc_) = data; ++ pc_ += sizeof(uint32_t); ++} ++ ++void Assembler::dq(uint64_t data) { ++ if (!is_buffer_growth_blocked()) { ++ CheckBuffer(); ++ } ++ *reinterpret_cast(pc_) = data; ++ pc_ += sizeof(uint64_t); ++} ++ ++void Assembler::dd(Label* label) { ++ if (!is_buffer_growth_blocked()) { ++ CheckBuffer(); ++ } ++ uint64_t data; ++ if (label->is_bound()) { ++ data = reinterpret_cast(buffer_start_ + label->pos()); ++ } else { ++ data = jump_address(label); ++ unbound_labels_count_++; ++ internal_reference_positions_.insert(label->pos()); ++ } ++ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE); ++ EmitHelper(data); ++} ++ ++void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { ++ if (!ShouldRecordRelocInfo(rmode)) return; ++ // We do not try to reuse pool constants. ++ RelocInfo rinfo(reinterpret_cast
(pc_), rmode, data, Code()); ++ DCHECK_GE(buffer_space(), kMaxRelocSize); // Too late to grow buffer here. ++ reloc_info_writer.Write(&rinfo); ++} ++ ++void Assembler::BlockTrampolinePoolFor(int instructions) { ++ CheckTrampolinePoolQuick(instructions); ++ BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize); ++} ++ ++void Assembler::CheckTrampolinePool() { ++ // Some small sequences of instructions must not be broken up by the ++ // insertion of a trampoline pool; such sequences are protected by setting ++ // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_, ++ // which are both checked here. Also, recursive calls to CheckTrampolinePool ++ // are blocked by trampoline_pool_blocked_nesting_. ++ if ((trampoline_pool_blocked_nesting_ > 0) || ++ (pc_offset() < no_trampoline_pool_before_)) { ++ // Emission is currently blocked; make sure we try again as soon as ++ // possible. ++ if (trampoline_pool_blocked_nesting_ > 0) { ++ next_buffer_check_ = pc_offset() + kInstrSize; ++ } else { ++ next_buffer_check_ = no_trampoline_pool_before_; ++ } ++ return; ++ } ++ ++ DCHECK(!trampoline_emitted_); ++ DCHECK_GE(unbound_labels_count_, 0); ++ if (unbound_labels_count_ > 0) { ++ // First we emit jump (2 instructions), then we emit trampoline pool. ++ { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Label after_pool; ++ b(&after_pool); ++ nop(); // TODO remove this ++ ++ int pool_start = pc_offset(); ++ for (int i = 0; i < unbound_labels_count_; i++) { ++ { ++ b(&after_pool); ++ nop(); // TODO remove this ++ } ++ } ++ nop(); ++ bind(&after_pool); ++ trampoline_ = Trampoline(pool_start, unbound_labels_count_); ++ ++ trampoline_emitted_ = true; ++ // As we are only going to emit trampoline once, we need to prevent any ++ // further emission. ++ next_buffer_check_ = kMaxInt; ++ } ++ } else { ++ // Number of branches to unbound label at this point is zero, so we can ++ // move next buffer check to maximum. ++ next_buffer_check_ = ++ pc_offset() + kMax16BranchOffset - kTrampolineSlotsSize * 16; ++ } ++ return; ++} ++ ++Address Assembler::target_address_at(Address pc) { ++ Instr instr0 = instr_at(pc); ++ Instr instr1 = instr_at(pc + 1 * kInstrSize); ++ Instr instr2 = instr_at(pc + 2 * kInstrSize); ++ ++ // Interpret 4 instructions for address generated by li: See listing in ++ // Assembler::set_target_address_at() just below. ++ DCHECK((IsLu12i_w(instr0) && (IsOri(instr1)) && (IsLu32i_d(instr2)))); ++ ++ // Assemble the 48 bit value. ++ uint64_t hi20 = ((uint64_t)(instr2 >> 5) & 0xfffff) << 32; ++ uint64_t mid20 = ((uint64_t)(instr0 >> 5) & 0xfffff) << 12; ++ uint64_t low12 = ((uint64_t)(instr1 >> 10) & 0xfff); ++ int64_t addr = static_cast(hi20 | mid20 | low12); ++ ++ // Sign extend to get canonical address. ++ addr = (addr << 16) >> 16; ++ // printf("add : 0x%lx 0x%lx 0x%lx 0x%lx\n", addr, hi20, mid20, low12); ++ return static_cast
(addr); ++} ++ ++// On la64, a target address is stored in a 3-instruction sequence: ++// 0: lu12i_w(rd, (j.imm64_ >> 12) & kImm20Mask); ++// 1: ori(rd, rd, j.imm64_ & kImm12Mask); ++// 2: lu32i_d(rd, (j.imm64_ >> 32) & kImm20Mask); ++// ++// Patching the address must replace all the lui & ori instructions, ++// and flush the i-cache. ++// ++// There is an optimization below, which emits a nop when the address ++// fits in just 16 bits. This is unlikely to help, and should be benchmarked, ++// and possibly removed. ++void Assembler::set_target_value_at(Address pc, uint64_t target, ++ ICacheFlushMode icache_flush_mode) { ++ // There is an optimization where only 3 instructions are used to load address ++ // in code on LA64 because only 48-bits of address is effectively used. ++ // It relies on fact the upper [63:48] bits are not used for virtual address ++ // translation and they have to be set according to value of bit 47 in order ++ // get canonical address. ++#ifdef DEBUG ++ // Check we have the result from a li macro-instruction. ++ Instr instr0 = instr_at(pc); ++ Instr instr1 = instr_at(pc + kInstrSize); ++ Instr instr2 = instr_at(pc + kInstrSize * 2); ++ DCHECK(IsLu12i_w(instr0) && IsOri(instr1) && IsLu32i_d(instr2)); ++#endif ++ ++ Instr instr = instr_at(pc); ++ uint32_t rd_code = GetRd(instr); ++ uint32_t* p = reinterpret_cast(pc); ++ ++ // Must use 3 instructions to insure patchable code. ++ // lu12i_w rd, middle-20. ++ // ori rd, rd, low-12. ++ // li32i_d rd, high-20. ++ *p = LU12I_W | (((target >> 12) & 0xfffff) << kRjShift) | rd_code; ++ *(p + 1) = ++ ORI | (target & 0xfff) << kRkShift | (rd_code << kRjShift) | rd_code; ++ *(p + 2) = LU32I_D | (((target >> 32) & 0xfffff) << kRjShift) | rd_code; ++ ++ if (icache_flush_mode != SKIP_ICACHE_FLUSH) { ++ FlushInstructionCache(pc, 3 * kInstrSize); ++ } ++} ++ ++UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler) ++ : available_(assembler->GetScratchRegisterList()), ++ old_available_(*available_) {} ++ ++UseScratchRegisterScope::~UseScratchRegisterScope() { ++ *available_ = old_available_; ++} ++ ++Register UseScratchRegisterScope::Acquire() { ++ DCHECK_NOT_NULL(available_); ++ DCHECK_NE(*available_, 0); ++ int index = static_cast(base::bits::CountTrailingZeros32(*available_)); ++ *available_ &= ~(1UL << index); ++ ++ return Register::from_code(index); ++} ++ ++bool UseScratchRegisterScope::hasAvailable() const { return *available_ != 0; } ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_TARGET_ARCH_LA64 +diff --git a/src/3rdparty/chromium/v8/src/codegen/la64/assembler-la64.h b/src/3rdparty/chromium/v8/src/codegen/la64/assembler-la64.h +new file mode 100644 +index 0000000000..03a0103b1c +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/codegen/la64/assembler-la64.h +@@ -0,0 +1,1171 @@ ++// Copyright (c) 1994-2006 Sun Microsystems Inc. ++// All Rights Reserved. ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// - Redistributions of source code must retain the above copyright notice, ++// this list of conditions and the following disclaimer. ++// ++// - Redistribution in binary form must reproduce the above copyright ++// notice, this list of conditions and the following disclaimer in the ++// documentation and/or other materials provided with the distribution. 
++// ++// - Neither the name of Sun Microsystems or the names of contributors may ++// be used to endorse or promote products derived from this software without ++// specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS ++// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, ++// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ++// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR ++// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ++// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ++// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ++// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ++// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ++// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++// The original source code covered by the above license above has been ++// modified significantly by Google Inc. ++// Copyright 2012 the V8 project authors. All rights reserved. ++ ++#ifndef V8_CODEGEN_LA64_ASSEMBLER_LA64_H_ ++#define V8_CODEGEN_LA64_ASSEMBLER_LA64_H_ ++ ++#include ++#include ++#include ++ ++#include "src/codegen/assembler.h" ++#include "src/codegen/external-reference.h" ++#include "src/codegen/la64/constants-la64.h" ++#include "src/codegen/la64/register-la64.h" ++#include "src/codegen/label.h" ++#include "src/objects/contexts.h" ++#include "src/objects/smi.h" ++ ++namespace v8 { ++namespace internal { ++ ++class SafepointTableBuilder; ++ ++// ----------------------------------------------------------------------------- ++// Machine instruction Operands. ++constexpr int kSmiShift = kSmiTagSize + kSmiShiftSize; ++constexpr uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1; ++// Class Operand represents a shifter operand in data processing instructions. ++class Operand { ++ public: ++ // Immediate. ++ V8_INLINE explicit Operand(int64_t immediate, ++ RelocInfo::Mode rmode = RelocInfo::NONE) ++ : rm_(no_reg), rmode_(rmode) { ++ value_.immediate = immediate; ++ } ++ V8_INLINE explicit Operand(const ExternalReference& f) ++ : rm_(no_reg), rmode_(RelocInfo::EXTERNAL_REFERENCE) { ++ value_.immediate = static_cast(f.address()); ++ } ++ V8_INLINE explicit Operand(const char* s); ++ explicit Operand(Handle handle); ++ V8_INLINE explicit Operand(Smi value) : rm_(no_reg), rmode_(RelocInfo::NONE) { ++ value_.immediate = static_cast(value.ptr()); ++ } ++ ++ static Operand EmbeddedNumber(double number); // Smi or HeapNumber. ++ static Operand EmbeddedStringConstant(const StringConstantBase* str); ++ ++ // Register. ++ V8_INLINE explicit Operand(Register rm) : rm_(rm) {} ++ ++ // Return true if this is a register operand. 
++ V8_INLINE bool is_reg() const; ++ ++ inline int64_t immediate() const; ++ ++ bool IsImmediate() const { return !rm_.is_valid(); } ++ ++ HeapObjectRequest heap_object_request() const { ++ DCHECK(IsHeapObjectRequest()); ++ return value_.heap_object_request; ++ } ++ ++ bool IsHeapObjectRequest() const { ++ DCHECK_IMPLIES(is_heap_object_request_, IsImmediate()); ++ DCHECK_IMPLIES(is_heap_object_request_, ++ rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT || ++ rmode_ == RelocInfo::CODE_TARGET); ++ return is_heap_object_request_; ++ } ++ ++ Register rm() const { return rm_; } ++ ++ RelocInfo::Mode rmode() const { return rmode_; } ++ ++ private: ++ Register rm_; ++ union Value { ++ Value() {} ++ HeapObjectRequest heap_object_request; // if is_heap_object_request_ ++ int64_t immediate; // otherwise ++ } value_; // valid if rm_ == no_reg ++ bool is_heap_object_request_ = false; ++ RelocInfo::Mode rmode_; ++ ++ friend class Assembler; ++ friend class MacroAssembler; ++}; ++ ++// Class MemOperand represents a memory operand in load and store instructions. ++// 1: base_reg + off_imm( si12 | si14<<2) ++// 2: base_reg + offset_reg ++class V8_EXPORT_PRIVATE MemOperand { ++ public: ++ explicit MemOperand(Register rj, int32_t offset = 0); ++ explicit MemOperand(Register rj, Register offset = no_reg); ++ Register base() const { return base_; } ++ Register index() const { return index_; } ++ int32_t offset() const { return offset_; } ++ ++ bool hasIndexReg() const { return index_ != no_reg; } ++ ++ private: ++ Register base_; // base ++ Register index_; // index ++ int32_t offset_; // offset ++ ++ friend class Assembler; ++}; ++ ++class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { ++ public: ++ // Create an assembler. Instructions and relocation information are emitted ++ // into a buffer, with the instructions starting from the beginning and the ++ // relocation information starting from the end of the buffer. See CodeDesc ++ // for a detailed comment on the layout (globals.h). ++ // ++ // If the provided buffer is nullptr, the assembler allocates and grows its ++ // own buffer. Otherwise it takes ownership of the provided buffer. ++ explicit Assembler(const AssemblerOptions&, ++ std::unique_ptr = {}); ++ ++ virtual ~Assembler() {} ++ ++ // GetCode emits any pending (non-emitted) code and fills the descriptor desc. ++ static constexpr int kNoHandlerTable = 0; ++ static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr; ++ void GetCode(Isolate* isolate, CodeDesc* desc, ++ SafepointTableBuilder* safepoint_table_builder, ++ int handler_table_offset); ++ ++ // Convenience wrapper for code without safepoint or handler tables. ++ void GetCode(Isolate* isolate, CodeDesc* desc) { ++ GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable); ++ } ++ ++ // Unused on this architecture. ++ void MaybeEmitOutOfLineConstantPool() {} ++ ++ // Label operations & relative jumps (PPUM Appendix D). ++ // ++ // Takes a branch opcode (cc) and a label (L) and generates ++ // either a backward branch or a forward branch and links it ++ // to the label fixup chain. Usage: ++ // ++ // Label L; // unbound label ++ // j(cc, &L); // forward branch to unbound label ++ // bind(&L); // bind label to the current pc ++ // j(cc, &L); // backward branch to bound label ++ // bind(&L); // illegal: a label may be bound only once ++ // ++ // Note: The same Label can be used for forward and backward branches ++ // but it may be bound only once. ++ void bind(Label* L); // Binds an unbound label L to current code position. 
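A label may be bound exactly once but can be branched to from both directions, as the comment above spells out. Instantiated with the branch emitters declared below (register names are illustrative, not taken from the patch):

    Label skip, loop;
    assembler.beqz(a0, &skip);     // forward branch to a label that is not yet bound
    assembler.bind(&loop);         // bind a target for backward branches
    assembler.addi_d(a0, a0, -1);  // ...loop body...
    assembler.bnez(a0, &loop);     // backward branch to the already-bound label
    assembler.bind(&skip);         // binding resolves the earlier forward branch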
++ ++ enum OffsetSize : int { kOffset26 = 26, kOffset21 = 21, kOffset16 = 16 }; ++ ++ // Determines if Label is bound and near enough so that branch instruction ++ // can be used to reach it, instead of jump instruction. ++ // c means conditinal branch, a means always branch. ++ bool is_near_c(Label* L); ++ bool is_near(Label* L, OffsetSize bits); ++ bool is_near_a(Label* L); ++ ++ int BranchOffset(Instr instr); ++ ++ // Returns the branch offset to the given label from the current code ++ // position. Links the label to the current position if it is still unbound. ++ // Manages the jump elimination optimization if the second parameter is true. ++ int32_t branch_offset_helper(Label* L, OffsetSize bits); ++ inline int32_t branch_offset(Label* L) { ++ return branch_offset_helper(L, OffsetSize::kOffset16); ++ } ++ inline int32_t branch_offset21(Label* L) { ++ return branch_offset_helper(L, OffsetSize::kOffset21); ++ } ++ inline int32_t branch_offset26(Label* L) { ++ return branch_offset_helper(L, OffsetSize::kOffset26); ++ } ++ inline int32_t shifted_branch_offset(Label* L) { ++ return branch_offset(L) >> 2; ++ } ++ inline int32_t shifted_branch_offset21(Label* L) { ++ return branch_offset21(L) >> 2; ++ } ++ inline int32_t shifted_branch_offset26(Label* L) { ++ return branch_offset26(L) >> 2; ++ } ++ uint64_t jump_address(Label* L); ++ uint64_t jump_offset(Label* L); ++ uint64_t branch_long_offset(Label* L); ++ ++ // Puts a labels target address at the given position. ++ // The high 8 bits are set to zero. ++ void label_at_put(Label* L, int at_offset); ++ ++ // Read/Modify the code target address in the branch/call instruction at pc. ++ // The isolate argument is unused (and may be nullptr) when skipping flushing. ++ static Address target_address_at(Address pc); ++ V8_INLINE static void set_target_address_at( ++ Address pc, Address target, ++ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) { ++ set_target_value_at(pc, target, icache_flush_mode); ++ } ++ // On MIPS there is no Constant Pool so we skip that parameter. ++ V8_INLINE static Address target_address_at(Address pc, ++ Address constant_pool) { ++ return target_address_at(pc); ++ } ++ V8_INLINE static void set_target_address_at( ++ Address pc, Address constant_pool, Address target, ++ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) { ++ set_target_address_at(pc, target, icache_flush_mode); ++ } ++ ++ static void set_target_value_at( ++ Address pc, uint64_t target, ++ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED); ++ ++ static void JumpLabelToJumpRegister(Address pc); ++ ++ // This sets the branch destination (which gets loaded at the call address). ++ // This is for calls and branches within generated code. The serializer ++ // has already deserialized the lui/ori instructions etc. ++ inline static void deserialization_set_special_target_at( ++ Address instruction_payload, Code code, Address target); ++ ++ // Get the size of the special target encoded at 'instruction_payload'. ++ inline static int deserialization_special_target_size( ++ Address instruction_payload); ++ ++ // This sets the internal reference at the pc. ++ inline static void deserialization_set_target_internal_reference_at( ++ Address pc, Address target, ++ RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE); ++ ++ // Here we are patching the address in the LUI/ORI instruction pair. 
++ // These values are used in the serialization process and must be zero for ++ // LA platform, as Code, Embedded Object or External-reference pointers ++ // are split across two consecutive instructions and don't exist separately ++ // in the code, so the serializer should not step forwards in memory after ++ // a target is resolved and written. ++ static constexpr int kSpecialTargetSize = 0; ++ ++ // Number of consecutive instructions used to store 32bit/64bit constant. ++ // This constant was used in RelocInfo::target_address_address() function ++ // to tell serializer address of the instruction that follows ++ // LUI/ORI instruction pair. ++ // TODO check this ++ static constexpr int kInstructionsFor64BitConstant = 4; ++ ++ // Difference between address of current opcode and target address offset. ++ static constexpr int kBranchPCOffset = 0; ++ ++ // Difference between address of current opcode and target address offset, ++ // when we are generatinga sequence of instructions for long relative PC ++ // branches ++ static constexpr int kLongBranchPCOffset = 0; // 3 * kInstrSize; ++ ++ // Max offset for instructions with 16-bit offset field ++ static constexpr int kMax16BranchOffset = (1 << (18 - 1)) - 1; ++ ++ // Max offset for instructions with 21-bit offset field ++ static constexpr int kMax21BranchOffset = (1 << (23 - 1)) - 1; ++ ++ // Max offset for compact branch instructions with 26-bit offset field ++ static constexpr int kMax26BranchOffset = (1 << (28 - 1)) - 1; ++ ++ static constexpr int kTrampolineSlotsSize = 2 * kInstrSize; ++ ++ RegList* GetScratchRegisterList() { return &scratch_register_list_; } ++ ++ // --------------------------------------------------------------------------- ++ // Code generation. ++ ++ // Insert the smallest number of nop instructions ++ // possible to align the pc offset to a multiple ++ // of m. m must be a power of 2 (>= 4). ++ void Align(int m); ++ // Insert the smallest number of zero bytes possible to align the pc offset ++ // to a mulitple of m. m must be a power of 2 (>= 2). ++ void DataAlign(int m); ++ // Aligns code to something that's optimal for a jump target for the platform. ++ void CodeTargetAlign(); ++ ++ // Different nop operations are used by the code generator to detect certain ++ // states of the generated code. ++ enum NopMarkerTypes { ++ NON_MARKING_NOP = 0, ++ DEBUG_BREAK_NOP, ++ // IC markers. ++ PROPERTY_ACCESS_INLINED, ++ PROPERTY_ACCESS_INLINED_CONTEXT, ++ PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE, ++ // Helper values. ++ LAST_CODE_MARKER, ++ FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED, ++ }; ++ ++ // Type == 0 is the default non-marking nop. For loongisa this is a ++ // andi(zero_reg, zero_reg, 0). We use rt_reg == r1 for non-zero ++ // marking, to avoid conflict with ssnop and ehb instructions. ++ void nop(unsigned int type = 0) { ++ DCHECK_LT(type, 32); ++ Register nop_rt_reg = (type == 0) ? zero_reg : t7; ++ andi(zero_reg, nop_rt_reg, type); ++ } ++ ++ // --------Branch-and-jump-instructions---------- ++ // We don't use likely variant of instructions. 
++ void b(int32_t offset); ++ inline void b(Label* L) { b(shifted_branch_offset26(L)); } ++ void bl(int32_t offset); ++ inline void bl(Label* L) { bl(shifted_branch_offset26(L)); } ++ ++ void beq(Register rj, Register rd, int32_t offset); ++ inline void beq(Register rj, Register rd, Label* L) { ++ beq(rj, rd, shifted_branch_offset(L)); ++ } ++ void bne(Register rj, Register rd, int32_t offset); ++ inline void bne(Register rj, Register rd, Label* L) { ++ bne(rj, rd, shifted_branch_offset(L)); ++ } ++ void blt(Register rj, Register rd, int32_t offset); ++ inline void blt(Register rj, Register rd, Label* L) { ++ blt(rj, rd, shifted_branch_offset(L)); ++ } ++ void bge(Register rj, Register rd, int32_t offset); ++ inline void bge(Register rj, Register rd, Label* L) { ++ bge(rj, rd, shifted_branch_offset(L)); ++ } ++ void bltu(Register rj, Register rd, int32_t offset); ++ inline void bltu(Register rj, Register rd, Label* L) { ++ bltu(rj, rd, shifted_branch_offset(L)); ++ } ++ void bgeu(Register rj, Register rd, int32_t offset); ++ inline void bgeu(Register rj, Register rd, Label* L) { ++ bgeu(rj, rd, shifted_branch_offset(L)); ++ } ++ void beqz(Register rj, int32_t offset); ++ inline void beqz(Register rj, Label* L) { ++ beqz(rj, shifted_branch_offset21(L)); ++ } ++ void bnez(Register rj, int32_t offset); ++ inline void bnez(Register rj, Label* L) { ++ bnez(rj, shifted_branch_offset21(L)); ++ } ++ ++ void jirl(Register rd, Register rj, int32_t offset); ++ ++ void bceqz(CFRegister cj, int32_t si21); ++ inline void bceqz(CFRegister cj, Label* L) { ++ bceqz(cj, shifted_branch_offset21(L)); ++ } ++ void bcnez(CFRegister cj, int32_t si21); ++ inline void bcnez(CFRegister cj, Label* L) { ++ bcnez(cj, shifted_branch_offset21(L)); ++ } ++ ++ // -------Data-processing-instructions--------- ++ ++ // Arithmetic. 
++ void add_w(Register rd, Register rj, Register rk); ++ void add_d(Register rd, Register rj, Register rk); ++ void sub_w(Register rd, Register rj, Register rk); ++ void sub_d(Register rd, Register rj, Register rk); ++ ++ void addi_w(Register rd, Register rj, int32_t si12); ++ void addi_d(Register rd, Register rj, int32_t si12); ++ ++ void addu16i_d(Register rd, Register rj, int32_t si16); ++ ++ void alsl_w(Register rd, Register rj, Register rk, int32_t sa2); ++ void alsl_wu(Register rd, Register rj, Register rk, int32_t sa2); ++ void alsl_d(Register rd, Register rj, Register rk, int32_t sa2); ++ ++ void lu12i_w(Register rd, int32_t si20); ++ void lu32i_d(Register rd, int32_t si20); ++ void lu52i_d(Register rd, Register rj, int32_t si12); ++ ++ void slt(Register rd, Register rj, Register rk); ++ void sltu(Register rd, Register rj, Register rk); ++ void slti(Register rd, Register rj, int32_t si12); ++ void sltui(Register rd, Register rj, int32_t si12); ++ ++ void pcaddi(Register rd, int32_t si20); ++ void pcaddu12i(Register rd, int32_t si20); ++ void pcaddu18i(Register rd, int32_t si20); ++ void pcalau12i(Register rd, int32_t si20); ++ ++ void and_(Register rd, Register rj, Register rk); ++ void or_(Register rd, Register rj, Register rk); ++ void xor_(Register rd, Register rj, Register rk); ++ void nor(Register rd, Register rj, Register rk); ++ void andn(Register rd, Register rj, Register rk); ++ void orn(Register rd, Register rj, Register rk); ++ ++ void andi(Register rd, Register rj, int32_t ui12); ++ void ori(Register rd, Register rj, int32_t ui12); ++ void xori(Register rd, Register rj, int32_t ui12); ++ ++ void mul_w(Register rd, Register rj, Register rk); ++ void mulh_w(Register rd, Register rj, Register rk); ++ void mulh_wu(Register rd, Register rj, Register rk); ++ void mul_d(Register rd, Register rj, Register rk); ++ void mulh_d(Register rd, Register rj, Register rk); ++ void mulh_du(Register rd, Register rj, Register rk); ++ ++ void mulw_d_w(Register rd, Register rj, Register rk); ++ void mulw_d_wu(Register rd, Register rj, Register rk); ++ ++ void div_w(Register rd, Register rj, Register rk); ++ void mod_w(Register rd, Register rj, Register rk); ++ void div_wu(Register rd, Register rj, Register rk); ++ void mod_wu(Register rd, Register rj, Register rk); ++ void div_d(Register rd, Register rj, Register rk); ++ void mod_d(Register rd, Register rj, Register rk); ++ void div_du(Register rd, Register rj, Register rk); ++ void mod_du(Register rd, Register rj, Register rk); ++ ++ // Shifts. ++ void sll_w(Register rd, Register rj, Register rk); ++ void srl_w(Register rd, Register rj, Register rk); ++ void sra_w(Register rd, Register rj, Register rk); ++ void rotr_w(Register rd, Register rj, Register rk); ++ ++ void slli_w(Register rd, Register rj, int32_t ui5); ++ void srli_w(Register rd, Register rj, int32_t ui5); ++ void srai_w(Register rd, Register rj, int32_t ui5); ++ void rotri_w(Register rd, Register rj, int32_t ui5); ++ ++ void sll_d(Register rd, Register rj, Register rk); ++ void srl_d(Register rd, Register rj, Register rk); ++ void sra_d(Register rd, Register rj, Register rk); ++ void rotr_d(Register rd, Register rj, Register rk); ++ ++ void slli_d(Register rd, Register rj, int32_t ui6); ++ void srli_d(Register rd, Register rj, int32_t ui6); ++ void srai_d(Register rd, Register rj, int32_t ui6); ++ void rotri_d(Register rd, Register rj, int32_t ui6); ++ ++ // Bit twiddling. 
++ void ext_w_b(Register rd, Register rj); ++ void ext_w_h(Register rd, Register rj); ++ ++ void clo_w(Register rd, Register rj); ++ void clz_w(Register rd, Register rj); ++ void cto_w(Register rd, Register rj); ++ void ctz_w(Register rd, Register rj); ++ void clo_d(Register rd, Register rj); ++ void clz_d(Register rd, Register rj); ++ void cto_d(Register rd, Register rj); ++ void ctz_d(Register rd, Register rj); ++ ++ void bytepick_w(Register rd, Register rj, Register rk, int32_t sa2); ++ void bytepick_d(Register rd, Register rj, Register rk, int32_t sa3); ++ ++ void revb_2h(Register rd, Register rj); ++ void revb_4h(Register rd, Register rj); ++ void revb_2w(Register rd, Register rj); ++ void revb_d(Register rd, Register rj); ++ ++ void revh_2w(Register rd, Register rj); ++ void revh_d(Register rd, Register rj); ++ ++ void bitrev_4b(Register rd, Register rj); ++ void bitrev_8b(Register rd, Register rj); ++ ++ void bitrev_w(Register rd, Register rj); ++ void bitrev_d(Register rd, Register rj); ++ ++ void bstrins_w(Register rd, Register rj, int32_t msbw, int32_t lsbw); ++ void bstrins_d(Register rd, Register rj, int32_t msbd, int32_t lsbd); ++ ++ void bstrpick_w(Register rd, Register rj, int32_t msbw, int32_t lsbw); ++ void bstrpick_d(Register rd, Register rj, int32_t msbd, int32_t lsbd); ++ ++ void maskeqz(Register rd, Register rj, Register rk); ++ void masknez(Register rd, Register rj, Register rk); ++ ++ // Memory-instructions ++ void ld_b(Register rd, Register rj, int32_t si12); ++ void ld_h(Register rd, Register rj, int32_t si12); ++ void ld_w(Register rd, Register rj, int32_t si12); ++ void ld_d(Register rd, Register rj, int32_t si12); ++ void ld_bu(Register rd, Register rj, int32_t si12); ++ void ld_hu(Register rd, Register rj, int32_t si12); ++ void ld_wu(Register rd, Register rj, int32_t si12); ++ void st_b(Register rd, Register rj, int32_t si12); ++ void st_h(Register rd, Register rj, int32_t si12); ++ void st_w(Register rd, Register rj, int32_t si12); ++ void st_d(Register rd, Register rj, int32_t si12); ++ ++ void ldx_b(Register rd, Register rj, Register rk); ++ void ldx_h(Register rd, Register rj, Register rk); ++ void ldx_w(Register rd, Register rj, Register rk); ++ void ldx_d(Register rd, Register rj, Register rk); ++ void ldx_bu(Register rd, Register rj, Register rk); ++ void ldx_hu(Register rd, Register rj, Register rk); ++ void ldx_wu(Register rd, Register rj, Register rk); ++ void stx_b(Register rd, Register rj, Register rk); ++ void stx_h(Register rd, Register rj, Register rk); ++ void stx_w(Register rd, Register rj, Register rk); ++ void stx_d(Register rd, Register rj, Register rk); ++ ++ void ldptr_w(Register rd, Register rj, int32_t si14); ++ void ldptr_d(Register rd, Register rj, int32_t si14); ++ void stptr_w(Register rd, Register rj, int32_t si14); ++ void stptr_d(Register rd, Register rj, int32_t si14); ++ ++ void preld(int32_t hint, Register rj, int32_t si12); ++ ++ void preldx(int32_t hint, Register rj, Register rk); ++ ++ void ldgt_b(Register rd, Register rj, Register rk); ++ void ldgt_h(Register rd, Register rj, Register rk); ++ void ldgt_w(Register rd, Register rj, Register rk); ++ void ldgt_d(Register rd, Register rj, Register rk); ++ ++ void ldle_b(Register rd, Register rj, Register rk); ++ void ldle_h(Register rd, Register rj, Register rk); ++ void ldle_w(Register rd, Register rj, Register rk); ++ void ldle_d(Register rd, Register rj, Register rk); ++ ++ void stgt_b(Register rd, Register rj, Register rk); ++ void stgt_h(Register rd, Register rj, Register 
rk); ++ void stgt_w(Register rd, Register rj, Register rk); ++ void stgt_d(Register rd, Register rj, Register rk); ++ ++ void stle_b(Register rd, Register rj, Register rk); ++ void stle_h(Register rd, Register rj, Register rk); ++ void stle_w(Register rd, Register rj, Register rk); ++ void stle_d(Register rd, Register rj, Register rk); ++ ++ void amswap_w(Register rd, Register rk, Register rj); ++ void amswap_d(Register rd, Register rk, Register rj); ++ void amadd_w(Register rd, Register rk, Register rj); ++ void amadd_d(Register rd, Register rk, Register rj); ++ void amand_w(Register rd, Register rk, Register rj); ++ void amand_d(Register rd, Register rk, Register rj); ++ void amor_w(Register rd, Register rk, Register rj); ++ void amor_d(Register rd, Register rk, Register rj); ++ void amxor_w(Register rd, Register rk, Register rj); ++ void amxor_d(Register rd, Register rk, Register rj); ++ void ammax_w(Register rd, Register rk, Register rj); ++ void ammax_d(Register rd, Register rk, Register rj); ++ void ammin_w(Register rd, Register rk, Register rj); ++ void ammin_d(Register rd, Register rk, Register rj); ++ void ammax_wu(Register rd, Register rk, Register rj); ++ void ammax_du(Register rd, Register rk, Register rj); ++ void ammin_wu(Register rd, Register rk, Register rj); ++ void ammin_du(Register rd, Register rk, Register rj); ++ ++ void amswap_db_w(Register rd, Register rk, Register rj); ++ void amswap_db_d(Register rd, Register rk, Register rj); ++ void amadd_db_w(Register rd, Register rk, Register rj); ++ void amadd_db_d(Register rd, Register rk, Register rj); ++ void amand_db_w(Register rd, Register rk, Register rj); ++ void amand_db_d(Register rd, Register rk, Register rj); ++ void amor_db_w(Register rd, Register rk, Register rj); ++ void amor_db_d(Register rd, Register rk, Register rj); ++ void amxor_db_w(Register rd, Register rk, Register rj); ++ void amxor_db_d(Register rd, Register rk, Register rj); ++ void ammax_db_w(Register rd, Register rk, Register rj); ++ void ammax_db_d(Register rd, Register rk, Register rj); ++ void ammin_db_w(Register rd, Register rk, Register rj); ++ void ammin_db_d(Register rd, Register rk, Register rj); ++ void ammax_db_wu(Register rd, Register rk, Register rj); ++ void ammax_db_du(Register rd, Register rk, Register rj); ++ void ammin_db_wu(Register rd, Register rk, Register rj); ++ void ammin_db_du(Register rd, Register rk, Register rj); ++ ++ void ll_w(Register rd, Register rj, int32_t si14); ++ void ll_d(Register rd, Register rj, int32_t si14); ++ void sc_w(Register rd, Register rj, int32_t si14); ++ void sc_d(Register rd, Register rj, int32_t si14); ++ ++ void dbar(int32_t hint); ++ void ibar(int32_t hint); ++ ++ void crc_w_b_w(Register rd, Register rj, Register rk); ++ void crc_w_h_w(Register rd, Register rj, Register rk); ++ void crc_w_w_w(Register rd, Register rj, Register rk); ++ void crc_w_d_w(Register rd, Register rj, Register rk); ++ void crcc_w_b_w(Register rd, Register rj, Register rk); ++ void crcc_w_h_w(Register rd, Register rj, Register rk); ++ void crcc_w_w_w(Register rd, Register rj, Register rk); ++ void crcc_w_d_w(Register rd, Register rj, Register rk); ++ ++ void syscall(int32_t code); ++ ++ void asrtle_d(Register rj, Register rk); ++ void asrtgt_d(Register rj, Register rk); ++ ++ void rdtimel_w(Register rd, Register rj); ++ void rdtimeh_w(Register rd, Register rj); ++ void rdtime_d(Register rd, Register rj); ++ ++ void cpucfg(Register rd, Register rj); ++ ++ // Break / Trap instructions. 
++ void break_(uint32_t code, bool break_as_stop = false); ++ void stop(uint32_t code = kMaxStopCode); ++ ++ // Arithmetic. ++ void fadd_s(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fadd_d(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fsub_s(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fsub_d(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fmul_s(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fmul_d(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fdiv_s(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fdiv_d(FPURegister fd, FPURegister fj, FPURegister fk); ++ ++ void fmadd_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); ++ void fmadd_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); ++ void fmsub_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); ++ void fmsub_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); ++ void fnmadd_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); ++ void fnmadd_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); ++ void fnmsub_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); ++ void fnmsub_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); ++ ++ void fmax_s(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fmax_d(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fmin_s(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fmin_d(FPURegister fd, FPURegister fj, FPURegister fk); ++ ++ void fmaxa_s(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fmaxa_d(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fmina_s(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fmina_d(FPURegister fd, FPURegister fj, FPURegister fk); ++ ++ void fabs_s(FPURegister fd, FPURegister fj); ++ void fabs_d(FPURegister fd, FPURegister fj); ++ void fneg_s(FPURegister fd, FPURegister fj); ++ void fneg_d(FPURegister fd, FPURegister fj); ++ ++ void fsqrt_s(FPURegister fd, FPURegister fj); ++ void fsqrt_d(FPURegister fd, FPURegister fj); ++ void frecip_s(FPURegister fd, FPURegister fj); ++ void frecip_d(FPURegister fd, FPURegister fj); ++ void frsqrt_s(FPURegister fd, FPURegister fj); ++ void frsqrt_d(FPURegister fd, FPURegister fj); ++ ++ void fscaleb_s(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fscaleb_d(FPURegister fd, FPURegister fj, FPURegister fk); ++ void flogb_s(FPURegister fd, FPURegister fj); ++ void flogb_d(FPURegister fd, FPURegister fj); ++ void fcopysign_s(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fcopysign_d(FPURegister fd, FPURegister fj, FPURegister fk); ++ ++ void fclass_s(FPURegister fd, FPURegister fj); ++ void fclass_d(FPURegister fd, FPURegister fj); ++ ++ void fcmp_cond_s(FPUCondition cc, FPURegister fj, FPURegister fk, ++ CFRegister cd); ++ void fcmp_cond_d(FPUCondition cc, FPURegister fj, FPURegister fk, ++ CFRegister cd); ++ ++ void fcvt_s_d(FPURegister fd, FPURegister fj); ++ void fcvt_d_s(FPURegister fd, FPURegister fj); ++ ++ void ffint_s_w(FPURegister fd, FPURegister fj); ++ void ffint_s_l(FPURegister fd, FPURegister fj); ++ void ffint_d_w(FPURegister fd, FPURegister fj); ++ void ffint_d_l(FPURegister fd, FPURegister fj); ++ void ftint_w_s(FPURegister fd, FPURegister fj); ++ void ftint_w_d(FPURegister fd, FPURegister fj); ++ void ftint_l_s(FPURegister fd, FPURegister fj); ++ void ftint_l_d(FPURegister fd, FPURegister fj); ++ ++ void ftintrm_w_s(FPURegister fd, FPURegister fj); ++ void 
ftintrm_w_d(FPURegister fd, FPURegister fj); ++ void ftintrm_l_s(FPURegister fd, FPURegister fj); ++ void ftintrm_l_d(FPURegister fd, FPURegister fj); ++ void ftintrp_w_s(FPURegister fd, FPURegister fj); ++ void ftintrp_w_d(FPURegister fd, FPURegister fj); ++ void ftintrp_l_s(FPURegister fd, FPURegister fj); ++ void ftintrp_l_d(FPURegister fd, FPURegister fj); ++ void ftintrz_w_s(FPURegister fd, FPURegister fj); ++ void ftintrz_w_d(FPURegister fd, FPURegister fj); ++ void ftintrz_l_s(FPURegister fd, FPURegister fj); ++ void ftintrz_l_d(FPURegister fd, FPURegister fj); ++ void ftintrne_w_s(FPURegister fd, FPURegister fj); ++ void ftintrne_w_d(FPURegister fd, FPURegister fj); ++ void ftintrne_l_s(FPURegister fd, FPURegister fj); ++ void ftintrne_l_d(FPURegister fd, FPURegister fj); ++ ++ void frint_s(FPURegister fd, FPURegister fj); ++ void frint_d(FPURegister fd, FPURegister fj); ++ ++ void fmov_s(FPURegister fd, FPURegister fj); ++ void fmov_d(FPURegister fd, FPURegister fj); ++ ++ void fsel(CFRegister ca, FPURegister fd, FPURegister fj, FPURegister fk); ++ ++ void movgr2fr_w(FPURegister fd, Register rj); ++ void movgr2fr_d(FPURegister fd, Register rj); ++ void movgr2frh_w(FPURegister fd, Register rj); ++ ++ void movfr2gr_s(Register rd, FPURegister fj); ++ void movfr2gr_d(Register rd, FPURegister fj); ++ void movfrh2gr_s(Register rd, FPURegister fj); ++ ++ void movgr2fcsr(Register rj); ++ void movfcsr2gr(Register rd); ++ ++ void movfr2cf(CFRegister cd, FPURegister fj); ++ void movcf2fr(FPURegister fd, CFRegister cj); ++ ++ void movgr2cf(CFRegister cd, Register rj); ++ void movcf2gr(Register rd, CFRegister cj); ++ ++ void fld_s(FPURegister fd, Register rj, int32_t si12); ++ void fld_d(FPURegister fd, Register rj, int32_t si12); ++ void fst_s(FPURegister fd, Register rj, int32_t si12); ++ void fst_d(FPURegister fd, Register rj, int32_t si12); ++ ++ void fldx_s(FPURegister fd, Register rj, Register rk); ++ void fldx_d(FPURegister fd, Register rj, Register rk); ++ void fstx_s(FPURegister fd, Register rj, Register rk); ++ void fstx_d(FPURegister fd, Register rj, Register rk); ++ ++ void fldgt_s(FPURegister fd, Register rj, Register rk); ++ void fldgt_d(FPURegister fd, Register rj, Register rk); ++ void fldle_s(FPURegister fd, Register rj, Register rk); ++ void fldle_d(FPURegister fd, Register rj, Register rk); ++ void fstgt_s(FPURegister fd, Register rj, Register rk); ++ void fstgt_d(FPURegister fd, Register rj, Register rk); ++ void fstle_s(FPURegister fd, Register rj, Register rk); ++ void fstle_d(FPURegister fd, Register rj, Register rk); ++ ++ // Check the code size generated from label to here. ++ int SizeOfCodeGeneratedSince(Label* label) { ++ return pc_offset() - label->pos(); ++ } ++ ++ // Check the number of instructions generated from label to here. ++ int InstructionsGeneratedSince(Label* label) { ++ return SizeOfCodeGeneratedSince(label) / kInstrSize; ++ } ++ ++ // Class for scoping postponing the trampoline pool generation. ++ class BlockTrampolinePoolScope { ++ public: ++ explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) { ++ assem_->StartBlockTrampolinePool(); ++ } ++ ~BlockTrampolinePoolScope() { assem_->EndBlockTrampolinePool(); } ++ ++ private: ++ Assembler* assem_; ++ ++ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope); ++ }; ++ ++ // Class for postponing the assembly buffer growth. Typically used for ++ // sequences of instructions that must be emitted as a unit, before ++ // buffer growth (and relocation) can occur. 
++ // This blocking scope is not nestable. ++ class BlockGrowBufferScope { ++ public: ++ explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) { ++ assem_->StartBlockGrowBuffer(); ++ } ++ ~BlockGrowBufferScope() { assem_->EndBlockGrowBuffer(); } ++ ++ private: ++ Assembler* assem_; ++ ++ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope); ++ }; ++ ++ // Record a deoptimization reason that can be used by a log or cpu profiler. ++ // Use --trace-deopt to enable. ++ void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position, ++ int id); ++ ++ static int RelocateInternalReference(RelocInfo::Mode rmode, Address pc, ++ intptr_t pc_delta); ++ ++ // Writes a single byte or word of data in the code stream. Used for ++ // inline tables, e.g., jump-tables. ++ void db(uint8_t data); ++ void dd(uint32_t data); ++ void dq(uint64_t data); ++ void dp(uintptr_t data) { dq(data); } ++ void dd(Label* label); ++ ++ // Postpone the generation of the trampoline pool for the specified number of ++ // instructions. ++ void BlockTrampolinePoolFor(int instructions); ++ ++ // Check if there is less than kGap bytes available in the buffer. ++ // If this is the case, we need to grow the buffer before emitting ++ // an instruction or relocation information. ++ inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; } ++ ++ // Get the number of bytes available in the buffer. ++ inline intptr_t available_space() const { ++ return reloc_info_writer.pos() - pc_; ++ } ++ ++ // Read/patch instructions. ++ static Instr instr_at(Address pc) { return *reinterpret_cast<Instr*>(pc); } ++ static void instr_at_put(Address pc, Instr instr) { ++ *reinterpret_cast<Instr*>(pc) = instr; ++ } ++ Instr instr_at(int pos) { ++ return *reinterpret_cast<Instr*>(buffer_start_ + pos); ++ } ++ void instr_at_put(int pos, Instr instr) { ++ *reinterpret_cast<Instr*>(buffer_start_ + pos) = instr; ++ } ++ ++ // Check if an instruction is a branch of some kind.
++ static bool IsBranch(Instr instr); ++ static bool IsB(Instr instr); ++ static bool IsBz(Instr instr); ++ static bool IsNal(Instr instr); ++ ++ static bool IsBeq(Instr instr); ++ static bool IsBne(Instr instr); ++ ++ static bool IsJump(Instr instr); ++ static bool IsMov(Instr instr, Register rd, Register rs); ++ static bool IsPcAddi(Instr instr, Register rd, int32_t si20); ++ ++ static bool IsJ(Instr instr); ++ static bool IsLu12i_w(Instr instr); ++ static bool IsOri(Instr instr); ++ static bool IsLu32i_d(Instr instr); ++ static bool IsLu52i_d(Instr instr); ++ ++ static bool IsNop(Instr instr, unsigned int type); ++ static bool IsPop(Instr instr); ++ static bool IsPush(Instr instr); ++ // static bool IsLwRegFpOffset(Instr instr); ++ // static bool IsSwRegFpOffset(Instr instr); ++ // static bool IsLwRegFpNegOffset(Instr instr); ++ // static bool IsSwRegFpNegOffset(Instr instr); ++ ++ static Register GetRjReg(Instr instr); ++ static Register GetRkReg(Instr instr); ++ static Register GetRdReg(Instr instr); ++ ++ static uint32_t GetRj(Instr instr); ++ static uint32_t GetRjField(Instr instr); ++ static uint32_t GetRk(Instr instr); ++ static uint32_t GetRkField(Instr instr); ++ static uint32_t GetRd(Instr instr); ++ static uint32_t GetRdField(Instr instr); ++ static uint32_t GetSa2(Instr instr); ++ static uint32_t GetSa3(Instr instr); ++ static uint32_t GetSa2Field(Instr instr); ++ static uint32_t GetSa3Field(Instr instr); ++ static uint32_t GetOpcodeField(Instr instr); ++ static uint32_t GetFunction(Instr instr); ++ static uint32_t GetFunctionField(Instr instr); ++ static uint32_t GetImmediate16(Instr instr); ++ static uint32_t GetLabelConst(Instr instr); ++ ++ static bool IsAddImmediate(Instr instr); ++ static Instr SetAddImmediateOffset(Instr instr, int16_t offset); ++ ++ static bool IsAndImmediate(Instr instr); ++ static bool IsEmittedConstant(Instr instr); ++ ++ void CheckTrampolinePool(); ++ ++ inline int UnboundLabelsCount() { return unbound_labels_count_; } ++ ++ protected: ++ // Helper function for memory load/store. ++ void AdjustBaseAndOffset(MemOperand* src); ++ ++ inline static void set_target_internal_reference_encoded_at(Address pc, ++ Address target); ++ ++ int64_t buffer_space() const { return reloc_info_writer.pos() - pc_; } ++ ++ // Decode branch instruction at pos and return branch target pos. ++ int target_at(int pos, bool is_internal); ++ ++ // Patch branch instruction at pos to branch to given branch target pos. ++ void target_at_put(int pos, int target_pos, bool is_internal); ++ ++ // Say if we need to relocate with this mode. ++ bool MustUseReg(RelocInfo::Mode rmode); ++ ++ // Record reloc info for current pc_. ++ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0); ++ ++ // Block the emission of the trampoline pool before pc_offset. ++ void BlockTrampolinePoolBefore(int pc_offset) { ++ if (no_trampoline_pool_before_ < pc_offset) ++ no_trampoline_pool_before_ = pc_offset; ++ } ++ ++ void StartBlockTrampolinePool() { trampoline_pool_blocked_nesting_++; } ++ ++ void EndBlockTrampolinePool() { ++ trampoline_pool_blocked_nesting_--; ++ if (trampoline_pool_blocked_nesting_ == 0) { ++ CheckTrampolinePoolQuick(1); ++ } ++ } ++ ++ bool is_trampoline_pool_blocked() const { ++ return trampoline_pool_blocked_nesting_ > 0; ++ } ++ ++ bool has_exception() const { return internal_trampoline_exception_; } ++ ++ bool is_trampoline_emitted() const { return trampoline_emitted_; } ++ ++ // Temporarily block automatic assembly buffer growth. 
++ void StartBlockGrowBuffer() { ++ DCHECK(!block_buffer_growth_); ++ block_buffer_growth_ = true; ++ } ++ ++ void EndBlockGrowBuffer() { ++ DCHECK(block_buffer_growth_); ++ block_buffer_growth_ = false; ++ } ++ ++ bool is_buffer_growth_blocked() const { return block_buffer_growth_; } ++ ++ void CheckTrampolinePoolQuick(int extra_instructions = 0) { ++ if (pc_offset() >= next_buffer_check_ - extra_instructions * kInstrSize) { ++ CheckTrampolinePool(); ++ } ++ } ++ ++ private: ++ // Avoid overflows for displacements etc. ++ static const int kMaximalBufferSize = 512 * MB; ++ ++ // Buffer size and constant pool distance are checked together at regular ++ // intervals of kBufferCheckInterval emitted bytes. ++ static constexpr int kBufferCheckInterval = 1 * KB / 2; ++ ++ // Code generation. ++ // The relocation writer's position is at least kGap bytes below the end of ++ // the generated instructions. This is so that multi-instruction sequences do ++ // not have to check for overflow. The same is true for writes of large ++ // relocation info entries. ++ static constexpr int kGap = 64; ++ STATIC_ASSERT(AssemblerBase::kMinimalBufferSize >= 2 * kGap); ++ ++ // Repeated checking whether the trampoline pool should be emitted is rather ++ // expensive. By default we only check again once a number of instructions ++ // has been generated. ++ static constexpr int kCheckConstIntervalInst = 32; ++ static constexpr int kCheckConstInterval = ++ kCheckConstIntervalInst * kInstrSize; ++ ++ int next_buffer_check_; // pc offset of next buffer check. ++ ++ // Emission of the trampoline pool may be blocked in some code sequences. ++ int trampoline_pool_blocked_nesting_; // Block emission if this is not zero. ++ int no_trampoline_pool_before_; // Block emission before this pc offset. ++ ++ // Keep track of the last emitted pool to guarantee a maximal distance. ++ int last_trampoline_pool_end_; // pc offset of the end of the last pool. ++ ++ // Automatic growth of the assembly buffer may be blocked for some sequences. ++ bool block_buffer_growth_; // Block growth when true. ++ ++ // Relocation information generation. ++ // Each relocation is encoded as a variable size value. ++ static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize; ++ RelocInfoWriter reloc_info_writer; ++ ++ // The bound position, before this we cannot do instruction elimination. ++ int last_bound_pos_; ++ ++ // Code emission. 
++ inline void CheckBuffer(); ++ void GrowBuffer(); ++ inline void emit(Instr x); ++ inline void emit(uint64_t x); ++ // inline void CheckForEmitInForbiddenSlot(); ++ template ++ inline void EmitHelper(T x); ++ inline void EmitHelper(Instr x); ++ ++ void GenB(Opcode opcode, Register rj, int32_t si21); // opcode:6 ++ void GenB(Opcode opcode, CFRegister cj, int32_t si21, bool isEq); ++ void GenB(Opcode opcode, int32_t si26); ++ void GenBJ(Opcode opcode, Register rj, Register rd, int32_t si16); ++ void GenCmp(Opcode opcode, FPUCondition cond, FPURegister fk, FPURegister fj, ++ CFRegister cd); ++ void GenSel(Opcode opcode, CFRegister ca, FPURegister fk, FPURegister fj, ++ FPURegister rd); ++ ++ void GenRegister(Opcode opcode, Register rj, Register rd, bool rjrd = true); ++ void GenRegister(Opcode opcode, FPURegister fj, FPURegister fd); ++ void GenRegister(Opcode opcode, Register rj, FPURegister fd); ++ void GenRegister(Opcode opcode, FPURegister fj, Register rd); ++ void GenRegister(Opcode opcode, Register rj, FPUControlRegister fd); ++ void GenRegister(Opcode opcode, FPUControlRegister fj, Register rd); ++ void GenRegister(Opcode opcode, FPURegister fj, CFRegister cd); ++ void GenRegister(Opcode opcode, CFRegister cj, FPURegister fd); ++ void GenRegister(Opcode opcode, Register rj, CFRegister cd); ++ void GenRegister(Opcode opcode, CFRegister cj, Register rd); ++ ++ void GenRegister(Opcode opcode, Register rk, Register rj, Register rd); ++ void GenRegister(Opcode opcode, FPURegister fk, FPURegister fj, ++ FPURegister fd); ++ ++ void GenRegister(Opcode opcode, FPURegister fa, FPURegister fk, ++ FPURegister fj, FPURegister fd); ++ void GenRegister(Opcode opcode, Register rk, Register rj, FPURegister fd); ++ ++ void GenImm(Opcode opcode, int32_t bit3, Register rk, Register rj, ++ Register rd); ++ void GenImm(Opcode opcode, int32_t bit6m, int32_t bit6l, Register rj, ++ Register rd); ++ void GenImm(Opcode opcode, int32_t bit20, Register rd); ++ void GenImm(Opcode opcode, int32_t bit15); ++ void GenImm(Opcode opcode, int32_t value, Register rj, Register rd, ++ int32_t value_bits); // 6 | 12 | 14 | 16 ++ void GenImm(Opcode opcode, int32_t bit12, Register rj, FPURegister fd); ++ ++ // Labels. ++ void print(const Label* L); ++ void bind_to(Label* L, int pos); ++ void next(Label* L, bool is_internal); ++ ++ // One trampoline consists of: ++ // - space for trampoline slots, ++ // - space for labels. ++ // ++ // Space for trampoline slots is equal to slot_count * 2 * kInstrSize. ++ // Space for trampoline slots precedes space for labels. Each label is of one ++ // instruction size, so total amount for labels is equal to ++ // label_count * kInstrSize. ++ class Trampoline { ++ public: ++ Trampoline() { ++ start_ = 0; ++ next_slot_ = 0; ++ free_slot_count_ = 0; ++ end_ = 0; ++ } ++ Trampoline(int start, int slot_count) { ++ start_ = start; ++ next_slot_ = start; ++ free_slot_count_ = slot_count; ++ end_ = start + slot_count * kTrampolineSlotsSize; ++ } ++ int start() { return start_; } ++ int end() { return end_; } ++ int take_slot() { ++ int trampoline_slot = kInvalidSlotPos; ++ if (free_slot_count_ <= 0) { ++ // We have run out of space on trampolines. ++ // Make sure we fail in debug mode, so we become aware of each case ++ // when this happens. ++ DCHECK(0); ++ // Internal exception will be caught. 
++ } else { ++ trampoline_slot = next_slot_; ++ free_slot_count_--; ++ next_slot_ += kTrampolineSlotsSize; ++ } ++ return trampoline_slot; ++ } ++ ++ private: ++ int start_; ++ int end_; ++ int next_slot_; ++ int free_slot_count_; ++ }; ++ ++ int32_t get_trampoline_entry(int32_t pos); ++ int unbound_labels_count_; ++ // After trampoline is emitted, long branches are used in generated code for ++ // the forward branches whose target offsets could be beyond reach of branch ++ // instruction. We use this information to trigger different mode of ++ // branch instruction generation, where we use jump instructions rather ++ // than regular branch instructions. ++ bool trampoline_emitted_; ++ static constexpr int kInvalidSlotPos = -1; ++ ++ // Internal reference positions, required for unbounded internal reference ++ // labels. ++ std::set internal_reference_positions_; ++ bool is_internal_reference(Label* L) { ++ return internal_reference_positions_.find(L->pos()) != ++ internal_reference_positions_.end(); ++ } ++ ++ void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; } ++ void ClearCompactBranchState() { prev_instr_compact_branch_ = false; } ++ bool prev_instr_compact_branch_ = false; ++ ++ Trampoline trampoline_; ++ bool internal_trampoline_exception_; ++ ++ RegList scratch_register_list_; ++ ++ private: ++ void AllocateAndInstallRequestedHeapObjects(Isolate* isolate); ++ ++ int WriteCodeComments(); ++ ++ friend class RegExpMacroAssemblerMIPS; ++ friend class RelocInfo; ++ friend class BlockTrampolinePoolScope; ++ friend class EnsureSpace; ++}; ++ ++class EnsureSpace { ++ public: ++ explicit inline EnsureSpace(Assembler* assembler); ++}; ++ ++class V8_EXPORT_PRIVATE UseScratchRegisterScope { ++ public: ++ explicit UseScratchRegisterScope(Assembler* assembler); ++ ~UseScratchRegisterScope(); ++ ++ Register Acquire(); ++ bool hasAvailable() const; ++ ++ private: ++ RegList* available_; ++ RegList old_available_; ++}; ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_CODEGEN_LA64_ASSEMBLER_LA64_H_ +diff --git a/src/3rdparty/chromium/v8/src/codegen/la64/constants-la64.cc b/src/3rdparty/chromium/v8/src/codegen/la64/constants-la64.cc +new file mode 100644 +index 0000000000..1a406a8c4d +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/codegen/la64/constants-la64.cc +@@ -0,0 +1,100 @@ ++// Copyright 2011 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#if V8_TARGET_ARCH_LA64 ++ ++#include "src/codegen/la64/constants-la64.h" ++ ++namespace v8 { ++namespace internal { ++ ++// ----------------------------------------------------------------------------- ++// Registers. ++ ++// These register names are defined in a way to match the native disassembler ++// formatting. See for example the command "objdump -d ". ++const char* Registers::names_[kNumSimuRegisters] = { ++ "zero_reg", "ra", "gp", "sp", "a0", "a1", "a2", "a3", "a4", "a5", "a6", ++ "a7", "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "tp", ++ "fp", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "pc"}; ++ ++// List of alias names which can be used when referring to MIPS registers. 
++const Registers::RegisterAlias Registers::aliases_[] = { ++ {0, "zero"}, {23, "cp"}, {kInvalidRegister, nullptr}}; ++ ++const char* Registers::Name(int reg) { ++ const char* result; ++ if ((0 <= reg) && (reg < kNumSimuRegisters)) { ++ result = names_[reg]; ++ } else { ++ result = "noreg"; ++ } ++ return result; ++} ++ ++int Registers::Number(const char* name) { ++ // Look through the canonical names. ++ for (int i = 0; i < kNumSimuRegisters; i++) { ++ if (strcmp(names_[i], name) == 0) { ++ return i; ++ } ++ } ++ ++ // Look through the alias names. ++ int i = 0; ++ while (aliases_[i].reg != kInvalidRegister) { ++ if (strcmp(aliases_[i].name, name) == 0) { ++ return aliases_[i].reg; ++ } ++ i++; ++ } ++ ++ // No register with the reguested name found. ++ return kInvalidRegister; ++} ++ ++const char* FPURegisters::names_[kNumFPURegisters] = { ++ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", ++ "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", ++ "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"}; ++ ++// List of alias names which can be used when referring to MIPS registers. ++const FPURegisters::RegisterAlias FPURegisters::aliases_[] = { ++ {kInvalidRegister, nullptr}}; ++ ++const char* FPURegisters::Name(int creg) { ++ const char* result; ++ if ((0 <= creg) && (creg < kNumFPURegisters)) { ++ result = names_[creg]; ++ } else { ++ result = "nocreg"; ++ } ++ return result; ++} ++ ++int FPURegisters::Number(const char* name) { ++ // Look through the canonical names. ++ for (int i = 0; i < kNumFPURegisters; i++) { ++ if (strcmp(names_[i], name) == 0) { ++ return i; ++ } ++ } ++ ++ // Look through the alias names. ++ int i = 0; ++ while (aliases_[i].creg != kInvalidRegister) { ++ if (strcmp(aliases_[i].name, name) == 0) { ++ return aliases_[i].creg; ++ } ++ i++; ++ } ++ ++ // No Cregister with the reguested name found. ++ return kInvalidFPURegister; ++} ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_TARGET_ARCH_LA64 +diff --git a/src/3rdparty/chromium/v8/src/codegen/la64/constants-la64.h b/src/3rdparty/chromium/v8/src/codegen/la64/constants-la64.h +new file mode 100644 +index 0000000000..6cf2ec3b7e +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/codegen/la64/constants-la64.h +@@ -0,0 +1,1479 @@ ++// Copyright 2012 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#ifndef V8_CODEGEN_LA64_CONSTANTS_LA64_H_ ++#define V8_CODEGEN_LA64_CONSTANTS_LA64_H_ ++ ++#include "src/base/logging.h" ++#include "src/base/macros.h" ++#include "src/common/globals.h" ++ ++// UNIMPLEMENTED_ macro for LOONGISA. ++#ifdef DEBUG ++#define UNIMPLEMENTED_LOONGISA() \ ++ v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \ ++ __FILE__, __LINE__, __func__) ++#else ++#define UNIMPLEMENTED_LOONGISA() ++#endif ++ ++#define UNSUPPORTED_LOONGISA() \ ++ v8::internal::PrintF("Unsupported instruction.\n") ++ ++const uint32_t kLeastSignificantByteInInt32Offset = 0; ++const uint32_t kLessSignificantWordInDoublewordOffset = 0; ++ ++#ifndef __STDC_FORMAT_MACROS ++#define __STDC_FORMAT_MACROS ++#endif ++#include ++ ++// Defines constants and accessor classes to assemble, disassemble and ++// simulate LA64 instructions. 
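For reference, the instruction-field layout this header encodes can be exercised on its own. The following standalone sketch is illustrative only and is not part of the patch; it hard-codes the kRdShift/kRjShift/kRkShift widths and the ADD_D opcode value declared further down in this header, and the variable names are ad hoc.

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kAddD = 0x21u << 15;                         // ADD_D opcode field (17-bit opcode type)
  const uint32_t insn = kAddD | (5u << 10) | (6u << 5) | 4u;  // encode rk=5, rj=6, rd=4
  const uint32_t rd = (insn >> 0) & 0x1fu;    // kRdShift = 0,  kRdBits = 5
  const uint32_t rj = (insn >> 5) & 0x1fu;    // kRjShift = 5,  kRjBits = 5
  const uint32_t rk = (insn >> 10) & 0x1fu;   // kRkShift = 10, kRkBits = 5
  const bool is_add_d = (insn & (0x1ffffu << 15)) == kAddD;   // match the 17-bit opcode field
  std::printf("rd=%u rj=%u rk=%u add_d=%d\n", rd, rj, rk, is_add_d);  // prints rd=4 rj=6 rk=5 add_d=1
  return 0;
}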
++ ++namespace v8 { ++namespace internal { ++ ++constexpr size_t kMaxPCRelativeCodeRangeInMB = 128; ++ ++// ----------------------------------------------------------------------------- ++// Registers and FPURegisters. ++ ++// Number of general purpose registers. ++const int kNumRegisters = 32; ++const int kInvalidRegister = -1; ++ ++// Number of registers with pc. ++const int kNumSimuRegisters = 33; ++ ++// In the simulator, the PC register is simulated as the 34th register. ++const int kPCRegister = 32; ++ ++// Number coprocessor registers. ++const int kNumFPURegisters = 32; ++const int kInvalidFPURegister = -1; ++ ++// FPU (coprocessor 1) control registers. Currently only FCSR is implemented. ++// TODO fcsr0 fcsr1 fcsr2 fcsr3 ++const int kFCSRRegister = 0; ++const int kInvalidFPUControlRegister = -1; ++const uint32_t kFPUInvalidResult = static_cast(1u << 31) - 1; ++const int32_t kFPUInvalidResultNegative = static_cast(1u << 31); ++const uint64_t kFPU64InvalidResult = ++ static_cast(static_cast(1) << 63) - 1; ++const int64_t kFPU64InvalidResultNegative = ++ static_cast(static_cast(1) << 63); ++ ++// FCSR constants. ++// TODO ++const uint32_t kFCSRInexactFlagBit = 16; ++const uint32_t kFCSRUnderflowFlagBit = 17; ++const uint32_t kFCSROverflowFlagBit = 18; ++const uint32_t kFCSRDivideByZeroFlagBit = 19; ++const uint32_t kFCSRInvalidOpFlagBit = 20; ++ ++const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit; ++const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit; ++const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit; ++const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit; ++const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit; ++ ++const uint32_t kFCSRFlagMask = ++ kFCSRInexactFlagMask | kFCSRUnderflowFlagMask | kFCSROverflowFlagMask | ++ kFCSRDivideByZeroFlagMask | kFCSRInvalidOpFlagMask; ++ ++const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask; ++ ++// 'preld' instruction hints ++const int32_t kPrefHintLoad = 0; ++const int32_t kPrefHintStore = 8; ++ ++// Actual value of root register is offset from the root array's start ++// to take advantage of negative displacement values. ++// TODO(sigurds): Choose best value. ++constexpr int kRootRegisterBias = 256; ++ ++// Helper functions for converting between register numbers and names. ++class Registers { ++ public: ++ // Return the name of the register. ++ static const char* Name(int reg); ++ ++ // Lookup the register number for the name provided. ++ static int Number(const char* name); ++ ++ struct RegisterAlias { ++ int reg; ++ const char* name; ++ }; ++ ++ static const int64_t kMaxValue = 0x7fffffffffffffffl; ++ static const int64_t kMinValue = 0x8000000000000000l; ++ ++ private: ++ static const char* names_[kNumSimuRegisters]; ++ static const RegisterAlias aliases_[]; ++}; ++ ++// Helper functions for converting between register numbers and names. ++class FPURegisters { ++ public: ++ // Return the name of the register. ++ static const char* Name(int reg); ++ ++ // Lookup the register number for the name provided. ++ static int Number(const char* name); ++ ++ struct RegisterAlias { ++ int creg; ++ const char* name; ++ }; ++ ++ private: ++ static const char* names_[kNumFPURegisters]; ++ static const RegisterAlias aliases_[]; ++}; ++ ++// ----------------------------------------------------------------------------- ++// Instructions encoding constants. ++ ++// On LoongISA all instructions are 32 bits. 
++using Instr = int32_t; ++ ++// Special Software Interrupt codes when used in the presence of the LA64 ++// simulator. ++enum SoftwareInterruptCodes { ++ // Transition to C code. ++ call_rt_redirected = 0x7fff ++}; ++ ++// On LA64 Simulator breakpoints can have different codes: ++// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints, ++// the simulator will run through them and print the registers. ++// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop() ++// instructions (see Assembler::stop()). ++// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the ++// debugger. ++const uint32_t kMaxWatchpointCode = 31; ++const uint32_t kMaxStopCode = 127; ++STATIC_ASSERT(kMaxWatchpointCode < kMaxStopCode); ++ ++// ----- Fields offset and length. ++const int kRjShift = 5; ++const int kRjBits = 5; ++const int kRkShift = 10; ++const int kRkBits = 5; ++const int kRdShift = 0; ++const int kRdBits = 5; ++const int kSaShift = 15; ++const int kSa2Bits = 2; ++const int kSa3Bits = 3; ++const int kCdShift = 0; ++const int kCdBits = 3; ++const int kCjShift = 5; ++const int kCjBits = 3; ++const int kCodeShift = 0; ++const int kCodeBits = 15; ++const int kCondShift = 15; ++const int kCondBits = 5; ++const int kUi5Shift = 10; ++const int kUi5Bits = 5; ++const int kUi6Shift = 10; ++const int kUi6Bits = 6; ++const int kUi12Shift = 10; ++const int kUi12Bits = 12; ++const int kSi12Shift = 10; ++const int kSi12Bits = 12; ++const int kSi14Shift = 10; ++const int kSi14Bits = 14; ++const int kSi16Shift = 10; ++const int kSi16Bits = 16; ++const int kSi20Shift = 5; ++const int kSi20Bits = 20; ++const int kMsbwShift = 16; ++const int kMsbwBits = 5; ++const int kLsbwShift = 10; ++const int kLsbwBits = 5; ++const int kMsbdShift = 16; ++const int kMsbdBits = 6; ++const int kLsbdShift = 10; ++const int kLsbdBits = 6; ++const int kFdShift = 0; ++const int kFdBits = 5; ++const int kFjShift = 5; ++const int kFjBits = 5; ++const int kFkShift = 10; ++const int kFkBits = 5; ++const int kFaShift = 15; ++const int kFaBits = 5; ++const int kCaShift = 15; ++const int kCaBits = 3; ++const int kHint15Shift = 0; ++const int kHint15Bits = 15; ++const int kHint5Shift = 0; ++const int kHint5Bits = 5; ++const int kOffsLowShift = 10; ++const int kOffsLowBits = 16; ++const int kOffs26HighShift = 0; ++const int kOffs26HighBits = 10; ++const int kOffs21HighShift = 0; ++const int kOffs21HighBits = 5; ++const int kImm12Shift = 0; ++const int kImm12Bits = 12; ++const int kImm16Shift = 0; ++const int kImm16Bits = 16; ++const int kImm26Shift = 0; ++const int kImm26Bits = 26; ++const int kImm28Shift = 0; ++const int kImm28Bits = 28; ++const int kImm32Shift = 0; ++const int kImm32Bits = 32; ++ ++// ----- Miscellaneous useful masks. ++// Instruction bit masks. ++const int kRjFieldMask = ((1 << kRjBits) - 1) << kRjShift; ++const int kRkFieldMask = ((1 << kRkBits) - 1) << kRkShift; ++const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift; ++const int kSa2FieldMask = ((1 << kSa2Bits) - 1) << kSaShift; ++const int kSa3FieldMask = ((1 << kSa3Bits) - 1) << kSaShift; ++// Misc masks. 
++const int kHiMaskOf32 = 0xffff << 16; // Only to be used with 32-bit values ++const int kLoMaskOf32 = 0xffff; ++const int kSignMaskOf32 = 0x80000000; // Only to be used with 32-bit values ++const int64_t kTop16MaskOf64 = (int64_t)0xffff << 48; ++const int64_t kHigher16MaskOf64 = (int64_t)0xffff << 32; ++const int64_t kUpper16MaskOf64 = (int64_t)0xffff << 16; ++ ++const int kImm12Mask = ((1 << kImm12Bits) - 1) << kImm12Shift; ++const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift; ++const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift; ++const int kImm28Mask = ((1 << kImm28Bits) - 1) << kImm28Shift; ++ ++// ----- LA64 Opcodes and Function Fields. ++enum Opcode : uint32_t { ++ BEQZ = 0x10U << 26, ++ BNEZ = 0x11U << 26, ++ BCZ = 0x12U << 26, // BCEQZ & BCNEZ ++ JIRL = 0x13U << 26, ++ B = 0x14U << 26, ++ BL = 0x15U << 26, ++ BEQ = 0x16U << 26, ++ BNE = 0x17U << 26, ++ BLT = 0x18U << 26, ++ BGE = 0x19U << 26, ++ BLTU = 0x1aU << 26, ++ BGEU = 0x1bU << 26, ++ ++ ADDU16I_D = 0x4U << 26, ++ ++ LU12I_W = 0xaU << 25, ++ LU32I_D = 0xbU << 25, ++ PCADDI = 0xcU << 25, ++ PCALAU12I = 0xdU << 25, ++ PCADDU12I = 0xeU << 25, ++ PCADDU18I = 0xfU << 25, ++ ++ CSR = 0x4U << 24, // CSRRD & CSRWR & CSRXCHG ++ ++ LL_W = 0x20U << 24, ++ SC_W = 0x21U << 24, ++ LL_D = 0x22U << 24, ++ SC_D = 0x23U << 24, ++ LDPTR_W = 0x24U << 24, ++ STPTR_W = 0x25U << 24, ++ LDPTR_D = 0x26U << 24, ++ STPTR_D = 0x27U << 24, ++ ++ BSTR_W = 0x1U << 22, // BSTRINS_W & BSTRPICK_W ++ BSTRINS_W = BSTR_W, ++ BSTRPICK_W = BSTR_W, ++ BSTRINS_D = 0x2U << 22, ++ BSTRPICK_D = 0x3U << 22, ++ ++ SLTI = 0x8U << 22, ++ SLTUI = 0x9U << 22, ++ ADDI_W = 0xaU << 22, ++ ADDI_D = 0xbU << 22, ++ LU52I_D = 0xcU << 22, ++ ANDI = 0xdU << 22, ++ ORI = 0xeU << 22, ++ XORI = 0xfU << 22, ++ ++ CACHE = 0x18U << 22, ++ ++ LD_B = 0xa0U << 22, ++ LD_H = 0xa1U << 22, ++ LD_W = 0xa2U << 22, ++ LD_D = 0xa3U << 22, ++ ST_B = 0xa4U << 22, ++ ST_H = 0xa5U << 22, ++ ST_W = 0xa6U << 22, ++ ST_D = 0xa7U << 22, ++ LD_BU = 0xa8U << 22, ++ LD_HU = 0xa9U << 22, ++ LD_WU = 0xaaU << 22, ++ PRELD = 0xabU << 22, ++ FLD_S = 0xacU << 22, ++ FST_S = 0xadU << 22, ++ FLD_D = 0xaeU << 22, ++ FST_D = 0xafU << 22, ++ ++ FMADD_S = 0x81U << 20, ++ FMADD_D = 0x82U << 20, ++ FMSUB_S = 0x85U << 20, ++ FMSUB_D = 0x86U << 20, ++ FNMADD_S = 0x89U << 20, ++ FNMADD_D = 0x8aU << 20, ++ FNMSUB_S = 0x8dU << 20, ++ FNMSUB_D = 0x8eU << 20, ++ FCMP_COND_S = 0xc1U << 20, ++ FCMP_COND_D = 0xc2U << 20, ++ ++ BYTEPICK_D = 0x3U << 18, ++ BYTEPICK_W = 0x2U << 18, ++ ++ LDDIR = 0x190U << 18, ++ LDPTE = 0x191U << 18, ++ ++ FSEL = 0x340U << 18, ++ ++ ALSL = 0x1U << 18, ++ ALSL_W = ALSL, ++ ALSL_WU = ALSL, ++ ++ ALSL_D = 0xbU << 18, ++ ++ SLLI_W = 0x40U << 16, ++ SRLI_W = 0x44U << 16, ++ SRAI_W = 0x48U << 16, ++ ROTRI_W = 0x4cU << 16, ++ ++ SLLI_D = 0x41U << 16, ++ SRLI_D = 0x45U << 16, ++ SRAI_D = 0x49U << 16, ++ ROTRI_D = 0x4dU << 16, ++ ++ SLLI = 0x10U << 18, ++ SRLI = 0x11U << 18, ++ SRAI = 0x12U << 18, ++ ROTRI = 0x13U << 18, ++ ++ ASRTLE_D = 0x2U << 15, ++ ASRTGT_D = 0x3U << 15, ++ ++ ADD_W = 0x20U << 15, ++ ADD_D = 0x21U << 15, ++ SUB_W = 0x22U << 15, ++ SUB_D = 0x23U << 15, ++ SLT = 0x24U << 15, ++ SLTU = 0x25U << 15, ++ MASKNEZ = 0x26U << 15, ++ MASKEQZ = 0x27U << 15, ++ NOR = 0x28U << 15, ++ AND = 0x29U << 15, ++ OR = 0x2aU << 15, ++ XOR = 0x2bU << 15, ++ ORN = 0x2cU << 15, ++ ANDN = 0x2dU << 15, ++ SLL_W = 0x2eU << 15, ++ SRL_W = 0x2fU << 15, ++ SRA_W = 0x30U << 15, ++ SLL_D = 0x31U << 15, ++ SRL_D = 0x32U << 15, ++ SRA_D = 0x33U << 15, ++ ROTR_W = 0x36U << 15, ++ ROTR_D = 0x37U << 15, 
++ MUL_W = 0x38U << 15, ++ MULH_W = 0x39U << 15, ++ MULH_WU = 0x3aU << 15, ++ MUL_D = 0x3bU << 15, ++ MULH_D = 0x3cU << 15, ++ MULH_DU = 0x3dU << 15, ++ MULW_D_W = 0x3eU << 15, ++ MULW_D_WU = 0x3fU << 15, ++ ++ DIV_W = 0x40U << 15, ++ MOD_W = 0x41U << 15, ++ DIV_WU = 0x42U << 15, ++ MOD_WU = 0x43U << 15, ++ DIV_D = 0x44U << 15, ++ MOD_D = 0x45U << 15, ++ DIV_DU = 0x46U << 15, ++ MOD_DU = 0x47U << 15, ++ ++ CRC_W_B_W = 0x48U << 15, ++ CRC_W_H_W = 0x49U << 15, ++ CRC_W_W_W = 0x4aU << 15, ++ CRC_W_D_W = 0x4bU << 15, ++ CRCC_W_B_W = 0x4cU << 15, ++ CRCC_W_H_W = 0x4dU << 15, ++ CRCC_W_W_W = 0x4eU << 15, ++ CRCC_W_D_W = 0x4fU << 15, ++ ++ BREAK = 0x54U << 15, ++ DBGCALL = 0x55U << 15, ++ SYSCALL = 0x56U << 15, ++ HYPCALL = 0x57U << 15, ++ ++ FADD_S = 0x201U << 15, ++ FADD_D = 0x202U << 15, ++ FSUB_S = 0x205U << 15, ++ FSUB_D = 0x206U << 15, ++ FMUL_S = 0x209U << 15, ++ FMUL_D = 0x20aU << 15, ++ FDIV_S = 0x20dU << 15, ++ FDIV_D = 0x20eU << 15, ++ FMAX_S = 0x211U << 15, ++ FMAX_D = 0x212U << 15, ++ FMIN_S = 0x215U << 15, ++ FMIN_D = 0x216U << 15, ++ FMAXA_S = 0x219U << 15, ++ FMAXA_D = 0x21aU << 15, ++ FMINA_S = 0x21dU << 15, ++ FMINA_D = 0x21eU << 15, ++ FSCALEB_S = 0x221U << 15, ++ FSCALEB_D = 0x222U << 15, ++ FCOPYSIGN_S = 0x225U << 15, ++ FCOPYSIGN_D = 0x226U << 15, ++ ++ WAIT_INVTLB = 0xc91U << 15, // wait & invtlb ++ ++ LDX_B = 0x7000U << 15, ++ LDX_H = 0x7008U << 15, ++ LDX_W = 0x7010U << 15, ++ LDX_D = 0x7018U << 15, ++ STX_B = 0x7020U << 15, ++ STX_H = 0x7028U << 15, ++ STX_W = 0x7030U << 15, ++ STX_D = 0x7038U << 15, ++ LDX_BU = 0x7040U << 15, ++ LDX_HU = 0x7048U << 15, ++ LDX_WU = 0x7050U << 15, ++ PRELDX = 0x7058U << 15, ++ FLDX_S = 0x7060U << 15, ++ FLDX_D = 0x7068U << 15, ++ FSTX_S = 0x7070U << 15, ++ FSTX_D = 0x7078U << 15, ++ ++ AMSWAP_W = 0x70c0U << 15, ++ AMSWAP_D = 0x70c1U << 15, ++ AMADD_W = 0x70c2U << 15, ++ AMADD_D = 0x70c3U << 15, ++ AMAND_W = 0x70c4U << 15, ++ AMAND_D = 0x70c5U << 15, ++ AMOR_W = 0x70c6U << 15, ++ AMOR_D = 0x70c7U << 15, ++ AMXOR_W = 0x70c8U << 15, ++ AMXOR_D = 0x70c9U << 15, ++ AMMAX_W = 0x70caU << 15, ++ AMMAX_D = 0x70cbU << 15, ++ AMMIN_W = 0x70ccU << 15, ++ AMMIN_D = 0x70cdU << 15, ++ AMMAX_WU = 0x70ceU << 15, ++ AMMAX_DU = 0x70cfU << 15, ++ AMMIN_WU = 0x70d0U << 15, ++ AMMIN_DU = 0x70d1U << 15, ++ AMSWAP_DB_W = 0x70d2U << 15, ++ AMSWAP_DB_D = 0x70d3U << 15, ++ AMADD_DB_W = 0x70d4U << 15, ++ AMADD_DB_D = 0x70d5U << 15, ++ AMAND_DB_W = 0x70d6U << 15, ++ AMAND_DB_D = 0x70d7U << 15, ++ AMOR_DB_W = 0x70d8U << 15, ++ AMOR_DB_D = 0x70d9U << 15, ++ AMXOR_DB_W = 0x70daU << 15, ++ AMXOR_DB_D = 0x70dbU << 15, ++ AMMAX_DB_W = 0x70dcU << 15, ++ AMMAX_DB_D = 0x70ddU << 15, ++ AMMIN_DB_W = 0x70deU << 15, ++ AMMIN_DB_D = 0x70dfU << 15, ++ AMMAX_DB_WU = 0x70e0U << 15, ++ AMMAX_DB_DU = 0x70e1U << 15, ++ AMMIN_DB_WU = 0x70e2U << 15, ++ AMMIN_DB_DU = 0x70e3U << 15, ++ ++ DBAR = 0x70e4U << 15, ++ IBAR = 0x70e5U << 15, ++ ++ FLDGT_S = 0x70e8U << 15, ++ FLDGT_D = 0x70e9U << 15, ++ FLDLE_S = 0x70eaU << 15, ++ FLDLE_D = 0x70ebU << 15, ++ FSTGT_S = 0x70ecU << 15, ++ FSTGT_D = 0x70edU << 15, ++ FSTLE_S = 0x70eeU << 15, ++ FSTLE_D = 0x70efU << 15, ++ LDGT_B = 0x70f0U << 15, ++ LDGT_H = 0x70f1U << 15, ++ LDGT_W = 0x70f2U << 15, ++ LDGT_D = 0x70f3U << 15, ++ LDLE_B = 0x70f4U << 15, ++ LDLE_H = 0x70f5U << 15, ++ LDLE_W = 0x70f6U << 15, ++ LDLE_D = 0x70f7U << 15, ++ STGT_B = 0x70f8U << 15, ++ STGT_H = 0x70f9U << 15, ++ STGT_W = 0x70faU << 15, ++ STGT_D = 0x70fbU << 15, ++ STLE_B = 0x70fcU << 15, ++ STLE_H = 0x70fdU << 15, ++ STLE_W = 0x70feU << 15, ++ STLE_D = 0x70ffU << 15, ++ ++ 
CLO_W = 0X4U << 10, ++ CLZ_W = 0X5U << 10, ++ CTO_W = 0X6U << 10, ++ CTZ_W = 0X7U << 10, ++ CLO_D = 0X8U << 10, ++ CLZ_D = 0X9U << 10, ++ CTO_D = 0XaU << 10, ++ CTZ_D = 0XbU << 10, ++ REVB_2H = 0XcU << 10, ++ REVB_4H = 0XdU << 10, ++ REVB_2W = 0XeU << 10, ++ REVB_D = 0XfU << 10, ++ REVH_2W = 0X10U << 10, ++ REVH_D = 0X11U << 10, ++ BITREV_4B = 0X12U << 10, ++ BITREV_8B = 0X13U << 10, ++ BITREV_W = 0X14U << 10, ++ BITREV_D = 0X15U << 10, ++ EXT_W_H = 0X16U << 10, ++ EXT_W_B = 0X17U << 10, ++ RDTIMEL_W = 0X18U << 10, ++ RDTIMEH_W = 0X19U << 10, ++ RDTIME_D = 0X1aU << 10, ++ CPUCFG_W = 0X1bU << 10, ++ ++ FABS_S = 0X4501U << 10, ++ FABS_D = 0X4502U << 10, ++ FNEG_S = 0X4505U << 10, ++ FNEG_D = 0X4506U << 10, ++ FLOGB_S = 0X4509U << 10, ++ FLOGB_D = 0X450aU << 10, ++ FCLASS_S = 0X450dU << 10, ++ FCLASS_D = 0X450eU << 10, ++ FSQRT_S = 0X4511U << 10, ++ FSQRT_D = 0X4512U << 10, ++ FRECIP_S = 0X4515U << 10, ++ FRECIP_D = 0X4516U << 10, ++ FRSQRT_S = 0X4519U << 10, ++ FRSQRT_D = 0X451aU << 10, ++ FMOV_S = 0X4525U << 10, ++ FMOV_D = 0X4526U << 10, ++ MOVGR2FR_W = 0X4529U << 10, ++ MOVGR2FR_D = 0X452aU << 10, ++ MOVGR2FRH_W = 0X452bU << 10, ++ MOVFR2GR_S = 0X452dU << 10, ++ MOVFR2GR_D = 0X452eU << 10, ++ MOVFRH2GR_S = 0X452fU << 10, ++ MOVGR2FCSR = 0X4530U << 10, ++ MOVFCSR2GR = 0X4532U << 10, ++ MOVFR2CF = 0X4534U << 10, ++ MOVGR2CF = 0X4536U << 10, ++ ++ FCVT_S_D = 0x4646U << 10, ++ FCVT_D_S = 0x4649U << 10, ++ FTINTRM_W_S = 0x4681U << 10, ++ FTINTRM_W_D = 0x4682U << 10, ++ FTINTRM_L_S = 0x4689U << 10, ++ FTINTRM_L_D = 0x468aU << 10, ++ FTINTRP_W_S = 0x4691U << 10, ++ FTINTRP_W_D = 0x4692U << 10, ++ FTINTRP_L_S = 0x4699U << 10, ++ FTINTRP_L_D = 0x469aU << 10, ++ FTINTRZ_W_S = 0x46a1U << 10, ++ FTINTRZ_W_D = 0x46a2U << 10, ++ FTINTRZ_L_S = 0x46a9U << 10, ++ FTINTRZ_L_D = 0x46aaU << 10, ++ FTINTRNE_W_S = 0x46b1U << 10, ++ FTINTRNE_W_D = 0x46b2U << 10, ++ FTINTRNE_L_S = 0x46b9U << 10, ++ FTINTRNE_L_D = 0x46baU << 10, ++ FTINT_W_S = 0x46c1U << 10, ++ FTINT_W_D = 0x46c2U << 10, ++ FTINT_L_S = 0x46c9U << 10, ++ FTINT_L_D = 0x46caU << 10, ++ FFINT_S_W = 0x4744U << 10, ++ FFINT_S_L = 0x4746U << 10, ++ FFINT_D_W = 0x4748U << 10, ++ FFINT_D_L = 0x474aU << 10, ++ FRINT_S = 0x4791U << 10, ++ FRINT_D = 0x4792U << 10, ++ ++ IOCSRRD_B = 0x19200U << 10, ++ IOCSRRD_H = 0x19201U << 10, ++ IOCSRRD_W = 0x19202U << 10, ++ IOCSRRD_D = 0x19203U << 10, ++ IOCSRWR_B = 0x19204U << 10, ++ IOCSRWR_H = 0x19205U << 10, ++ IOCSRWR_W = 0x19206U << 10, ++ IOCSRWR_D = 0x19207U << 10, ++ ++ MOVCF2FR = 0x4535U << 10, ++ MOVCF2GR = 0x4537U << 10, ++ ++ TLBINV = 0x06482000U, ++ TLBFLUSH = 0x06482400U, ++ TLBP = 0x06482800U, ++ TLBR = 0x06482c00U, ++ TLBWI = 0x06483000U, ++ TLBWR = 0x06483400U, ++ ERET = 0x06483800U ++}; ++ ++// ----- Emulated conditions. ++// On LA64 we use this enum to abstract from conditional branch instructions. ++// The 'U' prefix is used to specify unsigned comparisons. ++enum Condition { ++ // Any value < 0 is considered no_condition. ++ kNoCondition = -1, ++ overflow = 0, ++ no_overflow = 1, ++ Uless = 2, ++ Ugreater_equal = 3, ++ Uless_equal = 4, ++ Ugreater = 5, ++ equal = 6, ++ not_equal = 7, // Unordered or Not Equal. ++ negative = 8, ++ positive = 9, ++ parity_even = 10, ++ parity_odd = 11, ++ less = 12, ++ greater_equal = 13, ++ less_equal = 14, ++ greater = 15, ++ ueq = 16, // Unordered or Equal. ++ ogl = 17, // Ordered and Not Equal. ++ cc_always = 18, ++ ++ // Aliases. 
++ carry = Uless, ++ not_carry = Ugreater_equal, ++ zero = equal, ++ eq = equal, ++ not_zero = not_equal, ++ ne = not_equal, ++ nz = not_equal, ++ sign = negative, ++ not_sign = positive, ++ mi = negative, ++ pl = positive, ++ hi = Ugreater, ++ ls = Uless_equal, ++ ge = greater_equal, ++ lt = less, ++ gt = greater, ++ le = less_equal, ++ hs = Ugreater_equal, ++ lo = Uless, ++ al = cc_always, ++ ult = Uless, ++ uge = Ugreater_equal, ++ ule = Uless_equal, ++ ugt = Ugreater, ++ cc_default = kNoCondition ++}; ++ ++// Returns the equivalent of !cc. ++// Negation of the default kNoCondition (-1) results in a non-default ++// no_condition value (-2). As long as tests for no_condition check ++// for condition < 0, this will work as expected. ++inline Condition NegateCondition(Condition cc) { ++ DCHECK(cc != cc_always); ++ return static_cast(cc ^ 1); ++} ++ ++inline Condition NegateFpuCondition(Condition cc) { ++ DCHECK(cc != cc_always); ++ switch (cc) { ++ case ult: ++ return ge; ++ case ugt: ++ return le; ++ case uge: ++ return lt; ++ case ule: ++ return gt; ++ case lt: ++ return uge; ++ case gt: ++ return ule; ++ case ge: ++ return ult; ++ case le: ++ return ugt; ++ case eq: ++ return ne; ++ case ne: ++ return eq; ++ case ueq: ++ return ogl; ++ case ogl: ++ return ueq; ++ default: ++ return cc; ++ } ++} ++ ++// ----- Coprocessor conditions. ++enum FPUCondition { ++ kNoFPUCondition = -1, ++ ++ CAF = 0x00, // False. ++ SAF = 0x01, // False. ++ CLT = 0x02, // Less Than quiet ++ // SLT = 0x03, // Less Than signaling ++ CEQ = 0x04, ++ SEQ = 0x05, ++ CLE = 0x06, ++ SLE = 0x07, ++ CUN = 0x08, ++ SUN = 0x09, ++ CULT = 0x0a, ++ SULT = 0x0b, ++ CUEQ = 0x0c, ++ SUEQ = 0x0d, ++ CULE = 0x0e, ++ SULE = 0x0f, ++ CNE = 0x10, ++ SNE = 0x11, ++ COR = 0x14, ++ SOR = 0x15, ++ CUNE = 0x18, ++ SUNE = 0x19, ++}; ++ ++const uint32_t kFPURoundingModeShift = 8; ++const uint32_t kFPURoundingModeMask = 0b11 << kFPURoundingModeShift; ++ ++// FPU rounding modes. ++enum FPURoundingMode { ++ RN = 0b00 << kFPURoundingModeShift, // Round to Nearest. ++ RZ = 0b01 << kFPURoundingModeShift, // Round towards zero. ++ RP = 0b10 << kFPURoundingModeShift, // Round towards Plus Infinity. ++ RM = 0b11 << kFPURoundingModeShift, // Round towards Minus Infinity. ++ ++ // Aliases. ++ kRoundToNearest = RN, ++ kRoundToZero = RZ, ++ kRoundToPlusInf = RP, ++ kRoundToMinusInf = RM, ++ ++ mode_round = RN, ++ mode_ceil = RP, ++ mode_floor = RM, ++ mode_trunc = RZ ++}; ++ ++enum CheckForInexactConversion { ++ kCheckForInexactConversion, ++ kDontCheckForInexactConversion ++}; ++ ++enum class MaxMinKind : int { kMin = 0, kMax = 1 }; ++ ++// ----------------------------------------------------------------------------- ++// Hints. ++ ++// Branch hints are not used on the LA64. They are defined so that they can ++// appear in shared function signatures, but will be ignored in LA64 ++// implementations. ++enum Hint { no_hint = 0 }; ++ ++inline Hint NegateHint(Hint hint) { return no_hint; } ++ ++// ----------------------------------------------------------------------------- ++// Specific instructions, constants, and masks. ++// These constants are declared in assembler-mips.cc, as they use named ++// registers and other constants. ++ ++// addi_d(sp, sp, 8) aka Pop() operation or part of Pop(r) ++// operations as post-increment of sp. ++extern const Instr kPopInstruction; ++// addi_d(sp, sp, -8) part of Push(r) operation as pre-decrement of sp. 
++extern const Instr kPushInstruction; ++// St_d(r, MemOperand(sp, 0)) ++extern const Instr kPushRegPattern; ++// Ld_d(r, MemOperand(sp, 0)) ++extern const Instr kPopRegPattern; ++// extern const Instr kLwRegFpOffsetPattern; ++// extern const Instr kSwRegFpOffsetPattern; ++// extern const Instr kLwRegFpNegOffsetPattern; ++// extern const Instr kSwRegFpNegOffsetPattern; ++// A mask for the Rk register for push, pop, lw, sw instructions. ++extern const Instr kRtMask; ++// extern const Instr kLwSwInstrTypeMask; ++// extern const Instr kLwSwInstrArgumentMask; ++// extern const Instr kLwSwOffsetMask; ++ ++// Break 0xfffff, reserved for redirected real time call. ++const Instr rtCallRedirInstr = BREAK | call_rt_redirected; ++// A nop instruction. (Encoding of addi_w 0 0 0). ++const Instr nopInstr = ADDI_W; ++ ++constexpr uint8_t kInstrSize = 4; ++constexpr uint8_t kInstrSizeLog2 = 2; ++ ++class InstructionBase { ++ public: ++ enum { ++ // On LoongISA PC cannot actually be directly accessed. We behave as if PC ++ // was always the value of the current instruction being executed. ++ kPCReadOffset = 0 ++ }; ++ ++ enum Type { ++ kOp6Type, ++ kOp7Type, ++ kOp8Type, ++ kOp10Type, ++ kOp12Type, ++ kOp14Type, ++ kOp17Type, ++ kOp22Type, ++ kUnsupported = -1 ++ }; ++ ++ // Get the raw instruction bits. ++ inline Instr InstructionBits() const { ++ return *reinterpret_cast<const Instr*>(this); ++ } ++ ++ // Set the raw instruction bits to value. ++ inline void SetInstructionBits(Instr value) { ++ *reinterpret_cast<Instr*>(this) = value; ++ } ++ ++ // Read one particular bit out of the instruction bits. ++ inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; } ++ ++ // Read a bit field out of the instruction bits. ++ inline int Bits(int hi, int lo) const { ++ return (InstructionBits() >> lo) & ((2U << (hi - lo)) - 1); ++ } ++ ++ // Safe to call within InstructionType(). ++ inline int RjFieldRawNoAssert() const { ++ return InstructionBits() & kRjFieldMask; ++ } ++ ++ // Get the encoding type of the instruction.
++ inline Type InstructionType() const; ++ ++ protected: ++ InstructionBase() {} ++}; ++ ++template ++class InstructionGetters : public T { ++ public: ++ inline int RjValue() const { ++ return this->Bits(kRjShift + kRjBits - 1, kRjShift); ++ } ++ ++ inline int RkValue() const { ++ return this->Bits(kRkShift + kRkBits - 1, kRkShift); ++ } ++ ++ inline int RdValue() const { ++ return this->Bits(kRdShift + kRdBits - 1, kRdShift); ++ } ++ ++ inline int Sa2Value() const { ++ return this->Bits(kSaShift + kSa2Bits - 1, kSaShift); ++ } ++ ++ inline int Sa3Value() const { ++ return this->Bits(kSaShift + kSa3Bits - 1, kSaShift); ++ } ++ ++ inline int Ui5Value() const { ++ return this->Bits(kUi5Shift + kUi5Bits - 1, kUi5Shift); ++ } ++ ++ inline int Ui6Value() const { ++ return this->Bits(kUi6Shift + kUi6Bits - 1, kUi6Shift); ++ } ++ ++ inline int Ui12Value() const { ++ return this->Bits(kUi12Shift + kUi12Bits - 1, kUi12Shift); ++ } ++ ++ inline int LsbwValue() const { ++ return this->Bits(kLsbwShift + kLsbwBits - 1, kLsbwShift); ++ } ++ ++ inline int MsbwValue() const { ++ return this->Bits(kMsbwShift + kMsbwBits - 1, kMsbwShift); ++ } ++ ++ inline int LsbdValue() const { ++ return this->Bits(kLsbdShift + kLsbdBits - 1, kLsbdShift); ++ } ++ ++ inline int MsbdValue() const { ++ return this->Bits(kMsbdShift + kMsbdBits - 1, kMsbdShift); ++ } ++ ++ inline int CondValue() const { ++ return this->Bits(kCondShift + kCondBits - 1, kCondShift); ++ } ++ ++ inline int Si12Value() const { ++ return this->Bits(kSi12Shift + kSi12Bits - 1, kSi12Shift); ++ } ++ ++ inline int Si14Value() const { ++ return this->Bits(kSi14Shift + kSi14Bits - 1, kSi14Shift); ++ } ++ ++ inline int Si16Value() const { ++ return this->Bits(kSi16Shift + kSi16Bits - 1, kSi16Shift); ++ } ++ ++ inline int Si20Value() const { ++ return this->Bits(kSi20Shift + kSi20Bits - 1, kSi20Shift); ++ } ++ ++ inline int FdValue() const { ++ return this->Bits(kFdShift + kFdBits - 1, kFdShift); ++ } ++ ++ inline int FaValue() const { ++ return this->Bits(kFaShift + kFaBits - 1, kFaShift); ++ } ++ ++ inline int FjValue() const { ++ return this->Bits(kFjShift + kFjBits - 1, kFjShift); ++ } ++ ++ inline int FkValue() const { ++ return this->Bits(kFkShift + kFkBits - 1, kFkShift); ++ } ++ ++ inline int CjValue() const { ++ return this->Bits(kCjShift + kCjBits - 1, kCjShift); ++ } ++ ++ inline int CdValue() const { ++ return this->Bits(kCdShift + kCdBits - 1, kCdShift); ++ } ++ ++ inline int CaValue() const { ++ return this->Bits(kCaShift + kCaBits - 1, kCaShift); ++ } ++ ++ inline int CodeValue() const { ++ return this->Bits(kCodeShift + kCodeBits - 1, kCodeShift); ++ } ++ ++ inline int Hint5Value() const { ++ return this->Bits(kHint5Shift + kHint5Bits - 1, kHint5Shift); ++ } ++ ++ inline int Hint15Value() const { ++ return this->Bits(kHint15Shift + kHint15Bits - 1, kHint15Shift); ++ } ++ ++ inline int Offs16Value() const { ++ return this->Bits(kOffsLowShift + kOffsLowBits - 1, kOffsLowShift); ++ } ++ ++ inline int Offs21Value() const { ++ int low = this->Bits(kOffsLowShift + kOffsLowBits - 1, kOffsLowShift); ++ int high = ++ this->Bits(kOffs21HighShift + kOffs21HighBits - 1, kOffs21HighShift); ++ return ((high << kOffsLowBits) + low); ++ } ++ ++ inline int Offs26Value() const { ++ int low = this->Bits(kOffsLowShift + kOffsLowBits - 1, kOffsLowShift); ++ int high = ++ this->Bits(kOffs26HighShift + kOffs26HighBits - 1, kOffs26HighShift); ++ return ((high << kOffsLowBits) + low); ++ } ++ ++ inline int RjFieldRaw() const { ++ return this->InstructionBits() & 
kRjFieldMask; ++ } ++ ++ inline int RkFieldRaw() const { ++ return this->InstructionBits() & kRkFieldMask; ++ } ++ ++ inline int RdFieldRaw() const { ++ return this->InstructionBits() & kRdFieldMask; ++ } ++ ++ inline int32_t ImmValue(int bits) const { return this->Bits(bits - 1, 0); } ++ ++ /*TODO*/ ++ inline int32_t Imm12Value() const { abort(); } ++ ++ inline int32_t Imm14Value() const { abort(); } ++ ++ inline int32_t Imm16Value() const { abort(); } ++ ++ // Say if the instruction 'links'. e.g. jal, bal. ++ bool IsLinkingInstruction() const; ++ // Say if the instruction is a break or a trap. ++ bool IsTrap() const; ++}; ++ ++class Instruction : public InstructionGetters { ++ public: ++ // Instructions are read of out a code stream. The only way to get a ++ // reference to an instruction is to convert a pointer. There is no way ++ // to allocate or create instances of class Instruction. ++ // Use the At(pc) function to create references to Instruction. ++ static Instruction* At(byte* pc) { ++ return reinterpret_cast(pc); ++ } ++ ++ private: ++ // We need to prevent the creation of instances of class Instruction. ++ DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction); ++}; ++ ++// ----------------------------------------------------------------------------- ++// LA64 assembly various constants. ++ ++// C/C++ argument slots size. ++const int kCArgSlotCount = 0; ++ ++const int kCArgsSlotsSize = kCArgSlotCount * kInstrSize * 2; ++ ++const int kInvalidStackOffset = -1; ++ ++static const int kNegOffset = 0x00008000; ++ ++InstructionBase::Type InstructionBase::InstructionType() const { ++ InstructionBase::Type kType = kUnsupported; ++ ++ // Check for kOp6Type ++ switch (Bits(31, 26) << 26) { ++ case ADDU16I_D: ++ case BEQZ: ++ case BNEZ: ++ case BCZ: ++ case JIRL: ++ case B: ++ case BL: ++ case BEQ: ++ case BNE: ++ case BLT: ++ case BGE: ++ case BLTU: ++ case BGEU: ++ kType = kOp6Type; ++ break; ++ default: ++ kType = kUnsupported; ++ } ++ ++ if (kType == kUnsupported) { ++ // Check for kOp7Type ++ switch (Bits(31, 25) << 25) { ++ case LU12I_W: ++ case LU32I_D: ++ case PCADDI: ++ case PCALAU12I: ++ case PCADDU12I: ++ case PCADDU18I: ++ kType = kOp7Type; ++ break; ++ default: ++ kType = kUnsupported; ++ } ++ } ++ ++ if (kType == kUnsupported) { ++ // Check for kOp8Type ++ switch (Bits(31, 24) << 24) { ++ case LDPTR_W: ++ case STPTR_W: ++ case LDPTR_D: ++ case STPTR_D: ++ case LL_W: ++ case SC_W: ++ case LL_D: ++ case SC_D: ++ case CSR: ++ kType = kOp8Type; ++ break; ++ default: ++ kType = kUnsupported; ++ } ++ } ++ ++ if (kType == kUnsupported) { ++ // Check for kOp10Type ++ switch (Bits(31, 22) << 22) { ++ case BSTR_W: { ++ // If Bit(21) = 0, then the Opcode is not BSTR_W. 
++ if (Bit(21) == 0) ++ kType = kUnsupported; ++ else ++ kType = kOp10Type; ++ break; ++ } ++ case BSTRINS_D: ++ case BSTRPICK_D: ++ case SLTI: ++ case SLTUI: ++ case ADDI_W: ++ case ADDI_D: ++ case LU52I_D: ++ case ANDI: ++ case ORI: ++ case XORI: ++ case LD_B: ++ case LD_H: ++ case LD_W: ++ case LD_D: ++ case ST_B: ++ case ST_H: ++ case ST_W: ++ case ST_D: ++ case LD_BU: ++ case LD_HU: ++ case LD_WU: ++ case PRELD: ++ case FLD_S: ++ case FST_S: ++ case FLD_D: ++ case FST_D: ++ case CACHE: ++ kType = kOp10Type; ++ break; ++ default: ++ kType = kUnsupported; ++ } ++ } ++ ++ if (kType == kUnsupported) { ++ // Check for kOp12Type ++ switch (Bits(31, 20) << 20) { ++ case FMADD_S: ++ case FMADD_D: ++ case FMSUB_S: ++ case FMSUB_D: ++ case FNMADD_S: ++ case FNMADD_D: ++ case FNMSUB_S: ++ case FNMSUB_D: ++ case FCMP_COND_S: ++ case FCMP_COND_D: ++ case FSEL: ++ kType = kOp12Type; ++ break; ++ default: ++ kType = kUnsupported; ++ } ++ } ++ ++ if (kType == kUnsupported) { ++ // Check for kOp14Type ++ switch (Bits(31, 18) << 18) { ++ case ALSL: ++ case BYTEPICK_W: ++ case BYTEPICK_D: ++ case ALSL_D: ++ case SLLI: ++ case SRLI: ++ case SRAI: ++ case ROTRI: ++ case LDDIR: ++ case LDPTE: ++ kType = kOp14Type; ++ break; ++ default: ++ kType = kUnsupported; ++ } ++ } ++ ++ if (kType == kUnsupported) { ++ // Check for kOp17Type ++ switch (Bits(31, 15) << 15) { ++ case ADD_W: ++ case ADD_D: ++ case SUB_W: ++ case SUB_D: ++ case SLT: ++ case SLTU: ++ case MASKEQZ: ++ case MASKNEZ: ++ case NOR: ++ case AND: ++ case OR: ++ case XOR: ++ case ORN: ++ case ANDN: ++ case SLL_W: ++ case SRL_W: ++ case SRA_W: ++ case SLL_D: ++ case SRL_D: ++ case SRA_D: ++ case ROTR_D: ++ case ROTR_W: ++ case MUL_W: ++ case MULH_W: ++ case MULH_WU: ++ case MUL_D: ++ case MULH_D: ++ case MULH_DU: ++ case MULW_D_W: ++ case MULW_D_WU: ++ case DIV_W: ++ case MOD_W: ++ case DIV_WU: ++ case MOD_WU: ++ case DIV_D: ++ case MOD_D: ++ case DIV_DU: ++ case MOD_DU: ++ case BREAK: ++ case FADD_S: ++ case FADD_D: ++ case FSUB_S: ++ case FSUB_D: ++ case FMUL_S: ++ case FMUL_D: ++ case FDIV_S: ++ case FDIV_D: ++ case FMAX_S: ++ case FMAX_D: ++ case FMIN_S: ++ case FMIN_D: ++ case FMAXA_S: ++ case FMAXA_D: ++ case FMINA_S: ++ case FMINA_D: ++ case LDX_B: ++ case LDX_H: ++ case LDX_W: ++ case LDX_D: ++ case STX_B: ++ case STX_H: ++ case STX_W: ++ case STX_D: ++ case LDX_BU: ++ case LDX_HU: ++ case LDX_WU: ++ case PRELDX: ++ case FLDX_S: ++ case FLDX_D: ++ case FSTX_S: ++ case FSTX_D: ++ case ASRTLE_D: ++ case ASRTGT_D: ++ case DBGCALL: ++ case SYSCALL: ++ case HYPCALL: ++ case AMSWAP_W: ++ case AMSWAP_D: ++ case AMADD_W: ++ case AMADD_D: ++ case AMAND_W: ++ case AMAND_D: ++ case AMOR_W: ++ case AMOR_D: ++ case AMXOR_W: ++ case AMXOR_D: ++ case AMMAX_W: ++ case AMMAX_D: ++ case AMMIN_W: ++ case AMMIN_D: ++ case AMMAX_WU: ++ case AMMAX_DU: ++ case AMMIN_WU: ++ case AMMIN_DU: ++ case AMSWAP_DB_W: ++ case AMSWAP_DB_D: ++ case AMADD_DB_W: ++ case AMADD_DB_D: ++ case AMAND_DB_W: ++ case AMAND_DB_D: ++ case AMOR_DB_W: ++ case AMOR_DB_D: ++ case AMXOR_DB_W: ++ case AMXOR_DB_D: ++ case AMMAX_DB_W: ++ case AMMAX_DB_D: ++ case AMMIN_DB_W: ++ case AMMIN_DB_D: ++ case AMMAX_DB_WU: ++ case AMMAX_DB_DU: ++ case AMMIN_DB_WU: ++ case AMMIN_DB_DU: ++ case DBAR: ++ case IBAR: ++ case FLDGT_S: ++ case FLDGT_D: ++ case FLDLE_S: ++ case FLDLE_D: ++ case FSTGT_S: ++ case FSTGT_D: ++ case FSTLE_S: ++ case FSTLE_D: ++ case LDGT_B: ++ case LDGT_H: ++ case LDGT_W: ++ case LDGT_D: ++ case LDLE_B: ++ case LDLE_H: ++ case LDLE_W: ++ case LDLE_D: ++ case STGT_B: ++ case STGT_H: 
++ case STGT_W: ++ case STGT_D: ++ case STLE_B: ++ case STLE_H: ++ case STLE_W: ++ case STLE_D: ++ case WAIT_INVTLB: ++ case FSCALEB_S: ++ case FSCALEB_D: ++ case FCOPYSIGN_S: ++ case FCOPYSIGN_D: ++ case CRC_W_B_W: ++ case CRC_W_H_W: ++ case CRC_W_W_W: ++ case CRC_W_D_W: ++ case CRCC_W_B_W: ++ case CRCC_W_H_W: ++ case CRCC_W_W_W: ++ case CRCC_W_D_W: ++ kType = kOp17Type; ++ break; ++ default: ++ kType = kUnsupported; ++ } ++ } ++ ++ if (kType == kUnsupported) { ++ // Check for kOp22Type ++ switch (Bits(31, 10) << 10) { ++ case CLZ_W: ++ case CTZ_W: ++ case CLZ_D: ++ case CTZ_D: ++ case REVB_2H: ++ case REVB_4H: ++ case REVB_2W: ++ case REVB_D: ++ case REVH_2W: ++ case REVH_D: ++ case BITREV_4B: ++ case BITREV_8B: ++ case BITREV_W: ++ case BITREV_D: ++ case EXT_W_B: ++ case EXT_W_H: ++ case FABS_S: ++ case FABS_D: ++ case FNEG_S: ++ case FNEG_D: ++ case FSQRT_S: ++ case FSQRT_D: ++ case FMOV_S: ++ case FMOV_D: ++ case MOVGR2FR_W: ++ case MOVGR2FR_D: ++ case MOVGR2FRH_W: ++ case MOVFR2GR_S: ++ case MOVFR2GR_D: ++ case MOVFRH2GR_S: ++ case MOVGR2FCSR: ++ case MOVFCSR2GR: ++ case FCVT_S_D: ++ case FCVT_D_S: ++ case FTINTRM_W_S: ++ case FTINTRM_W_D: ++ case FTINTRM_L_S: ++ case FTINTRM_L_D: ++ case FTINTRP_W_S: ++ case FTINTRP_W_D: ++ case FTINTRP_L_S: ++ case FTINTRP_L_D: ++ case FTINTRZ_W_S: ++ case FTINTRZ_W_D: ++ case FTINTRZ_L_S: ++ case FTINTRZ_L_D: ++ case FTINTRNE_W_S: ++ case FTINTRNE_W_D: ++ case FTINTRNE_L_S: ++ case FTINTRNE_L_D: ++ case FTINT_W_S: ++ case FTINT_W_D: ++ case FTINT_L_S: ++ case FTINT_L_D: ++ case FFINT_S_W: ++ case FFINT_S_L: ++ case FFINT_D_W: ++ case FFINT_D_L: ++ case FRINT_S: ++ case FRINT_D: ++ case MOVFR2CF: ++ case MOVCF2FR: ++ case MOVGR2CF: ++ case MOVCF2GR: ++ case FRECIP_S: ++ case FRECIP_D: ++ case FRSQRT_S: ++ case FRSQRT_D: ++ case FCLASS_S: ++ case FCLASS_D: ++ case FLOGB_S: ++ case FLOGB_D: ++ case CLO_W: ++ case CTO_W: ++ case CLO_D: ++ case CTO_D: ++ case IOCSRRD_B: ++ case IOCSRRD_H: ++ case IOCSRRD_W: ++ case IOCSRRD_D: ++ case IOCSRWR_B: ++ case IOCSRWR_H: ++ case IOCSRWR_W: ++ case IOCSRWR_D: ++ case TLBINV: ++ case TLBFLUSH: ++ case TLBP: ++ case TLBR: ++ case TLBWI: ++ case TLBWR: ++ case ERET: ++ case RDTIMEL_W: ++ case RDTIMEH_W: ++ case RDTIME_D: ++ // case CPUCFG: ++ kType = kOp22Type; ++ break; ++ default: ++ kType = kUnsupported; ++ } ++ } ++ ++ return kType; ++} ++ ++// ----------------------------------------------------------------------------- ++// Instructions. ++ ++template ++bool InstructionGetters
<T>
::IsTrap() const { ++ return true; ++} ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_CODEGEN_LA64_CONSTANTS_LA64_H_ +diff --git a/src/3rdparty/chromium/v8/src/codegen/la64/cpu-la64.cc b/src/3rdparty/chromium/v8/src/codegen/la64/cpu-la64.cc +new file mode 100644 +index 0000000000..3e11a88313 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/codegen/la64/cpu-la64.cc +@@ -0,0 +1,38 @@ ++// Copyright 2012 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++// CPU specific code for loongisa independent of OS goes here. ++ ++#include ++#include ++ ++#if V8_TARGET_ARCH_LA64 ++ ++#include "src/codegen/cpu-features.h" ++ ++namespace v8 { ++namespace internal { ++ ++void CpuFeatures::FlushICache(void* start, size_t size) { ++#if !defined(USE_SIMULATOR) ++ // Nothing to do, flushing no instructions. ++ if (size == 0) { ++ return; ++ } ++ ++#if defined(ANDROID) && !defined(__LP64__) ++ // Bionic cacheflush can typically run in userland, avoiding kernel call. ++ char* end = reinterpret_cast(start) + size; ++ cacheflush(reinterpret_cast(start), reinterpret_cast(end), ++ 0); ++#else // ANDROID ++ asm("ibar 0\n"); ++#endif // ANDROID ++#endif // !USE_SIMULATOR. ++} ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_TARGET_ARCH_LA64 +diff --git a/src/3rdparty/chromium/v8/src/codegen/la64/interface-descriptors-la64.cc b/src/3rdparty/chromium/v8/src/codegen/la64/interface-descriptors-la64.cc +new file mode 100644 +index 0000000000..b72ee4f917 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/codegen/la64/interface-descriptors-la64.cc +@@ -0,0 +1,332 @@ ++// Copyright 2012 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#if V8_TARGET_ARCH_LA64 ++ ++#include "src/codegen/interface-descriptors.h" ++ ++#include "src/execution/frames.h" ++ ++namespace v8 { ++namespace internal { ++ ++const Register CallInterfaceDescriptor::ContextRegister() { return cp; } ++ ++void CallInterfaceDescriptor::DefaultInitializePlatformSpecific( ++ CallInterfaceDescriptorData* data, int register_parameter_count) { ++ const Register default_stub_registers[] = {a0, a1, a2, a3, a4}; ++ CHECK_LE(static_cast(register_parameter_count), ++ arraysize(default_stub_registers)); ++ data->InitializePlatformSpecific(register_parameter_count, ++ default_stub_registers); ++} ++ ++// On MIPS it is not allowed to use odd numbered floating point registers ++// (e.g. f1, f3, etc.) for parameters. This can happen if we use ++// DefaultInitializePlatformSpecific to assign float registers for parameters. ++// E.g if fourth parameter goes to float register, f7 would be assigned for ++// parameter (a3 casted to int is 7). 
++bool CallInterfaceDescriptor::IsValidFloatParameterRegister(Register reg) { ++ return reg.code() % 2 == 0; ++} ++ ++void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ const Register default_stub_registers[] = {a0, a1, a2, a3}; ++ CHECK_EQ(static_cast(kParameterCount), ++ arraysize(default_stub_registers)); ++ data->InitializePlatformSpecific(kParameterCount, default_stub_registers); ++} ++ ++void WasmI32AtomicWait64Descriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ const Register default_stub_registers[] = {a0, a1, a2}; ++ CHECK_EQ(static_cast(kParameterCount), ++ arraysize(default_stub_registers)); ++ data->InitializePlatformSpecific(kParameterCount, default_stub_registers); ++} ++ ++void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ const Register default_stub_registers[] = {a0, a1, a2, a3, a4}; ++ CHECK_EQ(static_cast(kParameterCount - kStackArgumentsCount), ++ arraysize(default_stub_registers)); ++ data->InitializePlatformSpecific(kParameterCount - kStackArgumentsCount, ++ default_stub_registers); ++} ++ ++void WasmI64AtomicWait64Descriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ const Register default_stub_registers[] = {a0, a1, a2}; ++ CHECK_EQ(static_cast(kParameterCount), ++ arraysize(default_stub_registers)); ++ data->InitializePlatformSpecific(kParameterCount, default_stub_registers); ++} ++ ++void RecordWriteDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ const Register default_stub_registers[] = {a0, a1, a2, a3, a4}; ++ ++ data->RestrictAllocatableRegisters(default_stub_registers, ++ arraysize(default_stub_registers)); ++ ++ CHECK_LE(static_cast(kParameterCount), ++ arraysize(default_stub_registers)); ++ data->InitializePlatformSpecific(kParameterCount, default_stub_registers); ++} ++ ++void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ const Register default_stub_registers[] = {a0, a1, a2, a3, a4}; ++ ++ data->RestrictAllocatableRegisters(default_stub_registers, ++ arraysize(default_stub_registers)); ++ ++ CHECK_LE(static_cast(kParameterCount), ++ arraysize(default_stub_registers)); ++ data->InitializePlatformSpecific(kParameterCount, default_stub_registers); ++} ++ ++const Register FastNewFunctionContextDescriptor::ScopeInfoRegister() { ++ return a1; ++} ++const Register FastNewFunctionContextDescriptor::SlotsRegister() { return a0; } ++ ++const Register LoadDescriptor::ReceiverRegister() { return a1; } ++const Register LoadDescriptor::NameRegister() { return a2; } ++const Register LoadDescriptor::SlotRegister() { return a0; } ++ ++const Register LoadWithVectorDescriptor::VectorRegister() { return a3; } ++ ++const Register StoreDescriptor::ReceiverRegister() { return a1; } ++const Register StoreDescriptor::NameRegister() { return a2; } ++const Register StoreDescriptor::ValueRegister() { return a0; } ++const Register StoreDescriptor::SlotRegister() { return a4; } ++ ++const Register StoreWithVectorDescriptor::VectorRegister() { return a3; } ++ ++const Register StoreTransitionDescriptor::SlotRegister() { return a4; } ++const Register StoreTransitionDescriptor::VectorRegister() { return a3; } ++const Register StoreTransitionDescriptor::MapRegister() { return a5; } ++ ++const Register ApiGetterDescriptor::HolderRegister() { return a0; } ++const Register ApiGetterDescriptor::CallbackRegister() { return a3; } ++ 
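++// Note: the fixed register assignments in this file are drawn from the
++// LoongArch64 integer argument registers a0-a7 (with cp as the context
++// register), which appears to follow the conventions of the MIPS64
++// descriptors this port is derived from.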
++const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; } ++const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; } ++ ++// static ++const Register TypeConversionDescriptor::ArgumentRegister() { return a0; } ++ ++void TypeofDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ Register registers[] = {a3}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void CallTrampolineDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a1: target ++ // a0: number of arguments ++ Register registers[] = {a1, a0}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void CallVarargsDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a0 : number of arguments (on the stack, not including receiver) ++ // a1 : the target to call ++ // a4 : arguments list length (untagged) ++ // a2 : arguments list (FixedArray) ++ Register registers[] = {a1, a0, a4, a2}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void CallForwardVarargsDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a1: the target to call ++ // a0: number of arguments ++ // a2: start index (to support rest parameters) ++ Register registers[] = {a1, a0, a2}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void CallFunctionTemplateDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a1 : function template info ++ // a0 : number of arguments (on the stack, not including receiver) ++ Register registers[] = {a1, a0}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void CallWithSpreadDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a0 : number of arguments (on the stack, not including receiver) ++ // a1 : the target to call ++ // a2 : the object to spread ++ Register registers[] = {a1, a0, a2}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void CallWithArrayLikeDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a1 : the target to call ++ // a2 : the arguments list ++ Register registers[] = {a1, a2}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void ConstructVarargsDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a0 : number of arguments (on the stack, not including receiver) ++ // a1 : the target to call ++ // a3 : the new target ++ // a4 : arguments list length (untagged) ++ // a2 : arguments list (FixedArray) ++ Register registers[] = {a1, a3, a0, a4, a2}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void ConstructForwardVarargsDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a1: the target to call ++ // a3: new target ++ // a0: number of arguments ++ // a2: start index (to support rest parameters) ++ Register registers[] = {a1, a3, a0, a2}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void ConstructWithSpreadDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a0 : number of arguments (on the stack, not including receiver) ++ // a1 : the target to call ++ // a3 : the new target ++ // a2 : the object to spread ++ Register registers[] = {a1, a3, a0, a2}; ++ 
data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a1 : the target to call ++ // a3 : the new target ++ // a2 : the arguments list ++ Register registers[] = {a1, a3, a2}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void ConstructStubDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a1: target ++ // a3: new target ++ // a0: number of arguments ++ // a2: allocation site or undefined ++ Register registers[] = {a1, a3, a0, a2}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void AbortDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ Register registers[] = {a0}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void AllocateHeapNumberDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // register state ++ data->InitializePlatformSpecific(0, nullptr); ++} ++ ++void CompareDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ Register registers[] = {a1, a0}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void BinaryOpDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ Register registers[] = {a1, a0}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void ArgumentsAdaptorDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ Register registers[] = { ++ a1, // JSFunction ++ a3, // the new target ++ a0, // actual number of arguments ++ a2, // expected number of arguments ++ }; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void ApiCallbackDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ Register registers[] = { ++ a1, // kApiFunctionAddress ++ a2, // kArgc ++ a3, // kCallData ++ a0, // kHolder ++ }; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void InterpreterDispatchDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ Register registers[] = { ++ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister, ++ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ Register registers[] = { ++ a0, // argument count (not including receiver) ++ a2, // address of first argument ++ a1 // the target callable to be call ++ }; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ Register registers[] = { ++ a0, // argument count (not including receiver) ++ a4, // address of the first argument ++ a1, // constructor to call ++ a3, // new target ++ a2, // allocation site feedback if available, undefined otherwise ++ }; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void ResumeGeneratorDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ Register registers[] = { ++ a0, // the value to pass to the generator ++ a1 // the JSGeneratorObject to resume ++ }; ++ 
data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void FrameDropperTrampolineDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ Register registers[] = { ++ a1, // loaded new FP ++ }; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void RunMicrotasksEntryDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ Register registers[] = {a0, a1}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_TARGET_ARCH_LA64 +diff --git a/src/3rdparty/chromium/v8/src/codegen/la64/macro-assembler-la64.cc b/src/3rdparty/chromium/v8/src/codegen/la64/macro-assembler-la64.cc +new file mode 100644 +index 0000000000..3fde2b9dfa +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/codegen/la64/macro-assembler-la64.cc +@@ -0,0 +1,3992 @@ ++// Copyright 2012 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#include // For LONG_MIN, LONG_MAX. ++ ++#if V8_TARGET_ARCH_LA64 ++ ++#include "src/base/bits.h" ++#include "src/base/division-by-constant.h" ++#include "src/codegen/assembler-inl.h" ++#include "src/codegen/callable.h" ++#include "src/codegen/code-factory.h" ++#include "src/codegen/external-reference-table.h" ++#include "src/codegen/macro-assembler.h" ++#include "src/codegen/register-configuration.h" ++#include "src/debug/debug.h" ++#include "src/execution/frames-inl.h" ++#include "src/heap/heap-inl.h" // For MemoryChunk. ++#include "src/init/bootstrapper.h" ++#include "src/logging/counters.h" ++#include "src/objects/heap-number.h" ++#include "src/runtime/runtime.h" ++#include "src/snapshot/embedded/embedded-data.h" ++#include "src/snapshot/snapshot.h" ++#include "src/wasm/wasm-code-manager.h" ++ ++// Satisfy cpplint check, but don't include platform-specific header. It is ++// included recursively via macro-assembler.h. 
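++// (The "#if 0" block below is never compiled; it only exists so that the
++// include checker still sees a matching header include for this file.)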
++#if 0 ++#include "src/codegen/la64/macro-assembler-la64.h" ++#endif ++ ++namespace v8 { ++namespace internal { ++ ++static inline bool IsZero(const Operand& rk) { ++ if (rk.is_reg()) { ++ return rk.rm() == zero_reg; ++ } else { ++ return rk.immediate() == 0; ++ } ++} ++ ++int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, ++ Register exclusion1, ++ Register exclusion2, ++ Register exclusion3) const { ++ int bytes = 0; ++ RegList exclusions = 0; ++ if (exclusion1 != no_reg) { ++ exclusions |= exclusion1.bit(); ++ if (exclusion2 != no_reg) { ++ exclusions |= exclusion2.bit(); ++ if (exclusion3 != no_reg) { ++ exclusions |= exclusion3.bit(); ++ } ++ } ++ } ++ ++ RegList list = kJSCallerSaved & ~exclusions; ++ bytes += NumRegs(list) * kPointerSize; ++ ++ if (fp_mode == kSaveFPRegs) { ++ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize; ++ } ++ ++ return bytes; ++} ++ ++int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, ++ Register exclusion2, Register exclusion3) { ++ int bytes = 0; ++ RegList exclusions = 0; ++ if (exclusion1 != no_reg) { ++ exclusions |= exclusion1.bit(); ++ if (exclusion2 != no_reg) { ++ exclusions |= exclusion2.bit(); ++ if (exclusion3 != no_reg) { ++ exclusions |= exclusion3.bit(); ++ } ++ } ++ } ++ ++ RegList list = kJSCallerSaved & ~exclusions; ++ MultiPush(list); ++ bytes += NumRegs(list) * kPointerSize; ++ ++ if (fp_mode == kSaveFPRegs) { ++ MultiPushFPU(kCallerSavedFPU); ++ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize; ++ } ++ ++ return bytes; ++} ++ ++int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, ++ Register exclusion2, Register exclusion3) { ++ int bytes = 0; ++ if (fp_mode == kSaveFPRegs) { ++ MultiPopFPU(kCallerSavedFPU); ++ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize; ++ } ++ ++ RegList exclusions = 0; ++ if (exclusion1 != no_reg) { ++ exclusions |= exclusion1.bit(); ++ if (exclusion2 != no_reg) { ++ exclusions |= exclusion2.bit(); ++ if (exclusion3 != no_reg) { ++ exclusions |= exclusion3.bit(); ++ } ++ } ++ } ++ ++ RegList list = kJSCallerSaved & ~exclusions; ++ MultiPop(list); ++ bytes += NumRegs(list) * kPointerSize; ++ ++ return bytes; ++} ++ ++void TurboAssembler::LoadRoot(Register destination, RootIndex index) { ++ Ld_d(destination, MemOperand(s6, RootRegisterOffsetForRootIndex(index))); ++} ++ ++void TurboAssembler::PushCommonFrame(Register marker_reg) { ++ if (marker_reg.is_valid()) { ++ Push(ra, fp, marker_reg); ++ Add_d(fp, sp, Operand(kPointerSize)); ++ } else { ++ Push(ra, fp); ++ mov(fp, sp); ++ } ++} ++ ++void TurboAssembler::PushStandardFrame(Register function_reg) { ++ int offset = -StandardFrameConstants::kContextOffset; ++ if (function_reg.is_valid()) { ++ Push(ra, fp, cp, function_reg); ++ offset += kPointerSize; ++ } else { ++ Push(ra, fp, cp); ++ } ++ Add_d(fp, sp, Operand(offset)); ++} ++ ++int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { ++ // The registers are pushed starting with the highest encoding, ++ // which means that lowest encodings are closest to the stack pointer. ++ return kSafepointRegisterStackIndexMap[reg_code]; ++} ++ ++// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved) ++// The register 'object' contains a heap object pointer. The heap object ++// tag is shifted away. 
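++// Concretely, RecordWriteField computes the untagged field address
++// dst = object + offset - kHeapObjectTag, checks (in debug code) that it is
++// kPointerSize-aligned, and then delegates to the generic RecordWrite
++// barrier below.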
++void MacroAssembler::RecordWriteField(Register object, int offset, ++ Register value, Register dst, ++ RAStatus ra_status, ++ SaveFPRegsMode save_fp, ++ RememberedSetAction remembered_set_action, ++ SmiCheck smi_check) { ++ DCHECK(!AreAliased(value, dst, t8, object)); ++ // First, check if a write barrier is even needed. The tests below ++ // catch stores of Smis. ++ Label done; ++ ++ // Skip barrier if writing a smi. ++ if (smi_check == INLINE_SMI_CHECK) { ++ JumpIfSmi(value, &done); ++ } ++ ++ // Although the object register is tagged, the offset is relative to the start ++ // of the object, so so offset must be a multiple of kPointerSize. ++ DCHECK(IsAligned(offset, kPointerSize)); ++ ++ Add_d(dst, object, Operand(offset - kHeapObjectTag)); ++ if (emit_debug_code()) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Label ok; ++ And(t8, dst, Operand(kPointerSize - 1)); ++ Branch(&ok, eq, t8, Operand(zero_reg)); ++ stop(); ++ bind(&ok); ++ } ++ ++ RecordWrite(object, dst, value, ra_status, save_fp, remembered_set_action, ++ OMIT_SMI_CHECK); ++ ++ bind(&done); ++ ++ // Clobber clobbered input registers when running with the debug-code flag ++ // turned on to provoke errors. ++ if (emit_debug_code()) { ++ li(value, Operand(bit_cast(kZapValue + 4))); ++ li(dst, Operand(bit_cast(kZapValue + 8))); ++ } ++} ++ ++void TurboAssembler::SaveRegisters(RegList registers) { ++ DCHECK_GT(NumRegs(registers), 0); ++ RegList regs = 0; ++ for (int i = 0; i < Register::kNumRegisters; ++i) { ++ if ((registers >> i) & 1u) { ++ regs |= Register::from_code(i).bit(); ++ } ++ } ++ MultiPush(regs); ++} ++ ++void TurboAssembler::RestoreRegisters(RegList registers) { ++ DCHECK_GT(NumRegs(registers), 0); ++ RegList regs = 0; ++ for (int i = 0; i < Register::kNumRegisters; ++i) { ++ if ((registers >> i) & 1u) { ++ regs |= Register::from_code(i).bit(); ++ } ++ } ++ MultiPop(regs); ++} ++ ++void TurboAssembler::CallEphemeronKeyBarrier(Register object, Register address, ++ SaveFPRegsMode fp_mode) { ++ EphemeronKeyBarrierDescriptor descriptor; ++ RegList registers = descriptor.allocatable_registers(); ++ ++ SaveRegisters(registers); ++ ++ Register object_parameter( ++ descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kObject)); ++ Register slot_parameter(descriptor.GetRegisterParameter( ++ EphemeronKeyBarrierDescriptor::kSlotAddress)); ++ Register fp_mode_parameter( ++ descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kFPMode)); ++ ++ Push(object); ++ Push(address); ++ ++ Pop(slot_parameter); ++ Pop(object_parameter); ++ ++ Move(fp_mode_parameter, Smi::FromEnum(fp_mode)); ++ Call(isolate()->builtins()->builtin_handle(Builtins::kEphemeronKeyBarrier), ++ RelocInfo::CODE_TARGET); ++ RestoreRegisters(registers); ++} ++ ++void TurboAssembler::CallRecordWriteStub( ++ Register object, Register address, ++ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) { ++ CallRecordWriteStub( ++ object, address, remembered_set_action, fp_mode, ++ isolate()->builtins()->builtin_handle(Builtins::kRecordWrite), ++ kNullAddress); ++} ++ ++void TurboAssembler::CallRecordWriteStub( ++ Register object, Register address, ++ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode, ++ Address wasm_target) { ++ CallRecordWriteStub(object, address, remembered_set_action, fp_mode, ++ Handle::null(), wasm_target); ++} ++ ++void TurboAssembler::CallRecordWriteStub( ++ Register object, Register address, ++ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode, ++ Handle 
code_target, Address wasm_target) { ++ DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress); ++ // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode, ++ // i.e. always emit remember set and save FP registers in RecordWriteStub. If ++ // large performance regression is observed, we should use these values to ++ // avoid unnecessary work. ++ ++ RecordWriteDescriptor descriptor; ++ RegList registers = descriptor.allocatable_registers(); ++ ++ SaveRegisters(registers); ++ Register object_parameter( ++ descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject)); ++ Register slot_parameter( ++ descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot)); ++ Register remembered_set_parameter( ++ descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet)); ++ Register fp_mode_parameter( ++ descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode)); ++ ++ Push(object); ++ Push(address); ++ ++ Pop(slot_parameter); ++ Pop(object_parameter); ++ ++ Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action)); ++ Move(fp_mode_parameter, Smi::FromEnum(fp_mode)); ++ if (code_target.is_null()) { ++ Call(wasm_target, RelocInfo::WASM_STUB_CALL); ++ } else { ++ Call(code_target, RelocInfo::CODE_TARGET); ++ } ++ ++ RestoreRegisters(registers); ++} ++ ++// Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved) ++// The register 'object' contains a heap object pointer. The heap object ++// tag is shifted away. ++void MacroAssembler::RecordWrite(Register object, Register address, ++ Register value, RAStatus ra_status, ++ SaveFPRegsMode fp_mode, ++ RememberedSetAction remembered_set_action, ++ SmiCheck smi_check) { ++ DCHECK(!AreAliased(object, address, value)); ++ ++ if (emit_debug_code()) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ Ld_d(scratch, MemOperand(address, 0)); ++ Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, scratch, ++ Operand(value)); ++ } ++ ++ if ((remembered_set_action == OMIT_REMEMBERED_SET && ++ !FLAG_incremental_marking) || ++ FLAG_disable_write_barriers) { ++ return; ++ } ++ ++ // First, check if a write barrier is even needed. The tests below ++ // catch stores of smis and stores into the young generation. ++ Label done; ++ ++ if (smi_check == INLINE_SMI_CHECK) { ++ DCHECK_EQ(0, kSmiTag); ++ JumpIfSmi(value, &done); ++ } ++ ++ CheckPageFlag(value, ++ value, // Used as scratch. ++ MemoryChunk::kPointersToHereAreInterestingMask, eq, &done); ++ CheckPageFlag(object, ++ value, // Used as scratch. ++ MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done); ++ ++ // Record the actual write. ++ if (ra_status == kRAHasNotBeenSaved) { ++ push(ra); ++ } ++ CallRecordWriteStub(object, address, remembered_set_action, fp_mode); ++ if (ra_status == kRAHasNotBeenSaved) { ++ pop(ra); ++ } ++ ++ bind(&done); ++ ++ // Clobber clobbered registers when running with the debug-code flag ++ // turned on to provoke errors. ++ if (emit_debug_code()) { ++ li(address, Operand(bit_cast(kZapValue + 12))); ++ li(value, Operand(bit_cast(kZapValue + 16))); ++ } ++} ++ ++// --------------------------------------------------------------------------- ++// Instruction macros. ++ ++void TurboAssembler::Add_w(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ add_w(rd, rj, rk.rm()); ++ } else { ++ if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) { ++ addi_w(rd, rj, static_cast(rk.immediate())); ++ } else { ++ // li handles the relocation. 
++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ add_w(rd, rj, scratch); ++ } ++ } ++} ++ ++void TurboAssembler::Add_d(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ add_d(rd, rj, rk.rm()); ++ } else { ++ if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) { ++ addi_d(rd, rj, static_cast(rk.immediate())); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ add_d(rd, rj, scratch); ++ } ++ } ++} ++ ++void TurboAssembler::Sub_w(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ sub_w(rd, rj, rk.rm()); ++ } else { ++ DCHECK(is_int32(rk.immediate())); ++ if (is_int12(-rk.immediate()) && !MustUseReg(rk.rmode())) { ++ addi_w(rd, rj, ++ static_cast( ++ -rk.immediate())); // No subi_w instr, use addi_w(x, y, -imm). ++ } else { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rj != scratch); ++ if (-rk.immediate() >> 12 == 0 && !MustUseReg(rk.rmode())) { ++ // Use load -imm and addu when loading -imm generates one instruction. ++ li(scratch, -rk.immediate()); ++ add_w(rd, rj, scratch); ++ } else { ++ // li handles the relocation. ++ li(scratch, rk); ++ sub_w(rd, rj, scratch); ++ } ++ } ++ } ++} ++ ++void TurboAssembler::Sub_d(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ sub_d(rd, rj, rk.rm()); ++ } else if (is_int12(-rk.immediate()) && !MustUseReg(rk.rmode())) { ++ addi_d(rd, rj, ++ static_cast( ++ -rk.immediate())); // No subi_d instr, use addi_d(x, y, -imm). ++ } else { ++ DCHECK(rj != t7); ++ int li_count = InstrCountForLi64Bit(rk.immediate()); ++ int li_neg_count = InstrCountForLi64Bit(-rk.immediate()); ++ if (li_neg_count < li_count && !MustUseReg(rk.rmode())) { ++ // Use load -imm and add_d when loading -imm generates one instruction. ++ DCHECK(rk.immediate() != std::numeric_limits::min()); ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ li(scratch, Operand(-rk.immediate())); ++ add_d(rd, rj, scratch); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ li(scratch, rk); ++ sub_d(rd, rj, scratch); ++ } ++ } ++} ++ ++void TurboAssembler::Mul_w(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ mul_w(rd, rj, rk.rm()); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ mul_w(rd, rj, scratch); ++ } ++} ++ ++void TurboAssembler::Mulh_w(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ mulh_w(rd, rj, rk.rm()); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ mulh_w(rd, rj, scratch); ++ } ++} ++ ++void TurboAssembler::Mulh_wu(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ mulh_wu(rd, rj, rk.rm()); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ mulh_wu(rd, rj, scratch); ++ } ++} ++ ++void TurboAssembler::Mul_d(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ mul_d(rd, rj, rk.rm()); ++ } else { ++ // li handles the relocation. 
++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ mul_d(rd, rj, scratch); ++ } ++} ++ ++void TurboAssembler::Mulh_d(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ mulh_d(rd, rj, rk.rm()); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ mulh_d(rd, rj, scratch); ++ } ++} ++ ++void TurboAssembler::Div_w(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ div_w(rd, rj, rk.rm()); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ div_w(rd, rj, scratch); ++ } ++} ++ ++void TurboAssembler::Mod_w(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ mod_w(rd, rj, rk.rm()); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ mod_w(rd, rj, scratch); ++ } ++} ++ ++void TurboAssembler::Mod_wu(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ mod_wu(rd, rj, rk.rm()); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ mod_wu(rd, rj, scratch); ++ } ++} ++ ++void TurboAssembler::Div_d(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ div_d(rd, rj, rk.rm()); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ div_d(rd, rj, scratch); ++ } ++} ++ ++void TurboAssembler::Div_wu(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ div_wu(rd, rj, rk.rm()); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ div_wu(rd, rj, scratch); ++ } ++} ++ ++void TurboAssembler::Div_du(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ div_du(rd, rj, rk.rm()); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ div_du(rd, rj, scratch); ++ } ++} ++ ++void TurboAssembler::Mod_d(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ mod_d(rd, rj, rk.rm()); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ mod_d(rd, rj, scratch); ++ } ++} ++ ++void TurboAssembler::Mod_du(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ mod_du(rd, rj, rk.rm()); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ mod_du(rd, rj, scratch); ++ } ++} ++ ++void TurboAssembler::And(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ and_(rd, rj, rk.rm()); ++ } else { ++ if (is_uint12(rk.immediate()) && !MustUseReg(rk.rmode())) { ++ andi(rd, rj, static_cast(rk.immediate())); ++ } else { ++ // li handles the relocation. 
++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ and_(rd, rj, scratch); ++ } ++ } ++} ++ ++void TurboAssembler::Or(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ or_(rd, rj, rk.rm()); ++ } else { ++ if (is_uint12(rk.immediate()) && !MustUseReg(rk.rmode())) { ++ ori(rd, rj, static_cast(rk.immediate())); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ or_(rd, rj, scratch); ++ } ++ } ++} ++ ++void TurboAssembler::Xor(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ xor_(rd, rj, rk.rm()); ++ } else { ++ if (is_uint12(rk.immediate()) && !MustUseReg(rk.rmode())) { ++ xori(rd, rj, static_cast(rk.immediate())); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ xor_(rd, rj, scratch); ++ } ++ } ++} ++ ++void TurboAssembler::Nor(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ nor(rd, rj, rk.rm()); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ nor(rd, rj, scratch); ++ } ++} ++ ++void TurboAssembler::Andn(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ andn(rd, rj, rk.rm()); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ andn(rd, rj, scratch); ++ } ++} ++ ++void TurboAssembler::Orn(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ orn(rd, rj, rk.rm()); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ orn(rd, rj, scratch); ++ } ++} ++ ++void TurboAssembler::Neg(Register rj, const Operand& rk) { ++ DCHECK(rk.is_reg()); ++ sub_d(rj, zero_reg, rk.rm()); ++} ++ ++void TurboAssembler::Slt(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ slt(rd, rj, rk.rm()); ++ } else { ++ if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) { ++ slti(rd, rj, static_cast(rk.immediate())); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ // TODO why?? ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ slt(rd, rj, scratch); ++ } ++ } ++} ++ ++void TurboAssembler::Sltu(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ sltu(rd, rj, rk.rm()); ++ } else { ++ if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) { ++ sltui(rd, rj, static_cast(rk.immediate())); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ sltu(rd, rj, scratch); ++ } ++ } ++} ++ ++void TurboAssembler::Sle(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ slt(rd, rk.rm(), rj); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.hasAvailable() ? 
temps.Acquire() : t8; ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ slt(rd, scratch, rj); ++ } ++ xori(rd, rd, 1); ++} ++ ++void TurboAssembler::Sleu(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ sltu(rd, rk.rm(), rj); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ sltu(rd, scratch, rj); ++ } ++ xori(rd, rd, 1); ++} ++ ++void TurboAssembler::Sge(Register rd, Register rj, const Operand& rk) { ++ Slt(rd, rj, rk); ++ xori(rd, rd, 1); ++} ++ ++void TurboAssembler::Sgeu(Register rd, Register rj, const Operand& rk) { ++ Sltu(rd, rj, rk); ++ xori(rd, rd, 1); ++} ++ ++void TurboAssembler::Sgt(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ slt(rd, rk.rm(), rj); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ slt(rd, scratch, rj); ++ } ++} ++ ++void TurboAssembler::Sgtu(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ sltu(rd, rk.rm(), rj); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ sltu(rd, scratch, rj); ++ } ++} ++ ++void TurboAssembler::Rotr_w(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ rotr_w(rd, rj, rk.rm()); ++ } else { ++ int64_t ror_value = rk.immediate() % 32; ++ if (ror_value < 0) { ++ ror_value += 32; ++ } ++ rotri_w(rd, rj, ror_value); ++ } ++} ++ ++void TurboAssembler::Rotr_d(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ rotr_d(rd, rj, rk.rm()); ++ } else { ++ int64_t dror_value = rk.immediate() % 64; ++ if (dror_value < 0) dror_value += 64; ++ rotri_d(rd, rj, dror_value); ++ } ++} ++ ++void MacroAssembler::Pref(int32_t hint, const MemOperand& rj) { ++ // TODO ++ // pref(hint); ++} ++ ++void TurboAssembler::Alsl_w(Register rd, Register rj, Register rk, uint8_t sa, ++ Register scratch) { ++ DCHECK(sa >= 1 && sa <= 31); ++ if (sa <= 4) { ++ alsl_w(rd, rj, rk, sa); ++ } else { ++ Register tmp = rd == rk ? scratch : rd; ++ DCHECK(tmp != rk); ++ slli_w(tmp, rj, sa); ++ add_w(rd, rk, tmp); ++ } ++} ++ ++void TurboAssembler::Alsl_d(Register rd, Register rj, Register rk, uint8_t sa, ++ Register scratch) { ++ DCHECK(sa >= 1 && sa <= 31); ++ if (sa <= 4) { ++ alsl_d(rd, rj, rk, sa); ++ } else { ++ Register tmp = rd == rk ? 
scratch : rd; ++ DCHECK(tmp != rk); ++ slli_d(tmp, rj, sa); ++ add_d(rd, rk, tmp); ++ } ++} ++ ++// ------------Pseudo-instructions------------- ++ ++// Change endianness ++void TurboAssembler::ByteSwapSigned(Register dest, Register src, ++ int operand_size) { ++ DCHECK(operand_size == 2 || operand_size == 4 || operand_size == 8); ++ if (operand_size == 2) { ++ revb_2h(dest, src); ++ ext_w_h(dest, dest); ++ } else if (operand_size == 4) { ++ revb_2w(dest, src); ++ slli_w(dest, dest, 0); ++ } else { ++ revb_d(dest, dest); ++ } ++} ++ ++void TurboAssembler::ByteSwapUnsigned(Register dest, Register src, ++ int operand_size) { ++ DCHECK(operand_size == 2 || operand_size == 4); ++ if (operand_size == 2) { ++ revb_2h(dest, src); ++ bstrins_d(dest, zero_reg, 63, 16); ++ } else { ++ revb_2w(dest, src); ++ bstrins_d(dest, zero_reg, 63, 32); ++ } ++} ++ ++void TurboAssembler::Ld_b(Register rd, const MemOperand& rj) { ++ MemOperand source = rj; ++ AdjustBaseAndOffset(&source); ++ if (source.hasIndexReg()) { ++ ldx_b(rd, source.base(), source.index()); ++ } else { ++ ld_b(rd, source.base(), source.offset()); ++ } ++} ++ ++void TurboAssembler::Ld_bu(Register rd, const MemOperand& rj) { ++ MemOperand source = rj; ++ AdjustBaseAndOffset(&source); ++ if (source.hasIndexReg()) { ++ ldx_bu(rd, source.base(), source.index()); ++ } else { ++ ld_bu(rd, source.base(), source.offset()); ++ } ++} ++ ++void TurboAssembler::St_b(Register rd, const MemOperand& rj) { ++ MemOperand source = rj; ++ AdjustBaseAndOffset(&source); ++ if (source.hasIndexReg()) { ++ stx_b(rd, source.base(), source.index()); ++ } else { ++ st_b(rd, source.base(), source.offset()); ++ } ++} ++ ++void TurboAssembler::Ld_h(Register rd, const MemOperand& rj) { ++ MemOperand source = rj; ++ AdjustBaseAndOffset(&source); ++ if (source.hasIndexReg()) { ++ ldx_h(rd, source.base(), source.index()); ++ } else { ++ ld_h(rd, source.base(), source.offset()); ++ } ++} ++ ++void TurboAssembler::Ld_hu(Register rd, const MemOperand& rj) { ++ MemOperand source = rj; ++ AdjustBaseAndOffset(&source); ++ if (source.hasIndexReg()) { ++ ldx_hu(rd, source.base(), source.index()); ++ } else { ++ ld_hu(rd, source.base(), source.offset()); ++ } ++} ++ ++void TurboAssembler::St_h(Register rd, const MemOperand& rj) { ++ MemOperand source = rj; ++ AdjustBaseAndOffset(&source); ++ if (source.hasIndexReg()) { ++ stx_h(rd, source.base(), source.index()); ++ } else { ++ st_h(rd, source.base(), source.offset()); ++ } ++} ++ ++void TurboAssembler::Ld_w(Register rd, const MemOperand& rj) { ++ MemOperand source = rj; ++ AdjustBaseAndOffset(&source); // TODO ldptr_w ?? 
++ if (source.hasIndexReg()) { ++ ldx_w(rd, source.base(), source.index()); ++ } else { ++ ld_w(rd, source.base(), source.offset()); ++ } ++} ++ ++void TurboAssembler::Ld_wu(Register rd, const MemOperand& rj) { ++ MemOperand source = rj; ++ AdjustBaseAndOffset(&source); ++ if (source.hasIndexReg()) { ++ ldx_wu(rd, source.base(), source.index()); ++ } else { ++ ld_wu(rd, source.base(), source.offset()); ++ } ++} ++ ++void TurboAssembler::St_w(Register rd, const MemOperand& rj) { ++ MemOperand source = rj; ++ AdjustBaseAndOffset(&source); ++ if (source.hasIndexReg()) { ++ stx_w(rd, source.base(), source.index()); ++ } else { ++ st_w(rd, source.base(), source.offset()); ++ } ++} ++ ++void TurboAssembler::Ld_d(Register rd, const MemOperand& rj) { ++ MemOperand source = rj; ++ AdjustBaseAndOffset(&source); ++ if (source.hasIndexReg()) { ++ ldx_d(rd, source.base(), source.index()); ++ } else { ++ ld_d(rd, source.base(), source.offset()); ++ } ++} ++ ++void TurboAssembler::St_d(Register rd, const MemOperand& rj) { ++ MemOperand source = rj; ++ AdjustBaseAndOffset(&source); ++ if (source.hasIndexReg()) { ++ stx_d(rd, source.base(), source.index()); ++ } else { ++ st_d(rd, source.base(), source.offset()); ++ } ++} ++ ++void TurboAssembler::Fld_s(FPURegister fd, const MemOperand& src) { ++ MemOperand tmp = src; ++ AdjustBaseAndOffset(&tmp); ++ if (tmp.hasIndexReg()) { ++ fldx_s(fd, tmp.base(), tmp.index()); ++ } else { ++ fld_s(fd, tmp.base(), tmp.offset()); ++ } ++} ++ ++void TurboAssembler::Fst_s(FPURegister fs, const MemOperand& src) { ++ MemOperand tmp = src; ++ AdjustBaseAndOffset(&tmp); ++ if (tmp.hasIndexReg()) { ++ fstx_s(fs, tmp.base(), tmp.index()); ++ } else { ++ fst_s(fs, tmp.base(), tmp.offset()); ++ } ++} ++ ++void TurboAssembler::Fld_d(FPURegister fd, const MemOperand& src) { ++ MemOperand tmp = src; ++ AdjustBaseAndOffset(&tmp); ++ if (tmp.hasIndexReg()) { ++ fldx_d(fd, tmp.base(), tmp.index()); ++ } else { ++ fld_d(fd, tmp.base(), tmp.offset()); ++ } ++} ++ ++void TurboAssembler::Fst_d(FPURegister fs, const MemOperand& src) { ++ MemOperand tmp = src; ++ AdjustBaseAndOffset(&tmp); ++ if (tmp.hasIndexReg()) { ++ fstx_d(fs, tmp.base(), tmp.index()); ++ } else { ++ fst_d(fs, tmp.base(), tmp.offset()); ++ } ++} ++ ++void TurboAssembler::Ll_w(Register rd, const MemOperand& rj) { ++ DCHECK(!rj.hasIndexReg()); ++ bool is_one_instruction = is_int14(rj.offset()); ++ if (is_one_instruction) { ++ ll_w(rd, rj.base(), rj.offset()); ++ } else { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ li(scratch, rj.offset()); ++ add_d(scratch, scratch, rj.base()); ++ ll_w(rd, scratch, 0); ++ } ++} ++ ++void TurboAssembler::Ll_d(Register rd, const MemOperand& rj) { ++ DCHECK(!rj.hasIndexReg()); ++ bool is_one_instruction = is_int14(rj.offset()); ++ if (is_one_instruction) { ++ ll_d(rd, rj.base(), rj.offset()); ++ } else { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ li(scratch, rj.offset()); ++ add_d(scratch, scratch, rj.base()); ++ ll_d(rd, scratch, 0); ++ } ++} ++ ++void TurboAssembler::Sc_w(Register rd, const MemOperand& rj) { ++ DCHECK(!rj.hasIndexReg()); ++ bool is_one_instruction = is_int14(rj.offset()); ++ if (is_one_instruction) { ++ sc_w(rd, rj.base(), rj.offset()); ++ } else { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ li(scratch, rj.offset()); ++ add_d(scratch, scratch, rj.base()); ++ sc_w(rd, scratch, 0); ++ } ++} ++ ++void TurboAssembler::Sc_d(Register rd, const MemOperand& rj) { ++ 
DCHECK(!rj.hasIndexReg()); ++ bool is_one_instruction = is_int14(rj.offset()); ++ if (is_one_instruction) { ++ sc_d(rd, rj.base(), rj.offset()); ++ } else { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ li(scratch, rj.offset()); ++ add_d(scratch, scratch, rj.base()); ++ sc_d(rd, scratch, 0); ++ } ++} ++ ++void TurboAssembler::li(Register dst, Handle value, LiFlags mode) { ++ // TODO(jgruber,v8:8887): Also consider a root-relative load when generating ++ // non-isolate-independent code. In many cases it might be cheaper than ++ // embedding the relocatable value. ++ if (root_array_available_ && options().isolate_independent_code) { ++ IndirectLoadConstant(dst, value); ++ return; ++ } ++ li(dst, Operand(value), mode); ++} ++ ++void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) { ++ // TODO(jgruber,v8:8887): Also consider a root-relative load when generating ++ // non-isolate-independent code. In many cases it might be cheaper than ++ // embedding the relocatable value. ++ if (root_array_available_ && options().isolate_independent_code) { ++ IndirectLoadExternalReference(dst, value); ++ return; ++ } ++ li(dst, Operand(value), mode); ++} ++ ++void TurboAssembler::li(Register dst, const StringConstantBase* string, ++ LiFlags mode) { ++ li(dst, Operand::EmbeddedStringConstant(string), mode); ++} ++ ++static inline int InstrCountForLiLower32Bit(int64_t value) { ++ if (is_int12(static_cast(value)) || ++ is_uint12(static_cast(value)) || !(value & kImm12Mask)) { ++ return 1; ++ } else { ++ return 2; ++ } ++} ++ ++void TurboAssembler::LiLower32BitHelper(Register rd, Operand j) { ++ if (is_int12(static_cast(j.immediate()))) { ++ addi_d(rd, zero_reg, j.immediate()); ++ } else if (is_uint12(static_cast(j.immediate()))) { ++ ori(rd, zero_reg, j.immediate() & kImm12Mask); ++ } else { ++ lu12i_w(rd, j.immediate() >> 12 & 0xfffff); ++ if (j.immediate() & kImm12Mask) { ++ ori(rd, rd, j.immediate() & kImm12Mask); ++ } ++ } ++} ++ ++int TurboAssembler::InstrCountForLi64Bit(int64_t value) { ++ if (is_int32(value)) { ++ return InstrCountForLiLower32Bit(value); ++ } else if (is_int52(value)) { ++ return InstrCountForLiLower32Bit(value) + 1; ++ } else if ((value & 0xffffffffL) == 0) { ++ // 32 LSBs (Least Significant Bits) all set to zero. ++ uint8_t tzc = base::bits::CountTrailingZeros32(value >> 32); ++ uint8_t lzc = base::bits::CountLeadingZeros32(value >> 32); ++ if (tzc >= 20) { ++ return 1; ++ } else if (tzc + lzc > 12) { ++ return 2; ++ } else { ++ return 3; ++ } ++ } else { ++ int64_t imm21 = (value >> 31) & 0x1fffffL; ++ if (imm21 != 0x1fffffL && imm21 != 0) { ++ return InstrCountForLiLower32Bit(value) + 2; ++ } else { ++ return InstrCountForLiLower32Bit(value) + 1; ++ } ++ } ++ UNREACHABLE(); ++ return INT_MAX; ++} ++ ++// All changes to if...else conditions here must be added to ++// InstrCountForLi64Bit as well. ++void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) { ++ DCHECK(!j.is_reg()); ++ DCHECK(!MustUseReg(j.rmode())); ++ DCHECK(mode == OPTIMIZE_SIZE); ++ int64_t imm = j.immediate(); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ // Normal load of an immediate value which does not need Relocation Info. ++ if (is_int32(imm)) { ++ LiLower32BitHelper(rd, j); ++ } else if (is_int52(imm)) { ++ LiLower32BitHelper(rd, j); ++ lu32i_d(rd, imm >> 32 & 0xfffff); ++ } else if ((imm & 0xffffffffL) == 0) { ++ // 32 LSBs (Least Significant Bits) all set to zero. 
++ uint8_t tzc = base::bits::CountTrailingZeros32(imm >> 32); ++ uint8_t lzc = base::bits::CountLeadingZeros32(imm >> 32); ++ if (tzc >= 20) { ++ lu52i_d(rd, zero_reg, imm >> 52 & kImm12Mask); ++ } else if (tzc + lzc > 12) { ++ int32_t mask = (1 << (32 - tzc)) - 1; ++ lu12i_w(rd, imm >> (tzc + 32) & mask); ++ slli_d(rd, rd, tzc + 20); ++ } else { ++ xor_(rd, rd, rd); ++ lu32i_d(rd, imm >> 32 & 0xfffff); ++ lu52i_d(rd, rd, imm >> 52 & kImm12Mask); ++ } ++ } else { ++ int64_t imm21 = (imm >> 31) & 0x1fffffL; ++ LiLower32BitHelper(rd, j); ++ if (imm21 != 0x1fffffL && imm21 != 0) lu32i_d(rd, imm >> 32 & 0xfffff); ++ lu52i_d(rd, rd, imm >> 52 & kImm12Mask); ++ } ++} ++ ++void TurboAssembler::li(Register rd, Operand j, LiFlags mode) { ++ DCHECK(!j.is_reg()); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) { ++ li_optimized(rd, j, mode); ++ } else if (MustUseReg(j.rmode())) { ++ int64_t immediate; ++ if (j.IsHeapObjectRequest()) { ++ RequestHeapObject(j.heap_object_request()); ++ immediate = 0; ++ } else { ++ immediate = j.immediate(); ++ } ++ ++ RecordRelocInfo(j.rmode(), immediate); ++ lu12i_w(rd, immediate >> 12 & 0xfffff); ++ ori(rd, rd, immediate & kImm12Mask); ++ lu32i_d(rd, immediate >> 32 & 0xfffff); ++ } else if (mode == ADDRESS_LOAD) { ++ // We always need the same number of instructions as we may need to patch ++ // this code to load another value which may need all 3 instructions. ++ lu12i_w(rd, j.immediate() >> 12 & 0xfffff); ++ ori(rd, rd, j.immediate() & kImm12Mask); ++ lu32i_d(rd, j.immediate() >> 32 & 0xfffff); ++ } else { // mode == CONSTANT_SIZE - always emit the same instruction ++ // sequence. ++ lu12i_w(rd, j.immediate() >> 12 & 0xfffff); ++ ori(rd, rd, j.immediate() & kImm12Mask); ++ lu32i_d(rd, j.immediate() >> 32 & 0xfffff); ++ lu52i_d(rd, rd, j.immediate() >> 52 & kImm12Mask); ++ } ++} ++ ++void TurboAssembler::MultiPush(RegList regs) { ++ int16_t stack_offset = 0; ++ ++ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { ++ if ((regs & (1 << i)) != 0) { ++ stack_offset -= kPointerSize; ++ St_d(ToRegister(i), MemOperand(sp, stack_offset)); ++ } ++ } ++ addi_d(sp, sp, stack_offset); ++} ++ ++void TurboAssembler::MultiPush(RegList regs1, RegList regs2) { ++ DCHECK_EQ(regs1 & regs2, 0); ++ int16_t stack_offset = 0; ++ ++ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { ++ if ((regs1 & (1 << i)) != 0) { ++ stack_offset -= kPointerSize; ++ St_d(ToRegister(i), MemOperand(sp, stack_offset)); ++ } ++ } ++ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { ++ if ((regs2 & (1 << i)) != 0) { ++ stack_offset -= kPointerSize; ++ St_d(ToRegister(i), MemOperand(sp, stack_offset)); ++ } ++ } ++ addi_d(sp, sp, stack_offset); ++} ++ ++void TurboAssembler::MultiPush(RegList regs1, RegList regs2, RegList regs3) { ++ DCHECK_EQ(regs1 & regs2, 0); ++ DCHECK_EQ(regs1 & regs3, 0); ++ DCHECK_EQ(regs2 & regs3, 0); ++ int16_t stack_offset = 0; ++ ++ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { ++ if ((regs1 & (1 << i)) != 0) { ++ stack_offset -= kPointerSize; ++ St_d(ToRegister(i), MemOperand(sp, stack_offset)); ++ } ++ } ++ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { ++ if ((regs2 & (1 << i)) != 0) { ++ stack_offset -= kPointerSize; ++ St_d(ToRegister(i), MemOperand(sp, stack_offset)); ++ } ++ } ++ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { ++ if ((regs3 & (1 << i)) != 0) { ++ stack_offset -= kPointerSize; ++ St_d(ToRegister(i), MemOperand(sp, stack_offset)); ++ } ++ } ++ addi_d(sp, sp, stack_offset); ++} ++ 
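++// The MultiPush variants above store registers starting from the highest
++// register code, while the MultiPop variants below reload them starting
++// from the lowest, so a matching push/pop pair restores each register from
++// the stack slot it was saved to.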
++void TurboAssembler::MultiPop(RegList regs) { ++ int16_t stack_offset = 0; ++ ++ for (int16_t i = 0; i < kNumRegisters; i++) { ++ if ((regs & (1 << i)) != 0) { ++ Ld_d(ToRegister(i), MemOperand(sp, stack_offset)); ++ stack_offset += kPointerSize; ++ } ++ } ++ addi_d(sp, sp, stack_offset); ++} ++ ++void TurboAssembler::MultiPop(RegList regs1, RegList regs2) { ++ DCHECK_EQ(regs1 & regs2, 0); ++ int16_t stack_offset = 0; ++ ++ for (int16_t i = 0; i < kNumRegisters; i++) { ++ if ((regs2 & (1 << i)) != 0) { ++ Ld_d(ToRegister(i), MemOperand(sp, stack_offset)); ++ stack_offset += kPointerSize; ++ } ++ } ++ for (int16_t i = 0; i < kNumRegisters; i++) { ++ if ((regs1 & (1 << i)) != 0) { ++ Ld_d(ToRegister(i), MemOperand(sp, stack_offset)); ++ stack_offset += kPointerSize; ++ } ++ } ++ addi_d(sp, sp, stack_offset); ++} ++ ++void TurboAssembler::MultiPop(RegList regs1, RegList regs2, RegList regs3) { ++ DCHECK_EQ(regs1 & regs2, 0); ++ DCHECK_EQ(regs1 & regs3, 0); ++ DCHECK_EQ(regs2 & regs3, 0); ++ int16_t stack_offset = 0; ++ ++ for (int16_t i = 0; i < kNumRegisters; i++) { ++ if ((regs3 & (1 << i)) != 0) { ++ Ld_d(ToRegister(i), MemOperand(sp, stack_offset)); ++ stack_offset += kPointerSize; ++ } ++ } ++ for (int16_t i = 0; i < kNumRegisters; i++) { ++ if ((regs2 & (1 << i)) != 0) { ++ Ld_d(ToRegister(i), MemOperand(sp, stack_offset)); ++ stack_offset += kPointerSize; ++ } ++ } ++ for (int16_t i = 0; i < kNumRegisters; i++) { ++ if ((regs1 & (1 << i)) != 0) { ++ Ld_d(ToRegister(i), MemOperand(sp, stack_offset)); ++ stack_offset += kPointerSize; ++ } ++ } ++ addi_d(sp, sp, stack_offset); ++} ++ ++void TurboAssembler::MultiPushFPU(RegList regs) { ++ int16_t num_to_push = base::bits::CountPopulation(regs); ++ int16_t stack_offset = num_to_push * kDoubleSize; ++ ++ Sub_d(sp, sp, Operand(stack_offset)); ++ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { ++ if ((regs & (1 << i)) != 0) { ++ stack_offset -= kDoubleSize; ++ Fst_d(FPURegister::from_code(i), MemOperand(sp, stack_offset)); ++ } ++ } ++} ++ ++void TurboAssembler::MultiPopFPU(RegList regs) { ++ int16_t stack_offset = 0; ++ ++ for (int16_t i = 0; i < kNumRegisters; i++) { ++ if ((regs & (1 << i)) != 0) { ++ Fld_d(FPURegister::from_code(i), MemOperand(sp, stack_offset)); ++ stack_offset += kDoubleSize; ++ } ++ } ++ addi_d(sp, sp, stack_offset); ++} ++ ++void TurboAssembler::Bstrpick_w(Register rk, Register rj, uint16_t msbw, ++ uint16_t lsbw) { ++ DCHECK_LT(lsbw, msbw); ++ DCHECK_LT(lsbw, 32); ++ DCHECK_LT(msbw, 32); ++ bstrpick_w(rk, rj, msbw, lsbw); ++} ++ ++void TurboAssembler::Bstrpick_d(Register rk, Register rj, uint16_t msbw, ++ uint16_t lsbw) { ++ DCHECK_LT(lsbw, msbw); ++ DCHECK_LT(lsbw, 64); ++ DCHECK_LT(msbw, 64); ++ bstrpick_d(rk, rj, msbw, lsbw); ++} ++ ++void TurboAssembler::Neg_s(FPURegister fd, FPURegister fj) { fneg_s(fd, fj); } ++ ++void TurboAssembler::Neg_d(FPURegister fd, FPURegister fj) { fneg_d(fd, fj); } ++ ++void TurboAssembler::Ffint_d_uw(FPURegister fd, FPURegister fj) { ++ // Move the data from fs to t8. ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ movfr2gr_s(t8, fj); ++ Ffint_d_uw(fd, t8); ++} ++ ++void TurboAssembler::Ffint_d_uw(FPURegister fd, Register rj) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ ++ // Convert rj to a FP value in fd. ++ DCHECK(rj != t7); ++ ++ // Zero extend int32 in rj. 
++ Bstrpick_d(t7, rj, 31, 0); ++ movgr2fr_d(fd, t7); ++ ffint_d_l(fd, fd); ++} ++ ++void TurboAssembler::Ffint_d_ul(FPURegister fd, FPURegister fj) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ // Move the data from fs to t8. ++ movfr2gr_d(t8, fj); ++ Ffint_d_ul(fd, t8); ++} ++ ++void TurboAssembler::Ffint_d_ul(FPURegister fd, Register rj) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ // Convert rj to a FP value in fd. ++ ++ DCHECK(rj != t7); ++ ++ Label msb_clear, conversion_done; ++ ++ Branch(&msb_clear, ge, rj, Operand(zero_reg)); ++ ++ // Rj >= 2^63 ++ andi(t7, rj, 1); ++ srli_d(rj, rj, 1); ++ or_(t7, t7, rj); ++ movgr2fr_d(fd, t7); ++ ffint_d_l(fd, fd); ++ fadd_d(fd, fd, fd); ++ Branch(&conversion_done); ++ ++ bind(&msb_clear); ++ // Rs < 2^63, we can do simple conversion. ++ movgr2fr_d(fd, rj); ++ ffint_d_l(fd, fd); ++ ++ bind(&conversion_done); ++} ++ ++void TurboAssembler::Ffint_s_uw(FPURegister fd, FPURegister fj) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ // Move the data from fs to t8. ++ movfr2gr_d(t8, fj); ++ Ffint_s_uw(fd, t8); ++} ++ ++void TurboAssembler::Ffint_s_uw(FPURegister fd, Register rj) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ // Convert rj to a FP value in fd. ++ DCHECK(rj != t7); ++ ++ // Zero extend int32 in rj. ++ bstrpick_d(t7, rj, 31, 0); ++ movgr2fr_d(fd, t7); ++ ffint_s_l(fd, fd); ++} ++ ++void TurboAssembler::Ffint_s_ul(FPURegister fd, FPURegister fj) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ // Move the data from fs to t8. ++ movfr2gr_d(t8, fj); ++ Ffint_s_ul(fd, t8); ++} ++ ++void TurboAssembler::Ffint_s_ul(FPURegister fd, Register rj) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ // Convert rj to a FP value in fd. ++ ++ DCHECK(rj != t7); ++ ++ Label positive, conversion_done; ++ ++ Branch(&positive, ge, rj, Operand(zero_reg)); ++ ++ // Rs >= 2^31. ++ andi(t7, rj, 1); ++ srli_d(rj, rj, 1); ++ or_(t7, t7, rj); ++ movgr2fr_d(fd, t7); ++ ffint_s_l(fd, fd); ++ fadd_s(fd, fd, fd); ++ Branch(&conversion_done); ++ ++ bind(&positive); ++ // Rs < 2^31, we can do simple conversion. ++ movgr2fr_d(fd, rj); ++ ffint_s_l(fd, fd); ++ ++ bind(&conversion_done); ++} ++ ++void MacroAssembler::Ftintrne_l_d(FPURegister fd, FPURegister fj) { ++ ftintrne_l_d(fd, fj); ++} ++ ++void MacroAssembler::Ftintrm_l_d(FPURegister fd, FPURegister fj) { ++ ftintrm_l_d(fd, fj); ++} ++ ++void MacroAssembler::Ftintrp_l_d(FPURegister fd, FPURegister fj) { ++ ftintrp_l_d(fd, fj); ++} ++ ++void MacroAssembler::Ftintrz_l_d(FPURegister fd, FPURegister fj) { ++ ftintrz_l_d(fd, fj); ++} ++ ++void MacroAssembler::Ftintrz_l_ud(FPURegister fd, FPURegister fj, ++ FPURegister scratch) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ // Load to GPR. ++ movfr2gr_d(t8, fj); ++ // Reset sign bit. 
++ { ++ UseScratchRegisterScope temps(this); ++ Register scratch1 = temps.Acquire(); ++ li(scratch1, 0x7FFFFFFFFFFFFFFFl); ++ and_(t8, t8, scratch1); ++ } ++ movgr2fr_d(scratch, t8); ++ Ftintrz_l_d(fd, scratch); ++} ++ ++void TurboAssembler::Ftintrz_uw_d(FPURegister fd, FPURegister fj, ++ FPURegister scratch) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Ftintrz_uw_d(t8, fj, scratch); ++ movgr2fr_w(fd, t8); ++} ++ ++void TurboAssembler::Ftintrz_uw_s(FPURegister fd, FPURegister fj, ++ FPURegister scratch) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Ftintrz_uw_s(t8, fj, scratch); ++ movgr2fr_w(fd, t8); ++} ++ ++void TurboAssembler::Ftintrz_ul_d(FPURegister fd, FPURegister fj, ++ FPURegister scratch, Register result) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Ftintrz_ul_d(t8, fj, scratch, result); ++ movgr2fr_d(fd, t8); ++} ++ ++void TurboAssembler::Ftintrz_ul_s(FPURegister fd, FPURegister fj, ++ FPURegister scratch, Register result) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Ftintrz_ul_s(t8, fj, scratch, result); ++ movgr2fr_d(fd, t8); ++} ++ ++void MacroAssembler::Ftintrz_w_d(FPURegister fd, FPURegister fj) { ++ ftintrz_w_d(fd, fj); ++} ++ ++void MacroAssembler::Ftintrne_w_d(FPURegister fd, FPURegister fj) { ++ ftintrne_w_d(fd, fj); ++} ++ ++void MacroAssembler::Ftintrm_w_d(FPURegister fd, FPURegister fj) { ++ ftintrm_w_d(fd, fj); ++} ++ ++void MacroAssembler::Ftintrp_w_d(FPURegister fd, FPURegister fj) { ++ ftintrp_w_d(fd, fj); ++} ++ ++void TurboAssembler::Ftintrz_uw_d(Register rd, FPURegister fj, ++ FPURegister scratch) { ++ DCHECK(fj != scratch); ++ DCHECK(rd != t7); ++ ++ { ++ // Load 2^31 into scratch as its float representation. ++ UseScratchRegisterScope temps(this); ++ Register scratch1 = temps.Acquire(); ++ li(scratch1, 0x41E00000); ++ movgr2fr_w(scratch, zero_reg); ++ movgr2frh_w(scratch, scratch1); ++ } ++ // Test if scratch > fd. ++ // If fd < 2^31 we can convert it normally. ++ Label simple_convert; ++ CompareF64(fj, scratch, CLT); ++ BranchTrueShortF(&simple_convert); ++ ++ // First we subtract 2^31 from fd, then trunc it to rs ++ // and add 2^31 to rj. ++ fsub_d(scratch, fj, scratch); ++ ftintrz_w_d(scratch, scratch); ++ movfr2gr_s(rd, scratch); ++ Or(rd, rd, 1 << 31); ++ ++ Label done; ++ Branch(&done); ++ // Simple conversion. ++ bind(&simple_convert); ++ ftintrz_w_d(scratch, fj); ++ movfr2gr_s(rd, scratch); ++ ++ bind(&done); ++} ++ ++void TurboAssembler::Ftintrz_uw_s(Register rd, FPURegister fj, ++ FPURegister scratch) { ++ DCHECK(fj != scratch); ++ DCHECK(rd != t7); ++ { ++ // Load 2^31 into scratch as its float representation. ++ UseScratchRegisterScope temps(this); ++ Register scratch1 = temps.Acquire(); ++ li(scratch1, 0x4F000000); ++ movgr2fr_w(scratch, scratch1); ++ } ++ // Test if scratch > fs. ++ // If fs < 2^31 we can convert it normally. ++ Label simple_convert; ++ CompareF32(fj, scratch, CLT); ++ BranchTrueShortF(&simple_convert); ++ ++ // First we subtract 2^31 from fs, then trunc it to rd ++ // and add 2^31 to rd. ++ fsub_s(scratch, fj, scratch); ++ ftintrz_w_s(scratch, scratch); ++ movfr2gr_s(rd, scratch); ++ Or(rd, rd, 1 << 31); ++ ++ Label done; ++ Branch(&done); ++ // Simple conversion. ++ bind(&simple_convert); ++ ftintrz_w_s(scratch, fj); ++ movfr2gr_s(rd, scratch); ++ ++ bind(&done); ++} ++ ++void TurboAssembler::Ftintrz_ul_d(Register rd, FPURegister fj, ++ FPURegister scratch, Register result) { ++ DCHECK(fj != scratch); ++ DCHECK(result.is_valid() ? 
!AreAliased(rd, result, t7) : !AreAliased(rd, t7)); ++ ++ Label simple_convert, done, fail; ++ if (result.is_valid()) { ++ mov(result, zero_reg); ++ Move(scratch, -1.0); ++ // If fd =< -1 or unordered, then the conversion fails. ++ CompareF64(fj, scratch, CLE); ++ BranchTrueShortF(&fail); ++ CompareIsNanF64(fj, scratch); ++ BranchTrueShortF(&fail); ++ } ++ ++ // Load 2^63 into scratch as its double representation. ++ li(t7, 0x43E0000000000000); ++ movgr2fr_d(scratch, t7); ++ ++ // Test if scratch > fs. ++ // If fs < 2^63 we can convert it normally. ++ CompareF64(fj, scratch, CLT); ++ BranchTrueShortF(&simple_convert); ++ ++ // First we subtract 2^63 from fs, then trunc it to rd ++ // and add 2^63 to rd. ++ fsub_d(scratch, fj, scratch); ++ ftintrz_l_d(scratch, scratch); ++ movfr2gr_d(rd, scratch); ++ Or(rd, rd, Operand(1UL << 63)); ++ Branch(&done); ++ ++ // Simple conversion. ++ bind(&simple_convert); ++ ftintrz_l_d(scratch, fj); ++ movfr2gr_d(rd, scratch); ++ ++ bind(&done); ++ if (result.is_valid()) { ++ // Conversion is failed if the result is negative. ++ { ++ UseScratchRegisterScope temps(this); ++ Register scratch1 = temps.Acquire(); ++ addi_d(scratch1, zero_reg, -1); ++ srli_d(scratch1, scratch1, 1); // Load 2^62. ++ movfr2gr_d(result, scratch); ++ xor_(result, result, scratch1); ++ } ++ Slt(result, zero_reg, result); ++ } ++ ++ bind(&fail); ++} ++ ++void TurboAssembler::Ftintrz_ul_s(Register rd, FPURegister fj, ++ FPURegister scratch, Register result) { ++ DCHECK(fj != scratch); ++ DCHECK(result.is_valid() ? !AreAliased(rd, result, t7) : !AreAliased(rd, t7)); ++ ++ Label simple_convert, done, fail; ++ if (result.is_valid()) { ++ mov(result, zero_reg); ++ Move(scratch, -1.0f); ++ // If fd =< -1 or unordered, then the conversion fails. ++ CompareF32(fj, scratch, CLE); ++ BranchTrueShortF(&fail); ++ CompareIsNanF32(fj, scratch); ++ BranchTrueShortF(&fail); ++ } ++ ++ { ++ // Load 2^63 into scratch as its float representation. ++ UseScratchRegisterScope temps(this); ++ Register scratch1 = temps.Acquire(); ++ li(scratch1, 0x5F000000); ++ movgr2fr_w(scratch, scratch1); ++ } ++ ++ // Test if scratch > fs. ++ // If fs < 2^63 we can convert it normally. ++ CompareF32(fj, scratch, CLT); ++ BranchTrueShortF(&simple_convert); ++ ++ // First we subtract 2^63 from fs, then trunc it to rd ++ // and add 2^63 to rd. ++ fsub_s(scratch, fj, scratch); ++ ftintrz_l_s(scratch, scratch); ++ movfr2gr_d(rd, scratch); ++ Or(rd, rd, Operand(1UL << 63)); ++ Branch(&done); ++ ++ // Simple conversion. ++ bind(&simple_convert); ++ ftintrz_l_s(scratch, fj); ++ movfr2gr_d(rd, scratch); ++ ++ bind(&done); ++ if (result.is_valid()) { ++ // Conversion is failed if the result is negative or unordered. ++ { ++ UseScratchRegisterScope temps(this); ++ Register scratch1 = temps.Acquire(); ++ addi_d(scratch1, zero_reg, -1); ++ srli_d(scratch1, scratch1, 1); // Load 2^62. 
++ movfr2gr_d(result, scratch); ++ xor_(result, result, scratch1); ++ } ++ Slt(result, zero_reg, result); ++ } ++ ++ bind(&fail); ++} ++ ++void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src, ++ FPURoundingMode mode) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register scratch = t8; ++ movfcsr2gr(scratch); ++ li(t7, Operand(mode)); ++ movgr2fcsr(t7); ++ frint_d(dst, src); ++ movgr2fcsr(scratch); ++} ++ ++void TurboAssembler::Floor_d(FPURegister dst, FPURegister src) { ++ RoundDouble(dst, src, mode_floor); ++} ++ ++void TurboAssembler::Ceil_d(FPURegister dst, FPURegister src) { ++ RoundDouble(dst, src, mode_ceil); ++} ++ ++void TurboAssembler::Trunc_d(FPURegister dst, FPURegister src) { ++ RoundDouble(dst, src, mode_trunc); ++} ++ ++void TurboAssembler::Round_d(FPURegister dst, FPURegister src) { ++ RoundDouble(dst, src, mode_round); ++} ++ ++void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src, ++ FPURoundingMode mode) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register scratch = t8; ++ movfcsr2gr(scratch); ++ li(t7, Operand(mode)); ++ movgr2fcsr(t7); ++ frint_s(dst, src); ++ movgr2fcsr(scratch); ++} ++ ++void TurboAssembler::Floor_s(FPURegister dst, FPURegister src) { ++ RoundFloat(dst, src, mode_floor); ++} ++ ++void TurboAssembler::Ceil_s(FPURegister dst, FPURegister src) { ++ RoundFloat(dst, src, mode_ceil); ++} ++ ++void TurboAssembler::Trunc_s(FPURegister dst, FPURegister src) { ++ RoundFloat(dst, src, mode_trunc); ++} ++ ++void TurboAssembler::Round_s(FPURegister dst, FPURegister src) { ++ RoundFloat(dst, src, mode_round); ++} ++ ++void TurboAssembler::CompareF(FPURegister cmp1, FPURegister cmp2, ++ FPUCondition cc, CFRegister cd, bool f32) { ++ if (f32) { ++ fcmp_cond_s(cc, cmp1, cmp2, cd); ++ } else { ++ fcmp_cond_d(cc, cmp1, cmp2, cd); ++ } ++} ++ ++void TurboAssembler::CompareIsNanF(FPURegister cmp1, FPURegister cmp2, ++ CFRegister cd, bool f32) { ++ CompareF(cmp1, cmp2, CUN, cd, f32); ++} ++ ++void TurboAssembler::BranchTrueShortF(Label* target, CFRegister cj) { ++ bcnez(cj, target); ++} ++ ++void TurboAssembler::BranchFalseShortF(Label* target, CFRegister cj) { ++ bceqz(cj, target); ++} ++ ++void TurboAssembler::BranchTrueF(Label* target, CFRegister cj) { ++ // TODO can be optimzed ++ bool long_branch = target->is_bound() ++ ? !is_near(target, OffsetSize::kOffset21) ++ : is_trampoline_emitted(); ++ if (long_branch) { ++ Label skip; ++ BranchFalseShortF(&skip, cj); ++ Branch(target); ++ bind(&skip); ++ } else { ++ BranchTrueShortF(target, cj); ++ } ++} ++ ++void TurboAssembler::BranchFalseF(Label* target, CFRegister cj) { ++ bool long_branch = target->is_bound() ++ ? !is_near(target, OffsetSize::kOffset21) ++ : is_trampoline_emitted(); ++ if (long_branch) { ++ Label skip; ++ BranchTrueShortF(&skip, cj); ++ Branch(target); ++ bind(&skip); ++ } else { ++ BranchFalseShortF(target, cj); ++ } ++} ++ ++void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ DCHECK(src_low != scratch); ++ movfrh2gr_s(scratch, dst); ++ movgr2fr_w(dst, src_low); ++ movgr2frh_w(dst, scratch); ++} ++ ++void TurboAssembler::Move(FPURegister dst, uint32_t src) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ li(scratch, Operand(static_cast(src))); ++ movgr2fr_w(dst, scratch); ++} ++ ++void TurboAssembler::Move(FPURegister dst, uint64_t src) { ++ // Handle special values first. 
++ if (src == bit_cast(0.0) && has_double_zero_reg_set_) { ++ fmov_d(dst, kDoubleRegZero); ++ } else if (src == bit_cast(-0.0) && has_double_zero_reg_set_) { ++ Neg_d(dst, kDoubleRegZero); ++ } else { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ li(scratch, Operand(static_cast(src))); ++ movgr2fr_d(dst, scratch); ++ if (dst == kDoubleRegZero) has_double_zero_reg_set_ = true; ++ } ++} ++ ++void TurboAssembler::Movz(Register rd, Register rj, Register rk) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ maskeqz(scratch, rj, rk); ++ masknez(rd, rd, rk); ++ or_(rd, rd, scratch); ++} ++ ++void TurboAssembler::Movn(Register rd, Register rj, Register rk) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ masknez(scratch, rj, rk); ++ maskeqz(rd, rd, rk); ++ or_(rd, rd, scratch); ++} ++ ++void TurboAssembler::LoadZeroOnCondition(Register rd, Register rj, ++ const Operand& rk, Condition cond) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ switch (cond) { ++ case cc_always: ++ mov(rd, zero_reg); ++ break; ++ case eq: ++ if (rj == zero_reg) { ++ if (rk.is_reg()) { ++ LoadZeroIfConditionZero(rd, rk.rm()); ++ } else { ++ if (rk.immediate() == 0) { ++ mov(rd, zero_reg); ++ } else { ++ // nop(); ++ } ++ } ++ } else if (IsZero(rk)) { ++ LoadZeroIfConditionZero(rd, rj); ++ } else { ++ Sub_d(t7, rj, rk); ++ LoadZeroIfConditionZero(rd, t7); ++ } ++ break; ++ case ne: ++ if (rj == zero_reg) { ++ if (rk.is_reg()) { ++ LoadZeroIfConditionNotZero(rd, rk.rm()); ++ } else { ++ if (rk.immediate() != 0) { ++ mov(rd, zero_reg); ++ } else { ++ // nop(); ++ } ++ } ++ } else if (IsZero(rk)) { ++ LoadZeroIfConditionNotZero(rd, rj); ++ } else { ++ Sub_d(t7, rj, rk); ++ LoadZeroIfConditionNotZero(rd, t7); ++ } ++ break; ++ ++ // Signed comparison. ++ case greater: ++ Sgt(t7, rj, rk); ++ LoadZeroIfConditionNotZero(rd, t7); ++ break; ++ case greater_equal: ++ Sge(t7, rj, rk); ++ LoadZeroIfConditionNotZero(rd, t7); ++ // rj >= rk ++ break; ++ case less: ++ Slt(t7, rj, rk); ++ LoadZeroIfConditionNotZero(rd, t7); ++ // rj < rk ++ break; ++ case less_equal: ++ Sle(t7, rj, rk); ++ LoadZeroIfConditionNotZero(rd, t7); ++ // rj <= rk ++ break; ++ ++ // Unsigned comparison. 
++ case Ugreater: ++ Sgtu(t7, rj, rk); ++ LoadZeroIfConditionNotZero(rd, t7); ++ // rj > rk ++ break; ++ ++ case Ugreater_equal: ++ Sgeu(t7, rj, rk); ++ LoadZeroIfConditionNotZero(rd, t7); ++ // rj >= rk ++ break; ++ case Uless: ++ Sltu(t7, rj, rk); ++ LoadZeroIfConditionNotZero(rd, t7); ++ // rj < rk ++ break; ++ case Uless_equal: ++ Sleu(t7, rj, rk); ++ LoadZeroIfConditionNotZero(rd, t7); ++ // rj <= rk ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void TurboAssembler::LoadZeroIfConditionNotZero(Register dest, ++ Register condition) { ++ maskeqz(dest, dest, condition); ++} ++ ++void TurboAssembler::LoadZeroIfConditionZero(Register dest, ++ Register condition) { ++ masknez(dest, dest, condition); ++} ++ ++void TurboAssembler::LoadZeroIfFPUCondition(Register dest, CFRegister cc) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ movcf2gr(scratch, cc); ++ LoadZeroIfConditionNotZero(dest, scratch); ++} ++ ++void TurboAssembler::LoadZeroIfNotFPUCondition(Register dest, CFRegister cc) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ movcf2gr(scratch, cc); ++ LoadZeroIfConditionZero(dest, scratch); ++} ++ ++void TurboAssembler::Clz_w(Register rd, Register rj) { clz_w(rd, rj); } ++ ++void TurboAssembler::Clz_d(Register rd, Register rj) { clz_d(rd, rj); } ++ ++void TurboAssembler::Ctz_w(Register rd, Register rj) { ctz_w(rd, rj); } ++ ++void TurboAssembler::Ctz_d(Register rd, Register rj) { ctz_d(rd, rj); } ++ ++// TODO: Optimize like arm64, use simd instruction ++void TurboAssembler::Popcnt_w(Register rd, Register rj) { ++ // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel ++ // ++ // A generalization of the best bit counting method to integers of ++ // bit-widths up to 128 (parameterized by type T) is this: ++ // ++ // v = v - ((v >> 1) & (T)~(T)0/3); // temp ++ // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); // temp ++ // v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp ++ // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count ++ // ++ // For comparison, for 32-bit quantities, this algorithm can be executed ++ // using 20 MIPS instructions (the calls to LoadConst32() generate two ++ // machine instructions each for the values being used in this algorithm). ++ // A(n unrolled) loop-based algorithm requires 25 instructions. ++ // ++ // For a 64-bit operand this can be performed in 24 instructions compared ++ // to a(n unrolled) loop based algorithm which requires 38 instructions. ++ // ++ // There are algorithms which are faster in the cases where very few ++ // bits are set but the algorithm here attempts to minimize the total ++ // number of instructions executed even when a large number of bits ++ // are set. 
++ int32_t B0 = 0x55555555; // (T)~(T)0/3 ++ int32_t B1 = 0x33333333; // (T)~(T)0/15*3 ++ int32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15 ++ int32_t value = 0x01010101; // (T)~(T)0/255 ++ uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE ++ ++ UseScratchRegisterScope temps(this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register scratch = temps.Acquire(); ++ Register scratch2 = t8; ++ srli_w(scratch, rj, 1); ++ li(scratch2, B0); ++ And(scratch, scratch, scratch2); ++ Sub_w(scratch, rj, scratch); ++ li(scratch2, B1); ++ And(rd, scratch, scratch2); ++ srli_w(scratch, scratch, 2); ++ And(scratch, scratch, scratch2); ++ Add_w(scratch, rd, scratch); ++ srli_w(rd, scratch, 4); ++ Add_w(rd, rd, scratch); ++ li(scratch2, B2); ++ And(rd, rd, scratch2); ++ li(scratch, value); ++ Mul_w(rd, rd, scratch); ++ srli_w(rd, rd, shift); ++} ++ ++void TurboAssembler::Popcnt_d(Register rd, Register rj) { ++ int64_t B0 = 0x5555555555555555l; // (T)~(T)0/3 ++ int64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3 ++ int64_t B2 = 0x0F0F0F0F0F0F0F0Fl; // (T)~(T)0/255*15 ++ int64_t value = 0x0101010101010101l; // (T)~(T)0/255 ++ uint32_t shift = 56; // (sizeof(T) - 1) * BITS_PER_BYTE ++ ++ UseScratchRegisterScope temps(this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register scratch = temps.Acquire(); ++ Register scratch2 = t8; ++ srli_d(scratch, rj, 1); ++ li(scratch2, B0); ++ And(scratch, scratch, scratch2); ++ Sub_d(scratch, rj, scratch); ++ li(scratch2, B1); ++ And(rd, scratch, scratch2); ++ srli_d(scratch, scratch, 2); ++ And(scratch, scratch, scratch2); ++ Add_d(scratch, rd, scratch); ++ srli_d(rd, scratch, 4); ++ Add_d(rd, rd, scratch); ++ li(scratch2, B2); ++ And(rd, rd, scratch2); ++ li(scratch, value); ++ Mul_d(rd, rd, scratch); ++ srli_d(rd, rd, shift); ++} ++ ++void TurboAssembler::ExtractBits(Register dest, Register source, Register pos, ++ int size, bool sign_extend) { ++ sra_d(dest, source, pos); ++ bstrpick_d(dest, dest, size - 1, 0); ++ if (sign_extend) { ++ switch (size) { ++ case 8: ++ ext_w_b(dest, dest); ++ break; ++ case 16: ++ ext_w_h(dest, dest); ++ break; ++ case 32: ++ // sign-extend word ++ slli_w(dest, dest, 0); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } ++} ++ ++void TurboAssembler::InsertBits(Register dest, Register source, Register pos, ++ int size) { ++ Rotr_d(dest, dest, pos); ++ bstrins_d(dest, source, size - 1, 0); ++ { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ Sub_d(scratch, zero_reg, pos); ++ Rotr_d(dest, dest, scratch); ++ } ++} ++ ++void MacroAssembler::EmitFPUTruncate( ++ FPURoundingMode rounding_mode, Register result, DoubleRegister double_input, ++ Register scratch, DoubleRegister double_scratch, Register except_flag, ++ CheckForInexactConversion check_inexact) { ++ break_(3); ++} ++ ++void TurboAssembler::TryInlineTruncateDoubleToI(Register result, ++ DoubleRegister double_input, ++ Label* done) { ++ DoubleRegister single_scratch = kScratchDoubleReg.low(); ++ UseScratchRegisterScope temps(this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register scratch = temps.Acquire(); ++ Register scratch2 = t7; ++ ++ // Clear cumulative exception flags and save the FCSR. ++ /* movfcsr2gr(scratch2, FCSR); ++ movgr2fcsr(FCSR, zero_reg); ++ // Try a conversion to a signed integer. ++ ftintrz_w_d(single_scratch, double_input); ++ movfr2gr_w(result, single_scratch); ++ // Retrieve and restore the FCSR. 
++ movfcsr2gr(scratch, FCSR); ++ movgr2fcsr(FCSR, scratch2); ++ // Check for overflow and NaNs. ++ And(scratch, scratch, ++ kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | ++ kFCSRInvalidOpFlagMask); ++ // If we had no exceptions we are done. ++ Branch(done, eq, scratch, Operand(zero_reg));*/ ++ ++ CompareIsNanF64(double_input, double_input); ++ Move(result, zero_reg); ++ bcnez(FCC0, done); ++ ftintrz_l_d(single_scratch, double_input); ++ movfr2gr_d(scratch2, single_scratch); ++ li(scratch, 1L << 63); ++ Xor(scratch, scratch, scratch2); ++ rotri_d(scratch2, scratch, 1); ++ movfr2gr_s(result, single_scratch); ++ Branch(done, ne, scratch, Operand(scratch2)); ++} ++ ++void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, ++ Register result, ++ DoubleRegister double_input, ++ StubCallMode stub_mode) { ++ Label done; ++ ++ TryInlineTruncateDoubleToI(result, double_input, &done); ++ ++ // If we fell through then inline version didn't succeed - call stub instead. ++ Sub_d(sp, sp, ++ Operand(kDoubleSize + kSystemPointerSize)); // Put input on stack. ++ St_d(ra, MemOperand(sp, kSystemPointerSize)); ++ Fst_d(double_input, MemOperand(sp, 0)); ++ ++ if (stub_mode == StubCallMode::kCallWasmRuntimeStub) { ++ Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL); ++ } else { ++ Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET); ++ } ++ ++ Pop(ra, result); ++ bind(&done); ++} ++ ++// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct. ++#define BRANCH_ARGS_CHECK(cond, rj, rk) \ ++ DCHECK((cond == cc_always && rj == zero_reg && rk.rm() == zero_reg) || \ ++ (cond != cc_always && (rj != zero_reg || rk.rm() != zero_reg))) ++ ++void TurboAssembler::Branch(Label* L, bool need_link) { ++ int offset = GetOffset(L, OffsetSize::kOffset26); ++ if (need_link) { ++ bl(offset); ++ } else { ++ b(offset); ++ } ++} ++ ++void TurboAssembler::Branch(Label* L, Condition cond, Register rj, ++ const Operand& rk, bool need_link) { ++ if (L->is_bound()) { ++ BRANCH_ARGS_CHECK(cond, rj, rk); ++ if (!BranchShortOrFallback(L, cond, rj, rk, need_link)) { ++ if (cond != cc_always) { ++ Label skip; ++ Condition neg_cond = NegateCondition(cond); ++ BranchShort(&skip, neg_cond, rj, rk, need_link); ++ Branch(L, need_link); ++ bind(&skip); ++ } else { ++ Branch(L); ++ } ++ } ++ } else { ++ if (is_trampoline_emitted()) { ++ if (cond != cc_always) { ++ Label skip; ++ Condition neg_cond = NegateCondition(cond); ++ BranchShort(&skip, neg_cond, rj, rk, need_link); ++ Branch(L, need_link); ++ bind(&skip); ++ } else { ++ Branch(L); ++ } ++ } else { ++ BranchShort(L, cond, rj, rk, need_link); ++ } ++ } ++} ++ ++void TurboAssembler::Branch(Label* L, Condition cond, Register rj, ++ RootIndex index) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ LoadRoot(scratch, index); ++ Branch(L, cond, rj, Operand(scratch)); ++} ++ ++int32_t TurboAssembler::GetOffset(Label* L, OffsetSize bits) { ++ return branch_offset_helper(L, bits) >> 2; ++} ++ ++Register TurboAssembler::GetRkAsRegisterHelper(const Operand& rk, ++ Register scratch) { ++ Register r2 = no_reg; ++ if (rk.is_reg()) { ++ r2 = rk.rm(); ++ } else { ++ r2 = scratch; ++ li(r2, rk); ++ } ++ ++ return r2; ++} ++ ++bool TurboAssembler::BranchShortOrFallback(Label* L, Condition cond, ++ Register rj, const Operand& rk, ++ bool need_link) { ++ UseScratchRegisterScope temps(this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register scratch = temps.hasAvailable() ? 
temps.Acquire() : t8; ++ ++ // Be careful to always use shifted_branch_offset only just before the ++ // branch instruction, as the location will be remember for patching the ++ // target. ++ { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ int offset = 0; ++ switch (cond) { ++ case cc_always: ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false; ++ offset = GetOffset(L, OffsetSize::kOffset26); ++ if (need_link) { ++ bl(offset); ++ } else { ++ b(offset); ++ } ++ break; ++ case eq: ++ if (rk.is_reg() && rj.code() == rk.rm().code()) { ++ // beq is used here to make the code patchable. Otherwise b should ++ // be used which has no condition field so is not patchable. ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; ++ if (need_link) pcaddi(ra, 2); ++ offset = GetOffset(L, OffsetSize::kOffset16); ++ beq(rj, rj, offset); ++ } else if (IsZero(rk)) { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset21)) return false; ++ if (need_link) pcaddi(ra, 2); ++ offset = GetOffset(L, OffsetSize::kOffset21); ++ beqz(rj, offset); ++ } else { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; ++ if (need_link) pcaddi(ra, 2); ++ // We don't want any other register but scratch clobbered. ++ Register sc = GetRkAsRegisterHelper(rk, scratch); ++ offset = GetOffset(L, OffsetSize::kOffset16); ++ beq(rj, sc, offset); ++ } ++ break; ++ case ne: ++ if (rk.is_reg() && rj.code() == rk.rm().code()) { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; ++ if (need_link) pcaddi(ra, 2); ++ // bne is used here to make the code patchable. Otherwise we ++ // should not generate any instruction. ++ offset = GetOffset(L, OffsetSize::kOffset16); ++ bne(rj, rj, offset); ++ } else if (IsZero(rk)) { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset21)) return false; ++ if (need_link) pcaddi(ra, 2); ++ offset = GetOffset(L, OffsetSize::kOffset21); ++ bnez(rj, offset); ++ } else { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; ++ if (need_link) pcaddi(ra, 2); ++ // We don't want any other register but scratch clobbered. ++ Register sc = GetRkAsRegisterHelper(rk, scratch); ++ offset = GetOffset(L, OffsetSize::kOffset16); ++ bne(rj, sc, offset); ++ } ++ break; ++ ++ // Signed comparison. ++ case greater: ++ // rj > rk ++ if (rk.is_reg() && rj.code() == rk.rm().code()) { ++ // No code needs to be emitted. 
++ } else if (IsZero(rk)) { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; ++ if (need_link) pcaddi(ra, 2); ++ offset = GetOffset(L, OffsetSize::kOffset16); ++ blt(zero_reg, rj, offset); ++ } else { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; ++ if (need_link) pcaddi(ra, 2); ++ Register sc = GetRkAsRegisterHelper(rk, scratch); ++ DCHECK(rj != sc); ++ offset = GetOffset(L, OffsetSize::kOffset16); ++ blt(sc, rj, offset); ++ } ++ break; ++ case greater_equal: ++ // rj >= rk ++ if (rk.is_reg() && rj.code() == rk.rm().code()) { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false; ++ if (need_link) pcaddi(ra, 2); ++ offset = GetOffset(L, OffsetSize::kOffset26); ++ b(offset); ++ } else if (IsZero(rk)) { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; ++ if (need_link) pcaddi(ra, 2); ++ offset = GetOffset(L, OffsetSize::kOffset16); ++ bge(rj, zero_reg, offset); ++ } else { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; ++ if (need_link) pcaddi(ra, 2); ++ Register sc = GetRkAsRegisterHelper(rk, scratch); ++ DCHECK(rj != sc); ++ offset = GetOffset(L, OffsetSize::kOffset16); ++ bge(rj, sc, offset); ++ } ++ break; ++ case less: ++ // rj < rk ++ if (rk.is_reg() && rj.code() == rk.rm().code()) { ++ // No code needs to be emitted. ++ } else if (IsZero(rk)) { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; ++ if (need_link) pcaddi(ra, 2); ++ offset = GetOffset(L, OffsetSize::kOffset16); ++ blt(rj, zero_reg, offset); ++ } else { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; ++ if (need_link) pcaddi(ra, 2); ++ Register sc = GetRkAsRegisterHelper(rk, scratch); ++ DCHECK(rj != sc); ++ offset = GetOffset(L, OffsetSize::kOffset16); ++ blt(rj, sc, offset); ++ } ++ break; ++ case less_equal: ++ // rj <= rk ++ if (rk.is_reg() && rj.code() == rk.rm().code()) { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false; ++ if (need_link) pcaddi(ra, 2); ++ offset = GetOffset(L, OffsetSize::kOffset26); ++ b(offset); ++ } else if (IsZero(rk)) { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; ++ if (need_link) pcaddi(ra, 2); ++ offset = GetOffset(L, OffsetSize::kOffset16); ++ bge(zero_reg, rj, offset); ++ } else { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; ++ if (need_link) pcaddi(ra, 2); ++ Register sc = GetRkAsRegisterHelper(rk, scratch); ++ DCHECK(rj != sc); ++ offset = GetOffset(L, OffsetSize::kOffset16); ++ bge(sc, rj, offset); ++ } ++ break; ++ ++ // Unsigned comparison. ++ case Ugreater: ++ // rj > rk ++ if (rk.is_reg() && rj.code() == rk.rm().code()) { ++ // No code needs to be emitted. ++ } else if (rj == zero_reg) { ++ // No code needs to be emitted. 
++ } else if (IsZero(rk)) { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false; ++ if (need_link) pcaddi(ra, 2); ++ offset = GetOffset(L, OffsetSize::kOffset26); ++ b(offset); ++ } else { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; ++ if (need_link) pcaddi(ra, 2); ++ Register sc = GetRkAsRegisterHelper(rk, scratch); ++ DCHECK(rj != sc); ++ offset = GetOffset(L, OffsetSize::kOffset16); ++ bltu(sc, rj, offset); ++ } ++ break; ++ case Ugreater_equal: ++ // rj >= rk ++ if (rk.is_reg() && rj.code() == rk.rm().code()) { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false; ++ if (need_link) pcaddi(ra, 2); ++ offset = GetOffset(L, OffsetSize::kOffset26); ++ b(offset); ++ } else if (IsZero(rk)) { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false; ++ if (need_link) pcaddi(ra, 2); ++ offset = GetOffset(L, OffsetSize::kOffset26); ++ b(offset); ++ } else if (rj == zero_reg) { ++ // No code needs to be emitted. ++ } else { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; ++ if (need_link) pcaddi(ra, 2); ++ Register sc = GetRkAsRegisterHelper(rk, scratch); ++ DCHECK(rj != sc); ++ offset = GetOffset(L, OffsetSize::kOffset16); ++ bgeu(rj, sc, offset); ++ } ++ break; ++ case Uless: ++ // rj < rk ++ if (rk.is_reg() && rj.code() == rk.rm().code()) { ++ // No code needs to be emitted. ++ } else if (IsZero(rk)) { ++ // No code needs to be emitted. ++ } else if (rj == zero_reg) { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false; ++ if (need_link) pcaddi(ra, 2); ++ offset = GetOffset(L, OffsetSize::kOffset26); ++ b(offset); ++ } else { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; ++ if (need_link) pcaddi(ra, 2); ++ Register sc = GetRkAsRegisterHelper(rk, scratch); ++ DCHECK(rj != sc); ++ offset = GetOffset(L, OffsetSize::kOffset16); ++ bltu(rj, sc, offset); ++ } ++ break; ++ case Uless_equal: ++ // rj <= rk ++ if (rk.is_reg() && rj.code() == rk.rm().code()) { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false; ++ if (need_link) pcaddi(ra, 2); ++ offset = GetOffset(L, OffsetSize::kOffset26); ++ b(offset); ++ } else if (rj == zero_reg) { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false; ++ if (need_link) pcaddi(ra, 2); ++ offset = GetOffset(L, OffsetSize::kOffset26); ++ b(offset); ++ } else if (IsZero(rk)) { ++ // No code needs to be emitted. 
++ } else { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; ++ if (need_link) pcaddi(ra, 2); ++ Register sc = GetRkAsRegisterHelper(rk, scratch); ++ DCHECK(rj != sc); ++ offset = GetOffset(L, OffsetSize::kOffset16); ++ bgeu(sc, rj, offset); ++ } ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } ++ return true; ++} ++ ++void TurboAssembler::BranchShort(Label* L, Condition cond, Register rj, ++ const Operand& rk, bool need_link) { ++ BRANCH_ARGS_CHECK(cond, rj, rk); ++ bool result = BranchShortOrFallback(L, cond, rj, rk, need_link); ++ DCHECK(result); ++ USE(result); ++} ++ ++void TurboAssembler::LoadFromConstantsTable(Register destination, ++ int constant_index) { ++ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable)); ++ LoadRoot(destination, RootIndex::kBuiltinsConstantsTable); ++ Ld_d(destination, ++ FieldMemOperand(destination, FixedArray::kHeaderSize + ++ constant_index * kPointerSize)); ++} ++ ++void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) { ++ Ld_d(destination, MemOperand(kRootRegister, offset)); ++} ++ ++void TurboAssembler::LoadRootRegisterOffset(Register destination, ++ intptr_t offset) { ++ if (offset == 0) { ++ Move(destination, kRootRegister); ++ } else { ++ Add_d(destination, kRootRegister, Operand(offset)); ++ } ++} ++ ++void TurboAssembler::Jump(Register target, Condition cond, Register rj, ++ const Operand& rk) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ if (cond == cc_always) { ++ jirl(zero_reg, target, 0); ++ } else { ++ BRANCH_ARGS_CHECK(cond, rj, rk); ++ Label skip; ++ Branch(&skip, NegateCondition(cond), rj, rk); ++ jirl(zero_reg, target, 0); ++ bind(&skip); ++ } ++} ++ ++void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, ++ Condition cond, Register rj, const Operand& rk) { ++ Label skip; ++ if (cond != cc_always) { ++ Branch(&skip, NegateCondition(cond), rj, rk); ++ } ++ { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ li(t7, Operand(target, rmode)); ++ jirl(zero_reg, t7, 0); ++ bind(&skip); ++ } ++} ++ ++void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond, ++ Register rj, const Operand& rk) { ++ DCHECK(!RelocInfo::IsCodeTarget(rmode)); ++ Jump(static_cast(target), rmode, cond, rj, rk); ++} ++ ++void TurboAssembler::Jump(Handle code, RelocInfo::Mode rmode, ++ Condition cond, Register rj, const Operand& rk) { ++ DCHECK(RelocInfo::IsCodeTarget(rmode)); ++ ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ if (root_array_available_ && options().isolate_independent_code) { ++ IndirectLoadConstant(t7, code); ++ Add_d(t7, t7, Operand(Code::kHeaderSize - kHeapObjectTag)); ++ Jump(t7, cond, rj, rk); ++ return; ++ } else if (options().inline_offheap_trampolines) { ++ int builtin_index = Builtins::kNoBuiltinId; ++ if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) && ++ Builtins::IsIsolateIndependent(builtin_index)) { ++ // Inline the trampoline. 
++ RecordCommentForOffHeapTrampoline(builtin_index); ++ CHECK_NE(builtin_index, Builtins::kNoBuiltinId); ++ EmbeddedData d = EmbeddedData::FromBlob(); ++ Address entry = d.InstructionStartOfBuiltin(builtin_index); ++ li(t7, Operand(entry, RelocInfo::OFF_HEAP_TARGET)); ++ Jump(t7, cond, rj, rk); ++ return; ++ } ++ } ++ ++ Jump(static_cast(code.address()), rmode, cond, rj, rk); ++} ++ ++void TurboAssembler::Jump(const ExternalReference& reference) { ++ li(t7, reference); ++ Jump(t7); ++} ++ ++// Note: To call gcc-compiled C code on loonarch, you must call through t[0-8]. ++void TurboAssembler::Call(Register target, Condition cond, Register rj, ++ const Operand& rk) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ if (cond == cc_always) { ++ jirl(ra, target, 0); ++ } else { ++ BRANCH_ARGS_CHECK(cond, rj, rk); ++ Label skip; ++ Branch(&skip, NegateCondition(cond), rj, rk); ++ jirl(ra, target, 0); ++ bind(&skip); ++ } ++} ++ ++void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit, ++ unsigned higher_limit, ++ Label* on_in_range) { ++ if (lower_limit != 0) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ Sub_d(scratch, value, Operand(lower_limit)); ++ Branch(on_in_range, ls, scratch, Operand(higher_limit - lower_limit)); ++ } else { ++ Branch(on_in_range, ls, value, Operand(higher_limit - lower_limit)); ++ } ++} ++ ++void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, ++ Register rj, const Operand& rk) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ li(t7, Operand(static_cast(target), rmode), ADDRESS_LOAD); ++ Call(t7, cond, rj, rk); ++} ++ ++void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode, ++ Condition cond, Register rj, const Operand& rk) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ ++ if (root_array_available_ && options().isolate_independent_code) { ++ IndirectLoadConstant(t7, code); ++ Add_d(t7, t7, Operand(Code::kHeaderSize - kHeapObjectTag)); ++ Call(t7, cond, rj, rk); ++ return; ++ } else if (options().inline_offheap_trampolines) { ++ int builtin_index = Builtins::kNoBuiltinId; ++ if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) && ++ Builtins::IsIsolateIndependent(builtin_index)) { ++ // Inline the trampoline. ++ RecordCommentForOffHeapTrampoline(builtin_index); ++ CHECK_NE(builtin_index, Builtins::kNoBuiltinId); ++ EmbeddedData d = EmbeddedData::FromBlob(); ++ Address entry = d.InstructionStartOfBuiltin(builtin_index); ++ li(t7, Operand(entry, RelocInfo::OFF_HEAP_TARGET)); ++ Call(t7, cond, rj, rk); ++ return; ++ } ++ } ++ ++ DCHECK(RelocInfo::IsCodeTarget(rmode)); ++ DCHECK(code->IsExecutable()); ++ Call(code.address(), rmode, cond, rj, rk); ++} ++ ++void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { ++ STATIC_ASSERT(kSystemPointerSize == 8); ++ STATIC_ASSERT(kSmiTagSize == 1); ++ STATIC_ASSERT(kSmiTag == 0); ++ ++ // The builtin_index register contains the builtin index as a Smi. 
++ SmiUntag(builtin_index, builtin_index); ++ Alsl_d(builtin_index, builtin_index, kRootRegister, kSystemPointerSizeLog2, ++ t7); ++ Ld_d(builtin_index, ++ MemOperand(builtin_index, IsolateData::builtin_entry_table_offset())); ++} ++ ++void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { ++ LoadEntryFromBuiltinIndex(builtin_index); ++ Call(builtin_index); ++} ++ ++void TurboAssembler::PatchAndJump(Address target) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ pcaddi(scratch, 4); ++ Ld_d(t7, MemOperand(scratch, 0)); ++ jirl(zero_reg, t7, 0); ++ nop(); ++ DCHECK_EQ(reinterpret_cast(pc_) % 8, 0); ++ *reinterpret_cast(pc_) = target; // pc_ should be align. ++ pc_ += sizeof(uint64_t); ++} ++ ++void TurboAssembler::StoreReturnAddressAndCall(Register target) { ++ // This generates the final instruction sequence for calls to C functions ++ // once an exit frame has been constructed. ++ // ++ // Note that this assumes the caller code (i.e. the Code object currently ++ // being generated) is immovable or that the callee function cannot trigger ++ // GC, since the callee function will return to it. ++ ++ Assembler::BlockTrampolinePoolScope block_trampoline_pool(this); ++ static constexpr int kNumInstructionsToJump = 2; ++ Label find_ra; ++ // Adjust the value in ra to point to the correct return location, 2nd ++ // instruction past the real call into C code (the jirl)), and push it. ++ // This is the return address of the exit frame. ++ pcaddi(ra, kNumInstructionsToJump + 1); ++ bind(&find_ra); ++ ++ // This spot was reserved in EnterExitFrame. ++ St_d(ra, MemOperand(sp, 0)); ++ // Stack is still aligned. ++ ++ // TODO can be jirl target? a0 -- a7? ++ jirl(zero_reg, target, 0); ++ // Make sure the stored 'ra' points to this position. ++ DCHECK_EQ(kNumInstructionsToJump, InstructionsGeneratedSince(&find_ra)); ++} ++ ++void TurboAssembler::Ret(Condition cond, Register rj, const Operand& rk) { ++ Jump(ra, cond, rj, rk); ++} ++ ++void TurboAssembler::DropAndRet(int drop) { ++ DCHECK(is_int16(drop * kPointerSize)); ++ addi_d(sp, sp, drop * kPointerSize); ++ Ret(); ++} ++ ++void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1, ++ const Operand& r2) { ++ // Both Drop and Ret need to be conditional. 
++ Label skip; ++ if (cond != cc_always) { ++ Branch(&skip, NegateCondition(cond), r1, r2); ++ } ++ ++ Drop(drop); ++ Ret(); ++ ++ if (cond != cc_always) { ++ bind(&skip); ++ } ++} ++ ++void TurboAssembler::Drop(int count, Condition cond, Register reg, ++ const Operand& op) { ++ if (count <= 0) { ++ return; ++ } ++ ++ Label skip; ++ ++ if (cond != al) { ++ Branch(&skip, NegateCondition(cond), reg, op); ++ } ++ ++ Add_d(sp, sp, Operand(count * kPointerSize)); ++ ++ if (cond != al) { ++ bind(&skip); ++ } ++} ++ ++void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) { ++ if (scratch == no_reg) { ++ Xor(reg1, reg1, Operand(reg2)); ++ Xor(reg2, reg2, Operand(reg1)); ++ Xor(reg1, reg1, Operand(reg2)); ++ } else { ++ mov(scratch, reg1); ++ mov(reg1, reg2); ++ mov(reg2, scratch); ++ } ++} ++ ++void TurboAssembler::Call(Label* target) { Branch(target, true); } ++ ++void TurboAssembler::Push(Smi smi) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ li(scratch, Operand(smi)); ++ push(scratch); ++} ++ ++void TurboAssembler::Push(Handle handle) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ li(scratch, Operand(handle)); ++ push(scratch); ++} ++ ++void MacroAssembler::MaybeDropFrames() { ++ // Check whether we need to drop frames to restart a function on the stack. ++ li(a1, ExternalReference::debug_restart_fp_address(isolate())); ++ Ld_d(a1, MemOperand(a1, 0)); ++ Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET, ++ ne, a1, Operand(zero_reg)); ++} ++ ++// --------------------------------------------------------------------------- ++// Exception handling. ++ ++void MacroAssembler::PushStackHandler() { ++ // Adjust this code if not the case. ++ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize); ++ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); ++ ++ Push(Smi::zero()); // Padding. ++ ++ // Link the current handler as the next handler. ++ li(t2, ++ ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate())); ++ Ld_d(t1, MemOperand(t2, 0)); ++ push(t1); ++ ++ // Set this new handler as the current one. ++ St_d(sp, MemOperand(t2, 0)); ++} ++ ++void MacroAssembler::PopStackHandler() { ++ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); ++ pop(a1); ++ Add_d(sp, sp, ++ Operand( ++ static_cast(StackHandlerConstants::kSize - kPointerSize))); ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ li(scratch, ++ ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate())); ++ St_d(a1, MemOperand(scratch, 0)); ++} ++ ++void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst, ++ const DoubleRegister src) { ++ fsub_d(dst, src, kDoubleRegZero); ++} ++ ++void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) { ++ Move(dst, f0); // Reg f0 is loongarch return value ++} ++ ++void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) { ++ Move(dst, f0); // Reg f0 is loongarch first argument value. 
++} ++ ++void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(f0, src); } ++ ++void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(f0, src); } ++ ++void TurboAssembler::MovToFloatParameters(DoubleRegister src1, ++ DoubleRegister src2) { ++ const DoubleRegister fparg2 = f1; ++ if (src2 == f0) { ++ DCHECK(src1 != fparg2); ++ Move(fparg2, src2); ++ Move(f0, src1); ++ } else { ++ Move(f0, src1); ++ Move(fparg2, src2); ++ } ++} ++ ++// ----------------------------------------------------------------------------- ++// JavaScript invokes. ++ ++void TurboAssembler::PrepareForTailCall(Register callee_args_count, ++ Register caller_args_count, ++ Register scratch0, Register scratch1) { ++ // Calculate the end of destination area where we will put the arguments ++ // after we drop current frame. We add kPointerSize to count the receiver ++ // argument which is not included into formal parameters count. ++ Register dst_reg = scratch0; ++ Alsl_d(dst_reg, caller_args_count, fp, kPointerSizeLog2, t7); ++ Add_d(dst_reg, dst_reg, ++ Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize)); ++ ++ Register src_reg = caller_args_count; ++ // Calculate the end of source area. +kPointerSize is for the receiver. ++ Alsl_d(src_reg, callee_args_count, sp, kPointerSizeLog2, t7); ++ Add_d(src_reg, src_reg, Operand(kPointerSize)); ++ ++ if (FLAG_debug_code) { ++ Check(lo, AbortReason::kStackAccessBelowStackPointer, src_reg, ++ Operand(dst_reg)); ++ } ++ ++ // Restore caller's frame pointer and return address now as they will be ++ // overwritten by the copying loop. ++ Ld_d(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset)); ++ Ld_d(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); ++ ++ // Now copy callee arguments to the caller frame going backwards to avoid ++ // callee arguments corruption (source and destination areas could overlap). ++ ++ // Both src_reg and dst_reg are pointing to the word after the one to copy, ++ // so they must be pre-decremented in the loop. ++ Register tmp_reg = scratch1; ++ Label loop, entry; ++ Branch(&entry); ++ bind(&loop); ++ Sub_d(src_reg, src_reg, Operand(kPointerSize)); ++ Sub_d(dst_reg, dst_reg, Operand(kPointerSize)); ++ Ld_d(tmp_reg, MemOperand(src_reg, 0)); ++ St_d(tmp_reg, MemOperand(dst_reg, 0)); ++ bind(&entry); ++ Branch(&loop, ne, sp, Operand(src_reg)); ++ ++ // Leave current frame. ++ mov(sp, dst_reg); ++} ++ ++void MacroAssembler::InvokePrologue(Register expected_parameter_count, ++ Register actual_parameter_count, ++ Label* done, InvokeFlag flag) { ++ Label regular_invoke; ++ ++ // Check whether the expected and actual arguments count match. The registers ++ // are set up according to contract with ArgumentsAdaptorTrampoline: ++ // a0: actual arguments count ++ // a1: function (passed through to callee) ++ // a2: expected arguments count ++ ++ // The code below is made a lot easier because the calling code already sets ++ // up actual and expected registers according to the contract. 
++ ++ DCHECK_EQ(actual_parameter_count, a0); ++ DCHECK_EQ(expected_parameter_count, a2); ++ ++ Branch(®ular_invoke, eq, expected_parameter_count, ++ Operand(actual_parameter_count)); ++ ++ Handle adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline); ++ if (flag == CALL_FUNCTION) { ++ Call(adaptor); ++ Branch(done); ++ } else { ++ Jump(adaptor, RelocInfo::CODE_TARGET); ++ } ++ ++ bind(®ular_invoke); ++} ++ ++void MacroAssembler::CheckDebugHook(Register fun, Register new_target, ++ Register expected_parameter_count, ++ Register actual_parameter_count) { ++ Label skip_hook; ++ ++ li(t0, ExternalReference::debug_hook_on_function_call_address(isolate())); ++ Ld_b(t0, MemOperand(t0, 0)); ++ Branch(&skip_hook, eq, t0, Operand(zero_reg)); ++ ++ { ++ // Load receiver to pass it later to DebugOnFunctionCall hook. ++ Alsl_d(t0, actual_parameter_count, sp, kPointerSizeLog2, t7); ++ Ld_d(t0, MemOperand(t0, 0)); ++ FrameScope frame(this, ++ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL); ++ SmiTag(expected_parameter_count); ++ Push(expected_parameter_count); ++ ++ SmiTag(actual_parameter_count); ++ Push(actual_parameter_count); ++ ++ if (new_target.is_valid()) { ++ Push(new_target); ++ } ++ // TODO: MultiPush/Pop ++ Push(fun); ++ Push(fun); ++ Push(t0); ++ CallRuntime(Runtime::kDebugOnFunctionCall); ++ Pop(fun); ++ if (new_target.is_valid()) { ++ Pop(new_target); ++ } ++ ++ Pop(actual_parameter_count); ++ SmiUntag(actual_parameter_count); ++ ++ Pop(expected_parameter_count); ++ SmiUntag(expected_parameter_count); ++ } ++ bind(&skip_hook); ++} ++ ++void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, ++ Register expected_parameter_count, ++ Register actual_parameter_count, ++ InvokeFlag flag) { ++ // You can't call a function without a valid frame. ++ DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame()); ++ DCHECK_EQ(function, a1); ++ DCHECK_IMPLIES(new_target.is_valid(), new_target == a3); ++ ++ // On function call, call into the debugger if necessary. ++ CheckDebugHook(function, new_target, expected_parameter_count, ++ actual_parameter_count); ++ ++ // Clear the new.target register if not given. ++ if (!new_target.is_valid()) { ++ LoadRoot(a3, RootIndex::kUndefinedValue); ++ } ++ ++ Label done; ++ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag); ++ // We call indirectly through the code field in the function to ++ // allow recompilation to take effect without changing any of the ++ // call sites. ++ Register code = kJavaScriptCallCodeStartRegister; ++ Ld_d(code, FieldMemOperand(function, JSFunction::kCodeOffset)); ++ if (flag == CALL_FUNCTION) { ++ Add_d(code, code, Operand(Code::kHeaderSize - kHeapObjectTag)); ++ Call(code); ++ } else { ++ DCHECK(flag == JUMP_FUNCTION); ++ Add_d(code, code, Operand(Code::kHeaderSize - kHeapObjectTag)); ++ Jump(code); ++ } ++ ++ // Continue here if InvokePrologue does handle the invocation due to ++ // mismatched parameter counts. ++ bind(&done); ++} ++ ++void MacroAssembler::InvokeFunctionWithNewTarget( ++ Register function, Register new_target, Register actual_parameter_count, ++ InvokeFlag flag) { ++ // You can't call a function without a valid frame. ++ DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame()); ++ ++ // Contract with called JS functions requires that function is passed in a1. 
++ DCHECK_EQ(function, a1); ++ Register expected_parameter_count = a2; ++ Register temp_reg = t0; ++ Ld_d(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); ++ Ld_d(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); ++ // The argument count is stored as uint16_t ++ Ld_hu(expected_parameter_count, ++ FieldMemOperand(temp_reg, ++ SharedFunctionInfo::kFormalParameterCountOffset)); ++ ++ InvokeFunctionCode(a1, new_target, expected_parameter_count, ++ actual_parameter_count, flag); ++} ++ ++void MacroAssembler::InvokeFunction(Register function, ++ Register expected_parameter_count, ++ Register actual_parameter_count, ++ InvokeFlag flag) { ++ // You can't call a function without a valid frame. ++ DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame()); ++ ++ // Contract with called JS functions requires that function is passed in a1. ++ DCHECK_EQ(function, a1); ++ ++ // Get the function and setup the context. ++ Ld_d(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); ++ ++ InvokeFunctionCode(a1, no_reg, expected_parameter_count, ++ actual_parameter_count, flag); ++} ++ ++// --------------------------------------------------------------------------- ++// Support functions. ++ ++void MacroAssembler::GetObjectType(Register object, Register map, ++ Register type_reg) { ++ LoadMap(map, object); ++ Ld_hu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); ++} ++ ++// ----------------------------------------------------------------------------- ++// Runtime calls. ++ ++void TurboAssembler::AdddOverflow(Register dst, Register left, ++ const Operand& right, Register overflow) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register right_reg = no_reg; ++ Register scratch = t8; ++ if (!right.is_reg()) { ++ li(t7, Operand(right)); ++ right_reg = t7; ++ } else { ++ right_reg = right.rm(); ++ } ++ ++ DCHECK(left != scratch && right_reg != scratch && dst != scratch && ++ overflow != scratch); ++ DCHECK(overflow != left && overflow != right_reg); ++ ++ if (dst == left || dst == right_reg) { ++ add_d(scratch, left, right_reg); ++ xor_(overflow, scratch, left); ++ xor_(t7, scratch, right_reg); ++ and_(overflow, overflow, t7); ++ mov(dst, scratch); ++ } else { ++ add_d(dst, left, right_reg); ++ xor_(overflow, dst, left); ++ xor_(t7, dst, right_reg); ++ and_(overflow, overflow, t7); ++ } ++} ++ ++void TurboAssembler::SubdOverflow(Register dst, Register left, ++ const Operand& right, Register overflow) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register right_reg = no_reg; ++ Register scratch = t8; ++ if (!right.is_reg()) { ++ li(t7, Operand(right)); ++ right_reg = t7; ++ } else { ++ right_reg = right.rm(); ++ } ++ ++ DCHECK(left != scratch && right_reg != scratch && dst != scratch && ++ overflow != scratch); ++ DCHECK(overflow != left && overflow != right_reg); ++ ++ if (dst == left || dst == right_reg) { ++ Sub_d(scratch, left, right_reg); ++ xor_(overflow, left, scratch); ++ xor_(t7, left, right_reg); ++ and_(overflow, overflow, t7); ++ mov(dst, scratch); ++ } else { ++ sub_d(dst, left, right_reg); ++ xor_(overflow, left, dst); ++ xor_(t7, left, right_reg); ++ and_(overflow, overflow, t7); ++ } ++} ++ ++void TurboAssembler::MulOverflow(Register dst, Register left, ++ const Operand& right, Register overflow) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register right_reg = no_reg; ++ Register scratch = t8; ++ if (!right.is_reg()) { ++ li(t7, Operand(right)); ++ right_reg = t7; ++ } else { ++ right_reg = right.rm(); ++ } ++ ++ 
DCHECK(left != scratch && right_reg != scratch && dst != scratch && ++ overflow != scratch); ++ DCHECK(overflow != left && overflow != right_reg); ++ ++ if (dst == left || dst == right_reg) { ++ Mul_w(scratch, left, right_reg); ++ Mulh_w(overflow, left, right_reg); ++ mov(dst, scratch); ++ } else { ++ Mul_w(dst, left, right_reg); ++ Mulh_w(overflow, left, right_reg); ++ } ++ ++ srai_d(scratch, dst, 32); ++ xor_(overflow, overflow, scratch); ++} ++ ++void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, ++ SaveFPRegsMode save_doubles) { ++ // All parameters are on the stack. v0 has the return value after call. ++ ++ // If the expected number of arguments of the runtime function is ++ // constant, we check that the actual number of arguments match the ++ // expectation. ++ CHECK(f->nargs < 0 || f->nargs == num_arguments); ++ ++ // TODO(1236192): Most runtime routines don't need the number of ++ // arguments passed in because it is constant. At some point we ++ // should remove this need and make the runtime routine entry code ++ // smarter. ++ PrepareCEntryArgs(num_arguments); ++ PrepareCEntryFunction(ExternalReference::Create(f)); ++ Handle code = ++ CodeFactory::CEntry(isolate(), f->result_size, save_doubles); ++ Call(code, RelocInfo::CODE_TARGET); ++} ++ ++void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) { ++ const Runtime::Function* function = Runtime::FunctionForId(fid); ++ DCHECK_EQ(1, function->result_size); ++ if (function->nargs >= 0) { ++ PrepareCEntryArgs(function->nargs); ++ } ++ JumpToExternalReference(ExternalReference::Create(fid)); ++} ++ ++void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin, ++ bool builtin_exit_frame) { ++ PrepareCEntryFunction(builtin); ++ Handle code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs, ++ kArgvOnStack, builtin_exit_frame); ++ Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg)); ++} ++ ++void MacroAssembler::JumpToInstructionStream(Address entry) { ++ li(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET)); ++ Jump(kOffHeapTrampolineRegister); ++} ++ ++void MacroAssembler::LoadWeakValue(Register out, Register in, ++ Label* target_if_cleared) { ++ Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObjectLower32)); ++ ++ And(out, in, Operand(~kWeakHeapObjectMask)); ++} ++ ++void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, ++ Register scratch1, Register scratch2) { ++ DCHECK_GT(value, 0); ++ if (FLAG_native_code_counters && counter->Enabled()) { ++ // This operation has to be exactly 32-bit wide in case the external ++ // reference table redirects the counter to a uint32_t dummy_stats_counter_ ++ // field. ++ li(scratch2, ExternalReference::Create(counter)); ++ Ld_w(scratch1, MemOperand(scratch2, 0)); ++ Add_w(scratch1, scratch1, Operand(value)); ++ St_w(scratch1, MemOperand(scratch2, 0)); ++ } ++} ++ ++void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, ++ Register scratch1, Register scratch2) { ++ DCHECK_GT(value, 0); ++ if (FLAG_native_code_counters && counter->Enabled()) { ++ // This operation has to be exactly 32-bit wide in case the external ++ // reference table redirects the counter to a uint32_t dummy_stats_counter_ ++ // field. 
++ li(scratch2, ExternalReference::Create(counter)); ++ Ld_w(scratch1, MemOperand(scratch2, 0)); ++ Sub_w(scratch1, scratch1, Operand(value)); ++ St_w(scratch1, MemOperand(scratch2, 0)); ++ } ++} ++ ++// ----------------------------------------------------------------------------- ++// Debugging. ++ ++void TurboAssembler::Trap() { stop(); } ++void TurboAssembler::DebugBreak() { stop(); } ++ ++void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs, ++ Operand rk) { ++ if (emit_debug_code()) Check(cc, reason, rs, rk); ++} ++ ++void TurboAssembler::Check(Condition cc, AbortReason reason, Register rj, ++ Operand rk) { ++ Label L; ++ Branch(&L, cc, rj, rk); ++ Abort(reason); ++ // Will not return here. ++ bind(&L); ++} ++ ++void TurboAssembler::Abort(AbortReason reason) { ++ Label abort_start; ++ bind(&abort_start); ++#ifdef DEBUG ++ const char* msg = GetAbortReason(reason); ++ RecordComment("Abort message: "); ++ RecordComment(msg); ++#endif ++ ++ // Avoid emitting call to builtin if requested. ++ if (trap_on_abort()) { ++ stop(); ++ return; ++ } ++ ++ if (should_abort_hard()) { ++ // We don't care if we constructed a frame. Just pretend we did. ++ FrameScope assume_frame(this, StackFrame::NONE); ++ PrepareCallCFunction(0, a0); ++ li(a0, Operand(static_cast(reason))); ++ CallCFunction(ExternalReference::abort_with_reason(), 1); ++ return; ++ } ++ ++ Move(a0, Smi::FromInt(static_cast(reason))); ++ ++ // Disable stub call restrictions to always allow calls to abort. ++ if (!has_frame()) { ++ // We don't actually want to generate a pile of code for this, so just ++ // claim there is a stack frame, without generating one. ++ FrameScope scope(this, StackFrame::NONE); ++ Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET); ++ } else { ++ Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET); ++ } ++ // Will not return here. ++ if (is_trampoline_pool_blocked()) { ++ // If the calling code cares about the exact number of ++ // instructions generated, we insert padding here to keep the size ++ // of the Abort macro constant. ++ // Currently in debug mode with debug_code enabled the number of ++ // generated instructions is 10, so we use this as a maximum value. 
++ static const int kExpectedAbortInstructions = 10; ++ int abort_instructions = InstructionsGeneratedSince(&abort_start); ++ DCHECK_LE(abort_instructions, kExpectedAbortInstructions); ++ while (abort_instructions++ < kExpectedAbortInstructions) { ++ nop(); ++ } ++ } ++} ++ ++void MacroAssembler::LoadMap(Register destination, Register object) { ++ Ld_d(destination, FieldMemOperand(object, HeapObject::kMapOffset)); ++} ++ ++void MacroAssembler::LoadNativeContextSlot(int index, Register dst) { ++ LoadMap(dst, cp); ++ Ld_d(dst, FieldMemOperand( ++ dst, Map::kConstructorOrBackPointerOrNativeContextOffset)); ++ Ld_d(dst, MemOperand(dst, Context::SlotOffset(index))); ++} ++ ++void TurboAssembler::StubPrologue(StackFrame::Type type) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ li(scratch, Operand(StackFrame::TypeToMarker(type))); ++ PushCommonFrame(scratch); ++} ++ ++void TurboAssembler::Prologue() { PushStandardFrame(a1); } ++ ++void TurboAssembler::EnterFrame(StackFrame::Type type) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ int stack_offset = -3 * kPointerSize; ++ const int fp_offset = 1 * kPointerSize; ++ addi_d(sp, sp, stack_offset); ++ stack_offset = -stack_offset - kPointerSize; ++ St_d(ra, MemOperand(sp, stack_offset)); ++ stack_offset -= kPointerSize; ++ St_d(fp, MemOperand(sp, stack_offset)); ++ stack_offset -= kPointerSize; ++ li(t7, Operand(StackFrame::TypeToMarker(type))); ++ St_d(t7, MemOperand(sp, stack_offset)); ++ // Adjust FP to point to saved FP. ++ DCHECK_EQ(stack_offset, 0); ++ Add_d(fp, sp, Operand(fp_offset)); ++} ++ ++void TurboAssembler::LeaveFrame(StackFrame::Type type) { ++ addi_d(sp, fp, 2 * kPointerSize); ++ Ld_d(ra, MemOperand(fp, 1 * kPointerSize)); ++ Ld_d(fp, MemOperand(fp, 0 * kPointerSize)); ++} ++ ++void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, ++ StackFrame::Type frame_type) { ++ DCHECK(frame_type == StackFrame::EXIT || ++ frame_type == StackFrame::BUILTIN_EXIT); ++ ++ // Set up the frame structure on the stack. ++ STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement); ++ STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset); ++ STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset); ++ ++ // This is how the stack will look: ++ // fp + 2 (==kCallerSPDisplacement) - old stack's end ++ // [fp + 1 (==kCallerPCOffset)] - saved old ra ++ // [fp + 0 (==kCallerFPOffset)] - saved old fp ++ // [fp - 1 StackFrame::EXIT Smi ++ // [fp - 2 (==kSPOffset)] - sp of the called function ++ // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the ++ // new stack (will contain saved ra) ++ ++ // Save registers and reserve room for saved entry sp. ++ addi_d(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp); ++ St_d(ra, MemOperand(sp, 3 * kPointerSize)); ++ St_d(fp, MemOperand(sp, 2 * kPointerSize)); ++ { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ li(scratch, Operand(StackFrame::TypeToMarker(frame_type))); ++ St_d(scratch, MemOperand(sp, 1 * kPointerSize)); ++ } ++ // Set up new frame pointer. ++ addi_d(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp); ++ ++ if (emit_debug_code()) { ++ St_d(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset)); ++ } ++ ++ { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ // Save the frame pointer and the context in top. 
++ li(t8, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, ++ isolate())); ++ St_d(fp, MemOperand(t8, 0)); ++ li(t8, ++ ExternalReference::Create(IsolateAddressId::kContextAddress, isolate())); ++ St_d(cp, MemOperand(t8, 0)); ++ } ++ ++ const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); ++ if (save_doubles) { ++ // The stack is already aligned to 0 modulo 8 for stores with sdc1. ++ int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2; ++ int space = kNumOfSavedRegisters * kDoubleSize; ++ Sub_d(sp, sp, Operand(space)); ++ // Remember: we only need to save every 2nd double FPU value. ++ for (int i = 0; i < kNumOfSavedRegisters; i++) { ++ FPURegister reg = FPURegister::from_code(2 * i); ++ Fst_d(reg, MemOperand(sp, i * kDoubleSize)); ++ } ++ } ++ ++ // Reserve place for the return address, stack space and an optional slot ++ // (used by DirectCEntry to hold the return value if a struct is ++ // returned) and align the frame preparing for calling the runtime function. ++ DCHECK_GE(stack_space, 0); ++ Sub_d(sp, sp, Operand((stack_space + 2) * kPointerSize)); ++ if (frame_alignment > 0) { ++ DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); ++ And(sp, sp, Operand(-frame_alignment)); // Align stack. ++ } ++ ++ // Set the exit frame sp value to point just before the return address ++ // location. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ addi_d(scratch, sp, kPointerSize); ++ St_d(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset)); ++} ++ ++void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count, ++ bool do_return, ++ bool argument_count_is_length) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ // Optionally restore all double registers. ++ if (save_doubles) { ++ // Remember: we only need to restore every 2nd double FPU value. ++ int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2; ++ Sub_d(t8, fp, ++ Operand(ExitFrameConstants::kFixedFrameSizeFromFp + ++ kNumOfSavedRegisters * kDoubleSize)); ++ for (int i = 0; i < kNumOfSavedRegisters; i++) { ++ FPURegister reg = FPURegister::from_code(2 * i); ++ Fld_d(reg, MemOperand(t8, i * kDoubleSize)); ++ } ++ } ++ ++ // Clear top frame. ++ li(t8, ++ ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate())); ++ St_d(zero_reg, MemOperand(t8, 0)); ++ ++ // Restore current context from top and clear it in debug mode. ++ li(t8, ++ ExternalReference::Create(IsolateAddressId::kContextAddress, isolate())); ++ Ld_d(cp, MemOperand(t8, 0)); ++ ++#ifdef DEBUG ++ li(t8, ++ ExternalReference::Create(IsolateAddressId::kContextAddress, isolate())); ++ St_d(a3, MemOperand(t8, 0)); ++#endif ++ ++ // Pop the arguments, restore registers, and return. ++ mov(sp, fp); // Respect ABI stack constraint. ++ Ld_d(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset)); ++ Ld_d(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset)); ++ ++ if (argument_count.is_valid()) { ++ if (argument_count_is_length) { ++ add_d(sp, sp, argument_count); ++ } else { ++ Alsl_d(sp, argument_count, sp, kPointerSizeLog2, t8); ++ } ++ } ++ ++ addi_d(sp, sp, 2 * kPointerSize); ++ if (do_return) { ++ Ret(); ++ } ++} ++ ++int TurboAssembler::ActivationFrameAlignment() { ++#if V8_HOST_ARCH_LA64 ++ // Running on the real platform. Use the alignment as mandated by the local ++ // environment. ++ // Note: This will break if we ever start generating snapshots on one Mips ++ // platform for another Mips platform with a different alignment. 
++ return base::OS::ActivationFrameAlignment(); ++#else // V8_HOST_ARCH_LA64 ++ // If we are using the simulator then we should always align to the expected ++ // alignment. As the simulator is used to generate snapshots we do not know ++ // if the target platform will need alignment, so this is controlled from a ++ // flag. ++ return FLAG_sim_stack_alignment; ++#endif // V8_HOST_ARCH_LA64 ++} ++ ++void MacroAssembler::AssertStackIsAligned() { ++ if (emit_debug_code()) { ++ const int frame_alignment = ActivationFrameAlignment(); ++ const int frame_alignment_mask = frame_alignment - 1; ++ ++ if (frame_alignment > kPointerSize) { ++ Label alignment_as_expected; ++ DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); ++ { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ andi(scratch, sp, frame_alignment_mask); ++ Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg)); ++ } ++ // Don't use Check here, as it will call Runtime_Abort re-entering here. ++ stop(); ++ bind(&alignment_as_expected); ++ } ++ } ++} ++ ++void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) { ++ if (SmiValuesAre32Bits()) { ++ Ld_w(dst, MemOperand(src.base(), SmiWordOffset(src.offset()))); ++ } else { ++ DCHECK(SmiValuesAre31Bits()); ++ Ld_w(dst, src); ++ SmiUntag(dst); ++ } ++} ++ ++void TurboAssembler::JumpIfSmi(Register value, Label* smi_label, ++ Register scratch) { ++ DCHECK_EQ(0, kSmiTag); ++ andi(scratch, value, kSmiTagMask); ++ Branch(smi_label, eq, scratch, Operand(zero_reg)); ++} ++ ++void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label, ++ Register scratch) { ++ DCHECK_EQ(0, kSmiTag); ++ andi(scratch, value, kSmiTagMask); ++ Branch(not_smi_label, ne, scratch, Operand(zero_reg)); ++} ++ ++void MacroAssembler::AssertNotSmi(Register object) { ++ if (emit_debug_code()) { ++ STATIC_ASSERT(kSmiTag == 0); ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ andi(scratch, object, kSmiTagMask); ++ Check(ne, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg)); ++ } ++} ++ ++void MacroAssembler::AssertSmi(Register object) { ++ if (emit_debug_code()) { ++ STATIC_ASSERT(kSmiTag == 0); ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ andi(scratch, object, kSmiTagMask); ++ Check(eq, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg)); ++ } ++} ++ ++void MacroAssembler::AssertConstructor(Register object) { ++ if (emit_debug_code()) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ STATIC_ASSERT(kSmiTag == 0); ++ SmiTst(object, t8); ++ Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, t8, ++ Operand(zero_reg)); ++ ++ LoadMap(t8, object); ++ Ld_bu(t8, FieldMemOperand(t8, Map::kBitFieldOffset)); ++ And(t8, t8, Operand(Map::Bits1::IsConstructorBit::kMask)); ++ Check(ne, AbortReason::kOperandIsNotAConstructor, t8, Operand(zero_reg)); ++ } ++} ++ ++void MacroAssembler::AssertFunction(Register object) { ++ if (emit_debug_code()) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ STATIC_ASSERT(kSmiTag == 0); ++ SmiTst(object, t8); ++ Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8, ++ Operand(zero_reg)); ++ GetObjectType(object, t8, t8); ++ Check(eq, AbortReason::kOperandIsNotAFunction, t8, ++ Operand(JS_FUNCTION_TYPE)); ++ } ++} ++ ++void MacroAssembler::AssertBoundFunction(Register object) { ++ if (emit_debug_code()) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ STATIC_ASSERT(kSmiTag == 0); ++ SmiTst(object, t8); ++ Check(ne, 
AbortReason::kOperandIsASmiAndNotABoundFunction, t8, ++ Operand(zero_reg)); ++ GetObjectType(object, t8, t8); ++ Check(eq, AbortReason::kOperandIsNotABoundFunction, t8, ++ Operand(JS_BOUND_FUNCTION_TYPE)); ++ } ++} ++ ++void MacroAssembler::AssertGeneratorObject(Register object) { ++ if (!emit_debug_code()) return; ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ STATIC_ASSERT(kSmiTag == 0); ++ SmiTst(object, t8); ++ Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, t8, ++ Operand(zero_reg)); ++ ++ GetObjectType(object, t8, t8); ++ ++ Label done; ++ ++ // Check if JSGeneratorObject ++ Branch(&done, eq, t8, Operand(JS_GENERATOR_OBJECT_TYPE)); ++ ++ // Check if JSAsyncFunctionObject (See MacroAssembler::CompareInstanceType) ++ Branch(&done, eq, t8, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE)); ++ ++ // Check if JSAsyncGeneratorObject ++ Branch(&done, eq, t8, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE)); ++ ++ Abort(AbortReason::kOperandIsNotAGeneratorObject); ++ ++ bind(&done); ++} ++ ++void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, ++ Register scratch) { ++ if (emit_debug_code()) { ++ Label done_checking; ++ AssertNotSmi(object); ++ LoadRoot(scratch, RootIndex::kUndefinedValue); ++ Branch(&done_checking, eq, object, Operand(scratch)); ++ GetObjectType(object, scratch, scratch); ++ Assert(eq, AbortReason::kExpectedUndefinedOrCell, scratch, ++ Operand(ALLOCATION_SITE_TYPE)); ++ bind(&done_checking); ++ } ++} ++ ++void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1, ++ FPURegister src2, Label* out_of_line) { ++ if (src1 == src2) { ++ Move_s(dst, src1); ++ return; ++ } ++ ++ // Check if one of operands is NaN. ++ CompareIsNanF32(src1, src2); ++ BranchTrueF(out_of_line); ++ ++ fmax_s(dst, src1, src2); ++} ++ ++void TurboAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1, ++ FPURegister src2) { ++ fadd_s(dst, src1, src2); ++} ++ ++void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1, ++ FPURegister src2, Label* out_of_line) { ++ if (src1 == src2) { ++ Move_s(dst, src1); ++ return; ++ } ++ ++ // Check if one of operands is NaN. ++ CompareIsNanF32(src1, src2); ++ BranchTrueF(out_of_line); ++ ++ fmin_s(dst, src1, src2); ++} ++ ++void TurboAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1, ++ FPURegister src2) { ++ fadd_s(dst, src1, src2); ++} ++ ++void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1, ++ FPURegister src2, Label* out_of_line) { ++ if (src1 == src2) { ++ Move_d(dst, src1); ++ return; ++ } ++ ++ // Check if one of operands is NaN. ++ CompareIsNanF64(src1, src2); ++ BranchTrueF(out_of_line); ++ ++ fmax_d(dst, src1, src2); ++} ++ ++void TurboAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1, ++ FPURegister src2) { ++ fadd_d(dst, src1, src2); ++} ++ ++void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1, ++ FPURegister src2, Label* out_of_line) { ++ if (src1 == src2) { ++ Move_d(dst, src1); ++ return; ++ } ++ ++ // Check if one of operands is NaN. 
++ CompareIsNanF64(src1, src2); ++ BranchTrueF(out_of_line); ++ ++ fmin_d(dst, src1, src2); ++} ++ ++void TurboAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1, ++ FPURegister src2) { ++ fadd_d(dst, src1, src2); ++} ++ ++static const int kRegisterPassedArguments = 8; ++ ++int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments, ++ int num_double_arguments) { ++ int stack_passed_words = 0; ++ num_reg_arguments += 2 * num_double_arguments; ++ ++ // O32: Up to four simple arguments are passed in registers a0..a3. ++ // N64: Up to eight simple arguments are passed in registers a0..a7. ++ if (num_reg_arguments > kRegisterPassedArguments) { ++ stack_passed_words += num_reg_arguments - kRegisterPassedArguments; ++ } ++ stack_passed_words += kCArgSlotCount; ++ return stack_passed_words; ++} ++ ++void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, ++ int num_double_arguments, ++ Register scratch) { ++ int frame_alignment = ActivationFrameAlignment(); ++ ++ // n64: Up to eight simple arguments in a0..a3, a4..a7, No argument slots. ++ // O32: Up to four simple arguments are passed in registers a0..a3. ++ // Those four arguments must have reserved argument slots on the stack for ++ // mips, even though those argument slots are not normally used. ++ // Both ABIs: Remaining arguments are pushed on the stack, above (higher ++ // address than) the (O32) argument slots. (arg slot calculation handled by ++ // CalculateStackPassedWords()). ++ int stack_passed_arguments = ++ CalculateStackPassedWords(num_reg_arguments, num_double_arguments); ++ if (frame_alignment > kPointerSize) { ++ // Make stack end at alignment and make room for num_arguments - 4 words ++ // and the original value of sp. ++ mov(scratch, sp); ++ Sub_d(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); ++ DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); ++ bstrins_d(sp, zero_reg, std::log2(frame_alignment) - 1, 0); ++ St_d(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); ++ } else { ++ Sub_d(sp, sp, Operand(stack_passed_arguments * kPointerSize)); ++ } ++} ++ ++void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, ++ Register scratch) { ++ PrepareCallCFunction(num_reg_arguments, 0, scratch); ++} ++ ++void TurboAssembler::CallCFunction(ExternalReference function, ++ int num_reg_arguments, ++ int num_double_arguments) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ li(t7, function); ++ CallCFunctionHelper(t7, num_reg_arguments, num_double_arguments); ++} ++ ++void TurboAssembler::CallCFunction(Register function, int num_reg_arguments, ++ int num_double_arguments) { ++ CallCFunctionHelper(function, num_reg_arguments, num_double_arguments); ++} ++ ++void TurboAssembler::CallCFunction(ExternalReference function, ++ int num_arguments) { ++ CallCFunction(function, num_arguments, 0); ++} ++ ++void TurboAssembler::CallCFunction(Register function, int num_arguments) { ++ CallCFunction(function, num_arguments, 0); ++} ++ ++void TurboAssembler::CallCFunctionHelper(Register function, ++ int num_reg_arguments, ++ int num_double_arguments) { ++ DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters); ++ DCHECK(has_frame()); ++ // Make sure that the stack is aligned before calling a C function unless ++ // running in the simulator. The simulator has its own alignment check which ++ // provides more information. ++ // The argument stots are presumed to have been set up by ++ // PrepareCallCFunction. The C function must be called via t9, for mips ABI. 
++ ++#if V8_HOST_ARCH_LA64 ++ if (emit_debug_code()) { ++ int frame_alignment = base::OS::ActivationFrameAlignment(); ++ int frame_alignment_mask = frame_alignment - 1; ++ if (frame_alignment > kPointerSize) { ++ DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); ++ Label alignment_as_expected; ++ { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ And(scratch, sp, Operand(frame_alignment_mask)); ++ Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg)); ++ } ++ // Don't use Check here, as it will call Runtime_Abort possibly ++ // re-entering here. ++ stop(); ++ bind(&alignment_as_expected); ++ } ++ } ++#endif // V8_HOST_ARCH_LA64 ++ ++ // Just call directly. The function called cannot cause a GC, or ++ // allow preemption, so the return address in the link register ++ // stays correct. ++ { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ if (function != t7) { ++ mov(t7, function); ++ function = t7; ++ } ++ ++ // Save the frame pointer and PC so that the stack layout remains iterable, ++ // even without an ExitFrame which normally exists between JS and C frames. ++ // 't' registers are caller-saved so this is safe as a scratch register. ++ Register pc_scratch = t1; ++ Register scratch = t2; ++ DCHECK(!AreAliased(pc_scratch, scratch, function)); ++ ++ pcaddi(pc_scratch, 1); ++ ++ // See x64 code for reasoning about how to address the isolate data fields. ++ if (root_array_available()) { ++ St_d(pc_scratch, MemOperand(kRootRegister, ++ IsolateData::fast_c_call_caller_pc_offset())); ++ St_d(fp, MemOperand(kRootRegister, ++ IsolateData::fast_c_call_caller_fp_offset())); ++ } else { ++ DCHECK_NOT_NULL(isolate()); ++ li(scratch, ExternalReference::fast_c_call_caller_pc_address(isolate())); ++ St_d(pc_scratch, MemOperand(scratch, 0)); ++ li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate())); ++ St_d(fp, MemOperand(scratch, 0)); ++ } ++ ++ Call(function); ++ ++ // We don't unset the PC; the FP is the source of truth. 
++ if (root_array_available()) { ++ St_d(zero_reg, MemOperand(kRootRegister, ++ IsolateData::fast_c_call_caller_fp_offset())); ++ } else { ++ DCHECK_NOT_NULL(isolate()); ++ li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate())); ++ St_d(zero_reg, MemOperand(scratch, 0)); ++ } ++ } ++ ++ int stack_passed_arguments = ++ CalculateStackPassedWords(num_reg_arguments, num_double_arguments); ++ ++ if (base::OS::ActivationFrameAlignment() > kPointerSize) { ++ Ld_d(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); ++ } else { ++ Add_d(sp, sp, Operand(stack_passed_arguments * kPointerSize)); ++ } ++} ++ ++#undef BRANCH_ARGS_CHECK ++ ++void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask, ++ Condition cc, Label* condition_met) { ++ And(scratch, object, Operand(~kPageAlignmentMask)); ++ Ld_d(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); ++ And(scratch, scratch, Operand(mask)); ++ Branch(condition_met, cc, scratch, Operand(zero_reg)); ++} ++ ++Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3, ++ Register reg4, Register reg5, ++ Register reg6) { ++ RegList regs = 0; ++ if (reg1.is_valid()) regs |= reg1.bit(); ++ if (reg2.is_valid()) regs |= reg2.bit(); ++ if (reg3.is_valid()) regs |= reg3.bit(); ++ if (reg4.is_valid()) regs |= reg4.bit(); ++ if (reg5.is_valid()) regs |= reg5.bit(); ++ if (reg6.is_valid()) regs |= reg6.bit(); ++ ++ const RegisterConfiguration* config = RegisterConfiguration::Default(); ++ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) { ++ int code = config->GetAllocatableGeneralCode(i); ++ Register candidate = Register::from_code(code); ++ if (regs & candidate.bit()) continue; ++ return candidate; ++ } ++ UNREACHABLE(); ++} ++ ++void TurboAssembler::ComputeCodeStartAddress(Register dst) { ++ // TODO: range check, add Pcadd macro function? ++ pcaddi(dst, -pc_offset() >> 2); ++} ++ ++void TurboAssembler::ResetSpeculationPoisonRegister() { ++ li(kSpeculationPoisonRegister, -1); ++} ++ ++void TurboAssembler::CallForDeoptimization(Address target, int deopt_id, ++ Label* exit, DeoptimizeKind kind) { ++ USE(exit, kind); ++ NoRootArrayScope no_root_array(this); ++ ++ // Save the deopt id in kRootRegister (we don't need the roots array from now ++ // on). ++ DCHECK_LE(deopt_id, 0xFFFF); ++ li(kRootRegister, deopt_id); ++ Call(target, RelocInfo::RUNTIME_ENTRY); ++} ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_TARGET_ARCH_LA64 +diff --git a/src/3rdparty/chromium/v8/src/codegen/la64/macro-assembler-la64.h b/src/3rdparty/chromium/v8/src/codegen/la64/macro-assembler-la64.h +new file mode 100644 +index 0000000000..64116977c0 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/codegen/la64/macro-assembler-la64.h +@@ -0,0 +1,1084 @@ ++// Copyright 2012 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H ++#error This header must be included via macro-assembler.h ++#endif ++ ++#ifndef V8_CODEGEN_LA64_MACRO_ASSEMBLER_LA64_H_ ++#define V8_CODEGEN_LA64_MACRO_ASSEMBLER_LA64_H_ ++ ++#include "src/codegen/assembler.h" ++#include "src/codegen/la64/assembler-la64.h" ++#include "src/common/globals.h" ++ ++namespace v8 { ++namespace internal { ++ ++// Forward declarations. ++enum class AbortReason : uint8_t; ++ ++// Reserved Register Usage Summary. ++// ++// Registers t8 and t7 are reserved for use by the MacroAssembler. 
++// ++// The programmer should know that the MacroAssembler may clobber these two, ++// but won't touch other registers except in special cases. ++// ++// Per the MIPS ABI, register t0 -- t8 must be used for indirect function call ++// via 'jirl t[0-8]' instructions. gcc? ++ ++// Flags used for LeaveExitFrame function. ++enum LeaveExitFrameMode { EMIT_RETURN = true, NO_EMIT_RETURN = false }; ++ ++// Flags used for the li macro-assembler function. ++enum LiFlags { ++ // If the constant value can be represented in just 12 bits, then ++ // optimize the li to use a single instruction, rather than lu12i_w/lu32i_d/ ++ // lu52i_d/ori sequence. A number of other optimizations that emits less than ++ // maximum number of instructions exists. ++ OPTIMIZE_SIZE = 0, ++ // Always use 4 instructions (lu12i_w/ori/lu32i_d/lu52i_d sequence), ++ // even if the constant could be loaded with just one, so that this value is ++ // patchable later. ++ CONSTANT_SIZE = 1, ++ // For address loads only 3 instruction are required. Used to mark ++ // constant load that will be used as address without relocation ++ // information. It ensures predictable code size, so specific sites ++ // in code are patchable. ++ ADDRESS_LOAD = 2 ++}; ++ ++enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; ++enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; ++enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved }; ++ ++Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg, ++ Register reg3 = no_reg, ++ Register reg4 = no_reg, ++ Register reg5 = no_reg, ++ Register reg6 = no_reg); ++ ++// ----------------------------------------------------------------------------- ++// Static helper functions. ++ ++#define SmiWordOffset(offset) (offset + kPointerSize / 2) ++ ++// Generate a MemOperand for loading a field from an object. ++inline MemOperand FieldMemOperand(Register object, int offset) { ++ return MemOperand(object, offset - kHeapObjectTag); ++} ++ ++class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { ++ public: ++ using TurboAssemblerBase::TurboAssemblerBase; ++ ++ // Activation support. ++ void EnterFrame(StackFrame::Type type); ++ void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) { ++ // Out-of-line constant pool not implemented on la64. ++ UNREACHABLE(); ++ } ++ void LeaveFrame(StackFrame::Type type); ++ ++ // Generates function and stub prologue code. ++ void StubPrologue(StackFrame::Type type); ++ void Prologue(); ++ ++ void InitializeRootRegister() { ++ ExternalReference isolate_root = ExternalReference::isolate_root(isolate()); ++ li(kRootRegister, Operand(isolate_root)); ++ } ++ ++ // Jump unconditionally to given label. ++ // Use rather b(Label) for code generation. ++ void jmp(Label* L) { Branch(L); } ++ ++ // ------------------------------------------------------------------------- ++ // Debugging. ++ ++ void Trap() override; ++ void DebugBreak() override; ++ ++ // Calls Abort(msg) if the condition cc is not satisfied. ++ // Use --debug_code to enable. ++ void Assert(Condition cc, AbortReason reason, Register rj, Operand rk); ++ ++ // Like Assert(), but always enabled. ++ void Check(Condition cc, AbortReason reason, Register rj, Operand rk); ++ ++ // Print a message to stdout and abort execution. 
++  void Abort(AbortReason msg);
++
++  void Branch(Label* label, bool need_link = false);
++  void Branch(Label* label, Condition cond, Register r1, const Operand& r2,
++              bool need_link = false);
++  void BranchShort(Label* label, Condition cond, Register r1, const Operand& r2,
++                   bool need_link = false);
++  void Branch(Label* L, Condition cond, Register rj, RootIndex index);
++
++  // Floating point branches
++  void CompareF32(FPURegister cmp1, FPURegister cmp2, FPUCondition cc,
++                  CFRegister cd = FCC0) {
++    CompareF(cmp1, cmp2, cc, cd, true);
++  }
++
++  void CompareIsNanF32(FPURegister cmp1, FPURegister cmp2,
++                       CFRegister cd = FCC0) {
++    CompareIsNanF(cmp1, cmp2, cd, true);
++  }
++
++  void CompareF64(FPURegister cmp1, FPURegister cmp2, FPUCondition cc,
++                  CFRegister cd = FCC0) {
++    CompareF(cmp1, cmp2, cc, cd, false);
++  }
++
++  void CompareIsNanF64(FPURegister cmp1, FPURegister cmp2,
++                       CFRegister cd = FCC0) {
++    CompareIsNanF(cmp1, cmp2, cd, false);
++  }
++
++  void BranchTrueShortF(Label* target, CFRegister cc = FCC0);
++  void BranchFalseShortF(Label* target, CFRegister cc = FCC0);
++
++  void BranchTrueF(Label* target, CFRegister cc = FCC0);
++  void BranchFalseF(Label* target, CFRegister cc = FCC0);
++
++  static int InstrCountForLi64Bit(int64_t value);
++  inline void LiLower32BitHelper(Register rd, Operand j);
++  void li_optimized(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
++  void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
++  inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) {
++    li(rd, Operand(j), mode);
++  }
++  inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
++    li(rd, Operand(static_cast<int64_t>(j)), mode);
++  }
++  void li(Register dst, Handle<HeapObject> value, LiFlags mode = OPTIMIZE_SIZE);
++  void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE);
++  void li(Register dst, const StringConstantBase* string,
++          LiFlags mode = OPTIMIZE_SIZE);
++
++  void LoadFromConstantsTable(Register destination,
++                              int constant_index) override;
++  void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
++  void LoadRootRelative(Register destination, int32_t offset) override;
++
++// Jump, Call, and Ret pseudo instructions implementing inter-working.
++#define COND_ARGS                                \
++  Condition cond = al, Register rj = zero_reg,   \
++      const Operand &rk = Operand(zero_reg)
++
++  void Jump(Register target, COND_ARGS);
++  void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
++  void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
++  // Differs from li: this method stores the target address in memory and then
++  // loads it into a register with ld_d, so it can be used in the wasm jump
++  // table for concurrent patching.
++  void PatchAndJump(Address target);
++  void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
++  void Jump(const ExternalReference& reference) override;
++  void Call(Register target, COND_ARGS);
++  void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
++  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
++            COND_ARGS);
++  void Call(Label* target);
++  void LoadAddress(Register dst, Label* target);
++
++  // Load the builtin given by the Smi in |builtin_index| into the same
++  // register.
++  void LoadEntryFromBuiltinIndex(Register builtin_index);
++  void CallBuiltinByIndex(Register builtin_index) override;
++
++  void LoadCodeObjectEntry(Register destination,
++                           Register code_object) override {
++    // TODO(mips): Implement.
++ UNIMPLEMENTED(); ++ } ++ void CallCodeObject(Register code_object) override { ++ // TODO(mips): Implement. ++ UNIMPLEMENTED(); ++ } ++ void JumpCodeObject(Register code_object) override { ++ // TODO(mips): Implement. ++ UNIMPLEMENTED(); ++ } ++ ++ // Generates an instruction sequence s.t. the return address points to the ++ // instruction following the call. ++ // The return address on the stack is used by frame iteration. ++ void StoreReturnAddressAndCall(Register target); ++ ++ void CallForDeoptimization(Address target, int deopt_id, Label* exit, ++ DeoptimizeKind kind); ++ ++ void Ret(COND_ARGS); ++ ++ // Emit code to discard a non-negative number of pointer-sized elements ++ // from the stack, clobbering only the sp register. ++ void Drop(int count, Condition cond = cc_always, Register reg = no_reg, ++ const Operand& op = Operand(no_reg)); ++ ++ // Trivial case of DropAndRet that utilizes the delay slot and only emits ++ // 2 instructions. ++ void DropAndRet(int drop); ++ ++ void DropAndRet(int drop, Condition cond, Register reg, const Operand& op); ++ ++ void Ld_d(Register rd, const MemOperand& rj); ++ void St_d(Register rd, const MemOperand& rj); ++ ++ void push(Register src) { ++ Add_d(sp, sp, Operand(-kPointerSize)); ++ St_d(src, MemOperand(sp, 0)); ++ } ++ void Push(Register src) { push(src); } ++ void Push(Handle handle); ++ void Push(Smi smi); ++ ++ // Push two registers. Pushes leftmost register first (to highest address). ++ void Push(Register src1, Register src2) { ++ Sub_d(sp, sp, Operand(2 * kPointerSize)); ++ St_d(src1, MemOperand(sp, 1 * kPointerSize)); ++ St_d(src2, MemOperand(sp, 0 * kPointerSize)); ++ } ++ ++ // Push three registers. Pushes leftmost register first (to highest address). ++ void Push(Register src1, Register src2, Register src3) { ++ Sub_d(sp, sp, Operand(3 * kPointerSize)); ++ St_d(src1, MemOperand(sp, 2 * kPointerSize)); ++ St_d(src2, MemOperand(sp, 1 * kPointerSize)); ++ St_d(src3, MemOperand(sp, 0 * kPointerSize)); ++ } ++ ++ // Push four registers. Pushes leftmost register first (to highest address). ++ void Push(Register src1, Register src2, Register src3, Register src4) { ++ Sub_d(sp, sp, Operand(4 * kPointerSize)); ++ St_d(src1, MemOperand(sp, 3 * kPointerSize)); ++ St_d(src2, MemOperand(sp, 2 * kPointerSize)); ++ St_d(src3, MemOperand(sp, 1 * kPointerSize)); ++ St_d(src4, MemOperand(sp, 0 * kPointerSize)); ++ } ++ ++ // Push five registers. Pushes leftmost register first (to highest address). ++ void Push(Register src1, Register src2, Register src3, Register src4, ++ Register src5) { ++ Sub_d(sp, sp, Operand(5 * kPointerSize)); ++ St_d(src1, MemOperand(sp, 4 * kPointerSize)); ++ St_d(src2, MemOperand(sp, 3 * kPointerSize)); ++ St_d(src3, MemOperand(sp, 2 * kPointerSize)); ++ St_d(src4, MemOperand(sp, 1 * kPointerSize)); ++ St_d(src5, MemOperand(sp, 0 * kPointerSize)); ++ } ++ ++ void Push(Register src, Condition cond, Register tst1, Register tst2) { ++ // Since we don't have conditional execution we use a Branch. 
++ Label skip; ++ Branch(&skip, cond, tst1, Operand(tst2)); ++ addi_d(sp, sp, -kPointerSize); ++ st_d(src, sp, 0); ++ bind(&skip); ++ } ++ ++ void SaveRegisters(RegList registers); ++ void RestoreRegisters(RegList registers); ++ ++ void CallRecordWriteStub(Register object, Register address, ++ RememberedSetAction remembered_set_action, ++ SaveFPRegsMode fp_mode); ++ void CallRecordWriteStub(Register object, Register address, ++ RememberedSetAction remembered_set_action, ++ SaveFPRegsMode fp_mode, Address wasm_target); ++ void CallEphemeronKeyBarrier(Register object, Register address, ++ SaveFPRegsMode fp_mode); ++ ++ // Push multiple registers on the stack. ++ // Registers are saved in numerical order, with higher numbered registers ++ // saved in higher memory addresses. ++ void MultiPush(RegList regs); ++ void MultiPush(RegList regs1, RegList regs2); ++ void MultiPush(RegList regs1, RegList regs2, RegList regs3); ++ void MultiPushFPU(RegList regs); ++ ++ // Calculate how much stack space (in bytes) are required to store caller ++ // registers excluding those specified in the arguments. ++ int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, ++ Register exclusion1 = no_reg, ++ Register exclusion2 = no_reg, ++ Register exclusion3 = no_reg) const; ++ ++ // Push caller saved registers on the stack, and return the number of bytes ++ // stack pointer is adjusted. ++ int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg, ++ Register exclusion2 = no_reg, ++ Register exclusion3 = no_reg); ++ // Restore caller saved registers from the stack, and return the number of ++ // bytes stack pointer is adjusted. ++ int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg, ++ Register exclusion2 = no_reg, ++ Register exclusion3 = no_reg); ++ ++ void pop(Register dst) { ++ Ld_d(dst, MemOperand(sp, 0)); ++ Add_d(sp, sp, Operand(kPointerSize)); ++ } ++ void Pop(Register dst) { pop(dst); } ++ ++ // Pop two registers. Pops rightmost register first (from lower address). ++ void Pop(Register src1, Register src2) { ++ DCHECK(src1 != src2); ++ Ld_d(src2, MemOperand(sp, 0 * kPointerSize)); ++ Ld_d(src1, MemOperand(sp, 1 * kPointerSize)); ++ Add_d(sp, sp, 2 * kPointerSize); ++ } ++ ++ // Pop three registers. Pops rightmost register first (from lower address). ++ void Pop(Register src1, Register src2, Register src3) { ++ Ld_d(src3, MemOperand(sp, 0 * kPointerSize)); ++ Ld_d(src2, MemOperand(sp, 1 * kPointerSize)); ++ Ld_d(src1, MemOperand(sp, 2 * kPointerSize)); ++ Add_d(sp, sp, 3 * kPointerSize); ++ } ++ ++ void Pop(uint32_t count = 1) { Add_d(sp, sp, Operand(count * kPointerSize)); } ++ ++ // Pops multiple values from the stack and load them in the ++ // registers specified in regs. Pop order is the opposite as in MultiPush. 
++ void MultiPop(RegList regs); ++ void MultiPop(RegList regs1, RegList regs2); ++ void MultiPop(RegList regs1, RegList regs2, RegList regs3); ++ ++ void MultiPopFPU(RegList regs); ++ ++#define DEFINE_INSTRUCTION(instr) \ ++ void instr(Register rd, Register rj, const Operand& rk); \ ++ void instr(Register rd, Register rj, Register rk) { \ ++ instr(rd, rj, Operand(rk)); \ ++ } \ ++ void instr(Register rj, Register rk, int32_t j) { instr(rj, rk, Operand(j)); } ++ ++#define DEFINE_INSTRUCTION2(instr) \ ++ void instr(Register rj, const Operand& rk); \ ++ void instr(Register rj, Register rk) { instr(rj, Operand(rk)); } \ ++ void instr(Register rj, int32_t j) { instr(rj, Operand(j)); } ++ ++ DEFINE_INSTRUCTION(Add_w) ++ DEFINE_INSTRUCTION(Add_d) ++ DEFINE_INSTRUCTION(Div_w) ++ DEFINE_INSTRUCTION(Div_wu) ++ DEFINE_INSTRUCTION(Div_du) ++ DEFINE_INSTRUCTION(Mod_w) ++ DEFINE_INSTRUCTION(Mod_wu) ++ DEFINE_INSTRUCTION(Div_d) ++ DEFINE_INSTRUCTION(Sub_w) ++ DEFINE_INSTRUCTION(Sub_d) ++ DEFINE_INSTRUCTION(Mod_d) ++ DEFINE_INSTRUCTION(Mod_du) ++ DEFINE_INSTRUCTION(Mul_w) ++ DEFINE_INSTRUCTION(Mulh_w) ++ DEFINE_INSTRUCTION(Mulh_wu) ++ DEFINE_INSTRUCTION(Mul_d) ++ DEFINE_INSTRUCTION(Mulh_d) ++ DEFINE_INSTRUCTION2(Div_w) ++ DEFINE_INSTRUCTION2(Div_d) ++ DEFINE_INSTRUCTION2(Div_wu) ++ DEFINE_INSTRUCTION2(Div_du) ++ ++ DEFINE_INSTRUCTION(And) ++ DEFINE_INSTRUCTION(Or) ++ DEFINE_INSTRUCTION(Xor) ++ DEFINE_INSTRUCTION(Nor) ++ DEFINE_INSTRUCTION2(Neg) ++ DEFINE_INSTRUCTION(Andn) ++ DEFINE_INSTRUCTION(Orn) ++ ++ DEFINE_INSTRUCTION(Slt) ++ DEFINE_INSTRUCTION(Sltu) ++ DEFINE_INSTRUCTION(Slti) ++ DEFINE_INSTRUCTION(Sltiu) ++ DEFINE_INSTRUCTION(Sle) ++ DEFINE_INSTRUCTION(Sleu) ++ DEFINE_INSTRUCTION(Sgt) ++ DEFINE_INSTRUCTION(Sgtu) ++ DEFINE_INSTRUCTION(Sge) ++ DEFINE_INSTRUCTION(Sgeu) ++ ++ DEFINE_INSTRUCTION(Rotr_w) ++ DEFINE_INSTRUCTION(Rotr_d) ++ ++#undef DEFINE_INSTRUCTION ++#undef DEFINE_INSTRUCTION2 ++#undef DEFINE_INSTRUCTION3 ++ ++ void SmiUntag(Register dst, const MemOperand& src); ++ void SmiUntag(Register dst, Register src) { ++ if (SmiValuesAre32Bits()) { ++ srai_d(dst, src, kSmiShift); ++ } else { ++ DCHECK(SmiValuesAre31Bits()); ++ srai_w(dst, src, kSmiShift); ++ } ++ } ++ ++ void SmiUntag(Register reg) { SmiUntag(reg, reg); } ++ ++ // Removes current frame and its arguments from the stack preserving ++ // the arguments and a return address pushed to the stack for the next call. ++ // Both |callee_args_count| and |caller_args_count| do not include ++ // receiver. |callee_args_count| is not modified. |caller_args_count| ++ // is trashed. ++ void PrepareForTailCall(Register callee_args_count, ++ Register caller_args_count, Register scratch0, ++ Register scratch1); ++ ++ int CalculateStackPassedWords(int num_reg_arguments, ++ int num_double_arguments); ++ ++ // Before calling a C-function from generated code, align arguments on stack ++ // and add space for the four mips argument slots. ++ // After aligning the frame, non-register arguments must be stored on the ++ // stack, after the argument-slots using helper: CFunctionArgumentOperand(). ++ // The argument count assumes all arguments are word sized. ++ // Some compilers/platforms require the stack to be aligned when calling ++ // C++ code. ++ // Needs a scratch register to do some arithmetic. This register will be ++ // trashed. 
++ void PrepareCallCFunction(int num_reg_arguments, int num_double_registers, ++ Register scratch); ++ void PrepareCallCFunction(int num_reg_arguments, Register scratch); ++ ++ // Calls a C function and cleans up the space for arguments allocated ++ // by PrepareCallCFunction. The called function is not allowed to trigger a ++ // garbage collection, since that might move the code and invalidate the ++ // return address (unless this is somehow accounted for by the called ++ // function). ++ void CallCFunction(ExternalReference function, int num_arguments); ++ void CallCFunction(Register function, int num_arguments); ++ void CallCFunction(ExternalReference function, int num_reg_arguments, ++ int num_double_arguments); ++ void CallCFunction(Register function, int num_reg_arguments, ++ int num_double_arguments); ++ void MovFromFloatResult(DoubleRegister dst); ++ void MovFromFloatParameter(DoubleRegister dst); ++ ++ // There are two ways of passing double arguments on MIPS, depending on ++ // whether soft or hard floating point ABI is used. These functions ++ // abstract parameter passing for the three different ways we call ++ // C functions from generated code. ++ void MovToFloatParameter(DoubleRegister src); ++ void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2); ++ void MovToFloatResult(DoubleRegister src); ++ ++ // See comments at the beginning of Builtins::Generate_CEntry. ++ inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); } ++ inline void PrepareCEntryFunction(const ExternalReference& ref) { ++ li(a1, ref); ++ } ++ ++ void CheckPageFlag(Register object, Register scratch, int mask, Condition cc, ++ Label* condition_met); ++#undef COND_ARGS ++ ++ // Performs a truncating conversion of a floating point number as used by ++ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. ++ // Exits with 'result' holding the answer. ++ void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result, ++ DoubleRegister double_input, StubCallMode stub_mode); ++ ++ // Conditional move. ++ void Movz(Register rd, Register rj, Register rk); ++ void Movn(Register rd, Register rj, Register rk); ++ ++ void LoadZeroIfFPUCondition(Register dest, CFRegister = FCC0); ++ void LoadZeroIfNotFPUCondition(Register dest, CFRegister = FCC0); ++ ++ void LoadZeroIfConditionNotZero(Register dest, Register condition); ++ void LoadZeroIfConditionZero(Register dest, Register condition); ++ void LoadZeroOnCondition(Register rd, Register rj, const Operand& rk, ++ Condition cond); ++ ++ void Clz_w(Register rd, Register rj); ++ void Clz_d(Register rd, Register rj); ++ void Ctz_w(Register rd, Register rj); ++ void Ctz_d(Register rd, Register rj); ++ void Popcnt_w(Register rd, Register rj); ++ void Popcnt_d(Register rd, Register rj); ++ ++ void ExtractBits(Register dest, Register source, Register pos, int size, ++ bool sign_extend = false); ++ void InsertBits(Register dest, Register source, Register pos, int size); ++ ++ void Bstrins_w(Register rk, Register rj, uint16_t msbw, uint16_t lswb); ++ void Bstrins_d(Register rk, Register rj, uint16_t msbw, uint16_t lsbw); ++ void Bstrpick_w(Register rk, Register rj, uint16_t msbw, uint16_t lsbw); ++ void Bstrpick_d(Register rk, Register rj, uint16_t msbw, uint16_t lsbw); ++ void Neg_s(FPURegister fd, FPURegister fj); ++ void Neg_d(FPURegister fd, FPURegister fk); ++ ++ // Convert single to unsigned word. 
++ void Trunc_uw_s(FPURegister fd, FPURegister fj, FPURegister scratch); ++ void Trunc_uw_s(Register rd, FPURegister fj, FPURegister scratch); ++ ++ // Change endianness ++ void ByteSwapSigned(Register dest, Register src, int operand_size); ++ void ByteSwapUnsigned(Register dest, Register src, int operand_size); ++ ++ void Ld_b(Register rd, const MemOperand& rj); ++ void Ld_bu(Register rd, const MemOperand& rj); ++ void St_b(Register rd, const MemOperand& rj); ++ ++ void Ld_h(Register rd, const MemOperand& rj); ++ void Ld_hu(Register rd, const MemOperand& rj); ++ void St_h(Register rd, const MemOperand& rj); ++ ++ void Ld_w(Register rd, const MemOperand& rj); ++ void Ld_wu(Register rd, const MemOperand& rj); ++ void St_w(Register rd, const MemOperand& rj); ++ ++ void Fld_s(FPURegister fd, const MemOperand& src); ++ void Fst_s(FPURegister fj, const MemOperand& dst); ++ ++ void Fld_d(FPURegister fd, const MemOperand& src); ++ void Fst_d(FPURegister fj, const MemOperand& dst); ++ ++ void Ll_w(Register rd, const MemOperand& rj); ++ void Sc_w(Register rd, const MemOperand& rj); ++ ++ void Ll_d(Register rd, const MemOperand& rj); ++ void Sc_d(Register rd, const MemOperand& rj); ++ ++ // These functions assume (and assert) that src1!=src2. It is permitted ++ // for the result to alias either input register. ++ void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2, ++ Label* out_of_line); ++ void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2, ++ Label* out_of_line); ++ void Float64Max(FPURegister dst, FPURegister src1, FPURegister src2, ++ Label* out_of_line); ++ void Float64Min(FPURegister dst, FPURegister src1, FPURegister src2, ++ Label* out_of_line); ++ ++ // Generate out-of-line cases for the macros above. ++ void Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2); ++ void Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2); ++ void Float64MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2); ++ void Float64MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2); ++ ++ bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; } ++ ++ void mov(Register rd, Register rj) { or_(rd, rj, zero_reg); } ++ ++ inline void Move(Register dst, Handle handle) { li(dst, handle); } ++ inline void Move(Register dst, Smi smi) { li(dst, Operand(smi)); } ++ ++ inline void Move(Register dst, Register src) { ++ if (dst != src) { ++ mov(dst, src); ++ } ++ } ++ ++ inline void FmoveLow(Register dst_low, FPURegister src) { ++ movfr2gr_s(dst_low, src); ++ } ++ ++ void FmoveLow(FPURegister dst, Register src_low); ++ ++ inline void Move(FPURegister dst, FPURegister src) { Move_d(dst, src); } ++ ++ inline void Move_d(FPURegister dst, FPURegister src) { ++ if (dst != src) { ++ fmov_d(dst, src); ++ } ++ } ++ ++ inline void Move_s(FPURegister dst, FPURegister src) { ++ if (dst != src) { ++ fmov_s(dst, src); ++ } ++ } ++ ++ void Move(FPURegister dst, float imm) { Move(dst, bit_cast(imm)); } ++ void Move(FPURegister dst, double imm) { Move(dst, bit_cast(imm)); } ++ void Move(FPURegister dst, uint32_t src); ++ void Move(FPURegister dst, uint64_t src); ++ ++ // AdddOverflow sets overflow register to a negative value if ++ // overflow occured, otherwise it is zero or positive ++ void AdddOverflow(Register dst, Register left, const Operand& right, ++ Register overflow); ++ // SubdOverflow sets overflow register to a negative value if ++ // overflow occured, otherwise it is zero or positive ++ void SubdOverflow(Register dst, Register 
left, const Operand& right, ++ Register overflow); ++ // MulOverflow sets overflow register to zero if no overflow occured ++ void MulOverflow(Register dst, Register left, const Operand& right, ++ Register overflow); ++ ++ // Number of instructions needed for calculation of switch table entry address ++ static const int kSwitchTablePrologueSize = 5; ++ ++ // GetLabelFunction must be lambda '[](size_t index) -> Label*' or a ++ // functor/function with 'Label *func(size_t index)' declaration. ++ template ++ void GenerateSwitchTable(Register index, size_t case_count, ++ Func GetLabelFunction); ++ ++ // Load an object from the root table. ++ void LoadRoot(Register destination, RootIndex index) override; ++ void LoadRoot(Register destination, RootIndex index, Condition cond, ++ Register src1, const Operand& src2); ++ ++ // If the value is a NaN, canonicalize the value, src must be nan. ++ void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src); ++ ++ // --------------------------------------------------------------------------- ++ // FPU macros. These do not handle special cases like NaN or +- inf. ++ ++ // Convert unsigned word to double. ++ void Ffint_d_uw(FPURegister fd, FPURegister fj); ++ void Ffint_d_uw(FPURegister fd, Register rj); ++ ++ // Convert unsigned long to double. ++ void Ffint_d_ul(FPURegister fd, FPURegister fj); ++ void Ffint_d_ul(FPURegister fd, Register rj); ++ ++ // Convert unsigned word to float. ++ void Ffint_s_uw(FPURegister fd, FPURegister fj); ++ void Ffint_s_uw(FPURegister fd, Register rj); ++ ++ // Convert unsigned long to float. ++ void Ffint_s_ul(FPURegister fd, FPURegister fj); ++ void Ffint_s_ul(FPURegister fd, Register rj); ++ ++ // Convert double to unsigned word. ++ void Ftintrz_uw_d(FPURegister fd, FPURegister fj, FPURegister scratch); ++ void Ftintrz_uw_d(Register rd, FPURegister fj, FPURegister scratch); ++ ++ // Convert single to unsigned word. ++ void Ftintrz_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch); ++ void Ftintrz_uw_s(Register rd, FPURegister fs, FPURegister scratch); ++ ++ // Convert double to unsigned long. ++ void Ftintrz_ul_d(FPURegister fd, FPURegister fj, FPURegister scratch, ++ Register result = no_reg); ++ void Ftintrz_ul_d(Register rd, FPURegister fj, FPURegister scratch, ++ Register result = no_reg); ++ ++ // Convert single to unsigned long. ++ void Ftintrz_ul_s(FPURegister fd, FPURegister fj, FPURegister scratch, ++ Register result = no_reg); ++ void Ftintrz_ul_s(Register rd, FPURegister fj, FPURegister scratch, ++ Register result = no_reg); ++ ++ // Round double functions ++ void Trunc_d(FPURegister fd, FPURegister fj); ++ void Round_d(FPURegister fd, FPURegister fj); ++ void Floor_d(FPURegister fd, FPURegister fj); ++ void Ceil_d(FPURegister fd, FPURegister fj); ++ ++ // Round float functions ++ void Trunc_s(FPURegister fd, FPURegister fj); ++ void Round_s(FPURegister fd, FPURegister fj); ++ void Floor_s(FPURegister fd, FPURegister fj); ++ void Ceil_s(FPURegister fd, FPURegister fj); ++ ++ // Jump the register contains a smi. ++ void JumpIfSmi(Register value, Label* smi_label, Register scratch = t7); ++ ++ void JumpIfEqual(Register a, int32_t b, Label* dest) { ++ li(kScratchReg, Operand(b)); ++ Branch(dest, eq, a, Operand(kScratchReg)); ++ } ++ ++ void JumpIfLessThan(Register a, int32_t b, Label* dest) { ++ li(kScratchReg, Operand(b)); ++ Branch(dest, lt, a, Operand(kScratchReg)); ++ } ++ ++ // Push a standard frame, consisting of ra, fp, context and JS function. 
++ void PushStandardFrame(Register function_reg); ++ ++ // Get the actual activation frame alignment for target environment. ++ static int ActivationFrameAlignment(); ++ ++ // Load Scaled Address instructions. Parameter sa (shift argument) must be ++ // between [1, 31] (inclusive). The scratch register may be clobbered. ++ void Alsl_w(Register rd, Register rj, Register rk, uint8_t sa, ++ Register scratch = t7); ++ void Alsl_d(Register rd, Register rj, Register rk, uint8_t sa, ++ Register scratch = t7); ++ ++ // Compute the start of the generated instruction stream from the current PC. ++ // This is an alternative to embedding the {CodeObject} handle as a reference. ++ void ComputeCodeStartAddress(Register dst); ++ ++ void ResetSpeculationPoisonRegister(); ++ ++ // Control-flow integrity: ++ ++ // Define a function entrypoint. This doesn't emit any code for this ++ // architecture, as control-flow integrity is not supported for it. ++ void CodeEntry() {} ++ // Define an exception handler. ++ void ExceptionHandler() {} ++ // Define an exception handler and bind a label. ++ void BindExceptionHandler(Label* label) { bind(label); } ++ ++ protected: ++ inline Register GetRkAsRegisterHelper(const Operand& rk, Register scratch); ++ inline int32_t GetOffset(Label* L, OffsetSize bits); ++ ++ private: ++ bool has_double_zero_reg_set_ = false; ++ ++ // Performs a truncating conversion of a floating point number as used by ++ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it ++ // succeeds, otherwise falls through if result is saturated. On return ++ // 'result' either holds answer, or is clobbered on fall through. ++ void TryInlineTruncateDoubleToI(Register result, DoubleRegister input, ++ Label* done); ++ ++ bool BranchShortOrFallback(Label* L, Condition cond, Register rj, ++ const Operand& rk, bool need_link); ++ ++ // f32 or f64 ++ void CompareF(FPURegister cmp1, FPURegister cmp2, FPUCondition cc, ++ CFRegister cd, bool f32 = true); ++ ++ void CompareIsNanF(FPURegister cmp1, FPURegister cmp2, CFRegister cd, ++ bool f32 = true); ++ ++ void CallCFunctionHelper(Register function, int num_reg_arguments, ++ int num_double_arguments); ++ ++ void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode); ++ ++ void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode); ++ ++ // Push a fixed frame, consisting of ra, fp. ++ void PushCommonFrame(Register marker_reg = no_reg); ++ ++ void CallRecordWriteStub(Register object, Register address, ++ RememberedSetAction remembered_set_action, ++ SaveFPRegsMode fp_mode, Handle code_target, ++ Address wasm_target); ++}; ++ ++// MacroAssembler implements a collection of frequently used macros. ++class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { ++ public: ++ using TurboAssembler::TurboAssembler; ++ ++ bool IsNear(Label* L, Condition cond, int rs_reg); ++ ++ // Swap two registers. If the scratch register is omitted then a slightly ++ // less efficient form using xor instead of mov is emitted. ++ void Swap(Register reg1, Register reg2, Register scratch = no_reg); ++ ++ void PushRoot(RootIndex index) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ LoadRoot(scratch, index); ++ Push(scratch); ++ } ++ ++ // Compare the object in a register to a value and jump if they are equal. 
++ void JumpIfRoot(Register with, RootIndex index, Label* if_equal) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ LoadRoot(scratch, index); ++ Branch(if_equal, eq, with, Operand(scratch)); ++ } ++ ++ // Compare the object in a register to a value and jump if they are not equal. ++ void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ LoadRoot(scratch, index); ++ Branch(if_not_equal, ne, with, Operand(scratch)); ++ } ++ ++ // Checks if value is in range [lower_limit, higher_limit] using a single ++ // comparison. ++ void JumpIfIsInRange(Register value, unsigned lower_limit, ++ unsigned higher_limit, Label* on_in_range); ++ ++ // --------------------------------------------------------------------------- ++ // GC Support ++ ++ // Notify the garbage collector that we wrote a pointer into an object. ++ // |object| is the object being stored into, |value| is the object being ++ // stored. value and scratch registers are clobbered by the operation. ++ // The offset is the offset from the start of the object, not the offset from ++ // the tagged HeapObject pointer. For use with FieldOperand(reg, off). ++ void RecordWriteField( ++ Register object, int offset, Register value, Register scratch, ++ RAStatus ra_status, SaveFPRegsMode save_fp, ++ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, ++ SmiCheck smi_check = INLINE_SMI_CHECK); ++ ++ // For a given |object| notify the garbage collector that the slot |address| ++ // has been written. |value| is the object being stored. The value and ++ // address registers are clobbered by the operation. ++ void RecordWrite( ++ Register object, Register address, Register value, RAStatus ra_status, ++ SaveFPRegsMode save_fp, ++ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, ++ SmiCheck smi_check = INLINE_SMI_CHECK); ++ ++ void Pref(int32_t hint, const MemOperand& rs); ++ ++ // --------------------------------------------------------------------------- ++ // Pseudo-instructions. ++ ++ void LoadWordPair(Register rd, const MemOperand& rj, Register scratch); ++ void StoreWordPair(Register rd, const MemOperand& rj, Register scratch); ++ ++ // Convert double to unsigned long. ++ void Ftintrz_l_ud(FPURegister fd, FPURegister fj, FPURegister scratch); ++ ++ void Ftintrz_l_d(FPURegister fd, FPURegister fj); ++ void Ftintrne_l_d(FPURegister fd, FPURegister fj); ++ void Ftintrm_l_d(FPURegister fd, FPURegister fj); ++ void Ftintrp_l_d(FPURegister fd, FPURegister fj); ++ ++ void Ftintrz_w_d(FPURegister fd, FPURegister fj); ++ void Ftintrne_w_d(FPURegister fd, FPURegister fj); ++ void Ftintrm_w_d(FPURegister fd, FPURegister fj); ++ void Ftintrp_w_d(FPURegister fd, FPURegister fj); ++ ++ void Madd_s(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk); ++ void Madd_d(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk); ++ void Msub_s(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk); ++ void Msub_d(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk); ++ ++ // Truncates a double using a specific rounding mode, and writes the value ++ // to the result register. ++ // The except_flag will contain any exceptions caused by the instruction. ++ // If check_inexact is kDontCheckForInexactConversion, then the inexact ++ // exception is masked. 
++ void EmitFPUTruncate( ++ FPURoundingMode rounding_mode, Register result, ++ DoubleRegister double_input, Register scratch, ++ DoubleRegister double_scratch, Register except_flag, ++ CheckForInexactConversion check_inexact = kDontCheckForInexactConversion); ++ ++ // Enter exit frame. ++ // argc - argument count to be dropped by LeaveExitFrame. ++ // save_doubles - saves FPU registers on stack, currently disabled. ++ // stack_space - extra stack space. ++ void EnterExitFrame(bool save_doubles, int stack_space = 0, ++ StackFrame::Type frame_type = StackFrame::EXIT); ++ ++ // Leave the current exit frame. ++ void LeaveExitFrame(bool save_doubles, Register arg_count, ++ bool do_return = NO_EMIT_RETURN, ++ bool argument_count_is_length = false); ++ ++ void LoadMap(Register destination, Register object); ++ ++ // Make sure the stack is aligned. Only emits code in debug mode. ++ void AssertStackIsAligned(); ++ ++ // Load the global proxy from the current context. ++ void LoadGlobalProxy(Register dst) { ++ LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst); ++ } ++ ++ void LoadNativeContextSlot(int index, Register dst); ++ ++ // Load the initial map from the global function. The registers ++ // function and map can be the same, function is then overwritten. ++ void LoadGlobalFunctionInitialMap(Register function, Register map, ++ Register scratch); ++ ++ // ------------------------------------------------------------------------- ++ // JavaScript invokes. ++ ++ // Invoke the JavaScript function code by either calling or jumping. ++ void InvokeFunctionCode(Register function, Register new_target, ++ Register expected_parameter_count, ++ Register actual_parameter_count, InvokeFlag flag); ++ ++ // On function call, call into the debugger if necessary. ++ void CheckDebugHook(Register fun, Register new_target, ++ Register expected_parameter_count, ++ Register actual_parameter_count); ++ ++ // Invoke the JavaScript function in the given register. Changes the ++ // current context to the context in the function before invoking. ++ void InvokeFunctionWithNewTarget(Register function, Register new_target, ++ Register actual_parameter_count, ++ InvokeFlag flag); ++ void InvokeFunction(Register function, Register expected_parameter_count, ++ Register actual_parameter_count, InvokeFlag flag); ++ ++ // Frame restart support. ++ void MaybeDropFrames(); ++ ++ // Exception handling. ++ ++ // Push a new stack handler and link into stack handler chain. ++ void PushStackHandler(); ++ ++ // Unlink the stack handler on top of the stack from the stack handler chain. ++ // Must preserve the result register. ++ void PopStackHandler(); ++ ++ // ------------------------------------------------------------------------- ++ // Support functions. ++ ++ void GetObjectType(Register function, Register map, Register type_reg); ++ ++ // ------------------------------------------------------------------------- ++ // Runtime calls. ++ ++ // Call a runtime routine. ++ void CallRuntime(const Runtime::Function* f, int num_arguments, ++ SaveFPRegsMode save_doubles = kDontSaveFPRegs); ++ ++ // Convenience function: Same as above, but takes the fid instead. ++ void CallRuntime(Runtime::FunctionId fid, ++ SaveFPRegsMode save_doubles = kDontSaveFPRegs) { ++ const Runtime::Function* function = Runtime::FunctionForId(fid); ++ CallRuntime(function, function->nargs, save_doubles); ++ } ++ ++ // Convenience function: Same as above, but takes the fid instead. 
++  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
++                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
++    CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
++  }
++
++  // Convenience function: tail call a runtime routine (jump).
++  void TailCallRuntime(Runtime::FunctionId fid);
++
++  // Jump to the builtin routine.
++  void JumpToExternalReference(const ExternalReference& builtin,
++                               bool builtin_exit_frame = false);
++
++  // Generates a trampoline to jump to the off-heap instruction stream.
++  void JumpToInstructionStream(Address entry);
++
++  // ---------------------------------------------------------------------------
++  // In-place weak references.
++  void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
++
++  // -------------------------------------------------------------------------
++  // StatsCounter support.
++
++  void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
++                        Register scratch2);
++  void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
++                        Register scratch2);
++
++  // -------------------------------------------------------------------------
++  // Smi utilities.
++
++  void SmiTag(Register dst, Register src) {
++    STATIC_ASSERT(kSmiTag == 0);
++    if (SmiValuesAre32Bits()) {
++      slli_d(dst, src, 32);
++    } else {
++      DCHECK(SmiValuesAre31Bits());
++      add_w(dst, src, src);
++    }
++  }
++
++  void SmiTag(Register reg) { SmiTag(reg, reg); }
++
++  // Left-shifted from int32 equivalent of Smi.
++  void SmiScale(Register dst, Register src, int scale) {
++    if (SmiValuesAre32Bits()) {
++      // The int portion is upper 32-bits of 64-bit word.
++      srai_d(dst, src, kSmiShift - scale);
++    } else {
++      DCHECK(SmiValuesAre31Bits());
++      DCHECK_GE(scale, kSmiTagSize);
++      slli_w(dst, src, scale - kSmiTagSize);
++    }
++  }
++
++  // Test if the register contains a smi.
++  inline void SmiTst(Register value, Register scratch) {
++    And(scratch, value, Operand(kSmiTagMask));
++  }
++
++  // Jump if the register contains a non-smi.
++  void JumpIfNotSmi(Register value, Label* not_smi_label, Register scratch);
++
++  // Abort execution if argument is a smi, enabled via --debug-code.
++  void AssertNotSmi(Register object);
++  void AssertSmi(Register object);
++
++  // Abort execution if argument is not a Constructor, enabled via --debug-code.
++  void AssertConstructor(Register object);
++
++  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
++  void AssertFunction(Register object);
++
++  // Abort execution if argument is not a JSBoundFunction,
++  // enabled via --debug-code.
++  void AssertBoundFunction(Register object);
++
++  // Abort execution if argument is not a JSGeneratorObject (or subclass),
++  // enabled via --debug-code.
++  void AssertGeneratorObject(Register object);
++
++  // Abort execution if argument is not undefined or an AllocationSite, enabled
++  // via --debug-code.
++  void AssertUndefinedOrAllocationSite(Register object, Register scratch);
++
++  template <typename Field>
++  void DecodeField(Register dst, Register src) {
++    Bstrpick_d(dst, src, Field::kShift + Field::kSize - 1, Field::kShift);
++  }
++
++  template <typename Field>
++  void DecodeField(Register reg) {
++    DecodeField<Field>(reg, reg);
++  }
++
++ private:
++  // Helper functions for generating invokes.
++  void InvokePrologue(Register expected_parameter_count,
++                      Register actual_parameter_count, Label* done,
++                      InvokeFlag flag);
++
++  // Compute memory operands for safepoint stack slots.
++ static int SafepointRegisterStackIndex(int reg_code); ++ ++ // Needs access to SafepointRegisterStackIndex for compiled frame ++ // traversal. ++ friend class StandardFrame; ++ ++ DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler); ++}; ++ ++template ++void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count, ++ Func GetLabelFunction) { ++ // Ensure that dd-ed labels following this instruction use 8 bytes aligned ++ // addresses. ++ BlockTrampolinePoolFor(static_cast(case_count) * 2 + ++ kSwitchTablePrologueSize); ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ Align(8); // next is 4 instrs. ++ pcaddi(scratch, 4); ++ // alsl_d will do sa ++ alsl_d(scratch, index, scratch, kPointerSizeLog2); ++ Ld_d(scratch, MemOperand(scratch, 0)); ++ jirl(zero_reg, scratch, 0); ++ for (size_t index = 0; index < case_count; ++index) { ++ dd(GetLabelFunction(index)); ++ } ++} ++ ++#define ACCESS_MASM(masm) masm-> ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_CODEGEN_LA64_MACRO_ASSEMBLER_LA64_H_ +diff --git a/src/3rdparty/chromium/v8/src/codegen/la64/register-la64.h b/src/3rdparty/chromium/v8/src/codegen/la64/register-la64.h +new file mode 100644 +index 0000000000..f2025e28e5 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/codegen/la64/register-la64.h +@@ -0,0 +1,328 @@ ++// Copyright 2018 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#ifndef V8_CODEGEN_LA64_REGISTER_LA64_H_ ++#define V8_CODEGEN_LA64_REGISTER_LA64_H_ ++ ++#include "src/codegen/la64/constants-la64.h" ++#include "src/codegen/register.h" ++#include "src/codegen/reglist.h" ++ ++namespace v8 { ++namespace internal { ++ ++// clang-format off ++#define GENERAL_REGISTERS(V) \ ++ V(zero_reg) V(ra) V(gp) V(sp) \ ++ V(a0) V(a1) V(a2) V(a3) V(a4) V(a5) V(a6) V(a7) \ ++ V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6) V(t7) V(t8) \ ++ V(tp) V(fp) \ ++ V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(s8) \ ++ ++#define ALLOCATABLE_GENERAL_REGISTERS(V) \ ++ V(a0) V(a1) V(a2) V(a3) V(a4) V(a5) V(a6) V(a7) \ ++ V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(s7) ++ ++#define DOUBLE_REGISTERS(V) \ ++ V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \ ++ V(f8) V(f9) V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) \ ++ V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \ ++ V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31) ++ ++#define FLOAT_REGISTERS DOUBLE_REGISTERS ++#define SIMD128_REGISTERS(V) \ ++ V(w0) V(w1) V(w2) V(w3) V(w4) V(w5) V(w6) V(w7) \ ++ V(w8) V(w9) V(w10) V(w11) V(w12) V(w13) V(w14) V(w15) \ ++ V(w16) V(w17) V(w18) V(w19) V(w20) V(w21) V(w22) V(w23) \ ++ V(w24) V(w25) V(w26) V(w27) V(w28) V(w29) V(w30) V(w31) ++ ++#define ALLOCATABLE_DOUBLE_REGISTERS(V) \ ++ V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \ ++ V(f8) V(f9) V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) V(f16) \ ++ V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) ++// clang-format on ++ ++// Note that the bit values must match those used in actual instruction ++// encoding. 
++const int kNumRegs = 32; ++ ++const RegList kJSCallerSaved = 1 << 4 | // a0 ++ 1 << 5 | // a1 ++ 1 << 6 | // a2 ++ 1 << 7 | // a3 ++ 1 << 8 | // a4 ++ 1 << 9 | // a5 ++ 1 << 10 | // a6 ++ 1 << 11 | // a7 ++ 1 << 12 | // t0 ++ 1 << 13 | // t1 ++ 1 << 14 | // t2 ++ 1 << 15 | // t3 ++ 1 << 16 | // t4 ++ 1 << 17 | // t5 ++ 1 << 20; // t8 ++ ++const int kNumJSCallerSaved = 15; ++ ++// Callee-saved registers preserved when switching from C to JavaScript. ++const RegList kCalleeSaved = 1 << 22 | // fp ++ 1 << 23 | // s0 ++ 1 << 24 | // s1 ++ 1 << 25 | // s2 ++ 1 << 26 | // s3 ++ 1 << 27 | // s4 ++ 1 << 28 | // s5 ++ 1 << 29 | // s6 (roots in Javascript code) ++ 1 << 30 | // s7 (cp in Javascript code) ++ 1 << 31; // s8 ++ ++const int kNumCalleeSaved = 10; ++ ++const RegList kCalleeSavedFPU = 1 << 24 | // f24 ++ 1 << 25 | // f25 ++ 1 << 26 | // f26 ++ 1 << 27 | // f27 ++ 1 << 28 | // f28 ++ 1 << 29 | // f29 ++ 1 << 30 | // f30 ++ 1 << 31; // f31 ++ ++const int kNumCalleeSavedFPU = 8; ++ ++const RegList kCallerSavedFPU = 1 << 0 | // f0 ++ 1 << 1 | // f1 ++ 1 << 2 | // f2 ++ 1 << 3 | // f3 ++ 1 << 4 | // f4 ++ 1 << 5 | // f5 ++ 1 << 6 | // f6 ++ 1 << 7 | // f7 ++ 1 << 8 | // f8 ++ 1 << 9 | // f9 ++ 1 << 10 | // f10 ++ 1 << 11 | // f11 ++ 1 << 12 | // f12 ++ 1 << 13 | // f13 ++ 1 << 14 | // f14 ++ 1 << 15 | // f15 ++ 1 << 16 | // f16 ++ 1 << 17 | // f17 ++ 1 << 18 | // f18 ++ 1 << 19 | // f19 ++ 1 << 20 | // f20 ++ 1 << 21 | // f21 ++ 1 << 22 | // f22 ++ 1 << 23; // f23 ++ ++// Number of registers for which space is reserved in safepoints. Must be a ++// multiple of 8. ++const int kNumSafepointRegisters = 32; ++ ++// Define the list of registers actually saved at safepoints. ++// Note that the number of saved registers may be smaller than the reserved ++// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters. ++const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved; ++const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved; ++ ++const int kUndefIndex = -1; ++// Map with indexes on stack that corresponds to codes of saved registers. ++const int kSafepointRegisterStackIndexMap[kNumRegs] = {kUndefIndex, // zero_reg ++ kUndefIndex, // ra ++ kUndefIndex, // gp ++ kUndefIndex, // sp ++ 0, // a0 ++ 1, // a1 ++ 2, // a2 ++ 3, // a3 ++ 4, // a4 ++ 5, // a5 ++ 6, // a6 ++ 7, // a7 ++ 8, // t0 ++ 9, // t1 ++ 10, // t2 ++ 11, // t3 ++ 12, // t4 ++ 13, // t5 ++ kUndefIndex, // t6 ++ kUndefIndex, // t7 ++ 14, // t8 ++ kUndefIndex, // tp ++ 15, // fp ++ 16, // s0 ++ 17, // s1 ++ 28, // s2 ++ 29, // s3 ++ 20, // s4 ++ 21, // s5 ++ 22, // s6 ++ 23, // s7 ++ 24}; // s8 ++ ++// CPU Registers. ++// ++// 1) We would prefer to use an enum, but enum values are assignment- ++// compatible with int, which has caused code-generation bugs. ++// ++// 2) We would prefer to use a class instead of a struct but we don't like ++// the register initialization to depend on the particular initialization ++// order (which appears to be different on OS X, Linux, and Windows for the ++// installed versions of C++ we tried). Using a struct permits C-style ++// "initialization". Also, the Register objects cannot be const as this ++// forces initialization stubs in MSVC, making us dependent on initialization ++// order. ++// ++// 3) By not using an enum, we are possibly preventing the compiler from ++// doing certain constant folds, which may significantly reduce the ++// code generated for some assembly instructions (because they boil down ++// to a few constants). 
If this is a problem, we could change the code ++// such that we use an enum in optimized mode, and the struct in debug ++// mode. This way we get the compile-time error checking in debug mode ++// and best performance in optimized code. ++ ++// ----------------------------------------------------------------------------- ++// Implementation of Register and FPURegister. ++ ++enum RegisterCode { ++#define REGISTER_CODE(R) kRegCode_##R, ++ GENERAL_REGISTERS(REGISTER_CODE) ++#undef REGISTER_CODE ++ kRegAfterLast ++}; ++ ++class Register : public RegisterBase { ++ public: ++ static constexpr int kMantissaOffset = 0; ++ static constexpr int kExponentOffset = 4; ++ ++ private: ++ friend class RegisterBase; ++ explicit constexpr Register(int code) : RegisterBase(code) {} ++}; ++ ++// s7: context register ++// s3: scratch register ++// s4: scratch register 2 ++#define DECLARE_REGISTER(R) \ ++ constexpr Register R = Register::from_code(kRegCode_##R); ++GENERAL_REGISTERS(DECLARE_REGISTER) ++#undef DECLARE_REGISTER ++ ++constexpr Register no_reg = Register::no_reg(); ++ ++int ToNumber(Register reg); ++ ++Register ToRegister(int num); ++ ++constexpr bool kPadArguments = false; ++constexpr bool kSimpleFPAliasing = true; ++constexpr bool kSimdMaskRegisters = false; ++ ++enum DoubleRegisterCode { ++#define REGISTER_CODE(R) kDoubleCode_##R, ++ DOUBLE_REGISTERS(REGISTER_CODE) ++#undef REGISTER_CODE ++ kDoubleAfterLast ++}; ++ ++// Coprocessor register. ++class FPURegister : public RegisterBase { ++ public: ++ FPURegister low() const { ++ // TODO(plind): Create DCHECK for FR=0 mode. This usage suspect for FR=1. ++ // Find low reg of a Double-reg pair, which is the reg itself. ++ DCHECK_EQ(code() % 2, 0); // Specified Double reg must be even. ++ return FPURegister::from_code(code()); ++ } ++ ++ private: ++ friend class RegisterBase; ++ explicit constexpr FPURegister(int code) : RegisterBase(code) {} ++}; ++ ++enum CFRegister { FCC0, FCC1, FCC2, FCC3, FCC4, FCC5, FCC6, FCC7 }; ++ ++using FloatRegister = FPURegister; ++ ++using DoubleRegister = FPURegister; ++ ++// TODO here only for build success ++using Simd128Register = FPURegister; ++ ++#define DECLARE_DOUBLE_REGISTER(R) \ ++ constexpr DoubleRegister R = DoubleRegister::from_code(kDoubleCode_##R); ++DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER) ++#undef DECLARE_DOUBLE_REGISTER ++ ++constexpr DoubleRegister no_dreg = DoubleRegister::no_reg(); ++ ++// Register aliases. ++// cp is assumed to be a callee saved register. ++constexpr Register kRootRegister = s6; ++constexpr Register cp = s7; ++constexpr Register kScratchReg = s3; ++constexpr Register kScratchReg2 = s4; ++constexpr DoubleRegister kScratchDoubleReg = f30; ++// FPU zero reg is often used to hold 0.0, but it's not hardwired to 0.0. ++constexpr DoubleRegister kDoubleRegZero = f28; ++ ++// FPU (coprocessor 1) control registers. ++// Currently only FCSR0 is implemented. ++// TODO fscr0 fcsr1 fcsr2 fscsr3 ++struct FPUControlRegister { ++ bool is_valid() const { return reg_code == kFCSRRegister; } ++ bool is(FPUControlRegister creg) const { return reg_code == creg.reg_code; } ++ int code() const { ++ DCHECK(is_valid()); ++ return reg_code; ++ } ++ int bit() const { ++ DCHECK(is_valid()); ++ return 1 << reg_code; ++ } ++ void setcode(int f) { ++ reg_code = f; ++ DCHECK(is_valid()); ++ } ++ // Unfortunately we can't make this private in a struct. 
++ int reg_code; ++}; ++ ++constexpr FPUControlRegister no_fpucreg = {kInvalidFPUControlRegister}; ++constexpr FPUControlRegister FCSR = {kFCSRRegister}; ++ ++// Define {RegisterName} methods for the register types. ++DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS) ++DEFINE_REGISTER_NAMES(FPURegister, DOUBLE_REGISTERS) ++ ++// Give alias names to registers for calling conventions. ++constexpr Register kReturnRegister0 = a0; ++constexpr Register kReturnRegister1 = a1; ++constexpr Register kReturnRegister2 = a2; ++constexpr Register kJSFunctionRegister = a1; ++constexpr Register kContextRegister = s7; ++constexpr Register kAllocateSizeRegister = a0; ++constexpr Register kSpeculationPoisonRegister = t3; ++constexpr Register kInterpreterAccumulatorRegister = a0; ++constexpr Register kInterpreterBytecodeOffsetRegister = t0; ++constexpr Register kInterpreterBytecodeArrayRegister = t1; ++constexpr Register kInterpreterDispatchTableRegister = t2; ++ ++constexpr Register kJavaScriptCallArgCountRegister = a0; ++constexpr Register kJavaScriptCallCodeStartRegister = a2; ++constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister; ++constexpr Register kJavaScriptCallNewTargetRegister = a3; ++constexpr Register kJavaScriptCallExtraArg1Register = a2; ++ ++constexpr Register kOffHeapTrampolineRegister = t7; ++constexpr Register kRuntimeCallFunctionRegister = a1; ++constexpr Register kRuntimeCallArgCountRegister = a0; ++constexpr Register kRuntimeCallArgvRegister = a2; ++constexpr Register kWasmInstanceRegister = a0; ++constexpr Register kWasmCompileLazyFuncIndexRegister = t0; ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_CODEGEN_LA64_REGISTER_LA64_H_ +diff --git a/src/3rdparty/chromium/v8/src/codegen/macro-assembler.h b/src/3rdparty/chromium/v8/src/codegen/macro-assembler.h +index 01175e585e..9c2fa9e310 100644 +--- a/src/3rdparty/chromium/v8/src/codegen/macro-assembler.h ++++ b/src/3rdparty/chromium/v8/src/codegen/macro-assembler.h +@@ -49,6 +49,9 @@ enum AllocationFlags { + #elif V8_TARGET_ARCH_MIPS64 + #include "src/codegen/mips64/constants-mips64.h" + #include "src/codegen/mips64/macro-assembler-mips64.h" ++#elif V8_TARGET_ARCH_LA64 ++#include "src/codegen/la64/constants-la64.h" ++#include "src/codegen/la64/macro-assembler-la64.h" + #elif V8_TARGET_ARCH_S390 + #include "src/codegen/s390/constants-s390.h" + #include "src/codegen/s390/macro-assembler-s390.h" +diff --git a/src/3rdparty/chromium/v8/src/codegen/mips64/assembler-mips64.cc b/src/3rdparty/chromium/v8/src/codegen/mips64/assembler-mips64.cc +index 37a05585c4..cafcfef81d 100644 +--- a/src/3rdparty/chromium/v8/src/codegen/mips64/assembler-mips64.cc ++++ b/src/3rdparty/chromium/v8/src/codegen/mips64/assembler-mips64.cc +@@ -996,7 +996,7 @@ void Assembler::next(Label* L, bool is_internal) { + } + + bool Assembler::is_near(Label* L) { +- DCHECK(L->is_bound()); ++ if (L == nullptr || !L->is_bound()) return true; + return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize; + } + +diff --git a/src/3rdparty/chromium/v8/src/codegen/mips64/assembler-mips64.h b/src/3rdparty/chromium/v8/src/codegen/mips64/assembler-mips64.h +index f70e46f81b..c585840a7a 100644 +--- a/src/3rdparty/chromium/v8/src/codegen/mips64/assembler-mips64.h ++++ b/src/3rdparty/chromium/v8/src/codegen/mips64/assembler-mips64.h +@@ -1864,6 +1864,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { + // instruction. 
We use this information to trigger different mode of + // branch instruction generation, where we use jump instructions rather + // than regular branch instructions. ++ // TODO can this be optimied?????? + bool trampoline_emitted_; + static constexpr int kInvalidSlotPos = -1; + +diff --git a/src/3rdparty/chromium/v8/src/codegen/register-arch.h b/src/3rdparty/chromium/v8/src/codegen/register-arch.h +index 21a7233016..5ee6c4683d 100644 +--- a/src/3rdparty/chromium/v8/src/codegen/register-arch.h ++++ b/src/3rdparty/chromium/v8/src/codegen/register-arch.h +@@ -22,6 +22,8 @@ + #include "src/codegen/mips/register-mips.h" + #elif V8_TARGET_ARCH_MIPS64 + #include "src/codegen/mips64/register-mips64.h" ++#elif V8_TARGET_ARCH_LA64 ++#include "src/codegen/la64/register-la64.h" + #elif V8_TARGET_ARCH_S390 + #include "src/codegen/s390/register-s390.h" + #else +diff --git a/src/3rdparty/chromium/v8/src/codegen/register-configuration.cc b/src/3rdparty/chromium/v8/src/codegen/register-configuration.cc +index 5752b46339..2c4bb1426a 100644 +--- a/src/3rdparty/chromium/v8/src/codegen/register-configuration.cc ++++ b/src/3rdparty/chromium/v8/src/codegen/register-configuration.cc +@@ -58,6 +58,8 @@ static int get_num_allocatable_double_registers() { + kMaxAllocatableDoubleRegisterCount; + #elif V8_TARGET_ARCH_MIPS64 + kMaxAllocatableDoubleRegisterCount; ++#elif V8_TARGET_ARCH_LA64 ++ kMaxAllocatableDoubleRegisterCount; + #elif V8_TARGET_ARCH_PPC + kMaxAllocatableDoubleRegisterCount; + #elif V8_TARGET_ARCH_PPC64 +diff --git a/src/3rdparty/chromium/v8/src/codegen/reloc-info.cc b/src/3rdparty/chromium/v8/src/codegen/reloc-info.cc +index 9f07978932..ccbd7a355c 100644 +--- a/src/3rdparty/chromium/v8/src/codegen/reloc-info.cc ++++ b/src/3rdparty/chromium/v8/src/codegen/reloc-info.cc +@@ -329,7 +329,8 @@ bool RelocInfo::OffHeapTargetIsCodedSpecially() { + return false; + #elif defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_MIPS) || \ + defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC) || \ +- defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_S390) ++ defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_S390) || \ ++ defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_LA64) + return true; + #endif + } +diff --git a/src/3rdparty/chromium/v8/src/common/globals.h b/src/3rdparty/chromium/v8/src/common/globals.h +index c79b3b633c..05078cb3f2 100644 +--- a/src/3rdparty/chromium/v8/src/common/globals.h ++++ b/src/3rdparty/chromium/v8/src/common/globals.h +@@ -58,6 +58,9 @@ constexpr int GB = MB * 1024; + #if (V8_TARGET_ARCH_S390 && !V8_HOST_ARCH_S390) + #define USE_SIMULATOR 1 + #endif ++#if (V8_TARGET_ARCH_LA64 && !V8_HOST_ARCH_LA64) ++#define USE_SIMULATOR 1 ++#endif + #endif + + // Determine whether the architecture uses an embedded constant pool +diff --git a/src/3rdparty/chromium/v8/src/compiler/backend/instruction-codes.h b/src/3rdparty/chromium/v8/src/compiler/backend/instruction-codes.h +index 84d5d249b8..353594436e 100644 +--- a/src/3rdparty/chromium/v8/src/compiler/backend/instruction-codes.h ++++ b/src/3rdparty/chromium/v8/src/compiler/backend/instruction-codes.h +@@ -17,6 +17,8 @@ + #include "src/compiler/backend/mips/instruction-codes-mips.h" + #elif V8_TARGET_ARCH_MIPS64 + #include "src/compiler/backend/mips64/instruction-codes-mips64.h" ++#elif V8_TARGET_ARCH_LA64 ++#include "src/compiler/backend/la64/instruction-codes-la64.h" + #elif V8_TARGET_ARCH_X64 + #include "src/compiler/backend/x64/instruction-codes-x64.h" + #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 +diff --git 
a/src/3rdparty/chromium/v8/src/compiler/backend/instruction-selector.cc b/src/3rdparty/chromium/v8/src/compiler/backend/instruction-selector.cc +index 7d72dbbf2d..628ba0e7c5 100644 +--- a/src/3rdparty/chromium/v8/src/compiler/backend/instruction-selector.cc ++++ b/src/3rdparty/chromium/v8/src/compiler/backend/instruction-selector.cc +@@ -2573,7 +2573,7 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) { + #endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS + + #if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \ +- !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64 ++ !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_LA64 + void InstructionSelector::VisitWord64AtomicLoad(Node* node) { UNIMPLEMENTED(); } + + void InstructionSelector::VisitWord64AtomicStore(Node* node) { +@@ -2598,7 +2598,8 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) { + UNIMPLEMENTED(); + } + #endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_PPC64 +- // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390 ++ // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390 && ++ // !V8_TARGET_ARCH_LA64 + + #if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM + // This is only needed on 32-bit to split the 64-bit value into two operands. +diff --git a/src/3rdparty/chromium/v8/src/compiler/backend/la64/code-generator-la64.cc b/src/3rdparty/chromium/v8/src/compiler/backend/la64/code-generator-la64.cc +new file mode 100644 +index 0000000000..29bfffb5f6 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/compiler/backend/la64/code-generator-la64.cc +@@ -0,0 +1,2847 @@ ++// Copyright 2014 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#include "src/codegen/assembler-inl.h" ++#include "src/codegen/callable.h" ++#include "src/codegen/la64/constants-la64.h" ++#include "src/codegen/macro-assembler.h" ++#include "src/codegen/optimized-compilation-info.h" ++#include "src/compiler/backend/code-generator-impl.h" ++#include "src/compiler/backend/code-generator.h" ++#include "src/compiler/backend/gap-resolver.h" ++#include "src/compiler/node-matchers.h" ++#include "src/compiler/osr.h" ++#include "src/heap/heap-inl.h" // crbug.com/v8/8499 ++#include "src/wasm/wasm-code-manager.h" ++ ++namespace v8 { ++namespace internal { ++namespace compiler { ++ ++#define __ tasm()-> ++ ++// TODO(plind): consider renaming these macros. ++#define TRACE_MSG(msg) \ ++ PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \ ++ __LINE__) ++ ++#define TRACE_UNIMPL() \ ++ PrintF("UNIMPLEMENTED code_generator_la64: %s at line %d\n", __FUNCTION__, \ ++ __LINE__) ++ ++// Adds La64-specific methods to convert InstructionOperands. ++class La64OperandConverter final : public InstructionOperandConverter { ++ public: ++ La64OperandConverter(CodeGenerator* gen, Instruction* instr) ++ : InstructionOperandConverter(gen, instr) {} ++ ++ FloatRegister OutputSingleRegister(size_t index = 0) { ++ return ToSingleRegister(instr_->OutputAt(index)); ++ } ++ ++ FloatRegister InputSingleRegister(size_t index) { ++ return ToSingleRegister(instr_->InputAt(index)); ++ } ++ ++ FloatRegister ToSingleRegister(InstructionOperand* op) { ++ // Single (Float) and Double register namespace is same on LA64, ++ // both are typedefs of FPURegister. 
++ return ToDoubleRegister(op); ++ } ++ ++ Register InputOrZeroRegister(size_t index) { ++ if (instr_->InputAt(index)->IsImmediate()) { ++ DCHECK_EQ(0, InputInt32(index)); ++ return zero_reg; ++ } ++ return InputRegister(index); ++ } ++ ++ DoubleRegister InputOrZeroDoubleRegister(size_t index) { ++ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero; ++ ++ return InputDoubleRegister(index); ++ } ++ ++ DoubleRegister InputOrZeroSingleRegister(size_t index) { ++ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero; ++ ++ return InputSingleRegister(index); ++ } ++ ++ Operand InputImmediate(size_t index) { ++ Constant constant = ToConstant(instr_->InputAt(index)); ++ switch (constant.type()) { ++ case Constant::kInt32: ++ return Operand(constant.ToInt32()); ++ case Constant::kInt64: ++ return Operand(constant.ToInt64()); ++ case Constant::kFloat32: ++ return Operand::EmbeddedNumber(constant.ToFloat32()); ++ case Constant::kFloat64: ++ return Operand::EmbeddedNumber(constant.ToFloat64().value()); ++ case Constant::kExternalReference: ++ case Constant::kCompressedHeapObject: ++ case Constant::kHeapObject: ++ // TODO(plind): Maybe we should handle ExtRef & HeapObj here? ++ // maybe not done on arm due to const pool ?? ++ break; ++ case Constant::kDelayedStringConstant: ++ return Operand::EmbeddedStringConstant( ++ constant.ToDelayedStringConstant()); ++ case Constant::kRpoNumber: ++ UNREACHABLE(); // TODO(titzer): RPO immediates on la64? ++ break; ++ } ++ UNREACHABLE(); ++ } ++ ++ Operand InputOperand(size_t index) { ++ InstructionOperand* op = instr_->InputAt(index); ++ if (op->IsRegister()) { ++ return Operand(ToRegister(op)); ++ } ++ return InputImmediate(index); ++ } ++ ++ MemOperand MemoryOperand(size_t* first_index) { ++ const size_t index = *first_index; ++ switch (AddressingModeField::decode(instr_->opcode())) { ++ case kMode_None: ++ break; ++ case kMode_MRI: ++ *first_index += 2; ++ return MemOperand(InputRegister(index + 0), InputInt32(index + 1)); ++ case kMode_MRR: ++ *first_index += 2; ++ return MemOperand(InputRegister(index + 0), InputRegister(index + 1)); ++ } ++ UNREACHABLE(); ++ } ++ ++ MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); } ++ ++ MemOperand ToMemOperand(InstructionOperand* op) const { ++ DCHECK_NOT_NULL(op); ++ DCHECK(op->IsStackSlot() || op->IsFPStackSlot()); ++ return SlotToMemOperand(AllocatedOperand::cast(op)->index()); ++ } ++ ++ MemOperand SlotToMemOperand(int slot) const { ++ FrameOffset offset = frame_access_state()->GetFrameOffset(slot); ++ return MemOperand(offset.from_stack_pointer() ? 
sp : fp, offset.offset()); ++ } ++}; ++ ++static inline bool HasRegisterInput(Instruction* instr, size_t index) { ++ return instr->InputAt(index)->IsRegister(); ++} ++ ++namespace { ++ ++class OutOfLineRecordWrite final : public OutOfLineCode { ++ public: ++ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index, ++ Register value, Register scratch0, Register scratch1, ++ RecordWriteMode mode, StubCallMode stub_mode) ++ : OutOfLineCode(gen), ++ object_(object), ++ index_(index), ++ value_(value), ++ scratch0_(scratch0), ++ scratch1_(scratch1), ++ mode_(mode), ++ stub_mode_(stub_mode), ++ must_save_lr_(!gen->frame_access_state()->has_frame()), ++ zone_(gen->zone()) {} ++ ++ void Generate() final { ++ if (mode_ > RecordWriteMode::kValueIsPointer) { ++ __ JumpIfSmi(value_, exit()); ++ } ++ __ CheckPageFlag(value_, scratch0_, ++ MemoryChunk::kPointersToHereAreInterestingMask, eq, ++ exit()); ++ __ Add_d(scratch1_, object_, index_); ++ RememberedSetAction const remembered_set_action = ++ mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET ++ : OMIT_REMEMBERED_SET; ++ SaveFPRegsMode const save_fp_mode = ++ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs; ++ if (must_save_lr_) { ++ // We need to save and restore ra if the frame was elided. ++ __ Push(ra); ++ } ++ if (mode_ == RecordWriteMode::kValueIsEphemeronKey) { ++ __ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode); ++ } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) { ++ // A direct call to a wasm runtime stub defined in this module. ++ // Just encode the stub index. This will be patched when the code ++ // is added to the native module and copied into wasm code space. ++ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action, ++ save_fp_mode, wasm::WasmCode::kRecordWrite); ++ } else { ++ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action, ++ save_fp_mode); ++ } ++ if (must_save_lr_) { ++ __ Pop(ra); ++ } ++ } ++ ++ private: ++ Register const object_; ++ Register const index_; ++ Register const value_; ++ Register const scratch0_; ++ Register const scratch1_; ++ RecordWriteMode const mode_; ++ StubCallMode const stub_mode_; ++ bool must_save_lr_; ++ Zone* zone_; ++}; ++ ++#define CREATE_OOL_CLASS(ool_name, tasm_ool_name, T) \ ++ class ool_name final : public OutOfLineCode { \ ++ public: \ ++ ool_name(CodeGenerator* gen, T dst, T src1, T src2) \ ++ : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \ ++ \ ++ void Generate() final { __ tasm_ool_name(dst_, src1_, src2_); } \ ++ \ ++ private: \ ++ T const dst_; \ ++ T const src1_; \ ++ T const src2_; \ ++ } ++ ++CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister); ++CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister); ++CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, FPURegister); ++CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, FPURegister); ++ ++#undef CREATE_OOL_CLASS ++ ++Condition FlagsConditionToConditionCmp(FlagsCondition condition) { ++ switch (condition) { ++ case kEqual: ++ return eq; ++ case kNotEqual: ++ return ne; ++ case kSignedLessThan: ++ return lt; ++ case kSignedGreaterThanOrEqual: ++ return ge; ++ case kSignedLessThanOrEqual: ++ return le; ++ case kSignedGreaterThan: ++ return gt; ++ case kUnsignedLessThan: ++ return lo; ++ case kUnsignedGreaterThanOrEqual: ++ return hs; ++ case kUnsignedLessThanOrEqual: ++ return ls; ++ case kUnsignedGreaterThan: ++ return hi; ++ case kUnorderedEqual: ++ case kUnorderedNotEqual: 
++ break; ++ default: ++ break; ++ } ++ UNREACHABLE(); ++} ++ ++Condition FlagsConditionToConditionTst(FlagsCondition condition) { ++ switch (condition) { ++ case kNotEqual: ++ return ne; ++ case kEqual: ++ return eq; ++ default: ++ break; ++ } ++ UNREACHABLE(); ++} ++ ++Condition FlagsConditionToConditionOvf(FlagsCondition condition) { ++ switch (condition) { ++ case kOverflow: ++ return ne; ++ case kNotOverflow: ++ return eq; ++ default: ++ break; ++ } ++ UNREACHABLE(); ++} ++ ++FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, ++ FlagsCondition condition) { ++ switch (condition) { ++ case kEqual: ++ *predicate = true; ++ return CEQ; ++ case kNotEqual: ++ *predicate = false; ++ return CEQ; ++ case kUnsignedLessThan: ++ *predicate = true; ++ return CLT; ++ case kUnsignedGreaterThanOrEqual: ++ *predicate = false; ++ return CLT; ++ case kUnsignedLessThanOrEqual: ++ *predicate = true; ++ return CLE; ++ case kUnsignedGreaterThan: ++ *predicate = false; ++ return CLE; ++ case kUnorderedEqual: ++ case kUnorderedNotEqual: ++ *predicate = true; ++ break; ++ default: ++ *predicate = true; ++ break; ++ } ++ UNREACHABLE(); ++} ++ ++void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, ++ InstructionCode opcode, Instruction* instr, ++ La64OperandConverter const& i) { ++ const MemoryAccessMode access_mode = ++ static_cast(MiscField::decode(opcode)); ++ if (access_mode == kMemoryAccessPoisoned) { ++ Register value = i.OutputRegister(); ++ codegen->tasm()->And(value, value, kSpeculationPoisonRegister); ++ } ++} ++ ++} // namespace ++ ++#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \ ++ do { \ ++ __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \ ++ __ dbar(0); \ ++ } while (0) ++ ++// TODO remove second dbar? ++#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \ ++ do { \ ++ __ dbar(0); \ ++ __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \ ++ __ dbar(0); \ ++ } while (0) ++ ++// only use for sub_w and sub_d ++#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr) \ ++ do { \ ++ Label binop; \ ++ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ ++ __ dbar(0); \ ++ __ bind(&binop); \ ++ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \ ++ __ bin_instr(i.TempRegister(1), i.OutputRegister(0), \ ++ Operand(i.InputRegister(2))); \ ++ __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ ++ __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \ ++ __ dbar(0); \ ++ } while (0) ++ ++// TODO remove second dbar? 
++#define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, sign_extend, \ ++ size, bin_instr, representation) \ ++ do { \ ++ Label binop; \ ++ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ ++ if (representation == 32) { \ ++ __ andi(i.TempRegister(3), i.TempRegister(0), 0x3); \ ++ } else { \ ++ DCHECK_EQ(representation, 64); \ ++ __ andi(i.TempRegister(3), i.TempRegister(0), 0x7); \ ++ } \ ++ __ Sub_d(i.TempRegister(0), i.TempRegister(0), \ ++ Operand(i.TempRegister(3))); \ ++ __ slli_w(i.TempRegister(3), i.TempRegister(3), 3); \ ++ __ dbar(0); \ ++ __ bind(&binop); \ ++ __ load_linked(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ ++ __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3), \ ++ size, sign_extend); \ ++ __ bin_instr(i.TempRegister(2), i.OutputRegister(0), \ ++ Operand(i.InputRegister(2))); \ ++ __ InsertBits(i.TempRegister(1), i.TempRegister(2), i.TempRegister(3), \ ++ size); \ ++ __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ ++ __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \ ++ __ dbar(0); \ ++ } while (0) ++ ++// TODO remove second dbar? ++#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT( \ ++ load_linked, store_conditional, sign_extend, size, representation) \ ++ do { \ ++ Label exchange; \ ++ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ ++ if (representation == 32) { \ ++ __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \ ++ } else { \ ++ DCHECK_EQ(representation, 64); \ ++ __ andi(i.TempRegister(1), i.TempRegister(0), 0x7); \ ++ } \ ++ __ Sub_d(i.TempRegister(0), i.TempRegister(0), \ ++ Operand(i.TempRegister(1))); \ ++ __ slli_w(i.TempRegister(1), i.TempRegister(1), 3); \ ++ __ dbar(0); \ ++ __ bind(&exchange); \ ++ __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ ++ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \ ++ size, sign_extend); \ ++ __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1), \ ++ size); \ ++ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ ++ __ BranchShort(&exchange, eq, i.TempRegister(2), Operand(zero_reg)); \ ++ __ dbar(0); \ ++ } while (0) ++ ++// TODO remove second dbar? ++#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked, \ ++ store_conditional) \ ++ do { \ ++ Label compareExchange; \ ++ Label exit; \ ++ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ ++ __ dbar(0); \ ++ __ bind(&compareExchange); \ ++ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \ ++ __ BranchShort(&exit, ne, i.InputRegister(2), \ ++ Operand(i.OutputRegister(0))); \ ++ __ mov(i.TempRegister(2), i.InputRegister(3)); \ ++ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ ++ __ BranchShort(&compareExchange, eq, i.TempRegister(2), \ ++ Operand(zero_reg)); \ ++ __ bind(&exit); \ ++ __ dbar(0); \ ++ } while (0) ++ ++// TODO remove second dbar? 
++#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT( \ ++ load_linked, store_conditional, sign_extend, size, representation) \ ++ do { \ ++ Label compareExchange; \ ++ Label exit; \ ++ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ ++ if (representation == 32) { \ ++ __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \ ++ } else { \ ++ DCHECK_EQ(representation, 64); \ ++ __ andi(i.TempRegister(1), i.TempRegister(0), 0x7); \ ++ } \ ++ __ Sub_d(i.TempRegister(0), i.TempRegister(0), \ ++ Operand(i.TempRegister(1))); \ ++ __ slli_w(i.TempRegister(1), i.TempRegister(1), 3); \ ++ __ dbar(0); \ ++ __ bind(&compareExchange); \ ++ __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ ++ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \ ++ size, sign_extend); \ ++ __ ExtractBits(i.InputRegister(2), i.InputRegister(2), i.TempRegister(1), \ ++ size, sign_extend); \ ++ __ BranchShort(&exit, ne, i.InputRegister(2), \ ++ Operand(i.OutputRegister(0))); \ ++ __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \ ++ size); \ ++ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ ++ __ BranchShort(&compareExchange, eq, i.TempRegister(2), \ ++ Operand(zero_reg)); \ ++ __ bind(&exit); \ ++ __ dbar(0); \ ++ } while (0) ++ ++#define ASSEMBLE_IEEE754_BINOP(name) \ ++ do { \ ++ FrameScope scope(tasm(), StackFrame::MANUAL); \ ++ __ PrepareCallCFunction(0, 2, kScratchReg); \ ++ __ MovToFloatParameters(i.InputDoubleRegister(0), \ ++ i.InputDoubleRegister(1)); \ ++ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \ ++ /* Move the result in the double result register. */ \ ++ __ MovFromFloatResult(i.OutputDoubleRegister()); \ ++ } while (0) ++ ++#define ASSEMBLE_IEEE754_UNOP(name) \ ++ do { \ ++ FrameScope scope(tasm(), StackFrame::MANUAL); \ ++ __ PrepareCallCFunction(0, 1, kScratchReg); \ ++ __ MovToFloatParameter(i.InputDoubleRegister(0)); \ ++ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \ ++ /* Move the result in the double result register. */ \ ++ __ MovFromFloatResult(i.OutputDoubleRegister()); \ ++ } while (0) ++ ++#define ASSEMBLE_F64X2_ARITHMETIC_BINOP(op) \ ++ do { \ ++ __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \ ++ i.InputSimd128Register(1)); \ ++ } while (0) ++ ++void CodeGenerator::AssembleDeconstructFrame() { ++ __ mov(sp, fp); ++ __ Pop(ra, fp); ++} ++ ++void CodeGenerator::AssemblePrepareTailCall() { ++ if (frame_access_state()->has_frame()) { ++ __ Ld_d(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset)); ++ __ Ld_d(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); ++ } ++ frame_access_state()->SetFrameAccessToSP(); ++} ++ ++void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg, ++ Register scratch1, ++ Register scratch2, ++ Register scratch3) { ++ DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3)); ++ Label done; ++ ++ // Check if current frame is an arguments adaptor frame. ++ __ Ld_d(scratch3, MemOperand(fp, StandardFrameConstants::kContextOffset)); ++ __ Branch(&done, ne, scratch3, ++ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); ++ ++ // Load arguments count from current arguments adaptor frame (note, it ++ // does not include receiver). 
++ Register caller_args_count_reg = scratch1; ++ __ Ld_d(caller_args_count_reg, ++ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset)); ++ __ SmiUntag(caller_args_count_reg); ++ ++ __ PrepareForTailCall(args_reg, caller_args_count_reg, scratch2, scratch3); ++ __ bind(&done); ++} ++ ++namespace { ++ ++void AdjustStackPointerForTailCall(TurboAssembler* tasm, ++ FrameAccessState* state, ++ int new_slot_above_sp, ++ bool allow_shrinkage = true) { ++ int current_sp_offset = state->GetSPToFPSlotCount() + ++ StandardFrameConstants::kFixedSlotCountAboveFp; ++ int stack_slot_delta = new_slot_above_sp - current_sp_offset; ++ if (stack_slot_delta > 0) { ++ tasm->Sub_d(sp, sp, stack_slot_delta * kSystemPointerSize); ++ state->IncreaseSPDelta(stack_slot_delta); ++ } else if (allow_shrinkage && stack_slot_delta < 0) { ++ tasm->Add_d(sp, sp, -stack_slot_delta * kSystemPointerSize); ++ state->IncreaseSPDelta(stack_slot_delta); ++ } ++} ++ ++} // namespace ++ ++void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, ++ int first_unused_stack_slot) { ++ AdjustStackPointerForTailCall(tasm(), frame_access_state(), ++ first_unused_stack_slot, false); ++} ++ ++void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, ++ int first_unused_stack_slot) { ++ AdjustStackPointerForTailCall(tasm(), frame_access_state(), ++ first_unused_stack_slot); ++} ++ ++// Check that {kJavaScriptCallCodeStartRegister} is correct. ++void CodeGenerator::AssembleCodeStartRegisterCheck() { ++ __ ComputeCodeStartAddress(kScratchReg); ++ __ Assert(eq, AbortReason::kWrongFunctionCodeStart, ++ kJavaScriptCallCodeStartRegister, Operand(kScratchReg)); ++} ++ ++// Check if the code object is marked for deoptimization. If it is, then it ++// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need ++// to: ++// 1. read from memory the word that contains that bit, which can be found in ++// the flags in the referenced {CodeDataContainer} object; ++// 2. test kMarkedForDeoptimizationBit in those flags; and ++// 3. if it is not zero then it jumps to the builtin. ++void CodeGenerator::BailoutIfDeoptimized() { ++ int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize; ++ __ Ld_d(kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset)); ++ __ Ld_w(kScratchReg, ++ FieldMemOperand(kScratchReg, ++ CodeDataContainer::kKindSpecificFlagsOffset)); ++ __ And(kScratchReg, kScratchReg, ++ Operand(1 << Code::kMarkedForDeoptimizationBit)); ++ __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode), ++ RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg)); ++} ++ ++void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() { ++ // Calculate a mask which has all bits set in the normal case, but has all ++ // bits cleared if we are speculatively executing the wrong PC. ++ __ li(kSpeculationPoisonRegister, -1); ++ __ ComputeCodeStartAddress(kScratchReg); ++ __ sub_d(kScratchReg, kScratchReg, kJavaScriptCallCodeStartRegister); ++ __ maskeqz(kSpeculationPoisonRegister, kSpeculationPoisonRegister, ++ kScratchReg); ++} ++ ++void CodeGenerator::AssembleRegisterArgumentPoisoning() { ++ __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister); ++ __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister); ++ __ And(sp, sp, kSpeculationPoisonRegister); ++} ++ ++// Assembles an instruction after register allocation, producing machine code. 
++CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ++ Instruction* instr) { ++ La64OperandConverter i(this, instr); ++ InstructionCode opcode = instr->opcode(); ++ ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode); ++ switch (arch_opcode) { ++ case kArchCallCodeObject: { ++ if (instr->InputAt(0)->IsImmediate()) { ++ __ Call(i.InputCode(0), RelocInfo::CODE_TARGET); ++ } else { ++ Register reg = i.InputRegister(0); ++ DCHECK_IMPLIES( ++ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister), ++ reg == kJavaScriptCallCodeStartRegister); ++ __ addi_d(reg, reg, Code::kHeaderSize - kHeapObjectTag); ++ __ Call(reg); ++ } ++ RecordCallPosition(instr); ++ frame_access_state()->ClearSPDelta(); ++ break; ++ } ++ case kArchCallBuiltinPointer: { ++ DCHECK(!instr->InputAt(0)->IsImmediate()); ++ Register builtin_index = i.InputRegister(0); ++ __ CallBuiltinByIndex(builtin_index); ++ RecordCallPosition(instr); ++ frame_access_state()->ClearSPDelta(); ++ break; ++ } ++ case kArchCallWasmFunction: { ++ if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) { ++ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister, ++ i.TempRegister(0), i.TempRegister(1), ++ i.TempRegister(2)); ++ } ++ if (instr->InputAt(0)->IsImmediate()) { ++ Constant constant = i.ToConstant(instr->InputAt(0)); ++ Address wasm_code = static_cast
<Address>
(constant.ToInt64()); ++ __ Call(wasm_code, constant.rmode()); ++ } else { ++ __ addi_d(kScratchReg, i.InputRegister(0), 0); ++ __ Call(kScratchReg); ++ } ++ RecordCallPosition(instr); ++ frame_access_state()->ClearSPDelta(); ++ break; ++ } ++ case kArchTailCallCodeObjectFromJSFunction: ++ case kArchTailCallCodeObject: { ++ if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) { ++ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister, ++ i.TempRegister(0), i.TempRegister(1), ++ i.TempRegister(2)); ++ } ++ if (instr->InputAt(0)->IsImmediate()) { ++ __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET); ++ } else { ++ Register reg = i.InputRegister(0); ++ DCHECK_IMPLIES( ++ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister), ++ reg == kJavaScriptCallCodeStartRegister); ++ __ addi_d(reg, reg, Code::kHeaderSize - kHeapObjectTag); ++ __ Jump(reg); ++ } ++ frame_access_state()->ClearSPDelta(); ++ frame_access_state()->SetFrameAccessToDefault(); ++ break; ++ } ++ case kArchTailCallWasm: { ++ if (instr->InputAt(0)->IsImmediate()) { ++ Constant constant = i.ToConstant(instr->InputAt(0)); ++ Address wasm_code = static_cast
<Address>
(constant.ToInt64()); ++ __ Jump(wasm_code, constant.rmode()); ++ } else { ++ __ addi_d(kScratchReg, i.InputRegister(0), 0); ++ __ Jump(kScratchReg); ++ } ++ frame_access_state()->ClearSPDelta(); ++ frame_access_state()->SetFrameAccessToDefault(); ++ break; ++ } ++ case kArchTailCallAddress: { ++ CHECK(!instr->InputAt(0)->IsImmediate()); ++ Register reg = i.InputRegister(0); ++ DCHECK_IMPLIES( ++ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister), ++ reg == kJavaScriptCallCodeStartRegister); ++ __ Jump(reg); ++ frame_access_state()->ClearSPDelta(); ++ frame_access_state()->SetFrameAccessToDefault(); ++ break; ++ } ++ case kArchCallJSFunction: { ++ Register func = i.InputRegister(0); ++ if (FLAG_debug_code) { ++ // Check the function's context matches the context argument. ++ __ Ld_d(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset)); ++ __ Assert(eq, AbortReason::kWrongFunctionContext, cp, ++ Operand(kScratchReg)); ++ } ++ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); ++ __ Ld_d(a2, FieldMemOperand(func, JSFunction::kCodeOffset)); ++ __ Add_d(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag)); ++ __ Call(a2); ++ RecordCallPosition(instr); ++ frame_access_state()->ClearSPDelta(); ++ break; ++ } ++ case kArchPrepareCallCFunction: { ++ int const num_parameters = MiscField::decode(instr->opcode()); ++ __ PrepareCallCFunction(num_parameters, kScratchReg); ++ // Frame alignment requires using FP-relative frame addressing. ++ frame_access_state()->SetFrameAccessToFP(); ++ break; ++ } ++ case kArchSaveCallerRegisters: { ++ fp_mode_ = ++ static_cast(MiscField::decode(instr->opcode())); ++ DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs); ++ // kReturnRegister0 should have been saved before entering the stub. ++ int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0); ++ DCHECK(IsAligned(bytes, kSystemPointerSize)); ++ DCHECK_EQ(0, frame_access_state()->sp_delta()); ++ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize); ++ DCHECK(!caller_registers_saved_); ++ caller_registers_saved_ = true; ++ break; ++ } ++ case kArchRestoreCallerRegisters: { ++ DCHECK(fp_mode_ == ++ static_cast(MiscField::decode(instr->opcode()))); ++ DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs); ++ // Don't overwrite the returned value. ++ int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0); ++ frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize)); ++ DCHECK_EQ(0, frame_access_state()->sp_delta()); ++ DCHECK(caller_registers_saved_); ++ caller_registers_saved_ = false; ++ break; ++ } ++ case kArchPrepareTailCall: ++ AssemblePrepareTailCall(); ++ break; ++ case kArchCallCFunction: { ++ int const num_parameters = MiscField::decode(instr->opcode()); ++ Label start_call; ++ bool isWasmCapiFunction = ++ linkage()->GetIncomingDescriptor()->IsWasmCapiFunction(); ++ // from start_call to return address. ++ int offset = __ root_array_available() ? 44 : 80; // 11 or 20 instrs ++#if V8_HOST_ARCH_LA64 ++ if (__ emit_debug_code()) { ++ offset += 12; // see CallCFunction ++ } ++#endif ++ if (isWasmCapiFunction) { ++ // Put the return address in a stack slot. 
++ // __ mov(kScratchReg, ra); ++ __ bind(&start_call); ++ __ pcaddi(t7, -4); // __ nal(); ++ //__ nop(); ++ //__ Daddu(ra, ra, offset - 8); // 8 = nop + nal ++ __ St_d(t7, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); ++ // __ mov(ra, kScratchReg); ++ } ++ if (instr->InputAt(0)->IsImmediate()) { ++ ExternalReference ref = i.InputExternalReference(0); ++ __ CallCFunction(ref, num_parameters); ++ } else { ++ Register func = i.InputRegister(0); ++ __ CallCFunction(func, num_parameters); ++ } ++ if (isWasmCapiFunction) { ++ CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call)); ++ RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt); ++ } ++ ++ frame_access_state()->SetFrameAccessToDefault(); ++ // Ideally, we should decrement SP delta to match the change of stack ++ // pointer in CallCFunction. However, for certain architectures (e.g. ++ // ARM), there may be more strict alignment requirement, causing old SP ++ // to be saved on the stack. In those cases, we can not calculate the SP ++ // delta statically. ++ frame_access_state()->ClearSPDelta(); ++ if (caller_registers_saved_) { ++ // Need to re-sync SP delta introduced in kArchSaveCallerRegisters. ++ // Here, we assume the sequence to be: ++ // kArchSaveCallerRegisters; ++ // kArchCallCFunction; ++ // kArchRestoreCallerRegisters; ++ int bytes = ++ __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0); ++ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize); ++ } ++ break; ++ } ++ case kArchJmp: ++ AssembleArchJump(i.InputRpo(0)); ++ break; ++ case kArchBinarySearchSwitch: ++ AssembleArchBinarySearchSwitch(instr); ++ break; ++ break; ++ case kArchTableSwitch: ++ AssembleArchTableSwitch(instr); ++ break; ++ case kArchAbortCSAAssert: ++ DCHECK(i.InputRegister(0) == a0); ++ { ++ // We don't actually want to generate a pile of code for this, so just ++ // claim there is a stack frame, without generating one. ++ FrameScope scope(tasm(), StackFrame::NONE); ++ __ Call( ++ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert), ++ RelocInfo::CODE_TARGET); ++ } ++ __ stop(); ++ break; ++ case kArchDebugBreak: ++ __ DebugBreak(); ++ break; ++ case kArchComment: ++ __ RecordComment(reinterpret_cast(i.InputInt64(0))); ++ break; ++ case kArchNop: ++ case kArchThrowTerminator: ++ // don't emit code for nops. ++ break; ++ case kArchDeoptimize: { ++ DeoptimizationExit* exit = ++ BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore()); ++ CodeGenResult result = AssembleDeoptimizerCall(exit); ++ if (result != kSuccess) return result; ++ break; ++ } ++ case kArchRet: ++ AssembleReturn(instr->InputAt(0)); ++ break; ++ case kArchStackPointerGreaterThan: ++ // Pseudo-instruction used for cmp/branch. No opcode emitted here. 
++ break; ++ case kArchStackCheckOffset: ++ __ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset())); ++ break; ++ case kArchFramePointer: ++ __ mov(i.OutputRegister(), fp); ++ break; ++ case kArchParentFramePointer: ++ if (frame_access_state()->has_frame()) { ++ __ Ld_d(i.OutputRegister(), MemOperand(fp, 0)); ++ } else { ++ __ mov(i.OutputRegister(), fp); ++ } ++ break; ++ case kArchTruncateDoubleToI: ++ __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(), ++ i.InputDoubleRegister(0), DetermineStubCallMode()); ++ break; ++ case kArchStoreWithWriteBarrier: { ++ RecordWriteMode mode = ++ static_cast(MiscField::decode(instr->opcode())); ++ Register object = i.InputRegister(0); ++ Register index = i.InputRegister(1); ++ Register value = i.InputRegister(2); ++ Register scratch0 = i.TempRegister(0); ++ Register scratch1 = i.TempRegister(1); ++ auto ool = new (zone()) ++ OutOfLineRecordWrite(this, object, index, value, scratch0, scratch1, ++ mode, DetermineStubCallMode()); ++ __ Add_d(kScratchReg, object, index); ++ __ St_d(value, MemOperand(kScratchReg, 0)); ++ __ CheckPageFlag(object, scratch0, ++ MemoryChunk::kPointersFromHereAreInterestingMask, ne, ++ ool->entry()); ++ __ bind(ool->exit()); ++ break; ++ } ++ case kArchStackSlot: { ++ FrameOffset offset = ++ frame_access_state()->GetFrameOffset(i.InputInt32(0)); ++ Register base_reg = offset.from_stack_pointer() ? sp : fp; ++ __ Add_d(i.OutputRegister(), base_reg, Operand(offset.offset())); ++ int alignment = i.InputInt32(1); ++ DCHECK(alignment == 0 || alignment == 4 || alignment == 8 || ++ alignment == 16); ++ if (FLAG_debug_code && alignment > 0) { ++ // Verify that the output_register is properly aligned ++ __ And(kScratchReg, i.OutputRegister(), ++ Operand(kSystemPointerSize - 1)); ++ __ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, kScratchReg, ++ Operand(zero_reg)); ++ } ++ if (alignment == 2 * kSystemPointerSize) { ++ Label done; ++ __ Add_d(kScratchReg, base_reg, Operand(offset.offset())); ++ __ And(kScratchReg, kScratchReg, Operand(alignment - 1)); ++ __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg)); ++ __ Add_d(i.OutputRegister(), i.OutputRegister(), kSystemPointerSize); ++ __ bind(&done); ++ } else if (alignment > 2 * kSystemPointerSize) { ++ Label done; ++ __ Add_d(kScratchReg, base_reg, Operand(offset.offset())); ++ __ And(kScratchReg, kScratchReg, Operand(alignment - 1)); ++ __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg)); ++ __ li(kScratchReg2, alignment); ++ __ Sub_d(kScratchReg2, kScratchReg2, Operand(kScratchReg)); ++ __ Add_d(i.OutputRegister(), i.OutputRegister(), kScratchReg2); ++ __ bind(&done); ++ } ++ ++ break; ++ } ++ case kArchWordPoisonOnSpeculation: ++ __ And(i.OutputRegister(), i.InputRegister(0), ++ kSpeculationPoisonRegister); ++ break; ++ case kIeee754Float64Acos: ++ ASSEMBLE_IEEE754_UNOP(acos); ++ break; ++ case kIeee754Float64Acosh: ++ ASSEMBLE_IEEE754_UNOP(acosh); ++ break; ++ case kIeee754Float64Asin: ++ ASSEMBLE_IEEE754_UNOP(asin); ++ break; ++ case kIeee754Float64Asinh: ++ ASSEMBLE_IEEE754_UNOP(asinh); ++ break; ++ case kIeee754Float64Atan: ++ ASSEMBLE_IEEE754_UNOP(atan); ++ break; ++ case kIeee754Float64Atanh: ++ ASSEMBLE_IEEE754_UNOP(atanh); ++ break; ++ case kIeee754Float64Atan2: ++ ASSEMBLE_IEEE754_BINOP(atan2); ++ break; ++ case kIeee754Float64Cos: ++ ASSEMBLE_IEEE754_UNOP(cos); ++ break; ++ case kIeee754Float64Cosh: ++ ASSEMBLE_IEEE754_UNOP(cosh); ++ break; ++ case kIeee754Float64Cbrt: ++ ASSEMBLE_IEEE754_UNOP(cbrt); ++ break; ++ case kIeee754Float64Exp: 
++ ASSEMBLE_IEEE754_UNOP(exp); ++ break; ++ case kIeee754Float64Expm1: ++ ASSEMBLE_IEEE754_UNOP(expm1); ++ break; ++ case kIeee754Float64Log: ++ ASSEMBLE_IEEE754_UNOP(log); ++ break; ++ case kIeee754Float64Log1p: ++ ASSEMBLE_IEEE754_UNOP(log1p); ++ break; ++ case kIeee754Float64Log2: ++ ASSEMBLE_IEEE754_UNOP(log2); ++ break; ++ case kIeee754Float64Log10: ++ ASSEMBLE_IEEE754_UNOP(log10); ++ break; ++ case kIeee754Float64Pow: ++ ASSEMBLE_IEEE754_BINOP(pow); ++ break; ++ case kIeee754Float64Sin: ++ ASSEMBLE_IEEE754_UNOP(sin); ++ break; ++ case kIeee754Float64Sinh: ++ ASSEMBLE_IEEE754_UNOP(sinh); ++ break; ++ case kIeee754Float64Tan: ++ ASSEMBLE_IEEE754_UNOP(tan); ++ break; ++ case kIeee754Float64Tanh: ++ ASSEMBLE_IEEE754_UNOP(tanh); ++ break; ++ case kLa64Add: ++ __ Add_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLa64Dadd: ++ __ Add_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLa64DaddOvf: ++ __ AdddOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1), ++ kScratchReg); ++ break; ++ case kLa64Sub: ++ __ Sub_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLa64Dsub: ++ __ Sub_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLa64DsubOvf: ++ __ SubdOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1), ++ kScratchReg); ++ break; ++ case kLa64Mul: ++ __ Mul_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLa64MulOvf: ++ __ MulOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1), ++ kScratchReg); ++ break; ++ case kLa64MulHigh: ++ __ Mulh_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLa64MulHighU: ++ __ Mulh_wu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLa64DMulHigh: ++ __ Mulh_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLa64Div: ++ __ Div_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); ++ break; ++ case kLa64DivU: ++ __ Div_wu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); ++ break; ++ case kLa64Mod: ++ __ Mod_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLa64ModU: ++ __ Mod_wu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLa64Dmul: ++ __ Mul_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLa64Ddiv: ++ __ Div_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); ++ break; ++ case kLa64DdivU: ++ __ Div_du(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); ++ break; ++ case kLa64Dmod: ++ __ Mod_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLa64DmodU: ++ __ Mod_du(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLa64Dlsa: ++ DCHECK(instr->InputAt(2)->IsImmediate()); ++ __ Alsl_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), ++ i.InputInt8(2), t7); ++ break; ++ case kLa64Lsa: ++ DCHECK(instr->InputAt(2)->IsImmediate()); ++ __ Alsl_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), ++ i.InputInt8(2), t7); ++ break; ++ case kLa64And: ++ __ And(i.OutputRegister(), 
i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLa64And32: ++ __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ __ slli_w(i.OutputRegister(), i.OutputRegister(), 0x0); ++ break; ++ case kLa64Or: ++ __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLa64Or32: ++ __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ __ slli_w(i.OutputRegister(), i.OutputRegister(), 0x0); ++ break; ++ case kLa64Nor: ++ if (instr->InputAt(1)->IsRegister()) { ++ __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ } else { ++ DCHECK_EQ(0, i.InputOperand(1).immediate()); ++ __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg); ++ } ++ break; ++ case kLa64Nor32: ++ if (instr->InputAt(1)->IsRegister()) { ++ __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ __ slli_w(i.OutputRegister(), i.OutputRegister(), 0x0); ++ } else { ++ DCHECK_EQ(0, i.InputOperand(1).immediate()); ++ __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg); ++ __ slli_w(i.OutputRegister(), i.OutputRegister(), 0x0); ++ } ++ break; ++ case kLa64Xor: ++ __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLa64Xor32: ++ __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ __ slli_w(i.OutputRegister(), i.OutputRegister(), 0x0); ++ break; ++ case kLa64Clz: ++ __ Clz_w(i.OutputRegister(), i.InputRegister(0)); ++ break; ++ case kLa64Dclz: ++ __ clz_d(i.OutputRegister(), i.InputRegister(0)); ++ break; ++ case kLa64Ctz: { ++ Register src = i.InputRegister(0); ++ Register dst = i.OutputRegister(); ++ __ Ctz_w(dst, src); ++ } break; ++ case kLa64Dctz: { ++ Register src = i.InputRegister(0); ++ Register dst = i.OutputRegister(); ++ __ Ctz_d(dst, src); ++ } break; ++ case kLa64Popcnt: { ++ Register src = i.InputRegister(0); ++ Register dst = i.OutputRegister(); ++ __ Popcnt_w(dst, src); ++ } break; ++ case kLa64Dpopcnt: { ++ Register src = i.InputRegister(0); ++ Register dst = i.OutputRegister(); ++ __ Popcnt_d(dst, src); ++ } break; ++ case kLa64Shl: ++ if (instr->InputAt(1)->IsRegister()) { ++ __ sll_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); ++ } else { ++ int64_t imm = i.InputOperand(1).immediate(); ++ __ slli_w(i.OutputRegister(), i.InputRegister(0), ++ static_cast(imm)); ++ } ++ break; ++ case kLa64Shr: ++ if (instr->InputAt(1)->IsRegister()) { ++ __ slli_w(i.InputRegister(0), i.InputRegister(0), 0x0); ++ __ srl_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); ++ } else { ++ int64_t imm = i.InputOperand(1).immediate(); ++ __ slli_w(i.OutputRegister(), i.InputRegister(0), 0x0); ++ __ srli_w(i.OutputRegister(), i.OutputRegister(), ++ static_cast(imm)); ++ } ++ break; ++ case kLa64Sar: ++ if (instr->InputAt(1)->IsRegister()) { ++ __ slli_w(i.InputRegister(0), i.InputRegister(0), 0x0); ++ __ sra_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); ++ } else { ++ int64_t imm = i.InputOperand(1).immediate(); ++ __ slli_w(i.OutputRegister(), i.InputRegister(0), 0x0); ++ __ srai_w(i.OutputRegister(), i.OutputRegister(), ++ static_cast(imm)); ++ } ++ break; ++ case kLa64Ext: ++ __ bstrpick_w(i.OutputRegister(), i.InputRegister(0), ++ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1)); ++ break; ++ case kLa64Ins: ++ if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) { ++ __ bstrins_w(i.OutputRegister(), zero_reg, ++ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1)); ++ } else { ++ __ bstrins_w(i.OutputRegister(), i.InputRegister(0), 
++ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1)); ++ } ++ break; ++ case kLa64Dext: { ++ __ bstrpick_d(i.OutputRegister(), i.InputRegister(0), ++ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1)); ++ break; ++ } ++ case kLa64Dins: ++ if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) { ++ __ bstrins_d(i.OutputRegister(), zero_reg, ++ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1)); ++ } else { ++ __ bstrins_d(i.OutputRegister(), i.InputRegister(0), ++ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1)); ++ } ++ break; ++ case kLa64Dshl: ++ if (instr->InputAt(1)->IsRegister()) { ++ __ sll_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); ++ } else { ++ int64_t imm = i.InputOperand(1).immediate(); ++ __ slli_d(i.OutputRegister(), i.InputRegister(0), ++ static_cast(imm)); ++ } ++ break; ++ case kLa64Dshr: ++ if (instr->InputAt(1)->IsRegister()) { ++ __ srl_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); ++ } else { ++ int64_t imm = i.InputOperand(1).immediate(); ++ __ srli_d(i.OutputRegister(), i.InputRegister(0), ++ static_cast(imm)); ++ } ++ break; ++ case kLa64Dsar: ++ if (instr->InputAt(1)->IsRegister()) { ++ __ sra_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); ++ } else { ++ int64_t imm = i.InputOperand(1).immediate(); ++ __ srai_d(i.OutputRegister(), i.InputRegister(0), imm); ++ } ++ break; ++ case kLa64Ror: ++ __ Rotr_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLa64Dror: ++ __ Rotr_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLa64Tst: ++ __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1)); ++ // Pseudo-instruction used for cmp/branch. No opcode emitted here. ++ break; ++ case kLa64Cmp: ++ // Pseudo-instruction used for cmp/branch. No opcode emitted here. ++ break; ++ case kLa64Mov: ++ // TODO(plind): Should we combine mov/li like this, or use separate instr? ++ // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType ++ if (HasRegisterInput(instr, 0)) { ++ __ mov(i.OutputRegister(), i.InputRegister(0)); ++ } else { ++ __ li(i.OutputRegister(), i.InputOperand(0)); ++ } ++ break; ++ ++ case kLa64CmpS: { ++ FPURegister left = i.InputOrZeroSingleRegister(0); ++ FPURegister right = i.InputOrZeroSingleRegister(1); ++ bool predicate; ++ FPUCondition cc = ++ FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition()); ++ ++ if ((left == kDoubleRegZero || right == kDoubleRegZero) && ++ !__ IsDoubleZeroRegSet()) { ++ __ Move(kDoubleRegZero, 0.0); ++ } ++ ++ __ CompareF32(left, right, cc); ++ } break; ++ case kLa64AddS: ++ // TODO(plind): add special case: combine mult & add. ++ __ fadd_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ break; ++ case kLa64SubS: ++ __ fsub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ break; ++ case kLa64MulS: ++ // TODO(plind): add special case: right op is -1.0, see arm port. ++ __ fmul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ break; ++ case kLa64DivS: ++ __ fdiv_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ break; ++ case kLa64ModS: { ++ // TODO(bmeurer): We should really get rid of this special instruction, ++ // and generate a CallAddress instruction instead. 
++ FrameScope scope(tasm(), StackFrame::MANUAL); ++ __ PrepareCallCFunction(0, 2, kScratchReg); ++ __ MovToFloatParameters(i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ // TODO(balazs.kilvady): implement mod_two_floats_operation(isolate()) ++ __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2); ++ // Move the result in the double result register. ++ __ MovFromFloatResult(i.OutputSingleRegister()); ++ break; ++ } ++ case kLa64AbsS: ++ __ fabs_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); ++ break; ++ case kLa64NegS: ++ __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); ++ break; ++ case kLa64SqrtS: { ++ __ fsqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); ++ break; ++ } ++ case kLa64MaxS: ++ __ fmax_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ break; ++ case kLa64MinS: ++ __ fmin_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ break; ++ case kLa64CmpD: { ++ FPURegister left = i.InputOrZeroDoubleRegister(0); ++ FPURegister right = i.InputOrZeroDoubleRegister(1); ++ bool predicate; ++ FPUCondition cc = ++ FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition()); ++ if ((left == kDoubleRegZero || right == kDoubleRegZero) && ++ !__ IsDoubleZeroRegSet()) { ++ __ Move(kDoubleRegZero, 0.0); ++ } ++ ++ __ CompareF64(left, right, cc); ++ } break; ++ case kLa64AddD: ++ // TODO(plind): add special case: combine mult & add. ++ __ fadd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ break; ++ case kLa64SubD: ++ __ fsub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ break; ++ case kLa64MulD: ++ // TODO(plind): add special case: right op is -1.0, see arm port. ++ __ fmul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ break; ++ case kLa64DivD: ++ __ fdiv_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ break; ++ case kLa64ModD: { ++ // TODO(bmeurer): We should really get rid of this special instruction, ++ // and generate a CallAddress instruction instead. ++ FrameScope scope(tasm(), StackFrame::MANUAL); ++ __ PrepareCallCFunction(0, 2, kScratchReg); ++ __ MovToFloatParameters(i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2); ++ // Move the result in the double result register. 
++ __ MovFromFloatResult(i.OutputDoubleRegister()); ++ break; ++ } ++ case kLa64AbsD: ++ __ fabs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); ++ break; ++ case kLa64NegD: ++ __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); ++ break; ++ case kLa64SqrtD: { ++ __ fsqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); ++ break; ++ } ++ case kLa64MaxD: ++ __ fmax_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ break; ++ case kLa64MinD: ++ __ fmin_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ break; ++ case kLa64Float64RoundDown: { ++ __ Floor_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); ++ break; ++ } ++ case kLa64Float32RoundDown: { ++ __ Floor_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); ++ break; ++ } ++ case kLa64Float64RoundTruncate: { ++ __ Trunc_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); ++ break; ++ } ++ case kLa64Float32RoundTruncate: { ++ __ Trunc_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); ++ break; ++ } ++ case kLa64Float64RoundUp: { ++ __ Ceil_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); ++ break; ++ } ++ case kLa64Float32RoundUp: { ++ __ Ceil_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); ++ break; ++ } ++ case kLa64Float64RoundTiesEven: { ++ __ Round_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); ++ break; ++ } ++ case kLa64Float32RoundTiesEven: { ++ __ Round_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); ++ break; ++ } ++ case kLa64Float32Max: { ++ FPURegister dst = i.OutputSingleRegister(); ++ FPURegister src1 = i.InputSingleRegister(0); ++ FPURegister src2 = i.InputSingleRegister(1); ++ auto ool = new (zone()) OutOfLineFloat32Max(this, dst, src1, src2); ++ __ Float32Max(dst, src1, src2, ool->entry()); ++ __ bind(ool->exit()); ++ break; ++ } ++ case kLa64Float64Max: { ++ FPURegister dst = i.OutputDoubleRegister(); ++ FPURegister src1 = i.InputDoubleRegister(0); ++ FPURegister src2 = i.InputDoubleRegister(1); ++ auto ool = new (zone()) OutOfLineFloat64Max(this, dst, src1, src2); ++ __ Float64Max(dst, src1, src2, ool->entry()); ++ __ bind(ool->exit()); ++ break; ++ } ++ case kLa64Float32Min: { ++ FPURegister dst = i.OutputSingleRegister(); ++ FPURegister src1 = i.InputSingleRegister(0); ++ FPURegister src2 = i.InputSingleRegister(1); ++ auto ool = new (zone()) OutOfLineFloat32Min(this, dst, src1, src2); ++ __ Float32Min(dst, src1, src2, ool->entry()); ++ __ bind(ool->exit()); ++ break; ++ } ++ case kLa64Float64Min: { ++ FPURegister dst = i.OutputDoubleRegister(); ++ FPURegister src1 = i.InputDoubleRegister(0); ++ FPURegister src2 = i.InputDoubleRegister(1); ++ auto ool = new (zone()) OutOfLineFloat64Min(this, dst, src1, src2); ++ __ Float64Min(dst, src1, src2, ool->entry()); ++ __ bind(ool->exit()); ++ break; ++ } ++ case kLa64Float64SilenceNaN: ++ __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); ++ break; ++ case kLa64CvtSD: ++ __ fcvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0)); ++ break; ++ case kLa64CvtDS: ++ __ fcvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0)); ++ break; ++ case kLa64CvtDW: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ movgr2fr_w(scratch, i.InputRegister(0)); ++ __ ffint_d_w(i.OutputDoubleRegister(), scratch); ++ break; ++ } ++ case kLa64CvtSW: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ movgr2fr_w(scratch, i.InputRegister(0)); ++ __ ffint_s_w(i.OutputDoubleRegister(), scratch); ++ break; ++ } ++ case kLa64CvtSUw: 
{ ++ __ Ffint_s_uw(i.OutputDoubleRegister(), i.InputRegister(0)); ++ break; ++ } ++ case kLa64CvtSL: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ movgr2fr_d(scratch, i.InputRegister(0)); ++ __ ffint_s_l(i.OutputDoubleRegister(), scratch); ++ break; ++ } ++ case kLa64CvtDL: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ movgr2fr_d(scratch, i.InputRegister(0)); ++ __ ffint_d_l(i.OutputDoubleRegister(), scratch); ++ break; ++ } ++ case kLa64CvtDUw: { ++ __ Ffint_d_uw(i.OutputDoubleRegister(), i.InputRegister(0)); ++ break; ++ } ++ case kLa64CvtDUl: { ++ __ Ffint_d_ul(i.OutputDoubleRegister(), i.InputRegister(0)); ++ break; ++ } ++ case kLa64CvtSUl: { ++ __ Ffint_s_ul(i.OutputDoubleRegister(), i.InputRegister(0)); ++ break; ++ } ++ case kLa64FloorWD: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ ftintrm_w_d(scratch, i.InputDoubleRegister(0)); ++ __ movfr2gr_s(i.OutputRegister(), scratch); ++ break; ++ } ++ case kLa64CeilWD: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ ftintrp_w_d(scratch, i.InputDoubleRegister(0)); ++ __ movfr2gr_s(i.OutputRegister(), scratch); ++ break; ++ } ++ case kLa64RoundWD: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ ftintrne_w_d(scratch, i.InputDoubleRegister(0)); ++ __ movfr2gr_s(i.OutputRegister(), scratch); ++ break; ++ } ++ case kLa64TruncWD: { ++ FPURegister scratch = kScratchDoubleReg; ++ // Other arches use round to zero here, so we follow. ++ __ ftintrz_w_d(scratch, i.InputDoubleRegister(0)); ++ __ movfr2gr_s(i.OutputRegister(), scratch); ++ break; ++ } ++ case kLa64FloorWS: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ ftintrm_w_s(scratch, i.InputDoubleRegister(0)); ++ __ movfr2gr_s(i.OutputRegister(), scratch); ++ break; ++ } ++ case kLa64CeilWS: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ ftintrp_w_s(scratch, i.InputDoubleRegister(0)); ++ __ movfr2gr_s(i.OutputRegister(), scratch); ++ break; ++ } ++ case kLa64RoundWS: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ ftintrne_w_s(scratch, i.InputDoubleRegister(0)); ++ __ movfr2gr_s(i.OutputRegister(), scratch); ++ break; ++ } ++ case kLa64TruncWS: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ ftintrz_w_s(scratch, i.InputDoubleRegister(0)); ++ __ movfr2gr_s(i.OutputRegister(), scratch); ++ // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead, ++ // because INT32_MIN allows easier out-of-bounds detection. ++ __ addi_w(kScratchReg, i.OutputRegister(), 1); ++ __ slt(kScratchReg2, kScratchReg, i.OutputRegister()); ++ __ Movn(i.OutputRegister(), kScratchReg, kScratchReg2); ++ break; ++ } ++ case kLa64TruncLS: { ++ FPURegister scratch = kScratchDoubleReg; ++ Register tmp_fcsr = kScratchReg; ++ Register result = kScratchReg2; ++ ++ bool load_status = instr->OutputCount() > 1; ++ if (load_status) { ++ // Save FCSR. ++ __ movfcsr2gr(tmp_fcsr); // __ cfc1(tmp_fcsr, FCSR); ++ // Clear FPU flags. ++ __ movgr2fcsr(zero_reg); // __ ctc1(zero_reg, FCSR); ++ } ++ // Other arches use round to zero here, so we follow. ++ __ ftintrz_l_s(scratch, i.InputDoubleRegister(0)); ++ __ movfr2gr_d(i.OutputRegister(), scratch); ++ if (load_status) { ++ __ movfcsr2gr(result); // __ cfc1(result, FCSR); ++ // Check for overflow and NaNs. 
++ __ And(result, result, ++ (kFCSROverflowFlagMask | kFCSRInvalidOpFlagMask)); ++ __ Slt(result, zero_reg, result); ++ __ xori(result, result, 1); ++ __ mov(i.OutputRegister(1), result); ++ // Restore FCSR ++ __ movgr2fcsr(tmp_fcsr); // __ ctc1(tmp_fcsr, FCSR); ++ } ++ break; ++ } ++ case kLa64TruncLD: { ++ FPURegister scratch = kScratchDoubleReg; ++ Register tmp_fcsr = kScratchReg; ++ Register result = kScratchReg2; ++ ++ bool load_status = instr->OutputCount() > 1; ++ if (load_status) { ++ // Save FCSR. ++ __ movfcsr2gr(tmp_fcsr); // __ cfc1(tmp_fcsr, FCSR); ++ // Clear FPU flags. ++ __ movgr2fcsr(zero_reg); // __ ctc1(zero_reg, FCSR); ++ } ++ // Other arches use round to zero here, so we follow. ++ __ ftintrz_l_d(scratch, i.InputDoubleRegister(0)); ++ __ movfr2gr_d(i.OutputRegister(0), scratch); ++ if (load_status) { ++ __ movfcsr2gr(result); // __ cfc1(result, FCSR); ++ // Check for overflow and NaNs. ++ __ And(result, result, ++ (kFCSROverflowFlagMask | kFCSRInvalidOpFlagMask)); ++ __ Slt(result, zero_reg, result); ++ __ xori(result, result, 1); ++ __ mov(i.OutputRegister(1), result); ++ // Restore FCSR ++ __ movgr2fcsr(tmp_fcsr); // __ ctc1(tmp_fcsr, FCSR); ++ } ++ break; ++ } ++ case kLa64TruncUwD: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ Ftintrz_uw_d(i.OutputRegister(), i.InputDoubleRegister(0), scratch); ++ break; ++ } ++ case kLa64TruncUwS: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ Ftintrz_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch); ++ // Avoid UINT32_MAX as an overflow indicator and use 0 instead, ++ // because 0 allows easier out-of-bounds detection. ++ __ addi_w(kScratchReg, i.OutputRegister(), 1); ++ __ Movz(i.OutputRegister(), zero_reg, kScratchReg); ++ break; ++ } ++ case kLa64TruncUlS: { ++ FPURegister scratch = kScratchDoubleReg; ++ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg; ++ __ Ftintrz_ul_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch, ++ result); ++ break; ++ } ++ case kLa64TruncUlD: { ++ FPURegister scratch = kScratchDoubleReg; ++ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg; ++ __ Ftintrz_ul_d(i.OutputRegister(0), i.InputDoubleRegister(0), scratch, ++ result); ++ break; ++ } ++ case kLa64BitcastDL: ++ __ movfr2gr_d(i.OutputRegister(), i.InputDoubleRegister(0)); ++ break; ++ case kLa64BitcastLD: ++ __ movgr2fr_d(i.OutputDoubleRegister(), i.InputRegister(0)); ++ break; ++ case kLa64Float64ExtractLowWord32: ++ __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0)); ++ break; ++ case kLa64Float64ExtractHighWord32: ++ __ movfrh2gr_s(i.OutputRegister(), i.InputDoubleRegister(0)); ++ break; ++ case kLa64Float64InsertLowWord32: ++ __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1)); ++ break; ++ case kLa64Float64InsertHighWord32: ++ __ movgr2frh_w(i.OutputDoubleRegister(), i.InputRegister(1)); ++ break; ++ // ... more basic instructions ... 
++ ++ case kLa64Seb: ++ __ ext_w_b(i.OutputRegister(), i.InputRegister(0)); ++ break; ++ case kLa64Seh: ++ __ ext_w_h(i.OutputRegister(), i.InputRegister(0)); ++ break; ++ case kLa64Lbu: ++ __ Ld_bu(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kLa64Lb: ++ __ Ld_b(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kLa64Sb: ++ __ St_b(i.InputOrZeroRegister(2), i.MemoryOperand()); ++ break; ++ case kLa64Lhu: ++ __ Ld_hu(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kLa64Ulhu: ++ __ Ld_hu(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kLa64Lh: ++ __ Ld_h(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kLa64Ulh: ++ __ Ld_h(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kLa64Sh: ++ __ St_h(i.InputOrZeroRegister(2), i.MemoryOperand()); ++ break; ++ case kLa64Ush: ++ __ St_h(i.InputOrZeroRegister(2), i.MemoryOperand()); ++ break; ++ case kLa64Lw: ++ __ Ld_w(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kLa64Ulw: ++ __ Ld_w(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kLa64Lwu: ++ __ Ld_wu(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kLa64Ulwu: ++ __ Ld_wu(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kLa64Ld: ++ __ Ld_d(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kLa64Uld: ++ __ Ld_d(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kLa64Sw: ++ __ St_w(i.InputOrZeroRegister(2), i.MemoryOperand()); ++ break; ++ case kLa64Usw: ++ __ St_w(i.InputOrZeroRegister(2), i.MemoryOperand()); ++ break; ++ case kLa64Sd: ++ __ St_d(i.InputOrZeroRegister(2), i.MemoryOperand()); ++ break; ++ case kLa64Usd: ++ __ St_d(i.InputOrZeroRegister(2), i.MemoryOperand()); ++ break; ++ case kLa64Lwc1: { ++ __ Fld_s(i.OutputSingleRegister(), i.MemoryOperand()); ++ break; ++ } ++ case kLa64Ulwc1: { ++ __ Fld_s(i.OutputSingleRegister(), i.MemoryOperand()); ++ break; ++ } ++ case kLa64Swc1: { ++ size_t index = 0; ++ MemOperand operand = i.MemoryOperand(&index); ++ FPURegister ft = i.InputOrZeroSingleRegister(index); ++ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { ++ __ Move(kDoubleRegZero, 0.0); ++ } ++ ++ __ Fst_s(ft, operand); ++ break; ++ } ++ case kLa64Uswc1: { ++ size_t index = 0; ++ MemOperand operand = i.MemoryOperand(&index); ++ FPURegister ft = i.InputOrZeroSingleRegister(index); ++ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { ++ __ Move(kDoubleRegZero, 0.0); ++ } ++ ++ __ Fst_s(ft, operand); ++ break; ++ } ++ case kLa64Ldc1: ++ __ Fld_d(i.OutputDoubleRegister(), i.MemoryOperand()); ++ break; ++ case kLa64Uldc1: ++ __ Fld_d(i.OutputDoubleRegister(), i.MemoryOperand()); ++ break; ++ case kLa64Sdc1: { ++ FPURegister ft = i.InputOrZeroDoubleRegister(2); ++ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { ++ __ Move(kDoubleRegZero, 0.0); ++ } ++ ++ __ Fst_d(ft, 
i.MemoryOperand()); ++ break; ++ } ++ case kLa64Usdc1: { ++ FPURegister ft = i.InputOrZeroDoubleRegister(2); ++ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { ++ __ Move(kDoubleRegZero, 0.0); ++ } ++ ++ __ Fst_d(ft, i.MemoryOperand()); ++ break; ++ } ++ case kLa64Sync: { ++ __ dbar(0); ++ break; ++ } ++ case kLa64Push: ++ if (instr->InputAt(0)->IsFPRegister()) { ++ __ Fst_d(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize)); ++ __ Sub_d(sp, sp, Operand(kDoubleSize)); ++ frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize); ++ } else { ++ __ Push(i.InputRegister(0)); ++ frame_access_state()->IncreaseSPDelta(1); ++ } ++ break; ++ case kLa64Peek: { ++ // The incoming value is 0-based, but we need a 1-based value. ++ int reverse_slot = i.InputInt32(0) + 1; ++ int offset = ++ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot); ++ if (instr->OutputAt(0)->IsFPRegister()) { ++ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0)); ++ if (op->representation() == MachineRepresentation::kFloat64) { ++ __ Fld_d(i.OutputDoubleRegister(), MemOperand(fp, offset)); ++ } else { ++ DCHECK_EQ(op->representation(), MachineRepresentation::kFloat32); ++ __ Fld_s( ++ i.OutputSingleRegister(0), ++ MemOperand(fp, offset + kLessSignificantWordInDoublewordOffset)); ++ } ++ } else { ++ __ Ld_d(i.OutputRegister(0), MemOperand(fp, offset)); ++ } ++ break; ++ } ++ case kLa64StackClaim: { ++ __ Sub_d(sp, sp, Operand(i.InputInt32(0))); ++ frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / ++ kSystemPointerSize); ++ break; ++ } ++ case kLa64StoreToStackSlot: { ++ if (instr->InputAt(0)->IsFPRegister()) { ++ __ Fst_d(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1))); ++ } else { ++ __ St_d(i.InputRegister(0), MemOperand(sp, i.InputInt32(1))); ++ } ++ break; ++ } ++ case kLa64ByteSwap64: { ++ __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 8); ++ break; ++ } ++ case kLa64ByteSwap32: { ++ __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4); ++ break; ++ } ++ case kWord32AtomicLoadInt8: ++ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_b); ++ break; ++ case kWord32AtomicLoadUint8: ++ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_bu); ++ break; ++ case kWord32AtomicLoadInt16: ++ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_h); ++ break; ++ case kWord32AtomicLoadUint16: ++ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_hu); ++ break; ++ case kWord32AtomicLoadWord32: ++ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_w); ++ break; ++ case kLa64Word64AtomicLoadUint8: ++ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_bu); ++ break; ++ case kLa64Word64AtomicLoadUint16: ++ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_hu); ++ break; ++ case kLa64Word64AtomicLoadUint32: ++ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_wu); ++ break; ++ case kLa64Word64AtomicLoadUint64: ++ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_d); ++ break; ++ case kWord32AtomicStoreWord8: ++ ASSEMBLE_ATOMIC_STORE_INTEGER(St_b); ++ break; ++ case kWord32AtomicStoreWord16: ++ ASSEMBLE_ATOMIC_STORE_INTEGER(St_h); ++ break; ++ case kWord32AtomicStoreWord32: ++ ASSEMBLE_ATOMIC_STORE_INTEGER(St_w); ++ break; ++ case kLa64Word64AtomicStoreWord8: ++ ASSEMBLE_ATOMIC_STORE_INTEGER(St_b); ++ break; ++ case kLa64Word64AtomicStoreWord16: ++ ASSEMBLE_ATOMIC_STORE_INTEGER(St_h); ++ break; ++ case kLa64Word64AtomicStoreWord32: ++ ASSEMBLE_ATOMIC_STORE_INTEGER(St_w); ++ break; ++ case kLa64Word64AtomicStoreWord64: ++ ASSEMBLE_ATOMIC_STORE_INTEGER(St_d); ++ break; ++ case kWord32AtomicExchangeInt8: ++ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, true, 8, 32); ++ break; ++ case kWord32AtomicExchangeUint8: ++ 
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, false, 8, 32); ++ break; ++ case kWord32AtomicExchangeInt16: ++ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, true, 16, 32); ++ break; ++ case kWord32AtomicExchangeUint16: ++ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, false, 16, 32); ++ break; ++ case kWord32AtomicExchangeWord32: ++ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); ++ __ amswap_db_w(i.OutputRegister(0), i.InputRegister(2), ++ i.TempRegister(0)); ++ break; ++ case kLa64Word64AtomicExchangeUint8: ++ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 8, 64); ++ break; ++ case kLa64Word64AtomicExchangeUint16: ++ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 16, 64); ++ break; ++ case kLa64Word64AtomicExchangeUint32: ++ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 32, 64); ++ break; ++ case kLa64Word64AtomicExchangeUint64: ++ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); ++ __ amswap_db_d(i.OutputRegister(0), i.InputRegister(2), ++ i.TempRegister(0)); ++ break; ++ case kWord32AtomicCompareExchangeInt8: ++ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, true, 8, 32); ++ break; ++ case kWord32AtomicCompareExchangeUint8: ++ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, false, 8, 32); ++ break; ++ case kWord32AtomicCompareExchangeInt16: ++ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, true, 16, 32); ++ break; ++ case kWord32AtomicCompareExchangeUint16: ++ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, false, 16, 32); ++ break; ++ case kWord32AtomicCompareExchangeWord32: ++ __ slli_w(i.InputRegister(2), i.InputRegister(2), 0); ++ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll_w, Sc_w); ++ break; ++ case kLa64Word64AtomicCompareExchangeUint8: ++ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 8, 64); ++ break; ++ case kLa64Word64AtomicCompareExchangeUint16: ++ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 16, 64); ++ break; ++ case kLa64Word64AtomicCompareExchangeUint32: ++ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 32, 64); ++ break; ++ case kLa64Word64AtomicCompareExchangeUint64: ++ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll_d, Sc_d); ++ break; ++ case kWord32AtomicAddWord32: ++ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); ++ __ amadd_db_w(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0)); ++ break; ++ case kWord32AtomicSubWord32: ++ ASSEMBLE_ATOMIC_BINOP(Ll_w, Sc_w, Sub_w); ++ break; ++ case kWord32AtomicAndWord32: ++ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); ++ __ amand_db_w(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0)); ++ break; ++ case kWord32AtomicOrWord32: ++ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); ++ __ amor_db_w(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0)); ++ break; ++ case kWord32AtomicXorWord32: ++ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); ++ __ amxor_db_w(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0)); ++ break; ++#define ATOMIC_BINOP_CASE(op, inst) \ ++ case kWord32Atomic##op##Int8: \ ++ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, true, 8, inst, 32); \ ++ break; \ ++ case kWord32Atomic##op##Uint8: \ ++ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, false, 8, inst, 32); \ ++ break; \ ++ case kWord32Atomic##op##Int16: \ ++ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, true, 16, inst, 32); \ ++ break; \ ++ case kWord32Atomic##op##Uint16: \ ++ 
ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, false, 16, inst, 32); \ ++ break; ++ ATOMIC_BINOP_CASE(Add, Add_w) ++ ATOMIC_BINOP_CASE(Sub, Sub_w) ++ ATOMIC_BINOP_CASE(And, And) ++ ATOMIC_BINOP_CASE(Or, Or) ++ ATOMIC_BINOP_CASE(Xor, Xor) ++#undef ATOMIC_BINOP_CASE ++ ++ case kLa64Word64AtomicAddUint64: ++ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); ++ __ amadd_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0)); ++ break; ++ case kLa64Word64AtomicSubUint64: ++ ASSEMBLE_ATOMIC_BINOP(Ll_d, Sc_d, Sub_d); ++ break; ++ case kLa64Word64AtomicAndUint64: ++ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); ++ __ amand_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0)); ++ break; ++ case kLa64Word64AtomicOrUint64: ++ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); ++ __ amor_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0)); ++ break; ++ case kLa64Word64AtomicXorUint64: ++ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); ++ __ amxor_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0)); ++ break; ++#define ATOMIC_BINOP_CASE(op, inst) \ ++ case kLa64Word64Atomic##op##Uint8: \ ++ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 8, inst, 64); \ ++ break; \ ++ case kLa64Word64Atomic##op##Uint16: \ ++ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 16, inst, 64); \ ++ break; \ ++ case kLa64Word64Atomic##op##Uint32: \ ++ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 32, inst, 64); \ ++ break; ++ ATOMIC_BINOP_CASE(Add, Add_d) ++ ATOMIC_BINOP_CASE(Sub, Sub_d) ++ ATOMIC_BINOP_CASE(And, And) ++ ATOMIC_BINOP_CASE(Or, Or) ++ ATOMIC_BINOP_CASE(Xor, Xor) ++#undef ATOMIC_BINOP_CASE ++ case kLa64AssertEqual: ++ __ Assert(eq, static_cast(i.InputOperand(2).immediate()), ++ i.InputRegister(0), Operand(i.InputRegister(1))); ++ break; ++ case kLa64S128Zero: ++ case kLa64I32x4Splat: ++ case kLa64I32x4ExtractLane: ++ case kLa64I32x4AddHoriz: ++ case kLa64I32x4Add: ++ case kLa64I32x4ReplaceLane: ++ case kLa64I32x4Sub: ++ case kLa64F64x2Abs: ++ default: ++ break; ++ } ++ return kSuccess; ++} // NOLINT(readability/fn_size) ++ ++#define UNSUPPORTED_COND(opcode, condition) \ ++ StdoutStream{} << "Unsupported " << #opcode << " condition: \"" << condition \ ++ << "\""; \ ++ UNIMPLEMENTED(); ++ ++void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm, ++ Instruction* instr, FlagsCondition condition, ++ Label* tlabel, Label* flabel, bool fallthru) { ++#undef __ ++#define __ tasm-> ++ La64OperandConverter i(gen, instr); ++ ++ Condition cc = kNoCondition; ++ // LA64 does not have condition code flags, so compare and branch are ++ // implemented differently than on the other arch's. The compare operations ++ // emit la64 pseudo-instructions, which are handled here by branch ++ // instructions that do the actual comparison. Essential that the input ++ // registers to compare pseudo-op are not modified before this branch op, as ++ // they are tested here. 
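++  // For illustration, a kLa64Cmp pseudo-op paired with a signed "less than"
++  // flags condition folds into a single compare-and-branch macro call of the
++  // form
++  //   __ Branch(tlabel, lt, i.InputRegister(0), i.InputOperand(1));
++  // rather than a flag-setting compare followed by a separate conditional
++  // jump.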
++ ++ if (instr->arch_opcode() == kLa64Tst) { ++ cc = FlagsConditionToConditionTst(condition); ++ __ Branch(tlabel, cc, kScratchReg, Operand(zero_reg)); ++ } else if (instr->arch_opcode() == kLa64Dadd || ++ instr->arch_opcode() == kLa64Dsub) { ++ cc = FlagsConditionToConditionOvf(condition); ++ __ srai_d(kScratchReg, i.OutputRegister(), 32); ++ __ srai_w(kScratchReg2, i.OutputRegister(), 31); ++ __ Branch(tlabel, cc, kScratchReg2, Operand(kScratchReg)); ++ } else if (instr->arch_opcode() == kLa64DaddOvf || ++ instr->arch_opcode() == kLa64DsubOvf) { ++ switch (condition) { ++ // Overflow occurs if overflow register is negative ++ case kOverflow: ++ __ Branch(tlabel, lt, kScratchReg, Operand(zero_reg)); ++ break; ++ case kNotOverflow: ++ __ Branch(tlabel, ge, kScratchReg, Operand(zero_reg)); ++ break; ++ default: ++ UNSUPPORTED_COND(instr->arch_opcode(), condition); ++ break; ++ } ++ } else if (instr->arch_opcode() == kLa64MulOvf) { ++ // Overflow occurs if overflow register is not zero ++ switch (condition) { ++ case kOverflow: ++ __ Branch(tlabel, ne, kScratchReg, Operand(zero_reg)); ++ break; ++ case kNotOverflow: ++ __ Branch(tlabel, eq, kScratchReg, Operand(zero_reg)); ++ break; ++ default: ++ UNSUPPORTED_COND(kLa64MulOvf, condition); ++ break; ++ } ++ } else if (instr->arch_opcode() == kLa64Cmp) { ++ cc = FlagsConditionToConditionCmp(condition); ++ __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1)); ++ } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) { ++ cc = FlagsConditionToConditionCmp(condition); ++ Register lhs_register = sp; ++ uint32_t offset; ++ if (gen->ShouldApplyOffsetToStackCheck(instr, &offset)) { ++ lhs_register = i.TempRegister(0); ++ __ Sub_d(lhs_register, sp, offset); ++ } ++ __ Branch(tlabel, cc, lhs_register, Operand(i.InputRegister(0))); ++ } else if (instr->arch_opcode() == kLa64CmpS || ++ instr->arch_opcode() == kLa64CmpD) { ++ bool predicate; ++ FlagsConditionToConditionCmpFPU(&predicate, condition); ++ if (predicate) { ++ __ BranchTrueF(tlabel); ++ } else { ++ __ BranchFalseF(tlabel); ++ } ++ } else { ++ PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n", ++ instr->arch_opcode()); ++ UNIMPLEMENTED(); ++ } ++ if (!fallthru) __ Branch(flabel); // no fallthru to flabel. ++#undef __ ++#define __ tasm()-> ++} ++ ++// Assembles branches after an instruction. ++void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { ++ Label* tlabel = branch->true_label; ++ Label* flabel = branch->false_label; ++ ++ AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel, ++ branch->fallthru); ++} ++ ++void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition, ++ Instruction* instr) { ++ // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal). 
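++  // Roughly speaking, the poison register keeps its all-ones value on the
++  // correctly predicted path and is cleared to zero when the negated
++  // condition holds, so loads masked with it yield zero on a misspeculated
++  // path.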
++ if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) { ++ return; ++ } ++ ++ La64OperandConverter i(this, instr); ++ condition = NegateFlagsCondition(condition); ++ ++ switch (instr->arch_opcode()) { ++ case kLa64Cmp: { ++ __ LoadZeroOnCondition(kSpeculationPoisonRegister, i.InputRegister(0), ++ i.InputOperand(1), ++ FlagsConditionToConditionCmp(condition)); ++ } ++ return; ++ case kLa64Tst: { ++ switch (condition) { ++ case kEqual: ++ __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg); ++ break; ++ case kNotEqual: ++ __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, ++ kScratchReg); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } ++ return; ++ case kLa64Dadd: ++ case kLa64Dsub: { ++ // Check for overflow creates 1 or 0 for result. ++ __ srli_d(kScratchReg, i.OutputRegister(), 63); ++ __ srli_w(kScratchReg2, i.OutputRegister(), 31); ++ __ xor_(kScratchReg2, kScratchReg, kScratchReg2); ++ switch (condition) { ++ case kOverflow: ++ __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, ++ kScratchReg2); ++ break; ++ case kNotOverflow: ++ __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2); ++ break; ++ default: ++ UNSUPPORTED_COND(instr->arch_opcode(), condition); ++ } ++ } ++ return; ++ case kLa64DaddOvf: ++ case kLa64DsubOvf: { ++ // Overflow occurs if overflow register is negative ++ __ Slt(kScratchReg2, kScratchReg, zero_reg); ++ switch (condition) { ++ case kOverflow: ++ __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, ++ kScratchReg2); ++ break; ++ case kNotOverflow: ++ __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2); ++ break; ++ default: ++ UNSUPPORTED_COND(instr->arch_opcode(), condition); ++ } ++ } ++ return; ++ case kLa64MulOvf: { ++ // Overflow occurs if overflow register is not zero ++ switch (condition) { ++ case kOverflow: ++ __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, ++ kScratchReg); ++ break; ++ case kNotOverflow: ++ __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg); ++ break; ++ default: ++ UNSUPPORTED_COND(instr->arch_opcode(), condition); ++ } ++ } ++ return; ++ case kLa64CmpS: ++ case kLa64CmpD: { ++ bool predicate; ++ FlagsConditionToConditionCmpFPU(&predicate, condition); ++ if (predicate) { ++ __ LoadZeroIfFPUCondition(kSpeculationPoisonRegister); ++ } else { ++ __ LoadZeroIfNotFPUCondition(kSpeculationPoisonRegister); ++ } ++ } ++ return; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++#undef UNSUPPORTED_COND ++ ++void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr, ++ BranchInfo* branch) { ++ AssembleArchBranch(instr, branch); ++} ++ ++void CodeGenerator::AssembleArchJump(RpoNumber target) { ++ if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target)); ++} ++ ++void CodeGenerator::AssembleArchTrap(Instruction* instr, ++ FlagsCondition condition) { ++ class OutOfLineTrap final : public OutOfLineCode { ++ public: ++ OutOfLineTrap(CodeGenerator* gen, Instruction* instr) ++ : OutOfLineCode(gen), instr_(instr), gen_(gen) {} ++ void Generate() final { ++ La64OperandConverter i(gen_, instr_); ++ TrapId trap_id = ++ static_cast(i.InputInt32(instr_->InputCount() - 1)); ++ GenerateCallToTrap(trap_id); ++ } ++ ++ private: ++ void GenerateCallToTrap(TrapId trap_id) { ++ if (trap_id == TrapId::kInvalid) { ++ // We cannot test calls to the runtime in cctest/test-run-wasm. ++ // Therefore we emit a call to C here instead of a call to the runtime. 
++ // We use the context register as the scratch register, because we do ++ // not have a context here. ++ __ PrepareCallCFunction(0, 0, cp); ++ __ CallCFunction( ++ ExternalReference::wasm_call_trap_callback_for_testing(), 0); ++ __ LeaveFrame(StackFrame::WASM_COMPILED); ++ auto call_descriptor = gen_->linkage()->GetIncomingDescriptor(); ++ int pop_count = ++ static_cast(call_descriptor->StackParameterCount()); ++ pop_count += (pop_count & 1); // align ++ __ Drop(pop_count); ++ __ Ret(); ++ } else { ++ gen_->AssembleSourcePosition(instr_); ++ // A direct call to a wasm runtime stub defined in this module. ++ // Just encode the stub index. This will be patched when the code ++ // is added to the native module and copied into wasm code space. ++ __ Call(static_cast
(trap_id), RelocInfo::WASM_STUB_CALL); ++ ReferenceMap* reference_map = ++ new (gen_->zone()) ReferenceMap(gen_->zone()); ++ gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt); ++ if (FLAG_debug_code) { ++ __ stop(); ++ } ++ } ++ } ++ Instruction* instr_; ++ CodeGenerator* gen_; ++ }; ++ auto ool = new (zone()) OutOfLineTrap(this, instr); ++ Label* tlabel = ool->entry(); ++ AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true); ++} ++ ++// Assembles boolean materializations after an instruction. ++void CodeGenerator::AssembleArchBoolean(Instruction* instr, ++ FlagsCondition condition) { ++ La64OperandConverter i(this, instr); ++ ++ // Materialize a full 32-bit 1 or 0 value. The result register is always the ++ // last output of the instruction. ++ DCHECK_NE(0u, instr->OutputCount()); ++ Register result = i.OutputRegister(instr->OutputCount() - 1); ++ Condition cc = kNoCondition; ++ // La64 does not have condition code flags, so compare and branch are ++ // implemented differently than on the other arch's. The compare operations ++ // emit la64 pseudo-instructions, which are checked and handled here. ++ ++ if (instr->arch_opcode() == kLa64Tst) { ++ cc = FlagsConditionToConditionTst(condition); ++ if (cc == eq) { ++ __ Sltu(result, kScratchReg, 1); ++ } else { ++ __ Sltu(result, zero_reg, kScratchReg); ++ } ++ return; ++ } else if (instr->arch_opcode() == kLa64Dadd || ++ instr->arch_opcode() == kLa64Dsub) { ++ cc = FlagsConditionToConditionOvf(condition); ++ // Check for overflow creates 1 or 0 for result. ++ __ srli_d(kScratchReg, i.OutputRegister(), 63); ++ __ srli_w(kScratchReg2, i.OutputRegister(), 31); ++ __ xor_(result, kScratchReg, kScratchReg2); ++ if (cc == eq) // Toggle result for not overflow. ++ __ xori(result, result, 1); ++ return; ++ } else if (instr->arch_opcode() == kLa64DaddOvf || ++ instr->arch_opcode() == kLa64DsubOvf) { ++ // Overflow occurs if overflow register is negative ++ __ slt(result, kScratchReg, zero_reg); ++ } else if (instr->arch_opcode() == kLa64MulOvf) { ++ // Overflow occurs if overflow register is not zero ++ __ Sgtu(result, kScratchReg, zero_reg); ++ } else if (instr->arch_opcode() == kLa64Cmp) { ++ cc = FlagsConditionToConditionCmp(condition); ++ switch (cc) { ++ case eq: ++ case ne: { ++ Register left = i.InputRegister(0); ++ Operand right = i.InputOperand(1); ++ if (instr->InputAt(1)->IsImmediate()) { ++ if (is_int12(-right.immediate())) { ++ if (right.immediate() == 0) { ++ if (cc == eq) { ++ __ Sltu(result, left, 1); ++ } else { ++ __ Sltu(result, zero_reg, left); ++ } ++ } else { ++ __ Add_d(result, left, Operand(-right.immediate())); ++ if (cc == eq) { ++ __ Sltu(result, result, 1); ++ } else { ++ __ Sltu(result, zero_reg, result); ++ } ++ } ++ } else { ++ if (is_uint12(right.immediate())) { ++ __ Xor(result, left, right); ++ } else { ++ __ li(kScratchReg, right); ++ __ Xor(result, left, kScratchReg); ++ } ++ if (cc == eq) { ++ __ Sltu(result, result, 1); ++ } else { ++ __ Sltu(result, zero_reg, result); ++ } ++ } ++ } else { ++ __ Xor(result, left, right); ++ if (cc == eq) { ++ __ Sltu(result, result, 1); ++ } else { ++ __ Sltu(result, zero_reg, result); ++ } ++ } ++ } break; ++ case lt: ++ case ge: { ++ Register left = i.InputRegister(0); ++ Operand right = i.InputOperand(1); ++ __ Slt(result, left, right); ++ if (cc == ge) { ++ __ xori(result, result, 1); ++ } ++ } break; ++ case gt: ++ case le: { ++ Register left = i.InputRegister(1); ++ Operand right = i.InputOperand(0); ++ __ Slt(result, left, right); ++ if 
(cc == le) { ++ __ xori(result, result, 1); ++ } ++ } break; ++ case lo: ++ case hs: { ++ Register left = i.InputRegister(0); ++ Operand right = i.InputOperand(1); ++ __ Sltu(result, left, right); ++ if (cc == hs) { ++ __ xori(result, result, 1); ++ } ++ } break; ++ case hi: ++ case ls: { ++ Register left = i.InputRegister(1); ++ Operand right = i.InputOperand(0); ++ __ Sltu(result, left, right); ++ if (cc == ls) { ++ __ xori(result, result, 1); ++ } ++ } break; ++ default: ++ UNREACHABLE(); ++ } ++ return; ++ } else if (instr->arch_opcode() == kLa64CmpD || ++ instr->arch_opcode() == kLa64CmpS) { ++ FPURegister left = i.InputOrZeroDoubleRegister(0); ++ FPURegister right = i.InputOrZeroDoubleRegister(1); ++ if ((left == kDoubleRegZero || right == kDoubleRegZero) && ++ !__ IsDoubleZeroRegSet()) { ++ __ Move(kDoubleRegZero, 0.0); ++ } ++ bool predicate; ++ FlagsConditionToConditionCmpFPU(&predicate, condition); ++ { ++ __ movcf2gr(result, FCC0); ++ if (!predicate) { ++ __ xori(result, result, 1); ++ } ++ } ++ return; ++ } else { ++ PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n", ++ instr->arch_opcode()); ++ TRACE_UNIMPL(); ++ UNIMPLEMENTED(); ++ } ++} ++ ++void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) { ++ La64OperandConverter i(this, instr); ++ Register input = i.InputRegister(0); ++ std::vector> cases; ++ for (size_t index = 2; index < instr->InputCount(); index += 2) { ++ cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))}); ++ } ++ AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(), ++ cases.data() + cases.size()); ++} ++ ++void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) { ++ La64OperandConverter i(this, instr); ++ Register input = i.InputRegister(0); ++ size_t const case_count = instr->InputCount() - 2; ++ ++ __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count)); ++ __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) { ++ return GetLabel(i.InputRpo(index + 2)); ++ }); ++} ++ ++void CodeGenerator::FinishFrame(Frame* frame) { ++ auto call_descriptor = linkage()->GetIncomingDescriptor(); ++ ++ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters(); ++ if (saves_fpu != 0) { ++ int count = base::bits::CountPopulation(saves_fpu); ++ DCHECK_EQ(kNumCalleeSavedFPU, count); ++ frame->AllocateSavedCalleeRegisterSlots(count * ++ (kDoubleSize / kSystemPointerSize)); ++ } ++ ++ const RegList saves = call_descriptor->CalleeSavedRegisters(); ++ if (saves != 0) { ++ int count = base::bits::CountPopulation(saves); ++ DCHECK_EQ(kNumCalleeSaved, count + 1); ++ frame->AllocateSavedCalleeRegisterSlots(count); ++ } ++} ++ ++void CodeGenerator::AssembleConstructFrame() { ++ auto call_descriptor = linkage()->GetIncomingDescriptor(); ++ ++ if (frame_access_state()->has_frame()) { ++ if (call_descriptor->IsCFunctionCall()) { ++ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) { ++ __ StubPrologue(StackFrame::C_WASM_ENTRY); ++ // Reserve stack space for saving the c_entry_fp later. 
++ __ Sub_d(sp, sp, Operand(kSystemPointerSize)); ++ } else { ++ __ Push(ra, fp); ++ __ mov(fp, sp); ++ } ++ } else if (call_descriptor->IsJSFunctionCall()) { ++ __ Prologue(); ++ if (call_descriptor->PushArgumentCount()) { ++ __ Push(kJavaScriptCallArgCountRegister); ++ } ++ } else { ++ __ StubPrologue(info()->GetOutputStackFrameType()); ++ if (call_descriptor->IsWasmFunctionCall()) { ++ __ Push(kWasmInstanceRegister); ++ } else if (call_descriptor->IsWasmImportWrapper() || ++ call_descriptor->IsWasmCapiFunction()) { ++ // Wasm import wrappers are passed a tuple in the place of the instance. ++ // Unpack the tuple into the instance and the target callable. ++ // This must be done here in the codegen because it cannot be expressed ++ // properly in the graph. ++ __ Ld_d(kJSFunctionRegister, ++ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset)); ++ __ Ld_d(kWasmInstanceRegister, ++ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset)); ++ __ Push(kWasmInstanceRegister); ++ if (call_descriptor->IsWasmCapiFunction()) { ++ // Reserve space for saving the PC later. ++ __ Sub_d(sp, sp, Operand(kSystemPointerSize)); ++ } ++ } ++ } ++ } ++ ++ int required_slots = ++ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount(); ++ ++ if (info()->is_osr()) { ++ // TurboFan OSR-compiled functions cannot be entered directly. ++ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction); ++ ++ // Unoptimized code jumps directly to this entrypoint while the unoptimized ++ // frame is still on the stack. Optimized code uses OSR values directly from ++ // the unoptimized frame. Thus, all that needs to be done is to allocate the ++ // remaining stack slots. ++ if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --"); ++ osr_pc_offset_ = __ pc_offset(); ++ required_slots -= osr_helper()->UnoptimizedFrameSlots(); ++ ResetSpeculationPoison(); ++ } ++ ++ const RegList saves = call_descriptor->CalleeSavedRegisters(); ++ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters(); ++ ++ if (required_slots > 0) { ++ DCHECK(frame_access_state()->has_frame()); ++ if (info()->IsWasm() && required_slots > 128) { ++ // For WebAssembly functions with big frames we have to do the stack ++ // overflow check before we construct the frame. Otherwise we may not ++ // have enough space on the stack to call the runtime for the stack ++ // overflow. ++ Label done; ++ ++ // If the frame is bigger than the stack, we throw the stack overflow ++ // exception unconditionally. Thereby we can avoid the integer overflow ++ // check in the condition code. ++ if ((required_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) { ++ __ Ld_d( ++ kScratchReg, ++ FieldMemOperand(kWasmInstanceRegister, ++ WasmInstanceObject::kRealStackLimitAddressOffset)); ++ __ Ld_d(kScratchReg, MemOperand(kScratchReg, 0)); ++ __ Add_d(kScratchReg, kScratchReg, ++ Operand(required_slots * kSystemPointerSize)); ++ __ Branch(&done, uge, sp, Operand(kScratchReg)); ++ } ++ ++ __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL); ++ // We come from WebAssembly, there are no references for the GC. ++ ReferenceMap* reference_map = new (zone()) ReferenceMap(zone()); ++ RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt); ++ if (FLAG_debug_code) { ++ __ stop(); ++ } ++ ++ __ bind(&done); ++ } ++ } ++ ++ const int returns = frame()->GetReturnSlotCount(); ++ ++ // Skip callee-saved and return slots, which are pushed below. 
++ required_slots -= base::bits::CountPopulation(saves); ++ required_slots -= base::bits::CountPopulation(saves_fpu); ++ required_slots -= returns; ++ if (required_slots > 0) { ++ __ Sub_d(sp, sp, Operand(required_slots * kSystemPointerSize)); ++ } ++ ++ if (saves_fpu != 0) { ++ // Save callee-saved FPU registers. ++ __ MultiPushFPU(saves_fpu); ++ DCHECK_EQ(kNumCalleeSavedFPU, base::bits::CountPopulation(saves_fpu)); ++ } ++ ++ if (saves != 0) { ++ // Save callee-saved registers. ++ __ MultiPush(saves); ++ DCHECK_EQ(kNumCalleeSaved, base::bits::CountPopulation(saves) + 1); ++ } ++ ++ if (returns != 0) { ++ // Create space for returns. ++ __ Sub_d(sp, sp, Operand(returns * kSystemPointerSize)); ++ } ++} ++ ++void CodeGenerator::AssembleReturn(InstructionOperand* pop) { ++ auto call_descriptor = linkage()->GetIncomingDescriptor(); ++ ++ const int returns = frame()->GetReturnSlotCount(); ++ if (returns != 0) { ++ __ Add_d(sp, sp, Operand(returns * kSystemPointerSize)); ++ } ++ ++ // Restore GP registers. ++ const RegList saves = call_descriptor->CalleeSavedRegisters(); ++ if (saves != 0) { ++ __ MultiPop(saves); ++ } ++ ++ // Restore FPU registers. ++ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters(); ++ if (saves_fpu != 0) { ++ __ MultiPopFPU(saves_fpu); ++ } ++ ++ La64OperandConverter g(this, nullptr); ++ if (call_descriptor->IsCFunctionCall()) { ++ AssembleDeconstructFrame(); ++ } else if (frame_access_state()->has_frame()) { ++ // Canonicalize JSFunction return sites for now unless they have an variable ++ // number of stack slot pops. ++ if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) { ++ if (return_label_.is_bound()) { ++ __ Branch(&return_label_); ++ return; ++ } else { ++ __ bind(&return_label_); ++ AssembleDeconstructFrame(); ++ } ++ } else { ++ AssembleDeconstructFrame(); ++ } ++ } ++ int pop_count = static_cast(call_descriptor->StackParameterCount()); ++ if (pop->IsImmediate()) { ++ pop_count += g.ToConstant(pop).ToInt32(); ++ } else { ++ Register pop_reg = g.ToRegister(pop); ++ __ slli_d(pop_reg, pop_reg, kSystemPointerSizeLog2); ++ __ Add_d(sp, sp, pop_reg); ++ } ++ if (pop_count != 0) { ++ __ DropAndRet(pop_count); ++ } else { ++ __ Ret(); ++ } ++} ++ ++void CodeGenerator::FinishCode() {} ++ ++void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {} ++ ++void CodeGenerator::AssembleMove(InstructionOperand* source, ++ InstructionOperand* destination) { ++ La64OperandConverter g(this, nullptr); ++ // Dispatch on the source and destination operand kinds. Not all ++ // combinations are possible. ++ if (source->IsRegister()) { ++ DCHECK(destination->IsRegister() || destination->IsStackSlot()); ++ Register src = g.ToRegister(source); ++ if (destination->IsRegister()) { ++ __ mov(g.ToRegister(destination), src); ++ } else { ++ __ St_d(src, g.ToMemOperand(destination)); ++ } ++ } else if (source->IsStackSlot()) { ++ DCHECK(destination->IsRegister() || destination->IsStackSlot()); ++ MemOperand src = g.ToMemOperand(source); ++ if (destination->IsRegister()) { ++ __ Ld_d(g.ToRegister(destination), src); ++ } else { ++ Register temp = kScratchReg; ++ __ Ld_d(temp, src); ++ __ St_d(temp, g.ToMemOperand(destination)); ++ } ++ } else if (source->IsConstant()) { ++ Constant src = g.ToConstant(source); ++ if (destination->IsRegister() || destination->IsStackSlot()) { ++ Register dst = ++ destination->IsRegister() ? 
g.ToRegister(destination) : kScratchReg; ++ switch (src.type()) { ++ case Constant::kInt32: ++ __ li(dst, Operand(src.ToInt32())); ++ break; ++ case Constant::kFloat32: ++ __ li(dst, Operand::EmbeddedNumber(src.ToFloat32())); ++ break; ++ case Constant::kInt64: ++ if (RelocInfo::IsWasmReference(src.rmode())) { ++ __ li(dst, Operand(src.ToInt64(), src.rmode())); ++ } else { ++ __ li(dst, Operand(src.ToInt64())); ++ } ++ break; ++ case Constant::kFloat64: ++ __ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value())); ++ break; ++ case Constant::kExternalReference: ++ __ li(dst, src.ToExternalReference()); ++ break; ++ case Constant::kDelayedStringConstant: ++ __ li(dst, src.ToDelayedStringConstant()); ++ break; ++ case Constant::kHeapObject: { ++ Handle src_object = src.ToHeapObject(); ++ RootIndex index; ++ if (IsMaterializableFromRoot(src_object, &index)) { ++ __ LoadRoot(dst, index); ++ } else { ++ __ li(dst, src_object); ++ } ++ break; ++ } ++ case Constant::kCompressedHeapObject: ++ UNREACHABLE(); ++ case Constant::kRpoNumber: ++ UNREACHABLE(); // TODO(titzer): loading RPO numbers on LA64. ++ break; ++ } ++ if (destination->IsStackSlot()) __ St_d(dst, g.ToMemOperand(destination)); ++ } else if (src.type() == Constant::kFloat32) { ++ if (destination->IsFPStackSlot()) { ++ MemOperand dst = g.ToMemOperand(destination); ++ if (bit_cast(src.ToFloat32()) == 0) { ++ __ St_d(zero_reg, dst); ++ } else { ++ __ li(kScratchReg, Operand(bit_cast(src.ToFloat32()))); ++ __ St_d(kScratchReg, dst); ++ } ++ } else { ++ DCHECK(destination->IsFPRegister()); ++ FloatRegister dst = g.ToSingleRegister(destination); ++ __ Move(dst, src.ToFloat32()); ++ } ++ } else { ++ DCHECK_EQ(Constant::kFloat64, src.type()); ++ DoubleRegister dst = destination->IsFPRegister() ++ ? g.ToDoubleRegister(destination) ++ : kScratchDoubleReg; ++ __ Move(dst, src.ToFloat64().value()); ++ if (destination->IsFPStackSlot()) { ++ __ Fst_d(dst, g.ToMemOperand(destination)); ++ } ++ } ++ } else if (source->IsFPRegister()) { ++ FPURegister src = g.ToDoubleRegister(source); ++ if (destination->IsFPRegister()) { ++ FPURegister dst = g.ToDoubleRegister(destination); ++ __ Move(dst, src); ++ } else { ++ DCHECK(destination->IsFPStackSlot()); ++ __ Fst_d(src, g.ToMemOperand(destination)); ++ } ++ } else if (source->IsFPStackSlot()) { ++ DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot()); ++ MemOperand src = g.ToMemOperand(source); ++ if (destination->IsFPRegister()) { ++ __ Fld_d(g.ToDoubleRegister(destination), src); ++ } else { ++ DCHECK(destination->IsFPStackSlot()); ++ FPURegister temp = kScratchDoubleReg; ++ __ Fld_d(temp, src); ++ __ Fst_d(temp, g.ToMemOperand(destination)); ++ } ++ } else { ++ UNREACHABLE(); ++ } ++} ++ ++void CodeGenerator::AssembleSwap(InstructionOperand* source, ++ InstructionOperand* destination) { ++ La64OperandConverter g(this, nullptr); ++ // Dispatch on the source and destination operand kinds. Not all ++ // combinations are possible. ++ if (source->IsRegister()) { ++ // Register-register. 
++ Register temp = kScratchReg; ++ Register src = g.ToRegister(source); ++ if (destination->IsRegister()) { ++ Register dst = g.ToRegister(destination); ++ __ Move(temp, src); ++ __ Move(src, dst); ++ __ Move(dst, temp); ++ } else { ++ DCHECK(destination->IsStackSlot()); ++ MemOperand dst = g.ToMemOperand(destination); ++ __ mov(temp, src); ++ __ Ld_d(src, dst); ++ __ St_d(temp, dst); ++ } ++ } else if (source->IsStackSlot()) { ++ DCHECK(destination->IsStackSlot()); ++ Register temp_0 = kScratchReg; ++ Register temp_1 = kScratchReg2; ++ MemOperand src = g.ToMemOperand(source); ++ MemOperand dst = g.ToMemOperand(destination); ++ __ Ld_d(temp_0, src); ++ __ Ld_d(temp_1, dst); ++ __ St_d(temp_0, dst); ++ __ St_d(temp_1, src); ++ } else if (source->IsFPRegister()) { ++ FPURegister temp = kScratchDoubleReg; ++ FPURegister src = g.ToDoubleRegister(source); ++ if (destination->IsFPRegister()) { ++ FPURegister dst = g.ToDoubleRegister(destination); ++ __ Move(temp, src); ++ __ Move(src, dst); ++ __ Move(dst, temp); ++ } else { ++ DCHECK(destination->IsFPStackSlot()); ++ MemOperand dst = g.ToMemOperand(destination); ++ __ Move(temp, src); ++ __ Fld_d(src, dst); ++ __ Fst_d(temp, dst); ++ } ++ } else if (source->IsFPStackSlot()) { ++ DCHECK(destination->IsFPStackSlot()); ++ Register temp_0 = kScratchReg; ++ MemOperand src0 = g.ToMemOperand(source); ++ MemOperand src1(src0.base(), src0.offset() + kIntSize); ++ MemOperand dst0 = g.ToMemOperand(destination); ++ MemOperand dst1(dst0.base(), dst0.offset() + kIntSize); ++ FPURegister temp_1 = kScratchDoubleReg; ++ __ Fld_d(temp_1, dst0); // Save destination in temp_1. ++ __ Ld_w(temp_0, src0); // Then use temp_0 to copy source to destination. ++ __ St_w(temp_0, dst0); ++ __ Ld_w(temp_0, src1); ++ __ St_w(temp_0, dst1); ++ __ Fst_d(temp_1, src0); ++ } else { ++ // No other combinations are possible. ++ UNREACHABLE(); ++ } ++} ++ ++void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) { ++ // On 64-bit LA64 we emit the jump tables inline. ++ UNREACHABLE(); ++} ++ ++#undef ASSEMBLE_ATOMIC_LOAD_INTEGER ++#undef ASSEMBLE_ATOMIC_STORE_INTEGER ++#undef ASSEMBLE_ATOMIC_BINOP ++#undef ASSEMBLE_ATOMIC_BINOP_EXT ++#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER ++#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT ++#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER ++#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT ++#undef ASSEMBLE_IEEE754_BINOP ++#undef ASSEMBLE_IEEE754_UNOP ++ ++#undef TRACE_MSG ++#undef TRACE_UNIMPL ++#undef __ ++ ++} // namespace compiler ++} // namespace internal ++} // namespace v8 +diff --git a/src/3rdparty/chromium/v8/src/compiler/backend/la64/instruction-codes-la64.h b/src/3rdparty/chromium/v8/src/compiler/backend/la64/instruction-codes-la64.h +new file mode 100644 +index 0000000000..b8a2d97961 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/compiler/backend/la64/instruction-codes-la64.h +@@ -0,0 +1,412 @@ ++// Copyright 2014 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#ifndef V8_COMPILER_BACKEND_LA64_INSTRUCTION_CODES_LA64_H_ ++#define V8_COMPILER_BACKEND_LA64_INSTRUCTION_CODES_LA64_H_ ++ ++namespace v8 { ++namespace internal { ++namespace compiler { ++ ++// LA64-specific opcodes that specify which assembly sequence to emit. ++// Most opcodes specify a single instruction. 
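++// For example, kLa64Or assembles to a single `Or`, while 32-bit variants such
++// as kLa64Or32 additionally emit `slli_w dst, dst, 0` to keep the result
++// sign-extended; see code-generator-la64.cc above for the exact sequences.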
++#define TARGET_ARCH_OPCODE_LIST(V) \ ++ V(La64Add) \ ++ V(La64Dadd) \ ++ V(La64DaddOvf) \ ++ V(La64Sub) \ ++ V(La64Dsub) \ ++ V(La64DsubOvf) \ ++ V(La64Mul) \ ++ V(La64MulOvf) \ ++ V(La64MulHigh) \ ++ V(La64DMulHigh) \ ++ V(La64MulHighU) \ ++ V(La64Dmul) \ ++ V(La64Div) \ ++ V(La64Ddiv) \ ++ V(La64DivU) \ ++ V(La64DdivU) \ ++ V(La64Mod) \ ++ V(La64Dmod) \ ++ V(La64ModU) \ ++ V(La64DmodU) \ ++ V(La64And) \ ++ V(La64And32) \ ++ V(La64Or) \ ++ V(La64Or32) \ ++ V(La64Nor) \ ++ V(La64Nor32) \ ++ V(La64Xor) \ ++ V(La64Xor32) \ ++ V(La64Clz) \ ++ V(La64Lsa) \ ++ V(La64Dlsa) \ ++ V(La64Shl) \ ++ V(La64Shr) \ ++ V(La64Sar) \ ++ V(La64Ext) \ ++ V(La64Ins) \ ++ V(La64Dext) \ ++ V(La64Dins) \ ++ V(La64Dclz) \ ++ V(La64Ctz) \ ++ V(La64Dctz) \ ++ V(La64Popcnt) \ ++ V(La64Dpopcnt) \ ++ V(La64Dshl) \ ++ V(La64Dshr) \ ++ V(La64Dsar) \ ++ V(La64Ror) \ ++ V(La64Dror) \ ++ V(La64Mov) \ ++ V(La64Tst) \ ++ V(La64Cmp) \ ++ V(La64CmpS) \ ++ V(La64AddS) \ ++ V(La64SubS) \ ++ V(La64MulS) \ ++ V(La64DivS) \ ++ V(La64ModS) \ ++ V(La64AbsS) \ ++ V(La64NegS) \ ++ V(La64SqrtS) \ ++ V(La64MaxS) \ ++ V(La64MinS) \ ++ V(La64CmpD) \ ++ V(La64AddD) \ ++ V(La64SubD) \ ++ V(La64MulD) \ ++ V(La64DivD) \ ++ V(La64ModD) \ ++ V(La64AbsD) \ ++ V(La64NegD) \ ++ V(La64SqrtD) \ ++ V(La64MaxD) \ ++ V(La64MinD) \ ++ V(La64Float64RoundDown) \ ++ V(La64Float64RoundTruncate) \ ++ V(La64Float64RoundUp) \ ++ V(La64Float64RoundTiesEven) \ ++ V(La64Float32RoundDown) \ ++ V(La64Float32RoundTruncate) \ ++ V(La64Float32RoundUp) \ ++ V(La64Float32RoundTiesEven) \ ++ V(La64CvtSD) \ ++ V(La64CvtDS) \ ++ V(La64TruncWD) \ ++ V(La64RoundWD) \ ++ V(La64FloorWD) \ ++ V(La64CeilWD) \ ++ V(La64TruncWS) \ ++ V(La64RoundWS) \ ++ V(La64FloorWS) \ ++ V(La64CeilWS) \ ++ V(La64TruncLS) \ ++ V(La64TruncLD) \ ++ V(La64TruncUwD) \ ++ V(La64TruncUwS) \ ++ V(La64TruncUlS) \ ++ V(La64TruncUlD) \ ++ V(La64CvtDW) \ ++ V(La64CvtSL) \ ++ V(La64CvtSW) \ ++ V(La64CvtSUw) \ ++ V(La64CvtSUl) \ ++ V(La64CvtDL) \ ++ V(La64CvtDUw) \ ++ V(La64CvtDUl) \ ++ V(La64Lb) \ ++ V(La64Lbu) \ ++ V(La64Sb) \ ++ V(La64Lh) \ ++ V(La64Ulh) \ ++ V(La64Lhu) \ ++ V(La64Ulhu) \ ++ V(La64Sh) \ ++ V(La64Ush) \ ++ V(La64Ld) \ ++ V(La64Uld) \ ++ V(La64Lw) \ ++ V(La64Ulw) \ ++ V(La64Lwu) \ ++ V(La64Ulwu) \ ++ V(La64Sw) \ ++ V(La64Usw) \ ++ V(La64Sd) \ ++ V(La64Usd) \ ++ V(La64Lwc1) \ ++ V(La64Ulwc1) \ ++ V(La64Swc1) \ ++ V(La64Uswc1) \ ++ V(La64Ldc1) \ ++ V(La64Uldc1) \ ++ V(La64Sdc1) \ ++ V(La64Usdc1) \ ++ V(La64BitcastDL) \ ++ V(La64BitcastLD) \ ++ V(La64Float64ExtractLowWord32) \ ++ V(La64Float64ExtractHighWord32) \ ++ V(La64Float64InsertLowWord32) \ ++ V(La64Float64InsertHighWord32) \ ++ V(La64Float32Max) \ ++ V(La64Float64Max) \ ++ V(La64Float32Min) \ ++ V(La64Float64Min) \ ++ V(La64Float64SilenceNaN) \ ++ V(La64Push) \ ++ V(La64Peek) \ ++ V(La64StoreToStackSlot) \ ++ V(La64ByteSwap64) \ ++ V(La64ByteSwap32) \ ++ V(La64StackClaim) \ ++ V(La64Seb) \ ++ V(La64Seh) \ ++ V(La64Sync) \ ++ V(La64AssertEqual) \ ++ V(La64S128Zero) \ ++ V(La64I32x4Splat) \ ++ V(La64I32x4ExtractLane) \ ++ V(La64I32x4ReplaceLane) \ ++ V(La64I32x4Add) \ ++ V(La64I32x4AddHoriz) \ ++ V(La64I32x4Sub) \ ++ V(La64F64x2Abs) \ ++ V(La64F64x2Neg) \ ++ V(La64F32x4Splat) \ ++ V(La64F32x4ExtractLane) \ ++ V(La64F32x4ReplaceLane) \ ++ V(La64F32x4SConvertI32x4) \ ++ V(La64F32x4UConvertI32x4) \ ++ V(La64I32x4Mul) \ ++ V(La64I32x4MaxS) \ ++ V(La64I32x4MinS) \ ++ V(La64I32x4Eq) \ ++ V(La64I32x4Ne) \ ++ V(La64I32x4Shl) \ ++ V(La64I32x4ShrS) \ ++ V(La64I32x4ShrU) \ ++ V(La64I32x4MaxU) \ ++ V(La64I32x4MinU) \ ++ V(La64F64x2Sqrt) \ ++ 
V(La64F64x2Add) \ ++ V(La64F64x2Sub) \ ++ V(La64F64x2Mul) \ ++ V(La64F64x2Div) \ ++ V(La64F64x2Min) \ ++ V(La64F64x2Max) \ ++ V(La64F64x2Eq) \ ++ V(La64F64x2Ne) \ ++ V(La64F64x2Lt) \ ++ V(La64F64x2Le) \ ++ V(La64F64x2Splat) \ ++ V(La64F64x2ExtractLane) \ ++ V(La64F64x2ReplaceLane) \ ++ V(La64I64x2Add) \ ++ V(La64I64x2Sub) \ ++ V(La64I64x2Mul) \ ++ V(La64I64x2Neg) \ ++ V(La64I64x2Shl) \ ++ V(La64I64x2ShrS) \ ++ V(La64I64x2ShrU) \ ++ V(La64F32x4Abs) \ ++ V(La64F32x4Neg) \ ++ V(La64F32x4Sqrt) \ ++ V(La64F32x4RecipApprox) \ ++ V(La64F32x4RecipSqrtApprox) \ ++ V(La64F32x4Add) \ ++ V(La64F32x4AddHoriz) \ ++ V(La64F32x4Sub) \ ++ V(La64F32x4Mul) \ ++ V(La64F32x4Div) \ ++ V(La64F32x4Max) \ ++ V(La64F32x4Min) \ ++ V(La64F32x4Eq) \ ++ V(La64F32x4Ne) \ ++ V(La64F32x4Lt) \ ++ V(La64F32x4Le) \ ++ V(La64I32x4SConvertF32x4) \ ++ V(La64I32x4UConvertF32x4) \ ++ V(La64I32x4Neg) \ ++ V(La64I32x4GtS) \ ++ V(La64I32x4GeS) \ ++ V(La64I32x4GtU) \ ++ V(La64I32x4GeU) \ ++ V(La64I32x4Abs) \ ++ V(La64I16x8Splat) \ ++ V(La64I16x8ExtractLaneU) \ ++ V(La64I16x8ExtractLaneS) \ ++ V(La64I16x8ReplaceLane) \ ++ V(La64I16x8Neg) \ ++ V(La64I16x8Shl) \ ++ V(La64I16x8ShrS) \ ++ V(La64I16x8ShrU) \ ++ V(La64I16x8Add) \ ++ V(La64I16x8AddSaturateS) \ ++ V(La64I16x8AddHoriz) \ ++ V(La64I16x8Sub) \ ++ V(La64I16x8SubSaturateS) \ ++ V(La64I16x8Mul) \ ++ V(La64I16x8MaxS) \ ++ V(La64I16x8MinS) \ ++ V(La64I16x8Eq) \ ++ V(La64I16x8Ne) \ ++ V(La64I16x8GtS) \ ++ V(La64I16x8GeS) \ ++ V(La64I16x8AddSaturateU) \ ++ V(La64I16x8SubSaturateU) \ ++ V(La64I16x8MaxU) \ ++ V(La64I16x8MinU) \ ++ V(La64I16x8GtU) \ ++ V(La64I16x8GeU) \ ++ V(La64I16x8RoundingAverageU) \ ++ V(La64I16x8Abs) \ ++ V(La64I8x16Splat) \ ++ V(La64I8x16ExtractLaneU) \ ++ V(La64I8x16ExtractLaneS) \ ++ V(La64I8x16ReplaceLane) \ ++ V(La64I8x16Neg) \ ++ V(La64I8x16Shl) \ ++ V(La64I8x16ShrS) \ ++ V(La64I8x16Add) \ ++ V(La64I8x16AddSaturateS) \ ++ V(La64I8x16Sub) \ ++ V(La64I8x16SubSaturateS) \ ++ V(La64I8x16Mul) \ ++ V(La64I8x16MaxS) \ ++ V(La64I8x16MinS) \ ++ V(La64I8x16Eq) \ ++ V(La64I8x16Ne) \ ++ V(La64I8x16GtS) \ ++ V(La64I8x16GeS) \ ++ V(La64I8x16ShrU) \ ++ V(La64I8x16AddSaturateU) \ ++ V(La64I8x16SubSaturateU) \ ++ V(La64I8x16MaxU) \ ++ V(La64I8x16MinU) \ ++ V(La64I8x16GtU) \ ++ V(La64I8x16GeU) \ ++ V(La64I8x16RoundingAverageU) \ ++ V(La64I8x16Abs) \ ++ V(La64S128And) \ ++ V(La64S128Or) \ ++ V(La64S128Xor) \ ++ V(La64S128Not) \ ++ V(La64S128Select) \ ++ V(La64S128AndNot) \ ++ V(La64S1x4AnyTrue) \ ++ V(La64S1x4AllTrue) \ ++ V(La64S1x8AnyTrue) \ ++ V(La64S1x8AllTrue) \ ++ V(La64S1x16AnyTrue) \ ++ V(La64S1x16AllTrue) \ ++ V(La64S32x4InterleaveRight) \ ++ V(La64S32x4InterleaveLeft) \ ++ V(La64S32x4PackEven) \ ++ V(La64S32x4PackOdd) \ ++ V(La64S32x4InterleaveEven) \ ++ V(La64S32x4InterleaveOdd) \ ++ V(La64S32x4Shuffle) \ ++ V(La64S16x8InterleaveRight) \ ++ V(La64S16x8InterleaveLeft) \ ++ V(La64S16x8PackEven) \ ++ V(La64S16x8PackOdd) \ ++ V(La64S16x8InterleaveEven) \ ++ V(La64S16x8InterleaveOdd) \ ++ V(La64S16x4Reverse) \ ++ V(La64S16x2Reverse) \ ++ V(La64S8x16InterleaveRight) \ ++ V(La64S8x16InterleaveLeft) \ ++ V(La64S8x16PackEven) \ ++ V(La64S8x16PackOdd) \ ++ V(La64S8x16InterleaveEven) \ ++ V(La64S8x16InterleaveOdd) \ ++ V(La64S8x16Shuffle) \ ++ V(La64S8x16Swizzle) \ ++ V(La64S8x16Concat) \ ++ V(La64S8x8Reverse) \ ++ V(La64S8x4Reverse) \ ++ V(La64S8x2Reverse) \ ++ V(La64S8x16LoadSplat) \ ++ V(La64S16x8LoadSplat) \ ++ V(La64S32x4LoadSplat) \ ++ V(La64S64x2LoadSplat) \ ++ V(La64I16x8Load8x8S) \ ++ V(La64I16x8Load8x8U) \ ++ V(La64I32x4Load16x4S) \ ++ V(La64I32x4Load16x4U) \ ++ 
V(La64I64x2Load32x2S) \ ++ V(La64I64x2Load32x2U) \ ++ V(La64I32x4SConvertI16x8Low) \ ++ V(La64I32x4SConvertI16x8High) \ ++ V(La64I32x4UConvertI16x8Low) \ ++ V(La64I32x4UConvertI16x8High) \ ++ V(La64I16x8SConvertI8x16Low) \ ++ V(La64I16x8SConvertI8x16High) \ ++ V(La64I16x8SConvertI32x4) \ ++ V(La64I16x8UConvertI32x4) \ ++ V(La64I16x8UConvertI8x16Low) \ ++ V(La64I16x8UConvertI8x16High) \ ++ V(La64I8x16SConvertI16x8) \ ++ V(La64I8x16UConvertI16x8) \ ++ V(La64Word64AtomicLoadUint8) \ ++ V(La64Word64AtomicLoadUint16) \ ++ V(La64Word64AtomicLoadUint32) \ ++ V(La64Word64AtomicLoadUint64) \ ++ V(La64Word64AtomicStoreWord8) \ ++ V(La64Word64AtomicStoreWord16) \ ++ V(La64Word64AtomicStoreWord32) \ ++ V(La64Word64AtomicStoreWord64) \ ++ V(La64Word64AtomicAddUint8) \ ++ V(La64Word64AtomicAddUint16) \ ++ V(La64Word64AtomicAddUint32) \ ++ V(La64Word64AtomicAddUint64) \ ++ V(La64Word64AtomicSubUint8) \ ++ V(La64Word64AtomicSubUint16) \ ++ V(La64Word64AtomicSubUint32) \ ++ V(La64Word64AtomicSubUint64) \ ++ V(La64Word64AtomicAndUint8) \ ++ V(La64Word64AtomicAndUint16) \ ++ V(La64Word64AtomicAndUint32) \ ++ V(La64Word64AtomicAndUint64) \ ++ V(La64Word64AtomicOrUint8) \ ++ V(La64Word64AtomicOrUint16) \ ++ V(La64Word64AtomicOrUint32) \ ++ V(La64Word64AtomicOrUint64) \ ++ V(La64Word64AtomicXorUint8) \ ++ V(La64Word64AtomicXorUint16) \ ++ V(La64Word64AtomicXorUint32) \ ++ V(La64Word64AtomicXorUint64) \ ++ V(La64Word64AtomicExchangeUint8) \ ++ V(La64Word64AtomicExchangeUint16) \ ++ V(La64Word64AtomicExchangeUint32) \ ++ V(La64Word64AtomicExchangeUint64) \ ++ V(La64Word64AtomicCompareExchangeUint8) \ ++ V(La64Word64AtomicCompareExchangeUint16) \ ++ V(La64Word64AtomicCompareExchangeUint32) \ ++ V(La64Word64AtomicCompareExchangeUint64) ++ ++// Addressing modes represent the "shape" of inputs to an instruction. ++// Many instructions support multiple addressing modes. Addressing modes ++// are encoded into the InstructionCode of the instruction and tell the ++// code generator after register allocation which assembler method to call. ++// ++// We use the following local notation for addressing modes: ++// ++// R = register ++// O = register or stack slot ++// D = double register ++// I = immediate (handle, external, int32) ++// MRI = [register + immediate] ++// MRR = [register + register] ++// TODO(plind): Add the new r6 address modes. ++#define TARGET_ADDRESSING_MODE_LIST(V) \ ++ V(MRI) /* [%r0 + K] */ \ ++ V(MRR) /* [%r0 + %r1] */ ++ ++} // namespace compiler ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_COMPILER_BACKEND_LA64_INSTRUCTION_CODES_LA64_H_ +diff --git a/src/3rdparty/chromium/v8/src/compiler/backend/la64/instruction-scheduler-la64.cc b/src/3rdparty/chromium/v8/src/compiler/backend/la64/instruction-scheduler-la64.cc +new file mode 100644 +index 0000000000..a1a5a771d3 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/compiler/backend/la64/instruction-scheduler-la64.cc +@@ -0,0 +1,1534 @@ ++// Copyright 2015 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. 
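For context on the kLa64* names used throughout the two new backend files: TARGET_ARCH_OPCODE_LIST and TARGET_ADDRESSING_MODE_LIST above are V8-style X-macro lists, and the shared instruction-codes machinery expands them into the ArchOpcode enumerators (kLa64Add, kLa64Dadd, ...) and the kMode_MRI/kMode_MRR addressing modes that the scheduler and selector below switch over. A minimal, self-contained sketch of that expansion pattern follows; it uses hypothetical MY_*/kMy* names, not the real V8 macros:

#include <cstdio>

// Hypothetical opcode list in the same X-macro style as TARGET_ARCH_OPCODE_LIST.
#define MY_OPCODE_LIST(V) \
  V(La64Add)              \
  V(La64Dsub)             \
  V(La64MulD)

// Expand the list once to declare the enumerators...
enum MyArchOpcode {
#define DECLARE_OPCODE(Name) kMy##Name,
  MY_OPCODE_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
};

// ...and once more to map each enumerator back to a printable name.
const char* MyArchOpcodeName(MyArchOpcode opcode) {
  switch (opcode) {
#define OPCODE_CASE(Name) \
  case kMy##Name:         \
    return #Name;
    MY_OPCODE_LIST(OPCODE_CASE)
#undef OPCODE_CASE
  }
  return "unknown";
}

int main() { std::printf("%s\n", MyArchOpcodeName(kMyLa64MulD)); }  // prints "La64MulD"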
++ ++#include "src/codegen/macro-assembler.h" ++#include "src/compiler/backend/instruction-scheduler.h" ++ ++namespace v8 { ++namespace internal { ++namespace compiler { ++ ++bool InstructionScheduler::SchedulerSupported() { return true; } ++ ++int InstructionScheduler::GetTargetInstructionFlags( ++ const Instruction* instr) const { ++ switch (instr->arch_opcode()) { ++ case kLa64AbsD: ++ case kLa64AbsS: ++ case kLa64Add: ++ case kLa64AddD: ++ case kLa64AddS: ++ case kLa64And: ++ case kLa64And32: ++ case kLa64AssertEqual: ++ case kLa64BitcastDL: ++ case kLa64BitcastLD: ++ case kLa64ByteSwap32: ++ case kLa64ByteSwap64: ++ case kLa64CeilWD: ++ case kLa64CeilWS: ++ case kLa64Clz: ++ case kLa64Cmp: ++ case kLa64CmpD: ++ case kLa64CmpS: ++ case kLa64Ctz: ++ case kLa64CvtDL: ++ case kLa64CvtDS: ++ case kLa64CvtDUl: ++ case kLa64CvtDUw: ++ case kLa64CvtDW: ++ case kLa64CvtSD: ++ case kLa64CvtSL: ++ case kLa64CvtSUl: ++ case kLa64CvtSUw: ++ case kLa64CvtSW: ++ case kLa64DMulHigh: ++ case kLa64MulHighU: ++ case kLa64Dadd: ++ case kLa64DaddOvf: ++ case kLa64Dclz: ++ case kLa64Dctz: ++ case kLa64Ddiv: ++ case kLa64DdivU: ++ case kLa64Dext: ++ case kLa64Dins: ++ case kLa64Div: ++ case kLa64DivD: ++ case kLa64DivS: ++ case kLa64DivU: ++ case kLa64Dlsa: ++ case kLa64Dmod: ++ case kLa64DmodU: ++ case kLa64Dmul: ++ case kLa64Dpopcnt: ++ case kLa64Dror: ++ case kLa64Dsar: ++ case kLa64Dshl: ++ case kLa64Dshr: ++ case kLa64Dsub: ++ case kLa64DsubOvf: ++ case kLa64Ext: ++ case kLa64F64x2Abs: ++ case kLa64F64x2Neg: ++ case kLa64F64x2Sqrt: ++ case kLa64F64x2Add: ++ case kLa64F64x2Sub: ++ case kLa64F64x2Mul: ++ case kLa64F64x2Div: ++ case kLa64F64x2Min: ++ case kLa64F64x2Max: ++ case kLa64F64x2Eq: ++ case kLa64F64x2Ne: ++ case kLa64F64x2Lt: ++ case kLa64F64x2Le: ++ case kLa64I64x2Add: ++ case kLa64I64x2Sub: ++ case kLa64I64x2Mul: ++ case kLa64I64x2Neg: ++ case kLa64I64x2Shl: ++ case kLa64I64x2ShrS: ++ case kLa64I64x2ShrU: ++ case kLa64F32x4Abs: ++ case kLa64F32x4Add: ++ case kLa64F32x4AddHoriz: ++ case kLa64F32x4Eq: ++ case kLa64F32x4ExtractLane: ++ case kLa64F32x4Lt: ++ case kLa64F32x4Le: ++ case kLa64F32x4Max: ++ case kLa64F32x4Min: ++ case kLa64F32x4Mul: ++ case kLa64F32x4Div: ++ case kLa64F32x4Ne: ++ case kLa64F32x4Neg: ++ case kLa64F32x4Sqrt: ++ case kLa64F32x4RecipApprox: ++ case kLa64F32x4RecipSqrtApprox: ++ case kLa64F32x4ReplaceLane: ++ case kLa64F32x4SConvertI32x4: ++ case kLa64F32x4Splat: ++ case kLa64F32x4Sub: ++ case kLa64F32x4UConvertI32x4: ++ case kLa64F64x2Splat: ++ case kLa64F64x2ExtractLane: ++ case kLa64F64x2ReplaceLane: ++ case kLa64Float32Max: ++ case kLa64Float32Min: ++ case kLa64Float32RoundDown: ++ case kLa64Float32RoundTiesEven: ++ case kLa64Float32RoundTruncate: ++ case kLa64Float32RoundUp: ++ case kLa64Float64ExtractLowWord32: ++ case kLa64Float64ExtractHighWord32: ++ case kLa64Float64InsertLowWord32: ++ case kLa64Float64InsertHighWord32: ++ case kLa64Float64Max: ++ case kLa64Float64Min: ++ case kLa64Float64RoundDown: ++ case kLa64Float64RoundTiesEven: ++ case kLa64Float64RoundTruncate: ++ case kLa64Float64RoundUp: ++ case kLa64Float64SilenceNaN: ++ case kLa64FloorWD: ++ case kLa64FloorWS: ++ case kLa64I16x8Add: ++ case kLa64I16x8AddHoriz: ++ case kLa64I16x8AddSaturateS: ++ case kLa64I16x8AddSaturateU: ++ case kLa64I16x8Eq: ++ case kLa64I16x8ExtractLaneU: ++ case kLa64I16x8ExtractLaneS: ++ case kLa64I16x8GeS: ++ case kLa64I16x8GeU: ++ case kLa64I16x8GtS: ++ case kLa64I16x8GtU: ++ case kLa64I16x8MaxS: ++ case kLa64I16x8MaxU: ++ case kLa64I16x8MinS: ++ case kLa64I16x8MinU: ++ case 
kLa64I16x8Mul: ++ case kLa64I16x8Ne: ++ case kLa64I16x8Neg: ++ case kLa64I16x8ReplaceLane: ++ case kLa64I8x16SConvertI16x8: ++ case kLa64I16x8SConvertI32x4: ++ case kLa64I16x8SConvertI8x16High: ++ case kLa64I16x8SConvertI8x16Low: ++ case kLa64I16x8Shl: ++ case kLa64I16x8ShrS: ++ case kLa64I16x8ShrU: ++ case kLa64I16x8Splat: ++ case kLa64I16x8Sub: ++ case kLa64I16x8SubSaturateS: ++ case kLa64I16x8SubSaturateU: ++ case kLa64I8x16UConvertI16x8: ++ case kLa64I16x8UConvertI32x4: ++ case kLa64I16x8UConvertI8x16High: ++ case kLa64I16x8UConvertI8x16Low: ++ case kLa64I16x8RoundingAverageU: ++ case kLa64I16x8Abs: ++ case kLa64I32x4Add: ++ case kLa64I32x4AddHoriz: ++ case kLa64I32x4Eq: ++ case kLa64I32x4ExtractLane: ++ case kLa64I32x4GeS: ++ case kLa64I32x4GeU: ++ case kLa64I32x4GtS: ++ case kLa64I32x4GtU: ++ case kLa64I32x4MaxS: ++ case kLa64I32x4MaxU: ++ case kLa64I32x4MinS: ++ case kLa64I32x4MinU: ++ case kLa64I32x4Mul: ++ case kLa64I32x4Ne: ++ case kLa64I32x4Neg: ++ case kLa64I32x4ReplaceLane: ++ case kLa64I32x4SConvertF32x4: ++ case kLa64I32x4SConvertI16x8High: ++ case kLa64I32x4SConvertI16x8Low: ++ case kLa64I32x4Shl: ++ case kLa64I32x4ShrS: ++ case kLa64I32x4ShrU: ++ case kLa64I32x4Splat: ++ case kLa64I32x4Sub: ++ case kLa64I32x4UConvertF32x4: ++ case kLa64I32x4UConvertI16x8High: ++ case kLa64I32x4UConvertI16x8Low: ++ case kLa64I32x4Abs: ++ case kLa64I8x16Add: ++ case kLa64I8x16AddSaturateS: ++ case kLa64I8x16AddSaturateU: ++ case kLa64I8x16Eq: ++ case kLa64I8x16ExtractLaneU: ++ case kLa64I8x16ExtractLaneS: ++ case kLa64I8x16GeS: ++ case kLa64I8x16GeU: ++ case kLa64I8x16GtS: ++ case kLa64I8x16GtU: ++ case kLa64I8x16MaxS: ++ case kLa64I8x16MaxU: ++ case kLa64I8x16MinS: ++ case kLa64I8x16MinU: ++ case kLa64I8x16Mul: ++ case kLa64I8x16Ne: ++ case kLa64I8x16Neg: ++ case kLa64I8x16ReplaceLane: ++ case kLa64I8x16Shl: ++ case kLa64I8x16ShrS: ++ case kLa64I8x16ShrU: ++ case kLa64I8x16Splat: ++ case kLa64I8x16Sub: ++ case kLa64I8x16SubSaturateS: ++ case kLa64I8x16SubSaturateU: ++ case kLa64I8x16RoundingAverageU: ++ case kLa64I8x16Abs: ++ case kLa64Ins: ++ case kLa64Lsa: ++ case kLa64MaxD: ++ case kLa64MaxS: ++ case kLa64MinD: ++ case kLa64MinS: ++ case kLa64Mod: ++ case kLa64ModU: ++ case kLa64Mov: ++ case kLa64Mul: ++ case kLa64MulD: ++ case kLa64MulHigh: ++ case kLa64MulOvf: ++ case kLa64MulS: ++ case kLa64NegD: ++ case kLa64NegS: ++ case kLa64Nor: ++ case kLa64Nor32: ++ case kLa64Or: ++ case kLa64Or32: ++ case kLa64Popcnt: ++ case kLa64Ror: ++ case kLa64RoundWD: ++ case kLa64RoundWS: ++ case kLa64S128And: ++ case kLa64S128Or: ++ case kLa64S128Not: ++ case kLa64S128Select: ++ case kLa64S128AndNot: ++ case kLa64S128Xor: ++ case kLa64S128Zero: ++ case kLa64S16x8InterleaveEven: ++ case kLa64S16x8InterleaveOdd: ++ case kLa64S16x8InterleaveLeft: ++ case kLa64S16x8InterleaveRight: ++ case kLa64S16x8PackEven: ++ case kLa64S16x8PackOdd: ++ case kLa64S16x2Reverse: ++ case kLa64S16x4Reverse: ++ case kLa64S1x16AllTrue: ++ case kLa64S1x16AnyTrue: ++ case kLa64S1x4AllTrue: ++ case kLa64S1x4AnyTrue: ++ case kLa64S1x8AllTrue: ++ case kLa64S1x8AnyTrue: ++ case kLa64S32x4InterleaveEven: ++ case kLa64S32x4InterleaveOdd: ++ case kLa64S32x4InterleaveLeft: ++ case kLa64S32x4InterleaveRight: ++ case kLa64S32x4PackEven: ++ case kLa64S32x4PackOdd: ++ case kLa64S32x4Shuffle: ++ case kLa64S8x16Concat: ++ case kLa64S8x16InterleaveEven: ++ case kLa64S8x16InterleaveOdd: ++ case kLa64S8x16InterleaveLeft: ++ case kLa64S8x16InterleaveRight: ++ case kLa64S8x16PackEven: ++ case kLa64S8x16PackOdd: ++ case kLa64S8x2Reverse: ++ case 
kLa64S8x4Reverse: ++ case kLa64S8x8Reverse: ++ case kLa64S8x16Shuffle: ++ case kLa64S8x16Swizzle: ++ case kLa64Sar: ++ case kLa64Seb: ++ case kLa64Seh: ++ case kLa64Shl: ++ case kLa64Shr: ++ case kLa64SqrtD: ++ case kLa64SqrtS: ++ case kLa64Sub: ++ case kLa64SubD: ++ case kLa64SubS: ++ case kLa64TruncLD: ++ case kLa64TruncLS: ++ case kLa64TruncUlD: ++ case kLa64TruncUlS: ++ case kLa64TruncUwD: ++ case kLa64TruncUwS: ++ case kLa64TruncWD: ++ case kLa64TruncWS: ++ case kLa64Tst: ++ case kLa64Xor: ++ case kLa64Xor32: ++ return kNoOpcodeFlags; ++ ++ case kLa64Lb: ++ case kLa64Lbu: ++ case kLa64Ld: ++ case kLa64Ldc1: ++ case kLa64Lh: ++ case kLa64Lhu: ++ case kLa64Lw: ++ case kLa64Lwc1: ++ case kLa64Lwu: ++ case kLa64Peek: ++ case kLa64Uld: ++ case kLa64Uldc1: ++ case kLa64Ulh: ++ case kLa64Ulhu: ++ case kLa64Ulw: ++ case kLa64Ulwu: ++ case kLa64Ulwc1: ++ case kLa64S8x16LoadSplat: ++ case kLa64S16x8LoadSplat: ++ case kLa64S32x4LoadSplat: ++ case kLa64S64x2LoadSplat: ++ case kLa64I16x8Load8x8S: ++ case kLa64I16x8Load8x8U: ++ case kLa64I32x4Load16x4S: ++ case kLa64I32x4Load16x4U: ++ case kLa64I64x2Load32x2S: ++ case kLa64I64x2Load32x2U: ++ case kLa64Word64AtomicLoadUint8: ++ case kLa64Word64AtomicLoadUint16: ++ case kLa64Word64AtomicLoadUint32: ++ case kLa64Word64AtomicLoadUint64: ++ ++ return kIsLoadOperation; ++ ++ case kLa64ModD: ++ case kLa64ModS: ++ case kLa64Push: ++ case kLa64Sb: ++ case kLa64Sd: ++ case kLa64Sdc1: ++ case kLa64Sh: ++ case kLa64StackClaim: ++ case kLa64StoreToStackSlot: ++ case kLa64Sw: ++ case kLa64Swc1: ++ case kLa64Usd: ++ case kLa64Usdc1: ++ case kLa64Ush: ++ case kLa64Usw: ++ case kLa64Uswc1: ++ case kLa64Sync: ++ case kLa64Word64AtomicStoreWord8: ++ case kLa64Word64AtomicStoreWord16: ++ case kLa64Word64AtomicStoreWord32: ++ case kLa64Word64AtomicStoreWord64: ++ case kLa64Word64AtomicAddUint8: ++ case kLa64Word64AtomicAddUint16: ++ case kLa64Word64AtomicAddUint32: ++ case kLa64Word64AtomicAddUint64: ++ case kLa64Word64AtomicSubUint8: ++ case kLa64Word64AtomicSubUint16: ++ case kLa64Word64AtomicSubUint32: ++ case kLa64Word64AtomicSubUint64: ++ case kLa64Word64AtomicAndUint8: ++ case kLa64Word64AtomicAndUint16: ++ case kLa64Word64AtomicAndUint32: ++ case kLa64Word64AtomicAndUint64: ++ case kLa64Word64AtomicOrUint8: ++ case kLa64Word64AtomicOrUint16: ++ case kLa64Word64AtomicOrUint32: ++ case kLa64Word64AtomicOrUint64: ++ case kLa64Word64AtomicXorUint8: ++ case kLa64Word64AtomicXorUint16: ++ case kLa64Word64AtomicXorUint32: ++ case kLa64Word64AtomicXorUint64: ++ case kLa64Word64AtomicExchangeUint8: ++ case kLa64Word64AtomicExchangeUint16: ++ case kLa64Word64AtomicExchangeUint32: ++ case kLa64Word64AtomicExchangeUint64: ++ case kLa64Word64AtomicCompareExchangeUint8: ++ case kLa64Word64AtomicCompareExchangeUint16: ++ case kLa64Word64AtomicCompareExchangeUint32: ++ case kLa64Word64AtomicCompareExchangeUint64: ++ return kHasSideEffect; ++ ++#define CASE(Name) case k##Name: ++ COMMON_ARCH_OPCODE_LIST(CASE) ++#undef CASE ++ // Already covered in architecture independent code. ++ UNREACHABLE(); ++ } ++ ++ UNREACHABLE(); ++} ++ ++enum Latency { ++ BRANCH = 4, // Estimated max. ++ RINT_S = 4, // Estimated. ++ RINT_D = 4, // Estimated. 
++ ++ MULT = 4, ++ MULTU = 4, ++ DMULT = 4, ++ DMULTU = 4, ++ ++ MUL = 7, ++ DMUL = 7, ++ MUH = 7, ++ MUHU = 7, ++ DMUH = 7, ++ DMUHU = 7, ++ ++ DIV = 50, // Min:11 Max:50 ++ DDIV = 50, ++ DIVU = 50, ++ DDIVU = 50, ++ ++ ABS_S = 4, ++ ABS_D = 4, ++ NEG_S = 4, ++ NEG_D = 4, ++ ADD_S = 4, ++ ADD_D = 4, ++ SUB_S = 4, ++ SUB_D = 4, ++ MAX_S = 4, // Estimated. ++ MIN_S = 4, ++ MAX_D = 4, // Estimated. ++ MIN_D = 4, ++ C_cond_S = 4, ++ C_cond_D = 4, ++ MUL_S = 4, ++ ++ MADD_S = 4, ++ MSUB_S = 4, ++ NMADD_S = 4, ++ NMSUB_S = 4, ++ ++ CABS_cond_S = 4, ++ CABS_cond_D = 4, ++ ++ CVT_D_S = 4, ++ CVT_PS_PW = 4, ++ ++ CVT_S_W = 4, ++ CVT_S_L = 4, ++ CVT_D_W = 4, ++ CVT_D_L = 4, ++ ++ CVT_S_D = 4, ++ ++ CVT_W_S = 4, ++ CVT_W_D = 4, ++ CVT_L_S = 4, ++ CVT_L_D = 4, ++ ++ CEIL_W_S = 4, ++ CEIL_W_D = 4, ++ CEIL_L_S = 4, ++ CEIL_L_D = 4, ++ ++ FLOOR_W_S = 4, ++ FLOOR_W_D = 4, ++ FLOOR_L_S = 4, ++ FLOOR_L_D = 4, ++ ++ ROUND_W_S = 4, ++ ROUND_W_D = 4, ++ ROUND_L_S = 4, ++ ROUND_L_D = 4, ++ ++ TRUNC_W_S = 4, ++ TRUNC_W_D = 4, ++ TRUNC_L_S = 4, ++ TRUNC_L_D = 4, ++ ++ MOV_S = 4, ++ MOV_D = 4, ++ ++ MOVF_S = 4, ++ MOVF_D = 4, ++ ++ MOVN_S = 4, ++ MOVN_D = 4, ++ ++ MOVT_S = 4, ++ MOVT_D = 4, ++ ++ MOVZ_S = 4, ++ MOVZ_D = 4, ++ ++ MUL_D = 5, ++ MADD_D = 5, ++ MSUB_D = 5, ++ NMADD_D = 5, ++ NMSUB_D = 5, ++ ++ RECIP_S = 13, ++ RECIP_D = 26, ++ ++ RSQRT_S = 17, ++ RSQRT_D = 36, ++ ++ DIV_S = 17, ++ SQRT_S = 17, ++ ++ DIV_D = 32, ++ SQRT_D = 32, ++ ++ MTC1 = 4, ++ MTHC1 = 4, ++ DMTC1 = 4, ++ LWC1 = 4, ++ LDC1 = 4, ++ ++ MFC1 = 1, ++ MFHC1 = 1, ++ DMFC1 = 1, ++ MFHI = 1, ++ MFLO = 1, ++ SWC1 = 1, ++ SDC1 = 1, ++}; ++ ++int DadduLatency(bool is_operand_register = true) { ++ if (is_operand_register) { ++ return 1; ++ } else { ++ return 2; // Estimated max. ++ } ++} ++ ++int DsubuLatency(bool is_operand_register = true) { ++ return DadduLatency(is_operand_register); ++} ++ ++int AndLatency(bool is_operand_register = true) { ++ return DadduLatency(is_operand_register); ++} ++ ++int OrLatency(bool is_operand_register = true) { ++ return DadduLatency(is_operand_register); ++} ++ ++int NorLatency(bool is_operand_register = true) { ++ if (is_operand_register) { ++ return 1; ++ } else { ++ return 2; // Estimated max. 
++ } ++} ++ ++int XorLatency(bool is_operand_register = true) { ++ return DadduLatency(is_operand_register); ++} ++ ++int MulLatency(bool is_operand_register = true) { ++ if (is_operand_register) { ++ return Latency::MUL; ++ } else { ++ return Latency::MUL + 1; ++ } ++} ++ ++int DmulLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = Latency::DMUL; ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++int MulhLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = Latency::MUH; ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++int MulhuLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = Latency::MUH; ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++int DMulhLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = Latency::DMUH; ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++int DivLatency(bool is_operand_register = true) { ++ if (is_operand_register) { ++ return Latency::DIV; ++ } else { ++ return Latency::DIV + 1; ++ } ++} ++ ++int DivuLatency(bool is_operand_register = true) { ++ if (is_operand_register) { ++ return Latency::DIVU; ++ } else { ++ return Latency::DIVU + 1; ++ } ++} ++ ++int DdivLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = Latency::DDIV; ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++int DdivuLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = Latency::DDIVU; ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++int ModLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = 1; ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++int ModuLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = 1; ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++int DmodLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = 1; ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++int DmoduLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = 1; ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++int MovzLatency() { return Latency::BRANCH + 1; } ++ ++int MovnLatency() { return Latency::BRANCH + 1; } ++ ++int DlsaLatency() { ++ // Estimated max. ++ return DadduLatency() + 1; ++} ++ ++int CallLatency() { ++ // Estimated. ++ return DadduLatency(false) + Latency::BRANCH + 5; ++} ++ ++int JumpLatency() { ++ // Estimated max. ++ return 1 + DadduLatency() + Latency::BRANCH + 2; ++} ++ ++int SmiUntagLatency() { return 1; } ++ ++int PrepareForTailCallLatency() { ++ // Estimated max. ++ return 2 * (DlsaLatency() + DadduLatency(false)) + 2 + Latency::BRANCH + ++ Latency::BRANCH + 2 * DsubuLatency(false) + 2 + Latency::BRANCH + 1; ++} ++ ++int AssemblePopArgumentsAdoptFrameLatency() { ++ return 1 + Latency::BRANCH + 1 + SmiUntagLatency() + ++ PrepareForTailCallLatency(); ++} ++ ++int AssertLatency() { return 1; } ++ ++int PrepareCallCFunctionLatency() { ++ int frame_alignment = TurboAssembler::ActivationFrameAlignment(); ++ if (frame_alignment > kSystemPointerSize) { ++ return 1 + DsubuLatency(false) + AndLatency(false) + 1; ++ } else { ++ return DsubuLatency(false); ++ } ++} ++ ++int AdjustBaseAndOffsetLatency() { ++ return 3; // Estimated max. 
++} ++ ++int AlignedMemoryLatency() { return AdjustBaseAndOffsetLatency() + 1; } ++ ++int UlhuLatency() { return AlignedMemoryLatency(); } ++ ++int UlwLatency() { return AlignedMemoryLatency(); } ++ ++int UlwuLatency() { return AlignedMemoryLatency(); } ++ ++int UldLatency() { return AlignedMemoryLatency(); } ++ ++int Ulwc1Latency() { return AlignedMemoryLatency(); } ++ ++int Uldc1Latency() { return AlignedMemoryLatency(); } ++ ++int UshLatency() { return AlignedMemoryLatency(); } ++ ++int UswLatency() { return AlignedMemoryLatency(); } ++ ++int UsdLatency() { return AlignedMemoryLatency(); } ++ ++int Uswc1Latency() { return AlignedMemoryLatency(); } ++ ++int Usdc1Latency() { return AlignedMemoryLatency(); } ++ ++int Lwc1Latency() { return AdjustBaseAndOffsetLatency() + Latency::LWC1; } ++ ++int Swc1Latency() { return AdjustBaseAndOffsetLatency() + Latency::SWC1; } ++ ++int Sdc1Latency() { return AdjustBaseAndOffsetLatency() + Latency::SDC1; } ++ ++int Ldc1Latency() { return AdjustBaseAndOffsetLatency() + Latency::LDC1; } ++ ++int MultiPushLatency() { ++ int latency = DsubuLatency(false); ++ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { ++ latency++; ++ } ++ return latency; ++} ++ ++int MultiPushFPULatency() { ++ int latency = DsubuLatency(false); ++ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { ++ latency += Sdc1Latency(); ++ } ++ return latency; ++} ++ ++int PushCallerSavedLatency(SaveFPRegsMode fp_mode) { ++ int latency = MultiPushLatency(); ++ if (fp_mode == kSaveFPRegs) { ++ latency += MultiPushFPULatency(); ++ } ++ return latency; ++} ++ ++int MultiPopLatency() { ++ int latency = DadduLatency(false); ++ for (int16_t i = 0; i < kNumRegisters; i++) { ++ latency++; ++ } ++ return latency; ++} ++ ++int MultiPopFPULatency() { ++ int latency = DadduLatency(false); ++ for (int16_t i = 0; i < kNumRegisters; i++) { ++ latency += Ldc1Latency(); ++ } ++ return latency; ++} ++ ++int PopCallerSavedLatency(SaveFPRegsMode fp_mode) { ++ int latency = MultiPopLatency(); ++ if (fp_mode == kSaveFPRegs) { ++ latency += MultiPopFPULatency(); ++ } ++ return latency; ++} ++ ++int CallCFunctionHelperLatency() { ++ // Estimated. ++ int latency = AndLatency(false) + Latency::BRANCH + 2 + CallLatency(); ++ if (base::OS::ActivationFrameAlignment() > kSystemPointerSize) { ++ latency++; ++ } else { ++ latency += DadduLatency(false); ++ } ++ return latency; ++} ++ ++int CallCFunctionLatency() { return 1 + CallCFunctionHelperLatency(); } ++ ++int AssembleArchJumpLatency() { ++ // Estimated max. ++ return Latency::BRANCH; ++} ++ ++int GenerateSwitchTableLatency() { ++ int latency = 0; ++ latency = DlsaLatency() + 2; ++ latency += 2; ++ return latency; ++} ++ ++int AssembleArchTableSwitchLatency() { ++ return Latency::BRANCH + GenerateSwitchTableLatency(); ++} ++ ++int DropAndRetLatency() { ++ // Estimated max. ++ return DadduLatency(false) + JumpLatency(); ++} ++ ++int AssemblerReturnLatency() { ++ // Estimated max. ++ return DadduLatency(false) + MultiPopLatency() + MultiPopFPULatency() + ++ Latency::BRANCH + DadduLatency() + 1 + DropAndRetLatency(); ++} ++ ++int TryInlineTruncateDoubleToILatency() { ++ return 2 + Latency::TRUNC_W_D + Latency::MFC1 + 2 + AndLatency(false) + ++ Latency::BRANCH; ++} ++ ++int CallStubDelayedLatency() { return 1 + CallLatency(); } ++ ++int TruncateDoubleToIDelayedLatency() { ++ // TODO(la64): This no longer reflects how TruncateDoubleToI is called. 
++ return TryInlineTruncateDoubleToILatency() + 1 + DsubuLatency(false) + ++ Sdc1Latency() + CallStubDelayedLatency() + DadduLatency(false) + 1; ++} ++ ++int CheckPageFlagLatency() { ++ return AndLatency(false) + AlignedMemoryLatency() + AndLatency(false) + ++ Latency::BRANCH; ++} ++ ++int SltuLatency(bool is_operand_register = true) { ++ if (is_operand_register) { ++ return 1; ++ } else { ++ return 2; // Estimated max. ++ } ++} ++ ++int BranchShortHelperLatency() { ++ return 2; // Estimated max. ++} ++ ++int BranchShortLatency() { return BranchShortHelperLatency(); } ++ ++int MoveLatency() { return 1; } ++ ++int MovToFloatParametersLatency() { return 2 * MoveLatency(); } ++ ++int MovFromFloatResultLatency() { return MoveLatency(); } ++ ++int DaddOverflowLatency() { ++ // Estimated max. ++ return 6; ++} ++ ++int DsubOverflowLatency() { ++ // Estimated max. ++ return 6; ++} ++ ++int MulOverflowLatency() { ++ // Estimated max. ++ return MulLatency() + MulhLatency() + 2; ++} ++ ++int DclzLatency() { return 1; } ++ ++int CtzLatency() { return 3 + DclzLatency(); } ++ ++int DctzLatency() { return 4; } ++ ++int PopcntLatency() { ++ return 2 + AndLatency() + DsubuLatency() + 1 + AndLatency() + 1 + ++ AndLatency() + DadduLatency() + 1 + DadduLatency() + 1 + AndLatency() + ++ 1 + MulLatency() + 1; ++} ++ ++int DpopcntLatency() { ++ return 2 + AndLatency() + DsubuLatency() + 1 + AndLatency() + 1 + ++ AndLatency() + DadduLatency() + 1 + DadduLatency() + 1 + AndLatency() + ++ 1 + DmulLatency() + 1; ++} ++ ++int CompareFLatency() { return Latency::C_cond_S; } ++ ++int CompareF32Latency() { return CompareFLatency(); } ++ ++int CompareF64Latency() { return CompareFLatency(); } ++ ++int CompareIsNanFLatency() { return CompareFLatency(); } ++ ++int CompareIsNanF32Latency() { return CompareIsNanFLatency(); } ++ ++int CompareIsNanF64Latency() { return CompareIsNanFLatency(); } ++ ++int NegsLatency() { return Latency::NEG_S; } ++ ++int NegdLatency() { return Latency::NEG_D; } ++ ++int Float64RoundLatency() { return Latency::RINT_D + 4; } ++ ++int Float32RoundLatency() { return Latency::RINT_S + 4; } ++ ++int Float32MaxLatency() { ++ // Estimated max. ++ int latency = CompareIsNanF32Latency() + Latency::BRANCH; ++ return latency + Latency::MAX_S; ++} ++ ++int Float64MaxLatency() { ++ // Estimated max. ++ int latency = CompareIsNanF64Latency() + Latency::BRANCH; ++ return latency + Latency::MAX_D; ++} ++ ++int Float32MinLatency() { ++ // Estimated max. ++ int latency = CompareIsNanF32Latency() + Latency::BRANCH; ++ return latency + Latency::MIN_S; ++} ++ ++int Float64MinLatency() { ++ // Estimated max. ++ int latency = CompareIsNanF64Latency() + Latency::BRANCH; ++ return latency + Latency::MIN_D; ++} ++ ++int TruncLSLatency(bool load_status) { ++ int latency = Latency::TRUNC_L_S + Latency::DMFC1; ++ if (load_status) { ++ latency += SltuLatency() + 7; ++ } ++ return latency; ++} ++ ++int TruncLDLatency(bool load_status) { ++ int latency = Latency::TRUNC_L_D + Latency::DMFC1; ++ if (load_status) { ++ latency += SltuLatency() + 7; ++ } ++ return latency; ++} ++ ++int TruncUlSLatency() { ++ // Estimated max. ++ return 2 * CompareF32Latency() + CompareIsNanF32Latency() + ++ 4 * Latency::BRANCH + Latency::SUB_S + 2 * Latency::TRUNC_L_S + ++ 3 * Latency::DMFC1 + OrLatency() + Latency::MTC1 + Latency::MOV_S + ++ SltuLatency() + 4; ++} ++ ++int TruncUlDLatency() { ++ // Estimated max. 
++ return 2 * CompareF64Latency() + CompareIsNanF64Latency() + ++ 4 * Latency::BRANCH + Latency::SUB_D + 2 * Latency::TRUNC_L_D + ++ 3 * Latency::DMFC1 + OrLatency() + Latency::DMTC1 + Latency::MOV_D + ++ SltuLatency() + 4; ++} ++ ++int PushLatency() { return DadduLatency() + AlignedMemoryLatency(); } ++ ++int ByteSwapSignedLatency() { return 2; } ++ ++int LlLatency(int offset) { ++ bool is_one_instruction = is_int14(offset); ++ if (is_one_instruction) { ++ return 1; ++ } else { ++ return 3; ++ } ++} ++ ++int ExtractBitsLatency(bool sign_extend, int size) { ++ int latency = 2; ++ if (sign_extend) { ++ switch (size) { ++ case 8: ++ case 16: ++ case 32: ++ latency += 1; ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } ++ return latency; ++} ++ ++int InsertBitsLatency() { return 2 + DsubuLatency(false) + 2; } ++ ++int ScLatency(int offset) { ++ bool is_one_instruction = is_int14(offset); ++ if (is_one_instruction) { ++ return 1; ++ } else { ++ return 3; ++ } ++} ++ ++int Word32AtomicExchangeLatency(bool sign_extend, int size) { ++ return DadduLatency(false) + 1 + DsubuLatency() + 2 + LlLatency(0) + ++ ExtractBitsLatency(sign_extend, size) + InsertBitsLatency() + ++ ScLatency(0) + BranchShortLatency() + 1; ++} ++ ++int Word32AtomicCompareExchangeLatency(bool sign_extend, int size) { ++ return 2 + DsubuLatency() + 2 + LlLatency(0) + ++ ExtractBitsLatency(sign_extend, size) + InsertBitsLatency() + ++ ScLatency(0) + BranchShortLatency() + 1; ++} ++ ++int InstructionScheduler::GetInstructionLatency(const Instruction* instr) { ++ // Basic latency modeling for LA64 instructions. They have been determined ++ // in empirical way. ++ switch (instr->arch_opcode()) { ++ case kArchCallCodeObject: ++ case kArchCallWasmFunction: ++ return CallLatency(); ++ case kArchTailCallCodeObjectFromJSFunction: ++ case kArchTailCallCodeObject: { ++ int latency = 0; ++ if (instr->arch_opcode() == kArchTailCallCodeObjectFromJSFunction) { ++ latency = AssemblePopArgumentsAdoptFrameLatency(); ++ } ++ return latency + JumpLatency(); ++ } ++ case kArchTailCallWasm: ++ case kArchTailCallAddress: ++ return JumpLatency(); ++ case kArchCallJSFunction: { ++ int latency = 0; ++ if (FLAG_debug_code) { ++ latency = 1 + AssertLatency(); ++ } ++ return latency + 1 + DadduLatency(false) + CallLatency(); ++ } ++ case kArchPrepareCallCFunction: ++ return PrepareCallCFunctionLatency(); ++ case kArchSaveCallerRegisters: { ++ auto fp_mode = ++ static_cast(MiscField::decode(instr->opcode())); ++ return PushCallerSavedLatency(fp_mode); ++ } ++ case kArchRestoreCallerRegisters: { ++ auto fp_mode = ++ static_cast(MiscField::decode(instr->opcode())); ++ return PopCallerSavedLatency(fp_mode); ++ } ++ case kArchPrepareTailCall: ++ return 2; ++ case kArchCallCFunction: ++ return CallCFunctionLatency(); ++ case kArchJmp: ++ return AssembleArchJumpLatency(); ++ case kArchTableSwitch: ++ return AssembleArchTableSwitchLatency(); ++ case kArchAbortCSAAssert: ++ return CallLatency() + 1; ++ case kArchDebugBreak: ++ return 1; ++ case kArchComment: ++ case kArchNop: ++ case kArchThrowTerminator: ++ case kArchDeoptimize: ++ return 0; ++ case kArchRet: ++ return AssemblerReturnLatency(); ++ case kArchFramePointer: ++ return 1; ++ case kArchParentFramePointer: ++ // Estimated max. ++ return AlignedMemoryLatency(); ++ case kArchTruncateDoubleToI: ++ return TruncateDoubleToIDelayedLatency(); ++ case kArchStoreWithWriteBarrier: ++ return DadduLatency() + 1 + CheckPageFlagLatency(); ++ case kArchStackSlot: ++ // Estimated max. 
++ return DadduLatency(false) + AndLatency(false) + AssertLatency() + ++ DadduLatency(false) + AndLatency(false) + BranchShortLatency() + ++ 1 + DsubuLatency() + DadduLatency(); ++ case kArchWordPoisonOnSpeculation: ++ return AndLatency(); ++ case kIeee754Float64Acos: ++ case kIeee754Float64Acosh: ++ case kIeee754Float64Asin: ++ case kIeee754Float64Asinh: ++ case kIeee754Float64Atan: ++ case kIeee754Float64Atanh: ++ case kIeee754Float64Atan2: ++ case kIeee754Float64Cos: ++ case kIeee754Float64Cosh: ++ case kIeee754Float64Cbrt: ++ case kIeee754Float64Exp: ++ case kIeee754Float64Expm1: ++ case kIeee754Float64Log: ++ case kIeee754Float64Log1p: ++ case kIeee754Float64Log10: ++ case kIeee754Float64Log2: ++ case kIeee754Float64Pow: ++ case kIeee754Float64Sin: ++ case kIeee754Float64Sinh: ++ case kIeee754Float64Tan: ++ case kIeee754Float64Tanh: ++ return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() + ++ CallCFunctionLatency() + MovFromFloatResultLatency(); ++ case kLa64Add: ++ case kLa64Dadd: ++ return DadduLatency(instr->InputAt(1)->IsRegister()); ++ case kLa64DaddOvf: ++ return DaddOverflowLatency(); ++ case kLa64Sub: ++ case kLa64Dsub: ++ return DsubuLatency(instr->InputAt(1)->IsRegister()); ++ case kLa64DsubOvf: ++ return DsubOverflowLatency(); ++ case kLa64Mul: ++ return MulLatency(); ++ case kLa64MulOvf: ++ return MulOverflowLatency(); ++ case kLa64MulHigh: ++ return MulhLatency(); ++ case kLa64MulHighU: ++ return MulhuLatency(); ++ case kLa64DMulHigh: ++ return DMulhLatency(); ++ case kLa64Div: { ++ int latency = DivLatency(instr->InputAt(1)->IsRegister()); ++ return latency++; ++ } ++ case kLa64DivU: { ++ int latency = DivuLatency(instr->InputAt(1)->IsRegister()); ++ return latency++; ++ } ++ case kLa64Mod: ++ return ModLatency(); ++ case kLa64ModU: ++ return ModuLatency(); ++ case kLa64Dmul: ++ return DmulLatency(); ++ case kLa64Ddiv: { ++ int latency = DdivLatency(); ++ return latency++; ++ } ++ case kLa64DdivU: { ++ int latency = DdivuLatency(); ++ return latency++; ++ } ++ case kLa64Dmod: ++ return DmodLatency(); ++ case kLa64DmodU: ++ return DmoduLatency(); ++ case kLa64Dlsa: ++ case kLa64Lsa: ++ return DlsaLatency(); ++ case kLa64And: ++ return AndLatency(instr->InputAt(1)->IsRegister()); ++ case kLa64And32: { ++ bool is_operand_register = instr->InputAt(1)->IsRegister(); ++ int latency = AndLatency(is_operand_register); ++ if (is_operand_register) { ++ return latency + 2; ++ } else { ++ return latency + 1; ++ } ++ } ++ case kLa64Or: ++ return OrLatency(instr->InputAt(1)->IsRegister()); ++ case kLa64Or32: { ++ bool is_operand_register = instr->InputAt(1)->IsRegister(); ++ int latency = OrLatency(is_operand_register); ++ if (is_operand_register) { ++ return latency + 2; ++ } else { ++ return latency + 1; ++ } ++ } ++ case kLa64Nor: ++ return NorLatency(instr->InputAt(1)->IsRegister()); ++ case kLa64Nor32: { ++ bool is_operand_register = instr->InputAt(1)->IsRegister(); ++ int latency = NorLatency(is_operand_register); ++ if (is_operand_register) { ++ return latency + 2; ++ } else { ++ return latency + 1; ++ } ++ } ++ case kLa64Xor: ++ return XorLatency(instr->InputAt(1)->IsRegister()); ++ case kLa64Xor32: { ++ bool is_operand_register = instr->InputAt(1)->IsRegister(); ++ int latency = XorLatency(is_operand_register); ++ if (is_operand_register) { ++ return latency + 2; ++ } else { ++ return latency + 1; ++ } ++ } ++ case kLa64Clz: ++ case kLa64Dclz: ++ return DclzLatency(); ++ case kLa64Ctz: ++ return CtzLatency(); ++ case kLa64Dctz: ++ return DctzLatency(); ++ case 
kLa64Popcnt: ++ return PopcntLatency(); ++ case kLa64Dpopcnt: ++ return DpopcntLatency(); ++ case kLa64Shl: ++ return 1; ++ case kLa64Shr: ++ case kLa64Sar: ++ return 2; ++ case kLa64Ext: ++ case kLa64Ins: ++ case kLa64Dext: ++ case kLa64Dins: ++ case kLa64Dshl: ++ case kLa64Dshr: ++ case kLa64Dsar: ++ case kLa64Ror: ++ case kLa64Dror: ++ return 1; ++ case kLa64Tst: ++ return AndLatency(instr->InputAt(1)->IsRegister()); ++ case kLa64Mov: ++ return 1; ++ case kLa64CmpS: ++ return MoveLatency() + CompareF32Latency(); ++ case kLa64AddS: ++ return Latency::ADD_S; ++ case kLa64SubS: ++ return Latency::SUB_S; ++ case kLa64MulS: ++ return Latency::MUL_S; ++ case kLa64DivS: ++ return Latency::DIV_S; ++ case kLa64ModS: ++ return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() + ++ CallCFunctionLatency() + MovFromFloatResultLatency(); ++ case kLa64AbsS: ++ return Latency::ABS_S; ++ case kLa64NegS: ++ return NegdLatency(); ++ case kLa64SqrtS: ++ return Latency::SQRT_S; ++ case kLa64MaxS: ++ return Latency::MAX_S; ++ case kLa64MinS: ++ return Latency::MIN_S; ++ case kLa64CmpD: ++ return MoveLatency() + CompareF64Latency(); ++ case kLa64AddD: ++ return Latency::ADD_D; ++ case kLa64SubD: ++ return Latency::SUB_D; ++ case kLa64MulD: ++ return Latency::MUL_D; ++ case kLa64DivD: ++ return Latency::DIV_D; ++ case kLa64ModD: ++ return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() + ++ CallCFunctionLatency() + MovFromFloatResultLatency(); ++ case kLa64AbsD: ++ return Latency::ABS_D; ++ case kLa64NegD: ++ return NegdLatency(); ++ case kLa64SqrtD: ++ return Latency::SQRT_D; ++ case kLa64MaxD: ++ return Latency::MAX_D; ++ case kLa64MinD: ++ return Latency::MIN_D; ++ case kLa64Float64RoundDown: ++ case kLa64Float64RoundTruncate: ++ case kLa64Float64RoundUp: ++ case kLa64Float64RoundTiesEven: ++ return Float64RoundLatency(); ++ case kLa64Float32RoundDown: ++ case kLa64Float32RoundTruncate: ++ case kLa64Float32RoundUp: ++ case kLa64Float32RoundTiesEven: ++ return Float32RoundLatency(); ++ case kLa64Float32Max: ++ return Float32MaxLatency(); ++ case kLa64Float64Max: ++ return Float64MaxLatency(); ++ case kLa64Float32Min: ++ return Float32MinLatency(); ++ case kLa64Float64Min: ++ return Float64MinLatency(); ++ case kLa64Float64SilenceNaN: ++ return Latency::SUB_D; ++ case kLa64CvtSD: ++ return Latency::CVT_S_D; ++ case kLa64CvtDS: ++ return Latency::CVT_D_S; ++ case kLa64CvtDW: ++ return Latency::MTC1 + Latency::CVT_D_W; ++ case kLa64CvtSW: ++ return Latency::MTC1 + Latency::CVT_S_W; ++ case kLa64CvtSUw: ++ return 1 + Latency::DMTC1 + Latency::CVT_S_L; ++ case kLa64CvtSL: ++ return Latency::DMTC1 + Latency::CVT_S_L; ++ case kLa64CvtDL: ++ return Latency::DMTC1 + Latency::CVT_D_L; ++ case kLa64CvtDUw: ++ return 1 + Latency::DMTC1 + Latency::CVT_D_L; ++ case kLa64CvtDUl: ++ return 2 * Latency::BRANCH + 3 + 2 * Latency::DMTC1 + ++ 2 * Latency::CVT_D_L + Latency::ADD_D; ++ case kLa64CvtSUl: ++ return 2 * Latency::BRANCH + 3 + 2 * Latency::DMTC1 + ++ 2 * Latency::CVT_S_L + Latency::ADD_S; ++ case kLa64FloorWD: ++ return Latency::FLOOR_W_D + Latency::MFC1; ++ case kLa64CeilWD: ++ return Latency::CEIL_W_D + Latency::MFC1; ++ case kLa64RoundWD: ++ return Latency::ROUND_W_D + Latency::MFC1; ++ case kLa64TruncWD: ++ return Latency::TRUNC_W_D + Latency::MFC1; ++ case kLa64FloorWS: ++ return Latency::FLOOR_W_S + Latency::MFC1; ++ case kLa64CeilWS: ++ return Latency::CEIL_W_S + Latency::MFC1; ++ case kLa64RoundWS: ++ return Latency::ROUND_W_S + Latency::MFC1; ++ case kLa64TruncWS: ++ return 
Latency::TRUNC_W_S + Latency::MFC1 + 2 + MovnLatency(); ++ case kLa64TruncLS: ++ return TruncLSLatency(instr->OutputCount() > 1); ++ case kLa64TruncLD: ++ return TruncLDLatency(instr->OutputCount() > 1); ++ case kLa64TruncUwD: ++ // Estimated max. ++ return CompareF64Latency() + 2 * Latency::BRANCH + ++ 2 * Latency::TRUNC_W_D + Latency::SUB_D + OrLatency() + ++ Latency::MTC1 + Latency::MFC1 + Latency::MTHC1 + 1; ++ case kLa64TruncUwS: ++ // Estimated max. ++ return CompareF32Latency() + 2 * Latency::BRANCH + ++ 2 * Latency::TRUNC_W_S + Latency::SUB_S + OrLatency() + ++ Latency::MTC1 + 2 * Latency::MFC1 + 2 + MovzLatency(); ++ case kLa64TruncUlS: ++ return TruncUlSLatency(); ++ case kLa64TruncUlD: ++ return TruncUlDLatency(); ++ case kLa64BitcastDL: ++ return Latency::DMFC1; ++ case kLa64BitcastLD: ++ return Latency::DMTC1; ++ case kLa64Float64ExtractLowWord32: ++ return Latency::MFC1; ++ case kLa64Float64InsertLowWord32: ++ return Latency::MFHC1 + Latency::MTC1 + Latency::MTHC1; ++ case kLa64Float64ExtractHighWord32: ++ return Latency::MFHC1; ++ case kLa64Float64InsertHighWord32: ++ return Latency::MTHC1; ++ case kLa64Seb: ++ case kLa64Seh: ++ return 1; ++ case kLa64Lbu: ++ case kLa64Lb: ++ case kLa64Lhu: ++ case kLa64Lh: ++ case kLa64Lwu: ++ case kLa64Lw: ++ case kLa64Ld: ++ case kLa64Sb: ++ case kLa64Sh: ++ case kLa64Sw: ++ case kLa64Sd: ++ return AlignedMemoryLatency(); ++ case kLa64Lwc1: ++ return Lwc1Latency(); ++ case kLa64Ldc1: ++ return Ldc1Latency(); ++ case kLa64Swc1: ++ return Swc1Latency(); ++ case kLa64Sdc1: ++ return Sdc1Latency(); ++ case kLa64Ulhu: ++ case kLa64Ulh: ++ return UlhuLatency(); ++ case kLa64Ulwu: ++ return UlwuLatency(); ++ case kLa64Ulw: ++ return UlwLatency(); ++ case kLa64Uld: ++ return UldLatency(); ++ case kLa64Ulwc1: ++ return Ulwc1Latency(); ++ case kLa64Uldc1: ++ return Uldc1Latency(); ++ case kLa64Ush: ++ return UshLatency(); ++ case kLa64Usw: ++ return UswLatency(); ++ case kLa64Usd: ++ return UsdLatency(); ++ case kLa64Uswc1: ++ return Uswc1Latency(); ++ case kLa64Usdc1: ++ return Usdc1Latency(); ++ case kLa64Push: { ++ int latency = 0; ++ if (instr->InputAt(0)->IsFPRegister()) { ++ latency = Sdc1Latency() + DsubuLatency(false); ++ } else { ++ latency = PushLatency(); ++ } ++ return latency; ++ } ++ case kLa64Peek: { ++ int latency = 0; ++ if (instr->OutputAt(0)->IsFPRegister()) { ++ auto op = LocationOperand::cast(instr->OutputAt(0)); ++ switch (op->representation()) { ++ case MachineRepresentation::kFloat64: ++ latency = Ldc1Latency(); ++ break; ++ case MachineRepresentation::kFloat32: ++ latency = Latency::LWC1; ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } else { ++ latency = AlignedMemoryLatency(); ++ } ++ return latency; ++ } ++ case kLa64StackClaim: ++ return DsubuLatency(false); ++ case kLa64StoreToStackSlot: { ++ int latency = 0; ++ if (instr->InputAt(0)->IsFPRegister()) { ++ if (instr->InputAt(0)->IsSimd128Register()) { ++ latency = 1; // Estimated value. 
++ } else { ++ latency = Sdc1Latency(); ++ } ++ } else { ++ latency = AlignedMemoryLatency(); ++ } ++ return latency; ++ } ++ case kLa64ByteSwap64: ++ return ByteSwapSignedLatency(); ++ case kLa64ByteSwap32: ++ return ByteSwapSignedLatency(); ++ case kWord32AtomicLoadInt8: ++ case kWord32AtomicLoadUint8: ++ case kWord32AtomicLoadInt16: ++ case kWord32AtomicLoadUint16: ++ case kWord32AtomicLoadWord32: ++ return 2; ++ case kWord32AtomicStoreWord8: ++ case kWord32AtomicStoreWord16: ++ case kWord32AtomicStoreWord32: ++ return 3; ++ case kWord32AtomicExchangeInt8: ++ return Word32AtomicExchangeLatency(true, 8); ++ case kWord32AtomicExchangeUint8: ++ return Word32AtomicExchangeLatency(false, 8); ++ case kWord32AtomicExchangeInt16: ++ return Word32AtomicExchangeLatency(true, 16); ++ case kWord32AtomicExchangeUint16: ++ return Word32AtomicExchangeLatency(false, 16); ++ case kWord32AtomicExchangeWord32: ++ return 2 + LlLatency(0) + 1 + ScLatency(0) + BranchShortLatency() + 1; ++ case kWord32AtomicCompareExchangeInt8: ++ return Word32AtomicCompareExchangeLatency(true, 8); ++ case kWord32AtomicCompareExchangeUint8: ++ return Word32AtomicCompareExchangeLatency(false, 8); ++ case kWord32AtomicCompareExchangeInt16: ++ return Word32AtomicCompareExchangeLatency(true, 16); ++ case kWord32AtomicCompareExchangeUint16: ++ return Word32AtomicCompareExchangeLatency(false, 16); ++ case kWord32AtomicCompareExchangeWord32: ++ return 3 + LlLatency(0) + BranchShortLatency() + 1 + ScLatency(0) + ++ BranchShortLatency() + 1; ++ case kLa64AssertEqual: ++ return AssertLatency(); ++ default: ++ return 1; ++ } ++} ++ ++} // namespace compiler ++} // namespace internal ++} // namespace v8 +diff --git a/src/3rdparty/chromium/v8/src/compiler/backend/la64/instruction-selector-la64.cc b/src/3rdparty/chromium/v8/src/compiler/backend/la64/instruction-selector-la64.cc +new file mode 100644 +index 0000000000..67ea5efe39 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/compiler/backend/la64/instruction-selector-la64.cc +@@ -0,0 +1,3096 @@ ++// Copyright 2014 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#include "src/base/bits.h" ++#include "src/compiler/backend/instruction-selector-impl.h" ++#include "src/compiler/node-matchers.h" ++#include "src/compiler/node-properties.h" ++ ++namespace v8 { ++namespace internal { ++namespace compiler { ++ ++#define TRACE_UNIMPL() \ ++ PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__) ++ ++#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__) ++ ++// Adds la64-specific methods for generating InstructionOperands. ++class La64OperandGenerator final : public OperandGenerator { ++ public: ++ explicit La64OperandGenerator(InstructionSelector* selector) ++ : OperandGenerator(selector) {} ++ ++ InstructionOperand UseOperand(Node* node, InstructionCode opcode) { ++ if (CanBeImmediate(node, opcode)) { ++ return UseImmediate(node); ++ } ++ return UseRegister(node); ++ } ++ ++ // Use the zero register if the node has the immediate value zero, otherwise ++ // assign a register. 
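++  // Note: a floating-point constant only qualifies when its bit pattern is
++  // all zeros, i.e. +0.0; -0.0 has the sign bit set and is materialized into
++  // a register like any other non-zero constant.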
++ InstructionOperand UseRegisterOrImmediateZero(Node* node) { ++ if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) || ++ (IsFloatConstant(node) && ++ (bit_cast(GetFloatConstantValue(node)) == 0))) { ++ return UseImmediate(node); ++ } ++ return UseRegister(node); ++ } ++ ++ bool IsIntegerConstant(Node* node) { ++ return (node->opcode() == IrOpcode::kInt32Constant) || ++ (node->opcode() == IrOpcode::kInt64Constant); ++ } ++ ++ int64_t GetIntegerConstantValue(Node* node) { ++ if (node->opcode() == IrOpcode::kInt32Constant) { ++ return OpParameter(node->op()); ++ } ++ DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode()); ++ return OpParameter(node->op()); ++ } ++ ++ bool IsFloatConstant(Node* node) { ++ return (node->opcode() == IrOpcode::kFloat32Constant) || ++ (node->opcode() == IrOpcode::kFloat64Constant); ++ } ++ ++ double GetFloatConstantValue(Node* node) { ++ if (node->opcode() == IrOpcode::kFloat32Constant) { ++ return OpParameter(node->op()); ++ } ++ DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode()); ++ return OpParameter(node->op()); ++ } ++ ++ bool CanBeImmediate(Node* node, InstructionCode mode) { ++ return IsIntegerConstant(node) && ++ CanBeImmediate(GetIntegerConstantValue(node), mode); ++ } ++ ++ bool CanBeImmediate(int64_t value, InstructionCode opcode) { ++ switch (ArchOpcodeField::decode(opcode)) { ++ case kLa64Shl: ++ case kLa64Sar: ++ case kLa64Shr: ++ return is_uint5(value); ++ case kLa64Dshl: ++ case kLa64Dsar: ++ case kLa64Dshr: ++ return is_uint6(value); ++ case kLa64Add: ++ case kLa64And32: ++ case kLa64And: ++ case kLa64Dadd: ++ case kLa64Or32: ++ case kLa64Or: ++ case kLa64Tst: ++ case kLa64Xor: ++ return is_uint12(value); ++ case kLa64Lb: ++ case kLa64Lbu: ++ case kLa64Sb: ++ case kLa64Lh: ++ case kLa64Lhu: ++ case kLa64Sh: ++ case kLa64Lw: ++ case kLa64Sw: ++ case kLa64Ld: ++ case kLa64Sd: ++ case kLa64Lwc1: ++ case kLa64Swc1: ++ case kLa64Ldc1: ++ case kLa64Sdc1: ++ return is_int12(value); ++ default: ++ return is_int12(value); ++ } ++ } ++ ++ private: ++ bool ImmediateFitsAddrMode1Instruction(int32_t imm) const { ++ TRACE_UNIMPL(); ++ return false; ++ } ++}; ++ ++static void VisitRR(InstructionSelector* selector, ArchOpcode opcode, ++ Node* node) { ++ La64OperandGenerator g(selector); ++ selector->Emit(opcode, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0))); ++} ++ ++static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, ++ Node* node) { ++ La64OperandGenerator g(selector); ++ int32_t imm = OpParameter(node->op()); ++ selector->Emit(opcode, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0)), g.UseImmediate(imm)); ++} ++ ++static void VisitSimdShift(InstructionSelector* selector, ArchOpcode opcode, ++ Node* node) { ++ La64OperandGenerator g(selector); ++ if (g.IsIntegerConstant(node->InputAt(1))) { ++ selector->Emit(opcode, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0)), ++ g.UseImmediate(node->InputAt(1))); ++ } else { ++ selector->Emit(opcode, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0)), ++ g.UseRegister(node->InputAt(1))); ++ } ++} ++ ++static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode, ++ Node* node) { ++ La64OperandGenerator g(selector); ++ int32_t imm = OpParameter(node->op()); ++ selector->Emit(opcode, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0)), g.UseImmediate(imm), ++ g.UseRegister(node->InputAt(1))); ++} ++ ++static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, ++ Node* node) { ++ La64OperandGenerator 
g(selector); ++ selector->Emit(opcode, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0)), ++ g.UseRegister(node->InputAt(1))); ++} ++ ++void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) { ++ La64OperandGenerator g(selector); ++ selector->Emit( ++ opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)), ++ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2))); ++} ++ ++static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, ++ Node* node) { ++ La64OperandGenerator g(selector); ++ selector->Emit(opcode, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0)), ++ g.UseOperand(node->InputAt(1), opcode)); ++} ++ ++struct ExtendingLoadMatcher { ++ ExtendingLoadMatcher(Node* node, InstructionSelector* selector) ++ : matches_(false), selector_(selector), base_(nullptr), immediate_(0) { ++ Initialize(node); ++ } ++ ++ bool Matches() const { return matches_; } ++ ++ Node* base() const { ++ DCHECK(Matches()); ++ return base_; ++ } ++ int64_t immediate() const { ++ DCHECK(Matches()); ++ return immediate_; ++ } ++ ArchOpcode opcode() const { ++ DCHECK(Matches()); ++ return opcode_; ++ } ++ ++ private: ++ bool matches_; ++ InstructionSelector* selector_; ++ Node* base_; ++ int64_t immediate_; ++ ArchOpcode opcode_; ++ ++ void Initialize(Node* node) { ++ Int64BinopMatcher m(node); ++ // When loading a 64-bit value and shifting by 32, we should ++ // just load and sign-extend the interesting 4 bytes instead. ++ // This happens, for example, when we're loading and untagging SMIs. ++ DCHECK(m.IsWord64Sar()); ++ if (m.left().IsLoad() && m.right().Is(32) && ++ selector_->CanCover(m.node(), m.left().node())) { ++ DCHECK_EQ(selector_->GetEffectLevel(node), ++ selector_->GetEffectLevel(m.left().node())); ++ MachineRepresentation rep = ++ LoadRepresentationOf(m.left().node()->op()).representation(); ++ DCHECK_EQ(3, ElementSizeLog2Of(rep)); ++ if (rep != MachineRepresentation::kTaggedSigned && ++ rep != MachineRepresentation::kTaggedPointer && ++ rep != MachineRepresentation::kTagged && ++ rep != MachineRepresentation::kWord64) { ++ return; ++ } ++ ++ La64OperandGenerator g(selector_); ++ Node* load = m.left().node(); ++ Node* offset = load->InputAt(1); ++ base_ = load->InputAt(0); ++ opcode_ = kLa64Lw; ++ if (g.CanBeImmediate(offset, opcode_)) { ++ immediate_ = g.GetIntegerConstantValue(offset) + 4; ++ matches_ = g.CanBeImmediate(immediate_, kLa64Lw); ++ } ++ } ++ } ++}; ++ ++bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node, ++ Node* output_node) { ++ ExtendingLoadMatcher m(node, selector); ++ La64OperandGenerator g(selector); ++ if (m.Matches()) { ++ InstructionOperand inputs[2]; ++ inputs[0] = g.UseRegister(m.base()); ++ InstructionCode opcode = ++ m.opcode() | AddressingModeField::encode(kMode_MRI); ++ DCHECK(is_int32(m.immediate())); ++ inputs[1] = g.TempImmediate(static_cast(m.immediate())); ++ InstructionOperand outputs[] = {g.DefineAsRegister(output_node)}; ++ selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs), ++ inputs); ++ return true; ++ } ++ return false; ++} ++ ++bool TryMatchImmediate(InstructionSelector* selector, ++ InstructionCode* opcode_return, Node* node, ++ size_t* input_count_return, InstructionOperand* inputs) { ++ La64OperandGenerator g(selector); ++ if (g.CanBeImmediate(node, *opcode_return)) { ++ *opcode_return |= AddressingModeField::encode(kMode_MRI); ++ inputs[0] = g.UseImmediate(node); ++ *input_count_return = 1; ++ return true; ++ } ++ return false; ++} ++ ++static 
void VisitBinop(InstructionSelector* selector, Node* node, ++ InstructionCode opcode, bool has_reverse_opcode, ++ InstructionCode reverse_opcode, ++ FlagsContinuation* cont) { ++ La64OperandGenerator g(selector); ++ Int32BinopMatcher m(node); ++ InstructionOperand inputs[2]; ++ size_t input_count = 0; ++ InstructionOperand outputs[1]; ++ size_t output_count = 0; ++ ++ if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count, ++ &inputs[1])) { ++ inputs[0] = g.UseRegister(m.left().node()); ++ input_count++; ++ } else if (has_reverse_opcode && ++ TryMatchImmediate(selector, &reverse_opcode, m.left().node(), ++ &input_count, &inputs[1])) { ++ inputs[0] = g.UseRegister(m.right().node()); ++ opcode = reverse_opcode; ++ input_count++; ++ } else { ++ inputs[input_count++] = g.UseRegister(m.left().node()); ++ inputs[input_count++] = g.UseOperand(m.right().node(), opcode); ++ } ++ ++ if (cont->IsDeoptimize()) { ++ // If we can deoptimize as a result of the binop, we need to make sure that ++ // the deopt inputs are not overwritten by the binop result. One way ++ // to achieve that is to declare the output register as same-as-first. ++ outputs[output_count++] = g.DefineSameAsFirst(node); ++ } else { ++ outputs[output_count++] = g.DefineAsRegister(node); ++ } ++ ++ DCHECK_NE(0u, input_count); ++ DCHECK_EQ(1u, output_count); ++ DCHECK_GE(arraysize(inputs), input_count); ++ DCHECK_GE(arraysize(outputs), output_count); ++ ++ selector->EmitWithContinuation(opcode, output_count, outputs, input_count, ++ inputs, cont); ++} ++ ++static void VisitBinop(InstructionSelector* selector, Node* node, ++ InstructionCode opcode, bool has_reverse_opcode, ++ InstructionCode reverse_opcode) { ++ FlagsContinuation cont; ++ VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont); ++} ++ ++static void VisitBinop(InstructionSelector* selector, Node* node, ++ InstructionCode opcode, FlagsContinuation* cont) { ++ VisitBinop(selector, node, opcode, false, kArchNop, cont); ++} ++ ++static void VisitBinop(InstructionSelector* selector, Node* node, ++ InstructionCode opcode) { ++ VisitBinop(selector, node, opcode, false, kArchNop); ++} ++ ++void InstructionSelector::VisitStackSlot(Node* node) { ++ StackSlotRepresentation rep = StackSlotRepresentationOf(node->op()); ++ int alignment = rep.alignment(); ++ int slot = frame_->AllocateSpillSlot(rep.size(), alignment); ++ OperandGenerator g(this); ++ ++ Emit(kArchStackSlot, g.DefineAsRegister(node), ++ sequence()->AddImmediate(Constant(slot)), ++ sequence()->AddImmediate(Constant(alignment)), 0, nullptr); ++} ++ ++void InstructionSelector::VisitAbortCSAAssert(Node* node) { ++ La64OperandGenerator g(this); ++ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0)); ++} ++ ++void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode, ++ Node* output = nullptr) { ++ La64OperandGenerator g(selector); ++ Node* base = node->InputAt(0); ++ Node* index = node->InputAt(1); ++ ++ if (g.CanBeImmediate(index, opcode)) { ++ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI), ++ g.DefineAsRegister(output == nullptr ? node : output), ++ g.UseRegister(base), g.UseImmediate(index)); ++ } else { ++ selector->Emit(opcode | AddressingModeField::encode(kMode_MRR), ++ g.DefineAsRegister(output == nullptr ? 
node : output), ++ g.UseRegister(base), g.UseRegister(index)); ++ } ++} ++ ++void InstructionSelector::VisitLoadTransform(Node* node) { ++ LoadTransformParameters params = LoadTransformParametersOf(node->op()); ++ ++ InstructionCode opcode = kArchNop; ++ switch (params.transformation) { ++ case LoadTransformation::kS8x16LoadSplat: ++ opcode = kLa64S8x16LoadSplat; ++ break; ++ case LoadTransformation::kS16x8LoadSplat: ++ opcode = kLa64S16x8LoadSplat; ++ break; ++ case LoadTransformation::kS32x4LoadSplat: ++ opcode = kLa64S32x4LoadSplat; ++ break; ++ case LoadTransformation::kS64x2LoadSplat: ++ opcode = kLa64S64x2LoadSplat; ++ break; ++ case LoadTransformation::kI16x8Load8x8S: ++ opcode = kLa64I16x8Load8x8S; ++ break; ++ case LoadTransformation::kI16x8Load8x8U: ++ opcode = kLa64I16x8Load8x8U; ++ break; ++ case LoadTransformation::kI32x4Load16x4S: ++ opcode = kLa64I32x4Load16x4S; ++ break; ++ case LoadTransformation::kI32x4Load16x4U: ++ opcode = kLa64I32x4Load16x4U; ++ break; ++ case LoadTransformation::kI64x2Load32x2S: ++ opcode = kLa64I64x2Load32x2S; ++ break; ++ case LoadTransformation::kI64x2Load32x2U: ++ opcode = kLa64I64x2Load32x2U; ++ break; ++ default: ++ UNIMPLEMENTED(); ++ } ++ ++ EmitLoad(this, node, opcode); ++} ++ ++void InstructionSelector::VisitLoad(Node* node) { ++ LoadRepresentation load_rep = LoadRepresentationOf(node->op()); ++ ++ InstructionCode opcode = kArchNop; ++ switch (load_rep.representation()) { ++ case MachineRepresentation::kFloat32: ++ opcode = kLa64Lwc1; ++ break; ++ case MachineRepresentation::kFloat64: ++ opcode = kLa64Ldc1; ++ break; ++ case MachineRepresentation::kBit: // Fall through. ++ case MachineRepresentation::kWord8: ++ opcode = load_rep.IsUnsigned() ? kLa64Lbu : kLa64Lb; ++ break; ++ case MachineRepresentation::kWord16: ++ opcode = load_rep.IsUnsigned() ? kLa64Lhu : kLa64Lh; ++ break; ++ case MachineRepresentation::kWord32: ++ opcode = load_rep.IsUnsigned() ? kLa64Lwu : kLa64Lw; ++ break; ++ case MachineRepresentation::kTaggedSigned: // Fall through. ++ case MachineRepresentation::kTaggedPointer: // Fall through. ++ case MachineRepresentation::kTagged: // Fall through. ++ case MachineRepresentation::kWord64: ++ opcode = kLa64Ld; ++ break; ++ case MachineRepresentation::kCompressedPointer: // Fall through. ++ case MachineRepresentation::kCompressed: // Fall through. ++ case MachineRepresentation::kNone: ++ case MachineRepresentation::kSimd128: ++ UNREACHABLE(); ++ } ++ if (node->opcode() == IrOpcode::kPoisonedLoad) { ++ CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison); ++ opcode |= MiscField::encode(kMemoryAccessPoisoned); ++ } ++ ++ EmitLoad(this, node, opcode); ++} ++ ++void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); } ++ ++void InstructionSelector::VisitProtectedLoad(Node* node) { ++ // TODO(eholk) ++ UNIMPLEMENTED(); ++} ++ ++void InstructionSelector::VisitStore(Node* node) { ++ La64OperandGenerator g(this); ++ Node* base = node->InputAt(0); ++ Node* index = node->InputAt(1); ++ Node* value = node->InputAt(2); ++ ++ StoreRepresentation store_rep = StoreRepresentationOf(node->op()); ++ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind(); ++ MachineRepresentation rep = store_rep.representation(); ++ ++ // TODO(la64): I guess this could be done in a better way. 
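++  // When a write barrier is required (and write barriers are not disabled),
++  // base, index and value are pinned to unique registers, two temporary
++  // registers are reserved, and a single kArchStoreWithWriteBarrier is
++  // emitted with the RecordWriteMode packed into MiscField.  Otherwise the
++  // representation switch below selects a plain store (Sb/Sh/Sw/Sd/Swc1/Sdc1)
++  // with an MRI or MRR addressing mode.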
++ if (write_barrier_kind != kNoWriteBarrier && ++ V8_LIKELY(!FLAG_disable_write_barriers)) { ++ DCHECK(CanBeTaggedPointer(rep)); ++ InstructionOperand inputs[3]; ++ size_t input_count = 0; ++ inputs[input_count++] = g.UseUniqueRegister(base); ++ inputs[input_count++] = g.UseUniqueRegister(index); ++ inputs[input_count++] = g.UseUniqueRegister(value); ++ RecordWriteMode record_write_mode = ++ WriteBarrierKindToRecordWriteMode(write_barrier_kind); ++ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()}; ++ size_t const temp_count = arraysize(temps); ++ InstructionCode code = kArchStoreWithWriteBarrier; ++ code |= MiscField::encode(static_cast(record_write_mode)); ++ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps); ++ } else { ++ ArchOpcode opcode = kArchNop; ++ switch (rep) { ++ case MachineRepresentation::kFloat32: ++ opcode = kLa64Swc1; ++ break; ++ case MachineRepresentation::kFloat64: ++ opcode = kLa64Sdc1; ++ break; ++ case MachineRepresentation::kBit: // Fall through. ++ case MachineRepresentation::kWord8: ++ opcode = kLa64Sb; ++ break; ++ case MachineRepresentation::kWord16: ++ opcode = kLa64Sh; ++ break; ++ case MachineRepresentation::kWord32: ++ opcode = kLa64Sw; ++ break; ++ case MachineRepresentation::kTaggedSigned: // Fall through. ++ case MachineRepresentation::kTaggedPointer: // Fall through. ++ case MachineRepresentation::kTagged: // Fall through. ++ case MachineRepresentation::kWord64: ++ opcode = kLa64Sd; ++ break; ++ case MachineRepresentation::kCompressedPointer: // Fall through. ++ case MachineRepresentation::kCompressed: // Fall through. ++ case MachineRepresentation::kNone: ++ case MachineRepresentation::kSimd128: ++ UNREACHABLE(); ++ return; ++ } ++ ++ if (g.CanBeImmediate(index, opcode)) { ++ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(), ++ g.UseRegister(base), g.UseImmediate(index), ++ g.UseRegisterOrImmediateZero(value)); ++ } else { ++ Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(), ++ g.UseRegister(base), g.UseRegister(index), ++ g.UseRegisterOrImmediateZero(value)); ++ } ++ } ++} ++ ++void InstructionSelector::VisitProtectedStore(Node* node) { ++ // TODO(eholk) ++ UNIMPLEMENTED(); ++} ++ ++void InstructionSelector::VisitWord32And(Node* node) { ++ La64OperandGenerator g(this); ++ Int32BinopMatcher m(node); ++ if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) && ++ m.right().HasValue()) { ++ uint32_t mask = m.right().Value(); ++ uint32_t mask_width = base::bits::CountPopulation(mask); ++ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask); ++ if ((mask_width != 0) && (mask_msb + mask_width == 32)) { ++ // The mask must be contiguous, and occupy the least-significant bits. ++ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask)); ++ ++ // Select Ext for And(Shr(x, imm), mask) where the mask is in the least ++ // significant bits. ++ Int32BinopMatcher mleft(m.left().node()); ++ if (mleft.right().HasValue()) { ++ // Any shift value can match; int32 shifts use `value % 32`. ++ uint32_t lsb = mleft.right().Value() & 0x1F; ++ ++ // Ext cannot extract bits past the register size, however since ++ // shifting the original value would have introduced some zeros we can ++ // still use Ext with a smaller mask and the remaining bits will be ++ // zeros. 
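++        // Worked example: And(Shr(x, 24), 0x3FF) gives lsb == 24 and
++        // mask_width == 10; 24 + 10 > 32, so mask_width is clamped to 8 and
++        // a single Ext(x, 24, 8) extracts bits 31..24, exactly the value the
++        // shift-and-mask pair would have produced.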
++        if (lsb + mask_width > 32) mask_width = 32 - lsb;
++
++        Emit(kLa64Ext, g.DefineAsRegister(node),
++             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
++             g.TempImmediate(mask_width));
++        return;
++      }
++      // Other cases fall through to the normal And operation.
++    }
++  }
++  if (m.right().HasValue()) {
++    uint32_t mask = m.right().Value();
++    uint32_t shift = base::bits::CountPopulation(~mask);
++    uint32_t msb = base::bits::CountLeadingZeros32(~mask);
++    if (shift != 0 && shift != 32 && msb + shift == 32) {
++      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
++      // and remove constant loading of inverted mask.
++      Emit(kLa64Ins, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
++           g.TempImmediate(0), g.TempImmediate(shift));
++      return;
++    }
++  }
++  VisitBinop(this, node, kLa64And32, true, kLa64And32);
++}
++
++void InstructionSelector::VisitWord64And(Node* node) {
++  La64OperandGenerator g(this);
++  Int64BinopMatcher m(node);
++  if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
++      m.right().HasValue()) {
++    uint64_t mask = m.right().Value();
++    uint32_t mask_width = base::bits::CountPopulation(mask);
++    uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
++    if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
++      // The mask must be contiguous, and occupy the least-significant bits.
++      DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
++
++      // Select Dext for And(Shr(x, imm), mask) where the mask is in the least
++      // significant bits.
++      Int64BinopMatcher mleft(m.left().node());
++      if (mleft.right().HasValue()) {
++        // Any shift value can match; int64 shifts use `value % 64`.
++        uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3F);
++
++        // Dext cannot extract bits past the register size, however since
++        // shifting the original value would have introduced some zeros we can
++        // still use Dext with a smaller mask and the remaining bits will be
++        // zeros.
++        if (lsb + mask_width > 64) mask_width = 64 - lsb;
++
++        if (lsb == 0 && mask_width == 64) {
++          Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node()));
++        } else {
++          Emit(kLa64Dext, g.DefineAsRegister(node),
++               g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
++               g.TempImmediate(static_cast<int32_t>(mask_width)));
++        }
++        return;
++      }
++      // Other cases fall through to the normal And operation.
++    }
++  }
++  if (m.right().HasValue()) {
++    uint64_t mask = m.right().Value();
++    uint32_t shift = base::bits::CountPopulation(~mask);
++    uint32_t msb = base::bits::CountLeadingZeros64(~mask);
++    if (shift != 0 && shift < 32 && msb + shift == 64) {
++      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
++      // and remove constant loading of inverted mask. Dins cannot insert bits
++      // past word size, so shifts smaller than 32 are covered.
++ Emit(kLa64Dins, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), ++ g.TempImmediate(0), g.TempImmediate(shift)); ++ return; ++ } ++ } ++ VisitBinop(this, node, kLa64And, true, kLa64And); ++} ++ ++void InstructionSelector::VisitWord32Or(Node* node) { ++ VisitBinop(this, node, kLa64Or32, true, kLa64Or32); ++} ++ ++void InstructionSelector::VisitWord64Or(Node* node) { ++ VisitBinop(this, node, kLa64Or, true, kLa64Or); ++} ++ ++void InstructionSelector::VisitWord32Xor(Node* node) { ++ Int32BinopMatcher m(node); ++ if (m.left().IsWord32Or() && CanCover(node, m.left().node()) && ++ m.right().Is(-1)) { ++ Int32BinopMatcher mleft(m.left().node()); ++ if (!mleft.right().HasValue()) { ++ La64OperandGenerator g(this); ++ Emit(kLa64Nor32, g.DefineAsRegister(node), ++ g.UseRegister(mleft.left().node()), ++ g.UseRegister(mleft.right().node())); ++ return; ++ } ++ } ++ if (m.right().Is(-1)) { ++ // Use Nor for bit negation and eliminate constant loading for xori. ++ La64OperandGenerator g(this); ++ Emit(kLa64Nor32, g.DefineAsRegister(node), g.UseRegister(m.left().node()), ++ g.TempImmediate(0)); ++ return; ++ } ++ VisitBinop(this, node, kLa64Xor32, true, kLa64Xor32); ++} ++ ++void InstructionSelector::VisitWord64Xor(Node* node) { ++ Int64BinopMatcher m(node); ++ if (m.left().IsWord64Or() && CanCover(node, m.left().node()) && ++ m.right().Is(-1)) { ++ Int64BinopMatcher mleft(m.left().node()); ++ if (!mleft.right().HasValue()) { ++ La64OperandGenerator g(this); ++ Emit(kLa64Nor, g.DefineAsRegister(node), ++ g.UseRegister(mleft.left().node()), ++ g.UseRegister(mleft.right().node())); ++ return; ++ } ++ } ++ if (m.right().Is(-1)) { ++ // Use Nor for bit negation and eliminate constant loading for xori. ++ La64OperandGenerator g(this); ++ Emit(kLa64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()), ++ g.TempImmediate(0)); ++ return; ++ } ++ VisitBinop(this, node, kLa64Xor, true, kLa64Xor); ++} ++ ++void InstructionSelector::VisitWord32Shl(Node* node) { ++ Int32BinopMatcher m(node); ++ if (m.left().IsWord32And() && CanCover(node, m.left().node()) && ++ m.right().IsInRange(1, 31)) { ++ La64OperandGenerator g(this); ++ Int32BinopMatcher mleft(m.left().node()); ++ // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is ++ // contiguous, and the shift immediate non-zero. ++ if (mleft.right().HasValue()) { ++ uint32_t mask = mleft.right().Value(); ++ uint32_t mask_width = base::bits::CountPopulation(mask); ++ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask); ++ if ((mask_width != 0) && (mask_msb + mask_width == 32)) { ++ uint32_t shift = m.right().Value(); ++ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask)); ++ DCHECK_NE(0u, shift); ++ if ((shift + mask_width) >= 32) { ++ // If the mask is contiguous and reaches or extends beyond the top ++ // bit, only the shift is needed. ++ Emit(kLa64Shl, g.DefineAsRegister(node), ++ g.UseRegister(mleft.left().node()), ++ g.UseImmediate(m.right().node())); ++ return; ++ } ++ } ++ } ++ } ++ VisitRRO(this, kLa64Shl, node); ++} ++ ++void InstructionSelector::VisitWord32Shr(Node* node) { ++ Int32BinopMatcher m(node); ++ if (m.left().IsWord32And() && m.right().HasValue()) { ++ uint32_t lsb = m.right().Value() & 0x1F; ++ Int32BinopMatcher mleft(m.left().node()); ++ if (mleft.right().HasValue() && mleft.right().Value() != 0) { ++ // Select Ext for Shr(And(x, mask), imm) where the result of the mask is ++ // shifted into the least-significant bits. 
++ uint32_t mask = (mleft.right().Value() >> lsb) << lsb; ++ unsigned mask_width = base::bits::CountPopulation(mask); ++ unsigned mask_msb = base::bits::CountLeadingZeros32(mask); ++ if ((mask_msb + mask_width + lsb) == 32) { ++ La64OperandGenerator g(this); ++ DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask)); ++ Emit(kLa64Ext, g.DefineAsRegister(node), ++ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb), ++ g.TempImmediate(mask_width)); ++ return; ++ } ++ } ++ } ++ VisitRRO(this, kLa64Shr, node); ++} ++ ++void InstructionSelector::VisitWord32Sar(Node* node) { ++ Int32BinopMatcher m(node); ++ if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) { ++ Int32BinopMatcher mleft(m.left().node()); ++ if (m.right().HasValue() && mleft.right().HasValue()) { ++ La64OperandGenerator g(this); ++ uint32_t sar = m.right().Value(); ++ uint32_t shl = mleft.right().Value(); ++ if ((sar == shl) && (sar == 16)) { ++ Emit(kLa64Seh, g.DefineAsRegister(node), ++ g.UseRegister(mleft.left().node())); ++ return; ++ } else if ((sar == shl) && (sar == 24)) { ++ Emit(kLa64Seb, g.DefineAsRegister(node), ++ g.UseRegister(mleft.left().node())); ++ return; ++ } else if ((sar == shl) && (sar == 32)) { ++ Emit(kLa64Shl, g.DefineAsRegister(node), ++ g.UseRegister(mleft.left().node()), g.TempImmediate(0)); ++ return; ++ } ++ } ++ } ++ VisitRRO(this, kLa64Sar, node); ++} ++ ++void InstructionSelector::VisitWord64Shl(Node* node) { ++ La64OperandGenerator g(this); ++ Int64BinopMatcher m(node); ++ if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) && ++ m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) { ++ // There's no need to sign/zero-extend to 64-bit if we shift out the upper ++ // 32 bits anyway. ++ Emit(kLa64Dshl, g.DefineSameAsFirst(node), ++ g.UseRegister(m.left().node()->InputAt(0)), ++ g.UseImmediate(m.right().node())); ++ return; ++ } ++ if (m.left().IsWord64And() && CanCover(node, m.left().node()) && ++ m.right().IsInRange(1, 63)) { ++ // Match Word64Shl(Word64And(x, mask), imm) to Dshl where the mask is ++ // contiguous, and the shift immediate non-zero. ++ Int64BinopMatcher mleft(m.left().node()); ++ if (mleft.right().HasValue()) { ++ uint64_t mask = mleft.right().Value(); ++ uint32_t mask_width = base::bits::CountPopulation(mask); ++ uint32_t mask_msb = base::bits::CountLeadingZeros64(mask); ++ if ((mask_width != 0) && (mask_msb + mask_width == 64)) { ++ uint64_t shift = m.right().Value(); ++ DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask)); ++ DCHECK_NE(0u, shift); ++ ++ if ((shift + mask_width) >= 64) { ++ // If the mask is contiguous and reaches or extends beyond the top ++ // bit, only the shift is needed. ++ Emit(kLa64Dshl, g.DefineAsRegister(node), ++ g.UseRegister(mleft.left().node()), ++ g.UseImmediate(m.right().node())); ++ return; ++ } ++ } ++ } ++ } ++ VisitRRO(this, kLa64Dshl, node); ++} ++ ++void InstructionSelector::VisitWord64Shr(Node* node) { ++ Int64BinopMatcher m(node); ++ if (m.left().IsWord64And() && m.right().HasValue()) { ++ uint32_t lsb = m.right().Value() & 0x3F; ++ Int64BinopMatcher mleft(m.left().node()); ++ if (mleft.right().HasValue() && mleft.right().Value() != 0) { ++ // Select Dext for Shr(And(x, mask), imm) where the result of the mask is ++ // shifted into the least-significant bits. 
++ uint64_t mask = (mleft.right().Value() >> lsb) << lsb; ++ unsigned mask_width = base::bits::CountPopulation(mask); ++ unsigned mask_msb = base::bits::CountLeadingZeros64(mask); ++ if ((mask_msb + mask_width + lsb) == 64) { ++ La64OperandGenerator g(this); ++ DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask)); ++ Emit(kLa64Dext, g.DefineAsRegister(node), ++ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb), ++ g.TempImmediate(mask_width)); ++ return; ++ } ++ } ++ } ++ VisitRRO(this, kLa64Dshr, node); ++} ++ ++void InstructionSelector::VisitWord64Sar(Node* node) { ++ if (TryEmitExtendingLoad(this, node, node)) return; ++ VisitRRO(this, kLa64Dsar, node); ++} ++ ++void InstructionSelector::VisitWord32Ror(Node* node) { ++ VisitRRO(this, kLa64Ror, node); ++} ++ ++void InstructionSelector::VisitWord32Clz(Node* node) { ++ VisitRR(this, kLa64Clz, node); ++} ++ ++void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); } ++ ++void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); } ++ ++void InstructionSelector::VisitWord64ReverseBytes(Node* node) { ++ La64OperandGenerator g(this); ++ Emit(kLa64ByteSwap64, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0))); ++} ++ ++void InstructionSelector::VisitWord32ReverseBytes(Node* node) { ++ La64OperandGenerator g(this); ++ Emit(kLa64ByteSwap32, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0))); ++} ++ ++void InstructionSelector::VisitSimd128ReverseBytes(Node* node) { ++ UNREACHABLE(); ++} ++ ++void InstructionSelector::VisitWord32Ctz(Node* node) { ++ La64OperandGenerator g(this); ++ Emit(kLa64Ctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); ++} ++ ++void InstructionSelector::VisitWord64Ctz(Node* node) { ++ La64OperandGenerator g(this); ++ Emit(kLa64Dctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); ++} ++ ++void InstructionSelector::VisitWord32Popcnt(Node* node) { ++ La64OperandGenerator g(this); ++ Emit(kLa64Popcnt, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); ++} ++ ++void InstructionSelector::VisitWord64Popcnt(Node* node) { ++ La64OperandGenerator g(this); ++ Emit(kLa64Dpopcnt, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); ++} ++ ++void InstructionSelector::VisitWord64Ror(Node* node) { ++ VisitRRO(this, kLa64Dror, node); ++} ++ ++void InstructionSelector::VisitWord64Clz(Node* node) { ++ VisitRR(this, kLa64Dclz, node); ++} ++ ++void InstructionSelector::VisitInt32Add(Node* node) { ++ La64OperandGenerator g(this); ++ Int32BinopMatcher m(node); ++ ++ // Select Lsa for (left + (left_of_right << imm)). ++ if (m.right().opcode() == IrOpcode::kWord32Shl && ++ CanCover(node, m.left().node()) && CanCover(node, m.right().node())) { ++ Int32BinopMatcher mright(m.right().node()); ++ if (mright.right().HasValue() && !m.left().HasValue()) { ++ int32_t shift_value = static_cast(mright.right().Value()); ++ if (shift_value > 0 && shift_value <= 31) { ++ Emit(kLa64Lsa, g.DefineAsRegister(node), ++ g.UseRegister(mright.left().node()), ++ g.UseRegister(m.left().node()), g.TempImmediate(shift_value)); ++ return; ++ } ++ } ++ } ++ ++ // Select Lsa for ((left_of_left << imm) + right). 
++  if (m.left().opcode() == IrOpcode::kWord32Shl &&
++      CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
++    Int32BinopMatcher mleft(m.left().node());
++    if (mleft.right().HasValue() && !m.right().HasValue()) {
++      int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
++      if (shift_value > 0 && shift_value <= 31) {
++        Emit(kLa64Lsa, g.DefineAsRegister(node),
++             g.UseRegister(mleft.left().node()),
++             g.UseRegister(m.right().node()), g.TempImmediate(shift_value));
++        return;
++      }
++    }
++  }
++
++  VisitBinop(this, node, kLa64Add, true, kLa64Add);
++}
++
++void InstructionSelector::VisitInt64Add(Node* node) {
++  La64OperandGenerator g(this);
++  Int64BinopMatcher m(node);
++
++  // Select Dlsa for (left + (left_of_right << imm)).
++  if (m.right().opcode() == IrOpcode::kWord64Shl &&
++      CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
++    Int64BinopMatcher mright(m.right().node());
++    if (mright.right().HasValue() && !m.left().HasValue()) {
++      int32_t shift_value = static_cast<int32_t>(mright.right().Value());
++      if (shift_value > 0 && shift_value <= 31) {
++        Emit(kLa64Dlsa, g.DefineAsRegister(node),
++             g.UseRegister(mright.left().node()),
++             g.UseRegister(m.left().node()), g.TempImmediate(shift_value));
++        return;
++      }
++    }
++  }
++
++  // Select Dlsa for ((left_of_left << imm) + right).
++  if (m.left().opcode() == IrOpcode::kWord64Shl &&
++      CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
++    Int64BinopMatcher mleft(m.left().node());
++    if (mleft.right().HasValue() && !m.right().HasValue()) {
++      int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
++      if (shift_value > 0 && shift_value <= 31) {
++        Emit(kLa64Dlsa, g.DefineAsRegister(node),
++             g.UseRegister(mleft.left().node()),
++             g.UseRegister(m.right().node()), g.TempImmediate(shift_value));
++        return;
++      }
++    }
++  }
++
++  VisitBinop(this, node, kLa64Dadd, true, kLa64Dadd);
++}
++
++void InstructionSelector::VisitInt32Sub(Node* node) {
++  VisitBinop(this, node, kLa64Sub);
++}
++
++void InstructionSelector::VisitInt64Sub(Node* node) {
++  VisitBinop(this, node, kLa64Dsub);
++}
++
++void InstructionSelector::VisitInt32Mul(Node* node) {
++  La64OperandGenerator g(this);
++  Int32BinopMatcher m(node);
++  if (m.right().HasValue() && m.right().Value() > 0) {
++    uint32_t value = static_cast<uint32_t>(m.right().Value());
++    if (base::bits::IsPowerOfTwo(value)) {
++      Emit(kLa64Shl | AddressingModeField::encode(kMode_None),
++           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
++           g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
++      return;
++    }
++    if (base::bits::IsPowerOfTwo(value - 1) && /*kArchVariant == kLa64r6 &&*/
++        value - 1 > 0 && value - 1 <= 31) {
++      Emit(kLa64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
++           g.UseRegister(m.left().node()),
++           g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1)));
++      return;
++    }
++    if (base::bits::IsPowerOfTwo(value + 1)) {
++      InstructionOperand temp = g.TempRegister();
++      Emit(kLa64Shl | AddressingModeField::encode(kMode_None), temp,
++           g.UseRegister(m.left().node()),
++           g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
++      Emit(kLa64Sub | AddressingModeField::encode(kMode_None),
++           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
++      return;
++    }
++  }
++  Node* left = node->InputAt(0);
++  Node* right = node->InputAt(1);
++  if (CanCover(node, left) && CanCover(node, right)) {
++    if (left->opcode() == IrOpcode::kWord64Sar &&
++        right->opcode() == IrOpcode::kWord64Sar) {
++      Int64BinopMatcher leftInput(left),
rightInput(right); ++ if (leftInput.right().Is(32) && rightInput.right().Is(32)) { ++ // Combine untagging shifts with Dmul high. ++ Emit(kLa64DMulHigh, g.DefineSameAsFirst(node), ++ g.UseRegister(leftInput.left().node()), ++ g.UseRegister(rightInput.left().node())); ++ return; ++ } ++ } ++ } ++ VisitRRR(this, kLa64Mul, node); ++} ++ ++void InstructionSelector::VisitInt32MulHigh(Node* node) { ++ VisitRRR(this, kLa64MulHigh, node); ++} ++ ++void InstructionSelector::VisitUint32MulHigh(Node* node) { ++ VisitRRR(this, kLa64MulHighU, node); ++} ++ ++void InstructionSelector::VisitInt64Mul(Node* node) { ++ La64OperandGenerator g(this); ++ Int64BinopMatcher m(node); ++ // TODO(dusmil): Add optimization for shifts larger than 32. ++ if (m.right().HasValue() && m.right().Value() > 0) { ++ uint32_t value = static_cast(m.right().Value()); ++ if (base::bits::IsPowerOfTwo(value)) { ++ Emit(kLa64Dshl | AddressingModeField::encode(kMode_None), ++ g.DefineAsRegister(node), g.UseRegister(m.left().node()), ++ g.TempImmediate(base::bits::WhichPowerOfTwo(value))); ++ return; ++ } ++ if (base::bits::IsPowerOfTwo(value - 1) && /*kArchVariant == kLa64r6 &&*/ ++ value - 1 > 0 && value - 1 <= 31) { ++ // Dlsa macro will handle the shifting value out of bound cases. ++ Emit(kLa64Dlsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()), ++ g.UseRegister(m.left().node()), ++ g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1))); ++ return; ++ } ++ if (base::bits::IsPowerOfTwo(value + 1)) { ++ InstructionOperand temp = g.TempRegister(); ++ Emit(kLa64Dshl | AddressingModeField::encode(kMode_None), temp, ++ g.UseRegister(m.left().node()), ++ g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1))); ++ Emit(kLa64Dsub | AddressingModeField::encode(kMode_None), ++ g.DefineAsRegister(node), temp, g.UseRegister(m.left().node())); ++ return; ++ } ++ } ++ Emit(kLa64Dmul, g.DefineAsRegister(node), g.UseRegister(m.left().node()), ++ g.UseRegister(m.right().node())); ++} ++ ++void InstructionSelector::VisitInt32Div(Node* node) { ++ La64OperandGenerator g(this); ++ Int32BinopMatcher m(node); ++ Node* left = node->InputAt(0); ++ Node* right = node->InputAt(1); ++ if (CanCover(node, left) && CanCover(node, right)) { ++ if (left->opcode() == IrOpcode::kWord64Sar && ++ right->opcode() == IrOpcode::kWord64Sar) { ++ Int64BinopMatcher rightInput(right), leftInput(left); ++ if (rightInput.right().Is(32) && leftInput.right().Is(32)) { ++ // Combine both shifted operands with Ddiv. ++ Emit(kLa64Ddiv, g.DefineSameAsFirst(node), ++ g.UseRegister(leftInput.left().node()), ++ g.UseRegister(rightInput.left().node())); ++ return; ++ } ++ } ++ } ++ Emit(kLa64Div, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), ++ g.UseRegister(m.right().node())); ++} ++ ++void InstructionSelector::VisitUint32Div(Node* node) { ++ La64OperandGenerator g(this); ++ Int32BinopMatcher m(node); ++ Emit(kLa64DivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), ++ g.UseRegister(m.right().node())); ++} ++ ++void InstructionSelector::VisitInt32Mod(Node* node) { ++ La64OperandGenerator g(this); ++ Int32BinopMatcher m(node); ++ Node* left = node->InputAt(0); ++ Node* right = node->InputAt(1); ++ if (CanCover(node, left) && CanCover(node, right)) { ++ if (left->opcode() == IrOpcode::kWord64Sar && ++ right->opcode() == IrOpcode::kWord64Sar) { ++ Int64BinopMatcher rightInput(right), leftInput(left); ++ if (rightInput.right().Is(32) && leftInput.right().Is(32)) { ++ // Combine both shifted operands with Dmod. 
++ Emit(kLa64Dmod, g.DefineSameAsFirst(node), ++ g.UseRegister(leftInput.left().node()), ++ g.UseRegister(rightInput.left().node())); ++ return; ++ } ++ } ++ } ++ Emit(kLa64Mod, g.DefineAsRegister(node), g.UseRegister(m.left().node()), ++ g.UseRegister(m.right().node())); ++} ++ ++void InstructionSelector::VisitUint32Mod(Node* node) { ++ La64OperandGenerator g(this); ++ Int32BinopMatcher m(node); ++ Emit(kLa64ModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()), ++ g.UseRegister(m.right().node())); ++} ++ ++void InstructionSelector::VisitInt64Div(Node* node) { ++ La64OperandGenerator g(this); ++ Int64BinopMatcher m(node); ++ Emit(kLa64Ddiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), ++ g.UseRegister(m.right().node())); ++} ++ ++void InstructionSelector::VisitUint64Div(Node* node) { ++ La64OperandGenerator g(this); ++ Int64BinopMatcher m(node); ++ Emit(kLa64DdivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), ++ g.UseRegister(m.right().node())); ++} ++ ++void InstructionSelector::VisitInt64Mod(Node* node) { ++ La64OperandGenerator g(this); ++ Int64BinopMatcher m(node); ++ Emit(kLa64Dmod, g.DefineAsRegister(node), g.UseRegister(m.left().node()), ++ g.UseRegister(m.right().node())); ++} ++ ++void InstructionSelector::VisitUint64Mod(Node* node) { ++ La64OperandGenerator g(this); ++ Int64BinopMatcher m(node); ++ Emit(kLa64DmodU, g.DefineAsRegister(node), g.UseRegister(m.left().node()), ++ g.UseRegister(m.right().node())); ++} ++ ++void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) { ++ VisitRR(this, kLa64CvtDS, node); ++} ++ ++void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) { ++ VisitRR(this, kLa64CvtSW, node); ++} ++ ++void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) { ++ VisitRR(this, kLa64CvtSUw, node); ++} ++ ++void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) { ++ VisitRR(this, kLa64CvtDW, node); ++} ++ ++void InstructionSelector::VisitChangeInt64ToFloat64(Node* node) { ++ VisitRR(this, kLa64CvtDL, node); ++} ++ ++void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) { ++ VisitRR(this, kLa64CvtDUw, node); ++} ++ ++void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) { ++ VisitRR(this, kLa64TruncWS, node); ++} ++ ++void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) { ++ VisitRR(this, kLa64TruncUwS, node); ++} ++ ++void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) { ++ La64OperandGenerator g(this); ++ Node* value = node->InputAt(0); ++ // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction ++ // which does rounding and conversion to integer format. 
++ if (CanCover(node, value)) { ++ switch (value->opcode()) { ++ case IrOpcode::kFloat64RoundDown: ++ Emit(kLa64FloorWD, g.DefineAsRegister(node), ++ g.UseRegister(value->InputAt(0))); ++ return; ++ case IrOpcode::kFloat64RoundUp: ++ Emit(kLa64CeilWD, g.DefineAsRegister(node), ++ g.UseRegister(value->InputAt(0))); ++ return; ++ case IrOpcode::kFloat64RoundTiesEven: ++ Emit(kLa64RoundWD, g.DefineAsRegister(node), ++ g.UseRegister(value->InputAt(0))); ++ return; ++ case IrOpcode::kFloat64RoundTruncate: ++ Emit(kLa64TruncWD, g.DefineAsRegister(node), ++ g.UseRegister(value->InputAt(0))); ++ return; ++ default: ++ break; ++ } ++ if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) { ++ Node* next = value->InputAt(0); ++ if (CanCover(value, next)) { ++ // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP)) ++ switch (next->opcode()) { ++ case IrOpcode::kFloat32RoundDown: ++ Emit(kLa64FloorWS, g.DefineAsRegister(node), ++ g.UseRegister(next->InputAt(0))); ++ return; ++ case IrOpcode::kFloat32RoundUp: ++ Emit(kLa64CeilWS, g.DefineAsRegister(node), ++ g.UseRegister(next->InputAt(0))); ++ return; ++ case IrOpcode::kFloat32RoundTiesEven: ++ Emit(kLa64RoundWS, g.DefineAsRegister(node), ++ g.UseRegister(next->InputAt(0))); ++ return; ++ case IrOpcode::kFloat32RoundTruncate: ++ Emit(kLa64TruncWS, g.DefineAsRegister(node), ++ g.UseRegister(next->InputAt(0))); ++ return; ++ default: ++ Emit(kLa64TruncWS, g.DefineAsRegister(node), ++ g.UseRegister(value->InputAt(0))); ++ return; ++ } ++ } else { ++ // Match float32 -> float64 -> int32 representation change path. ++ Emit(kLa64TruncWS, g.DefineAsRegister(node), ++ g.UseRegister(value->InputAt(0))); ++ return; ++ } ++ } ++ } ++ VisitRR(this, kLa64TruncWD, node); ++} ++ ++void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) { ++ VisitRR(this, kLa64TruncLD, node); ++} ++ ++void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) { ++ VisitRR(this, kLa64TruncUwD, node); ++} ++ ++void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) { ++ VisitRR(this, kLa64TruncUlD, node); ++} ++ ++void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) { ++ VisitRR(this, kLa64TruncUwD, node); ++} ++ ++void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) { ++ VisitRR(this, kLa64TruncLD, node); ++} ++ ++void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) { ++ La64OperandGenerator g(this); ++ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))}; ++ InstructionOperand outputs[2]; ++ size_t output_count = 0; ++ outputs[output_count++] = g.DefineAsRegister(node); ++ ++ Node* success_output = NodeProperties::FindProjection(node, 1); ++ if (success_output) { ++ outputs[output_count++] = g.DefineAsRegister(success_output); ++ } ++ ++ this->Emit(kLa64TruncLS, output_count, outputs, 1, inputs); ++} ++ ++void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) { ++ La64OperandGenerator g(this); ++ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))}; ++ InstructionOperand outputs[2]; ++ size_t output_count = 0; ++ outputs[output_count++] = g.DefineAsRegister(node); ++ ++ Node* success_output = NodeProperties::FindProjection(node, 1); ++ if (success_output) { ++ outputs[output_count++] = g.DefineAsRegister(success_output); ++ } ++ ++ Emit(kLa64TruncLD, output_count, outputs, 1, inputs); ++} ++ ++void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) { ++ La64OperandGenerator g(this); ++ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))}; 
++ InstructionOperand outputs[2]; ++ size_t output_count = 0; ++ outputs[output_count++] = g.DefineAsRegister(node); ++ ++ Node* success_output = NodeProperties::FindProjection(node, 1); ++ if (success_output) { ++ outputs[output_count++] = g.DefineAsRegister(success_output); ++ } ++ ++ Emit(kLa64TruncUlS, output_count, outputs, 1, inputs); ++} ++ ++void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) { ++ La64OperandGenerator g(this); ++ ++ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))}; ++ InstructionOperand outputs[2]; ++ size_t output_count = 0; ++ outputs[output_count++] = g.DefineAsRegister(node); ++ ++ Node* success_output = NodeProperties::FindProjection(node, 1); ++ if (success_output) { ++ outputs[output_count++] = g.DefineAsRegister(success_output); ++ } ++ ++ Emit(kLa64TruncUlD, output_count, outputs, 1, inputs); ++} ++ ++void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) { ++ UNIMPLEMENTED(); ++} ++ ++void InstructionSelector::VisitChangeInt32ToInt64(Node* node) { ++ Node* value = node->InputAt(0); ++ if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) { ++ // Generate sign-extending load. ++ LoadRepresentation load_rep = LoadRepresentationOf(value->op()); ++ InstructionCode opcode = kArchNop; ++ switch (load_rep.representation()) { ++ case MachineRepresentation::kBit: // Fall through. ++ case MachineRepresentation::kWord8: ++ opcode = load_rep.IsUnsigned() ? kLa64Lbu : kLa64Lb; ++ break; ++ case MachineRepresentation::kWord16: ++ opcode = load_rep.IsUnsigned() ? kLa64Lhu : kLa64Lh; ++ break; ++ case MachineRepresentation::kWord32: ++ opcode = kLa64Lw; ++ break; ++ default: ++ UNREACHABLE(); ++ return; ++ } ++ EmitLoad(this, value, opcode, node); ++ } else { ++ La64OperandGenerator g(this); ++ Emit(kLa64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), ++ g.TempImmediate(0)); ++ } ++} ++ ++void InstructionSelector::VisitChangeUint32ToUint64(Node* node) { ++ La64OperandGenerator g(this); ++ Node* value = node->InputAt(0); ++ switch (value->opcode()) { ++ // 32-bit operations will write their result in a 64 bit register, ++ // clearing the top 32 bits of the destination register. ++ case IrOpcode::kUint32Div: ++ case IrOpcode::kUint32Mod: ++ case IrOpcode::kUint32MulHigh: { ++ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value)); ++ return; ++ } ++ case IrOpcode::kLoad: { ++ LoadRepresentation load_rep = LoadRepresentationOf(value->op()); ++ if (load_rep.IsUnsigned()) { ++ switch (load_rep.representation()) { ++ case MachineRepresentation::kWord8: ++ case MachineRepresentation::kWord16: ++ case MachineRepresentation::kWord32: ++ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value)); ++ return; ++ default: ++ break; ++ } ++ } ++ break; ++ } ++ default: ++ break; ++ } ++ Emit(kLa64Dext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), ++ g.TempImmediate(0), g.TempImmediate(32)); ++} ++ ++void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) { ++ La64OperandGenerator g(this); ++ Node* value = node->InputAt(0); ++ if (CanCover(node, value)) { ++ switch (value->opcode()) { ++ case IrOpcode::kWord64Sar: { ++ if (CanCoverTransitively(node, value, value->InputAt(0)) && ++ TryEmitExtendingLoad(this, value, node)) { ++ return; ++ } else { ++ Int64BinopMatcher m(value); ++ if (m.right().IsInRange(32, 63)) { ++ // After smi untagging no need for truncate. Combine sequence. 
++ Emit(kLa64Dsar, g.DefineSameAsFirst(node), ++ g.UseRegister(m.left().node()), ++ g.UseImmediate(m.right().node())); ++ return; ++ } ++ } ++ break; ++ } ++ default: ++ break; ++ } ++ } ++ Emit(kLa64Ext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), ++ g.TempImmediate(0), g.TempImmediate(32)); ++} ++ ++void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) { ++ La64OperandGenerator g(this); ++ Node* value = node->InputAt(0); ++ // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding ++ // instruction. ++ if (CanCover(node, value) && ++ value->opcode() == IrOpcode::kChangeInt32ToFloat64) { ++ Emit(kLa64CvtSW, g.DefineAsRegister(node), ++ g.UseRegister(value->InputAt(0))); ++ return; ++ } ++ VisitRR(this, kLa64CvtSD, node); ++} ++ ++void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) { ++ VisitRR(this, kArchTruncateDoubleToI, node); ++} ++ ++void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) { ++ VisitRR(this, kLa64TruncWD, node); ++} ++ ++void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) { ++ VisitRR(this, kLa64CvtSL, node); ++} ++ ++void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) { ++ VisitRR(this, kLa64CvtDL, node); ++} ++ ++void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) { ++ VisitRR(this, kLa64CvtSUl, node); ++} ++ ++void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) { ++ VisitRR(this, kLa64CvtDUl, node); ++} ++ ++void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) { ++ VisitRR(this, kLa64Float64ExtractLowWord32, node); ++} ++ ++void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) { ++ VisitRR(this, kLa64BitcastDL, node); ++} ++ ++void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) { ++ La64OperandGenerator g(this); ++ Emit(kLa64Float64InsertLowWord32, g.DefineAsRegister(node), ++ ImmediateOperand(ImmediateOperand::INLINE, 0), ++ g.UseRegister(node->InputAt(0))); ++} ++ ++void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) { ++ VisitRR(this, kLa64BitcastLD, node); ++} ++ ++void InstructionSelector::VisitFloat32Add(Node* node) { ++ // Optimization with Madd.S(z, x, y) is intentionally removed. ++ // See explanation for madd_s in assembler-la64.cc. ++ VisitRRR(this, kLa64AddS, node); ++} ++ ++void InstructionSelector::VisitFloat64Add(Node* node) { ++ // Optimization with Madd.D(z, x, y) is intentionally removed. ++ // See explanation for madd_d in assembler-la64.cc. ++ VisitRRR(this, kLa64AddD, node); ++} ++ ++void InstructionSelector::VisitFloat32Sub(Node* node) { ++ // Optimization with Msub.S(z, x, y) is intentionally removed. ++ // See explanation for madd_s in assembler-la64.cc. ++ VisitRRR(this, kLa64SubS, node); ++} ++ ++void InstructionSelector::VisitFloat64Sub(Node* node) { ++ // Optimization with Msub.D(z, x, y) is intentionally removed. ++ // See explanation for madd_d in assembler-la64.cc. 
++ VisitRRR(this, kLa64SubD, node); ++} ++ ++void InstructionSelector::VisitFloat32Mul(Node* node) { ++ VisitRRR(this, kLa64MulS, node); ++} ++ ++void InstructionSelector::VisitFloat64Mul(Node* node) { ++ VisitRRR(this, kLa64MulD, node); ++} ++ ++void InstructionSelector::VisitFloat32Div(Node* node) { ++ VisitRRR(this, kLa64DivS, node); ++} ++ ++void InstructionSelector::VisitFloat64Div(Node* node) { ++ VisitRRR(this, kLa64DivD, node); ++} ++ ++void InstructionSelector::VisitFloat64Mod(Node* node) { ++ La64OperandGenerator g(this); ++ Emit(kLa64ModD, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f0), ++ g.UseFixed(node->InputAt(1), f1)) ++ ->MarkAsCall(); ++} ++ ++void InstructionSelector::VisitFloat32Max(Node* node) { ++ La64OperandGenerator g(this); ++ Emit(kLa64Float32Max, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); ++} ++ ++void InstructionSelector::VisitFloat64Max(Node* node) { ++ La64OperandGenerator g(this); ++ Emit(kLa64Float64Max, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); ++} ++ ++void InstructionSelector::VisitFloat32Min(Node* node) { ++ La64OperandGenerator g(this); ++ Emit(kLa64Float32Min, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); ++} ++ ++void InstructionSelector::VisitFloat64Min(Node* node) { ++ La64OperandGenerator g(this); ++ Emit(kLa64Float64Min, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); ++} ++ ++void InstructionSelector::VisitFloat32Abs(Node* node) { ++ VisitRR(this, kLa64AbsS, node); ++} ++ ++void InstructionSelector::VisitFloat64Abs(Node* node) { ++ VisitRR(this, kLa64AbsD, node); ++} ++ ++void InstructionSelector::VisitFloat32Sqrt(Node* node) { ++ VisitRR(this, kLa64SqrtS, node); ++} ++ ++void InstructionSelector::VisitFloat64Sqrt(Node* node) { ++ VisitRR(this, kLa64SqrtD, node); ++} ++ ++void InstructionSelector::VisitFloat32RoundDown(Node* node) { ++ VisitRR(this, kLa64Float32RoundDown, node); ++} ++ ++void InstructionSelector::VisitFloat64RoundDown(Node* node) { ++ VisitRR(this, kLa64Float64RoundDown, node); ++} ++ ++void InstructionSelector::VisitFloat32RoundUp(Node* node) { ++ VisitRR(this, kLa64Float32RoundUp, node); ++} ++ ++void InstructionSelector::VisitFloat64RoundUp(Node* node) { ++ VisitRR(this, kLa64Float64RoundUp, node); ++} ++ ++void InstructionSelector::VisitFloat32RoundTruncate(Node* node) { ++ VisitRR(this, kLa64Float32RoundTruncate, node); ++} ++ ++void InstructionSelector::VisitFloat64RoundTruncate(Node* node) { ++ VisitRR(this, kLa64Float64RoundTruncate, node); ++} ++ ++void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) { ++ UNREACHABLE(); ++} ++ ++void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) { ++ VisitRR(this, kLa64Float32RoundTiesEven, node); ++} ++ ++void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) { ++ VisitRR(this, kLa64Float64RoundTiesEven, node); ++} ++ ++void InstructionSelector::VisitFloat32Neg(Node* node) { ++ VisitRR(this, kLa64NegS, node); ++} ++ ++void InstructionSelector::VisitFloat64Neg(Node* node) { ++ VisitRR(this, kLa64NegD, node); ++} ++ ++void InstructionSelector::VisitFloat64Ieee754Binop(Node* node, ++ InstructionCode opcode) { ++ La64OperandGenerator g(this); ++ Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f2), ++ g.UseFixed(node->InputAt(1), f4)) ++ ->MarkAsCall(); ++} ++ ++void InstructionSelector::VisitFloat64Ieee754Unop(Node* node, 
++                                                  InstructionCode opcode) {
++  La64OperandGenerator g(this);
++  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f0))
++      ->MarkAsCall();
++}
++
++void InstructionSelector::EmitPrepareArguments(
++    ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
++    Node* node) {
++  La64OperandGenerator g(this);
++
++  // Prepare for C function call.
++  if (call_descriptor->IsCFunctionCall()) {
++    Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
++                                         call_descriptor->ParameterCount())),
++         0, nullptr, 0, nullptr);
++
++    // Poke any stack arguments.
++    int slot = kCArgSlotCount;
++    for (PushParameter input : (*arguments)) {
++      Emit(kLa64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
++           g.TempImmediate(slot << kSystemPointerSizeLog2));
++      ++slot;
++    }
++  } else {
++    int push_count = static_cast<int>(call_descriptor->StackParameterCount());
++    if (push_count > 0) {
++      // Calculate needed space
++      int stack_size = 0;
++      for (PushParameter input : (*arguments)) {
++        if (input.node) {
++          stack_size += input.location.GetSizeInPointers();
++        }
++      }
++      Emit(kLa64StackClaim, g.NoOutput(),
++           g.TempImmediate(stack_size << kSystemPointerSizeLog2));
++    }
++    for (size_t n = 0; n < arguments->size(); ++n) {
++      PushParameter input = (*arguments)[n];
++      if (input.node) {
++        Emit(kLa64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
++             g.TempImmediate(static_cast<int>(n << kSystemPointerSizeLog2)));
++      }
++    }
++  }
++}
++
++void InstructionSelector::EmitPrepareResults(
++    ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
++    Node* node) {
++  La64OperandGenerator g(this);
++
++  int reverse_slot = 0;
++  for (PushParameter output : *results) {
++    if (!output.location.IsCallerFrameSlot()) continue;
++    // Skip any alignment holes in nodes.
++    if (output.node != nullptr) {
++      DCHECK(!call_descriptor->IsCFunctionCall());
++      if (output.location.GetType() == MachineType::Float32()) {
++        MarkAsFloat32(output.node);
++      } else if (output.location.GetType() == MachineType::Float64()) {
++        MarkAsFloat64(output.node);
++      }
++      Emit(kLa64Peek, g.DefineAsRegister(output.node),
++           g.UseImmediate(reverse_slot));
++    }
++    reverse_slot += output.location.GetSizeInPointers();
++  }
++}
++
++bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
++
++int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
++
++void InstructionSelector::VisitUnalignedLoad(Node* node) {
++  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
++  La64OperandGenerator g(this);
++  Node* base = node->InputAt(0);
++  Node* index = node->InputAt(1);
++
++  ArchOpcode opcode = kArchNop;
++  switch (load_rep.representation()) {
++    case MachineRepresentation::kFloat32:
++      opcode = kLa64Ulwc1;
++      break;
++    case MachineRepresentation::kFloat64:
++      opcode = kLa64Uldc1;
++      break;
++    case MachineRepresentation::kBit:  // Fall through.
++    case MachineRepresentation::kWord8:
++      UNREACHABLE();
++    case MachineRepresentation::kWord16:
++      opcode = load_rep.IsUnsigned() ? kLa64Ulhu : kLa64Ulh;
++      break;
++    case MachineRepresentation::kWord32:
++      opcode = load_rep.IsUnsigned() ? kLa64Ulwu : kLa64Ulw;
++      break;
++    case MachineRepresentation::kTaggedSigned:   // Fall through.
++    case MachineRepresentation::kTaggedPointer:  // Fall through.
++    case MachineRepresentation::kTagged:  // Fall through.
++    case MachineRepresentation::kWord64:
++      opcode = kLa64Uld;
++      break;
++    case MachineRepresentation::kCompressedPointer:  // Fall through.
++ case MachineRepresentation::kCompressed: // Fall through. ++ case MachineRepresentation::kNone: ++ case MachineRepresentation::kSimd128: ++ UNREACHABLE(); ++ } ++ ++ if (g.CanBeImmediate(index, opcode)) { ++ Emit(opcode | AddressingModeField::encode(kMode_MRI), ++ g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index)); ++ } else { ++ InstructionOperand addr_reg = g.TempRegister(); ++ Emit(kLa64Dadd | AddressingModeField::encode(kMode_None), addr_reg, ++ g.UseRegister(index), g.UseRegister(base)); ++ // Emit desired load opcode, using temp addr_reg. ++ Emit(opcode | AddressingModeField::encode(kMode_MRI), ++ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0)); ++ } ++} ++ ++void InstructionSelector::VisitUnalignedStore(Node* node) { ++ La64OperandGenerator g(this); ++ Node* base = node->InputAt(0); ++ Node* index = node->InputAt(1); ++ Node* value = node->InputAt(2); ++ ++ UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op()); ++ ArchOpcode opcode = kArchNop; ++ switch (rep) { ++ case MachineRepresentation::kFloat32: ++ opcode = kLa64Uswc1; ++ break; ++ case MachineRepresentation::kFloat64: ++ opcode = kLa64Usdc1; ++ break; ++ case MachineRepresentation::kBit: // Fall through. ++ case MachineRepresentation::kWord8: ++ UNREACHABLE(); ++ case MachineRepresentation::kWord16: ++ opcode = kLa64Ush; ++ break; ++ case MachineRepresentation::kWord32: ++ opcode = kLa64Usw; ++ break; ++ case MachineRepresentation::kTaggedSigned: // Fall through. ++ case MachineRepresentation::kTaggedPointer: // Fall through. ++ case MachineRepresentation::kTagged: // Fall through. ++ case MachineRepresentation::kWord64: ++ opcode = kLa64Usd; ++ break; ++ case MachineRepresentation::kCompressedPointer: // Fall through. ++ case MachineRepresentation::kCompressed: // Fall through. ++ case MachineRepresentation::kNone: ++ case MachineRepresentation::kSimd128: ++ UNREACHABLE(); ++ } ++ ++ if (g.CanBeImmediate(index, opcode)) { ++ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(), ++ g.UseRegister(base), g.UseImmediate(index), ++ g.UseRegisterOrImmediateZero(value)); ++ } else { ++ InstructionOperand addr_reg = g.TempRegister(); ++ Emit(kLa64Dadd | AddressingModeField::encode(kMode_None), addr_reg, ++ g.UseRegister(index), g.UseRegister(base)); ++ // Emit desired store opcode, using temp addr_reg. ++ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(), ++ addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value)); ++ } ++} ++ ++namespace { ++ ++// Shared routine for multiple compare operations. ++static void VisitCompare(InstructionSelector* selector, InstructionCode opcode, ++ InstructionOperand left, InstructionOperand right, ++ FlagsContinuation* cont) { ++ selector->EmitWithContinuation(opcode, left, right, cont); ++} ++ ++// Shared routine for multiple float32 compare operations. ++void VisitFloat32Compare(InstructionSelector* selector, Node* node, ++ FlagsContinuation* cont) { ++ La64OperandGenerator g(selector); ++ Float32BinopMatcher m(node); ++ InstructionOperand lhs, rhs; ++ ++ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node()) ++ : g.UseRegister(m.left().node()); ++ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node()) ++ : g.UseRegister(m.right().node()); ++ VisitCompare(selector, kLa64CmpS, lhs, rhs, cont); ++} ++ ++// Shared routine for multiple float64 compare operations. 
++void VisitFloat64Compare(InstructionSelector* selector, Node* node, ++ FlagsContinuation* cont) { ++ La64OperandGenerator g(selector); ++ Float64BinopMatcher m(node); ++ InstructionOperand lhs, rhs; ++ ++ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node()) ++ : g.UseRegister(m.left().node()); ++ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node()) ++ : g.UseRegister(m.right().node()); ++ VisitCompare(selector, kLa64CmpD, lhs, rhs, cont); ++} ++ ++// Shared routine for multiple word compare operations. ++void VisitWordCompare(InstructionSelector* selector, Node* node, ++ InstructionCode opcode, FlagsContinuation* cont, ++ bool commutative) { ++ La64OperandGenerator g(selector); ++ Node* left = node->InputAt(0); ++ Node* right = node->InputAt(1); ++ ++ // Match immediates on left or right side of comparison. ++ if (g.CanBeImmediate(right, opcode)) { ++ if (opcode == kLa64Tst) { ++ VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right), ++ cont); ++ } else { ++ switch (cont->condition()) { ++ case kEqual: ++ case kNotEqual: ++ if (cont->IsSet()) { ++ VisitCompare(selector, opcode, g.UseRegister(left), ++ g.UseImmediate(right), cont); ++ } else { ++ VisitCompare(selector, opcode, g.UseRegister(left), ++ g.UseRegister(right), cont); ++ } ++ break; ++ case kSignedLessThan: ++ case kSignedGreaterThanOrEqual: ++ case kUnsignedLessThan: ++ case kUnsignedGreaterThanOrEqual: ++ VisitCompare(selector, opcode, g.UseRegister(left), ++ g.UseImmediate(right), cont); ++ break; ++ default: ++ VisitCompare(selector, opcode, g.UseRegister(left), ++ g.UseRegister(right), cont); ++ } ++ } ++ } else if (g.CanBeImmediate(left, opcode)) { ++ if (!commutative) cont->Commute(); ++ if (opcode == kLa64Tst) { ++ VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left), ++ cont); ++ } else { ++ switch (cont->condition()) { ++ case kEqual: ++ case kNotEqual: ++ if (cont->IsSet()) { ++ VisitCompare(selector, opcode, g.UseRegister(right), ++ g.UseImmediate(left), cont); ++ } else { ++ VisitCompare(selector, opcode, g.UseRegister(right), ++ g.UseRegister(left), cont); ++ } ++ break; ++ case kSignedLessThan: ++ case kSignedGreaterThanOrEqual: ++ case kUnsignedLessThan: ++ case kUnsignedGreaterThanOrEqual: ++ VisitCompare(selector, opcode, g.UseRegister(right), ++ g.UseImmediate(left), cont); ++ break; ++ default: ++ VisitCompare(selector, opcode, g.UseRegister(right), ++ g.UseRegister(left), cont); ++ } ++ } ++ } else { ++ VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right), ++ cont); ++ } ++} ++ ++bool IsNodeUnsigned(Node* n) { ++ NodeMatcher m(n); ++ ++ if (m.IsLoad() || m.IsUnalignedLoad() || m.IsPoisonedLoad() || ++ m.IsProtectedLoad() || m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) { ++ LoadRepresentation load_rep = LoadRepresentationOf(n->op()); ++ return load_rep.IsUnsigned(); ++ } else { ++ return m.IsUint32Div() || m.IsUint32LessThan() || ++ m.IsUint32LessThanOrEqual() || m.IsUint32Mod() || ++ m.IsUint32MulHigh() || m.IsChangeFloat64ToUint32() || ++ m.IsTruncateFloat64ToUint32() || m.IsTruncateFloat32ToUint32(); ++ } ++} ++ ++// Shared routine for multiple word compare operations. 
++void VisitFullWord32Compare(InstructionSelector* selector, Node* node, ++ InstructionCode opcode, FlagsContinuation* cont) { ++ La64OperandGenerator g(selector); ++ InstructionOperand leftOp = g.TempRegister(); ++ InstructionOperand rightOp = g.TempRegister(); ++ ++ selector->Emit(kLa64Dshl, leftOp, g.UseRegister(node->InputAt(0)), ++ g.TempImmediate(32)); ++ selector->Emit(kLa64Dshl, rightOp, g.UseRegister(node->InputAt(1)), ++ g.TempImmediate(32)); ++ ++ VisitCompare(selector, opcode, leftOp, rightOp, cont); ++} ++ ++void VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node, ++ InstructionCode opcode, ++ FlagsContinuation* cont) { ++ if (FLAG_debug_code) { ++ La64OperandGenerator g(selector); ++ InstructionOperand leftOp = g.TempRegister(); ++ InstructionOperand rightOp = g.TempRegister(); ++ InstructionOperand optimizedResult = g.TempRegister(); ++ InstructionOperand fullResult = g.TempRegister(); ++ FlagsCondition condition = cont->condition(); ++ InstructionCode testOpcode = opcode | ++ FlagsConditionField::encode(condition) | ++ FlagsModeField::encode(kFlags_set); ++ ++ selector->Emit(testOpcode, optimizedResult, g.UseRegister(node->InputAt(0)), ++ g.UseRegister(node->InputAt(1))); ++ ++ selector->Emit(kLa64Dshl, leftOp, g.UseRegister(node->InputAt(0)), ++ g.TempImmediate(32)); ++ selector->Emit(kLa64Dshl, rightOp, g.UseRegister(node->InputAt(1)), ++ g.TempImmediate(32)); ++ selector->Emit(testOpcode, fullResult, leftOp, rightOp); ++ ++ selector->Emit(kLa64AssertEqual, g.NoOutput(), optimizedResult, fullResult, ++ g.TempImmediate(static_cast( ++ AbortReason::kUnsupportedNonPrimitiveCompare))); ++ } ++ ++ VisitWordCompare(selector, node, opcode, cont, false); ++} ++ ++void VisitWord32Compare(InstructionSelector* selector, Node* node, ++ FlagsContinuation* cont) { ++ // LA64 doesn't support Word32 compare instructions. Instead it relies ++ // that the values in registers are correctly sign-extended and uses ++ // Word64 comparison instead. This behavior is correct in most cases, ++ // but doesn't work when comparing signed with unsigned operands. ++ // We could simulate full Word32 compare in all cases but this would ++ // create an unnecessary overhead since unsigned integers are rarely ++ // used in JavaScript. ++ // The solution proposed here tries to match a comparison of signed ++ // with unsigned operand, and perform full Word32Compare only ++ // in those cases. Unfortunately, the solution is not complete because ++ // it might skip cases where Word32 full compare is needed, so ++ // basically it is a hack. ++ // When call to a host function in simulator, if the function return a ++ // int32 value, the simulator do not sign-extended to int64 because in ++ // simulator we do not know the function whether return a int32 or int64. ++ // so we need do a full word32 compare in this case. 
++#ifndef USE_SIMULATOR ++ if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1))) { ++#else ++ if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1)) || ++ node->InputAt(0)->opcode() == IrOpcode::kCall || ++ node->InputAt(1)->opcode() == IrOpcode::kCall) { ++#endif ++ VisitFullWord32Compare(selector, node, kLa64Cmp, cont); ++ } else { ++ VisitOptimizedWord32Compare(selector, node, kLa64Cmp, cont); ++ } ++} ++ ++void VisitWord64Compare(InstructionSelector* selector, Node* node, ++ FlagsContinuation* cont) { ++ VisitWordCompare(selector, node, kLa64Cmp, cont, false); ++} ++ ++void EmitWordCompareZero(InstructionSelector* selector, Node* value, ++ FlagsContinuation* cont) { ++ La64OperandGenerator g(selector); ++ selector->EmitWithContinuation(kLa64Cmp, g.UseRegister(value), ++ g.TempImmediate(0), cont); ++} ++ ++void VisitAtomicLoad(InstructionSelector* selector, Node* node, ++ ArchOpcode opcode) { ++ La64OperandGenerator g(selector); ++ Node* base = node->InputAt(0); ++ Node* index = node->InputAt(1); ++ if (g.CanBeImmediate(index, opcode)) { ++ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI), ++ g.DefineAsRegister(node), g.UseRegister(base), ++ g.UseImmediate(index)); ++ } else { ++ InstructionOperand addr_reg = g.TempRegister(); ++ selector->Emit(kLa64Dadd | AddressingModeField::encode(kMode_None), ++ addr_reg, g.UseRegister(index), g.UseRegister(base)); ++ // Emit desired load opcode, using temp addr_reg. ++ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI), ++ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0)); ++ } ++} ++ ++void VisitAtomicStore(InstructionSelector* selector, Node* node, ++ ArchOpcode opcode) { ++ La64OperandGenerator g(selector); ++ Node* base = node->InputAt(0); ++ Node* index = node->InputAt(1); ++ Node* value = node->InputAt(2); ++ ++ if (g.CanBeImmediate(index, opcode)) { ++ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI), ++ g.NoOutput(), g.UseRegister(base), g.UseImmediate(index), ++ g.UseRegisterOrImmediateZero(value)); ++ } else { ++ InstructionOperand addr_reg = g.TempRegister(); ++ selector->Emit(kLa64Dadd | AddressingModeField::encode(kMode_None), ++ addr_reg, g.UseRegister(index), g.UseRegister(base)); ++ // Emit desired store opcode, using temp addr_reg. 
++ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI), ++ g.NoOutput(), addr_reg, g.TempImmediate(0), ++ g.UseRegisterOrImmediateZero(value)); ++ } ++} ++ ++void VisitAtomicExchange(InstructionSelector* selector, Node* node, ++ ArchOpcode opcode) { ++ La64OperandGenerator g(selector); ++ Node* base = node->InputAt(0); ++ Node* index = node->InputAt(1); ++ Node* value = node->InputAt(2); ++ ++ AddressingMode addressing_mode = kMode_MRI; ++ InstructionOperand inputs[3]; ++ size_t input_count = 0; ++ inputs[input_count++] = g.UseUniqueRegister(base); ++ inputs[input_count++] = g.UseUniqueRegister(index); ++ inputs[input_count++] = g.UseUniqueRegister(value); ++ InstructionOperand outputs[1]; ++ outputs[0] = g.UseUniqueRegister(node); ++ InstructionOperand temp[3]; ++ temp[0] = g.TempRegister(); ++ temp[1] = g.TempRegister(); ++ temp[2] = g.TempRegister(); ++ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); ++ selector->Emit(code, 1, outputs, input_count, inputs, 3, temp); ++} ++ ++void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node, ++ ArchOpcode opcode) { ++ La64OperandGenerator g(selector); ++ Node* base = node->InputAt(0); ++ Node* index = node->InputAt(1); ++ Node* old_value = node->InputAt(2); ++ Node* new_value = node->InputAt(3); ++ ++ AddressingMode addressing_mode = kMode_MRI; ++ InstructionOperand inputs[4]; ++ size_t input_count = 0; ++ inputs[input_count++] = g.UseUniqueRegister(base); ++ inputs[input_count++] = g.UseUniqueRegister(index); ++ inputs[input_count++] = g.UseUniqueRegister(old_value); ++ inputs[input_count++] = g.UseUniqueRegister(new_value); ++ InstructionOperand outputs[1]; ++ outputs[0] = g.UseUniqueRegister(node); ++ InstructionOperand temp[3]; ++ temp[0] = g.TempRegister(); ++ temp[1] = g.TempRegister(); ++ temp[2] = g.TempRegister(); ++ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); ++ selector->Emit(code, 1, outputs, input_count, inputs, 3, temp); ++} ++ ++void VisitAtomicBinop(InstructionSelector* selector, Node* node, ++ ArchOpcode opcode) { ++ La64OperandGenerator g(selector); ++ Node* base = node->InputAt(0); ++ Node* index = node->InputAt(1); ++ Node* value = node->InputAt(2); ++ ++ AddressingMode addressing_mode = kMode_MRI; ++ InstructionOperand inputs[3]; ++ size_t input_count = 0; ++ inputs[input_count++] = g.UseUniqueRegister(base); ++ inputs[input_count++] = g.UseUniqueRegister(index); ++ inputs[input_count++] = g.UseUniqueRegister(value); ++ InstructionOperand outputs[1]; ++ outputs[0] = g.UseUniqueRegister(node); ++ InstructionOperand temps[4]; ++ temps[0] = g.TempRegister(); ++ temps[1] = g.TempRegister(); ++ temps[2] = g.TempRegister(); ++ temps[3] = g.TempRegister(); ++ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); ++ selector->Emit(code, 1, outputs, input_count, inputs, 4, temps); ++} ++ ++} // namespace ++ ++void InstructionSelector::VisitStackPointerGreaterThan( ++ Node* node, FlagsContinuation* cont) { ++ StackCheckKind kind = StackCheckKindOf(node->op()); ++ InstructionCode opcode = ++ kArchStackPointerGreaterThan | MiscField::encode(static_cast(kind)); ++ ++ La64OperandGenerator g(this); ++ ++ // No outputs. ++ InstructionOperand* const outputs = nullptr; ++ const int output_count = 0; ++ ++ // Applying an offset to this stack check requires a temp register. Offsets ++ // are only applied to the first stack check. 
If applying an offset, we must ++ // ensure the input and temp registers do not alias, thus kUniqueRegister. ++ InstructionOperand temps[] = {g.TempRegister()}; ++ const int temp_count = (kind == StackCheckKind::kJSFunctionEntry ? 1 : 0); ++ const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry) ++ ? OperandGenerator::kUniqueRegister ++ : OperandGenerator::kRegister; ++ ++ Node* const value = node->InputAt(0); ++ InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)}; ++ static constexpr int input_count = arraysize(inputs); ++ ++ EmitWithContinuation(opcode, output_count, outputs, input_count, inputs, ++ temp_count, temps, cont); ++} ++ ++// Shared routine for word comparisons against zero. ++void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, ++ FlagsContinuation* cont) { ++ // Try to combine with comparisons against 0 by simply inverting the branch. ++ while (CanCover(user, value)) { ++ if (value->opcode() == IrOpcode::kWord32Equal) { ++ Int32BinopMatcher m(value); ++ if (!m.right().Is(0)) break; ++ user = value; ++ value = m.left().node(); ++ } else if (value->opcode() == IrOpcode::kWord64Equal) { ++ Int64BinopMatcher m(value); ++ if (!m.right().Is(0)) break; ++ user = value; ++ value = m.left().node(); ++ } else { ++ break; ++ } ++ ++ cont->Negate(); ++ } ++ ++ if (CanCover(user, value)) { ++ switch (value->opcode()) { ++ case IrOpcode::kWord32Equal: ++ cont->OverwriteAndNegateIfEqual(kEqual); ++ return VisitWord32Compare(this, value, cont); ++ case IrOpcode::kInt32LessThan: ++ cont->OverwriteAndNegateIfEqual(kSignedLessThan); ++ return VisitWord32Compare(this, value, cont); ++ case IrOpcode::kInt32LessThanOrEqual: ++ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual); ++ return VisitWord32Compare(this, value, cont); ++ case IrOpcode::kUint32LessThan: ++ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); ++ return VisitWord32Compare(this, value, cont); ++ case IrOpcode::kUint32LessThanOrEqual: ++ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); ++ return VisitWord32Compare(this, value, cont); ++ case IrOpcode::kWord64Equal: ++ cont->OverwriteAndNegateIfEqual(kEqual); ++ return VisitWord64Compare(this, value, cont); ++ case IrOpcode::kInt64LessThan: ++ cont->OverwriteAndNegateIfEqual(kSignedLessThan); ++ return VisitWord64Compare(this, value, cont); ++ case IrOpcode::kInt64LessThanOrEqual: ++ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual); ++ return VisitWord64Compare(this, value, cont); ++ case IrOpcode::kUint64LessThan: ++ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); ++ return VisitWord64Compare(this, value, cont); ++ case IrOpcode::kUint64LessThanOrEqual: ++ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); ++ return VisitWord64Compare(this, value, cont); ++ case IrOpcode::kFloat32Equal: ++ cont->OverwriteAndNegateIfEqual(kEqual); ++ return VisitFloat32Compare(this, value, cont); ++ case IrOpcode::kFloat32LessThan: ++ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); ++ return VisitFloat32Compare(this, value, cont); ++ case IrOpcode::kFloat32LessThanOrEqual: ++ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); ++ return VisitFloat32Compare(this, value, cont); ++ case IrOpcode::kFloat64Equal: ++ cont->OverwriteAndNegateIfEqual(kEqual); ++ return VisitFloat64Compare(this, value, cont); ++ case IrOpcode::kFloat64LessThan: ++ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); ++ return VisitFloat64Compare(this, value, cont); ++ case IrOpcode::kFloat64LessThanOrEqual: ++ 
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); ++ return VisitFloat64Compare(this, value, cont); ++ case IrOpcode::kProjection: ++ // Check if this is the overflow output projection of an ++ // WithOverflow node. ++ if (ProjectionIndexOf(value->op()) == 1u) { ++ // We cannot combine the WithOverflow with this branch ++ // unless the 0th projection (the use of the actual value of the ++ // is either nullptr, which means there's no use of the ++ // actual value, or was already defined, which means it is scheduled ++ // *AFTER* this branch). ++ Node* const node = value->InputAt(0); ++ Node* const result = NodeProperties::FindProjection(node, 0); ++ if (result == nullptr || IsDefined(result)) { ++ switch (node->opcode()) { ++ case IrOpcode::kInt32AddWithOverflow: ++ cont->OverwriteAndNegateIfEqual(kOverflow); ++ return VisitBinop(this, node, kLa64Dadd, cont); ++ case IrOpcode::kInt32SubWithOverflow: ++ cont->OverwriteAndNegateIfEqual(kOverflow); ++ return VisitBinop(this, node, kLa64Dsub, cont); ++ case IrOpcode::kInt32MulWithOverflow: ++ cont->OverwriteAndNegateIfEqual(kOverflow); ++ return VisitBinop(this, node, kLa64MulOvf, cont); ++ case IrOpcode::kInt64AddWithOverflow: ++ cont->OverwriteAndNegateIfEqual(kOverflow); ++ return VisitBinop(this, node, kLa64DaddOvf, cont); ++ case IrOpcode::kInt64SubWithOverflow: ++ cont->OverwriteAndNegateIfEqual(kOverflow); ++ return VisitBinop(this, node, kLa64DsubOvf, cont); ++ default: ++ break; ++ } ++ } ++ } ++ break; ++ case IrOpcode::kWord32And: ++ case IrOpcode::kWord64And: ++ return VisitWordCompare(this, value, kLa64Tst, cont, true); ++ case IrOpcode::kStackPointerGreaterThan: ++ cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition); ++ return VisitStackPointerGreaterThan(value, cont); ++ default: ++ break; ++ } ++ } ++ ++ // Continuation could not be combined with a compare, emit compare against 0. ++ EmitWordCompareZero(this, value, cont); ++} ++ ++void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) { ++ La64OperandGenerator g(this); ++ InstructionOperand value_operand = g.UseRegister(node->InputAt(0)); ++ ++ // Emit either ArchTableSwitch or ArchBinarySearchSwitch. ++ if (enable_switch_jump_table_ == kEnableSwitchJumpTable) { ++ static const size_t kMaxTableSwitchValueRange = 2 << 16; ++ size_t table_space_cost = 10 + 2 * sw.value_range(); ++ size_t table_time_cost = 3; ++ size_t lookup_space_cost = 2 + 2 * sw.case_count(); ++ size_t lookup_time_cost = sw.case_count(); ++ if (sw.case_count() > 0 && ++ table_space_cost + 3 * table_time_cost <= ++ lookup_space_cost + 3 * lookup_time_cost && ++ sw.min_value() > std::numeric_limits::min() && ++ sw.value_range() <= kMaxTableSwitchValueRange) { ++ InstructionOperand index_operand = value_operand; ++ if (sw.min_value()) { ++ index_operand = g.TempRegister(); ++ Emit(kLa64Sub, index_operand, value_operand, ++ g.TempImmediate(sw.min_value())); ++ } ++ // Generate a table lookup. ++ return EmitTableSwitch(sw, index_operand); ++ } ++ } ++ ++ // Generate a tree of conditional jumps. 
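The jump-table-versus-binary-search decision above is driven entirely by the space/time cost formulas in VisitSwitch. A minimal standalone sketch, using hypothetical case counts rather than anything taken from the patch, shows how the weighted comparison (space + 3 * time) plays out; the constants 10, 2 and 3 mirror the ones used above.

#include <cstdio>

int main() {
  // Hypothetical switch: 8 cases spread over a value range of 400.
  const int value_range = 400;
  const int case_count = 8;

  const int table_space_cost = 10 + 2 * value_range;  // 810
  const int table_time_cost = 3;
  const int lookup_space_cost = 2 + 2 * case_count;   // 18
  const int lookup_time_cost = case_count;            // 8

  // Same weighting as VisitSwitch: space + 3 * time.
  const bool use_table =
      table_space_cost + 3 * table_time_cost <=   // 819
      lookup_space_cost + 3 * lookup_time_cost;   // 42
  std::printf("use jump table: %s\n", use_table ? "yes" : "no");  // prints "no"
  return 0;
}

With a dense switch (say 8 cases covering a value range of 8) the table side wins instead: 10 + 16 + 9 = 35 against 18 + 24 = 42, so a jump table is emitted.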
++ return EmitBinarySearchSwitch(sw, value_operand); ++} ++ ++void InstructionSelector::VisitWord32Equal(Node* const node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); ++ Int32BinopMatcher m(node); ++ if (m.right().Is(0)) { ++ return VisitWordCompareZero(m.node(), m.left().node(), &cont); ++ } ++ ++ VisitWord32Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitInt32LessThan(Node* node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node); ++ VisitWord32Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) { ++ FlagsContinuation cont = ++ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node); ++ VisitWord32Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitUint32LessThan(Node* node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); ++ VisitWord32Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) { ++ FlagsContinuation cont = ++ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); ++ VisitWord32Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitInt32AddWithOverflow(Node* node) { ++ if (Node* ovf = NodeProperties::FindProjection(node, 1)) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); ++ return VisitBinop(this, node, kLa64Dadd, &cont); ++ } ++ FlagsContinuation cont; ++ VisitBinop(this, node, kLa64Dadd, &cont); ++} ++ ++void InstructionSelector::VisitInt32SubWithOverflow(Node* node) { ++ if (Node* ovf = NodeProperties::FindProjection(node, 1)) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); ++ return VisitBinop(this, node, kLa64Dsub, &cont); ++ } ++ FlagsContinuation cont; ++ VisitBinop(this, node, kLa64Dsub, &cont); ++} ++ ++void InstructionSelector::VisitInt32MulWithOverflow(Node* node) { ++ if (Node* ovf = NodeProperties::FindProjection(node, 1)) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); ++ return VisitBinop(this, node, kLa64MulOvf, &cont); ++ } ++ FlagsContinuation cont; ++ VisitBinop(this, node, kLa64MulOvf, &cont); ++} ++ ++void InstructionSelector::VisitInt64AddWithOverflow(Node* node) { ++ if (Node* ovf = NodeProperties::FindProjection(node, 1)) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); ++ return VisitBinop(this, node, kLa64DaddOvf, &cont); ++ } ++ FlagsContinuation cont; ++ VisitBinop(this, node, kLa64DaddOvf, &cont); ++} ++ ++void InstructionSelector::VisitInt64SubWithOverflow(Node* node) { ++ if (Node* ovf = NodeProperties::FindProjection(node, 1)) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); ++ return VisitBinop(this, node, kLa64DsubOvf, &cont); ++ } ++ FlagsContinuation cont; ++ VisitBinop(this, node, kLa64DsubOvf, &cont); ++} ++ ++void InstructionSelector::VisitWord64Equal(Node* const node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); ++ Int64BinopMatcher m(node); ++ if (m.right().Is(0)) { ++ return VisitWordCompareZero(m.node(), m.left().node(), &cont); ++ } ++ ++ VisitWord64Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitInt64LessThan(Node* node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node); ++ VisitWord64Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) { ++ FlagsContinuation cont = ++ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node); ++ VisitWord64Compare(this, 
node, &cont); ++} ++ ++void InstructionSelector::VisitUint64LessThan(Node* node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); ++ VisitWord64Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) { ++ FlagsContinuation cont = ++ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); ++ VisitWord64Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitFloat32Equal(Node* node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); ++ VisitFloat32Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitFloat32LessThan(Node* node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); ++ VisitFloat32Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) { ++ FlagsContinuation cont = ++ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); ++ VisitFloat32Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitFloat64Equal(Node* node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); ++ VisitFloat64Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitFloat64LessThan(Node* node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); ++ VisitFloat64Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) { ++ FlagsContinuation cont = ++ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); ++ VisitFloat64Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) { ++ VisitRR(this, kLa64Float64ExtractLowWord32, node); ++} ++ ++void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) { ++ VisitRR(this, kLa64Float64ExtractHighWord32, node); ++} ++ ++void InstructionSelector::VisitFloat64SilenceNaN(Node* node) { ++ VisitRR(this, kLa64Float64SilenceNaN, node); ++} ++ ++void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) { ++ La64OperandGenerator g(this); ++ Node* left = node->InputAt(0); ++ Node* right = node->InputAt(1); ++ Emit(kLa64Float64InsertLowWord32, g.DefineSameAsFirst(node), ++ g.UseRegister(left), g.UseRegister(right)); ++} ++ ++void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) { ++ La64OperandGenerator g(this); ++ Node* left = node->InputAt(0); ++ Node* right = node->InputAt(1); ++ Emit(kLa64Float64InsertHighWord32, g.DefineSameAsFirst(node), ++ g.UseRegister(left), g.UseRegister(right)); ++} ++ ++void InstructionSelector::VisitMemoryBarrier(Node* node) { ++ La64OperandGenerator g(this); ++ Emit(kLa64Sync, g.NoOutput()); ++} ++ ++void InstructionSelector::VisitWord32AtomicLoad(Node* node) { ++ LoadRepresentation load_rep = LoadRepresentationOf(node->op()); ++ ArchOpcode opcode = kArchNop; ++ switch (load_rep.representation()) { ++ case MachineRepresentation::kWord8: ++ opcode = ++ load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8; ++ break; ++ case MachineRepresentation::kWord16: ++ opcode = load_rep.IsSigned() ? 
kWord32AtomicLoadInt16 ++ : kWord32AtomicLoadUint16; ++ break; ++ case MachineRepresentation::kWord32: ++ opcode = kWord32AtomicLoadWord32; ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ VisitAtomicLoad(this, node, opcode); ++} ++ ++void InstructionSelector::VisitWord32AtomicStore(Node* node) { ++ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op()); ++ ArchOpcode opcode = kArchNop; ++ switch (rep) { ++ case MachineRepresentation::kWord8: ++ opcode = kWord32AtomicStoreWord8; ++ break; ++ case MachineRepresentation::kWord16: ++ opcode = kWord32AtomicStoreWord16; ++ break; ++ case MachineRepresentation::kWord32: ++ opcode = kWord32AtomicStoreWord32; ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ ++ VisitAtomicStore(this, node, opcode); ++} ++ ++void InstructionSelector::VisitWord64AtomicLoad(Node* node) { ++ LoadRepresentation load_rep = LoadRepresentationOf(node->op()); ++ ArchOpcode opcode = kArchNop; ++ switch (load_rep.representation()) { ++ case MachineRepresentation::kWord8: ++ opcode = kLa64Word64AtomicLoadUint8; ++ break; ++ case MachineRepresentation::kWord16: ++ opcode = kLa64Word64AtomicLoadUint16; ++ break; ++ case MachineRepresentation::kWord32: ++ opcode = kLa64Word64AtomicLoadUint32; ++ break; ++ case MachineRepresentation::kWord64: ++ opcode = kLa64Word64AtomicLoadUint64; ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ VisitAtomicLoad(this, node, opcode); ++} ++ ++void InstructionSelector::VisitWord64AtomicStore(Node* node) { ++ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op()); ++ ArchOpcode opcode = kArchNop; ++ switch (rep) { ++ case MachineRepresentation::kWord8: ++ opcode = kLa64Word64AtomicStoreWord8; ++ break; ++ case MachineRepresentation::kWord16: ++ opcode = kLa64Word64AtomicStoreWord16; ++ break; ++ case MachineRepresentation::kWord32: ++ opcode = kLa64Word64AtomicStoreWord32; ++ break; ++ case MachineRepresentation::kWord64: ++ opcode = kLa64Word64AtomicStoreWord64; ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ ++ VisitAtomicStore(this, node, opcode); ++} ++ ++void InstructionSelector::VisitWord32AtomicExchange(Node* node) { ++ ArchOpcode opcode = kArchNop; ++ MachineType type = AtomicOpType(node->op()); ++ if (type == MachineType::Int8()) { ++ opcode = kWord32AtomicExchangeInt8; ++ } else if (type == MachineType::Uint8()) { ++ opcode = kWord32AtomicExchangeUint8; ++ } else if (type == MachineType::Int16()) { ++ opcode = kWord32AtomicExchangeInt16; ++ } else if (type == MachineType::Uint16()) { ++ opcode = kWord32AtomicExchangeUint16; ++ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { ++ opcode = kWord32AtomicExchangeWord32; ++ } else { ++ UNREACHABLE(); ++ return; ++ } ++ ++ VisitAtomicExchange(this, node, opcode); ++} ++ ++void InstructionSelector::VisitWord64AtomicExchange(Node* node) { ++ ArchOpcode opcode = kArchNop; ++ MachineType type = AtomicOpType(node->op()); ++ if (type == MachineType::Uint8()) { ++ opcode = kLa64Word64AtomicExchangeUint8; ++ } else if (type == MachineType::Uint16()) { ++ opcode = kLa64Word64AtomicExchangeUint16; ++ } else if (type == MachineType::Uint32()) { ++ opcode = kLa64Word64AtomicExchangeUint32; ++ } else if (type == MachineType::Uint64()) { ++ opcode = kLa64Word64AtomicExchangeUint64; ++ } else { ++ UNREACHABLE(); ++ return; ++ } ++ VisitAtomicExchange(this, node, opcode); ++} ++ ++void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) { ++ ArchOpcode opcode = kArchNop; ++ MachineType type = AtomicOpType(node->op()); ++ if (type == 
MachineType::Int8()) { ++ opcode = kWord32AtomicCompareExchangeInt8; ++ } else if (type == MachineType::Uint8()) { ++ opcode = kWord32AtomicCompareExchangeUint8; ++ } else if (type == MachineType::Int16()) { ++ opcode = kWord32AtomicCompareExchangeInt16; ++ } else if (type == MachineType::Uint16()) { ++ opcode = kWord32AtomicCompareExchangeUint16; ++ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { ++ opcode = kWord32AtomicCompareExchangeWord32; ++ } else { ++ UNREACHABLE(); ++ return; ++ } ++ ++ VisitAtomicCompareExchange(this, node, opcode); ++} ++ ++void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) { ++ ArchOpcode opcode = kArchNop; ++ MachineType type = AtomicOpType(node->op()); ++ if (type == MachineType::Uint8()) { ++ opcode = kLa64Word64AtomicCompareExchangeUint8; ++ } else if (type == MachineType::Uint16()) { ++ opcode = kLa64Word64AtomicCompareExchangeUint16; ++ } else if (type == MachineType::Uint32()) { ++ opcode = kLa64Word64AtomicCompareExchangeUint32; ++ } else if (type == MachineType::Uint64()) { ++ opcode = kLa64Word64AtomicCompareExchangeUint64; ++ } else { ++ UNREACHABLE(); ++ return; ++ } ++ VisitAtomicCompareExchange(this, node, opcode); ++} ++void InstructionSelector::VisitWord32AtomicBinaryOperation( ++ Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op, ++ ArchOpcode uint16_op, ArchOpcode word32_op) { ++ ArchOpcode opcode = kArchNop; ++ MachineType type = AtomicOpType(node->op()); ++ if (type == MachineType::Int8()) { ++ opcode = int8_op; ++ } else if (type == MachineType::Uint8()) { ++ opcode = uint8_op; ++ } else if (type == MachineType::Int16()) { ++ opcode = int16_op; ++ } else if (type == MachineType::Uint16()) { ++ opcode = uint16_op; ++ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { ++ opcode = word32_op; ++ } else { ++ UNREACHABLE(); ++ return; ++ } ++ ++ VisitAtomicBinop(this, node, opcode); ++} ++ ++#define VISIT_ATOMIC_BINOP(op) \ ++ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \ ++ VisitWord32AtomicBinaryOperation( \ ++ node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \ ++ kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \ ++ kWord32Atomic##op##Word32); \ ++ } ++VISIT_ATOMIC_BINOP(Add) ++VISIT_ATOMIC_BINOP(Sub) ++VISIT_ATOMIC_BINOP(And) ++VISIT_ATOMIC_BINOP(Or) ++VISIT_ATOMIC_BINOP(Xor) ++#undef VISIT_ATOMIC_BINOP ++ ++void InstructionSelector::VisitWord64AtomicBinaryOperation( ++ Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op, ++ ArchOpcode uint64_op) { ++ ArchOpcode opcode = kArchNop; ++ MachineType type = AtomicOpType(node->op()); ++ if (type == MachineType::Uint8()) { ++ opcode = uint8_op; ++ } else if (type == MachineType::Uint16()) { ++ opcode = uint16_op; ++ } else if (type == MachineType::Uint32()) { ++ opcode = uint32_op; ++ } else if (type == MachineType::Uint64()) { ++ opcode = uint64_op; ++ } else { ++ UNREACHABLE(); ++ return; ++ } ++ VisitAtomicBinop(this, node, opcode); ++} ++ ++#define VISIT_ATOMIC_BINOP(op) \ ++ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \ ++ VisitWord64AtomicBinaryOperation( \ ++ node, kLa64Word64Atomic##op##Uint8, kLa64Word64Atomic##op##Uint16, \ ++ kLa64Word64Atomic##op##Uint32, kLa64Word64Atomic##op##Uint64); \ ++ } ++VISIT_ATOMIC_BINOP(Add) ++VISIT_ATOMIC_BINOP(Sub) ++VISIT_ATOMIC_BINOP(And) ++VISIT_ATOMIC_BINOP(Or) ++VISIT_ATOMIC_BINOP(Xor) ++#undef VISIT_ATOMIC_BINOP ++ ++void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) { ++ 
UNREACHABLE(); ++} ++ ++void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { ++ UNREACHABLE(); ++} ++ ++#define SIMD_TYPE_LIST(V) \ ++ V(F32x4) \ ++ V(I32x4) \ ++ V(I16x8) \ ++ V(I8x16) ++ ++#define SIMD_UNOP_LIST(V) \ ++ V(F64x2Abs, kLa64F64x2Abs) \ ++ V(F64x2Neg, kLa64F64x2Neg) \ ++ V(F64x2Sqrt, kLa64F64x2Sqrt) \ ++ V(I64x2Neg, kLa64I64x2Neg) \ ++ V(F32x4SConvertI32x4, kLa64F32x4SConvertI32x4) \ ++ V(F32x4UConvertI32x4, kLa64F32x4UConvertI32x4) \ ++ V(F32x4Abs, kLa64F32x4Abs) \ ++ V(F32x4Neg, kLa64F32x4Neg) \ ++ V(F32x4Sqrt, kLa64F32x4Sqrt) \ ++ V(F32x4RecipApprox, kLa64F32x4RecipApprox) \ ++ V(F32x4RecipSqrtApprox, kLa64F32x4RecipSqrtApprox) \ ++ V(I32x4SConvertF32x4, kLa64I32x4SConvertF32x4) \ ++ V(I32x4UConvertF32x4, kLa64I32x4UConvertF32x4) \ ++ V(I32x4Neg, kLa64I32x4Neg) \ ++ V(I32x4SConvertI16x8Low, kLa64I32x4SConvertI16x8Low) \ ++ V(I32x4SConvertI16x8High, kLa64I32x4SConvertI16x8High) \ ++ V(I32x4UConvertI16x8Low, kLa64I32x4UConvertI16x8Low) \ ++ V(I32x4UConvertI16x8High, kLa64I32x4UConvertI16x8High) \ ++ V(I32x4Abs, kLa64I32x4Abs) \ ++ V(I16x8Neg, kLa64I16x8Neg) \ ++ V(I16x8SConvertI8x16Low, kLa64I16x8SConvertI8x16Low) \ ++ V(I16x8SConvertI8x16High, kLa64I16x8SConvertI8x16High) \ ++ V(I16x8UConvertI8x16Low, kLa64I16x8UConvertI8x16Low) \ ++ V(I16x8UConvertI8x16High, kLa64I16x8UConvertI8x16High) \ ++ V(I16x8Abs, kLa64I16x8Abs) \ ++ V(I8x16Neg, kLa64I8x16Neg) \ ++ V(I8x16Abs, kLa64I8x16Abs) \ ++ V(S128Not, kLa64S128Not) \ ++ V(S1x4AnyTrue, kLa64S1x4AnyTrue) \ ++ V(S1x4AllTrue, kLa64S1x4AllTrue) \ ++ V(S1x8AnyTrue, kLa64S1x8AnyTrue) \ ++ V(S1x8AllTrue, kLa64S1x8AllTrue) \ ++ V(S1x16AnyTrue, kLa64S1x16AnyTrue) \ ++ V(S1x16AllTrue, kLa64S1x16AllTrue) ++ ++#define SIMD_SHIFT_OP_LIST(V) \ ++ V(I64x2Shl) \ ++ V(I64x2ShrS) \ ++ V(I64x2ShrU) \ ++ V(I32x4Shl) \ ++ V(I32x4ShrS) \ ++ V(I32x4ShrU) \ ++ V(I16x8Shl) \ ++ V(I16x8ShrS) \ ++ V(I16x8ShrU) \ ++ V(I8x16Shl) \ ++ V(I8x16ShrS) \ ++ V(I8x16ShrU) ++ ++#define SIMD_BINOP_LIST(V) \ ++ V(F64x2Add, kLa64F64x2Add) \ ++ V(F64x2Sub, kLa64F64x2Sub) \ ++ V(F64x2Mul, kLa64F64x2Mul) \ ++ V(F64x2Div, kLa64F64x2Div) \ ++ V(F64x2Min, kLa64F64x2Min) \ ++ V(F64x2Max, kLa64F64x2Max) \ ++ V(F64x2Eq, kLa64F64x2Eq) \ ++ V(F64x2Ne, kLa64F64x2Ne) \ ++ V(F64x2Lt, kLa64F64x2Lt) \ ++ V(F64x2Le, kLa64F64x2Le) \ ++ V(I64x2Add, kLa64I64x2Add) \ ++ V(I64x2Sub, kLa64I64x2Sub) \ ++ V(I64x2Mul, kLa64I64x2Mul) \ ++ V(F32x4Add, kLa64F32x4Add) \ ++ V(F32x4AddHoriz, kLa64F32x4AddHoriz) \ ++ V(F32x4Sub, kLa64F32x4Sub) \ ++ V(F32x4Mul, kLa64F32x4Mul) \ ++ V(F32x4Div, kLa64F32x4Div) \ ++ V(F32x4Max, kLa64F32x4Max) \ ++ V(F32x4Min, kLa64F32x4Min) \ ++ V(F32x4Eq, kLa64F32x4Eq) \ ++ V(F32x4Ne, kLa64F32x4Ne) \ ++ V(F32x4Lt, kLa64F32x4Lt) \ ++ V(F32x4Le, kLa64F32x4Le) \ ++ V(I32x4Add, kLa64I32x4Add) \ ++ V(I32x4AddHoriz, kLa64I32x4AddHoriz) \ ++ V(I32x4Sub, kLa64I32x4Sub) \ ++ V(I32x4Mul, kLa64I32x4Mul) \ ++ V(I32x4MaxS, kLa64I32x4MaxS) \ ++ V(I32x4MinS, kLa64I32x4MinS) \ ++ V(I32x4MaxU, kLa64I32x4MaxU) \ ++ V(I32x4MinU, kLa64I32x4MinU) \ ++ V(I32x4Eq, kLa64I32x4Eq) \ ++ V(I32x4Ne, kLa64I32x4Ne) \ ++ V(I32x4GtS, kLa64I32x4GtS) \ ++ V(I32x4GeS, kLa64I32x4GeS) \ ++ V(I32x4GtU, kLa64I32x4GtU) \ ++ V(I32x4GeU, kLa64I32x4GeU) \ ++ V(I16x8Add, kLa64I16x8Add) \ ++ V(I16x8AddSaturateS, kLa64I16x8AddSaturateS) \ ++ V(I16x8AddSaturateU, kLa64I16x8AddSaturateU) \ ++ V(I16x8AddHoriz, kLa64I16x8AddHoriz) \ ++ V(I16x8Sub, kLa64I16x8Sub) \ ++ V(I16x8SubSaturateS, kLa64I16x8SubSaturateS) \ ++ V(I16x8SubSaturateU, kLa64I16x8SubSaturateU) \ ++ V(I16x8Mul, kLa64I16x8Mul) \ ++ V(I16x8MaxS, 
kLa64I16x8MaxS) \ ++ V(I16x8MinS, kLa64I16x8MinS) \ ++ V(I16x8MaxU, kLa64I16x8MaxU) \ ++ V(I16x8MinU, kLa64I16x8MinU) \ ++ V(I16x8Eq, kLa64I16x8Eq) \ ++ V(I16x8Ne, kLa64I16x8Ne) \ ++ V(I16x8GtS, kLa64I16x8GtS) \ ++ V(I16x8GeS, kLa64I16x8GeS) \ ++ V(I16x8GtU, kLa64I16x8GtU) \ ++ V(I16x8GeU, kLa64I16x8GeU) \ ++ V(I16x8RoundingAverageU, kLa64I16x8RoundingAverageU) \ ++ V(I16x8SConvertI32x4, kLa64I16x8SConvertI32x4) \ ++ V(I16x8UConvertI32x4, kLa64I16x8UConvertI32x4) \ ++ V(I8x16Add, kLa64I8x16Add) \ ++ V(I8x16AddSaturateS, kLa64I8x16AddSaturateS) \ ++ V(I8x16AddSaturateU, kLa64I8x16AddSaturateU) \ ++ V(I8x16Sub, kLa64I8x16Sub) \ ++ V(I8x16SubSaturateS, kLa64I8x16SubSaturateS) \ ++ V(I8x16SubSaturateU, kLa64I8x16SubSaturateU) \ ++ V(I8x16Mul, kLa64I8x16Mul) \ ++ V(I8x16MaxS, kLa64I8x16MaxS) \ ++ V(I8x16MinS, kLa64I8x16MinS) \ ++ V(I8x16MaxU, kLa64I8x16MaxU) \ ++ V(I8x16MinU, kLa64I8x16MinU) \ ++ V(I8x16Eq, kLa64I8x16Eq) \ ++ V(I8x16Ne, kLa64I8x16Ne) \ ++ V(I8x16GtS, kLa64I8x16GtS) \ ++ V(I8x16GeS, kLa64I8x16GeS) \ ++ V(I8x16GtU, kLa64I8x16GtU) \ ++ V(I8x16GeU, kLa64I8x16GeU) \ ++ V(I8x16RoundingAverageU, kLa64I8x16RoundingAverageU) \ ++ V(I8x16SConvertI16x8, kLa64I8x16SConvertI16x8) \ ++ V(I8x16UConvertI16x8, kLa64I8x16UConvertI16x8) \ ++ V(S128And, kLa64S128And) \ ++ V(S128Or, kLa64S128Or) \ ++ V(S128Xor, kLa64S128Xor) \ ++ V(S128AndNot, kLa64S128AndNot) ++ ++void InstructionSelector::VisitS128Zero(Node* node) { ++ La64OperandGenerator g(this); ++ Emit(kLa64S128Zero, g.DefineAsRegister(node)); ++} ++ ++#define SIMD_VISIT_SPLAT(Type) \ ++ void InstructionSelector::Visit##Type##Splat(Node* node) { \ ++ VisitRR(this, kLa64##Type##Splat, node); \ ++ } ++SIMD_TYPE_LIST(SIMD_VISIT_SPLAT) ++SIMD_VISIT_SPLAT(F64x2) ++#undef SIMD_VISIT_SPLAT ++ ++#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \ ++ void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \ ++ VisitRRI(this, kLa64##Type##ExtractLane##Sign, node); \ ++ } ++SIMD_VISIT_EXTRACT_LANE(F64x2, ) ++SIMD_VISIT_EXTRACT_LANE(F32x4, ) ++SIMD_VISIT_EXTRACT_LANE(I32x4, ) ++SIMD_VISIT_EXTRACT_LANE(I16x8, U) ++SIMD_VISIT_EXTRACT_LANE(I16x8, S) ++SIMD_VISIT_EXTRACT_LANE(I8x16, U) ++SIMD_VISIT_EXTRACT_LANE(I8x16, S) ++#undef SIMD_VISIT_EXTRACT_LANE ++ ++#define SIMD_VISIT_REPLACE_LANE(Type) \ ++ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \ ++ VisitRRIR(this, kLa64##Type##ReplaceLane, node); \ ++ } ++SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE) ++SIMD_VISIT_REPLACE_LANE(F64x2) ++#undef SIMD_VISIT_REPLACE_LANE ++ ++#define SIMD_VISIT_UNOP(Name, instruction) \ ++ void InstructionSelector::Visit##Name(Node* node) { \ ++ VisitRR(this, instruction, node); \ ++ } ++SIMD_UNOP_LIST(SIMD_VISIT_UNOP) ++#undef SIMD_VISIT_UNOP ++ ++#define SIMD_VISIT_SHIFT_OP(Name) \ ++ void InstructionSelector::Visit##Name(Node* node) { \ ++ VisitSimdShift(this, kLa64##Name, node); \ ++ } ++SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP) ++#undef SIMD_VISIT_SHIFT_OP ++ ++#define SIMD_VISIT_BINOP(Name, instruction) \ ++ void InstructionSelector::Visit##Name(Node* node) { \ ++ VisitRRR(this, instruction, node); \ ++ } ++SIMD_BINOP_LIST(SIMD_VISIT_BINOP) ++#undef SIMD_VISIT_BINOP ++ ++void InstructionSelector::VisitS128Select(Node* node) { ++ VisitRRRR(this, kLa64S128Select, node); ++} ++ ++namespace { ++ ++struct ShuffleEntry { ++ uint8_t shuffle[kSimd128Size]; ++ ArchOpcode opcode; ++}; ++ ++static const ShuffleEntry arch_shuffles[] = { ++ {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23}, ++ kLa64S32x4InterleaveRight}, ++ {{8, 9, 10, 11, 24, 25, 26, 27, 
12, 13, 14, 15, 28, 29, 30, 31}, ++ kLa64S32x4InterleaveLeft}, ++ {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27}, ++ kLa64S32x4PackEven}, ++ {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31}, ++ kLa64S32x4PackOdd}, ++ {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27}, ++ kLa64S32x4InterleaveEven}, ++ {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31}, ++ kLa64S32x4InterleaveOdd}, ++ ++ {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23}, ++ kLa64S16x8InterleaveRight}, ++ {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31}, ++ kLa64S16x8InterleaveLeft}, ++ {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29}, ++ kLa64S16x8PackEven}, ++ {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31}, ++ kLa64S16x8PackOdd}, ++ {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29}, ++ kLa64S16x8InterleaveEven}, ++ {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31}, ++ kLa64S16x8InterleaveOdd}, ++ {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9}, kLa64S16x4Reverse}, ++ {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}, kLa64S16x2Reverse}, ++ ++ {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23}, ++ kLa64S8x16InterleaveRight}, ++ {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31}, ++ kLa64S8x16InterleaveLeft}, ++ {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30}, ++ kLa64S8x16PackEven}, ++ {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31}, ++ kLa64S8x16PackOdd}, ++ {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30}, ++ kLa64S8x16InterleaveEven}, ++ {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31}, ++ kLa64S8x16InterleaveOdd}, ++ {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, kLa64S8x8Reverse}, ++ {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, kLa64S8x4Reverse}, ++ {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}, kLa64S8x2Reverse}}; ++ ++bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table, ++ size_t num_entries, bool is_swizzle, ++ ArchOpcode* opcode) { ++ uint8_t mask = is_swizzle ? 
kSimd128Size - 1 : 2 * kSimd128Size - 1; ++ for (size_t i = 0; i < num_entries; ++i) { ++ const ShuffleEntry& entry = table[i]; ++ int j = 0; ++ for (; j < kSimd128Size; ++j) { ++ if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) { ++ break; ++ } ++ } ++ if (j == kSimd128Size) { ++ *opcode = entry.opcode; ++ return true; ++ } ++ } ++ return false; ++} ++ ++} // namespace ++ ++void InstructionSelector::VisitS8x16Shuffle(Node* node) { ++ uint8_t shuffle[kSimd128Size]; ++ bool is_swizzle; ++ CanonicalizeShuffle(node, shuffle, &is_swizzle); ++ uint8_t shuffle32x4[4]; ++ ArchOpcode opcode; ++ if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles), ++ is_swizzle, &opcode)) { ++ VisitRRR(this, opcode, node); ++ return; ++ } ++ Node* input0 = node->InputAt(0); ++ Node* input1 = node->InputAt(1); ++ uint8_t offset; ++ La64OperandGenerator g(this); ++ if (TryMatchConcat(shuffle, &offset)) { ++ Emit(kLa64S8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1), ++ g.UseRegister(input0), g.UseImmediate(offset)); ++ return; ++ } ++ if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) { ++ Emit(kLa64S32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0), ++ g.UseRegister(input1), g.UseImmediate(Pack4Lanes(shuffle32x4))); ++ return; ++ } ++ Emit(kLa64S8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0), ++ g.UseRegister(input1), g.UseImmediate(Pack4Lanes(shuffle)), ++ g.UseImmediate(Pack4Lanes(shuffle + 4)), ++ g.UseImmediate(Pack4Lanes(shuffle + 8)), ++ g.UseImmediate(Pack4Lanes(shuffle + 12))); ++} ++ ++void InstructionSelector::VisitS8x16Swizzle(Node* node) { ++ La64OperandGenerator g(this); ++ InstructionOperand temps[] = {g.TempSimd128Register()}; ++ // We don't want input 0 or input 1 to be the same as output, since we will ++ // modify output before do the calculation. 
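TryMatchArchShuffle above compares the requested lane pattern against each table entry under a mask: for a swizzle (one input) only the low four bits of every lane index matter, for a two-input shuffle the low five. A self-contained sketch of the same matching idea, with a hypothetical two-entry table standing in for arch_shuffles and a string name standing in for the ArchOpcode:

#include <cstdint>
#include <cstdio>

constexpr int kLanes = 16;

struct Entry {
  uint8_t shuffle[kLanes];
  const char* name;  // stands in for the ArchOpcode used in the patch
};

// Hypothetical table: byte-reverse within 64-bit halves, and pack-even.
constexpr Entry kTable[] = {
    {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, "S8x8Reverse"},
    {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30}, "S8x16PackEven"},
};

const char* Match(const uint8_t* shuffle, bool is_swizzle) {
  // Swizzles read from one 16-byte input, so indices are compared mod 16;
  // general shuffles read from two inputs, so indices are compared mod 32.
  uint8_t mask = is_swizzle ? kLanes - 1 : 2 * kLanes - 1;
  for (const Entry& e : kTable) {
    int i = 0;
    while (i < kLanes && (e.shuffle[i] & mask) == (shuffle[i] & mask)) ++i;
    if (i == kLanes) return e.name;
  }
  return nullptr;
}

int main() {
  uint8_t req[kLanes] = {16, 18, 20, 22, 24, 26, 28, 30,
                         0, 2, 4, 6, 8, 10, 12, 14};
  // As a swizzle the second entry matches, because 16 & 15 == 0 and so on.
  const char* m = Match(req, /*is_swizzle=*/true);
  std::printf("%s\n", m ? m : "no match");  // prints "S8x16PackEven"
  return 0;
}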
++ Emit(kLa64S8x16Swizzle, g.DefineAsRegister(node), ++ g.UseUniqueRegister(node->InputAt(0)), ++ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); ++} ++ ++void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) { ++ La64OperandGenerator g(this); ++ Emit(kLa64Seb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); ++} ++ ++void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) { ++ La64OperandGenerator g(this); ++ Emit(kLa64Seh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); ++} ++ ++void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) { ++ La64OperandGenerator g(this); ++ Emit(kLa64Seb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); ++} ++ ++void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) { ++ La64OperandGenerator g(this); ++ Emit(kLa64Seh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); ++} ++ ++void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) { ++ La64OperandGenerator g(this); ++ Emit(kLa64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), ++ g.TempImmediate(0)); ++} ++ ++// static ++MachineOperatorBuilder::Flags ++InstructionSelector::SupportedMachineOperatorFlags() { ++ MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags; ++ return flags | MachineOperatorBuilder::kWord32Ctz | ++ MachineOperatorBuilder::kWord64Ctz | ++ MachineOperatorBuilder::kWord32Popcnt | ++ MachineOperatorBuilder::kWord64Popcnt | ++ MachineOperatorBuilder::kWord32ShiftIsSafe | ++ MachineOperatorBuilder::kInt32DivIsSafe | ++ MachineOperatorBuilder::kUint32DivIsSafe | ++ MachineOperatorBuilder::kFloat64RoundDown | ++ MachineOperatorBuilder::kFloat32RoundDown | ++ MachineOperatorBuilder::kFloat64RoundUp | ++ MachineOperatorBuilder::kFloat32RoundUp | ++ MachineOperatorBuilder::kFloat64RoundTruncate | ++ MachineOperatorBuilder::kFloat32RoundTruncate | ++ MachineOperatorBuilder::kFloat64RoundTiesEven | ++ MachineOperatorBuilder::kFloat32RoundTiesEven; ++} ++ ++// static ++MachineOperatorBuilder::AlignmentRequirements ++InstructionSelector::AlignmentRequirements() { ++ return MachineOperatorBuilder::AlignmentRequirements:: ++ FullUnalignedAccessSupport(); ++} ++ ++#undef SIMD_BINOP_LIST ++#undef SIMD_SHIFT_OP_LIST ++#undef SIMD_UNOP_LIST ++#undef SIMD_TYPE_LIST ++#undef TRACE_UNIMPL ++#undef TRACE ++ ++} // namespace compiler ++} // namespace internal ++} // namespace v8 +diff --git a/src/3rdparty/chromium/v8/src/compiler/c-linkage.cc b/src/3rdparty/chromium/v8/src/compiler/c-linkage.cc +index 4967f2bbfa..c3701e3ef0 100644 +--- a/src/3rdparty/chromium/v8/src/compiler/c-linkage.cc ++++ b/src/3rdparty/chromium/v8/src/compiler/c-linkage.cc +@@ -94,9 +94,22 @@ namespace { + #define PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7 + #define CALLEE_SAVE_REGISTERS \ + s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() | s6.bit() | \ +- s7.bit() +-#define CALLEE_SAVE_FP_REGISTERS \ +- f20.bit() | f22.bit() | f24.bit() | f26.bit() | f28.bit() | f30.bit() ++ s7.bit() | fp.bit() ++#define CALLEE_SAVE_FP_REGISTERS \ ++ f24.bit() | f25.bit() | f26.bit() | f27.bit() | f28.bit() | f29.bit() | \ ++ f30.bit() | f31.bit() ++ ++#elif V8_TARGET_ARCH_LA64 ++// =========================================================================== ++// == la64 ================================================================= ++// =========================================================================== ++#define PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7 ++#define 
CALLEE_SAVE_REGISTERS \ ++ s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() | s6.bit() | \ ++ s7.bit() | fp.bit() ++#define CALLEE_SAVE_FP_REGISTERS \ ++ f24.bit() | f25.bit() | f26.bit() | f27.bit() | f28.bit() | f29.bit() | \ ++ f30.bit() | f31.bit() + + #elif V8_TARGET_ARCH_PPC64 + // =========================================================================== +diff --git a/src/3rdparty/chromium/v8/src/debug/debug-evaluate.cc b/src/3rdparty/chromium/v8/src/debug/debug-evaluate.cc +index fcf9b8448a..f704f0b6d8 100644 +--- a/src/3rdparty/chromium/v8/src/debug/debug-evaluate.cc ++++ b/src/3rdparty/chromium/v8/src/debug/debug-evaluate.cc +@@ -1067,7 +1067,7 @@ void DebugEvaluate::VerifyTransitiveBuiltins(Isolate* isolate) { + } + CHECK(!failed); + #if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) || \ +- defined(V8_TARGET_ARCH_MIPS64) ++ defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_LA64) + // Isolate-independent builtin calls and jumps do not emit reloc infos + // on PPC. We try to avoid using PC relative code due to performance + // issue with especially older hardwares. +diff --git a/src/3rdparty/chromium/v8/src/debug/la64/debug-la64.cc b/src/3rdparty/chromium/v8/src/debug/la64/debug-la64.cc +new file mode 100644 +index 0000000000..081135d3bb +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/debug/la64/debug-la64.cc +@@ -0,0 +1,56 @@ ++// Copyright 2012 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#if V8_TARGET_ARCH_LA64 ++ ++#include "src/debug/debug.h" ++ ++#include "src/codegen/macro-assembler.h" ++#include "src/debug/liveedit.h" ++#include "src/execution/frames-inl.h" ++ ++namespace v8 { ++namespace internal { ++ ++#define __ ACCESS_MASM(masm) ++ ++void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) { ++ { ++ FrameScope scope(masm, StackFrame::INTERNAL); ++ __ CallRuntime(Runtime::kHandleDebuggerStatement, 0); ++ } ++ __ MaybeDropFrames(); ++ ++ // Return to caller. ++ __ Ret(); ++} ++ ++void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) { ++ // Frame is being dropped: ++ // - Drop to the target frame specified by a1. ++ // - Look up current function on the frame. ++ // - Leave the frame. ++ // - Restart the frame by calling the function. ++ __ mov(fp, a1); ++ __ Ld_d(a1, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); ++ ++ // Pop return address and frame. ++ __ LeaveFrame(StackFrame::INTERNAL); ++ ++ __ Ld_d(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); ++ __ Ld_hu( ++ a0, FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset)); ++ __ mov(a2, a0); ++ ++ __ InvokeFunction(a1, a2, a0, JUMP_FUNCTION); ++} ++ ++const bool LiveEdit::kFrameDropperSupported = true; ++ ++#undef __ ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_TARGET_ARCH_LA64 +diff --git a/src/3rdparty/chromium/v8/src/deoptimizer/la64/deoptimizer-la64.cc b/src/3rdparty/chromium/v8/src/deoptimizer/la64/deoptimizer-la64.cc +new file mode 100644 +index 0000000000..23a0051d93 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/deoptimizer/la64/deoptimizer-la64.cc +@@ -0,0 +1,241 @@ ++// Copyright 2011 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. 
++ ++#include "src/codegen/macro-assembler.h" ++#include "src/codegen/register-configuration.h" ++#include "src/codegen/safepoint-table.h" ++#include "src/deoptimizer/deoptimizer.h" ++ ++namespace v8 { ++namespace internal { ++ ++const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false; ++const int Deoptimizer::kNonLazyDeoptExitSize = 0; ++const int Deoptimizer::kLazyDeoptExitSize = 0; ++ ++#define __ masm-> ++ ++// This code tries to be close to ia32 code so that any changes can be ++// easily ported. ++void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, ++ Isolate* isolate, ++ DeoptimizeKind deopt_kind) { ++ NoRootArrayScope no_root_array(masm); ++ ++ // Unlike on ARM we don't save all the registers, just the useful ones. ++ // For the rest, there are gaps on the stack, so the offsets remain the same. ++ const int kNumberOfRegisters = Register::kNumRegisters; ++ ++ RegList restored_regs = kJSCallerSaved | kCalleeSaved; ++ RegList saved_regs = restored_regs | sp.bit() | ra.bit(); ++ ++ const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters; ++ ++ // Save all double FPU registers before messing with them. ++ __ Sub_d(sp, sp, Operand(kDoubleRegsSize)); ++ const RegisterConfiguration* config = RegisterConfiguration::Default(); ++ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { ++ int code = config->GetAllocatableDoubleCode(i); ++ const DoubleRegister fpu_reg = DoubleRegister::from_code(code); ++ int offset = code * kDoubleSize; ++ __ Fst_d(fpu_reg, MemOperand(sp, offset)); ++ } ++ ++ // Push saved_regs (needed to populate FrameDescription::registers_). ++ // Leave gaps for other registers. ++ __ Sub_d(sp, sp, kNumberOfRegisters * kPointerSize); ++ for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) { ++ if ((saved_regs & (1 << i)) != 0) { ++ __ St_d(ToRegister(i), MemOperand(sp, kPointerSize * i)); ++ } ++ } ++ ++ __ li(a2, Operand(ExternalReference::Create( ++ IsolateAddressId::kCEntryFPAddress, isolate))); ++ __ St_d(fp, MemOperand(a2, 0)); ++ ++ const int kSavedRegistersAreaSize = ++ (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize; ++ ++ // Get the bailout is passed as kRootRegister by the caller. ++ __ mov(a2, kRootRegister); ++ ++ // Get the address of the location in the code object (a3) (return ++ // address for lazy deoptimization) and compute the fp-to-sp delta in ++ // register a4. ++ __ mov(a3, ra); ++ __ Add_d(a4, sp, Operand(kSavedRegistersAreaSize)); ++ ++ __ Sub_d(a4, fp, a4); ++ ++ // Allocate a new deoptimizer object. ++ __ PrepareCallCFunction(6, a5); ++ // Pass six arguments, according to n64 ABI. ++ __ mov(a0, zero_reg); ++ Label context_check; ++ __ Ld_d(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset)); ++ __ JumpIfSmi(a1, &context_check); ++ __ Ld_d(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); ++ __ bind(&context_check); ++ __ li(a1, Operand(static_cast(deopt_kind))); ++ // a2: bailout id already loaded. ++ // a3: code address or 0 already loaded. ++ // a4: already has fp-to-sp delta. ++ __ li(a5, Operand(ExternalReference::isolate_address(isolate))); ++ ++ // Call Deoptimizer::New(). ++ { ++ AllowExternalCallThatCantCauseGC scope(masm); ++ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6); ++ } ++ ++ // Preserve "deoptimizer" object in register v0 and get the input ++ // frame descriptor pointer to a1 (deoptimizer->input_); ++ // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below. 
++ // TODO save a0 ++ //__ mov(a0, v0); ++ __ Ld_d(a1, MemOperand(a0, Deoptimizer::input_offset())); ++ ++ // Copy core registers into FrameDescription::registers_[kNumRegisters]. ++ DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters); ++ for (int i = 0; i < kNumberOfRegisters; i++) { ++ int offset = (i * kPointerSize) + FrameDescription::registers_offset(); ++ if ((saved_regs & (1 << i)) != 0) { ++ __ Ld_d(a2, MemOperand(sp, i * kPointerSize)); ++ __ St_d(a2, MemOperand(a1, offset)); ++ } else if (FLAG_debug_code) { ++ __ li(a2, Operand(kDebugZapValue)); ++ __ St_d(a2, MemOperand(a1, offset)); ++ } ++ } ++ ++ int double_regs_offset = FrameDescription::double_registers_offset(); ++ // Copy FPU registers to ++ // double_registers_[DoubleRegister::kNumAllocatableRegisters] ++ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { ++ int code = config->GetAllocatableDoubleCode(i); ++ int dst_offset = code * kDoubleSize + double_regs_offset; ++ int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize; ++ __ Fld_d(f0, MemOperand(sp, src_offset)); ++ __ Fst_d(f0, MemOperand(a1, dst_offset)); ++ } ++ ++ // Remove the saved registers from the stack. ++ __ Add_d(sp, sp, Operand(kSavedRegistersAreaSize)); ++ ++ // Compute a pointer to the unwinding limit in register a2; that is ++ // the first stack slot not part of the input frame. ++ __ Ld_d(a2, MemOperand(a1, FrameDescription::frame_size_offset())); ++ __ Add_d(a2, a2, sp); ++ ++ // Unwind the stack down to - but not including - the unwinding ++ // limit and copy the contents of the activation frame to the input ++ // frame description. ++ __ Add_d(a3, a1, Operand(FrameDescription::frame_content_offset())); ++ Label pop_loop; ++ Label pop_loop_header; ++ __ Branch(&pop_loop_header); ++ __ bind(&pop_loop); ++ __ pop(a4); ++ __ St_d(a4, MemOperand(a3, 0)); ++ __ addi_d(a3, a3, sizeof(uint64_t)); ++ __ bind(&pop_loop_header); ++ __ BranchShort(&pop_loop, ne, a2, Operand(sp)); ++ // Compute the output frame in the deoptimizer. ++ __ push(a0); // Preserve deoptimizer object across call. ++ // a0: deoptimizer object; a1: scratch. ++ __ PrepareCallCFunction(1, a1); ++ // Call Deoptimizer::ComputeOutputFrames(). ++ { ++ AllowExternalCallThatCantCauseGC scope(masm); ++ __ CallCFunction(ExternalReference::compute_output_frames_function(), 1); ++ } ++ __ pop(a0); // Restore deoptimizer object (class Deoptimizer). ++ ++ __ Ld_d(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset())); ++ ++ // Replace the current (input) frame with the output frames. ++ Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header; ++ // Outer loop state: a4 = current "FrameDescription** output_", ++ // a1 = one past the last FrameDescription**. ++ __ Ld_w(a1, MemOperand(a0, Deoptimizer::output_count_offset())); ++ __ Ld_d(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_. ++ __ Alsl_d(a1, a1, a4, kPointerSizeLog2, t7); ++ __ Branch(&outer_loop_header); ++ __ bind(&outer_push_loop); ++ // Inner loop state: a2 = current FrameDescription*, a3 = loop index. 
++ __ Ld_d(a2, MemOperand(a4, 0)); // output_[ix] ++ __ Ld_d(a3, MemOperand(a2, FrameDescription::frame_size_offset())); ++ __ Branch(&inner_loop_header); ++ __ bind(&inner_push_loop); ++ __ Sub_d(a3, a3, Operand(sizeof(uint64_t))); ++ __ Add_d(a6, a2, Operand(a3)); ++ __ Ld_d(a7, MemOperand(a6, FrameDescription::frame_content_offset())); ++ __ push(a7); ++ __ bind(&inner_loop_header); ++ __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg)); ++ ++ __ Add_d(a4, a4, Operand(kPointerSize)); ++ __ bind(&outer_loop_header); ++ __ BranchShort(&outer_push_loop, lt, a4, Operand(a1)); ++ ++ __ Ld_d(a1, MemOperand(a0, Deoptimizer::input_offset())); ++ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { ++ int code = config->GetAllocatableDoubleCode(i); ++ const DoubleRegister fpu_reg = DoubleRegister::from_code(code); ++ int src_offset = code * kDoubleSize + double_regs_offset; ++ __ Fld_d(fpu_reg, MemOperand(a1, src_offset)); ++ } ++ ++ // Push pc and continuation from the last output frame. ++ __ Ld_d(a6, MemOperand(a2, FrameDescription::pc_offset())); ++ __ push(a6); ++ __ Ld_d(a6, MemOperand(a2, FrameDescription::continuation_offset())); ++ __ push(a6); ++ ++ // Technically restoring 'at' should work unless zero_reg is also restored ++ // but it's safer to check for this. ++ DCHECK(!(t7.bit() & restored_regs)); ++ // Restore the registers from the last output frame. ++ __ mov(t7, a2); ++ for (int i = kNumberOfRegisters - 1; i >= 0; i--) { ++ int offset = (i * kPointerSize) + FrameDescription::registers_offset(); ++ if ((restored_regs & (1 << i)) != 0) { ++ __ Ld_d(ToRegister(i), MemOperand(t7, offset)); ++ } ++ } ++ ++ __ pop(t7); // Get continuation, leave pc on stack. ++ __ pop(ra); ++ __ Jump(t7); ++ __ stop(); ++} ++ ++// Maximum size of a table entry generated below. ++const int Deoptimizer::table_entry_size_ = 2 * kInstrSize; ++ ++Float32 RegisterValues::GetFloatRegister(unsigned n) const { ++ return Float32::FromBits( ++ static_cast(double_registers_[n].get_bits())); ++} ++ ++void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) { ++ SetFrameSlot(offset, value); ++} ++ ++void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) { ++ SetFrameSlot(offset, value); ++} ++ ++void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) { ++ // No embedded constant pool support. ++ UNREACHABLE(); ++} ++ ++void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; } ++ ++#undef __ ++ ++} // namespace internal ++} // namespace v8 +diff --git a/src/3rdparty/chromium/v8/src/diagnostics/gdb-jit.cc b/src/3rdparty/chromium/v8/src/diagnostics/gdb-jit.cc +index 5f36437302..4ef6eba327 100644 +--- a/src/3rdparty/chromium/v8/src/diagnostics/gdb-jit.cc ++++ b/src/3rdparty/chromium/v8/src/diagnostics/gdb-jit.cc +@@ -1077,6 +1077,8 @@ class DebugInfoSection : public DebugSection { + UNIMPLEMENTED(); + #elif V8_TARGET_ARCH_MIPS64 + UNIMPLEMENTED(); ++#elif V8_TARGET_ARCH_LA64 ++ UNIMPLEMENTED(); + #elif V8_TARGET_ARCH_PPC64 && V8_OS_LINUX + w->Write(DW_OP_reg31); // The frame pointer is here on PPC64. 
+ #elif V8_TARGET_ARCH_S390 +diff --git a/src/3rdparty/chromium/v8/src/diagnostics/la64/disasm-la64.cc b/src/3rdparty/chromium/v8/src/diagnostics/la64/disasm-la64.cc +new file mode 100644 +index 0000000000..0d3e8ee89f +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/diagnostics/la64/disasm-la64.cc +@@ -0,0 +1,1841 @@ ++#include ++#include ++#include ++#include ++ ++#if V8_TARGET_ARCH_LA64 ++ ++#include "src/base/platform/platform.h" ++#include "src/codegen/la64/constants-la64.h" ++#include "src/codegen/macro-assembler.h" ++#include "src/diagnostics/disasm.h" ++ ++namespace v8 { ++namespace internal { ++ ++//------------------------------------------------------------------------------ ++ ++// Decoder decodes and disassembles instructions into an output buffer. ++// It uses the converter to convert register names and call destinations into ++// more informative description. ++class Decoder { ++ public: ++ Decoder(const disasm::NameConverter& converter, ++ v8::internal::Vector out_buffer) ++ : converter_(converter), out_buffer_(out_buffer), out_buffer_pos_(0) { ++ out_buffer_[out_buffer_pos_] = '\0'; ++ } ++ ++ ~Decoder() {} ++ ++ // Writes one disassembled instruction into 'buffer' (0-terminated). ++ // Returns the length of the disassembled machine instruction in bytes. ++ int InstructionDecode(byte* instruction); ++ ++ private: ++ // Bottleneck functions to print into the out_buffer. ++ void PrintChar(const char ch); ++ void Print(const char* str); ++ ++ // Printing of common values. ++ void PrintRegister(int reg); ++ void PrintFPURegister(int freg); ++ void PrintFPUStatusRegister(int freg); ++ void PrintRj(Instruction* instr); ++ void PrintRk(Instruction* instr); ++ void PrintRd(Instruction* instr); ++ void PrintFj(Instruction* instr); ++ void PrintFk(Instruction* instr); ++ void PrintFd(Instruction* instr); ++ void PrintFa(Instruction* instr); ++ void PrintSa2(Instruction* instr); ++ void PrintSa3(Instruction* instr); ++ void PrintUi5(Instruction* instr); ++ void PrintUi6(Instruction* instr); ++ void PrintUi12(Instruction* instr); ++ void PrintXi12(Instruction* instr); ++ void PrintMsbw(Instruction* instr); ++ void PrintLsbw(Instruction* instr); ++ void PrintMsbd(Instruction* instr); ++ void PrintLsbd(Instruction* instr); ++ // void PrintCond(Instruction* instr); ++ void PrintSi12(Instruction* instr); ++ void PrintSi14(Instruction* instr); ++ void PrintSi16(Instruction* instr); ++ void PrintSi20(Instruction* instr); ++ void PrintCj(Instruction* instr); ++ void PrintCd(Instruction* instr); ++ void PrintCa(Instruction* instr); ++ void PrintCode(Instruction* instr); ++ void PrintHint5(Instruction* instr); ++ void PrintHint15(Instruction* instr); ++ void PrintPCOffs16(Instruction* instr); ++ void PrintPCOffs21(Instruction* instr); ++ void PrintPCOffs26(Instruction* instr); ++ void PrintOffs16(Instruction* instr); ++ void PrintOffs21(Instruction* instr); ++ void PrintOffs26(Instruction* instr); ++ ++ // Handle formatting of instructions and their options. ++ int FormatRegister(Instruction* instr, const char* option); ++ int FormatFPURegister(Instruction* instr, const char* option); ++ int FormatOption(Instruction* instr, const char* option); ++ void Format(Instruction* instr, const char* format); ++ void Unknown(Instruction* instr); ++ int DecodeBreakInstr(Instruction* instr); ++ ++ // Each of these functions decodes one particular instruction type. 
++ int InstructionDecode(Instruction* instr); ++ void DecodeTypekOp6(Instruction* instr); ++ void DecodeTypekOp7(Instruction* instr); ++ void DecodeTypekOp8(Instruction* instr); ++ void DecodeTypekOp10(Instruction* instr); ++ void DecodeTypekOp12(Instruction* instr); ++ void DecodeTypekOp14(Instruction* instr); ++ int DecodeTypekOp17(Instruction* instr); ++ void DecodeTypekOp22(Instruction* instr); ++ ++ const disasm::NameConverter& converter_; ++ v8::internal::Vector out_buffer_; ++ int out_buffer_pos_; ++ ++ DISALLOW_COPY_AND_ASSIGN(Decoder); ++}; ++ ++// Support for assertions in the Decoder formatting functions. ++#define STRING_STARTS_WITH(string, compare_string) \ ++ (strncmp(string, compare_string, strlen(compare_string)) == 0) ++ ++// Append the ch to the output buffer. ++void Decoder::PrintChar(const char ch) { out_buffer_[out_buffer_pos_++] = ch; } ++ ++// Append the str to the output buffer. ++void Decoder::Print(const char* str) { ++ char cur = *str++; ++ while (cur != '\0' && (out_buffer_pos_ < (out_buffer_.length() - 1))) { ++ PrintChar(cur); ++ cur = *str++; ++ } ++ out_buffer_[out_buffer_pos_] = 0; ++} ++ ++// Print the register name according to the active name converter. ++void Decoder::PrintRegister(int reg) { ++ Print(converter_.NameOfCPURegister(reg)); ++} ++ ++void Decoder::PrintRj(Instruction* instr) { ++ int reg = instr->RjValue(); ++ PrintRegister(reg); ++} ++ ++void Decoder::PrintRk(Instruction* instr) { ++ int reg = instr->RkValue(); ++ PrintRegister(reg); ++} ++ ++void Decoder::PrintRd(Instruction* instr) { ++ int reg = instr->RdValue(); ++ PrintRegister(reg); ++} ++ ++// Print the FPUregister name according to the active name converter. ++void Decoder::PrintFPURegister(int freg) { ++ Print(converter_.NameOfXMMRegister(freg)); ++} ++ ++void Decoder::PrintFj(Instruction* instr) { ++ int freg = instr->FjValue(); ++ PrintFPURegister(freg); ++} ++ ++void Decoder::PrintFk(Instruction* instr) { ++ int freg = instr->FkValue(); ++ PrintFPURegister(freg); ++} ++ ++void Decoder::PrintFd(Instruction* instr) { ++ int freg = instr->FdValue(); ++ PrintFPURegister(freg); ++} ++ ++void Decoder::PrintFa(Instruction* instr) { ++ int freg = instr->FaValue(); ++ PrintFPURegister(freg); ++} ++ ++// Print the integer value of the sa field. 
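The PrintSi12/Si14/Si16/Si20 and PrintOffs* helpers further below recover signed immediates by shifting the bit-field up to bit 31 and arithmetically shifting it back down. A portable standalone equivalent, using the usual XOR/subtract sign-extension trick, is sketched here only to make the intent of those paired shifts explicit:

#include <cstdint>
#include <cstdio>

// Sign-extend the low `bits` bits of `v` to a full int32_t.
// Equivalent to the "(x << (32 - bits)) >> (32 - bits)" idiom used by
// the PrintSi12/Si14/Si16/Si20 and PrintOffs* helpers below.
int32_t SignExtend(uint32_t v, int bits) {
  const uint32_t sign_bit = 1u << (bits - 1);
  return static_cast<int32_t>(((v & (2 * sign_bit - 1)) ^ sign_bit) - sign_bit);
}

int main() {
  // A 12-bit immediate of 0xFFF encodes -1; 0x7FF stays +2047.
  std::printf("%d %d\n", SignExtend(0xFFF, 12), SignExtend(0x7FF, 12));
  // si14 values are additionally scaled by 4 in PrintSi14 (a word offset).
  std::printf("%d\n", SignExtend(0x2000, 14) * 4);  // prints -32768
  return 0;
}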
++void Decoder::PrintSa2(Instruction* instr) { ++ int sa = instr->Sa2Value(); ++ uint32_t opcode = (instr->InstructionBits() >> 18) << 18; ++ if (opcode == ALSL || opcode == ALSL_D) { ++ sa += 1; ++ } ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa); ++} ++ ++void Decoder::PrintSa3(Instruction* instr) { ++ int sa = instr->Sa3Value(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa); ++} ++ ++void Decoder::PrintUi5(Instruction* instr) { ++ int ui = instr->Ui5Value(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", ui); ++} ++ ++void Decoder::PrintUi6(Instruction* instr) { ++ int ui = instr->Ui6Value(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", ui); ++} ++ ++void Decoder::PrintUi12(Instruction* instr) { ++ int ui = instr->Ui12Value(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", ui); ++} ++ ++void Decoder::PrintXi12(Instruction* instr) { ++ int xi = instr->Ui12Value(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", xi); ++} ++ ++void Decoder::PrintMsbd(Instruction* instr) { ++ int msbd = instr->MsbdValue(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", msbd); ++} ++ ++void Decoder::PrintLsbd(Instruction* instr) { ++ int lsbd = instr->LsbdValue(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", lsbd); ++} ++ ++void Decoder::PrintMsbw(Instruction* instr) { ++ int msbw = instr->MsbwValue(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", msbw); ++} ++ ++void Decoder::PrintLsbw(Instruction* instr) { ++ int lsbw = instr->LsbwValue(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", lsbw); ++} ++ ++void Decoder::PrintSi12(Instruction* instr) { ++ int si = ((instr->Si12Value()) << (32 - kSi12Bits)) >> (32 - kSi12Bits); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", si); ++} ++ ++void Decoder::PrintSi14(Instruction* instr) { ++ int si = ((instr->Si14Value()) << (32 - kSi14Bits)) >> (32 - kSi14Bits); ++ si <<= 2; ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", si); ++} ++ ++void Decoder::PrintSi16(Instruction* instr) { ++ int si = ((instr->Si16Value()) << (32 - kSi16Bits)) >> (32 - kSi16Bits); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", si); ++} ++ ++void Decoder::PrintSi20(Instruction* instr) { ++ int si = ((instr->Si20Value()) << (32 - kSi20Bits)) >> (32 - kSi20Bits); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", si); ++} ++ ++void Decoder::PrintCj(Instruction* instr) { ++ int cj = instr->CjValue(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", cj); ++} ++ ++void Decoder::PrintCd(Instruction* instr) { ++ int cd = instr->CdValue(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", cd); ++} ++ ++void Decoder::PrintCa(Instruction* instr) { ++ int ca = instr->CaValue(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", ca); ++} ++ ++void Decoder::PrintCode(Instruction* instr) { ++ int code = instr->CodeValue(); ++ out_buffer_pos_ += ++ SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x(%u)", code, code); ++} ++ ++void Decoder::PrintHint5(Instruction* instr) { ++ int hint = instr->Hint5Value(); ++ out_buffer_pos_ += ++ SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x(%u)", hint, hint); ++} ++ ++void Decoder::PrintHint15(Instruction* instr) { ++ int hint = instr->Hint15Value(); ++ out_buffer_pos_ += ++ SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x(%u)", 
hint, hint); ++} ++ ++void Decoder::PrintPCOffs16(Instruction* instr) { ++ int n_bits = 2; ++ int offs = instr->Offs16Value(); ++ int target = ((offs << n_bits) << (32 - kOffsLowBits - n_bits)) >> ++ (32 - kOffsLowBits - n_bits); ++ out_buffer_pos_ += SNPrintF( ++ out_buffer_ + out_buffer_pos_, "%s", ++ converter_.NameOfAddress(reinterpret_cast(instr) + target)); ++} ++ ++void Decoder::PrintPCOffs21(Instruction* instr) { ++ int n_bits = 2; ++ int offs = instr->Offs21Value(); ++ int target = ++ ((offs << n_bits) << (32 - kOffsLowBits - kOffs21HighBits - n_bits)) >> ++ (32 - kOffsLowBits - kOffs21HighBits - n_bits); ++ out_buffer_pos_ += SNPrintF( ++ out_buffer_ + out_buffer_pos_, "%s", ++ converter_.NameOfAddress(reinterpret_cast(instr) + target)); ++} ++ ++void Decoder::PrintPCOffs26(Instruction* instr) { ++ int n_bits = 2; ++ int offs = instr->Offs26Value(); ++ int target = ++ ((offs << n_bits) << (32 - kOffsLowBits - kOffs26HighBits - n_bits)) >> ++ (32 - kOffsLowBits - kOffs26HighBits - n_bits); ++ out_buffer_pos_ += SNPrintF( ++ out_buffer_ + out_buffer_pos_, "%s", ++ converter_.NameOfAddress(reinterpret_cast(instr) + target)); ++} ++ ++void Decoder::PrintOffs16(Instruction* instr) { ++ int offs = instr->Offs16Value(); ++ offs <<= (32 - kOffsLowBits); ++ offs >>= (32 - kOffsLowBits); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", offs); ++} ++ ++void Decoder::PrintOffs21(Instruction* instr) { ++ int offs = instr->Offs21Value(); ++ offs <<= (32 - kOffsLowBits - kOffs21HighBits); ++ offs >>= (32 - kOffsLowBits - kOffs21HighBits); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", offs); ++} ++ ++void Decoder::PrintOffs26(Instruction* instr) { ++ int offs = instr->Offs26Value(); ++ offs <<= (32 - kOffsLowBits - kOffs26HighBits); ++ offs >>= (32 - kOffsLowBits - kOffs26HighBits); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", offs); ++} ++ ++// Handle all register based formatting in this function to reduce the ++// complexity of FormatOption. ++int Decoder::FormatRegister(Instruction* instr, const char* format) { ++ DCHECK_EQ(format[0], 'r'); ++ if (format[1] == 'j') { // 'rj: Rj register. ++ int reg = instr->RjValue(); ++ PrintRegister(reg); ++ return 2; ++ } else if (format[1] == 'k') { // 'rk: rk register. ++ int reg = instr->RkValue(); ++ PrintRegister(reg); ++ return 2; ++ } else if (format[1] == 'd') { // 'rd: rd register. ++ int reg = instr->RdValue(); ++ PrintRegister(reg); ++ return 2; ++ } ++ UNREACHABLE(); ++ return 0; ++} ++ ++// Handle all FPUregister based formatting in this function to reduce the ++// complexity of FormatOption. ++int Decoder::FormatFPURegister(Instruction* instr, const char* format) { ++ DCHECK_EQ(format[0], 'f'); ++ if (format[1] == 'j') { // 'fj: fj register. ++ int reg = instr->FjValue(); ++ PrintFPURegister(reg); ++ return 2; ++ } else if (format[1] == 'k') { // 'fk: fk register. ++ int reg = instr->FkValue(); ++ PrintFPURegister(reg); ++ return 2; ++ } else if (format[1] == 'd') { // 'fd: fd register. ++ int reg = instr->FdValue(); ++ PrintFPURegister(reg); ++ return 2; ++ } else if (format[1] == 'a') { // 'fa: fa register. ++ int reg = instr->FaValue(); ++ PrintFPURegister(reg); ++ return 2; ++ } ++ UNREACHABLE(); ++ return 0; ++} ++ ++// FormatOption takes a formatting string and interprets it based on ++// the current instructions. The format string points to the first ++// character of the option string (the option escape has already been ++// consumed by the caller.) 
FormatOption returns the number of ++// characters that were consumed from the formatting string. ++int Decoder::FormatOption(Instruction* instr, const char* format) { ++ switch (format[0]) { ++ case 'c': { ++ switch (format[1]) { ++ case 'a': ++ DCHECK(STRING_STARTS_WITH(format, "ca")); ++ PrintCa(instr); ++ return 2; ++ case 'd': ++ DCHECK(STRING_STARTS_WITH(format, "cd")); ++ PrintCd(instr); ++ return 2; ++ case 'j': ++ DCHECK(STRING_STARTS_WITH(format, "cj")); ++ PrintCj(instr); ++ return 2; ++ case 'o': ++ DCHECK(STRING_STARTS_WITH(format, "code")); ++ PrintCode(instr); ++ return 4; ++ } ++ } ++ case 'f': { ++ return FormatFPURegister(instr, format); ++ } ++ case 'h': { ++ if (format[4] == '5') { ++ DCHECK(STRING_STARTS_WITH(format, "hint5")); ++ PrintHint5(instr); ++ return 5; ++ } else if (format[4] == '1') { ++ DCHECK(STRING_STARTS_WITH(format, "hint15")); ++ PrintHint15(instr); ++ return 6; ++ } ++ break; ++ } ++ case 'l': { ++ switch (format[3]) { ++ case 'w': ++ DCHECK(STRING_STARTS_WITH(format, "lsbw")); ++ PrintLsbw(instr); ++ return 4; ++ case 'd': ++ DCHECK(STRING_STARTS_WITH(format, "lsbd")); ++ PrintLsbd(instr); ++ return 4; ++ default: ++ return 0; ++ } ++ } ++ case 'm': { ++ if (format[3] == 'w') { ++ DCHECK(STRING_STARTS_WITH(format, "msbw")); ++ PrintMsbw(instr); ++ } else if (format[3] == 'd') { ++ DCHECK(STRING_STARTS_WITH(format, "msbd")); ++ PrintMsbd(instr); ++ } ++ return 4; ++ } ++ case 'o': { ++ if (format[1] == 'f') { ++ if (format[4] == '1') { ++ DCHECK(STRING_STARTS_WITH(format, "offs16")); ++ PrintOffs16(instr); ++ return 6; ++ } else if (format[4] == '2') { ++ if (format[5] == '1') { ++ DCHECK(STRING_STARTS_WITH(format, "offs21")); ++ PrintOffs21(instr); ++ return 6; ++ } else if (format[5] == '6') { ++ DCHECK(STRING_STARTS_WITH(format, "offs26")); ++ PrintOffs26(instr); ++ return 6; ++ } ++ } ++ } ++ break; ++ } ++ case 'p': { ++ if (format[6] == '1') { ++ DCHECK(STRING_STARTS_WITH(format, "pcoffs16")); ++ PrintPCOffs16(instr); ++ return 8; ++ } else if (format[6] == '2') { ++ if (format[7] == '1') { ++ DCHECK(STRING_STARTS_WITH(format, "pcoffs21")); ++ PrintPCOffs21(instr); ++ return 8; ++ } else if (format[7] == '6') { ++ DCHECK(STRING_STARTS_WITH(format, "pcoffs26")); ++ PrintPCOffs26(instr); ++ return 8; ++ } ++ } ++ break; ++ } ++ case 'r': { ++ return FormatRegister(instr, format); ++ break; ++ } ++ case 's': { ++ switch (format[1]) { ++ case 'a': ++ if (format[2] == '2') { ++ DCHECK(STRING_STARTS_WITH(format, "sa2")); ++ PrintSa2(instr); ++ } else if (format[2] == '3') { ++ DCHECK(STRING_STARTS_WITH(format, "sa3")); ++ PrintSa3(instr); ++ } ++ return 3; ++ case 'i': ++ if (format[2] == '2') { ++ DCHECK(STRING_STARTS_WITH(format, "si20")); ++ PrintSi20(instr); ++ return 4; ++ } else if (format[2] == '1') { ++ switch (format[3]) { ++ case '2': ++ DCHECK(STRING_STARTS_WITH(format, "si12")); ++ PrintSi12(instr); ++ return 4; ++ case '4': ++ DCHECK(STRING_STARTS_WITH(format, "si14")); ++ PrintSi14(instr); ++ return 4; ++ case '6': ++ DCHECK(STRING_STARTS_WITH(format, "si16")); ++ PrintSi16(instr); ++ return 4; ++ default: ++ break; ++ } ++ } ++ break; ++ default: ++ break; ++ } ++ break; ++ } ++ case 'u': { ++ if (format[2] == '5') { ++ DCHECK(STRING_STARTS_WITH(format, "ui5")); ++ PrintUi5(instr); ++ return 3; ++ } else if (format[2] == '6') { ++ DCHECK(STRING_STARTS_WITH(format, "ui6")); ++ PrintUi6(instr); ++ return 3; ++ } else if (format[2] == '1') { ++ DCHECK(STRING_STARTS_WITH(format, "ui12")); ++ PrintUi12(instr); ++ return 4; ++ } ++ break; ++ 
} ++ case 'x': { ++ DCHECK(STRING_STARTS_WITH(format, "xi12")); ++ PrintXi12(instr); ++ return 4; ++ } ++ default: ++ UNREACHABLE(); ++ } ++ return 0; ++} ++ ++// Format takes a formatting string for a whole instruction and prints it into ++// the output buffer. All escaped options are handed to FormatOption to be ++// parsed further. ++void Decoder::Format(Instruction* instr, const char* format) { ++ char cur = *format++; ++ while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) { ++ if (cur == '\'') { // Single quote is used as the formatting escape. ++ format += FormatOption(instr, format); ++ } else { ++ out_buffer_[out_buffer_pos_++] = cur; ++ } ++ cur = *format++; ++ } ++ out_buffer_[out_buffer_pos_] = '\0'; ++} ++ ++// For currently unimplemented decodings the disassembler calls Unknown(instr) ++// which will just print "unknown" of the instruction bits. ++void Decoder::Unknown(Instruction* instr) { Format(instr, "unknown"); } ++ ++int Decoder::DecodeBreakInstr(Instruction* instr) { ++ // This is already known to be BREAK instr, just extract the code. ++ /*if (instr->Bits(14, 0) == static_cast(kMaxStopCode)) { ++ // This is stop(msg). ++ Format(instr, "break, code: 'code"); ++ out_buffer_pos_ += SNPrintF( ++ out_buffer_ + out_buffer_pos_, "\n%p %08" PRIx64, ++ static_cast(reinterpret_cast(instr + kInstrSize)), ++ reinterpret_cast( ++ *reinterpret_cast(instr + kInstrSize))); ++ // Size 3: the break_ instr, plus embedded 64-bit char pointer. ++ return 3 * kInstrSize; ++ } else { ++ Format(instr, "break, code: 'code"); ++ return kInstrSize; ++ }*/ ++ Format(instr, "break code: 'code"); ++ return kInstrSize; ++} //=================================================== ++ ++void Decoder::DecodeTypekOp6(Instruction* instr) { ++ switch (instr->Bits(31, 26) << 26) { ++ case ADDU16I_D: ++ Format(instr, "addu16i.d 'rd, 'rj, 'si16"); ++ break; ++ case BEQZ: ++ Format(instr, "beqz 'rj, 'offs21 -> 'pcoffs21"); ++ break; ++ case BNEZ: ++ Format(instr, "bnez 'rj, 'offs21 -> 'pcoffs21"); ++ break; ++ case BCZ: ++ if (instr->Bit(8)) ++ Format(instr, "bcnez fcc'cj, 'offs21 -> 'pcoffs21"); ++ else ++ Format(instr, "bceqz fcc'cj, 'offs21 -> 'pcoffs21"); ++ break; ++ case JIRL: ++ Format(instr, "jirl 'rd, 'rj, 'offs16"); ++ break; ++ case B: ++ Format(instr, "b 'offs26 -> 'pcoffs26"); ++ break; ++ case BL: ++ Format(instr, "bl 'offs26 -> 'pcoffs26"); ++ break; ++ case BEQ: ++ Format(instr, "beq 'rj, 'rd, 'offs16 -> 'pcoffs16"); ++ break; ++ case BNE: ++ Format(instr, "bne 'rj, 'rd, 'offs16 -> 'pcoffs16"); ++ break; ++ case BLT: ++ Format(instr, "blt 'rj, 'rd, 'offs16 -> 'pcoffs16"); ++ break; ++ case BGE: ++ Format(instr, "bge 'rj, 'rd, 'offs16 -> 'pcoffs16"); ++ break; ++ case BLTU: ++ Format(instr, "bltu 'rj, 'rd, 'offs16 -> 'pcoffs16"); ++ break; ++ case BGEU: ++ Format(instr, "bgeu 'rj, 'rd, 'offs16 -> 'pcoffs16"); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Decoder::DecodeTypekOp7(Instruction* instr) { ++ switch (instr->Bits(31, 25) << 25) { ++ case LU12I_W: ++ Format(instr, "lu12i.w 'rd, 'si20"); ++ break; ++ case LU32I_D: ++ Format(instr, "lu32i.d 'rd, 'si20"); ++ break; ++ case PCADDI: ++ Format(instr, "pcaddi 'rd, 'si20"); ++ break; ++ case PCALAU12I: ++ Format(instr, "pcalau12i 'rd, 'si20"); ++ break; ++ case PCADDU12I: ++ Format(instr, "pcaddu12i 'rd, 'si20"); ++ break; ++ case PCADDU18I: ++ Format(instr, "pcaddu18i 'rd, 'si20"); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Decoder::DecodeTypekOp8(Instruction* instr) { ++ switch (instr->Bits(31, 24) << 
24) { ++ case LDPTR_W: ++ Format(instr, "ldptr.w 'rd, 'rj, 'si14"); ++ break; ++ case STPTR_W: ++ Format(instr, "stptr.w 'rd, 'rj, 'si14"); ++ break; ++ case LDPTR_D: ++ Format(instr, "ldptr.d 'rd, 'rj, 'si14"); ++ break; ++ case STPTR_D: ++ Format(instr, "stptr.d 'rd, 'rj, 'si14"); ++ break; ++ case LL_W: ++ Format(instr, "ll.w 'rd, 'rj, 'si14"); ++ break; ++ case SC_W: ++ Format(instr, "sc.w 'rd, 'rj, 'si14"); ++ break; ++ case LL_D: ++ Format(instr, "ll.d 'rd, 'rj, 'si14"); ++ break; ++ case SC_D: ++ Format(instr, "sc.d 'rd, 'rj, 'si14"); ++ break; ++ case CSR: ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Decoder::DecodeTypekOp10(Instruction* instr) { ++ switch (instr->Bits(31, 22) << 22) { ++ case BSTR_W: { ++ if (instr->Bit(21) != 0) { ++ if (instr->Bit(15) == 0) { ++ Format(instr, "bstrins.w 'rd, 'rj, 'msbw, 'lsbw"); ++ } else { ++ Format(instr, "bstrpick.w 'rd, 'rj, 'msbw, 'lsbw"); ++ } ++ } ++ break; ++ } ++ case BSTRINS_D: ++ Format(instr, "bstrins.d 'rd, 'rj, 'msbd, 'lsbd"); ++ break; ++ case BSTRPICK_D: ++ Format(instr, "bstrpick.d 'rd, 'rj, 'msbd, 'lsbd"); ++ break; ++ case SLTI: ++ Format(instr, "slti 'rd, 'rj, 'si12"); ++ break; ++ case SLTUI: ++ Format(instr, "sltui 'rd, 'rj, 'si12"); ++ break; ++ case ADDI_W: ++ Format(instr, "addi.w 'rd, 'rj, 'si12"); ++ break; ++ case ADDI_D: ++ Format(instr, "addi.d 'rd, 'rj, 'si12"); ++ break; ++ case LU52I_D: ++ Format(instr, "lu52i.d 'rd, 'rj, 'si12"); ++ break; ++ case ANDI: ++ Format(instr, "andi 'rd, 'rj, 'xi12"); ++ break; ++ case ORI: ++ Format(instr, "ori 'rd, 'rj, 'xi12"); ++ break; ++ case XORI: ++ Format(instr, "xori 'rd, 'rj, 'xi12"); ++ break; ++ case LD_B: ++ Format(instr, "ld.b 'rd, 'rj, 'si12"); ++ break; ++ case LD_H: ++ Format(instr, "ld.h 'rd, 'rj, 'si12"); ++ break; ++ case LD_W: ++ Format(instr, "ld.w 'rd, 'rj, 'si12"); ++ break; ++ case LD_D: ++ Format(instr, "ld.d 'rd, 'rj, 'si12"); ++ break; ++ case ST_B: ++ Format(instr, "st.b 'rd, 'rj, 'si12"); ++ break; ++ case ST_H: ++ Format(instr, "st.h 'rd, 'rj, 'si12"); ++ break; ++ case ST_W: ++ Format(instr, "st.w 'rd, 'rj, 'si12"); ++ break; ++ case ST_D: ++ Format(instr, "st.d 'rd, 'rj, 'si12"); ++ break; ++ case LD_BU: ++ Format(instr, "ld.bu 'rd, 'rj, 'si12"); ++ break; ++ case LD_HU: ++ Format(instr, "ld.hu 'rd, 'rj, 'si12"); ++ break; ++ case LD_WU: ++ Format(instr, "ld.wu 'rd, 'rj, 'si12"); ++ break; ++ case PRELD: ++ Format(instr, "preld 'hint5, 'rj, 'si12"); ++ break; ++ case FLD_S: ++ Format(instr, "fld.s 'fd, 'rj, 'si12"); ++ break; ++ case FST_S: ++ Format(instr, "fst.s 'fd, 'rj, 'si12"); ++ break; ++ case FLD_D: ++ Format(instr, "fld.d 'fd, 'rj, 'si12"); ++ break; ++ case FST_D: ++ Format(instr, "fst.d 'fd, 'rj, 'si12"); ++ break; ++ case CACHE: ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Decoder::DecodeTypekOp12(Instruction* instr) { ++ switch (instr->Bits(31, 20) << 20) { ++ case FMADD_S: ++ Format(instr, "fmadd.s 'fd, 'fj, 'fk, 'fa"); ++ break; ++ case FMADD_D: ++ Format(instr, "fmadd.d 'fd, 'fj, 'fk, 'fa"); ++ break; ++ case FMSUB_S: ++ Format(instr, "fmsub.s 'fd, 'fj, 'fk, 'fa"); ++ break; ++ case FMSUB_D: ++ Format(instr, "fmsub.d 'fd, 'fj, 'fk, 'fa"); ++ break; ++ case FNMADD_S: ++ Format(instr, "fnmadd.s 'fd, 'fj, 'fk, 'fa"); ++ break; ++ case FNMADD_D: ++ Format(instr, "fnmadd.d 'fd, 'fj, 'fk, 'fa"); ++ break; ++ case FNMSUB_S: ++ Format(instr, "fnmsub.s 'fd, 'fj, 'fk, 'fa"); ++ break; ++ case FNMSUB_D: ++ Format(instr, "fnmsub.d 'fd, 'fj, 'fk, 'fa"); ++ break; ++ case FCMP_COND_S: ++ switch (instr->Bits(19, 15)) { ++ case CAF: ++ 
Format(instr, "fcmp.caf.s fcc'cd, 'fj, 'fk"); ++ break; ++ case SAF: ++ Format(instr, "fcmp.saf.s fcc'cd, 'fj, 'fk"); ++ break; ++ case CLT: ++ Format(instr, "fcmp.clt.s fcc'cd, 'fj, 'fk"); ++ break; ++ case CEQ: ++ Format(instr, "fcmp.ceq.s fcc'cd, 'fj, 'fk"); ++ break; ++ case SEQ: ++ Format(instr, "fcmp.seq.s fcc'cd, 'fj, 'fk"); ++ break; ++ case CLE: ++ Format(instr, "fcmp.cle.s fcc'cd, 'fj, 'fk"); ++ break; ++ case SLE: ++ Format(instr, "fcmp.sle.s fcc'cd, 'fj, 'fk"); ++ break; ++ case CUN: ++ Format(instr, "fcmp.cun.s fcc'cd, 'fj, 'fk"); ++ break; ++ case SUN: ++ Format(instr, "fcmp.sun.s fcc'cd, 'fj, 'fk"); ++ break; ++ case CULT: ++ Format(instr, "fcmp.cult.s fcc'cd, 'fj, 'fk"); ++ break; ++ case SULT: ++ Format(instr, "fcmp.sult.s fcc'cd, 'fj, 'fk"); ++ break; ++ case CUEQ: ++ Format(instr, "fcmp.cueq.s fcc'cd, 'fj, 'fk"); ++ break; ++ case SUEQ: ++ Format(instr, "fcmp.sueq.s fcc'cd, 'fj, 'fk"); ++ break; ++ case CULE: ++ Format(instr, "fcmp.cule.s fcc'cd, 'fj, 'fk"); ++ break; ++ case SULE: ++ Format(instr, "fcmp.sule.s fcc'cd, 'fj, 'fk"); ++ break; ++ case CNE: ++ Format(instr, "fcmp.cne.s fcc'cd, 'fj, 'fk"); ++ break; ++ case SNE: ++ Format(instr, "fcmp.sne.s fcc'cd, 'fj, 'fk"); ++ break; ++ case COR: ++ Format(instr, "fcmp.cor.s fcc'cd, 'fj, 'fk"); ++ break; ++ case SOR: ++ Format(instr, "fcmp.sor.s fcc'cd, 'fj, 'fk"); ++ break; ++ case CUNE: ++ Format(instr, "fcmp.cune.s fcc'cd, 'fj, 'fk"); ++ break; ++ case SUNE: ++ Format(instr, "fcmp.sune.s fcc'cd, 'fj, 'fk"); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ break; ++ case FCMP_COND_D: ++ switch (instr->Bits(19, 15)) { ++ case CAF: ++ Format(instr, "fcmp.caf.d fcc'cd, 'fj, 'fk"); ++ break; ++ case SAF: ++ Format(instr, "fcmp.saf.d fcc'cd, 'fj, 'fk"); ++ break; ++ case CLT: ++ Format(instr, "fcmp.clt.d fcc'cd, 'fj, 'fk"); ++ break; ++ case CEQ: ++ Format(instr, "fcmp.ceq.d fcc'cd, 'fj, 'fk"); ++ break; ++ case SEQ: ++ Format(instr, "fcmp.seq.d fcc'cd, 'fj, 'fk"); ++ break; ++ case CLE: ++ Format(instr, "fcmp.cle.d fcc'cd, 'fj, 'fk"); ++ break; ++ case SLE: ++ Format(instr, "fcmp.sle.d fcc'cd, 'fj, 'fk"); ++ break; ++ case CUN: ++ Format(instr, "fcmp.cun.d fcc'cd, 'fj, 'fk"); ++ break; ++ case SUN: ++ Format(instr, "fcmp.sun.d fcc'cd, 'fj, 'fk"); ++ break; ++ case CULT: ++ Format(instr, "fcmp.cult.d fcc'cd, 'fj, 'fk"); ++ break; ++ case SULT: ++ Format(instr, "fcmp.sult.d fcc'cd, 'fj, 'fk"); ++ break; ++ case CUEQ: ++ Format(instr, "fcmp.cueq.d fcc'cd, 'fj, 'fk"); ++ break; ++ case SUEQ: ++ Format(instr, "fcmp.sueq.d fcc'cd, 'fj, 'fk"); ++ break; ++ case CULE: ++ Format(instr, "fcmp.cule.d fcc'cd, 'fj, 'fk"); ++ break; ++ case SULE: ++ Format(instr, "fcmp.sule.d fcc'cd, 'fj, 'fk"); ++ break; ++ case CNE: ++ Format(instr, "fcmp.cne.d fcc'cd, 'fj, 'fk"); ++ break; ++ case SNE: ++ Format(instr, "fcmp.sne.d fcc'cd, 'fj, 'fk"); ++ break; ++ case COR: ++ Format(instr, "fcmp.cor.d fcc'cd, 'fj, 'fk"); ++ break; ++ case SOR: ++ Format(instr, "fcmp.sor.d fcc'cd, 'fj, 'fk"); ++ break; ++ case CUNE: ++ Format(instr, "fcmp.cune.d fcc'cd, 'fj, 'fk"); ++ break; ++ case SUNE: ++ Format(instr, "fcmp.sune.d fcc'cd, 'fj, 'fk"); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ break; ++ case FSEL: ++ Format(instr, "fsel 'fd, 'fj, 'fk, fcc'ca"); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Decoder::DecodeTypekOp14(Instruction* instr) { ++ switch (instr->Bits(31, 18) << 18) { ++ case ALSL: ++ if (instr->Bit(17)) ++ Format(instr, "alsl.wu 'rd, 'rj, 'rk, 'sa2"); ++ else ++ Format(instr, "alsl.w 'rd, 'rj, 'rk, 'sa2"); ++ break; ++ case 
BYTEPICK_W: ++ Format(instr, "bytepick.w 'rd, 'rj, 'rk, 'sa2"); ++ break; ++ case BYTEPICK_D: ++ Format(instr, "bytepick.d 'rd, 'rj, 'rk, 'sa3"); ++ break; ++ case ALSL_D: ++ Format(instr, "alsl.d 'rd, 'rj, 'rk, 'sa2"); ++ break; ++ case SLLI: ++ if (instr->Bit(16)) ++ Format(instr, "slli.d 'rd, 'rj, 'ui6"); ++ else ++ Format(instr, "slli.w 'rd, 'rj, 'ui5"); ++ break; ++ case SRLI: ++ if (instr->Bit(16)) ++ Format(instr, "srli.d 'rd, 'rj, 'ui6"); ++ else ++ Format(instr, "srli.w 'rd, 'rj, 'ui5"); ++ break; ++ case SRAI: ++ if (instr->Bit(16)) ++ Format(instr, "srai.d 'rd, 'rj, 'ui6"); ++ else ++ Format(instr, "srai.w 'rd, 'rj, 'ui5"); ++ break; ++ case ROTRI: ++ if (instr->Bit(16)) ++ Format(instr, "rotri.d 'rd, 'rj, 'ui6"); ++ else ++ Format(instr, "rotri.w 'rd, 'rj, 'ui5"); ++ break; ++ case LDDIR: ++ case LDPTE: ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++int Decoder::DecodeTypekOp17(Instruction* instr) { ++ switch (instr->Bits(31, 15) << 15) { ++ case ADD_W: ++ Format(instr, "add.w 'rd, 'rj, 'rk"); ++ break; ++ case ADD_D: ++ Format(instr, "add.d 'rd, 'rj, 'rk"); ++ break; ++ case SUB_W: ++ Format(instr, "sub.w 'rd, 'rj, 'rk"); ++ break; ++ case SUB_D: ++ Format(instr, "sub.d 'rd, 'rj, 'rk"); ++ break; ++ case SLT: ++ Format(instr, "slt 'rd, 'rj, 'rk"); ++ break; ++ case SLTU: ++ Format(instr, "sltu 'rd, 'rj, 'rk"); ++ break; ++ case MASKEQZ: ++ Format(instr, "maskeqz 'rd, 'rj, 'rk"); ++ break; ++ case MASKNEZ: ++ Format(instr, "masknez 'rd, 'rj, 'rk"); ++ break; ++ case NOR: ++ Format(instr, "nor 'rd, 'rj, 'rk"); ++ break; ++ case AND: ++ Format(instr, "and 'rd, 'rj, 'rk"); ++ break; ++ case OR: ++ Format(instr, "or 'rd, 'rj, 'rk"); ++ break; ++ case XOR: ++ Format(instr, "xor 'rd, 'rj, 'rk"); ++ break; ++ case ORN: ++ Format(instr, "orn 'rd, 'rj, 'rk"); ++ break; ++ case ANDN: ++ Format(instr, "andn 'rd, 'rj, 'rk"); ++ break; ++ case SLL_W: ++ Format(instr, "sll.w 'rd, 'rj, 'rk"); ++ break; ++ case SRL_W: ++ Format(instr, "srl.w 'rd, 'rj, 'rk"); ++ break; ++ case SRA_W: ++ Format(instr, "sra.w 'rd, 'rj, 'rk"); ++ break; ++ case SLL_D: ++ Format(instr, "sll.d 'rd, 'rj, 'rk"); ++ break; ++ case SRL_D: ++ Format(instr, "srl.d 'rd, 'rj, 'rk"); ++ break; ++ case SRA_D: ++ Format(instr, "sra.d 'rd, 'rj, 'rk"); ++ break; ++ case ROTR_D: ++ Format(instr, "rotr.d 'rd, 'rj, 'rk"); ++ break; ++ case ROTR_W: ++ Format(instr, "rotr.w 'rd, 'rj, 'rk"); ++ break; ++ case MUL_W: ++ Format(instr, "mul.w 'rd, 'rj, 'rk"); ++ break; ++ case MULH_W: ++ Format(instr, "mulh.w 'rd, 'rj, 'rk"); ++ break; ++ case MULH_WU: ++ Format(instr, "mulh.wu 'rd, 'rj, 'rk"); ++ break; ++ case MUL_D: ++ Format(instr, "mul.d 'rd, 'rj, 'rk"); ++ break; ++ case MULH_D: ++ Format(instr, "mulh.d 'rd, 'rj, 'rk"); ++ break; ++ case MULH_DU: ++ Format(instr, "mulh.du 'rd, 'rj, 'rk"); ++ break; ++ case MULW_D_W: ++ Format(instr, "mulw.d.w 'rd, 'rj, 'rk"); ++ break; ++ case MULW_D_WU: ++ Format(instr, "mulw.d.wu 'rd, 'rj, 'rk"); ++ break; ++ case DIV_W: ++ Format(instr, "div.w 'rd, 'rj, 'rk"); ++ break; ++ case MOD_W: ++ Format(instr, "mod.w 'rd, 'rj, 'rk"); ++ break; ++ case DIV_WU: ++ Format(instr, "div.wu 'rd, 'rj, 'rk"); ++ break; ++ case MOD_WU: ++ Format(instr, "mod.wu 'rd, 'rj, 'rk"); ++ break; ++ case DIV_D: ++ Format(instr, "div.d 'rd, 'rj, 'rk"); ++ break; ++ case MOD_D: ++ Format(instr, "mod.d 'rd, 'rj, 'rk"); ++ break; ++ case DIV_DU: ++ Format(instr, "div.du 'rd, 'rj, 'rk"); ++ break; ++ case MOD_DU: ++ Format(instr, "mod.du 'rd, 'rj, 'rk"); ++ break; ++ case BREAK: ++ return DecodeBreakInstr(instr); ++ case FADD_S: ++ 
Format(instr, "fadd.s 'fd, 'fj, 'fk"); ++ break; ++ case FADD_D: ++ Format(instr, "fadd.d 'fd, 'fj, 'fk"); ++ break; ++ case FSUB_S: ++ Format(instr, "fsub.s 'fd, 'fj, 'fk"); ++ break; ++ case FSUB_D: ++ Format(instr, "fsub.d 'fd, 'fj, 'fk"); ++ break; ++ case FMUL_S: ++ Format(instr, "fmul.s 'fd, 'fj, 'fk"); ++ break; ++ case FMUL_D: ++ Format(instr, "fmul.d 'fd, 'fj, 'fk"); ++ break; ++ case FDIV_S: ++ Format(instr, "fdiv.s 'fd, 'fj, 'fk"); ++ break; ++ case FDIV_D: ++ Format(instr, "fdiv.d 'fd, 'fj, 'fk"); ++ break; ++ case FMAX_S: ++ Format(instr, "fmax.s 'fd, 'fj, 'fk"); ++ break; ++ case FMAX_D: ++ Format(instr, "fmax.d 'fd, 'fj, 'fk"); ++ break; ++ case FMIN_S: ++ Format(instr, "fmin.s 'fd, 'fj, 'fk"); ++ break; ++ case FMIN_D: ++ Format(instr, "fmin.d 'fd, 'fj, 'fk"); ++ break; ++ case FMAXA_S: ++ Format(instr, "fmaxa.s 'fd, 'fj, 'fk"); ++ break; ++ case FMAXA_D: ++ Format(instr, "fmaxa.d 'fd, 'fj, 'fk"); ++ break; ++ case FMINA_S: ++ Format(instr, "fmina.s 'fd, 'fj, 'fk"); ++ break; ++ case FMINA_D: ++ Format(instr, "fmina.d 'fd, 'fj, 'fk"); ++ break; ++ case LDX_B: ++ Format(instr, "ldx.b 'rd, 'rj, 'rk"); ++ break; ++ case LDX_H: ++ Format(instr, "ldx.h 'rd, 'rj, 'rk"); ++ break; ++ case LDX_W: ++ Format(instr, "ldx.w 'rd, 'rj, 'rk"); ++ break; ++ case LDX_D: ++ Format(instr, "ldx.d 'rd, 'rj, 'rk"); ++ break; ++ case STX_B: ++ Format(instr, "stx.b 'rd, 'rj, 'rk"); ++ break; ++ case STX_H: ++ Format(instr, "stx.h 'rd, 'rj, 'rk"); ++ break; ++ case STX_W: ++ Format(instr, "stx.w 'rd, 'rj, 'rk"); ++ break; ++ case STX_D: ++ Format(instr, "stx.d 'rd, 'rj, 'rk"); ++ break; ++ case LDX_BU: ++ Format(instr, "ldx.bu 'rd, 'rj, 'rk"); ++ break; ++ case LDX_HU: ++ Format(instr, "ldx.hu 'rd, 'rj, 'rk"); ++ break; ++ case LDX_WU: ++ Format(instr, "ldx.wu 'rd, 'rj, 'rk"); ++ break; ++ case PRELDX: ++ Format(instr, "preldx 'hint5, 'rj, 'rk"); ++ break; ++ case FLDX_S: ++ Format(instr, "fldx.s 'fd, 'rj, 'rk"); ++ break; ++ case FLDX_D: ++ Format(instr, "fldx.d 'fd, 'rj, 'rk"); ++ break; ++ case FSTX_S: ++ Format(instr, "fstx.s 'fd, 'rj, 'rk"); ++ break; ++ case FSTX_D: ++ Format(instr, "fstx.d 'fd, 'rj, 'rk"); ++ break; ++ case ASRTLE_D: ++ Format(instr, "asrtle.d 'rj, 'rk"); ++ break; ++ case ASRTGT_D: ++ Format(instr, "asrtgt.d 'rj, 'rk"); ++ break; ++ case SYSCALL: ++ Format(instr, "syscall code 'code"); ++ break; ++ case HYPCALL: ++ Format(instr, "hypcall code 'code"); ++ break; ++ case AMSWAP_W: ++ Format(instr, "amswap.w 'rd, 'rk, 'rj"); ++ break; ++ case AMSWAP_D: ++ Format(instr, "amswap.d 'rd, 'rk, 'rj"); ++ break; ++ case AMADD_W: ++ Format(instr, "amadd.w 'rd, 'rk, 'rj"); ++ break; ++ case AMADD_D: ++ Format(instr, "amadd.d 'rd, 'rk, 'rj"); ++ break; ++ case AMAND_W: ++ Format(instr, "amand.w 'rd, 'rk, 'rj"); ++ break; ++ case AMAND_D: ++ Format(instr, "amand.d 'rd, 'rk, 'rj"); ++ break; ++ case AMOR_W: ++ Format(instr, "amor.w 'rd, 'rk, 'rj"); ++ break; ++ case AMOR_D: ++ Format(instr, "amor.d 'rd, 'rk, 'rj"); ++ break; ++ case AMXOR_W: ++ Format(instr, "amxor.w 'rd, 'rk, 'rj"); ++ break; ++ case AMXOR_D: ++ Format(instr, "amxor.d 'rd, 'rk, 'rj"); ++ break; ++ case AMMAX_W: ++ Format(instr, "ammax.w 'rd, 'rk, 'rj"); ++ break; ++ case AMMAX_D: ++ Format(instr, "ammax.d 'rd, 'rk, 'rj"); ++ break; ++ case AMMIN_W: ++ Format(instr, "ammin.w 'rd, 'rk, 'rj"); ++ break; ++ case AMMIN_D: ++ Format(instr, "ammin.d 'rd, 'rk, 'rj"); ++ break; ++ case AMMAX_WU: ++ Format(instr, "ammax.wu 'rd, 'rk, 'rj"); ++ break; ++ case AMMAX_DU: ++ Format(instr, "ammax.du 'rd, 'rk, 'rj"); ++ break; ++ case 
AMMIN_WU: ++ Format(instr, "ammin.wu 'rd, 'rk, 'rj"); ++ break; ++ case AMMIN_DU: ++ Format(instr, "ammin.du 'rd, 'rk, 'rj"); ++ break; ++ case AMSWAP_DB_W: ++ Format(instr, "amswap_db.w 'rd, 'rk, 'rj"); ++ break; ++ case AMSWAP_DB_D: ++ Format(instr, "amswap_db.d 'rd, 'rk, 'rj"); ++ break; ++ case AMADD_DB_W: ++ Format(instr, "amadd_db.w 'rd, 'rk, 'rj"); ++ break; ++ case AMADD_DB_D: ++ Format(instr, "amadd_db.d 'rd, 'rk, 'rj"); ++ break; ++ case AMAND_DB_W: ++ Format(instr, "amand_db.w 'rd, 'rk, 'rj"); ++ break; ++ case AMAND_DB_D: ++ Format(instr, "amand_db.d 'rd, 'rk, 'rj"); ++ break; ++ case AMOR_DB_W: ++ Format(instr, "amor_db.w 'rd, 'rk, 'rj"); ++ break; ++ case AMOR_DB_D: ++ Format(instr, "amor_db.d 'rd, 'rk, 'rj"); ++ break; ++ case AMXOR_DB_W: ++ Format(instr, "amxor_db.w 'rd, 'rk, 'rj"); ++ break; ++ case AMXOR_DB_D: ++ Format(instr, "amxor_db.d 'rd, 'rk, 'rj"); ++ break; ++ case AMMAX_DB_W: ++ Format(instr, "ammax_db.w 'rd, 'rk, 'rj"); ++ break; ++ case AMMAX_DB_D: ++ Format(instr, "ammax_db.d 'rd, 'rk, 'rj"); ++ break; ++ case AMMIN_DB_W: ++ Format(instr, "ammin_db.w 'rd, 'rk, 'rj"); ++ break; ++ case AMMIN_DB_D: ++ Format(instr, "ammin_db.d 'rd, 'rk, 'rj"); ++ break; ++ case AMMAX_DB_WU: ++ Format(instr, "ammax_db.wu 'rd, 'rk, 'rj"); ++ break; ++ case AMMAX_DB_DU: ++ Format(instr, "ammax_db.du 'rd, 'rk, 'rj"); ++ break; ++ case AMMIN_DB_WU: ++ Format(instr, "ammin_db.wu 'rd, 'rk, 'rj"); ++ break; ++ case AMMIN_DB_DU: ++ Format(instr, "ammin_db.du 'rd, 'rk, 'rj"); ++ break; ++ case DBAR: ++ Format(instr, "dbar 'hint15"); ++ break; ++ case IBAR: ++ Format(instr, "ibar 'hint15"); ++ break; ++ case FLDGT_S: ++ Format(instr, "fldgt.s 'fd, 'rj, 'rk"); ++ break; ++ case FLDGT_D: ++ Format(instr, "fldgt.d 'fd, 'rj, 'rk"); ++ break; ++ case FLDLE_S: ++ Format(instr, "fldle.s 'fd, 'rj, 'rk"); ++ break; ++ case FLDLE_D: ++ Format(instr, "fldle.d 'fd, 'rj, 'rk"); ++ break; ++ case FSTGT_S: ++ Format(instr, "fstgt.s 'fd, 'rj, 'rk"); ++ break; ++ case FSTGT_D: ++ Format(instr, "fstgt.d 'fd, 'rj, 'rk"); ++ break; ++ case FSTLE_S: ++ Format(instr, "fstle.s 'fd, 'rj, 'rk"); ++ break; ++ case FSTLE_D: ++ Format(instr, "fstle.d 'fd, 'rj, 'rk"); ++ break; ++ case LDGT_B: ++ Format(instr, "ldgt.b 'rd, 'rj, 'rk"); ++ break; ++ case LDGT_H: ++ Format(instr, "ldgt.h 'rd, 'rj, 'rk"); ++ break; ++ case LDGT_W: ++ Format(instr, "ldgt.w 'rd, 'rj, 'rk"); ++ break; ++ case LDGT_D: ++ Format(instr, "ldgt.d 'rd, 'rj, 'rk"); ++ break; ++ case LDLE_B: ++ Format(instr, "ldle.b 'rd, 'rj, 'rk"); ++ break; ++ case LDLE_H: ++ Format(instr, "ldle.h 'rd, 'rj, 'rk"); ++ break; ++ case LDLE_W: ++ Format(instr, "ldle.w 'rd, 'rj, 'rk"); ++ break; ++ case LDLE_D: ++ Format(instr, "ldle.d 'rd, 'rj, 'rk"); ++ break; ++ case STGT_B: ++ Format(instr, "stgt.b 'rd, 'rj, 'rk"); ++ break; ++ case STGT_H: ++ Format(instr, "stgt.h 'rd, 'rj, 'rk"); ++ break; ++ case STGT_W: ++ Format(instr, "stgt.w 'rd, 'rj, 'rk"); ++ break; ++ case STGT_D: ++ Format(instr, "stgt.d 'rd, 'rj, 'rk"); ++ break; ++ case STLE_B: ++ Format(instr, "stle.b 'rd, 'rj, 'rk"); ++ break; ++ case STLE_H: ++ Format(instr, "stle.h 'rd, 'rj, 'rk"); ++ break; ++ case STLE_W: ++ Format(instr, "stle.w 'rd, 'rj, 'rk"); ++ break; ++ case STLE_D: ++ Format(instr, "stle.d 'rd, 'rj, 'rk"); ++ break; ++ case FSCALEB_S: ++ Format(instr, "fscaleb.s 'fd, 'fj, 'fk"); ++ break; ++ case FSCALEB_D: ++ Format(instr, "fscaleb.d 'fd, 'fj, 'fk"); ++ break; ++ case FCOPYSIGN_S: ++ Format(instr, "fcopysign.s 'fd, 'fj, 'fk"); ++ break; ++ case FCOPYSIGN_D: ++ Format(instr, "fcopysign.d 
'fd, 'fj, 'fk"); ++ break; ++ case CRC_W_B_W: ++ Format(instr, "crc.w.b.w 'rd, 'rj, 'rk"); ++ break; ++ case CRC_W_H_W: ++ Format(instr, "crc.w.h.w 'rd, 'rj, 'rk"); ++ break; ++ case CRC_W_W_W: ++ Format(instr, "crc.w.w.w 'rd, 'rj, 'rk"); ++ break; ++ case CRC_W_D_W: ++ Format(instr, "crc.w.d.w 'rd, 'rj, 'rk"); ++ break; ++ case CRCC_W_B_W: ++ Format(instr, "crcc.w.b.w 'rd, 'rj, 'rk"); ++ break; ++ case CRCC_W_H_W: ++ Format(instr, "crcc.w.h.w 'rd, 'rj, 'rk"); ++ break; ++ case CRCC_W_W_W: ++ Format(instr, "crcc.w.w.w 'rd, 'rj, 'rk"); ++ break; ++ case CRCC_W_D_W: ++ Format(instr, "crcc.w.d.w 'rd, 'rj, 'rk"); ++ break; ++ case WAIT_INVTLB: ++ case DBGCALL: ++ default: ++ UNREACHABLE(); ++ } ++ return kInstrSize; ++} ++ ++void Decoder::DecodeTypekOp22(Instruction* instr) { ++ switch (instr->Bits(31, 10) << 10) { ++ case CLZ_W: ++ Format(instr, "clz.w 'rd, 'rj"); ++ break; ++ case CTZ_W: ++ Format(instr, "ctz.w 'rd, 'rj"); ++ break; ++ case CLZ_D: ++ Format(instr, "clz.d 'rd, 'rj"); ++ break; ++ case CTZ_D: ++ Format(instr, "ctz.d 'rd, 'rj"); ++ break; ++ case REVB_2H: ++ Format(instr, "revb.2h 'rd, 'rj"); ++ break; ++ case REVB_4H: ++ Format(instr, "revb.4h 'rd, 'rj"); ++ break; ++ case REVB_2W: ++ Format(instr, "revb.2w 'rd, 'rj"); ++ break; ++ case REVB_D: ++ Format(instr, "revb.d 'rd, 'rj"); ++ break; ++ case REVH_2W: ++ Format(instr, "revh.2w 'rd, 'rj"); ++ break; ++ case REVH_D: ++ Format(instr, "revh.d 'rd, 'rj"); ++ break; ++ case BITREV_4B: ++ Format(instr, "bitrev.4b 'rd, 'rj"); ++ break; ++ case BITREV_8B: ++ Format(instr, "bitrev.8b 'rd, 'rj"); ++ break; ++ case BITREV_W: ++ Format(instr, "bitrev.w 'rd, 'rj"); ++ break; ++ case BITREV_D: ++ Format(instr, "bitrev.d 'rd, 'rj"); ++ break; ++ case EXT_W_B: ++ Format(instr, "ext.w.b 'rd, 'rj"); ++ break; ++ case EXT_W_H: ++ Format(instr, "ext.w.h 'rd, 'rj"); ++ break; ++ case FABS_S: ++ Format(instr, "fabs.s 'fd, 'fj"); ++ break; ++ case FABS_D: ++ Format(instr, "fabs.d 'fd, 'fj"); ++ break; ++ case FNEG_S: ++ Format(instr, "fneg.s 'fd, 'fj"); ++ break; ++ case FNEG_D: ++ Format(instr, "fneg.d 'fd, 'fj"); ++ break; ++ case FSQRT_S: ++ Format(instr, "fsqrt.s 'fd, 'fj"); ++ break; ++ case FSQRT_D: ++ Format(instr, "fsqrt.d 'fd, 'fj"); ++ break; ++ case FMOV_S: ++ Format(instr, "fmov.s 'fd, 'fj"); ++ break; ++ case FMOV_D: ++ Format(instr, "fmov.d 'fd, 'fj"); ++ break; ++ case MOVGR2FR_W: ++ Format(instr, "movgr2fr.w 'fd, 'rj"); ++ break; ++ case MOVGR2FR_D: ++ Format(instr, "movgr2fr.d 'fd, 'rj"); ++ break; ++ case MOVGR2FRH_W: ++ Format(instr, "movgr2frh.w 'fd, 'rj"); ++ break; ++ case MOVFR2GR_S: ++ Format(instr, "movfr2gr.s 'rd, 'fj"); ++ break; ++ case MOVFR2GR_D: ++ Format(instr, "movfr2gr.d 'rd, 'fj"); ++ break; ++ case MOVFRH2GR_S: ++ Format(instr, "movfrh2gr.s 'rd, 'fj"); ++ break; ++ case MOVGR2FCSR: ++ Format(instr, "movgr2fcsr fcsr, 'rj"); ++ break; ++ case MOVFCSR2GR: ++ Format(instr, "movfcsr2gr 'rd, fcsr"); ++ break; ++ case FCVT_S_D: ++ Format(instr, "fcvt.s.d 'fd, 'fj"); ++ break; ++ case FCVT_D_S: ++ Format(instr, "fcvt.d.s 'fd, 'fj"); ++ break; ++ case FTINTRM_W_S: ++ Format(instr, "ftintrm.w.s 'fd, 'fj"); ++ break; ++ case FTINTRM_W_D: ++ Format(instr, "ftintrm.w.d 'fd, 'fj"); ++ break; ++ case FTINTRM_L_S: ++ Format(instr, "ftintrm.l.s 'fd, 'fj"); ++ break; ++ case FTINTRM_L_D: ++ Format(instr, "ftintrm.l.d 'fd, 'fj"); ++ break; ++ case FTINTRP_W_S: ++ Format(instr, "ftintrp.w.s 'fd, 'fj"); ++ break; ++ case FTINTRP_W_D: ++ Format(instr, "ftintrp.w.d 'fd, 'fj"); ++ break; ++ case FTINTRP_L_S: ++ Format(instr, 
"ftintrp.l.s 'fd, 'fj"); ++ break; ++ case FTINTRP_L_D: ++ Format(instr, "ftintrp.l.d 'fd, 'fj"); ++ break; ++ case FTINTRZ_W_S: ++ Format(instr, "ftintrz.w.s 'fd, 'fj"); ++ break; ++ case FTINTRZ_W_D: ++ Format(instr, "ftintrz.w.d 'fd, 'fj"); ++ break; ++ case FTINTRZ_L_S: ++ Format(instr, "ftintrz.l.s 'fd, 'fj"); ++ break; ++ case FTINTRZ_L_D: ++ Format(instr, "ftintrz.l.d 'fd, 'fj"); ++ break; ++ case FTINTRNE_W_S: ++ Format(instr, "ftintrne.w.s 'fd, 'fj"); ++ break; ++ case FTINTRNE_W_D: ++ Format(instr, "ftintrne.w.d 'fd, 'fj"); ++ break; ++ case FTINTRNE_L_S: ++ Format(instr, "ftintrne.l.s 'fd, 'fj"); ++ break; ++ case FTINTRNE_L_D: ++ Format(instr, "ftintrne.l.d 'fd, 'fj"); ++ break; ++ case FTINT_W_S: ++ Format(instr, "ftint.w.s 'fd, 'fj"); ++ break; ++ case FTINT_W_D: ++ Format(instr, "ftint.w.d 'fd, 'fj"); ++ break; ++ case FTINT_L_S: ++ Format(instr, "ftint.l.s 'fd, 'fj"); ++ break; ++ case FTINT_L_D: ++ Format(instr, "ftint.l.d 'fd, 'fj"); ++ break; ++ case FFINT_S_W: ++ Format(instr, "ffint.s.w 'fd, 'fj"); ++ break; ++ case FFINT_S_L: ++ Format(instr, "ffint.s.l 'fd, 'fj"); ++ break; ++ case FFINT_D_W: ++ Format(instr, "ffint.d.w 'fd, 'fj"); ++ break; ++ case FFINT_D_L: ++ Format(instr, "ffint.d.l 'fd, 'fj"); ++ break; ++ case FRINT_S: ++ Format(instr, "frint.s 'fd, 'fj"); ++ break; ++ case FRINT_D: ++ Format(instr, "frint.d 'fd, 'fj"); ++ break; ++ case MOVFR2CF: ++ Format(instr, "movfr2cf fcc'cd, 'fj"); ++ break; ++ case MOVCF2FR: ++ Format(instr, "movcf2fr 'fd, fcc'cj"); ++ break; ++ case MOVGR2CF: ++ Format(instr, "movgr2cf fcc'cd, 'rj"); ++ break; ++ case MOVCF2GR: ++ Format(instr, "movcf2gr 'rd, fcc'cj"); ++ break; ++ case FRECIP_S: ++ Format(instr, "frecip.s 'fd, 'fj"); ++ break; ++ case FRECIP_D: ++ Format(instr, "frecip.d 'fd, 'fj"); ++ break; ++ case FRSQRT_S: ++ Format(instr, "frsqrt.s 'fd, 'fj"); ++ break; ++ case FRSQRT_D: ++ Format(instr, "frsqrt.d 'fd, 'fj"); ++ break; ++ case FCLASS_S: ++ Format(instr, "fclass.s 'fd, 'fj"); ++ break; ++ case FCLASS_D: ++ Format(instr, "fclass.d 'fd, 'fj"); ++ break; ++ case FLOGB_S: ++ Format(instr, "flogb.s 'fd, 'fj"); ++ break; ++ case FLOGB_D: ++ Format(instr, "flogb.d 'fd, 'fj"); ++ break; ++ case CLO_W: ++ Format(instr, "clo.w 'rd, 'rj"); ++ break; ++ case CTO_W: ++ Format(instr, "cto.w 'rd, 'rj"); ++ break; ++ case CLO_D: ++ Format(instr, "clo.d 'rd, 'rj"); ++ break; ++ case CTO_D: ++ Format(instr, "cto.d 'rd, 'rj"); ++ break; ++ case RDTIMEL_W: ++ Format(instr, "rdtimel.w 'rd, 'rj"); ++ break; ++ case RDTIMEH_W: ++ Format(instr, "rdtimeh.w 'rd, 'rj"); ++ break; ++ case RDTIME_D: ++ Format(instr, "rdtime.d 'rd, 'rj"); ++ break; ++ // case CPUCFG: ++ // Format(instr, "cpucfg 'rd, 'rj"); ++ // break; ++ case IOCSRRD_B: ++ case IOCSRRD_H: ++ case IOCSRRD_W: ++ case IOCSRRD_D: ++ case IOCSRWR_B: ++ case IOCSRWR_H: ++ case IOCSRWR_W: ++ case IOCSRWR_D: ++ case TLBINV: ++ case TLBFLUSH: ++ case TLBP: ++ case TLBR: ++ case TLBWI: ++ case TLBWR: ++ case ERET: ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++int Decoder::InstructionDecode(byte* instr_ptr) { ++ Instruction* instr = Instruction::At(instr_ptr); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%08x ", ++ instr->InstructionBits()); ++ switch (instr->InstructionType()) { ++ case Instruction::kOp6Type: { ++ DecodeTypekOp6(instr); ++ break; ++ } ++ case Instruction::kOp7Type: { ++ DecodeTypekOp7(instr); ++ break; ++ } ++ case Instruction::kOp8Type: { ++ DecodeTypekOp8(instr); ++ break; ++ } ++ case Instruction::kOp10Type: { ++ DecodeTypekOp10(instr); ++ break; ++ 
} ++ case Instruction::kOp12Type: { ++ DecodeTypekOp12(instr); ++ break; ++ } ++ case Instruction::kOp14Type: { ++ DecodeTypekOp14(instr); ++ break; ++ } ++ case Instruction::kOp17Type: { ++ return DecodeTypekOp17(instr); ++ } ++ case Instruction::kOp22Type: { ++ DecodeTypekOp22(instr); ++ break; ++ } ++ case Instruction::kUnsupported: { ++ Format(instr, "UNSUPPORTED"); ++ break; ++ } ++ default: { ++ Format(instr, "UNSUPPORTED"); ++ break; ++ } ++ } ++ return kInstrSize; ++} ++ ++} // namespace internal ++} // namespace v8 ++ ++//------------------------------------------------------------------------------ ++ ++namespace disasm { ++ ++const char* NameConverter::NameOfAddress(byte* addr) const { ++ v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast(addr)); ++ return tmp_buffer_.begin(); ++} ++ ++const char* NameConverter::NameOfConstant(byte* addr) const { ++ return NameOfAddress(addr); ++} ++ ++const char* NameConverter::NameOfCPURegister(int reg) const { ++ return v8::internal::Registers::Name(reg); ++} ++ ++const char* NameConverter::NameOfXMMRegister(int reg) const { ++ return v8::internal::FPURegisters::Name(reg); ++} ++ ++const char* NameConverter::NameOfByteCPURegister(int reg) const { ++ UNREACHABLE(); ++ return "nobytereg"; ++} ++ ++const char* NameConverter::NameInCode(byte* addr) const { ++ // The default name converter is called for unknown code. So we will not try ++ // to access any memory. ++ return ""; ++} ++ ++//------------------------------------------------------------------------------ ++ ++int Disassembler::InstructionDecode(v8::internal::Vector buffer, ++ byte* instruction) { ++ v8::internal::Decoder d(converter_, buffer); ++ return d.InstructionDecode(instruction); ++} ++ ++int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; } ++ ++void Disassembler::Disassemble(FILE* f, byte* begin, byte* end, ++ UnimplementedOpcodeAction unimplemented_action) { ++ NameConverter converter; ++ Disassembler d(converter, unimplemented_action); ++ for (byte* pc = begin; pc < end;) { ++ v8::internal::EmbeddedVector buffer; ++ buffer[0] = '\0'; ++ byte* prev_pc = pc; ++ pc += d.InstructionDecode(buffer, pc); ++ v8::internal::PrintF(f, "%p %08x %s\n", static_cast(prev_pc), ++ *reinterpret_cast(prev_pc), buffer.begin()); ++ } ++} ++ ++#undef STRING_STARTS_WITH ++ ++} // namespace disasm ++ ++#endif // V8_TARGET_ARCH_LA64 +diff --git a/src/3rdparty/chromium/v8/src/diagnostics/perf-jit.h b/src/3rdparty/chromium/v8/src/diagnostics/perf-jit.h +index dbe78ddf2d..cb745ef8cc 100644 +--- a/src/3rdparty/chromium/v8/src/diagnostics/perf-jit.h ++++ b/src/3rdparty/chromium/v8/src/diagnostics/perf-jit.h +@@ -83,6 +83,7 @@ class PerfJitLogger : public CodeEventLogger { + static const uint32_t kElfMachARM = 40; + static const uint32_t kElfMachMIPS = 8; + static const uint32_t kElfMachMIPS64 = 8; ++ static const uint32_t kElfMachLA64 = 258; + static const uint32_t kElfMachARM64 = 183; + static const uint32_t kElfMachS390x = 22; + static const uint32_t kElfMachPPC64 = 21; +@@ -98,6 +99,8 @@ class PerfJitLogger : public CodeEventLogger { + return kElfMachMIPS; + #elif V8_TARGET_ARCH_MIPS64 + return kElfMachMIPS64; ++#elif V8_TARGET_ARCH_LA64 ++ return kElfMachLA64; + #elif V8_TARGET_ARCH_ARM64 + return kElfMachARM64; + #elif V8_TARGET_ARCH_S390X +diff --git a/src/3rdparty/chromium/v8/src/execution/frame-constants.h b/src/3rdparty/chromium/v8/src/execution/frame-constants.h +index 8c3f774319..f8508468ae 100644 +--- a/src/3rdparty/chromium/v8/src/execution/frame-constants.h ++++ 
b/src/3rdparty/chromium/v8/src/execution/frame-constants.h +@@ -389,6 +389,8 @@ inline static int FrameSlotToFPOffset(int slot) { + #include "src/execution/mips/frame-constants-mips.h" // NOLINT + #elif V8_TARGET_ARCH_MIPS64 + #include "src/execution/mips64/frame-constants-mips64.h" // NOLINT ++#elif V8_TARGET_ARCH_LA64 ++#include "src/execution/la64/frame-constants-la64.h" // NOLINT + #elif V8_TARGET_ARCH_S390 + #include "src/execution/s390/frame-constants-s390.h" // NOLINT + #else +diff --git a/src/3rdparty/chromium/v8/src/execution/la64/frame-constants-la64.cc b/src/3rdparty/chromium/v8/src/execution/la64/frame-constants-la64.cc +new file mode 100644 +index 0000000000..185f0abe3d +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/execution/la64/frame-constants-la64.cc +@@ -0,0 +1,32 @@ ++// Copyright 2020 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#if V8_TARGET_ARCH_LA64 ++ ++#include "src/codegen/la64/assembler-la64-inl.h" ++#include "src/execution/frame-constants.h" ++#include "src/execution/frames.h" ++ ++#include "src/execution/la64/frame-constants-la64.h" ++ ++namespace v8 { ++namespace internal { ++ ++Register JavaScriptFrame::fp_register() { return v8::internal::fp; } ++Register JavaScriptFrame::context_register() { return cp; } ++Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); } ++ ++int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) { ++ return register_count; ++} ++ ++int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) { ++ USE(register_count); ++ return 0; ++} ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_TARGET_ARCH_LA64 +diff --git a/src/3rdparty/chromium/v8/src/execution/la64/frame-constants-la64.h b/src/3rdparty/chromium/v8/src/execution/la64/frame-constants-la64.h +new file mode 100644 +index 0000000000..e6069a60e9 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/execution/la64/frame-constants-la64.h +@@ -0,0 +1,75 @@ ++// Copyright 2020 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#ifndef V8_EXECUTION_LA64_FRAME_CONSTANTS_LA64_H_ ++#define V8_EXECUTION_LA64_FRAME_CONSTANTS_LA64_H_ ++ ++#include "src/base/bits.h" ++#include "src/base/macros.h" ++#include "src/execution/frame-constants.h" ++ ++namespace v8 { ++namespace internal { ++ ++class EntryFrameConstants : public AllStatic { ++ public: ++ // This is the offset to where JSEntry pushes the current value of ++ // Isolate::c_entry_fp onto the stack. ++ static constexpr int kCallerFPOffset = ++ -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize); ++}; ++ ++class WasmCompileLazyFrameConstants : public TypedFrameConstants { ++ public: ++ static constexpr int kNumberOfSavedGpParamRegs = 7; ++ static constexpr int kNumberOfSavedFpParamRegs = 7; ++ ++ // FP-relative. ++ static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(7); ++ static constexpr int kFixedFrameSizeFromFp = ++ TypedFrameConstants::kFixedFrameSizeFromFp + ++ kNumberOfSavedGpParamRegs * kPointerSize + ++ kNumberOfSavedFpParamRegs * kDoubleSize; ++}; ++ ++// Frame constructed by the {WasmDebugBreak} builtin. ++// After pushing the frame type marker, the builtin pushes all Liftoff cache ++// registers (see liftoff-assembler-defs.h). 
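For readers skimming this frame-constants hunk, the offset bookkeeping in the class below can be illustrated with a short standalone sketch: the offset of a pushed register is the lowest pushed slot plus one slot per pushed register with a smaller code, i.e. a population count over the lower bits of the mask. The sketch is not part of the patch; the mask, slot size and base offset are made-up example values, and PushedRegisterOffset is a hypothetical stand-in for the helper the class defines.

#include <cassert>
#include <cstdint>
#include <iostream>

namespace example {

constexpr uint32_t kPushedRegs = 0b10110;          // hypothetical: regs 1, 2 and 4 are pushed
constexpr int kSlotSize = 8;                        // hypothetical slot size in bytes
constexpr int kLastPushedOffset = -3 * kSlotSize;   // fp-relative offset of the lowest slot

int PopCount(uint32_t x) {
  int n = 0;
  for (; x != 0; x &= x - 1) ++n;  // clear the lowest set bit each round
  return n;
}

int PushedRegisterOffset(int reg_code) {
  assert(kPushedRegs & (1u << reg_code));                  // register must be in the pushed set
  uint32_t lower = kPushedRegs & ((1u << reg_code) - 1);   // pushed registers with smaller codes
  return kLastPushedOffset + PopCount(lower) * kSlotSize;  // one slot per lower pushed register
}

}  // namespace example

int main() {
  // Registers 1, 2 and 4 occupy consecutive slots starting at the lowest offset.
  std::cout << example::PushedRegisterOffset(1) << "\n";  // -24
  std::cout << example::PushedRegisterOffset(2) << "\n";  // -16
  std::cout << example::PushedRegisterOffset(4) << "\n";  // -8
  return 0;
}

The same popcount-over-lower-bits idea gives both the GP and FP offsets in the class that follows; only the base offset and slot size differ.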
++class WasmDebugBreakFrameConstants : public TypedFrameConstants { ++ public: ++ // {a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, t3, t4, t5, t6, t7, t8} ++ static constexpr uint32_t kPushedGpRegs = 0b111111111111111110000; ++ // {f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24, f26} ++ static constexpr uint32_t kPushedFpRegs = 0b101010101010101010101010101; ++ ++ static constexpr int kNumPushedGpRegisters = ++ base::bits::CountPopulation(kPushedGpRegs); ++ static constexpr int kNumPushedFpRegisters = ++ base::bits::CountPopulation(kPushedFpRegs); ++ ++ static constexpr int kLastPushedGpRegisterOffset = ++ -kFixedFrameSizeFromFp - kNumPushedGpRegisters * kSystemPointerSize; ++ static constexpr int kLastPushedFpRegisterOffset = ++ kLastPushedGpRegisterOffset - kNumPushedFpRegisters * kDoubleSize; ++ ++ // Offsets are fp-relative. ++ static int GetPushedGpRegisterOffset(int reg_code) { ++ DCHECK_NE(0, kPushedGpRegs & (1 << reg_code)); ++ uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1); ++ return kLastPushedGpRegisterOffset + ++ base::bits::CountPopulation(lower_regs) * kSystemPointerSize; ++ } ++ ++ static int GetPushedFpRegisterOffset(int reg_code) { ++ DCHECK_NE(0, kPushedFpRegs & (1 << reg_code)); ++ uint32_t lower_regs = kPushedFpRegs & ((uint32_t{1} << reg_code) - 1); ++ return kLastPushedFpRegisterOffset + ++ base::bits::CountPopulation(lower_regs) * kDoubleSize; ++ } ++}; ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_EXECUTION_LA64_FRAME_CONSTANTS_LA64_H_ +diff --git a/src/3rdparty/chromium/v8/src/execution/la64/simulator-la64.cc b/src/3rdparty/chromium/v8/src/execution/la64/simulator-la64.cc +new file mode 100644 +index 0000000000..f4bafa1d8d +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/execution/la64/simulator-la64.cc +@@ -0,0 +1,5804 @@ ++// Copyright 2020 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#include "src/execution/la64/simulator-la64.h" ++ ++// Only build the simulator if not compiling for real LA64 hardware. ++#if defined(USE_SIMULATOR) ++ ++#include ++#include ++#include ++#include ++ ++#include "src/base/bits.h" ++#include "src/codegen/assembler-inl.h" ++#include "src/codegen/la64/constants-la64.h" ++#include "src/codegen/macro-assembler.h" ++#include "src/diagnostics/disasm.h" ++#include "src/heap/combined-heap.h" ++#include "src/runtime/runtime-utils.h" ++#include "src/utils/ostreams.h" ++#include "src/utils/vector.h" ++ ++namespace v8 { ++namespace internal { ++ ++DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor, ++ Simulator::GlobalMonitor::Get) ++ ++// #define PRINT_SIM_LOG ++ ++// Util functions. 
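The MultiplyHighSigned helper defined a few lines below computes the upper 64 bits of a signed 64x64-bit product from 32-bit halves (the classic mulh decomposition used when no 128-bit multiply is available). The following standalone copy of the same routine can be built and checked in isolation; the __int128 comparison in main is an assumption about the host compiler (a GCC/Clang extension), not something the patch itself relies on.

#include <cstdint>
#include <iostream>

static int64_t MulHighSigned(int64_t u, int64_t v) {
  uint64_t u0, v0, w0;
  int64_t u1, v1, w1, w2, t;

  u0 = u & 0xFFFFFFFFL;  // low 32 bits, zero-extended
  u1 = u >> 32;          // high 32 bits, sign-extended
  v0 = v & 0xFFFFFFFFL;
  v1 = v >> 32;

  w0 = u0 * v0;              // low x low
  t = u1 * v0 + (w0 >> 32);  // high x low plus the carry out of the low word
  w1 = t & 0xFFFFFFFFL;
  w2 = t >> 32;
  w1 = u0 * v1 + w1;         // accumulate the other cross term

  return u1 * v1 + w2 + (w1 >> 32);  // upper 64 bits of the 128-bit product
}

int main() {
  int64_t a = -123456789012345LL;
  int64_t b = 987654321098LL;
  // Both lines should print the same value.
  std::cout << MulHighSigned(a, b) << "\n";
  std::cout << static_cast<int64_t>((static_cast<__int128>(a) * b) >> 64) << "\n";
  return 0;
}

The unsigned variant in the patch follows the identical structure with all halves kept unsigned.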
++inline bool HaveSameSign(int64_t a, int64_t b) { return ((a ^ b) >= 0); } ++ ++uint32_t get_fcsr_condition_bit(uint32_t cc) { ++ if (cc == 0) { ++ return 23; ++ } else { ++ return 24 + cc; ++ } ++} ++ ++static int64_t MultiplyHighSigned(int64_t u, int64_t v) { ++ uint64_t u0, v0, w0; ++ int64_t u1, v1, w1, w2, t; ++ ++ u0 = u & 0xFFFFFFFFL; ++ u1 = u >> 32; ++ v0 = v & 0xFFFFFFFFL; ++ v1 = v >> 32; ++ ++ w0 = u0 * v0; ++ t = u1 * v0 + (w0 >> 32); ++ w1 = t & 0xFFFFFFFFL; ++ w2 = t >> 32; ++ w1 = u0 * v1 + w1; ++ ++ return u1 * v1 + w2 + (w1 >> 32); ++} ++ ++static uint64_t MultiplyHighUnsigned(uint64_t u, uint64_t v) { ++ uint64_t u0, v0, w0; ++ uint64_t u1, v1, w1, w2, t; ++ ++ u0 = u & 0xFFFFFFFFL; ++ u1 = u >> 32; ++ v0 = v & 0xFFFFFFFFL; ++ v1 = v >> 32; ++ ++ w0 = u0 * v0; ++ t = u1 * v0 + (w0 >> 32); ++ w1 = t & 0xFFFFFFFFL; ++ w2 = t >> 32; ++ w1 = u0 * v1 + w1; ++ ++ return u1 * v1 + w2 + (w1 >> 32); ++} ++ ++#ifdef PRINT_SIM_LOG ++inline void printf_instr(const char* _Format, ...) { ++ va_list varList; ++ va_start(varList, _Format); ++ vprintf(_Format, varList); ++ va_end(varList); ++} ++#else ++#define printf_instr(...) ++#endif ++ ++// This macro provides a platform independent use of sscanf. The reason for ++// SScanF not being implemented in a platform independent was through ++// ::v8::internal::OS in the same way as SNPrintF is that the Windows C Run-Time ++// Library does not provide vsscanf. ++#define SScanF sscanf // NOLINT ++ ++// The La64Debugger class is used by the simulator while debugging simulated ++// code. ++class La64Debugger { ++ public: ++ explicit La64Debugger(Simulator* sim) : sim_(sim) {} ++ ++ void Stop(Instruction* instr); ++ void Debug(); ++ // Print all registers with a nice formatting. ++ void PrintAllRegs(); ++ void PrintAllRegsIncludingFPU(); ++ ++ private: ++ // We set the breakpoint code to 0xFFFF to easily recognize it. ++ static const Instr kBreakpointInstr = BREAK | 0xFFFF; ++ static const Instr kNopInstr = 0x0; ++ ++ Simulator* sim_; ++ ++ int64_t GetRegisterValue(int regnum); ++ int64_t GetFPURegisterValue(int regnum); ++ float GetFPURegisterValueFloat(int regnum); ++ double GetFPURegisterValueDouble(int regnum); ++ bool GetValue(const char* desc, int64_t* value); ++ ++ // Set or delete a breakpoint. Returns true if successful. ++ bool SetBreakpoint(Instruction* breakpc); ++ bool DeleteBreakpoint(Instruction* breakpc); ++ ++ // Undo and redo all breakpoints. This is needed to bracket disassembly and ++ // execution to skip past breakpoints when run from the debugger. ++ void UndoBreakpoints(); ++ void RedoBreakpoints(); ++}; ++ ++inline void UNSUPPORTED() { printf("Sim: Unsupported instruction.\n"); } ++ ++void La64Debugger::Stop(Instruction* instr) { ++ // Get the stop code. 
++ uint32_t code = instr->Bits(25, 6); ++ PrintF("Simulator hit (%u)\n", code); ++ Debug(); ++} ++ ++int64_t La64Debugger::GetRegisterValue(int regnum) { ++ if (regnum == kNumSimuRegisters) { ++ return sim_->get_pc(); ++ } else { ++ return sim_->get_register(regnum); ++ } ++} ++ ++int64_t La64Debugger::GetFPURegisterValue(int regnum) { ++ if (regnum == kNumFPURegisters) { ++ return sim_->get_pc(); ++ } else { ++ return sim_->get_fpu_register(regnum); ++ } ++} ++ ++float La64Debugger::GetFPURegisterValueFloat(int regnum) { ++ if (regnum == kNumFPURegisters) { ++ return sim_->get_pc(); ++ } else { ++ return sim_->get_fpu_register_float(regnum); ++ } ++} ++ ++double La64Debugger::GetFPURegisterValueDouble(int regnum) { ++ if (regnum == kNumFPURegisters) { ++ return sim_->get_pc(); ++ } else { ++ return sim_->get_fpu_register_double(regnum); ++ } ++} ++ ++bool La64Debugger::GetValue(const char* desc, int64_t* value) { ++ int regnum = Registers::Number(desc); ++ int fpuregnum = FPURegisters::Number(desc); ++ ++ if (regnum != kInvalidRegister) { ++ *value = GetRegisterValue(regnum); ++ return true; ++ } else if (fpuregnum != kInvalidFPURegister) { ++ *value = GetFPURegisterValue(fpuregnum); ++ return true; ++ } else if (strncmp(desc, "0x", 2) == 0) { ++ return SScanF(desc + 2, "%" SCNx64, reinterpret_cast(value)) == ++ 1; ++ } else { ++ return SScanF(desc, "%" SCNu64, reinterpret_cast(value)) == 1; ++ } ++ return false; ++} ++ ++bool La64Debugger::SetBreakpoint(Instruction* breakpc) { ++ // Check if a breakpoint can be set. If not return without any side-effects. ++ if (sim_->break_pc_ != nullptr) { ++ return false; ++ } ++ ++ // Set the breakpoint. ++ sim_->break_pc_ = breakpc; ++ sim_->break_instr_ = breakpc->InstructionBits(); ++ // Not setting the breakpoint instruction in the code itself. It will be set ++ // when the debugger shell continues. ++ return true; ++} ++ ++bool La64Debugger::DeleteBreakpoint(Instruction* breakpc) { ++ if (sim_->break_pc_ != nullptr) { ++ sim_->break_pc_->SetInstructionBits(sim_->break_instr_); ++ } ++ ++ sim_->break_pc_ = nullptr; ++ sim_->break_instr_ = 0; ++ return true; ++} ++ ++void La64Debugger::UndoBreakpoints() { ++ if (sim_->break_pc_ != nullptr) { ++ sim_->break_pc_->SetInstructionBits(sim_->break_instr_); ++ } ++} ++ ++void La64Debugger::RedoBreakpoints() { ++ if (sim_->break_pc_ != nullptr) { ++ sim_->break_pc_->SetInstructionBits(kBreakpointInstr); ++ } ++} ++ ++void La64Debugger::PrintAllRegs() { ++#define REG_INFO(n) Registers::Name(n), GetRegisterValue(n), GetRegisterValue(n) ++ ++ PrintF("\n"); ++ // at, v0, a0. ++ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 "\t%3s: 0x%016" PRIx64 " %14" PRId64 ++ "\t%3s: 0x%016" PRIx64 " %14" PRId64 "\n", ++ REG_INFO(1), REG_INFO(2), REG_INFO(4)); ++ // v1, a1. ++ PrintF("%34s\t%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64 ++ " %14" PRId64 " \n", ++ "", REG_INFO(3), REG_INFO(5)); ++ // a2. ++ PrintF("%34s\t%34s\t%3s: 0x%016" PRIx64 " %14" PRId64 " \n", "", "", ++ REG_INFO(6)); ++ // a3. ++ PrintF("%34s\t%34s\t%3s: 0x%016" PRIx64 " %14" PRId64 " \n", "", "", ++ REG_INFO(7)); ++ PrintF("\n"); ++ // a4-t3, s0-s7 ++ for (int i = 0; i < 8; i++) { ++ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64 ++ " %14" PRId64 " \n", ++ REG_INFO(8 + i), REG_INFO(16 + i)); ++ } ++ PrintF("\n"); ++ // t8, k0, LO. ++ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64 ++ " %14" PRId64 " \t%3s: 0x%016" PRIx64 " %14" PRId64 " \n", ++ REG_INFO(24), REG_INFO(26), REG_INFO(32)); ++ // t9, k1, HI. 
++ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64 ++ " %14" PRId64 " \t%3s: 0x%016" PRIx64 " %14" PRId64 " \n", ++ REG_INFO(25), REG_INFO(27), REG_INFO(33)); ++ // sp, fp, gp. ++ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64 ++ " %14" PRId64 " \t%3s: 0x%016" PRIx64 " %14" PRId64 " \n", ++ REG_INFO(29), REG_INFO(30), REG_INFO(28)); ++ // pc. ++ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64 ++ " %14" PRId64 " \n", ++ REG_INFO(31), REG_INFO(34)); ++ ++#undef REG_INFO ++} ++ ++void La64Debugger::PrintAllRegsIncludingFPU() { ++#define FPU_REG_INFO(n) \ ++ FPURegisters::Name(n), GetFPURegisterValue(n), GetFPURegisterValueDouble(n) ++ ++ PrintAllRegs(); ++ ++ PrintF("\n\n"); ++ // f0, f1, f2, ... f31. ++ // TODO(plind): consider printing 2 columns for space efficiency. ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(0)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(1)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(2)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(3)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(4)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(5)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(6)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(7)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(8)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(9)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(10)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(11)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(12)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(13)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(14)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(15)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(16)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(17)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(18)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(19)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(20)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(21)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(22)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(23)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(24)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(25)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(26)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(27)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(28)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(29)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(30)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(31)); ++ ++#undef FPU_REG_INFO ++} ++ ++void La64Debugger::Debug() { ++ intptr_t last_pc = -1; ++ bool done = false; ++ ++#define COMMAND_SIZE 63 ++#define ARG_SIZE 255 ++ ++#define STR(a) #a ++#define XSTR(a) STR(a) ++ ++ char cmd[COMMAND_SIZE + 1]; ++ char arg1[ARG_SIZE + 1]; ++ char arg2[ARG_SIZE + 1]; ++ char* argv[3] = {cmd, arg1, arg2}; ++ ++ // Make sure to have a proper terminating character if reaching the limit. ++ cmd[COMMAND_SIZE] = 0; ++ arg1[ARG_SIZE] = 0; ++ arg2[ARG_SIZE] = 0; ++ ++ // Undo all set breakpoints while running in the debugger shell. This will ++ // make them invisible to all commands. 
++ UndoBreakpoints(); ++ ++ while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) { ++ if (last_pc != sim_->get_pc()) { ++ disasm::NameConverter converter; ++ disasm::Disassembler dasm(converter); ++ // Use a reasonably large buffer. ++ v8::internal::EmbeddedVector buffer; ++ dasm.InstructionDecode(buffer, reinterpret_cast(sim_->get_pc())); ++ PrintF(" 0x%016" PRIx64 " %s\n", sim_->get_pc(), buffer.begin()); ++ last_pc = sim_->get_pc(); ++ } ++ char* line = ReadLine("sim> "); ++ if (line == nullptr) { ++ break; ++ } else { ++ char* last_input = sim_->last_debugger_input(); ++ if (strcmp(line, "\n") == 0 && last_input != nullptr) { ++ line = last_input; ++ } else { ++ // Ownership is transferred to sim_; ++ sim_->set_last_debugger_input(line); ++ } ++ // Use sscanf to parse the individual parts of the command line. At the ++ // moment no command expects more than two parameters. ++ int argc = SScanF(line, ++ "%" XSTR(COMMAND_SIZE) "s " ++ "%" XSTR(ARG_SIZE) "s " ++ "%" XSTR(ARG_SIZE) "s", ++ cmd, arg1, arg2); ++ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) { ++ Instruction* instr = reinterpret_cast(sim_->get_pc()); ++ if (!(instr->IsTrap()) || ++ instr->InstructionBits() == rtCallRedirInstr) { ++ sim_->InstructionDecode( ++ reinterpret_cast(sim_->get_pc())); ++ } else { ++ // Allow si to jump over generated breakpoints. ++ PrintF("/!\\ Jumping over generated breakpoint.\n"); ++ sim_->set_pc(sim_->get_pc() + kInstrSize); ++ } ++ } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) { ++ // Execute the one instruction we broke at with breakpoints disabled. ++ sim_->InstructionDecode(reinterpret_cast(sim_->get_pc())); ++ // Leave the debugger shell. ++ done = true; ++ } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) { ++ if (argc == 2) { ++ int64_t value; ++ double dvalue; ++ if (strcmp(arg1, "all") == 0) { ++ PrintAllRegs(); ++ } else if (strcmp(arg1, "allf") == 0) { ++ PrintAllRegsIncludingFPU(); ++ } else { ++ int regnum = Registers::Number(arg1); ++ int fpuregnum = FPURegisters::Number(arg1); ++ ++ if (regnum != kInvalidRegister) { ++ value = GetRegisterValue(regnum); ++ PrintF("%s: 0x%08" PRIx64 " %" PRId64 " \n", arg1, value, ++ value); ++ } else if (fpuregnum != kInvalidFPURegister) { ++ value = GetFPURegisterValue(fpuregnum); ++ dvalue = GetFPURegisterValueDouble(fpuregnum); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", ++ FPURegisters::Name(fpuregnum), value, dvalue); ++ } else { ++ PrintF("%s unrecognized\n", arg1); ++ } ++ } ++ } else { ++ if (argc == 3) { ++ if (strcmp(arg2, "single") == 0) { ++ int64_t value; ++ float fvalue; ++ int fpuregnum = FPURegisters::Number(arg1); ++ ++ if (fpuregnum != kInvalidFPURegister) { ++ value = GetFPURegisterValue(fpuregnum); ++ value &= 0xFFFFFFFFUL; ++ fvalue = GetFPURegisterValueFloat(fpuregnum); ++ PrintF("%s: 0x%08" PRIx64 " %11.4e\n", arg1, value, fvalue); ++ } else { ++ PrintF("%s unrecognized\n", arg1); ++ } ++ } else { ++ PrintF("print single\n"); ++ } ++ } else { ++ PrintF("print or print single\n"); ++ } ++ } ++ } else if ((strcmp(cmd, "po") == 0) || ++ (strcmp(cmd, "printobject") == 0)) { ++ if (argc == 2) { ++ int64_t value; ++ StdoutStream os; ++ if (GetValue(arg1, &value)) { ++ Object obj(value); ++ os << arg1 << ": \n"; ++#ifdef DEBUG ++ obj.Print(os); ++ os << "\n"; ++#else ++ os << Brief(obj) << "\n"; ++#endif ++ } else { ++ os << arg1 << " unrecognized\n"; ++ } ++ } else { ++ PrintF("printobject \n"); ++ } ++ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0 || ++ 
strcmp(cmd, "dump") == 0) { ++ int64_t* cur = nullptr; ++ int64_t* end = nullptr; ++ int next_arg = 1; ++ ++ if (strcmp(cmd, "stack") == 0) { ++ cur = reinterpret_cast(sim_->get_register(Simulator::sp)); ++ } else { // Command "mem". ++ int64_t value; ++ if (!GetValue(arg1, &value)) { ++ PrintF("%s unrecognized\n", arg1); ++ continue; ++ } ++ cur = reinterpret_cast(value); ++ next_arg++; ++ } ++ ++ int64_t words; ++ if (argc == next_arg) { ++ words = 10; ++ } else { ++ if (!GetValue(argv[next_arg], &words)) { ++ words = 10; ++ } ++ } ++ end = cur + words; ++ ++ bool skip_obj_print = (strcmp(cmd, "dump") == 0); ++ while (cur < end) { ++ PrintF(" 0x%012" PRIxPTR " : 0x%016" PRIx64 " %14" PRId64 " ", ++ reinterpret_cast(cur), *cur, *cur); ++ Object obj(*cur); ++ Heap* current_heap = sim_->isolate_->heap(); ++ if (!skip_obj_print) { ++ if (obj.IsSmi() || ++ IsValidHeapObject(current_heap, HeapObject::cast(obj))) { ++ PrintF(" ("); ++ if (obj.IsSmi()) { ++ PrintF("smi %d", Smi::ToInt(obj)); ++ } else { ++ obj.ShortPrint(); ++ } ++ PrintF(")"); ++ } ++ } ++ PrintF("\n"); ++ cur++; ++ } ++ ++ } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0) || ++ (strcmp(cmd, "di") == 0)) { ++ disasm::NameConverter converter; ++ disasm::Disassembler dasm(converter); ++ // Use a reasonably large buffer. ++ v8::internal::EmbeddedVector buffer; ++ ++ byte* cur = nullptr; ++ byte* end = nullptr; ++ ++ if (argc == 1) { ++ cur = reinterpret_cast(sim_->get_pc()); ++ end = cur + (10 * kInstrSize); ++ } else if (argc == 2) { ++ int regnum = Registers::Number(arg1); ++ if (regnum != kInvalidRegister || strncmp(arg1, "0x", 2) == 0) { ++ // The argument is an address or a register name. ++ int64_t value; ++ if (GetValue(arg1, &value)) { ++ cur = reinterpret_cast(value); ++ // Disassemble 10 instructions at . ++ end = cur + (10 * kInstrSize); ++ } ++ } else { ++ // The argument is the number of instructions. ++ int64_t value; ++ if (GetValue(arg1, &value)) { ++ cur = reinterpret_cast(sim_->get_pc()); ++ // Disassemble instructions. ++ end = cur + (value * kInstrSize); ++ } ++ } ++ } else { ++ int64_t value1; ++ int64_t value2; ++ if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) { ++ cur = reinterpret_cast(value1); ++ end = cur + (value2 * kInstrSize); ++ } ++ } ++ ++ while (cur < end) { ++ dasm.InstructionDecode(buffer, cur); ++ PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast(cur), ++ buffer.begin()); ++ cur += kInstrSize; ++ } ++ } else if (strcmp(cmd, "gdb") == 0) { ++ PrintF("relinquishing control to gdb\n"); ++ v8::base::OS::DebugBreak(); ++ PrintF("regaining control from gdb\n"); ++ } else if (strcmp(cmd, "break") == 0) { ++ if (argc == 2) { ++ int64_t value; ++ if (GetValue(arg1, &value)) { ++ if (!SetBreakpoint(reinterpret_cast(value))) { ++ PrintF("setting breakpoint failed\n"); ++ } ++ } else { ++ PrintF("%s unrecognized\n", arg1); ++ } ++ } else { ++ PrintF("break
\n"); ++ } ++ } else if (strcmp(cmd, "del") == 0) { ++ if (!DeleteBreakpoint(nullptr)) { ++ PrintF("deleting breakpoint failed\n"); ++ } ++ } else if (strcmp(cmd, "flags") == 0) { ++ PrintF("No flags on LA64 !\n"); ++ } else if (strcmp(cmd, "stop") == 0) { ++ int64_t value; ++ intptr_t stop_pc = sim_->get_pc() - 2 * kInstrSize; ++ Instruction* stop_instr = reinterpret_cast(stop_pc); ++ Instruction* msg_address = ++ reinterpret_cast(stop_pc + kInstrSize); ++ if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) { ++ // Remove the current stop. ++ if (sim_->IsStopInstruction(stop_instr)) { ++ stop_instr->SetInstructionBits(kNopInstr); ++ msg_address->SetInstructionBits(kNopInstr); ++ } else { ++ PrintF("Not at debugger stop.\n"); ++ } ++ } else if (argc == 3) { ++ // Print information about all/the specified breakpoint(s). ++ if (strcmp(arg1, "info") == 0) { ++ if (strcmp(arg2, "all") == 0) { ++ PrintF("Stop information:\n"); ++ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode; ++ i++) { ++ sim_->PrintStopInfo(i); ++ } ++ } else if (GetValue(arg2, &value)) { ++ sim_->PrintStopInfo(value); ++ } else { ++ PrintF("Unrecognized argument.\n"); ++ } ++ } else if (strcmp(arg1, "enable") == 0) { ++ // Enable all/the specified breakpoint(s). ++ if (strcmp(arg2, "all") == 0) { ++ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode; ++ i++) { ++ sim_->EnableStop(i); ++ } ++ } else if (GetValue(arg2, &value)) { ++ sim_->EnableStop(value); ++ } else { ++ PrintF("Unrecognized argument.\n"); ++ } ++ } else if (strcmp(arg1, "disable") == 0) { ++ // Disable all/the specified breakpoint(s). ++ if (strcmp(arg2, "all") == 0) { ++ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode; ++ i++) { ++ sim_->DisableStop(i); ++ } ++ } else if (GetValue(arg2, &value)) { ++ sim_->DisableStop(value); ++ } else { ++ PrintF("Unrecognized argument.\n"); ++ } ++ } ++ } else { ++ PrintF("Wrong usage. Use help command for more information.\n"); ++ } ++ } else if ((strcmp(cmd, "stat") == 0) || (strcmp(cmd, "st") == 0)) { ++ // Print registers and disassemble. ++ PrintAllRegs(); ++ PrintF("\n"); ++ ++ disasm::NameConverter converter; ++ disasm::Disassembler dasm(converter); ++ // Use a reasonably large buffer. ++ v8::internal::EmbeddedVector buffer; ++ ++ byte* cur = nullptr; ++ byte* end = nullptr; ++ ++ if (argc == 1) { ++ cur = reinterpret_cast(sim_->get_pc()); ++ end = cur + (10 * kInstrSize); ++ } else if (argc == 2) { ++ int64_t value; ++ if (GetValue(arg1, &value)) { ++ cur = reinterpret_cast(value); ++ // no length parameter passed, assume 10 instructions ++ end = cur + (10 * kInstrSize); ++ } ++ } else { ++ int64_t value1; ++ int64_t value2; ++ if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) { ++ cur = reinterpret_cast(value1); ++ end = cur + (value2 * kInstrSize); ++ } ++ } ++ ++ while (cur < end) { ++ dasm.InstructionDecode(buffer, cur); ++ PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast(cur), ++ buffer.begin()); ++ cur += kInstrSize; ++ } ++ } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) { ++ PrintF("cont\n"); ++ PrintF(" continue execution (alias 'c')\n"); ++ PrintF("stepi\n"); ++ PrintF(" step one instruction (alias 'si')\n"); ++ PrintF("print \n"); ++ PrintF(" print register content (alias 'p')\n"); ++ PrintF(" use register name 'all' to print all registers\n"); ++ PrintF("printobject \n"); ++ PrintF(" print an object from a register (alias 'po')\n"); ++ PrintF("stack []\n"); ++ PrintF(" dump stack content, default dump 10 words)\n"); ++ PrintF("mem
[]\n"); ++ PrintF(" dump memory content, default dump 10 words)\n"); ++ PrintF("dump []\n"); ++ PrintF( ++ " dump memory content without pretty printing JS objects, default " ++ "dump 10 words)\n"); ++ PrintF("flags\n"); ++ PrintF(" print flags\n"); ++ PrintF("disasm []\n"); ++ PrintF("disasm [
]\n"); ++ PrintF("disasm [[
] ]\n"); ++ PrintF(" disassemble code, default is 10 instructions\n"); ++ PrintF(" from pc (alias 'di')\n"); ++ PrintF("gdb\n"); ++ PrintF(" enter gdb\n"); ++ PrintF("break
\n"); ++ PrintF(" set a break point on the address\n"); ++ PrintF("del\n"); ++ PrintF(" delete the breakpoint\n"); ++ PrintF("stop feature:\n"); ++ PrintF(" Description:\n"); ++ PrintF(" Stops are debug instructions inserted by\n"); ++ PrintF(" the Assembler::stop() function.\n"); ++ PrintF(" When hitting a stop, the Simulator will\n"); ++ PrintF(" stop and give control to the Debugger.\n"); ++ PrintF(" All stop codes are watched:\n"); ++ PrintF(" - They can be enabled / disabled: the Simulator\n"); ++ PrintF(" will / won't stop when hitting them.\n"); ++ PrintF(" - The Simulator keeps track of how many times they \n"); ++ PrintF(" are met. (See the info command.) Going over a\n"); ++ PrintF(" disabled stop still increases its counter. \n"); ++ PrintF(" Commands:\n"); ++ PrintF(" stop info all/ : print infos about number \n"); ++ PrintF(" or all stop(s).\n"); ++ PrintF(" stop enable/disable all/ : enables / disables\n"); ++ PrintF(" all or number stop(s)\n"); ++ PrintF(" stop unstop\n"); ++ PrintF(" ignore the stop instruction at the current location\n"); ++ PrintF(" from now on\n"); ++ } else { ++ PrintF("Unknown command: %s\n", cmd); ++ } ++ } ++ } ++ ++ // Add all the breakpoints back to stop execution and enter the debugger ++ // shell when hit. ++ RedoBreakpoints(); ++ ++#undef COMMAND_SIZE ++#undef ARG_SIZE ++ ++#undef STR ++#undef XSTR ++} ++ ++bool Simulator::ICacheMatch(void* one, void* two) { ++ DCHECK_EQ(reinterpret_cast(one) & CachePage::kPageMask, 0); ++ DCHECK_EQ(reinterpret_cast(two) & CachePage::kPageMask, 0); ++ return one == two; ++} ++ ++static uint32_t ICacheHash(void* key) { ++ return static_cast(reinterpret_cast(key)) >> 2; ++} ++ ++static bool AllOnOnePage(uintptr_t start, size_t size) { ++ intptr_t start_page = (start & ~CachePage::kPageMask); ++ intptr_t end_page = ((start + size) & ~CachePage::kPageMask); ++ return start_page == end_page; ++} ++ ++void Simulator::set_last_debugger_input(char* input) { ++ DeleteArray(last_debugger_input_); ++ last_debugger_input_ = input; ++} ++ ++void Simulator::SetRedirectInstruction(Instruction* instruction) { ++ instruction->SetInstructionBits(rtCallRedirInstr); ++} ++ ++void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache, ++ void* start_addr, size_t size) { ++ int64_t start = reinterpret_cast(start_addr); ++ int64_t intra_line = (start & CachePage::kLineMask); ++ start -= intra_line; ++ size += intra_line; ++ size = ((size - 1) | CachePage::kLineMask) + 1; ++ int offset = (start & CachePage::kPageMask); ++ while (!AllOnOnePage(start, size - 1)) { ++ int bytes_to_flush = CachePage::kPageSize - offset; ++ FlushOnePage(i_cache, start, bytes_to_flush); ++ start += bytes_to_flush; ++ size -= bytes_to_flush; ++ DCHECK_EQ((int64_t)0, start & CachePage::kPageMask); ++ offset = 0; ++ } ++ if (size != 0) { ++ FlushOnePage(i_cache, start, size); ++ } ++} ++ ++CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache, ++ void* page) { ++ base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page)); ++ if (entry->value == nullptr) { ++ CachePage* new_page = new CachePage(); ++ entry->value = new_page; ++ } ++ return reinterpret_cast(entry->value); ++} ++ ++// Flush from start up to and not including start + size. 
++void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache, ++ intptr_t start, size_t size) { ++ DCHECK_LE(size, CachePage::kPageSize); ++ DCHECK(AllOnOnePage(start, size - 1)); ++ DCHECK_EQ(start & CachePage::kLineMask, 0); ++ DCHECK_EQ(size & CachePage::kLineMask, 0); ++ void* page = reinterpret_cast(start & (~CachePage::kPageMask)); ++ int offset = (start & CachePage::kPageMask); ++ CachePage* cache_page = GetCachePage(i_cache, page); ++ char* valid_bytemap = cache_page->ValidityByte(offset); ++ memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift); ++} ++ ++void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache, ++ Instruction* instr) { ++ int64_t address = reinterpret_cast(instr); ++ void* page = reinterpret_cast(address & (~CachePage::kPageMask)); ++ void* line = reinterpret_cast(address & (~CachePage::kLineMask)); ++ int offset = (address & CachePage::kPageMask); ++ CachePage* cache_page = GetCachePage(i_cache, page); ++ char* cache_valid_byte = cache_page->ValidityByte(offset); ++ bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID); ++ char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask); ++ if (cache_hit) { ++ // Check that the data in memory matches the contents of the I-cache. ++ CHECK_EQ(0, memcmp(reinterpret_cast(instr), ++ cache_page->CachedData(offset), kInstrSize)); ++ } else { ++ // Cache miss. Load memory into the cache. ++ memcpy(cached_line, line, CachePage::kLineLength); ++ *cache_valid_byte = CachePage::LINE_VALID; ++ } ++} ++ ++Simulator::Simulator(Isolate* isolate) : isolate_(isolate) { ++ // Set up simulator support first. Some of this information is needed to ++ // setup the architecture state. ++ stack_size_ = FLAG_sim_stack_size * KB; ++ stack_ = reinterpret_cast(malloc(stack_size_)); ++ pc_modified_ = false; ++ icount_ = 0; ++ break_count_ = 0; ++ break_pc_ = nullptr; ++ break_instr_ = 0; ++ ++ // Set up architecture state. ++ // All registers are initialized to zero to start with. ++ for (int i = 0; i < kNumSimuRegisters; i++) { ++ registers_[i] = 0; ++ } ++ for (int i = 0; i < kNumFPURegisters; i++) { ++ FPUregisters_[i] = 0; ++ } ++ for (int i = 0; i < kNumCFRegisters; i++) { ++ CFregisters_[i] = 0; ++ } ++ ++ FCSR_ = 0; ++ ++ // The sp is initialized to point to the bottom (high address) of the ++ // allocated stack area. To be safe in potential stack underflows we leave ++ // some buffer below. ++ registers_[sp] = reinterpret_cast(stack_) + stack_size_ - 64; ++ // The ra and pc are initialized to a known bad value that will cause an ++ // access violation if the simulator ever tries to execute it. ++ registers_[pc] = bad_ra; ++ registers_[ra] = bad_ra; ++ ++ last_debugger_input_ = nullptr; ++} ++ ++Simulator::~Simulator() { ++ GlobalMonitor::Get()->RemoveLinkedAddress(&global_monitor_thread_); ++ free(stack_); ++} ++ ++// Get the active Simulator for the current thread. ++Simulator* Simulator::current(Isolate* isolate) { ++ v8::internal::Isolate::PerIsolateThreadData* isolate_data = ++ isolate->FindOrAllocatePerThreadDataForThisThread(); ++ DCHECK_NOT_NULL(isolate_data); ++ ++ Simulator* sim = isolate_data->simulator(); ++ if (sim == nullptr) { ++ // TODO(146): delete the simulator object when a thread/isolate goes away. ++ sim = new Simulator(isolate); ++ isolate_data->set_simulator(sim); ++ } ++ return sim; ++} ++ ++// Sets the register in the architecture state. It will also deal with updating ++// Simulator internal state for special registers such as PC. 
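++// For example, set_register(pc, target) also sets pc_modified_, so the
++// execute loop will not auto-advance past the current instruction, and any
++// write to register 0 (the hard-wired zero register) is silently dropped.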
++void Simulator::set_register(int reg, int64_t value) { ++ DCHECK((reg >= 0) && (reg < kNumSimuRegisters)); ++ if (reg == pc) { ++ pc_modified_ = true; ++ } ++ ++ // Zero register always holds 0. ++ registers_[reg] = (reg == 0) ? 0 : value; ++} ++ ++void Simulator::set_dw_register(int reg, const int* dbl) { ++ DCHECK((reg >= 0) && (reg < kNumSimuRegisters)); ++ registers_[reg] = dbl[1]; ++ registers_[reg] = registers_[reg] << 32; ++ registers_[reg] += dbl[0]; ++} ++ ++void Simulator::set_fpu_register(int fpureg, int64_t value) { ++ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); ++ FPUregisters_[fpureg] = value; ++} ++ ++void Simulator::set_fpu_register_word(int fpureg, int32_t value) { ++ // Set ONLY lower 32-bits, leaving upper bits untouched. ++ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); ++ int32_t* pword; ++ pword = reinterpret_cast(&FPUregisters_[fpureg]); ++ ++ *pword = value; ++} ++ ++void Simulator::set_fpu_register_hi_word(int fpureg, int32_t value) { ++ // Set ONLY upper 32-bits, leaving lower bits untouched. ++ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); ++ int32_t* phiword; ++ phiword = (reinterpret_cast(&FPUregisters_[fpureg])) + 1; ++ ++ *phiword = value; ++} ++ ++void Simulator::set_fpu_register_float(int fpureg, float value) { ++ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); ++ *bit_cast(&FPUregisters_[fpureg]) = value; ++} ++ ++void Simulator::set_fpu_register_double(int fpureg, double value) { ++ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); ++ *bit_cast(&FPUregisters_[fpureg]) = value; ++} ++ ++void Simulator::set_cf_register(int cfreg, bool value) { ++ DCHECK((cfreg >= 0) && (cfreg < kNumCFRegisters)); ++ CFregisters_[cfreg] = value; ++} ++ ++// Get the register from the architecture state. This function does handle ++// the special case of accessing the PC register. ++int64_t Simulator::get_register(int reg) const { ++ DCHECK((reg >= 0) && (reg < kNumSimuRegisters)); ++ if (reg == 0) ++ return 0; ++ else ++ return registers_[reg] + ((reg == pc) ? Instruction::kPCReadOffset : 0); ++} ++ ++double Simulator::get_double_from_register_pair(int reg) { ++ // TODO(plind): bad ABI stuff, refactor or remove. ++ DCHECK((reg >= 0) && (reg < kNumSimuRegisters)); ++ ++ double dm_val = 0.0; ++ // Read the bits from the unsigned integer register_[] array ++ // into the double precision floating point value and return it. 
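++ // (The memcpy pair below is plain bit-for-bit type punning: e.g. a register
++ // holding 0x3FF0000000000000 is returned as the double 1.0.)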
++ char buffer[sizeof(registers_[0])]; ++ memcpy(buffer, ®isters_[reg], sizeof(registers_[0])); ++ memcpy(&dm_val, buffer, sizeof(registers_[0])); ++ return (dm_val); ++} ++ ++int64_t Simulator::get_fpu_register(int fpureg) const { ++ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); ++ return FPUregisters_[fpureg]; ++} ++ ++int32_t Simulator::get_fpu_register_word(int fpureg) const { ++ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); ++ return static_cast(FPUregisters_[fpureg] & 0xFFFFFFFF); ++} ++ ++int32_t Simulator::get_fpu_register_signed_word(int fpureg) const { ++ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); ++ return static_cast(FPUregisters_[fpureg] & 0xFFFFFFFF); ++} ++ ++int32_t Simulator::get_fpu_register_hi_word(int fpureg) const { ++ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); ++ return static_cast((FPUregisters_[fpureg] >> 32) & 0xFFFFFFFF); ++} ++ ++float Simulator::get_fpu_register_float(int fpureg) const { ++ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); ++ return *bit_cast(const_cast(&FPUregisters_[fpureg])); ++} ++ ++double Simulator::get_fpu_register_double(int fpureg) const { ++ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); ++ return *bit_cast(&FPUregisters_[fpureg]); ++} ++ ++bool Simulator::get_cf_register(int cfreg) const { ++ DCHECK((cfreg >= 0) && (cfreg < kNumCFRegisters)); ++ return CFregisters_[cfreg]; ++} ++ ++// Runtime FP routines take up to two double arguments and zero ++// or one integer arguments. All are constructed here, ++// from a0-a3 or fa0 and fa1 (n64). ++void Simulator::GetFpArgs(double* x, double* y, int32_t* z) { ++ const int fparg2 = f1; ++ *x = get_fpu_register_double(f0); ++ *y = get_fpu_register_double(fparg2); ++ *z = static_cast(get_register(a2)); ++} ++ ++// The return value is either in v0/v1 or f0. ++void Simulator::SetFpResult(const double& result) { ++ set_fpu_register_double(0, result); ++} ++ ++// Helper functions for setting and testing the FCSR register's bits. ++void Simulator::set_fcsr_bit(uint32_t cc, bool value) { ++ if (value) { ++ FCSR_ |= (1 << cc); ++ } else { ++ FCSR_ &= ~(1 << cc); ++ } ++} ++ ++bool Simulator::test_fcsr_bit(uint32_t cc) { return FCSR_ & (1 << cc); } ++ ++void Simulator::set_fcsr_rounding_mode(FPURoundingMode mode) { ++ FCSR_ |= mode & kFPURoundingModeMask; ++} ++ ++unsigned int Simulator::get_fcsr_rounding_mode() { ++ return FCSR_ & kFPURoundingModeMask; ++} ++ ++// Sets the rounding error codes in FCSR based on the result of the rounding. ++// Returns true if the operation was invalid. ++bool Simulator::set_fcsr_round_error(double original, double rounded) { ++ bool ret = false; ++ double max_int32 = std::numeric_limits::max(); ++ double min_int32 = std::numeric_limits::min(); ++ ++ if (!std::isfinite(original) || !std::isfinite(rounded)) { ++ set_fcsr_bit(kFCSRInvalidOpFlagBit, true); ++ ret = true; ++ } ++ ++ if (original != rounded) { ++ set_fcsr_bit(kFCSRInexactFlagBit, true); ++ } ++ ++ if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) { ++ set_fcsr_bit(kFCSRUnderflowFlagBit, true); ++ ret = true; ++ } ++ ++ if (rounded > max_int32 || rounded < min_int32) { ++ set_fcsr_bit(kFCSROverflowFlagBit, true); ++ // The reference is not really clear but it seems this is required: ++ set_fcsr_bit(kFCSRInvalidOpFlagBit, true); ++ ret = true; ++ } ++ ++ return ret; ++} ++ ++// Sets the rounding error codes in FCSR based on the result of the rounding. ++// Returns true if the operation was invalid. 
++bool Simulator::set_fcsr_round64_error(double original, double rounded) { ++ bool ret = false; ++ // The value of INT64_MAX (2^63-1) can't be represented as double exactly, ++ // loading the most accurate representation into max_int64, which is 2^63. ++ double max_int64 = std::numeric_limits::max(); ++ double min_int64 = std::numeric_limits::min(); ++ ++ if (!std::isfinite(original) || !std::isfinite(rounded)) { ++ set_fcsr_bit(kFCSRInvalidOpFlagBit, true); ++ ret = true; ++ } ++ ++ if (original != rounded) { ++ set_fcsr_bit(kFCSRInexactFlagBit, true); ++ } ++ ++ if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) { ++ set_fcsr_bit(kFCSRUnderflowFlagBit, true); ++ ret = true; ++ } ++ ++ if (rounded >= max_int64 || rounded < min_int64) { ++ set_fcsr_bit(kFCSROverflowFlagBit, true); ++ // The reference is not really clear but it seems this is required: ++ set_fcsr_bit(kFCSRInvalidOpFlagBit, true); ++ ret = true; ++ } ++ ++ return ret; ++} ++ ++// Sets the rounding error codes in FCSR based on the result of the rounding. ++// Returns true if the operation was invalid. ++bool Simulator::set_fcsr_round_error(float original, float rounded) { ++ bool ret = false; ++ double max_int32 = std::numeric_limits::max(); ++ double min_int32 = std::numeric_limits::min(); ++ ++ if (!std::isfinite(original) || !std::isfinite(rounded)) { ++ set_fcsr_bit(kFCSRInvalidOpFlagBit, true); ++ ret = true; ++ } ++ ++ if (original != rounded) { ++ set_fcsr_bit(kFCSRInexactFlagBit, true); ++ } ++ ++ if (rounded < FLT_MIN && rounded > -FLT_MIN && rounded != 0) { ++ set_fcsr_bit(kFCSRUnderflowFlagBit, true); ++ ret = true; ++ } ++ ++ if (rounded > max_int32 || rounded < min_int32) { ++ set_fcsr_bit(kFCSROverflowFlagBit, true); ++ // The reference is not really clear but it seems this is required: ++ set_fcsr_bit(kFCSRInvalidOpFlagBit, true); ++ ret = true; ++ } ++ ++ return ret; ++} ++ ++void Simulator::set_fpu_register_word_invalid_result(float original, ++ float rounded) { ++ double max_int32 = std::numeric_limits::max(); ++ double min_int32 = std::numeric_limits::min(); ++ if (std::isnan(original)) { ++ set_fpu_register_word(fd_reg(), 0); ++ } else if (rounded > max_int32) { ++ set_fpu_register_word(fd_reg(), kFPUInvalidResult); ++ } else if (rounded < min_int32) { ++ set_fpu_register_word(fd_reg(), kFPUInvalidResultNegative); ++ } else { ++ UNREACHABLE(); ++ } ++} ++ ++void Simulator::set_fpu_register_invalid_result(float original, float rounded) { ++ double max_int32 = std::numeric_limits::max(); ++ double min_int32 = std::numeric_limits::min(); ++ if (std::isnan(original)) { ++ set_fpu_register(fd_reg(), 0); ++ } else if (rounded > max_int32) { ++ set_fpu_register(fd_reg(), kFPUInvalidResult); ++ } else if (rounded < min_int32) { ++ set_fpu_register(fd_reg(), kFPUInvalidResultNegative); ++ } else { ++ UNREACHABLE(); ++ } ++} ++ ++void Simulator::set_fpu_register_invalid_result64(float original, ++ float rounded) { ++ // The value of INT64_MAX (2^63-1) can't be represented as double exactly, ++ // loading the most accurate representation into max_int64, which is 2^63. 
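++ // Concretely, static_cast<double>(INT64_MAX) == 9223372036854775808.0
++ // (2^63), so the overflow check below must use >= max_int64, whereas the
++ // int32 variants above can safely use > max_int32.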
++ double max_int64 = std::numeric_limits::max(); ++ double min_int64 = std::numeric_limits::min(); ++ if (std::isnan(original)) { ++ set_fpu_register(fd_reg(), 0); ++ } else if (rounded >= max_int64) { ++ set_fpu_register(fd_reg(), kFPU64InvalidResult); ++ } else if (rounded < min_int64) { ++ set_fpu_register(fd_reg(), kFPU64InvalidResultNegative); ++ } else { ++ UNREACHABLE(); ++ } ++} ++ ++void Simulator::set_fpu_register_word_invalid_result(double original, ++ double rounded) { ++ double max_int32 = std::numeric_limits::max(); ++ double min_int32 = std::numeric_limits::min(); ++ if (std::isnan(original)) { ++ set_fpu_register_word(fd_reg(), 0); ++ } else if (rounded > max_int32) { ++ set_fpu_register_word(fd_reg(), kFPUInvalidResult); ++ } else if (rounded < min_int32) { ++ set_fpu_register_word(fd_reg(), kFPUInvalidResultNegative); ++ } else { ++ UNREACHABLE(); ++ } ++} ++ ++void Simulator::set_fpu_register_invalid_result(double original, ++ double rounded) { ++ double max_int32 = std::numeric_limits::max(); ++ double min_int32 = std::numeric_limits::min(); ++ if (std::isnan(original)) { ++ set_fpu_register(fd_reg(), 0); ++ } else if (rounded > max_int32) { ++ set_fpu_register(fd_reg(), kFPUInvalidResult); ++ } else if (rounded < min_int32) { ++ set_fpu_register(fd_reg(), kFPUInvalidResultNegative); ++ } else { ++ UNREACHABLE(); ++ } ++} ++ ++void Simulator::set_fpu_register_invalid_result64(double original, ++ double rounded) { ++ // The value of INT64_MAX (2^63-1) can't be represented as double exactly, ++ // loading the most accurate representation into max_int64, which is 2^63. ++ double max_int64 = std::numeric_limits::max(); ++ double min_int64 = std::numeric_limits::min(); ++ if (std::isnan(original)) { ++ set_fpu_register(fd_reg(), 0); ++ } else if (rounded >= max_int64) { ++ set_fpu_register(fd_reg(), kFPU64InvalidResult); ++ } else if (rounded < min_int64) { ++ set_fpu_register(fd_reg(), kFPU64InvalidResultNegative); ++ } else { ++ UNREACHABLE(); ++ } ++} ++ ++// Sets the rounding error codes in FCSR based on the result of the rounding. ++// Returns true if the operation was invalid. ++bool Simulator::set_fcsr_round64_error(float original, float rounded) { ++ bool ret = false; ++ // The value of INT64_MAX (2^63-1) can't be represented as double exactly, ++ // loading the most accurate representation into max_int64, which is 2^63. ++ double max_int64 = std::numeric_limits::max(); ++ double min_int64 = std::numeric_limits::min(); ++ ++ if (!std::isfinite(original) || !std::isfinite(rounded)) { ++ set_fcsr_bit(kFCSRInvalidOpFlagBit, true); ++ ret = true; ++ } ++ ++ if (original != rounded) { ++ set_fcsr_bit(kFCSRInexactFlagBit, true); ++ } ++ ++ if (rounded < FLT_MIN && rounded > -FLT_MIN && rounded != 0) { ++ set_fcsr_bit(kFCSRUnderflowFlagBit, true); ++ ret = true; ++ } ++ ++ if (rounded >= max_int64 || rounded < min_int64) { ++ set_fcsr_bit(kFCSROverflowFlagBit, true); ++ // The reference is not really clear but it seems this is required: ++ set_fcsr_bit(kFCSRInvalidOpFlagBit, true); ++ ret = true; ++ } ++ ++ return ret; ++} ++ ++// For ftint instructions only ++void Simulator::round_according_to_fcsr(double toRound, double* rounded, ++ int32_t* rounded_int) { ++ // 0 RN (round to nearest): Round a result to the nearest ++ // representable value; if the result is exactly halfway between ++ // two representable values, round to zero. 
++ ++ // 1 RZ (round toward zero): Round a result to the closest ++ // representable value whose absolute value is less than or ++ // equal to the infinitely accurate result. ++ ++ // 2 RP (round up, or toward +infinity): Round a result to the ++ // next representable value up. ++ ++ // 3 RN (round down, or toward −infinity): Round a result to ++ // the next representable value down. ++ // switch ((FCSR_ >> 8) & 3) { ++ switch (FCSR_ & kFPURoundingModeMask) { ++ case kRoundToNearest: ++ *rounded = std::floor(toRound + 0.5); ++ *rounded_int = static_cast(*rounded); ++ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) { ++ // If the number is halfway between two integers, ++ // round to the even one. ++ *rounded_int -= 1; ++ *rounded -= 1.; ++ } ++ break; ++ case kRoundToZero: ++ *rounded = trunc(toRound); ++ *rounded_int = static_cast(*rounded); ++ break; ++ case kRoundToPlusInf: ++ *rounded = std::ceil(toRound); ++ *rounded_int = static_cast(*rounded); ++ break; ++ case kRoundToMinusInf: ++ *rounded = std::floor(toRound); ++ *rounded_int = static_cast(*rounded); ++ break; ++ } ++} ++ ++void Simulator::round64_according_to_fcsr(double toRound, double* rounded, ++ int64_t* rounded_int) { ++ // 0 RN (round to nearest): Round a result to the nearest ++ // representable value; if the result is exactly halfway between ++ // two representable values, round to zero. ++ ++ // 1 RZ (round toward zero): Round a result to the closest ++ // representable value whose absolute value is less than or. ++ // equal to the infinitely accurate result. ++ ++ // 2 RP (round up, or toward +infinity): Round a result to the ++ // next representable value up. ++ ++ // 3 RN (round down, or toward −infinity): Round a result to ++ // the next representable value down. ++ switch (FCSR_ & kFPURoundingModeMask) { ++ case kRoundToNearest: ++ *rounded = std::floor(toRound + 0.5); ++ *rounded_int = static_cast(*rounded); ++ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) { ++ // If the number is halfway between two integers, ++ // round to the even one. ++ *rounded_int -= 1; ++ *rounded -= 1.; ++ } ++ break; ++ case kRoundToZero: ++ *rounded = std::trunc(toRound); ++ *rounded_int = static_cast(*rounded); ++ break; ++ case kRoundToPlusInf: ++ *rounded = std::ceil(toRound); ++ *rounded_int = static_cast(*rounded); ++ break; ++ case kRoundToMinusInf: ++ *rounded = std::floor(toRound); ++ *rounded_int = static_cast(*rounded); ++ break; ++ } ++} ++ ++void Simulator::round_according_to_fcsr(float toRound, float* rounded, ++ int32_t* rounded_int) { ++ // 0 RN (round to nearest): Round a result to the nearest ++ // representable value; if the result is exactly halfway between ++ // two representable values, round to zero. ++ ++ // 1 RZ (round toward zero): Round a result to the closest ++ // representable value whose absolute value is less than or ++ // equal to the infinitely accurate result. ++ ++ // 2 RP (round up, or toward +infinity): Round a result to the ++ // next representable value up. ++ ++ // 3 RN (round down, or toward −infinity): Round a result to ++ // the next representable value down. ++ switch (FCSR_ & kFPURoundingModeMask) { ++ case kRoundToNearest: ++ *rounded = std::floor(toRound + 0.5); ++ *rounded_int = static_cast(*rounded); ++ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) { ++ // If the number is halfway between two integers, ++ // round to the even one. 
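++ // e.g. 2.5f is first rounded up to 3, detected as a tie, and adjusted
++ // back to the even value 2, while 3.5f rounds to 4 and stays there.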
++ *rounded_int -= 1; ++ *rounded -= 1.f; ++ } ++ break; ++ case kRoundToZero: ++ *rounded = std::trunc(toRound); ++ *rounded_int = static_cast(*rounded); ++ break; ++ case kRoundToPlusInf: ++ *rounded = std::ceil(toRound); ++ *rounded_int = static_cast(*rounded); ++ break; ++ case kRoundToMinusInf: ++ *rounded = std::floor(toRound); ++ *rounded_int = static_cast(*rounded); ++ break; ++ } ++} ++ ++void Simulator::round64_according_to_fcsr(float toRound, float* rounded, ++ int64_t* rounded_int) { ++ // 0 RN (round to nearest): Round a result to the nearest ++ // representable value; if the result is exactly halfway between ++ // two representable values, round to zero. ++ ++ // 1 RZ (round toward zero): Round a result to the closest ++ // representable value whose absolute value is less than or. ++ // equal to the infinitely accurate result. ++ ++ // 2 RP (round up, or toward +infinity): Round a result to the ++ // next representable value up. ++ ++ // 3 RN (round down, or toward −infinity): Round a result to ++ // the next representable value down. ++ switch (FCSR_ & kFPURoundingModeMask) { ++ case kRoundToNearest: ++ *rounded = std::floor(toRound + 0.5); ++ *rounded_int = static_cast(*rounded); ++ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) { ++ // If the number is halfway between two integers, ++ // round to the even one. ++ *rounded_int -= 1; ++ *rounded -= 1.f; ++ } ++ break; ++ case kRoundToZero: ++ *rounded = trunc(toRound); ++ *rounded_int = static_cast(*rounded); ++ break; ++ case kRoundToPlusInf: ++ *rounded = std::ceil(toRound); ++ *rounded_int = static_cast(*rounded); ++ break; ++ case kRoundToMinusInf: ++ *rounded = std::floor(toRound); ++ *rounded_int = static_cast(*rounded); ++ break; ++ } ++} ++ ++// Raw access to the PC register. ++void Simulator::set_pc(int64_t value) { ++ pc_modified_ = true; ++ registers_[pc] = value; ++} ++ ++bool Simulator::has_bad_pc() const { ++ return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc)); ++} ++ ++// Raw access to the PC register without the special adjustment when reading. ++int64_t Simulator::get_pc() const { return registers_[pc]; } ++ ++// TODO(plind): refactor this messy debug code when we do unaligned access. ++void Simulator::DieOrDebug() { ++ if ((1)) { // Flag for this was removed. 
++ La64Debugger dbg(this); ++ dbg.Debug(); ++ } else { ++ base::OS::Abort(); ++ } ++} ++ ++void Simulator::TraceRegWr(int64_t value, TraceType t) { ++ if (::v8::internal::FLAG_trace_sim) { ++ union { ++ int64_t fmt_int64; ++ int32_t fmt_int32[2]; ++ float fmt_float[2]; ++ double fmt_double; ++ } v; ++ v.fmt_int64 = value; ++ ++ switch (t) { ++ case WORD: ++ SNPrintF(trace_buf_, ++ "%016" PRIx64 " (%" PRId64 ") int32:%" PRId32 ++ " uint32:%" PRIu32, ++ v.fmt_int64, icount_, v.fmt_int32[0], v.fmt_int32[0]); ++ break; ++ case DWORD: ++ SNPrintF(trace_buf_, ++ "%016" PRIx64 " (%" PRId64 ") int64:%" PRId64 ++ " uint64:%" PRIu64, ++ value, icount_, value, value); ++ break; ++ case FLOAT: ++ SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") flt:%e", ++ v.fmt_int64, icount_, v.fmt_float[0]); ++ break; ++ case DOUBLE: ++ SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") dbl:%e", ++ v.fmt_int64, icount_, v.fmt_double); ++ break; ++ case FLOAT_DOUBLE: ++ SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") flt:%e dbl:%e", ++ v.fmt_int64, icount_, v.fmt_float[0], v.fmt_double); ++ break; ++ case WORD_DWORD: ++ SNPrintF(trace_buf_, ++ "%016" PRIx64 " (%" PRId64 ") int32:%" PRId32 ++ " uint32:%" PRIu32 " int64:%" PRId64 " uint64:%" PRIu64, ++ v.fmt_int64, icount_, v.fmt_int32[0], v.fmt_int32[0], ++ v.fmt_int64, v.fmt_int64); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } ++} ++ ++// TODO(plind): consider making icount_ printing a flag option. ++void Simulator::TraceMemRd(int64_t addr, int64_t value, TraceType t) { ++ if (::v8::internal::FLAG_trace_sim) { ++ union { ++ int64_t fmt_int64; ++ int32_t fmt_int32[2]; ++ float fmt_float[2]; ++ double fmt_double; ++ } v; ++ v.fmt_int64 = value; ++ ++ switch (t) { ++ case WORD: ++ SNPrintF(trace_buf_, ++ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64 ++ ") int32:%" PRId32 " uint32:%" PRIu32, ++ v.fmt_int64, addr, icount_, v.fmt_int32[0], v.fmt_int32[0]); ++ break; ++ case DWORD: ++ SNPrintF(trace_buf_, ++ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64 ++ ") int64:%" PRId64 " uint64:%" PRIu64, ++ value, addr, icount_, value, value); ++ break; ++ case FLOAT: ++ SNPrintF(trace_buf_, ++ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64 ++ ") flt:%e", ++ v.fmt_int64, addr, icount_, v.fmt_float[0]); ++ break; ++ case DOUBLE: ++ SNPrintF(trace_buf_, ++ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64 ++ ") dbl:%e", ++ v.fmt_int64, addr, icount_, v.fmt_double); ++ break; ++ case FLOAT_DOUBLE: ++ SNPrintF(trace_buf_, ++ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64 ++ ") flt:%e dbl:%e", ++ v.fmt_int64, addr, icount_, v.fmt_float[0], v.fmt_double); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } ++} ++ ++void Simulator::TraceMemWr(int64_t addr, int64_t value, TraceType t) { ++ if (::v8::internal::FLAG_trace_sim) { ++ switch (t) { ++ case BYTE: ++ SNPrintF(trace_buf_, ++ " %02" PRIx8 " --> [%016" PRIx64 "] (%" PRId64 ++ ")", ++ static_cast(value), addr, icount_); ++ break; ++ case HALF: ++ SNPrintF(trace_buf_, ++ " %04" PRIx16 " --> [%016" PRIx64 "] (%" PRId64 ++ ")", ++ static_cast(value), addr, icount_); ++ break; ++ case WORD: ++ SNPrintF(trace_buf_, ++ " %08" PRIx32 " --> [%016" PRIx64 "] (%" PRId64 ")", ++ static_cast(value), addr, icount_); ++ break; ++ case DWORD: ++ SNPrintF(trace_buf_, ++ "%016" PRIx64 " --> [%016" PRIx64 "] (%" PRId64 " )", ++ value, addr, icount_); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } ++} ++ ++template ++void Simulator::TraceMemRd(int64_t addr, T value) { ++ if (::v8::internal::FLAG_trace_sim) { ++ switch (sizeof(T)) { ++ case 
1: ++ SNPrintF(trace_buf_, ++ "%08" PRIx8 " <-- [%08" PRIx64 "] (%" PRIu64 ++ ") int8:%" PRId8 " uint8:%" PRIu8, ++ static_cast(value), addr, icount_, ++ static_cast(value), static_cast(value)); ++ break; ++ case 2: ++ SNPrintF(trace_buf_, ++ "%08" PRIx16 " <-- [%08" PRIx64 "] (%" PRIu64 ++ ") int16:%" PRId16 " uint16:%" PRIu16, ++ static_cast(value), addr, icount_, ++ static_cast(value), static_cast(value)); ++ break; ++ case 4: ++ SNPrintF(trace_buf_, ++ "%08" PRIx32 " <-- [%08" PRIx64 "] (%" PRIu64 ++ ") int32:%" PRId32 " uint32:%" PRIu32, ++ static_cast(value), addr, icount_, ++ static_cast(value), static_cast(value)); ++ break; ++ case 8: ++ SNPrintF(trace_buf_, ++ "%08" PRIx64 " <-- [%08" PRIx64 "] (%" PRIu64 ++ ") int64:%" PRId64 " uint64:%" PRIu64, ++ static_cast(value), addr, icount_, ++ static_cast(value), static_cast(value)); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } ++} ++ ++template ++void Simulator::TraceMemWr(int64_t addr, T value) { ++ if (::v8::internal::FLAG_trace_sim) { ++ switch (sizeof(T)) { ++ case 1: ++ SNPrintF(trace_buf_, ++ " %02" PRIx8 " --> [%08" PRIx64 "] (%" PRIu64 ")", ++ static_cast(value), addr, icount_); ++ break; ++ case 2: ++ SNPrintF(trace_buf_, ++ " %04" PRIx16 " --> [%08" PRIx64 "] (%" PRIu64 ")", ++ static_cast(value), addr, icount_); ++ break; ++ case 4: ++ SNPrintF(trace_buf_, ++ "%08" PRIx32 " --> [%08" PRIx64 "] (%" PRIu64 ")", ++ static_cast(value), addr, icount_); ++ break; ++ case 8: ++ SNPrintF(trace_buf_, ++ "%16" PRIx64 " --> [%08" PRIx64 "] (%" PRIu64 ")", ++ static_cast(value), addr, icount_); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } ++} ++ ++// TODO(plind): sign-extend and zero-extend not implmented properly ++// on all the ReadXX functions, I don't think re-interpret cast does it. ++int32_t Simulator::ReadW(int64_t addr, Instruction* instr, TraceType t) { ++ if (addr >= 0 && addr < 0x400) { ++ // This has to be a nullptr-dereference, drop into debugger. ++ PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR ++ " \n", ++ addr, reinterpret_cast(instr)); ++ DieOrDebug(); ++ } ++ /* if ((addr & 0x3) == 0)*/ { ++ local_monitor_.NotifyLoad(); ++ int32_t* ptr = reinterpret_cast(addr); ++ TraceMemRd(addr, static_cast(*ptr), t); ++ return *ptr; ++ } ++ // PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", ++ // addr, ++ // reinterpret_cast(instr)); ++ // DieOrDebug(); ++ // return 0; ++} ++ ++uint32_t Simulator::ReadWU(int64_t addr, Instruction* instr) { ++ if (addr >= 0 && addr < 0x400) { ++ // This has to be a nullptr-dereference, drop into debugger. ++ PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR ++ " \n", ++ addr, reinterpret_cast(instr)); ++ DieOrDebug(); ++ } ++ // if ((addr & 0x3) == 0) { ++ local_monitor_.NotifyLoad(); ++ uint32_t* ptr = reinterpret_cast(addr); ++ TraceMemRd(addr, static_cast(*ptr), WORD); ++ return *ptr; ++ // } ++ // PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr, ++ // reinterpret_cast(instr)); ++ // DieOrDebug(); ++ // return 0; ++} ++ ++void Simulator::WriteW(int64_t addr, int32_t value, Instruction* instr) { ++ if (addr >= 0 && addr < 0x400) { ++ // This has to be a nullptr-dereference, drop into debugger. 
++ PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR ++ " \n", ++ addr, reinterpret_cast(instr)); ++ DieOrDebug(); ++ } ++ /*if ((addr & 0x3) == 0)*/ { ++ local_monitor_.NotifyStore(); ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_); ++ TraceMemWr(addr, value, WORD); ++ int* ptr = reinterpret_cast(addr); ++ *ptr = value; ++ return; ++ } ++ // PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", ++ // addr, ++ // reinterpret_cast(instr)); ++ // DieOrDebug(); ++} ++ ++void Simulator::WriteConditionalW(int64_t addr, int32_t value, ++ Instruction* instr, int32_t rk_reg) { ++ if (addr >= 0 && addr < 0x400) { ++ // This has to be a nullptr-dereference, drop into debugger. ++ PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR ++ " \n", ++ addr, reinterpret_cast(instr)); ++ DieOrDebug(); ++ } ++ if ((addr & 0x3) == 0) { ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ if (local_monitor_.NotifyStoreConditional(addr, TransactionSize::Word) && ++ GlobalMonitor::Get()->NotifyStoreConditional_Locked( ++ addr, &global_monitor_thread_)) { ++ local_monitor_.NotifyStore(); ++ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_); ++ TraceMemWr(addr, value, WORD); ++ int* ptr = reinterpret_cast(addr); ++ *ptr = value; ++ set_register(rk_reg, 1); ++ } else { ++ set_register(rk_reg, 0); ++ } ++ return; ++ } ++ PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr, ++ reinterpret_cast(instr)); ++ DieOrDebug(); ++} ++ ++int64_t Simulator::Read2W(int64_t addr, Instruction* instr) { ++ if (addr >= 0 && addr < 0x400) { ++ // This has to be a nullptr-dereference, drop into debugger. ++ PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR ++ " \n", ++ addr, reinterpret_cast(instr)); ++ DieOrDebug(); ++ } ++ /* if ((addr & kPointerAlignmentMask) == 0)*/ { ++ local_monitor_.NotifyLoad(); ++ int64_t* ptr = reinterpret_cast(addr); ++ TraceMemRd(addr, *ptr); ++ return *ptr; ++ } ++ // PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", ++ // addr, ++ // reinterpret_cast(instr)); ++ // DieOrDebug(); ++ // return 0; ++} ++ ++void Simulator::Write2W(int64_t addr, int64_t value, Instruction* instr) { ++ if (addr >= 0 && addr < 0x400) { ++ // This has to be a nullptr-dereference, drop into debugger. ++ PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR ++ "\n", ++ addr, reinterpret_cast(instr)); ++ DieOrDebug(); ++ } ++ /*if ((addr & kPointerAlignmentMask) == 0)*/ { ++ local_monitor_.NotifyStore(); ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_); ++ TraceMemWr(addr, value, DWORD); ++ int64_t* ptr = reinterpret_cast(addr); ++ *ptr = value; ++ return; ++ } ++ // PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", ++ // addr, ++ // reinterpret_cast(instr)); ++ // DieOrDebug(); ++} ++ ++void Simulator::WriteConditional2W(int64_t addr, int64_t value, ++ Instruction* instr, int32_t rk_reg) { ++ if (addr >= 0 && addr < 0x400) { ++ // This has to be a nullptr-dereference, drop into debugger. 
++ PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR ++ "\n", ++ addr, reinterpret_cast(instr)); ++ DieOrDebug(); ++ } ++ if ((addr & kPointerAlignmentMask) == 0) { ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ if (local_monitor_.NotifyStoreConditional(addr, ++ TransactionSize::DoubleWord) && ++ GlobalMonitor::Get()->NotifyStoreConditional_Locked( ++ addr, &global_monitor_thread_)) { ++ local_monitor_.NotifyStore(); ++ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_); ++ TraceMemWr(addr, value, DWORD); ++ int64_t* ptr = reinterpret_cast(addr); ++ *ptr = value; ++ set_register(rk_reg, 1); ++ } else { ++ set_register(rk_reg, 0); ++ } ++ return; ++ } ++ PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr, ++ reinterpret_cast(instr)); ++ DieOrDebug(); ++} ++ ++double Simulator::ReadD(int64_t addr, Instruction* instr) { ++ /*if ((addr & kDoubleAlignmentMask) == 0)*/ { ++ local_monitor_.NotifyLoad(); ++ double* ptr = reinterpret_cast(addr); ++ return *ptr; ++ } ++ // PrintF("Unaligned (double) read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR ++ // "\n", ++ // addr, reinterpret_cast(instr)); ++ // base::OS::Abort(); ++ // return 0; ++} ++ ++void Simulator::WriteD(int64_t addr, double value, Instruction* instr) { ++ /*if ((addr & kDoubleAlignmentMask) == 0)*/ { ++ local_monitor_.NotifyStore(); ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_); ++ double* ptr = reinterpret_cast(addr); ++ *ptr = value; ++ return; ++ } ++ // PrintF("Unaligned (double) write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR ++ // "\n", ++ // addr, reinterpret_cast(instr)); ++ // DieOrDebug(); ++} ++ ++uint16_t Simulator::ReadHU(int64_t addr, Instruction* instr) { ++ // if ((addr & 1) == 0) { ++ local_monitor_.NotifyLoad(); ++ uint16_t* ptr = reinterpret_cast(addr); ++ TraceMemRd(addr, static_cast(*ptr)); ++ return *ptr; ++ // } ++ // PrintF("Unaligned unsigned halfword read at 0x%08" PRIx64 ++ // " , pc=0x%08" V8PRIxPTR "\n", ++ // addr, reinterpret_cast(instr)); ++ // DieOrDebug(); ++ // return 0; ++} ++ ++int16_t Simulator::ReadH(int64_t addr, Instruction* instr) { ++ // if ((addr & 1) == 0) { ++ local_monitor_.NotifyLoad(); ++ int16_t* ptr = reinterpret_cast(addr); ++ TraceMemRd(addr, static_cast(*ptr)); ++ return *ptr; ++ // } ++ // PrintF("Unaligned signed halfword read at 0x%08" PRIx64 ++ // " , pc=0x%08" V8PRIxPTR "\n", ++ // addr, reinterpret_cast(instr)); ++ // DieOrDebug(); ++ // return 0; ++} ++ ++void Simulator::WriteH(int64_t addr, uint16_t value, Instruction* instr) { ++ // if ((addr & 1) == 0) { ++ local_monitor_.NotifyStore(); ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_); ++ TraceMemWr(addr, value, HALF); ++ uint16_t* ptr = reinterpret_cast(addr); ++ *ptr = value; ++ return; ++ // } ++ // PrintF("Unaligned unsigned halfword write at 0x%08" PRIx64 ++ // " , pc=0x%08" V8PRIxPTR "\n", ++ // addr, reinterpret_cast(instr)); ++ // DieOrDebug(); ++} ++ ++void Simulator::WriteH(int64_t addr, int16_t value, Instruction* instr) { ++ // if ((addr & 1) == 0) { ++ local_monitor_.NotifyStore(); ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_); ++ TraceMemWr(addr, value, HALF); ++ int16_t* ptr = reinterpret_cast(addr); ++ *ptr = value; ++ return; ++ // } ++ // PrintF("Unaligned halfword write at 0x%08" 
PRIx64 " , pc=0x%08" V8PRIxPTR ++ // "\n", ++ // addr, reinterpret_cast(instr)); ++ // DieOrDebug(); ++} ++ ++uint32_t Simulator::ReadBU(int64_t addr) { ++ local_monitor_.NotifyLoad(); ++ uint8_t* ptr = reinterpret_cast(addr); ++ TraceMemRd(addr, static_cast(*ptr)); ++ return *ptr & 0xFF; ++} ++ ++int32_t Simulator::ReadB(int64_t addr) { ++ local_monitor_.NotifyLoad(); ++ int8_t* ptr = reinterpret_cast(addr); ++ TraceMemRd(addr, static_cast(*ptr)); ++ return *ptr; ++} ++ ++void Simulator::WriteB(int64_t addr, uint8_t value) { ++ local_monitor_.NotifyStore(); ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_); ++ TraceMemWr(addr, value, BYTE); ++ uint8_t* ptr = reinterpret_cast(addr); ++ *ptr = value; ++} ++ ++void Simulator::WriteB(int64_t addr, int8_t value) { ++ local_monitor_.NotifyStore(); ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_); ++ TraceMemWr(addr, value, BYTE); ++ int8_t* ptr = reinterpret_cast(addr); ++ *ptr = value; ++} ++ ++template ++T Simulator::ReadMem(int64_t addr, Instruction* instr) { ++ int alignment_mask = (1 << sizeof(T)) - 1; ++ if ((addr & alignment_mask) == 0) { ++ local_monitor_.NotifyLoad(); ++ T* ptr = reinterpret_cast(addr); ++ TraceMemRd(addr, *ptr); ++ return *ptr; ++ } ++ PrintF("Unaligned read of type sizeof(%ld) at 0x%08lx, pc=0x%08" V8PRIxPTR ++ "\n", ++ sizeof(T), addr, reinterpret_cast(instr)); ++ base::OS::Abort(); ++ return 0; ++} ++ ++template ++void Simulator::WriteMem(int64_t addr, T value, Instruction* instr) { ++ int alignment_mask = (1 << sizeof(T)) - 1; ++ if ((addr & alignment_mask) == 0) { ++ local_monitor_.NotifyStore(); ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_); ++ T* ptr = reinterpret_cast(addr); ++ *ptr = value; ++ TraceMemWr(addr, value); ++ return; ++ } ++ PrintF("Unaligned write of type sizeof(%ld) at 0x%08lx, pc=0x%08" V8PRIxPTR ++ "\n", ++ sizeof(T), addr, reinterpret_cast(instr)); ++ base::OS::Abort(); ++} ++ ++// Returns the limit of the stack area to enable checking for stack overflows. ++uintptr_t Simulator::StackLimit(uintptr_t c_limit) const { ++ // The simulator uses a separate JS stack. If we have exhausted the C stack, ++ // we also drop down the JS limit to reflect the exhaustion on the JS stack. ++ if (GetCurrentStackPosition() < c_limit) { ++ return reinterpret_cast(get_sp()); ++ } ++ ++ // Otherwise the limit is the JS stack. Leave a safety margin of 1024 bytes ++ // to prevent overrunning the stack when pushing values. ++ return reinterpret_cast(stack_) + 1024; ++} ++ ++// Unsupported instructions use Format to print an error and stop execution. ++void Simulator::Format(Instruction* instr, const char* format) { ++ PrintF("Simulator found unsupported instruction:\n 0x%08" PRIxPTR " : %s\n", ++ reinterpret_cast(instr), format); ++ UNIMPLEMENTED(); ++} ++ ++// Calls into the V8 runtime are based on this very simple interface. ++// Note: To be able to return two values from some calls the code in runtime.cc ++// uses the ObjectPair which is essentially two 32-bit values stuffed into a ++// 64-bit value. With the code below we assume that all runtime calls return ++// 64 bits of result. If they don't, the v1 result register contains a bogus ++// value, which is fine because it is caller-saved. 
++ ++using SimulatorRuntimeCall = ObjectPair (*)(int64_t arg0, int64_t arg1, ++ int64_t arg2, int64_t arg3, ++ int64_t arg4, int64_t arg5, ++ int64_t arg6, int64_t arg7, ++ int64_t arg8, int64_t arg9); ++ ++// These prototypes handle the four types of FP calls. ++using SimulatorRuntimeCompareCall = int64_t (*)(double darg0, double darg1); ++using SimulatorRuntimeFPFPCall = double (*)(double darg0, double darg1); ++using SimulatorRuntimeFPCall = double (*)(double darg0); ++using SimulatorRuntimeFPIntCall = double (*)(double darg0, int32_t arg0); ++ ++// This signature supports direct call in to API function native callback ++// (refer to InvocationCallback in v8.h). ++using SimulatorRuntimeDirectApiCall = void (*)(int64_t arg0); ++using SimulatorRuntimeProfilingApiCall = void (*)(int64_t arg0, void* arg1); ++ ++// This signature supports direct call to accessor getter callback. ++using SimulatorRuntimeDirectGetterCall = void (*)(int64_t arg0, int64_t arg1); ++using SimulatorRuntimeProfilingGetterCall = void (*)(int64_t arg0, int64_t arg1, ++ void* arg2); ++ ++// Software interrupt instructions are used by the simulator to call into the ++// C-based V8 runtime. They are also used for debugging with simulator. ++void Simulator::SoftwareInterrupt() { ++ // There are several instructions that could get us here, ++ // the break_, dbgcall_, syscall_ and hypcall instructions. ++ int32_t opcode_hi15 = instr_.Bits(31, 17); ++ CHECK_EQ(opcode_hi15, 0x15); ++ uint32_t code = instr_.Bits(14, 0); ++ // We first check if we met a call_rt_redirected. ++ if (instr_.InstructionBits() == rtCallRedirInstr) { ++ Redirection* redirection = Redirection::FromInstruction(instr_.instr()); ++ ++ int64_t* stack_pointer = reinterpret_cast(get_register(sp)); ++ ++ int64_t arg0 = get_register(a0); ++ int64_t arg1 = get_register(a1); ++ int64_t arg2 = get_register(a2); ++ int64_t arg3 = get_register(a3); ++ int64_t arg4 = get_register(a4); ++ int64_t arg5 = get_register(a5); ++ int64_t arg6 = get_register(a6); ++ int64_t arg7 = get_register(a7); ++ int64_t arg8 = stack_pointer[0]; ++ int64_t arg9 = stack_pointer[1]; ++ STATIC_ASSERT(kMaxCParameters == 10); ++ ++ bool fp_call = ++ (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) || ++ (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) || ++ (redirection->type() == ExternalReference::BUILTIN_FP_CALL) || ++ (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL); ++ ++ { ++ // With the hard floating point calling convention, double ++ // arguments are passed in FPU registers. Fetch the arguments ++ // from there and call the builtin using soft floating point ++ // convention. ++ switch (redirection->type()) { ++ case ExternalReference::BUILTIN_FP_FP_CALL: ++ case ExternalReference::BUILTIN_COMPARE_CALL: ++ arg0 = get_fpu_register(f0); ++ arg1 = get_fpu_register(f1); ++ arg2 = get_fpu_register(f2); ++ arg3 = get_fpu_register(f3); ++ break; ++ case ExternalReference::BUILTIN_FP_CALL: ++ arg0 = get_fpu_register(f0); ++ arg1 = get_fpu_register(f1); ++ break; ++ case ExternalReference::BUILTIN_FP_INT_CALL: ++ arg0 = get_fpu_register(f0); ++ arg1 = get_fpu_register(f1); ++ arg2 = get_register(a2); ++ break; ++ default: ++ break; ++ } ++ } ++ ++ // This is dodgy but it works because the C entry stubs are never moved. ++ // See comment in codegen-arm.cc and bug 1242173. 
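++ // Note: ra is captured here and restored after the host call (see
++ // set_register(ra, saved_ra) below); a runtime call may re-enter generated
++ // code and overwrite the simulated return address in the meantime.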
++ int64_t saved_ra = get_register(ra); ++ ++ intptr_t external = ++ reinterpret_cast(redirection->external_function()); ++ ++ // Based on CpuFeatures::IsSupported(FPU), La64 will use either hardware ++ // FPU, or gcc soft-float routines. Hardware FPU is simulated in this ++ // simulator. Soft-float has additional abstraction of ExternalReference, ++ // to support serialization. ++ if (fp_call) { ++ double dval0, dval1; // one or two double parameters ++ int32_t ival; // zero or one integer parameters ++ int64_t iresult = 0; // integer return value ++ double dresult = 0; // double return value ++ GetFpArgs(&dval0, &dval1, &ival); ++ SimulatorRuntimeCall generic_target = ++ reinterpret_cast(external); ++ if (::v8::internal::FLAG_trace_sim) { ++ switch (redirection->type()) { ++ case ExternalReference::BUILTIN_FP_FP_CALL: ++ case ExternalReference::BUILTIN_COMPARE_CALL: ++ PrintF("Call to host function at %p with args %f, %f", ++ reinterpret_cast(FUNCTION_ADDR(generic_target)), ++ dval0, dval1); ++ break; ++ case ExternalReference::BUILTIN_FP_CALL: ++ PrintF("Call to host function at %p with arg %f", ++ reinterpret_cast(FUNCTION_ADDR(generic_target)), ++ dval0); ++ break; ++ case ExternalReference::BUILTIN_FP_INT_CALL: ++ PrintF("Call to host function at %p with args %f, %d", ++ reinterpret_cast(FUNCTION_ADDR(generic_target)), ++ dval0, ival); ++ break; ++ default: ++ UNREACHABLE(); ++ break; ++ } ++ } ++ switch (redirection->type()) { ++ case ExternalReference::BUILTIN_COMPARE_CALL: { ++ SimulatorRuntimeCompareCall target = ++ reinterpret_cast(external); ++ iresult = target(dval0, dval1); ++ set_register(v0, static_cast(iresult)); ++ // set_register(v1, static_cast(iresult >> 32)); ++ break; ++ } ++ case ExternalReference::BUILTIN_FP_FP_CALL: { ++ SimulatorRuntimeFPFPCall target = ++ reinterpret_cast(external); ++ dresult = target(dval0, dval1); ++ SetFpResult(dresult); ++ break; ++ } ++ case ExternalReference::BUILTIN_FP_CALL: { ++ SimulatorRuntimeFPCall target = ++ reinterpret_cast(external); ++ dresult = target(dval0); ++ SetFpResult(dresult); ++ break; ++ } ++ case ExternalReference::BUILTIN_FP_INT_CALL: { ++ SimulatorRuntimeFPIntCall target = ++ reinterpret_cast(external); ++ dresult = target(dval0, ival); ++ SetFpResult(dresult); ++ break; ++ } ++ default: ++ UNREACHABLE(); ++ break; ++ } ++ if (::v8::internal::FLAG_trace_sim) { ++ switch (redirection->type()) { ++ case ExternalReference::BUILTIN_COMPARE_CALL: ++ PrintF("Returned %08x\n", static_cast(iresult)); ++ break; ++ case ExternalReference::BUILTIN_FP_FP_CALL: ++ case ExternalReference::BUILTIN_FP_CALL: ++ case ExternalReference::BUILTIN_FP_INT_CALL: ++ PrintF("Returned %f\n", dresult); ++ break; ++ default: ++ UNREACHABLE(); ++ break; ++ } ++ } ++ } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) { ++ if (::v8::internal::FLAG_trace_sim) { ++ PrintF("Call to host function at %p args %08" PRIx64 " \n", ++ reinterpret_cast(external), arg0); ++ } ++ SimulatorRuntimeDirectApiCall target = ++ reinterpret_cast(external); ++ target(arg0); ++ } else if (redirection->type() == ExternalReference::PROFILING_API_CALL) { ++ if (::v8::internal::FLAG_trace_sim) { ++ PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64 ++ " \n", ++ reinterpret_cast(external), arg0, arg1); ++ } ++ SimulatorRuntimeProfilingApiCall target = ++ reinterpret_cast(external); ++ target(arg0, Redirection::ReverseRedirection(arg1)); ++ } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) { ++ if 
(::v8::internal::FLAG_trace_sim) { ++ PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64 ++ " \n", ++ reinterpret_cast(external), arg0, arg1); ++ } ++ SimulatorRuntimeDirectGetterCall target = ++ reinterpret_cast(external); ++ target(arg0, arg1); ++ } else if (redirection->type() == ++ ExternalReference::PROFILING_GETTER_CALL) { ++ if (::v8::internal::FLAG_trace_sim) { ++ PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64 ++ " %08" PRIx64 " \n", ++ reinterpret_cast(external), arg0, arg1, arg2); ++ } ++ SimulatorRuntimeProfilingGetterCall target = ++ reinterpret_cast(external); ++ target(arg0, arg1, Redirection::ReverseRedirection(arg2)); ++ } else { ++ DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL || ++ redirection->type() == ExternalReference::BUILTIN_CALL_PAIR); ++ SimulatorRuntimeCall target = ++ reinterpret_cast(external); ++ if (::v8::internal::FLAG_trace_sim) { ++ PrintF( ++ "Call to host function at %p " ++ "args %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 ++ " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 ++ " , %08" PRIx64 " , %08" PRIx64 " \n", ++ reinterpret_cast(FUNCTION_ADDR(target)), arg0, arg1, arg2, ++ arg3, arg4, arg5, arg6, arg7, arg8, arg9); ++ } ++ ObjectPair result = ++ target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9); ++ set_register(v0, (int64_t)(result.x)); ++ set_register(v1, (int64_t)(result.y)); ++ } ++ if (::v8::internal::FLAG_trace_sim) { ++ PrintF("Returned %08" PRIx64 " : %08" PRIx64 " \n", get_register(v1), ++ get_register(v0)); ++ } ++ set_register(ra, saved_ra); ++ set_pc(get_register(ra)); ++ ++ } else if (code <= kMaxStopCode) { ++ if (IsWatchpoint(code)) { ++ PrintWatchpoint(code); ++ } else { ++ IncreaseStopCounter(code); ++ HandleStop(code, instr_.instr()); ++ } ++ } else { ++ // All remaining break_ codes, and all traps are handled here. ++ La64Debugger dbg(this); ++ dbg.Debug(); ++ } ++} ++ ++// Stop helper functions. ++bool Simulator::IsWatchpoint(uint64_t code) { ++ return (code <= kMaxWatchpointCode); ++} ++ ++void Simulator::PrintWatchpoint(uint64_t code) { ++ La64Debugger dbg(this); ++ ++break_count_; ++ PrintF("\n---- break %" PRId64 " marker: %3d (instr count: %8" PRId64 ++ " ) ----------" ++ "----------------------------------", ++ code, break_count_, icount_); ++ dbg.PrintAllRegs(); // Print registers and continue running. ++} ++ ++void Simulator::HandleStop(uint64_t code, Instruction* instr) { ++ // Stop if it is enabled, otherwise go on jumping over the stop ++ // and the message address. 
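++ // Codes in (kMaxWatchpointCode, kMaxStopCode] each carry a counter in
++ // watched_stops_; its top bit is the enable/disable flag that
++ // IsEnabledStop() tests below.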
++ if (IsEnabledStop(code)) { ++ La64Debugger dbg(this); ++ dbg.Stop(instr); ++ } ++} ++ ++bool Simulator::IsStopInstruction(Instruction* instr) { ++ int32_t opcode_hi15 = instr->Bits(31, 17); ++ uint32_t code = static_cast(instr->Bits(14, 0)); ++ return (opcode_hi15 == 0x15) && code > kMaxWatchpointCode && ++ code <= kMaxStopCode; ++} ++ ++bool Simulator::IsEnabledStop(uint64_t code) { ++ DCHECK_LE(code, kMaxStopCode); ++ DCHECK_GT(code, kMaxWatchpointCode); ++ return !(watched_stops_[code].count & kStopDisabledBit); ++} ++ ++void Simulator::EnableStop(uint64_t code) { ++ if (!IsEnabledStop(code)) { ++ watched_stops_[code].count &= ~kStopDisabledBit; ++ } ++} ++ ++void Simulator::DisableStop(uint64_t code) { ++ if (IsEnabledStop(code)) { ++ watched_stops_[code].count |= kStopDisabledBit; ++ } ++} ++ ++void Simulator::IncreaseStopCounter(uint64_t code) { ++ DCHECK_LE(code, kMaxStopCode); ++ if ((watched_stops_[code].count & ~(1 << 31)) == 0x7FFFFFFF) { ++ PrintF("Stop counter for code %" PRId64 ++ " has overflowed.\n" ++ "Enabling this code and reseting the counter to 0.\n", ++ code); ++ watched_stops_[code].count = 0; ++ EnableStop(code); ++ } else { ++ watched_stops_[code].count++; ++ } ++} ++ ++// Print a stop status. ++void Simulator::PrintStopInfo(uint64_t code) { ++ if (code <= kMaxWatchpointCode) { ++ PrintF("That is a watchpoint, not a stop.\n"); ++ return; ++ } else if (code > kMaxStopCode) { ++ PrintF("Code too large, only %u stops can be used\n", kMaxStopCode + 1); ++ return; ++ } ++ const char* state = IsEnabledStop(code) ? "Enabled" : "Disabled"; ++ int32_t count = watched_stops_[code].count & ~kStopDisabledBit; ++ // Don't print the state of unused breakpoints. ++ if (count != 0) { ++ if (watched_stops_[code].desc) { ++ PrintF("stop %" PRId64 " - 0x%" PRIx64 " : \t%s, \tcounter = %i, \t%s\n", ++ code, code, state, count, watched_stops_[code].desc); ++ } else { ++ PrintF("stop %" PRId64 " - 0x%" PRIx64 " : \t%s, \tcounter = %i\n", code, ++ code, state, count); ++ } ++ } ++} ++ ++void Simulator::SignalException(Exception e) { ++ FATAL("Error: Exception %i raised.", static_cast(e)); ++} ++ ++template ++static T FPAbs(T a); ++ ++template <> ++double FPAbs(double a) { ++ return fabs(a); ++} ++ ++template <> ++float FPAbs(float a) { ++ return fabsf(a); ++} ++ ++template ++static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T* result) { ++ if (std::isnan(a) && std::isnan(b)) { ++ *result = a; ++ } else if (std::isnan(a)) { ++ *result = b; ++ } else if (std::isnan(b)) { ++ *result = a; ++ } else if (b == a) { ++ // Handle -0.0 == 0.0 case. ++ // std::signbit() returns int 0 or 1 so subtracting MaxMinKind::kMax ++ // negates the result. ++ *result = std::signbit(b) - static_cast(kind) ? b : a; ++ } else { ++ return false; ++ } ++ return true; ++} ++ ++template ++static T FPUMin(T a, T b) { ++ T result; ++ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) { ++ return result; ++ } else { ++ return b < a ? b : a; ++ } ++} ++ ++template ++static T FPUMax(T a, T b) { ++ T result; ++ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, &result)) { ++ return result; ++ } else { ++ return b > a ? b : a; ++ } ++} ++ ++template ++static T FPUMinA(T a, T b) { ++ T result; ++ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) { ++ if (FPAbs(a) < FPAbs(b)) { ++ result = a; ++ } else if (FPAbs(b) < FPAbs(a)) { ++ result = b; ++ } else { ++ result = a < b ? 
++    }
++  }
++  return result;
++}
++
++template <typename T>
++static T FPUMaxA(T a, T b) {
++  T result;
++  if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
++    if (FPAbs(a) > FPAbs(b)) {
++      result = a;
++    } else if (FPAbs(b) > FPAbs(a)) {
++      result = b;
++    } else {
++      result = a > b ? a : b;
++    }
++  }
++  return result;
++}
++
++enum class KeepSign : bool { no = false, yes };
++
++template <typename T, typename std::enable_if<std::is_floating_point<T>::value,
++                                              int>::type = 0>
++T FPUCanonalizeNaNArg(T result, T arg, KeepSign keepSign = KeepSign::no) {
++  DCHECK(std::isnan(arg));
++  T qNaN = std::numeric_limits<T>::quiet_NaN();
++  if (keepSign == KeepSign::yes) {
++    return std::copysign(qNaN, result);
++  }
++  return qNaN;
++}
++
++template <typename T>
++T FPUCanonalizeNaNArgs(T result, KeepSign keepSign, T first) {
++  if (std::isnan(first)) {
++    return FPUCanonalizeNaNArg(result, first, keepSign);
++  }
++  return result;
++}
++
++template <typename T, typename... Args>
++T FPUCanonalizeNaNArgs(T result, KeepSign keepSign, T first, Args... args) {
++  if (std::isnan(first)) {
++    return FPUCanonalizeNaNArg(result, first, keepSign);
++  }
++  return FPUCanonalizeNaNArgs(result, keepSign, args...);
++}
++
++template <typename Func, typename T, typename... Args>
++T FPUCanonalizeOperation(Func f, T first, Args... args) {
++  return FPUCanonalizeOperation(f, KeepSign::no, first, args...);
++}
++
++template <typename Func, typename T, typename... Args>
++T FPUCanonalizeOperation(Func f, KeepSign keepSign, T first, Args... args) {
++  T result = f(first, args...);
++  if (std::isnan(result)) {
++    result = FPUCanonalizeNaNArgs(result, keepSign, first, args...);
++  }
++  return result;
++}
++
++// Handle execution based on instruction types.
++void Simulator::DecodeTypeOp6() {
++  int64_t alu_out;
++  // Next pc.
++  int64_t next_pc = bad_ra;
++
++  // Branch instructions common part.
++  auto BranchAndLinkHelper = [this, &next_pc]() {
++    int64_t current_pc = get_pc();
++    set_register(ra, current_pc + kInstrSize);
++    int32_t offs26_low16 =
++        static_cast<uint32_t>(instr_.Bits(25, 10) << 16) >> 16;
++    int32_t offs26_high10 = static_cast<int32_t>(instr_.Bits(9, 0) << 22) >> 6;
++    int32_t offs26 = offs26_low16 | offs26_high10;
++    next_pc = current_pc + (offs26 << 2);
++    printf_instr("Offs26: %08x\n", offs26);
++    set_pc(next_pc);
++  };
++
++  auto BranchOff16Helper = [this, &next_pc](bool do_branch) {
++    int64_t current_pc = get_pc();
++    int32_t offs16 = static_cast<int32_t>(instr_.Bits(25, 10) << 16) >> 16;
++    printf_instr("Offs16: %08x\n", offs16);
++    int32_t offs = do_branch ? (offs16 << 2) : kInstrSize;
++    next_pc = current_pc + offs;
++    set_pc(next_pc);
++  };
++
++  auto BranchOff21Helper = [this, &next_pc](bool do_branch) {
++    int64_t current_pc = get_pc();
++    int32_t offs21_low16 =
++        static_cast<uint32_t>(instr_.Bits(25, 10) << 16) >> 16;
++    int32_t offs21_high5 = static_cast<int32_t>(instr_.Bits(4, 0) << 27) >> 11;
++    int32_t offs = offs21_low16 | offs21_high5;
++    printf_instr("Offs21: %08x\n", offs);
++    offs = do_branch ?
(offs << 2) : kInstrSize; ++ next_pc = current_pc + offs; ++ set_pc(next_pc); ++ }; ++ ++ auto BranchOff26Helper = [this, &next_pc]() { ++ int64_t current_pc = get_pc(); ++ int32_t offs26_low16 = ++ static_cast(instr_.Bits(25, 10) << 16) >> 16; ++ int32_t offs26_high10 = static_cast(instr_.Bits(9, 0) << 22) >> 6; ++ int32_t offs26 = offs26_low16 | offs26_high10; ++ next_pc = current_pc + (offs26 << 2); ++ printf_instr("Offs26: %08x\n", offs26); ++ set_pc(next_pc); ++ }; ++ ++ auto JumpOff16Helper = [this, &next_pc]() { ++ int32_t offs16 = static_cast(instr_.Bits(25, 10) << 16) >> 16; ++ printf_instr("JIRL\t %s: %016lx, %s: %016lx, offs16: %x\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), offs16); ++ set_register(rd_reg(), get_pc() + kInstrSize); ++ next_pc = rj() + (offs16 << 2); ++ set_pc(next_pc); ++ }; ++ ++ switch (instr_.Bits(31, 26) << 26) { ++ case ADDU16I_D: { ++ printf_instr("ADDU16I_D\t %s: %016lx, %s: %016lx, si16: %d\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si16()); ++ int32_t si16_upper = static_cast(si16()) << 16; ++ alu_out = static_cast(si16_upper) + rj(); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case BEQZ: ++ printf_instr("BEQZ\t %s: %016lx, ", Registers::Name(rj_reg()), rj()); ++ BranchOff21Helper(rj() == 0); ++ break; ++ case BNEZ: ++ printf_instr("BNEZ\t %s: %016lx, ", Registers::Name(rj_reg()), rj()); ++ BranchOff21Helper(rj() != 0); ++ break; ++ case BCZ: { ++ if (instr_.Bits(9, 8) == 0b00) { ++ // BCEQZ ++ printf_instr("BCEQZ\t fcc%d: %s, ", cj_reg(), cj() ? "True" : "False"); ++ BranchOff21Helper(cj() == false); ++ } else if (instr_.Bits(9, 8) == 0b01) { ++ // BCNEZ ++ printf_instr("BCNEZ\t fcc%d: %s, ", cj_reg(), cj() ? "True" : "False"); ++ BranchOff21Helper(cj() == true); ++ } else { ++ UNREACHABLE(); ++ } ++ break; ++ } ++ case JIRL: ++ JumpOff16Helper(); ++ break; ++ case B: ++ printf_instr("B\t "); ++ BranchOff26Helper(); ++ break; ++ case BL: ++ printf_instr("BL\t "); ++ BranchAndLinkHelper(); ++ break; ++ case BEQ: ++ printf_instr("BEQ\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()), ++ rj(), Registers::Name(rd_reg()), rd()); ++ BranchOff16Helper(rj() == rd()); ++ break; ++ case BNE: ++ printf_instr("BNE\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()), ++ rj(), Registers::Name(rd_reg()), rd()); ++ BranchOff16Helper(rj() != rd()); ++ break; ++ case BLT: ++ printf_instr("BLT\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()), ++ rj(), Registers::Name(rd_reg()), rd()); ++ BranchOff16Helper(rj() < rd()); ++ break; ++ case BGE: ++ printf_instr("BGE\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()), ++ rj(), Registers::Name(rd_reg()), rd()); ++ BranchOff16Helper(rj() >= rd()); ++ break; ++ case BLTU: ++ printf_instr("BLTU\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()), ++ rj(), Registers::Name(rd_reg()), rd()); ++ BranchOff16Helper(rj_u() < rd_u()); ++ break; ++ case BGEU: ++ printf_instr("BGEU\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()), ++ rj(), Registers::Name(rd_reg()), rd()); ++ BranchOff16Helper(rj_u() >= rd_u()); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Simulator::DecodeTypeOp7() { ++ int64_t alu_out; ++ ++ switch (instr_.Bits(31, 25) << 25) { ++ case LU12I_W: { ++ printf_instr("LU12I_W\t %s: %016lx, si20: %d\n", ++ Registers::Name(rd_reg()), rd(), si20()); ++ int32_t si20_upper = static_cast(si20() << 12); ++ SetResult(rd_reg(), static_cast(si20_upper)); ++ break; ++ } ++ case LU32I_D: { ++ printf_instr("LU32I_D\t %s: 
%016lx, si20: %d\n", ++ Registers::Name(rd_reg()), rd(), si20()); ++ int32_t si20_signExtend = static_cast(si20() << 12) >> 12; ++ int64_t lower_32bit_mask = 0xFFFFFFFF; ++ alu_out = (static_cast(si20_signExtend) << 32) | ++ (rd() & lower_32bit_mask); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case PCADDI: { ++ printf_instr("PCADDI\t %s: %016lx, si20: %d\n", Registers::Name(rd_reg()), ++ rd(), si20()); ++ int32_t si20_signExtend = static_cast(si20() << 12) >> 10; ++ int64_t current_pc = get_pc(); ++ alu_out = static_cast(si20_signExtend) + current_pc; ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case PCALAU12I: { ++ printf_instr("PCALAU12I\t %s: %016lx, si20: %d\n", ++ Registers::Name(rd_reg()), rd(), si20()); ++ int32_t si20_signExtend = static_cast(si20() << 12); ++ int64_t current_pc = get_pc(); ++ int64_t clear_lower12bit_mask = 0xFFFFFFFFFFFFF000; ++ alu_out = static_cast(si20_signExtend) + current_pc; ++ SetResult(rd_reg(), alu_out & clear_lower12bit_mask); ++ break; ++ } ++ case PCADDU12I: { ++ printf_instr("PCADDU12I\t %s: %016lx, si20: %d\n", ++ Registers::Name(rd_reg()), rd(), si20()); ++ int32_t si20_signExtend = static_cast(si20() << 12); ++ int64_t current_pc = get_pc(); ++ alu_out = static_cast(si20_signExtend) + current_pc; ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case PCADDU18I: { ++ printf_instr("PCADDU18I\t %s: %016lx, si20: %d\n", ++ Registers::Name(rd_reg()), rd(), si20()); ++ int64_t si20_signExtend = (static_cast(si20()) << 44) >> 26; ++ int64_t current_pc = get_pc(); ++ alu_out = si20_signExtend + current_pc; ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Simulator::DecodeTypeOp8() { ++ int64_t addr = 0x0; ++ int64_t si14_se = (static_cast(si14()) << 50) >> 48; ++ ++ switch (instr_.Bits(31, 24) << 24) { ++ case LDPTR_W: ++ printf_instr("LDPTR_W\t %s: %016lx, %s: %016lx, si14: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si14_se); ++ set_register(rd_reg(), ReadW(rj() + si14_se, instr_.instr())); ++ break; ++ case STPTR_W: ++ printf_instr("STPTR_W\t %s: %016lx, %s: %016lx, si14: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si14_se); ++ WriteW(rj() + si14_se, static_cast(rd()), instr_.instr()); ++ break; ++ case LDPTR_D: ++ printf_instr("LDPTR_D\t %s: %016lx, %s: %016lx, si14: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si14_se); ++ set_register(rd_reg(), Read2W(rj() + si14_se, instr_.instr())); ++ break; ++ case STPTR_D: ++ printf_instr("STPTR_D\t %s: %016lx, %s: %016lx, si14: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si14_se); ++ Write2W(rj() + si14_se, rd(), instr_.instr()); ++ break; ++ case LL_W: { ++ printf_instr("LL_W\t %s: %016lx, %s: %016lx, si14: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si14_se); ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ addr = si14_se + rj(); ++ set_register(rd_reg(), ReadW(addr, instr_.instr())); ++ local_monitor_.NotifyLoadLinked(addr, TransactionSize::Word); ++ GlobalMonitor::Get()->NotifyLoadLinked_Locked(addr, ++ &global_monitor_thread_); ++ break; ++ } ++ case SC_W: { ++ printf_instr("SC_W\t %s: %016lx, %s: %016lx, si14: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si14_se); ++ addr = si14_se + rj(); ++ WriteConditionalW(addr, static_cast(rd()), instr_.instr(), ++ rd_reg()); ++ break; ++ } ++ case LL_D: { ++ 
printf_instr("LL_D\t %s: %016lx, %s: %016lx, si14: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si14_se); ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ addr = si14_se + rj(); ++ set_register(rd_reg(), Read2W(addr, instr_.instr())); ++ local_monitor_.NotifyLoadLinked(addr, TransactionSize::DoubleWord); ++ GlobalMonitor::Get()->NotifyLoadLinked_Locked(addr, ++ &global_monitor_thread_); ++ break; ++ } ++ case SC_D: { ++ printf_instr("SC_D\t %s: %016lx, %s: %016lx, si14: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si14_se); ++ addr = si14_se + rj(); ++ WriteConditional2W(addr, rd(), instr_.instr(), rd_reg()); ++ break; ++ } ++ case CSR: ++ UNIMPLEMENTED(); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Simulator::DecodeTypeOp10() { ++ int64_t alu_out = 0x0; ++ int64_t si12_se = (static_cast(si12()) << 52) >> 52; ++ uint64_t si12_ze = (static_cast(ui12()) << 52) >> 52; ++ ++ switch (instr_.Bits(31, 22) << 22) { ++ case BSTR_W: { ++ CHECK_EQ(instr_.Bit(21), 1); ++ uint8_t lsbw_ = lsbw(); ++ uint8_t msbw_ = msbw(); ++ CHECK_LE(lsbw_, msbw_); ++ uint8_t size = msbw_ - lsbw_ + 1; ++ uint64_t mask = (1ULL << size) - 1; ++ if (instr_.Bit(15) == 0) { ++ // BSTRINS_W ++ printf_instr( ++ "BSTRINS_W\t %s: %016lx, %s: %016lx, msbw: %02x, lsbw: %02x\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), rj(), ++ msbw_, lsbw_); ++ alu_out = static_cast((rd_u() & ~(mask << lsbw_)) | ++ ((rj_u() & mask) << lsbw_)); ++ } else { ++ // BSTRPICK_W ++ printf_instr( ++ "BSTRPICK_W\t %s: %016lx, %s: %016lx, msbw: %02x, lsbw: %02x\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), rj(), ++ msbw_, lsbw_); ++ alu_out = static_cast((rj_u() & (mask << lsbw_)) >> lsbw_); ++ } ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case BSTRINS_D: { ++ uint8_t lsbd_ = lsbd(); ++ uint8_t msbd_ = msbd(); ++ CHECK_LE(lsbd_, msbd_); ++ printf_instr( ++ "BSTRINS_D\t %s: %016lx, %s: %016lx, msbw: %02x, lsbw: %02x\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), rj(), ++ msbd_, lsbd_); ++ uint8_t size = msbd_ - lsbd_ + 1; ++ if (size < 64) { ++ uint64_t mask = (1ULL << size) - 1; ++ alu_out = (rd_u() & ~(mask << lsbd_)) | ((rj_u() & mask) << lsbd_); ++ SetResult(rd_reg(), alu_out); ++ } else if (size == 64) { ++ SetResult(rd_reg(), rj()); ++ } ++ break; ++ } ++ case BSTRPICK_D: { ++ uint8_t lsbd_ = lsbd(); ++ uint8_t msbd_ = msbd(); ++ CHECK_LE(lsbd_, msbd_); ++ printf_instr( ++ "BSTRPICK_D\t %s: %016lx, %s: %016lx, msbw: %02x, lsbw: %02x\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), rj(), ++ msbd_, lsbd_); ++ uint8_t size = msbd_ - lsbd_ + 1; ++ if (size < 64) { ++ uint64_t mask = (1ULL << size) - 1; ++ alu_out = (rj_u() & (mask << lsbd_)) >> lsbd_; ++ SetResult(rd_reg(), alu_out); ++ } else if (size == 64) { ++ SetResult(rd_reg(), rj()); ++ } ++ break; ++ } ++ case SLTI: ++ printf_instr("SLTI\t %s: %016lx, %s: %016lx, si12: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si12_se); ++ SetResult(rd_reg(), rj() < si12_se ? 1 : 0); ++ break; ++ case SLTUI: ++ printf_instr("SLTUI\t %s: %016lx, %s: %016lx, si12: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si12_se); ++ SetResult(rd_reg(), rj_u() < static_cast(si12_se) ? 
1 : 0); ++ break; ++ case ADDI_W: { ++ printf_instr("ADDI_W\t %s: %016lx, %s: %016lx, si12: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si12_se); ++ int32_t alu32_out = ++ static_cast(rj()) + static_cast(si12_se); ++ SetResult(rd_reg(), alu32_out); ++ break; ++ } ++ case ADDI_D: ++ printf_instr("ADDI_D\t %s: %016lx, %s: %016lx, si12: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si12_se); ++ SetResult(rd_reg(), rj() + si12_se); ++ break; ++ case LU52I_D: { ++ printf_instr("LU52I_D\t %s: %016lx, %s: %016lx, si12: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si12_se); ++ int64_t si12_se = static_cast(si12()) << 52; ++ uint64_t mask = (1ULL << 52) - 1; ++ alu_out = si12_se + (rj() & mask); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case ANDI: ++ printf_instr("ANDI\t %s: %016lx, %s: %016lx, si12: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si12_ze); ++ SetResult(rd_reg(), rj() & si12_ze); ++ break; ++ case ORI: ++ printf_instr("ORI\t %s: %016lx, %s: %016lx, si12: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si12_ze); ++ SetResult(rd_reg(), rj_u() | si12_ze); ++ break; ++ case XORI: ++ printf_instr("XORI\t %s: %016lx, %s: %016lx, si12: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si12_ze); ++ SetResult(rd_reg(), rj_u() ^ si12_ze); ++ break; ++ case LD_B: ++ printf_instr("LD_B\t %s: %016lx, %s: %016lx, si12: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si12_ze); ++ set_register(rd_reg(), ReadB(rj() + si12_se)); ++ break; ++ case LD_H: ++ printf_instr("LD_H\t %s: %016lx, %s: %016lx, si12: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si12_ze); ++ set_register(rd_reg(), ReadH(rj() + si12_se, instr_.instr())); ++ break; ++ case LD_W: ++ printf_instr("LD_W\t %s: %016lx, %s: %016lx, si12: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si12_ze); ++ set_register(rd_reg(), ReadW(rj() + si12_se, instr_.instr())); ++ break; ++ case LD_D: ++ printf_instr("LD_D\t %s: %016lx, %s: %016lx, si12: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si12_ze); ++ set_register(rd_reg(), Read2W(rj() + si12_se, instr_.instr())); ++ break; ++ case ST_B: ++ printf_instr("ST_B\t %s: %016lx, %s: %016lx, si12: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si12_ze); ++ WriteB(rj() + si12_se, static_cast(rd())); ++ break; ++ case ST_H: ++ printf_instr("ST_H\t %s: %016lx, %s: %016lx, si12: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si12_ze); ++ WriteH(rj() + si12_se, static_cast(rd()), instr_.instr()); ++ break; ++ case ST_W: ++ printf_instr("ST_W\t %s: %016lx, %s: %016lx, si12: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si12_ze); ++ WriteW(rj() + si12_se, static_cast(rd()), instr_.instr()); ++ break; ++ case ST_D: ++ printf_instr("ST_D\t %s: %016lx, %s: %016lx, si12: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si12_ze); ++ Write2W(rj() + si12_se, rd(), instr_.instr()); ++ break; ++ case LD_BU: ++ printf_instr("LD_BU\t %s: %016lx, %s: %016lx, si12: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si12_ze); ++ set_register(rd_reg(), ReadBU(rj() + si12_se)); ++ break; 
++ case LD_HU: ++ printf_instr("LD_HU\t %s: %016lx, %s: %016lx, si12: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si12_ze); ++ set_register(rd_reg(), ReadHU(rj() + si12_se, instr_.instr())); ++ break; ++ case LD_WU: ++ printf_instr("LD_WU\t %s: %016lx, %s: %016lx, si12: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si12_ze); ++ set_register(rd_reg(), ReadWU(rj() + si12_se, instr_.instr())); ++ break; ++ case FLD_S: { ++ printf_instr("FLD_S\t %s: %016f, %s: %016lx, si12: %016lx\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ Registers::Name(rj_reg()), rj(), si12_ze); ++ set_fpu_register(fd_reg(), kFPUInvalidResult); // Trash upper 32 bits. ++ set_fpu_register_word( ++ fd_reg(), ReadW(rj() + si12_se, instr_.instr(), FLOAT_DOUBLE)); ++ break; ++ } ++ case FST_S: { ++ printf_instr("FST_S\t %s: %016f, %s: %016lx, si12: %016lx\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ Registers::Name(rj_reg()), rj(), si12_ze); ++ int32_t alu_out_32 = static_cast(get_fpu_register(fd_reg())); ++ WriteW(rj() + si12_se, alu_out_32, instr_.instr()); ++ break; ++ } ++ case FLD_D: { ++ printf_instr("FLD_D\t %s: %016f, %s: %016lx, si12: %016lx\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ Registers::Name(rj_reg()), rj(), si12_ze); ++ set_fpu_register_double(fd_reg(), ReadD(rj() + si12_se, instr_.instr())); ++ TraceMemRd(rj() + si12_se, get_fpu_register(fd_reg()), DOUBLE); ++ break; ++ } ++ case FST_D: { ++ printf_instr("FST_D\t %s: %016f, %s: %016lx, si12: %016lx\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ Registers::Name(rj_reg()), rj(), si12_ze); ++ WriteD(rj() + si12_se, get_fpu_register_double(fd_reg()), instr_.instr()); ++ TraceMemWr(rj() + si12_se, get_fpu_register(fd_reg()), DWORD); ++ break; ++ } ++ case PRELD: ++ case CACHE: ++ UNIMPLEMENTED(); ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Simulator::DecodeTypeOp12() { ++ switch (instr_.Bits(31, 20) << 20) { ++ case FMADD_S: ++ printf_instr("FMADD_S\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ FPURegisters::Name(fk_reg()), fk_float(), ++ FPURegisters::Name(fa_reg()), fa_float(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ SetFPUFloatResult(fd_reg(), std::fma(fj_float(), fk_float(), fa_float())); ++ break; ++ case FMADD_D: ++ printf_instr("FMADD_D\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fk_reg()), fk_double(), ++ FPURegisters::Name(fa_reg()), fa_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ SetFPUDoubleResult(fd_reg(), ++ std::fma(fj_double(), fk_double(), fa_double())); ++ break; ++ case FMSUB_S: ++ printf_instr("FMSUB_S\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ FPURegisters::Name(fk_reg()), fk_float(), ++ FPURegisters::Name(fa_reg()), fa_float(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ SetFPUFloatResult(fd_reg(), ++ std::fma(fj_float(), fk_float(), -fa_float())); ++ break; ++ case FMSUB_D: ++ printf_instr("FMSUB_D\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fk_reg()), fk_double(), ++ FPURegisters::Name(fa_reg()), fa_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ SetFPUDoubleResult(fd_reg(), ++ std::fma(fj_double(), fk_double(), -fa_double())); ++ break; ++ case FNMADD_S: ++ printf_instr("FNMADD_S\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n", ++ 
FPURegisters::Name(fd_reg()), fd_float(), ++ FPURegisters::Name(fk_reg()), fk_float(), ++ FPURegisters::Name(fa_reg()), fa_float(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ SetFPUFloatResult(fd_reg(), ++ std::fma(-fj_float(), fk_float(), -fa_float())); ++ break; ++ case FNMADD_D: ++ printf_instr("FNMADD_D\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fk_reg()), fk_double(), ++ FPURegisters::Name(fa_reg()), fa_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ SetFPUDoubleResult(fd_reg(), ++ std::fma(-fj_double(), fk_double(), -fa_double())); ++ break; ++ case FNMSUB_S: ++ printf_instr("FNMSUB_S\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ FPURegisters::Name(fk_reg()), fk_float(), ++ FPURegisters::Name(fa_reg()), fa_float(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ SetFPUFloatResult(fd_reg(), ++ std::fma(-fj_float(), fk_float(), fa_float())); ++ break; ++ case FNMSUB_D: ++ printf_instr("FNMSUB_D\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fk_reg()), fk_double(), ++ FPURegisters::Name(fa_reg()), fa_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ SetFPUDoubleResult(fd_reg(), ++ std::fma(-fj_double(), fk_double(), fa_double())); ++ break; ++ case FCMP_COND_S: { ++ CHECK_EQ(instr_.Bits(4, 3), 0); ++ float fj = fj_float(); ++ float fk = fk_float(); ++ switch (cond()) { ++ case CAF: { ++ printf_instr("FCMP_CAF_S fcc%d\n", cd_reg()); ++ set_cf_register(cd_reg(), false); ++ break; ++ } ++ case CUN: { ++ printf_instr("FCMP_CUN_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(), ++ FPURegisters::Name(fj_reg()), fj, ++ FPURegisters::Name(fk_reg()), fk); ++ set_cf_register(cd_reg(), std::isnan(fj) || std::isnan(fk)); ++ break; ++ } ++ case CEQ: { ++ printf_instr("FCMP_CEQ_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(), ++ FPURegisters::Name(fj_reg()), fj, ++ FPURegisters::Name(fk_reg()), fk); ++ set_cf_register(cd_reg(), fj == fk); ++ break; ++ } ++ case CUEQ: { ++ printf_instr("FCMP_CUEQ_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(), ++ FPURegisters::Name(fj_reg()), fj, ++ FPURegisters::Name(fk_reg()), fk); ++ set_cf_register(cd_reg(), ++ (fj == fk) || std::isnan(fj) || std::isnan(fk)); ++ break; ++ } ++ case CLT: { ++ printf_instr("FCMP_CLT_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(), ++ FPURegisters::Name(fj_reg()), fj, ++ FPURegisters::Name(fk_reg()), fk); ++ set_cf_register(cd_reg(), fj < fk); ++ break; ++ } ++ case CULT: { ++ printf_instr("FCMP_CULT_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(), ++ FPURegisters::Name(fj_reg()), fj, ++ FPURegisters::Name(fk_reg()), fk); ++ set_cf_register(cd_reg(), ++ (fj < fk) || std::isnan(fj) || std::isnan(fk)); ++ break; ++ } ++ case CLE: { ++ printf_instr("FCMP_CLE_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(), ++ FPURegisters::Name(fj_reg()), fj, ++ FPURegisters::Name(fk_reg()), fk); ++ set_cf_register(cd_reg(), fj <= fk); ++ break; ++ } ++ case CULE: { ++ printf_instr("FCMP_CULE_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(), ++ FPURegisters::Name(fj_reg()), fj, ++ FPURegisters::Name(fk_reg()), fk); ++ set_cf_register(cd_reg(), ++ (fj <= fk) || std::isnan(fj) || std::isnan(fk)); ++ break; ++ } ++ case CNE: { ++ printf_instr("FCMP_CNE_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(), ++ FPURegisters::Name(fj_reg()), fj, ++ FPURegisters::Name(fk_reg()), fk); ++ set_cf_register(cd_reg(), (fj < fk) || (fj > fk)); ++ break; ++ } ++ case COR: { ++ 
printf_instr("FCMP_COR_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(), ++ FPURegisters::Name(fj_reg()), fj, ++ FPURegisters::Name(fk_reg()), fk); ++ set_cf_register(cd_reg(), !std::isnan(fj) && !std::isnan(fk)); ++ break; ++ } ++ case CUNE: { ++ printf_instr("FCMP_CUNE_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(), ++ FPURegisters::Name(fj_reg()), fj, ++ FPURegisters::Name(fk_reg()), fk); ++ set_cf_register(cd_reg(), ++ (fj != fk) || std::isnan(fj) || std::isnan(fk)); ++ break; ++ } ++ case SAF: ++ case SUN: ++ case SEQ: ++ case SUEQ: ++ case SLT: ++ case SULT: ++ case SLE: ++ case SULE: ++ case SNE: ++ case SOR: ++ case SUNE: ++ UNIMPLEMENTED(); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ break; ++ } ++ case FCMP_COND_D: { ++ CHECK_EQ(instr_.Bits(4, 3), 0); ++ double fj = fj_double(); ++ double fk = fk_double(); ++ switch (cond()) { ++ case CAF: { ++ printf_instr("FCMP_CAF_D fcc%d\n", cd_reg()); ++ set_cf_register(cd_reg(), false); ++ break; ++ } ++ case CUN: { ++ printf_instr("FCMP_CUN_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(), ++ FPURegisters::Name(fj_reg()), fj, ++ FPURegisters::Name(fk_reg()), fk); ++ set_cf_register(cd_reg(), std::isnan(fj) || std::isnan(fk)); ++ break; ++ } ++ case CEQ: { ++ printf_instr("FCMP_CEQ_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(), ++ FPURegisters::Name(fj_reg()), fj, ++ FPURegisters::Name(fk_reg()), fk); ++ set_cf_register(cd_reg(), fj == fk); ++ break; ++ } ++ case CUEQ: { ++ printf_instr("FCMP_CUEQ_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(), ++ FPURegisters::Name(fj_reg()), fj, ++ FPURegisters::Name(fk_reg()), fk); ++ set_cf_register(cd_reg(), ++ (fj == fk) || std::isnan(fj) || std::isnan(fk)); ++ break; ++ } ++ case CLT: { ++ printf_instr("FCMP_CLT_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(), ++ FPURegisters::Name(fj_reg()), fj, ++ FPURegisters::Name(fk_reg()), fk); ++ set_cf_register(cd_reg(), fj < fk); ++ break; ++ } ++ case CULT: { ++ printf_instr("FCMP_CULT_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(), ++ FPURegisters::Name(fj_reg()), fj, ++ FPURegisters::Name(fk_reg()), fk); ++ set_cf_register(cd_reg(), ++ (fj < fk) || std::isnan(fj) || std::isnan(fk)); ++ break; ++ } ++ case CLE: { ++ printf_instr("FCMP_CLE_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(), ++ FPURegisters::Name(fj_reg()), fj, ++ FPURegisters::Name(fk_reg()), fk); ++ set_cf_register(cd_reg(), fj <= fk); ++ break; ++ } ++ case CULE: { ++ printf_instr("FCMP_CULE_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(), ++ FPURegisters::Name(fj_reg()), fj, ++ FPURegisters::Name(fk_reg()), fk); ++ set_cf_register(cd_reg(), ++ (fj <= fk) || std::isnan(fj) || std::isnan(fk)); ++ break; ++ } ++ case CNE: { ++ printf_instr("FCMP_CNE_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(), ++ FPURegisters::Name(fj_reg()), fj, ++ FPURegisters::Name(fk_reg()), fk); ++ set_cf_register(cd_reg(), (fj < fk) || (fj > fk)); ++ break; ++ } ++ case COR: { ++ printf_instr("FCMP_COR_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(), ++ FPURegisters::Name(fj_reg()), fj, ++ FPURegisters::Name(fk_reg()), fk); ++ set_cf_register(cd_reg(), !std::isnan(fj) && !std::isnan(fk)); ++ break; ++ } ++ case CUNE: { ++ printf_instr("FCMP_CUNE_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(), ++ FPURegisters::Name(fj_reg()), fj, ++ FPURegisters::Name(fk_reg()), fk); ++ set_cf_register(cd_reg(), ++ (fj != fk) || std::isnan(fj) || std::isnan(fk)); ++ break; ++ } ++ case SAF: ++ case SUN: ++ case SEQ: ++ case SUEQ: ++ case SLT: ++ case SULT: ++ case SLE: ++ case SULE: ++ case SNE: ++ case SOR: ++ case SUNE: ++ UNIMPLEMENTED(); ++ break; ++ default: ++ UNREACHABLE(); ++ } 
++ break; ++ } ++ case FSEL: { ++ CHECK_EQ(instr_.Bits(19, 18), 0); ++ printf_instr("FSEL fcc%d, %s: %016f, %s: %016f, %s: %016f\n", ca_reg(), ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double(), ++ FPURegisters::Name(fk_reg()), fk_double()); ++ if (ca() == 0) { ++ SetFPUDoubleResult(fd_reg(), fj_double()); ++ } else { ++ SetFPUDoubleResult(fd_reg(), fk_double()); ++ } ++ break; ++ } ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Simulator::DecodeTypeOp14() { ++ int64_t alu_out = 0x0; ++ int32_t alu32_out = 0x0; ++ ++ switch (instr_.Bits(31, 18) << 18) { ++ case ALSL: { ++ uint8_t sa = sa2() + 1; ++ alu32_out = ++ (static_cast(rj()) << sa) + static_cast(rk()); ++ if (instr_.Bit(17) == 0) { ++ // ALSL_W ++ printf_instr("ALSL_W\t %s: %016lx, %s: %016lx, %s: %016lx, sa2: %d\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk(), sa2()); ++ SetResult(rd_reg(), alu32_out); ++ } else { ++ // ALSL_WU ++ printf_instr("ALSL_WU\t %s: %016lx, %s: %016lx, %s: %016lx, sa2: %d\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk(), sa2()); ++ SetResult(rd_reg(), static_cast(alu32_out)); ++ } ++ break; ++ } ++ case BYTEPICK_W: { ++ CHECK_EQ(instr_.Bit(17), 0); ++ printf_instr("BYTEPICK_W\t %s: %016lx, %s: %016lx, %s: %016lx, sa2: %d\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk(), sa2()); ++ uint8_t sa = sa2() * 8; ++ if (sa == 0) { ++ alu32_out = static_cast(rk()); ++ } else { ++ int32_t mask = (1 << 31) >> (sa - 1); ++ int32_t rk_hi = (static_cast(rk()) & (~mask)) << sa; ++ int32_t rj_lo = (static_cast(rj()) & mask) >> (32 - sa); ++ alu32_out = rk_hi | rj_lo; ++ } ++ SetResult(rd_reg(), static_cast(alu32_out)); ++ break; ++ } ++ case BYTEPICK_D: { ++ printf_instr("BYTEPICK_D\t %s: %016lx, %s: %016lx, %s: %016lx, sa3: %d\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk(), sa3()); ++ uint8_t sa = sa3() * 8; ++ if (sa == 0) { ++ alu_out = rk(); ++ } else { ++ int64_t mask = (1ULL << 63) >> (sa - 1); ++ int64_t rk_hi = (rk() & (~mask)) << sa; ++ int64_t rj_lo = (rj() & mask) >> (64 - sa); ++ alu_out = rk_hi | rj_lo; ++ } ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case ALSL_D: { ++ printf_instr("ALSL_D\t %s: %016lx, %s: %016lx, %s: %016lx, sa2: %d\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk(), sa2()); ++ CHECK_EQ(instr_.Bit(17), 0); ++ uint8_t sa = sa2() + 1; ++ alu_out = (rj() << sa) + rk(); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case SLLI: { ++ DCHECK_EQ(instr_.Bit(17), 0); ++ if (instr_.Bits(17, 15) == 0b001) { ++ // SLLI_W ++ printf_instr("SLLI_W\t %s: %016lx, %s: %016lx, ui5: %d\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), ui5()); ++ alu32_out = static_cast(rj()) << ui5(); ++ SetResult(rd_reg(), static_cast(alu32_out)); ++ } else if ((instr_.Bits(17, 16) == 0b01)) { ++ // SLLI_D ++ printf_instr("SLLI_D\t %s: %016lx, %s: %016lx, ui6: %d\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), ui6()); ++ SetResult(rd_reg(), rj() << ui6()); ++ } ++ break; ++ } ++ case SRLI: { ++ DCHECK_EQ(instr_.Bit(17), 0); ++ if (instr_.Bits(17, 15) == 0b001) { ++ // SRLI_W ++ printf_instr("SRLI_W\t %s: %016lx, %s: %016lx, ui5: %d\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), ui5()); ++ alu32_out = 
static_cast(rj()) >> ui5(); ++ SetResult(rd_reg(), static_cast(alu32_out)); ++ } else if (instr_.Bits(17, 16) == 0b01) { ++ // SRLI_D ++ printf_instr("SRLI_D\t %s: %016lx, %s: %016lx, ui6: %d\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), ui6()); ++ SetResult(rd_reg(), rj_u() >> ui6()); ++ } ++ break; ++ } ++ case SRAI: { ++ DCHECK_EQ(instr_.Bit(17), 0); ++ if (instr_.Bits(17, 15) == 0b001) { ++ // SRAI_W ++ printf_instr("SRAI_W\t %s: %016lx, %s: %016lx, ui5: %d\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), ui5()); ++ alu32_out = static_cast(rj()) >> ui5(); ++ SetResult(rd_reg(), static_cast(alu32_out)); ++ } else if (instr_.Bits(17, 16) == 0b01) { ++ // SRAI_D ++ printf_instr("SRAI_D\t %s: %016lx, %s: %016lx, ui6: %d\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), ui6()); ++ SetResult(rd_reg(), rj() >> ui6()); ++ } ++ break; ++ } ++ case ROTRI: { ++ DCHECK_EQ(instr_.Bit(17), 0); ++ if (instr_.Bits(17, 15) == 0b001) { ++ // ROTRI_W ++ printf_instr("ROTRI_W\t %s: %016lx, %s: %016lx, ui5: %d\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), ui5()); ++ alu32_out = static_cast( ++ base::bits::RotateRight32(static_cast(rj_u()), ++ static_cast(ui5()))); ++ SetResult(rd_reg(), static_cast(alu32_out)); ++ } else if (instr_.Bits(17, 16) == 0b01) { ++ // ROTRI_D ++ printf_instr("ROTRI_D\t %s: %016lx, %s: %016lx, ui6: %d\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), ui6()); ++ alu_out = ++ static_cast(base::bits::RotateRight64(rj_u(), ui6())); ++ SetResult(rd_reg(), alu_out); ++ printf_instr("ROTRI, %s, %s, %d\n", Registers::Name(rd_reg()), ++ Registers::Name(rj_reg()), ui6()); ++ } ++ break; ++ } ++ case LDDIR: ++ case LDPTE: ++ UNIMPLEMENTED(); ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Simulator::DecodeTypeOp17() { ++ int64_t alu_out; ++ ++ switch (instr_.Bits(31, 15) << 15) { ++ case ADD_W: { ++ printf_instr("ADD_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ int32_t alu32_out = static_cast(rj() + rk()); ++ // Sign-extend result of 32bit operation into 64bit register. ++ SetResult(rd_reg(), static_cast(alu32_out)); ++ break; ++ } ++ case ADD_D: ++ printf_instr("ADD_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), rj() + rk()); ++ break; ++ case SUB_W: { ++ printf_instr("SUB_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ int32_t alu32_out = static_cast(rj() - rk()); ++ // Sign-extend result of 32bit operation into 64bit register. ++ SetResult(rd_reg(), static_cast(alu32_out)); ++ break; ++ } ++ case SUB_D: ++ printf_instr("SUB_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), rj() - rk()); ++ break; ++ case SLT: ++ printf_instr("SLT\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), rj() < rk() ? 
1 : 0); ++ break; ++ case SLTU: ++ printf_instr("SLTU\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), rj_u() < rk_u() ? 1 : 0); ++ break; ++ case MASKEQZ: ++ printf_instr("MASKEQZ\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), rk() == 0 ? rj() : 0); ++ break; ++ case MASKNEZ: ++ printf_instr("MASKNEZ\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), rk() != 0 ? rj() : 0); ++ break; ++ case NOR: ++ printf_instr("NOR\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), ~(rj() | rk())); ++ break; ++ case AND: ++ printf_instr("AND\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), rj() & rk()); ++ break; ++ case OR: ++ printf_instr("OR\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), rj() | rk()); ++ break; ++ case XOR: ++ printf_instr("XOR\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), rj() ^ rk()); ++ break; ++ case ORN: ++ printf_instr("ORN\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), rj() | (~rk())); ++ break; ++ case ANDN: ++ printf_instr("ANDN\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), rj() & (~rk())); ++ break; ++ case SLL_W: ++ printf_instr("SLL_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), (int32_t)rj() << (rk_u() % 32)); ++ break; ++ case SRL_W: { ++ printf_instr("SRL_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ alu_out = static_cast((uint32_t)rj_u() >> (rk_u() % 32)); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case SRA_W: ++ printf_instr("SRA_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), (int32_t)rj() >> (rk_u() % 32)); ++ break; ++ case SLL_D: ++ printf_instr("SLL_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), rj() << (rk_u() % 64)); ++ break; ++ case SRL_D: { ++ printf_instr("SRL_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ alu_out = static_cast(rj_u() >> (rk_u() % 64)); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case SRA_D: ++ printf_instr("SRA_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ 
Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), rj() >> (rk_u() % 64)); ++ break; ++ case ROTR_W: { ++ printf_instr("ROTR_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ alu_out = static_cast( ++ base::bits::RotateRight32(static_cast(rj_u()), ++ static_cast(rk_u() % 32))); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case ROTR_D: { ++ printf_instr("ROTR_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ alu_out = static_cast( ++ base::bits::RotateRight64((rj_u()), (rk_u() % 64))); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case MUL_W: { ++ printf_instr("MUL_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ alu_out = static_cast(rj()) * static_cast(rk()); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case MULH_W: { ++ printf_instr("MULH_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ int32_t rj_lo = static_cast(rj()); ++ int32_t rk_lo = static_cast(rk()); ++ alu_out = static_cast(rj_lo) * static_cast(rk_lo); ++ SetResult(rd_reg(), alu_out >> 32); ++ break; ++ } ++ case MULH_WU: { ++ printf_instr("MULH_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ uint32_t rj_lo = static_cast(rj_u()); ++ uint32_t rk_lo = static_cast(rk_u()); ++ alu_out = static_cast(rj_lo) * static_cast(rk_lo); ++ SetResult(rd_reg(), alu_out >> 32); ++ break; ++ } ++ case MUL_D: ++ printf_instr("MUL_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), rj() * rk()); ++ break; ++ case MULH_D: ++ printf_instr("MULH_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), MultiplyHighSigned(rj(), rk())); ++ break; ++ case MULH_DU: ++ printf_instr("MULH_DU\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), MultiplyHighUnsigned(rj_u(), rk_u())); ++ break; ++ case MULW_D_W: { ++ printf_instr("MULW_D_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ int64_t rj_i32 = static_cast(rj()); ++ int64_t rk_i32 = static_cast(rk()); ++ SetResult(rd_reg(), rj_i32 * rk_i32); ++ break; ++ } ++ case MULW_D_WU: { ++ printf_instr("MULW_D_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ uint64_t rj_u32 = static_cast(rj_u()); ++ uint64_t rk_u32 = static_cast(rk_u()); ++ SetResult(rd_reg(), rj_u32 * rk_u32); ++ break; ++ } ++ case DIV_W: { ++ printf_instr("DIV_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ int32_t rj_i32 = static_cast(rj()); ++ int32_t rk_i32 = static_cast(rk()); ++ if (rj_i32 == 
INT_MIN && rk_i32 == -1) { ++ SetResult(rd_reg(), INT_MIN); ++ } else if (rk_i32 != 0) { ++ SetResult(rd_reg(), rj_i32 / rk_i32); ++ } ++ break; ++ } ++ case MOD_W: { ++ printf_instr("MOD_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ int32_t rj_i32 = static_cast(rj()); ++ int32_t rk_i32 = static_cast(rk()); ++ if (rj_i32 == INT_MIN && rk_i32 == -1) { ++ SetResult(rd_reg(), 0); ++ } else if (rk_i32 != 0) { ++ SetResult(rd_reg(), rj_i32 % rk_i32); ++ } ++ break; ++ } ++ case DIV_WU: { ++ printf_instr("DIV_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ uint32_t rj_u32 = static_cast(rj()); ++ uint32_t rk_u32 = static_cast(rk()); ++ if (rk_u32 != 0) { ++ SetResult(rd_reg(), static_cast(rj_u32 / rk_u32)); ++ } ++ break; ++ } ++ case MOD_WU: { ++ printf_instr("MOD_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ uint32_t rj_u32 = static_cast(rj()); ++ uint32_t rk_u32 = static_cast(rk()); ++ if (rk_u32 != 0) { ++ SetResult(rd_reg(), static_cast(rj_u32 % rk_u32)); ++ } ++ break; ++ } ++ case DIV_D: { ++ printf_instr("DIV_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ if (rj() == LONG_MIN && rk() == -1) { ++ SetResult(rd_reg(), LONG_MIN); ++ } else if (rk() != 0) { ++ SetResult(rd_reg(), rj() / rk()); ++ } ++ break; ++ } ++ case MOD_D: { ++ printf_instr("MOD_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ if (rj() == LONG_MIN && rk() == -1) { ++ SetResult(rd_reg(), 0); ++ } else if (rk() != 0) { ++ SetResult(rd_reg(), rj() % rk()); ++ } ++ break; ++ } ++ case DIV_DU: { ++ printf_instr("DIV_DU\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ if (rk_u() != 0) { ++ SetResult(rd_reg(), static_cast(rj_u() / rk_u())); ++ } ++ break; ++ } ++ case MOD_DU: { ++ printf_instr("MOD_DU\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ if (rk_u() != 0) { ++ SetResult(rd_reg(), static_cast(rj_u() % rk_u())); ++ } ++ break; ++ } ++ case BREAK: ++ printf_instr("BREAK\t code: %x\n", instr_.Bits(14, 0)); ++ SoftwareInterrupt(); ++ break; ++ case FADD_S: { ++ printf_instr("FADD_S\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ FPURegisters::Name(fj_reg()), fj_float(), ++ FPURegisters::Name(fk_reg()), fk_float()); ++ SetFPUFloatResult( ++ fd_reg(), ++ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs + rhs; }, ++ fj_float(), fk_float())); ++ break; ++ } ++ case FADD_D: { ++ printf_instr("FADD_D\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double(), ++ FPURegisters::Name(fk_reg()), fk_double()); ++ SetFPUDoubleResult(fd_reg(), ++ FPUCanonalizeOperation( ++ [](double lhs, double rhs) { return lhs + rhs; }, ++ fj_double(), fk_double())); ++ break; ++ } ++ case FSUB_S: { ++ printf_instr("FSUB_S\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ 
FPURegisters::Name(fj_reg()), fj_float(), ++ FPURegisters::Name(fk_reg()), fk_float()); ++ SetFPUFloatResult( ++ fd_reg(), ++ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs - rhs; }, ++ fj_float(), fk_float())); ++ break; ++ } ++ case FSUB_D: { ++ printf_instr("FSUB_D\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double(), ++ FPURegisters::Name(fk_reg()), fk_double()); ++ SetFPUDoubleResult(fd_reg(), ++ FPUCanonalizeOperation( ++ [](double lhs, double rhs) { return lhs - rhs; }, ++ fj_double(), fk_double())); ++ break; ++ } ++ case FMUL_S: { ++ printf_instr("FMUL_S\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ FPURegisters::Name(fj_reg()), fj_float(), ++ FPURegisters::Name(fk_reg()), fk_float()); ++ SetFPUFloatResult( ++ fd_reg(), ++ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs * rhs; }, ++ fj_float(), fk_float())); ++ break; ++ } ++ case FMUL_D: { ++ printf_instr("FMUL_D\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double(), ++ FPURegisters::Name(fk_reg()), fk_double()); ++ SetFPUDoubleResult(fd_reg(), ++ FPUCanonalizeOperation( ++ [](double lhs, double rhs) { return lhs * rhs; }, ++ fj_double(), fk_double())); ++ break; ++ } ++ case FDIV_S: { ++ printf_instr("FDIV_S\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ FPURegisters::Name(fj_reg()), fj_float(), ++ FPURegisters::Name(fk_reg()), fk_float()); ++ SetFPUFloatResult( ++ fd_reg(), ++ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs / rhs; }, ++ fj_float(), fk_float())); ++ break; ++ } ++ case FDIV_D: { ++ printf_instr("FDIV_D\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double(), ++ FPURegisters::Name(fk_reg()), fk_double()); ++ SetFPUDoubleResult(fd_reg(), ++ FPUCanonalizeOperation( ++ [](double lhs, double rhs) { return lhs / rhs; }, ++ fj_double(), fk_double())); ++ break; ++ } ++ case FMAX_S: ++ printf_instr("FMAX_S\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ FPURegisters::Name(fj_reg()), fj_float(), ++ FPURegisters::Name(fk_reg()), fk_float()); ++ SetFPUFloatResult(fd_reg(), FPUMax(fk_float(), fj_float())); ++ break; ++ case FMAX_D: ++ printf_instr("FMAX_D\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double(), ++ FPURegisters::Name(fk_reg()), fk_double()); ++ SetFPUDoubleResult(fd_reg(), FPUMax(fk_double(), fj_double())); ++ break; ++ case FMIN_S: ++ printf_instr("FMIN_S\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ FPURegisters::Name(fj_reg()), fj_float(), ++ FPURegisters::Name(fk_reg()), fk_float()); ++ SetFPUFloatResult(fd_reg(), FPUMin(fk_float(), fj_float())); ++ break; ++ case FMIN_D: ++ printf_instr("FMIN_D\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double(), ++ FPURegisters::Name(fk_reg()), fk_double()); ++ SetFPUDoubleResult(fd_reg(), FPUMin(fk_double(), fj_double())); ++ break; ++ case FMAXA_S: ++ printf_instr("FMAXA_S\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ FPURegisters::Name(fj_reg()), fj_float(), ++ FPURegisters::Name(fk_reg()), fk_float()); ++ SetFPUFloatResult(fd_reg(), FPUMaxA(fk_float(), fj_float())); 
++ break; ++ case FMAXA_D: ++ printf_instr("FMAXA_D\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double(), ++ FPURegisters::Name(fk_reg()), fk_double()); ++ SetFPUDoubleResult(fd_reg(), FPUMaxA(fk_double(), fj_double())); ++ break; ++ case FMINA_S: ++ printf_instr("FMINA_S\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ FPURegisters::Name(fj_reg()), fj_float(), ++ FPURegisters::Name(fk_reg()), fk_float()); ++ SetFPUFloatResult(fd_reg(), FPUMinA(fk_float(), fj_float())); ++ break; ++ case FMINA_D: ++ printf_instr("FMINA_D\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double(), ++ FPURegisters::Name(fk_reg()), fk_double()); ++ SetFPUDoubleResult(fd_reg(), FPUMinA(fk_double(), fj_double())); ++ break; ++ case LDX_B: ++ printf_instr("LDX_B\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ set_register(rd_reg(), ReadB(rj() + rk())); ++ break; ++ case LDX_H: ++ printf_instr("LDX_H\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ set_register(rd_reg(), ReadH(rj() + rk(), instr_.instr())); ++ break; ++ case LDX_W: ++ printf_instr("LDX_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ set_register(rd_reg(), ReadW(rj() + rk(), instr_.instr())); ++ break; ++ case LDX_D: ++ printf_instr("LDX_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ set_register(rd_reg(), Read2W(rj() + rk(), instr_.instr())); ++ break; ++ case STX_B: ++ printf_instr("STX_B\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ WriteB(rj() + rk(), static_cast(rd())); ++ break; ++ case STX_H: ++ printf_instr("STX_H\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ WriteH(rj() + rk(), static_cast(rd()), instr_.instr()); ++ break; ++ case STX_W: ++ printf_instr("STX_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ WriteW(rj() + rk(), static_cast(rd()), instr_.instr()); ++ break; ++ case STX_D: ++ printf_instr("STX_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ Write2W(rj() + rk(), rd(), instr_.instr()); ++ break; ++ case LDX_BU: ++ printf_instr("LDX_BU\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ set_register(rd_reg(), ReadBU(rj() + rk())); ++ break; ++ case LDX_HU: ++ printf_instr("LDX_HU\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ set_register(rd_reg(), ReadHU(rj() + rk(), instr_.instr())); ++ break; ++ case LDX_WU: ++ printf_instr("LDX_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ 
rj(), Registers::Name(rk_reg()), rk()); ++ set_register(rd_reg(), ReadWU(rj() + rk(), instr_.instr())); ++ break; ++ case PRELDX: ++ printf("Sim UNIMPLEMENTED: PRELDX\n"); ++ UNIMPLEMENTED(); ++ break; ++ case FLDX_S: ++ printf_instr("FLDX_S\t %s: %016f, %s: %016lx, %s: %016lx\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ Registers::Name(rj_reg()), rj(), Registers::Name(rk_reg()), ++ rk()); ++ set_fpu_register(fd_reg(), kFPUInvalidResult); // Trash upper 32 bits. ++ set_fpu_register_word(fd_reg(), ++ ReadW(rj() + rk(), instr_.instr(), FLOAT_DOUBLE)); ++ break; ++ case FLDX_D: ++ printf_instr("FLDX_D\t %s: %016f, %s: %016lx, %s: %016lx\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ Registers::Name(rj_reg()), rj(), Registers::Name(rk_reg()), ++ rk()); ++ set_fpu_register_double(fd_reg(), ReadD(rj() + rk(), instr_.instr())); ++ break; ++ case FSTX_S: ++ printf_instr("FSTX_S\t %s: %016f, %s: %016lx, %s: %016lx\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ Registers::Name(rj_reg()), rj(), Registers::Name(rk_reg()), ++ rk()); ++ WriteW(rj() + rk(), static_cast(get_fpu_register(fd_reg())), ++ instr_.instr()); ++ break; ++ case FSTX_D: ++ printf_instr("FSTX_D\t %s: %016f, %s: %016lx, %s: %016lx\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ Registers::Name(rj_reg()), rj(), Registers::Name(rk_reg()), ++ rk()); ++ WriteD(rj() + rk(), get_fpu_register_double(fd_reg()), instr_.instr()); ++ break; ++ case ASRTLE_D: ++ printf("Sim UNIMPLEMENTED: ASRTLE_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case ASRTGT_D: ++ printf("Sim UNIMPLEMENTED: ASRTGT_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case DBGCALL: ++ printf("Sim UNIMPLEMENTED: DBGCALL\n"); ++ UNIMPLEMENTED(); ++ break; ++ case SYSCALL: ++ printf("Sim UNIMPLEMENTED: SYSCALL\n"); ++ UNIMPLEMENTED(); ++ break; ++ case HYPCALL: ++ printf("Sim UNIMPLEMENTED: HYPCALL\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMSWAP_W: ++ printf("Sim UNIMPLEMENTED: AMSWAP_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMSWAP_D: ++ printf("Sim UNIMPLEMENTED: AMSWAP_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMADD_W: ++ printf("Sim UNIMPLEMENTED: AMADD_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMADD_D: ++ printf("Sim UNIMPLEMENTED: AMADD_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMAND_W: ++ printf("Sim UNIMPLEMENTED: AMAND_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMAND_D: ++ printf("Sim UNIMPLEMENTED: AMAND_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMOR_W: ++ printf("Sim UNIMPLEMENTED: AMOR_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMOR_D: ++ printf("Sim UNIMPLEMENTED: AMOR_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMXOR_W: ++ printf("Sim UNIMPLEMENTED: AMXOR_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMXOR_D: ++ printf("Sim UNIMPLEMENTED: AMXOR_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMAX_W: ++ printf("Sim UNIMPLEMENTED: AMMAX_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMAX_D: ++ printf("Sim UNIMPLEMENTED: AMMAX_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMIN_W: ++ printf("Sim UNIMPLEMENTED: AMMIN_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMIN_D: ++ printf("Sim UNIMPLEMENTED: AMMIN_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMAX_WU: ++ printf("Sim UNIMPLEMENTED: AMMAX_WU\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMAX_DU: ++ printf("Sim UNIMPLEMENTED: AMMAX_DU\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMIN_WU: ++ printf("Sim UNIMPLEMENTED: AMMIN_WU\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMIN_DU: ++ printf("Sim UNIMPLEMENTED: AMMIN_DU\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMSWAP_DB_W: { ++ 
printf_instr("AMSWAP_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), ++ rk(), Registers::Name(rj_reg()), rj()); ++ int32_t rdvalue; ++ do { ++ { ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ set_register(rd_reg(), ReadW(rj(), instr_.instr())); ++ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word); ++ GlobalMonitor::Get()->NotifyLoadLinked_Locked( ++ rj(), &global_monitor_thread_); ++ } ++ rdvalue = get_register(rd_reg()); ++ WriteConditionalW(rj(), static_cast(rk()), instr_.instr(), ++ rd_reg()); ++ } while (!get_register(rd_reg())); ++ set_register(rd_reg(), rdvalue); ++ } break; ++ case AMSWAP_DB_D: { ++ printf_instr("AMSWAP_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), ++ rk(), Registers::Name(rj_reg()), rj()); ++ int64_t rdvalue; ++ do { ++ { ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ set_register(rd_reg(), Read2W(rj(), instr_.instr())); ++ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord); ++ GlobalMonitor::Get()->NotifyLoadLinked_Locked( ++ rj(), &global_monitor_thread_); ++ } ++ rdvalue = get_register(rd_reg()); ++ WriteConditional2W(rj(), rk(), instr_.instr(), rd_reg()); ++ } while (!get_register(rd_reg())); ++ set_register(rd_reg(), rdvalue); ++ } break; ++ case AMADD_DB_W: { ++ printf_instr("AMADD_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), ++ rk(), Registers::Name(rj_reg()), rj()); ++ int32_t rdvalue; ++ do { ++ { ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ set_register(rd_reg(), ReadW(rj(), instr_.instr())); ++ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word); ++ GlobalMonitor::Get()->NotifyLoadLinked_Locked( ++ rj(), &global_monitor_thread_); ++ } ++ rdvalue = get_register(rd_reg()); ++ WriteConditionalW(rj(), ++ static_cast(static_cast(rk()) + ++ static_cast(rd())), ++ instr_.instr(), rd_reg()); ++ } while (!get_register(rd_reg())); ++ set_register(rd_reg(), rdvalue); ++ } break; ++ case AMADD_DB_D: { ++ printf_instr("AMADD_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), ++ rk(), Registers::Name(rj_reg()), rj()); ++ int64_t rdvalue; ++ do { ++ { ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ set_register(rd_reg(), Read2W(rj(), instr_.instr())); ++ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord); ++ GlobalMonitor::Get()->NotifyLoadLinked_Locked( ++ rj(), &global_monitor_thread_); ++ } ++ rdvalue = get_register(rd_reg()); ++ WriteConditional2W(rj(), rk() + rd(), instr_.instr(), rd_reg()); ++ } while (!get_register(rd_reg())); ++ set_register(rd_reg(), rdvalue); ++ } break; ++ case AMAND_DB_W: { ++ printf_instr("AMAND_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), ++ rk(), Registers::Name(rj_reg()), rj()); ++ int32_t rdvalue; ++ do { ++ { ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ set_register(rd_reg(), ReadW(rj(), instr_.instr())); ++ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word); ++ GlobalMonitor::Get()->NotifyLoadLinked_Locked( ++ rj(), &global_monitor_thread_); ++ } ++ rdvalue = get_register(rd_reg()); ++ WriteConditionalW(rj(), ++ static_cast(static_cast(rk()) & ++ static_cast(rd())), ++ instr_.instr(), rd_reg()); ++ } while (!get_register(rd_reg())); ++ set_register(rd_reg(), rdvalue); ++ } 
break; ++ case AMAND_DB_D: { ++ printf_instr("AMAND_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), ++ rk(), Registers::Name(rj_reg()), rj()); ++ int64_t rdvalue; ++ do { ++ { ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ set_register(rd_reg(), Read2W(rj(), instr_.instr())); ++ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord); ++ GlobalMonitor::Get()->NotifyLoadLinked_Locked( ++ rj(), &global_monitor_thread_); ++ } ++ rdvalue = get_register(rd_reg()); ++ WriteConditional2W(rj(), rk() & rd(), instr_.instr(), rd_reg()); ++ } while (!get_register(rd_reg())); ++ set_register(rd_reg(), rdvalue); ++ } break; ++ case AMOR_DB_W: { ++ printf_instr("AMOR_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), ++ rk(), Registers::Name(rj_reg()), rj()); ++ int32_t rdvalue; ++ do { ++ { ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ set_register(rd_reg(), ReadW(rj(), instr_.instr())); ++ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word); ++ GlobalMonitor::Get()->NotifyLoadLinked_Locked( ++ rj(), &global_monitor_thread_); ++ } ++ rdvalue = get_register(rd_reg()); ++ WriteConditionalW(rj(), ++ static_cast(static_cast(rk()) | ++ static_cast(rd())), ++ instr_.instr(), rd_reg()); ++ } while (!get_register(rd_reg())); ++ set_register(rd_reg(), rdvalue); ++ } break; ++ case AMOR_DB_D: { ++ printf_instr("AMOR_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), ++ rk(), Registers::Name(rj_reg()), rj()); ++ int64_t rdvalue; ++ do { ++ { ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ set_register(rd_reg(), Read2W(rj(), instr_.instr())); ++ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord); ++ GlobalMonitor::Get()->NotifyLoadLinked_Locked( ++ rj(), &global_monitor_thread_); ++ } ++ rdvalue = get_register(rd_reg()); ++ WriteConditional2W(rj(), rk() | rd(), instr_.instr(), rd_reg()); ++ } while (!get_register(rd_reg())); ++ set_register(rd_reg(), rdvalue); ++ } break; ++ case AMXOR_DB_W: { ++ printf_instr("AMXOR_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), ++ rk(), Registers::Name(rj_reg()), rj()); ++ int32_t rdvalue; ++ do { ++ { ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ set_register(rd_reg(), ReadW(rj(), instr_.instr())); ++ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word); ++ GlobalMonitor::Get()->NotifyLoadLinked_Locked( ++ rj(), &global_monitor_thread_); ++ } ++ rdvalue = get_register(rd_reg()); ++ WriteConditionalW(rj(), ++ static_cast(static_cast(rk()) ^ ++ static_cast(rd())), ++ instr_.instr(), rd_reg()); ++ } while (!get_register(rd_reg())); ++ set_register(rd_reg(), rdvalue); ++ } break; ++ case AMXOR_DB_D: { ++ printf_instr("AMXOR_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), ++ rk(), Registers::Name(rj_reg()), rj()); ++ int64_t rdvalue; ++ do { ++ { ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ set_register(rd_reg(), Read2W(rj(), instr_.instr())); ++ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord); ++ GlobalMonitor::Get()->NotifyLoadLinked_Locked( ++ rj(), &global_monitor_thread_); ++ } ++ rdvalue = get_register(rd_reg()); ++ WriteConditional2W(rj(), rk() ^ rd(), instr_.instr(), rd_reg()); ++ } while (!get_register(rd_reg())); ++ 
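++      // rdvalue is the memory value observed by the linked load above; once
++      // the conditional store succeeds it becomes the destination register
++      // value, giving the usual AM* (fetch-and-op) semantics.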
set_register(rd_reg(), rdvalue); ++ } break; ++ case AMMAX_DB_W: ++ printf("Sim UNIMPLEMENTED: AMMAX_DB_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMAX_DB_D: ++ printf("Sim UNIMPLEMENTED: AMMAX_DB_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMIN_DB_W: ++ printf("Sim UNIMPLEMENTED: AMMIN_DB_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMIN_DB_D: ++ printf("Sim UNIMPLEMENTED: AMMIN_DB_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMAX_DB_WU: ++ printf("Sim UNIMPLEMENTED: AMMAX_DB_WU\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMAX_DB_DU: ++ printf("Sim UNIMPLEMENTED: AMMAX_DB_DU\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMIN_DB_WU: ++ printf("Sim UNIMPLEMENTED: AMMIN_DB_WU\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMIN_DB_DU: ++ printf("Sim UNIMPLEMENTED: AMMIN_DB_DU\n"); ++ UNIMPLEMENTED(); ++ break; ++ case DBAR: ++ printf_instr("DBAR\n"); ++ break; ++ case IBAR: ++ printf("Sim UNIMPLEMENTED: IBAR\n"); ++ UNIMPLEMENTED(); ++ break; ++ case FLDGT_S: ++ printf("Sim UNIMPLEMENTED: FLDGT_S\n"); ++ UNIMPLEMENTED(); ++ break; ++ case FLDGT_D: ++ printf("Sim UNIMPLEMENTED: FLDGT_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case FLDLE_S: ++ printf("Sim UNIMPLEMENTED: FLDLE_S\n"); ++ UNIMPLEMENTED(); ++ break; ++ case FLDLE_D: ++ printf("Sim UNIMPLEMENTED: FLDLE_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case FSTGT_S: ++ printf("Sim UNIMPLEMENTED: FSTGT_S\n"); ++ UNIMPLEMENTED(); ++ break; ++ case FSTGT_D: ++ printf("Sim UNIMPLEMENTED: FSTGT_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case FSTLE_S: ++ printf("Sim UNIMPLEMENTED: FSTLE_S\n"); ++ UNIMPLEMENTED(); ++ break; ++ case FSTLE_D: ++ printf("Sim UNIMPLEMENTED: FSTLE_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case LDGT_B: ++ printf("Sim UNIMPLEMENTED: LDGT_B\n"); ++ UNIMPLEMENTED(); ++ break; ++ case LDGT_H: ++ printf("Sim UNIMPLEMENTED: LDGT_H\n"); ++ UNIMPLEMENTED(); ++ break; ++ case LDGT_W: ++ printf("Sim UNIMPLEMENTED: LDGT_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case LDGT_D: ++ printf("Sim UNIMPLEMENTED: LDGT_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case LDLE_B: ++ printf("Sim UNIMPLEMENTED: LDLT_B\n"); ++ UNIMPLEMENTED(); ++ break; ++ case LDLE_H: ++ printf("Sim UNIMPLEMENTED: LDLE_H\n"); ++ UNIMPLEMENTED(); ++ break; ++ case LDLE_W: ++ printf("Sim UNIMPLEMENTED: LDLE_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case LDLE_D: ++ printf("Sim UNIMPLEMENTED: LDLE_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case STGT_B: ++ printf("Sim UNIMPLEMENTED: STGT_B\n"); ++ UNIMPLEMENTED(); ++ break; ++ case STGT_H: ++ printf("Sim UNIMPLEMENTED: STGT_H\n"); ++ UNIMPLEMENTED(); ++ break; ++ case STGT_W: ++ printf("Sim UNIMPLEMENTED: STGT_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case STGT_D: ++ printf("Sim UNIMPLEMENTED: STGT_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case STLE_B: ++ printf("Sim UNIMPLEMENTED: STLE_B\n"); ++ UNIMPLEMENTED(); ++ break; ++ case STLE_H: ++ printf("Sim UNIMPLEMENTED: STLE_H\n"); ++ UNIMPLEMENTED(); ++ break; ++ case STLE_W: ++ printf("Sim UNIMPLEMENTED: STLE_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case STLE_D: ++ printf("Sim UNIMPLEMENTED: STLE_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case WAIT_INVTLB: ++ printf("Sim UNIMPLEMENTED: WAIT_INVTLB\n"); ++ UNIMPLEMENTED(); ++ break; ++ case FSCALEB_S: ++ printf("Sim UNIMPLEMENTED: FSCALEB_S\n"); ++ UNIMPLEMENTED(); ++ break; ++ case FSCALEB_D: ++ printf("Sim UNIMPLEMENTED: FSCALEB_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case FCOPYSIGN_S: ++ printf("Sim UNIMPLEMENTED: FCOPYSIGN_S\n"); ++ UNIMPLEMENTED(); ++ break; ++ case FCOPYSIGN_D: ++ printf("Sim UNIMPLEMENTED: FCOPYSIGN_D\n"); ++ 
UNIMPLEMENTED(); ++ break; ++ case CRC_W_B_W: ++ printf("Sim UNIMPLEMENTED: CRC_W_B_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case CRC_W_H_W: ++ printf("Sim UNIMPLEMENTED: CRC_W_H_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case CRC_W_W_W: ++ printf("Sim UNIMPLEMENTED: CRC_W_W_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case CRC_W_D_W: ++ printf("Sim UNIMPLEMENTED: CRC_W_D_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case CRCC_W_B_W: ++ printf("Sim UNIMPLEMENTED: CRCC_W_B_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case CRCC_W_H_W: ++ printf("Sim UNIMPLEMENTED: CRCC_W_H_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case CRCC_W_W_W: ++ printf("Sim UNIMPLEMENTED: CRCC_W_W_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case CRCC_W_D_W: ++ printf("Sim UNIMPLEMENTED: CRCC_W_D_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Simulator::DecodeTypeOp22() { ++ int64_t alu_out; ++ ++ switch (instr_.Bits(31, 10) << 10) { ++ case CLZ_W: { ++ printf_instr("CLZ_W\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ alu_out = base::bits::CountLeadingZeros32(static_cast(rj_u())); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case CTZ_W: { ++ printf_instr("CTZ_W\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ alu_out = base::bits::CountTrailingZeros32(static_cast(rj_u())); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case CLZ_D: { ++ printf_instr("CLZ_D\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ alu_out = base::bits::CountLeadingZeros64(static_cast(rj_u())); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case CTZ_D: { ++ printf_instr("CTZ_D\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ alu_out = base::bits::CountTrailingZeros64(static_cast(rj_u())); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case REVB_2H: { ++ printf_instr("REVB_2H\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ uint32_t input = static_cast(rj()); ++ uint64_t output = 0; ++ ++ uint32_t mask = 0xFF000000; ++ for (int i = 0; i < 4; i++) { ++ uint32_t tmp = mask & input; ++ if (i % 2 == 0) { ++ tmp = tmp >> 8; ++ } else { ++ tmp = tmp << 8; ++ } ++ output = output | tmp; ++ mask = mask >> 8; ++ } ++ ++ alu_out = static_cast(static_cast(output)); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case REVB_4H: { ++ printf_instr("REVB_4H\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ uint64_t input = rj_u(); ++ uint64_t output = 0; ++ ++ uint64_t mask = 0xFF00000000000000; ++ for (int i = 0; i < 8; i++) { ++ uint64_t tmp = mask & input; ++ if (i % 2 == 0) { ++ tmp = tmp >> 8; ++ } else { ++ tmp = tmp << 8; ++ } ++ output = output | tmp; ++ mask = mask >> 8; ++ } ++ ++ alu_out = static_cast(output); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case REVB_2W: { ++ printf_instr("REVB_2W\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ uint64_t input = rj_u(); ++ uint64_t output = 0; ++ ++ uint64_t mask = 0xFF000000FF000000; ++ for (int i = 0; i < 4; i++) { ++ uint64_t tmp = mask & input; ++ if (i <= 1) { ++ tmp = tmp >> (24 - i * 16); ++ } else { ++ tmp = tmp << (i * 16 - 24); ++ } ++ output = output | tmp; ++ mask = mask >> 8; ++ } ++ ++ alu_out = static_cast(output); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ 
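++    // REVB_D below reverses the byte order of the whole 64-bit value, while
++    // the REVH_* cases swap 16-bit halfwords rather than bytes.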
case REVB_D: { ++ printf_instr("REVB_D\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ uint64_t input = rj_u(); ++ uint64_t output = 0; ++ ++ uint64_t mask = 0xFF00000000000000; ++ for (int i = 0; i < 8; i++) { ++ uint64_t tmp = mask & input; ++ if (i <= 3) { ++ tmp = tmp >> (56 - i * 16); ++ } else { ++ tmp = tmp << (i * 16 - 56); ++ } ++ output = output | tmp; ++ mask = mask >> 8; ++ } ++ ++ alu_out = static_cast(output); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case REVH_2W: { ++ printf_instr("REVH_2W\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ uint64_t input = rj_u(); ++ uint64_t output = 0; ++ ++ uint64_t mask = 0xFFFF000000000000; ++ for (int i = 0; i < 4; i++) { ++ uint64_t tmp = mask & input; ++ if (i % 2 == 0) { ++ tmp = tmp >> 16; ++ } else { ++ tmp = tmp << 16; ++ } ++ output = output | tmp; ++ mask = mask >> 16; ++ } ++ ++ alu_out = static_cast(output); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case REVH_D: { ++ printf_instr("REVH_D\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ uint64_t input = rj_u(); ++ uint64_t output = 0; ++ ++ uint64_t mask = 0xFFFF000000000000; ++ for (int i = 0; i < 4; i++) { ++ uint64_t tmp = mask & input; ++ if (i <= 1) { ++ tmp = tmp >> (48 - i * 32); ++ } else { ++ tmp = tmp << (i * 32 - 48); ++ } ++ output = output | tmp; ++ mask = mask >> 16; ++ } ++ ++ alu_out = static_cast(output); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case BITREV_4B: { ++ printf_instr("BITREV_4B\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ uint32_t input = static_cast(rj()); ++ uint32_t output = 0; ++ uint8_t i_byte, o_byte; ++ ++ // Reverse the bit in byte for each individual byte ++ for (int i = 0; i < 4; i++) { ++ output = output >> 8; ++ i_byte = input & 0xFF; ++ ++ // Fast way to reverse bits in byte ++ // Devised by Sean Anderson, July 13, 2001 ++ o_byte = static_cast(((i_byte * 0x0802LU & 0x22110LU) | ++ (i_byte * 0x8020LU & 0x88440LU)) * ++ 0x10101LU >> ++ 16); ++ ++ output = output | (static_cast(o_byte << 24)); ++ input = input >> 8; ++ } ++ ++ alu_out = static_cast(static_cast(output)); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case BITREV_8B: { ++ printf_instr("BITREV_8B\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ uint64_t input = rj_u(); ++ uint64_t output = 0; ++ uint8_t i_byte, o_byte; ++ ++ // Reverse the bit in byte for each individual byte ++ for (int i = 0; i < 8; i++) { ++ output = output >> 8; ++ i_byte = input & 0xFF; ++ ++ // Fast way to reverse bits in byte ++ // Devised by Sean Anderson, July 13, 2001 ++ o_byte = static_cast(((i_byte * 0x0802LU & 0x22110LU) | ++ (i_byte * 0x8020LU & 0x88440LU)) * ++ 0x10101LU >> ++ 16); ++ ++ output = output | (static_cast(o_byte) << 56); ++ input = input >> 8; ++ } ++ ++ alu_out = static_cast(output); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case BITREV_W: { ++ printf_instr("BITREV_W\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ uint32_t input = static_cast(rj()); ++ uint32_t output = 0; ++ output = base::bits::ReverseBits(input); ++ alu_out = static_cast(static_cast(output)); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case BITREV_D: { ++ printf_instr("BITREV_D\t %s: %016lx, %s, %016lx\n", ++ 
Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ alu_out = static_cast(base::bits::ReverseBits(rj_u())); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case EXT_W_B: { ++ printf_instr("EXT_W_B\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ uint8_t input = static_cast(rj()); ++ alu_out = static_cast(static_cast(input)); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case EXT_W_H: { ++ printf_instr("EXT_W_H\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ uint16_t input = static_cast(rj()); ++ alu_out = static_cast(static_cast(input)); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case FABS_S: ++ printf_instr("FABS_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ SetFPUFloatResult(fd_reg(), std::abs(fj_float())); ++ break; ++ case FABS_D: ++ printf_instr("FABS_D\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ SetFPUDoubleResult(fd_reg(), std::abs(fj_double())); ++ break; ++ case FNEG_S: ++ printf_instr("FNEG_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ SetFPUFloatResult(fd_reg(), -fj_float()); ++ break; ++ case FNEG_D: ++ printf_instr("FNEG_D\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ SetFPUDoubleResult(fd_reg(), -fj_double()); ++ break; ++ case FSQRT_S: { ++ printf_instr("FSQRT_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ if (fj_float() >= 0) { ++ SetFPUFloatResult(fd_reg(), std::sqrt(fj_float())); ++ } else { ++ SetFPUFloatResult(fd_reg(), std::sqrt(-1)); // qnan ++ set_fcsr_bit(kFCSRInvalidOpFlagBit, true); ++ } ++ break; ++ } ++ case FSQRT_D: { ++ printf_instr("FSQRT_D\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ if (fj_double() >= 0) { ++ SetFPUDoubleResult(fd_reg(), std::sqrt(fj_double())); ++ } else { ++ SetFPUDoubleResult(fd_reg(), std::sqrt(-1)); // qnan ++ set_fcsr_bit(kFCSRInvalidOpFlagBit, true); ++ } ++ break; ++ } ++ case FMOV_S: ++ printf_instr("FMOV_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ SetFPUFloatResult(fd_reg(), fj_float()); ++ break; ++ case FMOV_D: ++ printf_instr("FMOV_D\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ SetFPUDoubleResult(fd_reg(), fj_double()); ++ break; ++ case MOVGR2FR_W: { ++ printf_instr("MOVGR2FR_W\t %s: %016f, %s, %016lx\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ Registers::Name(rj_reg()), rj()); ++ set_fpu_register_word(fd_reg(), static_cast(rj())); ++ TraceRegWr(get_fpu_register(fd_reg()), FLOAT_DOUBLE); ++ break; ++ } ++ case MOVGR2FR_D: ++ printf_instr("MOVGR2FR_D\t %s: %016f, %s, %016lx\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ Registers::Name(rj_reg()), rj()); ++ SetFPUResult2(fd_reg(), rj()); ++ break; ++ case MOVGR2FRH_W: { ++ printf_instr("MOVGR2FRH_W\t %s: %016f, %s, %016lx\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ Registers::Name(rj_reg()), rj()); ++ set_fpu_register_hi_word(fd_reg(), static_cast(rj())); ++ TraceRegWr(get_fpu_register(fd_reg()), 
DOUBLE); ++ break; ++ } ++ case MOVFR2GR_S: { ++ printf_instr("MOVFR2GR_S\t %s: %016lx, %s, %016f\n", ++ Registers::Name(rd_reg()), rd(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ set_register(rd_reg(), ++ static_cast(get_fpu_register_word(fj_reg()))); ++ TraceRegWr(get_register(rd_reg()), WORD_DWORD); ++ break; ++ } ++ case MOVFR2GR_D: ++ printf_instr("MOVFR2GR_D\t %s: %016lx, %s, %016f\n", ++ Registers::Name(rd_reg()), rd(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ SetResult(rd_reg(), get_fpu_register(fj_reg())); ++ break; ++ case MOVFRH2GR_S: ++ printf_instr("MOVFRH2GR_S\t %s: %016lx, %s, %016f\n", ++ Registers::Name(rd_reg()), rd(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ SetResult(rd_reg(), get_fpu_register_hi_word(fj_reg())); ++ break; ++ case MOVGR2FCSR: { ++ printf_instr("MOVGR2FCSR\t fcsr: %016x, %s, %016lx\n", FCSR_, ++ Registers::Name(rj_reg()), rj()); ++ // fcsr could be 0-3 ++ CHECK_LT(rd_reg(), 4); ++ FCSR_ = static_cast(rj()); ++ TraceRegWr(FCSR_); ++ break; ++ } ++ case MOVFCSR2GR: { ++ printf_instr("MOVFCSR2GR\t %s, %016lx, FCSR: %016x\n", ++ Registers::Name(rd_reg()), rd(), FCSR_); ++ // fcsr could be 0-3 ++ CHECK_LT(rj_reg(), 4); ++ SetResult(rd_reg(), FCSR_); ++ break; ++ } ++ case FCVT_S_D: ++ printf_instr("FCVT_S_D\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ SetFPUFloatResult(fd_reg(), static_cast(fj_double())); ++ break; ++ case FCVT_D_S: ++ printf_instr("FCVT_D_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ SetFPUDoubleResult(fd_reg(), static_cast(fj_float())); ++ break; ++ case FTINTRM_W_S: { ++ printf_instr("FTINTRM_W_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ float fj = fj_float(); ++ float rounded = std::floor(fj); ++ int32_t result = static_cast(rounded); ++ SetFPUWordResult(fd_reg(), result); ++ if (set_fcsr_round_error(fj, rounded)) { ++ set_fpu_register_word_invalid_result(fj, rounded); ++ } ++ break; ++ } ++ case FTINTRM_W_D: { ++ printf_instr("FTINTRM_W_D\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ double fj = fj_double(); ++ double rounded = std::floor(fj); ++ int32_t result = static_cast(rounded); ++ SetFPUWordResult(fd_reg(), result); ++ if (set_fcsr_round_error(fj, rounded)) { ++ set_fpu_register_invalid_result(fj, rounded); ++ } ++ break; ++ } ++ case FTINTRM_L_S: { ++ printf_instr("FTINTRM_L_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ float fj = fj_float(); ++ float rounded = std::floor(fj); ++ int64_t result = static_cast(rounded); ++ SetFPUResult(fd_reg(), result); ++ if (set_fcsr_round64_error(fj, rounded)) { ++ set_fpu_register_invalid_result64(fj, rounded); ++ } ++ break; ++ } ++ case FTINTRM_L_D: { ++ printf_instr("FTINTRM_L_D\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ double fj = fj_double(); ++ double rounded = std::floor(fj); ++ int64_t result = static_cast(rounded); ++ SetFPUResult(fd_reg(), result); ++ if (set_fcsr_round64_error(fj, rounded)) { ++ set_fpu_register_invalid_result64(fj, rounded); ++ } ++ break; ++ } ++ case FTINTRP_W_S: { ++ printf_instr("FTINTRP_W_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ 
FPURegisters::Name(fj_reg()), fj_float()); ++ float fj = fj_float(); ++ float rounded = std::ceil(fj); ++ int32_t result = static_cast(rounded); ++ SetFPUWordResult(fd_reg(), result); ++ if (set_fcsr_round_error(fj, rounded)) { ++ set_fpu_register_word_invalid_result(fj, rounded); ++ } ++ break; ++ } ++ case FTINTRP_W_D: { ++ printf_instr("FTINTRP_W_D\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ double fj = fj_double(); ++ double rounded = std::ceil(fj); ++ int32_t result = static_cast(rounded); ++ SetFPUWordResult(fd_reg(), result); ++ if (set_fcsr_round_error(fj, rounded)) { ++ set_fpu_register_invalid_result(fj, rounded); ++ } ++ break; ++ } ++ case FTINTRP_L_S: { ++ printf_instr("FTINTRP_L_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ float fj = fj_float(); ++ float rounded = std::ceil(fj); ++ int64_t result = static_cast(rounded); ++ SetFPUResult(fd_reg(), result); ++ if (set_fcsr_round64_error(fj, rounded)) { ++ set_fpu_register_invalid_result64(fj, rounded); ++ } ++ break; ++ } ++ case FTINTRP_L_D: { ++ printf_instr("FTINTRP_L_D\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ double fj = fj_double(); ++ double rounded = std::ceil(fj); ++ int64_t result = static_cast(rounded); ++ SetFPUResult(fd_reg(), result); ++ if (set_fcsr_round64_error(fj, rounded)) { ++ set_fpu_register_invalid_result64(fj, rounded); ++ } ++ break; ++ } ++ case FTINTRZ_W_S: { ++ printf_instr("FTINTRZ_W_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ float fj = fj_float(); ++ float rounded = std::trunc(fj); ++ int32_t result = static_cast(rounded); ++ SetFPUWordResult(fd_reg(), result); ++ if (set_fcsr_round_error(fj, rounded)) { ++ set_fpu_register_word_invalid_result(fj, rounded); ++ } ++ break; ++ } ++ case FTINTRZ_W_D: { ++ printf_instr("FTINTRZ_W_D\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ double fj = fj_double(); ++ double rounded = std::trunc(fj); ++ int32_t result = static_cast(rounded); ++ SetFPUWordResult(fd_reg(), result); ++ if (set_fcsr_round_error(fj, rounded)) { ++ set_fpu_register_invalid_result(fj, rounded); ++ } ++ break; ++ } ++ case FTINTRZ_L_S: { ++ printf_instr("FTINTRZ_L_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ float fj = fj_float(); ++ float rounded = std::trunc(fj); ++ int64_t result = static_cast(rounded); ++ SetFPUResult(fd_reg(), result); ++ if (set_fcsr_round64_error(fj, rounded)) { ++ set_fpu_register_invalid_result64(fj, rounded); ++ } ++ break; ++ } ++ case FTINTRZ_L_D: { ++ printf_instr("FTINTRZ_L_D\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ double fj = fj_double(); ++ double rounded = std::trunc(fj); ++ int64_t result = static_cast(rounded); ++ SetFPUResult(fd_reg(), result); ++ if (set_fcsr_round64_error(fj, rounded)) { ++ set_fpu_register_invalid_result64(fj, rounded); ++ } ++ break; ++ } ++ case FTINTRNE_W_S: { ++ printf_instr("FTINTRNE_W_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ float fj = fj_float(); ++ float rounded = std::floor(fj + 0.5); ++ int32_t 
result = static_cast<int32_t>(rounded);
++      if ((result & 1) != 0 && result - fj == 0.5) {
++        // If the number is halfway between two integers,
++        // round to the even one.
++        result--;
++      }
++      SetFPUWordResult(fd_reg(), result);
++      if (set_fcsr_round_error(fj, rounded)) {
++        set_fpu_register_word_invalid_result(fj, rounded);
++      }
++      break;
++    }
++    case FTINTRNE_W_D: {
++      printf_instr("FTINTRNE_W_D\t %s: %016f, %s, %016f\n",
++                   FPURegisters::Name(fd_reg()), fd_double(),
++                   FPURegisters::Name(fj_reg()), fj_double());
++      double fj = fj_double();
++      double rounded = std::floor(fj + 0.5);
++      int32_t result = static_cast<int32_t>(rounded);
++      if ((result & 1) != 0 && result - fj == 0.5) {
++        // If the number is halfway between two integers,
++        // round to the even one.
++        result--;
++      }
++      SetFPUWordResult(fd_reg(), result);
++      if (set_fcsr_round_error(fj, rounded)) {
++        set_fpu_register_invalid_result(fj, rounded);
++      }
++      break;
++    }
++    case FTINTRNE_L_S: {
++      printf_instr("FTINTRNE_L_S\t %s: %016f, %s, %016f\n",
++                   FPURegisters::Name(fd_reg()), fd_double(),
++                   FPURegisters::Name(fj_reg()), fj_float());
++      float fj = fj_float();
++      float rounded = std::floor(fj + 0.5);
++      int64_t result = static_cast<int64_t>(rounded);
++      if ((result & 1) != 0 && result - fj == 0.5) {
++        // If the number is halfway between two integers,
++        // round to the even one.
++        result--;
++      }
++      SetFPUResult(fd_reg(), result);
++      if (set_fcsr_round64_error(fj, rounded)) {
++        set_fpu_register_invalid_result64(fj, rounded);
++      }
++      break;
++    }
++    case FTINTRNE_L_D: {
++      printf_instr("FTINTRNE_L_D\t %s: %016f, %s, %016f\n",
++                   FPURegisters::Name(fd_reg()), fd_double(),
++                   FPURegisters::Name(fj_reg()), fj_double());
++      double fj = fj_double();
++      double rounded = std::floor(fj + 0.5);
++      int64_t result = static_cast<int64_t>(rounded);
++      if ((result & 1) != 0 && result - fj == 0.5) {
++        // If the number is halfway between two integers,
++        // round to the even one.
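++        // e.g. 2.5 rounds to 2 and 3.5 rounds to 4 (ties are broken towards
++        // the even integer).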
++ result--; ++ } ++ SetFPUResult(fd_reg(), result); ++ if (set_fcsr_round64_error(fj, rounded)) { ++ set_fpu_register_invalid_result64(fj, rounded); ++ } ++ break; ++ } ++ case FTINT_W_S: { ++ printf_instr("FTINT_W_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ float fj = fj_float(); ++ float rounded; ++ int32_t result; ++ round_according_to_fcsr(fj, &rounded, &result); ++ SetFPUWordResult(fd_reg(), result); ++ if (set_fcsr_round_error(fj, rounded)) { ++ set_fpu_register_word_invalid_result(fj, rounded); ++ } ++ break; ++ } ++ case FTINT_W_D: { ++ printf_instr("FTINT_W_D\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ double fj = fj_double(); ++ double rounded; ++ int32_t result; ++ round_according_to_fcsr(fj, &rounded, &result); ++ SetFPUWordResult(fd_reg(), result); ++ if (set_fcsr_round_error(fj, rounded)) { ++ set_fpu_register_word_invalid_result(fj, rounded); ++ } ++ break; ++ } ++ case FTINT_L_S: { ++ printf_instr("FTINT_L_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ float fj = fj_float(); ++ float rounded; ++ int64_t result; ++ round64_according_to_fcsr(fj, &rounded, &result); ++ SetFPUResult(fd_reg(), result); ++ if (set_fcsr_round64_error(fj, rounded)) { ++ set_fpu_register_invalid_result64(fj, rounded); ++ } ++ break; ++ } ++ case FTINT_L_D: { ++ printf_instr("FTINT_L_D\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ double fj = fj_double(); ++ double rounded; ++ int64_t result; ++ round64_according_to_fcsr(fj, &rounded, &result); ++ SetFPUResult(fd_reg(), result); ++ if (set_fcsr_round64_error(fj, rounded)) { ++ set_fpu_register_invalid_result64(fj, rounded); ++ } ++ break; ++ } ++ case FFINT_S_W: { ++ alu_out = get_fpu_register_signed_word(fj_reg()); ++ printf_instr("FFINT_S_W\t %s: %016f, %s, %016x\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), (int)alu_out); ++ SetFPUFloatResult(fd_reg(), static_cast(alu_out)); ++ break; ++ } ++ case FFINT_S_L: { ++ alu_out = get_fpu_register(fj_reg()); ++ printf_instr("FFINT_S_L\t %s: %016f, %s, %016lx\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), alu_out); ++ SetFPUFloatResult(fd_reg(), static_cast(alu_out)); ++ break; ++ } ++ case FFINT_D_W: { ++ alu_out = get_fpu_register_signed_word(fj_reg()); ++ printf_instr("FFINT_D_W\t %s: %016f, %s, %016x\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), (int)alu_out); ++ SetFPUDoubleResult(fd_reg(), static_cast(alu_out)); ++ break; ++ } ++ case FFINT_D_L: { ++ alu_out = get_fpu_register(fj_reg()); ++ printf_instr("FFINT_D_L\t %s: %016f, %s, %016lx\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), alu_out); ++ SetFPUDoubleResult(fd_reg(), static_cast(alu_out)); ++ break; ++ } ++ case FRINT_S: { ++ printf_instr("FRINT_S\t %s: %016f, %s, %016f mode : ", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ float fj = fj_float(); ++ float result, temp_result; ++ double temp; ++ float upper = std::ceil(fj); ++ float lower = std::floor(fj); ++ switch (get_fcsr_rounding_mode()) { ++ case kRoundToNearest: ++ printf_instr(" kRoundToNearest\n"); ++ if (upper - fj < fj - lower) { ++ result = upper; ++ } else if (upper - fj > fj - 
lower) { ++ result = lower; ++ } else { ++ temp_result = upper / 2; ++ float reminder = std::modf(temp_result, &temp); ++ if (reminder == 0) { ++ result = upper; ++ } else { ++ result = lower; ++ } ++ } ++ break; ++ case kRoundToZero: ++ printf_instr(" kRoundToZero\n"); ++ result = (fj > 0 ? lower : upper); ++ break; ++ case kRoundToPlusInf: ++ printf_instr(" kRoundToPlusInf\n"); ++ result = upper; ++ break; ++ case kRoundToMinusInf: ++ printf_instr(" kRoundToMinusInf\n"); ++ result = lower; ++ break; ++ } ++ SetFPUFloatResult(fd_reg(), result); ++ if (result != fj) { ++ set_fcsr_bit(kFCSRInexactFlagBit, true); ++ } ++ break; ++ } ++ case FRINT_D: { ++ printf_instr("FRINT_D\t %s: %016f, %s, %016f mode : ", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ double fj = fj_double(); ++ double result, temp, temp_result; ++ double upper = std::ceil(fj); ++ double lower = std::floor(fj); ++ switch (get_fcsr_rounding_mode()) { ++ case kRoundToNearest: ++ printf_instr(" kRoundToNearest\n"); ++ if (upper - fj < fj - lower) { ++ result = upper; ++ } else if (upper - fj > fj - lower) { ++ result = lower; ++ } else { ++ temp_result = upper / 2; ++ double reminder = std::modf(temp_result, &temp); ++ if (reminder == 0) { ++ result = upper; ++ } else { ++ result = lower; ++ } ++ } ++ break; ++ case kRoundToZero: ++ printf_instr(" kRoundToZero\n"); ++ result = (fj > 0 ? lower : upper); ++ break; ++ case kRoundToPlusInf: ++ printf_instr(" kRoundToPlusInf\n"); ++ result = upper; ++ break; ++ case kRoundToMinusInf: ++ printf_instr(" kRoundToMinusInf\n"); ++ result = lower; ++ break; ++ } ++ SetFPUDoubleResult(fd_reg(), result); ++ if (result != fj) { ++ set_fcsr_bit(kFCSRInexactFlagBit, true); ++ } ++ break; ++ } ++ case MOVFR2CF: ++ printf("Sim UNIMPLEMENTED: MOVFR2CF\n"); ++ UNIMPLEMENTED(); ++ break; ++ case MOVCF2FR: ++ printf("Sim UNIMPLEMENTED: MOVCF2FR\n"); ++ UNIMPLEMENTED(); ++ break; ++ case MOVGR2CF: ++ printf_instr("MOVGR2CF\t FCC%d, %s: %016lx\n", cd_reg(), ++ Registers::Name(rj_reg()), rj()); ++ set_cf_register(cd_reg(), rj() & 1); ++ break; ++ case MOVCF2GR: ++ printf_instr("MOVCF2GR\t %s: %016lx, FCC%d\n", Registers::Name(rd_reg()), ++ rd(), cj_reg()); ++ SetResult(rd_reg(), cj()); ++ break; ++ case FRECIP_S: ++ printf("Sim UNIMPLEMENTED: FRECIP_S\n"); ++ UNIMPLEMENTED(); ++ break; ++ case FRECIP_D: ++ printf("Sim UNIMPLEMENTED: FRECIP_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case FRSQRT_S: ++ printf("Sim UNIMPLEMENTED: FRSQRT_S\n"); ++ UNIMPLEMENTED(); ++ break; ++ case FRSQRT_D: ++ printf("Sim UNIMPLEMENTED: FRSQRT_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case FCLASS_S: ++ printf("Sim UNIMPLEMENTED: FCLASS_S\n"); ++ UNIMPLEMENTED(); ++ break; ++ case FCLASS_D: ++ printf("Sim UNIMPLEMENTED: FCLASS_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case FLOGB_S: ++ printf("Sim UNIMPLEMENTED: FLOGB_S\n"); ++ UNIMPLEMENTED(); ++ break; ++ case FLOGB_D: ++ printf("Sim UNIMPLEMENTED: FLOGB_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case CLO_W: ++ printf("Sim UNIMPLEMENTED: CLO_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case CTO_W: ++ printf("Sim UNIMPLEMENTED: CTO_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case CLO_D: ++ printf("Sim UNIMPLEMENTED: CLO_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case CTO_D: ++ printf("Sim UNIMPLEMENTED: CTO_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case IOCSRRD_B: ++ printf("Sim UNIMPLEMENTED: IOCSRRD_B\n"); ++ UNIMPLEMENTED(); ++ break; ++ case IOCSRRD_H: ++ printf("Sim UNIMPLEMENTED: IOCSRRD_H\n"); ++ UNIMPLEMENTED(); ++ break; ++ case 
IOCSRRD_W: ++ printf("Sim UNIMPLEMENTED: IOCSRRD_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case IOCSRRD_D: ++ printf("Sim UNIMPLEMENTED: IOCSRRD_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case IOCSRWR_B: ++ printf("Sim UNIMPLEMENTED: IOCSRWR_B\n"); ++ UNIMPLEMENTED(); ++ break; ++ case IOCSRWR_H: ++ printf("Sim UNIMPLEMENTED: IOCSRWR_H\n"); ++ UNIMPLEMENTED(); ++ break; ++ case IOCSRWR_W: ++ printf("Sim UNIMPLEMENTED: IOCSRWR_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case IOCSRWR_D: ++ printf("Sim UNIMPLEMENTED: IOCSRWR_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case TLBINV: ++ printf("Sim UNIMPLEMENTED: TLBINV\n"); ++ UNIMPLEMENTED(); ++ break; ++ case TLBFLUSH: ++ printf("Sim UNIMPLEMENTED: TLBFLUSH\n"); ++ UNIMPLEMENTED(); ++ break; ++ case TLBP: ++ printf("Sim UNIMPLEMENTED: TLBP\n"); ++ UNIMPLEMENTED(); ++ break; ++ case TLBR: ++ printf("Sim UNIMPLEMENTED: TLBR\n"); ++ UNIMPLEMENTED(); ++ break; ++ case TLBWI: ++ printf("Sim UNIMPLEMENTED: TLBWI\n"); ++ UNIMPLEMENTED(); ++ break; ++ case TLBWR: ++ printf("Sim UNIMPLEMENTED: TLBWR\n"); ++ UNIMPLEMENTED(); ++ break; ++ case ERET: ++ printf("Sim UNIMPLEMENTED: ERET\n"); ++ UNIMPLEMENTED(); ++ break; ++ case RDTIMEL_W: ++ printf("Sim UNIMPLEMENTED: RDTIMEL_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case RDTIMEH_W: ++ printf("Sim UNIMPLEMENTED: RDTIMEH_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case RDTIME_D: ++ printf("Sim UNIMPLEMENTED: RDTIME_D\n"); ++ // case CPUCFG: ++ // TODO ++ UNIMPLEMENTED(); ++ break; ++ // Unimplemented opcodes raised an error in the configuration step before, ++ // so we can use the default here to set the destination register in common ++ // cases. ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++// Executes the current instruction. ++void Simulator::InstructionDecode(Instruction* instr) { ++ if (v8::internal::FLAG_check_icache) { ++ CheckICache(i_cache(), instr); ++ } ++ pc_modified_ = false; ++ ++ v8::internal::EmbeddedVector buffer; ++ ++ if (::v8::internal::FLAG_trace_sim) { ++ SNPrintF(trace_buf_, " "); ++ disasm::NameConverter converter; ++ disasm::Disassembler dasm(converter); ++ // Use a reasonably large buffer. ++ dasm.InstructionDecode(buffer, reinterpret_cast(instr)); ++ } ++ ++ static int instr_count = 0; ++ USE(instr_count); ++ instr_ = instr; ++ printf_instr("\nInstr%3d: %08x, PC: %016lx\t", instr_count++, ++ instr_.Bits(31, 0), get_pc()); ++ switch (instr_.InstructionType()) { ++ case Instruction::kOp6Type: ++ DecodeTypeOp6(); ++ break; ++ case Instruction::kOp7Type: ++ DecodeTypeOp7(); ++ break; ++ case Instruction::kOp8Type: ++ DecodeTypeOp8(); ++ break; ++ case Instruction::kOp10Type: ++ DecodeTypeOp10(); ++ break; ++ case Instruction::kOp12Type: ++ DecodeTypeOp12(); ++ break; ++ case Instruction::kOp14Type: ++ DecodeTypeOp14(); ++ break; ++ case Instruction::kOp17Type: ++ DecodeTypeOp17(); ++ break; ++ case Instruction::kOp22Type: ++ DecodeTypeOp22(); ++ break; ++ default: { ++ printf("instr_: %x\n", instr_.Bits(31, 0)); ++ UNREACHABLE(); ++ } ++ } ++ ++ if (::v8::internal::FLAG_trace_sim) { ++ PrintF(" 0x%08" PRIxPTR " %-44s %s\n", ++ reinterpret_cast(instr), buffer.begin(), ++ trace_buf_.begin()); ++ } ++ ++ if (!pc_modified_) { ++ set_register(pc, reinterpret_cast(instr) + kInstrSize); ++ } ++} ++ ++void Simulator::Execute() { ++ // Get the PC to simulate. Cannot use the accessor here as we need the ++ // raw PC value and not the one used as input to arithmetic instructions. 
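++  // Both loops below decode one instruction at a time until the PC reaches
++  // end_sim_pc; when FLAG_stop_sim_at is set, La64Debugger is entered once
++  // the requested instruction count is reached.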
++ int64_t program_counter = get_pc(); ++ if (::v8::internal::FLAG_stop_sim_at == 0) { ++ // Fast version of the dispatch loop without checking whether the simulator ++ // should be stopping at a particular executed instruction. ++ while (program_counter != end_sim_pc) { ++ Instruction* instr = reinterpret_cast(program_counter); ++ icount_++; ++ InstructionDecode(instr); ++ program_counter = get_pc(); ++ } ++ } else { ++ // FLAG_stop_sim_at is at the non-default value. Stop in the debugger when ++ // we reach the particular instruction count. ++ while (program_counter != end_sim_pc) { ++ Instruction* instr = reinterpret_cast(program_counter); ++ icount_++; ++ if (icount_ == static_cast(::v8::internal::FLAG_stop_sim_at)) { ++ La64Debugger dbg(this); ++ dbg.Debug(); ++ } else { ++ InstructionDecode(instr); ++ } ++ program_counter = get_pc(); ++ } ++ } ++} ++ ++void Simulator::CallInternal(Address entry) { ++ // Adjust JS-based stack limit to C-based stack limit. ++ isolate_->stack_guard()->AdjustStackLimitForSimulator(); ++ ++ // Prepare to execute the code at entry. ++ set_register(pc, static_cast(entry)); ++ // Put down marker for end of simulation. The simulator will stop simulation ++ // when the PC reaches this value. By saving the "end simulation" value into ++ // the LR the simulation stops when returning to this call point. ++ set_register(ra, end_sim_pc); ++ ++ // Remember the values of callee-saved registers. ++ int64_t s0_val = get_register(s0); ++ int64_t s1_val = get_register(s1); ++ int64_t s2_val = get_register(s2); ++ int64_t s3_val = get_register(s3); ++ int64_t s4_val = get_register(s4); ++ int64_t s5_val = get_register(s5); ++ int64_t s6_val = get_register(s6); ++ int64_t s7_val = get_register(s7); ++ int64_t s8_val = get_register(s8); ++ int64_t gp_val = get_register(gp); ++ int64_t sp_val = get_register(sp); ++ int64_t tp_val = get_register(tp); ++ int64_t fp_val = get_register(fp); ++ ++ // Set up the callee-saved registers with a known value. To be able to check ++ // that they are preserved properly across JS execution. ++ int64_t callee_saved_value = icount_; ++ set_register(s0, callee_saved_value); ++ set_register(s1, callee_saved_value); ++ set_register(s2, callee_saved_value); ++ set_register(s3, callee_saved_value); ++ set_register(s4, callee_saved_value); ++ set_register(s5, callee_saved_value); ++ set_register(s6, callee_saved_value); ++ set_register(s7, callee_saved_value); ++ set_register(s8, callee_saved_value); ++ set_register(gp, callee_saved_value); ++ set_register(tp, callee_saved_value); ++ set_register(fp, callee_saved_value); ++ ++ // Start the simulation. ++ Execute(); ++ ++ // Check that the callee-saved registers have been preserved. ++ CHECK_EQ(callee_saved_value, get_register(s0)); ++ CHECK_EQ(callee_saved_value, get_register(s1)); ++ CHECK_EQ(callee_saved_value, get_register(s2)); ++ CHECK_EQ(callee_saved_value, get_register(s3)); ++ CHECK_EQ(callee_saved_value, get_register(s4)); ++ CHECK_EQ(callee_saved_value, get_register(s5)); ++ CHECK_EQ(callee_saved_value, get_register(s6)); ++ CHECK_EQ(callee_saved_value, get_register(s7)); ++ CHECK_EQ(callee_saved_value, get_register(s8)); ++ CHECK_EQ(callee_saved_value, get_register(gp)); ++ CHECK_EQ(callee_saved_value, get_register(tp)); ++ CHECK_EQ(callee_saved_value, get_register(fp)); ++ ++ // Restore callee-saved registers with the original value. 
++  set_register(s0, s0_val);
++  set_register(s1, s1_val);
++  set_register(s2, s2_val);
++  set_register(s3, s3_val);
++  set_register(s4, s4_val);
++  set_register(s5, s5_val);
++  set_register(s6, s6_val);
++  set_register(s7, s7_val);
++  set_register(s8, s8_val);
++  set_register(gp, gp_val);
++  set_register(sp, sp_val);
++  set_register(tp, tp_val);
++  set_register(fp, fp_val);
++}
++
++intptr_t Simulator::CallImpl(Address entry, int argument_count,
++                             const intptr_t* arguments) {
++  constexpr int kRegisterPassedArguments = 8;
++  // Set up arguments.
++
++  int reg_arg_count = std::min(kRegisterPassedArguments, argument_count);
++  if (reg_arg_count > 0) set_register(a0, arguments[0]);
++  if (reg_arg_count > 1) set_register(a1, arguments[1]);
++  if (reg_arg_count > 2) set_register(a2, arguments[2]);
++  if (reg_arg_count > 3) set_register(a3, arguments[3]);
++  if (reg_arg_count > 4) set_register(a4, arguments[4]);
++  if (reg_arg_count > 5) set_register(a5, arguments[5]);
++  if (reg_arg_count > 6) set_register(a6, arguments[6]);
++  if (reg_arg_count > 7) set_register(a7, arguments[7]);
++
++  // Remaining arguments passed on stack.
++  int64_t original_stack = get_register(sp);
++  // Compute position of stack on entry to generated code.
++  int stack_args_count = argument_count - reg_arg_count;
++  int stack_args_size = stack_args_count * sizeof(*arguments) + kCArgsSlotsSize;
++  int64_t entry_stack = original_stack - stack_args_size;
++
++  if (base::OS::ActivationFrameAlignment() != 0) {
++    entry_stack &= -base::OS::ActivationFrameAlignment();
++  }
++  // Store remaining arguments on stack, from low to high memory.
++  intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
++  memcpy(stack_argument + kCArgSlotCount, arguments + reg_arg_count,
++         stack_args_count * sizeof(*arguments));
++  set_register(sp, entry_stack);
++
++  CallInternal(entry);
++
++  // Pop stack passed arguments.
++  CHECK_EQ(entry_stack, get_register(sp));
++  set_register(sp, original_stack);
++
++  return get_register(v0);
++}
++
++double Simulator::CallFP(Address entry, double d0, double d1) {
++  const FPURegister fparg2 = f1;
++  set_fpu_register_double(f0, d0);
++  set_fpu_register_double(fparg2, d1);
++  CallInternal(entry);
++  return get_fpu_register_double(f0);
++}
++
++uintptr_t Simulator::PushAddress(uintptr_t address) {
++  int64_t new_sp = get_register(sp) - sizeof(uintptr_t);
++  uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
++  *stack_slot = address;
++  set_register(sp, new_sp);
++  return new_sp;
++}
++
++uintptr_t Simulator::PopAddress() {
++  int64_t current_sp = get_register(sp);
++  uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
++  uintptr_t address = *stack_slot;
++  set_register(sp, current_sp + sizeof(uintptr_t));
++  return address;
++}
++
++Simulator::LocalMonitor::LocalMonitor()
++    : access_state_(MonitorAccess::Open),
++      tagged_addr_(0),
++      size_(TransactionSize::None) {}
++
++void Simulator::LocalMonitor::Clear() {
++  access_state_ = MonitorAccess::Open;
++  tagged_addr_ = 0;
++  size_ = TransactionSize::None;
++}
++
++void Simulator::LocalMonitor::NotifyLoad() {
++  if (access_state_ == MonitorAccess::RMW) {
++    // A non linked load could clear the local monitor. As a result, it's
++    // most strict to unconditionally clear the local monitor on load.
++ Clear(); ++ } ++} ++ ++void Simulator::LocalMonitor::NotifyLoadLinked(uintptr_t addr, ++ TransactionSize size) { ++ access_state_ = MonitorAccess::RMW; ++ tagged_addr_ = addr; ++ size_ = size; ++} ++ ++void Simulator::LocalMonitor::NotifyStore() { ++ if (access_state_ == MonitorAccess::RMW) { ++ // A non exclusive store could clear the local monitor. As a result, it's ++ // most strict to unconditionally clear the local monitor on store. ++ Clear(); ++ } ++} ++ ++bool Simulator::LocalMonitor::NotifyStoreConditional(uintptr_t addr, ++ TransactionSize size) { ++ if (access_state_ == MonitorAccess::RMW) { ++ if (addr == tagged_addr_ && size_ == size) { ++ Clear(); ++ return true; ++ } else { ++ return false; ++ } ++ } else { ++ DCHECK(access_state_ == MonitorAccess::Open); ++ return false; ++ } ++} ++ ++Simulator::GlobalMonitor::LinkedAddress::LinkedAddress() ++ : access_state_(MonitorAccess::Open), ++ tagged_addr_(0), ++ next_(nullptr), ++ prev_(nullptr), ++ failure_counter_(0) {} ++ ++void Simulator::GlobalMonitor::LinkedAddress::Clear_Locked() { ++ access_state_ = MonitorAccess::Open; ++ tagged_addr_ = 0; ++} ++ ++void Simulator::GlobalMonitor::LinkedAddress::NotifyLoadLinked_Locked( ++ uintptr_t addr) { ++ access_state_ = MonitorAccess::RMW; ++ tagged_addr_ = addr; ++} ++ ++void Simulator::GlobalMonitor::LinkedAddress::NotifyStore_Locked() { ++ if (access_state_ == MonitorAccess::RMW) { ++ // A non exclusive store could clear the global monitor. As a result, it's ++ // most strict to unconditionally clear global monitors on store. ++ Clear_Locked(); ++ } ++} ++ ++bool Simulator::GlobalMonitor::LinkedAddress::NotifyStoreConditional_Locked( ++ uintptr_t addr, bool is_requesting_thread) { ++ if (access_state_ == MonitorAccess::RMW) { ++ if (is_requesting_thread) { ++ if (addr == tagged_addr_) { ++ Clear_Locked(); ++ // Introduce occasional sc/scd failures. This is to simulate the ++ // behavior of hardware, which can randomly fail due to background ++ // cache evictions. ++ if (failure_counter_++ >= kMaxFailureCounter) { ++ failure_counter_ = 0; ++ return false; ++ } else { ++ return true; ++ } ++ } ++ } else if ((addr & kExclusiveTaggedAddrMask) == ++ (tagged_addr_ & kExclusiveTaggedAddrMask)) { ++ // Check the masked addresses when responding to a successful lock by ++ // another thread so the implementation is more conservative (i.e. the ++ // granularity of locking is as large as possible.) ++ Clear_Locked(); ++ return false; ++ } ++ } ++ return false; ++} ++ ++void Simulator::GlobalMonitor::NotifyLoadLinked_Locked( ++ uintptr_t addr, LinkedAddress* linked_address) { ++ linked_address->NotifyLoadLinked_Locked(addr); ++ PrependProcessor_Locked(linked_address); ++} ++ ++void Simulator::GlobalMonitor::NotifyStore_Locked( ++ LinkedAddress* linked_address) { ++ // Notify each thread of the store operation. ++ for (LinkedAddress* iter = head_; iter; iter = iter->next_) { ++ iter->NotifyStore_Locked(); ++ } ++} ++ ++bool Simulator::GlobalMonitor::NotifyStoreConditional_Locked( ++ uintptr_t addr, LinkedAddress* linked_address) { ++ DCHECK(IsProcessorInLinkedList_Locked(linked_address)); ++ if (linked_address->NotifyStoreConditional_Locked(addr, true)) { ++ // Notify the other processors that this StoreConditional succeeded. 
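++    // Their reservation on the same (masked) address is cleared, so their own
++    // pending store-conditional will fail.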
++    for (LinkedAddress* iter = head_; iter; iter = iter->next_) {
++      if (iter != linked_address) {
++        iter->NotifyStoreConditional_Locked(addr, false);
++      }
++    }
++    return true;
++  } else {
++    return false;
++  }
++}
++
++bool Simulator::GlobalMonitor::IsProcessorInLinkedList_Locked(
++    LinkedAddress* linked_address) const {
++  return head_ == linked_address || linked_address->next_ ||
++         linked_address->prev_;
++}
++
++void Simulator::GlobalMonitor::PrependProcessor_Locked(
++    LinkedAddress* linked_address) {
++  if (IsProcessorInLinkedList_Locked(linked_address)) {
++    return;
++  }
++
++  if (head_) {
++    head_->prev_ = linked_address;
++  }
++  linked_address->prev_ = nullptr;
++  linked_address->next_ = head_;
++  head_ = linked_address;
++}
++
++void Simulator::GlobalMonitor::RemoveLinkedAddress(
++    LinkedAddress* linked_address) {
++  base::MutexGuard lock_guard(&mutex);
++  if (!IsProcessorInLinkedList_Locked(linked_address)) {
++    return;
++  }
++
++  if (linked_address->prev_) {
++    linked_address->prev_->next_ = linked_address->next_;
++  } else {
++    head_ = linked_address->next_;
++  }
++  if (linked_address->next_) {
++    linked_address->next_->prev_ = linked_address->prev_;
++  }
++  linked_address->prev_ = nullptr;
++  linked_address->next_ = nullptr;
++}
++
++#undef SScanF
++
++}  // namespace internal
++}  // namespace v8
++
++#endif  // USE_SIMULATOR
+diff --git a/src/3rdparty/chromium/v8/src/execution/la64/simulator-la64.h b/src/3rdparty/chromium/v8/src/execution/la64/simulator-la64.h
+new file mode 100644
+index 0000000000..de2d1b0d89
+--- /dev/null
++++ b/src/3rdparty/chromium/v8/src/execution/la64/simulator-la64.h
+@@ -0,0 +1,646 @@
++// Copyright 2020 the V8 project authors. All rights reserved.
++// Use of this source code is governed by a BSD-style license that can be
++// found in the LICENSE file.
++
++// Declares a Simulator for loongisa instructions if we are not generating a
++// native loongisa binary. This Simulator allows us to run and debug loongisa
++// code generation on regular desktop machines. V8 calls into generated code via
++// the GeneratedCode wrapper, which will start execution in the Simulator or
++// forwards to the real entry on a loongisa HW platform.
++
++#ifndef V8_EXECUTION_LA64_SIMULATOR_LA64_H_
++#define V8_EXECUTION_LA64_SIMULATOR_LA64_H_
++
++// globals.h defines USE_SIMULATOR.
++#include "src/common/globals.h"
++
++template <typename T>
++int Compare(const T& a, const T& b) {
++  if (a == b)
++    return 0;
++  else if (a < b)
++    return -1;
++  else
++    return 1;
++}
++
++// Returns the negative absolute value of its argument.
++template <typename T,
++          typename = typename std::enable_if<std::is_signed<T>::value>::type>
++T Nabs(T a) {
++  return a < 0 ? a : -a;
++}
++
++#if defined(USE_SIMULATOR)
++// Running with a simulator.
++
++#include "src/base/hashmap.h"
++#include "src/codegen/assembler.h"
++#include "src/codegen/la64/constants-la64.h"
++#include "src/execution/simulator-base.h"
++#include "src/utils/allocation.h"
++
++namespace v8 {
++namespace internal {
++
++// -----------------------------------------------------------------------------
++// Utility functions
++
++class CachePage {
++ public:
++  static const int LINE_VALID = 0;
++  static const int LINE_INVALID = 1;
++
++  static const int kPageShift = 12;
++  static const int kPageSize = 1 << kPageShift;
++  static const int kPageMask = kPageSize - 1;
++  static const int kLineShift = 2;  // The cache line is only 4 bytes right now.
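++  // Each line therefore covers 4 bytes, and validity_map_ below keeps one
++  // validity byte per line of the 4 KB page.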
++ static const int kLineLength = 1 << kLineShift; ++ static const int kLineMask = kLineLength - 1; ++ ++ CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); } ++ ++ char* ValidityByte(int offset) { ++ return &validity_map_[offset >> kLineShift]; ++ } ++ ++ char* CachedData(int offset) { return &data_[offset]; } ++ ++ private: ++ char data_[kPageSize]; // The cached data. ++ static const int kValidityMapSize = kPageSize >> kLineShift; ++ char validity_map_[kValidityMapSize]; // One byte per line. ++}; ++ ++class SimInstructionBase : public InstructionBase { ++ public: ++ Type InstructionType() const { return type_; } ++ inline Instruction* instr() const { return instr_; } ++ inline int32_t operand() const { return operand_; } ++ ++ protected: ++ SimInstructionBase() : operand_(-1), instr_(nullptr), type_(kUnsupported) {} ++ explicit SimInstructionBase(Instruction* instr) {} ++ ++ int32_t operand_; ++ Instruction* instr_; ++ Type type_; ++ ++ private: ++ DISALLOW_ASSIGN(SimInstructionBase); ++}; ++ ++class SimInstruction : public InstructionGetters { ++ public: ++ SimInstruction() {} ++ ++ explicit SimInstruction(Instruction* instr) { *this = instr; } ++ ++ SimInstruction& operator=(Instruction* instr) { ++ operand_ = *reinterpret_cast(instr); ++ instr_ = instr; ++ type_ = InstructionBase::InstructionType(); ++ DCHECK(reinterpret_cast(&operand_) == this); ++ return *this; ++ } ++}; ++ ++class Simulator : public SimulatorBase { ++ public: ++ friend class La64Debugger; ++ ++ // Registers are declared in order. ++ enum Register { ++ no_reg = -1, ++ zero_reg = 0, ++ ra, ++ gp, ++ sp, ++ a0, ++ a1, ++ a2, ++ a3, ++ a4, ++ a5, ++ a6, ++ a7, ++ t0, ++ t1, ++ t2, ++ t3, ++ t4, ++ t5, ++ t6, ++ t7, ++ t8, ++ tp, ++ fp, ++ s0, ++ s1, ++ s2, ++ s3, ++ s4, ++ s5, ++ s6, ++ s7, ++ s8, ++ pc, // pc must be the last register. ++ kNumSimuRegisters, ++ // aliases ++ v0 = a0, ++ v1 = a1 ++ }; ++ ++ // Condition flag registers. ++ enum CFRegister { ++ fcc0, ++ fcc1, ++ fcc2, ++ fcc3, ++ fcc4, ++ fcc5, ++ fcc6, ++ fcc7, ++ kNumCFRegisters ++ }; ++ ++ // Floating point registers. ++ enum FPURegister { ++ f0, ++ f1, ++ f2, ++ f3, ++ f4, ++ f5, ++ f6, ++ f7, ++ f8, ++ f9, ++ f10, ++ f11, ++ f12, ++ f13, ++ f14, ++ f15, ++ f16, ++ f17, ++ f18, ++ f19, ++ f20, ++ f21, ++ f22, ++ f23, ++ f24, ++ f25, ++ f26, ++ f27, ++ f28, ++ f29, ++ f30, ++ f31, ++ kNumFPURegisters ++ }; ++ ++ explicit Simulator(Isolate* isolate); ++ ~Simulator(); ++ ++ // The currently executing Simulator instance. Potentially there can be one ++ // for each native thread. ++ V8_EXPORT_PRIVATE static Simulator* current(v8::internal::Isolate* isolate); ++ ++ // Accessors for register state. Reading the pc value adheres to the LA64 ++ // architecture specification and is off by a 8 from the currently executing ++ // instruction. ++ void set_register(int reg, int64_t value); ++ void set_register_word(int reg, int32_t value); ++ void set_dw_register(int dreg, const int* dbl); ++ int64_t get_register(int reg) const; ++ double get_double_from_register_pair(int reg); ++ // Same for FPURegisters. 
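++  // FPU registers are modelled as 64-bit values; the *_word and *_hi_word
++  // accessors operate on the low and high 32-bit halves respectively.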
++ void set_fpu_register(int fpureg, int64_t value); ++ void set_fpu_register_word(int fpureg, int32_t value); ++ void set_fpu_register_hi_word(int fpureg, int32_t value); ++ void set_fpu_register_float(int fpureg, float value); ++ void set_fpu_register_double(int fpureg, double value); ++ void set_fpu_register_invalid_result64(float original, float rounded); ++ void set_fpu_register_invalid_result(float original, float rounded); ++ void set_fpu_register_word_invalid_result(float original, float rounded); ++ void set_fpu_register_invalid_result64(double original, double rounded); ++ void set_fpu_register_invalid_result(double original, double rounded); ++ void set_fpu_register_word_invalid_result(double original, double rounded); ++ int64_t get_fpu_register(int fpureg) const; ++ int32_t get_fpu_register_word(int fpureg) const; ++ int32_t get_fpu_register_signed_word(int fpureg) const; ++ int32_t get_fpu_register_hi_word(int fpureg) const; ++ float get_fpu_register_float(int fpureg) const; ++ double get_fpu_register_double(int fpureg) const; ++ void set_cf_register(int cfreg, bool value); ++ bool get_cf_register(int cfreg) const; ++ void set_fcsr_rounding_mode(FPURoundingMode mode); ++ unsigned int get_fcsr_rounding_mode(); ++ void set_fcsr_bit(uint32_t cc, bool value); ++ bool test_fcsr_bit(uint32_t cc); ++ bool set_fcsr_round_error(double original, double rounded); ++ bool set_fcsr_round64_error(double original, double rounded); ++ bool set_fcsr_round_error(float original, float rounded); ++ bool set_fcsr_round64_error(float original, float rounded); ++ void round_according_to_fcsr(double toRound, double* rounded, ++ int32_t* rounded_int); ++ void round64_according_to_fcsr(double toRound, double* rounded, ++ int64_t* rounded_int); ++ void round_according_to_fcsr(float toRound, float* rounded, ++ int32_t* rounded_int); ++ void round64_according_to_fcsr(float toRound, float* rounded, ++ int64_t* rounded_int); ++ // Special case of set_register and get_register to access the raw PC value. ++ void set_pc(int64_t value); ++ int64_t get_pc() const; ++ ++ Address get_sp() const { return static_cast
(get_register(sp)); } ++ ++ // Accessor to the internal simulator stack area. ++ uintptr_t StackLimit(uintptr_t c_limit) const; ++ ++ // Executes LA64 instructions until the PC reaches end_sim_pc. ++ void Execute(); ++ ++ template ++ Return Call(Address entry, Args... args) { ++ return VariadicCall(this, &Simulator::CallImpl, entry, args...); ++ } ++ ++ // Alternative: call a 2-argument double function. ++ double CallFP(Address entry, double d0, double d1); ++ ++ // Push an address onto the JS stack. ++ uintptr_t PushAddress(uintptr_t address); ++ ++ // Pop an address from the JS stack. ++ uintptr_t PopAddress(); ++ ++ // Debugger input. ++ void set_last_debugger_input(char* input); ++ char* last_debugger_input() { return last_debugger_input_; } ++ ++ // Redirection support. ++ static void SetRedirectInstruction(Instruction* instruction); ++ ++ // ICache checking. ++ static bool ICacheMatch(void* one, void* two); ++ static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start, ++ size_t size); ++ ++ // Returns true if pc register contains one of the 'special_values' defined ++ // below (bad_ra, end_sim_pc). ++ bool has_bad_pc() const; ++ ++ private: ++ enum special_values { ++ // Known bad pc value to ensure that the simulator does not execute ++ // without being properly setup. ++ bad_ra = -1, ++ // A pc value used to signal the simulator to stop execution. Generally ++ // the ra is set to this value on transition from native C code to ++ // simulated execution, so that the simulator can "return" to the native ++ // C code. ++ end_sim_pc = -2, ++ // Unpredictable value. ++ Unpredictable = 0xbadbeaf ++ }; ++ ++ V8_EXPORT_PRIVATE intptr_t CallImpl(Address entry, int argument_count, ++ const intptr_t* arguments); ++ ++ // Unsupported instructions use Format to print an error and stop execution. ++ void Format(Instruction* instr, const char* format); ++ ++ // Helpers for data value tracing. ++ enum TraceType { ++ BYTE, ++ HALF, ++ WORD, ++ DWORD, ++ FLOAT, ++ DOUBLE, ++ FLOAT_DOUBLE, ++ WORD_DWORD ++ }; ++ ++ // Read and write memory. ++ inline uint32_t ReadBU(int64_t addr); ++ inline int32_t ReadB(int64_t addr); ++ inline void WriteB(int64_t addr, uint8_t value); ++ inline void WriteB(int64_t addr, int8_t value); ++ ++ inline uint16_t ReadHU(int64_t addr, Instruction* instr); ++ inline int16_t ReadH(int64_t addr, Instruction* instr); ++ // Note: Overloaded on the sign of the value. ++ inline void WriteH(int64_t addr, uint16_t value, Instruction* instr); ++ inline void WriteH(int64_t addr, int16_t value, Instruction* instr); ++ ++ inline uint32_t ReadWU(int64_t addr, Instruction* instr); ++ inline int32_t ReadW(int64_t addr, Instruction* instr, TraceType t = WORD); ++ inline void WriteW(int64_t addr, int32_t value, Instruction* instr); ++ void WriteConditionalW(int64_t addr, int32_t value, Instruction* instr, ++ int32_t rt_reg); ++ inline int64_t Read2W(int64_t addr, Instruction* instr); ++ inline void Write2W(int64_t addr, int64_t value, Instruction* instr); ++ inline void WriteConditional2W(int64_t addr, int64_t value, ++ Instruction* instr, int32_t rt_reg); ++ ++ inline double ReadD(int64_t addr, Instruction* instr); ++ inline void WriteD(int64_t addr, double value, Instruction* instr); ++ ++ template ++ T ReadMem(int64_t addr, Instruction* instr); ++ template ++ void WriteMem(int64_t addr, T value, Instruction* instr); ++ ++ // Helper for debugging memory access. 
++ inline void DieOrDebug(); ++ ++ void TraceRegWr(int64_t value, TraceType t = DWORD); ++ void TraceMemWr(int64_t addr, int64_t value, TraceType t); ++ void TraceMemRd(int64_t addr, int64_t value, TraceType t = DWORD); ++ template ++ void TraceMemRd(int64_t addr, T value); ++ template ++ void TraceMemWr(int64_t addr, T value); ++ ++ SimInstruction instr_; ++ ++ // Executing is handled based on the instruction type. ++ void DecodeTypeOp6(); ++ void DecodeTypeOp7(); ++ void DecodeTypeOp8(); ++ void DecodeTypeOp10(); ++ void DecodeTypeOp12(); ++ void DecodeTypeOp14(); ++ void DecodeTypeOp17(); ++ void DecodeTypeOp22(); ++ ++ inline int32_t rj_reg() const { return instr_.RjValue(); } ++ inline int64_t rj() const { return get_register(rj_reg()); } ++ inline uint64_t rj_u() const { ++ return static_cast(get_register(rj_reg())); ++ } ++ inline int32_t rk_reg() const { return instr_.RkValue(); } ++ inline int64_t rk() const { return get_register(rk_reg()); } ++ inline uint64_t rk_u() const { ++ return static_cast(get_register(rk_reg())); ++ } ++ inline int32_t rd_reg() const { return instr_.RdValue(); } ++ inline int64_t rd() const { return get_register(rd_reg()); } ++ inline uint64_t rd_u() const { ++ return static_cast(get_register(rd_reg())); ++ } ++ inline int32_t fa_reg() const { return instr_.FaValue(); } ++ inline float fa_float() const { return get_fpu_register_float(fa_reg()); } ++ inline double fa_double() const { return get_fpu_register_double(fa_reg()); } ++ inline int32_t fj_reg() const { return instr_.FjValue(); } ++ inline float fj_float() const { return get_fpu_register_float(fj_reg()); } ++ inline double fj_double() const { return get_fpu_register_double(fj_reg()); } ++ inline int32_t fk_reg() const { return instr_.FkValue(); } ++ inline float fk_float() const { return get_fpu_register_float(fk_reg()); } ++ inline double fk_double() const { return get_fpu_register_double(fk_reg()); } ++ inline int32_t fd_reg() const { return instr_.FdValue(); } ++ inline float fd_float() const { return get_fpu_register_float(fd_reg()); } ++ inline double fd_double() const { return get_fpu_register_double(fd_reg()); } ++ inline int32_t cj_reg() const { return instr_.CjValue(); } ++ inline bool cj() const { return get_cf_register(cj_reg()); } ++ inline int32_t cd_reg() const { return instr_.CdValue(); } ++ inline bool cd() const { return get_cf_register(cd_reg()); } ++ inline int32_t ca_reg() const { return instr_.CaValue(); } ++ inline bool ca() const { return get_cf_register(ca_reg()); } ++ inline uint32_t sa2() const { return instr_.Sa2Value(); } ++ inline uint32_t sa3() const { return instr_.Sa3Value(); } ++ inline uint32_t ui5() const { return instr_.Ui5Value(); } ++ inline uint32_t ui6() const { return instr_.Ui6Value(); } ++ inline uint32_t lsbw() const { return instr_.LsbwValue(); } ++ inline uint32_t msbw() const { return instr_.MsbwValue(); } ++ inline uint32_t lsbd() const { return instr_.LsbdValue(); } ++ inline uint32_t msbd() const { return instr_.MsbdValue(); } ++ inline uint32_t cond() const { return instr_.CondValue(); } ++ inline int32_t si12() const { return (instr_.Si12Value() << 20) >> 20; } ++ inline uint32_t ui12() const { return instr_.Ui12Value(); } ++ inline int32_t si14() const { return (instr_.Si14Value() << 18) >> 18; } ++ inline int32_t si16() const { return (instr_.Si16Value() << 16) >> 16; } ++ inline int32_t si20() const { return (instr_.Si20Value() << 12) >> 12; } ++ ++ inline void SetResult(const int32_t rd_reg, const int64_t alu_out) { ++ set_register(rd_reg, 
alu_out); ++ TraceRegWr(alu_out); ++ } ++ ++ inline void SetFPUWordResult(int32_t fd_reg, int32_t alu_out) { ++ set_fpu_register_word(fd_reg, alu_out); ++ TraceRegWr(get_fpu_register(fd_reg), WORD); ++ } ++ ++ inline void SetFPUWordResult2(int32_t fd_reg, int32_t alu_out) { ++ set_fpu_register_word(fd_reg, alu_out); ++ TraceRegWr(get_fpu_register(fd_reg)); ++ } ++ ++ inline void SetFPUResult(int32_t fd_reg, int64_t alu_out) { ++ set_fpu_register(fd_reg, alu_out); ++ TraceRegWr(get_fpu_register(fd_reg)); ++ } ++ ++ inline void SetFPUResult2(int32_t fd_reg, int64_t alu_out) { ++ set_fpu_register(fd_reg, alu_out); ++ TraceRegWr(get_fpu_register(fd_reg), DOUBLE); ++ } ++ ++ inline void SetFPUFloatResult(int32_t fd_reg, float alu_out) { ++ set_fpu_register_float(fd_reg, alu_out); ++ TraceRegWr(get_fpu_register(fd_reg), FLOAT); ++ } ++ ++ inline void SetFPUDoubleResult(int32_t fd_reg, double alu_out) { ++ set_fpu_register_double(fd_reg, alu_out); ++ TraceRegWr(get_fpu_register(fd_reg), DOUBLE); ++ } ++ ++ // Used for breakpoints. ++ void SoftwareInterrupt(); ++ ++ // Stop helper functions. ++ bool IsWatchpoint(uint64_t code); ++ void PrintWatchpoint(uint64_t code); ++ void HandleStop(uint64_t code, Instruction* instr); ++ bool IsStopInstruction(Instruction* instr); ++ bool IsEnabledStop(uint64_t code); ++ void EnableStop(uint64_t code); ++ void DisableStop(uint64_t code); ++ void IncreaseStopCounter(uint64_t code); ++ void PrintStopInfo(uint64_t code); ++ ++ // Executes one instruction. ++ void InstructionDecode(Instruction* instr); ++ // Execute one instruction placed in a branch delay slot. ++ ++ // ICache. ++ static void CheckICache(base::CustomMatcherHashMap* i_cache, ++ Instruction* instr); ++ static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start, ++ size_t size); ++ static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache, ++ void* page); ++ ++ enum Exception { ++ none, ++ kIntegerOverflow, ++ kIntegerUnderflow, ++ kDivideByZero, ++ kNumExceptions ++ }; ++ ++ // Exceptions. ++ void SignalException(Exception e); ++ ++ // Handle arguments and return value for runtime FP functions. ++ void GetFpArgs(double* x, double* y, int32_t* z); ++ void SetFpResult(const double& result); ++ ++ void CallInternal(Address entry); ++ ++ // Architecture state. ++ // Registers. ++ int64_t registers_[kNumSimuRegisters]; ++ // Floating point Registers. ++ int64_t FPUregisters_[kNumFPURegisters]; ++ // Condition flags Registers. ++ bool CFregisters_[kNumCFRegisters]; ++ // FPU control register. ++ uint32_t FCSR_; ++ ++ // Simulator support. ++ // Allocate 1MB for stack. ++ size_t stack_size_; ++ char* stack_; ++ bool pc_modified_; ++ int64_t icount_; ++ int break_count_; ++ EmbeddedVector trace_buf_; ++ ++ // Debugger input. ++ char* last_debugger_input_; ++ ++ v8::internal::Isolate* isolate_; ++ ++ // Registered breakpoints. ++ Instruction* break_pc_; ++ Instr break_instr_; ++ ++ // Stop is disabled if bit 31 is set. ++ static const uint32_t kStopDisabledBit = 1 << 31; ++ ++ // A stop is enabled, meaning the simulator will stop when meeting the ++ // instruction, if bit 31 of watched_stops_[code].count is unset. ++ // The value watched_stops_[code].count & ~(1 << 31) indicates how many times ++ // the breakpoint was hit or gone through. ++ struct StopCountAndDesc { ++ uint32_t count; ++ char* desc; ++ }; ++ StopCountAndDesc watched_stops_[kMaxStopCode + 1]; ++ ++ // Synchronization primitives. 
++ enum class MonitorAccess { ++ Open, ++ RMW, ++ }; ++ ++ enum class TransactionSize { ++ None = 0, ++ Word = 4, ++ DoubleWord = 8, ++ }; ++ ++ // The least-significant bits of the address are ignored. The number of bits ++ // is implementation-defined, between 3 and minimum page size. ++ static const uintptr_t kExclusiveTaggedAddrMask = ~((1 << 3) - 1); ++ ++ class LocalMonitor { ++ public: ++ LocalMonitor(); ++ ++ // These functions manage the state machine for the local monitor, but do ++ // not actually perform loads and stores. NotifyStoreConditional only ++ // returns true if the store conditional is allowed; the global monitor will ++ // still have to be checked to see whether the memory should be updated. ++ void NotifyLoad(); ++ void NotifyLoadLinked(uintptr_t addr, TransactionSize size); ++ void NotifyStore(); ++ bool NotifyStoreConditional(uintptr_t addr, TransactionSize size); ++ ++ private: ++ void Clear(); ++ ++ MonitorAccess access_state_; ++ uintptr_t tagged_addr_; ++ TransactionSize size_; ++ }; ++ ++ class GlobalMonitor { ++ public: ++ class LinkedAddress { ++ public: ++ LinkedAddress(); ++ ++ private: ++ friend class GlobalMonitor; ++ // These functions manage the state machine for the global monitor, but do ++ // not actually perform loads and stores. ++ void Clear_Locked(); ++ void NotifyLoadLinked_Locked(uintptr_t addr); ++ void NotifyStore_Locked(); ++ bool NotifyStoreConditional_Locked(uintptr_t addr, ++ bool is_requesting_thread); ++ ++ MonitorAccess access_state_; ++ uintptr_t tagged_addr_; ++ LinkedAddress* next_; ++ LinkedAddress* prev_; ++ // A scd can fail due to background cache evictions. Rather than ++ // simulating this, we'll just occasionally introduce cases where an ++ // store conditional fails. This will happen once after every ++ // kMaxFailureCounter exclusive stores. ++ static const int kMaxFailureCounter = 5; ++ int failure_counter_; ++ }; ++ ++ // Exposed so it can be accessed by Simulator::{Read,Write}Ex*. ++ base::Mutex mutex; ++ ++ void NotifyLoadLinked_Locked(uintptr_t addr, LinkedAddress* linked_address); ++ void NotifyStore_Locked(LinkedAddress* linked_address); ++ bool NotifyStoreConditional_Locked(uintptr_t addr, ++ LinkedAddress* linked_address); ++ ++ // Called when the simulator is destroyed. ++ void RemoveLinkedAddress(LinkedAddress* linked_address); ++ ++ static GlobalMonitor* Get(); ++ ++ private: ++ // Private constructor. Call {GlobalMonitor::Get()} to get the singleton. ++ GlobalMonitor() = default; ++ friend class base::LeakyObject; ++ ++ bool IsProcessorInLinkedList_Locked(LinkedAddress* linked_address) const; ++ void PrependProcessor_Locked(LinkedAddress* linked_address); ++ ++ LinkedAddress* head_ = nullptr; ++ }; ++ ++ LocalMonitor local_monitor_; ++ GlobalMonitor::LinkedAddress global_monitor_thread_; ++}; ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // defined(USE_SIMULATOR) ++#endif // V8_EXECUTION_LA64_SIMULATOR_LA64_H_ +diff --git a/src/3rdparty/chromium/v8/src/execution/mips64/simulator-mips64.cc b/src/3rdparty/chromium/v8/src/execution/mips64/simulator-mips64.cc +index 72f2836329..98c50263a0 100644 +--- a/src/3rdparty/chromium/v8/src/execution/mips64/simulator-mips64.cc ++++ b/src/3rdparty/chromium/v8/src/execution/mips64/simulator-mips64.cc +@@ -28,6 +28,8 @@ namespace internal { + DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor, + Simulator::GlobalMonitor::Get) + ++// #define PRINT_SIM_LOG ++ + // Util functions. 
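The LocalMonitor and GlobalMonitor declarations above model the load-linked/store-conditional protocol the simulator uses for atomic accesses: a linked load opens a reservation on a tag-masked address, and a later store-conditional is only allowed while that reservation is still open. The stand-alone C++ sketch below is illustrative only; the class name ReservationMonitor is hypothetical, and the real code additionally consults a process-wide GlobalMonitor and deliberately fails an occasional sc (kMaxFailureCounter) to mimic cache evictions.

#include <cstdint>

// Minimal sketch of the per-thread reservation state machine described by the
// LocalMonitor comments above. Illustration only, not the patch's code.
class ReservationMonitor {
 public:
  enum class Access { Open, RMW };

  void NotifyLoadLinked(uintptr_t addr) {
    access_ = Access::RMW;            // a linked load opens a reservation
    tagged_addr_ = addr & kTagMask;   // low bits are ignored, as in the patch
  }
  void NotifyStore() {
    access_ = Access::Open;           // any ordinary store clears the reservation
  }
  bool NotifyStoreConditional(uintptr_t addr) {
    // The sc is only *allowed* when the reservation is still open for the same
    // tagged address; the caller still performs the actual store and must also
    // ask the global monitor before updating memory.
    bool allowed = access_ == Access::RMW && (addr & kTagMask) == tagged_addr_;
    access_ = Access::Open;           // an sc always consumes the reservation
    return allowed;
  }

 private:
  static constexpr uintptr_t kTagMask = ~uintptr_t{(1 << 3) - 1};
  Access access_ = Access::Open;
  uintptr_t tagged_addr_ = 0;
};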
+ inline bool HaveSameSign(int64_t a, int64_t b) { return ((a ^ b) >= 0); } + +@@ -57,6 +59,17 @@ static int64_t MultiplyHighSigned(int64_t u, int64_t v) { + return u1 * v1 + w2 + (w1 >> 32); + } + ++#ifdef PRINT_SIM_LOG ++inline void printf_instr(const char* _Format, ...) { ++ va_list varList; ++ va_start(varList, _Format); ++ vprintf(_Format, varList); ++ va_end(varList); ++} ++#else ++#define printf_instr(...) ++#endif ++ + // This macro provides a platform independent use of sscanf. The reason for + // SScanF not being implemented in a platform independent was through + // ::v8::internal::OS in the same way as SNPrintF is that the Windows C Run-Time +@@ -2195,6 +2208,7 @@ void Simulator::SoftwareInterrupt() { + uint32_t code = (func == BREAK) ? instr_.Bits(25, 6) : -1; + // We first check if we met a call_rt_redirected. + if (instr_.InstructionBits() == rtCallRedirInstr) { ++ printf_instr("Simulator::SoftwareInterrupt: BREAK 0xFFFFF\n"); + Redirection* redirection = Redirection::FromInstruction(instr_.instr()); + + int64_t* stack_pointer = reinterpret_cast(get_register(sp)); +@@ -2723,6 +2737,9 @@ void Simulator::DecodeTypeRegisterSRsType() { + KeepSign::yes, fs)); + break; + case SQRT_S: ++ printf_instr("sqrt_s\t %s: %016f, %s: %016f\n", ++ FPURegisters::Name(fd_reg()), fd, ++ FPURegisters::Name(fs_reg()), fs); + SetFPUFloatResult( + fd_reg(), + FPUCanonalizeOperation([](float src) { return std::sqrt(src); }, fs)); +@@ -3115,6 +3132,10 @@ void Simulator::DecodeTypeRegisterDRsType() { + [](double lhs, double rhs) { return lhs + rhs; }, fs, ft)); + break; + case SUB_D: ++ printf_instr("sub_d\t %s: %016f, %s: %016f, %s: %016f\n", ++ FPURegisters::Name(fd_reg()), fd, ++ FPURegisters::Name(fs_reg()), fs, ++ FPURegisters::Name(ft_reg()), ft); + SetFPUDoubleResult( + fd_reg(), + FPUCanonalizeOperation( +@@ -3381,6 +3402,10 @@ void Simulator::DecodeTypeRegisterWRsType() { + int64_t alu_out = 0x12345678; + switch (instr_.FunctionFieldRaw()) { + case CVT_S_W: // Convert word to float (single). 
++ printf_instr( ++ "CVT_S_W \t %s: %016f, %s: %016x\n", FPURegisters::Name(fd_reg()), ++ get_fpu_register_float(fd_reg()), FPURegisters::Name(fs_reg()), ++ get_fpu_register_signed_word(fs_reg())); + alu_out = get_fpu_register_signed_word(fs_reg()); + SetFPUFloatResult(fd_reg(), static_cast(alu_out)); + break; +@@ -3476,6 +3501,10 @@ void Simulator::DecodeTypeRegisterLRsType() { + SetFPUDoubleResult(fd_reg(), static_cast(i64)); + break; + case CVT_S_L: ++ printf_instr("CVT_S_L \t %s: %016f, %s: %016x\n", ++ FPURegisters::Name(fd_reg()), ++ get_fpu_register_float(fd_reg()), ++ FPURegisters::Name(fs_reg()), get_fpu_register(fs_reg())); + i64 = get_fpu_register(fs_reg()); + SetFPUFloatResult(fd_reg(), static_cast(i64)); + break; +@@ -3569,11 +3598,17 @@ void Simulator::DecodeTypeRegisterCOP1() { + SetResult(rt_reg(), FCSR_); + break; + case MFC1: ++ printf_instr("MFC1 \t %s: %016lx, %s: %016f\n", Registers::Name(rt_reg()), ++ rt(), FPURegisters::Name(fs_reg()), ++ get_fpu_register_float(fs_reg())); + set_register(rt_reg(), + static_cast(get_fpu_register_word(fs_reg()))); + TraceRegWr(get_register(rt_reg()), WORD_DWORD); + break; + case DMFC1: ++ printf_instr( ++ "DMFC1 \t %s: %016lx, %s: %016f\n", Registers::Name(rt_reg()), rt(), ++ FPURegisters::Name(fs_reg()), get_fpu_register_double(fs_reg())); + SetResult(rt_reg(), get_fpu_register(fs_reg())); + break; + case MFHC1: +@@ -3593,12 +3628,18 @@ void Simulator::DecodeTypeRegisterCOP1() { + break; + } + case MTC1: ++ printf_instr( ++ "MTC1 \t %s: %016f, %s: %016lx\n", FPURegisters::Name(fs_reg()), ++ get_fpu_register_float(fs_reg()), Registers::Name(rt_reg()), rt()); + // Hardware writes upper 32-bits to zero on mtc1. + set_fpu_register_hi_word(fs_reg(), 0); + set_fpu_register_word(fs_reg(), static_cast(rt())); + TraceRegWr(get_fpu_register(fs_reg()), FLOAT_DOUBLE); + break; + case DMTC1: ++ printf_instr( ++ "DMTC1 \t %s: %016f, %s: %016lx\n", FPURegisters::Name(fs_reg()), ++ get_fpu_register_float(fs_reg()), Registers::Name(rt_reg()), rt()); + SetFPUResult2(fs_reg(), rt()); + break; + case MTHC1: +@@ -3683,6 +3724,7 @@ void Simulator::DecodeTypeRegisterSPECIAL() { + case JR: { + int64_t next_pc = rs(); + int64_t current_pc = get_pc(); ++ printf_instr("JALR\t %s: %016lx\n", Registers::Name(rs_reg()), rs()); + Instruction* branch_delay_instr = + reinterpret_cast(current_pc + kInstrSize); + BranchDelayInstructionDecode(branch_delay_instr); +@@ -3694,6 +3736,8 @@ void Simulator::DecodeTypeRegisterSPECIAL() { + int64_t next_pc = rs(); + int64_t current_pc = get_pc(); + int32_t return_addr_reg = rd_reg(); ++ printf_instr("JALR\t %s: %016lx, %s: %016lx\n", Registers::Name(rd_reg()), ++ get_register(rd_reg()), Registers::Name(rs_reg()), rs()); + Instruction* branch_delay_instr = + reinterpret_cast(current_pc + kInstrSize); + BranchDelayInstructionDecode(branch_delay_instr); +@@ -3703,21 +3747,36 @@ void Simulator::DecodeTypeRegisterSPECIAL() { + break; + } + case SLL: ++ printf_instr("SLL\t %s: %016lx, %s: %016lx, sa: %02x\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rt_reg()), rt(), sa()); + SetResult(rd_reg(), static_cast(rt()) << sa()); + break; + case DSLL: ++ printf_instr("DSLL\t %s: %016lx, %s: %016lx, sa: %02x\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rt_reg()), rt(), sa()); + SetResult(rd_reg(), rt() << sa()); + break; + case DSLL32: ++ printf_instr("DSLL32\t %s: %016lx, %s: %016lx, sa: %02x\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rt_reg()), 
rt(), sa()); + SetResult(rd_reg(), rt() << sa() << 32); + break; + case SRL: + if (rs_reg() == 0) { ++ printf_instr("SRL\t %s: %016lx, %s: %016lx, sa: %02x\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rt_reg()), rt(), sa()); + // Regular logical right shift of a word by a fixed number of + // bits instruction. RS field is always equal to 0. + // Sign-extend the 32-bit result. + alu_out = static_cast(static_cast(rt_u()) >> sa()); + } else if (rs_reg() == 1) { ++ printf_instr("ROTR\t %s: %016lx, %s: %016lx, sa: %02x\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rt_reg()), rt(), sa()); + // Logical right-rotate of a word by a fixed number of bits. This + // is special case of SRL instruction, added in MIPS32 Release 2. + // RS field is equal to 00001. +@@ -3731,11 +3790,17 @@ void Simulator::DecodeTypeRegisterSPECIAL() { + break; + case DSRL: + if (rs_reg() == 0) { ++ printf_instr("DSRL\t %s: %016lx, %s: %016lx, sa: %02x\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rt_reg()), rt(), sa()); + // Regular logical right shift of a word by a fixed number of + // bits instruction. RS field is always equal to 0. + // Sign-extend the 64-bit result. + alu_out = static_cast(rt_u() >> sa()); + } else if (rs_reg() == 1) { ++ printf_instr("DROTR\t %s: %016lx, %s: %016lx, sa: %02x\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rt_reg()), rt(), sa()); + // Logical right-rotate of a word by a fixed number of bits. This + // is special case of SRL instruction, added in MIPS32 Release 2. + // RS field is equal to 00001. +@@ -3747,11 +3812,17 @@ void Simulator::DecodeTypeRegisterSPECIAL() { + break; + case DSRL32: + if (rs_reg() == 0) { ++ printf_instr("DSRL32\t %s: %016lx, %s: %016lx, sa: %02x\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rt_reg()), rt(), sa()); + // Regular logical right shift of a word by a fixed number of + // bits instruction. RS field is always equal to 0. + // Sign-extend the 64-bit result. + alu_out = static_cast(rt_u() >> sa() >> 32); + } else if (rs_reg() == 1) { ++ printf_instr("DROTR32\t %s: %016lx, %s: %016lx, sa: %02x\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rt_reg()), rt(), sa()); + // Logical right-rotate of a word by a fixed number of bits. This + // is special case of SRL instruction, added in MIPS32 Release 2. + // RS field is equal to 00001. 
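All of the printf_instr() calls added throughout these simulator hunks go through the helper introduced near the top of simulator-mips64.cc: a variadic wrapper around vprintf() that only exists when PRINT_SIM_LOG is defined and otherwise expands to an empty macro, so normal builds pay nothing for the tracing. A stand-alone sketch of that pattern with one example call site:

#include <cstdarg>
#include <cstdio>

// #define PRINT_SIM_LOG   // uncomment to enable per-instruction tracing

#ifdef PRINT_SIM_LOG
inline void printf_instr(const char* format, ...) {
  va_list args;
  va_start(args, format);
  vprintf(format, args);   // forward the varargs to vprintf when enabled
  va_end(args);
}
#else
// With tracing disabled, every call site compiles away to an empty statement.
#define printf_instr(...)
#endif

int main() {
  long rt = 0x1234;
  printf_instr("SLL\t rt: %016lx, sa: %02x\n", rt, 4);  // no-op unless enabled
  (void)rt;  // silence the unused-variable warning in the disabled build
  return 0;
}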
+@@ -3763,26 +3834,51 @@ void Simulator::DecodeTypeRegisterSPECIAL() { + SetResult(rd_reg(), alu_out); + break; + case SRA: ++ printf_instr("SRA\t %s: %016lx, %s: %016lx, sa: %02x\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rt_reg()), rt(), sa()); + SetResult(rd_reg(), (int32_t)rt() >> sa()); + break; + case DSRA: ++ printf_instr("DSRA\t %s: %016lx, %s: %016lx, sa: %02x\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rt_reg()), rt(), sa()); + SetResult(rd_reg(), rt() >> sa()); + break; + case DSRA32: ++ printf_instr("DSRA32\t %s: %016lx, %s: %016lx, sa: %02x\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rt_reg()), rt(), sa()); + SetResult(rd_reg(), rt() >> sa() >> 32); + break; + case SLLV: ++ printf_instr("SLLV\t %s: %016lx, %s: %016lx, %s: %016lx\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rt_reg()), rt(), Registers::Name(rs_reg()), ++ rs()); + SetResult(rd_reg(), (int32_t)rt() << rs()); + break; + case DSLLV: ++ printf_instr("DSLLV\t %s: %016lx, %s: %016lx, %s: %016lx\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rt_reg()), rt(), Registers::Name(rs_reg()), ++ rs()); + SetResult(rd_reg(), rt() << rs()); + break; + case SRLV: + if (sa() == 0) { ++ printf_instr("SRLV\t %s: %016lx, %s: %016lx, %s: %016lx\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rt_reg()), rt(), Registers::Name(rs_reg()), ++ rs()); + // Regular logical right-shift of a word by a variable number of + // bits instruction. SA field is always equal to 0. + alu_out = static_cast((uint32_t)rt_u() >> rs()); + } else { ++ printf_instr("ROTRV\t %s: %016lx, %s: %016lx, %s: %016lx\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rt_reg()), rt(), Registers::Name(rs_reg()), ++ rs()); + // Logical right-rotate of a word by a variable number of bits. + // This is special case od SRLV instruction, added in MIPS32 + // Release 2. SA field is equal to 00001. +@@ -3794,10 +3890,18 @@ void Simulator::DecodeTypeRegisterSPECIAL() { + break; + case DSRLV: + if (sa() == 0) { ++ printf_instr("SRLV\t %s: %016lx, %s: %016lx, %s: %016lx\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rt_reg()), rt(), Registers::Name(rs_reg()), ++ rs()); + // Regular logical right-shift of a word by a variable number of + // bits instruction. SA field is always equal to 0. + alu_out = static_cast(rt_u() >> rs()); + } else { ++ printf_instr("DROTRV\t %s: %016lx, %s: %016lx, %s: %016lx\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rt_reg()), rt(), Registers::Name(rs_reg()), ++ rs()); + // Logical right-rotate of a word by a variable number of bits. + // This is special case od SRLV instruction, added in MIPS32 + // Release 2. SA field is equal to 00001. 
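A recurring idiom in these hunks, and in the si12()/si14()/si16()/si20() accessors of the LA64 simulator header, is manual sign extension: an n-bit immediate is shifted up to bit 31 and arithmetically shifted back down, and 32-bit ALU results (ADDU, SUBU, SRL, ...) are widened through int32_t so the upper half of the 64-bit simulated register mirrors bit 31. A small worked example with illustrative values, not taken from the patch:

#include <cstdint>
#include <cstdio>

// Sign-extend an n-bit field the way the simulator's immediate accessors do:
// shift the field to the top of a 32-bit int, then shift back arithmetically.
int32_t SignExtend(uint32_t field, int bits) {
  return static_cast<int32_t>(field << (32 - bits)) >> (32 - bits);
}

int main() {
  // A 12-bit immediate of 0xFFF reads back as -1 (cf. si12() in the header).
  std::printf("si12(0xFFF) = %d\n", SignExtend(0xFFF, 12));

  // Word-result pattern used by ADDU/SUBU: compute in 32 bits, then widen the
  // signed 32-bit result into the 64-bit register value.
  int64_t rs = 0x7FFFFFFF, rt = 1;
  int32_t alu32_out = static_cast<int32_t>(rs + rt);   // 0x80000000 -> INT32_MIN
  int64_t reg = static_cast<int64_t>(alu32_out);       // 0xFFFFFFFF80000000
  std::printf("ADDU result = %016llx\n",
              static_cast<unsigned long long>(reg));
  return 0;
}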
+@@ -3807,9 +3911,17 @@ void Simulator::DecodeTypeRegisterSPECIAL() { + SetResult(rd_reg(), alu_out); + break; + case SRAV: ++ printf_instr("SRAV\t %s: %016lx, %s: %016lx, %s: %016lx\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rt_reg()), rt(), Registers::Name(rs_reg()), ++ rs()); + SetResult(rd_reg(), (int32_t)rt() >> rs()); + break; + case DSRAV: ++ printf_instr("DSRAV\t %s: %016lx, %s: %016lx, %s: %016lx\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rt_reg()), rt(), Registers::Name(rs_reg()), ++ rs()); + SetResult(rd_reg(), rt() >> rs()); + break; + case LSA: { +@@ -4018,6 +4130,10 @@ void Simulator::DecodeTypeRegisterSPECIAL() { + break; + case ADD: + case DADD: ++ printf_instr("DADD\t %s: %016lx, %s: %016lx, %s: %016lx\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()), ++ rt()); + if (HaveSameSign(rs(), rt())) { + if (rs() > 0) { + if (rs() > (Registers::kMaxValue - rt())) { +@@ -4032,16 +4148,28 @@ void Simulator::DecodeTypeRegisterSPECIAL() { + SetResult(rd_reg(), rs() + rt()); + break; + case ADDU: { ++ printf_instr("ADDU\t %s: %016lx, %s: %016lx, %s: %016lx\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()), ++ rt()); + int32_t alu32_out = static_cast(rs() + rt()); + // Sign-extend result of 32bit operation into 64bit register. + SetResult(rd_reg(), static_cast(alu32_out)); + break; + } + case DADDU: ++ printf_instr("DADDU\t %s: %016lx, %s: %016lx, %s: %016lx\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()), ++ rt()); + SetResult(rd_reg(), rs() + rt()); + break; + case SUB: + case DSUB: ++ printf_instr("DSUB\t %s: %016lx, %s: %016lx, %s: %016lx\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()), ++ rt()); + if (!HaveSameSign(rs(), rt())) { + if (rs() > 0) { + if (rs() > (Registers::kMaxValue + rt())) { +@@ -4056,30 +4184,62 @@ void Simulator::DecodeTypeRegisterSPECIAL() { + SetResult(rd_reg(), rs() - rt()); + break; + case SUBU: { ++ printf_instr("SUBU\t %s: %016lx, %s: %016lx, %s: %016lx\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()), ++ rt()); + int32_t alu32_out = static_cast(rs() - rt()); + // Sign-extend result of 32bit operation into 64bit register. 
+ SetResult(rd_reg(), static_cast(alu32_out)); + break; + } + case DSUBU: ++ printf_instr("DSUBU\t %s: %016lx, %s: %016lx, %s: %016lx\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()), ++ rt()); + SetResult(rd_reg(), rs() - rt()); + break; + case AND: ++ printf_instr("AND\t %s: %016lx, %s: %016lx, %s: %016lx\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()), ++ rt()); + SetResult(rd_reg(), rs() & rt()); + break; + case OR: ++ printf_instr("OR\t %s: %016lx, %s: %016lx, %s: %016lx\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()), ++ rt()); + SetResult(rd_reg(), rs() | rt()); + break; + case XOR: ++ printf_instr("XOR\t %s: %016lx, %s: %016lx, %s: %016lx\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()), ++ rt()); + SetResult(rd_reg(), rs() ^ rt()); + break; + case NOR: ++ printf_instr("NOR\t %s: %016lx, %s: %016lx, %s: %016lx\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()), ++ rt()); + SetResult(rd_reg(), ~(rs() | rt())); + break; + case SLT: ++ printf_instr("SLT\t %s: %016lx, %s: %016lx, %s: %016lx\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()), ++ rt()); + SetResult(rd_reg(), rs() < rt() ? 1 : 0); + break; + case SLTU: ++ printf_instr("SLTU\t %s: %016lx, %s: %016lx, %s: %016lx\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()), ++ rt()); + SetResult(rd_reg(), rs_u() < rt_u() ? 1 : 0); + break; + // Break and trap instructions. +@@ -4106,9 +4266,14 @@ void Simulator::DecodeTypeRegisterSPECIAL() { + break; + case SYNC: + // TODO(palfia): Ignore sync instruction for now. ++ printf_instr("sync\n"); + break; + // Conditional moves. + case MOVN: ++ printf_instr("MOVN\t %s: %016lx, %s: %016lx, %s: %016lx\n", ++ Registers::Name(rd_reg()), get_register(rd_reg()), ++ Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()), ++ rt()); + if (rt()) { + SetResult(rd_reg(), rs()); + } +@@ -4173,6 +4338,9 @@ void Simulator::DecodeTypeRegisterSPECIAL3() { + // Interpret sa field as 5-bit lsb of extract. + uint16_t lsb = sa(); + uint16_t size = msbd + 1; ++ printf_instr("EXT\t %s: %016lx, %s: %016lx, pos: %d, size: %d\n", ++ Registers::Name(rt_reg()), get_register(rt_reg()), ++ Registers::Name(rs_reg()), rs(), lsb, size); + uint64_t mask = (1ULL << size) - 1; + alu_out = static_cast((rs_u() & (mask << lsb)) >> lsb); + SetResult(rt_reg(), alu_out); +@@ -4184,6 +4352,9 @@ void Simulator::DecodeTypeRegisterSPECIAL3() { + // Interpret sa field as 5-bit lsb of extract. + uint16_t lsb = sa(); + uint16_t size = msbd + 1; ++ printf_instr("DEXT\t %s: %016lx, %s: %016lx, pos: %d, size: %d\n", ++ Registers::Name(rt_reg()), get_register(rt_reg()), ++ Registers::Name(rs_reg()), rs(), lsb, size); + uint64_t mask = (size == 64) ? 
UINT64_MAX : (1ULL << size) - 1; + alu_out = static_cast((rs_u() & (mask << lsb)) >> lsb); + SetResult(rt_reg(), alu_out); +@@ -6553,6 +6724,7 @@ void Simulator::DecodeTypeImmediate() { + [this, &next_pc, &execute_branch_delay_instruction](bool do_branch) { + execute_branch_delay_instruction = true; + int64_t current_pc = get_pc(); ++ printf_instr("Offs16: %04x\n", instr_.Imm16Value()); + set_register(31, current_pc + 2 * kInstrSize); + if (do_branch) { + int16_t imm16 = instr_.Imm16Value(); +@@ -6565,6 +6737,7 @@ void Simulator::DecodeTypeImmediate() { + auto BranchHelper = [this, &next_pc, + &execute_branch_delay_instruction](bool do_branch) { + execute_branch_delay_instruction = true; ++ printf_instr("Offs16: %04x\n", instr_.Imm16Value()); + int64_t current_pc = get_pc(); + if (do_branch) { + int16_t imm16 = instr_.Imm16Value(); +@@ -6601,6 +6774,7 @@ void Simulator::DecodeTypeImmediate() { + auto BranchAndLinkCompactHelper = [this, &next_pc](bool do_branch, int bits) { + int64_t current_pc = get_pc(); + CheckForbiddenSlot(current_pc); ++ printf_instr("Offs: %08x\n", instr_.ImmValue(bits)); + if (do_branch) { + int32_t imm = instr_.ImmValue(bits); + imm <<= 32 - bits; +@@ -6613,6 +6787,7 @@ void Simulator::DecodeTypeImmediate() { + auto BranchCompactHelper = [this, &next_pc](bool do_branch, int bits) { + int64_t current_pc = get_pc(); + CheckForbiddenSlot(current_pc); ++ printf_instr("Offs: %08x\n", instr_.ImmValue(bits)); + if (do_branch) { + int32_t imm = instr_.ImmValue(bits); + imm <<= 32 - bits; +@@ -6707,15 +6882,19 @@ void Simulator::DecodeTypeImmediate() { + case REGIMM: + switch (instr_.RtFieldRaw()) { + case BLTZ: ++ printf_instr("BLTZ\t %s: %016lx, ", Registers::Name(rs_reg), rs); + BranchHelper(rs < 0); + break; + case BGEZ: ++ printf_instr("BGEZ\t %s: %016lx, ", Registers::Name(rs_reg), rs); + BranchHelper(rs >= 0); + break; + case BLTZAL: ++ printf_instr("BLTZAL\t %s: %016lx, ", Registers::Name(rs_reg), rs); + BranchAndLinkHelper(rs < 0); + break; + case BGEZAL: ++ printf_instr("BGEZAL\t %s: %016lx, ", Registers::Name(rs_reg), rs); + BranchAndLinkHelper(rs >= 0); + break; + case DAHI: +@@ -6732,9 +6911,13 @@ void Simulator::DecodeTypeImmediate() { + // When comparing to zero, the encoding of rt field is always 0, so we don't + // need to replace rt with zero. 
+ case BEQ: ++ printf_instr("BEQ\t %s: %016lx, %s: %016lx, ", Registers::Name(rs_reg), ++ rs, Registers::Name(rt_reg), rt); + BranchHelper(rs == rt); + break; + case BNE: ++ printf_instr("BNE\t %s: %016lx, %s: %016lx, ", Registers::Name(rs_reg), ++ rs, Registers::Name(rt_reg), rt); + BranchHelper(rs != rt); + break; + case POP06: // BLEZALC, BGEZALC, BGEUC, BLEZ (pre-r6) +@@ -6754,6 +6937,7 @@ void Simulator::DecodeTypeImmediate() { + BranchHelper(rs <= 0); + } + } else { // BLEZ ++ printf_instr("BLEZ\t %s: %016lx", Registers::Name(rs_reg), rs); + BranchHelper(rs <= 0); + } + break; +@@ -6774,6 +6958,7 @@ void Simulator::DecodeTypeImmediate() { + BranchHelper(rs > 0); + } + } else { // BGTZ ++ printf_instr("BGTZ\t %s: %016lx", Registers::Name(rs_reg), rs); + BranchHelper(rs > 0); + } + break; +@@ -6791,6 +6976,7 @@ void Simulator::DecodeTypeImmediate() { + } + } + } else { // BLEZL ++ printf_instr("BLEZL\t %s: %016lx", Registers::Name(rs_reg), rs); + BranchAndLinkHelper(rs <= 0); + } + break; +@@ -6808,6 +6994,7 @@ void Simulator::DecodeTypeImmediate() { + } + } + } else { // BGTZL ++ printf_instr("BGTZL\t %s: %016lx", Registers::Name(rs_reg), rs); + BranchAndLinkHelper(rs > 0); + } + break; +@@ -6846,6 +7033,9 @@ void Simulator::DecodeTypeImmediate() { + } + } + } else { // ADDI ++ printf_instr("ADDI\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + if (HaveSameSign(rs, se_imm16)) { + if (rs > 0) { + if (rs <= Registers::kMaxValue - se_imm16) { +@@ -6876,27 +7066,48 @@ void Simulator::DecodeTypeImmediate() { + break; + // ------------- Arithmetic instructions. + case ADDIU: { ++ printf_instr("ADDIU\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + int32_t alu32_out = static_cast(rs + se_imm16); + // Sign-extend result of 32bit operation into 64bit register. + SetResult(rt_reg, static_cast(alu32_out)); + break; + } + case DADDIU: ++ printf_instr("DADDIU\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + SetResult(rt_reg, rs + se_imm16); + break; + case SLTI: ++ printf_instr("SLTI\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + SetResult(rt_reg, rs < se_imm16 ? 1 : 0); + break; + case SLTIU: ++ printf_instr("SLTIU\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + SetResult(rt_reg, rs_u < static_cast(se_imm16) ? 1 : 0); + break; + case ANDI: ++ printf_instr("ANDI\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ oe_imm16); + SetResult(rt_reg, rs & oe_imm16); + break; + case ORI: ++ printf_instr("ORI\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ oe_imm16); + SetResult(rt_reg, rs | oe_imm16); + break; + case XORI: ++ printf_instr("XORI\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ oe_imm16); + SetResult(rt_reg, rs ^ oe_imm16); + break; + case LUI: +@@ -6907,6 +7118,8 @@ void Simulator::DecodeTypeImmediate() { + SetResult(rt_reg, static_cast(alu32_out)); + } else { + // LUI instruction. 
++ printf_instr("LUI\t %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, se_imm16); + int32_t alu32_out = static_cast(oe_imm16 << 16); + // Sign-extend result of 32bit operation into 64bit register. + SetResult(rt_reg, static_cast(alu32_out)); +@@ -6919,12 +7132,21 @@ void Simulator::DecodeTypeImmediate() { + break; + // ------------- Memory instructions. + case LB: ++ printf_instr("LB\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + set_register(rt_reg, ReadB(rs + se_imm16)); + break; + case LH: ++ printf_instr("LH\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + set_register(rt_reg, ReadH(rs + se_imm16, instr_.instr())); + break; + case LWL: { ++ printf_instr("LWL\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + local_monitor_.NotifyLoad(); + // al_offset is offset of the effective address within an aligned word. + uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask; +@@ -6938,21 +7160,39 @@ void Simulator::DecodeTypeImmediate() { + break; + } + case LW: ++ printf_instr("LW\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + set_register(rt_reg, ReadW(rs + se_imm16, instr_.instr())); + break; + case LWU: ++ printf_instr("LWU\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + set_register(rt_reg, ReadWU(rs + se_imm16, instr_.instr())); + break; + case LD: ++ printf_instr("LD\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + set_register(rt_reg, Read2W(rs + se_imm16, instr_.instr())); + break; + case LBU: ++ printf_instr("LBU\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + set_register(rt_reg, ReadBU(rs + se_imm16)); + break; + case LHU: ++ printf_instr("LHU\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + set_register(rt_reg, ReadHU(rs + se_imm16, instr_.instr())); + break; + case LWR: { ++ printf_instr("LWR\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + // al_offset is offset of the effective address within an aligned word. + uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask; + uint8_t byte_shift = kInt32AlignmentMask - al_offset; +@@ -6965,6 +7205,9 @@ void Simulator::DecodeTypeImmediate() { + break; + } + case LDL: { ++ printf_instr("LDL\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + // al_offset is offset of the effective address within an aligned word. + uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask; + uint8_t byte_shift = kInt64AlignmentMask - al_offset; +@@ -6977,6 +7220,9 @@ void Simulator::DecodeTypeImmediate() { + break; + } + case LDR: { ++ printf_instr("LDR\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + // al_offset is offset of the effective address within an aligned word. 
+ uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask; + uint8_t byte_shift = kInt64AlignmentMask - al_offset; +@@ -6989,12 +7235,21 @@ void Simulator::DecodeTypeImmediate() { + break; + } + case SB: ++ printf_instr("SB\t %s: %016lx, (%s: %016lx), imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + WriteB(rs + se_imm16, static_cast(rt)); + break; + case SH: ++ printf_instr("SH\t %s: %016lx, (%s: %016lx), imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + WriteH(rs + se_imm16, static_cast(rt), instr_.instr()); + break; + case SWL: { ++ printf_instr("SWL\t %s: %016lx, (%s: %016lx), imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask; + uint8_t byte_shift = kInt32AlignmentMask - al_offset; + uint32_t mask = byte_shift ? (~0 << (al_offset + 1) * 8) : 0; +@@ -7005,12 +7260,21 @@ void Simulator::DecodeTypeImmediate() { + break; + } + case SW: ++ printf_instr("SW\t %s: %016lx, (%s: %016lx), imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + WriteW(rs + se_imm16, static_cast(rt), instr_.instr()); + break; + case SD: ++ printf_instr("SD\t %s: %016lx, (%s: %016lx), imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + Write2W(rs + se_imm16, rt, instr_.instr()); + break; + case SWR: { ++ printf_instr("SWR\t %s: %016lx, (%s: %016lx), imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask; + uint32_t mask = (1 << al_offset * 8) - 1; + addr = rs + se_imm16 - al_offset; +@@ -7020,6 +7284,9 @@ void Simulator::DecodeTypeImmediate() { + break; + } + case SDL: { ++ printf_instr("SDL\t %s: %016lx, (%s: %016lx), imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask; + uint8_t byte_shift = kInt64AlignmentMask - al_offset; + uint64_t mask = byte_shift ? 
(~0UL << (al_offset + 1) * 8) : 0; +@@ -7030,6 +7297,9 @@ void Simulator::DecodeTypeImmediate() { + break; + } + case SDR: { ++ printf_instr("SDR\t %s: %016lx, (%s: %016lx), imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask; + uint64_t mask = (1UL << al_offset * 8) - 1; + addr = rs + se_imm16 - al_offset; +@@ -7055,6 +7325,9 @@ void Simulator::DecodeTypeImmediate() { + break; + } + case LLD: { ++ printf_instr("LLD\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + DCHECK(kArchVariant != kMips64r6); + base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); + addr = rs + se_imm16; +@@ -7065,6 +7338,9 @@ void Simulator::DecodeTypeImmediate() { + break; + } + case SCD: { ++ printf_instr("SCD\t %s: %016lx, (%s: %016lx), imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + DCHECK(kArchVariant != kMips64r6); + addr = rs + se_imm16; + WriteConditional2W(addr, rt, instr_.instr(), rt_reg); +@@ -7080,11 +7356,17 @@ void Simulator::DecodeTypeImmediate() { + TraceMemRd(addr, get_fpu_register(ft_reg), DOUBLE); + break; + case SWC1: { ++ printf_instr("SWC1\t %s: %016f, %s: %016lx, imm16: %04lx\n", ++ FPURegisters::Name(ft_reg), get_fpu_register_float(ft_reg), ++ Registers::Name(rs_reg), rs, se_imm16); + int32_t alu_out_32 = static_cast(get_fpu_register(ft_reg)); + WriteW(rs + se_imm16, alu_out_32, instr_.instr()); + break; + } + case SDC1: ++ printf_instr("SDC1\t %s: %016f, %s: %016lx, imm16: %04lx\n", ++ FPURegisters::Name(ft_reg), get_fpu_register_double(ft_reg), ++ Registers::Name(rs_reg), rs, se_imm16); + WriteD(rs + se_imm16, get_fpu_register_double(ft_reg), instr_.instr()); + TraceMemWr(rs + se_imm16, get_fpu_register(ft_reg), DWORD); + break; +@@ -7257,6 +7539,8 @@ void Simulator::DecodeTypeJump() { + int64_t pc_high_bits = current_pc & 0xFFFFFFFFF0000000; + // Next pc. + int64_t next_pc = pc_high_bits | (simInstr.Imm26Value() << 2); ++ printf_instr("%s\t", simInstr.IsLinkingInstruction() ? "JAL" : "J"); ++ printf_instr("offs26: %x\n", instr_.Bits(25, 0)); + + // Execute branch delay slot. + // We don't check for end_sim_pc. First it should not be met as the current pc +@@ -7291,7 +7575,11 @@ void Simulator::InstructionDecode(Instruction* instr) { + dasm.InstructionDecode(buffer, reinterpret_cast(instr)); + } + ++ static int instr_count = 0; ++ USE(instr_count); + instr_ = instr; ++ printf_instr("\nInstr%3d: %08x, PC: %lx\t", instr_count++, instr_.Bits(31, 0), ++ get_pc()); + switch (instr_.InstructionType()) { + case Instruction::kRegisterType: + DecodeTypeRegister(); +diff --git a/src/3rdparty/chromium/v8/src/execution/simulator-base.h b/src/3rdparty/chromium/v8/src/execution/simulator-base.h +index 58aa753a33..9644b3d9b9 100644 +--- a/src/3rdparty/chromium/v8/src/execution/simulator-base.h ++++ b/src/3rdparty/chromium/v8/src/execution/simulator-base.h +@@ -87,7 +87,7 @@ class SimulatorBase { + static typename std::enable_if::value, intptr_t>::type + ConvertArg(T arg) { + static_assert(sizeof(T) <= sizeof(intptr_t), "type bigger than ptrsize"); +-#if V8_TARGET_ARCH_MIPS64 ++#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LA64 + // The MIPS64 calling convention is to sign extend all values, even unsigned + // ones. 
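The comment in the simulator-base.h hunk above is worth spelling out: on the n64-style ABI shared by MIPS64 and, with this patch, LA64, 32-bit arguments sit sign-extended in 64-bit registers even when their C type is unsigned, which is why ConvertArg() routes values through std::make_signed before widening. A stand-alone illustration of that convention; the name ConvertArgForSimulator is hypothetical and kept distinct from the patch's ConvertArg:

#include <cstdint>
#include <type_traits>

// Illustration of the sign-extending argument convention described above.
template <typename T>
intptr_t ConvertArgForSimulator(T arg) {
  static_assert(sizeof(T) <= sizeof(intptr_t), "type bigger than ptrsize");
  using signed_t = typename std::make_signed<T>::type;
  // Widen as a signed value first, so bit 31 is replicated into the upper half.
  return static_cast<intptr_t>(static_cast<signed_t>(arg));
}

int main() {
  uint32_t u = 0x80000000u;
  // The simulated callee sees 0xFFFFFFFF80000000, not 0x0000000080000000.
  intptr_t in_register = ConvertArgForSimulator(u);
  return in_register == static_cast<intptr_t>(INT32_MIN) ? 0 : 1;
}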
+ using signed_t = typename std::make_signed::type; +diff --git a/src/3rdparty/chromium/v8/src/execution/simulator.h b/src/3rdparty/chromium/v8/src/execution/simulator.h +index a4e07b235b..1bc39ac7e7 100644 +--- a/src/3rdparty/chromium/v8/src/execution/simulator.h ++++ b/src/3rdparty/chromium/v8/src/execution/simulator.h +@@ -24,6 +24,8 @@ + #include "src/execution/mips/simulator-mips.h" + #elif V8_TARGET_ARCH_MIPS64 + #include "src/execution/mips64/simulator-mips64.h" ++#elif V8_TARGET_ARCH_LA64 ++#include "src/execution/la64/simulator-la64.h" + #elif V8_TARGET_ARCH_S390 + #include "src/execution/s390/simulator-s390.h" + #else +diff --git a/src/3rdparty/chromium/v8/src/flags/flag-definitions.h b/src/3rdparty/chromium/v8/src/flags/flag-definitions.h +index c3f360cdf0..f14b6a1e5e 100644 +--- a/src/3rdparty/chromium/v8/src/flags/flag-definitions.h ++++ b/src/3rdparty/chromium/v8/src/flags/flag-definitions.h +@@ -1246,7 +1246,7 @@ DEFINE_BOOL(check_icache, false, + "Check icache flushes in ARM and MIPS simulator") + DEFINE_INT(stop_sim_at, 0, "Simulator stop after x number of instructions") + #if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_MIPS64) || \ +- defined(V8_TARGET_ARCH_PPC64) ++ defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_LA64) + DEFINE_INT(sim_stack_alignment, 16, + "Stack alignment in bytes in simulator. This must be a power of two " + "and it must be at least 16. 16 is default.") +diff --git a/src/3rdparty/chromium/v8/src/heap/base/asm/la64/push_registers_asm.cc b/src/3rdparty/chromium/v8/src/heap/base/asm/la64/push_registers_asm.cc +new file mode 100644 +index 0000000000..c9e6f5d2cc +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/heap/base/asm/la64/push_registers_asm.cc +@@ -0,0 +1,48 @@ ++// Copyright 2020 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++// Push all callee-saved registers to get them on the stack for conservative ++// stack scanning. ++// ++// See asm/x64/push_registers_clang.cc for why the function is not generated ++// using clang. ++// ++// Do not depend on V8_TARGET_OS_* defines as some embedders may override the ++// GN toolchain (e.g. ChromeOS) and not provide them. ++asm(".text \n" ++ ".global PushAllRegistersAndIterateStack \n" ++ ".type PushAllRegistersAndIterateStack, %function \n" ++ ".hidden PushAllRegistersAndIterateStack \n" ++ "PushAllRegistersAndIterateStack: \n" ++ // Push all callee-saved registers and save return address. ++ " addi.d $sp, $sp, -96 \n" ++ " st.d $ra, $sp, 88 \n" ++ " st.d $s8, $sp, 80 \n" ++ " st.d $sp, $sp, 72 \n" ++ " st.d $fp, $sp, 64 \n" ++ " st.d $s7, $sp, 56 \n" ++ " st.d $s6, $sp, 48 \n" ++ " st.d $s5, $sp, 40 \n" ++ " st.d $s4, $sp, 32 \n" ++ " st.d $s3, $sp, 24 \n" ++ " st.d $s2, $sp, 16 \n" ++ " st.d $s1, $sp, 8 \n" ++ " st.d $s0, $sp, 0 \n" ++ // Maintain frame pointer. ++ " addi.d $s8, $sp, 0 \n" ++ // Pass 1st parameter (a0) unchanged (Stack*). ++ // Pass 2nd parameter (a1) unchanged (StackVisitor*). ++ // Save 3rd parameter (a2; IterateStackCallback). ++ " addi.d $a3, $a2, 0 \n" ++ // Call the callback. ++ // Pass 3rd parameter as sp (stack pointer). ++ " addi.d $a2, $sp, 0 \n" ++ " jirl $ra, $a3, 0 \n" ++ // Load return address. ++ " ld.d $ra, $sp, 88 \n" ++ // Restore frame pointer. ++ " ld.d $s8, $sp, 80 \n" ++ // Discard all callee-saved registers. 
++ " addi.d $sp, $sp, 96 \n" ++ " jirl $zero, $ra, 0 \n"); +diff --git a/src/3rdparty/chromium/v8/src/interpreter/interpreter-assembler.cc b/src/3rdparty/chromium/v8/src/interpreter/interpreter-assembler.cc +index eaea1c91dd..66775d6dfe 100644 +--- a/src/3rdparty/chromium/v8/src/interpreter/interpreter-assembler.cc ++++ b/src/3rdparty/chromium/v8/src/interpreter/interpreter-assembler.cc +@@ -1484,7 +1484,7 @@ void InterpreterAssembler::TraceBytecodeDispatch(TNode target_bytecode) { + + // static + bool InterpreterAssembler::TargetSupportsUnalignedAccess() { +-#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 ++#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LA64 + return false; + #elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \ + V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC || \ +diff --git a/src/3rdparty/chromium/v8/src/libsampler/sampler.cc b/src/3rdparty/chromium/v8/src/libsampler/sampler.cc +index e2091ceb32..6ebb1b8305 100644 +--- a/src/3rdparty/chromium/v8/src/libsampler/sampler.cc ++++ b/src/3rdparty/chromium/v8/src/libsampler/sampler.cc +@@ -415,6 +415,10 @@ void SignalHandler::FillRegisterState(void* context, RegisterState* state) { + state->pc = reinterpret_cast(mcontext.pc); + state->sp = reinterpret_cast(mcontext.gregs[29]); + state->fp = reinterpret_cast(mcontext.gregs[30]); ++#elif V8_HOST_ARCH_LA64 ++ state->pc = reinterpret_cast(mcontext.__pc); ++ state->sp = reinterpret_cast(mcontext.__gregs[3]); ++ state->fp = reinterpret_cast(mcontext.__gregs[22]); + #elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64 + #if V8_LIBC_GLIBC + state->pc = reinterpret_cast(ucontext->uc_mcontext.regs->nip); +diff --git a/src/3rdparty/chromium/v8/src/logging/log.cc b/src/3rdparty/chromium/v8/src/logging/log.cc +index dc79ffda5e..6c745cea8c 100644 +--- a/src/3rdparty/chromium/v8/src/logging/log.cc ++++ b/src/3rdparty/chromium/v8/src/logging/log.cc +@@ -588,6 +588,8 @@ void LowLevelLogger::LogCodeInfo() { + const char arch[] = "ppc64"; + #elif V8_TARGET_ARCH_MIPS + const char arch[] = "mips"; ++#elif V8_TARGET_ARCH_LA64 ++ const char arch[] = "la64"; + #elif V8_TARGET_ARCH_ARM64 + const char arch[] = "arm64"; + #elif V8_TARGET_ARCH_S390 +diff --git a/src/3rdparty/chromium/v8/src/objects/backing-store.cc b/src/3rdparty/chromium/v8/src/objects/backing-store.cc +index 52ab0085f7..c96faf197b 100644 +--- a/src/3rdparty/chromium/v8/src/objects/backing-store.cc ++++ b/src/3rdparty/chromium/v8/src/objects/backing-store.cc +@@ -29,7 +29,7 @@ constexpr bool kUseGuardRegions = true; + constexpr bool kUseGuardRegions = false; + #endif + +-#if V8_TARGET_ARCH_MIPS64 ++#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LA64 + // MIPS64 has a user space of 2^40 bytes on most processors, + // address space limits needs to be smaller. + constexpr size_t kAddressSpaceLimit = 0x8000000000L; // 512 GiB +diff --git a/src/3rdparty/chromium/v8/src/objects/code.h b/src/3rdparty/chromium/v8/src/objects/code.h +index d80e72fa03..7da4c61746 100644 +--- a/src/3rdparty/chromium/v8/src/objects/code.h ++++ b/src/3rdparty/chromium/v8/src/objects/code.h +@@ -412,6 +412,8 @@ class Code : public HeapObject { + static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 20 : 0; + #elif V8_TARGET_ARCH_MIPS64 + static constexpr int kHeaderPaddingSize = 0; ++#elif V8_TARGET_ARCH_LA64 ++ static constexpr int kHeaderPaddingSize = 0; + #elif V8_TARGET_ARCH_X64 + static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 
20 : 0; + #elif V8_TARGET_ARCH_ARM +diff --git a/src/3rdparty/chromium/v8/src/profiler/tick-sample.cc b/src/3rdparty/chromium/v8/src/profiler/tick-sample.cc +index 00bff91cd0..56654b6288 100644 +--- a/src/3rdparty/chromium/v8/src/profiler/tick-sample.cc ++++ b/src/3rdparty/chromium/v8/src/profiler/tick-sample.cc +@@ -104,7 +104,7 @@ bool SimulatorHelper::FillRegisters(Isolate* isolate, + state->sp = reinterpret_cast(simulator->sp()); + state->fp = reinterpret_cast(simulator->fp()); + state->lr = reinterpret_cast(simulator->lr()); +-#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 ++#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LA64 + if (!simulator->has_bad_pc()) { + state->pc = reinterpret_cast(simulator->get_pc()); + } +diff --git a/src/3rdparty/chromium/v8/src/regexp/la64/regexp-macro-assembler-la64.cc b/src/3rdparty/chromium/v8/src/regexp/la64/regexp-macro-assembler-la64.cc +new file mode 100644 +index 0000000000..8a5e9c30c6 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/regexp/la64/regexp-macro-assembler-la64.cc +@@ -0,0 +1,1286 @@ ++// Copyright 2012 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#if V8_TARGET_ARCH_LA64 ++ ++#include "src/regexp/la64/regexp-macro-assembler-la64.h" ++ ++#include "src/codegen/assembler-inl.h" ++#include "src/codegen/macro-assembler.h" ++#include "src/logging/log.h" ++#include "src/objects/objects-inl.h" ++#include "src/regexp/regexp-macro-assembler.h" ++#include "src/regexp/regexp-stack.h" ++#include "src/snapshot/embedded/embedded-data.h" ++#include "src/strings/unicode.h" ++ ++namespace v8 { ++namespace internal { ++ ++/* clang-format off ++ * ++ * This assembler uses the following register assignment convention ++ * - t3 : Temporarily stores the index of capture start after a matching pass ++ * for a global regexp. ++ * - a5 : Pointer to current Code object including heap object tag. ++ * - a6 : Current position in input, as negative offset from end of string. ++ * Please notice that this is the byte offset, not the character offset! ++ * - a7 : Currently loaded character. Must be loaded using ++ * LoadCurrentCharacter before using any of the dispatch methods. ++ * - t0 : Points to tip of backtrack stack ++ * - t1 : Unused. ++ * - t2 : End of input (points to byte after last character in input). ++ * - fp : Frame pointer. Used to access arguments, local variables and ++ * RegExp registers. ++ * - sp : Points to tip of C stack. ++ * ++ * The remaining registers are free for computations. ++ * Each call to a public method should retain this convention. ++ * ++ * TODO(plind): O32 documented here with intent of having single 32/64 codebase ++ * in the future. ++ * ++ * The O32 stack will have the following structure: ++ * ++ * - fp[72] Isolate* isolate (address of the current isolate) ++ * - fp[68] direct_call (if 1, direct call from JavaScript code, ++ * if 0, call through the runtime system). ++ * - fp[64] stack_area_base (High end of the memory area to use as ++ * backtracking stack). ++ * - fp[60] capture array size (may fit multiple sets of matches) ++ * - fp[44..59] MIPS O32 four argument slots ++ * - fp[40] int* capture_array (int[num_saved_registers_], for output). ++ * --- sp when called --- ++ * - fp[36] return address (lr). ++ * - fp[32] old frame pointer (r11). ++ * - fp[0..31] backup of registers s0..s7. ++ * --- frame pointer ---- ++ * - fp[-4] end of input (address of end of string). 
++ * - fp[-8] start of input (address of first character in string). ++ * - fp[-12] start index (character index of start). ++ * - fp[-16] void* input_string (location of a handle containing the string). ++ * - fp[-20] success counter (only for global regexps to count matches). ++ * - fp[-24] Offset of location before start of input (effectively character ++ * string start - 1). Used to initialize capture registers to a ++ * non-position. ++ * - fp[-28] At start (if 1, we are starting at the start of the ++ * string, otherwise 0) ++ * - fp[-32] register 0 (Only positions must be stored in the first ++ * - register 1 num_saved_registers_ registers) ++ * - ... ++ * - register num_registers-1 ++ * --- sp --- ++ * ++ * ++ * The N64 stack will have the following structure: ++ * ++ * - fp[80] Isolate* isolate (address of the current isolate) kIsolate ++ * kStackFrameHeader ++ * --- sp when called --- ++ * - fp[72] ra Return from RegExp code (ra). kReturnAddress ++ * - fp[64] s9, old-fp Old fp, callee saved(s9). ++ * - fp[0..63] s0..s7 Callee-saved registers s0..s7. ++ * --- frame pointer ---- ++ * - fp[-8] direct_call (1 = direct call from JS, 0 = from runtime) kDirectCall ++ * - fp[-16] stack_base (Top of backtracking stack). kStackHighEnd ++ * - fp[-24] capture array size (may fit multiple sets of matches) kNumOutputRegisters ++ * - fp[-32] int* capture_array (int[num_saved_registers_], for output). kRegisterOutput ++ * - fp[-40] end of input (address of end of string). kInputEnd ++ * - fp[-48] start of input (address of first character in string). kInputStart ++ * - fp[-56] start index (character index of start). kStartIndex ++ * - fp[-64] void* input_string (location of a handle containing the string). kInputString ++ * - fp[-72] success counter (only for global regexps to count matches). kSuccessfulCaptures ++ * - fp[-80] Offset of location before start of input (effectively character kStringStartMinusOne ++ * position -1). Used to initialize capture registers to a ++ * non-position. ++ * --------- The following output registers are 32-bit values. --------- ++ * - fp[-88] register 0 (Only positions must be stored in the first kRegisterZero ++ * - register 1 num_saved_registers_ registers) ++ * - ... ++ * - register num_registers-1 ++ * --- sp --- ++ * ++ * The first num_saved_registers_ registers are initialized to point to ++ * "character -1" in the string (i.e., char_size() bytes before the first ++ * character of the string). The remaining registers start out as garbage. ++ * ++ * The data up to the return address must be placed there by the calling ++ * code and the remaining arguments are passed in registers, e.g. by calling the ++ * code entry as cast to a function with the signature: ++ * int (*match)(String input_string, ++ * int start_index, ++ * Address start, ++ * Address end, ++ * int* capture_output_array, ++ * int num_capture_registers, ++ * byte* stack_area_base, ++ * bool direct_call = false, ++ * Isolate* isolate); ++ * The call is performed by NativeRegExpMacroAssembler::Execute() ++ * (in regexp-macro-assembler.cc) via the GeneratedCode wrapper. 
++ * ++ * clang-format on ++ */ ++ ++#define __ ACCESS_MASM(masm_) ++ ++const int RegExpMacroAssemblerLA64::kRegExpCodeSize; ++ ++RegExpMacroAssemblerLA64::RegExpMacroAssemblerLA64(Isolate* isolate, Zone* zone, ++ Mode mode, ++ int registers_to_save) ++ : NativeRegExpMacroAssembler(isolate, zone), ++ masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes, ++ NewAssemblerBuffer(kRegExpCodeSize))), ++ mode_(mode), ++ num_registers_(registers_to_save), ++ num_saved_registers_(registers_to_save), ++ entry_label_(), ++ start_label_(), ++ success_label_(), ++ backtrack_label_(), ++ exit_label_(), ++ internal_failure_label_() { ++ masm_->set_root_array_available(false); ++ ++ DCHECK_EQ(0, registers_to_save % 2); ++ __ jmp(&entry_label_); // We'll write the entry code later. ++ // If the code gets too big or corrupted, an internal exception will be ++ // raised, and we will exit right away. ++ __ bind(&internal_failure_label_); ++ __ li(a0, Operand(FAILURE)); ++ __ Ret(); ++ __ bind(&start_label_); // And then continue from here. ++} ++ ++RegExpMacroAssemblerLA64::~RegExpMacroAssemblerLA64() { ++ delete masm_; ++ // Unuse labels in case we throw away the assembler without calling GetCode. ++ entry_label_.Unuse(); ++ start_label_.Unuse(); ++ success_label_.Unuse(); ++ backtrack_label_.Unuse(); ++ exit_label_.Unuse(); ++ check_preempt_label_.Unuse(); ++ stack_overflow_label_.Unuse(); ++ internal_failure_label_.Unuse(); ++} ++ ++int RegExpMacroAssemblerLA64::stack_limit_slack() { ++ return RegExpStack::kStackLimitSlack; ++} ++ ++void RegExpMacroAssemblerLA64::AdvanceCurrentPosition(int by) { ++ if (by != 0) { ++ __ Add_d(current_input_offset(), current_input_offset(), ++ Operand(by * char_size())); ++ } ++} ++ ++void RegExpMacroAssemblerLA64::AdvanceRegister(int reg, int by) { ++ DCHECK_LE(0, reg); ++ DCHECK_GT(num_registers_, reg); ++ if (by != 0) { ++ __ Ld_d(a0, register_location(reg)); ++ __ Add_d(a0, a0, Operand(by)); ++ __ St_d(a0, register_location(reg)); ++ } ++} ++ ++void RegExpMacroAssemblerLA64::Backtrack() { ++ CheckPreemption(); ++ if (has_backtrack_limit()) { ++ Label next; ++ __ Ld_d(a0, MemOperand(frame_pointer(), kBacktrackCount)); ++ __ Add_d(a0, a0, Operand(1)); ++ __ St_d(a0, MemOperand(frame_pointer(), kBacktrackCount)); ++ __ Branch(&next, ne, a0, Operand(backtrack_limit())); ++ ++ // Exceeded limits are treated as a failed match. ++ Fail(); ++ ++ __ bind(&next); ++ } ++ // Pop Code offset from backtrack stack, add Code and jump to location. 
++ Pop(a0); ++ __ Add_d(a0, a0, code_pointer()); ++ __ Jump(a0); ++} ++ ++void RegExpMacroAssemblerLA64::Bind(Label* label) { __ bind(label); } ++ ++void RegExpMacroAssemblerLA64::CheckCharacter(uint32_t c, Label* on_equal) { ++ BranchOrBacktrack(on_equal, eq, current_character(), Operand(c)); ++} ++ ++void RegExpMacroAssemblerLA64::CheckCharacterGT(uc16 limit, Label* on_greater) { ++ BranchOrBacktrack(on_greater, gt, current_character(), Operand(limit)); ++} ++ ++void RegExpMacroAssemblerLA64::CheckAtStart(int cp_offset, Label* on_at_start) { ++ __ Ld_d(a1, MemOperand(frame_pointer(), kStringStartMinusOne)); ++ __ Add_d(a0, current_input_offset(), ++ Operand(-char_size() + cp_offset * char_size())); ++ BranchOrBacktrack(on_at_start, eq, a0, Operand(a1)); ++} ++ ++void RegExpMacroAssemblerLA64::CheckNotAtStart(int cp_offset, ++ Label* on_not_at_start) { ++ __ Ld_d(a1, MemOperand(frame_pointer(), kStringStartMinusOne)); ++ __ Add_d(a0, current_input_offset(), ++ Operand(-char_size() + cp_offset * char_size())); ++ BranchOrBacktrack(on_not_at_start, ne, a0, Operand(a1)); ++} ++ ++void RegExpMacroAssemblerLA64::CheckCharacterLT(uc16 limit, Label* on_less) { ++ BranchOrBacktrack(on_less, lt, current_character(), Operand(limit)); ++} ++ ++void RegExpMacroAssemblerLA64::CheckGreedyLoop(Label* on_equal) { ++ Label backtrack_non_equal; ++ __ Ld_w(a0, MemOperand(backtrack_stackpointer(), 0)); ++ __ Branch(&backtrack_non_equal, ne, current_input_offset(), Operand(a0)); ++ __ Add_d(backtrack_stackpointer(), backtrack_stackpointer(), ++ Operand(kIntSize)); ++ __ bind(&backtrack_non_equal); ++ BranchOrBacktrack(on_equal, eq, current_input_offset(), Operand(a0)); ++} ++ ++void RegExpMacroAssemblerLA64::CheckNotBackReferenceIgnoreCase( ++ int start_reg, bool read_backward, Label* on_no_match) { ++ Label fallthrough; ++ __ Ld_d(a0, register_location(start_reg)); // Index of start of capture. ++ __ Ld_d(a1, register_location(start_reg + 1)); // Index of end of capture. ++ __ Sub_d(a1, a1, a0); // Length of capture. ++ ++ // At this point, the capture registers are either both set or both cleared. ++ // If the capture length is zero, then the capture is either empty or cleared. ++ // Fall through in both cases. ++ __ Branch(&fallthrough, eq, a1, Operand(zero_reg)); ++ ++ if (read_backward) { ++ __ Ld_d(t1, MemOperand(frame_pointer(), kStringStartMinusOne)); ++ __ Add_d(t1, t1, a1); ++ BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t1)); ++ } else { ++ __ Add_d(t1, a1, current_input_offset()); ++ // Check that there are enough characters left in the input. ++ BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg)); ++ } ++ ++ if (mode_ == LATIN1) { ++ Label success; ++ Label fail; ++ Label loop_check; ++ ++ // a0 - offset of start of capture. ++ // a1 - length of capture. ++ __ Add_d(a0, a0, Operand(end_of_input_address())); ++ __ Add_d(a2, end_of_input_address(), Operand(current_input_offset())); ++ if (read_backward) { ++ __ Sub_d(a2, a2, Operand(a1)); ++ } ++ __ Add_d(a1, a0, Operand(a1)); ++ ++ // a0 - Address of start of capture. ++ // a1 - Address of end of capture. ++ // a2 - Address of current input position. ++ ++ Label loop; ++ __ bind(&loop); ++ __ Ld_bu(a3, MemOperand(a0, 0)); ++ __ addi_d(a0, a0, char_size()); ++ __ Ld_bu(a4, MemOperand(a2, 0)); ++ __ addi_d(a2, a2, char_size()); ++ ++ __ Branch(&loop_check, eq, a4, Operand(a3)); ++ ++ // Mismatch, try case-insensitive match (converting letters to lower-case). 
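A brief hedged sketch (illustrative only, not patch code) of the position representation the checks above rely on: the current position is kept as a negative byte offset from the end of the input, and the kStringStartMinusOne slot holds the offset of the character just before the start, so "at start" reduces to a single comparison.

#include <cassert>
#include <cstdint>

int main() {
  const int64_t length_bytes = 6;                      // e.g. "abcdef" in LATIN1 mode
  const int char_size = 1;                             // one byte per character in LATIN1
  int64_t current_input_offset = -length_bytes;        // positioned at the first character
  int64_t string_start_minus_one = -length_bytes - 1;  // the kStringStartMinusOne value
  // CheckAtStart: position minus one character equals "start - 1" exactly at the start.
  assert(current_input_offset - char_size == string_start_minus_one);
  return 0;
}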
++ __ Or(a3, a3, Operand(0x20)); // Convert capture character to lower-case. ++ __ Or(a4, a4, Operand(0x20)); // Also convert input character. ++ __ Branch(&fail, ne, a4, Operand(a3)); ++ __ Sub_d(a3, a3, Operand('a')); ++ __ Branch(&loop_check, ls, a3, Operand('z' - 'a')); ++ // Latin-1: Check for values in range [224,254] but not 247. ++ __ Sub_d(a3, a3, Operand(224 - 'a')); ++ // Weren't Latin-1 letters. ++ __ Branch(&fail, hi, a3, Operand(254 - 224)); ++ // Check for 247. ++ __ Branch(&fail, eq, a3, Operand(247 - 224)); ++ ++ __ bind(&loop_check); ++ __ Branch(&loop, lt, a0, Operand(a1)); ++ __ jmp(&success); ++ ++ __ bind(&fail); ++ GoTo(on_no_match); ++ ++ __ bind(&success); ++ // Compute new value of character position after the matched part. ++ __ Sub_d(current_input_offset(), a2, end_of_input_address()); ++ if (read_backward) { ++ __ Ld_d(t1, register_location(start_reg)); // Index of start of capture. ++ __ Ld_d(a2, ++ register_location(start_reg + 1)); // Index of end of capture. ++ __ Add_d(current_input_offset(), current_input_offset(), Operand(t1)); ++ __ Sub_d(current_input_offset(), current_input_offset(), Operand(a2)); ++ } ++ } else { ++ DCHECK(mode_ == UC16); ++ // Put regexp engine registers on stack. ++ RegList regexp_registers_to_retain = current_input_offset().bit() | ++ current_character().bit() | ++ backtrack_stackpointer().bit(); ++ __ MultiPush(regexp_registers_to_retain); ++ ++ int argument_count = 4; ++ __ PrepareCallCFunction(argument_count, a2); ++ ++ // a0 - offset of start of capture. ++ // a1 - length of capture. ++ ++ // Put arguments into arguments registers. ++ // Parameters are ++ // a0: Address byte_offset1 - Address captured substring's start. ++ // a1: Address byte_offset2 - Address of current character position. ++ // a2: size_t byte_length - length of capture in bytes(!). ++ // a3: Isolate* isolate. ++ ++ // Address of start of capture. ++ __ Add_d(a0, a0, Operand(end_of_input_address())); ++ // Length of capture. ++ __ mov(a2, a1); ++ // Save length in callee-save register for use on return. ++ __ mov(s3, a1); ++ // Address of current input position. ++ __ Add_d(a1, current_input_offset(), Operand(end_of_input_address())); ++ if (read_backward) { ++ __ Sub_d(a1, a1, Operand(s3)); ++ } ++ // Isolate. ++ __ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate()))); ++ ++ { ++ AllowExternalCallThatCantCauseGC scope(masm_); ++ ExternalReference function = ++ ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate()); ++ __ CallCFunction(function, argument_count); ++ } ++ ++ // Restore regexp engine registers. ++ __ MultiPop(regexp_registers_to_retain); ++ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE); ++ __ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd)); ++ ++ // Check if function returned non-zero for success or zero for failure. ++ BranchOrBacktrack(on_no_match, eq, a0, Operand(zero_reg)); ++ // On success, increment position by length of capture. ++ if (read_backward) { ++ __ Sub_d(current_input_offset(), current_input_offset(), Operand(s3)); ++ } else { ++ __ Add_d(current_input_offset(), current_input_offset(), Operand(s3)); ++ } ++ } ++ ++ __ bind(&fallthrough); ++} ++ ++void RegExpMacroAssemblerLA64::CheckNotBackReference(int start_reg, ++ bool read_backward, ++ Label* on_no_match) { ++ Label fallthrough; ++ ++ // Find length of back-referenced capture. 
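The case-insensitive Latin-1 loop above folds both characters with OR 0x20 and then filters out non-letters. A standalone sketch of that filter, my reading of the register-level checks rather than code from the patch:

#include <cassert>

// OR 0x20 folds ASCII letters to lower case; Latin-1 letters 0xE0..0xFE are
// also accepted, except 0xF7 (the division sign, 247).
static bool CaselessLatin1Equal(unsigned a, unsigned b) {
  if (a == b) return true;
  a |= 0x20;
  b |= 0x20;
  if (a != b) return false;
  if (a - 'a' <= static_cast<unsigned>('z' - 'a')) return true;     // ASCII letter
  if (a - 0xE0 > static_cast<unsigned>(0xFE - 0xE0)) return false;  // not a Latin-1 letter
  return a != 0xF7;                                                 // 247 never matches
}

int main() {
  assert(CaselessLatin1Equal('A', 'a'));
  assert(!CaselessLatin1Equal(0xD7, 0xF7));  // multiplication vs. division sign
  return 0;
}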
++ __ Ld_d(a0, register_location(start_reg)); ++ __ Ld_d(a1, register_location(start_reg + 1)); ++ __ Sub_d(a1, a1, a0); // Length to check. ++ ++ // At this point, the capture registers are either both set or both cleared. ++ // If the capture length is zero, then the capture is either empty or cleared. ++ // Fall through in both cases. ++ __ Branch(&fallthrough, eq, a1, Operand(zero_reg)); ++ ++ if (read_backward) { ++ __ Ld_d(t1, MemOperand(frame_pointer(), kStringStartMinusOne)); ++ __ Add_d(t1, t1, a1); ++ BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t1)); ++ } else { ++ __ Add_d(t1, a1, current_input_offset()); ++ // Check that there are enough characters left in the input. ++ BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg)); ++ } ++ ++ // Compute pointers to match string and capture string. ++ __ Add_d(a0, a0, Operand(end_of_input_address())); ++ __ Add_d(a2, end_of_input_address(), Operand(current_input_offset())); ++ if (read_backward) { ++ __ Sub_d(a2, a2, Operand(a1)); ++ } ++ __ Add_d(a1, a1, Operand(a0)); ++ ++ Label loop; ++ __ bind(&loop); ++ if (mode_ == LATIN1) { ++ __ Ld_bu(a3, MemOperand(a0, 0)); ++ __ addi_d(a0, a0, char_size()); ++ __ Ld_bu(a4, MemOperand(a2, 0)); ++ __ addi_d(a2, a2, char_size()); ++ } else { ++ DCHECK(mode_ == UC16); ++ __ Ld_hu(a3, MemOperand(a0, 0)); ++ __ addi_d(a0, a0, char_size()); ++ __ Ld_hu(a4, MemOperand(a2, 0)); ++ __ addi_d(a2, a2, char_size()); ++ } ++ BranchOrBacktrack(on_no_match, ne, a3, Operand(a4)); ++ __ Branch(&loop, lt, a0, Operand(a1)); ++ ++ // Move current character position to position after match. ++ __ Sub_d(current_input_offset(), a2, end_of_input_address()); ++ if (read_backward) { ++ __ Ld_d(t1, register_location(start_reg)); // Index of start of capture. ++ __ Ld_d(a2, register_location(start_reg + 1)); // Index of end of capture. ++ __ Add_d(current_input_offset(), current_input_offset(), Operand(t1)); ++ __ Sub_d(current_input_offset(), current_input_offset(), Operand(a2)); ++ } ++ __ bind(&fallthrough); ++} ++ ++void RegExpMacroAssemblerLA64::CheckNotCharacter(uint32_t c, ++ Label* on_not_equal) { ++ BranchOrBacktrack(on_not_equal, ne, current_character(), Operand(c)); ++} ++ ++void RegExpMacroAssemblerLA64::CheckCharacterAfterAnd(uint32_t c, uint32_t mask, ++ Label* on_equal) { ++ __ And(a0, current_character(), Operand(mask)); ++ Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c); ++ BranchOrBacktrack(on_equal, eq, a0, rhs); ++} ++ ++void RegExpMacroAssemblerLA64::CheckNotCharacterAfterAnd(uint32_t c, ++ uint32_t mask, ++ Label* on_not_equal) { ++ __ And(a0, current_character(), Operand(mask)); ++ Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c); ++ BranchOrBacktrack(on_not_equal, ne, a0, rhs); ++} ++ ++void RegExpMacroAssemblerLA64::CheckNotCharacterAfterMinusAnd( ++ uc16 c, uc16 minus, uc16 mask, Label* on_not_equal) { ++ DCHECK_GT(String::kMaxUtf16CodeUnit, minus); ++ __ Sub_d(a0, current_character(), Operand(minus)); ++ __ And(a0, a0, Operand(mask)); ++ BranchOrBacktrack(on_not_equal, ne, a0, Operand(c)); ++} ++ ++void RegExpMacroAssemblerLA64::CheckCharacterInRange(uc16 from, uc16 to, ++ Label* on_in_range) { ++ __ Sub_d(a0, current_character(), Operand(from)); ++ // Unsigned lower-or-same condition. ++ BranchOrBacktrack(on_in_range, ls, a0, Operand(to - from)); ++} ++ ++void RegExpMacroAssemblerLA64::CheckCharacterNotInRange( ++ uc16 from, uc16 to, Label* on_not_in_range) { ++ __ Sub_d(a0, current_character(), Operand(from)); ++ // Unsigned higher condition. 
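The ls/hi conditions used by CheckCharacterInRange and CheckCharacterNotInRange around this point are the unsigned forms of <= and >, so one subtraction plus one comparison implements a two-sided range test. A minimal standalone illustration (not patch code):

#include <cassert>

// "from <= c && c <= to" as a single unsigned comparison: values below 'from'
// wrap around to huge unsigned numbers and fail the test.
static bool InRange(unsigned c, unsigned from, unsigned to) {
  return c - from <= to - from;
}

int main() {
  assert(InRange('5', '0', '9'));
  assert(!InRange('/', '0', '9'));
  return 0;
}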
++ BranchOrBacktrack(on_not_in_range, hi, a0, Operand(to - from));
++}
++
++void RegExpMacroAssemblerLA64::CheckBitInTable(Handle<ByteArray> table,
++ Label* on_bit_set) {
++ __ li(a0, Operand(table));
++ if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) {
++ __ And(a1, current_character(), Operand(kTableSize - 1));
++ __ Add_d(a0, a0, a1);
++ } else {
++ __ Add_d(a0, a0, current_character());
++ }
++
++ __ Ld_bu(a0, FieldMemOperand(a0, ByteArray::kHeaderSize));
++ BranchOrBacktrack(on_bit_set, ne, a0, Operand(zero_reg));
++}
++
++bool RegExpMacroAssemblerLA64::CheckSpecialCharacterClass(uc16 type,
++ Label* on_no_match) {
++ // Range checks (c in min..max) are generally implemented by an unsigned
++ // (c - min) <= (max - min) check.
++ switch (type) {
++ case 's':
++ // Match space-characters.
++ if (mode_ == LATIN1) {
++ // One byte space characters are '\t'..'\r', ' ' and \u00a0.
++ Label success;
++ __ Branch(&success, eq, current_character(), Operand(' '));
++ // Check range 0x09..0x0D.
++ __ Sub_d(a0, current_character(), Operand('\t'));
++ __ Branch(&success, ls, a0, Operand('\r' - '\t'));
++ // \u00a0 (NBSP).
++ BranchOrBacktrack(on_no_match, ne, a0, Operand(0x00A0 - '\t'));
++ __ bind(&success);
++ return true;
++ }
++ return false;
++ case 'S':
++ // The emitted code for generic character classes is good enough.
++ return false;
++ case 'd':
++ // Match Latin1 digits ('0'..'9').
++ __ Sub_d(a0, current_character(), Operand('0'));
++ BranchOrBacktrack(on_no_match, hi, a0, Operand('9' - '0'));
++ return true;
++ case 'D':
++ // Match non Latin1-digits.
++ __ Sub_d(a0, current_character(), Operand('0'));
++ BranchOrBacktrack(on_no_match, ls, a0, Operand('9' - '0'));
++ return true;
++ case '.': {
++ // Match non-newlines (not 0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029).
++ __ Xor(a0, current_character(), Operand(0x01));
++ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C.
++ __ Sub_d(a0, a0, Operand(0x0B));
++ BranchOrBacktrack(on_no_match, ls, a0, Operand(0x0C - 0x0B));
++ if (mode_ == UC16) {
++ // Compare original value to 0x2028 and 0x2029, using the already
++ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
++ // 0x201D (0x2028 - 0x0B) or 0x201E.
++ __ Sub_d(a0, a0, Operand(0x2028 - 0x0B));
++ BranchOrBacktrack(on_no_match, ls, a0, Operand(1));
++ }
++ return true;
++ }
++ case 'n': {
++ // Match newlines (0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029).
++ __ Xor(a0, current_character(), Operand(0x01));
++ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C.
++ __ Sub_d(a0, a0, Operand(0x0B));
++ if (mode_ == LATIN1) {
++ BranchOrBacktrack(on_no_match, hi, a0, Operand(0x0C - 0x0B));
++ } else {
++ Label done;
++ BranchOrBacktrack(&done, ls, a0, Operand(0x0C - 0x0B));
++ // Compare original value to 0x2028 and 0x2029, using the already
++ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
++ // 0x201D (0x2028 - 0x0B) or 0x201E.
++ __ Sub_d(a0, a0, Operand(0x2028 - 0x0B));
++ BranchOrBacktrack(on_no_match, hi, a0, Operand(1));
++ __ bind(&done);
++ }
++ return true;
++ }
++ case 'w': {
++ if (mode_ != LATIN1) {
++ // Table is 256 entries, so all Latin1 characters can be tested.
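For the '.' and '\n' classes above, XOR with 0x01 maps '\n' (0x0A) to 0x0B and '\r' (0x0D) to 0x0C, so a single range check covers both, and the same transformed value is reused for U+2028/U+2029 in UC16 mode. A standalone sketch of the trick (illustrative only, not patch code):

#include <cassert>

static bool IsLineTerminator(unsigned c, bool uc16) {
  unsigned x = (c ^ 0x01) - 0x0B;   // '\n' -> 0, '\r' -> 1
  if (x <= 0x0C - 0x0B) return true;
  if (!uc16) return false;
  return x - (0x2028 - 0x0B) <= 1;  // U+2028 or U+2029
}

int main() {
  assert(IsLineTerminator('\n', false) && IsLineTerminator('\r', false));
  assert(!IsLineTerminator('a', false));
  assert(IsLineTerminator(0x2029, true));
  return 0;
}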
++ BranchOrBacktrack(on_no_match, hi, current_character(), Operand('z'));
++ }
++ ExternalReference map =
++ ExternalReference::re_word_character_map(isolate());
++ __ li(a0, Operand(map));
++ __ Add_d(a0, a0, current_character());
++ __ Ld_bu(a0, MemOperand(a0, 0));
++ BranchOrBacktrack(on_no_match, eq, a0, Operand(zero_reg));
++ return true;
++ }
++ case 'W': {
++ Label done;
++ if (mode_ != LATIN1) {
++ // Table is 256 entries, so all Latin1 characters can be tested.
++ __ Branch(&done, hi, current_character(), Operand('z'));
++ }
++ ExternalReference map =
++ ExternalReference::re_word_character_map(isolate());
++ __ li(a0, Operand(map));
++ __ Add_d(a0, a0, current_character());
++ __ Ld_bu(a0, MemOperand(a0, 0));
++ BranchOrBacktrack(on_no_match, ne, a0, Operand(zero_reg));
++ if (mode_ != LATIN1) {
++ __ bind(&done);
++ }
++ return true;
++ }
++ case '*':
++ // Match any character.
++ return true;
++ // No custom implementation (yet): s(UC16), S(UC16).
++ default:
++ return false;
++ }
++}
++
++void RegExpMacroAssemblerLA64::Fail() {
++ __ li(a0, Operand(FAILURE));
++ __ jmp(&exit_label_);
++}
++
++Handle<HeapObject> RegExpMacroAssemblerLA64::GetCode(Handle<String> source) {
++ Label return_v0;
++ if (0 /* todo masm_->has_exception()*/) {
++ // If the code gets corrupted due to long regular expressions and lack of
++ // space on trampolines, an internal exception flag is set. If this case
++ // is detected, we will jump into exit sequence right away.
++ //__ bind_to(&entry_label_, internal_failure_label_.pos());
++ } else {
++ // Finalize code - write the entry point code now we know how many
++ // registers we need.
++
++ // Entry code:
++ __ bind(&entry_label_);
++
++ // Tell the system that we have a stack frame. Because the type is MANUAL,
++ // no code is generated.
++ FrameScope scope(masm_, StackFrame::MANUAL);
++
++ // Actually emit code to start a new stack frame.
++ // Push arguments
++ // Save callee-save registers.
++ // Start new stack frame.
++ // Store link register in existing stack-cell.
++ // Order here should correspond to order of offset constants in header file.
++ // TODO(plind): we save s0..s7, but ONLY use s3 here - use the regs
++ // or don't save.
++ RegList registers_to_retain = s0.bit() | s1.bit() | s2.bit() | s3.bit() |
++ s4.bit() | s5.bit() | s6.bit() | s7.bit();
++ RegList argument_registers = a0.bit() | a1.bit() | a2.bit() | a3.bit();
++
++ argument_registers |= a4.bit() | a5.bit() | a6.bit() | a7.bit();
++
++ __ MultiPush(ra.bit(), fp.bit(), argument_registers | registers_to_retain);
++ // Set frame pointer in space for it if this is not a direct call
++ // from generated code.
++ // TODO(plind): this 8 is the # of argument regs, should have definition.
++ __ Add_d(frame_pointer(), sp, Operand(8 * kPointerSize));
++ STATIC_ASSERT(kSuccessfulCaptures == kInputString - kSystemPointerSize);
++ __ mov(a0, zero_reg);
++ __ push(a0); // Make room for success counter and initialize it to 0.
++ STATIC_ASSERT(kStringStartMinusOne ==
++ kSuccessfulCaptures - kSystemPointerSize);
++ __ push(a0); // Make room for "string start - 1" constant.
++ STATIC_ASSERT(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
++ __ push(a0); // The backtrack counter
++
++ // Check if we have space on the stack for registers.
++ Label stack_limit_hit; ++ Label stack_ok; ++ ++ ExternalReference stack_limit = ++ ExternalReference::address_of_jslimit(masm_->isolate()); ++ __ li(a0, Operand(stack_limit)); ++ __ Ld_d(a0, MemOperand(a0, 0)); ++ __ Sub_d(a0, sp, a0); ++ // Handle it if the stack pointer is already below the stack limit. ++ __ Branch(&stack_limit_hit, le, a0, Operand(zero_reg)); ++ // Check if there is room for the variable number of registers above ++ // the stack limit. ++ __ Branch(&stack_ok, hs, a0, Operand(num_registers_ * kPointerSize)); ++ // Exit with OutOfMemory exception. There is not enough space on the stack ++ // for our working registers. ++ __ li(a0, Operand(EXCEPTION)); ++ __ jmp(&return_v0); ++ ++ __ bind(&stack_limit_hit); ++ CallCheckStackGuardState(a0); ++ // If returned value is non-zero, we exit with the returned value as result. ++ __ Branch(&return_v0, ne, a0, Operand(zero_reg)); ++ ++ __ bind(&stack_ok); ++ // Allocate space on stack for registers. ++ __ Sub_d(sp, sp, Operand(num_registers_ * kPointerSize)); ++ // Load string end. ++ __ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd)); ++ // Load input start. ++ __ Ld_d(a0, MemOperand(frame_pointer(), kInputStart)); ++ // Find negative length (offset of start relative to end). ++ __ Sub_d(current_input_offset(), a0, end_of_input_address()); ++ // Set a0 to address of char before start of the input string ++ // (effectively string position -1). ++ __ Ld_d(a1, MemOperand(frame_pointer(), kStartIndex)); ++ __ Sub_d(a0, current_input_offset(), Operand(char_size())); ++ __ slli_d(t1, a1, (mode_ == UC16) ? 1 : 0); ++ __ Sub_d(a0, a0, t1); ++ // Store this value in a local variable, for use when clearing ++ // position registers. ++ __ St_d(a0, MemOperand(frame_pointer(), kStringStartMinusOne)); ++ ++ // Initialize code pointer register ++ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE); ++ ++ Label load_char_start_regexp, start_regexp; ++ // Load newline if index is at start, previous character otherwise. ++ __ Branch(&load_char_start_regexp, ne, a1, Operand(zero_reg)); ++ __ li(current_character(), Operand('\n')); ++ __ jmp(&start_regexp); ++ ++ // Global regexp restarts matching here. ++ __ bind(&load_char_start_regexp); ++ // Load previous char as initial value of current character register. ++ LoadCurrentCharacterUnchecked(-1, 1); ++ __ bind(&start_regexp); ++ ++ // Initialize on-stack registers. ++ if (num_saved_registers_ > 0) { // Always is, if generated from a regexp. ++ // Fill saved registers with initial value = start offset - 1. ++ if (num_saved_registers_ > 8) { ++ // Address of register 0. ++ __ Add_d(a1, frame_pointer(), Operand(kRegisterZero)); ++ __ li(a2, Operand(num_saved_registers_)); ++ Label init_loop; ++ __ bind(&init_loop); ++ __ St_d(a0, MemOperand(a1, 0)); ++ __ Add_d(a1, a1, Operand(-kPointerSize)); ++ __ Sub_d(a2, a2, Operand(1)); ++ __ Branch(&init_loop, ne, a2, Operand(zero_reg)); ++ } else { ++ for (int i = 0; i < num_saved_registers_; i++) { ++ __ St_d(a0, register_location(i)); ++ } ++ } ++ } ++ ++ // Initialize backtrack stack pointer. ++ __ Ld_d(backtrack_stackpointer(), ++ MemOperand(frame_pointer(), kStackHighEnd)); ++ ++ __ jmp(&start_label_); ++ ++ // Exit code: ++ if (success_label_.is_linked()) { ++ // Save captures when successful. ++ __ bind(&success_label_); ++ if (num_saved_registers_ > 0) { ++ // Copy captures to output. 
++ __ Ld_d(a1, MemOperand(frame_pointer(), kInputStart)); ++ __ Ld_d(a0, MemOperand(frame_pointer(), kRegisterOutput)); ++ __ Ld_d(a2, MemOperand(frame_pointer(), kStartIndex)); ++ __ Sub_d(a1, end_of_input_address(), a1); ++ // a1 is length of input in bytes. ++ if (mode_ == UC16) { ++ __ srli_d(a1, a1, 1); ++ } ++ // a1 is length of input in characters. ++ __ Add_d(a1, a1, Operand(a2)); ++ // a1 is length of string in characters. ++ ++ DCHECK_EQ(0, num_saved_registers_ % 2); ++ // Always an even number of capture registers. This allows us to ++ // unroll the loop once to add an operation between a load of a register ++ // and the following use of that register. ++ for (int i = 0; i < num_saved_registers_; i += 2) { ++ __ Ld_d(a2, register_location(i)); ++ __ Ld_d(a3, register_location(i + 1)); ++ if (i == 0 && global_with_zero_length_check()) { ++ // Keep capture start in a4 for the zero-length check later. ++ __ mov(t3, a2); ++ } ++ if (mode_ == UC16) { ++ __ srai_d(a2, a2, 1); ++ __ Add_d(a2, a2, a1); ++ __ srai_d(a3, a3, 1); ++ __ Add_d(a3, a3, a1); ++ } else { ++ __ Add_d(a2, a1, Operand(a2)); ++ __ Add_d(a3, a1, Operand(a3)); ++ } ++ // V8 expects the output to be an int32_t array. ++ __ St_w(a2, MemOperand(a0, 0)); ++ __ Add_d(a0, a0, kIntSize); ++ __ St_w(a3, MemOperand(a0, 0)); ++ __ Add_d(a0, a0, kIntSize); ++ } ++ } ++ ++ if (global()) { ++ // Restart matching if the regular expression is flagged as global. ++ __ Ld_d(a0, MemOperand(frame_pointer(), kSuccessfulCaptures)); ++ __ Ld_d(a1, MemOperand(frame_pointer(), kNumOutputRegisters)); ++ __ Ld_d(a2, MemOperand(frame_pointer(), kRegisterOutput)); ++ // Increment success counter. ++ __ Add_d(a0, a0, 1); ++ __ St_d(a0, MemOperand(frame_pointer(), kSuccessfulCaptures)); ++ // Capture results have been stored, so the number of remaining global ++ // output registers is reduced by the number of stored captures. ++ __ Sub_d(a1, a1, num_saved_registers_); ++ // Check whether we have enough room for another set of capture results. ++ //__ mov(v0, a0); ++ __ Branch(&return_v0, lt, a1, Operand(num_saved_registers_)); ++ ++ __ St_d(a1, MemOperand(frame_pointer(), kNumOutputRegisters)); ++ // Advance the location for output. ++ __ Add_d(a2, a2, num_saved_registers_ * kIntSize); ++ __ St_d(a2, MemOperand(frame_pointer(), kRegisterOutput)); ++ ++ // Prepare a0 to initialize registers with its value in the next run. ++ __ Ld_d(a0, MemOperand(frame_pointer(), kStringStartMinusOne)); ++ ++ if (global_with_zero_length_check()) { ++ // Special case for zero-length matches. ++ // t3: capture start index ++ // Not a zero-length match, restart. ++ __ Branch(&load_char_start_regexp, ne, current_input_offset(), ++ Operand(t3)); ++ // Offset from the end is zero if we already reached the end. ++ __ Branch(&exit_label_, eq, current_input_offset(), ++ Operand(zero_reg)); ++ // Advance current position after a zero-length match. ++ Label advance; ++ __ bind(&advance); ++ __ Add_d(current_input_offset(), current_input_offset(), ++ Operand((mode_ == UC16) ? 2 : 1)); ++ if (global_unicode()) CheckNotInSurrogatePair(0, &advance); ++ } ++ ++ __ Branch(&load_char_start_regexp); ++ } else { ++ __ li(a0, Operand(SUCCESS)); ++ } ++ } ++ // Exit and return v0. ++ __ bind(&exit_label_); ++ if (global()) { ++ __ Ld_d(a0, MemOperand(frame_pointer(), kSuccessfulCaptures)); ++ } ++ ++ __ bind(&return_v0); ++ // Skip sp past regexp registers and local variables.. ++ __ mov(sp, frame_pointer()); ++ // Restore registers s0..s7 and return (restoring ra to pc). 
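The capture copy-out above turns stored positions (negative byte offsets from the end of the input) into the character indices V8 expects in the int32 output array: shift right by one in UC16 mode, then rebase against the subject length. A hedged standalone sketch of that conversion, with names of my own choosing rather than the patch's:

#include <cassert>
#include <cstdint>

static int32_t ToCharIndex(int64_t byte_offset_from_end,
                           int64_t subject_length_chars, bool uc16) {
  int64_t chars_from_end = uc16 ? (byte_offset_from_end >> 1) : byte_offset_from_end;
  return static_cast<int32_t>(subject_length_chars + chars_from_end);
}

int main() {
  // "abcdef" fully matched in LATIN1 mode: capture start -6, capture end 0.
  assert(ToCharIndex(-6, 6, false) == 0);
  assert(ToCharIndex(0, 6, false) == 6);
  return 0;
}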
++ __ MultiPop(ra.bit(), fp.bit(), registers_to_retain); ++ __ Ret(); ++ ++ // Backtrack code (branch target for conditional backtracks). ++ if (backtrack_label_.is_linked()) { ++ __ bind(&backtrack_label_); ++ Backtrack(); ++ } ++ ++ Label exit_with_exception; ++ ++ // Preempt-code. ++ if (check_preempt_label_.is_linked()) { ++ SafeCallTarget(&check_preempt_label_); ++ // Put regexp engine registers on stack. ++ RegList regexp_registers_to_retain = current_input_offset().bit() | ++ current_character().bit() | ++ backtrack_stackpointer().bit(); ++ __ MultiPush(regexp_registers_to_retain); ++ CallCheckStackGuardState(a0); ++ __ MultiPop(regexp_registers_to_retain); ++ // If returning non-zero, we should end execution with the given ++ // result as return value. ++ __ Branch(&return_v0, ne, a0, Operand(zero_reg)); ++ ++ // String might have moved: Reload end of string from frame. ++ __ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd)); ++ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE); ++ SafeReturn(); ++ } ++ ++ // Backtrack stack overflow code. ++ if (stack_overflow_label_.is_linked()) { ++ SafeCallTarget(&stack_overflow_label_); ++ // Reached if the backtrack-stack limit has been hit. ++ // Put regexp engine registers on stack first. ++ RegList regexp_registers = ++ current_input_offset().bit() | current_character().bit(); ++ __ MultiPush(regexp_registers); ++ ++ // Call GrowStack(backtrack_stackpointer(), &stack_base) ++ static const int num_arguments = 3; ++ __ PrepareCallCFunction(num_arguments, a0); ++ __ mov(a0, backtrack_stackpointer()); ++ __ Add_d(a1, frame_pointer(), Operand(kStackHighEnd)); ++ __ li(a2, Operand(ExternalReference::isolate_address(masm_->isolate()))); ++ ExternalReference grow_stack = ++ ExternalReference::re_grow_stack(masm_->isolate()); ++ __ CallCFunction(grow_stack, num_arguments); ++ // Restore regexp registers. ++ __ MultiPop(regexp_registers); ++ // If return nullptr, we have failed to grow the stack, and ++ // must exit with a stack-overflow exception. ++ __ Branch(&exit_with_exception, eq, a0, Operand(zero_reg)); ++ // Otherwise use return value as new stack pointer. ++ __ mov(backtrack_stackpointer(), a0); ++ // Restore saved registers and continue. ++ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE); ++ __ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd)); ++ SafeReturn(); ++ } ++ ++ if (exit_with_exception.is_linked()) { ++ // If any of the code above needed to exit with an exception. ++ __ bind(&exit_with_exception); ++ // Exit with Result EXCEPTION(-1) to signal thrown exception. 
++ __ li(a0, Operand(EXCEPTION));
++ __ jmp(&return_v0);
++ }
++ }
++
++ CodeDesc code_desc;
++ masm_->GetCode(isolate(), &code_desc);
++ Handle<Code> code = Factory::CodeBuilder(isolate(), code_desc, Code::REGEXP)
++ .set_self_reference(masm_->CodeObject())
++ .Build();
++ LOG(masm_->isolate(),
++ RegExpCodeCreateEvent(Handle<AbstractCode>::cast(code), source));
++ return Handle<HeapObject>::cast(code);
++}
++
++void RegExpMacroAssemblerLA64::GoTo(Label* to) {
++ if (to == nullptr) {
++ Backtrack();
++ return;
++ }
++ __ jmp(to);
++ return;
++}
++
++void RegExpMacroAssemblerLA64::IfRegisterGE(int reg, int comparand,
++ Label* if_ge) {
++ __ Ld_d(a0, register_location(reg));
++ BranchOrBacktrack(if_ge, ge, a0, Operand(comparand));
++}
++
++void RegExpMacroAssemblerLA64::IfRegisterLT(int reg, int comparand,
++ Label* if_lt) {
++ __ Ld_d(a0, register_location(reg));
++ BranchOrBacktrack(if_lt, lt, a0, Operand(comparand));
++}
++
++void RegExpMacroAssemblerLA64::IfRegisterEqPos(int reg, Label* if_eq) {
++ __ Ld_d(a0, register_location(reg));
++ BranchOrBacktrack(if_eq, eq, a0, Operand(current_input_offset()));
++}
++
++RegExpMacroAssembler::IrregexpImplementation
++RegExpMacroAssemblerLA64::Implementation() {
++ return kLA64Implementation;
++}
++
++void RegExpMacroAssemblerLA64::LoadCurrentCharacterImpl(int cp_offset,
++ Label* on_end_of_input,
++ bool check_bounds,
++ int characters,
++ int eats_at_least) {
++ // It's possible to preload a small number of characters when each success
++ // path requires a large number of characters, but not the reverse.
++ DCHECK_GE(eats_at_least, characters);
++
++ DCHECK(cp_offset < (1 << 30)); // Be sane! (And ensure negation works).
++ if (check_bounds) {
++ if (cp_offset >= 0) {
++ CheckPosition(cp_offset + eats_at_least - 1, on_end_of_input);
++ } else {
++ CheckPosition(cp_offset, on_end_of_input);
++ }
++ }
++ LoadCurrentCharacterUnchecked(cp_offset, characters);
++}
++
++void RegExpMacroAssemblerLA64::PopCurrentPosition() {
++ Pop(current_input_offset());
++}
++
++void RegExpMacroAssemblerLA64::PopRegister(int register_index) {
++ Pop(a0);
++ __ St_d(a0, register_location(register_index));
++}
++
++void RegExpMacroAssemblerLA64::PushBacktrack(Label* label) {
++ if (label->is_bound()) {
++ int target = label->pos();
++ __ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
++ } else {
++ // TODO: Optimize like arm64 without ld_wu?
++ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); ++ Label after_constant; ++ __ Branch(&after_constant); ++ int offset = masm_->pc_offset(); ++ int cp_offset = offset + Code::kHeaderSize - kHeapObjectTag; ++ //__ emit(0); ++ __ nop(); ++ masm_->label_at_put(label, offset); ++ __ bind(&after_constant); ++ if (is_int12(cp_offset)) { ++ __ Ld_wu(a0, MemOperand(code_pointer(), cp_offset)); ++ } else { ++ __ Add_d(a0, code_pointer(), cp_offset); ++ __ Ld_wu(a0, MemOperand(a0, 0)); ++ } ++ } ++ Push(a0); ++ CheckStackLimit(); ++} ++ ++void RegExpMacroAssemblerLA64::PushCurrentPosition() { ++ Push(current_input_offset()); ++} ++ ++void RegExpMacroAssemblerLA64::PushRegister(int register_index, ++ StackCheckFlag check_stack_limit) { ++ __ Ld_d(a0, register_location(register_index)); ++ Push(a0); ++ if (check_stack_limit) CheckStackLimit(); ++} ++ ++void RegExpMacroAssemblerLA64::ReadCurrentPositionFromRegister(int reg) { ++ __ Ld_d(current_input_offset(), register_location(reg)); ++} ++ ++void RegExpMacroAssemblerLA64::ReadStackPointerFromRegister(int reg) { ++ __ Ld_d(backtrack_stackpointer(), register_location(reg)); ++ __ Ld_d(a0, MemOperand(frame_pointer(), kStackHighEnd)); ++ __ Add_d(backtrack_stackpointer(), backtrack_stackpointer(), Operand(a0)); ++} ++ ++void RegExpMacroAssemblerLA64::SetCurrentPositionFromEnd(int by) { ++ Label after_position; ++ __ Branch(&after_position, ge, current_input_offset(), ++ Operand(-by * char_size())); ++ __ li(current_input_offset(), -by * char_size()); ++ // On RegExp code entry (where this operation is used), the character before ++ // the current position is expected to be already loaded. ++ // We have advanced the position, so it's safe to read backwards. ++ LoadCurrentCharacterUnchecked(-1, 1); ++ __ bind(&after_position); ++} ++ ++void RegExpMacroAssemblerLA64::SetRegister(int register_index, int to) { ++ DCHECK(register_index >= num_saved_registers_); // Reserved for positions! ++ __ li(a0, Operand(to)); ++ __ St_d(a0, register_location(register_index)); ++} ++ ++bool RegExpMacroAssemblerLA64::Succeed() { ++ __ jmp(&success_label_); ++ return global(); ++} ++ ++void RegExpMacroAssemblerLA64::WriteCurrentPositionToRegister(int reg, ++ int cp_offset) { ++ if (cp_offset == 0) { ++ __ St_d(current_input_offset(), register_location(reg)); ++ } else { ++ __ Add_d(a0, current_input_offset(), Operand(cp_offset * char_size())); ++ __ St_d(a0, register_location(reg)); ++ } ++} ++ ++void RegExpMacroAssemblerLA64::ClearRegisters(int reg_from, int reg_to) { ++ DCHECK(reg_from <= reg_to); ++ __ Ld_d(a0, MemOperand(frame_pointer(), kStringStartMinusOne)); ++ for (int reg = reg_from; reg <= reg_to; reg++) { ++ __ St_d(a0, register_location(reg)); ++ } ++} ++ ++void RegExpMacroAssemblerLA64::WriteStackPointerToRegister(int reg) { ++ __ Ld_d(a1, MemOperand(frame_pointer(), kStackHighEnd)); ++ __ Sub_d(a0, backtrack_stackpointer(), a1); ++ __ St_d(a0, register_location(reg)); ++} ++ ++bool RegExpMacroAssemblerLA64::CanReadUnaligned() { return false; } ++ ++// Private methods: ++ ++void RegExpMacroAssemblerLA64::CallCheckStackGuardState(Register scratch) { ++ DCHECK(!isolate()->IsGeneratingEmbeddedBuiltins()); ++ DCHECK(!masm_->options().isolate_independent_code); ++ ++ int stack_alignment = base::OS::ActivationFrameAlignment(); ++ ++ // Align the stack pointer and save the original sp value on the stack. 
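The code that follows aligns sp down to the platform's activation-frame alignment before calling out to C, keeping the original sp so it can be restored afterwards. Roughly, as a standalone sketch rather than patch code:

#include <cassert>
#include <cstdint>

static uintptr_t AlignDown(uintptr_t sp, uintptr_t alignment) {
  assert((alignment & (alignment - 1)) == 0);  // power of two, as DCHECKed below
  return sp & ~(alignment - 1);                // same effect as "and sp, sp, -alignment"
}

int main() {
  assert(AlignDown(0x1003F, 16) == 0x10030);
  return 0;
}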
++ __ mov(scratch, sp);
++ __ Sub_d(sp, sp, Operand(kPointerSize));
++ DCHECK(base::bits::IsPowerOfTwo(stack_alignment));
++ __ And(sp, sp, Operand(-stack_alignment));
++ __ St_d(scratch, MemOperand(sp, 0));
++
++ __ mov(a2, frame_pointer());
++ // Code of self.
++ __ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE);
++
++ // We need to make room for the return address on the stack.
++ DCHECK(IsAligned(stack_alignment, kPointerSize));
++ __ Sub_d(sp, sp, Operand(stack_alignment));
++
++ // The stack pointer now points to cell where the return address will be
++ // written. Arguments are in registers, meaning we treat the return address as
++ // argument 5. Since DirectCEntry will handle allocating space for the C
++ // argument slots, we don't need to care about that here. This is how the
++ // stack will look (sp meaning the value of sp at this moment):
++ // [sp + 3] - empty slot if needed for alignment.
++ // [sp + 2] - saved sp.
++ // [sp + 1] - second word reserved for return value.
++ // [sp + 0] - first word reserved for return value.
++
++ // a0 will point to the return address, placed by DirectCEntry.
++ __ mov(a0, sp);
++
++ ExternalReference stack_guard_check =
++ ExternalReference::re_check_stack_guard_state(masm_->isolate());
++ __ li(t7, Operand(stack_guard_check));
++
++ EmbeddedData d = EmbeddedData::FromBlob();
++ CHECK(Builtins::IsIsolateIndependent(Builtins::kDirectCEntry));
++ Address entry = d.InstructionStartOfBuiltin(Builtins::kDirectCEntry);
++ __ li(kScratchReg, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
++ __ Call(kScratchReg);
++
++ // DirectCEntry allocated space for the C argument slots so we have to
++ // drop them with the return address from the stack with loading saved sp.
++ // At this point stack must look:
++ // [sp + 7] - empty slot if needed for alignment.
++ // [sp + 6] - saved sp.
++ // [sp + 5] - second word reserved for return value.
++ // [sp + 4] - first word reserved for return value.
++ // [sp + 3] - C argument slot.
++ // [sp + 2] - C argument slot.
++ // [sp + 1] - C argument slot.
++ // [sp + 0] - C argument slot.
++ __ Ld_d(sp, MemOperand(sp, stack_alignment + kCArgsSlotsSize));
++
++ __ li(code_pointer(), Operand(masm_->CodeObject()));
++}
++
++// Helper function for reading a value out of a stack frame.
++template <typename T>
++static T& frame_entry(Address re_frame, int frame_offset) {
++ return reinterpret_cast<T&>(Memory<int32_t>(re_frame + frame_offset));
++}
++
++template <typename T>
++static T* frame_entry_address(Address re_frame, int frame_offset) {
++ return reinterpret_cast<T*>(re_frame + frame_offset);
++}
++
++int64_t RegExpMacroAssemblerLA64::CheckStackGuardState(Address* return_address,
++ Address raw_code,
++ Address re_frame) {
++ Code re_code = Code::cast(Object(raw_code));
++ return NativeRegExpMacroAssembler::CheckStackGuardState(
++ frame_entry<Isolate*>(re_frame, kIsolate),
++ static_cast<int>(frame_entry<int64_t>(re_frame, kStartIndex)),
++ static_cast<RegExp::CallOrigin>(
++ frame_entry<int64_t>(re_frame, kDirectCall)),
++ return_address, re_code,
++ frame_entry_address<Address>(re_frame, kInputString),
++ frame_entry_address<const byte*>(re_frame, kInputStart),
++ frame_entry_address<const byte*>(re_frame, kInputEnd));
++}
++
++MemOperand RegExpMacroAssemblerLA64::register_location(int register_index) {
++ DCHECK(register_index < (1 << 30));
++ if (num_registers_ <= register_index) {
++ num_registers_ = register_index + 1;
++ }
++ return MemOperand(frame_pointer(),
++ kRegisterZero - register_index * kPointerSize);
++}
++
++void RegExpMacroAssemblerLA64::CheckPosition(int cp_offset,
++ Label* on_outside_input) {
++ if (cp_offset >= 0) {
++ BranchOrBacktrack(on_outside_input, ge, current_input_offset(),
++ Operand(-cp_offset * char_size()));
++ } else {
++ __ Ld_d(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
++ __ Add_d(a0, current_input_offset(), Operand(cp_offset * char_size()));
++ BranchOrBacktrack(on_outside_input, le, a0, Operand(a1));
++ }
++}
++
++void RegExpMacroAssemblerLA64::BranchOrBacktrack(Label* to, Condition condition,
++ Register rs,
++ const Operand& rt) {
++ if (condition == al) { // Unconditional.
++ if (to == nullptr) {
++ Backtrack();
++ return;
++ }
++ __ jmp(to);
++ return;
++ }
++ if (to == nullptr) {
++ __ Branch(&backtrack_label_, condition, rs, rt);
++ return;
++ }
++ __ Branch(to, condition, rs, rt);
++}
++
++void RegExpMacroAssemblerLA64::SafeCall(Label* to, Condition cond, Register rs,
++ const Operand& rt) {
++ __ Branch(to, cond, rs, rt, true);
++}
++
++void RegExpMacroAssemblerLA64::SafeReturn() {
++ __ pop(ra);
++ __ Add_d(t1, ra, Operand(masm_->CodeObject()));
++ __ Jump(t1);
++}
++
++void RegExpMacroAssemblerLA64::SafeCallTarget(Label* name) {
++ __ bind(name);
++ __ Sub_d(ra, ra, Operand(masm_->CodeObject()));
++ __ push(ra);
++}
++
++void RegExpMacroAssemblerLA64::Push(Register source) {
++ DCHECK(source != backtrack_stackpointer());
++ __ Add_d(backtrack_stackpointer(), backtrack_stackpointer(),
++ Operand(-kIntSize));
++ __ St_w(source, MemOperand(backtrack_stackpointer(), 0));
++}
++
++void RegExpMacroAssemblerLA64::Pop(Register target) {
++ DCHECK(target != backtrack_stackpointer());
++ __ Ld_w(target, MemOperand(backtrack_stackpointer(), 0));
++ __ Add_d(backtrack_stackpointer(), backtrack_stackpointer(), kIntSize);
++}
++
++void RegExpMacroAssemblerLA64::CheckPreemption() {
++ // Check for preemption.
++ ExternalReference stack_limit =
++ ExternalReference::address_of_jslimit(masm_->isolate());
++ __ li(a0, Operand(stack_limit));
++ __ Ld_d(a0, MemOperand(a0, 0));
++ SafeCall(&check_preempt_label_, ls, sp, Operand(a0));
++}
++
++void RegExpMacroAssemblerLA64::CheckStackLimit() {
++ ExternalReference stack_limit =
++ ExternalReference::address_of_regexp_stack_limit_address(
++ masm_->isolate());
++
++ __ li(a0, Operand(stack_limit));
++ __ Ld_d(a0, MemOperand(a0, 0));
++ SafeCall(&stack_overflow_label_, ls, backtrack_stackpointer(), Operand(a0));
++}
++
++void RegExpMacroAssemblerLA64::LoadCurrentCharacterUnchecked(int cp_offset,
++ int characters) {
++ Register offset = current_input_offset();
++ if (cp_offset != 0) {
++ // t3 is not being used to store the capture start index at this point.
++ __ Add_d(t3, current_input_offset(), Operand(cp_offset * char_size()));
++ offset = t3;
++ }
++ // We assume that we cannot do unaligned loads on LA64, so this function
++ // must only be used to load a single character at a time.
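Push and Pop above maintain the backtrack stack as 32-bit entries growing downward, and WriteStackPointerToRegister stores the pointer as an offset from the stack's high end so the backing memory may be reallocated. A rough model of that discipline (illustrative only; it uses element-sized rather than byte-sized offsets):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

struct BacktrackStack {
  std::vector<int32_t> mem = std::vector<int32_t>(1024);
  int32_t* high_end = mem.data() + mem.size();
  int32_t* sp = high_end;
  void Push(int32_t v) { *--sp = v; }                            // grows downward
  int32_t Pop() { return *sp++; }
  std::ptrdiff_t SaveAsOffset() const { return sp - high_end; }  // <= 0, survives reallocation
};

int main() {
  BacktrackStack s;
  s.Push(42);
  assert(s.SaveAsOffset() == -1);
  assert(s.Pop() == 42);
  return 0;
}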
++ DCHECK_EQ(1, characters);
++ __ Add_d(t1, end_of_input_address(), Operand(offset));
++ if (mode_ == LATIN1) {
++ __ Ld_bu(current_character(), MemOperand(t1, 0));
++ } else {
++ DCHECK(mode_ == UC16);
++ __ Ld_hu(current_character(), MemOperand(t1, 0));
++ }
++}
++
++#undef __
++
++} // namespace internal
++} // namespace v8
++
++#endif // V8_TARGET_ARCH_LA64
+diff --git a/src/3rdparty/chromium/v8/src/regexp/la64/regexp-macro-assembler-la64.h b/src/3rdparty/chromium/v8/src/regexp/la64/regexp-macro-assembler-la64.h
+new file mode 100644
+index 0000000000..5ebf37807c
+--- /dev/null
++++ b/src/3rdparty/chromium/v8/src/regexp/la64/regexp-macro-assembler-la64.h
+@@ -0,0 +1,216 @@
++// Copyright 2011 the V8 project authors. All rights reserved.
++// Use of this source code is governed by a BSD-style license that can be
++// found in the LICENSE file.
++
++#ifndef V8_REGEXP_LA64_REGEXP_MACRO_ASSEMBLER_LA64_H_
++#define V8_REGEXP_LA64_REGEXP_MACRO_ASSEMBLER_LA64_H_
++
++#include "src/codegen/la64/assembler-la64.h"
++#include "src/codegen/macro-assembler.h"
++#include "src/regexp/regexp-macro-assembler.h"
++
++namespace v8 {
++namespace internal {
++
++class V8_EXPORT_PRIVATE RegExpMacroAssemblerLA64
++ : public NativeRegExpMacroAssembler {
++ public:
++ RegExpMacroAssemblerLA64(Isolate* isolate, Zone* zone, Mode mode,
++ int registers_to_save);
++ virtual ~RegExpMacroAssemblerLA64();
++ virtual int stack_limit_slack();
++ virtual void AdvanceCurrentPosition(int by);
++ virtual void AdvanceRegister(int reg, int by);
++ virtual void Backtrack();
++ virtual void Bind(Label* label);
++ virtual void CheckAtStart(int cp_offset, Label* on_at_start);
++ virtual void CheckCharacter(uint32_t c, Label* on_equal);
++ virtual void CheckCharacterAfterAnd(uint32_t c, uint32_t mask,
++ Label* on_equal);
++ virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
++ virtual void CheckCharacterLT(uc16 limit, Label* on_less);
++ // A "greedy loop" is a loop that is both greedy and with a simple
++ // body. It has a particularly simple implementation.
++ virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
++ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
++ virtual void CheckNotBackReference(int start_reg, bool read_backward,
++ Label* on_no_match);
++ virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
++ bool read_backward,
++ Label* on_no_match);
++ virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
++ virtual void CheckNotCharacterAfterAnd(uint32_t c, uint32_t mask,
++ Label* on_not_equal);
++ virtual void CheckNotCharacterAfterMinusAnd(uc16 c, uc16 minus, uc16 mask,
++ Label* on_not_equal);
++ virtual void CheckCharacterInRange(uc16 from, uc16 to, Label* on_in_range);
++ virtual void CheckCharacterNotInRange(uc16 from, uc16 to,
++ Label* on_not_in_range);
++ virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
++
++ // Checks whether the given offset from the current position is before
++ // the end of the string.
++ virtual void CheckPosition(int cp_offset, Label* on_outside_input);
++ virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match);
++ virtual void Fail();
++ virtual Handle<HeapObject> GetCode(Handle<String> source);
++ virtual void GoTo(Label* label);
++ virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
++ virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
++ virtual void IfRegisterEqPos(int reg, Label* if_eq);
++ virtual IrregexpImplementation Implementation();
++ virtual void LoadCurrentCharacterImpl(int cp_offset, Label* on_end_of_input,
++ bool check_bounds, int characters,
++ int eats_at_least);
++ virtual void PopCurrentPosition();
++ virtual void PopRegister(int register_index);
++ virtual void PushBacktrack(Label* label);
++ virtual void PushCurrentPosition();
++ virtual void PushRegister(int register_index,
++ StackCheckFlag check_stack_limit);
++ virtual void ReadCurrentPositionFromRegister(int reg);
++ virtual void ReadStackPointerFromRegister(int reg);
++ virtual void SetCurrentPositionFromEnd(int by);
++ virtual void SetRegister(int register_index, int to);
++ virtual bool Succeed();
++ virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
++ virtual void ClearRegisters(int reg_from, int reg_to);
++ virtual void WriteStackPointerToRegister(int reg);
++ virtual bool CanReadUnaligned();
++
++ // Called from RegExp if the stack-guard is triggered.
++ // If the code object is relocated, the return address is fixed before
++ // returning.
++ // {raw_code} is an Address because this is called via ExternalReference.
++ static int64_t CheckStackGuardState(Address* return_address, Address raw_code,
++ Address re_frame);
++
++ void print_regexp_frame_constants();
++
++ private:
++ // Offsets from frame_pointer() of function parameters and stored registers.
++ static const int kFramePointer = 0;
++
++ // Above the frame pointer - Stored registers and stack passed parameters.
++ // Registers s0 to s7, fp, and ra.
++ static const int kStoredRegisters = kFramePointer;
++ // Return address (stored from link register, read into pc on return).
++
++ // TODO(plind): This 9 - is 8 s-regs (s0..s7) plus fp.
++
++ static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
++ // Stack frame header.
++ static const int kStackFrameHeader = kReturnAddress;
++ // Stack parameters placed by caller.
++ static const int kIsolate = kStackFrameHeader + kPointerSize;
++
++ // Below the frame pointer.
++ // Register parameters stored by setup code.
++ static const int kDirectCall = kFramePointer - kPointerSize;
++ static const int kStackHighEnd = kDirectCall - kPointerSize;
++ static const int kNumOutputRegisters = kStackHighEnd - kPointerSize;
++ static const int kRegisterOutput = kNumOutputRegisters - kPointerSize;
++ static const int kInputEnd = kRegisterOutput - kPointerSize;
++ static const int kInputStart = kInputEnd - kPointerSize;
++ static const int kStartIndex = kInputStart - kPointerSize;
++ static const int kInputString = kStartIndex - kPointerSize;
++ // When adding local variables remember to push space for them in
++ // the frame in GetCode.
++ static const int kSuccessfulCaptures = kInputString - kPointerSize;
++ static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
++ static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
++ // First register address. Following registers are below it on the stack.
++ static const int kRegisterZero = kBacktrackCount - kSystemPointerSize; ++ ++ // Initial size of code buffer. ++ static const int kRegExpCodeSize = 1024; ++ ++ // Load a number of characters at the given offset from the ++ // current position, into the current-character register. ++ void LoadCurrentCharacterUnchecked(int cp_offset, int character_count); ++ ++ // Check whether preemption has been requested. ++ void CheckPreemption(); ++ ++ // Check whether we are exceeding the stack limit on the backtrack stack. ++ void CheckStackLimit(); ++ ++ // Generate a call to CheckStackGuardState. ++ void CallCheckStackGuardState(Register scratch); ++ ++ // The ebp-relative location of a regexp register. ++ MemOperand register_location(int register_index); ++ ++ // Register holding the current input position as negative offset from ++ // the end of the string. ++ inline Register current_input_offset() { return a6; } ++ ++ // The register containing the current character after LoadCurrentCharacter. ++ inline Register current_character() { return a7; } ++ ++ // Register holding address of the end of the input string. ++ inline Register end_of_input_address() { return t2; } ++ ++ // Register holding the frame address. Local variables, parameters and ++ // regexp registers are addressed relative to this. ++ inline Register frame_pointer() { return fp; } ++ ++ // The register containing the backtrack stack top. Provides a meaningful ++ // name to the register. ++ inline Register backtrack_stackpointer() { return t0; } ++ ++ // Register holding pointer to the current code object. ++ inline Register code_pointer() { return a5; } ++ ++ // Byte size of chars in the string to match (decided by the Mode argument). ++ inline int char_size() { return static_cast(mode_); } ++ ++ // Equivalent to a conditional branch to the label, unless the label ++ // is nullptr, in which case it is a conditional Backtrack. ++ void BranchOrBacktrack(Label* to, Condition condition, Register rs, ++ const Operand& rt); ++ ++ // Call and return internally in the generated code in a way that ++ // is GC-safe (i.e., doesn't leave absolute code addresses on the stack) ++ inline void SafeCall(Label* to, Condition cond, Register rs, ++ const Operand& rt); ++ inline void SafeReturn(); ++ inline void SafeCallTarget(Label* name); ++ ++ // Pushes the value of a register on the backtrack stack. Decrements the ++ // stack pointer by a word size and stores the register's value there. ++ inline void Push(Register source); ++ ++ // Pops a value from the backtrack stack. Reads the word at the stack pointer ++ // and increments it by a word size. ++ inline void Pop(Register target); ++ ++ Isolate* isolate() const { return masm_->isolate(); } ++ ++ MacroAssembler* masm_; ++ ++ // Which mode to generate code for (Latin1 or UC16). ++ Mode mode_; ++ ++ // One greater than maximal register index actually used. ++ int num_registers_; ++ ++ // Number of registers to output at the end (the saved registers ++ // are always 0..num_saved_registers_-1). ++ int num_saved_registers_; ++ ++ // Labels used internally. 
++ Label entry_label_; ++ Label start_label_; ++ Label success_label_; ++ Label backtrack_label_; ++ Label exit_label_; ++ Label check_preempt_label_; ++ Label stack_overflow_label_; ++ Label internal_failure_label_; ++}; ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_REGEXP_LA64_REGEXP_MACRO_ASSEMBLER_LA64_H_ +diff --git a/src/3rdparty/chromium/v8/src/regexp/regexp-macro-assembler-arch.h b/src/3rdparty/chromium/v8/src/regexp/regexp-macro-assembler-arch.h +index 8ec12a0ae6..cdc9565518 100644 +--- a/src/3rdparty/chromium/v8/src/regexp/regexp-macro-assembler-arch.h ++++ b/src/3rdparty/chromium/v8/src/regexp/regexp-macro-assembler-arch.h +@@ -21,6 +21,8 @@ + #include "src/regexp/mips/regexp-macro-assembler-mips.h" + #elif V8_TARGET_ARCH_MIPS64 + #include "src/regexp/mips64/regexp-macro-assembler-mips64.h" ++#elif V8_TARGET_ARCH_LA64 ++#include "src/regexp/la64/regexp-macro-assembler-la64.h" + #elif V8_TARGET_ARCH_S390 + #include "src/regexp/s390/regexp-macro-assembler-s390.h" + #else +diff --git a/src/3rdparty/chromium/v8/src/regexp/regexp-macro-assembler-tracer.cc b/src/3rdparty/chromium/v8/src/regexp/regexp-macro-assembler-tracer.cc +index 0a12201743..b357ec85e8 100644 +--- a/src/3rdparty/chromium/v8/src/regexp/regexp-macro-assembler-tracer.cc ++++ b/src/3rdparty/chromium/v8/src/regexp/regexp-macro-assembler-tracer.cc +@@ -15,8 +15,8 @@ RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer( + : RegExpMacroAssembler(isolate, assembler->zone()), assembler_(assembler) { + IrregexpImplementation type = assembler->Implementation(); + DCHECK_LT(type, 9); +- const char* impl_names[] = {"IA32", "ARM", "ARM64", "MIPS", "S390", +- "PPC", "X64", "X87", "Bytecode"}; ++ const char* impl_names[] = {"IA32", "ARM", "ARM64", "MIPS", "LA64", ++ "S390", "PPC", "X64", "X87", "Bytecode"}; + PrintF("RegExpMacroAssembler%s();\n", impl_names[type]); + } + +diff --git a/src/3rdparty/chromium/v8/src/regexp/regexp-macro-assembler.h b/src/3rdparty/chromium/v8/src/regexp/regexp-macro-assembler.h +index e83446cdc9..6047a71e6c 100644 +--- a/src/3rdparty/chromium/v8/src/regexp/regexp-macro-assembler.h ++++ b/src/3rdparty/chromium/v8/src/regexp/regexp-macro-assembler.h +@@ -43,6 +43,7 @@ class RegExpMacroAssembler { + kARMImplementation, + kARM64Implementation, + kMIPSImplementation, ++ kLA64Implementation, + kS390Implementation, + kPPCImplementation, + kX64Implementation, +diff --git a/src/3rdparty/chromium/v8/src/regexp/regexp.cc b/src/3rdparty/chromium/v8/src/regexp/regexp.cc +index 4319990a39..641a2af9cc 100644 +--- a/src/3rdparty/chromium/v8/src/regexp/regexp.cc ++++ b/src/3rdparty/chromium/v8/src/regexp/regexp.cc +@@ -854,6 +854,9 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data, + #elif V8_TARGET_ARCH_MIPS64 + macro_assembler.reset(new RegExpMacroAssemblerMIPS( + isolate, zone, mode, (data->capture_count + 1) * 2)); ++#elif V8_TARGET_ARCH_LA64 ++ macro_assembler.reset(new RegExpMacroAssemblerLA64( ++ isolate, zone, mode, (data->capture_count + 1) * 2)); + #else + #error "Unsupported architecture" + #endif +diff --git a/src/3rdparty/chromium/v8/src/runtime/runtime-atomics.cc b/src/3rdparty/chromium/v8/src/runtime/runtime-atomics.cc +index 34259c6e67..a0a5825f8f 100644 +--- a/src/3rdparty/chromium/v8/src/runtime/runtime-atomics.cc ++++ b/src/3rdparty/chromium/v8/src/runtime/runtime-atomics.cc +@@ -20,7 +20,8 @@ namespace internal { + + // Other platforms have CSA support, see builtins-sharedarraybuffer-gen.h. 
+ #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \ +- V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X ++ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X || \ ++ V8_TARGET_ARCH_LA64 + + namespace { + +diff --git a/src/3rdparty/chromium/v8/src/snapshot/deserializer.h b/src/3rdparty/chromium/v8/src/snapshot/deserializer.h +index 62814a881a..dfc04f19b1 100644 +--- a/src/3rdparty/chromium/v8/src/snapshot/deserializer.h ++++ b/src/3rdparty/chromium/v8/src/snapshot/deserializer.h +@@ -28,8 +28,9 @@ class Object; + // Used for platforms with embedded constant pools to trigger deserialization + // of objects found in code. + #if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \ +- defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_S390) || \ +- defined(V8_TARGET_ARCH_PPC64) || V8_EMBEDDED_CONSTANT_POOL ++ defined(V8_TARGET_ARCH_LA64) || defined(V8_TARGET_ARCH_PPC) || \ ++ defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_PPC64) || \ ++ V8_EMBEDDED_CONSTANT_POOL + #define V8_CODE_EMBEDS_OBJECT_POINTER 1 + #else + #define V8_CODE_EMBEDS_OBJECT_POINTER 0 +diff --git a/src/3rdparty/chromium/v8/src/wasm/baseline/la64/liftoff-assembler-la64.h b/src/3rdparty/chromium/v8/src/wasm/baseline/la64/liftoff-assembler-la64.h +new file mode 100644 +index 0000000000..7c82427a4f +--- /dev/null ++++ b/src/3rdparty/chromium/v8/src/wasm/baseline/la64/liftoff-assembler-la64.h +@@ -0,0 +1,1503 @@ ++// Copyright 2017 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#ifndef V8_WASM_BASELINE_LA64_LIFTOFF_ASSEMBLER_LA64_H_ ++#define V8_WASM_BASELINE_LA64_LIFTOFF_ASSEMBLER_LA64_H_ ++ ++#include "src/wasm/baseline/liftoff-assembler.h" ++ ++namespace v8 { ++namespace internal { ++namespace wasm { ++ ++namespace liftoff { ++ ++// Liftoff Frames. ++// ++// slot Frame ++// +--------------------+--------------------------- ++// n+4 | optional padding slot to keep the stack 16 byte aligned. ++// n+3 | parameter n | ++// ... | ... | ++// 4 | parameter 1 | or parameter 2 ++// 3 | parameter 0 | or parameter 1 ++// 2 | (result address) | or parameter 0 ++// -----+--------------------+--------------------------- ++// 1 | return addr (ra) | ++// 0 | previous frame (fp)| ++// -----+--------------------+ <-- frame ptr (fp) ++// -1 | 0xa: WASM_COMPILED | ++// -2 | instance | ++// -----+--------------------+--------------------------- ++// -3 | slot 0 | ^ ++// -4 | slot 1 | | ++// | | Frame slots ++// | | | ++// | | v ++// | optional padding slot to keep the stack 16 byte aligned. ++// -----+--------------------+ <-- stack ptr (sp) ++// ++ ++// fp-8 holds the stack marker, fp-16 is the instance parameter. 
++constexpr int kInstanceOffset = 16; ++ ++inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); } ++ ++inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); } ++ ++inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src, ++ ValueType type) { ++ switch (type.kind()) { ++ case ValueType::kI32: ++ assm->Ld_w(dst.gp(), src); ++ break; ++ case ValueType::kI64: ++ assm->Ld_d(dst.gp(), src); ++ break; ++ case ValueType::kF32: ++ assm->Fld_s(dst.fp(), src); ++ break; ++ case ValueType::kF64: ++ assm->Fld_d(dst.fp(), src); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++inline void Store(LiftoffAssembler* assm, Register base, int32_t offset, ++ LiftoffRegister src, ValueType type) { ++ MemOperand dst(base, offset); ++ switch (type.kind()) { ++ case ValueType::kI32: ++ assm->St_w(src.gp(), dst); ++ break; ++ case ValueType::kI64: ++ assm->St_d(src.gp(), dst); ++ break; ++ case ValueType::kF32: ++ assm->Fst_s(src.fp(), dst); ++ break; ++ case ValueType::kF64: ++ assm->Fst_d(src.fp(), dst); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) { ++ switch (type.kind()) { ++ case ValueType::kI32: ++ assm->addi_d(sp, sp, -kSystemPointerSize); ++ assm->St_w(reg.gp(), MemOperand(sp, 0)); ++ break; ++ case ValueType::kI64: ++ assm->push(reg.gp()); ++ break; ++ case ValueType::kF32: ++ assm->addi_d(sp, sp, -kSystemPointerSize); ++ assm->Fst_s(reg.fp(), MemOperand(sp, 0)); ++ break; ++ case ValueType::kF64: ++ assm->addi_d(sp, sp, -kSystemPointerSize); ++ assm->Fst_d(reg.fp(), MemOperand(sp, 0)); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++} // namespace liftoff ++ ++int LiftoffAssembler::PrepareStackFrame() { ++ int offset = pc_offset(); ++ // When constant that represents size of stack frame can't be represented ++ // as 16bit we need three instructions to add it to sp, so we reserve space ++ // for this case. ++ addi_d(sp, sp, 0); ++ nop(); ++ nop(); ++ return offset; ++} ++ ++void LiftoffAssembler::PatchPrepareStackFrame(int offset, int frame_size) { ++ // We can't run out of space, just pass anything big enough to not cause the ++ // assembler to try to grow the buffer. ++ constexpr int kAvailableSpace = 256; ++ TurboAssembler patching_assembler( ++ nullptr, AssemblerOptions{}, CodeObjectRequired::kNo, ++ ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace)); ++ // If bytes can be represented as 16bit, daddiu will be generated and two ++ // nops will stay untouched. Otherwise, lui-ori sequence will load it to ++ // register and, as third instruction, daddu will be generated. ++ patching_assembler.Add_d(sp, sp, Operand(-frame_size)); ++} ++ ++void LiftoffAssembler::FinishCode() {} ++ ++void LiftoffAssembler::AbortCompilation() {} ++ ++// static ++constexpr int LiftoffAssembler::StaticStackFrameSize() { ++ return liftoff::kInstanceOffset; ++} ++ ++int LiftoffAssembler::SlotSizeForType(ValueType type) { ++ switch (type.kind()) { ++ case ValueType::kS128: ++ return type.element_size_bytes(); ++ default: ++ return kStackSlotSize; ++ } ++} ++ ++bool LiftoffAssembler::NeedsAlignment(ValueType type) { ++ switch (type.kind()) { ++ case ValueType::kS128: ++ return true; ++ default: ++ // No alignment because all other types are kStackSlotSize. 
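PrepareStackFrame and PatchPrepareStackFrame above reserve a fixed three-instruction prologue (one addi_d plus two nops) because the final frame size is only known after the function body has been emitted; the reserved slots are patched afterwards. A toy model of that two-phase scheme, not patch code; the string-based "assembler" is purely illustrative:

#include <cassert>
#include <string>
#include <vector>

struct ToyAssembler {
  std::vector<std::string> code;
  int PrepareStackFrame() {
    int offset = static_cast<int>(code.size());
    code.push_back("addi_d sp, sp, 0");  // placeholder, frame size unknown yet
    code.push_back("nop");               // room for a longer load-immediate sequence
    code.push_back("nop");
    return offset;
  }
  void PatchPrepareStackFrame(int offset, int frame_size) {
    code[offset] = "addi_d sp, sp, -" + std::to_string(frame_size);
  }
};

int main() {
  ToyAssembler a;
  int p = a.PrepareStackFrame();
  // ... emit the body, discover 48 bytes of spill slots are needed ...
  a.PatchPrepareStackFrame(p, 48);
  assert(a.code[p] == "addi_d sp, sp, -48");
  return 0;
}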
++ return false; ++ } ++} ++ ++void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value, ++ RelocInfo::Mode rmode) { ++ switch (value.type().kind()) { ++ case ValueType::kI32: ++ TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode)); ++ break; ++ case ValueType::kI64: ++ TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode)); ++ break; ++ case ValueType::kF32: ++ TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits()); ++ break; ++ case ValueType::kF64: ++ TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits()); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset, ++ int size) { ++ DCHECK_LE(offset, kMaxInt); ++ Ld_d(dst, liftoff::GetInstanceOperand()); ++ DCHECK(size == 4 || size == 8); ++ if (size == 4) { ++ Ld_w(dst, MemOperand(dst, offset)); ++ } else { ++ Ld_d(dst, MemOperand(dst, offset)); ++ } ++} ++ ++void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, ++ uint32_t offset) { ++ LoadFromInstance(dst, offset, kTaggedSize); ++} ++ ++void LiftoffAssembler::SpillInstance(Register instance) { ++ St_d(instance, liftoff::GetInstanceOperand()); ++} ++ ++void LiftoffAssembler::FillInstanceInto(Register dst) { ++ Ld_d(dst, liftoff::GetInstanceOperand()); ++} ++ ++void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr, ++ Register offset_reg, ++ uint32_t offset_imm, ++ LiftoffRegList pinned) { ++ STATIC_ASSERT(kTaggedSize == kInt64Size); ++ Load(LiftoffRegister(dst), src_addr, offset_reg, offset_imm, ++ LoadType::kI64Load, pinned); ++} ++ ++void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr, ++ Register offset_reg, uint32_t offset_imm, ++ LoadType type, LiftoffRegList pinned, ++ uint32_t* protected_load_pc, bool is_load_mem) { ++ Register src = no_reg; ++ if (offset_reg != no_reg) { ++ src = GetUnusedRegister(kGpReg, pinned).gp(); ++ emit_ptrsize_add(src, src_addr, offset_reg); ++ } ++ MemOperand src_op = (offset_reg != no_reg) ? 
MemOperand(src, offset_imm) ++ : MemOperand(src_addr, offset_imm); ++ ++ if (protected_load_pc) *protected_load_pc = pc_offset(); ++ switch (type.value()) { ++ case LoadType::kI32Load8U: ++ case LoadType::kI64Load8U: ++ Ld_bu(dst.gp(), src_op); ++ break; ++ case LoadType::kI32Load8S: ++ case LoadType::kI64Load8S: ++ Ld_b(dst.gp(), src_op); ++ break; ++ case LoadType::kI32Load16U: ++ case LoadType::kI64Load16U: ++ TurboAssembler::Ld_hu(dst.gp(), src_op); ++ break; ++ case LoadType::kI32Load16S: ++ case LoadType::kI64Load16S: ++ TurboAssembler::Ld_h(dst.gp(), src_op); ++ break; ++ case LoadType::kI64Load32U: ++ TurboAssembler::Ld_wu(dst.gp(), src_op); ++ break; ++ case LoadType::kI32Load: ++ case LoadType::kI64Load32S: ++ TurboAssembler::Ld_w(dst.gp(), src_op); ++ break; ++ case LoadType::kI64Load: ++ TurboAssembler::Ld_d(dst.gp(), src_op); ++ break; ++ case LoadType::kF32Load: ++ TurboAssembler::Fld_s(dst.fp(), src_op); ++ break; ++ case LoadType::kF64Load: ++ TurboAssembler::Fld_d(dst.fp(), src_op); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void LiftoffAssembler::Store(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, LiftoffRegister src, ++ StoreType type, LiftoffRegList pinned, ++ uint32_t* protected_store_pc, bool is_store_mem) { ++ Register dst = no_reg; ++ MemOperand dst_op = MemOperand(dst_addr, offset_imm); ++ if (offset_reg != no_reg) { ++ if (is_store_mem) { ++ pinned.set(src); ++ } ++ dst = GetUnusedRegister(kGpReg, pinned).gp(); ++ emit_ptrsize_add(dst, dst_addr, offset_reg); ++ dst_op = MemOperand(dst, offset_imm); ++ } ++ ++ if (protected_store_pc) *protected_store_pc = pc_offset(); ++ switch (type.value()) { ++ case StoreType::kI32Store8: ++ case StoreType::kI64Store8: ++ St_b(src.gp(), dst_op); ++ break; ++ case StoreType::kI32Store16: ++ case StoreType::kI64Store16: ++ TurboAssembler::St_h(src.gp(), dst_op); ++ break; ++ case StoreType::kI32Store: ++ case StoreType::kI64Store32: ++ TurboAssembler::St_w(src.gp(), dst_op); ++ break; ++ case StoreType::kI64Store: ++ TurboAssembler::St_d(src.gp(), dst_op); ++ break; ++ case StoreType::kF32Store: ++ TurboAssembler::Fst_s(src.fp(), dst_op); ++ break; ++ case StoreType::kF64Store: ++ TurboAssembler::Fst_d(src.fp(), dst_op); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr, ++ Register offset_reg, uint32_t offset_imm, ++ LoadType type, LiftoffRegList pinned) { ++ bailout(kAtomics, "AtomicLoad"); ++} ++ ++void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, LiftoffRegister src, ++ StoreType type, LiftoffRegList pinned) { ++ bailout(kAtomics, "AtomicStore"); ++} ++ ++void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, LiftoffRegister value, ++ StoreType type) { ++ bailout(kAtomics, "AtomicAdd"); ++} ++ ++void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, LiftoffRegister value, ++ StoreType type) { ++ bailout(kAtomics, "AtomicSub"); ++} ++ ++void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, LiftoffRegister value, ++ StoreType type) { ++ bailout(kAtomics, "AtomicAnd"); ++} ++ ++void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, LiftoffRegister value, ++ StoreType type) { ++ bailout(kAtomics, "AtomicOr"); ++} ++ ++void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, 
LiftoffRegister value, ++ StoreType type) { ++ bailout(kAtomics, "AtomicXor"); ++} ++ ++void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, ++ LiftoffRegister value, StoreType type) { ++ bailout(kAtomics, "AtomicExchange"); ++} ++ ++void LiftoffAssembler::AtomicCompareExchange( ++ Register dst_addr, Register offset_reg, uint32_t offset_imm, ++ LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result, ++ StoreType type) { ++ bailout(kAtomics, "AtomicCompareExchange"); ++} ++ ++void LiftoffAssembler::AtomicFence() { dbar(0); } ++ ++void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst, ++ uint32_t caller_slot_idx, ++ ValueType type) { ++ MemOperand src(fp, kSystemPointerSize * (caller_slot_idx + 1)); ++ liftoff::Load(this, dst, src, type); ++} ++ ++void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset, ++ ValueType type) { ++ DCHECK_NE(dst_offset, src_offset); ++ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type)); ++ Fill(reg, src_offset, type); ++ Spill(dst_offset, reg, type); ++} ++ ++void LiftoffAssembler::Move(Register dst, Register src, ValueType type) { ++ DCHECK_NE(dst, src); ++ // TODO(ksreten): Handle different sizes here. ++ TurboAssembler::Move(dst, src); ++} ++ ++void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ++ ValueType type) { ++ DCHECK_NE(dst, src); ++ TurboAssembler::Move(dst, src); ++} ++ ++void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) { ++ RecordUsedSpillOffset(offset); ++ MemOperand dst = liftoff::GetStackSlot(offset); ++ switch (type.kind()) { ++ case ValueType::kI32: ++ St_w(reg.gp(), dst); ++ break; ++ case ValueType::kI64: ++ St_d(reg.gp(), dst); ++ break; ++ case ValueType::kF32: ++ Fst_s(reg.fp(), dst); ++ break; ++ case ValueType::kF64: ++ TurboAssembler::Fst_d(reg.fp(), dst); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void LiftoffAssembler::Spill(int offset, WasmValue value) { ++ RecordUsedSpillOffset(offset); ++ MemOperand dst = liftoff::GetStackSlot(offset); ++ switch (value.type().kind()) { ++ case ValueType::kI32: { ++ LiftoffRegister tmp = GetUnusedRegister(kGpReg); ++ TurboAssembler::li(tmp.gp(), Operand(value.to_i32())); ++ St_w(tmp.gp(), dst); ++ break; ++ } ++ case ValueType::kI64: { ++ LiftoffRegister tmp = GetUnusedRegister(kGpReg); ++ TurboAssembler::li(tmp.gp(), value.to_i64()); ++ St_d(tmp.gp(), dst); ++ break; ++ } ++ default: ++ // kWasmF32 and kWasmF64 are unreachable, since those ++ // constants are not tracked. ++ UNREACHABLE(); ++ } ++} ++ ++void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) { ++ MemOperand src = liftoff::GetStackSlot(offset); ++ switch (type.kind()) { ++ case ValueType::kI32: ++ Ld_w(reg.gp(), src); ++ break; ++ case ValueType::kI64: ++ Ld_d(reg.gp(), src); ++ break; ++ case ValueType::kF32: ++ Fld_s(reg.fp(), src); ++ break; ++ case ValueType::kF64: ++ TurboAssembler::Fld_d(reg.fp(), src); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) { ++ UNREACHABLE(); ++} ++ ++void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) { ++ DCHECK_LT(0, size); ++ RecordUsedSpillOffset(start + size); ++ ++ if (size <= 12 * kStackSlotSize) { ++ // Special straight-line code for up to 12 slots. Generates one ++ // instruction per slot (<= 12 instructions total). 
++ uint32_t remainder = size; ++ for (; remainder >= kStackSlotSize; remainder -= kStackSlotSize) { ++ St_d(zero_reg, liftoff::GetStackSlot(start + remainder)); ++ } ++ DCHECK(remainder == 4 || remainder == 0); ++ if (remainder) { ++ St_w(zero_reg, liftoff::GetStackSlot(start + remainder)); ++ } ++ } else { ++ // General case for bigger counts (12 instructions). ++ // Use a0 for start address (inclusive), a1 for end address (exclusive). ++ Push(a1, a0); ++ Add_d(a0, fp, Operand(-start - size)); ++ Add_d(a1, fp, Operand(-start)); ++ ++ Label loop; ++ bind(&loop); ++ St_d(zero_reg, MemOperand(a0, kSystemPointerSize)); ++ addi_d(a0, a0, kSystemPointerSize); ++ BranchShort(&loop, ne, a0, Operand(a1)); ++ ++ Pop(a1, a0); ++ } ++} ++ ++void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) { ++ TurboAssembler::Clz_d(dst.gp(), src.gp()); ++} ++ ++void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) { ++ TurboAssembler::Ctz_d(dst.gp(), src.gp()); ++} ++ ++bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst, ++ LiftoffRegister src) { ++ TurboAssembler::Popcnt_d(dst.gp(), src.gp()); ++ return true; ++} ++ ++void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) { ++ TurboAssembler::Mul_w(dst, lhs, rhs); ++} ++ ++void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs, ++ Label* trap_div_by_zero, ++ Label* trap_div_unrepresentable) { ++ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); ++ ++ // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable. ++ TurboAssembler::li(kScratchReg, 1); ++ TurboAssembler::li(kScratchReg2, 1); ++ TurboAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq); ++ TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq); ++ add_d(kScratchReg, kScratchReg, kScratchReg2); ++ TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, ++ Operand(zero_reg)); ++ ++ TurboAssembler::Div_w(dst, lhs, rhs); ++} ++ ++void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs, ++ Label* trap_div_by_zero) { ++ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); ++ TurboAssembler::Div_wu(dst, lhs, rhs); ++} ++ ++void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs, ++ Label* trap_div_by_zero) { ++ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); ++ TurboAssembler::Mod_w(dst, lhs, rhs); ++} ++ ++void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs, ++ Label* trap_div_by_zero) { ++ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); ++ TurboAssembler::Mod_wu(dst, lhs, rhs); ++} ++ ++#define I32_BINOP(name, instruction) \ ++ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \ ++ Register rhs) { \ ++ instruction(dst, lhs, rhs); \ ++ } ++ ++// clang-format off ++I32_BINOP(add, add_w) ++I32_BINOP(sub, sub_w) ++I32_BINOP(and, and_) ++I32_BINOP(or, or_) ++I32_BINOP(xor, xor_) ++// clang-format on ++ ++#undef I32_BINOP ++ ++#define I32_BINOP_I(name, instruction) \ ++ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \ ++ int32_t imm) { \ ++ instruction(dst, lhs, Operand(imm)); \ ++ } ++ ++// clang-format off ++I32_BINOP_I(add, Add_w) ++I32_BINOP_I(and, And) ++I32_BINOP_I(or, Or) ++I32_BINOP_I(xor, Xor) ++// clang-format on ++ ++#undef I32_BINOP_I ++ ++void LiftoffAssembler::emit_i32_clz(Register dst, Register src) { ++ TurboAssembler::Clz_w(dst, src); ++} ++ ++void 
LiftoffAssembler::emit_i32_ctz(Register dst, Register src) { ++ TurboAssembler::Ctz_w(dst, src); ++} ++ ++bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) { ++ TurboAssembler::Popcnt_w(dst, src); ++ return true; ++} ++ ++#define I32_SHIFTOP(name, instruction) \ ++ void LiftoffAssembler::emit_i32_##name(Register dst, Register src, \ ++ Register amount) { \ ++ instruction(dst, src, amount); \ ++ } ++#define I32_SHIFTOP_I(name, instruction, instruction1) \ ++ I32_SHIFTOP(name, instruction) \ ++ void LiftoffAssembler::emit_i32_##name(Register dst, Register src, \ ++ int amount) { \ ++ instruction1(dst, src, amount & 0x1f); \ ++ } ++ ++I32_SHIFTOP_I(shl, sll_w, slli_w) ++I32_SHIFTOP_I(sar, sra_w, srai_w) ++I32_SHIFTOP_I(shr, srl_w, srli_w) ++ ++#undef I32_SHIFTOP ++#undef I32_SHIFTOP_I ++ ++void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ TurboAssembler::Mul_d(dst.gp(), lhs.gp(), rhs.gp()); ++} ++ ++bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs, ++ Label* trap_div_by_zero, ++ Label* trap_div_unrepresentable) { ++ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); ++ ++ // Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable. ++ TurboAssembler::li(kScratchReg, 1); ++ TurboAssembler::li(kScratchReg2, 1); ++ TurboAssembler::LoadZeroOnCondition( ++ kScratchReg, lhs.gp(), Operand(std::numeric_limits::min()), eq); ++ TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs.gp(), Operand(-1), eq); ++ add_d(kScratchReg, kScratchReg, kScratchReg2); ++ TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, ++ Operand(zero_reg)); ++ ++ TurboAssembler::Div_d(dst.gp(), lhs.gp(), rhs.gp()); ++ return true; ++} ++ ++bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs, ++ Label* trap_div_by_zero) { ++ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); ++ TurboAssembler::Div_du(dst.gp(), lhs.gp(), rhs.gp()); ++ return true; ++} ++ ++bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs, ++ Label* trap_div_by_zero) { ++ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); ++ TurboAssembler::Mod_d(dst.gp(), lhs.gp(), rhs.gp()); ++ return true; ++} ++ ++bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs, ++ Label* trap_div_by_zero) { ++ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); ++ TurboAssembler::Mod_du(dst.gp(), lhs.gp(), rhs.gp()); ++ return true; ++} ++ ++#define I64_BINOP(name, instruction) \ ++ void LiftoffAssembler::emit_i64_##name( \ ++ LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \ ++ instruction(dst.gp(), lhs.gp(), rhs.gp()); \ ++ } ++ ++// clang-format off ++I64_BINOP(add, Add_d) ++I64_BINOP(sub, Sub_d) ++I64_BINOP(and, and_) ++I64_BINOP(or, or_) ++I64_BINOP(xor, xor_) ++// clang-format on ++ ++#undef I64_BINOP ++ ++#define I64_BINOP_I(name, instruction) \ ++ void LiftoffAssembler::emit_i64_##name(LiftoffRegister dst, \ ++ LiftoffRegister lhs, int32_t imm) { \ ++ instruction(dst.gp(), lhs.gp(), Operand(imm)); \ ++ } ++ ++// clang-format off ++I64_BINOP_I(add, Add_d) ++I64_BINOP_I(and, And) ++I64_BINOP_I(or, Or) ++I64_BINOP_I(xor, Xor) ++// clang-format on ++ ++#undef I64_BINOP_I ++ ++#define I64_SHIFTOP(name, instruction) \ ++ void LiftoffAssembler::emit_i64_##name( \ ++ 
LiftoffRegister dst, LiftoffRegister src, Register amount) { \ ++ instruction(dst.gp(), src.gp(), amount); \ ++ } ++#define I64_SHIFTOP_I(name, instruction, instructioni) \ ++ I64_SHIFTOP(name, instruction) \ ++ void LiftoffAssembler::emit_i64_##name(LiftoffRegister dst, \ ++ LiftoffRegister src, int amount) { \ ++ DCHECK(is_uint6(amount)); \ ++ instructioni(dst.gp(), src.gp(), amount); \ ++ } ++ ++I64_SHIFTOP_I(shl, sll_d, slli_d) ++I64_SHIFTOP_I(sar, sra_d, srai_d) ++I64_SHIFTOP_I(shr, srl_d, srli_d) ++ ++#undef I64_SHIFTOP ++#undef I64_SHIFTOP_I ++ ++void LiftoffAssembler::emit_u32_to_intptr(Register dst, Register src) { ++ add_w(dst, src, zero_reg); ++} ++ ++void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) { ++ TurboAssembler::Neg_s(dst, src); ++} ++ ++void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) { ++ TurboAssembler::Neg_d(dst, src); ++} ++ ++void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs) { ++ Label ool, done; ++ TurboAssembler::Float32Min(dst, lhs, rhs, &ool); ++ Branch(&done); ++ ++ bind(&ool); ++ TurboAssembler::Float32MinOutOfLine(dst, lhs, rhs); ++ bind(&done); ++} ++ ++void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs) { ++ Label ool, done; ++ TurboAssembler::Float32Max(dst, lhs, rhs, &ool); ++ Branch(&done); ++ ++ bind(&ool); ++ TurboAssembler::Float32MaxOutOfLine(dst, lhs, rhs); ++ bind(&done); ++} ++ ++void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs) { ++ bailout(kComplexOperation, "f32_copysign"); ++} ++ ++void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs) { ++ Label ool, done; ++ TurboAssembler::Float64Min(dst, lhs, rhs, &ool); ++ Branch(&done); ++ ++ bind(&ool); ++ TurboAssembler::Float64MinOutOfLine(dst, lhs, rhs); ++ bind(&done); ++} ++ ++void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs) { ++ Label ool, done; ++ TurboAssembler::Float64Max(dst, lhs, rhs, &ool); ++ Branch(&done); ++ ++ bind(&ool); ++ TurboAssembler::Float64MaxOutOfLine(dst, lhs, rhs); ++ bind(&done); ++} ++ ++void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs) { ++ bailout(kComplexOperation, "f64_copysign"); ++} ++ ++#define FP_BINOP(name, instruction) \ ++ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \ ++ DoubleRegister rhs) { \ ++ instruction(dst, lhs, rhs); \ ++ } ++#define FP_UNOP(name, instruction) \ ++ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \ ++ instruction(dst, src); \ ++ } ++#define FP_UNOP_RETURN_TRUE(name, instruction) \ ++ bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \ ++ instruction(dst, src); \ ++ return true; \ ++ } ++ ++FP_BINOP(f32_add, fadd_s) ++FP_BINOP(f32_sub, fsub_s) ++FP_BINOP(f32_mul, fmul_s) ++FP_BINOP(f32_div, fdiv_s) ++FP_UNOP(f32_abs, fabs_s) ++FP_UNOP_RETURN_TRUE(f32_ceil, Ceil_s) ++FP_UNOP_RETURN_TRUE(f32_floor, Floor_s) ++FP_UNOP_RETURN_TRUE(f32_trunc, Trunc_s) ++FP_UNOP_RETURN_TRUE(f32_nearest_int, Round_s) ++FP_UNOP(f32_sqrt, fsqrt_s) ++FP_BINOP(f64_add, fadd_d) ++FP_BINOP(f64_sub, fsub_d) ++FP_BINOP(f64_mul, fmul_d) ++FP_BINOP(f64_div, fdiv_d) ++FP_UNOP(f64_abs, fabs_d) ++FP_UNOP_RETURN_TRUE(f64_ceil, Ceil_d) ++FP_UNOP_RETURN_TRUE(f64_floor, Floor_d) ++FP_UNOP_RETURN_TRUE(f64_trunc, Trunc_d) 
++FP_UNOP_RETURN_TRUE(f64_nearest_int, Round_d) ++FP_UNOP(f64_sqrt, fsqrt_d) ++ ++#undef FP_BINOP ++#undef FP_UNOP ++#undef FP_UNOP_RETURN_TRUE ++ ++bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, ++ LiftoffRegister dst, ++ LiftoffRegister src, Label* trap) { ++ switch (opcode) { ++ case kExprI32ConvertI64: ++ TurboAssembler::bstrpick_w(dst.gp(), src.gp(), 31, 0); ++ return true; ++ case kExprI32SConvertF32: { ++ LiftoffRegister rounded = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src)); ++ LiftoffRegister converted_back = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded)); ++ ++ // Real conversion. ++ TurboAssembler::Trunc_s(rounded.fp(), src.fp()); ++ ftintrz_w_s(kScratchDoubleReg, rounded.fp()); ++ movfr2gr_s(dst.gp(), kScratchDoubleReg); ++ // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead, ++ // because INT32_MIN allows easier out-of-bounds detection. ++ TurboAssembler::Add_w(kScratchReg, dst.gp(), 1); ++ TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); ++ TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); ++ ++ // Checking if trap. ++ movgr2fr_w(kScratchDoubleReg, dst.gp()); ++ ffint_s_w(converted_back.fp(), kScratchDoubleReg); ++ TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ); ++ TurboAssembler::BranchFalseF(trap); ++ return true; ++ } ++ case kExprI32UConvertF32: { ++ LiftoffRegister rounded = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src)); ++ LiftoffRegister converted_back = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded)); ++ ++ // Real conversion. ++ TurboAssembler::Trunc_s(rounded.fp(), src.fp()); ++ TurboAssembler::Ftintrz_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg); ++ // Avoid UINT32_MAX as an overflow indicator and use 0 instead, ++ // because 0 allows easier out-of-bounds detection. ++ TurboAssembler::Add_w(kScratchReg, dst.gp(), 1); ++ TurboAssembler::Movz(dst.gp(), zero_reg, kScratchReg); ++ ++ // Checking if trap. ++ TurboAssembler::Ffint_d_uw(converted_back.fp(), dst.gp()); ++ fcvt_s_d(converted_back.fp(), converted_back.fp()); ++ TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ); ++ TurboAssembler::BranchFalseF(trap); ++ return true; ++ } ++ case kExprI32SConvertF64: { ++ LiftoffRegister rounded = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src)); ++ LiftoffRegister converted_back = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded)); ++ ++ // Real conversion. ++ TurboAssembler::Trunc_d(rounded.fp(), src.fp()); ++ ftintrz_w_d(kScratchDoubleReg, rounded.fp()); ++ movfr2gr_s(dst.gp(), kScratchDoubleReg); ++ ++ // Checking if trap. ++ ffint_d_w(converted_back.fp(), kScratchDoubleReg); ++ TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ); ++ TurboAssembler::BranchFalseF(trap); ++ return true; ++ } ++ case kExprI32UConvertF64: { ++ LiftoffRegister rounded = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src)); ++ LiftoffRegister converted_back = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded)); ++ ++ // Real conversion. ++ TurboAssembler::Trunc_d(rounded.fp(), src.fp()); ++ TurboAssembler::Ftintrz_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg); ++ ++ // Checking if trap. 
++ TurboAssembler::Ffint_d_uw(converted_back.fp(), dst.gp()); ++ TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ); ++ TurboAssembler::BranchFalseF(trap); ++ return true; ++ } ++ case kExprI32ReinterpretF32: ++ TurboAssembler::FmoveLow(dst.gp(), src.fp()); ++ return true; ++ case kExprI64SConvertI32: ++ slli_w(dst.gp(), src.gp(), 0); ++ return true; ++ case kExprI64UConvertI32: ++ TurboAssembler::bstrpick_d(dst.gp(), src.gp(), 31, 0); ++ return true; ++ case kExprI64SConvertF32: { ++ LiftoffRegister rounded = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src)); ++ LiftoffRegister converted_back = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded)); ++ ++ // Real conversion. ++ TurboAssembler::Trunc_s(rounded.fp(), src.fp()); ++ ftintrz_l_s(kScratchDoubleReg, rounded.fp()); ++ movfr2gr_d(dst.gp(), kScratchDoubleReg); ++ // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead, ++ // because INT64_MIN allows easier out-of-bounds detection. ++ TurboAssembler::Add_d(kScratchReg, dst.gp(), 1); ++ TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); ++ TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); ++ ++ // Checking if trap. ++ movgr2fr_d(kScratchDoubleReg, dst.gp()); ++ ffint_s_l(converted_back.fp(), kScratchDoubleReg); ++ TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ); ++ TurboAssembler::BranchFalseF(trap); ++ return true; ++ } ++ case kExprI64UConvertF32: { ++ // Real conversion. ++ TurboAssembler::Ftintrz_ul_s(dst.gp(), src.fp(), kScratchDoubleReg, ++ kScratchReg); ++ ++ // Checking if trap. ++ TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); ++ return true; ++ } ++ case kExprI64SConvertF64: { ++ LiftoffRegister rounded = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src)); ++ LiftoffRegister converted_back = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded)); ++ ++ // Real conversion. ++ TurboAssembler::Trunc_d(rounded.fp(), src.fp()); ++ ftintrz_l_d(kScratchDoubleReg, rounded.fp()); ++ movfr2gr_d(dst.gp(), kScratchDoubleReg); ++ // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead, ++ // because INT64_MIN allows easier out-of-bounds detection. ++ TurboAssembler::Add_d(kScratchReg, dst.gp(), 1); ++ TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); ++ TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); ++ ++ // Checking if trap. ++ movgr2fr_d(kScratchDoubleReg, dst.gp()); ++ ffint_d_l(converted_back.fp(), kScratchDoubleReg); ++ TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ); ++ TurboAssembler::BranchFalseF(trap); ++ return true; ++ } ++ case kExprI64UConvertF64: { ++ // Real conversion. ++ TurboAssembler::Ftintrz_ul_d(dst.gp(), src.fp(), kScratchDoubleReg, ++ kScratchReg); ++ ++ // Checking if trap. 
++ TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); ++ return true; ++ } ++ case kExprI64ReinterpretF64: ++ movfr2gr_d(dst.gp(), src.fp()); ++ return true; ++ case kExprF32SConvertI32: { ++ LiftoffRegister scratch = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst)); ++ movgr2fr_w(scratch.fp(), src.gp()); ++ ffint_s_w(dst.fp(), scratch.fp()); ++ return true; ++ } ++ case kExprF32UConvertI32: ++ TurboAssembler::Ffint_s_uw(dst.fp(), src.gp()); ++ return true; ++ case kExprF32ConvertF64: ++ fcvt_s_d(dst.fp(), src.fp()); ++ return true; ++ case kExprF32ReinterpretI32: ++ TurboAssembler::FmoveLow(dst.fp(), src.gp()); ++ return true; ++ case kExprF64SConvertI32: { ++ LiftoffRegister scratch = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst)); ++ movgr2fr_w(scratch.fp(), src.gp()); ++ ffint_d_w(dst.fp(), scratch.fp()); ++ return true; ++ } ++ case kExprF64UConvertI32: ++ TurboAssembler::Ffint_d_uw(dst.fp(), src.gp()); ++ return true; ++ case kExprF64ConvertF32: ++ fcvt_d_s(dst.fp(), src.fp()); ++ return true; ++ case kExprF64ReinterpretI64: ++ movgr2fr_d(dst.fp(), src.gp()); ++ return true; ++ default: ++ return false; ++ } ++} ++ ++void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) { ++ bailout(kComplexOperation, "i32_signextend_i8"); ++} ++ ++void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) { ++ bailout(kComplexOperation, "i32_signextend_i16"); ++} ++ ++void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst, ++ LiftoffRegister src) { ++ bailout(kComplexOperation, "i64_signextend_i8"); ++} ++ ++void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst, ++ LiftoffRegister src) { ++ bailout(kComplexOperation, "i64_signextend_i16"); ++} ++ ++void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst, ++ LiftoffRegister src) { ++ bailout(kComplexOperation, "i64_signextend_i32"); ++} ++ ++void LiftoffAssembler::emit_jump(Label* label) { ++ TurboAssembler::Branch(label); ++} ++ ++void LiftoffAssembler::emit_jump(Register target) { ++ TurboAssembler::Jump(target); ++} ++ ++void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label, ++ ValueType type, Register lhs, ++ Register rhs) { ++ if (rhs != no_reg) { ++ TurboAssembler::Branch(label, cond, lhs, Operand(rhs)); ++ } else { ++ TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg)); ++ } ++} ++ ++void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) { ++ sltui(dst, src, 1); ++} ++ ++void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst, ++ Register lhs, Register rhs) { ++ Register tmp = dst; ++ if (dst == lhs || dst == rhs) { ++ tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp(); ++ } ++ // Write 1 as result. ++ TurboAssembler::li(tmp, 1); ++ ++ // If negative condition is true, write 0 as result. ++ Condition neg_cond = NegateCondition(cond); ++ TurboAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond); ++ ++ // If tmp != dst, result will be moved. ++ TurboAssembler::Move(dst, tmp); ++} ++ ++void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) { ++ sltui(dst, src.gp(), 1); ++} ++ ++void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ Register tmp = dst; ++ if (dst == lhs.gp() || dst == rhs.gp()) { ++ tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp(); ++ } ++ // Write 1 as result. 
++ TurboAssembler::li(tmp, 1); ++ ++ // If negative condition is true, write 0 as result. ++ Condition neg_cond = NegateCondition(cond); ++ TurboAssembler::LoadZeroOnCondition(tmp, lhs.gp(), Operand(rhs.gp()), ++ neg_cond); ++ ++ // If tmp != dst, result will be moved. ++ TurboAssembler::Move(dst, tmp); ++} ++ ++namespace liftoff { ++ ++inline FPUCondition ConditionToConditionCmpFPU(Condition condition, ++ bool* predicate) { ++ switch (condition) { ++ case kEqual: ++ *predicate = true; ++ return CEQ; ++ case kUnequal: ++ *predicate = false; ++ return CEQ; ++ case kUnsignedLessThan: ++ *predicate = true; ++ return CLT; ++ case kUnsignedGreaterEqual: ++ *predicate = false; ++ return CLT; ++ case kUnsignedLessEqual: ++ *predicate = true; ++ return CLE; ++ case kUnsignedGreaterThan: ++ *predicate = false; ++ return CLE; ++ default: ++ *predicate = true; ++ break; ++ } ++ UNREACHABLE(); ++} ++ ++} // namespace liftoff ++ ++void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst, ++ DoubleRegister lhs, ++ DoubleRegister rhs) { ++ Label not_nan, cont; ++ TurboAssembler::CompareIsNanF32(lhs, rhs); ++ TurboAssembler::BranchFalseF(¬_nan); ++ // If one of the operands is NaN, return 1 for f32.ne, else 0. ++ if (cond == ne) { ++ TurboAssembler::li(dst, 1); ++ } else { ++ TurboAssembler::Move(dst, zero_reg); ++ } ++ TurboAssembler::Branch(&cont); ++ ++ bind(¬_nan); ++ ++ TurboAssembler::li(dst, 1); ++ bool predicate; ++ FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate); ++ TurboAssembler::CompareF32(lhs, rhs, fcond); ++ if (predicate) { ++ TurboAssembler::LoadZeroIfNotFPUCondition(dst); ++ } else { ++ TurboAssembler::LoadZeroIfFPUCondition(dst); ++ } ++ ++ bind(&cont); ++} ++ ++void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst, ++ DoubleRegister lhs, ++ DoubleRegister rhs) { ++ Label not_nan, cont; ++ TurboAssembler::CompareIsNanF64(lhs, rhs); ++ TurboAssembler::BranchFalseF(¬_nan); ++ // If one of the operands is NaN, return 1 for f64.ne, else 0. 
++ if (cond == ne) { ++ TurboAssembler::li(dst, 1); ++ } else { ++ TurboAssembler::Move(dst, zero_reg); ++ } ++ TurboAssembler::Branch(&cont); ++ ++ bind(¬_nan); ++ ++ TurboAssembler::li(dst, 1); ++ bool predicate; ++ FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate); ++ TurboAssembler::CompareF64(lhs, rhs, fcond); ++ if (predicate) { ++ TurboAssembler::LoadZeroIfNotFPUCondition(dst); ++ } else { ++ TurboAssembler::LoadZeroIfFPUCondition(dst); ++ } ++ ++ bind(&cont); ++} ++ ++void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst, ++ LiftoffRegister src) { ++ bailout(kSimd, "emit_i8x16_splat"); ++} ++ ++void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst, ++ LiftoffRegister src) { ++ bailout(kSimd, "emit_i16x8_splat"); ++} ++ ++void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst, ++ LiftoffRegister src) { ++ bailout(kSimd, "emit_i32x4_splat"); ++} ++ ++void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst, ++ LiftoffRegister src) { ++ bailout(kSimd, "emit_i64x2_splat"); ++} ++ ++void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst, ++ LiftoffRegister src) { ++ bailout(kSimd, "emit_f32x4_splat"); ++} ++ ++void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst, ++ LiftoffRegister src) {} ++ ++void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ uint8_t imm_lane_idx) {} ++ ++void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ uint8_t imm_lane_idx) {} ++ ++void 
LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ uint8_t imm_lane_idx) {} ++ ++void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ uint8_t imm_lane_idx) {} ++ ++void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ uint8_t imm_lane_idx) {} ++ ++void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ uint8_t imm_lane_idx) {} ++ ++void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ uint8_t imm_lane_idx) {} ++ ++void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ uint8_t imm_lane_idx) {} ++ ++void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst, ++ LiftoffRegister src1, ++ LiftoffRegister src2, ++ uint8_t imm_lane_idx) {} ++ ++void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst, ++ LiftoffRegister src1, ++ LiftoffRegister src2, ++ uint8_t imm_lane_idx) {} ++ ++void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst, ++ LiftoffRegister src1, ++ LiftoffRegister src2, ++ uint8_t imm_lane_idx) {} ++ ++void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst, ++ LiftoffRegister src1, ++ LiftoffRegister src2, ++ uint8_t imm_lane_idx) {} ++ ++void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst, ++ LiftoffRegister src1, ++ LiftoffRegister src2, ++ uint8_t imm_lane_idx) {} ++ ++void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst, ++ LiftoffRegister src1, ++ LiftoffRegister src2, ++ uint8_t imm_lane_idx) {} ++ ++void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) { ++ TurboAssembler::Ld_d(limit_address, MemOperand(limit_address, 0)); ++ TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address)); ++} ++ ++void LiftoffAssembler::CallTrapCallbackForTesting() { ++ PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp()); ++ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0); ++} ++ ++void LiftoffAssembler::AssertUnreachable(AbortReason reason) { ++ if (emit_debug_code()) Abort(reason); ++} ++ ++void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { ++ LiftoffRegList gp_regs = regs & kGpCacheRegList; ++ unsigned num_gp_regs = gp_regs.GetNumRegsSet(); ++ if (num_gp_regs) { ++ unsigned offset = num_gp_regs * kSystemPointerSize; ++ addi_d(sp, sp, -offset); ++ while (!gp_regs.is_empty()) { ++ LiftoffRegister reg = gp_regs.GetFirstRegSet(); ++ offset -= kSystemPointerSize; ++ St_d(reg.gp(), MemOperand(sp, offset)); ++ gp_regs.clear(reg); ++ } ++ DCHECK_EQ(offset, 0); ++ } ++ LiftoffRegList fp_regs = regs & kFpCacheRegList; ++ unsigned num_fp_regs = fp_regs.GetNumRegsSet(); ++ if (num_fp_regs) { ++ addi_d(sp, sp, -(num_fp_regs * kStackSlotSize)); ++ unsigned offset = 0; ++ while (!fp_regs.is_empty()) { ++ LiftoffRegister reg = fp_regs.GetFirstRegSet(); ++ TurboAssembler::Fst_d(reg.fp(), MemOperand(sp, offset)); ++ fp_regs.clear(reg); ++ offset += sizeof(double); ++ } ++ DCHECK_EQ(offset, num_fp_regs * sizeof(double)); ++ } ++} ++ ++void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { ++ LiftoffRegList fp_regs = regs & kFpCacheRegList; ++ unsigned fp_offset = 0; ++ while (!fp_regs.is_empty()) { ++ LiftoffRegister reg = fp_regs.GetFirstRegSet(); ++ TurboAssembler::Fld_d(reg.fp(), MemOperand(sp, fp_offset)); ++ fp_regs.clear(reg); ++ fp_offset += sizeof(double); ++ } ++ if (fp_offset) addi_d(sp, sp, fp_offset); ++ 
LiftoffRegList gp_regs = regs & kGpCacheRegList; ++ unsigned gp_offset = 0; ++ while (!gp_regs.is_empty()) { ++ LiftoffRegister reg = gp_regs.GetLastRegSet(); ++ Ld_d(reg.gp(), MemOperand(sp, gp_offset)); ++ gp_regs.clear(reg); ++ gp_offset += kSystemPointerSize; ++ } ++ addi_d(sp, sp, gp_offset); ++} ++ ++void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) { ++ DCHECK_LT(num_stack_slots, ++ (1 << 16) / kSystemPointerSize); // 16 bit immediate ++ TurboAssembler::DropAndRet(static_cast(num_stack_slots)); ++} ++ ++void LiftoffAssembler::CallC(const wasm::FunctionSig* sig, ++ const LiftoffRegister* args, ++ const LiftoffRegister* rets, ++ ValueType out_argument_type, int stack_bytes, ++ ExternalReference ext_ref) { ++ addi_d(sp, sp, -stack_bytes); ++ ++ int arg_bytes = 0; ++ for (ValueType param_type : sig->parameters()) { ++ liftoff::Store(this, sp, arg_bytes, *args++, param_type); ++ arg_bytes += param_type.element_size_bytes(); ++ } ++ DCHECK_LE(arg_bytes, stack_bytes); ++ ++ // Pass a pointer to the buffer with the arguments to the C function. ++ // On mips, the first argument is passed in {a0}. ++ constexpr Register kFirstArgReg = a0; ++ mov(kFirstArgReg, sp); ++ ++ // Now call the C function. ++ constexpr int kNumCCallArgs = 1; ++ PrepareCallCFunction(kNumCCallArgs, kScratchReg); ++ CallCFunction(ext_ref, kNumCCallArgs); ++ ++ // Move return value to the right register. ++ const LiftoffRegister* next_result_reg = rets; ++ if (sig->return_count() > 0) { ++ DCHECK_EQ(1, sig->return_count()); ++ constexpr Register kReturnReg = a0; ++ if (kReturnReg != next_result_reg->gp()) { ++ Move(*next_result_reg, LiftoffRegister(kReturnReg), sig->GetReturn(0)); ++ } ++ ++next_result_reg; ++ } ++ ++ // Load potential output value from the buffer on the stack. ++ if (out_argument_type != kWasmStmt) { ++ liftoff::Load(this, *next_result_reg, MemOperand(sp, 0), out_argument_type); ++ } ++ ++ addi_d(sp, sp, stack_bytes); ++} ++ ++void LiftoffAssembler::CallNativeWasmCode(Address addr) { ++ Call(addr, RelocInfo::WASM_CALL); ++} ++ ++void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig, ++ compiler::CallDescriptor* call_descriptor, ++ Register target) { ++ if (target == no_reg) { ++ pop(kScratchReg); ++ Call(kScratchReg); ++ } else { ++ Call(target); ++ } ++} ++ ++void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) { ++ // A direct call to a wasm runtime stub defined in this module. ++ // Just encode the stub index. This will be patched at relocation. ++ Call(static_cast
<Address>(sid), RelocInfo::WASM_STUB_CALL);
++}
++
++void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
++  addi_d(sp, sp, -size);
++  TurboAssembler::Move(addr, sp);
++}
++
++void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
++  addi_d(sp, sp, size);
++}
++
++void LiftoffStackSlots::Construct() {
++  for (auto& slot : slots_) {
++    const LiftoffAssembler::VarState& src = slot.src_;
++    switch (src.loc()) {
++      case LiftoffAssembler::VarState::kStack:
++        asm_->Ld_d(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
++        asm_->push(kScratchReg);
++        break;
++      case LiftoffAssembler::VarState::kRegister:
++        liftoff::push(asm_, src.reg(), src.type());
++        break;
++      case LiftoffAssembler::VarState::kIntConst: {
++        asm_->li(kScratchReg, Operand(src.i32_const()));
++        asm_->push(kScratchReg);
++        break;
++      }
++    }
++  }
++}
++
++}  // namespace wasm
++}  // namespace internal
++}  // namespace v8
++
++#endif  // V8_WASM_BASELINE_LA64_LIFTOFF_ASSEMBLER_LA64_H_
+diff --git a/src/3rdparty/chromium/v8/src/wasm/baseline/liftoff-assembler-defs.h b/src/3rdparty/chromium/v8/src/wasm/baseline/liftoff-assembler-defs.h
+index 781fb87dbc..286fe8bdea 100644
+--- a/src/3rdparty/chromium/v8/src/wasm/baseline/liftoff-assembler-defs.h
++++ b/src/3rdparty/chromium/v8/src/wasm/baseline/liftoff-assembler-defs.h
+@@ -46,6 +46,14 @@ constexpr RegList kLiftoffAssemblerGpCacheRegs =
+ constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
+     f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24, f26);
+ 
++#elif V8_TARGET_ARCH_LA64
++/*todo*/
++constexpr RegList kLiftoffAssemblerGpCacheRegs =
++    Register::ListOf(a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, s7);
++
++constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
++    f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24, f26);
++
+ #elif V8_TARGET_ARCH_ARM
+ 
+ // r7: cp, r10: root, r11: fp, r12: ip, r13: sp, r14: lr, r15: pc.
+@@ -90,7 +98,7 @@ constexpr Condition kUnsignedLessEqual = below_equal; + constexpr Condition kUnsignedGreaterThan = above; + constexpr Condition kUnsignedGreaterEqual = above_equal; + +-#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 ++#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LA64 + + constexpr Condition kEqual = eq; + constexpr Condition kUnequal = ne; +diff --git a/src/3rdparty/chromium/v8/src/wasm/baseline/liftoff-assembler.h b/src/3rdparty/chromium/v8/src/wasm/baseline/liftoff-assembler.h +index 6573ff4aa4..4e26ea95d2 100644 +--- a/src/3rdparty/chromium/v8/src/wasm/baseline/liftoff-assembler.h ++++ b/src/3rdparty/chromium/v8/src/wasm/baseline/liftoff-assembler.h +@@ -1045,6 +1045,8 @@ class LiftoffStackSlots { + #include "src/wasm/baseline/mips/liftoff-assembler-mips.h" + #elif V8_TARGET_ARCH_MIPS64 + #include "src/wasm/baseline/mips64/liftoff-assembler-mips64.h" ++#elif V8_TARGET_ARCH_LA64 ++#include "src/wasm/baseline/la64/liftoff-assembler-la64.h" + #elif V8_TARGET_ARCH_S390 + #include "src/wasm/baseline/s390/liftoff-assembler-s390.h" + #else +diff --git a/src/3rdparty/chromium/v8/src/wasm/jump-table-assembler.cc b/src/3rdparty/chromium/v8/src/wasm/jump-table-assembler.cc +index 90cdad4672..33f8b9e6e9 100644 +--- a/src/3rdparty/chromium/v8/src/wasm/jump-table-assembler.cc ++++ b/src/3rdparty/chromium/v8/src/wasm/jump-table-assembler.cc +@@ -268,6 +268,37 @@ void JumpTableAssembler::NopBytes(int bytes) { + } + } + ++#elif V8_TARGET_ARCH_LA64 ++void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index, ++ Address lazy_compile_target) { ++ DCHECK(is_int32(func_index)); ++ int start = pc_offset(); ++ li(kWasmCompileLazyFuncIndexRegister, (int32_t)func_index); // max. 2 instr ++ // Jump produces max. 3 instructions for 32-bit platform ++ // and max. 4 instructions for 64-bit platform. 
++ Jump(lazy_compile_target, RelocInfo::NONE); ++ int nop_bytes = start + kLazyCompileTableSlotSize - pc_offset(); ++ DCHECK_EQ(nop_bytes % kInstrSize, 0); ++ for (int i = 0; i < nop_bytes; i += kInstrSize) nop(); ++} ++bool JumpTableAssembler::EmitJumpSlot(Address target) { ++ PatchAndJump(target); ++ return true; ++} ++void JumpTableAssembler::EmitFarJumpSlot(Address target) { ++ JumpToInstructionStream(target); ++} ++void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) { ++ UNREACHABLE(); ++} ++void JumpTableAssembler::NopBytes(int bytes) { ++ DCHECK_LE(0, bytes); ++ DCHECK_EQ(0, bytes % kInstrSize); ++ for (; bytes > 0; bytes -= kInstrSize) { ++ nop(); ++ } ++} ++ + #elif V8_TARGET_ARCH_PPC64 + void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index, + Address lazy_compile_target) { +diff --git a/src/3rdparty/chromium/v8/src/wasm/jump-table-assembler.h b/src/3rdparty/chromium/v8/src/wasm/jump-table-assembler.h +index 253f0bc018..71c1c7eeb3 100644 +--- a/src/3rdparty/chromium/v8/src/wasm/jump-table-assembler.h ++++ b/src/3rdparty/chromium/v8/src/wasm/jump-table-assembler.h +@@ -215,6 +215,12 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler { + static constexpr int kJumpTableSlotSize = 8 * kInstrSize; + static constexpr int kFarJumpTableSlotSize = 6 * kInstrSize; + static constexpr int kLazyCompileTableSlotSize = 8 * kInstrSize; ++#elif V8_TARGET_ARCH_LA64 ++ // TODO ++ static constexpr int kJumpTableLineSize = 8 * kInstrSize; ++ static constexpr int kJumpTableSlotSize = 8 * kInstrSize; ++ static constexpr int kFarJumpTableSlotSize = 4 * kInstrSize; ++ static constexpr int kLazyCompileTableSlotSize = 8 * kInstrSize; + #else + #error Unknown architecture. + #endif +diff --git a/src/3rdparty/chromium/v8/src/wasm/wasm-linkage.h b/src/3rdparty/chromium/v8/src/wasm/wasm-linkage.h +index 7e56ea6eae..b8efe962a7 100644 +--- a/src/3rdparty/chromium/v8/src/wasm/wasm-linkage.h ++++ b/src/3rdparty/chromium/v8/src/wasm/wasm-linkage.h +@@ -75,6 +75,15 @@ constexpr Register kGpReturnRegisters[] = {v0, v1}; + constexpr DoubleRegister kFpParamRegisters[] = {f2, f4, f6, f8, f10, f12, f14}; + constexpr DoubleRegister kFpReturnRegisters[] = {f2, f4}; + ++#elif V8_TARGET_ARCH_LA64 ++// =========================================================================== ++// == LA64 TODO ============================================================= ++// =========================================================================== ++constexpr Register kGpParamRegisters[] = {a0, a2, a3, a4, a5, a6, a7}; ++constexpr Register kGpReturnRegisters[] = {a0, a1}; ++constexpr DoubleRegister kFpParamRegisters[] = {f2, f4, f6, f8, f10, f12, f14}; ++constexpr DoubleRegister kFpReturnRegisters[] = {f2, f4}; ++ + #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 + // =========================================================================== + // == ppc & ppc64 ============================================================ +diff --git a/src/3rdparty/chromium/v8/test/cctest/BUILD.gn b/src/3rdparty/chromium/v8/test/cctest/BUILD.gn +index 89fe36f65b..00ee01294c 100644 +--- a/src/3rdparty/chromium/v8/test/cctest/BUILD.gn ++++ b/src/3rdparty/chromium/v8/test/cctest/BUILD.gn +@@ -353,6 +353,12 @@ v8_source_set("cctest_sources") { + "test-disasm-mips64.cc", + "test-macro-assembler-mips64.cc", + ] ++ } else if (v8_current_cpu == "la64") { ++ sources += [ ### loongson(arch:la64) ### ++ "test-assembler-la64.cc", ++ "test-disasm-la64.cc", ++ "test-macro-assembler-la64.cc", ++ ] + } else if 
(v8_current_cpu == "x64") {
+     sources += [  ### gcmole(arch:x64) ###
+       "test-assembler-x64.cc",
+@@ -407,7 +413,8 @@ v8_source_set("cctest_sources") {
+       v8_current_cpu == "arm" || v8_current_cpu == "arm64" ||
+       v8_current_cpu == "s390" || v8_current_cpu == "s390x" ||
+       v8_current_cpu == "mips" || v8_current_cpu == "mips64" ||
+-      v8_current_cpu == "mipsel" || v8_current_cpu == "mipsel64") {
++      v8_current_cpu == "mipsel" || v8_current_cpu == "mipsel64" ||
++      v8_current_cpu == "la64") {
+     # Disable fmadd/fmsub so that expected results match generated code in
+     # RunFloat64MulAndFloat64Add1 and friends.
+     if (!is_win) {
+diff --git a/src/3rdparty/chromium/v8/test/cctest/test-assembler-la64.cc b/src/3rdparty/chromium/v8/test/cctest/test-assembler-la64.cc
+new file mode 100644
+index 0000000000..366bcb7cd2
+--- /dev/null
++++ b/src/3rdparty/chromium/v8/test/cctest/test-assembler-la64.cc
+@@ -0,0 +1,5127 @@
++// Copyright 2012 the V8 project authors. All rights reserved.
++// Redistribution and use in source and binary forms, with or without
++// modification, are permitted provided that the following conditions are
++// met:
++//
++//     * Redistributions of source code must retain the above copyright
++//       notice, this list of conditions and the following disclaimer.
++//     * Redistributions in binary form must reproduce the above
++//       copyright notice, this list of conditions and the following
++//       disclaimer in the documentation and/or other materials provided
++//       with the distribution.
++//     * Neither the name of Google Inc. nor the names of its
++//       contributors may be used to endorse or promote products derived
++//       from this software without specific prior written permission.
++//
++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++
++#include <iostream>  // NOLINT(readability/streams)
++
++#include "src/base/utils/random-number-generator.h"
++#include "src/codegen/assembler-inl.h"
++#include "src/codegen/macro-assembler.h"
++#include "src/diagnostics/disassembler.h"
++#include "src/execution/simulator.h"
++#include "src/heap/factory.h"
++#include "src/init/v8.h"
++#include "test/cctest/cctest.h"
++
++namespace v8 {
++namespace internal {
++
++// Define these function prototypes to match JSEntryFunction in execution.cc.
++// TODO(mips64): Refine these signatures per test case.
++using F1 = void*(int x, int p1, int p2, int p3, int p4);
++using F2 = void*(int x, int y, int p2, int p3, int p4);
++using F3 = void*(void* p, int p1, int p2, int p3, int p4);
++using F4 = void*(int64_t x, int64_t y, int64_t p2, int64_t p3, int64_t p4);
++using F5 = void*(void* p0, void* p1, int p2, int p3, int p4);
++
++#define __ assm.
++// v0->a2, v1->a3 ++TEST(LA0) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ // Addition. ++ __ addi_d(a2, a0, 0xC); ++ ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ int64_t res = reinterpret_cast(f.Call(0xAB0, 0, 0, 0, 0)); ++ CHECK_EQ(0xABCL, res); ++} ++ ++TEST(LA1) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ Label L, C; ++ ++ __ ori(a1, a0, 0); ++ __ ori(a2, zero_reg, 0); ++ __ b(&C); ++ ++ __ bind(&L); ++ __ add_d(a2, a2, a1); ++ __ addi_d(a1, a1, -1); ++ ++ __ bind(&C); ++ __ ori(a3, a1, 0); ++ ++ __ Branch(&L, ne, a3, Operand((int64_t)0)); ++ ++ __ or_(a0, a2, zero_reg); ++ __ or_(a1, a3, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ int64_t res = reinterpret_cast(f.Call(50, 0, 0, 0, 0)); ++ CHECK_EQ(1275L, res); ++} ++ ++TEST(LA2) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ Label exit, error; ++ ++ __ ori(a4, zero_reg, 0); // 00000000 ++ __ lu12i_w(a4, 0x12345); // 12345000 ++ __ ori(a4, a4, 0); // 12345000 ++ __ ori(a2, a4, 0xF0F); // 12345F0F ++ __ Branch(&error, ne, a2, Operand(0x12345F0F)); ++ ++ __ ori(a4, zero_reg, 0); ++ __ lu32i_d(a4, 0x12345); // 1 2345 0000 0000 ++ __ ori(a4, a4, 0xFFF); // 1 2345 0000 0FFF ++ __ addi_d(a2, a4, 1); ++ __ Branch(&error, ne, a2, Operand(0x1234500001000)); ++ ++ __ ori(a4, zero_reg, 0); ++ __ lu52i_d(a4, zero_reg, 0x123); // 1230 0000 0000 0000 ++ __ ori(a4, a4, 0xFFF); // 123F 0000 0000 0FFF ++ __ addi_d(a2, a4, 1); // 1230 0000 0000 1000 ++ __ Branch(&error, ne, a2, Operand(0x1230000000001000)); ++ ++ __ li(a2, 0x31415926); ++ __ b(&exit); ++ ++ __ bind(&error); ++ __ li(a2, 0x666); ++ ++ __ bind(&exit); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ int64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); ++ ++ CHECK_EQ(0x31415926L, res); ++} ++ ++TEST(LA3) { ++ // Test 32bit calculate instructions. ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ Label exit, error; ++ ++ __ li(a4, 0x00000004); ++ __ li(a5, 0x00001234); ++ __ li(a6, 0x12345678); ++ __ li(a7, 0x7FFFFFFF); ++ __ li(t0, static_cast(0xFFFFFFFC)); ++ __ li(t1, static_cast(0xFFFFEDCC)); ++ __ li(t2, static_cast(0xEDCBA988)); ++ __ li(t3, static_cast(0x80000000)); ++ ++ __ ori(a2, zero_reg, 0); // 0x00000000 ++ __ add_w(a2, a4, a5); // 0x00001238 ++ __ sub_w(a2, a2, a4); // 0x00001234 ++ __ Branch(&error, ne, a2, Operand(0x00001234)); ++ __ ori(a3, zero_reg, 0); // 0x00000000 ++ __ add_w(a3, a7, a4); // 32bit addu result is sign-extended into 64bit reg. 
++ __ Branch(&error, ne, a3, Operand(0xFFFFFFFF80000003)); ++ ++ __ sub_w(a3, t3, a4); // 0x7FFFFFFC ++ __ Branch(&error, ne, a3, Operand(0x7FFFFFFC)); ++ ++ __ ori(a2, zero_reg, 0); // 0x00000000 ++ __ ori(a3, zero_reg, 0); // 0x00000000 ++ __ addi_w(a2, zero_reg, 0x421); // 0x00007421 ++ __ addi_w(a2, a2, -0x1); // 0x00007420 ++ __ addi_w(a2, a2, -0x20); // 0x00007400 ++ __ Branch(&error, ne, a2, Operand(0x0000400)); ++ __ addi_w(a3, a7, 0x1); // 0x80000000 - result is sign-extended. ++ __ Branch(&error, ne, a3, Operand(0xFFFFFFFF80000000)); ++ ++ __ ori(a2, zero_reg, 0); // 0x00000000 ++ __ ori(a3, zero_reg, 0); // 0x00000000 ++ __ alsl_w(a2, a6, a4, 3); // 0xFFFFFFFF91A2B3C4 ++ __ alsl_w(a2, a2, a4, 2); // 0x468ACF14 ++ __ Branch(&error, ne, a2, Operand(0x468acf14)); ++ __ ori(a0, zero_reg, 31); ++ __ alsl_wu(a3, a6, a4, 3); // 0x91A2B3C4 ++ __ alsl_wu(a3, a3, a7, 1); // 0xFFFFFFFFA3456787 ++ __ Branch(&error, ne, a3, Operand(0xA3456787)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ ori(a3, zero_reg, 0); ++ __ mul_w(a2, a5, a7); ++ __ div_w(a2, a2, a4); ++ __ Branch(&error, ne, a2, Operand(0xFFFFFFFFFFFFFB73)); ++ __ mul_w(a3, a4, t1); ++ __ Branch(&error, ne, a3, Operand(0xFFFFFFFFFFFFB730)); ++ __ div_w(a3, t3, a4); ++ __ Branch(&error, ne, a3, Operand(0xFFFFFFFFE0000000)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ mulh_w(a2, a4, t1); ++ __ Branch(&error, ne, a2, Operand(0xFFFFFFFFFFFFFFFF)); ++ __ mulh_w(a2, a4, a6); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ ++ __ ori(a2, zero_reg, 0); ++ __ mulh_wu(a2, a4, t1); ++ __ Branch(&error, ne, a2, Operand(0x3)); ++ __ mulh_wu(a2, a4, a6); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ ++ __ ori(a2, zero_reg, 0); ++ __ mulw_d_w(a2, a4, t1); ++ __ Branch(&error, ne, a2, Operand(0xFFFFFFFFFFFFB730)); ++ __ mulw_d_w(a2, a4, a6); ++ __ Branch(&error, ne, a2, Operand(0x48D159E0)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ mulw_d_wu(a2, a4, t1); ++ __ Branch(&error, ne, a2, Operand(0x3FFFFB730)); //========0xFFFFB730 ++ __ ori(a2, zero_reg, 81); ++ __ mulw_d_wu(a2, a4, a6); ++ __ Branch(&error, ne, a2, Operand(0x48D159E0)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ div_wu(a2, a7, a5); ++ __ Branch(&error, ne, a2, Operand(0x70821)); ++ __ div_wu(a2, t0, a5); ++ __ Branch(&error, ne, a2, Operand(0xE1042)); ++ __ div_wu(a2, t0, t1); ++ __ Branch(&error, ne, a2, Operand(0x1)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ mod_w(a2, a6, a5); ++ __ Branch(&error, ne, a2, Operand(0xDA8)); ++ __ ori(a2, zero_reg, 0); ++ __ mod_w(a2, t2, a5); ++ __ Branch(&error, ne, a2, Operand(0xFFFFFFFFFFFFF258)); ++ __ ori(a2, zero_reg, 0); ++ __ mod_w(a2, t2, t1); ++ __ Branch(&error, ne, a2, Operand(0xFFFFFFFFFFFFF258)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ mod_wu(a2, a6, a5); ++ __ Branch(&error, ne, a2, Operand(0xDA8)); ++ __ mod_wu(a2, t2, a5); ++ __ Branch(&error, ne, a2, Operand(0xF0)); ++ __ mod_wu(a2, t2, t1); ++ __ Branch(&error, ne, a2, Operand(0xFFFFFFFFEDCBA988)); ++ ++ __ li(a2, 0x31415926); ++ __ b(&exit); ++ ++ __ bind(&error); ++ __ li(a2, 0x666); ++ ++ __ bind(&exit); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ int64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); ++ ++ CHECK_EQ(0x31415926L, res); ++} ++ ++TEST(LA4) { ++ // Test 64bit calculate instructions. 
++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ Label exit, error; ++ ++ __ li(a4, 0x17312); ++ __ li(a5, 0x1012131415161718); ++ __ li(a6, 0x51F4B764A26E7412); ++ __ li(a7, 0x7FFFFFFFFFFFFFFF); ++ __ li(t0, static_cast(0xFFFFFFFFFFFFF547)); ++ __ li(t1, static_cast(0xDF6B8F35A10E205C)); ++ __ li(t2, static_cast(0x81F25A87C4236841)); ++ __ li(t3, static_cast(0x8000000000000000)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ add_d(a2, a4, a5); ++ __ sub_d(a2, a2, a4); ++ __ Branch(&error, ne, a2, Operand(0x1012131415161718)); ++ __ ori(a3, zero_reg, 0); ++ __ add_d(a3, a6, a7); // overflow ++ __ Branch(&error, ne, a3, Operand(0xd1f4b764a26e7411)); ++ __ sub_d(a3, t3, a4); // overflow ++ __ Branch(&error, ne, a3, Operand(0x7ffffffffffe8cee)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ addi_d(a2, a5, 0x412); // positive value ++ __ Branch(&error, ne, a2, Operand(0x1012131415161b2a)); ++ __ addi_d(a2, a7, 0x547); // negative value ++ __ Branch(&error, ne, a2, Operand(0x8000000000000546)); ++ ++ __ ori(t4, zero_reg, 0); ++ __ addu16i_d(a2, t4, 0x1234); ++ __ Branch(&error, ne, a2, Operand(0x12340000)); ++ __ addu16i_d(a2, a2, 0x9876); ++ __ Branch(&error, ne, a2, Operand(0xffffffffaaaa0000)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ alsl_d(a2, t2, t0, 3); ++ __ Branch(&error, ne, a2, Operand(0xf92d43e211b374f)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ mul_d(a2, a5, a6); ++ __ Branch(&error, ne, a2, Operand(0xdbe6a8729a547fb0)); ++ __ mul_d(a2, t0, t1); ++ __ Branch(&error, ne, a2, Operand(0x57ad69f40f870584)); ++ __ mul_d(a2, a4, t0); ++ __ Branch(&error, ne, a2, Operand(0xfffffffff07523fe)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ mulh_d(a2, a5, a6); ++ __ Branch(&error, ne, a2, Operand(0x52514c6c6b54467)); ++ __ mulh_d(a2, t0, t1); ++ __ Branch(&error, ne, a2, Operand(0x15d)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ mulh_du(a2, a5, a6); ++ __ Branch(&error, ne, a2, Operand(0x52514c6c6b54467)); ++ __ mulh_du(a2, t0, t1); ++ __ Branch(&error, ne, a2, Operand(0xdf6b8f35a10e1700)); ++ __ mulh_du(a2, a4, t0); ++ __ Branch(&error, ne, a2, Operand(0x17311)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ div_d(a2, a5, a6); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ __ div_d(a2, t0, t1); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ __ div_d(a2, t1, a4); ++ __ Branch(&error, ne, a2, Operand(0xffffe985f631e6d9)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ div_du(a2, a5, a6); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ __ div_du(a2, t0, t1); ++ __ Branch(&error, ne, a2, Operand(0x1)); ++ __ div_du(a2, t1, a4); ++ __ Branch(&error, ne, a2, Operand(0x9a22ffd3973d)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ mod_d(a2, a6, a4); ++ __ Branch(&error, ne, a2, Operand(0x13558)); ++ __ mod_d(a2, t2, t0); ++ __ Branch(&error, ne, a2, Operand(0xfffffffffffffb0a)); ++ __ mod_d(a2, t1, a4); ++ __ Branch(&error, ne, a2, Operand(0xffffffffffff6a1a)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ mod_du(a2, a6, a4); ++ __ Branch(&error, ne, a2, Operand(0x13558)); ++ __ mod_du(a2, t2, t0); ++ __ Branch(&error, ne, a2, Operand(0x81f25a87c4236841)); ++ __ mod_du(a2, t1, a4); ++ __ Branch(&error, ne, a2, Operand(0x1712)); ++ ++ // Everything was correctly executed. Load the expected result. ++ __ li(a2, 0x31415926); ++ __ b(&exit); ++ ++ __ bind(&error); ++ __ li(a2, 0x666); ++ // Got an error. Return a wrong result.
++ ++ __ bind(&exit); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ int64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); ++ ++ CHECK_EQ(0x31415926L, res); ++} ++ ++TEST(LA5) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ Label exit, error; ++ ++ __ li(a4, 0x17312); ++ __ li(a5, 0x1012131415161718); ++ __ li(a6, 0x51F4B764A26E7412); ++ __ li(a7, 0x7FFFFFFFFFFFFFFF); ++ __ li(t0, static_cast(0xFFFFFFFFFFFFF547)); ++ __ li(t1, static_cast(0xDF6B8F35A10E205C)); ++ __ li(t2, static_cast(0x81F25A87C4236841)); ++ __ li(t3, static_cast(0x8000000000000000)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ slt(a2, a5, a6); ++ __ Branch(&error, ne, a2, Operand(0x1)); ++ __ slt(a2, a7, t0); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ __ slt(a2, t1, t1); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ ++ __ ori(a2, zero_reg, 0); ++ __ sltu(a2, a5, a6); ++ __ Branch(&error, ne, a2, Operand(0x1)); ++ __ sltu(a2, a7, t0); ++ __ Branch(&error, ne, a2, Operand(0x1)); ++ __ sltu(a2, t1, t1); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ ++ __ ori(a2, zero_reg, 0); ++ __ slti(a2, a5, 0x123); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ __ slti(a2, t0, 0x123); ++ __ Branch(&error, ne, a2, Operand(0x1)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ sltui(a2, a5, 0x123); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ __ sltui(a2, t0, 0x123); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ ++ __ ori(a2, zero_reg, 0); ++ __ and_(a2, a4, a5); ++ __ Branch(&error, ne, a2, Operand(0x1310)); ++ __ and_(a2, a6, a7); ++ __ Branch(&error, ne, a2, Operand(0x51F4B764A26E7412)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ or_(a2, t0, t1); ++ __ Branch(&error, ne, a2, Operand(0xfffffffffffff55f)); ++ __ or_(a2, t2, t3); ++ __ Branch(&error, ne, a2, Operand(0x81f25a87c4236841)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ nor(a2, a4, a5); ++ __ Branch(&error, ne, a2, Operand(0xefedecebeae888e5)); ++ __ nor(a2, a6, a7); ++ __ Branch(&error, ne, a2, Operand(0x8000000000000000)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ xor_(a2, t0, t1); ++ __ Branch(&error, ne, a2, Operand(0x209470ca5ef1d51b)); ++ __ xor_(a2, t2, t3); ++ __ Branch(&error, ne, a2, Operand(0x1f25a87c4236841)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ andn(a2, a4, a5); ++ __ Branch(&error, ne, a2, Operand(0x16002)); ++ __ andn(a2, a6, a7); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ ++ __ ori(a2, zero_reg, 0); ++ __ orn(a2, t0, t1); ++ __ Branch(&error, ne, a2, Operand(0xffffffffffffffe7)); ++ __ orn(a2, t2, t3); ++ __ Branch(&error, ne, a2, Operand(0xffffffffffffffff)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ andi(a2, a4, 0x123); ++ __ Branch(&error, ne, a2, Operand(0x102)); ++ __ andi(a2, a6, 0xDCB); ++ __ Branch(&error, ne, a2, Operand(0x402)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ xori(a2, t0, 0x123); ++ __ Branch(&error, ne, a2, Operand(0xfffffffffffff464)); ++ __ xori(a2, t2, 0xDCB); ++ __ Branch(&error, ne, a2, Operand(0x81f25a87c423658a)); ++ ++ // Everything was correctly executed. Load the expected result. ++ __ li(a2, 0x31415926); ++ __ b(&exit); ++ ++ __ bind(&error); ++ // Got an error. Return a wrong result. 
++ __ li(a2, 0x666); ++ ++ __ bind(&exit); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ int64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); ++ ++ CHECK_EQ(0x31415926L, res); ++} ++ ++TEST(LA6) { ++ // Test loads and stores instruction. ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct T { ++ int64_t si1; ++ int64_t si2; ++ int64_t si3; ++ int64_t result_ld_b_si1; ++ int64_t result_ld_b_si2; ++ int64_t result_ld_h_si1; ++ int64_t result_ld_h_si2; ++ int64_t result_ld_w_si1; ++ int64_t result_ld_w_si2; ++ int64_t result_ld_d_si1; ++ int64_t result_ld_d_si3; ++ int64_t result_ld_bu_si2; ++ int64_t result_ld_hu_si2; ++ int64_t result_ld_wu_si2; ++ int64_t result_st_b; ++ int64_t result_st_h; ++ int64_t result_st_w; ++ }; ++ T t; ++ ++ // Ld_b ++ __ Ld_b(a4, MemOperand(a0, offsetof(T, si1))); ++ __ St_d(a4, MemOperand(a0, offsetof(T, result_ld_b_si1))); ++ ++ __ Ld_b(a4, MemOperand(a0, offsetof(T, si2))); ++ __ St_d(a4, MemOperand(a0, offsetof(T, result_ld_b_si2))); ++ ++ // Ld_h ++ __ Ld_h(a5, MemOperand(a0, offsetof(T, si1))); ++ __ St_d(a5, MemOperand(a0, offsetof(T, result_ld_h_si1))); ++ ++ __ Ld_h(a5, MemOperand(a0, offsetof(T, si2))); ++ __ St_d(a5, MemOperand(a0, offsetof(T, result_ld_h_si2))); ++ ++ // Ld_w ++ __ Ld_w(a6, MemOperand(a0, offsetof(T, si1))); ++ __ St_d(a6, MemOperand(a0, offsetof(T, result_ld_w_si1))); ++ ++ __ Ld_w(a6, MemOperand(a0, offsetof(T, si2))); ++ __ St_d(a6, MemOperand(a0, offsetof(T, result_ld_w_si2))); ++ ++ // Ld_d ++ __ Ld_d(a7, MemOperand(a0, offsetof(T, si1))); ++ __ St_d(a7, MemOperand(a0, offsetof(T, result_ld_d_si1))); ++ ++ __ Ld_d(a7, MemOperand(a0, offsetof(T, si3))); ++ __ St_d(a7, MemOperand(a0, offsetof(T, result_ld_d_si3))); ++ ++ // Ld_bu ++ __ Ld_bu(t0, MemOperand(a0, offsetof(T, si2))); ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_ld_bu_si2))); ++ ++ // Ld_hu ++ __ Ld_hu(t1, MemOperand(a0, offsetof(T, si2))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_ld_hu_si2))); ++ ++ // Ld_wu ++ __ Ld_wu(t2, MemOperand(a0, offsetof(T, si2))); ++ __ St_d(t2, MemOperand(a0, offsetof(T, result_ld_wu_si2))); ++ ++ // St ++ __ li(t4, 0x11111111); ++ ++ // St_b ++ __ Ld_d(t5, MemOperand(a0, offsetof(T, si3))); ++ __ St_d(t5, MemOperand(a0, offsetof(T, result_st_b))); ++ __ St_b(t4, MemOperand(a0, offsetof(T, result_st_b))); ++ ++ // St_h ++ __ Ld_d(t6, MemOperand(a0, offsetof(T, si3))); ++ __ St_d(t6, MemOperand(a0, offsetof(T, result_st_h))); ++ __ St_h(t4, MemOperand(a0, offsetof(T, result_st_h))); ++ ++ // St_w ++ __ Ld_d(t7, MemOperand(a0, offsetof(T, si3))); ++ __ St_d(t7, MemOperand(a0, offsetof(T, result_st_w))); ++ __ St_w(t4, MemOperand(a0, offsetof(T, result_st_w))); ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ t.si1 = 0x11223344; ++ t.si2 = 0x99AABBCC; ++ t.si3 = 0x1122334455667788; ++ f.Call(&t, 0, 0, 0, 0); ++ ++ CHECK_EQ(static_cast(0x44), t.result_ld_b_si1); ++ CHECK_EQ(static_cast(0xFFFFFFFFFFFFFFCC), t.result_ld_b_si2); ++ ++ CHECK_EQ(static_cast(0x3344), t.result_ld_h_si1); ++ CHECK_EQ(static_cast(0xFFFFFFFFFFFFBBCC), t.result_ld_h_si2); ++ ++ 
CHECK_EQ(static_cast(0x11223344), t.result_ld_w_si1); ++ CHECK_EQ(static_cast(0xFFFFFFFF99AABBCC), t.result_ld_w_si2); ++ ++ CHECK_EQ(static_cast(0x11223344), t.result_ld_d_si1); ++ CHECK_EQ(static_cast(0x1122334455667788), t.result_ld_d_si3); ++ ++ CHECK_EQ(static_cast(0xCC), t.result_ld_bu_si2); ++ CHECK_EQ(static_cast(0xBBCC), t.result_ld_hu_si2); ++ CHECK_EQ(static_cast(0x99AABBCC), t.result_ld_wu_si2); ++ ++ CHECK_EQ(static_cast(0x1122334455667711), t.result_st_b); ++ CHECK_EQ(static_cast(0x1122334455661111), t.result_st_h); ++ CHECK_EQ(static_cast(0x1122334411111111), t.result_st_w); ++} ++ ++TEST(LA7) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct T { ++ int64_t si1; ++ int64_t si2; ++ int64_t si3; ++ int64_t result_ldx_b_si1; ++ int64_t result_ldx_b_si2; ++ int64_t result_ldx_h_si1; ++ int64_t result_ldx_h_si2; ++ int64_t result_ldx_w_si1; ++ int64_t result_ldx_w_si2; ++ int64_t result_ldx_d_si1; ++ int64_t result_ldx_d_si3; ++ int64_t result_ldx_bu_si2; ++ int64_t result_ldx_hu_si2; ++ int64_t result_ldx_wu_si2; ++ int64_t result_stx_b; ++ int64_t result_stx_h; ++ int64_t result_stx_w; ++ }; ++ T t; ++ ++ // ldx_b ++ __ li(a2, static_cast(offsetof(T, si1))); ++ __ Ld_b(a4, MemOperand(a0, a2)); ++ __ St_d(a4, MemOperand(a0, offsetof(T, result_ldx_b_si1))); ++ ++ __ li(a2, static_cast(offsetof(T, si2))); ++ __ Ld_b(a4, MemOperand(a0, a2)); ++ __ St_d(a4, MemOperand(a0, offsetof(T, result_ldx_b_si2))); ++ ++ // ldx_h ++ __ li(a2, static_cast(offsetof(T, si1))); ++ __ Ld_h(a5, MemOperand(a0, a2)); ++ __ St_d(a5, MemOperand(a0, offsetof(T, result_ldx_h_si1))); ++ ++ __ li(a2, static_cast(offsetof(T, si2))); ++ __ Ld_h(a5, MemOperand(a0, a2)); ++ __ St_d(a5, MemOperand(a0, offsetof(T, result_ldx_h_si2))); ++ ++ // ldx_w ++ __ li(a2, static_cast(offsetof(T, si1))); ++ __ Ld_w(a6, MemOperand(a0, a2)); ++ __ St_d(a6, MemOperand(a0, offsetof(T, result_ldx_w_si1))); ++ ++ __ li(a2, static_cast(offsetof(T, si2))); ++ __ Ld_w(a6, MemOperand(a0, a2)); ++ __ St_d(a6, MemOperand(a0, offsetof(T, result_ldx_w_si2))); ++ ++ // Ld_d ++ __ li(a2, static_cast(offsetof(T, si1))); ++ __ Ld_d(a7, MemOperand(a0, a2)); ++ __ St_d(a7, MemOperand(a0, offsetof(T, result_ldx_d_si1))); ++ ++ __ li(a2, static_cast(offsetof(T, si3))); ++ __ Ld_d(a7, MemOperand(a0, a2)); ++ __ St_d(a7, MemOperand(a0, offsetof(T, result_ldx_d_si3))); ++ ++ // Ld_bu ++ __ li(a2, static_cast(offsetof(T, si2))); ++ __ Ld_bu(t0, MemOperand(a0, a2)); ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_ldx_bu_si2))); ++ ++ // Ld_hu ++ __ li(a2, static_cast(offsetof(T, si2))); ++ __ Ld_hu(t1, MemOperand(a0, a2)); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_ldx_hu_si2))); ++ ++ // Ld_wu ++ __ li(a2, static_cast(offsetof(T, si2))); ++ __ Ld_wu(t2, MemOperand(a0, a2)); ++ __ St_d(t2, MemOperand(a0, offsetof(T, result_ldx_wu_si2))); ++ ++ // St ++ __ li(t4, 0x11111111); ++ ++ // St_b ++ __ Ld_d(t5, MemOperand(a0, offsetof(T, si3))); ++ __ St_d(t5, MemOperand(a0, offsetof(T, result_stx_b))); ++ __ li(a2, static_cast(offsetof(T, result_stx_b))); ++ __ St_b(t4, MemOperand(a0, a2)); ++ ++ // St_h ++ __ Ld_d(t6, MemOperand(a0, offsetof(T, si3))); ++ __ St_d(t6, MemOperand(a0, offsetof(T, result_stx_h))); ++ __ li(a2, static_cast(offsetof(T, result_stx_h))); ++ __ St_h(t4, MemOperand(a0, a2)); ++ ++ // St_w ++ __ Ld_d(t7, MemOperand(a0, offsetof(T, si3))); ++ __ li(a2, static_cast(offsetof(T, 
result_stx_w))); ++ __ St_d(t7, MemOperand(a0, a2)); ++ __ li(a3, static_cast(offsetof(T, result_stx_w))); ++ __ St_w(t4, MemOperand(a0, a3)); ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ t.si1 = 0x11223344; ++ t.si2 = 0x99AABBCC; ++ t.si3 = 0x1122334455667788; ++ f.Call(&t, 0, 0, 0, 0); ++ ++ CHECK_EQ(static_cast(0x44), t.result_ldx_b_si1); ++ CHECK_EQ(static_cast(0xFFFFFFFFFFFFFFCC), t.result_ldx_b_si2); ++ ++ CHECK_EQ(static_cast(0x3344), t.result_ldx_h_si1); ++ CHECK_EQ(static_cast(0xFFFFFFFFFFFFBBCC), t.result_ldx_h_si2); ++ ++ CHECK_EQ(static_cast(0x11223344), t.result_ldx_w_si1); ++ CHECK_EQ(static_cast(0xFFFFFFFF99AABBCC), t.result_ldx_w_si2); ++ ++ CHECK_EQ(static_cast(0x11223344), t.result_ldx_d_si1); ++ CHECK_EQ(static_cast(0x1122334455667788), t.result_ldx_d_si3); ++ ++ CHECK_EQ(static_cast(0xCC), t.result_ldx_bu_si2); ++ CHECK_EQ(static_cast(0xBBCC), t.result_ldx_hu_si2); ++ CHECK_EQ(static_cast(0x99AABBCC), t.result_ldx_wu_si2); ++ ++ CHECK_EQ(static_cast(0x1122334455667711), t.result_stx_b); ++ CHECK_EQ(static_cast(0x1122334455661111), t.result_stx_h); ++ CHECK_EQ(static_cast(0x1122334411111111), t.result_stx_w); ++} ++ ++TEST(LDPTR_STPTR) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ int64_t test[10]; ++ ++ __ ldptr_w(a4, a0, 0); ++ __ stptr_d(a4, a0, 24); // test[3] ++ ++ __ ldptr_w(a5, a0, 8); // test[1] ++ __ stptr_d(a5, a0, 32); // test[4] ++ ++ __ ldptr_d(a6, a0, 16); // test[2] ++ __ stptr_d(a6, a0, 40); // test[5] ++ ++ __ li(t0, 0x11111111); ++ ++ __ stptr_d(a6, a0, 48); // test[6] ++ __ stptr_w(t0, a0, 48); // test[6] ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ test[0] = 0x11223344; ++ test[1] = 0x99AABBCC; ++ test[2] = 0x1122334455667788; ++ f.Call(&test, 0, 0, 0, 0); ++ ++ CHECK_EQ(static_cast(0x11223344), test[3]); ++ CHECK_EQ(static_cast(0xFFFFFFFF99AABBCC), test[4]); ++ CHECK_EQ(static_cast(0x1122334455667788), test[5]); ++ CHECK_EQ(static_cast(0x1122334411111111), test[6]); ++} ++ ++TEST(LA8) { ++ // Test 32bit shift instructions. 
++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ struct T { ++ int32_t input; ++ int32_t result_sll_w_0; ++ int32_t result_sll_w_8; ++ int32_t result_sll_w_10; ++ int32_t result_sll_w_31; ++ int32_t result_srl_w_0; ++ int32_t result_srl_w_8; ++ int32_t result_srl_w_10; ++ int32_t result_srl_w_31; ++ int32_t result_sra_w_0; ++ int32_t result_sra_w_8; ++ int32_t result_sra_w_10; ++ int32_t result_sra_w_31; ++ int32_t result_rotr_w_0; ++ int32_t result_rotr_w_8; ++ int32_t result_slli_w_0; ++ int32_t result_slli_w_8; ++ int32_t result_slli_w_10; ++ int32_t result_slli_w_31; ++ int32_t result_srli_w_0; ++ int32_t result_srli_w_8; ++ int32_t result_srli_w_10; ++ int32_t result_srli_w_31; ++ int32_t result_srai_w_0; ++ int32_t result_srai_w_8; ++ int32_t result_srai_w_10; ++ int32_t result_srai_w_31; ++ int32_t result_rotri_w_0; ++ int32_t result_rotri_w_8; ++ int32_t result_rotri_w_10; ++ int32_t result_rotri_w_31; ++ }; ++ T t; ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ __ Ld_w(a4, MemOperand(a0, offsetof(T, input))); ++ ++ // sll_w ++ __ li(a5, 0); ++ __ sll_w(t0, a4, a5); ++ __ li(a5, 0x8); ++ __ sll_w(t1, a4, a5); ++ __ li(a5, 0xA); ++ __ sll_w(t2, a4, a5); ++ __ li(a5, 0x1F); ++ __ sll_w(t3, a4, a5); ++ ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_sll_w_0))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_sll_w_8))); ++ __ St_w(t2, MemOperand(a0, offsetof(T, result_sll_w_10))); ++ __ St_w(t3, MemOperand(a0, offsetof(T, result_sll_w_31))); ++ ++ // srl_w ++ __ li(a5, 0x0); ++ __ srl_w(t0, a4, a5); ++ __ li(a5, 0x8); ++ __ srl_w(t1, a4, a5); ++ __ li(a5, 0xA); ++ __ srl_w(t2, a4, a5); ++ __ li(a5, 0x1F); ++ __ srl_w(t3, a4, a5); ++ ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_srl_w_0))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_srl_w_8))); ++ __ St_w(t2, MemOperand(a0, offsetof(T, result_srl_w_10))); ++ __ St_w(t3, MemOperand(a0, offsetof(T, result_srl_w_31))); ++ ++ // sra_w ++ __ li(a5, 0x0); ++ __ sra_w(t0, a4, a5); ++ __ li(a5, 0x8); ++ __ sra_w(t1, a4, a5); ++ ++ __ li(a6, static_cast(0x80000000)); ++ __ add_w(a6, a6, a4); ++ __ li(a5, 0xA); ++ __ sra_w(t2, a6, a5); ++ __ li(a5, 0x1F); ++ __ sra_w(t3, a6, a5); ++ ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_sra_w_0))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_sra_w_8))); ++ __ St_w(t2, MemOperand(a0, offsetof(T, result_sra_w_10))); ++ __ St_w(t3, MemOperand(a0, offsetof(T, result_sra_w_31))); ++ ++ // rotr ++ __ li(a5, 0x0); ++ __ rotr_w(t0, a4, a5); ++ __ li(a6, 0x8); ++ __ rotr_w(t1, a4, a6); ++ ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_rotr_w_0))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_rotr_w_8))); ++ ++ // slli_w ++ __ slli_w(t0, a4, 0); ++ __ slli_w(t1, a4, 0x8); ++ __ slli_w(t2, a4, 0xA); ++ __ slli_w(t3, a4, 0x1F); ++ ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_slli_w_0))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_slli_w_8))); ++ __ St_w(t2, MemOperand(a0, offsetof(T, result_slli_w_10))); ++ __ St_w(t3, MemOperand(a0, offsetof(T, result_slli_w_31))); ++ ++ // srli_w ++ __ srli_w(t0, a4, 0); ++ __ srli_w(t1, a4, 0x8); ++ __ srli_w(t2, a4, 0xA); ++ __ srli_w(t3, a4, 0x1F); ++ ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_srli_w_0))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_srli_w_8))); ++ __ St_w(t2, MemOperand(a0, offsetof(T, result_srli_w_10))); ++ __ St_w(t3, MemOperand(a0, offsetof(T, result_srli_w_31))); ++ ++ // srai_w ++ __ srai_w(t0, a4, 0); ++ __ srai_w(t1, 
a4, 0x8); ++ ++ __ li(a6, static_cast(0x80000000)); ++ __ add_w(a6, a6, a4); ++ __ srai_w(t2, a6, 0xA); ++ __ srai_w(t3, a6, 0x1F); ++ ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_srai_w_0))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_srai_w_8))); ++ __ St_w(t2, MemOperand(a0, offsetof(T, result_srai_w_10))); ++ __ St_w(t3, MemOperand(a0, offsetof(T, result_srai_w_31))); ++ ++ // rotri_w ++ __ rotri_w(t0, a4, 0); ++ __ rotri_w(t1, a4, 0x8); ++ __ rotri_w(t2, a4, 0xA); ++ __ rotri_w(t3, a4, 0x1F); ++ ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_rotri_w_0))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_rotri_w_8))); ++ __ St_w(t2, MemOperand(a0, offsetof(T, result_rotri_w_10))); ++ __ St_w(t3, MemOperand(a0, offsetof(T, result_rotri_w_31))); ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ t.input = 0x12345678; ++ f.Call(&t, 0x0, 0, 0, 0); ++ ++ CHECK_EQ(static_cast(0x12345678), t.result_sll_w_0); ++ CHECK_EQ(static_cast(0x34567800), t.result_sll_w_8); ++ CHECK_EQ(static_cast(0xD159E000), t.result_sll_w_10); ++ CHECK_EQ(static_cast(0x0), t.result_sll_w_31); ++ ++ CHECK_EQ(static_cast(0x12345678), t.result_srl_w_0); ++ CHECK_EQ(static_cast(0x123456), t.result_srl_w_8); ++ CHECK_EQ(static_cast(0x48D15), t.result_srl_w_10); ++ CHECK_EQ(static_cast(0x0), t.result_srl_w_31); ++ ++ CHECK_EQ(static_cast(0x12345678), t.result_sra_w_0); ++ CHECK_EQ(static_cast(0x123456), t.result_sra_w_8); ++ CHECK_EQ(static_cast(0xFFE48D15), t.result_sra_w_10); ++ CHECK_EQ(static_cast(0xFFFFFFFF), t.result_sra_w_31); ++ ++ CHECK_EQ(static_cast(0x12345678), t.result_rotr_w_0); ++ CHECK_EQ(static_cast(0x78123456), t.result_rotr_w_8); ++ ++ CHECK_EQ(static_cast(0x12345678), t.result_slli_w_0); ++ CHECK_EQ(static_cast(0x34567800), t.result_slli_w_8); ++ CHECK_EQ(static_cast(0xD159E000), t.result_slli_w_10); ++ CHECK_EQ(static_cast(0x0), t.result_slli_w_31); ++ ++ CHECK_EQ(static_cast(0x12345678), t.result_srli_w_0); ++ CHECK_EQ(static_cast(0x123456), t.result_srli_w_8); ++ CHECK_EQ(static_cast(0x48D15), t.result_srli_w_10); ++ CHECK_EQ(static_cast(0x0), t.result_srli_w_31); ++ ++ CHECK_EQ(static_cast(0x12345678), t.result_srai_w_0); ++ CHECK_EQ(static_cast(0x123456), t.result_srai_w_8); ++ CHECK_EQ(static_cast(0xFFE48D15), t.result_srai_w_10); ++ CHECK_EQ(static_cast(0xFFFFFFFF), t.result_srai_w_31); ++ ++ CHECK_EQ(static_cast(0x12345678), t.result_rotri_w_0); ++ CHECK_EQ(static_cast(0x78123456), t.result_rotri_w_8); ++ CHECK_EQ(static_cast(0x9E048D15), t.result_rotri_w_10); ++ CHECK_EQ(static_cast(0x2468ACF0), t.result_rotri_w_31); ++} ++ ++TEST(LA9) { ++ // Test 64bit shift instructions. 
++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ struct T { ++ int64_t input; ++ int64_t result_sll_d_0; ++ int64_t result_sll_d_13; ++ int64_t result_sll_d_30; ++ int64_t result_sll_d_63; ++ int64_t result_srl_d_0; ++ int64_t result_srl_d_13; ++ int64_t result_srl_d_30; ++ int64_t result_srl_d_63; ++ int64_t result_sra_d_0; ++ int64_t result_sra_d_13; ++ int64_t result_sra_d_30; ++ int64_t result_sra_d_63; ++ int64_t result_rotr_d_0; ++ int64_t result_rotr_d_13; ++ int64_t result_slli_d_0; ++ int64_t result_slli_d_13; ++ int64_t result_slli_d_30; ++ int64_t result_slli_d_63; ++ int64_t result_srli_d_0; ++ int64_t result_srli_d_13; ++ int64_t result_srli_d_30; ++ int64_t result_srli_d_63; ++ int64_t result_srai_d_0; ++ int64_t result_srai_d_13; ++ int64_t result_srai_d_30; ++ int64_t result_srai_d_63; ++ int64_t result_rotri_d_0; ++ int64_t result_rotri_d_13; ++ int64_t result_rotri_d_30; ++ int64_t result_rotri_d_63; ++ }; ++ ++ T t; ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ __ Ld_d(a4, MemOperand(a0, offsetof(T, input))); ++ ++ // sll_d ++ __ li(a5, 0); ++ __ sll_d(t0, a4, a5); ++ __ li(a5, 0xD); ++ __ sll_d(t1, a4, a5); ++ __ li(a5, 0x1E); ++ __ sll_d(t2, a4, a5); ++ __ li(a5, 0x3F); ++ __ sll_d(t3, a4, a5); ++ ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_sll_d_0))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_sll_d_13))); ++ __ St_d(t2, MemOperand(a0, offsetof(T, result_sll_d_30))); ++ __ St_d(t3, MemOperand(a0, offsetof(T, result_sll_d_63))); ++ ++ // srl_d ++ __ li(a5, 0x0); ++ __ srl_d(t0, a4, a5); ++ __ li(a5, 0xD); ++ __ srl_d(t1, a4, a5); ++ __ li(a5, 0x1E); ++ __ srl_d(t2, a4, a5); ++ __ li(a5, 0x3F); ++ __ srl_d(t3, a4, a5); ++ ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_srl_d_0))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_srl_d_13))); ++ __ St_d(t2, MemOperand(a0, offsetof(T, result_srl_d_30))); ++ __ St_d(t3, MemOperand(a0, offsetof(T, result_srl_d_63))); ++ ++ // sra_d ++ __ li(a5, 0x0); ++ __ sra_d(t0, a4, a5); ++ __ li(a5, 0xD); ++ __ sra_d(t1, a4, a5); ++ ++ __ li(a6, static_cast(0x8000000000000000)); ++ __ add_d(a6, a6, a4); ++ __ li(a5, 0x1E); ++ __ sra_d(t2, a6, a5); ++ __ li(a5, 0x3F); ++ __ sra_d(t3, a6, a5); ++ ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_sra_d_0))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_sra_d_13))); ++ __ St_d(t2, MemOperand(a0, offsetof(T, result_sra_d_30))); ++ __ St_d(t3, MemOperand(a0, offsetof(T, result_sra_d_63))); ++ ++ // rotr ++ __ li(a5, 0x0); ++ __ rotr_d(t0, a4, a5); ++ __ li(a6, 0xD); ++ __ rotr_d(t1, a4, a6); ++ ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_rotr_d_0))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_rotr_d_13))); ++ ++ // slli_d ++ __ slli_d(t0, a4, 0); ++ __ slli_d(t1, a4, 0xD); ++ __ slli_d(t2, a4, 0x1E); ++ __ slli_d(t3, a4, 0x3F); ++ ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_slli_d_0))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_slli_d_13))); ++ __ St_d(t2, MemOperand(a0, offsetof(T, result_slli_d_30))); ++ __ St_d(t3, MemOperand(a0, offsetof(T, result_slli_d_63))); ++ ++ // srli_d ++ __ srli_d(t0, a4, 0); ++ __ srli_d(t1, a4, 0xD); ++ __ srli_d(t2, a4, 0x1E); ++ __ srli_d(t3, a4, 0x3F); ++ ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_srli_d_0))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_srli_d_13))); ++ __ St_d(t2, MemOperand(a0, offsetof(T, result_srli_d_30))); ++ __ St_d(t3, MemOperand(a0, offsetof(T, result_srli_d_63))); ++ ++ // srai_d ++ __ 
srai_d(t0, a4, 0); ++ __ srai_d(t1, a4, 0xD); ++ ++ __ li(a6, static_cast(0x8000000000000000)); ++ __ add_d(a6, a6, a4); ++ __ srai_d(t2, a6, 0x1E); ++ __ srai_d(t3, a6, 0x3F); ++ ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_srai_d_0))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_srai_d_13))); ++ __ St_d(t2, MemOperand(a0, offsetof(T, result_srai_d_30))); ++ __ St_d(t3, MemOperand(a0, offsetof(T, result_srai_d_63))); ++ ++ // rotri_d ++ __ rotri_d(t0, a4, 0); ++ __ rotri_d(t1, a4, 0xD); ++ __ rotri_d(t2, a4, 0x1E); ++ __ rotri_d(t3, a4, 0x3F); ++ ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_rotri_d_0))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_rotri_d_13))); ++ __ St_d(t2, MemOperand(a0, offsetof(T, result_rotri_d_30))); ++ __ St_d(t3, MemOperand(a0, offsetof(T, result_rotri_d_63))); ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ t.input = 0x51F4B764A26E7412; ++ f.Call(&t, 0, 0, 0, 0); ++ ++ CHECK_EQ(static_cast(0x51f4b764a26e7412), t.result_sll_d_0); ++ CHECK_EQ(static_cast(0x96ec944dce824000), t.result_sll_d_13); ++ CHECK_EQ(static_cast(0x289b9d0480000000), t.result_sll_d_30); ++ CHECK_EQ(static_cast(0x0), t.result_sll_d_63); ++ ++ CHECK_EQ(static_cast(0x51f4b764a26e7412), t.result_srl_d_0); ++ CHECK_EQ(static_cast(0x28fa5bb251373), t.result_srl_d_13); ++ CHECK_EQ(static_cast(0x147d2dd92), t.result_srl_d_30); ++ CHECK_EQ(static_cast(0x0), t.result_srl_d_63); ++ ++ CHECK_EQ(static_cast(0x51f4b764a26e7412), t.result_sra_d_0); ++ CHECK_EQ(static_cast(0x28fa5bb251373), t.result_sra_d_13); ++ CHECK_EQ(static_cast(0xffffffff47d2dd92), t.result_sra_d_30); ++ CHECK_EQ(static_cast(0xffffffffffffffff), t.result_sra_d_63); ++ ++ CHECK_EQ(static_cast(0x51f4b764a26e7412), t.result_rotr_d_0); ++ CHECK_EQ(static_cast(0xa0928fa5bb251373), t.result_rotr_d_13); ++ ++ CHECK_EQ(static_cast(0x51f4b764a26e7412), t.result_slli_d_0); ++ CHECK_EQ(static_cast(0x96ec944dce824000), t.result_slli_d_13); ++ CHECK_EQ(static_cast(0x289b9d0480000000), t.result_slli_d_30); ++ CHECK_EQ(static_cast(0x0), t.result_slli_d_63); ++ ++ CHECK_EQ(static_cast(0x51f4b764a26e7412), t.result_srli_d_0); ++ CHECK_EQ(static_cast(0x28fa5bb251373), t.result_srli_d_13); ++ CHECK_EQ(static_cast(0x147d2dd92), t.result_srli_d_30); ++ CHECK_EQ(static_cast(0x0), t.result_srli_d_63); ++ ++ CHECK_EQ(static_cast(0x51f4b764a26e7412), t.result_srai_d_0); ++ CHECK_EQ(static_cast(0x28fa5bb251373), t.result_srai_d_13); ++ CHECK_EQ(static_cast(0xffffffff47d2dd92), t.result_srai_d_30); ++ CHECK_EQ(static_cast(0xffffffffffffffff), t.result_srai_d_63); ++ ++ CHECK_EQ(static_cast(0x51f4b764a26e7412), t.result_rotri_d_0); ++ CHECK_EQ(static_cast(0xa0928fa5bb251373), t.result_rotri_d_13); ++ CHECK_EQ(static_cast(0x89b9d04947d2dd92), t.result_rotri_d_30); ++ CHECK_EQ(static_cast(0xa3e96ec944dce824), t.result_rotri_d_63); ++} ++ ++TEST(LA10) { ++ // Test 32bit bit operation instructions. 
++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct T { ++ int64_t si1; ++ int64_t si2; ++ int32_t result_ext_w_b_si1; ++ int32_t result_ext_w_b_si2; ++ int32_t result_ext_w_h_si1; ++ int32_t result_ext_w_h_si2; ++ int32_t result_clo_w_si1; ++ int32_t result_clo_w_si2; ++ int32_t result_clz_w_si1; ++ int32_t result_clz_w_si2; ++ int32_t result_cto_w_si1; ++ int32_t result_cto_w_si2; ++ int32_t result_ctz_w_si1; ++ int32_t result_ctz_w_si2; ++ int32_t result_bytepick_w_si1; ++ int32_t result_bytepick_w_si2; ++ int32_t result_revb_2h_si1; ++ int32_t result_revb_2h_si2; ++ int32_t result_bitrev_4b_si1; ++ int32_t result_bitrev_4b_si2; ++ int32_t result_bitrev_w_si1; ++ int32_t result_bitrev_w_si2; ++ int32_t result_bstrins_w_si1; ++ int32_t result_bstrins_w_si2; ++ int32_t result_bstrpick_w_si1; ++ int32_t result_bstrpick_w_si2; ++ }; ++ T t; ++ ++ __ Ld_d(a4, MemOperand(a0, offsetof(T, si1))); ++ __ Ld_d(a5, MemOperand(a0, offsetof(T, si2))); ++ ++ // ext_w_b ++ __ ext_w_b(t0, a4); ++ __ ext_w_b(t1, a5); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_ext_w_b_si1))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_ext_w_b_si2))); ++ ++ // ext_w_h ++ __ ext_w_h(t0, a4); ++ __ ext_w_h(t1, a5); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_ext_w_h_si1))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_ext_w_h_si2))); ++ ++ /* //clo_w ++ __ clo_w(t0, a4); ++ __ clo_w(t1, a5); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_clo_w_si1))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_clo_w_si2)));*/ ++ ++ // clz_w ++ __ clz_w(t0, a4); ++ __ clz_w(t1, a5); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_clz_w_si1))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_clz_w_si2))); ++ ++ /* //cto_w ++ __ cto_w(t0, a4); ++ __ cto_w(t1, a5); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_cto_w_si1))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_cto_w_si2)));*/ ++ ++ // ctz_w ++ __ ctz_w(t0, a4); ++ __ ctz_w(t1, a5); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_ctz_w_si1))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_ctz_w_si2))); ++ ++ // bytepick_w ++ __ bytepick_w(t0, a4, a5, 0); ++ __ bytepick_w(t1, a5, a4, 2); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_bytepick_w_si1))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_bytepick_w_si2))); ++ ++ // revb_2h ++ __ revb_2h(t0, a4); ++ __ revb_2h(t1, a5); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_revb_2h_si1))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_revb_2h_si2))); ++ ++ // bitrev ++ __ bitrev_4b(t0, a4); ++ __ bitrev_4b(t1, a5); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_bitrev_4b_si1))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_bitrev_4b_si2))); ++ ++ // bitrev_w ++ __ bitrev_w(t0, a4); ++ __ bitrev_w(t1, a5); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_bitrev_w_si1))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_bitrev_w_si2))); ++ ++ // bstrins ++ __ or_(t0, zero_reg, zero_reg); ++ __ or_(t1, zero_reg, zero_reg); ++ __ bstrins_w(t0, a4, 0xD, 0x4); ++ __ bstrins_w(t1, a5, 0x16, 0x5); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_bstrins_w_si1))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_bstrins_w_si2))); ++ ++ // bstrpick ++ __ or_(t0, zero_reg, zero_reg); ++ __ or_(t1, zero_reg, zero_reg); ++ __ bstrpick_w(t0, a4, 0xD, 0x4); ++ __ bstrpick_w(t1, a5, 0x16, 0x5); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_bstrpick_w_si1))); ++ 
__ St_w(t1, MemOperand(a0, offsetof(T, result_bstrpick_w_si2))); ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ t.si1 = 0x51F4B764A26E7412; ++ t.si2 = 0x81F25A87C423B891; ++ f.Call(&t, 0, 0, 0, 0); ++ ++ CHECK_EQ(static_cast(0x12), t.result_ext_w_b_si1); ++ CHECK_EQ(static_cast(0xffffff91), t.result_ext_w_b_si2); ++ CHECK_EQ(static_cast(0x7412), t.result_ext_w_h_si1); ++ CHECK_EQ(static_cast(0xffffb891), t.result_ext_w_h_si2); ++ // CHECK_EQ(static_cast(0x1), t.result_clo_w_si1); ++ // CHECK_EQ(static_cast(0x2), t.result_clo_w_si2); ++ CHECK_EQ(static_cast(0x0), t.result_clz_w_si1); ++ CHECK_EQ(static_cast(0x0), t.result_clz_w_si2); ++ // CHECK_EQ(static_cast(0x0), t.result_cto_w_si1); ++ // CHECK_EQ(static_cast(0x1), t.result_cto_w_si2); ++ CHECK_EQ(static_cast(0x1), t.result_ctz_w_si1); ++ CHECK_EQ(static_cast(0x0), t.result_ctz_w_si2); ++ CHECK_EQ(static_cast(0xc423b891), t.result_bytepick_w_si1); ++ CHECK_EQ(static_cast(0x7412c423), ++ t.result_bytepick_w_si2); // 0xffffc423 ++ CHECK_EQ(static_cast(0x6ea21274), t.result_revb_2h_si1); ++ CHECK_EQ(static_cast(0x23c491b8), t.result_revb_2h_si2); ++ CHECK_EQ(static_cast(0x45762e48), t.result_bitrev_4b_si1); ++ CHECK_EQ(static_cast(0x23c41d89), t.result_bitrev_4b_si2); ++ CHECK_EQ(static_cast(0x482e7645), t.result_bitrev_w_si1); ++ CHECK_EQ(static_cast(0x891dc423), t.result_bitrev_w_si2); ++ CHECK_EQ(static_cast(0x120), t.result_bstrins_w_si1); ++ CHECK_EQ(static_cast(0x771220), t.result_bstrins_w_si2); ++ CHECK_EQ(static_cast(0x341), t.result_bstrpick_w_si1); ++ CHECK_EQ(static_cast(0x11dc4), t.result_bstrpick_w_si2); ++} ++ ++TEST(LA11) { ++ // Test 64bit bit operation instructions. 
++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct T { ++ int64_t si1; ++ int64_t si2; ++ int64_t result_clo_d_si1; ++ int64_t result_clo_d_si2; ++ int64_t result_clz_d_si1; ++ int64_t result_clz_d_si2; ++ int64_t result_cto_d_si1; ++ int64_t result_cto_d_si2; ++ int64_t result_ctz_d_si1; ++ int64_t result_ctz_d_si2; ++ int64_t result_bytepick_d_si1; ++ int64_t result_bytepick_d_si2; ++ int64_t result_revb_4h_si1; ++ int64_t result_revb_4h_si2; ++ int64_t result_revb_2w_si1; ++ int64_t result_revb_2w_si2; ++ int64_t result_revb_d_si1; ++ int64_t result_revb_d_si2; ++ int64_t result_revh_2w_si1; ++ int64_t result_revh_2w_si2; ++ int64_t result_revh_d_si1; ++ int64_t result_revh_d_si2; ++ int64_t result_bitrev_8b_si1; ++ int64_t result_bitrev_8b_si2; ++ int64_t result_bitrev_d_si1; ++ int64_t result_bitrev_d_si2; ++ int64_t result_bstrins_d_si1; ++ int64_t result_bstrins_d_si2; ++ int64_t result_bstrpick_d_si1; ++ int64_t result_bstrpick_d_si2; ++ int64_t result_maskeqz_si1; ++ int64_t result_maskeqz_si2; ++ int64_t result_masknez_si1; ++ int64_t result_masknez_si2; ++ }; ++ ++ T t; ++ ++ __ Ld_d(a4, MemOperand(a0, offsetof(T, si1))); ++ __ Ld_d(a5, MemOperand(a0, offsetof(T, si2))); ++ ++ /* //clo_d ++ __ clo_d(t0, a4); ++ __ clo_d(t1, a5); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_clo_d_si1))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_clo_d_si2)));*/ ++ ++ // clz_d ++ __ or_(t0, zero_reg, zero_reg); ++ __ clz_d(t0, a4); ++ __ clz_d(t1, a5); ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_clz_d_si1))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_clz_d_si2))); ++ ++ /* //cto_d ++ __ cto_d(t0, a4); ++ __ cto_d(t1, a5); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_cto_d_si1))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_cto_d_si2)));*/ ++ ++ // ctz_d ++ __ ctz_d(t0, a4); ++ __ ctz_d(t1, a5); ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_ctz_d_si1))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_ctz_d_si2))); ++ ++ // bytepick_d ++ __ bytepick_d(t0, a4, a5, 0); ++ __ bytepick_d(t1, a5, a4, 5); ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_bytepick_d_si1))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_bytepick_d_si2))); ++ ++ // revb_4h ++ __ revb_4h(t0, a4); ++ __ revb_4h(t1, a5); ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_revb_4h_si1))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_revb_4h_si2))); ++ ++ // revb_2w ++ __ revb_2w(t0, a4); ++ __ revb_2w(t1, a5); ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_revb_2w_si1))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_revb_2w_si2))); ++ ++ // revb_d ++ __ revb_d(t0, a4); ++ __ revb_d(t1, a5); ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_revb_d_si1))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_revb_d_si2))); ++ ++ // revh_2w ++ __ revh_2w(t0, a4); ++ __ revh_2w(t1, a5); ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_revh_2w_si1))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_revh_2w_si2))); ++ ++ // revh_d ++ __ revh_d(t0, a4); ++ __ revh_d(t1, a5); ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_revh_d_si1))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_revh_d_si2))); ++ ++ // bitrev_8b ++ __ bitrev_8b(t0, a4); ++ __ bitrev_8b(t1, a5); ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_bitrev_8b_si1))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_bitrev_8b_si2))); ++ ++ // bitrev_d ++ __ bitrev_d(t0, a4); ++ __ bitrev_d(t1, 
a5); ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_bitrev_d_si1))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_bitrev_d_si2))); ++ ++ // bstrins_d ++ __ or_(t0, zero_reg, zero_reg); ++ __ or_(t1, zero_reg, zero_reg); ++ __ bstrins_d(t0, a4, 5, 0); ++ __ bstrins_d(t1, a5, 39, 12); ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_bstrins_d_si1))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_bstrins_d_si2))); ++ ++ // bstrpick_d ++ __ or_(t0, zero_reg, zero_reg); ++ __ or_(t1, zero_reg, zero_reg); ++ __ bstrpick_d(t0, a4, 5, 0); ++ __ bstrpick_d(t1, a5, 63, 48); ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_bstrpick_d_si1))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_bstrpick_d_si2))); ++ ++ // maskeqz ++ __ maskeqz(t0, a4, a4); ++ __ maskeqz(t1, a5, zero_reg); ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_maskeqz_si1))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_maskeqz_si2))); ++ ++ // masknez ++ __ masknez(t0, a4, a4); ++ __ masknez(t1, a5, zero_reg); ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_masknez_si1))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_masknez_si2))); ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ t.si1 = 0x10C021098B710CDE; ++ t.si2 = 0xFB8017FF781A15C3; ++ f.Call(&t, 0, 0, 0, 0); ++ ++ // CHECK_EQ(static_cast(0x0), t.result_clo_d_si1); ++ // CHECK_EQ(static_cast(0x5), t.result_clo_d_si2); ++ CHECK_EQ(static_cast(0x3), t.result_clz_d_si1); ++ CHECK_EQ(static_cast(0x0), t.result_clz_d_si2); ++ // CHECK_EQ(static_cast(0x0), t.result_cto_d_si1); ++ // CHECK_EQ(static_cast(0x2), t.result_cto_d_si2); ++ CHECK_EQ(static_cast(0x1), t.result_ctz_d_si1); ++ CHECK_EQ(static_cast(0x0), t.result_ctz_d_si2); ++ CHECK_EQ(static_cast(0xfb8017ff781a15c3), t.result_bytepick_d_si1); ++ CHECK_EQ(static_cast(0x710cde0000000000), t.result_bytepick_d_si2); ++ CHECK_EQ(static_cast(0xc0100921718bde0c), t.result_revb_4h_si1); ++ CHECK_EQ(static_cast(0x80fbff171a78c315), t.result_revb_4h_si2); ++ CHECK_EQ(static_cast(0x921c010de0c718b), t.result_revb_2w_si1); ++ CHECK_EQ(static_cast(0xff1780fbc3151a78), t.result_revb_2w_si2); ++ CHECK_EQ(static_cast(0xde0c718b0921c010), t.result_revb_d_si1); ++ CHECK_EQ(static_cast(0xc3151a78ff1780fb), t.result_revb_d_si2); ++ CHECK_EQ(static_cast(0x210910c00cde8b71), t.result_revh_2w_si1); ++ CHECK_EQ(static_cast(0x17fffb8015c3781a), t.result_revh_2w_si2); ++ CHECK_EQ(static_cast(0xcde8b71210910c0), t.result_revh_d_si1); ++ CHECK_EQ(static_cast(0x15c3781a17fffb80), t.result_revh_d_si2); ++ CHECK_EQ(static_cast(0x8038490d18e307b), t.result_bitrev_8b_si1); ++ CHECK_EQ(static_cast(0xdf01e8ff1e58a8c3), t.result_bitrev_8b_si2); ++ CHECK_EQ(static_cast(0x7b308ed190840308), t.result_bitrev_d_si1); ++ CHECK_EQ(static_cast(0xc3a8581effe801df), t.result_bitrev_d_si2); ++ CHECK_EQ(static_cast(0x1e), t.result_bstrins_d_si1); ++ CHECK_EQ(static_cast(0x81a15c3000), t.result_bstrins_d_si2); ++ CHECK_EQ(static_cast(0x1e), t.result_bstrpick_d_si1); ++ CHECK_EQ(static_cast(0xfb80), t.result_bstrpick_d_si2); ++ CHECK_EQ(static_cast(0), t.result_maskeqz_si1); ++ CHECK_EQ(static_cast(0xFB8017FF781A15C3), t.result_maskeqz_si2); ++ CHECK_EQ(static_cast(0x10C021098B710CDE), t.result_masknez_si1); ++ CHECK_EQ(static_cast(0), t.result_masknez_si2); ++} ++ ++uint64_t run_beq(int64_t value1, int64_t value2, int16_t offset) { ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope 
scope(isolate); ++ ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ Label main_block, L; ++ __ li(a2, 0l); ++ __ b(&main_block); ++ // Block 1 ++ __ addi_d(a2, a2, 0x1); ++ __ addi_d(a2, a2, 0x2); ++ __ b(&L); ++ ++ // Block 2 ++ __ addi_d(a2, a2, 0x10); ++ __ addi_d(a2, a2, 0x20); ++ __ b(&L); ++ ++ // Block 3 (Main) ++ __ bind(&main_block); ++ __ beq(a0, a1, offset); ++ __ bind(&L); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ // Block 4 ++ __ addi_d(a2, a2, 0x100); ++ __ addi_d(a2, a2, 0x200); ++ __ b(&L); ++ ++ // Block 5 ++ __ addi_d(a2, a2, 0x300); ++ __ addi_d(a2, a2, 0x400); ++ __ b(&L); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ ++ auto f = GeneratedCode::FromCode(*code); ++ uint64_t res = reinterpret_cast(f.Call(value1, value2, 0, 0, 0)); ++ ++ return res; ++} ++ ++TEST(BEQ) { ++ CcTest::InitializeVM(); ++ struct TestCaseBeq { ++ int64_t value1; ++ int64_t value2; ++ int16_t offset; ++ uint64_t expected_res; ++ }; ++ ++ // clang-format off ++ struct TestCaseBeq tc[] = { ++ // value1, value2, offset, expected_res ++ { 0, 0, -6, 0x3 }, ++ { 1, 1, -3, 0x30 }, ++ { -2, -2, 3, 0x300 }, ++ { 3, -3, 6, 0 }, ++ { 4, 4, 6, 0x700 }, ++ }; ++ // clang-format on ++ ++ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBeq); ++ for (size_t i = 0; i < nr_test_cases; ++i) { ++ uint64_t res = run_beq(tc[i].value1, tc[i].value2, tc[i].offset); ++ CHECK_EQ(tc[i].expected_res, res); ++ } ++} ++ ++uint64_t run_bne(int64_t value1, int64_t value2, int16_t offset) { ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ Label main_block, L; ++ __ li(a2, 0l); ++ __ b(&main_block); ++ // Block 1 ++ __ addi_d(a2, a2, 0x1); ++ __ addi_d(a2, a2, 0x2); ++ __ b(&L); ++ ++ // Block 2 ++ __ addi_d(a2, a2, 0x10); ++ __ addi_d(a2, a2, 0x20); ++ __ b(&L); ++ ++ // Block 3 (Main) ++ __ bind(&main_block); ++ __ bne(a0, a1, offset); ++ __ bind(&L); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ // Block 4 ++ __ addi_d(a2, a2, 0x100); ++ __ addi_d(a2, a2, 0x200); ++ __ b(&L); ++ ++ // Block 5 ++ __ addi_d(a2, a2, 0x300); ++ __ addi_d(a2, a2, 0x400); ++ __ b(&L); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ ++ auto f = GeneratedCode::FromCode(*code); ++ uint64_t res = reinterpret_cast(f.Call(value1, value2, 0, 0, 0)); ++ ++ return res; ++} ++ ++TEST(BNE) { ++ CcTest::InitializeVM(); ++ struct TestCaseBne { ++ int64_t value1; ++ int64_t value2; ++ int16_t offset; ++ uint64_t expected_res; ++ }; ++ ++ // clang-format off ++ struct TestCaseBne tc[] = { ++ // value1, value2, offset, expected_res ++ { 1, -1, -6, 0x3 }, ++ { 2, -2, -3, 0x30 }, ++ { 3, -3, 3, 0x300 }, ++ { 4, -4, 6, 0x700 }, ++ { 0, 0, 6, 0 }, ++ }; ++ // clang-format on ++ ++ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBne); ++ for (size_t i = 0; i < nr_test_cases; ++i) { ++ uint64_t res = run_bne(tc[i].value1, tc[i].value2, tc[i].offset); ++ CHECK_EQ(tc[i].expected_res, res); ++ } ++} ++ ++uint64_t run_blt(int64_t value1, int64_t value2, int16_t offset) { ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ Label main_block, L; ++ __ li(a2, 0l); ++ __ b(&main_block); ++ // Block 1 ++ __ addi_d(a2, a2, 0x1); ++ __ 
addi_d(a2, a2, 0x2); ++ __ b(&L); ++ ++ // Block 2 ++ __ addi_d(a2, a2, 0x10); ++ __ addi_d(a2, a2, 0x20); ++ __ b(&L); ++ ++ // Block 3 (Main) ++ __ bind(&main_block); ++ __ blt(a0, a1, offset); ++ __ bind(&L); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ // Block 4 ++ __ addi_d(a2, a2, 0x100); ++ __ addi_d(a2, a2, 0x200); ++ __ b(&L); ++ ++ // Block 5 ++ __ addi_d(a2, a2, 0x300); ++ __ addi_d(a2, a2, 0x400); ++ __ b(&L); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ ++ auto f = GeneratedCode::FromCode(*code); ++ uint64_t res = reinterpret_cast(f.Call(value1, value2, 0, 0, 0)); ++ ++ return res; ++} ++ ++TEST(BLT) { ++ CcTest::InitializeVM(); ++ struct TestCaseBlt { ++ int64_t value1; ++ int64_t value2; ++ int16_t offset; ++ uint64_t expected_res; ++ }; ++ ++ // clang-format off ++ struct TestCaseBlt tc[] = { ++ // value1, value2, offset, expected_res ++ { -1, 1, -6, 0x3 }, ++ { -2, 2, -3, 0x30 }, ++ { -3, 3, 3, 0x300 }, ++ { -4, 4, 6, 0x700 }, ++ { 5, -5, 6, 0 }, ++ { 0, 0, 6, 0 }, ++ }; ++ // clang-format on ++ ++ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBlt); ++ for (size_t i = 0; i < nr_test_cases; ++i) { ++ uint64_t res = run_blt(tc[i].value1, tc[i].value2, tc[i].offset); ++ CHECK_EQ(tc[i].expected_res, res); ++ } ++} ++ ++uint64_t run_bge(uint64_t value1, uint64_t value2, int16_t offset) { ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ Label main_block, L; ++ __ li(a2, 0l); ++ __ b(&main_block); ++ // Block 1 ++ __ addi_d(a2, a2, 0x1); ++ __ addi_d(a2, a2, 0x2); ++ __ b(&L); ++ ++ // Block 2 ++ __ addi_d(a2, a2, 0x10); ++ __ addi_d(a2, a2, 0x20); ++ __ b(&L); ++ ++ // Block 3 (Main) ++ __ bind(&main_block); ++ __ bge(a0, a1, offset); ++ __ bind(&L); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ // Block 4 ++ __ addi_d(a2, a2, 0x100); ++ __ addi_d(a2, a2, 0x200); ++ __ b(&L); ++ ++ // Block 5 ++ __ addi_d(a2, a2, 0x300); ++ __ addi_d(a2, a2, 0x400); ++ __ b(&L); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ ++ auto f = GeneratedCode::FromCode(*code); ++ uint64_t res = reinterpret_cast(f.Call(value1, value2, 0, 0, 0)); ++ ++ return res; ++} ++ ++TEST(BGE) { ++ CcTest::InitializeVM(); ++ struct TestCaseBge { ++ int64_t value1; ++ int64_t value2; ++ int16_t offset; ++ uint64_t expected_res; ++ }; ++ ++ // clang-format off ++ struct TestCaseBge tc[] = { ++ // value1, value2, offset, expected_res ++ { 0, 0, -6, 0x3 }, ++ { 1, 1, -3, 0x30 }, ++ { 2, -2, 3, 0x300 }, ++ { 3, -3, 6, 0x700 }, ++ { -4, 4, 6, 0 }, ++ }; ++ // clang-format on ++ ++ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBge); ++ for (size_t i = 0; i < nr_test_cases; ++i) { ++ uint64_t res = run_bge(tc[i].value1, tc[i].value2, tc[i].offset); ++ CHECK_EQ(tc[i].expected_res, res); ++ } ++} ++ ++uint64_t run_bltu(int64_t value1, int64_t value2, int16_t offset) { ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ Label main_block, L; ++ __ li(a2, 0l); ++ __ b(&main_block); ++ // Block 1 ++ __ addi_d(a2, a2, 0x1); ++ __ addi_d(a2, a2, 0x2); ++ __ b(&L); ++ ++ // Block 2 ++ __ addi_d(a2, a2, 0x10); ++ __ addi_d(a2, a2, 0x20); ++ __ b(&L); ++ ++ // Block 3 (Main) ++ __ bind(&main_block); ++ __ bltu(a0, 
a1, offset); ++ __ bind(&L); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ // Block 4 ++ __ addi_d(a2, a2, 0x100); ++ __ addi_d(a2, a2, 0x200); ++ __ b(&L); ++ ++ // Block 5 ++ __ addi_d(a2, a2, 0x300); ++ __ addi_d(a2, a2, 0x400); ++ __ b(&L); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ ++ auto f = GeneratedCode::FromCode(*code); ++ uint64_t res = reinterpret_cast(f.Call(value1, value2, 0, 0, 0)); ++ ++ return res; ++} ++ ++TEST(BLTU) { ++ CcTest::InitializeVM(); ++ struct TestCaseBltu { ++ int64_t value1; ++ int64_t value2; ++ int16_t offset; ++ uint64_t expected_res; ++ }; ++ ++ // clang-format off ++ struct TestCaseBltu tc[] = { ++ // value1, value2, offset, expected_res ++ { 0, 1, -6, 0x3 }, ++ { 1, -1, -3, 0x30 }, ++ { 2, -2, 3, 0x300 }, ++ { 3, -3, 6, 0x700 }, ++ { 4, 4, 6, 0 }, ++ }; ++ // clang-format on ++ ++ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBltu); ++ for (size_t i = 0; i < nr_test_cases; ++i) { ++ uint64_t res = run_bltu(tc[i].value1, tc[i].value2, tc[i].offset); ++ CHECK_EQ(tc[i].expected_res, res); ++ } ++} ++ ++uint64_t run_bgeu(int64_t value1, int64_t value2, int16_t offset) { ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ Label main_block, L; ++ __ li(a2, 0l); ++ __ b(&main_block); ++ // Block 1 ++ __ addi_d(a2, a2, 0x1); ++ __ addi_d(a2, a2, 0x2); ++ __ b(&L); ++ ++ // Block 2 ++ __ addi_d(a2, a2, 0x10); ++ __ addi_d(a2, a2, 0x20); ++ __ b(&L); ++ ++ // Block 3 (Main) ++ __ bind(&main_block); ++ __ bgeu(a0, a1, offset); ++ __ bind(&L); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ // Block 4 ++ __ addi_d(a2, a2, 0x100); ++ __ addi_d(a2, a2, 0x200); ++ __ b(&L); ++ ++ // Block 5 ++ __ addi_d(a2, a2, 0x300); ++ __ addi_d(a2, a2, 0x400); ++ __ b(&L); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ ++ auto f = GeneratedCode::FromCode(*code); ++ uint64_t res = reinterpret_cast(f.Call(value1, value2, 0, 0, 0)); ++ ++ return res; ++} ++ ++TEST(BGEU) { ++ CcTest::InitializeVM(); ++ struct TestCaseBgeu { ++ int64_t value1; ++ int64_t value2; ++ int16_t offset; ++ uint64_t expected_res; ++ }; ++ ++ // clang-format off ++ struct TestCaseBgeu tc[] = { ++ // value1, value2, offset, expected_res ++ { 0, 0, -6, 0x3 }, ++ { -1, 1, -3, 0x30 }, ++ { -2, 2, 3, 0x300 }, ++ { -3, 3, 6, 0x700 }, ++ { 4, -4, 6, 0 }, ++ }; ++ // clang-format on ++ ++ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBgeu); ++ for (size_t i = 0; i < nr_test_cases; ++i) { ++ uint64_t res = run_bgeu(tc[i].value1, tc[i].value2, tc[i].offset); ++ CHECK_EQ(tc[i].expected_res, res); ++ } ++} ++ ++uint64_t run_beqz(int64_t value, int32_t offset) { ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ Label main_block, L; ++ __ li(a2, 0l); ++ __ b(&main_block); ++ // Block 1 ++ __ addi_d(a2, a2, 0x1); ++ __ addi_d(a2, a2, 0x2); ++ __ b(&L); ++ ++ // Block 2 ++ __ addi_d(a2, a2, 0x10); ++ __ addi_d(a2, a2, 0x20); ++ __ b(&L); ++ ++ // Block 3 (Main) ++ __ bind(&main_block); ++ __ beqz(a0, offset); ++ __ bind(&L); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ // Block 4 ++ __ addi_d(a2, a2, 0x100); ++ __ addi_d(a2, a2, 0x200); ++ __ b(&L); ++ ++ // Block 5 ++ __ addi_d(a2, a2, 
0x300); ++ __ addi_d(a2, a2, 0x400); ++ __ b(&L); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ ++ auto f = GeneratedCode::FromCode(*code); ++ uint64_t res = reinterpret_cast(f.Call(value, 0, 0, 0, 0)); ++ ++ return res; ++} ++ ++TEST(BEQZ) { ++ CcTest::InitializeVM(); ++ struct TestCaseBeqz { ++ int64_t value; ++ int32_t offset; ++ uint64_t expected_res; ++ }; ++ ++ // clang-format off ++ struct TestCaseBeqz tc[] = { ++ // value, offset, expected_res ++ { 0, -6, 0x3 }, ++ { 0, -3, 0x30 }, ++ { 0, 3, 0x300 }, ++ { 0, 6, 0x700 }, ++ { 1, 6, 0 }, ++ }; ++ // clang-format on ++ ++ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBeqz); ++ for (size_t i = 0; i < nr_test_cases; ++i) { ++ uint64_t res = run_beqz(tc[i].value, tc[i].offset); ++ CHECK_EQ(tc[i].expected_res, res); ++ } ++} ++ ++uint64_t run_bnez_b(int64_t value, int32_t offset) { ++ // bnez, b. ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ Label main_block, L; ++ __ li(a2, 0l); ++ __ b(&main_block); ++ // Block 1 ++ __ addi_d(a2, a2, 0x1); ++ __ addi_d(a2, a2, 0x2); ++ __ b(5); ++ ++ // Block 2 ++ __ addi_d(a2, a2, 0x10); ++ __ addi_d(a2, a2, 0x20); ++ __ b(2); ++ ++ // Block 3 (Main) ++ __ bind(&main_block); ++ __ bnez(a0, offset); ++ __ bind(&L); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ // Block 4 ++ __ addi_d(a2, a2, 0x100); ++ __ addi_d(a2, a2, 0x200); ++ __ b(-4); ++ ++ // Block 5 ++ __ addi_d(a2, a2, 0x300); ++ __ addi_d(a2, a2, 0x400); ++ __ b(-7); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ ++ auto f = GeneratedCode::FromCode(*code); ++ uint64_t res = reinterpret_cast(f.Call(value, 0, 0, 0, 0)); ++ ++ return res; ++} ++ ++TEST(BNEZ_B) { ++ CcTest::InitializeVM(); ++ struct TestCaseBnez { ++ int64_t value; ++ int32_t offset; ++ uint64_t expected_res; ++ }; ++ ++ // clang-format off ++ struct TestCaseBnez tc[] = { ++ // value, offset, expected_res ++ { 1, -6, 0x3 }, ++ { -2, -3, 0x30 }, ++ { 3, 3, 0x300 }, ++ { -4, 6, 0x700 }, ++ { 0, 6, 0 }, ++ }; ++ // clang-format on ++ ++ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBnez); ++ for (size_t i = 0; i < nr_test_cases; ++i) { ++ uint64_t res = run_bnez_b(tc[i].value, tc[i].offset); ++ CHECK_EQ(tc[i].expected_res, res); ++ } ++} ++ ++uint64_t run_bl(int32_t offset) { ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ Label main_block; ++ __ li(a2, 0l); ++ __ push(ra); // push is implemented by two instructions, addi_d and st_d ++ __ b(&main_block); ++ ++ // Block 1 ++ __ addi_d(a2, a2, 0x1); ++ __ addi_d(a2, a2, 0x2); ++ __ jirl(zero_reg, ra, 0); ++ ++ // Block 2 ++ __ addi_d(a2, a2, 0x10); ++ __ addi_d(a2, a2, 0x20); ++ __ jirl(zero_reg, ra, 0); ++ ++ // Block 3 (Main) ++ __ bind(&main_block); ++ __ bl(offset); ++ __ or_(a0, a2, zero_reg); ++ __ pop(ra); // pop is implemented by two instructions, ld_d and addi_d. 
++ __ jirl(zero_reg, ra, 0); ++ ++ // Block 4 ++ __ addi_d(a2, a2, 0x100); ++ __ addi_d(a2, a2, 0x200); ++ __ jirl(zero_reg, ra, 0); ++ ++ // Block 5 ++ __ addi_d(a2, a2, 0x300); ++ __ addi_d(a2, a2, 0x400); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ ++ auto f = GeneratedCode::FromCode(*code); ++ uint64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); ++ ++ return res; ++} ++ ++TEST(BL) { ++ CcTest::InitializeVM(); ++ struct TestCaseBl { ++ int32_t offset; ++ uint64_t expected_res; ++ }; ++ ++ // clang-format off ++ struct TestCaseBl tc[] = { ++ // offset, expected_res ++ { -6, 0x3 }, ++ { -3, 0x30 }, ++ { 5, 0x300 }, ++ { 8, 0x700 }, ++ }; ++ // clang-format on ++ ++ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBl); ++ for (size_t i = 0; i < nr_test_cases; ++i) { ++ uint64_t res = run_bl(tc[i].offset); ++ CHECK_EQ(tc[i].expected_res, res); ++ } ++} ++ ++TEST(PCADD) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ Label exit, error; ++ __ push(ra); ++ ++ // pcaddi ++ __ li(a4, 0x1FFFFC); ++ __ li(a5, 0); ++ __ li(a6, static_cast(0xFFE00000)); ++ ++ __ bl(1); ++ __ pcaddi(a3, 0x7FFFF); ++ __ add_d(a2, ra, a4); ++ __ Branch(&error, ne, a2, Operand(a3)); ++ ++ __ bl(1); ++ __ pcaddi(a3, 0); ++ __ add_d(a2, ra, a5); ++ __ Branch(&error, ne, a2, Operand(a3)); ++ ++ __ bl(1); ++ __ pcaddi(a3, 0x80000); ++ __ add_d(a2, ra, a6); ++ __ Branch(&error, ne, a2, Operand(a3)); ++ ++ // pcaddu12i ++ __ li(a4, 0x7FFFF000); ++ __ li(a5, 0); ++ __ li(a6, static_cast(0x80000000)); ++ ++ __ bl(1); ++ __ pcaddu12i(a2, 0x7FFFF); ++ __ add_d(a3, ra, a4); ++ __ Branch(&error, ne, a2, Operand(a3)); ++ __ bl(1); ++ __ pcaddu12i(a2, 0); ++ __ add_d(a3, ra, a5); ++ __ Branch(&error, ne, a2, Operand(a3)); ++ __ bl(1); ++ __ pcaddu12i(a2, 0x80000); ++ __ add_d(a3, ra, a6); ++ __ Branch(&error, ne, a2, Operand(a3)); ++ ++ // pcaddu18i ++ __ li(a4, 0x1FFFFC0000); ++ __ li(a5, 0); ++ __ li(a6, static_cast(0xFFFFFFE000000000)); ++ ++ __ bl(1); ++ __ pcaddu18i(a2, 0x7FFFF); ++ __ add_d(a3, ra, a4); ++ __ Branch(&error, ne, a2, Operand(a3)); ++ ++ __ bl(1); ++ __ pcaddu18i(a2, 0); ++ __ add_d(a3, ra, a5); ++ __ Branch(&error, ne, a2, Operand(a3)); ++ ++ __ bl(1); ++ __ pcaddu18i(a2, 0x80000); ++ __ add_d(a3, ra, a6); ++ __ Branch(&error, ne, a2, Operand(a3)); ++ ++ // pcalau12i ++ __ li(a4, 0x7FFFF000); ++ __ li(a5, 0); ++ __ li(a6, static_cast(0x80000000)); ++ __ li(a7, static_cast(0xFFFFFFFFFFFFF000)); ++ ++ __ bl(1); ++ __ pcalau12i(a3, 0x7FFFF); ++ __ add_d(a2, ra, a4); ++ __ and_(t0, a2, a7); ++ __ and_(t1, a3, a7); ++ __ Branch(&error, ne, t0, Operand(t1)); ++ ++ __ bl(1); ++ __ pcalau12i(a3, 0); ++ __ add_d(a2, ra, a5); ++ __ and_(t0, a2, a7); ++ __ and_(t1, a3, a7); ++ __ Branch(&error, ne, t0, Operand(t1)); ++ ++ __ bl(1); ++ __ pcalau12i(a2, 0x80000); ++ __ add_d(a3, ra, a6); ++ __ and_(t0, a2, a7); ++ __ and_(t1, a3, a7); ++ __ Branch(&error, ne, t0, Operand(t1)); ++ ++ __ li(a0, 0x31415926); ++ __ b(&exit); ++ ++ __ bind(&error); ++ __ li(a0, 0x666); ++ ++ __ bind(&exit); ++ __ pop(ra); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ int64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); ++ ++ 
CHECK_EQ(0x31415926L, res); ++} ++ ++uint64_t run_jirl(int16_t offset) { ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ Label main_block; ++ __ li(a2, 0l); ++ __ push(ra); ++ __ b(&main_block); ++ ++ // Block 1 ++ __ addi_d(a2, a2, 0x1); ++ __ addi_d(a2, a2, 0x2); ++ __ jirl(zero_reg, ra, 0); ++ ++ // Block 2 ++ __ addi_d(a2, a2, 0x10); ++ __ addi_d(a2, a2, 0x20); ++ __ jirl(zero_reg, ra, 0); ++ ++ // Block 3 (Main) ++ __ bind(&main_block); ++ __ pcaddi(a3, 1); ++ __ jirl(ra, a3, offset); ++ __ or_(a0, a2, zero_reg); ++ __ pop(ra); // pop is implemented by two instructions, ld_d and addi_d. ++ __ jirl(zero_reg, ra, 0); ++ ++ // Block 4 ++ __ addi_d(a2, a2, 0x100); ++ __ addi_d(a2, a2, 0x200); ++ __ jirl(zero_reg, ra, 0); ++ ++ // Block 5 ++ __ addi_d(a2, a2, 0x300); ++ __ addi_d(a2, a2, 0x400); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ ++ auto f = GeneratedCode::FromCode(*code); ++ uint64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); ++ ++ return res; ++} ++ ++TEST(JIRL) { ++ CcTest::InitializeVM(); ++ struct TestCaseJirl { ++ int16_t offset; ++ uint64_t expected_res; ++ }; ++ ++ // clang-format off ++ struct TestCaseJirl tc[] = { ++ // offset, expected_res ++ { -7, 0x3 }, ++ { -4, 0x30 }, ++ { 5, 0x300 }, ++ { 8, 0x700 }, ++ }; ++ // clang-format on ++ ++ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseJirl); ++ for (size_t i = 0; i < nr_test_cases; ++i) { ++ uint64_t res = run_jirl(tc[i].offset); ++ CHECK_EQ(tc[i].expected_res, res); ++ } ++} ++ ++TEST(LA12) { ++ // Test floating point calculate instructions. ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ struct T { ++ double a; ++ double b; ++ double c; ++ double d; ++ double e; ++ double f; ++ double result_fadd_d; ++ double result_fsub_d; ++ double result_fmul_d; ++ double result_fdiv_d; ++ double result_fmadd_d; ++ double result_fmsub_d; ++ double result_fnmadd_d; ++ double result_fnmsub_d; ++ double result_fsqrt_d; ++ double result_frecip_d; ++ double result_frsqrt_d; ++ double result_fscaleb_d; ++ double result_flogb_d; ++ double result_fcopysign_d; ++ double result_fclass_d; ++ }; ++ T t; ++ ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ // Double precision floating point instructions. 
++ __ Fld_d(f8, MemOperand(a0, offsetof(T, a))); ++ __ Fld_d(f9, MemOperand(a0, offsetof(T, b))); ++ ++ __ fneg_d(f10, f8); ++ __ fadd_d(f11, f9, f10); ++ __ Fst_d(f11, MemOperand(a0, offsetof(T, result_fadd_d))); ++ __ fabs_d(f11, f11); ++ __ fsub_d(f12, f11, f9); ++ __ Fst_d(f12, MemOperand(a0, offsetof(T, result_fsub_d))); ++ ++ __ Fld_d(f13, MemOperand(a0, offsetof(T, c))); ++ __ Fld_d(f14, MemOperand(a0, offsetof(T, d))); ++ __ Fld_d(f15, MemOperand(a0, offsetof(T, e))); ++ ++ __ fmin_d(f16, f13, f14); ++ __ fmul_d(f17, f15, f16); ++ __ Fst_d(f17, MemOperand(a0, offsetof(T, result_fmul_d))); ++ __ fmax_d(f18, f13, f14); ++ __ fdiv_d(f19, f15, f18); ++ __ Fst_d(f19, MemOperand(a0, offsetof(T, result_fdiv_d))); ++ ++ __ fmina_d(f16, f13, f14); ++ __ fmadd_d(f18, f17, f15, f16); ++ __ Fst_d(f18, MemOperand(a0, offsetof(T, result_fmadd_d))); ++ __ fnmadd_d(f19, f17, f15, f16); ++ __ Fst_d(f19, MemOperand(a0, offsetof(T, result_fnmadd_d))); ++ __ fmaxa_d(f16, f13, f14); ++ __ fmsub_d(f20, f17, f15, f16); ++ __ Fst_d(f20, MemOperand(a0, offsetof(T, result_fmsub_d))); ++ __ fnmsub_d(f21, f17, f15, f16); ++ __ Fst_d(f21, MemOperand(a0, offsetof(T, result_fnmsub_d))); ++ ++ __ Fld_d(f8, MemOperand(a0, offsetof(T, f))); ++ __ fsqrt_d(f10, f8); ++ __ Fst_d(f10, MemOperand(a0, offsetof(T, result_fsqrt_d))); ++ //__ frecip_d(f11, f10); ++ //__ frsqrt_d(f12, f8); ++ //__ Fst_d(f11, MemOperand(a0, offsetof(T, result_frecip_d))); ++ //__ Fst_d(f12, MemOperand(a0, offsetof(T, result_frsqrt_d))); ++ ++ /*__ fscaleb_d(f16, f13, f15); ++ __ flogb_d(f17, f15); ++ __ fcopysign_d(f18, f8, f9); ++ __ fclass_d(f19, f9); ++ __ Fst_d(f16, MemOperand(a0, offsetof(T, result_fscaleb_d))); ++ __ Fst_d(f17, MemOperand(a0, offsetof(T, result_flogb_d))); ++ __ Fst_d(f18, MemOperand(a0, offsetof(T, result_fcopysign_d))); ++ __ Fst_d(f19, MemOperand(a0, offsetof(T, result_fclass_d)));*/ ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ // Double test values. 
++ t.a = 1.5e14; ++ t.b = -2.75e11; ++ t.c = 1.5; ++ t.d = -2.75; ++ t.e = 120.0; ++ t.f = 120.44; ++ f.Call(&t, 0, 0, 0, 0); ++ ++ CHECK_EQ(static_cast(-1.502750e14), t.result_fadd_d); ++ CHECK_EQ(static_cast(1.505500e14), t.result_fsub_d); ++ CHECK_EQ(static_cast(-3.300000e02), t.result_fmul_d); ++ CHECK_EQ(static_cast(8.000000e01), t.result_fdiv_d); ++ CHECK_EQ(static_cast(-3.959850e04), t.result_fmadd_d); ++ CHECK_EQ(static_cast(-3.959725e04), t.result_fmsub_d); ++ CHECK_EQ(static_cast(3.959850e04), t.result_fnmadd_d); ++ CHECK_EQ(static_cast(3.959725e04), t.result_fnmsub_d); ++ CHECK_EQ(static_cast(10.97451593465515908537), t.result_fsqrt_d); ++ // CHECK_EQ(static_cast( 8.164965e-08), t.result_frecip_d); ++ // CHECK_EQ(static_cast( 8.164966e-08), t.result_frsqrt_d); ++ // CHECK_EQ(static_cast(), t.result_fscaleb_d); ++ // CHECK_EQ(static_cast( 6.906891), t.result_flogb_d); ++ // CHECK_EQ(static_cast( 2.75e11), t.result_fcopysign_d); ++ // CHECK_EQ(static_cast(), t.result_fclass_d); ++} ++ ++TEST(LA13) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ struct T { ++ float a; ++ float b; ++ float c; ++ float d; ++ float e; ++ float result_fadd_s; ++ float result_fsub_s; ++ float result_fmul_s; ++ float result_fdiv_s; ++ float result_fmadd_s; ++ float result_fmsub_s; ++ float result_fnmadd_s; ++ float result_fnmsub_s; ++ float result_fsqrt_s; ++ float result_frecip_s; ++ float result_frsqrt_s; ++ float result_fscaleb_s; ++ float result_flogb_s; ++ float result_fcopysign_s; ++ float result_fclass_s; ++ }; ++ T t; ++ ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ // Float precision floating point instructions. ++ __ Fld_s(f8, MemOperand(a0, offsetof(T, a))); ++ __ Fld_s(f9, MemOperand(a0, offsetof(T, b))); ++ ++ __ fneg_s(f10, f8); ++ __ fadd_s(f11, f9, f10); ++ __ Fst_s(f11, MemOperand(a0, offsetof(T, result_fadd_s))); ++ __ fabs_s(f11, f11); ++ __ fsub_s(f12, f11, f9); ++ __ Fst_s(f12, MemOperand(a0, offsetof(T, result_fsub_s))); ++ ++ __ Fld_s(f13, MemOperand(a0, offsetof(T, c))); ++ __ Fld_s(f14, MemOperand(a0, offsetof(T, d))); ++ __ Fld_s(f15, MemOperand(a0, offsetof(T, e))); ++ ++ __ fmin_s(f16, f13, f14); ++ __ fmul_s(f17, f15, f16); ++ __ Fst_s(f17, MemOperand(a0, offsetof(T, result_fmul_s))); ++ __ fmax_s(f18, f13, f14); ++ __ fdiv_s(f19, f15, f18); ++ __ Fst_s(f19, MemOperand(a0, offsetof(T, result_fdiv_s))); ++ ++ __ fmina_s(f16, f13, f14); ++ __ fmadd_s(f18, f17, f15, f16); ++ __ Fst_s(f18, MemOperand(a0, offsetof(T, result_fmadd_s))); ++ __ fnmadd_s(f19, f17, f15, f16); ++ __ Fst_s(f19, MemOperand(a0, offsetof(T, result_fnmadd_s))); ++ __ fmaxa_s(f16, f13, f14); ++ __ fmsub_s(f20, f17, f15, f16); ++ __ Fst_s(f20, MemOperand(a0, offsetof(T, result_fmsub_s))); ++ __ fnmsub_s(f21, f17, f15, f16); ++ __ Fst_s(f21, MemOperand(a0, offsetof(T, result_fnmsub_s))); ++ ++ __ fsqrt_s(f10, f8); ++ //__ frecip_s(f11, f10); ++ //__ frsqrt_s(f12, f8); ++ __ Fst_s(f10, MemOperand(a0, offsetof(T, result_fsqrt_s))); ++ //__ Fst_s(f11, MemOperand(a0, offsetof(T, result_frecip_s))); ++ //__ Fst_s(f12, MemOperand(a0, offsetof(T, result_frsqrt_s))); ++ ++ /*__ fscaleb_s(f16, f13, f15); ++ __ flogb_s(f17, f15); ++ __ fcopysign_s(f18, f8, f9); ++ __ fclass_s(f19, f9); ++ __ Fst_s(f16, MemOperand(a0, offsetof(T, result_fscaleb_s))); ++ __ Fst_s(f17, MemOperand(a0, offsetof(T, result_flogb_s))); ++ __ Fst_s(f18, MemOperand(a0, offsetof(T, result_fcopysign_s))); ++ __ Fst_s(f19, MemOperand(a0, offsetof(T, 
result_fclass_s)));*/ ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ // Float test values. ++ t.a = 1.5e6; ++ t.b = -2.75e4; ++ t.c = 1.5; ++ t.d = -2.75; ++ t.e = 120.0; ++ f.Call(&t, 0, 0, 0, 0); ++ ++ CHECK_EQ(static_cast(-1.527500e06), t.result_fadd_s); ++ CHECK_EQ(static_cast(1.555000e06), t.result_fsub_s); ++ CHECK_EQ(static_cast(-3.300000e02), t.result_fmul_s); ++ CHECK_EQ(static_cast(8.000000e01), t.result_fdiv_s); ++ CHECK_EQ(static_cast(-3.959850e04), t.result_fmadd_s); ++ CHECK_EQ(static_cast(-3.959725e04), t.result_fmsub_s); ++ CHECK_EQ(static_cast(3.959850e04), t.result_fnmadd_s); ++ CHECK_EQ(static_cast(3.959725e04), t.result_fnmsub_s); ++ CHECK_EQ(static_cast(1224.744873), t.result_fsqrt_s); ++ // CHECK_EQ(static_cast( 8.164966e-04), t.result_frecip_s); ++ // CHECK_EQ(static_cast( 8.164966e-04), t.result_frsqrt_s); ++ // CHECK_EQ(static_cast(), t.result_fscaleb_s); ++ // CHECK_EQ(static_cast( 6.906890), t.result_flogb_s); ++ // CHECK_EQ(static_cast( 2.75e4), t.result_fcopysign_s); ++ // CHECK_EQ(static_cast(), t.result_fclass_s); ++} ++ ++TEST(FCMP_COND) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct TestFloat { ++ double dTrue; ++ double dFalse; ++ double dOp1; ++ double dOp2; ++ double dCaf; ++ double dCun; ++ double dCeq; ++ double dCueq; ++ double dClt; ++ double dCult; ++ double dCle; ++ double dCule; ++ double dCne; ++ double dCor; ++ double dCune; ++ double dSaf; ++ double dSun; ++ double dSeq; ++ double dSueq; ++ double dSlt; ++ double dSult; ++ double dSle; ++ double dSule; ++ double dSne; ++ double dSor; ++ double dSune; ++ float fTrue; ++ float fFalse; ++ float fOp1; ++ float fOp2; ++ float fCaf; ++ float fCun; ++ float fCeq; ++ float fCueq; ++ float fClt; ++ float fCult; ++ float fCle; ++ float fCule; ++ float fCne; ++ float fCor; ++ float fCune; ++ float fSaf; ++ float fSun; ++ float fSeq; ++ float fSueq; ++ float fSlt; ++ float fSult; ++ float fSle; ++ float fSule; ++ float fSne; ++ float fSor; ++ float fSune; ++ }; ++ ++ TestFloat test; ++ ++ __ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, dOp1))); ++ __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, dOp2))); ++ ++ __ Fld_s(f10, MemOperand(a0, offsetof(TestFloat, fOp1))); ++ __ Fld_s(f11, MemOperand(a0, offsetof(TestFloat, fOp2))); ++ ++ __ Fld_d(f12, MemOperand(a0, offsetof(TestFloat, dFalse))); ++ __ Fld_d(f13, MemOperand(a0, offsetof(TestFloat, dTrue))); ++ ++ __ Fld_s(f14, MemOperand(a0, offsetof(TestFloat, fFalse))); ++ __ Fld_s(f15, MemOperand(a0, offsetof(TestFloat, fTrue))); ++ ++ __ fcmp_cond_d(CAF, f8, f9, FCC0); ++ __ fcmp_cond_s(CAF, f10, f11, FCC1); ++ __ fsel(FCC0, f16, f12, f13); ++ __ fsel(FCC1, f17, f14, f15); ++ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCaf))); ++ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCaf))); ++ ++ __ fcmp_cond_d(CUN, f8, f9, FCC0); ++ __ fcmp_cond_s(CUN, f10, f11, FCC1); ++ __ fsel(FCC0, f16, f12, f13); ++ __ fsel(FCC1, f17, f14, f15); ++ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCun))); ++ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCun))); ++ ++ __ fcmp_cond_d(CEQ, f8, f9, FCC0); ++ __ fcmp_cond_s(CEQ, f10, f11, FCC1); ++ __ fsel(FCC0, f16, f12, f13); ++ __ fsel(FCC1, f17, f14, f15); ++ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCeq))); ++ 
__ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCeq))); ++ ++ __ fcmp_cond_d(CUEQ, f8, f9, FCC0); ++ __ fcmp_cond_s(CUEQ, f10, f11, FCC1); ++ __ fsel(FCC0, f16, f12, f13); ++ __ fsel(FCC1, f17, f14, f15); ++ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCueq))); ++ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCueq))); ++ ++ __ fcmp_cond_d(CLT, f8, f9, FCC0); ++ __ fcmp_cond_s(CLT, f10, f11, FCC1); ++ __ fsel(FCC0, f16, f12, f13); ++ __ fsel(FCC1, f17, f14, f15); ++ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dClt))); ++ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fClt))); ++ ++ __ fcmp_cond_d(CULT, f8, f9, FCC0); ++ __ fcmp_cond_s(CULT, f10, f11, FCC1); ++ __ fsel(FCC0, f16, f12, f13); ++ __ fsel(FCC1, f17, f14, f15); ++ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCult))); ++ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCult))); ++ ++ __ fcmp_cond_d(CLE, f8, f9, FCC0); ++ __ fcmp_cond_s(CLE, f10, f11, FCC1); ++ __ fsel(FCC0, f16, f12, f13); ++ __ fsel(FCC1, f17, f14, f15); ++ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCle))); ++ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCle))); ++ ++ __ fcmp_cond_d(CULE, f8, f9, FCC0); ++ __ fcmp_cond_s(CULE, f10, f11, FCC1); ++ __ fsel(FCC0, f16, f12, f13); ++ __ fsel(FCC1, f17, f14, f15); ++ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCule))); ++ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCule))); ++ ++ __ fcmp_cond_d(CNE, f8, f9, FCC0); ++ __ fcmp_cond_s(CNE, f10, f11, FCC1); ++ __ fsel(FCC0, f16, f12, f13); ++ __ fsel(FCC1, f17, f14, f15); ++ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCne))); ++ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCne))); ++ ++ __ fcmp_cond_d(COR, f8, f9, FCC0); ++ __ fcmp_cond_s(COR, f10, f11, FCC1); ++ __ fsel(FCC0, f16, f12, f13); ++ __ fsel(FCC1, f17, f14, f15); ++ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCor))); ++ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCor))); ++ ++ __ fcmp_cond_d(CUNE, f8, f9, FCC0); ++ __ fcmp_cond_s(CUNE, f10, f11, FCC1); ++ __ fsel(FCC0, f16, f12, f13); ++ __ fsel(FCC1, f17, f14, f15); ++ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCune))); ++ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCune))); ++ ++ /* __ fcmp_cond_d(SAF, f8, f9, FCC0); ++ __ fcmp_cond_s(SAF, f10, f11, FCC1); ++ __ fsel(FCC0, f16, f12, f13); ++ __ fsel(FCC1, f17, f14, f15); ++ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSaf))); ++ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSaf))); ++ ++ __ fcmp_cond_d(SUN, f8, f9, FCC0); ++ __ fcmp_cond_s(SUN, f10, f11, FCC1); ++ __ fsel(FCC0, f16, f12, f13); ++ __ fsel(FCC1, f17, f14, f15); ++ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSun))); ++ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSun))); ++ ++ __ fcmp_cond_d(SEQ, f8, f9, FCC0); ++ __ fcmp_cond_s(SEQ, f10, f11, FCC1); ++ __ fsel(FCC0, f16, f12, f13); ++ __ fsel(FCC1, f17, f14, f15); ++ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSeq))); ++ __ Fst_f(f17, MemOperand(a0, offsetof(TestFloat, fSeq))); ++ ++ __ fcmp_cond_d(SUEQ, f8, f9, FCC0); ++ __ fcmp_cond_s(SUEQ, f10, f11, FCC1); ++ __ fsel(FCC0, f16, f12, f13); ++ __ fsel(FCC1, f17, f14, f15); ++ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSueq))); ++ __ Fst_f(f17, MemOperand(a0, offsetof(TestFloat, fSueq))); ++ ++ __ fcmp_cond_d(SLT, f8, f9, FCC0); ++ __ fcmp_cond_s(SLT, f10, f11, FCC1); ++ __ fsel(f16, f12, f13, FCC0); ++ __ fsel(f17, f14, f15, FCC1); ++ __ Fld_d(f16, MemOperand(a0, offsetof(TestFloat, dSlt))); ++ __ Fst_d(f17, MemOperand(a0, 
offsetof(TestFloat, fSlt))); ++ ++ __ fcmp_cond_d(SULT, f8, f9, FCC0); ++ __ fcmp_cond_s(SULT, f10, f11, FCC1); ++ __ fsel(FCC0, f16, f12, f13); ++ __ fsel(FCC1, f17, f14, f15); ++ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSult))); ++ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSult))); ++ ++ __ fcmp_cond_d(SLE, f8, f9, FCC0); ++ __ fcmp_cond_s(SLE, f10, f11, FCC1); ++ __ fsel(FCC0, f16, f12, f13); ++ __ fsel(FCC1, f17, f14, f15); ++ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSle))); ++ __ Fst_f(f17, MemOperand(a0, offsetof(TestFloat, fSle))); ++ ++ __ fcmp_cond_d(SULE, f8, f9, FCC0); ++ __ fcmp_cond_s(SULE, f10, f11, FCC1); ++ __ fsel(FCC0, f16, f12, f13); ++ __ fsel(FCC1, f17, f14, f15); ++ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSule))); ++ __ Fst_f(f17, MemOperand(a0, offsetof(TestFloat, fSule))); ++ ++ __ fcmp_cond_d(SNE, f8, f9, FCC0); ++ __ fcmp_cond_s(SNE, f10, f11, FCC1); ++ __ fsel(FCC0, f16, f12, f13); ++ __ fsel(FCC1, f17, f14, f15); ++ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSne))); ++ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSne))); ++ ++ __ fcmp_cond_d(SOR, f8, f9, FCC0); ++ __ fcmp_cond_s(SOR, f10, f11, FCC1); ++ __ fsel(FCC0, f16, f12, f13); ++ __ fsel(FCC1, f17, f14, f15); ++ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSor))); ++ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSor))); ++ ++ __ fcmp_cond_d(SUNE, f8, f9, FCC0); ++ __ fcmp_cond_s(SUNE, f10, f11, FCC1); ++ __ fsel(FCC0, f16, f12, f13); ++ __ fsel(FCC1, f17, f14, f15); ++ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSune))); ++ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSune)));*/ ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ test.dTrue = 1234.0; ++ test.dFalse = 0.0; ++ test.fTrue = 12.0; ++ test.fFalse = 0.0; ++ ++ test.dOp1 = 2.0; ++ test.dOp2 = 3.0; ++ test.fOp1 = 2.0; ++ test.fOp2 = 3.0; ++ f.Call(&test, 0, 0, 0, 0); ++ ++ CHECK_EQ(test.dCaf, test.dFalse); ++ CHECK_EQ(test.fCaf, test.fFalse); ++ CHECK_EQ(test.dCun, test.dFalse); ++ CHECK_EQ(test.fCun, test.fFalse); ++ CHECK_EQ(test.dCeq, test.dFalse); ++ CHECK_EQ(test.fCeq, test.fFalse); ++ CHECK_EQ(test.dCueq, test.dFalse); ++ CHECK_EQ(test.fCueq, test.fFalse); ++ CHECK_EQ(test.dClt, test.dTrue); ++ CHECK_EQ(test.fClt, test.fTrue); ++ CHECK_EQ(test.dCult, test.dTrue); ++ CHECK_EQ(test.fCult, test.fTrue); ++ CHECK_EQ(test.dCle, test.dTrue); ++ CHECK_EQ(test.fCle, test.fTrue); ++ CHECK_EQ(test.dCule, test.dTrue); ++ CHECK_EQ(test.fCule, test.fTrue); ++ CHECK_EQ(test.dCne, test.dTrue); ++ CHECK_EQ(test.fCne, test.fTrue); ++ CHECK_EQ(test.dCor, test.dTrue); ++ CHECK_EQ(test.fCor, test.fTrue); ++ CHECK_EQ(test.dCune, test.dTrue); ++ CHECK_EQ(test.fCune, test.fTrue); ++ /* CHECK_EQ(test.dSaf, test.dFalse); ++ CHECK_EQ(test.fSaf, test.fFalse); ++ CHECK_EQ(test.dSun, test.dFalse); ++ CHECK_EQ(test.fSun, test.fFalse); ++ CHECK_EQ(test.dSeq, test.dFalse); ++ CHECK_EQ(test.fSeq, test.fFalse); ++ CHECK_EQ(test.dSueq, test.dFalse); ++ CHECK_EQ(test.fSueq, test.fFalse); ++ CHECK_EQ(test.dClt, test.dTrue); ++ CHECK_EQ(test.fClt, test.fTrue); ++ CHECK_EQ(test.dCult, test.dTrue); ++ CHECK_EQ(test.fCult, test.fTrue); ++ CHECK_EQ(test.dSle, test.dTrue); ++ CHECK_EQ(test.fSle, test.fTrue); ++ CHECK_EQ(test.dSule, test.dTrue); ++ CHECK_EQ(test.fSule, test.fTrue); ++ CHECK_EQ(test.dSne, test.dTrue); ++ CHECK_EQ(test.fSne, test.fTrue); ++ 
CHECK_EQ(test.dSor, test.dTrue); ++ CHECK_EQ(test.fSor, test.fTrue); ++ CHECK_EQ(test.dSune, test.dTrue); ++ CHECK_EQ(test.fSune, test.fTrue);*/ ++ ++ test.dOp1 = std::numeric_limits::max(); ++ test.dOp2 = std::numeric_limits::min(); ++ test.fOp1 = std::numeric_limits::min(); ++ test.fOp2 = -std::numeric_limits::max(); ++ f.Call(&test, 0, 0, 0, 0); ++ ++ CHECK_EQ(test.dCaf, test.dFalse); ++ CHECK_EQ(test.fCaf, test.fFalse); ++ CHECK_EQ(test.dCun, test.dFalse); ++ CHECK_EQ(test.fCun, test.fFalse); ++ CHECK_EQ(test.dCeq, test.dFalse); ++ CHECK_EQ(test.fCeq, test.fFalse); ++ CHECK_EQ(test.dCueq, test.dFalse); ++ CHECK_EQ(test.fCueq, test.fFalse); ++ CHECK_EQ(test.dClt, test.dFalse); ++ CHECK_EQ(test.fClt, test.fFalse); ++ CHECK_EQ(test.dCult, test.dFalse); ++ CHECK_EQ(test.fCult, test.fFalse); ++ CHECK_EQ(test.dCle, test.dFalse); ++ CHECK_EQ(test.fCle, test.fFalse); ++ CHECK_EQ(test.dCule, test.dFalse); ++ CHECK_EQ(test.fCule, test.fFalse); ++ CHECK_EQ(test.dCne, test.dTrue); ++ CHECK_EQ(test.fCne, test.fTrue); ++ CHECK_EQ(test.dCor, test.dTrue); ++ CHECK_EQ(test.fCor, test.fTrue); ++ CHECK_EQ(test.dCune, test.dTrue); ++ CHECK_EQ(test.fCune, test.fTrue); ++ /* CHECK_EQ(test.dSaf, test.dFalse); ++ CHECK_EQ(test.fSaf, test.fFalse); ++ CHECK_EQ(test.dSun, test.dFalse); ++ CHECK_EQ(test.fSun, test.fFalse); ++ CHECK_EQ(test.dSeq, test.dFalse); ++ CHECK_EQ(test.fSeq, test.fFalse); ++ CHECK_EQ(test.dSueq, test.dFalse); ++ CHECK_EQ(test.fSueq, test.fFalse); ++ CHECK_EQ(test.dSlt, test.dFalse); ++ CHECK_EQ(test.fSlt, test.fFalse); ++ CHECK_EQ(test.dSult, test.dFalse); ++ CHECK_EQ(test.fSult, test.fFalse); ++ CHECK_EQ(test.dSle, test.dFalse); ++ CHECK_EQ(test.fSle, test.fFalse); ++ CHECK_EQ(test.dSule, test.dFalse); ++ CHECK_EQ(test.fSule, test.fFalse); ++ CHECK_EQ(test.dSne, test.dTrue); ++ CHECK_EQ(test.fSne, test.fTrue); ++ CHECK_EQ(test.dSor, test.dTrue); ++ CHECK_EQ(test.fSor, test.fTrue); ++ CHECK_EQ(test.dSune, test.dTrue); ++ CHECK_EQ(test.fSune, test.fTrue);*/ ++ ++ test.dOp1 = std::numeric_limits::quiet_NaN(); ++ test.dOp2 = 0.0; ++ test.fOp1 = std::numeric_limits::quiet_NaN(); ++ test.fOp2 = 0.0; ++ f.Call(&test, 0, 0, 0, 0); ++ ++ CHECK_EQ(test.dCaf, test.dFalse); ++ CHECK_EQ(test.fCaf, test.fFalse); ++ CHECK_EQ(test.dCun, test.dTrue); ++ CHECK_EQ(test.fCun, test.fTrue); ++ CHECK_EQ(test.dCeq, test.dFalse); ++ CHECK_EQ(test.fCeq, test.fFalse); ++ CHECK_EQ(test.dCueq, test.dTrue); ++ CHECK_EQ(test.fCueq, test.fTrue); ++ CHECK_EQ(test.dClt, test.dFalse); ++ CHECK_EQ(test.fClt, test.fFalse); ++ CHECK_EQ(test.dCult, test.dTrue); ++ CHECK_EQ(test.fCult, test.fTrue); ++ CHECK_EQ(test.dCle, test.dFalse); ++ CHECK_EQ(test.fCle, test.fFalse); ++ CHECK_EQ(test.dCule, test.dTrue); ++ CHECK_EQ(test.fCule, test.fTrue); ++ CHECK_EQ(test.dCne, test.dFalse); ++ CHECK_EQ(test.fCne, test.fFalse); ++ CHECK_EQ(test.dCor, test.dFalse); ++ CHECK_EQ(test.fCor, test.fFalse); ++ CHECK_EQ(test.dCune, test.dTrue); ++ CHECK_EQ(test.fCune, test.fTrue); ++ /* CHECK_EQ(test.dSaf, test.dTrue); ++ CHECK_EQ(test.fSaf, test.fTrue); ++ CHECK_EQ(test.dSun, test.dTrue); ++ CHECK_EQ(test.fSun, test.fTrue); ++ CHECK_EQ(test.dSeq, test.dFalse); ++ CHECK_EQ(test.fSeq, test.fFalse); ++ CHECK_EQ(test.dSueq, test.dTrue); ++ CHECK_EQ(test.fSueq, test.fTrue); ++ CHECK_EQ(test.dSlt, test.dFalse); ++ CHECK_EQ(test.fSlt, test.fFalse); ++ CHECK_EQ(test.dSult, test.dTrue); ++ CHECK_EQ(test.fSult, test.fTrue); ++ CHECK_EQ(test.dSle, test.dFalse); ++ CHECK_EQ(test.fSle, test.fFalse); ++ CHECK_EQ(test.dSule, test.dTrue); ++ CHECK_EQ(test.fSule, 
test.fTrue); ++ CHECK_EQ(test.dSne, test.dFalse); ++ CHECK_EQ(test.fSne, test.fFalse); ++ CHECK_EQ(test.dSor, test.dFalse); ++ CHECK_EQ(test.fSor, test.fFalse); ++ CHECK_EQ(test.dSune, test.dTrue); ++ CHECK_EQ(test.fSune, test.fTrue);*/ ++} ++ ++TEST(FCVT) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct TestFloat { ++ float fcvt_d_s_in; ++ double fcvt_s_d_in; ++ double fcvt_d_s_out; ++ float fcvt_s_d_out; ++ int fcsr; ++ }; ++ TestFloat test; ++ __ xor_(a4, a4, a4); ++ __ xor_(a5, a5, a5); ++ __ Ld_w(a4, MemOperand(a0, offsetof(TestFloat, fcsr))); ++ __ movfcsr2gr(a5); ++ __ movgr2fcsr(a4); ++ __ Fld_s(f8, MemOperand(a0, offsetof(TestFloat, fcvt_d_s_in))); ++ __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, fcvt_s_d_in))); ++ __ fcvt_d_s(f10, f8); ++ __ fcvt_s_d(f11, f9); ++ __ Fst_d(f10, MemOperand(a0, offsetof(TestFloat, fcvt_d_s_out))); ++ __ Fst_s(f11, MemOperand(a0, offsetof(TestFloat, fcvt_s_d_out))); ++ __ movgr2fcsr(a5); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ test.fcsr = kRoundToZero; ++ ++ test.fcvt_d_s_in = -0.51; ++ test.fcvt_s_d_in = -0.51; ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.fcvt_d_s_out, static_cast(test.fcvt_d_s_in)); ++ CHECK_EQ(test.fcvt_s_d_out, static_cast(test.fcvt_s_d_in)); ++ ++ test.fcvt_d_s_in = 0.49; ++ test.fcvt_s_d_in = 0.49; ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.fcvt_d_s_out, static_cast(test.fcvt_d_s_in)); ++ CHECK_EQ(test.fcvt_s_d_out, static_cast(test.fcvt_s_d_in)); ++ ++ test.fcvt_d_s_in = std::numeric_limits::max(); ++ test.fcvt_s_d_in = std::numeric_limits::max(); ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.fcvt_d_s_out, static_cast(test.fcvt_d_s_in)); ++ CHECK_EQ(test.fcvt_s_d_out, static_cast(test.fcvt_s_d_in)); ++ ++ test.fcvt_d_s_in = -std::numeric_limits::max(); ++ test.fcvt_s_d_in = -std::numeric_limits::max(); ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.fcvt_d_s_out, static_cast(test.fcvt_d_s_in)); ++ CHECK_EQ(test.fcvt_s_d_out, static_cast(test.fcvt_s_d_in)); ++ ++ test.fcvt_d_s_in = std::numeric_limits::min(); ++ test.fcvt_s_d_in = std::numeric_limits::min(); ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.fcvt_d_s_out, static_cast(test.fcvt_d_s_in)); ++ CHECK_EQ(test.fcvt_s_d_out, static_cast(test.fcvt_s_d_in)); ++} ++ ++TEST(FFINT) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct TestFloat { ++ int32_t ffint_s_w_in; ++ int64_t ffint_s_l_in; ++ int32_t ffint_d_w_in; ++ int64_t ffint_d_l_in; ++ float ffint_s_w_out; ++ float ffint_s_l_out; ++ double ffint_d_w_out; ++ double ffint_d_l_out; ++ int fcsr; ++ }; ++ TestFloat test; ++ __ xor_(a4, a4, a4); ++ __ xor_(a5, a5, a5); ++ __ Ld_w(a4, MemOperand(a0, offsetof(TestFloat, fcsr))); ++ __ movfcsr2gr(a5); ++ __ movgr2fcsr(a4); ++ __ Fld_s(f8, MemOperand(a0, offsetof(TestFloat, ffint_s_w_in))); ++ __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, ffint_s_l_in))); ++ __ Fld_s(f10, MemOperand(a0, offsetof(TestFloat, ffint_d_w_in))); ++ __ Fld_d(f11, MemOperand(a0, offsetof(TestFloat, ffint_d_l_in))); ++ __ ffint_s_w(f12, f8); ++ __ ffint_s_l(f13, f9); ++ __ ffint_d_w(f14, f10); ++ __ ffint_d_l(f15, f11); ++ __ Fst_s(f12, MemOperand(a0, 
offsetof(TestFloat, ffint_s_w_out))); ++ __ Fst_s(f13, MemOperand(a0, offsetof(TestFloat, ffint_s_l_out))); ++ __ Fst_d(f14, MemOperand(a0, offsetof(TestFloat, ffint_d_w_out))); ++ __ Fst_d(f15, MemOperand(a0, offsetof(TestFloat, ffint_d_l_out))); ++ __ movgr2fcsr(a5); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ test.fcsr = kRoundToZero; ++ ++ test.ffint_s_w_in = -1; ++ test.ffint_s_l_in = -1; ++ test.ffint_d_w_in = -1; ++ test.ffint_d_l_in = -1; ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.ffint_s_w_out, static_cast(test.ffint_s_w_in)); ++ CHECK_EQ(test.ffint_s_l_out, static_cast(test.ffint_s_l_in)); ++ CHECK_EQ(test.ffint_d_w_out, static_cast(test.ffint_d_w_in)); ++ CHECK_EQ(test.ffint_d_l_out, static_cast(test.ffint_d_l_in)); ++ ++ test.ffint_s_w_in = 1; ++ test.ffint_s_l_in = 1; ++ test.ffint_d_w_in = 1; ++ test.ffint_d_l_in = 1; ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.ffint_s_w_out, static_cast(test.ffint_s_w_in)); ++ CHECK_EQ(test.ffint_s_l_out, static_cast(test.ffint_s_l_in)); ++ CHECK_EQ(test.ffint_d_w_out, static_cast(test.ffint_d_w_in)); ++ CHECK_EQ(test.ffint_d_l_out, static_cast(test.ffint_d_l_in)); ++ ++ test.ffint_s_w_in = std::numeric_limits::max(); ++ test.ffint_s_l_in = std::numeric_limits::max(); ++ test.ffint_d_w_in = std::numeric_limits::max(); ++ test.ffint_d_l_in = std::numeric_limits::max(); ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.ffint_s_w_out, static_cast(test.ffint_s_w_in)); ++ CHECK_EQ(test.ffint_s_l_out, static_cast(test.ffint_s_l_in)); ++ CHECK_EQ(test.ffint_d_w_out, static_cast(test.ffint_d_w_in)); ++ CHECK_EQ(test.ffint_d_l_out, static_cast(test.ffint_d_l_in)); ++ ++ test.ffint_s_w_in = std::numeric_limits::min(); ++ test.ffint_s_l_in = std::numeric_limits::min(); ++ test.ffint_d_w_in = std::numeric_limits::min(); ++ test.ffint_d_l_in = std::numeric_limits::min(); ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.ffint_s_w_out, static_cast(test.ffint_s_w_in)); ++ CHECK_EQ(test.ffint_s_l_out, static_cast(test.ffint_s_l_in)); ++ CHECK_EQ(test.ffint_d_w_out, static_cast(test.ffint_d_w_in)); ++ CHECK_EQ(test.ffint_d_l_out, static_cast(test.ffint_d_l_in)); ++} ++ ++TEST(FTINT) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct Test { ++ double a; ++ float b; ++ int32_t c; ++ int32_t d; ++ int64_t e; ++ int64_t f; ++ int fcsr; ++ }; ++ Test test; ++ ++ const int kTableLength = 9; ++ // clang-format off ++ double inputs_d[kTableLength] = { ++ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5, ++ 2147483648.0, ++ std::numeric_limits::quiet_NaN(), ++ std::numeric_limits::infinity() ++ }; ++ float inputs_s[kTableLength] = { ++ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5, ++ 2147483648.0, ++ std::numeric_limits::quiet_NaN(), ++ std::numeric_limits::infinity() ++ }; ++ double outputs_RN_W[kTableLength] = { ++ 3.0, 4.0, 4.0, -3.0, -4.0, -4.0, ++ kFPUInvalidResult, 0, ++ kFPUInvalidResult}; ++ double outputs_RN_L[kTableLength] = { ++ 3.0, 4.0, 4.0, -3.0, -4.0, -4.0, ++ 2147483648.0, 0, ++ kFPU64InvalidResult}; ++ double outputs_RZ_W[kTableLength] = { ++ 3.0, 3.0, 3.0, -3.0, -3.0, -3.0, ++ kFPUInvalidResult, 0, ++ kFPUInvalidResult}; ++ double outputs_RZ_L[kTableLength] = { ++ 3.0, 3.0, 3.0, -3.0, -3.0, -3.0, ++ 2147483648.0, 0, ++ kFPU64InvalidResult}; ++ double outputs_RP_W[kTableLength] = { ++ 
4.0, 4.0, 4.0, -3.0, -3.0, -3.0, ++ kFPUInvalidResult, 0, ++ kFPUInvalidResult}; ++ double outputs_RP_L[kTableLength] = { ++ 4.0, 4.0, 4.0, -3.0, -3.0, -3.0, ++ 2147483648.0, 0, ++ kFPU64InvalidResult}; ++ double outputs_RM_W[kTableLength] = { ++ 3.0, 3.0, 3.0, -4.0, -4.0, -4.0, ++ kFPUInvalidResult, 0, ++ kFPUInvalidResult}; ++ double outputs_RM_L[kTableLength] = { ++ 3.0, 3.0, 3.0, -4.0, -4.0, -4.0, ++ 2147483648.0, 0, ++ kFPU64InvalidResult}; ++ // clang-format on ++ ++ int fcsr_inputs[4] = {kRoundToNearest, kRoundToZero, kRoundToPlusInf, ++ kRoundToMinusInf}; ++ double* outputs[8] = { ++ outputs_RN_W, outputs_RN_L, outputs_RZ_W, outputs_RZ_L, ++ outputs_RP_W, outputs_RP_L, outputs_RM_W, outputs_RM_L, ++ }; ++ ++ __ Fld_d(f8, MemOperand(a0, offsetof(Test, a))); ++ __ Fld_s(f9, MemOperand(a0, offsetof(Test, b))); ++ __ xor_(a5, a5, a5); ++ __ Ld_w(a5, MemOperand(a0, offsetof(Test, fcsr))); ++ __ movfcsr2gr(a4); ++ __ movgr2fcsr(a5); ++ __ ftint_w_d(f10, f8); ++ __ ftint_w_s(f11, f9); ++ __ ftint_l_d(f12, f8); ++ __ ftint_l_s(f13, f9); ++ __ Fst_s(f10, MemOperand(a0, offsetof(Test, c))); ++ __ Fst_s(f11, MemOperand(a0, offsetof(Test, d))); ++ __ Fst_d(f12, MemOperand(a0, offsetof(Test, e))); ++ __ Fst_d(f13, MemOperand(a0, offsetof(Test, f))); ++ __ movgr2fcsr(a4); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ for (int j = 0; j < 4; j++) { ++ test.fcsr = fcsr_inputs[j]; ++ for (int i = 0; i < kTableLength; i++) { ++ test.a = inputs_d[i]; ++ test.b = inputs_s[i]; ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.c, outputs[2 * j][i]); ++ CHECK_EQ(test.d, outputs[2 * j][i]); ++ CHECK_EQ(test.e, outputs[2 * j + 1][i]); ++ CHECK_EQ(test.f, outputs[2 * j + 1][i]); ++ } ++ } ++} ++ ++TEST(FTINTRM) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct Test { ++ double a; ++ float b; ++ int32_t c; ++ int32_t d; ++ int64_t e; ++ int64_t f; ++ }; ++ Test test; ++ ++ const int kTableLength = 9; ++ ++ // clang-format off ++ double inputs_d[kTableLength] = { ++ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5, ++ 2147483648.0, ++ std::numeric_limits::quiet_NaN(), ++ std::numeric_limits::infinity() ++ }; ++ float inputs_s[kTableLength] = { ++ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5, ++ 2147483648.0, ++ std::numeric_limits::quiet_NaN(), ++ std::numeric_limits::infinity() ++ }; ++ double outputs_w[kTableLength] = { ++ 3.0, 3.0, 3.0, -4.0, -4.0, -4.0, ++ kFPUInvalidResult, 0, ++ kFPUInvalidResult}; ++ double outputs_l[kTableLength] = { ++ 3.0, 3.0, 3.0, -4.0, -4.0, -4.0, ++ 2147483648.0, 0, ++ kFPU64InvalidResult}; ++ // clang-format on ++ ++ __ Fld_d(f8, MemOperand(a0, offsetof(Test, a))); ++ __ Fld_s(f9, MemOperand(a0, offsetof(Test, b))); ++ __ ftintrm_w_d(f10, f8); ++ __ ftintrm_w_s(f11, f9); ++ __ ftintrm_l_d(f12, f8); ++ __ ftintrm_l_s(f13, f9); ++ __ Fst_s(f10, MemOperand(a0, offsetof(Test, c))); ++ __ Fst_s(f11, MemOperand(a0, offsetof(Test, d))); ++ __ Fst_d(f12, MemOperand(a0, offsetof(Test, e))); ++ __ Fst_d(f13, MemOperand(a0, offsetof(Test, f))); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ for (int i = 0; i < kTableLength; i++) { ++ test.a = inputs_d[i]; ++ test.b = 
inputs_s[i]; ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.c, outputs_w[i]); ++ CHECK_EQ(test.d, outputs_w[i]); ++ CHECK_EQ(test.e, outputs_l[i]); ++ CHECK_EQ(test.f, outputs_l[i]); ++ } ++} ++ ++TEST(FTINTRP) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct Test { ++ double a; ++ float b; ++ int32_t c; ++ int32_t d; ++ int64_t e; ++ int64_t f; ++ }; ++ Test test; ++ ++ const int kTableLength = 9; ++ ++ // clang-format off ++ double inputs_d[kTableLength] = { ++ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5, ++ 2147483648.0, ++ std::numeric_limits::quiet_NaN(), ++ std::numeric_limits::infinity() ++ }; ++ float inputs_s[kTableLength] = { ++ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5, ++ 2147483648.0, ++ std::numeric_limits::quiet_NaN(), ++ std::numeric_limits::infinity() ++ }; ++ double outputs_w[kTableLength] = { ++ 4.0, 4.0, 4.0, -3.0, -3.0, -3.0, ++ kFPUInvalidResult, 0, ++ kFPUInvalidResult}; ++ double outputs_l[kTableLength] = { ++ 4.0, 4.0, 4.0, -3.0, -3.0, -3.0, ++ 2147483648.0, 0, ++ kFPU64InvalidResult}; ++ // clang-format on ++ ++ __ Fld_d(f8, MemOperand(a0, offsetof(Test, a))); ++ __ Fld_s(f9, MemOperand(a0, offsetof(Test, b))); ++ __ ftintrp_w_d(f10, f8); ++ __ ftintrp_w_s(f11, f9); ++ __ ftintrp_l_d(f12, f8); ++ __ ftintrp_l_s(f13, f9); ++ __ Fst_s(f10, MemOperand(a0, offsetof(Test, c))); ++ __ Fst_s(f11, MemOperand(a0, offsetof(Test, d))); ++ __ Fst_d(f12, MemOperand(a0, offsetof(Test, e))); ++ __ Fst_d(f13, MemOperand(a0, offsetof(Test, f))); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ for (int i = 0; i < kTableLength; i++) { ++ test.a = inputs_d[i]; ++ test.b = inputs_s[i]; ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.c, outputs_w[i]); ++ CHECK_EQ(test.d, outputs_w[i]); ++ CHECK_EQ(test.e, outputs_l[i]); ++ CHECK_EQ(test.f, outputs_l[i]); ++ } ++} ++ ++TEST(FTINTRZ) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct Test { ++ double a; ++ float b; ++ int32_t c; ++ int32_t d; ++ int64_t e; ++ int64_t f; ++ }; ++ Test test; ++ ++ const int kTableLength = 9; ++ ++ // clang-format off ++ double inputs_d[kTableLength] = { ++ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5, ++ 2147483648.0, ++ std::numeric_limits::quiet_NaN(), ++ std::numeric_limits::infinity() ++ }; ++ float inputs_s[kTableLength] = { ++ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5, ++ 2147483648.0, ++ std::numeric_limits::quiet_NaN(), ++ std::numeric_limits::infinity() ++ }; ++ double outputs_w[kTableLength] = { ++ 3.0, 3.0, 3.0, -3.0, -3.0, -3.0, ++ kFPUInvalidResult, 0, ++ kFPUInvalidResult}; ++ double outputs_l[kTableLength] = { ++ 3.0, 3.0, 3.0, -3.0, -3.0, -3.0, ++ 2147483648.0, 0, ++ kFPU64InvalidResult}; ++ // clang-format on ++ ++ __ Fld_d(f8, MemOperand(a0, offsetof(Test, a))); ++ __ Fld_s(f9, MemOperand(a0, offsetof(Test, b))); ++ __ ftintrz_w_d(f10, f8); ++ __ ftintrz_w_s(f11, f9); ++ __ ftintrz_l_d(f12, f8); ++ __ ftintrz_l_s(f13, f9); ++ __ Fst_s(f10, MemOperand(a0, offsetof(Test, c))); ++ __ Fst_s(f11, MemOperand(a0, offsetof(Test, d))); ++ __ Fst_d(f12, MemOperand(a0, offsetof(Test, e))); ++ __ Fst_d(f13, MemOperand(a0, offsetof(Test, f))); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, 
&desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ for (int i = 0; i < kTableLength; i++) { ++ test.a = inputs_d[i]; ++ test.b = inputs_s[i]; ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.c, outputs_w[i]); ++ CHECK_EQ(test.d, outputs_w[i]); ++ CHECK_EQ(test.e, outputs_l[i]); ++ CHECK_EQ(test.f, outputs_l[i]); ++ } ++} ++ ++TEST(FTINTRNE) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct Test { ++ double a; ++ float b; ++ int32_t c; ++ int32_t d; ++ int64_t e; ++ int64_t f; ++ }; ++ Test test; ++ ++ const int kTableLength = 9; ++ ++ // clang-format off ++ double inputs_d[kTableLength] = { ++ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5, ++ 2147483648.0, ++ std::numeric_limits::quiet_NaN(), ++ std::numeric_limits::infinity() ++ }; ++ float inputs_s[kTableLength] = { ++ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5, ++ 2147483648.0, ++ std::numeric_limits::quiet_NaN(), ++ std::numeric_limits::infinity() ++ }; ++ double outputs_w[kTableLength] = { ++ 3.0, 4.0, 4.0, -3.0, -4.0, -4.0, ++ kFPUInvalidResult, 0, ++ kFPUInvalidResult}; ++ double outputs_l[kTableLength] = { ++ 3.0, 4.0, 4.0, -3.0, -4.0, -4.0, ++ 2147483648.0, 0, ++ kFPU64InvalidResult}; ++ // clang-format on ++ ++ __ Fld_d(f8, MemOperand(a0, offsetof(Test, a))); ++ __ Fld_s(f9, MemOperand(a0, offsetof(Test, b))); ++ __ ftintrne_w_d(f10, f8); ++ __ ftintrne_w_s(f11, f9); ++ __ ftintrne_l_d(f12, f8); ++ __ ftintrne_l_s(f13, f9); ++ __ Fst_s(f10, MemOperand(a0, offsetof(Test, c))); ++ __ Fst_s(f11, MemOperand(a0, offsetof(Test, d))); ++ __ Fst_d(f12, MemOperand(a0, offsetof(Test, e))); ++ __ Fst_d(f13, MemOperand(a0, offsetof(Test, f))); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ for (int i = 0; i < kTableLength; i++) { ++ test.a = inputs_d[i]; ++ test.b = inputs_s[i]; ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.c, outputs_w[i]); ++ CHECK_EQ(test.d, outputs_w[i]); ++ CHECK_EQ(test.e, outputs_l[i]); ++ CHECK_EQ(test.f, outputs_l[i]); ++ } ++} ++ ++TEST(FRINT) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct Test { ++ double a; ++ float b; ++ double c; ++ float d; ++ int fcsr; ++ }; ++ Test test; ++ ++ const int kTableLength = 32; ++ ++ // clang-format off ++ double inputs_d[kTableLength] = { ++ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0, ++ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147, ++ 1.7976931348623157E+308, 6.27463370218383111104242366943E-307, ++ 309485009821345068724781056.89, ++ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5, ++ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5, ++ 37778931862957161709568.0, 37778931862957161709569.0, ++ 37778931862957161709580.0, 37778931862957161709581.0, ++ 37778931862957161709582.0, 37778931862957161709583.0, ++ 37778931862957161709584.0, 37778931862957161709585.0, ++ 37778931862957161709586.0, 37778931862957161709587.0, ++ std::numeric_limits::max() - 0.1, ++ std::numeric_limits::infinity() ++ }; ++ float inputs_s[kTableLength] = { ++ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0, ++ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37, ++ 
1.7976931348623157E+38, 6.27463370218383111104242366943E-37, ++ 309485009821345068724781056.89, ++ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5, ++ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5, ++ 37778931862957161709568.0, 37778931862957161709569.0, ++ 37778931862957161709580.0, 37778931862957161709581.0, ++ 37778931862957161709582.0, 37778931862957161709583.0, ++ 37778931862957161709584.0, 37778931862957161709585.0, ++ 37778931862957161709586.0, 37778931862957161709587.0, ++ std::numeric_limits::lowest() + 0.6, ++ std::numeric_limits::infinity() ++ }; ++ float outputs_RN_S[kTableLength] = { ++ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0, ++ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37, ++ 1.7976931348623157E38, 0, ++ 309485009821345068724781057.0, ++ 2.0, 3.0, 2.0, 3.0, 4.0, 4.0, ++ -2.0, -3.0, -2.0, -3.0, -4.0, -4.0, ++ 37778931862957161709568.0, 37778931862957161709569.0, ++ 37778931862957161709580.0, 37778931862957161709581.0, ++ 37778931862957161709582.0, 37778931862957161709583.0, ++ 37778931862957161709584.0, 37778931862957161709585.0, ++ 37778931862957161709586.0, 37778931862957161709587.0, ++ std::numeric_limits::lowest() + 1, ++ std::numeric_limits::infinity() ++ }; ++ double outputs_RN_D[kTableLength] = { ++ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0, ++ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147, ++ 1.7976931348623157E308, 0, ++ 309485009821345068724781057.0, ++ 2.0, 3.0, 2.0, 3.0, 4.0, 4.0, ++ -2.0, -3.0, -2.0, -3.0, -4.0, -4.0, ++ 37778931862957161709568.0, 37778931862957161709569.0, ++ 37778931862957161709580.0, 37778931862957161709581.0, ++ 37778931862957161709582.0, 37778931862957161709583.0, ++ 37778931862957161709584.0, 37778931862957161709585.0, ++ 37778931862957161709586.0, 37778931862957161709587.0, ++ std::numeric_limits::max(), ++ std::numeric_limits::infinity() ++ }; ++ float outputs_RZ_S[kTableLength] = { ++ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0, ++ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37, ++ 1.7976931348623157E38, 0, ++ 309485009821345068724781057.0, ++ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, ++ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, ++ 37778931862957161709568.0, 37778931862957161709569.0, ++ 37778931862957161709580.0, 37778931862957161709581.0, ++ 37778931862957161709582.0, 37778931862957161709583.0, ++ 37778931862957161709584.0, 37778931862957161709585.0, ++ 37778931862957161709586.0, 37778931862957161709587.0, ++ std::numeric_limits::lowest() + 1, ++ std::numeric_limits::infinity() ++ }; ++ double outputs_RZ_D[kTableLength] = { ++ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0, ++ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147, ++ 1.7976931348623157E308, 0, ++ 309485009821345068724781057.0, ++ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, ++ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, ++ 37778931862957161709568.0, 37778931862957161709569.0, ++ 37778931862957161709580.0, 37778931862957161709581.0, ++ 37778931862957161709582.0, 37778931862957161709583.0, ++ 37778931862957161709584.0, 37778931862957161709585.0, ++ 37778931862957161709586.0, 37778931862957161709587.0, ++ std::numeric_limits::max() - 1, ++ std::numeric_limits::infinity() ++ }; ++ float outputs_RP_S[kTableLength] = { ++ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0, ++ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37, ++ 1.7976931348623157E38, 1, ++ 309485009821345068724781057.0, ++ 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, ++ -2.0, -2.0, 
-2.0, -3.0, -3.0, -3.0, ++ 37778931862957161709568.0, 37778931862957161709569.0, ++ 37778931862957161709580.0, 37778931862957161709581.0, ++ 37778931862957161709582.0, 37778931862957161709583.0, ++ 37778931862957161709584.0, 37778931862957161709585.0, ++ 37778931862957161709586.0, 37778931862957161709587.0, ++ std::numeric_limits::lowest() + 1, ++ std::numeric_limits::infinity() ++ }; ++ double outputs_RP_D[kTableLength] = { ++ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0, ++ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147, ++ 1.7976931348623157E308, 1, ++ 309485009821345068724781057.0, ++ 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, ++ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, ++ 37778931862957161709568.0, 37778931862957161709569.0, ++ 37778931862957161709580.0, 37778931862957161709581.0, ++ 37778931862957161709582.0, 37778931862957161709583.0, ++ 37778931862957161709584.0, 37778931862957161709585.0, ++ 37778931862957161709586.0, 37778931862957161709587.0, ++ std::numeric_limits::max(), ++ std::numeric_limits::infinity() ++ }; ++ float outputs_RM_S[kTableLength] = { ++ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0, ++ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37, ++ 1.7976931348623157E38, 0, ++ 309485009821345068724781057.0, ++ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, ++ -3.0, -3.0, -3.0, -4.0, -4.0, -4.0, ++ 37778931862957161709568.0, 37778931862957161709569.0, ++ 37778931862957161709580.0, 37778931862957161709581.0, ++ 37778931862957161709582.0, 37778931862957161709583.0, ++ 37778931862957161709584.0, 37778931862957161709585.0, ++ 37778931862957161709586.0, 37778931862957161709587.0, ++ std::numeric_limits::lowest() + 1, ++ std::numeric_limits::infinity() ++ }; ++ double outputs_RM_D[kTableLength] = { ++ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0, ++ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147, ++ 1.7976931348623157E308, 0, ++ 309485009821345068724781057.0, ++ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, ++ -3.0, -3.0, -3.0, -4.0, -4.0, -4.0, ++ 37778931862957161709568.0, 37778931862957161709569.0, ++ 37778931862957161709580.0, 37778931862957161709581.0, ++ 37778931862957161709582.0, 37778931862957161709583.0, ++ 37778931862957161709584.0, 37778931862957161709585.0, ++ 37778931862957161709586.0, 37778931862957161709587.0, ++ std::numeric_limits::max(), ++ std::numeric_limits::infinity() ++ }; ++ // clang-format on ++ ++ int fcsr_inputs[4] = {kRoundToNearest, kRoundToZero, kRoundToPlusInf, ++ kRoundToMinusInf}; ++ double* outputs_d[4] = {outputs_RN_D, outputs_RZ_D, outputs_RP_D, ++ outputs_RM_D}; ++ float* outputs_s[4] = {outputs_RN_S, outputs_RZ_S, outputs_RP_S, ++ outputs_RM_S}; ++ ++ __ Fld_d(f8, MemOperand(a0, offsetof(Test, a))); ++ __ Fld_s(f9, MemOperand(a0, offsetof(Test, b))); ++ __ xor_(a5, a5, a5); ++ __ Ld_w(a5, MemOperand(a0, offsetof(Test, fcsr))); ++ __ movfcsr2gr(a4); ++ __ movgr2fcsr(a5); ++ __ frint_d(f10, f8); ++ __ frint_s(f11, f9); ++ __ Fst_d(f10, MemOperand(a0, offsetof(Test, c))); ++ __ Fst_s(f11, MemOperand(a0, offsetof(Test, d))); ++ __ movgr2fcsr(a4); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ for (int j = 0; j < 4; j++) { ++ test.fcsr = fcsr_inputs[j]; ++ for (int i = 0; i < kTableLength; i++) { ++ test.a = inputs_d[i]; ++ test.b = inputs_s[i]; ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.c, outputs_d[j][i]); ++ 
CHECK_EQ(test.d, outputs_s[j][i]); ++ } ++ } ++} ++ ++TEST(FMOV) { ++ const int kTableLength = 7; ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct TestFloat { ++ double a; ++ float b; ++ double c; ++ float d; ++ }; ++ ++ TestFloat test; ++ ++ // clang-format off ++ double inputs_D[kTableLength] = { ++ 5.3, -5.3, 0.29, -0.29, 0, ++ std::numeric_limits::max(), ++ -std::numeric_limits::max() ++ }; ++ float inputs_S[kTableLength] = { ++ 4.8, -4.8, 0.29, -0.29, 0, ++ std::numeric_limits::max(), ++ -std::numeric_limits::max() ++ }; ++ ++ double outputs_D[kTableLength] = { ++ 5.3, -5.3, 0.29, -0.29, 0, ++ std::numeric_limits::max(), ++ -std::numeric_limits::max() ++ }; ++ ++ float outputs_S[kTableLength] = { ++ 4.8, -4.8, 0.29, -0.29, 0, ++ std::numeric_limits::max(), ++ -std::numeric_limits::max() ++ }; ++ // clang-format on ++ ++ __ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, a))); ++ __ Fld_s(f9, MemOperand(a0, offsetof(TestFloat, b))); ++ __ fmov_d(f10, f8); ++ __ fmov_s(f11, f9); ++ __ Fst_d(f10, MemOperand(a0, offsetof(TestFloat, c))); ++ __ Fst_s(f11, MemOperand(a0, offsetof(TestFloat, d))); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ for (int i = 0; i < kTableLength; i++) { ++ test.a = inputs_D[i]; ++ test.b = inputs_S[i]; ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.c, outputs_D[i]); ++ CHECK_EQ(test.d, outputs_S[i]); ++ } ++} ++ ++TEST(LA14) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ struct T { ++ double a; ++ double b; ++ double c; ++ double d; ++ int64_t high; ++ int64_t low; ++ }; ++ T t; ++ ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ __ Fld_d(f8, MemOperand(a0, offsetof(T, a))); ++ __ Fld_d(f9, MemOperand(a0, offsetof(T, b))); ++ ++ __ movfr2gr_s(a4, f8); ++ __ movfrh2gr_s(a5, f8); ++ __ movfr2gr_d(a6, f9); ++ ++ __ movgr2fr_w(f9, a4); ++ __ movgr2frh_w(f9, a5); ++ __ movgr2fr_d(f8, a6); ++ ++ __ Fst_d(f8, MemOperand(a0, offsetof(T, a))); ++ __ Fst_d(f9, MemOperand(a0, offsetof(T, c))); ++ ++ __ Fld_d(f8, MemOperand(a0, offsetof(T, d))); ++ __ movfrh2gr_s(a4, f8); ++ __ movfr2gr_s(a5, f8); ++ ++ __ St_d(a4, MemOperand(a0, offsetof(T, high))); ++ __ St_d(a5, MemOperand(a0, offsetof(T, low))); ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ ++ t.a = 1.5e22; ++ t.b = 2.75e11; ++ t.c = 17.17; ++ t.d = -2.75e11; ++ f.Call(&t, 0, 0, 0, 0); ++ CHECK_EQ(2.75e11, t.a); ++ CHECK_EQ(2.75e11, t.b); ++ CHECK_EQ(1.5e22, t.c); ++ CHECK_EQ(static_cast(0xFFFFFFFFC25001D1L), t.high); ++ CHECK_EQ(static_cast(0xFFFFFFFFBF800000L), t.low); ++ ++ t.a = -1.5e22; ++ t.b = -2.75e11; ++ t.c = 17.17; ++ t.d = 274999868928.0; ++ f.Call(&t, 0, 0, 0, 0); ++ CHECK_EQ(-2.75e11, t.a); ++ CHECK_EQ(-2.75e11, t.b); ++ CHECK_EQ(-1.5e22, t.c); ++ CHECK_EQ(static_cast(0x425001D1L), t.high); ++ CHECK_EQ(static_cast(0x3F800000L), t.low); ++} ++ ++uint64_t run_bceqz(int fcc_value, int32_t offset) { ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ Label main_block, L; 
++ __ li(a2, 0); ++ __ li(t0, fcc_value); ++ __ b(&main_block); ++ // Block 1 ++ for (int32_t i = -104; i <= -55; ++i) { ++ __ addi_d(a2, a2, 0x1); ++ } ++ __ b(&L); ++ ++ // Block 2 ++ for (int32_t i = -53; i <= -4; ++i) { ++ __ addi_d(a2, a2, 0x10); ++ } ++ __ b(&L); ++ ++ // Block 3 (Main) ++ __ bind(&main_block); ++ __ movcf2gr(t1, FCC0); ++ __ movgr2cf(FCC0, t0); ++ __ bceqz(FCC0, offset); ++ __ bind(&L); ++ __ movgr2cf(FCC0, t1); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ // Block 4 ++ for (int32_t i = 4; i <= 53; ++i) { ++ __ addi_d(a2, a2, 0x100); ++ } ++ __ b(&L); ++ ++ // Block 5 ++ for (int32_t i = 55; i <= 104; ++i) { ++ __ addi_d(a2, a2, 0x300); ++ } ++ __ b(&L); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ uint64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); ++ ++ return res; ++} ++ ++TEST(BCEQZ) { ++ CcTest::InitializeVM(); ++ struct TestCaseBceqz { ++ int fcc; ++ int32_t offset; ++ uint64_t expected_res; ++ }; ++ ++ // clang-format off ++ struct TestCaseBceqz tc[] = { ++ // fcc, offset, expected_res ++ { 0, -90, 0x24 }, ++ { 0, -27, 0x180 }, ++ { 0, 47, 0x700 }, ++ { 0, 70, 0x6900 }, ++ { 1, -27, 0 }, ++ { 1, 47, 0 }, ++ }; ++ // clang-format on ++ ++ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBceqz); ++ for (size_t i = 0; i < nr_test_cases; ++i) { ++ uint64_t res = run_bceqz(tc[i].fcc, tc[i].offset); ++ CHECK_EQ(tc[i].expected_res, res); ++ } ++} ++ ++uint64_t run_bcnez(int fcc_value, int32_t offset) { ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ Label main_block, L; ++ __ li(a2, 0); ++ __ li(t0, fcc_value); ++ __ b(&main_block); ++ // Block 1 ++ for (int32_t i = -104; i <= -55; ++i) { ++ __ addi_d(a2, a2, 0x1); ++ } ++ __ b(&L); ++ ++ // Block 2 ++ for (int32_t i = -53; i <= -4; ++i) { ++ __ addi_d(a2, a2, 0x10); ++ } ++ __ b(&L); ++ ++ // Block 3 (Main) ++ __ bind(&main_block); ++ __ movcf2gr(t1, FCC0); ++ __ movgr2cf(FCC0, t0); ++ __ bcnez(FCC0, offset); ++ __ bind(&L); ++ __ movgr2cf(FCC0, t1); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ // Block 4 ++ for (int32_t i = 4; i <= 53; ++i) { ++ __ addi_d(a2, a2, 0x100); ++ } ++ __ b(&L); ++ ++ // Block 5 ++ for (int32_t i = 55; i <= 104; ++i) { ++ __ addi_d(a2, a2, 0x300); ++ } ++ __ b(&L); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ uint64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); ++ ++ return res; ++} ++ ++TEST(BCNEZ) { ++ CcTest::InitializeVM(); ++ struct TestCaseBcnez { ++ int fcc; ++ int32_t offset; ++ uint64_t expected_res; ++ }; ++ ++ // clang-format off ++ struct TestCaseBcnez tc[] = { ++ // fcc, offset, expected_res ++ { 1, -90, 0x24 }, ++ { 1, -27, 0x180 }, ++ { 1, 47, 0x700 }, ++ { 1, 70, 0x6900 }, ++ { 0, -27, 0 }, ++ { 0, 47, 0 }, ++ }; ++ // clang-format on ++ ++ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBcnez); ++ for (size_t i = 0; i < nr_test_cases; ++i) { ++ uint64_t res = run_bcnez(tc[i].fcc, tc[i].offset); ++ CHECK_EQ(tc[i].expected_res, res); ++ } ++} ++ ++TEST(jump_tables1) { ++ // Test jump tables with forward jumps. 
++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ const int kNumCases = 512; ++ int values[kNumCases]; ++ isolate->random_number_generator()->NextBytes(values, sizeof(values)); ++ Label labels[kNumCases]; ++ ++ __ addi_d(sp, sp, -8); ++ __ St_d(ra, MemOperand(sp, 0)); ++ __ Align(8); ++ ++ Label done; ++ { ++ __ BlockTrampolinePoolFor(kNumCases * 2 + 6); ++ __ pcaddi(ra, 2); ++ __ slli_d(t7, a0, 3); ++ __ add_d(t7, t7, ra); ++ __ Ld_d(t7, MemOperand(t7, 4 * kInstrSize)); ++ __ jirl(zero_reg, t7, 0); ++ __ nop(); ++ for (int i = 0; i < kNumCases; ++i) { ++ __ dd(&labels[i]); ++ } ++ } ++ ++ for (int i = 0; i < kNumCases; ++i) { ++ __ bind(&labels[i]); ++ __ lu12i_w(a2, (values[i] >> 12) & 0xFFFFF); ++ __ ori(a2, a2, values[i] & 0xFFF); ++ __ b(&done); ++ __ nop(); ++ } ++ ++ __ bind(&done); ++ __ Ld_d(ra, MemOperand(sp, 0)); ++ __ addi_d(sp, sp, 8); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CHECK_EQ(0, assm.UnboundLabelsCount()); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++#ifdef OBJECT_PRINT ++ code->Print(std::cout); ++#endif ++ auto f = GeneratedCode::FromCode(*code); ++ for (int i = 0; i < kNumCases; ++i) { ++ int64_t res = reinterpret_cast(f.Call(i, 0, 0, 0, 0)); ++ ::printf("f(%d) = %" PRId64 "\n", i, res); ++ CHECK_EQ((values[i]), static_cast(res)); ++ } ++} ++ ++TEST(jump_tables2) { ++ // Test jump tables with backward jumps. ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ const int kNumCases = 512; ++ int values[kNumCases]; ++ isolate->random_number_generator()->NextBytes(values, sizeof(values)); ++ Label labels[kNumCases]; ++ ++ __ addi_d(sp, sp, -8); ++ __ St_d(ra, MemOperand(sp, 0)); ++ ++ Label done, dispatch; ++ __ b(&dispatch); ++ __ nop(); ++ ++ for (int i = 0; i < kNumCases; ++i) { ++ __ bind(&labels[i]); ++ __ lu12i_w(a2, (values[i] >> 12) & 0xFFFFF); ++ __ ori(a2, a2, values[i] & 0xFFF); ++ __ b(&done); ++ __ nop(); ++ } ++ ++ __ Align(8); ++ __ bind(&dispatch); ++ { ++ __ BlockTrampolinePoolFor(kNumCases * 2 + 6); ++ __ pcaddi(ra, 2); ++ __ slli_d(t7, a0, 3); ++ __ add_d(t7, t7, ra); ++ __ Ld_d(t7, MemOperand(t7, 4 * kInstrSize)); ++ __ jirl(zero_reg, t7, 0); ++ __ nop(); ++ for (int i = 0; i < kNumCases; ++i) { ++ __ dd(&labels[i]); ++ } ++ } ++ ++ __ bind(&done); ++ __ Ld_d(ra, MemOperand(sp, 0)); ++ __ addi_d(sp, sp, 8); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++#ifdef OBJECT_PRINT ++ code->Print(std::cout); ++#endif ++ auto f = GeneratedCode::FromCode(*code); ++ for (int i = 0; i < kNumCases; ++i) { ++ int64_t res = reinterpret_cast(f.Call(i, 0, 0, 0, 0)); ++ ::printf("f(%d) = %" PRId64 "\n", i, res); ++ CHECK_EQ(values[i], res); ++ } ++} ++ ++TEST(jump_tables3) { ++ // Test jump tables with backward jumps and embedded heap objects. 
++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ const int kNumCases = 512; ++ Handle values[kNumCases]; ++ for (int i = 0; i < kNumCases; ++i) { ++ double value = isolate->random_number_generator()->NextDouble(); ++ values[i] = isolate->factory()->NewHeapNumber(value); ++ } ++ Label labels[kNumCases]; ++ Object obj; ++ int64_t imm64; ++ ++ __ addi_d(sp, sp, -8); ++ __ St_d(ra, MemOperand(sp, 0)); ++ ++ Label done, dispatch; ++ __ b(&dispatch); ++ __ nop(); ++ ++ for (int i = 0; i < kNumCases; ++i) { ++ __ bind(&labels[i]); ++ obj = *values[i]; ++ imm64 = obj.ptr(); ++ __ lu12i_w(a2, (imm64 >> 12) & 0xFFFFF); ++ __ ori(a2, a2, imm64 & 0xFFF); ++ __ lu32i_d(a2, (imm64 >> 32) & 0xFFFFF); ++ __ lu52i_d(a2, a2, (imm64 >> 52) & 0xFFF); ++ __ b(&done); ++ } ++ ++ __ Align(8); ++ __ bind(&dispatch); ++ { ++ __ BlockTrampolinePoolFor(kNumCases * 2 + 6); ++ __ pcaddi(ra, 2); ++ __ slli_d(t7, a0, 3); // In delay slot. ++ __ add_d(t7, t7, ra); ++ __ Ld_d(t7, MemOperand(t7, 4 * kInstrSize)); ++ __ jirl(zero_reg, t7, 0); ++ __ nop(); ++ for (int i = 0; i < kNumCases; ++i) { ++ __ dd(&labels[i]); ++ } ++ } ++ __ bind(&done); ++ __ Ld_d(ra, MemOperand(sp, 0)); ++ __ addi_d(sp, sp, 8); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++#ifdef OBJECT_PRINT ++ code->Print(std::cout); ++#endif ++ auto f = GeneratedCode::FromCode(*code); ++ for (int i = 0; i < kNumCases; ++i) { ++ Handle result( ++ Object(reinterpret_cast
(f.Call(i, 0, 0, 0, 0))), isolate); ++#ifdef OBJECT_PRINT ++ ::printf("f(%d) = ", i); ++ result->Print(std::cout); ++ ::printf("\n"); ++#endif ++ CHECK(values[i].is_identical_to(result)); ++ } ++} ++ ++uint64_t run_li_macro(int64_t imm, LiFlags mode, int32_t num_instr = 0) { ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ Label code_start; ++ __ bind(&code_start); ++ __ li(a2, imm, mode); ++ if (num_instr > 0) { ++ CHECK_EQ(assm.InstructionsGeneratedSince(&code_start), num_instr); ++ CHECK_EQ(__ InstrCountForLi64Bit(imm), num_instr); ++ } ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++#ifdef OBJECT_PRINT ++ code->Print(std::cout); ++#endif ++ auto f = GeneratedCode::FromCode(*code); ++ ++ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0)); ++ ++ return res; ++} ++ ++TEST(li_macro) { ++ CcTest::InitializeVM(); ++ ++ // Test li macro-instruction for border cases. ++ ++ struct TestCase_li { ++ uint64_t imm; ++ int32_t num_instr; ++ }; ++ // clang-format off ++ struct TestCase_li tc[] = { ++ // imm, num_instr ++ {0xFFFFFFFFFFFFF800, 1}, // min_int12 ++ // The test case above generates addi_d instruction. ++ // This is int12 value and we can load it using just addi_d. ++ { 0x800, 1}, // max_int12 + 1 ++ // Generates ori ++ // max_int12 + 1 is not int12 but is uint12, just use ori. ++ {0xFFFFFFFFFFFFF7FF, 2}, // min_int12 - 1 ++ // Generates lu12i + ori ++ // We load int32 value using lu12i_w + ori. ++ { 0x801, 1}, // max_int12 + 2 ++ // Generates ori ++ // Also an uint12 value, use ori. ++ { 0x00001000, 1}, // max_uint12 + 1 ++ // Generates lu12i_w ++ // Low 12 bits are 0, load value using lu12i_w. ++ { 0x00001001, 2}, // max_uint12 + 2 ++ // Generates lu12i_w + ori ++ // We have to generate two instructions in this case.
++ {0x00000000FFFFFFFF, 2}, // max_uint32 ++ // addi_w + lu32i_d ++ {0x00000000FFFFFFFE, 2}, // max_uint32 - 1 ++ // addi_w + lu32i_d ++ {0xFFFFFFFF80000000, 1}, // min_int32 ++ // lu12i_w ++ {0x0000000080000000, 2}, // max_int32 + 1 ++ // lu12i_w + lu32i_d ++ {0xFFFF0000FFFF8765, 3}, ++ // lu12i_w + ori + lu32i_d ++ {0x1234ABCD87654321, 4}, ++ // lu12i_w + ori + lu32i_d + lu52i_d ++ {0xFFFF789100000000, 2}, ++ // xor + lu32i_d ++ {0xF12F789100000000, 3}, ++ // xor + lu32i_d + lu52i_d ++ {0xF120000000000800, 2}, ++ // ori + lu52i_d ++ {0xFFF0000000000000, 1}, ++ // lu52i_d ++ {0xF100000000000000, 1}, ++ {0x0122000000000000, 2}, ++ {0x1234FFFF77654321, 4}, ++ {0x1230000077654321, 3}, ++ }; ++ // clang-format on ++ ++ size_t nr_test_cases = sizeof(tc) / sizeof(TestCase_li); ++ for (size_t i = 0; i < nr_test_cases; ++i) { ++ CHECK_EQ(tc[i].imm, ++ run_li_macro(tc[i].imm, OPTIMIZE_SIZE, tc[i].num_instr)); ++ CHECK_EQ(tc[i].imm, run_li_macro(tc[i].imm, CONSTANT_SIZE)); ++ if (is_int48(tc[i].imm)) { ++ CHECK_EQ(tc[i].imm, run_li_macro(tc[i].imm, ADDRESS_LOAD)); ++ } ++ } ++} ++ ++TEST(FMIN_FMAX) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct TestFloat { ++ double a; ++ double b; ++ float c; ++ float d; ++ double e; ++ double f; ++ float g; ++ float h; ++ }; ++ ++ TestFloat test; ++ const double dnan = std::numeric_limits::quiet_NaN(); ++ const double dinf = std::numeric_limits::infinity(); ++ const double dminf = -std::numeric_limits::infinity(); ++ const float fnan = std::numeric_limits::quiet_NaN(); ++ const float finf = std::numeric_limits::infinity(); ++ const float fminf = -std::numeric_limits::infinity(); ++ const int kTableLength = 13; ++ ++ // clang-format off ++ double inputsa[kTableLength] = {2.0, 3.0, dnan, 3.0, -0.0, 0.0, dinf, ++ dnan, 42.0, dinf, dminf, dinf, dnan}; ++ double inputsb[kTableLength] = {3.0, 2.0, 3.0, dnan, 0.0, -0.0, dnan, ++ dinf, dinf, 42.0, dinf, dminf, dnan}; ++ double outputsdmin[kTableLength] = {2.0, 2.0, 3.0, 3.0, -0.0, ++ -0.0, dinf, dinf, 42.0, 42.0, ++ dminf, dminf, dnan}; ++ double outputsdmax[kTableLength] = {3.0, 3.0, 3.0, 3.0, 0.0, 0.0, dinf, ++ dinf, dinf, dinf, dinf, dinf, dnan}; ++ ++ float inputsc[kTableLength] = {2.0, 3.0, fnan, 3.0, -0.0, 0.0, finf, ++ fnan, 42.0, finf, fminf, finf, fnan}; ++ float inputsd[kTableLength] = {3.0, 2.0, 3.0, fnan, 0.0, -0.0, fnan, ++ finf, finf, 42.0, finf, fminf, fnan}; ++ float outputsfmin[kTableLength] = {2.0, 2.0, 3.0, 3.0, -0.0, ++ -0.0, finf, finf, 42.0, 42.0, ++ fminf, fminf, fnan}; ++ float outputsfmax[kTableLength] = {3.0, 3.0, 3.0, 3.0, 0.0, 0.0, finf, ++ finf, finf, finf, finf, finf, fnan}; ++ // clang-format on ++ ++ __ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, a))); ++ __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, b))); ++ __ Fld_s(f10, MemOperand(a0, offsetof(TestFloat, c))); ++ __ Fld_s(f11, MemOperand(a0, offsetof(TestFloat, d))); ++ __ fmin_d(f12, f8, f9); ++ __ fmax_d(f13, f8, f9); ++ __ fmin_s(f14, f10, f11); ++ __ fmax_s(f15, f10, f11); ++ __ Fst_d(f12, MemOperand(a0, offsetof(TestFloat, e))); ++ __ Fst_d(f13, MemOperand(a0, offsetof(TestFloat, f))); ++ __ Fst_s(f14, MemOperand(a0, offsetof(TestFloat, g))); ++ __ Fst_s(f15, MemOperand(a0, offsetof(TestFloat, h))); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = 
GeneratedCode::FromCode(*code); ++ for (int i = 4; i < kTableLength; i++) { ++ test.a = inputsa[i]; ++ test.b = inputsb[i]; ++ test.c = inputsc[i]; ++ test.d = inputsd[i]; ++ ++ f.Call(&test, 0, 0, 0, 0); ++ ++ CHECK_EQ(0, memcmp(&test.e, &outputsdmin[i], sizeof(test.e))); ++ CHECK_EQ(0, memcmp(&test.f, &outputsdmax[i], sizeof(test.f))); ++ CHECK_EQ(0, memcmp(&test.g, &outputsfmin[i], sizeof(test.g))); ++ CHECK_EQ(0, memcmp(&test.h, &outputsfmax[i], sizeof(test.h))); ++ } ++} ++ ++TEST(FMINA_FMAXA) { ++ const int kTableLength = 23; ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ const double dnan = std::numeric_limits::quiet_NaN(); ++ const double dinf = std::numeric_limits::infinity(); ++ const double dminf = -std::numeric_limits::infinity(); ++ const float fnan = std::numeric_limits::quiet_NaN(); ++ const float finf = std::numeric_limits::infinity(); ++ const float fminf = std::numeric_limits::infinity(); ++ ++ struct TestFloat { ++ double a; ++ double b; ++ double resd1; ++ double resd2; ++ float c; ++ float d; ++ float resf1; ++ float resf2; ++ }; ++ ++ TestFloat test; ++ // clang-format off ++ double inputsa[kTableLength] = { ++ 5.3, 4.8, 6.1, 9.8, 9.8, 9.8, -10.0, -8.9, -9.8, -10.0, -8.9, -9.8, ++ dnan, 3.0, -0.0, 0.0, dinf, dnan, 42.0, dinf, dminf, dinf, dnan}; ++ double inputsb[kTableLength] = { ++ 4.8, 5.3, 6.1, -10.0, -8.9, -9.8, 9.8, 9.8, 9.8, -9.8, -11.2, -9.8, ++ 3.0, dnan, 0.0, -0.0, dnan, dinf, dinf, 42.0, dinf, dminf, dnan}; ++ double resd1[kTableLength] = { ++ 4.8, 4.8, 6.1, 9.8, -8.9, -9.8, 9.8, -8.9, -9.8, -9.8, -8.9, -9.8, ++ 3.0, 3.0, -0.0, -0.0, dinf, dinf, 42.0, 42.0, dminf, dminf, dnan}; ++ double resd2[kTableLength] = { ++ 5.3, 5.3, 6.1, -10.0, 9.8, 9.8, -10.0, 9.8, 9.8, -10.0, -11.2, -9.8, ++ 3.0, 3.0, 0.0, 0.0, dinf, dinf, dinf, dinf, dinf, dinf, dnan}; ++ float inputsc[kTableLength] = { ++ 5.3, 4.8, 6.1, 9.8, 9.8, 9.8, -10.0, -8.9, -9.8, -10.0, -8.9, -9.8, ++ fnan, 3.0, -0.0, 0.0, finf, fnan, 42.0, finf, fminf, finf, fnan}; ++ float inputsd[kTableLength] = { ++ 4.8, 5.3, 6.1, -10.0, -8.9, -9.8, 9.8, 9.8, 9.8, -9.8, -11.2, -9.8, ++ 3.0, fnan, -0.0, 0.0, fnan, finf, finf, 42.0, finf, fminf, fnan}; ++ float resf1[kTableLength] = { ++ 4.8, 4.8, 6.1, 9.8, -8.9, -9.8, 9.8, -8.9, -9.8, -9.8, -8.9, -9.8, ++ 3.0, 3.0, -0.0, -0.0, finf, finf, 42.0, 42.0, fminf, fminf, fnan}; ++ float resf2[kTableLength] = { ++ 5.3, 5.3, 6.1, -10.0, 9.8, 9.8, -10.0, 9.8, 9.8, -10.0, -11.2, -9.8, ++ 3.0, 3.0, 0.0, 0.0, finf, finf, finf, finf, finf, finf, fnan}; ++ // clang-format on ++ ++ __ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, a))); ++ __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, b))); ++ __ Fld_s(f10, MemOperand(a0, offsetof(TestFloat, c))); ++ __ Fld_s(f11, MemOperand(a0, offsetof(TestFloat, d))); ++ __ fmina_d(f12, f8, f9); ++ __ fmaxa_d(f13, f8, f9); ++ __ fmina_s(f14, f10, f11); ++ __ fmaxa_s(f15, f10, f11); ++ __ Fst_d(f12, MemOperand(a0, offsetof(TestFloat, resd1))); ++ __ Fst_d(f13, MemOperand(a0, offsetof(TestFloat, resd2))); ++ __ Fst_s(f14, MemOperand(a0, offsetof(TestFloat, resf1))); ++ __ Fst_s(f15, MemOperand(a0, offsetof(TestFloat, resf2))); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ for (int i = 0; i < kTableLength; i++) { ++ test.a = inputsa[i]; ++ test.b = inputsb[i]; ++ 
test.c = inputsc[i]; ++ test.d = inputsd[i]; ++ f.Call(&test, 0, 0, 0, 0); ++ if (i < kTableLength - 1) { ++ CHECK_EQ(test.resd1, resd1[i]); ++ CHECK_EQ(test.resd2, resd2[i]); ++ CHECK_EQ(test.resf1, resf1[i]); ++ CHECK_EQ(test.resf2, resf2[i]); ++ } else { ++ CHECK(std::isnan(test.resd1)); ++ CHECK(std::isnan(test.resd2)); ++ CHECK(std::isnan(test.resf1)); ++ CHECK(std::isnan(test.resf2)); ++ } ++ } ++} ++ ++TEST(FADD) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct TestFloat { ++ double a; ++ double b; ++ double c; ++ float d; ++ float e; ++ float f; ++ }; ++ ++ TestFloat test; ++ ++ __ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, a))); ++ __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, b))); ++ __ fadd_d(f10, f8, f9); ++ __ Fst_d(f10, MemOperand(a0, offsetof(TestFloat, c))); ++ ++ __ Fld_s(f11, MemOperand(a0, offsetof(TestFloat, d))); ++ __ Fld_s(f12, MemOperand(a0, offsetof(TestFloat, e))); ++ __ fadd_s(f13, f11, f12); ++ __ Fst_s(f13, MemOperand(a0, offsetof(TestFloat, f))); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ test.a = 2.0; ++ test.b = 3.0; ++ test.d = 2.0; ++ test.e = 3.0; ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.c, 5.0); ++ CHECK_EQ(test.f, 5.0); ++ ++ test.a = std::numeric_limits::max(); ++ test.b = -std::numeric_limits::max(); // lowest() ++ test.d = std::numeric_limits::max(); ++ test.e = -std::numeric_limits::max(); // lowest() ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.c, 0.0); ++ CHECK_EQ(test.f, 0.0); ++ ++ test.a = std::numeric_limits::max(); ++ test.b = std::numeric_limits::max(); ++ test.d = std::numeric_limits::max(); ++ test.e = std::numeric_limits::max(); ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK(!std::isfinite(test.c)); ++ CHECK(!std::isfinite(test.f)); ++ ++ test.a = 5.0; ++ test.b = std::numeric_limits::signaling_NaN(); ++ test.d = 5.0; ++ test.e = std::numeric_limits::signaling_NaN(); ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK(std::isnan(test.c)); ++ CHECK(std::isnan(test.f)); ++} ++ ++TEST(FSUB) { ++ const int kTableLength = 12; ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct TestFloat { ++ float a; ++ float b; ++ float resultS; ++ double c; ++ double d; ++ double resultD; ++ }; ++ ++ TestFloat test; ++ ++ // clang-format off ++ double inputfs_D[kTableLength] = { ++ 5.3, 4.8, 2.9, -5.3, -4.8, -2.9, ++ 5.3, 4.8, 2.9, -5.3, -4.8, -2.9 ++ }; ++ double inputft_D[kTableLength] = { ++ 4.8, 5.3, 2.9, 4.8, 5.3, 2.9, ++ -4.8, -5.3, -2.9, -4.8, -5.3, -2.9 ++ }; ++ double outputs_D[kTableLength] = { ++ 0.5, -0.5, 0.0, -10.1, -10.1, -5.8, ++ 10.1, 10.1, 5.8, -0.5, 0.5, 0.0 ++ }; ++ float inputfs_S[kTableLength] = { ++ 5.3, 4.8, 2.9, -5.3, -4.8, -2.9, ++ 5.3, 4.8, 2.9, -5.3, -4.8, -2.9 ++ }; ++ float inputft_S[kTableLength] = { ++ 4.8, 5.3, 2.9, 4.8, 5.3, 2.9, ++ -4.8, -5.3, -2.9, -4.8, -5.3, -2.9 ++ }; ++ float outputs_S[kTableLength] = { ++ 0.5, -0.5, 0.0, -10.1, -10.1, -5.8, ++ 10.1, 10.1, 5.8, -0.5, 0.5, 0.0 ++ }; ++ // clang-format on ++ ++ __ Fld_s(f8, MemOperand(a0, offsetof(TestFloat, a))); ++ __ Fld_s(f9, MemOperand(a0, offsetof(TestFloat, b))); ++ __ Fld_d(f10, MemOperand(a0, offsetof(TestFloat, c))); ++ __ Fld_d(f11, 
MemOperand(a0, offsetof(TestFloat, d))); ++ __ fsub_s(f12, f8, f9); ++ __ fsub_d(f13, f10, f11); ++ __ Fst_s(f12, MemOperand(a0, offsetof(TestFloat, resultS))); ++ __ Fst_d(f13, MemOperand(a0, offsetof(TestFloat, resultD))); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ for (int i = 0; i < kTableLength; i++) { ++ test.a = inputfs_S[i]; ++ test.b = inputft_S[i]; ++ test.c = inputfs_D[i]; ++ test.d = inputft_D[i]; ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.resultS, outputs_S[i]); ++ CHECK_EQ(test.resultD, outputs_D[i]); ++ } ++} ++ ++TEST(FMUL) { ++ const int kTableLength = 4; ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct TestFloat { ++ float a; ++ float b; ++ float resultS; ++ double c; ++ double d; ++ double resultD; ++ }; ++ ++ TestFloat test; ++ // clang-format off ++ double inputfs_D[kTableLength] = { ++ 5.3, -5.3, 5.3, -2.9 ++ }; ++ double inputft_D[kTableLength] = { ++ 4.8, 4.8, -4.8, -0.29 ++ }; ++ ++ float inputfs_S[kTableLength] = { ++ 5.3, -5.3, 5.3, -2.9 ++ }; ++ float inputft_S[kTableLength] = { ++ 4.8, 4.8, -4.8, -0.29 ++ }; ++ // clang-format on ++ __ Fld_s(f8, MemOperand(a0, offsetof(TestFloat, a))); ++ __ Fld_s(f9, MemOperand(a0, offsetof(TestFloat, b))); ++ __ Fld_d(f10, MemOperand(a0, offsetof(TestFloat, c))); ++ __ Fld_d(f11, MemOperand(a0, offsetof(TestFloat, d))); ++ __ fmul_s(f12, f8, f9); ++ __ fmul_d(f13, f10, f11); ++ __ Fst_s(f12, MemOperand(a0, offsetof(TestFloat, resultS))); ++ __ Fst_d(f13, MemOperand(a0, offsetof(TestFloat, resultD))); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ for (int i = 0; i < kTableLength; i++) { ++ test.a = inputfs_S[i]; ++ test.b = inputft_S[i]; ++ test.c = inputfs_D[i]; ++ test.d = inputft_D[i]; ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.resultS, inputfs_S[i] * inputft_S[i]); ++ CHECK_EQ(test.resultD, inputfs_D[i] * inputft_D[i]); ++ } ++} ++ ++TEST(FDIV) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct Test { ++ double dOp1; ++ double dOp2; ++ double dRes; ++ float fOp1; ++ float fOp2; ++ float fRes; ++ }; ++ ++ Test test; ++ ++ __ movfcsr2gr(a4); ++ __ movgr2fcsr(zero_reg); ++ ++ __ Fld_d(f8, MemOperand(a0, offsetof(Test, dOp1))); ++ __ Fld_d(f9, MemOperand(a0, offsetof(Test, dOp2))); ++ __ Fld_s(f10, MemOperand(a0, offsetof(Test, fOp1))); ++ __ Fld_s(f11, MemOperand(a0, offsetof(Test, fOp2))); ++ __ fdiv_d(f12, f8, f9); ++ __ fdiv_s(f13, f10, f11); ++ __ Fst_d(f12, MemOperand(a0, offsetof(Test, dRes))); ++ __ Fst_s(f13, MemOperand(a0, offsetof(Test, fRes))); ++ ++ __ movgr2fcsr(a4); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ f.Call(&test, 0, 0, 0, 0); ++ const int test_size = 3; ++ // clang-format off ++ double dOp1[test_size] = { ++ 5.0, DBL_MAX, DBL_MAX}; ++ ++ double dOp2[test_size] = { ++ 2.0, 2.0, -DBL_MAX}; ++ ++ double dRes[test_size] = { ++ 2.5, DBL_MAX / 2.0, -1.0}; ++ 
++ float fOp1[test_size] = { ++ 5.0, FLT_MAX, FLT_MAX}; ++ ++ float fOp2[test_size] = { ++ 2.0, 2.0, -FLT_MAX}; ++ ++ float fRes[test_size] = { ++ 2.5, FLT_MAX / 2.0, -1.0}; ++ // clang-format on ++ ++ for (int i = 0; i < test_size; i++) { ++ test.dOp1 = dOp1[i]; ++ test.dOp2 = dOp2[i]; ++ test.fOp1 = fOp1[i]; ++ test.fOp2 = fOp2[i]; ++ ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.dRes, dRes[i]); ++ CHECK_EQ(test.fRes, fRes[i]); ++ } ++ ++ test.dOp1 = DBL_MAX; ++ test.dOp2 = -0.0; ++ test.fOp1 = FLT_MAX; ++ test.fOp2 = -0.0; ++ ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK(!std::isfinite(test.dRes)); ++ CHECK(!std::isfinite(test.fRes)); ++ ++ test.dOp1 = 0.0; ++ test.dOp2 = -0.0; ++ test.fOp1 = 0.0; ++ test.fOp2 = -0.0; ++ ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK(std::isnan(test.dRes)); ++ CHECK(std::isnan(test.fRes)); ++ ++ test.dOp1 = std::numeric_limits<double>::quiet_NaN(); ++ test.dOp2 = -5.0; ++ test.fOp1 = std::numeric_limits<float>::quiet_NaN(); ++ test.fOp2 = -5.0; ++ ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK(std::isnan(test.dRes)); ++ CHECK(std::isnan(test.fRes)); ++} ++ ++TEST(FABS) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct TestFloat { ++ double a; ++ float b; ++ }; ++ ++ TestFloat test; ++ ++ __ movfcsr2gr(a4); ++ __ movgr2fcsr(zero_reg); ++ ++ __ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, a))); ++ __ Fld_s(f9, MemOperand(a0, offsetof(TestFloat, b))); ++ __ fabs_d(f10, f8); ++ __ fabs_s(f11, f9); ++ __ Fst_d(f10, MemOperand(a0, offsetof(TestFloat, a))); ++ __ Fst_s(f11, MemOperand(a0, offsetof(TestFloat, b))); ++ ++ __ movgr2fcsr(a4); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ test.a = -2.0; ++ test.b = -2.0; ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.a, 2.0); ++ CHECK_EQ(test.b, 2.0); ++ ++ test.a = 2.0; ++ test.b = 2.0; ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.a, 2.0); ++ CHECK_EQ(test.b, 2.0); ++ ++ // Testing biggest positive number ++ test.a = std::numeric_limits<double>::max(); ++ test.b = std::numeric_limits<float>::max(); ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.a, std::numeric_limits<double>::max()); ++ CHECK_EQ(test.b, std::numeric_limits<float>::max()); ++ ++ // Testing smallest negative number ++ test.a = -std::numeric_limits<double>::max(); // lowest() ++ test.b = -std::numeric_limits<float>::max(); // lowest() ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.a, std::numeric_limits<double>::max()); ++ CHECK_EQ(test.b, std::numeric_limits<float>::max()); ++ ++ // Testing smallest positive number ++ test.a = -std::numeric_limits<double>::min(); ++ test.b = -std::numeric_limits<float>::min(); ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.a, std::numeric_limits<double>::min()); ++ CHECK_EQ(test.b, std::numeric_limits<float>::min()); ++ ++ // Testing infinity ++ test.a = ++ -std::numeric_limits<double>::max() / std::numeric_limits<double>::min(); ++ test.b = ++ -std::numeric_limits<float>::max() / std::numeric_limits<float>::min(); ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.a, std::numeric_limits<double>::max() / ++ std::numeric_limits<double>::min()); ++ CHECK_EQ(test.b, std::numeric_limits<float>::max() / ++ std::numeric_limits<float>::min()); ++ ++ test.a = std::numeric_limits<double>::quiet_NaN(); ++ test.b = std::numeric_limits<float>::quiet_NaN(); ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK(std::isnan(test.a)); ++ CHECK(std::isnan(test.b)); ++ ++ test.a = std::numeric_limits<double>::signaling_NaN(); ++ test.b = 
std::numeric_limits<float>::signaling_NaN(); ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK(std::isnan(test.a)); ++ CHECK(std::isnan(test.b)); ++} ++ ++template <typename T> ++struct TestCaseMaddMsub { ++ T fj, fk, fa, fd_fmadd, fd_fmsub, fd_fnmadd, fd_fnmsub; ++}; ++ ++template <typename T, typename F> ++void helper_fmadd_fmsub_fnmadd_fnmsub(F func) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ T x = std::sqrt(static_cast<T>(2.0)); ++ T y = std::sqrt(static_cast<T>(3.0)); ++ T z = std::sqrt(static_cast<T>(5.0)); ++ T x2 = 11.11, y2 = 22.22, z2 = 33.33; ++ // clang-format off ++ TestCaseMaddMsub<T> test_cases[] = { ++ {x, y, z, 0.0, 0.0, 0.0, 0.0}, ++ {x, y, -z, 0.0, 0.0, 0.0, 0.0}, ++ {x, -y, z, 0.0, 0.0, 0.0, 0.0}, ++ {x, -y, -z, 0.0, 0.0, 0.0, 0.0}, ++ {-x, y, z, 0.0, 0.0, 0.0, 0.0}, ++ {-x, y, -z, 0.0, 0.0, 0.0, 0.0}, ++ {-x, -y, z, 0.0, 0.0, 0.0, 0.0}, ++ {-x, -y, -z, 0.0, 0.0, 0.0, 0.0}, ++ {-3.14, 0.2345, -123.000056, 0.0, 0.0, 0.0, 0.0}, ++ {7.3, -23.257, -357.1357, 0.0, 0.0, 0.0, 0.0}, ++ {x2, y2, z2, 0.0, 0.0, 0.0, 0.0}, ++ {x2, y2, -z2, 0.0, 0.0, 0.0, 0.0}, ++ {x2, -y2, z2, 0.0, 0.0, 0.0, 0.0}, ++ {x2, -y2, -z2, 0.0, 0.0, 0.0, 0.0}, ++ {-x2, y2, z2, 0.0, 0.0, 0.0, 0.0}, ++ {-x2, y2, -z2, 0.0, 0.0, 0.0, 0.0}, ++ {-x2, -y2, z2, 0.0, 0.0, 0.0, 0.0}, ++ {-x2, -y2, -z2, 0.0, 0.0, 0.0, 0.0}, ++ }; ++ // clang-format on ++ if (std::is_same<T, float>::value) { ++ __ Fld_s(f8, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fj))); ++ __ Fld_s(f9, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fk))); ++ __ Fld_s(f10, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fa))); ++ } else if (std::is_same<T, double>::value) { ++ __ Fld_d(f8, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fj))); ++ __ Fld_d(f9, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fk))); ++ __ Fld_d(f10, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fa))); ++ } else { ++ UNREACHABLE(); ++ } ++ ++ func(assm); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ ++ const size_t kTableLength = sizeof(test_cases) / sizeof(TestCaseMaddMsub<T>); ++ TestCaseMaddMsub<T> tc; ++ for (size_t i = 0; i < kTableLength; i++) { ++ tc.fj = test_cases[i].fj; ++ tc.fk = test_cases[i].fk; ++ tc.fa = test_cases[i].fa; ++ ++ f.Call(&tc, 0, 0, 0, 0); ++ ++ T res_fmadd; ++ T res_fmsub; ++ T res_fnmadd; ++ T res_fnmsub; ++ res_fmadd = std::fma(tc.fj, tc.fk, tc.fa); ++ res_fmsub = std::fma(tc.fj, tc.fk, -tc.fa); ++ res_fnmadd = -std::fma(tc.fj, tc.fk, tc.fa); ++ res_fnmsub = -std::fma(tc.fj, tc.fk, -tc.fa); ++ ++ CHECK_EQ(tc.fd_fmadd, res_fmadd); ++ CHECK_EQ(tc.fd_fmsub, res_fmsub); ++ CHECK_EQ(tc.fd_fnmadd, res_fnmadd); ++ CHECK_EQ(tc.fd_fnmsub, res_fnmsub); ++ } ++} ++ ++TEST(FMADD_FMSUB_FNMADD_FNMSUB_S) { ++ helper_fmadd_fmsub_fnmadd_fnmsub<float>([](MacroAssembler& assm) { ++ __ fmadd_s(f11, f8, f9, f10); ++ __ Fst_s(f11, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_fmadd))); ++ __ fmsub_s(f12, f8, f9, f10); ++ __ Fst_s(f12, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_fmsub))); ++ __ fnmadd_s(f13, f8, f9, f10); ++ __ Fst_s(f13, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_fnmadd))); ++ __ fnmsub_s(f14, f8, f9, f10); ++ __ Fst_s(f14, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_fnmsub))); ++ }); ++} ++ ++TEST(FMADD_FMSUB_FNMADD_FNMSUB_D) { ++ helper_fmadd_fmsub_fnmadd_fnmsub<double>([](MacroAssembler& assm) { ++ __ fmadd_d(f11, f8, f9, f10); ++ __ Fst_d(f11, MemOperand(a0, 
offsetof(TestCaseMaddMsub, fd_fmadd))); ++ __ fmsub_d(f12, f8, f9, f10); ++ __ Fst_d(f12, MemOperand(a0, offsetof(TestCaseMaddMsub, fd_fmsub))); ++ __ fnmadd_d(f13, f8, f9, f10); ++ __ Fst_d(f13, ++ MemOperand(a0, offsetof(TestCaseMaddMsub, fd_fnmadd))); ++ __ fnmsub_d(f14, f8, f9, f10); ++ __ Fst_d(f14, ++ MemOperand(a0, offsetof(TestCaseMaddMsub, fd_fnmsub))); ++ }); ++} ++ ++/* ++TEST(FSQRT_FRSQRT_FRECIP) { ++ const int kTableLength = 4; ++ const double deltaDouble = 2E-15; ++ const float deltaFloat = 2E-7; ++ const float sqrt2_s = sqrt(2); ++ const double sqrt2_d = sqrt(2); ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct TestFloat { ++ float a; ++ float resultS1; ++ float resultS2; ++ float resultS3; ++ double b; ++ double resultD1; ++ double resultD2; ++ double resultD3; ++ }; ++ TestFloat test; ++ // clang-format off ++ double inputs_D[kTableLength] = { ++ 0.0L, 4.0L, 2.0L, 4e-28L ++ }; ++ ++ double outputs_D[kTableLength] = { ++ 0.0L, 2.0L, sqrt2_d, 2e-14L ++ }; ++ float inputs_S[kTableLength] = { ++ 0.0, 4.0, 2.0, 4e-28 ++ }; ++ ++ float outputs_S[kTableLength] = { ++ 0.0, 2.0, sqrt2_s, 2e-14 ++ }; ++ // clang-format on ++ __ Fld_s(f8, MemOperand(a0, offsetof(TestFloat, a))); ++ __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, b))); ++ __ fsqrt_s(f10, f8); ++ __ fsqrt_d(f11, f9); ++ __ frsqrt_s(f12, f8); ++ __ frsqrt_d(f13, f9); ++ __ frecip_s(f14, f8); ++ __ frecip_d(f15, f9); ++ __ Fst_s(f10, MemOperand(a0, offsetof(TestFloat, resultS1))); ++ __ Fst_d(f11, MemOperand(a0, offsetof(TestFloat, resultD1))); ++ __ Fst_s(f12, MemOperand(a0, offsetof(TestFloat, resultS2))); ++ __ Fst_d(f13, MemOperand(a0, offsetof(TestFloat, resultD2))); ++ __ Fst_s(f14, MemOperand(a0, offsetof(TestFloat, resultS3))); ++ __ Fst_d(f15, MemOperand(a0, offsetof(TestFloat, resultD3))); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ ++ for (int i = 0; i < kTableLength; i++) { ++ float f1; ++ double d1; ++ test.a = inputs_S[i]; ++ test.b = inputs_D[i]; ++ ++ f.Call(&test, 0, 0, 0, 0); ++ ++ CHECK_EQ(test.resultS1, outputs_S[i]); ++ CHECK_EQ(test.resultD1, outputs_D[i]); ++ ++ if (i != 0) { ++ f1 = test.resultS2 - 1.0F/outputs_S[i]; ++ f1 = (f1 < 0) ? f1 : -f1; ++ CHECK(f1 <= deltaFloat); ++ d1 = test.resultD2 - 1.0L/outputs_D[i]; ++ d1 = (d1 < 0) ? d1 : -d1; ++ CHECK(d1 <= deltaDouble); ++ f1 = test.resultS3 - 1.0F/inputs_S[i]; ++ f1 = (f1 < 0) ? f1 : -f1; ++ CHECK(f1 <= deltaFloat); ++ d1 = test.resultD3 - 1.0L/inputs_D[i]; ++ d1 = (d1 < 0) ? d1 : -d1; ++ CHECK(d1 <= deltaDouble); ++ } else { ++ CHECK_EQ(test.resultS2, 1.0F/outputs_S[i]); ++ CHECK_EQ(test.resultD2, 1.0L/outputs_D[i]); ++ CHECK_EQ(test.resultS3, 1.0F/inputs_S[i]); ++ CHECK_EQ(test.resultD3, 1.0L/inputs_D[i]); ++ } ++ } ++}*/ ++ ++TEST(LA15) { ++ // Test chaining of label usages within instructions (issue 1644). 
++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ Assembler assm(AssemblerOptions{}); ++ ++ Label target; ++ __ beq(a0, a1, &target); ++ __ nop(); ++ __ bne(a0, a1, &target); ++ __ nop(); ++ __ bind(&target); ++ __ nop(); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ f.Call(1, 1, 0, 0, 0); ++} ++ ++TEST(Trampoline) { ++ static const int kMaxBranchOffset = (1 << (18 - 1)) - 1; ++ ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ Label done; ++ size_t nr_calls = kMaxBranchOffset / kInstrSize + 5; ++ ++ __ xor_(a2, a2, a2); ++ __ BranchShort(&done, eq, a0, Operand(a1)); ++ for (size_t i = 0; i < nr_calls; ++i) { ++ __ addi_d(a2, a2, 1); ++ } ++ __ bind(&done); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ ++ int64_t res = reinterpret_cast(f.Call(42, 42, 0, 0, 0)); ++ CHECK_EQ(0, res); ++} ++ ++#undef __ ++ ++} // namespace internal ++} // namespace v8 +diff --git a/src/3rdparty/chromium/v8/test/cctest/test-disasm-la64.cc b/src/3rdparty/chromium/v8/test/cctest/test-disasm-la64.cc +new file mode 100644 +index 0000000000..36e46dc213 +--- /dev/null ++++ b/src/3rdparty/chromium/v8/test/cctest/test-disasm-la64.cc +@@ -0,0 +1,966 @@ ++// Copyright 2012 the V8 project authors. All rights reserved. ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// * Redistributions of source code must retain the above copyright ++// notice, this list of conditions and the following disclaimer. ++// * Redistributions in binary form must reproduce the above ++// copyright notice, this list of conditions and the following ++// disclaimer in the documentation and/or other materials provided ++// with the distribution. ++// * Neither the name of Google Inc. nor the names of its ++// contributors may be used to endorse or promote products derived ++// from this software without specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++// ++ ++#include ++#include ++ ++#include "src/init/v8.h" ++ ++#include "src/codegen/macro-assembler.h" ++#include "src/debug/debug.h" ++#include "src/diagnostics/disasm.h" ++#include "src/diagnostics/disassembler.h" ++#include "src/execution/frames-inl.h" ++#include "test/cctest/cctest.h" ++ ++namespace v8 { ++namespace internal { ++ ++bool DisassembleAndCompare(byte* pc, const char* compare_string) { ++ disasm::NameConverter converter; ++ disasm::Disassembler disasm(converter); ++ EmbeddedVector disasm_buffer; ++ ++ /* if (prev_instr_compact_branch) { ++ disasm.InstructionDecode(disasm_buffer, pc); ++ pc += 4; ++ }*/ ++ ++ disasm.InstructionDecode(disasm_buffer, pc); ++ ++ if (strcmp(compare_string, disasm_buffer.begin()) != 0) { ++ fprintf(stderr, ++ "expected: \n" ++ "%s\n" ++ "disassembled: \n" ++ "%s\n\n", ++ compare_string, disasm_buffer.begin()); ++ return false; ++ } ++ return true; ++} ++ ++// Set up V8 to a state where we can at least run the assembler and ++// disassembler. Declare the variables and allocate the data structures used ++// in the rest of the macros. ++#define SET_UP() \ ++ CcTest::InitializeVM(); \ ++ Isolate* isolate = CcTest::i_isolate(); \ ++ HandleScope scope(isolate); \ ++ byte* buffer = reinterpret_cast(malloc(4 * 1024)); \ ++ Assembler assm(AssemblerOptions{}, \ ++ ExternalAssemblerBuffer(buffer, 4 * 1024)); \ ++ bool failure = false; ++ ++// This macro assembles one instruction using the preallocated assembler and ++// disassembles the generated instruction, comparing the output to the expected ++// value. If the comparison fails an error message is printed, but the test ++// continues to run until the end. ++#define COMPARE(asm_, compare_string) \ ++ { \ ++ int pc_offset = assm.pc_offset(); \ ++ byte* progcounter = &buffer[pc_offset]; \ ++ assm.asm_; \ ++ if (!DisassembleAndCompare(progcounter, compare_string)) failure = true; \ ++ } ++ ++// Verify that all invocations of the COMPARE macro passed successfully. ++// Exit with a failure if at least one of the tests failed. 
++#define VERIFY_RUN() \ ++ if (failure) { \ ++ FATAL("LA64 Disassembler tests failed.\n"); \ ++ } ++ ++#define COMPARE_PC_REL(asm_, compare_string, offset) \ ++ { \ ++ int pc_offset = assm.pc_offset(); \ ++ byte* progcounter = &buffer[pc_offset]; \ ++ char str_with_address[100]; \ ++ printf("%p\n", static_cast(progcounter)); \ ++ snprintf(str_with_address, sizeof(str_with_address), "%s -> %p", \ ++ compare_string, static_cast(progcounter + (offset * 4))); \ ++ assm.asm_; \ ++ if (!DisassembleAndCompare(progcounter, str_with_address)) failure = true; \ ++ } ++ ++TEST(TypeOp6) { ++ SET_UP(); ++ ++ COMPARE(jirl(ra, t7, 0), "4c000261 jirl ra, t7, 0"); ++ COMPARE(jirl(ra, t7, 32767), "4dfffe61 jirl ra, t7, 32767"); ++ COMPARE(jirl(ra, t7, -32768), "4e000261 jirl ra, t7, -32768"); ++ ++ VERIFY_RUN(); ++} ++ ++TEST(TypeOp6PC) { ++ SET_UP(); ++ ++ COMPARE_PC_REL(beqz(t7, 1048575), "43fffe6f beqz t7, 1048575", ++ 1048575); ++ COMPARE_PC_REL(beqz(t0, -1048576), "40000190 beqz t0, -1048576", ++ -1048576); ++ COMPARE_PC_REL(beqz(t1, 0), "400001a0 beqz t1, 0", 0); ++ ++ COMPARE_PC_REL(bnez(a2, 1048575), "47fffccf bnez a2, 1048575", ++ 1048575); ++ COMPARE_PC_REL(bnez(s3, -1048576), "44000350 bnez s3, -1048576", ++ -1048576); ++ COMPARE_PC_REL(bnez(t8, 0), "44000280 bnez t8, 0", 0); ++ ++ COMPARE_PC_REL(bceqz(FCC0, 1048575), "4bfffc0f bceqz fcc0, 1048575", ++ 1048575); ++ COMPARE_PC_REL(bceqz(FCC0, -1048576), ++ "48000010 bceqz fcc0, -1048576", -1048576); ++ COMPARE_PC_REL(bceqz(FCC0, 0), "48000000 bceqz fcc0, 0", 0); ++ ++ COMPARE_PC_REL(bcnez(FCC0, 1048575), "4bfffd0f bcnez fcc0, 1048575", ++ 1048575); ++ COMPARE_PC_REL(bcnez(FCC0, -1048576), ++ "48000110 bcnez fcc0, -1048576", -1048576); ++ COMPARE_PC_REL(bcnez(FCC0, 0), "48000100 bcnez fcc0, 0", 0); ++ ++ COMPARE_PC_REL(b(33554431), "53fffdff b 33554431", 33554431); ++ COMPARE_PC_REL(b(-33554432), "50000200 b -33554432", -33554432); ++ COMPARE_PC_REL(b(0), "50000000 b 0", 0); ++ ++ COMPARE_PC_REL(beq(t0, a6, 32767), "59fffd8a beq t0, a6, 32767", ++ 32767); ++ COMPARE_PC_REL(beq(t1, a0, -32768), "5a0001a4 beq t1, a0, -32768", ++ -32768); ++ COMPARE_PC_REL(beq(a4, t1, 0), "5800010d beq a4, t1, 0", 0); ++ ++ COMPARE_PC_REL(bne(a3, a4, 32767), "5dfffce8 bne a3, a4, 32767", ++ 32767); ++ COMPARE_PC_REL(bne(a6, a5, -32768), "5e000149 bne a6, a5, -32768", ++ -32768); ++ COMPARE_PC_REL(bne(a4, a5, 0), "5c000109 bne a4, a5, 0", 0); ++ ++ COMPARE_PC_REL(blt(a4, a6, 32767), "61fffd0a blt a4, a6, 32767", ++ 32767); ++ COMPARE_PC_REL(blt(a4, a5, -32768), "62000109 blt a4, a5, -32768", ++ -32768); ++ COMPARE_PC_REL(blt(a4, a6, 0), "6000010a blt a4, a6, 0", 0); ++ ++ COMPARE_PC_REL(bge(s7, a5, 32767), "65ffffc9 bge s7, a5, 32767", ++ 32767); ++ COMPARE_PC_REL(bge(a1, a3, -32768), "660000a7 bge a1, a3, -32768", ++ -32768); ++ COMPARE_PC_REL(bge(a5, s3, 0), "6400013a bge a5, s3, 0", 0); ++ ++ COMPARE_PC_REL(bltu(a5, s7, 32767), "69fffd3e bltu a5, s7, 32767", ++ 32767); ++ COMPARE_PC_REL(bltu(a4, a5, -32768), "6a000109 bltu a4, a5, -32768", ++ -32768); ++ COMPARE_PC_REL(bltu(a4, t6, 0), "68000112 bltu a4, t6, 0", 0); ++ ++ COMPARE_PC_REL(bgeu(a7, a6, 32767), "6dfffd6a bgeu a7, a6, 32767", ++ 32767); ++ COMPARE_PC_REL(bgeu(a5, a3, -32768), "6e000127 bgeu a5, a3, -32768", ++ -32768); ++ COMPARE_PC_REL(bgeu(t2, t1, 0), "6c0001cd bgeu t2, t1, 0", 0); ++ ++ VERIFY_RUN(); ++} ++ ++TEST(TypeOp7) { ++ SET_UP(); ++ ++ COMPARE(lu12i_w(a4, 524287), "14ffffe8 lu12i.w a4, 524287"); ++ COMPARE(lu12i_w(a5, -524288), "15000009 lu12i.w a5, -524288"); ++ COMPARE(lu12i_w(a6, 0), 
"1400000a lu12i.w a6, 0"); ++ ++ COMPARE(lu32i_d(a7, 524287), "16ffffeb lu32i.d a7, 524287"); ++ COMPARE(lu32i_d(t0, 524288), "1700000c lu32i.d t0, -524288"); ++ COMPARE(lu32i_d(t1, 0), "1600000d lu32i.d t1, 0"); ++ ++ COMPARE(pcaddi(t1, 1), "1800002d pcaddi t1, 1"); ++ COMPARE(pcaddi(t2, 524287), "18ffffee pcaddi t2, 524287"); ++ COMPARE(pcaddi(t3, -524288), "1900000f pcaddi t3, -524288"); ++ COMPARE(pcaddi(t4, 0), "18000010 pcaddi t4, 0"); ++ ++ COMPARE(pcalau12i(t5, 524287), "1afffff1 pcalau12i t5, 524287"); ++ COMPARE(pcalau12i(t6, -524288), "1b000012 pcalau12i t6, -524288"); ++ COMPARE(pcalau12i(a4, 0), "1a000008 pcalau12i a4, 0"); ++ ++ COMPARE(pcaddu12i(a5, 524287), "1cffffe9 pcaddu12i a5, 524287"); ++ COMPARE(pcaddu12i(a6, -524288), "1d00000a pcaddu12i a6, -524288"); ++ COMPARE(pcaddu12i(a7, 0), "1c00000b pcaddu12i a7, 0"); ++ ++ COMPARE(pcaddu18i(t0, 524287), "1effffec pcaddu18i t0, 524287"); ++ COMPARE(pcaddu18i(t1, -524288), "1f00000d pcaddu18i t1, -524288"); ++ COMPARE(pcaddu18i(t2, 0), "1e00000e pcaddu18i t2, 0"); ++ ++ VERIFY_RUN(); ++} ++ ++TEST(TypeOp8) { ++ SET_UP(); ++ ++ COMPARE(ll_w(t2, t3, 32764), "207ffdee ll.w t2, t3, 32764"); ++ COMPARE(ll_w(t3, t4, -32768), "2080020f ll.w t3, t4, -32768"); ++ COMPARE(ll_w(t5, t6, 0), "20000251 ll.w t5, t6, 0"); ++ ++ COMPARE(sc_w(a6, a7, 32764), "217ffd6a sc.w a6, a7, 32764"); ++ COMPARE(sc_w(t0, t1, -32768), "218001ac sc.w t0, t1, -32768"); ++ COMPARE(sc_w(t2, t3, 0), "210001ee sc.w t2, t3, 0"); ++ ++ COMPARE(ll_d(a0, a1, 32764), "227ffca4 ll.d a0, a1, 32764"); ++ COMPARE(ll_d(a2, a3, -32768), "228000e6 ll.d a2, a3, -32768"); ++ COMPARE(ll_d(a4, a5, 0), "22000128 ll.d a4, a5, 0"); ++ ++ COMPARE(sc_d(t4, t5, 32764), "237ffe30 sc.d t4, t5, 32764"); ++ COMPARE(sc_d(t6, a0, -32768), "23800092 sc.d t6, a0, -32768"); ++ COMPARE(sc_d(a1, a2, 0), "230000c5 sc.d a1, a2, 0"); ++ ++ COMPARE(ldptr_w(a4, a5, 32764), "247ffd28 ldptr.w a4, a5, 32764"); ++ COMPARE(ldptr_w(a6, a7, -32768), "2480016a ldptr.w a6, a7, -32768"); ++ COMPARE(ldptr_w(t0, t1, 0), "240001ac ldptr.w t0, t1, 0"); ++ ++ COMPARE(stptr_w(a4, a5, 32764), "257ffd28 stptr.w a4, a5, 32764"); ++ COMPARE(stptr_w(a6, a7, -32768), "2580016a stptr.w a6, a7, -32768"); ++ COMPARE(stptr_w(t0, t1, 0), "250001ac stptr.w t0, t1, 0"); ++ ++ COMPARE(ldptr_d(t2, t3, 32764), "267ffdee ldptr.d t2, t3, 32764"); ++ COMPARE(ldptr_d(t4, t5, -32768), "26800230 ldptr.d t4, t5, -32768"); ++ COMPARE(ldptr_d(t6, a4, 0), "26000112 ldptr.d t6, a4, 0"); ++ ++ COMPARE(stptr_d(a5, a6, 32764), "277ffd49 stptr.d a5, a6, 32764"); ++ COMPARE(stptr_d(a7, t0, -32768), "2780018b stptr.d a7, t0, -32768"); ++ COMPARE(stptr_d(t1, t2, 0), "270001cd stptr.d t1, t2, 0"); ++ ++ VERIFY_RUN(); ++} ++ ++TEST(TypeOp10) { ++ SET_UP(); ++ ++ COMPARE(bstrins_w(a4, a5, 31, 16), ++ "007f4128 bstrins.w a4, a5, 31, 16"); ++ COMPARE(bstrins_w(a6, a7, 5, 0), "0065016a bstrins.w a6, a7, 5, 0"); ++ ++ COMPARE(bstrins_d(a3, zero_reg, 17, 0), ++ "00910007 bstrins.d a3, zero_reg, 17, 0"); ++ COMPARE(bstrins_d(t1, zero_reg, 17, 0), ++ "0091000d bstrins.d t1, zero_reg, 17, 0"); ++ ++ COMPARE(bstrpick_w(t0, t1, 31, 29), ++ "007ff5ac bstrpick.w t0, t1, 31, 29"); ++ COMPARE(bstrpick_w(a4, a5, 16, 0), ++ "00708128 bstrpick.w a4, a5, 16, 0"); ++ ++ COMPARE(bstrpick_d(a5, a5, 31, 0), ++ "00df0129 bstrpick.d a5, a5, 31, 0"); ++ COMPARE(bstrpick_d(a4, a4, 25, 2), ++ "00d90908 bstrpick.d a4, a4, 25, 2"); ++ ++ COMPARE(slti(t2, a5, 2047), "021ffd2e slti t2, a5, 2047"); ++ COMPARE(slti(a7, a1, -2048), "022000ab slti a7, a1, -2048"); ++ ++ 
COMPARE(sltui(a7, a7, 2047), "025ffd6b sltui a7, a7, 2047"); ++ COMPARE(sltui(t1, t1, -2048), "026001ad sltui t1, t1, -2048"); ++ ++ COMPARE(addi_w(t0, t2, 2047), "029ffdcc addi.w t0, t2, 2047"); ++ COMPARE(addi_w(a0, a0, -2048), "02a00084 addi.w a0, a0, -2048"); ++ ++ COMPARE(addi_d(a0, zero_reg, 2047), ++ "02dffc04 addi.d a0, zero_reg, 2047"); ++ COMPARE(addi_d(t7, t7, -2048), "02e00273 addi.d t7, t7, -2048"); ++ ++ COMPARE(lu52i_d(a0, a0, 2047), "031ffc84 lu52i.d a0, a0, 2047"); ++ COMPARE(lu52i_d(a1, a1, -2048), "032000a5 lu52i.d a1, a1, -2048"); ++ ++ COMPARE(andi(s3, a3, 0xfff), "037ffcfa andi s3, a3, 0xfff"); ++ COMPARE(andi(a4, a4, 0), "03400108 andi a4, a4, 0x0"); ++ ++ COMPARE(ori(t6, t6, 0xfff), "03bffe52 ori t6, t6, 0xfff"); ++ COMPARE(ori(t6, t6, 0), "03800252 ori t6, t6, 0x0"); ++ ++ COMPARE(xori(t1, t1, 0xfff), "03fffdad xori t1, t1, 0xfff"); ++ COMPARE(xori(a3, a3, 0x0), "03c000e7 xori a3, a3, 0x0"); ++ ++ COMPARE(ld_b(a1, a1, 2047), "281ffca5 ld.b a1, a1, 2047"); ++ COMPARE(ld_b(a4, a4, -2048), "28200108 ld.b a4, a4, -2048"); ++ ++ COMPARE(ld_h(a4, a0, 2047), "285ffc88 ld.h a4, a0, 2047"); ++ COMPARE(ld_h(a4, a3, -2048), "286000e8 ld.h a4, a3, -2048"); ++ ++ COMPARE(ld_w(a6, a6, 2047), "289ffd4a ld.w a6, a6, 2047"); ++ COMPARE(ld_w(a5, a4, -2048), "28a00109 ld.w a5, a4, -2048"); ++ ++ COMPARE(ld_d(a0, a3, 2047), "28dffce4 ld.d a0, a3, 2047"); ++ COMPARE(ld_d(a6, fp, -2048), "28e002ca ld.d a6, fp, -2048"); ++ COMPARE(ld_d(a0, a6, 0), "28c00144 ld.d a0, a6, 0"); ++ ++ COMPARE(st_b(a4, a0, 2047), "291ffc88 st.b a4, a0, 2047"); ++ COMPARE(st_b(a6, a5, -2048), "2920012a st.b a6, a5, -2048"); ++ ++ COMPARE(st_h(a4, a0, 2047), "295ffc88 st.h a4, a0, 2047"); ++ COMPARE(st_h(t1, t2, -2048), "296001cd st.h t1, t2, -2048"); ++ ++ COMPARE(st_w(t3, a4, 2047), "299ffd0f st.w t3, a4, 2047"); ++ COMPARE(st_w(a3, t2, -2048), "29a001c7 st.w a3, t2, -2048"); ++ ++ COMPARE(st_d(s3, sp, 2047), "29dffc7a st.d s3, sp, 2047"); ++ COMPARE(st_d(fp, s6, -2048), "29e003b6 st.d fp, s6, -2048"); ++ ++ COMPARE(ld_bu(a6, a0, 2047), "2a1ffc8a ld.bu a6, a0, 2047"); ++ COMPARE(ld_bu(a7, a7, -2048), "2a20016b ld.bu a7, a7, -2048"); ++ ++ COMPARE(ld_hu(a7, a7, 2047), "2a5ffd6b ld.hu a7, a7, 2047"); ++ COMPARE(ld_hu(a3, a3, -2048), "2a6000e7 ld.hu a3, a3, -2048"); ++ ++ COMPARE(ld_wu(a3, a0, 2047), "2a9ffc87 ld.wu a3, a0, 2047"); ++ COMPARE(ld_wu(a3, a5, -2048), "2aa00127 ld.wu a3, a5, -2048"); ++ ++ COMPARE(preld(31, a7, 2047), "2adffd7f preld 0x1f(31), a7, 2047"); ++ COMPARE(preld(0, t0, -2048), "2ae00180 preld 0x0(0), t0, -2048"); ++ ++ COMPARE(fld_s(f0, a3, 2047), "2b1ffce0 fld.s f0, a3, 2047"); ++ COMPARE(fld_s(f0, a1, -2048), "2b2000a0 fld.s f0, a1, -2048"); ++ ++ COMPARE(fld_d(f0, a0, 2047), "2b9ffc80 fld.d f0, a0, 2047"); ++ COMPARE(fld_d(f0, fp, -2048), "2ba002c0 fld.d f0, fp, -2048"); ++ ++ COMPARE(fst_d(f0, fp, 2047), "2bdffec0 fst.d f0, fp, 2047"); ++ COMPARE(fst_d(f0, a0, -2048), "2be00080 fst.d f0, a0, -2048"); ++ ++ COMPARE(fst_s(f0, a5, 2047), "2b5ffd20 fst.s f0, a5, 2047"); ++ COMPARE(fst_s(f0, a3, -2048), "2b6000e0 fst.s f0, a3, -2048"); ++ ++ VERIFY_RUN(); ++} ++ ++TEST(TypeOp12) { ++ SET_UP(); ++ ++ COMPARE(fmadd_s(f0, f1, f2, f3), "08118820 fmadd.s f0, f1, f2, f3"); ++ COMPARE(fmadd_s(f4, f5, f6, f7), "081398a4 fmadd.s f4, f5, f6, f7"); ++ ++ COMPARE(fmadd_d(f8, f9, f10, f11), ++ "0825a928 fmadd.d f8, f9, f10, f11"); ++ COMPARE(fmadd_d(f12, f13, f14, f15), ++ "0827b9ac fmadd.d f12, f13, f14, f15"); ++ ++ COMPARE(fmsub_s(f0, f1, f2, f3), "08518820 fmsub.s f0, f1, f2, f3"); ++ 
COMPARE(fmsub_s(f4, f5, f6, f7), "085398a4 fmsub.s f4, f5, f6, f7"); ++ ++ COMPARE(fmsub_d(f8, f9, f10, f11), ++ "0865a928 fmsub.d f8, f9, f10, f11"); ++ COMPARE(fmsub_d(f12, f13, f14, f15), ++ "0867b9ac fmsub.d f12, f13, f14, f15"); ++ ++ COMPARE(fnmadd_s(f0, f1, f2, f3), ++ "08918820 fnmadd.s f0, f1, f2, f3"); ++ COMPARE(fnmadd_s(f4, f5, f6, f7), ++ "089398a4 fnmadd.s f4, f5, f6, f7"); ++ ++ COMPARE(fnmadd_d(f8, f9, f10, f11), ++ "08a5a928 fnmadd.d f8, f9, f10, f11"); ++ COMPARE(fnmadd_d(f12, f13, f14, f15), ++ "08a7b9ac fnmadd.d f12, f13, f14, f15"); ++ ++ COMPARE(fnmsub_s(f0, f1, f2, f3), ++ "08d18820 fnmsub.s f0, f1, f2, f3"); ++ COMPARE(fnmsub_s(f4, f5, f6, f7), ++ "08d398a4 fnmsub.s f4, f5, f6, f7"); ++ ++ COMPARE(fnmsub_d(f8, f9, f10, f11), ++ "08e5a928 fnmsub.d f8, f9, f10, f11"); ++ COMPARE(fnmsub_d(f12, f13, f14, f15), ++ "08e7b9ac fnmsub.d f12, f13, f14, f15"); ++ ++ COMPARE(fcmp_cond_s(CAF, f1, f2, FCC0), ++ "0c100820 fcmp.caf.s fcc0, f1, f2"); ++ COMPARE(fcmp_cond_s(CUN, f5, f6, FCC0), ++ "0c1418a0 fcmp.cun.s fcc0, f5, f6"); ++ COMPARE(fcmp_cond_s(CEQ, f9, f10, FCC0), ++ "0c122920 fcmp.ceq.s fcc0, f9, f10"); ++ COMPARE(fcmp_cond_s(CUEQ, f13, f14, FCC0), ++ "0c1639a0 fcmp.cueq.s fcc0, f13, f14"); ++ ++ COMPARE(fcmp_cond_s(CLT, f1, f2, FCC0), ++ "0c110820 fcmp.clt.s fcc0, f1, f2"); ++ COMPARE(fcmp_cond_s(CULT, f5, f6, FCC0), ++ "0c1518a0 fcmp.cult.s fcc0, f5, f6"); ++ COMPARE(fcmp_cond_s(CLE, f9, f10, FCC0), ++ "0c132920 fcmp.cle.s fcc0, f9, f10"); ++ COMPARE(fcmp_cond_s(CULE, f13, f14, FCC0), ++ "0c1739a0 fcmp.cule.s fcc0, f13, f14"); ++ ++ COMPARE(fcmp_cond_s(CNE, f1, f2, FCC0), ++ "0c180820 fcmp.cne.s fcc0, f1, f2"); ++ COMPARE(fcmp_cond_s(COR, f5, f6, FCC0), ++ "0c1a18a0 fcmp.cor.s fcc0, f5, f6"); ++ COMPARE(fcmp_cond_s(CUNE, f9, f10, FCC0), ++ "0c1c2920 fcmp.cune.s fcc0, f9, f10"); ++ COMPARE(fcmp_cond_s(SAF, f13, f14, FCC0), ++ "0c10b9a0 fcmp.saf.s fcc0, f13, f14"); ++ ++ COMPARE(fcmp_cond_s(SUN, f1, f2, FCC0), ++ "0c148820 fcmp.sun.s fcc0, f1, f2"); ++ COMPARE(fcmp_cond_s(SEQ, f5, f6, FCC0), ++ "0c1298a0 fcmp.seq.s fcc0, f5, f6"); ++ COMPARE(fcmp_cond_s(SUEQ, f9, f10, FCC0), ++ "0c16a920 fcmp.sueq.s fcc0, f9, f10"); ++ // COMPARE(fcmp_cond_s(SLT, f13, f14, FCC0), ++ // "0c11b9a0 fcmp.slt.s fcc0, f13, f14"); ++ ++ COMPARE(fcmp_cond_s(SULT, f1, f2, FCC0), ++ "0c158820 fcmp.sult.s fcc0, f1, f2"); ++ COMPARE(fcmp_cond_s(SLE, f5, f6, FCC0), ++ "0c1398a0 fcmp.sle.s fcc0, f5, f6"); ++ COMPARE(fcmp_cond_s(SULE, f9, f10, FCC0), ++ "0c17a920 fcmp.sule.s fcc0, f9, f10"); ++ COMPARE(fcmp_cond_s(SNE, f13, f14, FCC0), ++ "0c18b9a0 fcmp.sne.s fcc0, f13, f14"); ++ COMPARE(fcmp_cond_s(SOR, f13, f14, FCC0), ++ "0c1ab9a0 fcmp.sor.s fcc0, f13, f14"); ++ COMPARE(fcmp_cond_s(SUNE, f1, f2, FCC0), ++ "0c1c8820 fcmp.sune.s fcc0, f1, f2"); ++ ++ COMPARE(fcmp_cond_d(CAF, f1, f2, FCC0), ++ "0c200820 fcmp.caf.d fcc0, f1, f2"); ++ COMPARE(fcmp_cond_d(CUN, f5, f6, FCC0), ++ "0c2418a0 fcmp.cun.d fcc0, f5, f6"); ++ COMPARE(fcmp_cond_d(CEQ, f9, f10, FCC0), ++ "0c222920 fcmp.ceq.d fcc0, f9, f10"); ++ COMPARE(fcmp_cond_d(CUEQ, f13, f14, FCC0), ++ "0c2639a0 fcmp.cueq.d fcc0, f13, f14"); ++ ++ COMPARE(fcmp_cond_d(CLT, f1, f2, FCC0), ++ "0c210820 fcmp.clt.d fcc0, f1, f2"); ++ COMPARE(fcmp_cond_d(CULT, f5, f6, FCC0), ++ "0c2518a0 fcmp.cult.d fcc0, f5, f6"); ++ COMPARE(fcmp_cond_d(CLE, f9, f10, FCC0), ++ "0c232920 fcmp.cle.d fcc0, f9, f10"); ++ COMPARE(fcmp_cond_d(CULE, f13, f14, FCC0), ++ "0c2739a0 fcmp.cule.d fcc0, f13, f14"); ++ ++ COMPARE(fcmp_cond_d(CNE, f1, f2, FCC0), ++ "0c280820 fcmp.cne.d fcc0, f1, 
f2"); ++ COMPARE(fcmp_cond_d(COR, f5, f6, FCC0), ++ "0c2a18a0 fcmp.cor.d fcc0, f5, f6"); ++ COMPARE(fcmp_cond_d(CUNE, f9, f10, FCC0), ++ "0c2c2920 fcmp.cune.d fcc0, f9, f10"); ++ COMPARE(fcmp_cond_d(SAF, f13, f14, FCC0), ++ "0c20b9a0 fcmp.saf.d fcc0, f13, f14"); ++ ++ COMPARE(fcmp_cond_d(SUN, f1, f2, FCC0), ++ "0c248820 fcmp.sun.d fcc0, f1, f2"); ++ COMPARE(fcmp_cond_d(SEQ, f5, f6, FCC0), ++ "0c2298a0 fcmp.seq.d fcc0, f5, f6"); ++ COMPARE(fcmp_cond_d(SUEQ, f9, f10, FCC0), ++ "0c26a920 fcmp.sueq.d fcc0, f9, f10"); ++ // COMPARE(fcmp_cond_d(SLT, f13, f14, FCC0), ++ // "0c21b9a0 fcmp.slt.d fcc0, f13, f14"); ++ ++ COMPARE(fcmp_cond_d(SULT, f1, f2, FCC0), ++ "0c258820 fcmp.sult.d fcc0, f1, f2"); ++ COMPARE(fcmp_cond_d(SLE, f5, f6, FCC0), ++ "0c2398a0 fcmp.sle.d fcc0, f5, f6"); ++ COMPARE(fcmp_cond_d(SULE, f9, f10, FCC0), ++ "0c27a920 fcmp.sule.d fcc0, f9, f10"); ++ COMPARE(fcmp_cond_d(SNE, f13, f14, FCC0), ++ "0c28b9a0 fcmp.sne.d fcc0, f13, f14"); ++ COMPARE(fcmp_cond_d(SOR, f13, f14, FCC0), ++ "0c2ab9a0 fcmp.sor.d fcc0, f13, f14"); ++ COMPARE(fcmp_cond_d(SUNE, f1, f2, FCC0), ++ "0c2c8820 fcmp.sune.d fcc0, f1, f2"); ++ ++ VERIFY_RUN(); ++} ++ ++TEST(TypeOp14) { ++ SET_UP(); ++ ++ COMPARE(alsl_w(a0, a1, a2, 1), "000418a4 alsl.w a0, a1, a2, 1"); ++ COMPARE(alsl_w(a3, a4, a5, 3), "00052507 alsl.w a3, a4, a5, 3"); ++ COMPARE(alsl_w(a6, a7, t0, 4), "0005b16a alsl.w a6, a7, t0, 4"); ++ ++ COMPARE(alsl_wu(t1, t2, t3, 1), "00063dcd alsl.wu t1, t2, t3, 1"); ++ COMPARE(alsl_wu(t4, t5, t6, 3), "00074a30 alsl.wu t4, t5, t6, 3"); ++ COMPARE(alsl_wu(a0, a1, a2, 4), "000798a4 alsl.wu a0, a1, a2, 4"); ++ ++ COMPARE(alsl_d(a3, a4, a5, 1), "002c2507 alsl.d a3, a4, a5, 1"); ++ COMPARE(alsl_d(a6, a7, t0, 3), "002d316a alsl.d a6, a7, t0, 3"); ++ COMPARE(alsl_d(t1, t2, t3, 4), "002dbdcd alsl.d t1, t2, t3, 4"); ++ ++ COMPARE(bytepick_w(t4, t5, t6, 0), ++ "00084a30 bytepick.w t4, t5, t6, 0"); ++ COMPARE(bytepick_w(a0, a1, a2, 3), ++ "000998a4 bytepick.w a0, a1, a2, 3"); ++ ++ COMPARE(bytepick_d(a6, a7, t0, 0), ++ "000c316a bytepick.d a6, a7, t0, 0"); ++ COMPARE(bytepick_d(t4, t5, t6, 7), ++ "000fca30 bytepick.d t4, t5, t6, 7"); ++ ++ COMPARE(slli_w(a3, a3, 31), "0040fce7 slli.w a3, a3, 31"); ++ COMPARE(slli_w(a6, a6, 1), "0040854a slli.w a6, a6, 1"); ++ ++ COMPARE(slli_d(t3, t2, 63), "0041fdcf slli.d t3, t2, 63"); ++ COMPARE(slli_d(t4, a6, 1), "00410550 slli.d t4, a6, 1"); ++ ++ COMPARE(srli_w(a7, a7, 31), "0044fd6b srli.w a7, a7, 31"); ++ COMPARE(srli_w(a4, a4, 1), "00448508 srli.w a4, a4, 1"); ++ ++ COMPARE(srli_d(a4, a3, 63), "0045fce8 srli.d a4, a3, 63"); ++ COMPARE(srli_d(a4, a4, 1), "00450508 srli.d a4, a4, 1"); ++ ++ COMPARE(srai_d(a0, a0, 63), "0049fc84 srai.d a0, a0, 63"); ++ COMPARE(srai_d(a4, a1, 1), "004904a8 srai.d a4, a1, 1"); ++ ++ COMPARE(srai_w(s4, a3, 31), "0048fcfb srai.w s4, a3, 31"); ++ COMPARE(srai_w(s4, a5, 1), "0048853b srai.w s4, a5, 1"); ++ ++ COMPARE(rotri_d(t7, t6, 1), "004d0653 rotri.d t7, t6, 1"); ++ ++ VERIFY_RUN(); ++} ++ ++TEST(TypeOp17) { ++ SET_UP(); ++ ++ COMPARE(sltu(t5, t4, a4), "0012a211 sltu t5, t4, a4"); ++ COMPARE(sltu(t4, zero_reg, t4), "0012c010 sltu t4, zero_reg, t4"); ++ ++ COMPARE(add_w(a4, a4, a6), "00102908 add.w a4, a4, a6"); ++ COMPARE(add_w(a5, a6, t3), "00103d49 add.w a5, a6, t3"); ++ ++ COMPARE(add_d(a4, t0, t1), "0010b588 add.d a4, t0, t1"); ++ COMPARE(add_d(a6, a3, t1), "0010b4ea add.d a6, a3, t1"); ++ ++ COMPARE(sub_w(a7, a7, a2), "0011196b sub.w a7, a7, a2"); ++ COMPARE(sub_w(a2, a2, s3), "001168c6 sub.w a2, a2, s3"); ++ ++ COMPARE(sub_d(s3, ra, s3), "0011e83a 
sub.d s3, ra, s3"); ++ COMPARE(sub_d(a0, a1, a2), "001198a4 sub.d a0, a1, a2"); ++ ++ COMPARE(slt(a5, a5, a6), "00122929 slt a5, a5, a6"); ++ COMPARE(slt(a6, t3, t4), "001241ea slt a6, t3, t4"); ++ ++ COMPARE(masknez(a5, a5, a3), "00131d29 masknez a5, a5, a3"); ++ COMPARE(masknez(a3, a4, a5), "00132507 masknez a3, a4, a5"); ++ ++ COMPARE(maskeqz(a6, a7, t0), "0013b16a maskeqz a6, a7, t0"); ++ COMPARE(maskeqz(t1, t2, t3), "0013bdcd maskeqz t1, t2, t3"); ++ ++ COMPARE(or_(s3, sp, zero_reg), "0015007a or s3, sp, zero_reg"); ++ COMPARE(or_(a4, a0, zero_reg), "00150088 or a4, a0, zero_reg"); ++ ++ COMPARE(and_(sp, sp, t6), "0014c863 and sp, sp, t6"); ++ COMPARE(and_(a3, a3, a7), "0014ace7 and a3, a3, a7"); ++ ++ COMPARE(nor(a7, a7, a7), "00142d6b nor a7, a7, a7"); ++ COMPARE(nor(t4, t5, t6), "00144a30 nor t4, t5, t6"); ++ ++ COMPARE(xor_(a0, a1, a2), "001598a4 xor a0, a1, a2"); ++ COMPARE(xor_(a3, a4, a5), "0015a507 xor a3, a4, a5"); ++ ++ COMPARE(orn(a6, a7, t0), "0016316a orn a6, a7, t0"); ++ COMPARE(orn(t1, t2, t3), "00163dcd orn t1, t2, t3"); ++ ++ COMPARE(andn(t4, t5, t6), "0016ca30 andn t4, t5, t6"); ++ COMPARE(andn(a0, a1, a2), "001698a4 andn a0, a1, a2"); ++ ++ COMPARE(sll_w(a3, t0, a7), "00172d87 sll.w a3, t0, a7"); ++ COMPARE(sll_w(a3, a4, a3), "00171d07 sll.w a3, a4, a3"); ++ ++ COMPARE(srl_w(a3, a4, a3), "00179d07 srl.w a3, a4, a3"); ++ COMPARE(srl_w(a3, t1, t4), "0017c1a7 srl.w a3, t1, t4"); ++ ++ COMPARE(sra_w(a4, t4, a4), "00182208 sra.w a4, t4, a4"); ++ COMPARE(sra_w(a3, t1, a6), "001829a7 sra.w a3, t1, a6"); ++ ++ COMPARE(sll_d(a3, a1, a3), "00189ca7 sll.d a3, a1, a3"); ++ COMPARE(sll_d(a7, a4, t0), "0018b10b sll.d a7, a4, t0"); ++ ++ COMPARE(srl_d(a7, a7, t0), "0019316b srl.d a7, a7, t0"); ++ COMPARE(srl_d(t0, a6, t0), "0019314c srl.d t0, a6, t0"); ++ ++ COMPARE(sra_d(a3, a4, a5), "0019a507 sra.d a3, a4, a5"); ++ COMPARE(sra_d(a6, a7, t0), "0019b16a sra.d a6, a7, t0"); ++ ++ COMPARE(rotr_d(t1, t2, t3), "001bbdcd rotr.d t1, t2, t3"); ++ COMPARE(rotr_d(t4, t5, t6), "001bca30 rotr.d t4, t5, t6"); ++ ++ COMPARE(rotr_w(a0, a1, a2), "001b18a4 rotr.w a0, a1, a2"); ++ COMPARE(rotr_w(a3, a4, a5), "001b2507 rotr.w a3, a4, a5"); ++ ++ COMPARE(mul_w(t8, a5, t7), "001c4d34 mul.w t8, a5, t7"); ++ COMPARE(mul_w(t4, t5, t6), "001c4a30 mul.w t4, t5, t6"); ++ ++ COMPARE(mulh_w(s3, a3, t7), "001cccfa mulh.w s3, a3, t7"); ++ COMPARE(mulh_w(a0, a1, a2), "001c98a4 mulh.w a0, a1, a2"); ++ ++ COMPARE(mulh_wu(a6, a7, t0), "001d316a mulh.wu a6, a7, t0"); ++ COMPARE(mulh_wu(t1, t2, t3), "001d3dcd mulh.wu t1, t2, t3"); ++ ++ COMPARE(mul_d(t2, a5, t1), "001db52e mul.d t2, a5, t1"); ++ COMPARE(mul_d(a4, a4, a5), "001da508 mul.d a4, a4, a5"); ++ ++ COMPARE(mulh_d(a3, a4, a5), "001e2507 mulh.d a3, a4, a5"); ++ COMPARE(mulh_d(a6, a7, t0), "001e316a mulh.d a6, a7, t0"); ++ ++ COMPARE(mulh_du(t1, t2, t3), "001ebdcd mulh.du t1, t2, t3"); ++ COMPARE(mulh_du(t4, t5, t6), "001eca30 mulh.du t4, t5, t6"); ++ ++ COMPARE(mulw_d_w(a0, a1, a2), "001f18a4 mulw.d.w a0, a1, a2"); ++ COMPARE(mulw_d_w(a3, a4, a5), "001f2507 mulw.d.w a3, a4, a5"); ++ ++ COMPARE(mulw_d_wu(a6, a7, t0), "001fb16a mulw.d.wu a6, a7, t0"); ++ COMPARE(mulw_d_wu(t1, t2, t3), "001fbdcd mulw.d.wu t1, t2, t3"); ++ ++ COMPARE(div_w(a5, a5, a3), "00201d29 div.w a5, a5, a3"); ++ COMPARE(div_w(t4, t5, t6), "00204a30 div.w t4, t5, t6"); ++ ++ COMPARE(mod_w(a6, t3, a6), "0020a9ea mod.w a6, t3, a6"); ++ COMPARE(mod_w(a3, a4, a3), "00209d07 mod.w a3, a4, a3"); ++ ++ COMPARE(div_wu(t1, t2, t3), "00213dcd div.wu t1, t2, t3"); ++ COMPARE(div_wu(t4, t5, t6), 
"00214a30 div.wu t4, t5, t6"); ++ ++ COMPARE(mod_wu(a0, a1, a2), "002198a4 mod.wu a0, a1, a2"); ++ COMPARE(mod_wu(a3, a4, a5), "0021a507 mod.wu a3, a4, a5"); ++ ++ COMPARE(div_d(t0, t0, a6), "0022298c div.d t0, t0, a6"); ++ COMPARE(div_d(a7, a7, a5), "0022256b div.d a7, a7, a5"); ++ ++ COMPARE(mod_d(a6, a7, t0), "0022b16a mod.d a6, a7, t0"); ++ COMPARE(mod_d(t1, t2, t3), "0022bdcd mod.d t1, t2, t3"); ++ ++ COMPARE(div_du(t4, t5, t6), "00234a30 div.du t4, t5, t6"); ++ COMPARE(div_du(a0, a1, a2), "002318a4 div.du a0, a1, a2"); ++ ++ COMPARE(mod_du(a3, a4, a5), "0023a507 mod.du a3, a4, a5"); ++ COMPARE(mod_du(a6, a7, t0), "0023b16a mod.du a6, a7, t0"); ++ ++ COMPARE(fadd_s(f3, f4, f5), "01009483 fadd.s f3, f4, f5"); ++ COMPARE(fadd_s(f6, f7, f8), "0100a0e6 fadd.s f6, f7, f8"); ++ ++ COMPARE(fadd_d(f0, f1, f0), "01010020 fadd.d f0, f1, f0"); ++ COMPARE(fadd_d(f0, f1, f2), "01010820 fadd.d f0, f1, f2"); ++ ++ COMPARE(fsub_s(f9, f10, f11), "0102ad49 fsub.s f9, f10, f11"); ++ COMPARE(fsub_s(f12, f13, f14), "0102b9ac fsub.s f12, f13, f14"); ++ ++ COMPARE(fsub_d(f30, f0, f30), "0103781e fsub.d f30, f0, f30"); ++ COMPARE(fsub_d(f0, f0, f1), "01030400 fsub.d f0, f0, f1"); ++ ++ COMPARE(fmul_s(f15, f16, f17), "0104c60f fmul.s f15, f16, f17"); ++ COMPARE(fmul_s(f18, f19, f20), "0104d272 fmul.s f18, f19, f20"); ++ ++ COMPARE(fmul_d(f0, f0, f1), "01050400 fmul.d f0, f0, f1"); ++ COMPARE(fmul_d(f0, f0, f0), "01050000 fmul.d f0, f0, f0"); ++ ++ COMPARE(fdiv_s(f0, f1, f2), "01068820 fdiv.s f0, f1, f2"); ++ COMPARE(fdiv_s(f3, f4, f5), "01069483 fdiv.s f3, f4, f5"); ++ ++ COMPARE(fdiv_d(f0, f0, f1), "01070400 fdiv.d f0, f0, f1"); ++ COMPARE(fdiv_d(f0, f1, f0), "01070020 fdiv.d f0, f1, f0"); ++ ++ COMPARE(fmax_s(f9, f10, f11), "0108ad49 fmax.s f9, f10, f11"); ++ COMPARE(fmin_s(f6, f7, f8), "010aa0e6 fmin.s f6, f7, f8"); ++ ++ COMPARE(fmax_d(f0, f1, f0), "01090020 fmax.d f0, f1, f0"); ++ COMPARE(fmin_d(f0, f1, f0), "010b0020 fmin.d f0, f1, f0"); ++ ++ COMPARE(fmaxa_s(f12, f13, f14), "010cb9ac fmaxa.s f12, f13, f14"); ++ COMPARE(fmina_s(f15, f16, f17), "010ec60f fmina.s f15, f16, f17"); ++ ++ COMPARE(fmaxa_d(f18, f19, f20), "010d5272 fmaxa.d f18, f19, f20"); ++ COMPARE(fmina_d(f0, f1, f2), "010f0820 fmina.d f0, f1, f2"); ++ ++ COMPARE(ldx_b(a0, a1, a2), "380018a4 ldx.b a0, a1, a2"); ++ COMPARE(ldx_h(a3, a4, a5), "38042507 ldx.h a3, a4, a5"); ++ COMPARE(ldx_w(a6, a7, t0), "3808316a ldx.w a6, a7, t0"); ++ ++ COMPARE(stx_b(t1, t2, t3), "38103dcd stx.b t1, t2, t3"); ++ COMPARE(stx_h(t4, t5, t6), "38144a30 stx.h t4, t5, t6"); ++ COMPARE(stx_w(a0, a1, a2), "381818a4 stx.w a0, a1, a2"); ++ ++ COMPARE(ldx_bu(a3, a4, a5), "38202507 ldx.bu a3, a4, a5"); ++ COMPARE(ldx_hu(a6, a7, t0), "3824316a ldx.hu a6, a7, t0"); ++ COMPARE(ldx_wu(t1, t2, t3), "38283dcd ldx.wu t1, t2, t3"); ++ ++ COMPARE(ldx_d(a2, s6, t6), "380c4ba6 ldx.d a2, s6, t6"); ++ COMPARE(ldx_d(t7, s6, t6), "380c4bb3 ldx.d t7, s6, t6"); ++ ++ COMPARE(stx_d(a4, a3, t6), "381c48e8 stx.d a4, a3, t6"); ++ COMPARE(stx_d(a0, a3, t6), "381c48e4 stx.d a0, a3, t6"); ++ ++ COMPARE(preldx(0, t5, t6), "382c4a20 preldx 0x0(0), t5, t6"); ++ COMPARE(preldx(31, a1, a2), "382c18bf preldx 0x1f(31), a1, a2"); ++ ++ COMPARE(amswap_db_w(a0, a3, t6), "38691e44 amswap_db.w a0, a3, t6"); ++ COMPARE(amswap_db_d(a0, a3, t6), "38699e44 amswap_db.d a0, a3, t6"); ++ COMPARE(amadd_db_w(a0, a3, t6), "386a1e44 amadd_db.w a0, a3, t6"); ++ COMPARE(amadd_db_d(a0, a3, t6), "386a9e44 amadd_db.d a0, a3, t6"); ++ COMPARE(amand_db_w(a0, a3, t6), "386b1e44 amand_db.w a0, a3, t6"); ++ 
COMPARE(amand_db_d(a0, a3, t6), "386b9e44 amand_db.d a0, a3, t6"); ++ COMPARE(amor_db_w(a0, a3, t6), "386c1e44 amor_db.w a0, a3, t6"); ++ COMPARE(amor_db_d(a0, a3, t6), "386c9e44 amor_db.d a0, a3, t6"); ++ COMPARE(amxor_db_w(a0, a3, t6), "386d1e44 amxor_db.w a0, a3, t6"); ++ COMPARE(amxor_db_d(a0, a3, t6), "386d9e44 amxor_db.d a0, a3, t6"); ++ ++ COMPARE(dbar(0), "38720000 dbar 0x0(0)"); ++ COMPARE(ibar(5555), "387295b3 ibar 0x15b3(5555)"); ++ ++ COMPARE(break_(0), "002a0000 break code: 0x0(0)"); ++ COMPARE(break_(0x3fc0), "002a3fc0 break code: 0x3fc0(16320)"); ++ ++ COMPARE(fldx_s(f3, a4, a5), "38302503 fldx.s f3, a4, a5"); ++ COMPARE(fldx_d(f6, a7, t0), "38343166 fldx.d f6, a7, t0"); ++ ++ COMPARE(fstx_s(f1, t2, t3), "38383dc1 fstx.s f1, t2, t3"); ++ COMPARE(fstx_d(f4, t5, t6), "383c4a24 fstx.d f4, t5, t6"); ++ ++ COMPARE(asrtle_d(a0, a1), "00011480 asrtle.d a0, a1"); ++ COMPARE(asrtgt_d(a2, a3), "00019cc0 asrtgt.d a2, a3"); ++ ++ COMPARE(syscall(2), "002b0002 syscall code 0x2(2)"); ++ // COMPARE(hypcall(2), ++ // "002b8002 hypcall 0x2(2)"); ++ ++ COMPARE(amswap_w(a4, a5, a6), "38602548 amswap.w a4, a5, a6"); ++ COMPARE(amswap_d(a7, t0, t1), "3860b1ab amswap.d a7, t0, t1"); ++ ++ COMPARE(amadd_w(t2, t3, t4), "38613e0e amadd.w t2, t3, t4"); ++ COMPARE(amadd_d(t5, t6, a0), "3861c891 amadd.d t5, t6, a0"); ++ ++ COMPARE(amand_w(a1, a2, a3), "386218e5 amand.w a1, a2, a3"); ++ COMPARE(amand_d(a4, a5, a6), "3862a548 amand.d a4, a5, a6"); ++ ++ COMPARE(amor_w(a7, t0, t1), "386331ab amor.w a7, t0, t1"); ++ COMPARE(amor_d(t2, t3, t4), "3863be0e amor.d t2, t3, t4"); ++ ++ COMPARE(amxor_w(t5, t6, a0), "38644891 amxor.w t5, t6, a0"); ++ COMPARE(amxor_d(a1, a2, a3), "386498e5 amxor.d a1, a2, a3"); ++ ++ COMPARE(ammax_w(a4, a5, a6), "38652548 ammax.w a4, a5, a6"); ++ COMPARE(ammax_d(a7, t0, t1), "3865b1ab ammax.d a7, t0, t1"); ++ ++ COMPARE(ammin_w(t2, t3, t4), "38663e0e ammin.w t2, t3, t4"); ++ COMPARE(ammin_d(t5, t6, a0), "3866c891 ammin.d t5, t6, a0"); ++ ++ COMPARE(ammax_wu(a1, a2, a3), "386718e5 ammax.wu a1, a2, a3"); ++ COMPARE(ammax_du(a4, a5, a6), "3867a548 ammax.du a4, a5, a6"); ++ ++ COMPARE(ammin_wu(a7, t0, t1), "386831ab ammin.wu a7, t0, t1"); ++ COMPARE(ammin_du(t2, t3, t4), "3868be0e ammin.du t2, t3, t4"); ++ ++ COMPARE(ammax_db_d(a0, a1, a2), "386e94c4 ammax_db.d a0, a1, a2"); ++ COMPARE(ammax_db_du(a3, a4, a5), "3870a127 ammax_db.du a3, a4, a5"); ++ ++ COMPARE(ammax_db_w(a6, a7, t0), "386e2d8a ammax_db.w a6, a7, t0"); ++ COMPARE(ammax_db_wu(t1, t2, t3), "387039ed ammax_db.wu t1, t2, t3"); ++ ++ COMPARE(ammin_db_d(t4, t5, t6), "386fc650 ammin_db.d t4, t5, t6"); ++ COMPARE(ammin_db_du(a0, a1, a2), "387194c4 ammin_db.du a0, a1, a2"); ++ ++ COMPARE(ammin_db_wu(a3, a4, a5), "38712127 ammin_db.wu a3, a4, a5"); ++ COMPARE(ammin_db_w(a6, a7, t0), "386f2d8a ammin_db.w a6, a7, t0"); ++ ++ COMPARE(fldgt_s(f0, a1, a2), "387418a0 fldgt.s f0, a1, a2"); ++ COMPARE(fldgt_d(f2, a3, a4), "3874a0e2 fldgt.d f2, a3, a4"); ++ ++ COMPARE(fldle_s(f5, a6, a7), "38752d45 fldle.s f5, a6, a7"); ++ COMPARE(fldle_d(f8, t0, t1), "3875b588 fldle.d f8, t0, t1"); ++ ++ COMPARE(fstgt_s(f11, t2, t3), "38763dcb fstgt.s f11, t2, t3"); ++ COMPARE(fstgt_d(f14, t4, t5), "3876c60e fstgt.d f14, t4, t5"); ++ ++ COMPARE(fstle_s(f17, t6, a0), "38771251 fstle.s f17, t6, a0"); ++ COMPARE(fstle_d(f20, a1, a2), "387798b4 fstle.d f20, a1, a2"); ++ ++ COMPARE(ldgt_b(a1, a2, a3), "38781cc5 ldgt.b a1, a2, a3"); ++ COMPARE(ldgt_h(a4, a5, a6), "3878a928 ldgt.h a4, a5, a6"); ++ COMPARE(ldgt_w(a7, t0, t1), "3879358b ldgt.w a7, t0, t1"); ++ 
COMPARE(ldgt_d(t2, t3, t4), "3879c1ee ldgt.d t2, t3, t4"); ++ ++ COMPARE(ldle_b(t5, t6, a0), "387a1251 ldle.b t5, t6, a0"); ++ COMPARE(ldle_h(a1, a2, a3), "387a9cc5 ldle.h a1, a2, a3"); ++ COMPARE(ldle_w(a4, a5, a6), "387b2928 ldle.w a4, a5, a6"); ++ COMPARE(ldle_d(a7, t0, t1), "387bb58b ldle.d a7, t0, t1"); ++ ++ COMPARE(stgt_b(t2, t3, t4), "387c41ee stgt.b t2, t3, t4"); ++ COMPARE(stgt_h(t5, t6, a0), "387c9251 stgt.h t5, t6, a0"); ++ COMPARE(stgt_w(a1, a2, a3), "387d1cc5 stgt.w a1, a2, a3"); ++ COMPARE(stgt_d(a4, a5, a6), "387da928 stgt.d a4, a5, a6"); ++ ++ COMPARE(stle_b(a7, t0, t1), "387e358b stle.b a7, t0, t1"); ++ COMPARE(stle_h(t2, t3, t4), "387ec1ee stle.h t2, t3, t4"); ++ COMPARE(stle_w(t5, t6, a0), "387f1251 stle.w t5, t6, a0"); ++ COMPARE(stle_d(a1, a2, a3), "387f9cc5 stle.d a1, a2, a3"); ++ ++ COMPARE(fscaleb_s(f0, f1, f2), "01108820 fscaleb.s f0, f1, f2"); ++ COMPARE(fscaleb_d(f3, f4, f5), "01111483 fscaleb.d f3, f4, f5"); ++ ++ COMPARE(fcopysign_s(f6, f7, f8), "0112a0e6 fcopysign.s f6, f7, f8"); ++ COMPARE(fcopysign_d(f9, f10, f12), ++ "01133149 fcopysign.d f9, f10, f12"); ++ ++ COMPARE(crc_w_b_w(a4, a5, a6), "00242928 crc.w.b.w a4, a5, a6"); ++ COMPARE(crc_w_h_w(a7, t0, t1), "0024b58b crc.w.h.w a7, t0, t1"); ++ COMPARE(crc_w_w_w(t2, t3, t4), "002541ee crc.w.w.w t2, t3, t4"); ++ COMPARE(crc_w_d_w(t5, t6, a0), "00259251 crc.w.d.w t5, t6, a0"); ++ ++ COMPARE(crcc_w_b_w(a1, a2, a3), "00261cc5 crcc.w.b.w a1, a2, a3"); ++ COMPARE(crcc_w_h_w(a4, a5, a6), "0026a928 crcc.w.h.w a4, a5, a6"); ++ COMPARE(crcc_w_w_w(a7, t0, t1), "0027358b crcc.w.w.w a7, t0, t1"); ++ COMPARE(crcc_w_d_w(t2, t3, t4), "0027c1ee crcc.w.d.w t2, t3, t4"); ++ ++ VERIFY_RUN(); ++} ++ ++TEST(TypeOp22) { ++ SET_UP(); ++ ++ COMPARE(clz_w(a3, a0), "00001487 clz.w a3, a0"); ++ COMPARE(ctz_w(a0, a1), "00001ca4 ctz.w a0, a1"); ++ COMPARE(clz_d(a2, a3), "000024e6 clz.d a2, a3"); ++ COMPARE(ctz_d(a4, a5), "00002d28 ctz.d a4, a5"); ++ ++ COMPARE(clo_w(a0, a1), "000010a4 clo.w a0, a1"); ++ COMPARE(cto_w(a2, a3), "000018e6 cto.w a2, a3"); ++ COMPARE(clo_d(a4, a5), "00002128 clo.d a4, a5"); ++ COMPARE(cto_d(a6, a7), "0000296a cto.d a6, a7"); ++ ++ COMPARE(revb_2h(a6, a7), "0000316a revb.2h a6, a7"); ++ COMPARE(revb_4h(t0, t1), "000035ac revb.4h t0, t1"); ++ COMPARE(revb_2w(t2, t3), "000039ee revb.2w t2, t3"); ++ COMPARE(revb_d(t4, t5), "00003e30 revb.d t4, t5"); ++ ++ COMPARE(revh_2w(a0, a1), "000040a4 revh.2w a0, a1"); ++ COMPARE(revh_d(a2, a3), "000044e6 revh.d a2, a3"); ++ ++ COMPARE(bitrev_4b(a4, a5), "00004928 bitrev.4b a4, a5"); ++ COMPARE(bitrev_8b(a6, a7), "00004d6a bitrev.8b a6, a7"); ++ COMPARE(bitrev_w(t0, t1), "000051ac bitrev.w t0, t1"); ++ COMPARE(bitrev_d(t2, t3), "000055ee bitrev.d t2, t3"); ++ ++ COMPARE(ext_w_b(t4, t5), "00005e30 ext.w.b t4, t5"); ++ COMPARE(ext_w_h(a0, a1), "000058a4 ext.w.h a0, a1"); ++ ++ COMPARE(fabs_s(f2, f3), "01140462 fabs.s f2, f3"); ++ COMPARE(fabs_d(f0, f0), "01140800 fabs.d f0, f0"); ++ ++ COMPARE(fneg_s(f0, f1), "01141420 fneg.s f0, f1"); ++ COMPARE(fneg_d(f0, f0), "01141800 fneg.d f0, f0"); ++ ++ COMPARE(fsqrt_s(f4, f5), "011444a4 fsqrt.s f4, f5"); ++ COMPARE(fsqrt_d(f0, f0), "01144800 fsqrt.d f0, f0"); ++ ++ COMPARE(fmov_s(f6, f7), "011494e6 fmov.s f6, f7"); ++ COMPARE(fmov_d(f0, f1), "01149820 fmov.d f0, f1"); ++ COMPARE(fmov_d(f1, f0), "01149801 fmov.d f1, f0"); ++ ++ COMPARE(movgr2fr_d(f0, t6), "0114aa40 movgr2fr.d f0, t6"); ++ COMPARE(movgr2fr_d(f1, t6), "0114aa41 movgr2fr.d f1, t6"); ++ ++ COMPARE(movgr2fr_w(f30, a3), "0114a4fe movgr2fr.w f30, a3"); ++ 
COMPARE(movgr2fr_w(f30, a0), "0114a49e movgr2fr.w f30, a0"); ++ ++ COMPARE(movgr2frh_w(f30, t6), "0114ae5e movgr2frh.w f30, t6"); ++ COMPARE(movgr2frh_w(f0, a3), "0114ace0 movgr2frh.w f0, a3"); ++ ++ COMPARE(movfr2gr_s(a3, f30), "0114b7c7 movfr2gr.s a3, f30"); ++ ++ COMPARE(movfr2gr_d(a6, f30), "0114bbca movfr2gr.d a6, f30"); ++ COMPARE(movfr2gr_d(t7, f30), "0114bbd3 movfr2gr.d t7, f30"); ++ ++ COMPARE(movfrh2gr_s(a5, f0), "0114bc09 movfrh2gr.s a5, f0"); ++ COMPARE(movfrh2gr_s(a4, f0), "0114bc08 movfrh2gr.s a4, f0"); ++ ++ COMPARE(movgr2fcsr(a2), "0114c0c0 movgr2fcsr fcsr, a2"); ++ COMPARE(movfcsr2gr(a4), "0114c808 movfcsr2gr a4, fcsr"); ++ ++ COMPARE(movfr2cf(FCC0, f0), "0114d000 movfr2cf fcc0, f0"); ++ COMPARE(movcf2fr(f1, FCC1), "0114d421 movcf2fr f1, fcc1"); ++ ++ COMPARE(movgr2cf(FCC2, a0), "0114d882 movgr2cf fcc2, a0"); ++ COMPARE(movcf2gr(a1, FCC3), "0114dc65 movcf2gr a1, fcc3"); ++ ++ COMPARE(fcvt_s_d(f0, f0), "01191800 fcvt.s.d f0, f0"); ++ COMPARE(fcvt_d_s(f0, f0), "01192400 fcvt.d.s f0, f0"); ++ ++ COMPARE(ftintrm_w_s(f8, f9), "011a0528 ftintrm.w.s f8, f9"); ++ COMPARE(ftintrm_w_d(f10, f11), "011a096a ftintrm.w.d f10, f11"); ++ COMPARE(ftintrm_l_s(f12, f13), "011a25ac ftintrm.l.s f12, f13"); ++ COMPARE(ftintrm_l_d(f14, f15), "011a29ee ftintrm.l.d f14, f15"); ++ ++ COMPARE(ftintrp_w_s(f16, f17), "011a4630 ftintrp.w.s f16, f17"); ++ COMPARE(ftintrp_w_d(f18, f19), "011a4a72 ftintrp.w.d f18, f19"); ++ COMPARE(ftintrp_l_s(f20, f21), "011a66b4 ftintrp.l.s f20, f21"); ++ COMPARE(ftintrp_l_d(f0, f1), "011a6820 ftintrp.l.d f0, f1"); ++ ++ COMPARE(ftintrz_w_s(f30, f4), "011a849e ftintrz.w.s f30, f4"); ++ COMPARE(ftintrz_w_d(f30, f4), "011a889e ftintrz.w.d f30, f4"); ++ COMPARE(ftintrz_l_s(f30, f0), "011aa41e ftintrz.l.s f30, f0"); ++ COMPARE(ftintrz_l_d(f30, f30), "011aabde ftintrz.l.d f30, f30"); ++ ++ COMPARE(ftintrne_w_s(f2, f3), "011ac462 ftintrne.w.s f2, f3"); ++ COMPARE(ftintrne_w_d(f4, f5), "011ac8a4 ftintrne.w.d f4, f5"); ++ COMPARE(ftintrne_l_s(f6, f7), "011ae4e6 ftintrne.l.s f6, f7"); ++ COMPARE(ftintrne_l_d(f8, f9), "011ae928 ftintrne.l.d f8, f9"); ++ ++ COMPARE(ftint_w_s(f10, f11), "011b056a ftint.w.s f10, f11"); ++ COMPARE(ftint_w_d(f12, f13), "011b09ac ftint.w.d f12, f13"); ++ COMPARE(ftint_l_s(f14, f15), "011b25ee ftint.l.s f14, f15"); ++ COMPARE(ftint_l_d(f16, f17), "011b2a30 ftint.l.d f16, f17"); ++ ++ COMPARE(ffint_s_w(f18, f19), "011d1272 ffint.s.w f18, f19"); ++ COMPARE(ffint_s_l(f20, f21), "011d1ab4 ffint.s.l f20, f21"); ++ COMPARE(ffint_d_w(f0, f1), "011d2020 ffint.d.w f0, f1"); ++ COMPARE(ffint_d_l(f2, f3), "011d2862 ffint.d.l f2, f3"); ++ ++ COMPARE(frint_s(f4, f5), "011e44a4 frint.s f4, f5"); ++ COMPARE(frint_d(f6, f7), "011e48e6 frint.d f6, f7"); ++ ++ COMPARE(frecip_s(f8, f9), "01145528 frecip.s f8, f9"); ++ COMPARE(frecip_d(f10, f11), "0114596a frecip.d f10, f11"); ++ ++ COMPARE(frsqrt_s(f12, f13), "011465ac frsqrt.s f12, f13"); ++ COMPARE(frsqrt_d(f14, f15), "011469ee frsqrt.d f14, f15"); ++ ++ COMPARE(fclass_s(f16, f17), "01143630 fclass.s f16, f17"); ++ COMPARE(fclass_d(f18, f19), "01143a72 fclass.d f18, f19"); ++ ++ COMPARE(flogb_s(f20, f21), "011426b4 flogb.s f20, f21"); ++ COMPARE(flogb_d(f0, f1), "01142820 flogb.d f0, f1"); ++ ++ COMPARE(rdtimel_w(t0, t1), "000061ac rdtimel.w t0, t1"); ++ COMPARE(rdtimeh_w(t2, t3), "000065ee rdtimeh.w t2, t3"); ++ COMPARE(rdtime_d(t4, t5), "00006a30 rdtime.d t4, t5"); ++ ++ VERIFY_RUN(); ++} ++ ++} // namespace internal ++} // namespace v8 +diff --git a/src/3rdparty/chromium/v8/test/cctest/test-macro-assembler-la64.cc 
b/src/3rdparty/chromium/v8/test/cctest/test-macro-assembler-la64.cc +new file mode 100644 +index 0000000000..ef536b862b +--- /dev/null ++++ b/src/3rdparty/chromium/v8/test/cctest/test-macro-assembler-la64.cc +@@ -0,0 +1,2894 @@ ++// Copyright 2013 the V8 project authors. All rights reserved. ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// * Redistributions of source code must retain the above copyright ++// notice, this list of conditions and the following disclaimer. ++// * Redistributions in binary form must reproduce the above ++// copyright notice, this list of conditions and the following ++// disclaimer in the documentation and/or other materials provided ++// with the distribution. ++// * Neither the name of Google Inc. nor the names of its ++// contributors may be used to endorse or promote products derived ++// from this software without specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++#include <stdlib.h> ++ ++#include <iostream> // NOLINT(readability/streams) ++ ++#include "src/base/utils/random-number-generator.h" ++#include "src/codegen/macro-assembler.h" ++#include "src/execution/simulator.h" ++#include "src/init/v8.h" ++#include "src/objects/heap-number.h" ++#include "src/objects/objects-inl.h" ++#include "src/utils/ostreams.h" ++#include "test/cctest/cctest.h" ++ ++namespace v8 { ++namespace internal { ++ ++// TODO(mips64): Refine these signatures per test case.
++using FV = void*(int64_t x, int64_t y, int p2, int p3, int p4); ++using F1 = void*(int x, int p1, int p2, int p3, int p4); ++using F2 = void*(int x, int y, int p2, int p3, int p4); ++using F3 = void*(void* p, int p1, int p2, int p3, int p4); ++using F4 = void*(void* p0, void* p1, int p2, int p3, int p4); ++ ++#define __ masm-> ++ ++TEST(BYTESWAP) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ struct T { ++ uint64_t s8; ++ uint64_t s4; ++ uint64_t s2; ++ uint64_t u4; ++ uint64_t u2; ++ }; ++ ++ T t; ++ // clang-format off ++ uint64_t test_values[] = {0x5612FFCD9D327ACC, ++ 0x781A15C3, ++ 0xFCDE, ++ 0x9F, ++ 0xC81A15C3, ++ 0x8000000000000000, ++ 0xFFFFFFFFFFFFFFFF, ++ 0x0000000080000000, ++ 0x0000000000008000}; ++ // clang-format on ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ MacroAssembler* masm = &assembler; ++ ++ __ Ld_d(a4, MemOperand(a0, offsetof(T, s8))); ++ __ ByteSwapSigned(a4, a4, 8); ++ __ St_d(a4, MemOperand(a0, offsetof(T, s8))); ++ ++ __ Ld_d(a4, MemOperand(a0, offsetof(T, s4))); ++ __ ByteSwapSigned(a4, a4, 4); ++ __ St_d(a4, MemOperand(a0, offsetof(T, s4))); ++ ++ __ Ld_d(a4, MemOperand(a0, offsetof(T, s2))); ++ __ ByteSwapSigned(a4, a4, 2); ++ __ St_d(a4, MemOperand(a0, offsetof(T, s2))); ++ ++ __ Ld_d(a4, MemOperand(a0, offsetof(T, u4))); ++ __ ByteSwapSigned(a4, a4, 4); ++ __ St_d(a4, MemOperand(a0, offsetof(T, u4))); ++ ++ __ Ld_d(a4, MemOperand(a0, offsetof(T, u2))); ++ __ ByteSwapSigned(a4, a4, 2); ++ __ St_d(a4, MemOperand(a0, offsetof(T, u2))); ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode<F3>::FromCode(*code); ++ ++ for (size_t i = 0; i < arraysize(test_values); i++) { ++ int32_t in_s4 = static_cast<int32_t>(test_values[i]); ++ int16_t in_s2 = static_cast<int16_t>(test_values[i]); ++ uint32_t in_u4 = static_cast<uint32_t>(test_values[i]); ++ uint16_t in_u2 = static_cast<uint16_t>(test_values[i]); ++ ++ t.s8 = test_values[i]; ++ t.s4 = static_cast<uint64_t>(in_s4); ++ t.s2 = static_cast<uint64_t>(in_s2); ++ t.u4 = static_cast<uint64_t>(in_u4); ++ t.u2 = static_cast<uint64_t>(in_u2); ++ ++ f.Call(&t, 0, 0, 0, 0); ++ ++ CHECK_EQ(ByteReverse(test_values[i]), t.s8); ++ CHECK_EQ(ByteReverse(in_s4), static_cast<int32_t>(t.s4)); ++ CHECK_EQ(ByteReverse(in_s2), static_cast<int16_t>(t.s2)); ++ CHECK_EQ(ByteReverse(in_u4), static_cast<uint32_t>(t.u4)); ++ CHECK_EQ(ByteReverse(in_u2), static_cast<uint16_t>(t.u2)); ++ } ++} ++ ++TEST(LoadConstants) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope handles(isolate); ++ ++ int64_t refConstants[64]; ++ int64_t result[64]; ++ ++ int64_t mask = 1; ++ for (int i = 0; i < 64; i++) { ++ refConstants[i] = ~(mask << i); ++ } ++ ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ __ or_(a4, a0, zero_reg); ++ for (int i = 0; i < 64; i++) { ++ // Load constant. ++ __ li(a5, Operand(refConstants[i])); ++ __ St_d(a5, MemOperand(a4, zero_reg)); ++ __ Add_d(a4, a4, Operand(kPointerSize)); ++ } ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ ++ auto f = GeneratedCode<FV>::FromCode(*code); ++ (void)f.Call(reinterpret_cast<int64_t>(result), 0, 0, 0, 0); ++ // Check results.
++ for (int i = 0; i < 64; i++) { ++ CHECK(refConstants[i] == result[i]); ++ } ++} ++ ++TEST(LoadAddress) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope handles(isolate); ++ ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ Label to_jump, skip; ++ __ mov(a4, a0); ++ ++ __ Branch(&skip); ++ __ bind(&to_jump); ++ __ nop(); ++ __ nop(); ++ __ jirl(zero_reg, ra, 0); ++ __ bind(&skip); ++ __ li(a4, Operand(masm->jump_address(&to_jump)), ADDRESS_LOAD); ++ int check_size = masm->InstructionsGeneratedSince(&skip); ++ CHECK_EQ(3, check_size); ++ __ jirl(zero_reg, a4, 0); ++ __ stop(); ++ __ stop(); ++ __ stop(); ++ __ stop(); ++ __ stop(); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ ++ auto f = GeneratedCode::FromCode(*code); ++ (void)f.Call(0, 0, 0, 0, 0); ++ // Check results. ++} ++ ++TEST(jump_tables4) { ++ // Similar to test-assembler-mips jump_tables1, with extra test for branch ++ // trampoline required before emission of the dd table (where trampolines are ++ // blocked), and proper transition to long-branch mode. ++ // Regression test for v8:4294. ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ const int kNumCases = 512; ++ int values[kNumCases]; ++ isolate->random_number_generator()->NextBytes(values, sizeof(values)); ++ Label labels[kNumCases]; ++ Label near_start, end, done; ++ ++ __ Push(ra); ++ __ xor_(a2, a2, a2); ++ ++ __ Branch(&end); ++ __ bind(&near_start); ++ ++ for (int i = 0; i < 32768 - 256; ++i) { ++ __ Add_d(a2, a2, 1); ++ } ++ ++ __ GenerateSwitchTable(a0, kNumCases, ++ [&labels](size_t i) { return labels + i; }); ++ ++ for (int i = 0; i < kNumCases; ++i) { ++ __ bind(&labels[i]); ++ __ li(a2, values[i]); ++ __ Branch(&done); ++ } ++ ++ __ bind(&done); ++ __ Pop(ra); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ __ bind(&end); ++ __ Branch(&near_start); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++#ifdef OBJECT_PRINT ++ code->Print(std::cout); ++#endif ++ auto f = GeneratedCode::FromCode(*code); ++ for (int i = 0; i < kNumCases; ++i) { ++ int64_t res = reinterpret_cast(f.Call(i, 0, 0, 0, 0)); ++ ::printf("f(%d) = %" PRId64 "\n", i, res); ++ CHECK_EQ(values[i], res); ++ } ++} ++ ++TEST(jump_tables6) { ++ // Similar to test-assembler-mips jump_tables1, with extra test for branch ++ // trampoline required after emission of the dd table (where trampolines are ++ // blocked). 
This test checks if number of really generated instructions is ++ // greater than number of counted instructions from code, as we are expecting ++ // generation of trampoline in this case ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ const int kSwitchTableCases = 40; ++ ++ const int kMaxBranchOffset = (1 << (18 - 1)) - 1; ++ const int kTrampolineSlotsSize = Assembler::kTrampolineSlotsSize; ++ const int kSwitchTablePrologueSize = MacroAssembler::kSwitchTablePrologueSize; ++ ++ const int kMaxOffsetForTrampolineStart = ++ kMaxBranchOffset - 16 * kTrampolineSlotsSize; ++ const int kFillInstr = (kMaxOffsetForTrampolineStart / kInstrSize) - ++ (kSwitchTablePrologueSize + 2 * kSwitchTableCases) - ++ 20; ++ ++ int values[kSwitchTableCases]; ++ isolate->random_number_generator()->NextBytes(values, sizeof(values)); ++ Label labels[kSwitchTableCases]; ++ Label near_start, end, done; ++ ++ __ Push(ra); ++ __ xor_(a2, a2, a2); ++ ++ int offs1 = masm->pc_offset(); ++ int gen_insn = 0; ++ ++ __ Branch(&end); ++ gen_insn += 1; ++ __ bind(&near_start); ++ ++ for (int i = 0; i < kFillInstr; ++i) { ++ __ Add_d(a2, a2, 1); ++ } ++ gen_insn += kFillInstr; ++ ++ __ GenerateSwitchTable(a0, kSwitchTableCases, ++ [&labels](size_t i) { return labels + i; }); ++ gen_insn += (kSwitchTablePrologueSize + 2 * kSwitchTableCases); ++ ++ for (int i = 0; i < kSwitchTableCases; ++i) { ++ __ bind(&labels[i]); ++ __ li(a2, values[i]); ++ __ Branch(&done); ++ } ++ gen_insn += 3 * kSwitchTableCases; ++ ++ // If offset from here to first branch instr is greater than max allowed ++ // offset for trampoline ... ++ CHECK_LT(kMaxOffsetForTrampolineStart, masm->pc_offset() - offs1); ++ // ... 
number of generated instructions must be greater then "gen_insn", ++ // as we are expecting trampoline generation ++ CHECK_LT(gen_insn, (masm->pc_offset() - offs1) / kInstrSize); ++ ++ __ bind(&done); ++ __ Pop(ra); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ __ bind(&end); ++ __ Branch(&near_start); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++#ifdef OBJECT_PRINT ++ code->Print(std::cout); ++#endif ++ auto f = GeneratedCode::FromCode(*code); ++ for (int i = 0; i < kSwitchTableCases; ++i) { ++ int64_t res = reinterpret_cast(f.Call(i, 0, 0, 0, 0)); ++ ::printf("f(%d) = %" PRId64 "\n", i, res); ++ CHECK_EQ(values[i], res); ++ } ++} ++ ++static uint64_t run_alsl_w(uint32_t rj, uint32_t rk, int8_t sa) { ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ __ Alsl_w(a2, a0, a1, sa); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assembler.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ ++ auto f = GeneratedCode::FromCode(*code); ++ ++ uint64_t res = reinterpret_cast(f.Call(rj, rk, 0, 0, 0)); ++ ++ return res; ++} ++ ++TEST(ALSL_W) { ++ CcTest::InitializeVM(); ++ struct TestCaseAlsl { ++ int32_t rj; ++ int32_t rk; ++ uint8_t sa; ++ uint64_t expected_res; ++ }; ++ // clang-format off ++ struct TestCaseAlsl tc[] = {// rj, rk, sa, expected_res ++ {0x1, 0x4, 1, 0x6}, ++ {0x1, 0x4, 2, 0x8}, ++ {0x1, 0x4, 3, 0xC}, ++ {0x1, 0x4, 4, 0x14}, ++ {0x1, 0x4, 5, 0x24}, ++ {0x1, 0x0, 1, 0x2}, ++ {0x1, 0x0, 2, 0x4}, ++ {0x1, 0x0, 3, 0x8}, ++ {0x1, 0x0, 4, 0x10}, ++ {0x1, 0x0, 5, 0x20}, ++ {0x0, 0x4, 1, 0x4}, ++ {0x0, 0x4, 2, 0x4}, ++ {0x0, 0x4, 3, 0x4}, ++ {0x0, 0x4, 4, 0x4}, ++ {0x0, 0x4, 5, 0x4}, ++ ++ // Shift overflow. ++ {INT32_MAX, 0x4, 1, 0x2}, ++ {INT32_MAX >> 1, 0x4, 2, 0x0}, ++ {INT32_MAX >> 2, 0x4, 3, 0xFFFFFFFFFFFFFFFC}, ++ {INT32_MAX >> 3, 0x4, 4, 0xFFFFFFFFFFFFFFF4}, ++ {INT32_MAX >> 4, 0x4, 5, 0xFFFFFFFFFFFFFFE4}, ++ ++ // Signed addition overflow. ++ {0x1, INT32_MAX - 1, 1, 0xFFFFFFFF80000000}, ++ {0x1, INT32_MAX - 3, 2, 0xFFFFFFFF80000000}, ++ {0x1, INT32_MAX - 7, 3, 0xFFFFFFFF80000000}, ++ {0x1, INT32_MAX - 15, 4, 0xFFFFFFFF80000000}, ++ {0x1, INT32_MAX - 31, 5, 0xFFFFFFFF80000000}, ++ ++ // Addition overflow. ++ {0x1, -2, 1, 0x0}, ++ {0x1, -4, 2, 0x0}, ++ {0x1, -8, 3, 0x0}, ++ {0x1, -16, 4, 0x0}, ++ {0x1, -32, 5, 0x0}}; ++ // clang-format on ++ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseAlsl); ++ for (size_t i = 0; i < nr_test_cases; ++i) { ++ uint64_t res = run_alsl_w(tc[i].rj, tc[i].rk, tc[i].sa); ++ PrintF("0x%" PRIx64 " =? 
0x%" PRIx64 " == Alsl_w(a0, %x, %x, %hhu)\n", ++ tc[i].expected_res, res, tc[i].rj, tc[i].rk, tc[i].sa); ++ CHECK_EQ(tc[i].expected_res, res); ++ } ++} ++ ++static uint64_t run_alsl_d(uint64_t rj, uint64_t rk, int8_t sa) { ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ __ Alsl_d(a2, a0, a1, sa); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assembler.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ ++ auto f = GeneratedCode::FromCode(*code); ++ ++ uint64_t res = reinterpret_cast(f.Call(rj, rk, 0, 0, 0)); ++ ++ return res; ++} ++ ++TEST(ALSL_D) { ++ CcTest::InitializeVM(); ++ struct TestCaseAlsl { ++ int64_t rj; ++ int64_t rk; ++ uint8_t sa; ++ uint64_t expected_res; ++ }; ++ // clang-format off ++ struct TestCaseAlsl tc[] = {// rj, rk, sa, expected_res ++ {0x1, 0x4, 1, 0x6}, ++ {0x1, 0x4, 2, 0x8}, ++ {0x1, 0x4, 3, 0xC}, ++ {0x1, 0x4, 4, 0x14}, ++ {0x1, 0x4, 5, 0x24}, ++ {0x1, 0x0, 1, 0x2}, ++ {0x1, 0x0, 2, 0x4}, ++ {0x1, 0x0, 3, 0x8}, ++ {0x1, 0x0, 4, 0x10}, ++ {0x1, 0x0, 5, 0x20}, ++ {0x0, 0x4, 1, 0x4}, ++ {0x0, 0x4, 2, 0x4}, ++ {0x0, 0x4, 3, 0x4}, ++ {0x0, 0x4, 4, 0x4}, ++ {0x0, 0x4, 5, 0x4}, ++ ++ // Shift overflow. ++ {INT64_MAX, 0x4, 1, 0x2}, ++ {INT64_MAX >> 1, 0x4, 2, 0x0}, ++ {INT64_MAX >> 2, 0x4, 3, 0xFFFFFFFFFFFFFFFC}, ++ {INT64_MAX >> 3, 0x4, 4, 0xFFFFFFFFFFFFFFF4}, ++ {INT64_MAX >> 4, 0x4, 5, 0xFFFFFFFFFFFFFFE4}, ++ ++ // Signed addition overflow. ++ {0x1, INT64_MAX - 1, 1, 0x8000000000000000}, ++ {0x1, INT64_MAX - 3, 2, 0x8000000000000000}, ++ {0x1, INT64_MAX - 7, 3, 0x8000000000000000}, ++ {0x1, INT64_MAX - 15, 4, 0x8000000000000000}, ++ {0x1, INT64_MAX - 31, 5, 0x8000000000000000}, ++ ++ // Addition overflow. ++ {0x1, -2, 1, 0x0}, ++ {0x1, -4, 2, 0x0}, ++ {0x1, -8, 3, 0x0}, ++ {0x1, -16, 4, 0x0}, ++ {0x1, -32, 5, 0x0}}; ++ // clang-format on ++ ++ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseAlsl); ++ for (size_t i = 0; i < nr_test_cases; ++i) { ++ uint64_t res = run_alsl_d(tc[i].rj, tc[i].rk, tc[i].sa); ++ PrintF("0x%" PRIx64 " =? 
0x%" PRIx64 " == Dlsa(v0, %" PRIx64 ", %" PRIx64 ++ ", %hhu)\n", ++ tc[i].expected_res, res, tc[i].rj, tc[i].rk, tc[i].sa); ++ CHECK_EQ(tc[i].expected_res, res); ++ } ++} ++// clang-format off ++static const std::vector ffint_ftintrz_uint32_test_values() { ++ static const uint32_t kValues[] = {0x00000000, 0x00000001, 0x00FFFF00, ++ 0x7FFFFFFF, 0x80000000, 0x80000001, ++ 0x80FFFF00, 0x8FFFFFFF, 0xFFFFFFFF}; ++ return std::vector(&kValues[0], &kValues[arraysize(kValues)]); ++} ++ ++static const std::vector ffint_ftintrz_int32_test_values() { ++ static const int32_t kValues[] = { ++ static_cast(0x00000000), static_cast(0x00000001), ++ static_cast(0x00FFFF00), static_cast(0x7FFFFFFF), ++ static_cast(0x80000000), static_cast(0x80000001), ++ static_cast(0x80FFFF00), static_cast(0x8FFFFFFF), ++ static_cast(0xFFFFFFFF)}; ++ return std::vector(&kValues[0], &kValues[arraysize(kValues)]); ++} ++ ++static const std::vector ffint_ftintrz_uint64_test_values() { ++ static const uint64_t kValues[] = { ++ 0x0000000000000000, 0x0000000000000001, 0x0000FFFFFFFF0000, ++ 0x7FFFFFFFFFFFFFFF, 0x8000000000000000, 0x8000000000000001, ++ 0x8000FFFFFFFF0000, 0x8FFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF}; ++ return std::vector(&kValues[0], &kValues[arraysize(kValues)]); ++} ++ ++static const std::vector ffint_ftintrz_int64_test_values() { ++ static const int64_t kValues[] = {static_cast(0x0000000000000000), ++ static_cast(0x0000000000000001), ++ static_cast(0x0000FFFFFFFF0000), ++ static_cast(0x7FFFFFFFFFFFFFFF), ++ static_cast(0x8000000000000000), ++ static_cast(0x8000000000000001), ++ static_cast(0x8000FFFFFFFF0000), ++ static_cast(0x8FFFFFFFFFFFFFFF), ++ static_cast(0xFFFFFFFFFFFFFFFF)}; ++ return std::vector(&kValues[0], &kValues[arraysize(kValues)]); ++} ++ // clang-off on ++ ++// Helper macros that can be used in FOR_INT32_INPUTS(i) { ... *i ... 
} ++#define FOR_INPUTS(ctype, itype, var, test_vector) \ ++ std::vector var##_vec = test_vector(); \ ++ for (std::vector::iterator var = var##_vec.begin(); \ ++ var != var##_vec.end(); ++var) ++ ++#define FOR_INPUTS2(ctype, itype, var, var2, test_vector) \ ++ std::vector var##_vec = test_vector(); \ ++ std::vector::iterator var; \ ++ std::vector::reverse_iterator var2; \ ++ for (var = var##_vec.begin(), var2 = var##_vec.rbegin(); \ ++ var != var##_vec.end(); ++var, ++var2) ++ ++#define FOR_ENUM_INPUTS(var, type, test_vector) \ ++ FOR_INPUTS(enum type, type, var, test_vector) ++#define FOR_STRUCT_INPUTS(var, type, test_vector) \ ++ FOR_INPUTS(struct type, type, var, test_vector) ++#define FOR_INT32_INPUTS(var, test_vector) \ ++ FOR_INPUTS(int32_t, int32, var, test_vector) ++#define FOR_INT32_INPUTS2(var, var2, test_vector) \ ++ FOR_INPUTS2(int32_t, int32, var, var2, test_vector) ++#define FOR_INT64_INPUTS(var, test_vector) \ ++ FOR_INPUTS(int64_t, int64, var, test_vector) ++#define FOR_UINT32_INPUTS(var, test_vector) \ ++ FOR_INPUTS(uint32_t, uint32, var, test_vector) ++#define FOR_UINT64_INPUTS(var, test_vector) \ ++ FOR_INPUTS(uint64_t, uint64, var, test_vector) ++ ++template ++RET_TYPE run_CVT(IN_TYPE x, Func GenerateConvertInstructionFunc) { ++ using F_CVT = RET_TYPE(IN_TYPE x0, int x1, int x2, int x3, int x4); ++ ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assm; ++ ++ GenerateConvertInstructionFunc(masm); ++ __ movfr2gr_d(a2, f9); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ ++ auto f = GeneratedCode::FromCode(*code); ++ ++ return reinterpret_cast(f.Call(x, 0, 0, 0, 0)); ++} ++ ++TEST(Ffint_s_uw_Ftintrz_uw_s) { ++ CcTest::InitializeVM(); ++ FOR_UINT32_INPUTS(i, ffint_ftintrz_uint32_test_values) { ++ ++ uint32_t input = *i; ++ auto fn = [](MacroAssembler* masm) { ++ __ Ffint_s_uw(f8, a0); ++ __ movgr2frh_w(f9, zero_reg); ++ __ Ftintrz_uw_s(f9, f8, f10); ++ }; ++ CHECK_EQ(static_cast(input), run_CVT(input, fn)); ++ } ++} ++ ++TEST(Ffint_s_ul_Ftintrz_ul_s) { ++ CcTest::InitializeVM(); ++ FOR_UINT64_INPUTS(i, ffint_ftintrz_uint64_test_values) { ++ uint64_t input = *i; ++ auto fn = [](MacroAssembler* masm) { ++ __ Ffint_s_ul(f8, a0); ++ __ Ftintrz_ul_s(f9, f8, f10, a2); ++ }; ++ CHECK_EQ(static_cast(input), run_CVT(input, fn)); ++ } ++} ++ ++TEST(Ffint_d_uw_Ftintrz_uw_d) { ++ CcTest::InitializeVM(); ++ FOR_UINT64_INPUTS(i, ffint_ftintrz_uint64_test_values) { ++ uint32_t input = *i; ++ auto fn = [](MacroAssembler* masm) { ++ __ Ffint_d_uw(f8, a0); ++ __ movgr2frh_w(f9, zero_reg); ++ __ Ftintrz_uw_d(f9, f8, f10); ++ }; ++ CHECK_EQ(static_cast(input), run_CVT(input, fn)); ++ } ++} ++ ++TEST(Ffint_d_ul_Ftintrz_ul_d) { ++ CcTest::InitializeVM(); ++ FOR_UINT64_INPUTS(i, ffint_ftintrz_uint64_test_values) { ++ uint64_t input = *i; ++ auto fn = [](MacroAssembler* masm) { ++ __ Ffint_d_ul(f8, a0); ++ __ Ftintrz_ul_d(f9, f8, f10, a2); ++ }; ++ CHECK_EQ(static_cast(input), run_CVT(input, fn)); ++ } ++} ++ ++TEST(Ffint_d_l_Ftintrz_l_ud) { ++ CcTest::InitializeVM(); ++ FOR_INT64_INPUTS(i, ffint_ftintrz_int64_test_values) { ++ int64_t input = *i; ++ uint64_t abs_input = (input < 0) ? 
-input : input; ++ auto fn = [](MacroAssembler* masm) { ++ __ movgr2fr_d(f8, a0); ++ __ ffint_d_l(f10, f8); ++ __ Ftintrz_l_ud(f9, f10, f11); ++ }; ++ CHECK_EQ(static_cast(abs_input), run_CVT(input, fn)); ++ } ++} ++ ++TEST(ffint_d_l_Ftint_l_d) { ++ CcTest::InitializeVM(); ++ FOR_INT64_INPUTS(i, ffint_ftintrz_int64_test_values) { ++ int64_t input = *i; ++ auto fn = [](MacroAssembler* masm) { ++ __ movgr2fr_d(f8, a0); ++ __ ffint_d_l(f10, f8); ++ __ Ftintrz_l_d(f9, f10); ++ }; ++ CHECK_EQ(static_cast(input), run_CVT(input, fn)); ++ } ++} ++ ++TEST(ffint_d_w_Ftint_w_d) { ++ CcTest::InitializeVM(); ++ FOR_INT32_INPUTS(i, ffint_ftintrz_int32_test_values) { ++ int32_t input = *i; ++ auto fn = [](MacroAssembler* masm) { ++ __ movgr2fr_w(f8, a0); ++ __ ffint_d_w(f10, f8); ++ __ Ftintrz_w_d(f9, f10); ++ __ movfr2gr_s(a4, f9); ++ __ movgr2fr_d(f9, a4); ++ }; ++ CHECK_EQ(static_cast(input), run_CVT(input, fn)); ++ } ++} ++ ++ ++static const std::vector overflow_int64_test_values() { ++ // clang-format off ++ static const int64_t kValues[] = {static_cast(0xF000000000000000), ++ static_cast(0x0000000000000001), ++ static_cast(0xFF00000000000000), ++ static_cast(0x0000F00111111110), ++ static_cast(0x0F00001000000000), ++ static_cast(0x991234AB12A96731), ++ static_cast(0xB0FFFF0F0F0F0F01), ++ static_cast(0x00006FFFFFFFFFFF), ++ static_cast(0xFFFFFFFFFFFFFFFF)}; ++ // clang-format on ++ return std::vector(&kValues[0], &kValues[arraysize(kValues)]); ++} ++ ++TEST(OverflowInstructions) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope handles(isolate); ++ ++ struct T { ++ int64_t lhs; ++ int64_t rhs; ++ int64_t output_add1; ++ int64_t output_add2; ++ int64_t output_sub1; ++ int64_t output_sub2; ++ int64_t output_mul1; ++ int64_t output_mul2; ++ int64_t overflow_add1; ++ int64_t overflow_add2; ++ int64_t overflow_sub1; ++ int64_t overflow_sub2; ++ int64_t overflow_mul1; ++ int64_t overflow_mul2; ++ }; ++ T t; ++ ++ FOR_INT64_INPUTS(i, overflow_int64_test_values) { ++ FOR_INT64_INPUTS(j, overflow_int64_test_values) { ++ int64_t ii = *i; ++ int64_t jj = *j; ++ int64_t expected_add, expected_sub; ++ int32_t ii32 = static_cast(ii); ++ int32_t jj32 = static_cast(jj); ++ int32_t expected_mul; ++ int64_t expected_add_ovf, expected_sub_ovf, expected_mul_ovf; ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ __ ld_d(t0, a0, offsetof(T, lhs)); ++ __ ld_d(t1, a0, offsetof(T, rhs)); ++ ++ __ AdddOverflow(t2, t0, Operand(t1), t3); ++ __ st_d(t2, a0, offsetof(T, output_add1)); ++ __ st_d(t3, a0, offsetof(T, overflow_add1)); ++ __ or_(t3, zero_reg, zero_reg); ++ __ AdddOverflow(t0, t0, Operand(t1), t3); ++ __ st_d(t0, a0, offsetof(T, output_add2)); ++ __ st_d(t3, a0, offsetof(T, overflow_add2)); ++ ++ __ ld_d(t0, a0, offsetof(T, lhs)); ++ __ ld_d(t1, a0, offsetof(T, rhs)); ++ ++ __ SubdOverflow(t2, t0, Operand(t1), t3); ++ __ st_d(t2, a0, offsetof(T, output_sub1)); ++ __ st_d(t3, a0, offsetof(T, overflow_sub1)); ++ __ or_(t3, zero_reg, zero_reg); ++ __ SubdOverflow(t0, t0, Operand(t1), t3); ++ __ st_d(t0, a0, offsetof(T, output_sub2)); ++ __ st_d(t3, a0, offsetof(T, overflow_sub2)); ++ ++ __ ld_d(t0, a0, offsetof(T, lhs)); ++ __ ld_d(t1, a0, offsetof(T, rhs)); ++ __ slli_w(t0, t0, 0); ++ __ slli_w(t1, t1, 0); ++ ++ __ MulOverflow(t2, t0, Operand(t1), t3); ++ __ st_d(t2, a0, offsetof(T, output_mul1)); ++ __ st_d(t3, a0, offsetof(T, overflow_mul1)); ++ __ or_(t3, zero_reg, zero_reg); ++ __ MulOverflow(t0, t0, 
Operand(t1), t3); ++ __ st_d(t0, a0, offsetof(T, output_mul2)); ++ __ st_d(t3, a0, offsetof(T, overflow_mul2)); ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = ++ Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ t.lhs = ii; ++ t.rhs = jj; ++ f.Call(&t, 0, 0, 0, 0); ++ ++ expected_add_ovf = base::bits::SignedAddOverflow64(ii, jj, &expected_add); ++ expected_sub_ovf = base::bits::SignedSubOverflow64(ii, jj, &expected_sub); ++ expected_mul_ovf = ++ base::bits::SignedMulOverflow32(ii32, jj32, &expected_mul); ++ ++ CHECK_EQ(expected_add_ovf, t.overflow_add1 < 0); ++ CHECK_EQ(expected_sub_ovf, t.overflow_sub1 < 0); ++ CHECK_EQ(expected_mul_ovf, t.overflow_mul1 != 0); ++ ++ CHECK_EQ(t.overflow_add1, t.overflow_add2); ++ CHECK_EQ(t.overflow_sub1, t.overflow_sub2); ++ CHECK_EQ(t.overflow_mul1, t.overflow_mul2); ++ ++ CHECK_EQ(expected_add, t.output_add1); ++ CHECK_EQ(expected_add, t.output_add2); ++ CHECK_EQ(expected_sub, t.output_sub1); ++ CHECK_EQ(expected_sub, t.output_sub2); ++ if (!expected_mul_ovf) { ++ CHECK_EQ(expected_mul, t.output_mul1); ++ CHECK_EQ(expected_mul, t.output_mul2); ++ } ++ } ++ } ++} ++ ++TEST(min_max_nan) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ struct TestFloat { ++ double a; ++ double b; ++ double c; ++ double d; ++ float e; ++ float f; ++ float g; ++ float h; ++ }; ++ ++ TestFloat test; ++ const double dnan = std::numeric_limits::quiet_NaN(); ++ const double dinf = std::numeric_limits::infinity(); ++ const double dminf = -std::numeric_limits::infinity(); ++ const float fnan = std::numeric_limits::quiet_NaN(); ++ const float finf = std::numeric_limits::infinity(); ++ const float fminf = -std::numeric_limits::infinity(); ++ const int kTableLength = 13; ++ ++ // clang-format off ++ double inputsa[kTableLength] = {dnan, 3.0, -0.0, 0.0, 42.0, dinf, dminf, ++ dinf, dnan, 3.0, dinf, dnan, dnan}; ++ double inputsb[kTableLength] = {dnan, 2.0, 0.0, -0.0, dinf, 42.0, dinf, ++ dminf, 3.0, dnan, dnan, dinf, dnan}; ++ double outputsdmin[kTableLength] = {dnan, 2.0, -0.0, -0.0, 42.0, ++ 42.0, dminf, dminf, dnan, dnan, ++ dnan, dnan, dnan}; ++ double outputsdmax[kTableLength] = {dnan, 3.0, 0.0, 0.0, dinf, dinf, dinf, ++ dinf, dnan, dnan, dnan, dnan, dnan}; ++ ++ float inputse[kTableLength] = {2.0, 3.0, -0.0, 0.0, 42.0, finf, fminf, ++ finf, fnan, 3.0, finf, fnan, fnan}; ++ float inputsf[kTableLength] = {3.0, 2.0, 0.0, -0.0, finf, 42.0, finf, ++ fminf, 3.0, fnan, fnan, finf, fnan}; ++ float outputsfmin[kTableLength] = {2.0, 2.0, -0.0, -0.0, 42.0, 42.0, fminf, ++ fminf, fnan, fnan, fnan, fnan, fnan}; ++ float outputsfmax[kTableLength] = {3.0, 3.0, 0.0, 0.0, finf, finf, finf, ++ finf, fnan, fnan, fnan, fnan, fnan}; ++ ++ // clang-format on ++ auto handle_dnan = [masm](FPURegister dst, Label* nan, Label* back) { ++ __ bind(nan); ++ __ LoadRoot(t8, RootIndex::kNanValue); ++ __ Fld_d(dst, FieldMemOperand(t8, HeapNumber::kValueOffset)); ++ __ Branch(back); ++ }; ++ ++ auto handle_snan = [masm, fnan](FPURegister dst, Label* nan, Label* back) { ++ __ bind(nan); ++ __ Move(dst, fnan); ++ __ Branch(back); ++ }; ++ ++ Label handle_mind_nan, handle_maxd_nan, handle_mins_nan, handle_maxs_nan; ++ Label back_mind_nan, back_maxd_nan, back_mins_nan, back_maxs_nan; ++ ++ __ push(s6); ++ __ InitializeRootRegister(); 
++ __ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, a))); ++ __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, b))); ++ __ Fld_s(f10, MemOperand(a0, offsetof(TestFloat, e))); ++ __ Fld_s(f11, MemOperand(a0, offsetof(TestFloat, f))); ++ __ Float64Min(f12, f8, f9, &handle_mind_nan); ++ __ bind(&back_mind_nan); ++ __ Float64Max(f13, f8, f9, &handle_maxd_nan); ++ __ bind(&back_maxd_nan); ++ __ Float32Min(f14, f10, f11, &handle_mins_nan); ++ __ bind(&back_mins_nan); ++ __ Float32Max(f15, f10, f11, &handle_maxs_nan); ++ __ bind(&back_maxs_nan); ++ __ Fst_d(f12, MemOperand(a0, offsetof(TestFloat, c))); ++ __ Fst_d(f13, MemOperand(a0, offsetof(TestFloat, d))); ++ __ Fst_s(f14, MemOperand(a0, offsetof(TestFloat, g))); ++ __ Fst_s(f15, MemOperand(a0, offsetof(TestFloat, h))); ++ __ pop(s6); ++ __ jirl(zero_reg, ra, 0); ++ ++ handle_dnan(f12, &handle_mind_nan, &back_mind_nan); ++ handle_dnan(f13, &handle_maxd_nan, &back_maxd_nan); ++ handle_snan(f14, &handle_mins_nan, &back_mins_nan); ++ handle_snan(f15, &handle_maxs_nan, &back_maxs_nan); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ for (int i = 0; i < kTableLength; i++) { ++ test.a = inputsa[i]; ++ test.b = inputsb[i]; ++ test.e = inputse[i]; ++ test.f = inputsf[i]; ++ ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(0, memcmp(&test.c, &outputsdmin[i], sizeof(test.c))); ++ CHECK_EQ(0, memcmp(&test.d, &outputsdmax[i], sizeof(test.d))); ++ CHECK_EQ(0, memcmp(&test.g, &outputsfmin[i], sizeof(test.g))); ++ CHECK_EQ(0, memcmp(&test.h, &outputsfmax[i], sizeof(test.h))); ++ } ++} ++ ++template ++bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset, ++ IN_TYPE value, Func GenerateUnalignedInstructionFunc) { ++ using F_CVT = int32_t(char* x0, int x1, int x2, int x3, int x4); ++ ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assm; ++ IN_TYPE res; ++ ++ GenerateUnalignedInstructionFunc(masm, in_offset, out_offset); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ ++ auto f = GeneratedCode::FromCode(*code); ++ ++ MemCopy(memory_buffer + in_offset, &value, sizeof(IN_TYPE)); ++ f.Call(memory_buffer, 0, 0, 0, 0); ++ MemCopy(&res, memory_buffer + out_offset, sizeof(IN_TYPE)); ++ ++ return res == value; ++} ++ ++static const std::vector unsigned_test_values() { ++ // clang-format off ++ static const uint64_t kValues[] = { ++ 0x2180F18A06384414, 0x000A714532102277, 0xBC1ACCCF180649F0, ++ 0x8000000080008000, 0x0000000000000001, 0xFFFFFFFFFFFFFFFF, ++ }; ++ // clang-format on ++ return std::vector(&kValues[0], &kValues[arraysize(kValues)]); ++} ++ ++static const std::vector unsigned_test_offset() { ++ static const int32_t kValues[] = {// value, offset ++ -132 * KB, -21 * KB, 0, 19 * KB, 135 * KB}; ++ return std::vector(&kValues[0], &kValues[arraysize(kValues)]); ++} ++ ++static const std::vector unsigned_test_offset_increment() { ++ static const int32_t kValues[] = {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5}; ++ return std::vector(&kValues[0], &kValues[arraysize(kValues)]); ++} ++ ++TEST(Ld_b) { ++ CcTest::InitializeVM(); ++ ++ static const int kBufferSize = 300 * KB; ++ char memory_buffer[kBufferSize]; ++ char* buffer_middle = memory_buffer + (kBufferSize / 2); ++ ++ FOR_UINT64_INPUTS(i, 
unsigned_test_values) { ++ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { ++ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { ++ uint16_t value = static_cast(*i & 0xFFFF); ++ int32_t in_offset = *j1 + *k1; ++ int32_t out_offset = *j2 + *k2; ++ ++ auto fn_1 = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ Ld_b(a2, MemOperand(a0, in_offset)); ++ __ St_b(a2, MemOperand(a0, out_offset)); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn_1)); ++ ++ auto fn_2 = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ mov(t0, a0); ++ __ Ld_b(a0, MemOperand(a0, in_offset)); ++ __ St_b(a0, MemOperand(t0, out_offset)); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn_2)); ++ ++ auto fn_3 = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ mov(t0, a0); ++ __ Ld_bu(a0, MemOperand(a0, in_offset)); ++ __ St_b(a0, MemOperand(t0, out_offset)); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn_3)); ++ ++ auto fn_4 = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ Ld_bu(a2, MemOperand(a0, in_offset)); ++ __ St_b(a2, MemOperand(a0, out_offset)); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn_4)); ++ } ++ } ++ } ++} ++ ++TEST(Ld_b_bitextension) { ++ CcTest::InitializeVM(); ++ ++ static const int kBufferSize = 300 * KB; ++ char memory_buffer[kBufferSize]; ++ char* buffer_middle = memory_buffer + (kBufferSize / 2); ++ ++ FOR_UINT64_INPUTS(i, unsigned_test_values) { ++ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { ++ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { ++ uint16_t value = static_cast(*i & 0xFFFF); ++ int32_t in_offset = *j1 + *k1; ++ int32_t out_offset = *j2 + *k2; ++ ++ auto fn = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ Label success, fail, end, different; ++ __ Ld_b(t0, MemOperand(a0, in_offset)); ++ __ Ld_bu(t1, MemOperand(a0, in_offset)); ++ __ Branch(&different, ne, t0, Operand(t1)); ++ ++ // If signed and unsigned values are same, check ++ // the upper bits to see if they are zero ++ __ srai_w(t0, t0, 7); ++ __ Branch(&success, eq, t0, Operand(zero_reg)); ++ __ Branch(&fail); ++ ++ // If signed and unsigned values are different, ++ // check that the upper bits are complementary ++ __ bind(&different); ++ __ srai_w(t1, t1, 7); ++ __ Branch(&fail, ne, t1, Operand(1)); ++ __ srai_w(t0, t0, 7); ++ __ addi_d(t0, t0, 1); ++ __ Branch(&fail, ne, t0, Operand(zero_reg)); ++ // Fall through to success ++ ++ __ bind(&success); ++ __ Ld_b(t0, MemOperand(a0, in_offset)); ++ __ St_b(t0, MemOperand(a0, out_offset)); ++ __ Branch(&end); ++ __ bind(&fail); ++ __ St_b(zero_reg, MemOperand(a0, out_offset)); ++ __ bind(&end); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn)); ++ } ++ } ++ } ++} ++ ++TEST(Ld_h) { ++ CcTest::InitializeVM(); ++ ++ static const int kBufferSize = 300 * KB; ++ char memory_buffer[kBufferSize]; ++ char* buffer_middle = memory_buffer + (kBufferSize / 2); ++ ++ FOR_UINT64_INPUTS(i, unsigned_test_values) { ++ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { ++ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { ++ uint16_t value = static_cast(*i & 0xFFFF); ++ int32_t in_offset = *j1 + 
*k1; ++ int32_t out_offset = *j2 + *k2; ++ ++ auto fn_1 = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ Ld_h(a2, MemOperand(a0, in_offset)); ++ __ St_h(a2, MemOperand(a0, out_offset)); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn_1)); ++ ++ auto fn_2 = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ mov(t0, a0); ++ __ Ld_h(a0, MemOperand(a0, in_offset)); ++ __ St_h(a0, MemOperand(t0, out_offset)); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn_2)); ++ ++ auto fn_3 = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ mov(t0, a0); ++ __ Ld_hu(a0, MemOperand(a0, in_offset)); ++ __ St_h(a0, MemOperand(t0, out_offset)); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn_3)); ++ ++ auto fn_4 = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ Ld_hu(a2, MemOperand(a0, in_offset)); ++ __ St_h(a2, MemOperand(a0, out_offset)); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn_4)); ++ } ++ } ++ } ++} ++ ++TEST(Ld_h_bitextension) { ++ CcTest::InitializeVM(); ++ ++ static const int kBufferSize = 300 * KB; ++ char memory_buffer[kBufferSize]; ++ char* buffer_middle = memory_buffer + (kBufferSize / 2); ++ ++ FOR_UINT64_INPUTS(i, unsigned_test_values) { ++ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { ++ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { ++ uint16_t value = static_cast(*i & 0xFFFF); ++ int32_t in_offset = *j1 + *k1; ++ int32_t out_offset = *j2 + *k2; ++ ++ auto fn = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ Label success, fail, end, different; ++ __ Ld_h(t0, MemOperand(a0, in_offset)); ++ __ Ld_hu(t1, MemOperand(a0, in_offset)); ++ __ Branch(&different, ne, t0, Operand(t1)); ++ ++ // If signed and unsigned values are same, check ++ // the upper bits to see if they are zero ++ __ srai_w(t0, t0, 15); ++ __ Branch(&success, eq, t0, Operand(zero_reg)); ++ __ Branch(&fail); ++ ++ // If signed and unsigned values are different, ++ // check that the upper bits are complementary ++ __ bind(&different); ++ __ srai_w(t1, t1, 15); ++ __ Branch(&fail, ne, t1, Operand(1)); ++ __ srai_w(t0, t0, 15); ++ __ addi_d(t0, t0, 1); ++ __ Branch(&fail, ne, t0, Operand(zero_reg)); ++ // Fall through to success ++ ++ __ bind(&success); ++ __ Ld_h(t0, MemOperand(a0, in_offset)); ++ __ St_h(t0, MemOperand(a0, out_offset)); ++ __ Branch(&end); ++ __ bind(&fail); ++ __ St_h(zero_reg, MemOperand(a0, out_offset)); ++ __ bind(&end); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn)); ++ } ++ } ++ } ++} ++ ++TEST(Ld_w) { ++ CcTest::InitializeVM(); ++ ++ static const int kBufferSize = 300 * KB; ++ char memory_buffer[kBufferSize]; ++ char* buffer_middle = memory_buffer + (kBufferSize / 2); ++ ++ FOR_UINT64_INPUTS(i, unsigned_test_values) { ++ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { ++ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { ++ uint32_t value = static_cast(*i & 0xFFFFFFFF); ++ int32_t in_offset = *j1 + *k1; ++ int32_t out_offset = *j2 + *k2; ++ ++ auto fn_1 = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ Ld_w(a2, MemOperand(a0, in_offset)); ++ __ St_w(a2, MemOperand(a0, 
out_offset)); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn_1)); ++ ++ auto fn_2 = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ mov(t0, a0); ++ __ Ld_w(a0, MemOperand(a0, in_offset)); ++ __ St_w(a0, MemOperand(t0, out_offset)); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, ++ run_Unaligned(buffer_middle, in_offset, out_offset, ++ (uint32_t)value, fn_2)); ++ ++ auto fn_3 = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ Ld_wu(a2, MemOperand(a0, in_offset)); ++ __ St_w(a2, MemOperand(a0, out_offset)); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn_3)); ++ ++ auto fn_4 = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ mov(t0, a0); ++ __ Ld_wu(a0, MemOperand(a0, in_offset)); ++ __ St_w(a0, MemOperand(t0, out_offset)); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, ++ run_Unaligned(buffer_middle, in_offset, out_offset, ++ (uint32_t)value, fn_4)); ++ } ++ } ++ } ++} ++ ++TEST(Ld_w_extension) { ++ CcTest::InitializeVM(); ++ ++ static const int kBufferSize = 300 * KB; ++ char memory_buffer[kBufferSize]; ++ char* buffer_middle = memory_buffer + (kBufferSize / 2); ++ ++ FOR_UINT64_INPUTS(i, unsigned_test_values) { ++ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { ++ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { ++ uint32_t value = static_cast(*i & 0xFFFFFFFF); ++ int32_t in_offset = *j1 + *k1; ++ int32_t out_offset = *j2 + *k2; ++ ++ auto fn = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ Label success, fail, end, different; ++ __ Ld_w(t0, MemOperand(a0, in_offset)); ++ __ Ld_wu(t1, MemOperand(a0, in_offset)); ++ __ Branch(&different, ne, t0, Operand(t1)); ++ ++ // If signed and unsigned values are same, check ++ // the upper bits to see if they are zero ++ __ srai_d(t0, t0, 31); ++ __ Branch(&success, eq, t0, Operand(zero_reg)); ++ __ Branch(&fail); ++ ++ // If signed and unsigned values are different, ++ // check that the upper bits are complementary ++ __ bind(&different); ++ __ srai_d(t1, t1, 31); ++ __ Branch(&fail, ne, t1, Operand(1)); ++ __ srai_d(t0, t0, 31); ++ __ addi_d(t0, t0, 1); ++ __ Branch(&fail, ne, t0, Operand(zero_reg)); ++ // Fall through to success ++ ++ __ bind(&success); ++ __ Ld_w(t0, MemOperand(a0, in_offset)); ++ __ St_w(t0, MemOperand(a0, out_offset)); ++ __ Branch(&end); ++ __ bind(&fail); ++ __ St_w(zero_reg, MemOperand(a0, out_offset)); ++ __ bind(&end); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn)); ++ } ++ } ++ } ++} ++ ++TEST(Ld_d) { ++ CcTest::InitializeVM(); ++ ++ static const int kBufferSize = 300 * KB; ++ char memory_buffer[kBufferSize]; ++ char* buffer_middle = memory_buffer + (kBufferSize / 2); ++ ++ FOR_UINT64_INPUTS(i, unsigned_test_values) { ++ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { ++ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { ++ uint64_t value = *i; ++ int32_t in_offset = *j1 + *k1; ++ int32_t out_offset = *j2 + *k2; ++ ++ auto fn_1 = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ Ld_d(a2, MemOperand(a0, in_offset)); ++ __ St_d(a2, MemOperand(a0, out_offset)); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn_1)); ++ ++ auto fn_2 = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t 
out_offset) { ++ __ mov(t0, a0); ++ __ Ld_d(a0, MemOperand(a0, in_offset)); ++ __ St_d(a0, MemOperand(t0, out_offset)); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, ++ run_Unaligned(buffer_middle, in_offset, out_offset, ++ (uint32_t)value, fn_2)); ++ } ++ } ++ } ++} ++ ++TEST(Fld_s) { ++ CcTest::InitializeVM(); ++ ++ static const int kBufferSize = 300 * KB; ++ char memory_buffer[kBufferSize]; ++ char* buffer_middle = memory_buffer + (kBufferSize / 2); ++ ++ FOR_UINT64_INPUTS(i, unsigned_test_values) { ++ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { ++ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { ++ float value = static_cast(*i & 0xFFFFFFFF); ++ int32_t in_offset = *j1 + *k1; ++ int32_t out_offset = *j2 + *k2; ++ ++ auto fn = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ Fld_s(f0, MemOperand(a0, in_offset)); ++ __ Fst_s(f0, MemOperand(a0, out_offset)); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn)); ++ } ++ } ++ } ++} ++ ++TEST(Fld_d) { ++ CcTest::InitializeVM(); ++ ++ static const int kBufferSize = 300 * KB; ++ char memory_buffer[kBufferSize]; ++ char* buffer_middle = memory_buffer + (kBufferSize / 2); ++ ++ FOR_UINT64_INPUTS(i, unsigned_test_values) { ++ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { ++ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { ++ double value = static_cast(*i); ++ int32_t in_offset = *j1 + *k1; ++ int32_t out_offset = *j2 + *k2; ++ ++ auto fn = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ Fld_d(f0, MemOperand(a0, in_offset)); ++ __ Fst_d(f0, MemOperand(a0, out_offset)); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn)); ++ } ++ } ++ } ++} ++ ++static const std::vector sltu_test_values() { ++ // clang-format off ++ static const uint64_t kValues[] = { ++ 0, ++ 1, ++ 0x7FE, ++ 0x7FF, ++ 0x800, ++ 0x801, ++ 0xFFE, ++ 0xFFF, ++ 0xFFFFFFFFFFFFF7FE, ++ 0xFFFFFFFFFFFFF7FF, ++ 0xFFFFFFFFFFFFF800, ++ 0xFFFFFFFFFFFFF801, ++ 0xFFFFFFFFFFFFFFFE, ++ 0xFFFFFFFFFFFFFFFF, ++ }; ++ // clang-format on ++ return std::vector(&kValues[0], &kValues[arraysize(kValues)]); ++} ++ ++template ++bool run_Sltu(uint64_t rj, uint64_t rk, Func GenerateSltuInstructionFunc) { ++ using F_CVT = int64_t(uint64_t x0, uint64_t x1, int x2, int x3, int x4); ++ ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assm; ++ ++ GenerateSltuInstructionFunc(masm, rk); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ ++ auto f = GeneratedCode::FromCode(*code); ++ int64_t res = reinterpret_cast(f.Call(rj, rk, 0, 0, 0)); ++ return res == 1; ++} ++ ++TEST(Sltu) { ++ CcTest::InitializeVM(); ++ ++ FOR_UINT64_INPUTS(i, sltu_test_values) { ++ FOR_UINT64_INPUTS(j, sltu_test_values) { ++ uint64_t rj = *i; ++ uint64_t rk = *j; ++ ++ auto fn_1 = [](MacroAssembler* masm, uint64_t imm) { ++ __ Sltu(a2, a0, Operand(imm)); ++ }; ++ CHECK_EQ(rj < rk, run_Sltu(rj, rk, fn_1)); ++ ++ auto fn_2 = [](MacroAssembler* masm, uint64_t imm) { ++ __ Sltu(a2, a0, a1); ++ }; ++ CHECK_EQ(rj < rk, run_Sltu(rj, rk, fn_2)); ++ } ++ } ++} ++ ++template ++static GeneratedCode GenerateMacroFloat32MinMax(MacroAssembler* masm) { ++ T a = T::from_code(8); // f8 ++ T b = T::from_code(9); // f9 ++ T c = 
T::from_code(10); // f10 ++ ++ Label ool_min_abc, ool_min_aab, ool_min_aba; ++ Label ool_max_abc, ool_max_aab, ool_max_aba; ++ ++ Label done_min_abc, done_min_aab, done_min_aba; ++ Label done_max_abc, done_max_aab, done_max_aba; ++ ++#define FLOAT_MIN_MAX(fminmax, res, x, y, done, ool, res_field) \ ++ __ Fld_s(x, MemOperand(a0, offsetof(Inputs, src1_))); \ ++ __ Fld_s(y, MemOperand(a0, offsetof(Inputs, src2_))); \ ++ __ fminmax(res, x, y, &ool); \ ++ __ bind(&done); \ ++ __ Fst_s(a, MemOperand(a1, offsetof(Results, res_field))) ++ ++ // a = min(b, c); ++ FLOAT_MIN_MAX(Float32Min, a, b, c, done_min_abc, ool_min_abc, min_abc_); ++ // a = min(a, b); ++ FLOAT_MIN_MAX(Float32Min, a, a, b, done_min_aab, ool_min_aab, min_aab_); ++ // a = min(b, a); ++ FLOAT_MIN_MAX(Float32Min, a, b, a, done_min_aba, ool_min_aba, min_aba_); ++ ++ // a = max(b, c); ++ FLOAT_MIN_MAX(Float32Max, a, b, c, done_max_abc, ool_max_abc, max_abc_); ++ // a = max(a, b); ++ FLOAT_MIN_MAX(Float32Max, a, a, b, done_max_aab, ool_max_aab, max_aab_); ++ // a = max(b, a); ++ FLOAT_MIN_MAX(Float32Max, a, b, a, done_max_aba, ool_max_aba, max_aba_); ++ ++#undef FLOAT_MIN_MAX ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ // Generate out-of-line cases. ++ __ bind(&ool_min_abc); ++ __ Float32MinOutOfLine(a, b, c); ++ __ Branch(&done_min_abc); ++ ++ __ bind(&ool_min_aab); ++ __ Float32MinOutOfLine(a, a, b); ++ __ Branch(&done_min_aab); ++ ++ __ bind(&ool_min_aba); ++ __ Float32MinOutOfLine(a, b, a); ++ __ Branch(&done_min_aba); ++ ++ __ bind(&ool_max_abc); ++ __ Float32MaxOutOfLine(a, b, c); ++ __ Branch(&done_max_abc); ++ ++ __ bind(&ool_max_aab); ++ __ Float32MaxOutOfLine(a, a, b); ++ __ Branch(&done_max_aab); ++ ++ __ bind(&ool_max_aba); ++ __ Float32MaxOutOfLine(a, b, a); ++ __ Branch(&done_max_aba); ++ ++ CodeDesc desc; ++ masm->GetCode(masm->isolate(), &desc); ++ Handle code = ++ Factory::CodeBuilder(masm->isolate(), desc, Code::STUB).Build(); ++#ifdef DEBUG ++ StdoutStream os; ++ code->Print(os); ++#endif ++ return GeneratedCode::FromCode(*code); ++} ++ ++TEST(macro_float_minmax_f32) { ++ // Test the Float32Min and Float32Max macros. ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ struct Inputs { ++ float src1_; ++ float src2_; ++ }; ++ ++ struct Results { ++ // Check all register aliasing possibilities in order to exercise all ++ // code-paths in the macro assembler. ++ float min_abc_; ++ float min_aab_; ++ float min_aba_; ++ float max_abc_; ++ float max_aab_; ++ float max_aba_; ++ }; ++ ++ GeneratedCode f = ++ GenerateMacroFloat32MinMax(masm); ++ ++#define CHECK_MINMAX(src1, src2, min, max) \ ++ do { \ ++ Inputs inputs = {src1, src2}; \ ++ Results results; \ ++ f.Call(&inputs, &results, 0, 0, 0); \ ++ CHECK_EQ(bit_cast(min), bit_cast(results.min_abc_)); \ ++ CHECK_EQ(bit_cast(min), bit_cast(results.min_aab_)); \ ++ CHECK_EQ(bit_cast(min), bit_cast(results.min_aba_)); \ ++ CHECK_EQ(bit_cast(max), bit_cast(results.max_abc_)); \ ++ CHECK_EQ(bit_cast(max), bit_cast(results.max_aab_)); \ ++ CHECK_EQ(bit_cast(max), bit_cast(results.max_aba_)); \ ++ /* Use a bit_cast to correctly identify -0.0 and NaNs. 
*/ \ ++ } while (0) ++ ++ float nan_a = std::numeric_limits::quiet_NaN(); ++ float nan_b = std::numeric_limits::quiet_NaN(); ++ ++ CHECK_MINMAX(1.0f, -1.0f, -1.0f, 1.0f); ++ CHECK_MINMAX(-1.0f, 1.0f, -1.0f, 1.0f); ++ CHECK_MINMAX(0.0f, -1.0f, -1.0f, 0.0f); ++ CHECK_MINMAX(-1.0f, 0.0f, -1.0f, 0.0f); ++ CHECK_MINMAX(-0.0f, -1.0f, -1.0f, -0.0f); ++ CHECK_MINMAX(-1.0f, -0.0f, -1.0f, -0.0f); ++ CHECK_MINMAX(0.0f, 1.0f, 0.0f, 1.0f); ++ CHECK_MINMAX(1.0f, 0.0f, 0.0f, 1.0f); ++ ++ CHECK_MINMAX(0.0f, 0.0f, 0.0f, 0.0f); ++ CHECK_MINMAX(-0.0f, -0.0f, -0.0f, -0.0f); ++ CHECK_MINMAX(-0.0f, 0.0f, -0.0f, 0.0f); ++ CHECK_MINMAX(0.0f, -0.0f, -0.0f, 0.0f); ++ ++ CHECK_MINMAX(0.0f, nan_a, nan_a, nan_a); ++ CHECK_MINMAX(nan_a, 0.0f, nan_a, nan_a); ++ CHECK_MINMAX(nan_a, nan_b, nan_a, nan_a); ++ CHECK_MINMAX(nan_b, nan_a, nan_b, nan_b); ++ ++#undef CHECK_MINMAX ++} ++ ++template ++static GeneratedCode GenerateMacroFloat64MinMax(MacroAssembler* masm) { ++ T a = T::from_code(8); // f8 ++ T b = T::from_code(9); // f9 ++ T c = T::from_code(10); // f10 ++ ++ Label ool_min_abc, ool_min_aab, ool_min_aba; ++ Label ool_max_abc, ool_max_aab, ool_max_aba; ++ ++ Label done_min_abc, done_min_aab, done_min_aba; ++ Label done_max_abc, done_max_aab, done_max_aba; ++ ++#define FLOAT_MIN_MAX(fminmax, res, x, y, done, ool, res_field) \ ++ __ Fld_d(x, MemOperand(a0, offsetof(Inputs, src1_))); \ ++ __ Fld_d(y, MemOperand(a0, offsetof(Inputs, src2_))); \ ++ __ fminmax(res, x, y, &ool); \ ++ __ bind(&done); \ ++ __ Fst_d(a, MemOperand(a1, offsetof(Results, res_field))) ++ ++ // a = min(b, c); ++ FLOAT_MIN_MAX(Float64Min, a, b, c, done_min_abc, ool_min_abc, min_abc_); ++ // a = min(a, b); ++ FLOAT_MIN_MAX(Float64Min, a, a, b, done_min_aab, ool_min_aab, min_aab_); ++ // a = min(b, a); ++ FLOAT_MIN_MAX(Float64Min, a, b, a, done_min_aba, ool_min_aba, min_aba_); ++ ++ // a = max(b, c); ++ FLOAT_MIN_MAX(Float64Max, a, b, c, done_max_abc, ool_max_abc, max_abc_); ++ // a = max(a, b); ++ FLOAT_MIN_MAX(Float64Max, a, a, b, done_max_aab, ool_max_aab, max_aab_); ++ // a = max(b, a); ++ FLOAT_MIN_MAX(Float64Max, a, b, a, done_max_aba, ool_max_aba, max_aba_); ++ ++#undef FLOAT_MIN_MAX ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ // Generate out-of-line cases. ++ __ bind(&ool_min_abc); ++ __ Float64MinOutOfLine(a, b, c); ++ __ Branch(&done_min_abc); ++ ++ __ bind(&ool_min_aab); ++ __ Float64MinOutOfLine(a, a, b); ++ __ Branch(&done_min_aab); ++ ++ __ bind(&ool_min_aba); ++ __ Float64MinOutOfLine(a, b, a); ++ __ Branch(&done_min_aba); ++ ++ __ bind(&ool_max_abc); ++ __ Float64MaxOutOfLine(a, b, c); ++ __ Branch(&done_max_abc); ++ ++ __ bind(&ool_max_aab); ++ __ Float64MaxOutOfLine(a, a, b); ++ __ Branch(&done_max_aab); ++ ++ __ bind(&ool_max_aba); ++ __ Float64MaxOutOfLine(a, b, a); ++ __ Branch(&done_max_aba); ++ ++ CodeDesc desc; ++ masm->GetCode(masm->isolate(), &desc); ++ Handle code = ++ Factory::CodeBuilder(masm->isolate(), desc, Code::STUB).Build(); ++#ifdef DEBUG ++ StdoutStream os; ++ code->Print(os); ++#endif ++ return GeneratedCode::FromCode(*code); ++} ++ ++TEST(macro_float_minmax_f64) { ++ // Test the Float64Min and Float64Max macros. 
++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ struct Inputs { ++ double src1_; ++ double src2_; ++ }; ++ ++ struct Results { ++ // Check all register aliasing possibilities in order to exercise all ++ // code-paths in the macro assembler. ++ double min_abc_; ++ double min_aab_; ++ double min_aba_; ++ double max_abc_; ++ double max_aab_; ++ double max_aba_; ++ }; ++ ++ GeneratedCode f = ++ GenerateMacroFloat64MinMax(masm); ++ ++#define CHECK_MINMAX(src1, src2, min, max) \ ++ do { \ ++ Inputs inputs = {src1, src2}; \ ++ Results results; \ ++ f.Call(&inputs, &results, 0, 0, 0); \ ++ CHECK_EQ(bit_cast<uint64_t>(min), bit_cast<uint64_t>(results.min_abc_)); \ ++ CHECK_EQ(bit_cast<uint64_t>(min), bit_cast<uint64_t>(results.min_aab_)); \ ++ CHECK_EQ(bit_cast<uint64_t>(min), bit_cast<uint64_t>(results.min_aba_)); \ ++ CHECK_EQ(bit_cast<uint64_t>(max), bit_cast<uint64_t>(results.max_abc_)); \ ++ CHECK_EQ(bit_cast<uint64_t>(max), bit_cast<uint64_t>(results.max_aab_)); \ ++ CHECK_EQ(bit_cast<uint64_t>(max), bit_cast<uint64_t>(results.max_aba_)); \ ++ /* Use a bit_cast to correctly identify -0.0 and NaNs. */ \ ++ } while (0) ++ ++ double nan_a = std::numeric_limits<double>::quiet_NaN(); ++ double nan_b = std::numeric_limits<double>::quiet_NaN(); ++ ++ CHECK_MINMAX(1.0, -1.0, -1.0, 1.0); ++ CHECK_MINMAX(-1.0, 1.0, -1.0, 1.0); ++ CHECK_MINMAX(0.0, -1.0, -1.0, 0.0); ++ CHECK_MINMAX(-1.0, 0.0, -1.0, 0.0); ++ CHECK_MINMAX(-0.0, -1.0, -1.0, -0.0); ++ CHECK_MINMAX(-1.0, -0.0, -1.0, -0.0); ++ CHECK_MINMAX(0.0, 1.0, 0.0, 1.0); ++ CHECK_MINMAX(1.0, 0.0, 0.0, 1.0); ++ ++ CHECK_MINMAX(0.0, 0.0, 0.0, 0.0); ++ CHECK_MINMAX(-0.0, -0.0, -0.0, -0.0); ++ CHECK_MINMAX(-0.0, 0.0, -0.0, 0.0); ++ CHECK_MINMAX(0.0, -0.0, -0.0, 0.0); ++ ++ CHECK_MINMAX(0.0, nan_a, nan_a, nan_a); ++ CHECK_MINMAX(nan_a, 0.0, nan_a, nan_a); ++ CHECK_MINMAX(nan_a, nan_b, nan_a, nan_a); ++ CHECK_MINMAX(nan_b, nan_a, nan_b, nan_b); ++ ++#undef CHECK_MINMAX ++} ++ ++uint64_t run_Sub_w(uint64_t imm, int32_t num_instr) { ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ Label code_start; ++ __ bind(&code_start); ++ __ Sub_w(a2, zero_reg, Operand(imm)); ++ CHECK_EQ(masm->InstructionsGeneratedSince(&code_start), num_instr); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++#ifdef OBJECT_PRINT ++ code->Print(std::cout); ++#endif ++ auto f = GeneratedCode::FromCode(*code); ++ ++ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0)); ++ ++ return res; ++} ++ ++TEST(SUB_W) { ++ CcTest::InitializeVM(); ++ ++ // Test Sub_w macro-instruction for min_int12 and max_int12 border cases. ++ // For subtracting int12 immediate values we use addi_w. ++ ++ struct TestCaseSub { ++ uint64_t imm; ++ uint64_t expected_res; ++ int32_t num_instr; ++ }; ++ ++ // We call Sub_w(a2, zero_reg, imm) to test cases listed below. ++ // 0 - imm = expected_res ++ // clang-format off ++ struct TestCaseSub tc[] = { ++ // imm, expected_res, num_instr ++ {0xFFFFFFFFFFFFF800, 0x800, 2}, // min_int12 ++ // The test case above generates ori + add_w instruction sequence. ++ // We can't have just addi_w because -min_int12 > max_int12 so use ++ // register.
We can load min_int12 to at register with addi_w and then ++ // subtract at with sub_w, but now we use ori + add_w because -min_int12 can ++ // be loaded using ori. ++ {0x800, 0xFFFFFFFFFFFFF800, 1}, // max_int12 + 1 ++ // Generates addi_w ++ // max_int12 + 1 is not int12 but -(max_int12 + 1) is, just use addi_w. ++ {0xFFFFFFFFFFFFF7FF, 0x801, 2}, // min_int12 - 1 ++ // Generates ori + add_w ++ // To load this value to at we need two instructions and another one to ++ // subtract, lu12i + ori + sub_w. But we can load -value to at using just ++ // ori and then add at register with add_w. ++ {0x801, 0xFFFFFFFFFFFFF7FF, 2}, // max_int12 + 2 ++ // Generates ori + sub_w ++ // Not int12 but is uint12, load value to at with ori and subtract with ++ // sub_w. ++ {0x00010000, 0xFFFFFFFFFFFF0000, 2}, ++ // Generates lu12i_w + sub_w ++ // Load value using lu12i_w to at and subtract with sub_w. ++ {0x00010001, 0xFFFFFFFFFFFEFFFF, 3}, ++ // Generates lu12i + ori + sub_w ++ // We have to generate three instructions in this case. ++ {0x7FFFFFFF, 0xFFFFFFFF80000001, 3}, // max_int32 ++ // Generates lu12i_w + ori + sub_w ++ {0xFFFFFFFF80000000, 0xFFFFFFFF80000000, 2}, // min_int32 ++ // The test case above generates lu12i + sub_w instruction sequence. ++ // The result of 0 - min_int32 equals max_int32 + 1, which wraps around to ++ // min_int32 again. ++ }; ++ // clang-format on ++ ++ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseSub); ++ for (size_t i = 0; i < nr_test_cases; ++i) { ++ CHECK_EQ(tc[i].expected_res, run_Sub_w(tc[i].imm, tc[i].num_instr)); ++ } ++} ++ ++uint64_t run_Sub_d(uint64_t imm, int32_t num_instr) { ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ Label code_start; ++ __ bind(&code_start); ++ __ Sub_d(a2, zero_reg, Operand(imm)); ++ CHECK_EQ(masm->InstructionsGeneratedSince(&code_start), num_instr); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++#ifdef OBJECT_PRINT ++ code->Print(std::cout); ++#endif ++ auto f = GeneratedCode::FromCode(*code); ++ ++ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0)); ++ ++ return res; ++} ++ ++TEST(SUB_D) { ++ CcTest::InitializeVM(); ++ ++ // Test Sub_d macro-instruction for min_int12 and max_int12 border cases. ++ // For subtracting int12 immediate values we use addi_d. ++ ++ struct TestCaseSub { ++ uint64_t imm; ++ uint64_t expected_res; ++ int32_t num_instr; ++ }; ++ // We call Sub_d(a2, zero_reg, imm) to test cases listed below. ++ // 0 - imm = expected_res ++ // clang-format off ++ struct TestCaseSub tc[] = { ++ // imm, expected_res, num_instr ++ {0xFFFFFFFFFFFFF800, 0x800, 2}, // min_int12 ++ // The test case above generates addi_d instruction. ++ // This is an int12 value and we can load it using just addi_d. ++ { 0x800, 0xFFFFFFFFFFFFF800, 1}, // max_int12 + 1 ++ // Generates addi_d ++ // max_int12 + 1 is not int12 but -(max_int12 + 1) is, just use addi_d.
++ {0xFFFFFFFFFFFFF7FF, 0x801, 2}, // min_int12 - 1 ++ // Generates ori + add_d ++ { 0x801, 0xFFFFFFFFFFFFF7FF, 2}, // max_int12 + 2 ++ // Generates ori + add_d ++ { 0x00001000, 0xFFFFFFFFFFFFF000, 2}, // max_uint12 + 1 ++ // Generates lu12i_w + sub_d ++ { 0x00001001, 0xFFFFFFFFFFFFEFFF, 3}, // max_uint12 + 2 ++ // Generates lu12i_w + ori + sub_d ++ {0x00000000FFFFFFFF, 0xFFFFFFFF00000001, 3}, // max_uint32 ++ // Generates addi_w + li32i_d + sub_d ++ {0x00000000FFFFFFFE, 0xFFFFFFFF00000002, 3}, // max_uint32 - 1 ++ // Generates addi_w + li32i_d + sub_d ++ {0xFFFFFFFF80000000, 0x80000000, 2}, // min_int32 ++ // Generates lu12i_w + sub_d ++ {0x0000000080000000, 0xFFFFFFFF80000000, 2}, // max_int32 + 1 ++ // Generates lu12i_w + add_d ++ {0xFFFF0000FFFF8765, 0x0000FFFF0000789B, 4}, ++ // Generates lu12i_w + ori + lu32i_d + sub ++ {0x1234ABCD87654321, 0xEDCB5432789ABCDF, 5}, ++ // Generates lu12i_w + ori + lu32i_d + lu52i_d + sub ++ {0xFFFF789100000000, 0x876F00000000, 3}, ++ // Generates xor + lu32i_d + sub ++ {0xF12F789100000000, 0xED0876F00000000, 4}, ++ // Generates xor + lu32i_d + lu52i_d + sub ++ {0xF120000000000800, 0xEDFFFFFFFFFF800, 3}, ++ // Generates ori + lu52i_d + sub ++ {0xFFF0000000000000, 0x10000000000000, 2} ++ // Generates lu52i_d + sub ++ }; ++ // clang-format on ++ ++ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseSub); ++ for (size_t i = 0; i < nr_test_cases; ++i) { ++ CHECK_EQ(tc[i].expected_res, run_Sub_d(tc[i].imm, tc[i].num_instr)); ++ } ++} ++ ++TEST(Move) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ struct T { ++ float a; ++ float b; ++ float result_a; ++ float result_b; ++ double c; ++ double d; ++ double e; ++ double result_c; ++ double result_d; ++ double result_e; ++ }; ++ T t; ++ __ li(a4, static_cast(0x80000000)); ++ __ St_w(a4, MemOperand(a0, offsetof(T, a))); ++ __ li(a5, static_cast(0x12345678)); ++ __ St_w(a5, MemOperand(a0, offsetof(T, b))); ++ __ li(a6, static_cast(0x8877665544332211)); ++ __ St_d(a6, MemOperand(a0, offsetof(T, c))); ++ __ li(a7, static_cast(0x1122334455667788)); ++ __ St_d(a7, MemOperand(a0, offsetof(T, d))); ++ __ li(t0, static_cast(0)); ++ __ St_d(t0, MemOperand(a0, offsetof(T, e))); ++ ++ __ Move(f8, static_cast(0x80000000)); ++ __ Move(f9, static_cast(0x12345678)); ++ __ Move(f10, static_cast(0x8877665544332211)); ++ __ Move(f11, static_cast(0x1122334455667788)); ++ __ Move(f12, static_cast(0)); ++ __ Fst_s(f8, MemOperand(a0, offsetof(T, result_a))); ++ __ Fst_s(f9, MemOperand(a0, offsetof(T, result_b))); ++ __ Fst_d(f10, MemOperand(a0, offsetof(T, result_c))); ++ __ Fst_d(f11, MemOperand(a0, offsetof(T, result_d))); ++ __ Fst_d(f12, MemOperand(a0, offsetof(T, result_e))); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ f.Call(&t, 0, 0, 0, 0); ++ CHECK_EQ(t.a, t.result_a); ++ CHECK_EQ(t.b, t.result_b); ++ CHECK_EQ(t.c, t.result_c); ++ CHECK_EQ(t.d, t.result_d); ++ CHECK_EQ(t.e, t.result_e); ++} ++ ++TEST(Movz_Movn) { ++ const int kTableLength = 4; ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ struct Test { ++ int64_t rt; ++ 
int64_t a; ++ int64_t b; ++ int64_t bold; ++ int64_t b1; ++ int64_t bold1; ++ int32_t c; ++ int32_t d; ++ int32_t dold; ++ int32_t d1; ++ int32_t dold1; ++ }; ++ ++ Test test; ++ // clang-format off ++ int64_t inputs_D[kTableLength] = { ++ 7, 8, -9, -10 ++ }; ++ int32_t inputs_W[kTableLength] = { ++ 3, 4, -5, -6 ++ }; ++ ++ int32_t outputs_W[kTableLength] = { ++ 3, 4, -5, -6 ++ }; ++ int64_t outputs_D[kTableLength] = { ++ 7, 8, -9, -10 ++ }; ++ // clang-format on ++ ++ __ Ld_d(a4, MemOperand(a0, offsetof(Test, a))); ++ __ Ld_w(a5, MemOperand(a0, offsetof(Test, c))); ++ __ Ld_d(a6, MemOperand(a0, offsetof(Test, rt))); ++ __ li(t0, 1); ++ __ li(t1, 1); ++ __ li(t2, 1); ++ __ li(t3, 1); ++ __ St_d(t0, MemOperand(a0, offsetof(Test, bold))); ++ __ St_d(t1, MemOperand(a0, offsetof(Test, bold1))); ++ __ St_w(t2, MemOperand(a0, offsetof(Test, dold))); ++ __ St_w(t3, MemOperand(a0, offsetof(Test, dold1))); ++ __ Movz(t0, a4, a6); ++ __ Movn(t1, a4, a6); ++ __ Movz(t2, a5, a6); ++ __ Movn(t3, a5, a6); ++ __ St_d(t0, MemOperand(a0, offsetof(Test, b))); ++ __ St_d(t1, MemOperand(a0, offsetof(Test, b1))); ++ __ St_w(t2, MemOperand(a0, offsetof(Test, d))); ++ __ St_w(t3, MemOperand(a0, offsetof(Test, d1))); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ for (int i = 0; i < kTableLength; i++) { ++ test.a = inputs_D[i]; ++ test.c = inputs_W[i]; ++ ++ test.rt = 1; ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.b, test.bold); ++ CHECK_EQ(test.d, test.dold); ++ CHECK_EQ(test.b1, outputs_D[i]); ++ CHECK_EQ(test.d1, outputs_W[i]); ++ ++ test.rt = 0; ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.b, outputs_D[i]); ++ CHECK_EQ(test.d, outputs_W[i]); ++ CHECK_EQ(test.b1, test.bold1); ++ CHECK_EQ(test.d1, test.dold1); ++ } ++} ++ ++TEST(macro_instructions1) { ++ // Test 32bit calculate instructions macros. 
++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ Label exit, error; ++ ++ __ li(a4, 0x00000004); ++ __ li(a5, 0x00001234); ++ __ li(a6, 0x12345678); ++ __ li(a7, 0x7FFFFFFF); ++ __ li(t0, static_cast(0xFFFFFFFC)); ++ __ li(t1, static_cast(0xFFFFEDCC)); ++ __ li(t2, static_cast(0xEDCBA988)); ++ __ li(t3, static_cast(0x80000000)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ add_w(a2, a7, t1); ++ __ Add_w(a3, t1, a7); ++ __ Branch(&error, ne, a2, Operand(a3)); ++ __ Add_w(t4, t1, static_cast(0x7FFFFFFF)); ++ __ Branch(&error, ne, a2, Operand(t4)); ++ __ addi_w(a2, a6, 0x800); ++ __ Add_w(a3, a6, 0xFFFFF800); ++ __ Branch(&error, ne, a2, Operand(a3)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ mul_w(a2, t1, a7); ++ __ Mul_w(a3, t1, a7); ++ __ Branch(&error, ne, a2, Operand(a3)); ++ __ Mul_w(t4, t1, static_cast(0x7FFFFFFF)); ++ __ Branch(&error, ne, a2, Operand(t4)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ mulh_w(a2, t1, a7); ++ __ Mulh_w(a3, t1, a7); ++ __ Branch(&error, ne, a2, Operand(a3)); ++ __ Mulh_w(t4, t1, static_cast(0x7FFFFFFF)); ++ __ Branch(&error, ne, a2, Operand(t4)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Mulh_wu(a2, a4, static_cast(0xFFFFEDCC)); ++ __ Branch(&error, ne, a2, Operand(0x3)); ++ __ Mulh_wu(a3, a4, t1); ++ __ Branch(&error, ne, a3, Operand(0x3)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ div_w(a2, a7, t2); ++ __ Div_w(a3, a7, t2); ++ __ Branch(&error, ne, a2, Operand(a3)); ++ __ Div_w(t4, a7, static_cast(0xEDCBA988)); ++ __ Branch(&error, ne, a2, Operand(t4)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Div_wu(a2, a7, a5); ++ __ Branch(&error, ne, a2, Operand(0x70821)); ++ __ Div_wu(a3, t0, static_cast(0x00001234)); ++ __ Branch(&error, ne, a3, Operand(0xE1042)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Mod_w(a2, a6, a5); ++ __ Branch(&error, ne, a2, Operand(0xDA8)); ++ __ Mod_w(a3, t2, static_cast(0x00001234)); ++ __ Branch(&error, ne, a3, Operand(0xFFFFFFFFFFFFF258)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Mod_wu(a2, a6, a5); ++ __ Branch(&error, ne, a2, Operand(0xDA8)); ++ __ Mod_wu(a3, t2, static_cast(0x00001234)); ++ __ Branch(&error, ne, a3, Operand(0xF0)); ++ ++ __ li(a2, 0x31415926); ++ __ b(&exit); ++ ++ __ bind(&error); ++ __ li(a2, 0x666); ++ ++ __ bind(&exit); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ int64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); ++ ++ CHECK_EQ(0x31415926L, res); ++} ++ ++TEST(macro_instructions2) { ++ // Test 64bit calculate instructions macros. 
++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ Label exit, error; ++ ++ __ li(a4, 0x17312); ++ __ li(a5, 0x1012131415161718); ++ __ li(a6, 0x51F4B764A26E7412); ++ __ li(a7, 0x7FFFFFFFFFFFFFFF); ++ __ li(t0, static_cast(0xFFFFFFFFFFFFF547)); ++ __ li(t1, static_cast(0xDF6B8F35A10E205C)); ++ __ li(t2, static_cast(0x81F25A87C4236841)); ++ __ li(t3, static_cast(0x8000000000000000)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ add_d(a2, a7, t1); ++ __ Add_d(a3, t1, a7); ++ __ Branch(&error, ne, a2, Operand(a3)); ++ __ Add_d(t4, t1, Operand(0x7FFFFFFFFFFFFFFF)); ++ __ Branch(&error, ne, a2, Operand(t4)); ++ __ addi_d(a2, a6, 0x800); ++ __ Add_d(a3, a6, Operand(0xFFFFFFFFFFFFF800)); ++ __ Branch(&error, ne, a2, Operand(a3)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Mul_d(a2, a5, a6); ++ __ Branch(&error, ne, a2, Operand(0xdbe6a8729a547fb0)); ++ __ Mul_d(a3, t0, Operand(0xDF6B8F35A10E205C)); ++ __ Branch(&error, ne, a3, Operand(0x57ad69f40f870584)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Mulh_d(a2, a5, a6); ++ __ Branch(&error, ne, a2, Operand(0x52514c6c6b54467)); ++ __ Mulh_d(a3, t0, Operand(0xDF6B8F35A10E205C)); ++ __ Branch(&error, ne, a3, Operand(0x15d)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Div_d(a2, t0, t1); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ __ Div_d(a3, t1, Operand(0x17312)); ++ __ Branch(&error, ne, a3, Operand(0xffffe985f631e6d9)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Div_du(a2, t0, t1); ++ __ Branch(&error, ne, a2, Operand(0x1)); ++ __ Div_du(a3, t1, 0x17312); ++ __ Branch(&error, ne, a3, Operand(0x9a22ffd3973d)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Mod_d(a2, a6, a4); ++ __ Branch(&error, ne, a2, Operand(0x13558)); ++ __ Mod_d(a3, t2, Operand(0xFFFFFFFFFFFFF547)); ++ __ Branch(&error, ne, a3, Operand(0xfffffffffffffb0a)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Mod_du(a2, a6, a4); ++ __ Branch(&error, ne, a2, Operand(0x13558)); ++ __ Mod_du(a3, t2, Operand(0xFFFFFFFFFFFFF547)); ++ __ Branch(&error, ne, a3, Operand(0x81f25a87c4236841)); ++ ++ __ li(a2, 0x31415926); ++ __ b(&exit); ++ ++ __ bind(&error); ++ __ li(a2, 0x666); ++ ++ __ bind(&exit); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ int64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); ++ ++ CHECK_EQ(0x31415926L, res); ++} ++ ++TEST(macro_instructions3) { ++ // Test 64bit calculate instructions macros. 
++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ Label exit, error; ++ ++ __ li(a4, 0x17312); ++ __ li(a5, 0x1012131415161718); ++ __ li(a6, 0x51F4B764A26E7412); ++ __ li(a7, 0x7FFFFFFFFFFFFFFF); ++ __ li(t0, static_cast(0xFFFFFFFFFFFFF547)); ++ __ li(t1, static_cast(0xDF6B8F35A10E205C)); ++ __ li(t2, static_cast(0x81F25A87C4236841)); ++ __ li(t3, static_cast(0x8000000000000000)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ And(a2, a4, a5); ++ __ Branch(&error, ne, a2, Operand(0x1310)); ++ __ And(a3, a6, Operand(0x7FFFFFFFFFFFFFFF)); ++ __ Branch(&error, ne, a3, Operand(0x51F4B764A26E7412)); ++ __ andi(a2, a6, 0xDCB); ++ __ And(a3, a6, Operand(0xDCB)); ++ __ Branch(&error, ne, a3, Operand(a2)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Or(a2, t0, t1); ++ __ Branch(&error, ne, a2, Operand(0xfffffffffffff55f)); ++ __ Or(a3, t2, Operand(0x8000000000000000)); ++ __ Branch(&error, ne, a3, Operand(0x81f25a87c4236841)); ++ __ ori(a2, a5, 0xDCB); ++ __ Or(a3, a5, Operand(0xDCB)); ++ __ Branch(&error, ne, a2, Operand(a3)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Orn(a2, t0, t1); ++ __ Branch(&error, ne, a2, Operand(0xffffffffffffffe7)); ++ __ Orn(a3, t2, Operand(0x81F25A87C4236841)); ++ __ Branch(&error, ne, a3, Operand(0xffffffffffffffff)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Xor(a2, t0, t1); ++ __ Branch(&error, ne, a2, Operand(0x209470ca5ef1d51b)); ++ __ Xor(a3, t2, Operand(0x8000000000000000)); ++ __ Branch(&error, ne, a3, Operand(0x1f25a87c4236841)); ++ __ Xor(a2, t2, Operand(0xDCB)); ++ __ Branch(&error, ne, a2, Operand(0x81f25a87c423658a)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Nor(a2, a4, a5); ++ __ Branch(&error, ne, a2, Operand(0xefedecebeae888e5)); ++ __ Nor(a3, a6, Operand(0x7FFFFFFFFFFFFFFF)); ++ __ Branch(&error, ne, a3, Operand(0x8000000000000000)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Andn(a2, a4, a5); ++ __ Branch(&error, ne, a2, Operand(0x16002)); ++ __ Andn(a3, a6, Operand(0x7FFFFFFFFFFFFFFF)); ++ __ Branch(&error, ne, a3, Operand(static_cast(0))); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Orn(a2, t0, t1); ++ __ Branch(&error, ne, a2, Operand(0xffffffffffffffe7)); ++ __ Orn(a3, t2, Operand(0x8000000000000000)); ++ __ Branch(&error, ne, a3, Operand(0xffffffffffffffff)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Neg(a2, a7); ++ __ Branch(&error, ne, a2, Operand(0x8000000000000001)); ++ __ Neg(a3, t0); ++ __ Branch(&error, ne, a3, Operand(0xAB9)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Slt(a2, a5, a6); ++ __ Branch(&error, ne, a2, Operand(0x1)); ++ __ Slt(a3, a7, Operand(0xFFFFFFFFFFFFF547)); ++ __ Branch(&error, ne, a3, Operand(static_cast(0))); ++ __ Slt(a3, a4, 0x800); ++ __ Branch(&error, ne, a3, Operand(static_cast(0))); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Sle(a2, a5, a6); ++ __ Branch(&error, ne, a2, Operand(0x1)); ++ __ Sle(a3, t0, Operand(0xFFFFFFFFFFFFF547)); ++ __ Branch(&error, ne, a3, Operand(static_cast(0x1))); ++ __ Sle(a2, a7, t0); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ ++ __ or_(a2, 
zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Sleu(a2, a5, a6); ++ __ Branch(&error, ne, a2, Operand(0x1)); ++ __ Sleu(a3, t0, Operand(0xFFFFFFFFFFFFF547)); ++ __ Branch(&error, ne, a3, Operand(static_cast(0x1))); ++ __ Sleu(a2, a7, t0); ++ __ Branch(&error, ne, a2, Operand(static_cast(0x1))); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Sge(a2, a5, a6); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ __ Sge(a3, t0, Operand(0xFFFFFFFFFFFFF547)); ++ __ Branch(&error, ne, a3, Operand(static_cast(0x1))); ++ __ Sge(a2, a7, t0); ++ __ Branch(&error, ne, a2, Operand(static_cast(0x1))); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Sgeu(a2, a5, a6); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ __ Sgeu(a3, t0, Operand(0xFFFFFFFFFFFFF547)); ++ __ Branch(&error, ne, a3, Operand(static_cast(0x1))); ++ __ Sgeu(a2, a7, t0); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Sgt(a2, a5, a6); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ __ Sgt(a3, t0, Operand(0xFFFFFFFFFFFFF547)); ++ __ Branch(&error, ne, a3, Operand(static_cast(0))); ++ __ Sgt(a2, a7, t0); ++ __ Branch(&error, ne, a2, Operand(static_cast(0x1))); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Sgtu(a2, a5, a6); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ __ Sgtu(a3, t0, Operand(0xFFFFFFFFFFFFF547)); ++ __ Branch(&error, ne, a3, Operand(static_cast(0))); ++ __ Sgtu(a2, a7, t0); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ ++ __ li(a2, 0x31415926); ++ __ b(&exit); ++ ++ __ bind(&error); ++ __ li(a2, 0x666); ++ ++ __ bind(&exit); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ int64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); ++ ++ CHECK_EQ(0x31415926L, res); ++} ++ ++TEST(Rotr_w) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ struct T { ++ int32_t input; ++ int32_t result_rotr_0; ++ int32_t result_rotr_4; ++ int32_t result_rotr_8; ++ int32_t result_rotr_12; ++ int32_t result_rotr_16; ++ int32_t result_rotr_20; ++ int32_t result_rotr_24; ++ int32_t result_rotr_28; ++ int32_t result_rotr_32; ++ int32_t result_rotri_0; ++ int32_t result_rotri_4; ++ int32_t result_rotri_8; ++ int32_t result_rotri_12; ++ int32_t result_rotri_16; ++ int32_t result_rotri_20; ++ int32_t result_rotri_24; ++ int32_t result_rotri_28; ++ int32_t result_rotri_32; ++ }; ++ T t; ++ ++ __ Ld_w(a4, MemOperand(a0, offsetof(T, input))); ++ ++ __ Rotr_w(a5, a4, 0); ++ __ Rotr_w(a6, a4, 0x04); ++ __ Rotr_w(a7, a4, 0x08); ++ __ Rotr_w(t0, a4, 0x0C); ++ __ Rotr_w(t1, a4, 0x10); ++ __ Rotr_w(t2, a4, -0x0C); ++ __ Rotr_w(t3, a4, -0x08); ++ __ Rotr_w(t4, a4, -0x04); ++ __ Rotr_w(t5, a4, 0x20); ++ __ St_w(a5, MemOperand(a0, offsetof(T, result_rotr_0))); ++ __ St_w(a6, MemOperand(a0, offsetof(T, result_rotr_4))); ++ __ St_w(a7, MemOperand(a0, offsetof(T, result_rotr_8))); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_rotr_12))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_rotr_16))); ++ __ St_w(t2, MemOperand(a0, offsetof(T, result_rotr_20))); ++ __ St_w(t3, 
MemOperand(a0, offsetof(T, result_rotr_24))); ++ __ St_w(t4, MemOperand(a0, offsetof(T, result_rotr_28))); ++ __ St_w(t5, MemOperand(a0, offsetof(T, result_rotr_32))); ++ ++ __ li(t5, 0); ++ __ Rotr_w(a5, a4, t5); ++ __ li(t5, 0x04); ++ __ Rotr_w(a6, a4, t5); ++ __ li(t5, 0x08); ++ __ Rotr_w(a7, a4, t5); ++ __ li(t5, 0x0C); ++ __ Rotr_w(t0, a4, t5); ++ __ li(t5, 0x10); ++ __ Rotr_w(t1, a4, t5); ++ __ li(t5, -0x0C); ++ __ Rotr_w(t2, a4, t5); ++ __ li(t5, -0x08); ++ __ Rotr_w(t3, a4, t5); ++ __ li(t5, -0x04); ++ __ Rotr_w(t4, a4, t5); ++ __ li(t5, 0x20); ++ __ Rotr_w(t5, a4, t5); ++ ++ __ St_w(a5, MemOperand(a0, offsetof(T, result_rotri_0))); ++ __ St_w(a6, MemOperand(a0, offsetof(T, result_rotri_4))); ++ __ St_w(a7, MemOperand(a0, offsetof(T, result_rotri_8))); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_rotri_12))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_rotri_16))); ++ __ St_w(t2, MemOperand(a0, offsetof(T, result_rotri_20))); ++ __ St_w(t3, MemOperand(a0, offsetof(T, result_rotri_24))); ++ __ St_w(t4, MemOperand(a0, offsetof(T, result_rotri_28))); ++ __ St_w(t5, MemOperand(a0, offsetof(T, result_rotri_32))); ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ t.input = 0x12345678; ++ f.Call(&t, 0, 0, 0, 0); ++ ++ CHECK_EQ(static_cast(0x12345678), t.result_rotr_0); ++ CHECK_EQ(static_cast(0x81234567), t.result_rotr_4); ++ CHECK_EQ(static_cast(0x78123456), t.result_rotr_8); ++ CHECK_EQ(static_cast(0x67812345), t.result_rotr_12); ++ CHECK_EQ(static_cast(0x56781234), t.result_rotr_16); ++ CHECK_EQ(static_cast(0x45678123), t.result_rotr_20); ++ CHECK_EQ(static_cast(0x34567812), t.result_rotr_24); ++ CHECK_EQ(static_cast(0x23456781), t.result_rotr_28); ++ CHECK_EQ(static_cast(0x12345678), t.result_rotr_32); ++ ++ CHECK_EQ(static_cast(0x12345678), t.result_rotri_0); ++ CHECK_EQ(static_cast(0x81234567), t.result_rotri_4); ++ CHECK_EQ(static_cast(0x78123456), t.result_rotri_8); ++ CHECK_EQ(static_cast(0x67812345), t.result_rotri_12); ++ CHECK_EQ(static_cast(0x56781234), t.result_rotri_16); ++ CHECK_EQ(static_cast(0x45678123), t.result_rotri_20); ++ CHECK_EQ(static_cast(0x34567812), t.result_rotri_24); ++ CHECK_EQ(static_cast(0x23456781), t.result_rotri_28); ++ CHECK_EQ(static_cast(0x12345678), t.result_rotri_32); ++} ++ ++TEST(Rotr_d) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ struct T { ++ int64_t input; ++ int64_t result_rotr_0; ++ int64_t result_rotr_8; ++ int64_t result_rotr_16; ++ int64_t result_rotr_24; ++ int64_t result_rotr_32; ++ int64_t result_rotr_40; ++ int64_t result_rotr_48; ++ int64_t result_rotr_56; ++ int64_t result_rotr_64; ++ int64_t result_rotri_0; ++ int64_t result_rotri_8; ++ int64_t result_rotri_16; ++ int64_t result_rotri_24; ++ int64_t result_rotri_32; ++ int64_t result_rotri_40; ++ int64_t result_rotri_48; ++ int64_t result_rotri_56; ++ int64_t result_rotri_64; ++ }; ++ T t; ++ ++ __ Ld_d(a4, MemOperand(a0, offsetof(T, input))); ++ ++ __ Rotr_d(a5, a4, 0); ++ __ Rotr_d(a6, a4, 0x08); ++ __ Rotr_d(a7, a4, 0x10); ++ __ Rotr_d(t0, a4, 0x18); ++ __ Rotr_d(t1, a4, 0x20); ++ __ Rotr_d(t2, a4, -0x18); ++ __ Rotr_d(t3, a4, -0x10); ++ __ Rotr_d(t4, a4, -0x08); ++ __ Rotr_d(t5, a4, 0x40); ++ __ St_d(a5, MemOperand(a0, offsetof(T, 
result_rotr_0))); ++ __ St_d(a6, MemOperand(a0, offsetof(T, result_rotr_8))); ++ __ St_d(a7, MemOperand(a0, offsetof(T, result_rotr_16))); ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_rotr_24))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_rotr_32))); ++ __ St_d(t2, MemOperand(a0, offsetof(T, result_rotr_40))); ++ __ St_d(t3, MemOperand(a0, offsetof(T, result_rotr_48))); ++ __ St_d(t4, MemOperand(a0, offsetof(T, result_rotr_56))); ++ __ St_d(t5, MemOperand(a0, offsetof(T, result_rotr_64))); ++ ++ __ li(t5, 0); ++ __ Rotr_d(a5, a4, t5); ++ __ li(t5, 0x08); ++ __ Rotr_d(a6, a4, t5); ++ __ li(t5, 0x10); ++ __ Rotr_d(a7, a4, t5); ++ __ li(t5, 0x18); ++ __ Rotr_d(t0, a4, t5); ++ __ li(t5, 0x20); ++ __ Rotr_d(t1, a4, t5); ++ __ li(t5, -0x18); ++ __ Rotr_d(t2, a4, t5); ++ __ li(t5, -0x10); ++ __ Rotr_d(t3, a4, t5); ++ __ li(t5, -0x08); ++ __ Rotr_d(t4, a4, t5); ++ __ li(t5, 0x40); ++ __ Rotr_d(t5, a4, t5); ++ ++ __ St_d(a5, MemOperand(a0, offsetof(T, result_rotri_0))); ++ __ St_d(a6, MemOperand(a0, offsetof(T, result_rotri_8))); ++ __ St_d(a7, MemOperand(a0, offsetof(T, result_rotri_16))); ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_rotri_24))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_rotri_32))); ++ __ St_d(t2, MemOperand(a0, offsetof(T, result_rotri_40))); ++ __ St_d(t3, MemOperand(a0, offsetof(T, result_rotri_48))); ++ __ St_d(t4, MemOperand(a0, offsetof(T, result_rotri_56))); ++ __ St_d(t5, MemOperand(a0, offsetof(T, result_rotri_64))); ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ t.input = 0x0123456789ABCDEF; ++ f.Call(&t, 0, 0, 0, 0); ++ ++ CHECK_EQ(static_cast(0x0123456789ABCDEF), t.result_rotr_0); ++ CHECK_EQ(static_cast(0xEF0123456789ABCD), t.result_rotr_8); ++ CHECK_EQ(static_cast(0xCDEF0123456789AB), t.result_rotr_16); ++ CHECK_EQ(static_cast(0xABCDEF0123456789), t.result_rotr_24); ++ CHECK_EQ(static_cast(0x89ABCDEF01234567), t.result_rotr_32); ++ CHECK_EQ(static_cast(0x6789ABCDEF012345), t.result_rotr_40); ++ CHECK_EQ(static_cast(0x456789ABCDEF0123), t.result_rotr_48); ++ CHECK_EQ(static_cast(0x23456789ABCDEF01), t.result_rotr_56); ++ CHECK_EQ(static_cast(0x0123456789ABCDEF), t.result_rotr_64); ++ ++ CHECK_EQ(static_cast(0x0123456789ABCDEF), t.result_rotri_0); ++ CHECK_EQ(static_cast(0xEF0123456789ABCD), t.result_rotri_8); ++ CHECK_EQ(static_cast(0xCDEF0123456789AB), t.result_rotri_16); ++ CHECK_EQ(static_cast(0xABCDEF0123456789), t.result_rotri_24); ++ CHECK_EQ(static_cast(0x89ABCDEF01234567), t.result_rotri_32); ++ CHECK_EQ(static_cast(0x6789ABCDEF012345), t.result_rotri_40); ++ CHECK_EQ(static_cast(0x456789ABCDEF0123), t.result_rotri_48); ++ CHECK_EQ(static_cast(0x23456789ABCDEF01), t.result_rotri_56); ++ CHECK_EQ(static_cast(0x0123456789ABCDEF), t.result_rotri_64); ++} ++ ++TEST(macro_instructions4) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ struct T { ++ double a; ++ float b; ++ double result_floor_a; ++ float result_floor_b; ++ double result_ceil_a; ++ float result_ceil_b; ++ double result_trunc_a; ++ float result_trunc_b; ++ double result_round_a; ++ float result_round_b; ++ }; ++ T t; ++ ++ const int kTableLength = 16; ++ ++ // clang-format off ++ double inputs_d[kTableLength] = { ++ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5, ++ 
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5, ++ 1.7976931348623157E+308, 6.27463370218383111104242366943E-307, ++ std::numeric_limits::max() - 0.1, ++ std::numeric_limits::infinity() ++ }; ++ float inputs_s[kTableLength] = { ++ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5, ++ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5, ++ 1.7976931348623157E+38, 6.27463370218383111104242366943E-37, ++ std::numeric_limits::lowest() + 0.6, ++ std::numeric_limits::infinity() ++ }; ++ float outputs_round_s[kTableLength] = { ++ 2.0, 3.0, 2.0, 3.0, 4.0, 4.0, ++ -2.0, -3.0, -2.0, -3.0, -4.0, -4.0, ++ 1.7976931348623157E+38, 0, ++ std::numeric_limits::lowest() + 1, ++ std::numeric_limits::infinity() ++ }; ++ double outputs_round_d[kTableLength] = { ++ 2.0, 3.0, 2.0, 3.0, 4.0, 4.0, ++ -2.0, -3.0, -2.0, -3.0, -4.0, -4.0, ++ 1.7976931348623157E+308, 0, ++ std::numeric_limits::max(), ++ std::numeric_limits::infinity() ++ }; ++ float outputs_trunc_s[kTableLength] = { ++ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, ++ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, ++ 1.7976931348623157E+38, 0, ++ std::numeric_limits::lowest() + 1, ++ std::numeric_limits::infinity() ++ }; ++ double outputs_trunc_d[kTableLength] = { ++ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, ++ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, ++ 1.7976931348623157E+308, 0, ++ std::numeric_limits::max() - 1, ++ std::numeric_limits::infinity() ++ }; ++ float outputs_ceil_s[kTableLength] = { ++ 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, ++ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, ++ 1.7976931348623157E38, 1, ++ std::numeric_limits::lowest() + 1, ++ std::numeric_limits::infinity() ++ }; ++ double outputs_ceil_d[kTableLength] = { ++ 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, ++ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, ++ 1.7976931348623157E308, 1, ++ std::numeric_limits::max(), ++ std::numeric_limits::infinity() ++ }; ++ float outputs_floor_s[kTableLength] = { ++ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, ++ -3.0, -3.0, -3.0, -4.0, -4.0, -4.0, ++ 1.7976931348623157E38, 0, ++ std::numeric_limits::lowest() + 1, ++ std::numeric_limits::infinity() ++ }; ++ double outputs_floor_d[kTableLength] = { ++ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, ++ -3.0, -3.0, -3.0, -4.0, -4.0, -4.0, ++ 1.7976931348623157E308, 0, ++ std::numeric_limits::max(), ++ std::numeric_limits::infinity() ++ }; ++ // clang-format on ++ ++ __ Fld_d(f8, MemOperand(a0, offsetof(T, a))); ++ __ Fld_s(f9, MemOperand(a0, offsetof(T, b))); ++ __ Floor_d(f10, f8); ++ __ Floor_s(f11, f9); ++ __ Fst_d(f10, MemOperand(a0, offsetof(T, result_floor_a))); ++ __ Fst_s(f11, MemOperand(a0, offsetof(T, result_floor_b))); ++ __ Ceil_d(f10, f8); ++ __ Ceil_s(f11, f9); ++ __ Fst_d(f10, MemOperand(a0, offsetof(T, result_ceil_a))); ++ __ Fst_s(f11, MemOperand(a0, offsetof(T, result_ceil_b))); ++ __ Trunc_d(f10, f8); ++ __ Trunc_s(f11, f9); ++ __ Fst_d(f10, MemOperand(a0, offsetof(T, result_trunc_a))); ++ __ Fst_s(f11, MemOperand(a0, offsetof(T, result_trunc_b))); ++ __ Round_d(f10, f8); ++ __ Round_s(f11, f9); ++ __ Fst_d(f10, MemOperand(a0, offsetof(T, result_round_a))); ++ __ Fst_s(f11, MemOperand(a0, offsetof(T, result_round_b))); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ for (int i = 0; i < kTableLength; i++) { ++ t.a = inputs_d[i]; ++ t.b = inputs_s[i]; ++ f.Call(&t, 0, 0, 0, 0); ++ CHECK_EQ(t.result_floor_a, outputs_floor_d[i]); ++ CHECK_EQ(t.result_floor_b, outputs_floor_s[i]); ++ CHECK_EQ(t.result_ceil_a, outputs_ceil_d[i]); ++ CHECK_EQ(t.result_ceil_b, outputs_ceil_s[i]); ++ 
CHECK_EQ(t.result_trunc_a, outputs_trunc_d[i]); ++ CHECK_EQ(t.result_trunc_b, outputs_trunc_s[i]); ++ CHECK_EQ(t.result_round_a, outputs_round_d[i]); ++ CHECK_EQ(t.result_round_b, outputs_round_s[i]); ++ } ++} ++ ++uint64_t run_ExtractBits(uint64_t source, int pos, int size, bool sign_extend) { ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ if (sign_extend) { ++ __ ExtractBits(t0, a0, a1, size, true); ++ } else { ++ __ ExtractBits(t0, a0, a1, size); ++ } ++ __ or_(a0, t0, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ uint64_t res = reinterpret_cast(f.Call(source, pos, 0, 0, 0)); ++ return res; ++} ++ ++TEST(ExtractBits) { ++ CcTest::InitializeVM(); ++ ++ struct TestCase { ++ uint64_t source; ++ int pos; ++ int size; ++ bool sign_extend; ++ uint64_t res; ++ }; ++ ++ // clang-format off ++ struct TestCase tc[] = { ++ //source, pos, size, sign_extend, res; ++ {0x800, 4, 8, false, 0x80}, ++ {0x800, 4, 8, true, 0xFFFFFFFFFFFFFF80}, ++ {0x800, 5, 8, true, 0x40}, ++ {0x40000, 3, 16, false, 0x8000}, ++ {0x40000, 3, 16, true, 0xFFFFFFFFFFFF8000}, ++ {0x40000, 4, 16, true, 0x4000}, ++ {0x200000000, 2, 32, false, 0x80000000}, ++ {0x200000000, 2, 32, true, 0xFFFFFFFF80000000}, ++ {0x200000000, 3, 32, true, 0x40000000}, ++ }; ++ // clang-format on ++ size_t nr_test_cases = sizeof(tc) / sizeof(TestCase); ++ for (size_t i = 0; i < nr_test_cases; ++i) { ++ uint64_t result = ++ run_ExtractBits(tc[i].source, tc[i].pos, tc[i].size, tc[i].sign_extend); ++ CHECK_EQ(tc[i].res, result); ++ } ++} ++ ++uint64_t run_InsertBits(uint64_t dest, uint64_t source, int pos, int size) { ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ __ InsertBits(a0, a1, a2, size); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ uint64_t res = reinterpret_cast(f.Call(dest, source, pos, 0, 0)); ++ return res; ++} ++ ++TEST(InsertBits) { ++ CcTest::InitializeVM(); ++ ++ struct TestCase { ++ uint64_t dest; ++ uint64_t source; ++ int pos; ++ int size; ++ uint64_t res; ++ }; ++ ++ // clang-format off ++ struct TestCase tc[] = { ++ //dest source, pos, size, res; ++ {0x11111111, 0x1234, 32, 16, 0x123411111111}, ++ {0x111111111111, 0xFFFFF, 24, 10, 0x1113FF111111}, ++ {0x1111111111111111, 0xFEDCBA, 16, 4, 0x11111111111A1111}, ++ }; ++ // clang-format on ++ size_t nr_test_cases = sizeof(tc) / sizeof(TestCase); ++ for (size_t i = 0; i < nr_test_cases; ++i) { ++ uint64_t result = ++ run_InsertBits(tc[i].dest, tc[i].source, tc[i].pos, tc[i].size); ++ CHECK_EQ(tc[i].res, result); ++ } ++} ++ ++TEST(Popcnt) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ struct TestCase { ++ uint32_t a; ++ uint64_t b; ++ int expected_a; ++ int expected_b; ++ int result_a; ++ int result_b; ++ }; ++ // clang-format off ++ struct TestCase tc[] = { ++ { 0x12345678, 0x1122334455667788, 
13, 26, 0, 0}, ++ { 0x1234, 0x123456, 5, 9, 0, 0}, ++ { 0xFFF00000, 0xFFFF000000000000, 12, 16, 0, 0}, ++ { 0xFF000012, 0xFFFF000000001234, 10, 21, 0, 0} ++ }; ++ // clang-format on ++ ++ __ Ld_w(t0, MemOperand(a0, offsetof(TestCase, a))); ++ __ Ld_d(t1, MemOperand(a0, offsetof(TestCase, b))); ++ __ Popcnt_w(t2, t0); ++ __ Popcnt_d(t3, t1); ++ __ St_w(t2, MemOperand(a0, offsetof(TestCase, result_a))); ++ __ St_w(t3, MemOperand(a0, offsetof(TestCase, result_b))); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ ++ size_t nr_test_cases = sizeof(tc) / sizeof(TestCase); ++ for (size_t i = 0; i < nr_test_cases; ++i) { ++ f.Call(&tc[i], 0, 0, 0, 0); ++ CHECK_EQ(tc[i].expected_a, tc[i].result_a); ++ CHECK_EQ(tc[i].expected_b, tc[i].result_b); ++ } ++} ++ ++#undef __ ++ ++} // namespace internal ++} // namespace v8 +diff --git a/src/3rdparty/chromium/v8/tools/dev/gm.py b/src/3rdparty/chromium/v8/tools/dev/gm.py +index 9d5cbf056a..0363af7148 100755 +--- a/src/3rdparty/chromium/v8/tools/dev/gm.py ++++ b/src/3rdparty/chromium/v8/tools/dev/gm.py +@@ -39,7 +39,7 @@ BUILD_TARGETS_ALL = ["all"] + + # All arches that this script understands. + ARCHES = ["ia32", "x64", "arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64", +- "s390", "s390x", "android_arm", "android_arm64"] ++ "s390", "s390x", "android_arm", "android_arm64", "la64"] + # Arches that get built/run when you don't specify any. + DEFAULT_ARCHES = ["ia32", "x64", "arm", "arm64"] + # Modes that this script understands. +@@ -246,7 +246,7 @@ class Config(object): + if self.arch == "android_arm": return "\nv8_target_cpu = \"arm\"" + if self.arch == "android_arm64": return "\nv8_target_cpu = \"arm64\"" + if self.arch in ("arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64", +- "s390", "s390x"): ++ "s390", "s390x", "la64"): + return "\nv8_target_cpu = \"%s\"" % self.arch + return "" + +-- +2.25.1 + diff --git a/0013-make-qtwebengine-can-be-compiled-for-loongarch64.patch b/0013-make-qtwebengine-can-be-compiled-for-loongarch64.patch new file mode 100644 index 0000000..8312cf9 --- /dev/null +++ b/0013-make-qtwebengine-can-be-compiled-for-loongarch64.patch @@ -0,0 +1,193 @@ +From d58954b4d1e1d1c9fcd7696ce033b045d2f532ab Mon Sep 17 00:00:00 2001 +From: CaoHuixiong +Date: Wed, 13 Jan 2021 11:55:03 +0000 +Subject: [PATCH 13/13] make qtwebengine can be compiled for loongarch64 + +--- + configure.pri | 3 +- + mkspecs/features/functions.prf | 1 + + src/3rdparty/chromium/DEPS | 28 +++++++++---------- + .../swiftshader/src/Reactor/BUILD.gn | 4 +-- + src/3rdparty/gn/tools/gn/args.cc | 3 ++ + src/3rdparty/gn/tools/gn/variables.cc | 1 + + src/3rdparty/gn/util/build_config.h | 12 ++++++++ + src/buildtools/config/linux.pri | 5 ++++ + 8 files changed, 40 insertions(+), 17 deletions(-) + +diff --git a/configure.pri b/configure.pri +index d3ba9b147..ded9bdbf0 100644 +--- a/configure.pri ++++ b/configure.pri +@@ -142,7 +142,8 @@ defineTest(qtConfTest_detectPlatform) { + defineTest(qtConfTest_detectArch) { + contains(QT_ARCH, "i386")|contains(QT_ARCH, "x86_64"): return(true) + contains(QT_ARCH, "arm")|contains(QT_ARCH, "arm64"): return(true) +- contains(QT_ARCH, "mips"): return(true) ++ contains(QT_ARCH, "mips")|contains(QT_ARCH, "mips64"): return(true) ++ contains(QT_ARCH, "loongarch")|contains(QT_ARCH, "loongarch64"): return(true) + qtLog("Architecture not supported.") + return(false) + } +diff --git 
a/mkspecs/features/functions.prf b/mkspecs/features/functions.prf +index d3ceb4c5e..674b97fa4 100644 +--- a/mkspecs/features/functions.prf ++++ b/mkspecs/features/functions.prf +@@ -106,6 +106,7 @@ defineReplace(gnArch) { + contains(qtArch, "arm64"): return(arm64) + contains(qtArch, "mips"): return(mipsel) + contains(qtArch, "mips64"): return(mips64el) ++ contains(qtArch, "loongarch64"): return(la64) + return(unknown) + } + +diff --git a/src/3rdparty/chromium/DEPS b/src/3rdparty/chromium/DEPS +index 517526264..09d10d143 100644 +--- a/src/3rdparty/chromium/DEPS ++++ b/src/3rdparty/chromium/DEPS +@@ -85,7 +85,7 @@ vars = { + + # Check out and download nacl by default. This can be disabled e.g. with + # custom_vars. +- 'checkout_nacl': False, ++ 'checkout_nacl': true, + + # By default, do not check out src-internal. This can be overridden e.g. with + # custom_vars. +@@ -3582,19 +3582,19 @@ hooks = [ + 'src/tools', + ], + }, +-# { +-# # Verify that we have the right GN binary and force-install it if we +-# # don't, in order to work around crbug.com/944367. +-# # TODO(crbug.com/944667) Get rid of this when cipd is ensuring we +-# # have the right binary more carefully and we no longer need this. +-# 'name': 'ensure_gn_version', +-# 'pattern': '.', +-# 'action': [ +-# 'python', +-# 'src/buildtools/ensure_gn_version.py', +-# Var('gn_version') +-# ], +-# }, ++ { ++ # Verify that we have the right GN binary and force-install it if we ++ # don't, in order to work around crbug.com/944367. ++ # TODO(crbug.com/944667) Get rid of this when cipd is ensuring we ++ # have the right binary more carefully and we no longer need this. ++ 'name': 'ensure_gn_version', ++ 'pattern': '.', ++ 'action': [ ++ 'python', ++ 'src/buildtools/ensure_gn_version.py', ++ Var('gn_version') ++ ], ++ }, + { + # This downloads binaries for Native Client's newlib toolchain. + # Done in lieu of building the toolchain from scratch as it can take +diff --git a/src/3rdparty/chromium/third_party/swiftshader/src/Reactor/BUILD.gn b/src/3rdparty/chromium/third_party/swiftshader/src/Reactor/BUILD.gn +index 7d0e33cc2..403d53aaa 100644 +--- a/src/3rdparty/chromium/third_party/swiftshader/src/Reactor/BUILD.gn ++++ b/src/3rdparty/chromium/third_party/swiftshader/src/Reactor/BUILD.gn +@@ -18,8 +18,8 @@ declare_args() { + # Subzero produces smaller binaries, but doesn't support ARM64, MIPS64, and + # PPC64. 
+ use_swiftshader_with_subzero = +- current_cpu != "arm64" && current_cpu != "mips64el" && current_cpu != "ppc64" && current_cpu != "la64" +- supports_llvm = is_linux || is_fuchsia || is_win || is_android || is_mac ++ current_cpu != "arm64" && current_cpu != "mips64el" && current_cpu != "ppc64" ++ supports_llvm = (is_linux || is_fuchsia || is_win || is_android || is_mac) && current_cpu != "la64" + } + + config("swiftshader_reactor_private_config") { +diff --git a/src/3rdparty/gn/tools/gn/args.cc b/src/3rdparty/gn/tools/gn/args.cc +index 802c3731d..748f1ff3e 100644 +--- a/src/3rdparty/gn/tools/gn/args.cc ++++ b/src/3rdparty/gn/tools/gn/args.cc +@@ -327,6 +327,7 @@ void Args::SetSystemVarsLocked(Scope* dest) const { + static const char kArm64[] = "arm64"; + static const char kMips[] = "mipsel"; + static const char kMips64[] = "mips64el"; ++ static const char kLa64[] = "la64"; + static const char kS390X[] = "s390x"; + static const char kPPC64[] = "ppc64"; + const char* arch = nullptr; +@@ -346,6 +347,8 @@ void Args::SetSystemVarsLocked(Scope* dest) const { + arch = kMips; + else if (os_arch == "mips64") + arch = kMips64; ++ else if (os_arch == "loongarch64") ++ arch = kLa64; + else if (os_arch == "s390x") + arch = kS390X; + else if (os_arch == "ppc64" || os_arch == "ppc64le") +diff --git a/src/3rdparty/gn/tools/gn/variables.cc b/src/3rdparty/gn/tools/gn/variables.cc +index ff6d45cb6..771d7b04c 100644 +--- a/src/3rdparty/gn/tools/gn/variables.cc ++++ b/src/3rdparty/gn/tools/gn/variables.cc +@@ -111,6 +111,7 @@ Possible values + - "arm" + - "arm64" + - "mipsel" ++ - "la64" + )"; + + const char kTargetName[] = "target_name"; +diff --git a/src/3rdparty/gn/util/build_config.h b/src/3rdparty/gn/util/build_config.h +index addd7cfb0..14c0dab42 100644 +--- a/src/3rdparty/gn/util/build_config.h ++++ b/src/3rdparty/gn/util/build_config.h +@@ -172,6 +172,18 @@ + #define ARCH_CPU_32_BITS 1 + #define ARCH_CPU_BIG_ENDIAN 1 + #endif ++#elif defined(__loongarch__) ++#if defined(__LP64__) ++#define ARCH_CPU_LOONGARCH_FAMILY 1 ++#define ARCH_CPU_LA64 1 ++#define ARCH_CPU_64_BITS 1 ++#define ARCH_CPU_LITTLE_ENDIAN 1 ++#else ++#define ARCH_CPU_LOONGARCH_FAMILY 1 ++#define ARCH_CPU_LA 1 ++#define ARCH_CPU_32_BITS 1 ++#define ARCH_CPU_LITTLE_ENDIAN 1 ++#endif + #else + #error Please add support for your architecture in build_config.h + #endif +diff --git a/src/buildtools/config/linux.pri b/src/buildtools/config/linux.pri +index 56c18bdb5..d48ceec2b 100644 +--- a/src/buildtools/config/linux.pri ++++ b/src/buildtools/config/linux.pri +@@ -116,18 +116,26 @@ contains(QT_ARCH, "mips") { + equals(MARCH, "mips32r6"): gn_args += mips_arch_variant=\"r6\" + else: equals(MARCH, "mips32r2"): gn_args += mips_arch_variant=\"r2\" + else: equals(MARCH, "mips32"): gn_args += mips_arch_variant=\"r1\" ++ else: gn_args += mips_arch_variant=\"loongson3a\" + } else { + contains(QMAKE_CFLAGS, "mips32r6"): gn_args += mips_arch_variant=\"r6\" + else: contains(QMAKE_CFLAGS, "mips32r2"): gn_args += mips_arch_variant=\"r2\" + else: contains(QMAKE_CFLAGS, "mips32"): gn_args += mips_arch_variant=\"r1\" ++ else: gn_args += mips_arch_variant=\"loongson3a\" + } + + contains(QMAKE_CFLAGS, "-mmsa"): gn_args += mips_use_msa=true + + contains(QMAKE_CFLAGS, "-mdsp2"): gn_args += mips_dsp_rev=2 + else: contains(QMAKE_CFLAGS, "-mdsp"): gn_args += mips_dsp_rev=1 ++ DEFINES += _MIPS_ARCH_LOONGSON3A + } + ++contains(QT_ARCH, "loongarch64") { ++ DEFINES += ARCH_CPU_LA64 ++ gn_args += debug_devtools=false ++} ++ + host_build { + gn_args += 
custom_toolchain=\"$$QTWEBENGINE_OUT_ROOT/src/toolchain:host\" + GN_HOST_CPU = $$gnArch($$QT_ARCH) +-- +2.20.1 + diff --git a/0014-fix-compile-errors-for-mips64el.patch b/0014-fix-compile-errors-for-mips64el.patch new file mode 100644 index 0000000..e3bb59e --- /dev/null +++ b/0014-fix-compile-errors-for-mips64el.patch @@ -0,0 +1,41 @@ +diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_ptrace_dumper.cc b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_ptrace_dumper.cc +index 88de7ae..66b7f57 100644 +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_ptrace_dumper.cc ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_ptrace_dumper.cc +@@ -267,7 +267,7 @@ bool LinuxPtraceDumper::GetThreadInfoByIndex(size_t index, ThreadInfo* info) { + } + #endif + +-#if defined(__mips__) ++#if defined(__mips__) && !defined(__mips64) + sys_ptrace(PTRACE_PEEKUSER, tid, + reinterpret_cast(PC), &info->mcontext.pc); + sys_ptrace(PTRACE_PEEKUSER, tid, +diff --git a/src/3rdparty/chromium/sandbox/linux/bpf_dsl/linux_syscall_ranges.h b/src/3rdparty/chromium/sandbox/linux/bpf_dsl/linux_syscall_ranges.h +index 1c896b5..d2fc2cb 100644 +--- a/src/3rdparty/chromium/sandbox/linux/bpf_dsl/linux_syscall_ranges.h ++++ b/src/3rdparty/chromium/sandbox/linux/bpf_dsl/linux_syscall_ranges.h +@@ -37,7 +37,11 @@ + + #elif defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS) + ++#if defined(_MIPS_ARCH_LOONGSON3A) ++#include ++#else + #include // for __NR_O32_Linux and __NR_O32_Linux_syscalls ++#endif + #define MIN_SYSCALL __NR_O32_Linux + #define MAX_PUBLIC_SYSCALL (MIN_SYSCALL + __NR_O32_Linux_syscalls) + #define MAX_SYSCALL MAX_PUBLIC_SYSCALL +@@ -51,7 +55,11 @@ + + #elif (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_64_BITS)) + ++#if defined(_MIPS_ARCH_LOONGSON3A) ++#include ++#else + #include // for __NR_64_Linux and __NR_64_Linux_syscalls ++#endif + #define MIN_SYSCALL __NR_64_Linux + #define MAX_PUBLIC_SYSCALL (MIN_SYSCALL + __NR_64_Linux_syscalls) + #define MAX_SYSCALL MAX_PUBLIC_SYSCALL diff --git a/0015-fix-compiler-internal-error-for-loongarch64.patch b/0015-fix-compiler-internal-error-for-loongarch64.patch new file mode 100644 index 0000000..a3300ed --- /dev/null +++ b/0015-fix-compiler-internal-error-for-loongarch64.patch @@ -0,0 +1,25 @@ +From 7608c4395dd82a74ec833f43927f9fd442c19b4f Mon Sep 17 00:00:00 2001 +From: loongson +Date: Fri, 14 May 2021 16:37:20 +0800 +Subject: [PATCH] fix compiler internal error for loongarch64 + +--- + .../third_party/skia/third_party/skcms/src/Transform_inl.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/3rdparty/chromium/third_party/skia/third_party/skcms/src/Transform_inl.h b/src/3rdparty/chromium/third_party/skia/third_party/skcms/src/Transform_inl.h +index c4b3122..80b3a28 100644 +--- a/src/3rdparty/chromium/third_party/skia/third_party/skcms/src/Transform_inl.h ++++ b/src/3rdparty/chromium/third_party/skia/third_party/skcms/src/Transform_inl.h +@@ -685,7 +685,7 @@ SI void sample_clut_16(const skcms_A2B* a2b, I32 ix, F* r, F* g, F* b) { + // GCC 7.2.0 hits an internal compiler error with -finline-functions (or -O3) + // when targeting MIPS 64, i386, or s390x, I think attempting to inline clut() into exec_ops(). 
+ #if 1 && defined(__GNUC__) && !defined(__clang__) \ +- && (defined(__mips64) || defined(__i386) || defined(__s390x__)) ++ && (defined(__mips64) || defined(__loongarch64) || defined(__i386) || defined(__s390x__)) + #define MAYBE_NOINLINE __attribute__((noinline)) + #else + #define MAYBE_NOINLINE +-- +2.20.1 + diff --git a/0016-fix-compile-error-for-loongarch64.patch b/0016-fix-compile-error-for-loongarch64.patch new file mode 100644 index 0000000..400a0bf --- /dev/null +++ b/0016-fix-compile-error-for-loongarch64.patch @@ -0,0 +1,54 @@ +Description: + TODO: Put a short summary on the line above and replace this paragraph + with a longer explanation of this change. Complete the meta-information + with other relevant fields (see below for details). To make it easier, the + information below has been extracted from the changelog. Adjust it or drop + it. + . + qtwebengine-opensource-src (5.15.2+dfsg-lnd.3) unstable; urgency=medium + . + * Fix some error because update tools. +Author: Jianjun Han + +--- +The information above should follow the Patch Tagging Guidelines, please +checkout http://dep.debian.net/deps/dep3/ to learn about the format. Here +are templates for supplementary fields that you might want to add: + +Origin: , +Bug: +Bug-Debian: https://bugs.debian.org/ +Bug-Ubuntu: https://launchpad.net/bugs/ +Forwarded: +Reviewed-By: +Last-Update: 2022-02-28 + +--- qtwebengine-opensource-src-5.15.2+dfsg.orig/src/3rdparty/chromium/third_party/lss/linux_syscall_support.h ++++ qtwebengine-opensource-src-5.15.2+dfsg/src/3rdparty/chromium/third_party/lss/linux_syscall_support.h +@@ -2826,7 +2826,7 @@ struct kernel_statfs { + #define LSS_BODY(type,name,args...) \ + register int64_t __res_a0 __asm__("a0"); \ + int64_t __res; \ +- __asm__ __volatile__ ("li $a7, %1\n" \ ++ __asm__ __volatile__ ("li.w $a7, %1\n" \ + "syscall 0x0\n" \ + : "=r"(__res_a0) \ + : "i"(__NR_##name) , ## args \ +@@ -2906,7 +2906,7 @@ struct kernel_statfs { + * %a3 = newtls, + * %a4 = child_tidptr) + */ +- "li a7, %8\n" ++ "li.w a7, %8\n" + "syscall 0x0\n" + + /* if (%a0 != 0) +@@ -2923,7 +2923,7 @@ struct kernel_statfs { + + /* Call _exit(%a0). + */ +- "li $a7, %9\n" ++ "li.w $a7, %9\n" + "syscall 0x0\n" + "1:\n" + : "=r" (__res) diff --git a/mipsel-code-range-size.patch b/mipsel-code-range-size.patch new file mode 100644 index 0000000..ab8fb5c --- /dev/null +++ b/mipsel-code-range-size.patch @@ -0,0 +1,19 @@ +Description: reduce code range size on mipsel + This fixes OOM error when running mksnapshot. +Author: Dmitry Shachnev +Forwarded: no +Last-Update: 2020-09-28 + +Index: qtwebengine-everywhere-src-5.15.2/src/3rdparty/chromium/v8/src/common/globals.h +=================================================================== +--- qtwebengine-everywhere-src-5.15.2.orig/src/3rdparty/chromium/v8/src/common/globals.h ++++ qtwebengine-everywhere-src-5.15.2/src/3rdparty/chromium/v8/src/common/globals.h +@@ -224,7 +224,7 @@ constexpr size_t kMinimumCodeRangeSize = + constexpr size_t kMinExpectedOSPageSize = 64 * KB; // OS page on PPC Linux + #elif V8_TARGET_ARCH_MIPS + constexpr bool kPlatformRequiresCodeRange = false; +-constexpr size_t kMaximalCodeRangeSize = 2048LL * MB; ++constexpr size_t kMaximalCodeRangeSize = 1024LL * MB; + constexpr size_t kMinimumCodeRangeSize = 0 * MB; + constexpr size_t kMinExpectedOSPageSize = 4 * KB; // OS page. 
+ #else
diff --git a/mipsel-link-atomic.patch b/mipsel-link-atomic.patch
new file mode 100644
index 0000000..e2af7e0
--- /dev/null
+++ b/mipsel-link-atomic.patch
@@ -0,0 +1,26 @@
+Description: add -latomic to dependencies of base component on mipsel
+ This is needed to fix build of qwebengine_convert_dict, which uses the
+ generated convert_dict.pri to get the list of libraries.
+ .
+ qmake adds its own -latomic, but it comes before the list of static
+ libraries, so it does not help. We need -latomic after that list.
+ .
+ The error was:
+ .
+ /usr/bin/ld.bfd: <>/src/core/release/obj/base/base/base_jumbo_17.o: undefined reference to symbol '__atomic_load_8@@LIBATOMIC_1.0'
+ /usr/bin/ld.bfd: /usr/lib/gcc/mipsel-linux-gnu/9/libatomic.so: error adding symbols: DSO missing from command line
+Author: Dmitry Shachnev
+Forwarded: no
+Last-Update: 2020-03-20
+
+--- a/src/3rdparty/chromium/base/BUILD.gn
++++ b/src/3rdparty/chromium/base/BUILD.gn
+@@ -1325,7 +1325,7 @@ jumbo_component("base") {
+ # Needed for <atomic> if using newer C++ library than sysroot, except if
+ # building inside the cros_sdk environment - use host_toolchain as a
+ # more robust check for this.
+- if (!use_sysroot && (is_android || (is_linux && !is_chromecast && !use_qt)) &&
++ if (!use_sysroot && (is_android || (is_linux && !is_chromecast && !use_qt) || current_cpu == "mipsel") &&
+ host_toolchain != "//build/toolchain/cros:host") {
+ libs += [ "atomic" ]
+ }
diff --git a/mipsel-linux-5.patch b/mipsel-linux-5.patch
new file mode 100644
index 0000000..f362453
--- /dev/null
+++ b/mipsel-linux-5.patch
@@ -0,0 +1,43 @@
+Description: fix mipsel build with Linux ≥ 5.0
+ Linux 5.0 switched to generated system call table files, which needs some
+ changes in Chromium code. See this commit:
+ https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=99bf73ebf9c4193d
+Author: Dmitry Shachnev
+Forwarded: not-yet
+Last-Update: 2020-03-02
+
+--- a/src/3rdparty/chromium/sandbox/linux/bpf_dsl/linux_syscall_ranges.h
++++ b/src/3rdparty/chromium/sandbox/linux/bpf_dsl/linux_syscall_ranges.h
+@@ -37,14 +37,14 @@
+
+ #elif defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)
+
+-#include <asm/unistd.h> // for __NR_O32_Linux and __NR_Linux_syscalls
++#include <asm/unistd.h> // for __NR_O32_Linux and __NR_O32_Linux_syscalls
+ #define MIN_SYSCALL __NR_O32_Linux
+-#define MAX_PUBLIC_SYSCALL (MIN_SYSCALL + __NR_Linux_syscalls)
++#define MAX_PUBLIC_SYSCALL (MIN_SYSCALL + __NR_O32_Linux_syscalls)
+ #define MAX_SYSCALL MAX_PUBLIC_SYSCALL
+
+ #elif defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_64_BITS)
+
+-#include <asm/unistd.h> // for __NR_64_Linux and __NR_64_Linux_syscalls
++#include <asm/unistd.h> // for __NR_64_Linux and __NR_64_Linux_syscalls
+ #define MIN_SYSCALL __NR_64_Linux
+ #define MAX_PUBLIC_SYSCALL (MIN_SYSCALL + __NR_64_Linux_syscalls)
+ #define MAX_SYSCALL MAX_PUBLIC_SYSCALL
+--- a/src/3rdparty/chromium/sandbox/linux/bpf_dsl/syscall_set.cc
++++ b/src/3rdparty/chromium/sandbox/linux/bpf_dsl/syscall_set.cc
+@@ -17,10 +17,10 @@ namespace {
+
+ #if defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)
+ // This is true for Mips O32 ABI.
+-static_assert(MIN_SYSCALL == __NR_Linux, "min syscall number should be 4000");
++static_assert(MIN_SYSCALL == __NR_O32_Linux, "min syscall number should be 4000");
+ #elif defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_64_BITS)
+ // This is true for MIPS N64 ABI.
+-static_assert(MIN_SYSCALL == __NR_Linux, "min syscall number should be 5000"); ++static_assert(MIN_SYSCALL == __NR_64_Linux, "min syscall number should be 5000"); + #else + // This true for supported architectures (Intel and ARM EABI). + static_assert(MIN_SYSCALL == 0u, diff --git a/mipsel-no-dav1d.patch b/mipsel-no-dav1d.patch new file mode 100644 index 0000000..4cacbde --- /dev/null +++ b/mipsel-no-dav1d.patch @@ -0,0 +1,17 @@ +Description: disable dav1d support on mipsel + dav1d does not support MIPS, and there is no config directory for it. +Author: Dmitry Shachnev +Forwarded: no +Last-Update: 2020-03-17 + +--- a/src/3rdparty/chromium/media/media_options.gni ++++ b/src/3rdparty/chromium/media/media_options.gni +@@ -93,7 +93,7 @@ declare_args() { + # are combined and we could override more logging than expected. + enable_logging_override = !use_jumbo_build && is_chromecast + +- enable_dav1d_decoder = !is_android && !is_ios ++ enable_dav1d_decoder = !is_android && !is_ios && target_cpu != "mips64el" && target_cpu != "mipsel" + + # Enable browser managed persistent metadata storage for EME persistent + # session and persistent usage record session. diff --git a/mipsel-ptrace-include.patch b/mipsel-ptrace-include.patch new file mode 100644 index 0000000..7aab167 --- /dev/null +++ b/mipsel-ptrace-include.patch @@ -0,0 +1,20 @@ +Description: on mipsel PTRACE_GET_THREAD_AREA is defined in asm/ptrace.h +Author: Dmitry Shachnev +Forwarded: no +Last-Update: 2020-03-17 + +--- a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc ++++ b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc +@@ -41,8 +41,12 @@ + // the one in Ubuntu 16.04 LTS) is missing PTRACE_GET_THREAD_AREA. + // asm/ptrace-abi.h doesn't exist on arm32 and PTRACE_GET_THREAD_AREA isn't + // defined on aarch64, so don't try to include this on those platforms. 
++#if defined(__mips__) ++#include ++#else + #include + #endif ++#endif + #endif // !OS_NACL_NONSFI + + #if defined(OS_ANDROID) diff --git a/qt5-qtwebengine.spec b/qt5-qtwebengine.spec index ffede36..421381a 100644 --- a/qt5-qtwebengine.spec +++ b/qt5-qtwebengine.spec @@ -1,5 +1,4 @@ -%define anolis_release .0.2 -Excludearch:loongarch64 +%define anolis_release .0.3 %global qt_module qtwebengine %global _hardened_build 1 @@ -105,12 +104,44 @@ Patch26: qtwebengine-everywhere-5.13.2-use-python2.patch Patch27: qtwebengine-gcc11.patch Patch100: 0001-add-pagesize-64k-support.patch ## Upstream patches: - -%if 0%{?fedora} || 0%{?epel} > 7 +Patch900:run-unbundling-script.patch +Patch901:system-lcms2.patch +Patch902:system-nspr-prtime.patch +Patch903:system-icu-utf.patch +Patch904:verbose-gn-bootstrap.patch +Patch905:mipsel-linux-5.patch +Patch906:mipsel-ptrace-include.patch +Patch907:mipsel-no-dav1d.patch +Patch908:mipsel-link-atomic.patch +Patch909:sandbox-time64-syscalls.patch +Patch910:mipsel-code-range-size.patch + +Patch1001: 0001-port-chromium_qt-to-loongarch64.patch +Patch1002: 0002-fix-third_party-for-loongarch64.patch +Patch1003: 0003-port-breakpad-for-loongarch64.patch +Patch1004: 0004-port-ffmpeg-to-loongarch64-for-chromium.patch +Patch1005: 0005-port-ffmpeg-to-loongarch64-for-chromium-add-la64-rel.patch +Patch1006: 0006-fix-third_party-for-loongarch64-add-files-for-la64.patch +Patch1007: 0007-port-icu-for-loongarch64.patch +Patch1008: 0008-port-lss-for-loongarch64.patch +Patch1009: 0009-port-pdfium-for-loongarch64.patch +Patch1010: 0010-port-swiftshader-for-loongarch64.patch +Patch1011: 0011-port-webrtc-for-loongarch64.patch +Patch1012: 0012-port-v8-for-loongarch64.patch +Patch1013: 0013-make-qtwebengine-can-be-compiled-for-loongarch64.patch +Patch1014: 0014-fix-compile-errors-for-mips64el.patch +Patch1015: 0015-fix-compiler-internal-error-for-loongarch64.patch +Patch1016: 0016-fix-compile-error-for-loongarch64.patch +Patch1100: 0001-fix-loongarch64-build-error.patch +Patch1101: 0001-add-sw_64-support.patch +Patch1102: 0001-fix-sw_64-build-error.patch + +#%if 0%{?fedora} || 0%{?epel} > 7 # handled by qt5-srpm-macros, which defines %%qt5_qtwebengine_arches -ExclusiveArch: %{qt5_qtwebengine_arches} -%endif +#ExclusiveArch: %{qt5_qtwebengine_arches} +#%endif +BuildRequires: ffmpeg-devel BuildRequires: qt5-qtbase-devel BuildRequires: qt5-qtbase-private-devel # TODO: check of = is really needed or if >= would be good enough -- rex @@ -384,7 +415,7 @@ mv pulse src/3rdparty/chromium/ pushd src/3rdparty/chromium popd - +%ifnarch loongarch64 %patch0 -p1 -b .linux-pri %if 0%{?use_system_libicu} %patch1 -p1 -b .no-icudtl-dat @@ -405,6 +436,42 @@ popd %patch26 -p1 -b .use-python2 %patch27 -p1 -b .gcc11 %patch100 -p1 +%endif +%ifarch loongarch64 +%patch900 -p1 +%patch901 -p1 +%patch902 -p1 +%patch903 -p1 +%patch904 -p1 +%patch905 -p1 +%patch906 -p1 +%patch907 -p1 +%patch908 -p1 +%patch909 -p1 +%patch910 -p1 +%patch1001 -p1 +%patch1002 -p1 +%patch1003 -p1 +##%patch1004 -p1 --fuzz=0 +%patch1005 -p1 +%patch1006 -p1 +%patch1007 -p1 +%patch1008 -p1 +%patch1009 -p1 +%patch1010 -p1 +%patch1011 -p1 +%patch1012 -p1 +%patch1013 -p1 +%patch1014 -p1 +%patch1015 -p1 +%patch1016 -p1 +%patch1100 -p1 +%patch26 -p1 +%endif +%ifarch sw_64 +%patch1101 -p1 +%patch1102 -p1 +%endif # the xkbcommon config/feature was renamed in 5.12, so need to adjust QT_CONFIG references # when building on older Qt releases @@ -468,7 +535,15 @@ export NINJA_PATH=%{__ninja} %{qmake_qt5} \ %{?debug_config:CONFIG+="%{debug_config}}" \ 
+%ifarch loongarch64 + CONFIG+="link_pulseaudio use_system_ffmpeg=true" \ +%endif +%ifarch sw_64 + CONFIG+="link_pulseaudio use_gold_linker use_system_ffmpeg=true" \ +%endif +%ifarch x86_64 aarch64 i686 CONFIG+="link_pulseaudio use_gold_linker" \ +%endif %{?use_system_libicu:QMAKE_EXTRA_ARGS+="-system-webengine-icu"} \ QMAKE_EXTRA_ARGS+="-webengine-kerberos" \ . @@ -635,6 +710,9 @@ done %changelog +* Mon Aug 18 2025 zhuhongbo - 5.15.2-1.04.0.3 +- feat: build qt5-qtwebengine arch loongarch64 + * Fri Jan 12 2024 yangxianzhao - 5.15.2-1.04.0.2 - rebuild for qt5-qtbase diff --git a/run-unbundling-script.patch b/run-unbundling-script.patch new file mode 100644 index 0000000..c2cc249 --- /dev/null +++ b/run-unbundling-script.patch @@ -0,0 +1,21 @@ +Description: unbundle yasm using the unbundling script +Author: Kevin Kofler +Origin: Fedora, https://src.fedoraproject.org/rpms/qt5-qtwebengine/blob/master/f/qtwebengine-everywhere-src-5.10.0-linux-pri.patch +Forwarded: not-needed +Reviewed-by: Sandro Knauß +Last-Update: 2020-03-02 +--- +This patch header follows DEP-3: http://dep.debian.net/deps/dep3/ +--- a/src/buildtools/config/linux.pri ++++ b/src/buildtools/config/linux.pri +@@ -176,3 +176,10 @@ host_build { + } + gn_args += use_glib=false + } ++ ++# run the unbundling script Chromium provides ++CHROMIUM_SRC_DIR = "$$QTWEBENGINE_ROOT/$$getChromiumSrcDir()" ++R_G_F_PY = "$$CHROMIUM_SRC_DIR/build/linux/unbundle/replace_gn_files.py" ++R_G_F_PY_ARGS = "--system-libraries yasm" ++log("Running python2 $$R_G_F_PY $$R_G_F_PY_ARGS$${EOL}") ++!system("python2 $$R_G_F_PY $$R_G_F_PY_ARGS"): error("-- unbundling failed") diff --git a/sandbox-time64-syscalls.patch b/sandbox-time64-syscalls.patch new file mode 100644 index 0000000..6a98dd4 --- /dev/null +++ b/sandbox-time64-syscalls.patch @@ -0,0 +1,89 @@ +Description: fix seccomp-bpf failures in syscalls 0403, 0407 + glibc ≥ 2.31 uses these syscalls on 32-bit platforms: + . 
+ - https://sourceware.org/git/?p=glibc.git;a=commit;h=2e44b10b42d68d98 + - https://sourceware.org/git/?p=glibc.git;a=commit;h=ec138c67cbda8b58 +Author: Andreas Müller +Forwarded: no +Last-Update: 2020-09-02 + +--- a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/baseline_policy.cc ++++ b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/baseline_policy.cc +@@ -148,7 +148,14 @@ ResultExpr EvaluateSyscallImpl(int fs_de + return Allow(); + #endif + +- if (sysno == __NR_clock_gettime || sysno == __NR_clock_nanosleep) { ++ if (sysno == __NR_clock_gettime || sysno == __NR_clock_nanosleep ++#if defined(__NR_clock_gettime64) ++ || sysno == __NR_clock_gettime64 ++#endif ++#if defined(__NR_clock_nanosleep_time64) ++ || sysno == __NR_clock_nanosleep_time64 ++#endif ++ ) { + return RestrictClockID(); + } + +--- a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc ++++ b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc +@@ -60,6 +60,12 @@ class RestrictClockIdPolicy : public bpf + case __NR_clock_gettime: + case __NR_clock_getres: + case __NR_clock_nanosleep: ++#if defined(__NR_clock_nanosleep_time64) ++ case __NR_clock_nanosleep_time64: ++#endif ++#if defined(__NR_clock_gettime64) ++ case __NR_clock_gettime64: ++#endif + return RestrictClockID(); + default: + return Allow(); +--- a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc ++++ b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc +@@ -39,6 +39,12 @@ bool SyscallSets::IsAllowedGettime(int s + // filtered by RestrictClokID(). + case __NR_clock_gettime: // Parameters filtered by RestrictClockID(). + case __NR_clock_nanosleep: // Parameters filtered by RestrictClockID(). ++#if defined(__NR_clock_gettime64) ++ case __NR_clock_gettime64: // Parameters filtered by RestrictClockID(). ++#endif ++#if defined(__NR_clock_nanosleep_time64) ++ case __NR_clock_nanosleep_time64: // Parameters filtered by RestrictClockID(). ++#endif + case __NR_clock_settime: // Privileged. + #if defined(__i386__) || \ + (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) +--- a/src/3rdparty/chromium/sandbox/linux/system_headers/arm_linux_syscalls.h ++++ b/src/3rdparty/chromium/sandbox/linux/system_headers/arm_linux_syscalls.h +@@ -1385,6 +1385,14 @@ + #define __NR_memfd_create (__NR_SYSCALL_BASE+385) + #endif + ++#if !defined(__NR_clock_gettime64) ++#define __NR_clock_gettime64 (__NR_SYSCALL_BASE+403) ++#endif ++ ++#if !defined(__NR_clock_nanosleep_time64) ++#define __NR_clock_nanosleep_time64 (__NR_SYSCALL_BASE+407) ++#endif ++ + // ARM private syscalls. 
+ #if !defined(__ARM_NR_BASE) + #define __ARM_NR_BASE (__NR_SYSCALL_BASE + 0xF0000) +--- a/src/3rdparty/chromium/sandbox/linux/system_headers/mips_linux_syscalls.h ++++ b/src/3rdparty/chromium/sandbox/linux/system_headers/mips_linux_syscalls.h +@@ -1433,4 +1433,12 @@ + #define __NR_memfd_create (__NR_Linux + 354) + #endif + ++#if !defined(__NR_clock_gettime64) ++#define __NR_clock_gettime64 (__NR_Linux + 403) ++#endif ++ ++#if !defined(__NR_clock_nanosleep_time64) ++#define __NR_clock_nanosleep_time64 (__NR_Linux + 407) ++#endif ++ + #endif // SANDBOX_LINUX_SYSTEM_HEADERS_MIPS_LINUX_SYSCALLS_H_ diff --git a/system-icu-utf.patch b/system-icu-utf.patch new file mode 100644 index 0000000..aecfadb --- /dev/null +++ b/system-icu-utf.patch @@ -0,0 +1,561 @@ +Description: use the system ICU UTF functions + We already depend on ICU, so it is useless to copy these functions here. + I checked the history of that directory, and other than the renames I am + undoing, there were no modifications at all. +Author: Kevin, Kofler +Origin: Fedora, https://src.fedoraproject.org/rpms/qt5-qtwebengine/blob/master/f/qtwebengine-everywhere-src-5.10.0-system-icu-utf.patch +Forwarded: not-needed +Reviewed-by: Sandro Knauß +Last-Update: 2020-09-08 +--- +This patch header follows DEP-3: http://dep.debian.net/deps/dep3/ +--- a/src/3rdparty/chromium/base/BUILD.gn ++++ b/src/3rdparty/chromium/base/BUILD.gn +@@ -110,6 +110,7 @@ config("base_flags") { + } + ldflags = [ + "-lnspr4", ++ "-licuuc", + ] + } + +@@ -714,8 +715,6 @@ jumbo_component("base") { + "third_party/cityhash/city.h", + "third_party/cityhash_v103/src/city_v103.cc", + "third_party/cityhash_v103/src/city_v103.h", +- "third_party/icu/icu_utf.cc", +- "third_party/icu/icu_utf.h", + "third_party/superfasthash/superfasthash.c", + "thread_annotations.h", + "threading/hang_watcher.cc", +@@ -1305,6 +1304,7 @@ jumbo_component("base") { + "//base/third_party/dynamic_annotations", + "//build:branding_buildflags", + "//third_party/modp_b64", ++ "//third_party/icu:icuuc", + ] + + public_deps = [ +--- a/src/3rdparty/chromium/base/files/file_path.cc ++++ b/src/3rdparty/chromium/base/files/file_path.cc +@@ -19,7 +19,7 @@ + + #if defined(OS_MACOSX) + #include "base/mac/scoped_cftyperef.h" +-#include "base/third_party/icu/icu_utf.h" ++#include + #endif + + #if defined(OS_WIN) +@@ -1183,9 +1183,9 @@ inline int HFSReadNextNonIgnorableCodepo + int* index) { + int codepoint = 0; + while (*index < length && codepoint == 0) { +- // CBU8_NEXT returns a value < 0 in error cases. For purposes of string ++ // U8_NEXT returns a value < 0 in error cases. For purposes of string + // comparison, we just use that value and flag it with DCHECK. +- CBU8_NEXT(string, *index, length, codepoint); ++ U8_NEXT(string, *index, length, codepoint); + DCHECK_GT(codepoint, 0); + if (codepoint > 0) { + // Check if there is a subtable for this upper byte. +--- a/src/3rdparty/chromium/base/json/json_parser.cc ++++ b/src/3rdparty/chromium/base/json/json_parser.cc +@@ -17,7 +17,7 @@ + #include "base/strings/stringprintf.h" + #include "base/strings/utf_string_conversion_utils.h" + #include "base/strings/utf_string_conversions.h" +-#include "base/third_party/icu/icu_utf.h" ++#include + #include "base/values.h" + + namespace base { +@@ -565,10 +565,10 @@ bool JSONParser::DecodeUTF16(uint32_t* o + + // If this is a high surrogate, consume the next code unit to get the + // low surrogate. 
+- if (CBU16_IS_SURROGATE(code_unit16_high)) { ++ if (U16_IS_SURROGATE(code_unit16_high)) { + // Make sure this is the high surrogate. If not, it's an encoding + // error. +- if (!CBU16_IS_SURROGATE_LEAD(code_unit16_high)) ++ if (!U16_IS_SURROGATE_LEAD(code_unit16_high)) + return false; + + // Make sure that the token has more characters to consume the +@@ -588,7 +588,7 @@ bool JSONParser::DecodeUTF16(uint32_t* o + if (!UnprefixedHexStringToInt(*escape_sequence, &code_unit16_low)) + return false; + +- if (!CBU16_IS_TRAIL(code_unit16_low)) { ++ if (!U16_IS_TRAIL(code_unit16_low)) { + if ((options_ & JSON_REPLACE_INVALID_CHARACTERS) == 0) + return false; + *out_code_point = kUnicodeReplacementPoint; +@@ -596,12 +596,12 @@ bool JSONParser::DecodeUTF16(uint32_t* o + } + + uint32_t code_point = +- CBU16_GET_SUPPLEMENTARY(code_unit16_high, code_unit16_low); ++ U16_GET_SUPPLEMENTARY(code_unit16_high, code_unit16_low); + + *out_code_point = code_point; + } else { + // Not a surrogate. +- DCHECK(CBU16_IS_SINGLE(code_unit16_high)); ++ DCHECK(U16_IS_SINGLE(code_unit16_high)); + + *out_code_point = code_unit16_high; + } +--- a/src/3rdparty/chromium/base/json/string_escape.cc ++++ b/src/3rdparty/chromium/base/json/string_escape.cc +@@ -14,7 +14,7 @@ + #include "base/strings/stringprintf.h" + #include "base/strings/utf_string_conversion_utils.h" + #include "base/strings/utf_string_conversions.h" +-#include "base/third_party/icu/icu_utf.h" ++#include + + namespace base { + +@@ -92,7 +92,7 @@ bool EscapeJSONStringImpl(const S& str, + for (int32_t i = 0; i < length; ++i) { + uint32_t code_point; + if (!ReadUnicodeCharacter(str.data(), length, &i, &code_point) || +- code_point == static_cast(CBU_SENTINEL) || ++ code_point == static_cast(U_SENTINEL) || + !IsValidCodepoint(code_point)) { + code_point = kReplacementCodePoint; + did_replacement = true; +--- a/src/3rdparty/chromium/base/strings/pattern.cc ++++ b/src/3rdparty/chromium/base/strings/pattern.cc +@@ -4,13 +4,13 @@ + + #include "base/strings/pattern.h" + +-#include "base/third_party/icu/icu_utf.h" ++#include + + namespace base { + + namespace { + +-constexpr bool IsWildcard(base_icu::UChar32 character) { ++constexpr bool IsWildcard(UChar32 character) { + return character == '*' || character == '?'; + } + +@@ -55,9 +55,9 @@ constexpr bool SearchForChars(const CHAR + // Check if the chars match, if so, increment the ptrs. 
+ const CHAR* pattern_next = *pattern; + const CHAR* string_next = *string; +- base_icu::UChar32 pattern_char = next(&pattern_next, pattern_end); ++ UChar32 pattern_char = next(&pattern_next, pattern_end); + if (pattern_char == next(&string_next, string_end) && +- pattern_char != CBU_SENTINEL) { ++ pattern_char != U_SENTINEL) { + *pattern = pattern_next; + *string = string_next; + continue; +@@ -121,20 +121,20 @@ constexpr bool MatchPatternT(const CHAR* + } + + struct NextCharUTF8 { +- base_icu::UChar32 operator()(const char** p, const char* end) { +- base_icu::UChar32 c; ++ UChar32 operator()(const char** p, const char* end) { ++ UChar32 c; + int offset = 0; +- CBU8_NEXT(*p, offset, end - *p, c); ++ U8_NEXT(*p, offset, end - *p, c); + *p += offset; + return c; + } + }; + + struct NextCharUTF16 { +- base_icu::UChar32 operator()(const char16** p, const char16* end) { +- base_icu::UChar32 c; ++ UChar32 operator()(const char16** p, const char16* end) { ++ UChar32 c; + int offset = 0; +- CBU16_NEXT(*p, offset, end - *p, c); ++ U16_NEXT(*p, offset, end - *p, c); + *p += offset; + return c; + } +--- a/src/3rdparty/chromium/base/strings/string_split.cc ++++ b/src/3rdparty/chromium/base/strings/string_split.cc +@@ -8,7 +8,7 @@ + + #include "base/logging.h" + #include "base/strings/string_util.h" +-#include "base/third_party/icu/icu_utf.h" ++#include + + namespace base { + +--- a/src/3rdparty/chromium/base/strings/string_util.cc ++++ b/src/3rdparty/chromium/base/strings/string_util.cc +@@ -25,7 +25,7 @@ + #include "base/stl_util.h" + #include "base/strings/utf_string_conversion_utils.h" + #include "base/strings/utf_string_conversions.h" +-#include "base/third_party/icu/icu_utf.h" ++#include + #include "build/build_config.h" + + namespace base { +@@ -327,19 +327,19 @@ void TruncateUTF8ToByteSize(const std::s + } + DCHECK_LE(byte_size, + static_cast(std::numeric_limits::max())); +- // Note: This cast is necessary because CBU8_NEXT uses int32_ts. ++ // Note: This cast is necessary because U8_NEXT uses int32_ts. + int32_t truncation_length = static_cast(byte_size); + int32_t char_index = truncation_length - 1; + const char* data = input.data(); + +- // Using CBU8, we will move backwards from the truncation point ++ // Using U8, we will move backwards from the truncation point + // to the beginning of the string looking for a valid UTF8 + // character. Once a full UTF8 character is found, we will + // truncate the string to the end of that character. + while (char_index >= 0) { + int32_t prev = char_index; +- base_icu::UChar32 code_point = 0; +- CBU8_NEXT(data, char_index, truncation_length, code_point); ++ UChar32 code_point = 0; ++ U8_NEXT(data, char_index, truncation_length, code_point); + if (!IsValidCharacter(code_point) || + !IsValidCodepoint(code_point)) { + char_index = prev - 1; +@@ -498,7 +498,7 @@ inline static bool DoIsStringUTF8(String + + while (char_index < src_len) { + int32_t code_point; +- CBU8_NEXT(src, char_index, src_len, code_point); ++ U8_NEXT(src, char_index, src_len, code_point); + if (!Validator(code_point)) + return false; + } +--- a/src/3rdparty/chromium/base/strings/utf_string_conversion_utils.cc ++++ b/src/3rdparty/chromium/base/strings/utf_string_conversion_utils.cc +@@ -4,7 +4,7 @@ + + #include "base/strings/utf_string_conversion_utils.h" + +-#include "base/third_party/icu/icu_utf.h" ++#include + #include "build/build_config.h" + + namespace base { +@@ -19,7 +19,7 @@ bool ReadUnicodeCharacter(const char* sr + // use a signed type for code_point. 
But this function returns false + // on error anyway, so code_point_out is unsigned. + int32_t code_point; +- CBU8_NEXT(src, *char_index, src_len, code_point); ++ U8_NEXT(src, *char_index, src_len, code_point); + *code_point_out = static_cast(code_point); + + // The ICU macro above moves to the next char, we want to point to the last +@@ -34,16 +34,16 @@ bool ReadUnicodeCharacter(const char16* + int32_t src_len, + int32_t* char_index, + uint32_t* code_point) { +- if (CBU16_IS_SURROGATE(src[*char_index])) { +- if (!CBU16_IS_SURROGATE_LEAD(src[*char_index]) || ++ if (U16_IS_SURROGATE(src[*char_index])) { ++ if (!U16_IS_SURROGATE_LEAD(src[*char_index]) || + *char_index + 1 >= src_len || +- !CBU16_IS_TRAIL(src[*char_index + 1])) { ++ !U16_IS_TRAIL(src[*char_index + 1])) { + // Invalid surrogate pair. + return false; + } + + // Valid surrogate pair. +- *code_point = CBU16_GET_SUPPLEMENTARY(src[*char_index], ++ *code_point = U16_GET_SUPPLEMENTARY(src[*char_index], + src[*char_index + 1]); + (*char_index)++; + } else { +@@ -77,30 +77,30 @@ size_t WriteUnicodeCharacter(uint32_t co + } + + +- // CBU8_APPEND_UNSAFE can append up to 4 bytes. ++ // U8_APPEND_UNSAFE can append up to 4 bytes. + size_t char_offset = output->length(); + size_t original_char_offset = char_offset; +- output->resize(char_offset + CBU8_MAX_LENGTH); ++ output->resize(char_offset + U8_MAX_LENGTH); + +- CBU8_APPEND_UNSAFE(&(*output)[0], char_offset, code_point); ++ U8_APPEND_UNSAFE(&(*output)[0], char_offset, code_point); + +- // CBU8_APPEND_UNSAFE will advance our pointer past the inserted character, so ++ // U8_APPEND_UNSAFE will advance our pointer past the inserted character, so + // it will represent the new length of the string. + output->resize(char_offset); + return char_offset - original_char_offset; + } + + size_t WriteUnicodeCharacter(uint32_t code_point, string16* output) { +- if (CBU16_LENGTH(code_point) == 1) { ++ if (U16_LENGTH(code_point) == 1) { + // Thie code point is in the Basic Multilingual Plane (BMP). + output->push_back(static_cast(code_point)); + return 1; + } + // Non-BMP characters use a double-character encoding. 
+ size_t char_offset = output->length(); +- output->resize(char_offset + CBU16_MAX_LENGTH); +- CBU16_APPEND_UNSAFE(&(*output)[0], char_offset, code_point); +- return CBU16_MAX_LENGTH; ++ output->resize(char_offset + U16_MAX_LENGTH); ++ U16_APPEND_UNSAFE(&(*output)[0], char_offset, code_point); ++ return U16_MAX_LENGTH; + } + + // Generalized Unicode converter ----------------------------------------------- +--- a/src/3rdparty/chromium/base/strings/utf_string_conversions.cc ++++ b/src/3rdparty/chromium/base/strings/utf_string_conversions.cc +@@ -12,7 +12,7 @@ + #include "base/strings/string_piece.h" + #include "base/strings/string_util.h" + #include "base/strings/utf_string_conversion_utils.h" +-#include "base/third_party/icu/icu_utf.h" ++#include + #include "build/build_config.h" + + namespace base { +@@ -71,12 +71,12 @@ using EnableIfBitsAre = std::enable_if_t + + template = true> + void UnicodeAppendUnsafe(Char* out, int32_t* size, uint32_t code_point) { +- CBU8_APPEND_UNSAFE(out, *size, code_point); ++ U8_APPEND_UNSAFE(out, *size, code_point); + } + + template = true> + void UnicodeAppendUnsafe(Char* out, int32_t* size, uint32_t code_point) { +- CBU16_APPEND_UNSAFE(out, *size, code_point); ++ U16_APPEND_UNSAFE(out, *size, code_point); + } + + template = true> +@@ -97,7 +97,7 @@ bool DoUTFConversion(const char* src, + + for (int32_t i = 0; i < src_len;) { + int32_t code_point; +- CBU8_NEXT(src, i, src_len, code_point); ++ U8_NEXT(src, i, src_len, code_point); + + if (!IsValidCodepoint(code_point)) { + success = false; +@@ -118,7 +118,7 @@ bool DoUTFConversion(const char16* src, + bool success = true; + + auto ConvertSingleChar = [&success](char16 in) -> int32_t { +- if (!CBU16_IS_SINGLE(in) || !IsValidCodepoint(in)) { ++ if (!U16_IS_SINGLE(in) || !IsValidCodepoint(in)) { + success = false; + return kErrorCodePoint; + } +@@ -132,8 +132,8 @@ bool DoUTFConversion(const char16* src, + while (i < src_len - 1) { + int32_t code_point; + +- if (CBU16_IS_LEAD(src[i]) && CBU16_IS_TRAIL(src[i + 1])) { +- code_point = CBU16_GET_SUPPLEMENTARY(src[i], src[i + 1]); ++ if (U16_IS_LEAD(src[i]) && U16_IS_TRAIL(src[i + 1])) { ++ code_point = U16_GET_SUPPLEMENTARY(src[i], src[i + 1]); + if (!IsValidCodepoint(code_point)) { + code_point = kErrorCodePoint; + success = false; +--- a/src/3rdparty/chromium/components/download/internal/common/download_path_reservation_tracker.cc ++++ b/src/3rdparty/chromium/components/download/internal/common/download_path_reservation_tracker.cc +@@ -23,7 +23,7 @@ + #include "base/task/lazy_thread_pool_task_runner.h" + #include "base/task/post_task.h" + #include "base/task_runner_util.h" +-#include "base/third_party/icu/icu_utf.h" ++#include + #include "base/time/time.h" + #include "build/build_config.h" + #include "components/download/public/common/download_features.h" +--- a/src/3rdparty/chromium/components/filename_generation/filename_generation.cc ++++ b/src/3rdparty/chromium/components/filename_generation/filename_generation.cc +@@ -11,7 +11,7 @@ + #include "base/strings/string_util.h" + #include "base/strings/sys_string_conversions.h" + #include "base/strings/utf_string_conversions.h" +-#include "base/third_party/icu/icu_utf.h" ++#include + #include "build/build_config.h" + #include "components/url_formatter/url_formatter.h" + #include "net/base/filename_util.h" +@@ -167,7 +167,7 @@ bool TruncateFilename(base::FilePath* pa + #elif defined(OS_WIN) + // UTF-16. + DCHECK(name.size() > limit); +- truncated = name.substr(0, CBU16_IS_TRAIL(name[limit]) ? 
limit - 1 : limit); ++ truncated = name.substr(0, U16_IS_TRAIL(name[limit]) ? limit - 1 : limit); + #else + // We cannot generally assume that the file name encoding is in UTF-8 (see + // the comment for FilePath::AsUTF8Unsafe), hence no safe way to truncate. +--- a/src/3rdparty/chromium/content/browser/devtools/devtools_stream_file.cc ++++ b/src/3rdparty/chromium/content/browser/devtools/devtools_stream_file.cc +@@ -11,7 +11,7 @@ + #include "base/strings/string_util.h" + #include "base/task/lazy_thread_pool_task_runner.h" + #include "base/task/post_task.h" +-#include "base/third_party/icu/icu_utf.h" ++#include + #include "content/public/browser/browser_task_traits.h" + #include "content/public/browser/browser_thread.h" + #include "storage/browser/file_system/file_system_context.h" +@@ -105,7 +105,7 @@ void DevToolsStreamFile::ReadOnFileSeque + } else { + // Provided client has requested sufficient large block, make their + // life easier by not truncating in the middle of a UTF-8 character. +- if (size_got > 6 && !CBU8_IS_SINGLE(buffer[size_got - 1])) { ++ if (size_got > 6 && !U8_IS_SINGLE(buffer[size_got - 1])) { + base::TruncateUTF8ToByteSize(buffer, size_got, &buffer); + size_got = buffer.size(); + } else { +--- a/src/3rdparty/chromium/net/base/escape.cc ++++ b/src/3rdparty/chromium/net/base/escape.cc +@@ -9,7 +9,7 @@ + #include "base/strings/string_util.h" + #include "base/strings/utf_string_conversion_utils.h" + #include "base/strings/utf_string_conversions.h" +-#include "base/third_party/icu/icu_utf.h" ++#include + + namespace net { + +@@ -134,14 +134,14 @@ bool UnescapeUTF8CharacterAtIndex(base:: + std::string* unescaped_out) { + DCHECK(unescaped_out->empty()); + +- unsigned char bytes[CBU8_MAX_LENGTH]; ++ unsigned char bytes[U8_MAX_LENGTH]; + if (!UnescapeUnsignedByteAtIndex(escaped_text, index, &bytes[0])) + return false; + + size_t num_bytes = 1; + + // If this is a lead byte, need to collect trail bytes as well. +- if (CBU8_IS_LEAD(bytes[0])) { ++ if (U8_IS_LEAD(bytes[0])) { + // Look for the last trail byte of the UTF-8 character. Give up once + // reach max character length number of bytes, or hit an unescaped + // character. No need to check length of escaped_text, as +@@ -149,7 +149,7 @@ bool UnescapeUTF8CharacterAtIndex(base:: + while (num_bytes < base::size(bytes) && + UnescapeUnsignedByteAtIndex(escaped_text, index + num_bytes * 3, + &bytes[num_bytes]) && +- CBU8_IS_TRAIL(bytes[num_bytes])) { ++ U8_IS_TRAIL(bytes[num_bytes])) { + ++num_bytes; + } + } +--- a/src/3rdparty/chromium/net/cert/internal/parse_name.cc ++++ b/src/3rdparty/chromium/net/cert/internal/parse_name.cc +@@ -9,7 +9,7 @@ + #include "base/strings/utf_string_conversion_utils.h" + #include "base/strings/utf_string_conversions.h" + #include "base/sys_byteorder.h" +-#include "base/third_party/icu/icu_utf.h" ++#include + + namespace net { + +@@ -34,7 +34,7 @@ bool ConvertBmpStringValue(const der::In + + // BMPString only supports codepoints in the Basic Multilingual Plane; + // surrogates are not allowed. +- if (CBU_IS_SURROGATE(c)) ++ if (U_IS_SURROGATE(c)) + return false; + } + return base::UTF16ToUTF8(in_16bit.data(), in_16bit.size(), out); +@@ -54,7 +54,7 @@ bool ConvertUniversalStringValue(const d + for (const uint32_t c : in_32bit) { + // UniversalString is UCS-4 in big-endian order. 
+ uint32_t codepoint = base::NetToHost32(c); +- if (!CBU_IS_UNICODE_CHAR(codepoint)) ++ if (!U_IS_UNICODE_CHAR(codepoint)) + return false; + + base::WriteUnicodeCharacter(codepoint, out); +--- a/src/3rdparty/chromium/net/tools/transport_security_state_generator/BUILD.gn ++++ b/src/3rdparty/chromium/net/tools/transport_security_state_generator/BUILD.gn +@@ -43,6 +43,9 @@ source_set("transport_security_state_gen + "//testing/gtest", + "//third_party/boringssl", + ] ++ libs = [ ++ "icuuc", ++ ] + } + + executable("transport_security_state_generator") { +--- a/src/3rdparty/chromium/third_party/openscreen/src/third_party/chromium_quic/build/base/BUILD.gn ++++ b/src/3rdparty/chromium/third_party/openscreen/src/third_party/chromium_quic/build/base/BUILD.gn +@@ -427,8 +427,6 @@ source_set("base") { + "../../src/base/test/fuzzed_data_provider.cc", + "../../src/base/test/fuzzed_data_provider.h", + "../../src/base/third_party/dynamic_annotations/dynamic_annotations.h", +- "../../src/base/third_party/icu/icu_utf.cc", +- "../../src/base/third_party/icu/icu_utf.h", + "../../src/base/third_party/nspr/prtime.cc", + "../../src/base/third_party/nspr/prtime.h", + "../../src/base/third_party/superfasthash/superfasthash.c", +--- a/src/3rdparty/chromium/ui/base/ime/character_composer.cc ++++ b/src/3rdparty/chromium/ui/base/ime/character_composer.cc +@@ -11,7 +11,7 @@ + #include "base/strings/string_util.h" + #include "base/strings/utf_string_conversion_utils.h" + #include "base/strings/utf_string_conversions.h" +-#include "base/third_party/icu/icu_utf.h" ++#include + #include "ui/events/event.h" + #include "ui/events/keycodes/dom/dom_key.h" + #include "ui/events/keycodes/dom/keycode_converter.h" +@@ -35,12 +35,12 @@ bool CheckCharacterComposeTable( + bool UTF32CharacterToUTF16(uint32_t character, base::string16* output) { + output->clear(); + // Reject invalid character. (e.g. 
codepoint greater than 0x10ffff) +- if (!CBU_IS_UNICODE_CHAR(character)) ++ if (!U_IS_UNICODE_CHAR(character)) + return false; + if (character) { +- output->resize(CBU16_LENGTH(character)); ++ output->resize(U16_LENGTH(character)); + size_t i = 0; +- CBU16_APPEND_UNSAFE(&(*output)[0], i, character); ++ U16_APPEND_UNSAFE(&(*output)[0], i, character); + } + return true; + } +--- a/src/3rdparty/chromium/ui/gfx/utf16_indexing.cc ++++ b/src/3rdparty/chromium/ui/gfx/utf16_indexing.cc +@@ -5,13 +5,13 @@ + #include "ui/gfx/utf16_indexing.h" + + #include "base/logging.h" +-#include "base/third_party/icu/icu_utf.h" ++#include + + namespace gfx { + + bool IsValidCodePointIndex(const base::string16& s, size_t index) { + return index == 0 || index == s.length() || +- !(CBU16_IS_TRAIL(s[index]) && CBU16_IS_LEAD(s[index - 1])); ++ !(U16_IS_TRAIL(s[index]) && U16_IS_LEAD(s[index - 1])); + } + + ptrdiff_t UTF16IndexToOffset(const base::string16& s, size_t base, size_t pos) { diff --git a/system-lcms2.patch b/system-lcms2.patch new file mode 100644 index 0000000..25dec87 --- /dev/null +++ b/system-lcms2.patch @@ -0,0 +1,81 @@ +Description: Use system lcms2 +Author: Sandro Knauß +Origin: Debian +Forwarded: https://bugreports.qt.io/browse/QTBUG-61746 +Reviewed-by: Sandro Knauß +Last-Update: 2020-11-20 +--- +This patch header follows DEP-3: http://dep.debian.net/deps/dep3/ +--- a/src/3rdparty/chromium/third_party/pdfium/third_party/BUILD.gn ++++ b/src/3rdparty/chromium/third_party/pdfium/third_party/BUILD.gn +@@ -256,61 +256,19 @@ if (!pdf_use_skia && !pdf_use_skia_paths + } + } + +-config("fx_lcms2_warnings") { +- visibility = [ ":*" ] +- if (is_clang) { +- cflags = [ +- # cmslut.cc is sloppy with aggregate initialization. Version 2.7 of this +- # library doesn't appear to have this problem. +- "-Wno-missing-braces", ++import("//build/shim_headers.gni") + +- # FindPrev() in cmsplugin.c is unused. +- "-Wno-unused-function", +- ] +- } ++shim_headers("lcms2_shim") { ++ root_path = "lcms/include" ++ headers = [ ++ "lcms2.h", ++ "lcms2_plugin.h", ++ ] + } + + source_set("fx_lcms2") { +- configs -= [ "//build/config/compiler:chromium_code" ] +- configs += [ +- "//build/config/compiler:no_chromium_code", +- "//build/config/sanitizers:cfi_icall_generalize_pointers", +- ":pdfium_third_party_config", +- +- # Must be after no_chromium_code for warning flags to be ordered correctly. 
+- ":fx_lcms2_warnings", +- ] +- sources = [ +- "lcms/include/lcms2.h", +- "lcms/include/lcms2_plugin.h", +- "lcms/src/cmsalpha.c", +- "lcms/src/cmscam02.c", +- "lcms/src/cmscgats.c", +- "lcms/src/cmscnvrt.c", +- "lcms/src/cmserr.c", +- "lcms/src/cmsgamma.c", +- "lcms/src/cmsgmt.c", +- "lcms/src/cmshalf.c", +- "lcms/src/cmsintrp.c", +- "lcms/src/cmsio0.c", +- "lcms/src/cmsio1.c", +- "lcms/src/cmslut.c", +- "lcms/src/cmsmd5.c", +- "lcms/src/cmsmtrx.c", +- "lcms/src/cmsnamed.c", +- "lcms/src/cmsopt.c", +- "lcms/src/cmspack.c", +- "lcms/src/cmspcs.c", +- "lcms/src/cmsplugin.c", +- "lcms/src/cmsps2.c", +- "lcms/src/cmssamp.c", +- "lcms/src/cmssm.c", +- "lcms/src/cmstypes.c", +- "lcms/src/cmsvirt.c", +- "lcms/src/cmswtpnt.c", +- "lcms/src/cmsxform.c", +- ] +- deps = [ "../core/fxcrt" ] ++ deps = [ ":lcms2_shim" ] ++ libs = ["lcms2"] + } + + if (!build_with_chromium) { diff --git a/system-nspr-prtime.patch b/system-nspr-prtime.patch new file mode 100644 index 0000000..3f6d06a --- /dev/null +++ b/system-nspr-prtime.patch @@ -0,0 +1,51 @@ +Description: Use system NSPR prtime +Author: Kevin Kofler +Origin: Fedora, https://src.fedoraproject.org/rpms/qt5-qtwebengine/blob/master/f/qtwebengine-everywhere-src-5.10.0-system-nspr-prtime.patch +Forwarded: not-needed +Reviewed-by: Sandro Knauß +Last-Update: 2020-03-02 +--- +This patch header follows DEP-3: http://dep.debian.net/deps/dep3/ +--- a/src/3rdparty/chromium/base/BUILD.gn ++++ b/src/3rdparty/chromium/base/BUILD.gn +@@ -108,6 +108,9 @@ config("base_flags") { + "-Wglobal-constructors", + ] + } ++ ldflags = [ ++ "-lnspr4", ++ ] + } + + config("base_implementation") { +@@ -713,8 +716,6 @@ jumbo_component("base") { + "third_party/cityhash_v103/src/city_v103.h", + "third_party/icu/icu_utf.cc", + "third_party/icu/icu_utf.h", +- "third_party/nspr/prtime.cc", +- "third_party/nspr/prtime.h", + "third_party/superfasthash/superfasthash.c", + "thread_annotations.h", + "threading/hang_watcher.cc", +--- a/src/3rdparty/chromium/base/time/pr_time_unittest.cc ++++ b/src/3rdparty/chromium/base/time/pr_time_unittest.cc +@@ -7,7 +7,7 @@ + + #include "base/compiler_specific.h" + #include "base/stl_util.h" +-#include "base/third_party/nspr/prtime.h" ++#include + #include "base/time/time.h" + #include "build/build_config.h" + #include "testing/gtest/include/gtest/gtest.h" +--- a/src/3rdparty/chromium/base/time/time.cc ++++ b/src/3rdparty/chromium/base/time/time.cc +@@ -14,7 +14,7 @@ + #include "base/macros.h" + #include "base/no_destructor.h" + #include "base/strings/stringprintf.h" +-#include "base/third_party/nspr/prtime.h" ++#include + #include "base/time/time_override.h" + #include "build/build_config.h" + diff --git a/verbose-gn-bootstrap.patch b/verbose-gn-bootstrap.patch new file mode 100644 index 0000000..e2314d1 --- /dev/null +++ b/verbose-gn-bootstrap.patch @@ -0,0 +1,16 @@ +Description: print compiler commands when bootstrapping gn +Author: Dmitry Shachnev +Forwarded: no +Last-Update: 2019-03-20 + +--- a/src/buildtools/gn.pro ++++ b/src/buildtools/gn.pro +@@ -32,7 +32,7 @@ build_pass|!debug_and_release { + !system("$$pythonPathForSystem() $$gn_configure") { + error("GN generation error!") + } +- !system("cd $$system_quote($$system_path($$out_path)) && $$ninja_path $$basename(out)" ) { ++ !system("cd $$system_quote($$system_path($$out_path)) && $$ninja_path -v $$basename(out)" ) { + error("GN build error!") + } + } -- Gitee