mirror of https://github.com/nodejs/node.git

commit 16e03e7968 (parent fac00cd432)

deps: update V8 to 10.9.194.4

PR-URL: https://github.com/nodejs/node/pull/45579
Reviewed-By: Michaël Zasso <targos@protonmail.com>
Reviewed-By: James M Snell <jasnell@gmail.com>
@@ -42,7 +42,6 @@ import sys
 # Flags from YCM's default config.
 flags = [
     '-DUSE_CLANG_COMPLETER',
-    '-std=gnu++14',
     '-x',
     'c++',
 ]
@@ -143,23 +142,25 @@ def GetClangCommandFromNinjaForFilename(v8_root, filename):
   # Parse flags that are important for YCM's purposes.
   for flag in clang_line.split(' '):
     if flag.startswith('-I'):
+      v8_flags.append(MakeIncludePathAbsolute(flag, "-I", out_dir))
+    elif flag.startswith('-isystem'):
+      v8_flags.append(MakeIncludePathAbsolute(flag, "-isystem", out_dir))
+    elif flag.startswith('-std') or flag.startswith(
+        '-pthread') or flag.startswith('-no'):
+      v8_flags.append(flag)
+    elif flag.startswith('-') and flag[1] in 'DWFfmgOX':
+      v8_flags.append(flag)
+  return v8_flags
+
+
+def MakeIncludePathAbsolute(flag, prefix, out_dir):
   # Relative paths need to be resolved, because they're relative to the
   # output dir, not the source.
-  if flag[2] == '/':
-    v8_flags.append(flag)
+  if flag[len(prefix)] == '/':
+    return flag
   else:
-    abs_path = os.path.normpath(os.path.join(out_dir, flag[2:]))
-    v8_flags.append('-I' + abs_path)
+    abs_path = os.path.normpath(os.path.join(out_dir, flag[len(prefix):]))
+    return prefix + abs_path
-    elif flag.startswith('-std'):
-      v8_flags.append(flag)
-    elif flag.startswith('-') and flag[1] in 'DWFfmO':
-      if flag == '-Wno-deprecated-register' or flag == '-Wno-header-guard':
-        # These flags causes libclang (3.3) to crash. Remove it until things
-        # are fixed.
-        continue
-      v8_flags.append(flag)
-
-  return v8_flags


 def FlagsForFile(filename):

@@ -180,3 +181,9 @@ def FlagsForFile(filename):
     'flags': final_flags,
     'do_cache': True
   }
+
+
+def Settings(**kwargs):
+  if kwargs['language'] == 'cfamily':
+    return FlagsForFile(kwargs['filename'])
+  return {}
@@ -114,6 +114,7 @@ Fedor Indutny <fedor@indutny.com>
 Felix Geisendörfer <haimuiba@gmail.com>
 Feng Yu <f3n67u@gmail.com>
 Filipe David Manana <fdmanana@gmail.com>
+Frank Lemanschik <frank@dspeed.eu>
 Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
 Gao Sheng <gaosheng08@meituan.com>
 Geoffrey Garside <ggarside@gmail.com>

@@ -152,6 +153,7 @@ Jiaxun Yang <jiaxun.yang@flygoat.com>
 Joel Stanley <joel@jms.id.au>
 Johan Bergström <johan@bergstroem.nu>
 Jonathan Liu <net147@gmail.com>
+Juan Arboleda <soyjuanarbol@gmail.com>
 Julien Brianceau <jbriance@cisco.com>
 JunHo Seo <sejunho@gmail.com>
 Junha Park <jpark3@scu.edu>
@@ -39,6 +39,7 @@ load(":bazel/v8-non-pointer-compression.bzl", "v8_binary_non_pointer_compression")
 # v8_enable_trace_baseline_exec
 # v8_enable_trace_feedback_updates
 # v8_enable_atomic_object_field_writes
+# v8_enable_conservative_stack_scanning
 # v8_enable_concurrent_marking
 # v8_enable_ignition_dispatch_counting
 # v8_enable_builtins_profiling

@@ -1381,6 +1382,8 @@ filegroup(
         "src/handles/global-handles-inl.h",
         "src/handles/global-handles.cc",
         "src/handles/global-handles.h",
+        "src/handles/traced-handles.cc",
+        "src/handles/traced-handles.h",
         "src/handles/handles-inl.h",
         "src/handles/handles.cc",
         "src/handles/handles.h",

@@ -1963,6 +1966,7 @@ filegroup(
         "src/profiler/heap-snapshot-generator-inl.h",
         "src/profiler/heap-snapshot-generator.cc",
         "src/profiler/heap-snapshot-generator.h",
+        "src/profiler/output-stream-writer.h",
         "src/profiler/profile-generator-inl.h",
         "src/profiler/profile-generator.cc",
         "src/profiler/profile-generator.h",

@@ -2664,6 +2668,8 @@ filegroup(
         "src/compiler/all-nodes.h",
         "src/compiler/allocation-builder.h",
         "src/compiler/allocation-builder-inl.h",
+        "src/compiler/backend/bitcast-elider.cc",
+        "src/compiler/backend/bitcast-elider.h",
         "src/compiler/backend/code-generator.cc",
         "src/compiler/backend/code-generator.h",
         "src/compiler/backend/code-generator-impl.h",

@@ -2887,7 +2893,7 @@ filegroup(
         "src/compiler/turboshaft/graph.h",
         "src/compiler/turboshaft/graph-visualizer.cc",
         "src/compiler/turboshaft/graph-visualizer.h",
-        "src/compiler/turboshaft/machine-optimization-assembler.h",
+        "src/compiler/turboshaft/machine-optimization-reducer.h",
         "src/compiler/turboshaft/operations.cc",
         "src/compiler/turboshaft/operations.h",
         "src/compiler/turboshaft/operation-matching.h",

@@ -2897,12 +2903,14 @@ filegroup(
         "src/compiler/turboshaft/recreate-schedule.h",
         "src/compiler/turboshaft/representations.cc",
         "src/compiler/turboshaft/representations.h",
+        "src/compiler/turboshaft/select-lowering-reducer.h",
         "src/compiler/turboshaft/sidetable.h",
         "src/compiler/turboshaft/simplify-tf-loops.cc",
         "src/compiler/turboshaft/simplify-tf-loops.h",
+        "src/compiler/turboshaft/snapshot-table.h",
         "src/compiler/turboshaft/utils.cc",
         "src/compiler/turboshaft/utils.h",
-        "src/compiler/turboshaft/value-numbering-assembler.h",
+        "src/compiler/turboshaft/value-numbering-reducer.h",
         "src/compiler/type-cache.cc",
         "src/compiler/type-cache.h",
         "src/compiler/type-narrowing-reducer.cc",

@@ -3152,16 +3160,16 @@ filegroup(
         # Note these cannot be v8_target_is_* selects because these contain
         # inline assembly that runs inside the executable. Since these are
         # linked directly into mksnapshot, they must use the actual target cpu.
-        "@v8//bazel/config:is_inline_asm_ia32": ["src/heap/base/asm/ia32/push_registers_asm.cc"],
+        "@v8//bazel/config:is_inline_asm_ia32": ["src/heap/base/asm/ia32/save_registers_asm.cc"],
-        "@v8//bazel/config:is_inline_asm_x64": ["src/heap/base/asm/x64/push_registers_asm.cc"],
+        "@v8//bazel/config:is_inline_asm_x64": ["src/heap/base/asm/x64/save_registers_asm.cc"],
-        "@v8//bazel/config:is_inline_asm_arm": ["src/heap/base/asm/arm/push_registers_asm.cc"],
+        "@v8//bazel/config:is_inline_asm_arm": ["src/heap/base/asm/arm/save_registers_asm.cc"],
-        "@v8//bazel/config:is_inline_asm_arm64": ["src/heap/base/asm/arm64/push_registers_asm.cc"],
+        "@v8//bazel/config:is_inline_asm_arm64": ["src/heap/base/asm/arm64/save_registers_asm.cc"],
-        "@v8//bazel/config:is_inline_asm_s390x": ["src/heap/base/asm/s390/push_registers_asm.cc"],
+        "@v8//bazel/config:is_inline_asm_s390x": ["src/heap/base/asm/s390/save_registers_asm.cc"],
-        "@v8//bazel/config:is_inline_asm_riscv64": ["src/heap/base/asm/riscv64/push_registers_asm.cc"],
+        "@v8//bazel/config:is_inline_asm_riscv64": ["src/heap/base/asm/riscv64/save_registers_asm.cc"],
-        "@v8//bazel/config:is_inline_asm_ppc64le": ["src/heap/base/asm/ppc/push_registers_asm.cc"],
+        "@v8//bazel/config:is_inline_asm_ppc64le": ["src/heap/base/asm/ppc/save_registers_asm.cc"],
-        "@v8//bazel/config:is_msvc_asm_ia32": ["src/heap/base/asm/ia32/push_registers_masm.asm"],
+        "@v8//bazel/config:is_msvc_asm_ia32": ["src/heap/base/asm/ia32/save_registers_masm.asm"],
-        "@v8//bazel/config:is_msvc_asm_x64": ["src/heap/base/asm/x64/push_registers_masm.asm"],
+        "@v8//bazel/config:is_msvc_asm_x64": ["src/heap/base/asm/x64/save_registers_masm.asm"],
-        "@v8//bazel/config:is_msvc_asm_arm64": ["src/heap/base/asm/arm64/push_registers_masm.S"],
+        "@v8//bazel/config:is_msvc_asm_arm64": ["src/heap/base/asm/arm64/save_registers_masm.S"],
     }),
 )
@@ -1460,6 +1460,10 @@ config("toolchain") {
       "/wd4715",  # 'function' : not all control paths return a value'
       # MSVC does not analyze switch (enum) for completeness.
     ]
+
+    # TODO(https://crbug.com/1377771): Keep MSVC on C++17 until source code is
+    # made compatible with C++20.
+    cflags_cc = [ "/std:c++17" ]
   }

   if (!is_clang && !is_win) {

@@ -1476,6 +1480,11 @@ config("toolchain") {
       # Disable gcc warnings for using enum constant in boolean context.
      # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=97266
       "-Wno-int-in-bool-context",
+
+      # Disable gcc deprecation warnings, which are firing on implicit capture
+      # of `this` in capture-by-value lambdas and preventing a build roll which
+      # enables C++20 (see https://crbug.com/1374227).
+      "-Wno-deprecated",
     ]
   }

@@ -2268,6 +2277,8 @@ action("v8_dump_build_config") {
     "v8_current_cpu=\"$v8_current_cpu\"",
     "v8_enable_atomic_object_field_writes=" +
         "$v8_enable_atomic_object_field_writes",
+    "v8_enable_conservative_stack_scanning=" +
+        "$v8_enable_conservative_stack_scanning",
     "v8_enable_concurrent_marking=$v8_enable_concurrent_marking",
     "v8_enable_single_generation=$v8_enable_single_generation",
     "v8_enable_i18n_support=$v8_enable_i18n_support",

@@ -2803,6 +2814,7 @@ v8_header_set("v8_internal_headers") {
     "src/compiler/all-nodes.h",
     "src/compiler/allocation-builder-inl.h",
     "src/compiler/allocation-builder.h",
+    "src/compiler/backend/bitcast-elider.h",
     "src/compiler/backend/code-generator-impl.h",
     "src/compiler/backend/code-generator.h",
     "src/compiler/backend/frame-elider.h",

@@ -2923,16 +2935,18 @@ v8_header_set("v8_internal_headers") {
     "src/compiler/turboshaft/graph-builder.h",
     "src/compiler/turboshaft/graph-visualizer.h",
     "src/compiler/turboshaft/graph.h",
-    "src/compiler/turboshaft/machine-optimization-assembler.h",
+    "src/compiler/turboshaft/machine-optimization-reducer.h",
     "src/compiler/turboshaft/operation-matching.h",
     "src/compiler/turboshaft/operations.h",
     "src/compiler/turboshaft/optimization-phase.h",
     "src/compiler/turboshaft/recreate-schedule.h",
     "src/compiler/turboshaft/representations.h",
+    "src/compiler/turboshaft/select-lowering-reducer.h",
     "src/compiler/turboshaft/sidetable.h",
     "src/compiler/turboshaft/simplify-tf-loops.h",
+    "src/compiler/turboshaft/snapshot-table.h",
     "src/compiler/turboshaft/utils.h",
-    "src/compiler/turboshaft/value-numbering-assembler.h",
+    "src/compiler/turboshaft/value-numbering-reducer.h",
     "src/compiler/type-cache.h",
     "src/compiler/type-narrowing-reducer.h",
     "src/compiler/typed-optimization.h",

@@ -3024,6 +3038,7 @@ v8_header_set("v8_internal_headers") {
     "src/handles/maybe-handles.h",
     "src/handles/persistent-handles.h",
     "src/handles/shared-object-conveyor-handles.h",
+    "src/handles/traced-handles.h",
     "src/heap/allocation-observer.h",
     "src/heap/allocation-result.h",
     "src/heap/allocation-stats.h",

@@ -3422,6 +3437,7 @@ v8_header_set("v8_internal_headers") {
     "src/profiler/heap-profiler.h",
     "src/profiler/heap-snapshot-generator-inl.h",
     "src/profiler/heap-snapshot-generator.h",
+    "src/profiler/output-stream-writer.h",
     "src/profiler/profile-generator-inl.h",
     "src/profiler/profile-generator.h",
     "src/profiler/profiler-listener.h",

@@ -4041,6 +4057,7 @@ v8_compiler_sources = [
   "src/compiler/access-info.cc",
   "src/compiler/add-type-assertions-reducer.cc",
   "src/compiler/all-nodes.cc",
+  "src/compiler/backend/bitcast-elider.cc",
   "src/compiler/backend/code-generator.cc",
   "src/compiler/backend/frame-elider.cc",
   "src/compiler/backend/gap-resolver.cc",

@@ -4429,6 +4446,7 @@ v8_source_set("v8_base_without_compiler") {
     "src/handles/local-handles.cc",
     "src/handles/persistent-handles.cc",
     "src/handles/shared-object-conveyor-handles.cc",
+    "src/handles/traced-handles.cc",
     "src/heap/allocation-observer.cc",
     "src/heap/array-buffer-sweeper.cc",
     "src/heap/base-space.cc",

@@ -4737,6 +4755,7 @@ v8_source_set("v8_base_without_compiler") {

   if (v8_enable_maglev) {
     sources += [
+      "src/maglev/maglev-assembler.cc",
       "src/maglev/maglev-code-generator.cc",
       "src/maglev/maglev-compilation-info.cc",
       "src/maglev/maglev-compilation-unit.cc",

@@ -5745,31 +5764,31 @@ v8_source_set("v8_heap_base") {

   if (is_clang || !is_win) {
     if (current_cpu == "x64") {
-      sources += [ "src/heap/base/asm/x64/push_registers_asm.cc" ]
+      sources += [ "src/heap/base/asm/x64/save_registers_asm.cc" ]
     } else if (current_cpu == "x86") {
-      sources += [ "src/heap/base/asm/ia32/push_registers_asm.cc" ]
+      sources += [ "src/heap/base/asm/ia32/save_registers_asm.cc" ]
     } else if (current_cpu == "arm") {
-      sources += [ "src/heap/base/asm/arm/push_registers_asm.cc" ]
+      sources += [ "src/heap/base/asm/arm/save_registers_asm.cc" ]
     } else if (current_cpu == "arm64") {
-      sources += [ "src/heap/base/asm/arm64/push_registers_asm.cc" ]
+      sources += [ "src/heap/base/asm/arm64/save_registers_asm.cc" ]
     } else if (current_cpu == "ppc64") {
-      sources += [ "src/heap/base/asm/ppc/push_registers_asm.cc" ]
+      sources += [ "src/heap/base/asm/ppc/save_registers_asm.cc" ]
     } else if (current_cpu == "s390x") {
-      sources += [ "src/heap/base/asm/s390/push_registers_asm.cc" ]
+      sources += [ "src/heap/base/asm/s390/save_registers_asm.cc" ]
     } else if (current_cpu == "mips64el") {
-      sources += [ "src/heap/base/asm/mips64/push_registers_asm.cc" ]
+      sources += [ "src/heap/base/asm/mips64/save_registers_asm.cc" ]
     } else if (current_cpu == "loong64") {
-      sources += [ "src/heap/base/asm/loong64/push_registers_asm.cc" ]
+      sources += [ "src/heap/base/asm/loong64/save_registers_asm.cc" ]
     } else if (current_cpu == "riscv64" || current_cpu == "riscv32") {
-      sources += [ "src/heap/base/asm/riscv/push_registers_asm.cc" ]
+      sources += [ "src/heap/base/asm/riscv/save_registers_asm.cc" ]
     }
   } else if (is_win) {
     if (current_cpu == "x64") {
-      sources += [ "src/heap/base/asm/x64/push_registers_masm.asm" ]
+      sources += [ "src/heap/base/asm/x64/save_registers_masm.asm" ]
     } else if (current_cpu == "x86") {
-      sources += [ "src/heap/base/asm/ia32/push_registers_masm.asm" ]
+      sources += [ "src/heap/base/asm/ia32/save_registers_masm.asm" ]
     } else if (current_cpu == "arm64") {
-      sources += [ "src/heap/base/asm/arm64/push_registers_masm.S" ]
+      sources += [ "src/heap/base/asm/arm64/save_registers_masm.S" ]
     }
   }
@@ -26,6 +26,7 @@ vars = {
   # most commonly useful for developers. Bots and developers that need to use
   # other images (e.g., qemu.arm64) can override this with additional images.
   'checkout_fuchsia_boot_images': "qemu.x64",
+  'checkout_fuchsia_product_bundles': '"{checkout_fuchsia_boot_images}" != ""',

   'checkout_instrumented_libraries': False,
   'checkout_ittapi': False,

@@ -42,22 +43,22 @@ vars = {
   'fuchsia_sdk_cipd_prefix': 'fuchsia/sdk/gn/',

   # reclient CIPD package version
-  'reclient_version': 're_client_version:0.69.0.458df98-gomaip',
+  'reclient_version': 're_client_version:0.83.0.da55f4f-gomaip',

   # GN CIPD package version.
-  'gn_version': 'git_revision:cc28efe62ef0c2fb32455f414a29c4a55bb7fbc4',
+  'gn_version': 'git_revision:a4d67be044b42963de801001e7146f9657c7fad4',

   # ninja CIPD package version
   # https://chrome-infra-packages.appspot.com/p/infra/3pp/tools/ninja
   'ninja_version': 'version:2@1.8.2.chromium.3',

   # luci-go CIPD package version.
-  'luci_go': 'git_revision:20c50aa39686d91330c2daceccaa4ef1a0a72ee4',
+  'luci_go': 'git_revision:f8f64a8c560d2bf68a3ad1137979d17cffb36d30',

   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling Fuchsia sdk
   # and whatever else without interference from each other.
-  'fuchsia_version': 'version:9.20220919.2.1',
+  'fuchsia_version': 'version:10.20221109.1.1',

   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling android_sdk_build-tools_version

@@ -97,9 +98,9 @@ deps = {
   'base/trace_event/common':
     Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '521ac34ebd795939c7e16b37d9d3ddb40e8ed556',
   'build':
-    Var('chromium_url') + '/chromium/src/build.git' + '@' + '7e7c21a9ac34c4fc2b255aa44d639efec9c33b90',
+    Var('chromium_url') + '/chromium/src/build.git' + '@' + '875cb19167f2e0d7b1eca89a4d5b5693421424c6',
   'buildtools':
-    Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '9174abb6ac087b46f22248dc713b6c0328b8f774',
+    Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '49ac7cf34ab2e59a10629a7a722cfb94348c4996',
   'buildtools/clang_format/script':
     Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + '8b525d2747f2584fc35d8c7e612e66f377858df7',
   'buildtools/linux64': {

@@ -123,11 +124,11 @@ deps = {
     'condition': 'host_os == "mac"',
   },
   'buildtools/third_party/libc++/trunk':
-    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '2e919977e0030ce61bd19c40cefe31b995f1e2d4',
+    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '4218f3525ad438b22b0e173d963515a09d143398',
   'buildtools/third_party/libc++abi/trunk':
-    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + 'db2a783a7d1ef0f0ef31da4b6e3de0c31fcfd93f',
+    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '1a32724f721e1c3b6c590a07fe4a954344f15e48',
   'buildtools/third_party/libunwind/trunk':
-    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '08ebcbe7b672a04e341cb3a88d8bf4276f96ac6e',
+    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'a318d6a4c283a9d342d2a1e20292c1496fe12997',
   'buildtools/win': {
     'packages': [
       {

@@ -153,13 +154,13 @@ deps = {
   'test/mozilla/data':
     Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
   'test/test262/data':
-    Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '58b7a2358286b918efd38eac4b2facbc8ada1206',
+    Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'ade328d530525333751e8a3b58f02e18624da085',
   'third_party/android_ndk': {
     'url': Var('chromium_url') + '/android_ndk.git' + '@' + '8388a2be5421311dc75c5f937aae13d821a27f3d',
     'condition': 'checkout_android',
   },
   'third_party/android_platform': {
-    'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + '04b33506bfd9d0e866bd8bd62f4cbf323d84dc79',
+    'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + '1bf9b932433ebb78828bf3c8cd0ccc86b9ef4787',
     'condition': 'checkout_android',
   },
   'third_party/android_sdk/public': {

@@ -201,7 +202,7 @@ deps = {
     'dep_type': 'cipd',
   },
   'third_party/catapult': {
-    'url': Var('chromium_url') + '/catapult.git' + '@' + 'ff03621a71c01a6f2b0f3bf2677cf815291a9e85',
+    'url': Var('chromium_url') + '/catapult.git' + '@' + 'f0b11967c94cba8f7cca91d2da20c98d4420fc25',
     'condition': 'checkout_android',
   },
   'third_party/colorama/src': {

@@ -209,7 +210,7 @@ deps = {
     'condition': 'checkout_android',
   },
   'third_party/depot_tools':
-    Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'a724859f7a9b3531c0373d86886a42314e772532',
+    Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'ae1a70891738fb14f64fbb884e00b87ac663aa15',
   'third_party/fuchsia-sdk/sdk': {
     'packages': [
       {

@@ -226,9 +227,9 @@ deps = {
   'third_party/googletest/src':
     Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'af29db7ec28d6df1c7f0f745186884091e602e07',
   'third_party/icu':
-    Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '20f8ac695af59b6c830def7d4e95bfeb13dd7be5',
+    Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'da07448619763d1cde255b361324242646f5b268',
   'third_party/instrumented_libraries':
-    Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'e09c4b66b6e87116eb190651421f1a6e2f3b9c52',
+    Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '7bb87a375ffc3effd17a50f690099dcfb9ee280b',
   'third_party/ittapi': {
     # Force checkout ittapi libraries to pass v8 header includes check on
     # bots that has check_v8_header_includes enabled.

@@ -236,13 +237,13 @@ deps = {
     'condition': "checkout_ittapi or check_v8_header_includes",
   },
   'third_party/jinja2':
-    Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + 'ee69aa00ee8536f61db6a451f3858745cf587de6',
+    Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + '4633bf431193690c3491244f5a0acbe9ac776233',
   'third_party/jsoncpp/source':
     Var('chromium_url') + '/external/github.com/open-source-parsers/jsoncpp.git'+ '@' + '42e892d96e47b1f6e29844cc705e148ec4856448',
   'third_party/logdog/logdog':
     Var('chromium_url') + '/infra/luci/luci-py/client/libs/logdog' + '@' + '0b2078a90f7a638d576b3a7c407d136f2fb62399',
   'third_party/markupsafe':
-    Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '1b882ef6372b58bfd55a3285f37ed801be9137cd',
+    Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '13f4e8c9e206567eeb13bf585406ddc574005748',
   'third_party/ninja': {
     'packages': [
       {

@@ -262,9 +263,9 @@ deps = {
     'condition': 'checkout_android',
   },
   'third_party/zlib':
-    Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'd689fca54d7b43154f7cf77f785d19f2628fa133',
+    Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '8bbd6c3129b5146489f2321f054e855c347857f4',
   'tools/clang':
-    Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'a5e0d72349d028a4023927d6d166a8478355fac3',
+    Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'd3df9cc5362e0af4cda798b0612dde39783b3dc0',
   'tools/luci-go': {
     'packages': [
       {

@@ -573,11 +574,11 @@ hooks = [
   {
     'name': 'Download Fuchsia system images',
     'pattern': '.',
-    'condition': 'checkout_fuchsia',
+    'condition': 'checkout_fuchsia and checkout_fuchsia_product_bundles',
     'action': [
      'python3',
-      'build/fuchsia/update_images.py',
+      'build/fuchsia/update_product_bundles.py',
-      '--boot-images={checkout_fuchsia_boot_images}',
+      '{checkout_fuchsia_boot_images}',
     ],
   },
   {
@@ -5,6 +5,7 @@ file:ENG_REVIEW_OWNERS
 per-file .*=file:INFRA_OWNERS
 per-file .bazelrc=file:COMMON_OWNERS
 per-file .mailmap=file:COMMON_OWNERS
+per-file .ycm_extra_conf.py=file:COMMON_OWNERS
 per-file codereview.settings=file:INFRA_OWNERS
 per-file AUTHORS=file:COMMON_OWNERS
 per-file BUILD.bazel=file:COMMON_OWNERS
@@ -151,14 +151,6 @@ def _default_args():
             "-fno-integrated-as",
         ],
         "//conditions:default": [],
-    }) + select({
-        "@v8//bazel/config:is_debug":[
-            "-fvisibility=default",
-        ],
-        "//conditions:default": [
-            "-fvisibility=hidden",
-            "-fvisibility-inlines-hidden",
-        ],
     }),
     includes = ["include"],
     linkopts = select({

@@ -518,6 +510,7 @@ def build_config_content(cpu, icu):
     ("v8_current_cpu", cpu),
     ("v8_dict_property_const_tracking", "false"),
     ("v8_enable_atomic_object_field_writes", "false"),
+    ("v8_enable_conservative_stack_scanning", "false"),
     ("v8_enable_concurrent_marking", "false"),
     ("v8_enable_i18n_support", icu),
     ("v8_enable_verify_predictable", "false"),
@@ -44,10 +44,6 @@ declare_args() {
   #
   # There are test cases for this code posted as an attachment to
   # https://crbug.com/625353.
-  #
-  # TODO(GYP): Currently only regular (non-cross) compiles, and cross-compiles
-  # from x64 hosts to Intel, ARM, or MIPS targets, are implemented. Add support
-  # for the other supported configurations.

 if (v8_snapshot_toolchain == "") {
   if (current_os == host_os && current_cpu == host_cpu) {

@@ -69,22 +65,21 @@ if (v8_snapshot_toolchain == "") {
     # therefore snapshots will need to be built using native mksnapshot
     # in combination with qemu
     v8_snapshot_toolchain = current_toolchain
+  } else if (host_cpu == current_cpu) {
+    # Cross-build from same ISA on one OS to another. For example:
+    # * targeting win/x64 on a linux/x64 host
+    # * targeting win/arm64 on a mac/arm64 host
+    v8_snapshot_toolchain = host_toolchain
   } else if (host_cpu == "arm64" && current_cpu == "x64") {
     # Cross-build from arm64 to intel (likely on an Apple Silicon mac).
     v8_snapshot_toolchain =
         "//build/toolchain/${host_os}:clang_arm64_v8_$v8_current_cpu"
   } else if (host_cpu == "x64") {
     # This is a cross-compile from an x64 host to either a non-Intel target
-    # cpu or a different target OS. Clang will always be used by default on the
-    # host, unless this is a ChromeOS build, in which case the same toolchain
-    # (Clang or GCC) will be used for target and host by default.
-    if (is_chromeos && !is_clang) {
-      _clang = ""
-    } else {
-      _clang = "clang_"
-    }
-
-    if (v8_current_cpu == "x64" || v8_current_cpu == "x86") {
+    # cpu or to 32-bit x86 on a different target OS.
+    assert(v8_current_cpu != "x64", "handled by host_cpu == current_cpu branch")
+    if (v8_current_cpu == "x86") {
       _cpus = v8_current_cpu
     } else if (v8_current_cpu == "arm64" || v8_current_cpu == "mips64el" ||
                v8_current_cpu == "riscv64" || v8_current_cpu == "loong64") {

@@ -104,7 +99,7 @@ if (v8_snapshot_toolchain == "") {
     }

     if (_cpus != "") {
-      v8_snapshot_toolchain = "//build/toolchain/${host_os}:${_clang}${_cpus}"
+      v8_snapshot_toolchain = "//build/toolchain/${host_os}:clang_${_cpus}"
     } else if (is_win && v8_current_cpu == "arm64") {
       # cross compile Windows arm64 with host toolchain.
       v8_snapshot_toolchain = host_toolchain
@@ -26,6 +26,8 @@ This allows Oilpan to run garbage collection in parallel with mutators running in other threads.
 References to objects belonging to another thread's heap are modeled using cross-thread roots.
 This is even true for on-heap to on-heap references.
+
+Oilpan heaps may generally not be accessed from different threads unless otherwise noted.

 ## Heap partitioning

 Oilpan's heaps are partitioned into spaces.
@@ -19,6 +19,11 @@ class WriteBarrierTypeForNonCagedHeapPolicy;
  * Opaque handle used for additional heap APIs.
  */
 class HeapHandle {
+ public:
+  // Deleted copy ctor to avoid treating the type by value.
+  HeapHandle(const HeapHandle&) = delete;
+  HeapHandle& operator=(const HeapHandle&) = delete;
+
  private:
   HeapHandle() = default;

@@ -33,8 +33,9 @@ class V8_EXPORT Platform {
   virtual ~Platform() = default;

   /**
-   * Returns the allocator used by cppgc to allocate its heap and various
-   * support structures.
+   * \returns the allocator used by cppgc to allocate its heap and various
+   * support structures. Returning nullptr results in using the `PageAllocator`
+   * provided by `cppgc::InitializeProcess()` instead.
    */
   virtual PageAllocator* GetPageAllocator() = 0;

@@ -133,9 +134,10 @@ class V8_EXPORT Platform {
 * Can be called multiple times when paired with `ShutdownProcess()`.
 *
 * \param page_allocator The allocator used for maintaining meta data. Must stay
- * always alive and not change between multiple calls to InitializeProcess.
+ * always alive and not change between multiple calls to InitializeProcess. If
+ * no allocator is provided, a default internal version will be used.
 */
-V8_EXPORT void InitializeProcess(PageAllocator* page_allocator);
+V8_EXPORT void InitializeProcess(PageAllocator* page_allocator = nullptr);

 /**
  * Must be called after destroying the last used heap. Some process-global
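The defaulted `page_allocator` argument above means an embedder no longer has to wire up its own allocator before creating cppgc heaps. A minimal sketch of the resulting call pattern (not part of the commit; assumes the cppgc headers are on the include path):

    #include "cppgc/platform.h"

    // With the new default argument, passing nothing is the same as passing
    // nullptr: cppgc falls back to its internal default PageAllocator.
    void InitCppgcForProcess() {
      cppgc::InitializeProcess();  // equivalent to InitializeProcess(nullptr)
    }

    void ShutdownCppgcForProcess() {
      cppgc::ShutdownProcess();  // call only after the last heap is destroyed
    }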
@@ -458,13 +458,14 @@ domain Debugger
     # New value for breakpoints active state.
     boolean active

-  # Defines pause on exceptions state. Can be set to stop on all exceptions, uncaught exceptions or
-  # no exceptions. Initial pause on exceptions state is `none`.
+  # Defines pause on exceptions state. Can be set to stop on all exceptions, uncaught exceptions,
+  # or caught exceptions, no exceptions. Initial pause on exceptions state is `none`.
   command setPauseOnExceptions
     parameters
       # Pause on exceptions mode.
       enum state
         none
+        caught
         uncaught
         all
@@ -251,8 +251,26 @@ class V8_EXPORT ArrayBuffer : public Object {
    * preventing JavaScript from ever accessing underlying backing store.
    * ArrayBuffer should have been externalized and must be detachable.
    */
+  V8_DEPRECATE_SOON(
+      "Use the version which takes a key parameter (passing a null handle is "
+      "ok).")
   void Detach();
+
+  /**
+   * Detaches this ArrayBuffer and all its views (typed arrays).
+   * Detaching sets the byte length of the buffer and all typed arrays to zero,
+   * preventing JavaScript from ever accessing underlying backing store.
+   * ArrayBuffer should have been externalized and must be detachable. Returns
+   * Nothing if the key didn't pass the [[ArrayBufferDetachKey]] check,
+   * Just(true) otherwise.
+   */
+  V8_WARN_UNUSED_RESULT Maybe<bool> Detach(v8::Local<v8::Value> key);
+
+  /**
+   * Sets the ArrayBufferDetachKey.
+   */
+  void SetDetachKey(v8::Local<v8::Value> key);

   /**
    * Get a shared pointer to the backing store of this array buffer. This
    * pointer coordinates the lifetime management of the internal storage
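A short usage sketch of the keyed detach flow introduced above (illustrative, not part of the commit; `isolate` is assumed to be a live isolate with an entered context, and the key value is arbitrary):

    // Set a detach key on a buffer, then detach with the matching key.
    bool DetachWithKey(v8::Isolate* isolate) {
      v8::Local<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(isolate, 1024);
      v8::Local<v8::Value> key =
          v8::String::NewFromUtf8Literal(isolate, "secret");
      buffer->SetDetachKey(key);

      // Detach(key) performs the [[ArrayBufferDetachKey]] check: a
      // non-matching key yields Nothing<bool>(), a match yields Just(true).
      v8::Maybe<bool> detached = buffer->Detach(key);
      return detached.FromMaybe(false);
    }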
@@ -237,7 +237,8 @@ using LogEventCallback = void (*)(const char* name,
 enum class CrashKeyId {
   kIsolateAddress,
   kReadonlySpaceFirstPageAddress,
-  kMapSpaceFirstPageAddress,
+  kMapSpaceFirstPageAddress V8_ENUM_DEPRECATE_SOON("Map space got removed"),
+  kOldSpaceFirstPageAddress,
   kCodeRangeBaseAddress,
   kCodeSpaceFirstPageAddress,
   kDumpType,
@@ -169,6 +169,9 @@ class V8_EXPORT Context : public Data {
   /** Returns the microtask queue associated with a current context. */
   MicrotaskQueue* GetMicrotaskQueue();

+  /** Sets the microtask queue associated with the current context. */
+  void SetMicrotaskQueue(MicrotaskQueue* queue);
+
   /**
    * The field at kDebugIdIndex used to be reserved for the inspector.
    * It now serves no purpose.
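A sketch of how an embedder might use the new setter together with the existing `v8::MicrotaskQueue` factory (illustrative only, not from the commit; `isolate` is assumed):

    #include <memory>

    // Create a context, then point it at an explicitly pumped queue.
    void UseCustomQueue(v8::Isolate* isolate) {
      std::unique_ptr<v8::MicrotaskQueue> queue =
          v8::MicrotaskQueue::New(isolate, v8::MicrotasksPolicy::kExplicit);
      v8::Local<v8::Context> context = v8::Context::New(isolate);
      context->SetMicrotaskQueue(queue.get());  // new API from this roll
      // Later: queue->PerformCheckpoint(isolate); runs pending microtasks.
    }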
@@ -77,9 +77,6 @@ struct WrapperDescriptor final {
 };

 struct V8_EXPORT CppHeapCreateParams {
-  CppHeapCreateParams(const CppHeapCreateParams&) = delete;
-  CppHeapCreateParams& operator=(const CppHeapCreateParams&) = delete;
-
   std::vector<std::unique_ptr<cppgc::CustomSpaceBase>> custom_spaces;
   WrapperDescriptor wrapper_descriptor;
   /**

@@ -98,6 +95,10 @@ struct V8_EXPORT CppHeapCreateParams {

 /**
  * A heap for allocating managed C++ objects.
+ *
+ * Similar to v8::Isolate, the heap may only be accessed from one thread at a
+ * time. The heap may be used from different threads using the
+ * v8::Locker/v8::Unlocker APIs which is different from generic Oilpan.
  */
 class V8_EXPORT CppHeap {
  public:
@@ -53,7 +53,7 @@ class V8_EXPORT Data {
   bool IsContext() const;

  private:
-  Data();
+  Data() = delete;
 };

 /**
@@ -72,7 +72,7 @@ class V8_EXPORT EmbedderRootsHandler {
 class V8_EXPORT
 // GCC doesn't like combining __attribute__(()) with [[deprecated]].
 #ifdef __clang__
-V8_DEPRECATE_SOON("Use CppHeap when working with v8::TracedReference.")
+V8_DEPRECATED("Use CppHeap when working with v8::TracedReference.")
 #endif  // __clang__
 EmbedderHeapTracer {
  public:
@@ -581,6 +581,8 @@ class Internals {
   static const int kNodeStateMask = 0x3;
   static const int kNodeStateIsWeakValue = 2;

+  static const int kTracedNodeClassIdOffset = kApiSystemPointerSize;
+
   static const int kFirstNonstringType = 0x80;
   static const int kOddballType = 0x83;
   static const int kForeignType = 0xcc;
@@ -954,22 +954,20 @@ class V8_EXPORT Isolate {
    * Attaches a managed C++ heap as an extension to the JavaScript heap. The
    * embedder maintains ownership of the CppHeap. At most one C++ heap can be
    * attached to V8.
+   *
    * AttachCppHeap cannot be used simultaneously with SetEmbedderHeapTracer.
    *
-   * This is an experimental feature and may still change significantly.
+   * Multi-threaded use requires the use of v8::Locker/v8::Unlocker, see
+   * CppHeap.
    */
   void AttachCppHeap(CppHeap*);

   /**
    * Detaches a managed C++ heap if one was attached using `AttachCppHeap()`.
-   *
-   * This is an experimental feature and may still change significantly.
    */
   void DetachCppHeap();

   /**
-   * This is an experimental feature and may still change significantly.
-   *
    * \returns the C++ heap managed by V8. Only available if such a heap has been
    * attached using `AttachCppHeap()`.
    */
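Taken together with the `CppHeapCreateParams` change above, attaching a C++ heap now looks roughly like the sketch below (illustrative, not from the commit; `platform` and `isolate` are assumed to exist, and the wrapper-descriptor indices and embedder id are placeholder values):

    #include <memory>

    // Create an embedder-owned CppHeap and attach it to an isolate.
    void AttachHeap(v8::Platform* platform, v8::Isolate* isolate) {
      v8::WrapperDescriptor descriptor(/*wrappable_type_index=*/0,
                                       /*wrappable_instance_index=*/1,
                                       /*embedder_id_for_garbage_collected=*/42);
      std::unique_ptr<v8::CppHeap> cpp_heap =
          v8::CppHeap::Create(platform,
                              v8::CppHeapCreateParams{{}, descriptor});
      isolate->AttachCppHeap(cpp_heap.get());  // embedder keeps ownership
      // Call isolate->DetachCppHeap() before destroying cpp_heap.
    }

Note the aggregate initialization of `CppHeapCreateParams` is possible precisely because the deleted copy constructor was removed in the hunk above.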
@@ -1526,8 +1524,10 @@ class V8_EXPORT Isolate {

   void SetWasmLoadSourceMapCallback(WasmLoadSourceMapCallback callback);

+  V8_DEPRECATED("Wasm SIMD is always enabled")
   void SetWasmSimdEnabledCallback(WasmSimdEnabledCallback callback);

+  V8_DEPRECATED("Wasm exceptions are always enabled")
   void SetWasmExceptionsEnabledCallback(WasmExceptionsEnabledCallback callback);

   void SetSharedArrayBufferConstructorEnabledCallback(
@@ -118,7 +118,12 @@ class V8_EXPORT V8_NODISCARD MicrotasksScope {
  public:
   enum Type { kRunMicrotasks, kDoNotRunMicrotasks };

+  V8_DEPRECATE_SOON(
+      "May be incorrect if context was created with non-default microtask "
+      "queue")
   MicrotasksScope(Isolate* isolate, Type type);
+
+  MicrotasksScope(Local<Context> context, Type type);
   MicrotasksScope(Isolate* isolate, MicrotaskQueue* microtask_queue, Type type);
   ~MicrotasksScope();

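A sketch of the migration this deprecation suggests (illustrative; `context` is assumed to be a live `v8::Local<v8::Context>`):

    // Prefer the context-based scope, which uses the queue actually attached
    // to the context instead of assuming the isolate's default queue.
    void RunWithMicrotasks(v8::Local<v8::Context> context) {
      v8::MicrotasksScope scope(context, v8::MicrotasksScope::kRunMicrotasks);
      // ... run script in `context`; its queue's microtasks run when the
      // outermost kRunMicrotasks scope exits (under the kScoped policy).
    }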
@@ -252,7 +252,7 @@ class NonCopyablePersistentTraits {
  * This will clone the contents of storage cell, but not any of the flags, etc.
  */
 template <class T>
-struct CopyablePersistentTraits {
+struct V8_DEPRECATED("Use v8::Global instead") CopyablePersistentTraits {
   using CopyablePersistent = Persistent<T, CopyablePersistentTraits<T>>;
   static const bool kResetInDestructor = true;
   template <class S, class M>
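A sketch of the replacement the deprecation message points at (illustrative; `isolate` and `object` are assumed):

    #include <utility>

    // v8::Global is the move-only replacement for a copyable Persistent.
    // Before (now deprecated):
    //   v8::Persistent<v8::Object, v8::CopyablePersistentTraits<v8::Object>> p;
    void KeepAlive(v8::Isolate* isolate, v8::Local<v8::Object> object) {
      v8::Global<v8::Object> strong(isolate, object);
      v8::Global<v8::Object> owner = std::move(strong);  // transfer, not copy
      // `owner` resets automatically in its destructor.
    }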
@@ -923,6 +923,7 @@ class Platform {

   /**
    * Allows the embedder to manage memory page allocations.
+   * Returning nullptr will cause V8 to use the default page allocator.
    */
   virtual PageAllocator* GetPageAllocator() = 0;

@ -175,6 +175,32 @@ class V8_EXPORT CpuProfileNode {
|
||||||
static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
|
static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* An interface for exporting data from V8, using "push" model.
|
||||||
|
*/
|
||||||
|
class V8_EXPORT OutputStream {
|
||||||
|
public:
|
||||||
|
enum WriteResult { kContinue = 0, kAbort = 1 };
|
||||||
|
virtual ~OutputStream() = default;
|
||||||
|
/** Notify about the end of stream. */
|
+  virtual void EndOfStream() = 0;
+  /** Get preferred output chunk size. Called only once. */
+  virtual int GetChunkSize() { return 1024; }
+  /**
+   * Writes the next chunk of snapshot data into the stream. Writing
+   * can be stopped by returning kAbort as function result. EndOfStream
+   * will not be called in case writing was aborted.
+   */
+  virtual WriteResult WriteAsciiChunk(char* data, int size) = 0;
+  /**
+   * Writes the next chunk of heap stats data into the stream. Writing
+   * can be stopped by returning kAbort as function result. EndOfStream
+   * will not be called in case writing was aborted.
+   */
+  virtual WriteResult WriteHeapStatsChunk(HeapStatsUpdate* data, int count) {
+    return kAbort;
+  }
+};
+
 /**
  * CpuProfile contains a CPU profile in a form of top-down call tree
@@ -182,6 +208,9 @@ class V8_EXPORT CpuProfileNode {
  */
 class V8_EXPORT CpuProfile {
  public:
+  enum SerializationFormat {
+    kJSON = 0  // See format description near 'Serialize' method.
+  };
   /** Returns CPU profile title. */
   Local<String> GetTitle() const;

@@ -235,6 +264,25 @@ class V8_EXPORT CpuProfile {
   * All pointers to nodes previously returned become invalid.
   */
  void Delete();

+  /**
+   * Prepare a serialized representation of the profile. The result
+   * is written into the stream provided in chunks of specified size.
+   *
+   * For the JSON format, heap contents are represented as an object
+   * with the following structure:
+   *
+   *  {
+   *    nodes: [nodes array],
+   *    startTime: number,
+   *    endTime: number
+   *    samples: [strings array]
+   *    timeDeltas: [numbers array]
+   *  }
+   *
+   */
+  void Serialize(OutputStream* stream,
+                 SerializationFormat format = kJSON) const;
 };

 enum CpuProfilingMode {
@@ -576,37 +624,6 @@ class V8_EXPORT HeapGraphNode {
   const HeapGraphEdge* GetChild(int index) const;
 };


-/**
- * An interface for exporting data from V8, using "push" model.
- */
-class V8_EXPORT OutputStream {
- public:
-  enum WriteResult {
-    kContinue = 0,
-    kAbort = 1
-  };
-  virtual ~OutputStream() = default;
-  /** Notify about the end of stream. */
-  virtual void EndOfStream() = 0;
-  /** Get preferred output chunk size. Called only once. */
-  virtual int GetChunkSize() { return 1024; }
-  /**
-   * Writes the next chunk of snapshot data into the stream. Writing
-   * can be stopped by returning kAbort as function result. EndOfStream
-   * will not be called in case writing was aborted.
-   */
-  virtual WriteResult WriteAsciiChunk(char* data, int size) = 0;
-  /**
-   * Writes the next chunk of heap stats data into the stream. Writing
-   * can be stopped by returning kAbort as function result. EndOfStream
-   * will not be called in case writing was aborted.
-   */
-  virtual WriteResult WriteHeapStatsChunk(HeapStatsUpdate* data, int count) {
-    return kAbort;
-  }
-};
-
 /**
  * HeapSnapshots record the state of the JS heap at some moment.
  */
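Note: a minimal sketch of how an embedder could drive the newly public CpuProfile::Serialize through a custom OutputStream. The StringOutputStream class and the surrounding profiler calls are illustrative, not part of this diff.

#include <string>
#include "v8-profiler.h"

// Illustrative sink: accumulates the JSON chunks pushed by Serialize().
class StringOutputStream : public v8::OutputStream {
 public:
  void EndOfStream() override {}
  WriteResult WriteAsciiChunk(char* data, int size) override {
    json_.append(data, static_cast<size_t>(size));
    return kContinue;
  }
  const std::string& json() const { return json_; }

 private:
  std::string json_;
};

// Usage, assuming `profile` came from CpuProfiler::StopProfiling():
//   StringOutputStream stream;
//   profile->Serialize(&stream, v8::CpuProfile::kJSON);
//   // stream.json() now holds the serialized profile.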
@@ -403,7 +403,7 @@ void TracedReferenceBase::SetWrapperClassId(uint16_t class_id) {
   using I = internal::Internals;
   if (IsEmpty()) return;
   internal::Address* obj = reinterpret_cast<internal::Address*>(val_);
-  uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
+  uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kTracedNodeClassIdOffset;
   *reinterpret_cast<uint16_t*>(addr) = class_id;
 }

@@ -411,7 +411,7 @@ uint16_t TracedReferenceBase::WrapperClassId() const {
   using I = internal::Internals;
   if (IsEmpty()) return 0;
   internal::Address* obj = reinterpret_cast<internal::Address*>(val_);
-  uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
+  uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kTracedNodeClassIdOffset;
   return *reinterpret_cast<uint16_t*>(addr);
 }
@@ -244,6 +244,11 @@ class V8_EXPORT Value : public Data {
   */
  bool IsWeakSet() const;

+  /**
+   * Returns true if this value is a WeakRef.
+   */
+  bool IsWeakRef() const;
+
  /**
   * Returns true if this value is an ArrayBuffer.
   */
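Note: a hedged usage sketch for the new type predicate; everything around the IsWeakRef() call is illustrative.

// Illustrative: distinguish WeakRef values from other weak collections.
void DescribeWeakValue(v8::Local<v8::Value> value) {
  if (value->IsWeakRef()) {
    // value wraps a JS WeakRef; its target may already be collected.
  } else if (value->IsWeakSet() || value->IsWeakMap()) {
    // Weak collections take a different path.
  }
}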
@@ -9,9 +9,9 @@
 // NOTE these macros are used by some of the tool scripts and the build
 // system so their names cannot be changed without changing the scripts.
 #define V8_MAJOR_VERSION 10
-#define V8_MINOR_VERSION 8
-#define V8_BUILD_NUMBER 168
-#define V8_PATCH_LEVEL 20
+#define V8_MINOR_VERSION 9
+#define V8_BUILD_NUMBER 194
+#define V8_PATCH_LEVEL 4

 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
@@ -288,6 +288,9 @@ path. Add it with -I<path> to the command line
 //
 // V8_HAS_ATTRIBUTE_ALWAYS_INLINE      - __attribute__((always_inline))
 //                                       supported
+// V8_HAS_ATTRIBUTE_CONSTINIT          - __attribute__((require_constant_
+//                                       initialization))
+//                                       supported
 // V8_HAS_ATTRIBUTE_NONNULL            - __attribute__((nonnull)) supported
 // V8_HAS_ATTRIBUTE_NOINLINE           - __attribute__((noinline)) supported
 // V8_HAS_ATTRIBUTE_UNUSED             - __attribute__((unused)) supported
@@ -334,6 +337,8 @@ path. Add it with -I<path> to the command line
 #endif

 # define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(always_inline))
+# define V8_HAS_ATTRIBUTE_CONSTINIT \
+    (__has_attribute(require_constant_initialization))
 # define V8_HAS_ATTRIBUTE_NONNULL (__has_attribute(nonnull))
 # define V8_HAS_ATTRIBUTE_NOINLINE (__has_attribute(noinline))
 # define V8_HAS_ATTRIBUTE_UNUSED (__has_attribute(unused))
@@ -450,6 +455,16 @@ path. Add it with -I<path> to the command line
 #endif

+
+// A macro to mark a declaration as requiring constant initialization.
+// Use like:
+//   int* foo V8_CONSTINIT;
+#if V8_HAS_ATTRIBUTE_CONSTINIT
+# define V8_CONSTINIT __attribute__((require_constant_initialization))
+#else
+# define V8_CONSTINIT
+#endif
+
+
 // A macro to mark specific arguments as non-null.
 // Use like:
 //   int add(int* x, int y, int* z) V8_NONNULL(1, 3) { return *x + y + *z; }
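Note: a small illustrative use of the new macro, following the "Use like" comment in the hunk above; the names are made up.

// Constant initialization is accepted; with the attribute available, a
// dynamic initializer here would be rejected at compile time.
static int g_counter V8_CONSTINIT = 0;
// static int g_bad V8_CONSTINIT = ComputeAtRuntime();  // would not compile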
@@ -161,7 +161,6 @@
       'tests': [
         {'name': 'benchmarks'},
         {'name': 'benchmarks', 'variant': 'extra'},
-        {'name': 'gcmole'},
         {'name': 'mjsunit_sp_frame_access'},
         {'name': 'mozilla'},
         {'name': 'mozilla', 'variant': 'extra'},
@@ -180,6 +179,7 @@
           ],
           'shards': 4,
         },
+        {'name': 'gcmole'},
       ],
     },
     'v8_linux_optional_rel': {
@@ -847,7 +847,7 @@
         {'name': 'mozilla'},
         {'name': 'test262', 'variant': 'default', 'shards': 2},
         {'name': 'v8testing', 'shards': 2},
-        {'name': 'v8testing', 'variant': 'extra'},
+        {'name': 'v8testing', 'variant': 'extra', 'shards': 2},
       ],
     },
 ##############################################################################
@@ -989,7 +989,6 @@
       'tests': [
         {'name': 'benchmarks'},
         {'name': 'benchmarks', 'variant': 'extra'},
-        {'name': 'gcmole'},
         {'name': 'mjsunit_sp_frame_access'},
         {'name': 'mozilla'},
         {'name': 'mozilla', 'variant': 'extra'},
@@ -1051,6 +1050,7 @@
           'test_args': ['--extra-flags', '--noenable-avx'],
           'shards': 2
         },
+        {'name': 'gcmole'},
       ],
     },
     'V8 Linux - arm64 - sim - CFI': {
@@ -1807,8 +1807,8 @@
       'tests': [
         {'name': 'mozilla'},
         {'name': 'test262', 'variant': 'default'},
-        {'name': 'v8testing'},
-        {'name': 'v8testing', 'variant': 'extra'},
+        {'name': 'v8testing', 'shards': 2},
+        {'name': 'v8testing', 'variant': 'extra', 'shards': 2},
       ],
     },
     'V8 Win64 - debug': {
@@ -1829,7 +1829,7 @@
       'tests': [
         {'name': 'mozilla'},
         {'name': 'test262', 'variant': 'default'},
-        {'name': 'v8testing'},
+        {'name': 'v8testing', 'shards': 2},
       ],
     },
     'V8 Win64 ASAN': {
@@ -83,6 +83,8 @@ MaybeHandle<Object> DefineAccessorProperty(Isolate* isolate,
         InstantiateFunction(isolate,
                             Handle<FunctionTemplateInfo>::cast(getter)),
         Object);
+    Handle<CodeT> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline);
+    Handle<JSFunction>::cast(getter)->set_code(*trampoline);
   }
   if (setter->IsFunctionTemplateInfo() &&
       FunctionTemplateInfo::cast(*setter).BreakAtEntry()) {
@@ -91,6 +93,8 @@ MaybeHandle<Object> DefineAccessorProperty(Isolate* isolate,
         InstantiateFunction(isolate,
                             Handle<FunctionTemplateInfo>::cast(setter)),
         Object);
+    Handle<CodeT> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline);
+    Handle<JSFunction>::cast(setter)->set_code(*trampoline);
   }
   RETURN_ON_EXCEPTION(
       isolate,
@@ -63,6 +63,7 @@
 #include "src/handles/global-handles.h"
 #include "src/handles/persistent-handles.h"
 #include "src/handles/shared-object-conveyor-handles.h"
+#include "src/handles/traced-handles.h"
 #include "src/heap/embedder-tracing.h"
 #include "src/heap/heap-inl.h"
 #include "src/heap/heap-write-barrier.h"
@@ -616,7 +617,10 @@ StartupData SnapshotCreator::CreateBlob(
   i::Snapshot::ClearReconstructableDataForSerialization(
       i_isolate, function_code_handling == FunctionCodeHandling::kClear);

-  i::GlobalSafepointScope global_safepoint(i_isolate);
+  i::SafepointKind safepoint_kind = i_isolate->has_shared_heap()
+                                        ? i::SafepointKind::kGlobal
+                                        : i::SafepointKind::kIsolate;
+  i::SafepointScope safepoint_scope(i_isolate, safepoint_kind);
   i::DisallowGarbageCollection no_gc_from_here_on;

   // Create a vector with all contexts and clear associated Persistent fields.
@@ -654,7 +658,7 @@ StartupData SnapshotCreator::CreateBlob(

   data->created_ = true;
   return i::Snapshot::Create(i_isolate, &contexts, embedder_fields_serializers,
-                             global_safepoint, no_gc_from_here_on);
+                             safepoint_scope, no_gc_from_here_on);
 }

 bool StartupData::CanBeRehashed() const {
@@ -792,8 +796,7 @@ i::Address* GlobalizeTracedReference(i::Isolate* i_isolate, i::Address* obj,
   Utils::ApiCheck((slot != nullptr), "v8::GlobalizeTracedReference",
                   "the address slot must be not null");
 #endif
-  i::Handle<i::Object> result =
-      i_isolate->global_handles()->CreateTraced(*obj, slot, store_mode);
+  auto result = i_isolate->traced_handles()->Create(*obj, slot, store_mode);
 #ifdef VERIFY_HEAP
   if (i::v8_flags.verify_heap) {
     i::Object(*obj).ObjectVerify(i_isolate);
@@ -803,16 +806,16 @@ i::Address* GlobalizeTracedReference(i::Isolate* i_isolate, i::Address* obj,
 }

 void MoveTracedReference(internal::Address** from, internal::Address** to) {
-  GlobalHandles::MoveTracedReference(from, to);
+  TracedHandles::Move(from, to);
 }

 void CopyTracedReference(const internal::Address* const* from,
                          internal::Address** to) {
-  GlobalHandles::CopyTracedReference(from, to);
+  TracedHandles::Copy(from, to);
 }

 void DisposeTracedReference(internal::Address* location) {
-  GlobalHandles::DestroyTracedReference(location);
+  TracedHandles::Destroy(location);
 }

 }  // namespace internal
@@ -3732,6 +3735,7 @@ bool Value::IsWasmModuleObject() const { return false; }
 #endif  // V8_ENABLE_WEBASSEMBLY
 VALUE_IS_SPECIFIC_TYPE(WeakMap, JSWeakMap)
 VALUE_IS_SPECIFIC_TYPE(WeakSet, JSWeakSet)
+VALUE_IS_SPECIFIC_TYPE(WeakRef, JSWeakRef)

 #undef VALUE_IS_SPECIFIC_TYPE
@@ -6611,10 +6615,31 @@ v8::Isolate* Context::GetIsolate() {
 v8::MicrotaskQueue* Context::GetMicrotaskQueue() {
   i::Handle<i::Context> env = Utils::OpenHandle(this);
   Utils::ApiCheck(env->IsNativeContext(), "v8::Context::GetMicrotaskQueue",
-                  "Must be calld on a native context");
+                  "Must be called on a native context");
   return i::Handle<i::NativeContext>::cast(env)->microtask_queue();
 }

+void Context::SetMicrotaskQueue(v8::MicrotaskQueue* queue) {
+  i::Handle<i::Context> context = Utils::OpenHandle(this);
+  i::Isolate* i_isolate = context->GetIsolate();
+  Utils::ApiCheck(context->IsNativeContext(), "v8::Context::SetMicrotaskQueue",
+                  "Must be called on a native context");
+  i::Handle<i::NativeContext> native_context =
+      i::Handle<i::NativeContext>::cast(context);
+  i::HandleScopeImplementer* impl = i_isolate->handle_scope_implementer();
+  Utils::ApiCheck(!native_context->microtask_queue()->IsRunningMicrotasks(),
+                  "v8::Context::SetMicrotaskQueue",
+                  "Must not be running microtasks");
+  Utils::ApiCheck(
+      native_context->microtask_queue()->GetMicrotasksScopeDepth() == 0,
+      "v8::Context::SetMicrotaskQueue", "Must not have microtask scope pushed");
+  Utils::ApiCheck(impl->EnteredContextCount() == 0,
+                  "v8::Context::SetMicrotaskQueue()",
+                  "Cannot set Microtask Queue with an entered context");
+  native_context->set_microtask_queue(
+      i_isolate, static_cast<const i::MicrotaskQueue*>(queue));
+}
+
 v8::Local<v8::Object> Context::Global() {
   i::Handle<i::Context> context = Utils::OpenHandle(this);
   i::Isolate* i_isolate = context->GetIsolate();
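Note: an illustrative embedder-side sketch of the new Context::SetMicrotaskQueue, respecting the preconditions checked above (no running microtasks, no pushed microtask scope, no entered context). The `isolate` variable is assumed to exist.

// Give a fresh context its own queue before entering it or running tasks.
std::unique_ptr<v8::MicrotaskQueue> queue =
    v8::MicrotaskQueue::New(isolate, v8::MicrotasksPolicy::kExplicit);
v8::Local<v8::Context> context = v8::Context::New(isolate);
context->SetMicrotaskQueue(queue.get());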
@@ -8086,14 +8111,32 @@ std::shared_ptr<i::BackingStore> ToInternal(
 }
 }  // namespace

-void v8::ArrayBuffer::Detach() {
+Maybe<bool> v8::ArrayBuffer::Detach(v8::Local<v8::Value> key) {
   i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
   i::Isolate* i_isolate = obj->GetIsolate();
   Utils::ApiCheck(obj->is_detachable(), "v8::ArrayBuffer::Detach",
                   "Only detachable ArrayBuffers can be detached");
-  API_RCS_SCOPE(i_isolate, ArrayBuffer, Detach);
-  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
-  obj->Detach();
+  ENTER_V8_NO_SCRIPT(
+      i_isolate, reinterpret_cast<v8::Isolate*>(i_isolate)->GetCurrentContext(),
+      ArrayBuffer, Detach, Nothing<bool>(), i::HandleScope);
+  if (!key.IsEmpty()) {
+    i::Handle<i::Object> i_key = Utils::OpenHandle(*key);
+    constexpr bool kForceForWasmMemory = false;
+    has_pending_exception =
+        i::JSArrayBuffer::Detach(obj, kForceForWasmMemory, i_key).IsNothing();
+  } else {
+    has_pending_exception = i::JSArrayBuffer::Detach(obj).IsNothing();
+  }
+  RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+  return Just(true);
+}
+
+void v8::ArrayBuffer::Detach() { Detach(Local<Value>()).Check(); }
+
+void v8::ArrayBuffer::SetDetachKey(v8::Local<v8::Value> key) {
+  i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
+  i::Handle<i::Object> i_key = Utils::OpenHandle(*key);
+  obj->set_detach_key(*i_key);
 }

 size_t v8::ArrayBuffer::ByteLength() const {
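Note: an illustrative sketch of the detach-key API added above; the buffer and key values are made up, and `isolate` is assumed.

// Arm the buffer with a detach key, then detach with the matching key.
v8::Local<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(isolate, 1024);
v8::Local<v8::Value> key = v8::String::NewFromUtf8Literal(isolate, "secret");
buffer->SetDetachKey(key);
v8::Maybe<bool> ok = buffer->Detach(key);  // Just(true) on success
// Detaching with a mismatched key throws, and Detach() returns Nothing.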
@@ -9557,16 +9600,19 @@ CALLBACK_SETTER(WasmAsyncResolvePromiseCallback,
 CALLBACK_SETTER(WasmLoadSourceMapCallback, WasmLoadSourceMapCallback,
                 wasm_load_source_map_callback)

-CALLBACK_SETTER(WasmSimdEnabledCallback, WasmSimdEnabledCallback,
-                wasm_simd_enabled_callback)
-
-CALLBACK_SETTER(WasmExceptionsEnabledCallback, WasmExceptionsEnabledCallback,
-                wasm_exceptions_enabled_callback)
-
 CALLBACK_SETTER(SharedArrayBufferConstructorEnabledCallback,
                 SharedArrayBufferConstructorEnabledCallback,
                 sharedarraybuffer_constructor_enabled_callback)

+void Isolate::SetWasmExceptionsEnabledCallback(
+    WasmExceptionsEnabledCallback callback) {
+  // Exceptions are always enabled
+}
+
+void Isolate::SetWasmSimdEnabledCallback(WasmSimdEnabledCallback callback) {
+  // SIMD is always enabled
+}
+
 void Isolate::InstallConditionalFeatures(Local<Context> context) {
   v8::HandleScope handle_scope(this);
   v8::Context::Scope context_scope(context);
@@ -9725,6 +9771,11 @@ MicrotasksScope::MicrotasksScope(Isolate* v8_isolate,
                                  MicrotasksScope::Type type)
     : MicrotasksScope(v8_isolate, nullptr, type) {}

+MicrotasksScope::MicrotasksScope(Local<Context> v8_context,
+                                 MicrotasksScope::Type type)
+    : MicrotasksScope(v8_context->GetIsolate(), v8_context->GetMicrotaskQueue(),
+                      type) {}
+
 MicrotasksScope::MicrotasksScope(Isolate* v8_isolate,
                                  MicrotaskQueue* microtask_queue,
                                  MicrotasksScope::Type type)
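Note: illustrative use of the new context-taking MicrotasksScope constructor; it resolves the queue via GetMicrotaskQueue(), so per-context queues are honored without naming them. The `context` variable is assumed.

{
  v8::MicrotasksScope scope(context, v8::MicrotasksScope::kRunMicrotasks);
  // ... run script in `context` ...
}  // Microtasks queued on `context`'s own queue may run here.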
@@ -10046,6 +10097,21 @@ int64_t CpuProfile::GetEndTime() const {
   return profile->end_time().since_origin().InMicroseconds();
 }

+static i::CpuProfile* ToInternal(const CpuProfile* profile) {
+  return const_cast<i::CpuProfile*>(
+      reinterpret_cast<const i::CpuProfile*>(profile));
+}
+
+void CpuProfile::Serialize(OutputStream* stream,
+                           CpuProfile::SerializationFormat format) const {
+  Utils::ApiCheck(format == kJSON, "v8::CpuProfile::Serialize",
+                  "Unknown serialization format");
+  Utils::ApiCheck(stream->GetChunkSize() > 0, "v8::CpuProfile::Serialize",
+                  "Invalid stream chunk size");
+  i::CpuProfileJSONSerializer serializer(ToInternal(this));
+  serializer.Serialize(stream);
+}
+
 int CpuProfile::GetSamplesCount() const {
   return reinterpret_cast<const i::CpuProfile*>(this)->samples_count();
 }
@@ -10508,7 +10574,7 @@ void EmbedderHeapTracer::IterateTracedGlobalHandles(
     TracedGlobalHandleVisitor* visitor) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(v8_isolate_);
   i::DisallowGarbageCollection no_gc;
-  i_isolate->global_handles()->IterateTracedNodes(visitor);
+  i_isolate->traced_handles()->Iterate(visitor);
 }

 bool EmbedderHeapTracer::IsRootForNonTracingGC(
@@ -133,7 +133,7 @@ void Report(Handle<Script> script, int position, base::Vector<const char> text,
 // Hook to report successful execution of {AsmJs::CompileAsmViaWasm} phase.
 void ReportCompilationSuccess(Handle<Script> script, int position,
                               double compile_time, size_t module_size) {
-  if (FLAG_suppress_asm_messages || !FLAG_trace_asm_time) return;
+  if (v8_flags.suppress_asm_messages || !v8_flags.trace_asm_time) return;
   base::EmbeddedVector<char, 100> text;
   int length = SNPrintF(text, "success, compile time %0.3f ms, %zu bytes",
                         compile_time, module_size);
@@ -146,7 +146,7 @@ void ReportCompilationSuccess(Handle<Script> script, int position,
 // Hook to report failed execution of {AsmJs::CompileAsmViaWasm} phase.
 void ReportCompilationFailure(ParseInfo* parse_info, int position,
                               const char* reason) {
-  if (FLAG_suppress_asm_messages) return;
+  if (v8_flags.suppress_asm_messages) return;
   parse_info->pending_error_handler()->ReportWarningAt(
       position, position, MessageTemplate::kAsmJsInvalid, reason);
 }
@@ -154,7 +154,7 @@ void ReportCompilationFailure(ParseInfo* parse_info, int position,
 // Hook to report successful execution of {AsmJs::InstantiateAsmWasm} phase.
 void ReportInstantiationSuccess(Handle<Script> script, int position,
                                 double instantiate_time) {
-  if (FLAG_suppress_asm_messages || !FLAG_trace_asm_time) return;
+  if (v8_flags.suppress_asm_messages || !v8_flags.trace_asm_time) return;
   base::EmbeddedVector<char, 50> text;
   int length = SNPrintF(text, "success, %0.3f ms", instantiate_time);
   CHECK_NE(-1, length);
@@ -166,7 +166,7 @@ void ReportInstantiationSuccess(Handle<Script> script, int position,
 // Hook to report failed execution of {AsmJs::InstantiateAsmWasm} phase.
 void ReportInstantiationFailure(Handle<Script> script, int position,
                                 const char* reason) {
-  if (FLAG_suppress_asm_messages) return;
+  if (v8_flags.suppress_asm_messages) return;
   base::Vector<const char> text = base::CStrVector(reason);
   Report(script, position, text, MessageTemplate::kAsmJsLinkingFailed,
          v8::Isolate::kMessageWarning);
@@ -237,7 +237,7 @@ UnoptimizedCompilationJob::Status AsmJsCompilationJob::ExecuteJobImpl() {
   stream->Seek(compilation_info()->literal()->start_position());
   wasm::AsmJsParser parser(&translate_zone, stack_limit(), stream);
   if (!parser.Run()) {
-    if (!FLAG_suppress_asm_messages) {
+    if (!v8_flags.suppress_asm_messages) {
       ReportCompilationFailure(parse_info(), parser.failure_location(),
                                parser.failure_message());
     }
@@ -28,7 +28,7 @@ namespace wasm {
     failed_ = true;                                                   \
     failure_message_ = msg;                                           \
     failure_location_ = static_cast<int>(scanner_.Position());        \
-    if (FLAG_trace_asm_parser) {                                      \
+    if (v8_flags.trace_asm_parser) {                                  \
      PrintF("[asm.js failure: %s, token: '%s', see: %s:%d]\n", msg,   \
             scanner_.Name(scanner_.Token()).c_str(), __FILE__, __LINE__); \
    }                                                                  \
@@ -67,7 +67,7 @@ void AsmJsScanner::Next() {
   }

 #if DEBUG
-  if (FLAG_trace_asm_scanner) {
+  if (v8_flags.trace_asm_scanner) {
     if (Token() == kDouble) {
       PrintF("%lf ", AsDouble());
     } else if (Token() == kUnsigned) {
@@ -715,7 +715,7 @@ bool DeclarationScope::Analyze(ParseInfo* info) {
     scope->GetScriptScope()->RewriteReplGlobalVariables();

 #ifdef DEBUG
-  if (FLAG_print_scopes) {
+  if (v8_flags.print_scopes) {
     PrintF("Global scope:\n");
     scope->Print();
   }
@@ -1762,7 +1762,7 @@ void DeclarationScope::AnalyzePartially(Parser* parser,
   }

 #ifdef DEBUG
-  if (FLAG_print_scopes) {
+  if (v8_flags.print_scopes) {
     PrintF("Inner function scope:\n");
     Print();
   }
@@ -7,6 +7,7 @@

 #include <algorithm>
 #include <optional>
+#include <vector>

 namespace v8::base {

@@ -77,6 +78,18 @@ inline size_t count_if(const C& container, const P& predicate) {
   return std::count_if(begin(container), end(container), predicate);
 }

+// Helper for std::all_of.
+template <typename C, typename P>
+inline bool all_of(const C& container, const P& predicate) {
+  return std::all_of(begin(container), end(container), predicate);
+}
+
+// Helper for std::none_of.
+template <typename C, typename P>
+inline bool none_of(const C& container, const P& predicate) {
+  return std::none_of(begin(container), end(container), predicate);
+}
+
 // Returns true iff all elements of {container} compare equal using operator==.
 template <typename C>
 inline bool all_equal(const C& container) {
@@ -87,6 +100,21 @@ inline bool all_equal(const C& container) {
                      [&](const auto& v) { return v == value; });
 }

+// Returns true iff all elements of {container} compare equal to {value} using
+// operator==.
+template <typename C, typename T>
+inline bool all_equal(const C& container, const T& value) {
+  return std::all_of(begin(container), end(container),
+                     [&](const auto& v) { return v == value; });
+}
+
+// Appends to vector {v} all the elements in the range {begin(container)} and
+// {end(container)}.
+template <typename T, typename A, typename C>
+inline void vector_append(std::vector<T, A>& v, const C& container) {
+  v.insert(end(v), begin(container), end(container));
+}
+
 }  // namespace v8::base

 #endif  // V8_BASE_CONTAINER_UTILS_H_
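Note: illustrative calls to the helpers introduced above; assumes the container-utils header and <vector> are included.

std::vector<int> xs = {2, 2, 2};
bool same = v8::base::all_equal(xs);                               // true
bool all_two = v8::base::all_equal(xs, 2);                         // true
bool no_neg = v8::base::none_of(xs, [](int x) { return x < 0; });  // true
bool all_pos = v8::base::all_of(xs, [](int x) { return x > 0; });  // true
std::vector<int> ys = {1};
v8::base::vector_append(ys, xs);  // ys == {1, 2, 2, 2}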
@@ -48,6 +48,13 @@ V8_BASE_EXPORT V8_NOINLINE void V8_Dcheck(const char* file, int line,

 #define UNIMPLEMENTED() FATAL("unimplemented code")
 #define UNREACHABLE() FATAL("unreachable code")
+// g++ versions <= 8 cannot use UNREACHABLE() in a constexpr function.
+// TODO(miladfarca): Remove once all compilers handle this properly.
+#if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ <= 8)
+#define CONSTEXPR_UNREACHABLE() abort()
+#else
+#define CONSTEXPR_UNREACHABLE() UNREACHABLE()
+#endif

 namespace v8 {
 namespace base {
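Note: an illustrative constexpr function using the new macro; the enum is made up. On g++ <= 8 the macro degrades to abort() so the function still compiles as constexpr.

enum class Mode { kA, kB };

constexpr int BitsFor(Mode mode) {
  switch (mode) {
    case Mode::kA: return 1;
    case Mode::kB: return 8;
  }
  CONSTEXPR_UNREACHABLE();  // all enumerators handled above
}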
@@ -23,9 +23,9 @@
 #include <malloc.h>
 #endif  // !V8_OS_DARWIN

-#if (V8_OS_POSIX && !V8_OS_AIX && !V8_OS_SOLARIS) || V8_OS_WIN
+#if (V8_OS_POSIX && !V8_OS_AIX) || V8_OS_WIN
 #define V8_HAS_MALLOC_USABLE_SIZE 1
-#endif  // (V8_OS_POSIX && !V8_OS_AIX && !V8_OS_SOLARIS) || V8_OS_WIN
+#endif  // (V8_OS_POSIX && !V8_OS_AIX) || V8_OS_WIN

 namespace v8::base {

@@ -111,6 +111,8 @@ inline void AlignedFree(void* ptr) {
 // `AllocateAtLeast()` for a safe version.
 inline size_t MallocUsableSize(void* ptr) {
 #if V8_OS_WIN
+  // |_msize| cannot handle a null pointer.
+  if (!ptr) return 0;
   return _msize(ptr);
 #elif V8_OS_DARWIN
   return malloc_size(ptr);
@@ -130,7 +132,7 @@ struct AllocationResult {

 // Allocates at least `n * sizeof(T)` uninitialized storage but may allocate
 // more which is indicated by the return value. Mimics C++23
-// `allocate_ate_least()`.
+// `allocate_at_least()`.
 template <typename T>
 V8_NODISCARD AllocationResult<T*> AllocateAtLeast(size_t n) {
   const size_t min_wanted_size = n * sizeof(T);
@@ -140,13 +142,14 @@ V8_NODISCARD AllocationResult<T*> AllocateAtLeast(size_t n) {
 #else  // V8_HAS_MALLOC_USABLE_SIZE
   const size_t usable_size = MallocUsableSize(memory);
 #if V8_USE_UNDEFINED_BEHAVIOR_SANITIZER
+  if (memory == nullptr)
+    return {nullptr, 0};
   // UBSan (specifically, -fsanitize=bounds) assumes that any access outside
   // of the requested size for malloc is UB and will trap in ud2 instructions.
   // This can be worked around by using `Realloc()` on the specific memory
-  // region, assuming that the allocator doesn't actually reallocate the
-  // buffer.
+  // region.
   if (usable_size != min_wanted_size) {
-    CHECK_EQ(static_cast<T*>(Realloc(memory, usable_size)), memory);
+    memory = static_cast<T*>(Realloc(memory, usable_size));
   }
 #endif  // V8_USE_UNDEFINED_BEHAVIOR_SANITIZER
   return {memory, usable_size};
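Note: an illustrative caller of AllocateAtLeast(). The field names follow the {memory, usable_size} initializer seen above; that they are called `ptr` and `count`, and that v8::base::Free is the matching deallocator, are assumptions.

// Request at least 100 elements, then use whatever actually came back.
v8::base::AllocationResult<char*> result =
    v8::base::AllocateAtLeast<char>(100);
if (result.ptr != nullptr) {
  // result.count >= 100: the extra capacity is usable, not wasted.
  // ... fill result.ptr[0 .. result.count) ...
  v8::base::Free(result.ptr);  // assumption: the matching deallocator
}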
@@ -278,7 +278,6 @@ class V8_BASE_EXPORT SharedMutex final {
   // pthread_rwlock_t is broken on MacOS when signals are being sent to the
   // process (see https://crbug.com/v8/11399).
   // We thus use std::shared_mutex on MacOS, which does not have this problem.
-  // TODO(13256): Use std::shared_mutex directly, on all platforms.
   using NativeHandle = std::shared_mutex;
 #elif V8_OS_POSIX
   using NativeHandle = pthread_rwlock_t;
@@ -8,6 +8,7 @@
 #include "src/baseline/baseline-assembler.h"
 #include "src/codegen/interface-descriptors.h"
 #include "src/codegen/ppc/assembler-ppc-inl.h"
+#include "src/codegen/ppc/register-ppc.h"
 #include "src/objects/literal-objects-inl.h"

 namespace v8 {
@@ -596,6 +597,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(

 void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                        uint32_t depth) {
+  ASM_CODE_COMMENT(masm_);
   for (; depth > 0; --depth) {
     LoadTaggedPointerField(context, context, Context::kPreviousOffset);
   }
@@ -605,15 +607,23 @@ void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,

 void BaselineAssembler::StaContextSlot(Register context, Register value,
                                        uint32_t index, uint32_t depth) {
+  ASM_CODE_COMMENT(masm_);
+  if (depth > 0) {
     for (; depth > 0; --depth) {
       LoadTaggedPointerField(context, context, Context::kPreviousOffset);
     }
+    if (COMPRESS_POINTERS_BOOL) {
+      // Decompress tagged pointer.
+      __ AddS64(context, context, kPtrComprCageBaseRegister);
+    }
+  }
   StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                    value);
 }

 void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                           uint32_t depth) {
+  ASM_CODE_COMMENT(masm_);
   for (; depth > 0; --depth) {
     LoadTaggedPointerField(context, context, Context::kPreviousOffset);
   }
@@ -636,6 +646,7 @@ void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,

 void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                           int cell_index, uint32_t depth) {
+  ASM_CODE_COMMENT(masm_);
   for (; depth > 0; --depth) {
     LoadTaggedPointerField(context, context, Context::kPreviousOffset);
   }
@@ -650,6 +661,7 @@ void BaselineAssembler::StaModuleVariable(Register context, Register value,
 }

 void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
+  ASM_CODE_COMMENT(masm_);
   if (rhs.value() == 0) return;
   __ LoadSmiLiteral(r0, rhs);
   if (SmiValuesAre31Bits()) {
@@ -25,6 +25,7 @@
 #include "src/runtime/runtime.h"

 #if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/baseline/liftoff-assembler-defs.h"
 #include "src/wasm/wasm-linkage.h"
 #include "src/wasm/wasm-objects.h"
 #endif  // V8_ENABLE_WEBASSEMBLY
@@ -2532,63 +2533,123 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
 }

 #if V8_ENABLE_WEBASSEMBLY

+struct SaveWasmParamsScope {
+  explicit SaveWasmParamsScope(MacroAssembler* masm)
+      : lowest_fp_reg(std::begin(wasm::kFpParamRegisters)[0]),
+        highest_fp_reg(std::end(wasm::kFpParamRegisters)[-1]),
+        masm(masm) {
+    for (Register gp_param_reg : wasm::kGpParamRegisters) {
+      gp_regs.set(gp_param_reg);
+    }
+    gp_regs.set(lr);
+    for (DwVfpRegister fp_param_reg : wasm::kFpParamRegisters) {
+      CHECK(fp_param_reg.code() >= lowest_fp_reg.code() &&
+            fp_param_reg.code() <= highest_fp_reg.code());
+    }
+
+    CHECK_EQ(gp_regs.Count(), arraysize(wasm::kGpParamRegisters) + 1);
+    CHECK_EQ(highest_fp_reg.code() - lowest_fp_reg.code() + 1,
+             arraysize(wasm::kFpParamRegisters));
+    CHECK_EQ(gp_regs.Count(),
+             WasmLiftoffSetupFrameConstants::kNumberOfSavedGpParamRegs +
+                 1 /* instance */ + 1 /* lr */);
+    CHECK_EQ(highest_fp_reg.code() - lowest_fp_reg.code() + 1,
+             WasmLiftoffSetupFrameConstants::kNumberOfSavedFpParamRegs);
+
+    __ stm(db_w, sp, gp_regs);
+    __ vstm(db_w, sp, lowest_fp_reg, highest_fp_reg);
+  }
+  ~SaveWasmParamsScope() {
+    __ vldm(ia_w, sp, lowest_fp_reg, highest_fp_reg);
+    __ ldm(ia_w, sp, gp_regs);
+  }
+
+  RegList gp_regs;
+  DwVfpRegister lowest_fp_reg;
+  DwVfpRegister highest_fp_reg;
+  MacroAssembler* masm;
+};
+
+// This builtin creates the following stack frame:
+//
+// [ feedback vector ]  <-- sp  // Added by this builtin.
+// [ Wasm instance   ]          // Added by this builtin.
+// [ WASM frame marker ]        // Already there on entry.
+// [ saved fp        ]  <-- fp  // Already there on entry.
+void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
+  Register func_index = wasm::kLiftoffFrameSetupFunctionReg;
+  Register vector = r5;
+  Register scratch = r7;
+  Label allocate_vector, done;
+
+  __ ldr(vector, FieldMemOperand(kWasmInstanceRegister,
+                                 WasmInstanceObject::kFeedbackVectorsOffset));
+  __ add(vector, vector, Operand(func_index, LSL, kTaggedSizeLog2));
+  __ ldr(vector, FieldMemOperand(vector, FixedArray::kHeaderSize));
+  __ JumpIfSmi(vector, &allocate_vector);
+  __ bind(&done);
+  __ push(kWasmInstanceRegister);
+  __ push(vector);
+  __ Ret();
+
+  __ bind(&allocate_vector);
+
+  // Feedback vector doesn't exist yet. Call the runtime to allocate it.
+  // We temporarily change the frame type for this, because we need special
+  // handling by the stack walker in case of GC.
+  __ mov(scratch,
+         Operand(StackFrame::TypeToMarker(StackFrame::WASM_LIFTOFF_SETUP)));
+  __ str(scratch, MemOperand(sp));
+  {
+    SaveWasmParamsScope save_params(masm);
+    // Arguments to the runtime function: instance, func_index.
+    __ push(kWasmInstanceRegister);
+    __ SmiTag(func_index);
+    __ push(func_index);
+    // Allocate a stack slot where the runtime function can spill a pointer
+    // to the {NativeModule}.
+    __ push(r8);
+    __ Move(cp, Smi::zero());
+    __ CallRuntime(Runtime::kWasmAllocateFeedbackVector, 3);
+    __ mov(vector, kReturnRegister0);
+    // Saved parameters are restored at the end of this block.
+  }
+  __ mov(scratch, Operand(StackFrame::TypeToMarker(StackFrame::WASM)));
+  __ str(scratch, MemOperand(sp));
+  __ b(&done);
+}
+
 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
   // The function index was put in a register by the jump table trampoline.
   // Convert to Smi for the runtime call.
   __ SmiTag(kWasmCompileLazyFuncIndexRegister);
   {
     HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
-    FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);

-    // Save all parameter registers (see wasm-linkage.h). They might be
-    // overwritten in the runtime call below. We don't have any callee-saved
-    // registers in wasm, so no need to store anything else.
-    RegList gp_regs;
-    for (Register gp_param_reg : wasm::kGpParamRegisters) {
-      gp_regs.set(gp_param_reg);
-    }
-    DwVfpRegister lowest_fp_reg = std::begin(wasm::kFpParamRegisters)[0];
-    DwVfpRegister highest_fp_reg = std::end(wasm::kFpParamRegisters)[-1];
-    for (DwVfpRegister fp_param_reg : wasm::kFpParamRegisters) {
-      CHECK(fp_param_reg.code() >= lowest_fp_reg.code() &&
-            fp_param_reg.code() <= highest_fp_reg.code());
-    }
-
-    CHECK_EQ(gp_regs.Count(), arraysize(wasm::kGpParamRegisters));
-    CHECK_EQ(highest_fp_reg.code() - lowest_fp_reg.code() + 1,
-             arraysize(wasm::kFpParamRegisters));
-    CHECK_EQ(gp_regs.Count(),
-             WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs + 1);
-    CHECK_EQ(highest_fp_reg.code() - lowest_fp_reg.code() + 1,
-             WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs);
-
-    __ stm(db_w, sp, gp_regs);
-    __ vstm(db_w, sp, lowest_fp_reg, highest_fp_reg);
+    {
+      SaveWasmParamsScope save_params(masm);

       // Push the Wasm instance as an explicit argument to the runtime function.
       __ push(kWasmInstanceRegister);
       // Push the function index as second argument.
       __ push(kWasmCompileLazyFuncIndexRegister);
-    // Allocate a stack slot for the NativeModule, the pushed value does not
-    // matter.
-    __ push(r8);
       // Initialize the JavaScript context with 0. CEntry will use it to
       // set the current context on the isolate.
       __ Move(cp, Smi::zero());
-    __ CallRuntime(Runtime::kWasmCompileLazy, 3);
+      __ CallRuntime(Runtime::kWasmCompileLazy, 2);
       // The runtime function returns the jump table slot offset as a Smi. Use
       // that to compute the jump target in r8.
       __ mov(r8, Operand::SmiUntag(kReturnRegister0));

-    // Restore registers.
-    __ vldm(ia_w, sp, lowest_fp_reg, highest_fp_reg);
-    __ ldm(ia_w, sp, gp_regs);
+      // Saved parameters are restored at the end of this block.
+    }

     // After the instance register has been restored, we can add the jump table
     // start to the jump table offset already stored in r8.
-    __ ldr(r9, MemOperand(
-        kWasmInstanceRegister,
-        WasmInstanceObject::kJumpTableStartOffset - kHeapObjectTag));
+    __ ldr(r9, FieldMemOperand(kWasmInstanceRegister,
+                               WasmInstanceObject::kJumpTableStartOffset));
     __ add(r8, r8, r9);
   }
@ -26,11 +26,12 @@
|
||||||
#include "src/runtime/runtime.h"
|
#include "src/runtime/runtime.h"
|
||||||
|
|
||||||
#if V8_ENABLE_WEBASSEMBLY
|
#if V8_ENABLE_WEBASSEMBLY
|
||||||
#include "src/wasm/wasm-linkage.h"
|
#include "src/wasm/baseline/liftoff-assembler-defs.h"
|
||||||
#include "src/wasm/wasm-objects.h"
|
|
||||||
#include "src/wasm/object-access.h"
|
#include "src/wasm/object-access.h"
|
||||||
#include "src/wasm/stacks.h"
|
#include "src/wasm/stacks.h"
|
||||||
#include "src/wasm/wasm-constants.h"
|
#include "src/wasm/wasm-constants.h"
|
||||||
|
#include "src/wasm/wasm-linkage.h"
|
||||||
|
#include "src/wasm/wasm-objects.h"
|
||||||
#endif // V8_ENABLE_WEBASSEMBLY
|
#endif // V8_ENABLE_WEBASSEMBLY
|
||||||
|
|
||||||
#if defined(V8_OS_WIN)
|
#if defined(V8_OS_WIN)
|
||||||
|
@ -2922,33 +2923,22 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
|
||||||
}
|
}
|
||||||
|
|
||||||
#if V8_ENABLE_WEBASSEMBLY
|
#if V8_ENABLE_WEBASSEMBLY
|
||||||
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
|
|
||||||
// The function index was put in w8 by the jump table trampoline.
|
|
||||||
// Sign extend and convert to Smi for the runtime call.
|
|
||||||
__ sxtw(kWasmCompileLazyFuncIndexRegister,
|
|
||||||
kWasmCompileLazyFuncIndexRegister.W());
|
|
||||||
__ SmiTag(kWasmCompileLazyFuncIndexRegister);
|
|
||||||
|
|
||||||
// Compute register lists for parameters to be saved. We save all parameter
|
// Compute register lists for parameters to be saved. We save all parameter
|
||||||
// registers (see wasm-linkage.h). They might be overwritten in the runtime
|
// registers (see wasm-linkage.h). They might be overwritten in runtime
|
||||||
// call below. We don't have any callee-saved registers in wasm, so no need to
|
// calls. We don't have any callee-saved registers in wasm, so no need to
|
||||||
// store anything else.
|
// store anything else.
|
||||||
constexpr RegList kSavedGpRegs = ([]() constexpr {
|
constexpr RegList kSavedGpRegs = ([]() constexpr {
|
||||||
RegList saved_gp_regs;
|
RegList saved_gp_regs;
|
||||||
for (Register gp_param_reg : wasm::kGpParamRegisters) {
|
for (Register gp_param_reg : wasm::kGpParamRegisters) {
|
||||||
saved_gp_regs.set(gp_param_reg);
|
saved_gp_regs.set(gp_param_reg);
|
||||||
}
|
}
|
||||||
// Also push x1, because we must push multiples of 16 bytes (see
|
// The instance has already been stored in the fixed part of the frame.
|
||||||
// {TurboAssembler::PushCPURegList}.
|
saved_gp_regs.clear(kWasmInstanceRegister);
|
||||||
saved_gp_regs.set(x1);
|
// All set registers were unique. The instance is skipped.
|
||||||
// All set registers were unique.
|
CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters) - 1);
|
||||||
CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters) + 1);
|
|
||||||
// We push a multiple of 16 bytes.
|
// We push a multiple of 16 bytes.
|
||||||
CHECK_EQ(0, saved_gp_regs.Count() % 2);
|
CHECK_EQ(0, saved_gp_regs.Count() % 2);
|
||||||
// The Wasm instance must be part of the saved registers.
|
CHECK_EQ(WasmLiftoffSetupFrameConstants::kNumberOfSavedGpParamRegs,
|
||||||
CHECK(saved_gp_regs.has(kWasmInstanceRegister));
|
|
||||||
// + instance + alignment
|
|
||||||
CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs + 2,
|
|
||||||
saved_gp_regs.Count());
|
saved_gp_regs.Count());
|
||||||
return saved_gp_regs;
|
return saved_gp_regs;
|
||||||
})();
|
})();
|
||||||
|
@ -2960,29 +2950,104 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
|
||||||
}
|
}
|
||||||
|
|
||||||
CHECK_EQ(saved_fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
|
CHECK_EQ(saved_fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
|
||||||
CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
|
CHECK_EQ(WasmLiftoffSetupFrameConstants::kNumberOfSavedFpParamRegs,
|
||||||
saved_fp_regs.Count());
|
saved_fp_regs.Count());
|
||||||
return saved_fp_regs;
|
return saved_fp_regs;
|
||||||
})();
|
})();
|
||||||
|
|
||||||
|
// When entering this builtin, we have just created a Wasm stack frame:
|
||||||
|
//
|
||||||
|
// [ Wasm instance ] <-- sp
|
||||||
|
// [ WASM frame marker ]
|
||||||
|
// [ saved fp ] <-- fp
|
||||||
|
//
|
||||||
|
// Due to stack alignment restrictions, this builtin adds the feedback vector
|
||||||
|
// plus a filler to the stack. The stack pointer will be
|
||||||
|
// moved an appropriate distance by {PatchPrepareStackFrame}.
|
||||||
|
//
|
||||||
|
// [ (unused) ] <-- sp
|
||||||
|
// [ feedback vector ]
|
||||||
|
// [ Wasm instance ]
|
||||||
|
// [ WASM frame marker ]
|
||||||
|
// [ saved fp ] <-- fp
|
||||||
|
void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
|
||||||
|
Register func_index = wasm::kLiftoffFrameSetupFunctionReg;
|
||||||
|
Register vector = x9;
|
||||||
|
Register scratch = x10;
|
||||||
|
Label allocate_vector, done;
|
||||||
|
|
||||||
|
__ LoadTaggedPointerField(
|
||||||
|
vector, FieldMemOperand(kWasmInstanceRegister,
|
||||||
|
WasmInstanceObject::kFeedbackVectorsOffset));
|
||||||
|
__ Add(vector, vector, Operand(func_index, LSL, kTaggedSizeLog2));
|
||||||
|
__ LoadTaggedPointerField(vector,
|
||||||
|
FieldMemOperand(vector, FixedArray::kHeaderSize));
|
||||||
|
__ JumpIfSmi(vector, &allocate_vector);
|
||||||
|
__ bind(&done);
|
||||||
|
__ Push(vector, xzr);
|
||||||
|
__ Ret();
|
||||||
|
|
||||||
|
__ bind(&allocate_vector);
|
||||||
|
// Feedback vector doesn't exist yet. Call the runtime to allocate it.
|
||||||
|
// We temporarily change the frame type for this, because we need special
|
||||||
|
// handling by the stack walker in case of GC.
|
||||||
|
__ Mov(scratch, StackFrame::TypeToMarker(StackFrame::WASM_LIFTOFF_SETUP));
|
||||||
|
__ Str(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
|
||||||
|
// Save registers.
|
||||||
|
__ PushXRegList(kSavedGpRegs);
|
||||||
|
__ PushQRegList(kSavedFpRegs);
|
||||||
|
__ Push<TurboAssembler::kSignLR>(lr, xzr); // xzr is for alignment.
|
||||||
|
|
||||||
|
// Arguments to the runtime function: instance, func_index, and an
|
||||||
|
// additional stack slot for the NativeModule. The first pushed register
|
||||||
|
// is for alignment. {x0} and {x1} are picked arbitrarily.
|
||||||
|
__ SmiTag(func_index);
|
||||||
|
__ Push(x0, kWasmInstanceRegister, func_index, x1);
|
||||||
|
__ Mov(cp, Smi::zero());
|
||||||
|
__ CallRuntime(Runtime::kWasmAllocateFeedbackVector, 3);
|
||||||
|
__ Mov(vector, kReturnRegister0);
|
||||||
|
|
||||||
|
// Restore registers and frame type.
|
||||||
|
__ Pop<TurboAssembler::kAuthLR>(xzr, lr);
|
||||||
|
__ PopQRegList(kSavedFpRegs);
|
||||||
|
__ PopXRegList(kSavedGpRegs);
|
||||||
|
// Restore the instance from the frame.
|
||||||
|
__ Ldr(kWasmInstanceRegister,
|
||||||
|
MemOperand(fp, WasmFrameConstants::kWasmInstanceOffset));
|
||||||
|
__ Mov(scratch, StackFrame::TypeToMarker(StackFrame::WASM));
|
||||||
|
__ Str(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
|
||||||
|
__ B(&done);
|
||||||
|
}
+
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
  // The function index was put in w8 by the jump table trampoline.
  // Sign extend and convert to Smi for the runtime call.
  __ sxtw(kWasmCompileLazyFuncIndexRegister,
          kWasmCompileLazyFuncIndexRegister.W());
  __ SmiTag(kWasmCompileLazyFuncIndexRegister);

  UseScratchRegisterScope temps(masm);
  temps.Exclude(x17);
  {
    HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
-    FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Manually save the instance (which kSavedGpRegs skips because its
+    // other use puts it into the fixed frame anyway). The stack slot is valid
+    // because the {FrameScope} (via {EnterFrame}) always reserves it (for stack
+    // alignment reasons). The instance is needed because once this builtin is
+    // done, we'll call a regular Wasm function.
+    __ Str(kWasmInstanceRegister,
+           MemOperand(fp, WasmFrameConstants::kWasmInstanceOffset));

    // Save registers that we need to keep alive across the runtime call.
    __ PushXRegList(kSavedGpRegs);
    __ PushQRegList(kSavedFpRegs);

-    // Pass instance, function index, and an additional stack slot for the
-    // native module, as explicit arguments to the runtime function. The first
-    // pushed register is for alignment. {x0} and {x1} are picked arbitrarily.
-    __ Push(x0, kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister, x1);
+    __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
    // Initialize the JavaScript context with 0. CEntry will use it to
    // set the current context on the isolate.
    __ Mov(cp, Smi::zero());
-    __ CallRuntime(Runtime::kWasmCompileLazy, 3);
+    __ CallRuntime(Runtime::kWasmCompileLazy, 2);

    // Untag the returned Smi into x17 (ip1), for later use.
    static_assert(!kSavedGpRegs.has(x17));

@@ -2991,6 +3056,9 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
    // Restore registers.
    __ PopQRegList(kSavedFpRegs);
    __ PopXRegList(kSavedGpRegs);
+    // Restore the instance from the frame.
+    __ Ldr(kWasmInstanceRegister,
+           MemOperand(fp, WasmFrameConstants::kWasmInstanceOffset));
  }

  // The runtime function returned the jump table slot offset as a Smi (now in
@@ -2998,9 +3066,8 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
  // target, to be compliant with CFI.
  constexpr Register temp = x8;
  static_assert(!kSavedGpRegs.has(temp));
-  __ ldr(temp, MemOperand(
-                   kWasmInstanceRegister,
-                   WasmInstanceObject::kJumpTableStartOffset - kHeapObjectTag));
+  __ ldr(temp, FieldMemOperand(kWasmInstanceRegister,
+                               WasmInstanceObject::kJumpTableStartOffset));
  __ add(x17, temp, Operand(x17));
  // Finally, jump to the jump table slot for the function.
  __ Jump(x17);
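The last hunk swaps a hand-written "offset - kHeapObjectTag" for FieldMemOperand. A small hedged sketch of the arithmetic it folds in (the tag value below is an assumption for illustration):

#include <cstdint>

// V8 heap pointers are tagged; a field at byte offset k inside the object
// is therefore located at pointer + k - tag. FieldMemOperand performs the
// subtraction so call sites don't repeat "- kHeapObjectTag" by hand.
constexpr intptr_t kHeapObjectTagSketch = 1;

intptr_t FieldAddress(intptr_t tagged_object, intptr_t field_offset) {
  return tagged_object + field_offset - kHeapObjectTagSketch;
}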
@@ -65,7 +65,8 @@ ArrayTimSortIntoCopy(context: Context, sortState: SortState): JSArray {
  if (sortState.numberOfUndefined != 0) goto FastObject;

  const workArray = sortState.workArray;
-  for (let i: Smi = 0; i < workArray.length; ++i) {
+  dcheck(numberOfNonUndefined <= workArray.length);
+  for (let i: Smi = 0; i < numberOfNonUndefined; ++i) {
    const e = UnsafeCast<JSAny>(workArray.objects[i]);
    // TODO(v8:12764): ArrayTimSortImpl already boxed doubles. Support
    // PACKED_DOUBLE_ELEMENTS.
@@ -603,8 +603,11 @@ extern macro SelectBooleanConstant(bool): Boolean;

extern macro Print(constexpr string): void;
extern macro Print(constexpr string, Object): void;
-extern macro Comment(constexpr string): void;
extern macro Print(Object): void;
+extern macro PrintErr(constexpr string): void;
+extern macro PrintErr(constexpr string, Object): void;
+extern macro PrintErr(Object): void;
+extern macro Comment(constexpr string): void;
extern macro DebugBreak(): void;

// ES6 7.1.4 ToInteger ( argument )
@@ -53,7 +53,7 @@ Object ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
  // Ensure that all fields are initialized because BackingStore::Allocate is
  // allowed to GC. Note that we cannot move the allocation of the ArrayBuffer
  // after BackingStore::Allocate because of the spec.
-  array_buffer->Setup(shared, resizable, nullptr);
+  array_buffer->Setup(shared, resizable, nullptr, isolate);

  size_t byte_length;
  size_t max_byte_length = 0;
@@ -558,7 +558,8 @@ BUILTIN(ArrayBufferPrototypeTransfer) {
  // Nothing to do for steps 6-12.

  // 13. Perform ? DetachArrayBuffer(O).
-  array_buffer->Detach();
+  MAYBE_RETURN(JSArrayBuffer::Detach(array_buffer),
+               ReadOnlyRoots(isolate).exception());

  // 14. Return new.
  return *isolate->factory()
@@ -581,7 +582,8 @@ BUILTIN(ArrayBufferPrototypeTransfer) {
  }

  // 13. Perform ? DetachArrayBuffer(O).
-  array_buffer->Detach();
+  MAYBE_RETURN(JSArrayBuffer::Detach(array_buffer),
+               ReadOnlyRoots(isolate).exception());

  // 14. Return new.
  return *isolate->factory()->NewJSArrayBuffer(std::move(from_backing_store));
@@ -623,7 +625,8 @@ BUILTIN(ArrayBufferPrototypeTransfer) {
  }

  // 13. Perform ? DetachArrayBuffer(O).
-  array_buffer->Detach();
+  MAYBE_RETURN(JSArrayBuffer::Detach(array_buffer),
+               ReadOnlyRoots(isolate).exception());

  // 14. Return new.
  return *new_;
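The three hunks above all route detaching through a Maybe-style result, so a failed detach (for example, a detach-key mismatch) propagates as an exception instead of being silently ignored. A hedged standalone sketch of that pattern, with stand-in types rather than the real V8 Maybe/MAYBE_RETURN machinery:

#include <optional>

struct Buffer { bool detached = false; };

// Stand-in for a Maybe<bool>-returning operation that can fail.
std::optional<bool> Detach(Buffer* b) {
  if (b->detached) return std::nullopt;  // failure path
  b->detached = true;
  return true;
}

// Stand-in for MAYBE_RETURN: bail out with a sentinel on failure.
#define MAYBE_RETURN_SKETCH(call, value)       \
  do {                                         \
    if (!(call).has_value()) return (value);   \
  } while (false)

int Transfer(Buffer* b) {
  MAYBE_RETURN_SKETCH(Detach(b), -1);  // -1 plays the role of the exception
  return 0;
}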
@@ -93,6 +93,21 @@ class BigIntBuiltinsAssembler : public CodeStubAssembler {
    return return_code;
  }

+  TNode<Int32T> CppAbsoluteModAndCanonicalize(TNode<BigInt> result,
+                                              TNode<BigInt> x,
+                                              TNode<BigInt> y) {
+    TNode<ExternalReference> mutable_big_int_absolute_mod_and_canonicalize =
+        ExternalConstant(
+            ExternalReference::
+                mutable_big_int_absolute_mod_and_canonicalize_function());
+    TNode<Int32T> return_code = UncheckedCast<Int32T>(CallCFunction(
+        mutable_big_int_absolute_mod_and_canonicalize, MachineType::Int32(),
+        std::make_pair(MachineType::AnyTagged(), result),
+        std::make_pair(MachineType::AnyTagged(), x),
+        std::make_pair(MachineType::AnyTagged(), y)));
+    return return_code;
+  }
+
  void CppBitwiseAndPosPosAndCanonicalize(TNode<BigInt> result, TNode<BigInt> x,
                                          TNode<BigInt> y) {
    TNode<ExternalReference>
@@ -17,6 +17,8 @@ extern macro BigIntBuiltinsAssembler::CppAbsoluteMulAndCanonicalize(
    MutableBigInt, BigIntBase, BigIntBase): int32;
extern macro BigIntBuiltinsAssembler::CppAbsoluteDivAndCanonicalize(
    MutableBigInt, BigIntBase, BigIntBase): int32;
+extern macro BigIntBuiltinsAssembler::CppAbsoluteModAndCanonicalize(
+    MutableBigInt, BigIntBase, BigIntBase): int32;
extern macro BigIntBuiltinsAssembler::CppBitwiseAndPosPosAndCanonicalize(
    MutableBigInt, BigIntBase, BigIntBase): void;
extern macro BigIntBuiltinsAssembler::CppBitwiseAndNegNegAndCanonicalize(
@@ -341,6 +343,70 @@ builtin BigIntDivide(implicit context: Context)(
  }
}

+macro BigIntModulusImpl(implicit context: Context)(x: BigInt, y: BigInt):
+    BigInt labels BigIntDivZero, TerminationRequested {
+  const ylength = ReadBigIntLength(y);
+
+  // case: x % 0n
+  if (ylength == 0) {
+    goto BigIntDivZero;
+  }
+
+  // case: x % y, where x < y
+  if (MutableBigIntAbsoluteCompare(x, y) < 0) {
+    return x;
+  }
+
+  // case: x % 1n or x % -1n
+  if (ylength == 1 && LoadBigIntDigit(y, 0) == 1) {
+    const zero = AllocateEmptyBigInt(kPositiveSign, 0);
+    return Convert<BigInt>(zero);
+  }
+
+  // case: x % y
+  const resultSign = ReadBigIntSign(x);
+  const resultLength = ylength;
+  const result = AllocateEmptyBigIntNoThrow(resultSign, resultLength)
+      otherwise unreachable;
+
+  if (CppAbsoluteModAndCanonicalize(result, x, y) == 1) {
+    goto TerminationRequested;
+  }
+
+  return Convert<BigInt>(result);
+}
+
+builtin BigIntModulusNoThrow(implicit context: Context)(
+    x: BigInt, y: BigInt): Numeric {
+  try {
+    return BigIntModulusImpl(x, y) otherwise BigIntDivZero,
+        TerminationRequested;
+  } label BigIntDivZero {
+    // Smi sentinel 0 is used to signal BigIntDivZero exception.
+    return Convert<Smi>(0);
+  } label TerminationRequested {
+    // Smi sentinel 1 is used to signal TerminateExecution exception.
+    return Convert<Smi>(1);
+  }
+}
+
+builtin BigIntModulus(implicit context: Context)(
+    xNum: Numeric, yNum: Numeric): BigInt {
+  try {
+    const x = Cast<BigInt>(xNum) otherwise MixedTypes;
+    const y = Cast<BigInt>(yNum) otherwise MixedTypes;
+
+    return BigIntModulusImpl(x, y) otherwise BigIntDivZero,
+        TerminationRequested;
+  } label MixedTypes {
+    ThrowTypeError(MessageTemplate::kBigIntMixedTypes);
+  } label BigIntDivZero {
+    ThrowRangeError(MessageTemplate::kBigIntDivZero);
+  } label TerminationRequested {
+    TerminateExecution();
+  }
+}
+
macro BigIntBitwiseAndImpl(implicit context: Context)(
    x: BigInt, y: BigInt): BigInt labels BigIntTooBig {
  const xlength = ReadBigIntLength(x);
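BigIntModulusNoThrow above signals failure with Smi sentinels (0 for division by zero, 1 for a termination request) instead of throwing, and the caller translates those back into exceptions. A rough C++ analogue of that protocol, using a variant as a stand-in for the tagged return value (names assumed, not V8 API):

#include <variant>

struct BigIntValue { /* digits elided */ };
using SmiSentinel = int;  // 0 => div-by-zero, 1 => termination requested
using ModResult = std::variant<BigIntValue, SmiSentinel>;

enum class Status { kOk, kDivZero, kTerminated };

// The caller inspects the result: a real value means success, a sentinel
// is mapped back to the corresponding exceptional outcome.
Status Interpret(const ModResult& r, BigIntValue* out) {
  if (auto* v = std::get_if<BigIntValue>(&r)) {
    *out = *v;
    return Status::kOk;
  }
  return std::get<SmiSentinel>(r) == 0 ? Status::kDivZero
                                       : Status::kTerminated;
}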
@@ -68,7 +68,7 @@ void BaseCollectionsAssembler::AddConstructorEntries(
  }
  BIND(&fast_loop);
  {
-    Label if_exception_during_fast_iteration(this);
+    Label if_exception_during_fast_iteration(this, Label::kDeferred);
    TNode<JSArray> initial_entries_jsarray =
        UncheckedCast<JSArray>(initial_entries);
#if DEBUG
@@ -983,12 +983,11 @@ namespace internal {
  IF_WASM(ASM, WasmResume, WasmDummy)                                  \
  IF_WASM(ASM, WasmReject, WasmDummy)                                  \
  IF_WASM(ASM, WasmCompileLazy, WasmDummy)                             \
+  IF_WASM(ASM, WasmLiftoffFrameSetup, WasmDummy)                       \
  IF_WASM(ASM, WasmDebugBreak, WasmDummy)                              \
  IF_WASM(ASM, WasmOnStackReplace, WasmDummy)                          \
  IF_WASM(TFC, WasmFloat32ToNumber, WasmFloat32ToNumber)               \
  IF_WASM(TFC, WasmFloat64ToNumber, WasmFloat64ToNumber)               \
-  IF_WASM(TFC, WasmI32AtomicWait32, WasmI32AtomicWait32)               \
-  IF_WASM(TFC, WasmI64AtomicWait32, WasmI64AtomicWait32)               \
  IF_WASM(TFC, JSToWasmLazyDeoptContinuation, SingleParameterOnStack)  \
                                                                       \
  /* WeakMap */                                                        \
@@ -1873,7 +1872,7 @@ namespace internal {
  /* ES #sec-string.prototype.normalize */                             \
  CPP(StringPrototypeNormalizeIntl)                                    \
  /* ecma402 #sup-string.prototype.tolocalelowercase */                \
-  CPP(StringPrototypeToLocaleLowerCase)                                \
+  TFJ(StringPrototypeToLocaleLowerCase, kDontAdaptArgumentsSentinel)   \
  /* ecma402 #sup-string.prototype.tolocaleuppercase */                \
  CPP(StringPrototypeToLocaleUpperCase)                                \
  /* ES #sec-string.prototype.tolowercase */                           \
@@ -37,11 +37,80 @@ class IntlBuiltinsAssembler : public CodeStubAssembler {
        BitcastTaggedToWord(seq_string),
        IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
  }

+  TNode<Uint8T> GetChar(TNode<SeqOneByteString> seq_string, int index) {
+    int effective_offset =
+        SeqOneByteString::kHeaderSize - kHeapObjectTag + index;
+    return Load<Uint8T>(seq_string, IntPtrConstant(effective_offset));
+  }
+
+  // Jumps to {target} if the first two characters of {seq_string} equal
+  // {pattern} ignoring case.
+  void JumpIfStartsWithIgnoreCase(TNode<SeqOneByteString> seq_string,
+                                  const char* pattern, Label* target) {
+    int effective_offset = SeqOneByteString::kHeaderSize - kHeapObjectTag;
+    TNode<Uint16T> raw =
+        Load<Uint16T>(seq_string, IntPtrConstant(effective_offset));
+    DCHECK_EQ(strlen(pattern), 2);
+#if V8_TARGET_BIG_ENDIAN
+    int raw_pattern = (pattern[0] << 8) + pattern[1];
+#else
+    int raw_pattern = pattern[0] + (pattern[1] << 8);
+#endif
+    GotoIf(Word32Equal(Word32Or(raw, Int32Constant(0x2020)),
+                       Int32Constant(raw_pattern)),
+           target);
+  }
+
+  TNode<BoolT> IsNonAlpha(TNode<Uint8T> character) {
+    return Uint32GreaterThan(
+        Int32Sub(Word32Or(character, Int32Constant(0x20)), Int32Constant('a')),
+        Int32Constant('z' - 'a'));
+  }
+
+  enum class ToLowerCaseKind {
+    kToLowerCase,
+    kToLocaleLowerCase,
+  };
+  void ToLowerCaseImpl(TNode<String> string, TNode<Object> maybe_locales,
+                       TNode<Context> context, ToLowerCaseKind kind,
+                       std::function<void(TNode<Object>)> ReturnFct);
};

TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
  const auto string = Parameter<String>(Descriptor::kString);
+  ToLowerCaseImpl(string, TNode<Object>() /*maybe_locales*/, TNode<Context>(),
+                  ToLowerCaseKind::kToLowerCase,
+                  [this](TNode<Object> ret) { Return(ret); });
+}
+
+TF_BUILTIN(StringPrototypeToLowerCaseIntl, IntlBuiltinsAssembler) {
+  auto maybe_string = Parameter<Object>(Descriptor::kReceiver);
+  auto context = Parameter<Context>(Descriptor::kContext);
+
+  TNode<String> string =
+      ToThisString(context, maybe_string, "String.prototype.toLowerCase");
+
+  Return(CallBuiltin(Builtin::kStringToLowerCaseIntl, context, string));
+}
+
+TF_BUILTIN(StringPrototypeToLocaleLowerCase, IntlBuiltinsAssembler) {
+  TNode<Int32T> argc =
+      UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
+  CodeStubArguments args(this, argc);
+  TNode<Object> maybe_string = args.GetReceiver();
+  TNode<Context> context = Parameter<Context>(Descriptor::kContext);
+  TNode<Object> maybe_locales = args.GetOptionalArgumentValue(0);
+  TNode<String> string =
+      ToThisString(context, maybe_string, "String.prototype.toLocaleLowerCase");
+  ToLowerCaseImpl(string, maybe_locales, context,
+                  ToLowerCaseKind::kToLocaleLowerCase,
+                  [&args](TNode<Object> ret) { args.PopAndReturn(ret); });
+}
+
+void IntlBuiltinsAssembler::ToLowerCaseImpl(
+    TNode<String> string, TNode<Object> maybe_locales, TNode<Context> context,
+    ToLowerCaseKind kind, std::function<void(TNode<Object>)> ReturnFct) {
  Label call_c(this), return_string(this), runtime(this, Label::kDeferred);

  // Early exit on empty strings.
@@ -54,9 +123,40 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
      state(), string, ToDirectStringAssembler::kDontUnpackSlicedStrings);
  to_direct.TryToDirect(&runtime);

+  if (kind == ToLowerCaseKind::kToLocaleLowerCase) {
+    Label fast(this), check_locale(this);
+    // Check for fast locales.
+    GotoIf(IsUndefined(maybe_locales), &fast);
+    // Passing a smi here is equivalent to passing an empty list of locales.
+    GotoIf(TaggedIsSmi(maybe_locales), &fast);
+    GotoIfNot(IsString(CAST(maybe_locales)), &runtime);
+    GotoIfNot(IsSeqOneByteString(CAST(maybe_locales)), &runtime);
+    TNode<SeqOneByteString> locale = CAST(maybe_locales);
+    TNode<Uint32T> locale_length = LoadStringLengthAsWord32(locale);
+    GotoIf(Int32LessThan(locale_length, Int32Constant(2)), &runtime);
+    GotoIf(IsNonAlpha(GetChar(locale, 0)), &runtime);
+    GotoIf(IsNonAlpha(GetChar(locale, 1)), &runtime);
+    GotoIf(Word32Equal(locale_length, Int32Constant(2)), &check_locale);
+    GotoIf(Word32NotEqual(locale_length, Int32Constant(5)), &runtime);
+    GotoIf(Word32NotEqual(GetChar(locale, 2), Int32Constant('-')), &runtime);
+    GotoIf(IsNonAlpha(GetChar(locale, 3)), &runtime);
+    GotoIf(IsNonAlpha(GetChar(locale, 4)), &runtime);
+    Goto(&check_locale);
+
+    Bind(&check_locale);
+    JumpIfStartsWithIgnoreCase(locale, "az", &runtime);
+    JumpIfStartsWithIgnoreCase(locale, "el", &runtime);
+    JumpIfStartsWithIgnoreCase(locale, "lt", &runtime);
+    JumpIfStartsWithIgnoreCase(locale, "tr", &runtime);
+    Goto(&fast);
+
+    Bind(&fast);
+  }
+
  const TNode<Int32T> instance_type = to_direct.instance_type();
  CSA_DCHECK(this,
             Word32BinaryNot(IsIndirectStringInstanceType(instance_type)));

  GotoIfNot(IsOneByteStringInstanceType(instance_type), &runtime);

  // For short strings, do the conversion in CSA through the lookup table.
@@ -103,7 +203,7 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
    // hash) on the source string.
    GotoIfNot(var_did_change.value(), &return_string);

-    Return(dst);
+    ReturnFct(dst);
  }

  // Call into C for case conversion. The signature is:
@@ -121,30 +221,23 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
        function_addr, type_tagged, std::make_pair(type_tagged, src),
        std::make_pair(type_tagged, dst)));

-    Return(result);
+    ReturnFct(result);
  }

  BIND(&return_string);
-  Return(string);
+  ReturnFct(string);

  BIND(&runtime);
-  {
-    const TNode<Object> result = CallRuntime(Runtime::kStringToLowerCaseIntl,
-                                             NoContextConstant(), string);
-    Return(result);
+  if (kind == ToLowerCaseKind::kToLocaleLowerCase) {
+    ReturnFct(CallRuntime(Runtime::kStringToLocaleLowerCase, context, string,
+                          maybe_locales));
+  } else {
+    DCHECK_EQ(kind, ToLowerCaseKind::kToLowerCase);
+    ReturnFct(CallRuntime(Runtime::kStringToLowerCaseIntl, NoContextConstant(),
+                          string));
  }
}

-TF_BUILTIN(StringPrototypeToLowerCaseIntl, IntlBuiltinsAssembler) {
-  auto maybe_string = Parameter<Object>(Descriptor::kReceiver);
-  auto context = Parameter<Context>(Descriptor::kContext);
-
-  TNode<String> string =
-      ToThisString(context, maybe_string, "String.prototype.toLowerCase");
-
-  Return(CallBuiltin(Builtin::kStringToLowerCaseIntl, context, string));
-}
-
void IntlBuiltinsAssembler::ListFormatCommon(TNode<Context> context,
                                             TNode<Int32T> argc,
                                             Runtime::FunctionId format_func_id,
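JumpIfStartsWithIgnoreCase above folds the case of two ASCII letters at once: OR-ing an ASCII letter with 0x20 lowercases it, so OR-ing the packed 16-bit pair with 0x2020 folds both characters in one operation. A standalone sketch of the same trick, little-endian packing assumed:

#include <cassert>
#include <cstdint>

// Returns true if the first two bytes of {chars} match the (lowercase,
// two-letter) {pattern}, ignoring ASCII case.
bool StartsWithIgnoreCase(const uint8_t* chars, const char* pattern) {
  uint16_t raw = static_cast<uint16_t>(chars[0] | (chars[1] << 8));
  uint16_t pat = static_cast<uint16_t>(pattern[0] | (pattern[1] << 8));
  return (raw | 0x2020) == pat;  // 0x20 per byte folds 'A'..'Z' to 'a'..'z'
}

int main() {
  const uint8_t s[] = {'T', 'R'};
  assert(StartsWithIgnoreCase(s, "tr"));
}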
@@ -901,28 +901,39 @@ BUILTIN(RelativeTimeFormatPrototypeResolvedOptions) {
  return *JSRelativeTimeFormat::ResolvedOptions(isolate, format_holder);
}

-BUILTIN(StringPrototypeToLocaleLowerCase) {
-  HandleScope scope(isolate);
-  isolate->CountUsage(v8::Isolate::UseCounterFeature::kStringToLocaleLowerCase);
-  TO_THIS_STRING(string, "String.prototype.toLocaleLowerCase");
-  RETURN_RESULT_OR_FAILURE(
-      isolate, Intl::StringLocaleConvertCase(isolate, string, false,
-                                             args.atOrUndefined(isolate, 1)));
+bool IsFastLocale(Object maybe_locale) {
+  DisallowGarbageCollection no_gc;
+  if (!maybe_locale.IsSeqOneByteString()) {
+    return false;
+  }
+  auto locale = SeqOneByteString::cast(maybe_locale);
+  uint8_t* chars = locale.GetChars(no_gc);
+  if (locale.length() < 2 || !std::isalpha(chars[0]) ||
+      !std::isalpha(chars[1])) {
+    return false;
+  }
+  if (locale.length() != 2 &&
+      (locale.length() != 5 || chars[2] != '-' || !std::isalpha(chars[3]) ||
+       !std::isalpha(chars[4]))) {
+    return false;
+  }
+  char first = chars[0] | 0x20;
+  char second = chars[1] | 0x20;
+  return (first != 'a' || second != 'z') && (first != 'e' || second != 'l') &&
+         (first != 'l' || second != 't') && (first != 't' || second != 'r');
}

BUILTIN(StringPrototypeToLocaleUpperCase) {
  HandleScope scope(isolate);
+  Handle<Object> maybe_locale = args.atOrUndefined(isolate, 1);
-  isolate->CountUsage(v8::Isolate::UseCounterFeature::kStringToLocaleUpperCase);

  TO_THIS_STRING(string, "String.prototype.toLocaleUpperCase");
+  if (maybe_locale->IsUndefined() || IsFastLocale(*maybe_locale)) {
-  RETURN_RESULT_OR_FAILURE(
-      isolate, Intl::StringLocaleConvertCase(isolate, string, true,
-                                             args.atOrUndefined(isolate, 1)));
+    string = String::Flatten(isolate, string);
+    RETURN_RESULT_OR_FAILURE(isolate, Intl::ConvertToUpper(isolate, string));
+  } else {
+    RETURN_RESULT_OR_FAILURE(isolate, Intl::StringLocaleConvertCase(
+                                          isolate, string, true, maybe_locale));
+  }
}

BUILTIN(PluralRulesConstructor) {
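For reference, the shape IsFastLocale accepts can be restated as a hedged standalone sketch (std::string stand-ins, not V8 string objects): ASCII locales "xx" or "xx-YY", minus az/el/lt/tr, whose case mappings are locale-sensitive.

#include <cctype>
#include <string>

bool LooksFast(const std::string& l) {
  auto alpha = [](char c) {
    return std::isalpha(static_cast<unsigned char>(c)) != 0;
  };
  // Only "xx" or "xx-YY" qualify.
  if (l.size() != 2 && l.size() != 5) return false;
  if (!alpha(l[0]) || !alpha(l[1])) return false;
  if (l.size() == 5 && (l[2] != '-' || !alpha(l[3]) || !alpha(l[4]))) {
    return false;
  }
  // Exclude languages with locale-sensitive case mappings.
  std::string tag{static_cast<char>(l[0] | 0x20), static_cast<char>(l[1] | 0x20)};
  return tag != "az" && tag != "el" && tag != "lt" && tag != "tr";
}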
@@ -756,7 +756,7 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
      if_number(this, Label::kDeferred), if_object(this), if_primitive(this),
      if_proxy(this, Label::kDeferred), if_regexp(this), if_string(this),
      if_symbol(this, Label::kDeferred), if_value(this),
-      if_bigint(this, Label::kDeferred);
+      if_bigint(this, Label::kDeferred), if_wasm(this);

  auto receiver = Parameter<Object>(Descriptor::kReceiver);
  auto context = Parameter<Context>(Descriptor::kContext);
@@ -776,7 +776,8 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
  const struct {
    InstanceType value;
    Label* label;
-  } kJumpTable[] = {{JS_OBJECT_TYPE, &if_object},
+  } kJumpTable[] = {
+      {JS_OBJECT_TYPE, &if_object},
      {JS_ARRAY_TYPE, &if_array},
      {JS_REG_EXP_TYPE, &if_regexp},
      {JS_ARGUMENTS_OBJECT_TYPE, &if_arguments},
@@ -785,7 +786,12 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
      {JS_SPECIAL_API_OBJECT_TYPE, &if_object},
      {JS_PROXY_TYPE, &if_proxy},
      {JS_ERROR_TYPE, &if_error},
-      {JS_PRIMITIVE_WRAPPER_TYPE, &if_value}};
+      {JS_PRIMITIVE_WRAPPER_TYPE, &if_value},
+#if V8_ENABLE_WEBASSEMBLY
+      {WASM_STRUCT_TYPE, &if_wasm},
+      {WASM_ARRAY_TYPE, &if_wasm},
+#endif
+  };
  size_t const kNumCases = arraysize(kJumpTable);
  Label* case_labels[kNumCases];
  int32_t case_values[kNumCases];
@@ -1051,6 +1057,11 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
    Goto(&loop);
  }

+#if V8_ENABLE_WEBASSEMBLY
+  BIND(&if_wasm);
+  ThrowTypeError(context, MessageTemplate::kWasmObjectsAreOpaque);
+#endif
+
  BIND(&return_generic);
  {
    TNode<Object> tag = GetProperty(context, ToObject(context, receiver),
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

+#include <unordered_set>
+
#include "src/builtins/builtins-utils-inl.h"
#include "src/objects/js-struct-inl.h"
#include "src/objects/property-details.h"
@@ -15,6 +17,25 @@ constexpr int kMaxJSStructFields = 999;
// rely on DescriptorArrays and are hence limited to 1020 fields at most.
static_assert(kMaxJSStructFields <= kMaxNumberOfDescriptors);

+namespace {
+
+struct NameHandleHasher {
+  size_t operator()(Handle<Name> name) const { return name->hash(); }
+};
+
+struct UniqueNameHandleEqual {
+  bool operator()(Handle<Name> x, Handle<Name> y) const {
+    DCHECK(x->IsUniqueName());
+    DCHECK(y->IsUniqueName());
+    return *x == *y;
+  }
+};
+
+using UniqueNameHandleSet =
+    std::unordered_set<Handle<Name>, NameHandleHasher, UniqueNameHandleEqual>;
+
+}  // namespace
+
BUILTIN(SharedStructTypeConstructor) {
  DCHECK(v8_flags.shared_string_table);

@@ -43,6 +64,7 @@ BUILTIN(SharedStructTypeConstructor) {
      num_properties, 0, AllocationType::kSharedOld);

  // Build up the descriptor array.
+  UniqueNameHandleSet all_field_names;
  for (int i = 0; i < num_properties; ++i) {
    Handle<Object> raw_field_name;
    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
@@ -53,6 +75,14 @@ BUILTIN(SharedStructTypeConstructor) {
        Object::ToName(isolate, raw_field_name));
    field_name = factory->InternalizeName(field_name);

+    // Check that there are no duplicates.
+    const bool is_duplicate = !all_field_names.insert(field_name).second;
+    if (is_duplicate) {
+      THROW_NEW_ERROR_RETURN_FAILURE(
+          isolate, NewTypeError(MessageTemplate::kDuplicateTemplateProperty,
+                                field_name));
+    }
+
    // Shared structs' fields need to be aligned, so make it all tagged.
    PropertyDetails details(
        PropertyKind::kData, SEALED, PropertyLocation::kField,
@@ -85,7 +115,12 @@ BUILTIN(SharedStructTypeConstructor) {

  instance_map->InitializeDescriptors(isolate, *descriptors);
  // Structs have fixed layout ahead of time, so there's no slack.
+  int out_of_object_properties = num_properties - in_object_properties;
+  if (out_of_object_properties == 0) {
    instance_map->SetInObjectUnusedPropertyFields(0);
+  } else {
+    instance_map->SetOutOfObjectUnusedPropertyFields(0);
+  }
  instance_map->set_is_extensible(false);
  JSFunction::SetInitialMap(isolate, constructor, instance_map,
                            factory->null_value());
@@ -94,6 +129,16 @@ BUILTIN(SharedStructTypeConstructor) {
  // to it.
  instance_map->set_constructor_or_back_pointer(*factory->null_value());

+  // Pre-create the enum cache in the shared space, as otherwise for-in
+  // enumeration will incorrectly create an enum cache in the per-thread heap.
+  if (num_properties == 0) {
+    instance_map->SetEnumLength(0);
+  } else {
+    FastKeyAccumulator::InitializeFastPropertyEnumCache(
+        isolate, instance_map, num_properties, AllocationType::kSharedOld);
+    DCHECK_EQ(num_properties, instance_map->EnumLength());
+  }
+
  return *constructor;
}
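The duplicate-field check above keys an unordered_set on handles, with custom hash and equality functors that look through to the underlying unique name. A simplified, self-contained sketch of that idiom, with plain structs standing in for V8 handles:

#include <string>
#include <unordered_set>

struct Name { std::string interned; size_t hash; };

struct NameHasher {
  size_t operator()(const Name* n) const { return n->hash; }
};
struct NameEqual {
  bool operator()(const Name* a, const Name* b) const {
    // Interned names would compare by identity; the string stand-in keeps
    // the sketch self-contained.
    return a->interned == b->interned;
  }
};

using NameSet = std::unordered_set<const Name*, NameHasher, NameEqual>;

// Mirrors: const bool is_duplicate = !all_field_names.insert(name).second;
bool InsertOrDuplicate(NameSet& set, const Name* n) {
  return !set.insert(n).second;
}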
@@ -64,6 +64,8 @@ TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
  StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kBitFieldOffset,
                                 Int32Constant(bitfield_value));

+  StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kDetachKeyOffset,
+                                 UndefinedConstant());
  StoreBoundedSizeToObject(buffer, JSArrayBuffer::kRawByteLengthOffset,
                           UintPtrConstant(0));
  StoreSandboxedPointerToObject(buffer, JSArrayBuffer::kBackingStoreOffset,
@@ -52,60 +52,6 @@ TF_BUILTIN(WasmFloat64ToNumber, WasmBuiltinsAssembler) {
  Return(ChangeFloat64ToTagged(val));
}

-TF_BUILTIN(WasmI32AtomicWait32, WasmBuiltinsAssembler) {
-  if (!Is32()) {
-    Unreachable();
-    return;
-  }
-
-  auto address = UncheckedParameter<Uint32T>(Descriptor::kAddress);
-  TNode<Number> address_number = ChangeUint32ToTagged(address);
-
-  auto expected_value = UncheckedParameter<Int32T>(Descriptor::kExpectedValue);
-  TNode<Number> expected_value_number = ChangeInt32ToTagged(expected_value);
-
-  auto timeout_low = UncheckedParameter<IntPtrT>(Descriptor::kTimeoutLow);
-  auto timeout_high = UncheckedParameter<IntPtrT>(Descriptor::kTimeoutHigh);
-  TNode<BigInt> timeout = BigIntFromInt32Pair(timeout_low, timeout_high);
-
-  TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
-  TNode<Context> context = LoadContextFromInstance(instance);
-
-  TNode<Smi> result_smi =
-      CAST(CallRuntime(Runtime::kWasmI32AtomicWait, context, instance,
-                       address_number, expected_value_number, timeout));
-  Return(Unsigned(SmiToInt32(result_smi)));
-}
-
-TF_BUILTIN(WasmI64AtomicWait32, WasmBuiltinsAssembler) {
-  if (!Is32()) {
-    Unreachable();
-    return;
-  }
-
-  auto address = UncheckedParameter<Uint32T>(Descriptor::kAddress);
-  TNode<Number> address_number = ChangeUint32ToTagged(address);
-
-  auto expected_value_low =
-      UncheckedParameter<IntPtrT>(Descriptor::kExpectedValueLow);
-  auto expected_value_high =
-      UncheckedParameter<IntPtrT>(Descriptor::kExpectedValueHigh);
-  TNode<BigInt> expected_value =
-      BigIntFromInt32Pair(expected_value_low, expected_value_high);
-
-  auto timeout_low = UncheckedParameter<IntPtrT>(Descriptor::kTimeoutLow);
-  auto timeout_high = UncheckedParameter<IntPtrT>(Descriptor::kTimeoutHigh);
-  TNode<BigInt> timeout = BigIntFromInt32Pair(timeout_low, timeout_high);
-
-  TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
-  TNode<Context> context = LoadContextFromInstance(instance);
-
-  TNode<Smi> result_smi =
-      CAST(CallRuntime(Runtime::kWasmI64AtomicWait, context, instance,
-                       address_number, expected_value, timeout));
-  Return(Unsigned(SmiToInt32(result_smi)));
-}
-
TF_BUILTIN(JSToWasmLazyDeoptContinuation, WasmBuiltinsAssembler) {
  // Reset thread_in_wasm_flag.
  TNode<ExternalReference> thread_in_wasm_flag_address_address =
@@ -181,9 +181,6 @@ FullObjectSlot Builtins::builtin_tier0_slot(Builtin builtin) {

void Builtins::set_code(Builtin builtin, CodeT code) {
  DCHECK_EQ(builtin, code.builtin_id());
-  if (!V8_REMOVE_BUILTINS_CODE_OBJECTS && V8_EXTERNAL_CODE_SPACE_BOOL) {
-    DCHECK_EQ(builtin, FromCodeT(code).builtin_id());
-  }
  DCHECK(Internals::HasHeapObjectTag(code.ptr()));
  // The given builtin may be uninitialized thus we cannot check its type here.
  isolate_->builtin_table()[Builtins::ToInt(builtin)] = code.ptr();
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

-#include "src/codegen/register.h"
#if V8_TARGET_ARCH_IA32

#include "src/api/api-arguments.h"
@@ -27,6 +26,7 @@
#include "src/objects/smi.h"

#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/baseline/liftoff-assembler-defs.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
#endif  // V8_ENABLE_WEBASSEMBLY
@@ -2763,25 +2763,19 @@ void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
}

#if V8_ENABLE_WEBASSEMBLY
-void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
-  // The function index was put in edi by the jump table trampoline.
-  // Convert to Smi for the runtime call.
-  __ SmiTag(kWasmCompileLazyFuncIndexRegister);
-  {
-    HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
-    FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
-
+// Returns the offset beyond the last saved FP register.
+int SaveWasmParams(MacroAssembler* masm) {
  // Save all parameter registers (see wasm-linkage.h). They might be
-  // overwritten in the runtime call below. We don't have any callee-saved
+  // overwritten in the subsequent runtime call. We don't have any callee-saved
  // registers in wasm, so no need to store anything else.
-  static_assert(
-      WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs + 1 ==
+  static_assert(WasmLiftoffSetupFrameConstants::kNumberOfSavedGpParamRegs + 1 ==
                    arraysize(wasm::kGpParamRegisters),
                "frame size mismatch");
  for (Register reg : wasm::kGpParamRegisters) {
    __ Push(reg);
  }
-  static_assert(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs ==
+  static_assert(WasmLiftoffSetupFrameConstants::kNumberOfSavedFpParamRegs ==
                    arraysize(wasm::kFpParamRegisters),
                "frame size mismatch");
  __ AllocateStackSpace(kSimd128Size * arraysize(wasm::kFpParamRegisters));
@@ -2790,24 +2784,12 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
    __ movdqu(Operand(esp, offset), reg);
    offset += kSimd128Size;
  }
+  return offset;
+}

-  // Push the Wasm instance as an explicit argument to the runtime function.
-  __ Push(kWasmInstanceRegister);
-  // Push the function index as second argument.
-  __ Push(kWasmCompileLazyFuncIndexRegister);
-  // Allocate a stack slot, where the runtime function can spill a pointer to
-  // the the NativeModule.
-  __ Push(esp);
-  // Initialize the JavaScript context with 0. CEntry will use it to
-  // set the current context on the isolate.
-  __ Move(kContextRegister, Smi::zero());
-  __ CallRuntime(Runtime::kWasmCompileLazy, 3);
-  // The runtime function returns the jump table slot offset as a Smi. Use
-  // that to compute the jump target in edi.
-  __ SmiUntag(kReturnRegister0);
-  __ mov(edi, kReturnRegister0);
-
-  // Restore registers.
+// Consumes the offset beyond the last saved FP register (as returned by
+// {SaveWasmParams}).
+void RestoreWasmParams(MacroAssembler* masm, int offset) {
  for (DoubleRegister reg : base::Reversed(wasm::kFpParamRegisters)) {
    offset -= kSimd128Size;
    __ movdqu(reg, Operand(esp, offset));
@@ -2817,6 +2799,126 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
  for (Register reg : base::Reversed(wasm::kGpParamRegisters)) {
    __ Pop(reg);
  }
+}
+
+// When this builtin is called, the topmost stack entry is the calling pc.
+// This is replaced with the following:
+//
+// [ calling pc ] <-- esp; popped by {ret}.
+// [ feedback vector ]
+// [ Wasm instance ]
+// [ WASM frame marker ]
+// [ saved ebp ] <-- ebp; this is where "calling pc" used to be.
+void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
+  constexpr Register func_index = wasm::kLiftoffFrameSetupFunctionReg;
+
+  // We have zero free registers at this point. Free up a temp. Its value
+  // could be tagged, but we're only storing it on the stack for a short
+  // while, and no GC or stack walk can happen during this time.
+  Register tmp = eax;  // Arbitrarily chosen.
+  __ Push(tmp);        // This is the "marker" slot.
+  {
+    Operand saved_ebp_slot = Operand(esp, kSystemPointerSize);
+    __ mov(tmp, saved_ebp_slot);  // tmp now holds the "calling pc".
+    __ mov(saved_ebp_slot, ebp);
+    __ lea(ebp, Operand(esp, kSystemPointerSize));
+  }
+  __ Push(tmp);  // This is the "instance" slot.
+
+  // Stack layout is now:
+  // [calling pc] <-- instance_slot <-- esp
+  // [saved tmp]  <-- marker_slot
+  // [saved ebp]
+  Operand marker_slot = Operand(ebp, WasmFrameConstants::kFrameTypeOffset);
+  Operand instance_slot = Operand(ebp, WasmFrameConstants::kWasmInstanceOffset);
+
+  // Load the feedback vector.
+  __ mov(tmp, FieldOperand(kWasmInstanceRegister,
+                           WasmInstanceObject::kFeedbackVectorsOffset));
+  __ mov(tmp, FieldOperand(tmp, func_index, times_tagged_size,
+                           FixedArray::kHeaderSize));
+  Label allocate_vector;
+  __ JumpIfSmi(tmp, &allocate_vector);
+
+  // Vector exists. Finish setting up the stack frame.
+  __ Push(tmp);                // Feedback vector.
+  __ mov(tmp, instance_slot);  // Calling PC.
+  __ Push(tmp);
+  __ mov(instance_slot, kWasmInstanceRegister);
+  __ mov(tmp, marker_slot);
+  __ mov(marker_slot, Immediate(StackFrame::TypeToMarker(StackFrame::WASM)));
+  __ ret(0);
+
+  __ bind(&allocate_vector);
+  // Feedback vector doesn't exist yet. Call the runtime to allocate it.
+  // We temporarily change the frame type for this, because we need special
+  // handling by the stack walker in case of GC.
+  // For the runtime call, we create the following stack layout:
+  //
+  // [ reserved slot for NativeModule ] <-- arg[2]
+  // [ ("declared") function index ]    <-- arg[1] for runtime func.
+  // [ Wasm instance ]                  <-- arg[0]
+  // [ ...spilled Wasm parameters... ]
+  // [ calling pc ]                     <-- already in place
+  // [ WASM_LIFTOFF_SETUP marker ]
+  // [ saved ebp ]                      <-- already in place
+
+  __ mov(tmp, marker_slot);
+  __ mov(marker_slot,
+         Immediate(StackFrame::TypeToMarker(StackFrame::WASM_LIFTOFF_SETUP)));
+
+  int offset = SaveWasmParams(masm);
+
+  // Arguments to the runtime function: instance, func_index.
+  __ Push(kWasmInstanceRegister);
+  __ SmiTag(func_index);
+  __ Push(func_index);
+  // Allocate a stack slot where the runtime function can spill a pointer
+  // to the NativeModule.
+  __ Push(esp);
+  __ Move(kContextRegister, Smi::zero());
+  __ CallRuntime(Runtime::kWasmAllocateFeedbackVector, 3);
+  tmp = func_index;
+  __ mov(tmp, kReturnRegister0);
+
+  RestoreWasmParams(masm, offset);
+
+  // Finish setting up the stack frame:
+  //                               [ calling pc ]
+  //               (tmp reg) --->  [ feedback vector ]
+  // [ calling pc ]             => [ Wasm instance ]  <-- instance_slot
+  // [ WASM_LIFTOFF_SETUP marker ]  [ WASM marker ]   <-- marker_slot
+  // [ saved ebp ]                  [ saved ebp ]
+  __ mov(marker_slot, Immediate(StackFrame::TypeToMarker(StackFrame::WASM)));
+  __ Push(tmp);                // Feedback vector.
+  __ mov(tmp, instance_slot);  // Calling PC.
+  __ Push(tmp);
+  __ mov(instance_slot, kWasmInstanceRegister);
+  __ ret(0);
+}
+
+void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
+  // The function index was put in edi by the jump table trampoline.
+  // Convert to Smi for the runtime call.
+  __ SmiTag(kWasmCompileLazyFuncIndexRegister);
+  {
+    HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    int offset = SaveWasmParams(masm);
+
+    // Push arguments for the runtime function.
+    __ Push(kWasmInstanceRegister);
+    __ Push(kWasmCompileLazyFuncIndexRegister);
+    // Initialize the JavaScript context with 0. CEntry will use it to
+    // set the current context on the isolate.
+    __ Move(kContextRegister, Smi::zero());
+    __ CallRuntime(Runtime::kWasmCompileLazy, 2);
+    // The runtime function returns the jump table slot offset as a Smi. Use
+    // that to compute the jump target in edi.
+    __ SmiUntag(kReturnRegister0);
+    __ mov(edi, kReturnRegister0);
+
+    RestoreWasmParams(masm, offset);
+
    // After the instance register has been restored, we can add the jump table
    // start to the jump table offset already stored in edi.
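The ia32 refactor above splits the old inline save/restore into SaveWasmParams/RestoreWasmParams, where the save helper returns the offset the restore helper consumes, keeping the pair symmetric by construction. A hedged sketch of that shape:

#include <cstddef>
#include <vector>

// Saves n "registers" and reports the offset beyond the last saved slot.
size_t SaveParams(std::vector<int>& stack, const int* regs, size_t n) {
  for (size_t i = 0; i < n; ++i) stack.push_back(regs[i]);
  return n;
}

// Consumes exactly the offset returned by SaveParams, restoring in reverse.
void RestoreParams(std::vector<int>& stack, int* regs, size_t offset) {
  while (offset-- > 0) {
    regs[offset] = stack.back();
    stack.pop_back();
  }
}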
@ -26,6 +26,7 @@
|
||||||
#include "src/runtime/runtime.h"
|
#include "src/runtime/runtime.h"
|
||||||
|
|
||||||
#if V8_ENABLE_WEBASSEMBLY
|
#if V8_ENABLE_WEBASSEMBLY
|
||||||
|
#include "src/wasm/baseline/liftoff-assembler-defs.h"
|
||||||
#include "src/wasm/wasm-linkage.h"
|
#include "src/wasm/wasm-linkage.h"
|
||||||
#include "src/wasm/wasm-objects.h"
|
#include "src/wasm/wasm-objects.h"
|
||||||
#endif // V8_ENABLE_WEBASSEMBLY
|
#endif // V8_ENABLE_WEBASSEMBLY
|
||||||
|
@ -2562,11 +2563,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
|
||||||
}
|
}
|
||||||
|
|
||||||
#if V8_ENABLE_WEBASSEMBLY
|
#if V8_ENABLE_WEBASSEMBLY
|
||||||
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
|
|
||||||
// The function index was put in t0 by the jump table trampoline.
|
|
||||||
// Convert to Smi for the runtime call
|
|
||||||
__ SmiTag(kWasmCompileLazyFuncIndexRegister);
|
|
||||||
|
|
||||||
// Compute register lists for parameters to be saved. We save all parameter
|
// Compute register lists for parameters to be saved. We save all parameter
|
||||||
// registers (see wasm-linkage.h). They might be overwritten in the runtime
|
// registers (see wasm-linkage.h). They might be overwritten in the runtime
|
||||||
// call below. We don't have any callee-saved registers in wasm, so no need to
|
// call below. We don't have any callee-saved registers in wasm, so no need to
|
||||||
|
@ -2577,12 +2573,11 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
|
||||||
saved_gp_regs.set(gp_param_reg);
|
saved_gp_regs.set(gp_param_reg);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// The instance has already been stored in the fixed part of the frame.
|
||||||
|
saved_gp_regs.clear(kWasmInstanceRegister);
|
||||||
// All set registers were unique.
|
// All set registers were unique.
|
||||||
CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters));
|
CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters) - 1);
|
||||||
// The Wasm instance must be part of the saved registers.
|
CHECK_EQ(WasmLiftoffSetupFrameConstants::kNumberOfSavedGpParamRegs,
|
||||||
CHECK(saved_gp_regs.has(kWasmInstanceRegister));
|
|
||||||
// + instance
|
|
||||||
CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs + 1,
|
|
||||||
saved_gp_regs.Count());
|
saved_gp_regs.Count());
|
||||||
return saved_gp_regs;
|
return saved_gp_regs;
|
||||||
})();
|
})();
|
||||||
|
@ -2594,16 +2589,80 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
|
||||||
}
|
}
|
||||||
|
|
||||||
CHECK_EQ(saved_fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
|
CHECK_EQ(saved_fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
|
||||||
CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
|
CHECK_EQ(WasmLiftoffSetupFrameConstants::kNumberOfSavedFpParamRegs,
|
||||||
saved_fp_regs.Count());
|
saved_fp_regs.Count());
|
||||||
return saved_fp_regs;
|
return saved_fp_regs;
|
||||||
})();
|
})();
|
||||||
|
|
||||||
|
// When entering this builtin, we have just created a Wasm stack frame:
|
||||||
|
//
|
||||||
|
// [ Wasm instance ] <-- sp
|
||||||
|
// [ WASM frame marker ]
|
||||||
|
// [ saved fp ] <-- fp
|
||||||
|
//
|
||||||
|
// Add the feedback vector to the stack.
|
||||||
|
//
|
||||||
|
// [ feedback vector ] <-- sp
|
||||||
|
// [ Wasm instance ]
|
||||||
|
// [ WASM frame marker ]
|
||||||
|
// [ saved fp ] <-- fp
|
||||||
|
void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
|
||||||
|
Register func_index = wasm::kLiftoffFrameSetupFunctionReg;
|
||||||
|
Register vector = t1;
|
||||||
|
Register scratch = t2;
|
||||||
|
Label allocate_vector, done;
|
||||||
|
|
||||||
|
+  __ Ld_d(vector, FieldMemOperand(kWasmInstanceRegister,
+                                  WasmInstanceObject::kFeedbackVectorsOffset));
+  __ Alsl_d(vector, func_index, vector, kTaggedSizeLog2);
+  __ Ld_d(vector, FieldMemOperand(vector, FixedArray::kHeaderSize));
+  __ JumpIfSmi(vector, &allocate_vector);
+  __ bind(&done);
+  __ Push(vector);
+  __ Ret();
+
+  __ bind(&allocate_vector);
+  // Feedback vector doesn't exist yet. Call the runtime to allocate it.
+  // We temporarily change the frame type for this, because we need special
+  // handling by the stack walker in case of GC.
+  __ li(scratch, StackFrame::TypeToMarker(StackFrame::WASM_LIFTOFF_SETUP));
+  __ St_d(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
+
+  // Save registers.
+  __ MultiPush(kSavedGpRegs);
+  __ MultiPushFPU(kSavedFpRegs);
+  __ Push(ra);
+
+  // Arguments to the runtime function: instance, func_index, and an
+  // additional stack slot for the NativeModule.
+  __ SmiTag(func_index);
+  __ Push(kWasmInstanceRegister, func_index, zero_reg);
+  __ Move(cp, Smi::zero());
+  __ CallRuntime(Runtime::kWasmAllocateFeedbackVector, 3);
+  __ mov(vector, kReturnRegister0);
+
+  // Restore registers and frame type.
+  __ Pop(ra);
+  __ MultiPopFPU(kSavedFpRegs);
+  __ MultiPop(kSavedGpRegs);
+  __ Ld_d(kWasmInstanceRegister,
+          MemOperand(fp, WasmFrameConstants::kWasmInstanceOffset));
+  __ li(scratch, StackFrame::TypeToMarker(StackFrame::WASM));
+  __ St_d(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
+  __ Branch(&done);
+}
+
+void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
+  // The function index was put in t0 by the jump table trampoline.
+  // Convert to Smi for the runtime call
+  __ SmiTag(kWasmCompileLazyFuncIndexRegister);
+
   {
     HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
-    FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
+    FrameScope scope(masm, StackFrame::INTERNAL);

     // Save registers that we need to keep alive across the runtime call.
+    __ Push(kWasmInstanceRegister);
     __ MultiPush(kSavedGpRegs);
     __ MultiPushFPU(kSavedFpRegs);

@@ -2612,16 +2671,12 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
     // as if they were saved.
     __ Sub_d(sp, sp, kSavedFpRegs.Count() * kDoubleSize);

-    // Pass instance and function index as an explicit arguments to the runtime
-    // function.
-    // Allocate a stack slot, where the runtime function can spill a pointer to
-    // the the NativeModule.
-    __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister, zero_reg);
+    __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);

     // Initialize the JavaScript context with 0. CEntry will use it to
     // set the current context on the isolate.
     __ Move(kContextRegister, Smi::zero());
-    __ CallRuntime(Runtime::kWasmCompileLazy, 3);
+    __ CallRuntime(Runtime::kWasmCompileLazy, 2);

     // Untag the returned Smi into t7, for later use.
     static_assert(!kSavedGpRegs.has(t7));
@@ -2631,14 +2686,14 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
     // Restore registers.
     __ MultiPopFPU(kSavedFpRegs);
     __ MultiPop(kSavedGpRegs);
+    __ Pop(kWasmInstanceRegister);
   }

   // The runtime function returned the jump table slot offset as a Smi (now in
   // t7). Use that to compute the jump target.
   static_assert(!kSavedGpRegs.has(t8));
-  __ Ld_d(t8, MemOperand(
-                  kWasmInstanceRegister,
-                  WasmInstanceObject::kJumpTableStartOffset - kHeapObjectTag));
+  __ Ld_d(t8, FieldMemOperand(kWasmInstanceRegister,
+                              WasmInstanceObject::kJumpTableStartOffset));

   __ Add_d(t7, t8, Operand(t7));

   // Finally, jump to the jump table slot for the function.
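The MemOperand-to-FieldMemOperand rewrites in this commit are behavior-preserving: in V8, FieldMemOperand folds the -kHeapObjectTag adjustment in itself, so spelling out the subtraction at every call site is redundant. A simplified sketch of the equivalence, with standalone types rather than the real assembler classes:

    #include <cstdint>

    constexpr int kHeapObjectTag = 1;  // Tagged heap pointers are biased by one.

    struct MemOperand {
      int base_reg;
      int32_t offset;  // Byte offset applied to the base register's value.
    };

    // Equivalent to MemOperand(object_reg, field_offset - kHeapObjectTag):
    // the tag bias is removed while addressing, so callers can pass plain
    // field offsets for tagged object pointers.
    inline MemOperand FieldMemOperand(int object_reg, int32_t field_offset) {
      return MemOperand{object_reg, field_offset - kHeapObjectTag};
    }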
@@ -26,6 +26,7 @@
 #include "src/runtime/runtime.h"

 #if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/baseline/liftoff-assembler-defs.h"
 #include "src/wasm/wasm-linkage.h"
 #include "src/wasm/wasm-objects.h"
 #endif  // V8_ENABLE_WEBASSEMBLY
@@ -2553,11 +2554,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
 }

 #if V8_ENABLE_WEBASSEMBLY
-void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
-  // The function index was put in t0 by the jump table trampoline.
-  // Convert to Smi for the runtime call
-  __ SmiTag(kWasmCompileLazyFuncIndexRegister);
-
 // Compute register lists for parameters to be saved. We save all parameter
 // registers (see wasm-linkage.h). They might be overwritten in the runtime
 // call below. We don't have any callee-saved registers in wasm, so no need to
@@ -2568,12 +2564,11 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
     saved_gp_regs.set(gp_param_reg);
   }

+  // The instance has already been stored in the fixed part of the frame.
+  saved_gp_regs.clear(kWasmInstanceRegister);
   // All set registers were unique.
-  CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters));
-  // The Wasm instance must be part of the saved registers.
-  CHECK(saved_gp_regs.has(kWasmInstanceRegister));
-  // + instance
-  CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs + 1,
+  CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters) - 1);
+  CHECK_EQ(WasmLiftoffSetupFrameConstants::kNumberOfSavedGpParamRegs,
            saved_gp_regs.Count());
   return saved_gp_regs;
 })();
@@ -2585,16 +2580,80 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
   }

   CHECK_EQ(saved_fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
-  CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
+  CHECK_EQ(WasmLiftoffSetupFrameConstants::kNumberOfSavedFpParamRegs,
            saved_fp_regs.Count());
   return saved_fp_regs;
 })();

+// When entering this builtin, we have just created a Wasm stack frame:
+//
+// [ Wasm instance ]     <-- sp
+// [ WASM frame marker ]
+// [ saved fp ]          <-- fp
+//
+// Add the feedback vector to the stack.
+//
+// [ feedback vector ]   <-- sp
+// [ Wasm instance ]
+// [ WASM frame marker ]
+// [ saved fp ]          <-- fp
+void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
+  Register func_index = wasm::kLiftoffFrameSetupFunctionReg;
+  Register vector = t1;
+  Register scratch = t2;
+  Label allocate_vector, done;
+
+  __ Ld(vector, FieldMemOperand(kWasmInstanceRegister,
+                                WasmInstanceObject::kFeedbackVectorsOffset));
+  __ Dlsa(vector, vector, func_index, kTaggedSizeLog2);
+  __ Ld(vector, FieldMemOperand(vector, FixedArray::kHeaderSize));
+  __ JumpIfSmi(vector, &allocate_vector);
+  __ bind(&done);
+  __ Push(vector);
+  __ Ret();
+
+  __ bind(&allocate_vector);
+  // Feedback vector doesn't exist yet. Call the runtime to allocate it.
+  // We temporarily change the frame type for this, because we need special
+  // handling by the stack walker in case of GC.
+  __ li(scratch, StackFrame::TypeToMarker(StackFrame::WASM_LIFTOFF_SETUP));
+  __ Sd(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
+
+  // Save registers.
+  __ MultiPush(kSavedGpRegs);
+  __ MultiPushFPU(kSavedFpRegs);
+  __ Push(ra);
+
+  // Arguments to the runtime function: instance, func_index, and an
+  // additional stack slot for the NativeModule.
+  __ SmiTag(func_index);
+  __ Push(kWasmInstanceRegister, func_index, zero_reg);
+  __ Move(cp, Smi::zero());
+  __ CallRuntime(Runtime::kWasmAllocateFeedbackVector, 3);
+  __ mov(vector, kReturnRegister0);
+
+  // Restore registers and frame type.
+  __ Pop(ra);
+  __ MultiPopFPU(kSavedFpRegs);
+  __ MultiPop(kSavedGpRegs);
+  __ Ld(kWasmInstanceRegister,
+        MemOperand(fp, WasmFrameConstants::kWasmInstanceOffset));
+  __ li(scratch, StackFrame::TypeToMarker(StackFrame::WASM));
+  __ Sd(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
+  __ Branch(&done);
+}
+
+void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
+  // The function index was put in t0 by the jump table trampoline.
+  // Convert to Smi for the runtime call
+  __ SmiTag(kWasmCompileLazyFuncIndexRegister);
+
   {
     HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
-    FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
+    FrameScope scope(masm, StackFrame::INTERNAL);

     // Save registers that we need to keep alive across the runtime call.
+    __ Push(kWasmInstanceRegister);
     __ MultiPush(kSavedGpRegs);
     // Check if machine has simd enabled, if so push vector registers. If not
     // then only push double registers.
@@ -2617,16 +2676,13 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
     // as if they were saved.
     __ Dsubu(sp, sp, kSavedFpRegs.Count() * kDoubleSize);
     __ bind(&simd_pushed);
-    // Pass instance and function index as an explicit arguments to the runtime
-    // function.
-    // Allocate a stack slot, where the runtime function can spill a pointer to
-    // the the NativeModule.
-    __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister, zero_reg);
+    __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);

     // Initialize the JavaScript context with 0. CEntry will use it to
     // set the current context on the isolate.
     __ Move(kContextRegister, Smi::zero());
-    __ CallRuntime(Runtime::kWasmCompileLazy, 3);
+    __ CallRuntime(Runtime::kWasmCompileLazy, 2);

     // Restore registers.
     Label pop_doubles, simd_popped;
@@ -2646,6 +2702,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
     __ MultiPopFPU(kSavedFpRegs);
     __ bind(&simd_popped);
     __ MultiPop(kSavedGpRegs);
+    __ Pop(kWasmInstanceRegister);
   }

   // Untag the returned Smi, for later use.
@@ -2655,9 +2712,8 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
   // The runtime function returned the jump table slot offset as a Smi (now in
   // t8). Use that to compute the jump target.
   static_assert(!kSavedGpRegs.has(t8));
-  __ Ld(t8,
-        MemOperand(kWasmInstanceRegister,
-                   WasmInstanceObject::kJumpTableStartOffset - kHeapObjectTag));
+  __ Ld(t8, FieldMemOperand(kWasmInstanceRegister,
+                            WasmInstanceObject::kJumpTableStartOffset));

   __ Daddu(t8, v0, t8);

   // Finally, jump to the jump table slot for the function.
@@ -666,8 +666,7 @@ builtin Modulus(implicit context: Context)(left: JSAny, right: JSAny): Numeric {
   } label Float64s(left: float64, right: float64) {
     return AllocateHeapNumberWithValue(left % right);
   } label AtLeastOneBigInt(left: Numeric, right: Numeric) {
-    tail runtime::BigIntBinaryOp(
-        context, left, right, SmiTag<Operation>(Operation::kModulus));
+    tail bigint::BigIntModulus(left, right);
   }
 }
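The dedicated bigint::BigIntModulus path must keep JavaScript's remainder semantics: % is a truncated-division remainder whose sign follows the dividend, for BigInts just as for Numbers. A quick standalone check of that rule, with ordinary 64-bit integers standing in for arbitrary-precision BigInts:

    #include <cassert>
    #include <cstdint>

    // C++'s integer % also truncates toward zero, so it models JS BigInt %.
    int64_t js_bigint_remainder(int64_t left, int64_t right) {
      return left % right;
    }

    int main() {
      assert(js_bigint_remainder(-7, 3) == -1);  // -7n % 3n === -1n
      assert(js_bigint_remainder(7, -3) == 1);   //  7n % -3n === 1n
    }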
@@ -24,6 +24,7 @@
 #include "src/runtime/runtime.h"

 #if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/baseline/liftoff-assembler-defs.h"
 #include "src/wasm/wasm-linkage.h"
 #include "src/wasm/wasm-objects.h"
 #endif  // V8_ENABLE_WEBASSEMBLY
@@ -2759,6 +2760,96 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
 }

 #if V8_ENABLE_WEBASSEMBLY
+
+struct SaveWasmParamsScope {
+  explicit SaveWasmParamsScope(MacroAssembler* masm) : masm(masm) {
+    for (Register gp_param_reg : wasm::kGpParamRegisters) {
+      gp_regs.set(gp_param_reg);
+    }
+    for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
+      fp_regs.set(fp_param_reg);
+    }
+
+    CHECK_EQ(gp_regs.Count(), arraysize(wasm::kGpParamRegisters));
+    CHECK_EQ(fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
+    CHECK_EQ(simd_regs.Count(), arraysize(wasm::kFpParamRegisters));
+    CHECK_EQ(WasmLiftoffSetupFrameConstants::kNumberOfSavedGpParamRegs + 1,
+             gp_regs.Count());
+    CHECK_EQ(WasmLiftoffSetupFrameConstants::kNumberOfSavedFpParamRegs,
+             fp_regs.Count());
+    CHECK_EQ(WasmLiftoffSetupFrameConstants::kNumberOfSavedFpParamRegs,
+             simd_regs.Count());
+
+    __ MultiPush(gp_regs);
+    __ MultiPushF64AndV128(fp_regs, simd_regs, ip, r0);
+  }
+  ~SaveWasmParamsScope() {
+    __ MultiPopF64AndV128(fp_regs, simd_regs, ip, r0);
+    __ MultiPop(gp_regs);
+  }
+
+  RegList gp_regs;
+  DoubleRegList fp_regs;
+  // List must match register numbers under kFpParamRegisters.
+  Simd128RegList simd_regs = {v1, v2, v3, v4, v5, v6, v7, v8};
+  MacroAssembler* masm;
+};
+
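SaveWasmParamsScope uses RAII so the restore sequence cannot be forgotten: the constructor emits the pushes, and the destructor emits the matching pops when the enclosing block closes, after whatever was emitted inside it. A standalone sketch of the same pattern, with a hypothetical Emit helper rather than the V8 assembler API:

    #include <cstdio>

    struct Masm {
      void Emit(const char* s) { std::puts(s); }
    };

    struct SaveParamsScope {
      explicit SaveParamsScope(Masm* m) : masm(m) {
        masm->Emit("push gp params");
        masm->Emit("push fp params");
      }
      ~SaveParamsScope() {            // Runs at the closing brace.
        masm->Emit("pop fp params");  // Reverse order of the pushes.
        masm->Emit("pop gp params");
      }
      Masm* masm;
    };

    int main() {
      Masm masm;
      {
        SaveParamsScope save(&masm);
        masm.Emit("call runtime");
      }  // Restore instructions are emitted here.
    }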
+void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
+  Register func_index = wasm::kLiftoffFrameSetupFunctionReg;
+  Register vector = r11;
+  Register scratch = ip;
+  Label allocate_vector, done;
+
+  __ LoadTaggedPointerField(
+      vector,
+      FieldMemOperand(kWasmInstanceRegister,
+                      WasmInstanceObject::kFeedbackVectorsOffset),
+      scratch);
+  __ ShiftLeftU64(scratch, func_index, Operand(kTaggedSizeLog2));
+  __ AddS64(vector, vector, scratch);
+  __ LoadTaggedPointerField(
+      vector, FieldMemOperand(vector, FixedArray::kHeaderSize), scratch);
+  __ JumpIfSmi(vector, &allocate_vector);
+  __ bind(&done);
+  __ push(kWasmInstanceRegister);
+  __ push(vector);
+  __ Ret();
+
+  __ bind(&allocate_vector);
+
+  // Feedback vector doesn't exist yet. Call the runtime to allocate it.
+  // We temporarily change the frame type for this, because we need special
+  // handling by the stack walker in case of GC.
+  __ mov(scratch,
+         Operand(StackFrame::TypeToMarker(StackFrame::WASM_LIFTOFF_SETUP)));
+  __ StoreU64(scratch, MemOperand(sp));
+
+  // Save current return address as it will get clobbered during CallRuntime.
+  __ mflr(scratch);
+  __ push(scratch);
+  {
+    SaveWasmParamsScope save_params(masm);  // Will use r0 and ip as scratch.
+    // Arguments to the runtime function: instance, func_index.
+    __ push(kWasmInstanceRegister);
+    __ SmiTag(func_index);
+    __ push(func_index);
+    // Allocate a stack slot where the runtime function can spill a pointer
+    // to the {NativeModule}.
+    __ push(r11);
+    __ LoadSmiLiteral(cp, Smi::zero());
+    __ CallRuntime(Runtime::kWasmAllocateFeedbackVector, 3);
+    __ mr(vector, kReturnRegister0);
+    // Saved parameters are restored at the end of this block.
+  }
+  __ pop(scratch);
+  __ mtlr(scratch);
+
+  __ mov(scratch, Operand(StackFrame::TypeToMarker(StackFrame::WASM)));
+  __ StoreU64(scratch, MemOperand(sp));
+  __ b(&done);
+}
+
 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
   // The function index was put in a register by the jump table trampoline.
   // Convert to Smi for the runtime call.
@@ -2766,63 +2857,32 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {

   {
     HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
-    FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);

-    // Save all parameter registers (see wasm-linkage.h). They might be
-    // overwritten in the runtime call below. We don't have any callee-saved
-    // registers in wasm, so no need to store anything else.
-    RegList gp_regs;
-    for (Register gp_param_reg : wasm::kGpParamRegisters) {
-      gp_regs.set(gp_param_reg);
-    }
-
-    DoubleRegList fp_regs;
-    for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
-      fp_regs.set(fp_param_reg);
-    }
-
-    // List must match register numbers under kFpParamRegisters.
-    constexpr Simd128RegList simd_regs = {v1, v2, v3, v4, v5, v6, v7, v8};
-
-    CHECK_EQ(gp_regs.Count(), arraysize(wasm::kGpParamRegisters));
-    CHECK_EQ(fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
-    CHECK_EQ(simd_regs.Count(), arraysize(wasm::kFpParamRegisters));
-    CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs + 1,
-             gp_regs.Count());
-    CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
-             fp_regs.Count());
-    CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
-             simd_regs.Count());
-
-    __ MultiPush(gp_regs);
-    __ MultiPushF64AndV128(fp_regs, simd_regs, ip, r0);
+    {
+      SaveWasmParamsScope save_params(masm);  // Will use r0 and ip as scratch.

     // Push the Wasm instance as an explicit argument to the runtime function.
-    __ Push(kWasmInstanceRegister);
+    __ push(kWasmInstanceRegister);
     // Push the function index as second argument.
-    __ Push(kWasmCompileLazyFuncIndexRegister);
-    // Allocate a stack slot for the NativeModule, the pushed value does not
-    // matter.
-    __ push(r11);
+    __ push(kWasmCompileLazyFuncIndexRegister);
     // Initialize the JavaScript context with 0. CEntry will use it to
     // set the current context on the isolate.
     __ LoadSmiLiteral(cp, Smi::zero());
-    __ CallRuntime(Runtime::kWasmCompileLazy, 3);
+    __ CallRuntime(Runtime::kWasmCompileLazy, 2);
     // The runtime function returns the jump table slot offset as a Smi. Use
     // that to compute the jump target in r11.
     __ SmiUntag(kReturnRegister0);
     __ mr(r11, kReturnRegister0);

-    // Restore registers.
-    __ MultiPopF64AndV128(fp_regs, simd_regs, ip, r0);
-    __ MultiPop(gp_regs);
+    // Saved parameters are restored at the end of this block.
+    }

     // After the instance register has been restored, we can add the jump table
-    // start to the jump table offset already stored in r8.
-    __ LoadU64(
-        ip,
-        MemOperand(kWasmInstanceRegister,
-                   WasmInstanceObject::kJumpTableStartOffset - kHeapObjectTag),
-        r0);
+    // start to the jump table offset already stored in r11.
+    __ LoadU64(ip,
+               FieldMemOperand(kWasmInstanceRegister,
+                               WasmInstanceObject::kJumpTableStartOffset),
+               r0);
     __ AddS64(r11, r11, ip);
   }
@@ -174,6 +174,12 @@ transitioning macro PromiseAllResolveElementClosure<F: type>(
   const arrayMap =
       *NativeContextSlot(
           nativeContext, ContextSlot::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX);

+  // If resolve and reject handlers close over values to keep track of whether
+  // an input promise is already settled, mark the values array as COW before
+  // letting it escape to user code.
+  if (hasResolveAndRejectClosures) MakeFixedArrayCOW(values);
+
   const valuesArray = NewJSArray(arrayMap, values);
   Call(promiseContext, resolve, Undefined, valuesArray);
 }
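MakeFixedArrayCOW protects the closures' bookkeeping: once the values array escapes to user code, any write through the resulting JSArray must first replace the shared backing store with a private copy, so the resolve/reject closures that still reference the original never observe the mutation. A rough sketch of the copy-on-write discipline, with ordinary C++ types rather than the V8 heap:

    #include <cstddef>
    #include <memory>
    #include <vector>

    struct CowArray {
      std::shared_ptr<std::vector<int>> store;

      int Get(std::size_t i) const { return (*store)[i]; }

      void Set(std::size_t i, int v) {
        if (store.use_count() > 1) {  // Shared with someone else: copy first.
          store = std::make_shared<std::vector<int>>(*store);
        }
        (*store)[i] = v;  // Mutates only the private copy.
      }
    };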
@@ -138,7 +138,8 @@ transitioning macro PerformPromiseAll<F1: type, F2: type>(
     nativeContext: NativeContext, iter: iterator::IteratorRecord,
     constructor: Constructor, capability: PromiseCapability,
     promiseResolveFunction: JSAny, createResolveElementFunctor: F1,
-    createRejectElementFunctor: F2): JSAny labels
+    createRejectElementFunctor: F2,
+    hasResolveAndRejectClosures: constexpr bool): JSAny labels
 Reject(JSAny) {
   const promise = capability.promise;
   const resolve = capability.resolve;
@@ -308,6 +309,12 @@ Reject(JSAny) {
       const arrayMap =
           *NativeContextSlot(
               nativeContext, ContextSlot::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX);

+      // If resolve and reject handlers close over values to keep track of
+      // whether an input promise is already settled, mark the values array as
+      // COW before letting it escape to user code.
+      if (hasResolveAndRejectClosures) MakeFixedArrayCOW(values);
+
       const valuesArray = NewJSArray(arrayMap, values);
       Call(nativeContext, UnsafeCast<JSAny>(resolve), Undefined, valuesArray);
     }
@@ -319,7 +326,8 @@ Reject(JSAny) {
 transitioning macro GeneratePromiseAll<F1: type, F2: type>(
     implicit context: Context)(
     receiver: JSAny, iterable: JSAny, createResolveElementFunctor: F1,
-    createRejectElementFunctor: F2, message: constexpr string): JSAny {
+    createRejectElementFunctor: F2, message: constexpr string,
+    hasResolveAndRejectClosures: constexpr bool): JSAny {
   const nativeContext = LoadNativeContext(context);
   // Let C be the this value.
   // If Type(C) is not Object, throw a TypeError exception.
@@ -352,7 +360,8 @@ transitioning macro GeneratePromiseAll<F1: type, F2: type>(
     // IfAbruptRejectPromise(result, promiseCapability).
     return PerformPromiseAll(
         nativeContext, i, constructor, capability, promiseResolveFunction,
-        createResolveElementFunctor, createRejectElementFunctor)
+        createResolveElementFunctor, createRejectElementFunctor,
+        hasResolveAndRejectClosures)
         otherwise Reject;
   } catch (e, _message) deferred {
     goto Reject(e);
@@ -368,7 +377,7 @@ transitioning javascript builtin PromiseAll(
     js-implicit context: Context, receiver: JSAny)(iterable: JSAny): JSAny {
   return GeneratePromiseAll(
       receiver, iterable, PromiseAllResolveElementFunctor{},
-      PromiseAllRejectElementFunctor{}, 'Promise.all');
+      PromiseAllRejectElementFunctor{}, 'Promise.all', false);
 }

 // ES#sec-promise.allsettled
@@ -377,7 +386,7 @@ transitioning javascript builtin PromiseAllSettled(
     js-implicit context: Context, receiver: JSAny)(iterable: JSAny): JSAny {
   return GeneratePromiseAll(
       receiver, iterable, PromiseAllSettledResolveElementFunctor{},
-      PromiseAllSettledRejectElementFunctor{}, 'Promise.allSettled');
+      PromiseAllSettledRejectElementFunctor{}, 'Promise.allSettled', true);
 }

 extern macro PromiseAllResolveElementSharedFunConstant(): SharedFunctionInfo;
@@ -385,4 +394,6 @@ extern macro PromiseAllSettledRejectElementSharedFunConstant():
     SharedFunctionInfo;
 extern macro PromiseAllSettledResolveElementSharedFunConstant():
     SharedFunctionInfo;
+
+extern macro MakeFixedArrayCOW(FixedArray): void;
 }
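The new hasResolveAndRejectClosures argument is a Torque constexpr bool, which acts much like a C++ non-type template parameter: each call site stamps out a specialized body with the branch resolved at compile time, so the Promise.all variant carries no COW check at all. A rough C++ analogy, with hypothetical names:

    template <bool kHasResolveAndRejectClosures>
    void PerformPromiseAllSketch() {
      if constexpr (kHasResolveAndRejectClosures) {
        // MakeFixedArrayCOW(values) would be emitted only in this variant.
      }
      // ... shared tail ...
    }

    void PromiseAllSketch() { PerformPromiseAllSketch<false>(); }
    void PromiseAllSettledSketch() { PerformPromiseAllSketch<true>(); }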
@@ -21,8 +21,12 @@
 #include "src/objects/objects-inl.h"
 #include "src/objects/smi.h"
 #include "src/runtime/runtime.h"
+
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/baseline/liftoff-assembler-defs.h"
 #include "src/wasm/wasm-linkage.h"
 #include "src/wasm/wasm-objects.h"
+#endif  // V8_ENABLE_WEBASSEMBLY

 namespace v8 {
 namespace internal {
@@ -2625,54 +2629,115 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
 }

 #if V8_ENABLE_WEBASSEMBLY
-void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
-  // The function index was put in t0 by the jump table trampoline.
-  // Convert to Smi for the runtime call
-  __ SmiTag(kWasmCompileLazyFuncIndexRegister);
-
-  RegList kSavedGpRegs = ([]() constexpr {
+// Compute register lists for parameters to be saved. We save all parameter
+// registers (see wasm-linkage.h). They might be overwritten in the runtime
+// call below. We don't have any callee-saved registers in wasm, so no need to
+// store anything else.
+constexpr RegList kSavedGpRegs = ([]() constexpr {
   RegList saved_gp_regs;
   for (Register gp_param_reg : wasm::kGpParamRegisters) {
     saved_gp_regs.set(gp_param_reg);
   }

+  // The instance has already been stored in the fixed part of the frame.
+  saved_gp_regs.clear(kWasmInstanceRegister);
   // All set registers were unique.
-  CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters));
-  // The Wasm instance must be part of the saved registers.
-  CHECK(saved_gp_regs.has(kWasmInstanceRegister));
-  CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs + 1,
+  CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters) - 1);
+  CHECK_EQ(WasmLiftoffSetupFrameConstants::kNumberOfSavedGpParamRegs,
            saved_gp_regs.Count());
   return saved_gp_regs;
 })();

-  DoubleRegList kSavedFpRegs = ([]() constexpr {
+constexpr DoubleRegList kSavedFpRegs = ([]() constexpr {
   DoubleRegList saved_fp_regs;
   for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
     saved_fp_regs.set(fp_param_reg);
   }

   CHECK_EQ(saved_fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
-  CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
+  CHECK_EQ(WasmLiftoffSetupFrameConstants::kNumberOfSavedFpParamRegs,
            saved_fp_regs.Count());
   return saved_fp_regs;
 })();

+// When entering this builtin, we have just created a Wasm stack frame:
+//
+// [ Wasm instance ]     <-- sp
+// [ WASM frame marker ]
+// [ saved fp ]          <-- fp
+//
+// Add the feedback vector to the stack.
+//
+// [ feedback vector ]   <-- sp
+// [ Wasm instance ]
+// [ WASM frame marker ]
+// [ saved fp ]          <-- fp
+void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
+  Register func_index = wasm::kLiftoffFrameSetupFunctionReg;
+  Register vector = t1;
+  Register scratch = t2;
+  Label allocate_vector, done;
+
+  __ LoadWord(vector,
+              FieldMemOperand(kWasmInstanceRegister,
+                              WasmInstanceObject::kFeedbackVectorsOffset));
+  __ CalcScaledAddress(vector, vector, func_index, kTaggedSizeLog2);
+  __ LoadWord(vector, FieldMemOperand(vector, FixedArray::kHeaderSize));
+  __ JumpIfSmi(vector, &allocate_vector);
+  __ bind(&done);
+  __ Push(vector);
+  __ Ret();
+
+  __ bind(&allocate_vector);
+  // Feedback vector doesn't exist yet. Call the runtime to allocate it.
+  // We temporarily change the frame type for this, because we need special
+  // handling by the stack walker in case of GC.
+  __ li(scratch, StackFrame::TypeToMarker(StackFrame::WASM_LIFTOFF_SETUP));
+  __ StoreWord(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
+
+  // Save registers.
+  __ MultiPush(kSavedGpRegs);
+  __ MultiPushFPU(kSavedFpRegs);
+  __ Push(ra);
+
+  // Arguments to the runtime function: instance, func_index, and an
+  // additional stack slot for the NativeModule.
+  __ SmiTag(func_index);
+  __ Push(kWasmInstanceRegister, func_index, zero_reg);
+  __ Move(cp, Smi::zero());
+  __ CallRuntime(Runtime::kWasmAllocateFeedbackVector, 3);
+  __ mv(vector, kReturnRegister0);
+
+  // Restore registers and frame type.
+  __ Pop(ra);
+  __ MultiPopFPU(kSavedFpRegs);
+  __ MultiPop(kSavedGpRegs);
+  __ LoadWord(kWasmInstanceRegister,
+              MemOperand(fp, WasmFrameConstants::kWasmInstanceOffset));
+  __ li(scratch, StackFrame::TypeToMarker(StackFrame::WASM));
+  __ StoreWord(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
+  __ Branch(&done);
+}
+
+void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
+  // The function index was put in t0 by the jump table trampoline.
+  // Convert to Smi for the runtime call
+  __ SmiTag(kWasmCompileLazyFuncIndexRegister);
+
   {
     HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
-    FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
+    FrameScope scope(masm, StackFrame::INTERNAL);

+    // Save registers that we need to keep alive across the runtime call.
+    __ Push(kWasmInstanceRegister);
     __ MultiPush(kSavedGpRegs);
     __ MultiPushFPU(kSavedFpRegs);

-    // Pass instance and function index as an explicit arguments to the runtime
-    // function.
-    // Also allocate a stack slot for the NativeModule, the pushed value does
-    // not matter.
-    __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister, a0);
+    __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
     // Initialize the JavaScript context with 0. CEntry will use it to
     // set the current context on the isolate.
     __ Move(kContextRegister, Smi::zero());
-    __ CallRuntime(Runtime::kWasmCompileLazy, 3);
+    __ CallRuntime(Runtime::kWasmCompileLazy, 2);

     __ SmiUntag(s1, a0);  // move return value to s1 since a0 will be restored
                           // to the value before the call
@@ -2681,14 +2746,14 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
     // Restore registers.
     __ MultiPopFPU(kSavedFpRegs);
     __ MultiPop(kSavedGpRegs);
+    __ Pop(kWasmInstanceRegister);
   }

   // The runtime function returned the jump table slot offset as a Smi (now in
   // x17). Use that to compute the jump target.
-  __ LoadWord(
-      kScratchReg,
-      MemOperand(kWasmInstanceRegister,
-                 WasmInstanceObject::kJumpTableStartOffset - kHeapObjectTag));
+  __ LoadWord(kScratchReg,
+              FieldMemOperand(kWasmInstanceRegister,
+                              WasmInstanceObject::kJumpTableStartOffset));

   __ AddWord(s1, s1, Operand(kScratchReg));
   // Finally, jump to the entrypoint.
   __ Jump(s1);
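The SmiTag/SmiUntag pairs around these runtime calls rely on Smis being the integer value shifted left past a tag bit. A simplified model, assuming 31-bit Smis where tagging is a one-bit shift (full 64-bit Smi builds shift by 32 instead):

    #include <cstdint>

    constexpr int64_t SmiTagSketch(int64_t value) { return value << 1; }
    constexpr int64_t SmiUntagSketch(int64_t smi) { return smi >> 1; }

    // The low bit of a tagged Smi is 0, distinguishing it from heap pointers.
    static_assert((SmiTagSketch(42) & 1) == 0);
    static_assert(SmiUntagSketch(SmiTagSketch(42)) == 42);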
@@ -24,6 +24,7 @@
 #include "src/runtime/runtime.h"

 #if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/baseline/liftoff-assembler-defs.h"
 #include "src/wasm/wasm-linkage.h"
 #include "src/wasm/wasm-objects.h"
 #endif  // V8_ENABLE_WEBASSEMBLY
@@ -2767,6 +2768,87 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
 }

 #if V8_ENABLE_WEBASSEMBLY
+
+struct SaveWasmParamsScope {
+  explicit SaveWasmParamsScope(MacroAssembler* masm) : masm(masm) {
+    for (Register gp_param_reg : wasm::kGpParamRegisters) {
+      gp_regs.set(gp_param_reg);
+    }
+    for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
+      fp_regs.set(fp_param_reg);
+    }
+
+    CHECK_EQ(gp_regs.Count(), arraysize(wasm::kGpParamRegisters));
+    CHECK_EQ(fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
+    CHECK_EQ(WasmLiftoffSetupFrameConstants::kNumberOfSavedGpParamRegs + 1,
+             gp_regs.Count());
+    CHECK_EQ(WasmLiftoffSetupFrameConstants::kNumberOfSavedFpParamRegs,
+             fp_regs.Count());
+
+    __ MultiPush(gp_regs);
+    __ MultiPushF64OrV128(fp_regs, r1);
+  }
+  ~SaveWasmParamsScope() {
+    __ MultiPopF64OrV128(fp_regs, r1);
+    __ MultiPop(gp_regs);
+  }
+
+  RegList gp_regs;
+  DoubleRegList fp_regs;
+  MacroAssembler* masm;
+};
+
+void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
+  Register func_index = wasm::kLiftoffFrameSetupFunctionReg;
+  Register vector = ip;
+  Register scratch = r0;
+  Label allocate_vector, done;
+
+  __ LoadTaggedPointerField(
+      vector, FieldMemOperand(kWasmInstanceRegister,
+                              WasmInstanceObject::kFeedbackVectorsOffset));
+  __ ShiftLeftU64(scratch, func_index, Operand(kTaggedSizeLog2));
+  __ AddS64(vector, vector, scratch);
+  __ LoadTaggedPointerField(vector,
+                            FieldMemOperand(vector, FixedArray::kHeaderSize));
+  __ JumpIfSmi(vector, &allocate_vector);
+  __ bind(&done);
+  __ push(kWasmInstanceRegister);
+  __ push(vector);
+  __ Ret();
+
+  __ bind(&allocate_vector);
+
+  // Feedback vector doesn't exist yet. Call the runtime to allocate it.
+  // We temporarily change the frame type for this, because we need special
+  // handling by the stack walker in case of GC.
+  __ mov(scratch,
+         Operand(StackFrame::TypeToMarker(StackFrame::WASM_LIFTOFF_SETUP)));
+  __ StoreU64(scratch, MemOperand(sp));
+
+  // Save current return address as it will get clobbered during CallRuntime.
+  __ push(r14);
+  {
+    SaveWasmParamsScope save_params(masm);
+    // Arguments to the runtime function: instance, func_index.
+    __ push(kWasmInstanceRegister);
+    __ SmiTag(func_index);
+    __ push(func_index);
+    // Allocate a stack slot where the runtime function can spill a pointer
+    // to the {NativeModule}.
+    __ push(r10);
+    __ LoadSmiLiteral(cp, Smi::zero());
+    __ CallRuntime(Runtime::kWasmAllocateFeedbackVector, 3);
+    __ mov(vector, kReturnRegister0);
+    // Saved parameters are restored at the end of this block.
+  }
+  __ pop(r14);
+
+  __ mov(scratch, Operand(StackFrame::TypeToMarker(StackFrame::WASM)));
+  __ StoreU64(scratch, MemOperand(sp));
+  __ b(&done);
+}
+
 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
   // The function index was put in a register by the jump table trampoline.
   // Convert to Smi for the runtime call.
@@ -2774,56 +2856,31 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {

   {
     HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
-    FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);

-    // Save all parameter registers (see wasm-linkage.h). They might be
-    // overwritten in the runtime call below. We don't have any callee-saved
-    // registers in wasm, so no need to store anything else.
-    RegList gp_regs;
-    for (Register gp_param_reg : wasm::kGpParamRegisters) {
-      gp_regs.set(gp_param_reg);
-    }
-
-    DoubleRegList fp_regs;
-    for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
-      fp_regs.set(fp_param_reg);
-    }
-
-    CHECK_EQ(gp_regs.Count(), arraysize(wasm::kGpParamRegisters));
-    CHECK_EQ(fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
-    CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs + 1,
-             gp_regs.Count());
-    CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
-             fp_regs.Count());
-
-    __ MultiPush(gp_regs);
-    __ MultiPushF64OrV128(fp_regs, ip);
+    {
+      SaveWasmParamsScope save_params(masm);

     // Push the Wasm instance as an explicit argument to the runtime function.
-    __ Push(kWasmInstanceRegister);
+    __ push(kWasmInstanceRegister);
     // Push the function index as second argument.
-    __ Push(kWasmCompileLazyFuncIndexRegister);
-    // Allocate a stack slot for the NativeModule, the pushed value does not
-    // matter.
-    __ push(ip);
+    __ push(kWasmCompileLazyFuncIndexRegister);
     // Initialize the JavaScript context with 0. CEntry will use it to
     // set the current context on the isolate.
     __ LoadSmiLiteral(cp, Smi::zero());
-    __ CallRuntime(Runtime::kWasmCompileLazy, 3);
+    __ CallRuntime(Runtime::kWasmCompileLazy, 2);
     // The runtime function returns the jump table slot offset as a Smi. Use
     // that to compute the jump target in ip.
     __ SmiUntag(kReturnRegister0);
     __ mov(ip, kReturnRegister0);

-    // Restore registers.
-    __ MultiPopF64OrV128(fp_regs, r1);
-    __ MultiPop(gp_regs);
+    // Saved parameters are restored at the end of this block.
+    }

     // After the instance register has been restored, we can add the jump table
     // start to the jump table offset already stored in r8.
-    __ LoadU64(r0, MemOperand(kWasmInstanceRegister,
-                              WasmInstanceObject::kJumpTableStartOffset -
-                                  kHeapObjectTag));
+    __ LoadU64(r0, FieldMemOperand(kWasmInstanceRegister,
+                                   WasmInstanceObject::kJumpTableStartOffset));
     __ AddS64(ip, ip, r0);
   }
@@ -329,11 +329,18 @@ transitioning macro TypedArrayCreateByLength(implicit context: Context)(

   // 2. Perform ? ValidateTypedArray(newTypedArray).
   // ValidateTypedArray currently returns the array, not the ViewBuffer.
+  const newTypedArrayLength =
+      ValidateTypedArrayAndGetLength(context, newTypedArrayObj, methodName);
   const newTypedArray: JSTypedArray =
-      ValidateTypedArray(context, newTypedArrayObj, methodName);
-  // TODO(v8:11111): bit_field should be initialized to 0.
-  newTypedArray.bit_field.is_length_tracking = false;
-  newTypedArray.bit_field.is_backed_by_rab = false;
+      UnsafeCast<JSTypedArray>(newTypedArrayObj);
+  dcheck(
+      newTypedArray.bit_field.is_backed_by_rab ==
+      (IsResizableArrayBuffer(newTypedArray.buffer) &&
+       !IsSharedArrayBuffer(newTypedArray.buffer)));
+  dcheck(
+      !newTypedArray.bit_field.is_length_tracking ||
+      IsResizableArrayBuffer(newTypedArray.buffer));

   if (IsDetachedBuffer(newTypedArray.buffer)) deferred {
       ThrowTypeError(MessageTemplate::kDetachedOperation, methodName);
@@ -342,7 +349,7 @@ transitioning macro TypedArrayCreateByLength(implicit context: Context)(
   // 3. If argumentList is a List of a single Number, then
   //   a. If newTypedArray.[[ArrayLength]] < argumentList[0], throw a
   //      TypeError exception.
-  if (newTypedArray.length < Convert<uintptr>(length)) deferred {
+  if (newTypedArrayLength < Convert<uintptr>(length)) deferred {
       ThrowTypeError(MessageTemplate::kTypedArrayTooShort);
     }
@@ -104,8 +104,9 @@ TypedArrayFrom(js-implicit context: NativeContext, receiver: JSAny)(
       finalSource = source;
     }
     case (sourceTypedArray: JSTypedArray): {
-      const sourceBuffer = sourceTypedArray.buffer;
-      if (IsDetachedBuffer(sourceBuffer)) goto UseUserProvidedIterator;
+      finalLength =
+          LoadJSTypedArrayLengthAndCheckDetached(sourceTypedArray)
+          otherwise UseUserProvidedIterator;

       // Check that the iterator function is exactly
       // Builtin::kTypedArrayPrototypeValues.
@@ -117,7 +118,6 @@ TypedArrayFrom(js-implicit context: NativeContext, receiver: JSAny)(
       // Source is a TypedArray with unmodified iterator behavior. Use the
       // source object directly, taking advantage of the special-case code
      // in TypedArrayCopyElements
-      finalLength = sourceTypedArray.length;
       finalSource = source;
     }
     case (Object): {
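LoadJSTypedArrayLengthAndCheckDetached folds the detach check and the length load into one fallible helper, so the length can no longer be read on a path that skipped the check. A C++ analogy using optional, with illustrative names rather than V8 API:

    #include <cstdint>
    #include <optional>

    struct TypedArraySketch {
      bool detached;
      uintptr_t length;
    };

    // Returns nullopt when detached; the "otherwise UseUserProvidedIterator"
    // label in the Torque code plays the same role as the empty optional.
    std::optional<uintptr_t> LoadLengthAndCheckDetached(
        const TypedArraySketch& a) {
      if (a.detached) return std::nullopt;
      return a.length;  // Only meaningful while the buffer is attached.
    }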
@@ -484,30 +484,22 @@ builtin WasmAtomicNotify(offset: uintptr, count: uint32): uint32 {
   return Unsigned(SmiToInt32(result));
 }

-builtin WasmI32AtomicWait64(
-    offset: uintptr, expectedValue: int32, timeout: intptr): uint32 {
-  if constexpr (Is64()) {
+builtin WasmI32AtomicWait(
+    offset: uintptr, expectedValue: int32, timeout: BigInt): uint32 {
   const instance: WasmInstanceObject = LoadInstanceFromFrame();
   const result: Smi = runtime::WasmI32AtomicWait(
       LoadContextFromInstance(instance), instance, UintPtr53ToNumber(offset),
-      WasmInt32ToNumber(expectedValue), I64ToBigInt(timeout));
+      WasmInt32ToNumber(expectedValue), timeout);
   return Unsigned(SmiToInt32(result));
-  } else {
-    unreachable;
-  }
 }

-builtin WasmI64AtomicWait64(
-    offset: uintptr, expectedValue: intptr, timeout: intptr): uint32 {
-  if constexpr (Is64()) {
+builtin WasmI64AtomicWait(
+    offset: uintptr, expectedValue: BigInt, timeout: BigInt): uint32 {
   const instance: WasmInstanceObject = LoadInstanceFromFrame();
   const result: Smi = runtime::WasmI64AtomicWait(
       LoadContextFromInstance(instance), instance, UintPtr53ToNumber(offset),
-      I64ToBigInt(expectedValue), I64ToBigInt(timeout));
+      expectedValue, timeout);
   return Unsigned(SmiToInt32(result));
-  } else {
-    unreachable;
-  }
 }

 // Type feedback collection support for `call_ref`.
@@ -2909,27 +2909,19 @@ void Builtins::Generate_MaglevOutOfLinePrologue(MacroAssembler* masm) {
 }

 #if V8_ENABLE_WEBASSEMBLY
-void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
-  // The function index was pushed to the stack by the caller as int32.
-  __ Pop(r15);
-  // Convert to Smi for the runtime call.
-  __ SmiTag(r15);
-
-  {
-    HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
-    FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
-
+// Returns the offset beyond the last saved FP register.
+int SaveWasmParams(MacroAssembler* masm) {
   // Save all parameter registers (see wasm-linkage.h). They might be
-  // overwritten in the runtime call below. We don't have any callee-saved
+  // overwritten in the subsequent runtime call. We don't have any callee-saved
   // registers in wasm, so no need to store anything else.
-  static_assert(
-      WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs + 1 ==
+  static_assert(WasmLiftoffSetupFrameConstants::kNumberOfSavedGpParamRegs + 1 ==
                     arraysize(wasm::kGpParamRegisters),
                 "frame size mismatch");
   for (Register reg : wasm::kGpParamRegisters) {
     __ Push(reg);
   }
-  static_assert(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs ==
+  static_assert(WasmLiftoffSetupFrameConstants::kNumberOfSavedFpParamRegs ==
                     arraysize(wasm::kFpParamRegisters),
                 "frame size mismatch");
   __ AllocateStackSpace(kSimd128Size * arraysize(wasm::kFpParamRegisters));
@@ -2938,25 +2930,12 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
     __ movdqu(Operand(rsp, offset), reg);
     offset += kSimd128Size;
   }
+  return offset;
+}

-  // Push the Wasm instance as an explicit argument to the runtime function.
-  __ Push(kWasmInstanceRegister);
-  // Push the function index as second argument.
-  __ Push(r15);
-
-  // Allocate a stack slot, where the runtime function can spill a pointer to
-  // the the NativeModule.
-  __ Push(rsp);
-  // Initialize the JavaScript context with 0. CEntry will use it to
-  // set the current context on the isolate.
-  __ Move(kContextRegister, Smi::zero());
-  __ CallRuntime(Runtime::kWasmCompileLazy, 3);
-  // The runtime function returns the jump table slot offset as a Smi. Use
-  // that to compute the jump target in r15.
-  __ SmiUntagUnsigned(kReturnRegister0);
-  __ movq(r15, kReturnRegister0);
-
-  // Restore registers.
+// Consumes the offset beyond the last saved FP register (as returned by
+// {SaveWasmParams}).
+void RestoreWasmParams(MacroAssembler* masm, int offset) {
   for (DoubleRegister reg : base::Reversed(wasm::kFpParamRegisters)) {
     offset -= kSimd128Size;
     __ movdqu(reg, Operand(rsp, offset));
@@ -2966,6 +2945,102 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
   for (Register reg : base::Reversed(wasm::kGpParamRegisters)) {
     __ Pop(reg);
   }
+}
+
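SaveWasmParams and RestoreWasmParams communicate through the returned byte offset: the saver reports the first byte past the last spilled FP register, and the restorer walks the same region back down to zero. A standalone sketch of that contract, with a placeholder register count rather than the real one:

    #include <cassert>

    constexpr int kSimd128Size = 16;
    constexpr int kNumFpParamRegsSketch = 6;  // Illustrative count only.

    int SaveWasmParamsSketch() {
      int offset = 0;
      for (int i = 0; i < kNumFpParamRegsSketch; ++i) offset += kSimd128Size;
      return offset;  // First byte past the last saved register.
    }

    void RestoreWasmParamsSketch(int offset) {
      for (int i = 0; i < kNumFpParamRegsSketch; ++i) offset -= kSimd128Size;
      assert(offset == 0);  // Everything that was saved got restored.
    }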
|
||||||
|
// When this builtin is called, the topmost stack entry is the calling pc.
|
||||||
|
// This is replaced with the following:
|
||||||
|
//
|
||||||
|
// [ calling pc ] <-- rsp; popped by {ret}.
|
||||||
|
// [ feedback vector ]
|
||||||
|
// [ Wasm instance ]
|
||||||
|
// [ WASM frame marker ]
|
||||||
|
// [ saved rbp ] <-- rbp; this is where "calling pc" used to be.
|
||||||
|
void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
|
||||||
|
Register func_index = wasm::kLiftoffFrameSetupFunctionReg;
|
||||||
|
Register vector = r15;
|
||||||
|
Register calling_pc = rdi;
|
||||||
|
|
||||||
|
__ Pop(calling_pc);
|
||||||
|
__ Push(rbp);
|
||||||
|
__ Move(rbp, rsp);
|
||||||
|
__ Push(Immediate(StackFrame::TypeToMarker(StackFrame::WASM)));
|
||||||
|
__ LoadTaggedPointerField(
|
||||||
|
vector, FieldOperand(kWasmInstanceRegister,
|
||||||
|
WasmInstanceObject::kFeedbackVectorsOffset));
|
||||||
|
__ LoadTaggedPointerField(vector,
|
||||||
|
FieldOperand(vector, func_index, times_tagged_size,
|
||||||
|
FixedArray::kHeaderSize));
|
||||||
|
Label allocate_vector, done;
|
||||||
|
__ JumpIfSmi(vector, &allocate_vector);
|
||||||
|
__ bind(&done);
|
||||||
|
__ Push(kWasmInstanceRegister);
|
||||||
|
__ Push(vector);
|
||||||
|
__ Push(calling_pc);
|
||||||
|
__ ret(0);
|
||||||
|
|
||||||
|
__ bind(&allocate_vector);
|
||||||
|
// Feedback vector doesn't exist yet. Call the runtime to allocate it.
|
||||||
|
// We temporarily change the frame type for this, because we need special
|
||||||
|
// handling by the stack walker in case of GC.
|
||||||
|
// For the runtime call, we create the following stack layout:
|
||||||
|
//
|
||||||
|
// [ reserved slot for NativeModule ] <-- arg[2]
|
||||||
|
// [ ("declared") function index ] <-- arg[1] for runtime func.
|
||||||
|
// [ Wasm instance ] <-- arg[0]
|
||||||
|
// [ ...spilled Wasm parameters... ]
|
||||||
|
// [ calling pc ]
|
||||||
|
// [ WASM_LIFTOFF_SETUP marker ]
|
||||||
|
// [ saved rbp ]
|
||||||
|
__ movq(Operand(rbp, TypedFrameConstants::kFrameTypeOffset),
|
||||||
|
Immediate(StackFrame::TypeToMarker(StackFrame::WASM_LIFTOFF_SETUP)));
|
||||||
|
__ set_has_frame(true);
|
||||||
|
__ Push(calling_pc);
|
||||||
|
int offset = SaveWasmParams(masm);
|
||||||
|
|
||||||
|
// Arguments to the runtime function: instance, func_index.
|
||||||
|
__ Push(kWasmInstanceRegister);
|
||||||
|
__ SmiTag(func_index);
|
||||||
|
__ Push(func_index);
|
||||||
|
// Allocate a stack slot where the runtime function can spill a pointer
|
||||||
|
// to the NativeModule.
|
||||||
|
__ Push(rsp);
|
||||||
|
__ Move(kContextRegister, Smi::zero());
|
||||||
|
__ CallRuntime(Runtime::kWasmAllocateFeedbackVector, 3);
|
||||||
|
__ movq(vector, kReturnRegister0);
|
||||||
|
|
||||||
|
RestoreWasmParams(masm, offset);
|
||||||
|
__ Pop(calling_pc);
|
||||||
|
// Restore correct frame type.
|
||||||
|
__ movq(Operand(rbp, TypedFrameConstants::kFrameTypeOffset),
|
||||||
|
Immediate(StackFrame::TypeToMarker(StackFrame::WASM)));
|
||||||
|
__ jmp(&done);
|
||||||
|
}
|
||||||
|
|
+
+void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
+  // The function index was pushed to the stack by the caller as int32.
+  __ Pop(r15);
+  // Convert to Smi for the runtime call.
+  __ SmiTag(r15);
+
+  {
+    HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    int offset = SaveWasmParams(masm);
+
+    // Push arguments for the runtime function.
+    __ Push(kWasmInstanceRegister);
+    __ Push(r15);
+    // Initialize the JavaScript context with 0. CEntry will use it to
+    // set the current context on the isolate.
+    __ Move(kContextRegister, Smi::zero());
+    __ CallRuntime(Runtime::kWasmCompileLazy, 2);
+    // The runtime function returns the jump table slot offset as a Smi. Use
+    // that to compute the jump target in r15.
+    __ SmiUntagUnsigned(kReturnRegister0);
+    __ movq(r15, kReturnRegister0);
+
+    RestoreWasmParams(masm, offset);
     // After the instance register has been restored, we can add the jump table
     // start to the jump table offset already stored in r15.
     __ addq(r15, MemOperand(kWasmInstanceRegister,
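The SmiUntag-then-add sequence is plain offset arithmetic: the runtime returns a byte offset into the module's jump table, and the builtin adds the table's start address (loaded from the instance) before jumping. A minimal standalone sketch of that step:

    // Sketch only: mirrors the `__ addq(r15, <jump table start>)` step above;
    // inputs are hypothetical.
    #include <cstdint>

    uintptr_t JumpTarget(uintptr_t jump_table_start, uintptr_t untagged_offset) {
      return jump_table_start + untagged_offset;
    }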
@@ -3076,7 +3151,7 @@ void RestoreAfterBuiltinCall(MacroAssembler* masm, Register function_data,
 void SwitchStackState(MacroAssembler* masm, Register jmpbuf,
                       wasm::JumpBuffer::StackState old_state,
                       wasm::JumpBuffer::StackState new_state) {
-  if (FLAG_debug_code) {
+  if (v8_flags.debug_code) {
     __ cmpl(MemOperand(jmpbuf, wasm::kJmpBufStateOffset), Immediate(old_state));
     Label ok;
     __ j(equal, &ok, Label::kNear);
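This hunk is part of a repo-wide migration from free FLAG_* globals to members of one v8_flags struct; the hunks touching harmony_symbol_as_weakmap_key and testing_d8_test_runner further below follow the same pattern. A minimal sketch of the idea, with hypothetical definitions (the real struct is macro-generated):

    // Sketch only: illustrative flag plumbing, not V8's generated code.
    struct FlagValues {
      bool debug_code = false;
    };
    inline FlagValues v8_flags;
    // The old spelling can stay alive as an alias while call sites migrate:
    inline bool& FLAG_debug_code = v8_flags.debug_code;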
@@ -7,5 +7,6 @@ jkummerow@chromium.org
 leszeks@chromium.org
 mslekova@chromium.org
 nicohartmann@chromium.org
+tebbi@chromium.org

 per-file compiler.*=marja@chromium.org
@@ -1635,13 +1635,6 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
   DCHECK_EQ(actual_parameter_count, r0);
   DCHECK_EQ(expected_parameter_count, r2);

-  // If the expected parameter count is equal to the adaptor sentinel, no need
-  // to push undefined value as arguments.
-  if (kDontAdaptArgumentsSentinel != 0) {
-    cmp(expected_parameter_count, Operand(kDontAdaptArgumentsSentinel));
-    b(eq, &regular_invoke);
-  }
-
   // If overapplication or if the actual argument count is equal to the
   // formal parameter count, no need to push extra undefined values.
   sub(expected_parameter_count, expected_parameter_count,
@@ -2348,12 +2348,10 @@ void TurboAssembler::LoadCodeDataContainerCodeNonBuiltin(
     Register destination, Register code_data_container_object) {
   ASM_CODE_COMMENT(this);
   CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
-  // Given the fields layout we can read the Code reference as a full word.
-  static_assert(!V8_EXTERNAL_CODE_SPACE_BOOL ||
-                (CodeDataContainer::kCodeCageBaseUpper32BitsOffset ==
-                 CodeDataContainer::kCodeOffset + kTaggedSize));
+  // Compute the Code object pointer from the code entry point.
   Ldr(destination, FieldMemOperand(code_data_container_object,
-                                   CodeDataContainer::kCodeOffset));
+                                   CodeDataContainer::kCodeEntryPointOffset));
+  Sub(destination, destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
 }

 void TurboAssembler::CallCodeDataContainerObject(
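The deleted static_assert documented the old trick of reading the Code pointer as a full word next to the cage base; the replacement derives the Code object from its stored entry point instead. A standalone sketch of the subtraction, with placeholder constants:

    // Sketch only: kHeaderSize is a placeholder for Code::kHeaderSize.
    #include <cstdint>

    uintptr_t CodeObjectFromEntryPoint(uintptr_t entry_point) {
      constexpr uintptr_t kHeaderSize = 64;    // placeholder value
      constexpr uintptr_t kHeapObjectTag = 1;  // low tag bit on heap pointers
      // The first instruction lives kHeaderSize bytes into the Code object, so
      // stepping back (and re-applying the tag) recovers the tagged pointer.
      return entry_point - (kHeaderSize - kHeapObjectTag);
    }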
@@ -2506,13 +2504,6 @@ void MacroAssembler::InvokePrologue(Register formal_parameter_count,
   DCHECK_EQ(actual_argument_count, x0);
   DCHECK_EQ(formal_parameter_count, x2);

-  // If the formal parameter count is equal to the adaptor sentinel, no need
-  // to push undefined value as arguments.
-  if (kDontAdaptArgumentsSentinel != 0) {
-    Cmp(formal_parameter_count, Operand(kDontAdaptArgumentsSentinel));
-    B(eq, &regular_invoke);
-  }
-
   // If overapplication or if the actual argument count is equal to the
   // formal parameter count, no need to push extra undefined values.
   Register extra_argument_count = x2;
@@ -2860,7 +2851,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
     fourth_reg = cp;
 #if V8_ENABLE_WEBASSEMBLY
   } else if (type == StackFrame::WASM ||
-             type == StackFrame::WASM_COMPILE_LAZY ||
+             type == StackFrame::WASM_LIFTOFF_SETUP ||
              type == StackFrame::WASM_EXIT) {
     fourth_reg = kWasmInstanceRegister;
 #endif  // V8_ENABLE_WEBASSEMBLY
@@ -73,13 +73,13 @@ class V8_EXPORT_PRIVATE BackgroundMergeTask {
   std::vector<Handle<SharedFunctionInfo>> used_new_sfis_;

   // SharedFunctionInfos from the cached script which were not compiled, with
-  // function_data and feedback_metadata from the corresponding new
-  // SharedFunctionInfo. If the SharedFunctionInfo from the cached script is
-  // still uncompiled when finishing, the main thread must set the two fields.
+  // the corresponding new SharedFunctionInfo. If the SharedFunctionInfo from
+  // the cached script is still uncompiled when finishing, the main thread must
+  // copy all fields from the new SharedFunctionInfo to the SharedFunctionInfo
+  // from the cached script.
   struct NewCompiledDataForCachedSfi {
     Handle<SharedFunctionInfo> cached_sfi;
-    Handle<Object> function_data;
-    Handle<FeedbackMetadata> feedback_metadata;
+    Handle<SharedFunctionInfo> new_sfi;
   };
   std::vector<NewCompiledDataForCachedSfi> new_compiled_data_for_cached_sfis_;
@@ -4,6 +4,8 @@

 #include "src/codegen/code-stub-assembler.h"

+#include <stdio.h>
+
 #include <functional>

 #include "include/v8-internal.h"
@@ -845,6 +847,20 @@ TNode<IntPtrT> CodeStubAssembler::TryIntPtrMul(TNode<IntPtrT> a,
   return Projection<0>(pair);
 }

+TNode<IntPtrT> CodeStubAssembler::TryIntPtrDiv(TNode<IntPtrT> a,
+                                               TNode<IntPtrT> b,
+                                               Label* if_div_zero) {
+  GotoIf(IntPtrEqual(b, IntPtrConstant(0)), if_div_zero);
+  return IntPtrDiv(a, b);
+}
+
+TNode<IntPtrT> CodeStubAssembler::TryIntPtrMod(TNode<IntPtrT> a,
+                                               TNode<IntPtrT> b,
+                                               Label* if_div_zero) {
+  GotoIf(IntPtrEqual(b, IntPtrConstant(0)), if_div_zero);
+  return IntPtrMod(a, b);
+}
+
 TNode<Int32T> CodeStubAssembler::TryInt32Mul(TNode<Int32T> a, TNode<Int32T> b,
                                              Label* if_overflow) {
   TNode<PairT<Int32T, BoolT>> pair = Int32MulWithOverflow(a, b);
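TryIntPtrDiv and TryIntPtrMod guard the single input that makes IntPtrDiv/IntPtrMod undefined, branching out instead of trapping. A rough plain-C++ analogue of that control flow (the CSA versions jump to if_div_zero where this sketch returns false):

    // Sketch only: plain-C++ analogue of the CSA control flow above.
    #include <cstdint>

    bool TryIntPtrDiv(intptr_t a, intptr_t b, intptr_t* result) {
      if (b == 0) return false;  // ~ GotoIf(IntPtrEqual(b, 0), if_div_zero)
      *result = a / b;
      return true;
    }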
@@ -3025,7 +3041,8 @@ TNode<BytecodeArray> CodeStubAssembler::LoadSharedFunctionInfoBytecodeArray(
                   Int32Constant(static_cast<int>(CodeKind::BASELINE))));
 #endif  // DEBUG
     TNode<HeapObject> baseline_data = LoadObjectField<HeapObject>(
-        FromCodeT(code), Code::kDeoptimizationDataOrInterpreterDataOffset);
+        FromCodeTNonBuiltin(code),
+        Code::kDeoptimizationDataOrInterpreterDataOffset);
     var_result = baseline_data;
   }
   Goto(&check_for_interpreter_data);
@@ -3090,9 +3107,14 @@ void CodeStubAssembler::UnsafeStoreObjectFieldNoWriteBarrier(
       object, offset, value);
 }

-void CodeStubAssembler::StoreJSSharedStructInObjectField(
-    TNode<HeapObject> object, TNode<IntPtrT> offset, TNode<Object> value) {
-  CSA_DCHECK(this, IsJSSharedStruct(object));
+void CodeStubAssembler::StoreSharedObjectField(TNode<HeapObject> object,
+                                               TNode<IntPtrT> offset,
+                                               TNode<Object> value) {
+  CSA_DCHECK(
+      this,
+      WordNotEqual(WordAnd(LoadBasicMemoryChunkFlags(object),
+                           IntPtrConstant(BasicMemoryChunk::IN_SHARED_HEAP)),
+                   IntPtrConstant(0)));
   // JSSharedStructs are allocated in the shared old space, which is currently
   // collected by stopping the world, so the incremental write barrier is not
   // needed. They can only store Smis and other HeapObjects in the shared old
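The new CSA_DCHECK swaps a type check (IsJSSharedStruct) for a location check: any object on a page with the shared-heap bit set may use this barrier-free store. A standalone sketch of the flag test, with a placeholder bit value:

    // Sketch only: the real flag lives on V8's BasicMemoryChunk.
    #include <cstdint>

    constexpr uintptr_t kInSharedHeap = uintptr_t{1} << 3;  // placeholder bit

    bool IsInSharedHeap(uintptr_t chunk_flags) {
      return (chunk_flags & kInSharedHeap) != 0;  // what the CSA_DCHECK verifies
    }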
@@ -15292,26 +15314,46 @@ TorqueStructArguments CodeStubAssembler::GetFrameArguments(
 }

 void CodeStubAssembler::Print(const char* s) {
+  PrintToStream(s, fileno(stdout));
+}
+
+void CodeStubAssembler::PrintErr(const char* s) {
+  PrintToStream(s, fileno(stderr));
+}
+
+void CodeStubAssembler::PrintToStream(const char* s, int stream) {
   std::string formatted(s);
   formatted += "\n";
   CallRuntime(Runtime::kGlobalPrint, NoContextConstant(),
-              StringConstant(formatted.c_str()));
+              StringConstant(formatted.c_str()), SmiConstant(stream));
 }

 void CodeStubAssembler::Print(const char* prefix,
                               TNode<MaybeObject> tagged_value) {
+  PrintToStream(prefix, tagged_value, fileno(stdout));
+}
+
+void CodeStubAssembler::PrintErr(const char* prefix,
+                                 TNode<MaybeObject> tagged_value) {
+  PrintToStream(prefix, tagged_value, fileno(stderr));
+}
+
+void CodeStubAssembler::PrintToStream(const char* prefix,
+                                      TNode<MaybeObject> tagged_value,
+                                      int stream) {
   if (prefix != nullptr) {
     std::string formatted(prefix);
     formatted += ": ";
     Handle<String> string = isolate()->factory()->NewStringFromAsciiChecked(
         formatted.c_str(), AllocationType::kOld);
     CallRuntime(Runtime::kGlobalPrint, NoContextConstant(),
-                HeapConstant(string));
+                HeapConstant(string), SmiConstant(stream));
   }
   // CallRuntime only accepts Objects, so do an UncheckedCast to object.
   // DebugPrint explicitly checks whether the tagged value is a MaybeObject.
   TNode<Object> arg = UncheckedCast<Object>(tagged_value);
-  CallRuntime(Runtime::kDebugPrint, NoContextConstant(), arg);
+  CallRuntime(Runtime::kDebugPrint, NoContextConstant(), arg,
+              SmiConstant(stream));
 }

 IntegerLiteral CodeStubAssembler::ConstexprIntegerLiteralAdd(
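The refactor funnels Print and the new PrintErr through one PrintToStream that forwards a stream id (a fileno wrapped in a Smi) to the runtime. A rough standalone analogue of the plumbing:

    // Sketch only: plain-C++ analogue; the real code passes the stream id to
    // Runtime::kGlobalPrint / Runtime::kDebugPrint as a Smi.
    #include <cstdio>
    #include <string>

    void PrintToStream(const std::string& s, int stream) {
      std::FILE* out = (stream == fileno(stderr)) ? stderr : stdout;
      std::fprintf(out, "%s\n", s.c_str());
    }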
@@ -630,6 +630,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
                             Label* if_overflow);
   TNode<IntPtrT> TryIntPtrMul(TNode<IntPtrT> a, TNode<IntPtrT> b,
                               Label* if_overflow);
+  TNode<IntPtrT> TryIntPtrDiv(TNode<IntPtrT> a, TNode<IntPtrT> b,
+                              Label* if_div_zero);
+  TNode<IntPtrT> TryIntPtrMod(TNode<IntPtrT> a, TNode<IntPtrT> b,
+                              Label* if_div_zero);
   TNode<Int32T> TryInt32Mul(TNode<Int32T> a, TNode<Int32T> b,
                             Label* if_overflow);
   TNode<Smi> TrySmiAdd(TNode<Smi> a, TNode<Smi> b, Label* if_overflow);
@@ -838,16 +842,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler

   // TODO(v8:11880): remove once Code::bytecode_or_interpreter_data field
   // is cached in or moved to CodeT.
-  TNode<Code> FromCodeT(TNode<CodeT> code) {
+  TNode<Code> FromCodeTNonBuiltin(TNode<CodeT> code) {
 #ifdef V8_EXTERNAL_CODE_SPACE
-#if V8_TARGET_BIG_ENDIAN
-#error "This code requires updating for big-endian architectures"
-#endif
-    // Given the fields layout we can read the Code reference as a full word.
-    static_assert(CodeDataContainer::kCodeCageBaseUpper32BitsOffset ==
-                  CodeDataContainer::kCodeOffset + kTaggedSize);
-    TNode<Object> o = BitcastWordToTagged(Load<RawPtrT>(
-        code, IntPtrConstant(CodeDataContainer::kCodeOffset - kHeapObjectTag)));
+    // Compute the Code object pointer from the code entry point.
+    TNode<RawPtrT> code_entry = Load<RawPtrT>(
+        code, IntPtrConstant(CodeDataContainer::kCodeEntryPointOffset -
+                             kHeapObjectTag));
+    TNode<Object> o = BitcastWordToTagged(IntPtrSub(
+        code_entry, IntPtrConstant(Code::kHeaderSize - kHeapObjectTag)));
     return CAST(o);
 #else
     return code;
@@ -1856,8 +1858,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
       WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
       int additional_offset = 0);

-  void StoreJSSharedStructInObjectField(TNode<HeapObject> object,
-                                        TNode<IntPtrT> offset,
+  void StoreSharedObjectField(TNode<HeapObject> object, TNode<IntPtrT> offset,
                               TNode<Object> value);

   void StoreJSSharedStructPropertyArrayElement(TNode<PropertyArray> array,
@@ -3931,6 +3932,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   void Print(TNode<MaybeObject> tagged_value) {
     return Print(nullptr, tagged_value);
   }
+  void PrintErr(const char* s);
+  void PrintErr(const char* prefix, TNode<MaybeObject> tagged_value);
+  void PrintErr(TNode<MaybeObject> tagged_value) {
+    return PrintErr(nullptr, tagged_value);
+  }
+  void PrintToStream(const char* s, int stream);
+  void PrintToStream(const char* prefix, TNode<MaybeObject> tagged_value,
+                     int stream);

   template <class... TArgs>
   TNode<HeapObject> MakeTypeError(MessageTemplate message,
@@ -1284,12 +1284,6 @@ MaybeHandle<CodeT> GetOrCompileOptimized(
   // turbo_filter.
   if (!ShouldOptimize(code_kind, shared)) return {};

-  // If code was pending optimization for testing, remove the entry from the
-  // table that was preventing the bytecode from being flushed.
-  if (V8_UNLIKELY(v8_flags.testing_d8_test_runner)) {
-    PendingOptimizationTable::FunctionWasOptimized(isolate, function);
-  }
-
   Handle<CodeT> cached_code;
   if (OptimizedCodeCache::Get(isolate, function, osr_offset, code_kind)
           .ToHandle(&cached_code)) {
@@ -2045,12 +2039,9 @@ void BackgroundMergeTask::BeginMergeInBackground(LocalIsolate* isolate,
       old_sfi.GetBytecodeArray(isolate).set_bytecode_age(0);
     } else {
       // The old SFI can use the compiled data from the new SFI.
-      Object function_data = new_sfi.function_data(kAcquireLoad);
-      FeedbackMetadata feedback_metadata = new_sfi.feedback_metadata();
       new_compiled_data_for_cached_sfis_.push_back(
           {local_heap->NewPersistentHandle(old_sfi),
-           local_heap->NewPersistentHandle(function_data),
-           local_heap->NewPersistentHandle(feedback_metadata)});
+           local_heap->NewPersistentHandle(new_sfi)});
       forwarder.AddBytecodeArray(new_sfi.GetBytecodeArray(isolate));
     }
   }
@@ -2087,11 +2078,19 @@ Handle<SharedFunctionInfo> BackgroundMergeTask::CompleteMergeInForeground(
   Handle<Script> old_script = cached_script_.ToHandleChecked();

   for (const auto& new_compiled_data : new_compiled_data_for_cached_sfis_) {
-    if (!new_compiled_data.cached_sfi->is_compiled()) {
-      new_compiled_data.cached_sfi->set_function_data(
-          *new_compiled_data.function_data, kReleaseStore);
-      new_compiled_data.cached_sfi->set_feedback_metadata(
-          *new_compiled_data.feedback_metadata, kReleaseStore);
+    if (!new_compiled_data.cached_sfi->is_compiled() &&
+        new_compiled_data.new_sfi->is_compiled()) {
+      // Updating existing DebugInfos is not supported, but we don't expect
+      // uncompiled SharedFunctionInfos to contain DebugInfos.
+      DCHECK(!new_compiled_data.cached_sfi->HasDebugInfo());
+      // The goal here is to copy every field except script_or_debug_info from
+      // new_sfi to cached_sfi. The safest way to do so (including a DCHECK that
+      // no fields were skipped) is to first copy the script_or_debug_info from
+      // cached_sfi to new_sfi, and then copy every field using CopyFrom.
+      new_compiled_data.new_sfi->set_script_or_debug_info(
+          new_compiled_data.cached_sfi->script_or_debug_info(kAcquireLoad),
+          kReleaseStore);
+      new_compiled_data.cached_sfi->CopyFrom(*new_compiled_data.new_sfi);
     }
   }
   for (Handle<SharedFunctionInfo> new_sfi : used_new_sfis_) {
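The comment in the hunk explains the copy trick; modeled with a toy struct (sketch only, hypothetical fields) it looks like this:

    // Sketch only: toy analogue of the CopyFrom trick above. Copying
    // script_or_debug_info backwards first lets one whole-object copy
    // preserve the cached script link.
    struct Sfi {
      int script_or_debug_info = 0;
      int function_data = 0;
      int feedback_metadata = 0;
    };

    void Merge(Sfi& cached_sfi, Sfi& new_sfi) {
      new_sfi.script_or_debug_info = cached_sfi.script_or_debug_info;
      cached_sfi = new_sfi;  // ~ SharedFunctionInfo::CopyFrom
    }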
@@ -25,6 +25,7 @@
 #include "src/logging/log.h"
 #include "src/numbers/hash-seed-inl.h"
 #include "src/numbers/math-random.h"
+#include "src/objects/elements-kind.h"
 #include "src/objects/elements.h"
 #include "src/objects/object-type.h"
 #include "src/objects/objects-inl.h"
@@ -454,8 +455,8 @@ IF_WASM(FUNCTION_REFERENCE, wasm_float64_pow, wasm::float64_pow_wrapper)
 IF_WASM(FUNCTION_REFERENCE, wasm_call_trap_callback_for_testing,
         wasm::call_trap_callback_for_testing)
 IF_WASM(FUNCTION_REFERENCE, wasm_array_copy, wasm::array_copy_wrapper)
-IF_WASM(FUNCTION_REFERENCE, wasm_array_fill_with_zeroes,
-        wasm::array_fill_with_zeroes_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_array_fill_with_number_or_null,
+        wasm::array_fill_with_number_or_null_wrapper)

 static void f64_acos_wrapper(Address data) {
   double input = ReadUnalignedValue<double>(data);
@@ -587,7 +588,7 @@ ExternalReference ExternalReference::address_of_log_or_trace_osr() {

 ExternalReference
 ExternalReference::address_of_FLAG_harmony_symbol_as_weakmap_key() {
-  return ExternalReference(&FLAG_harmony_symbol_as_weakmap_key);
+  return ExternalReference(&v8_flags.harmony_symbol_as_weakmap_key);
 }

 ExternalReference ExternalReference::address_of_builtin_subclassing_flag() {
@@ -950,6 +951,20 @@ ExternalReference ExternalReference::search_string_raw_two_two() {
   return search_string_raw<const base::uc16, const base::uc16>();
 }

+ExternalReference
+ExternalReference::typed_array_and_rab_gsab_typed_array_elements_kind_shifts() {
+  uint8_t* ptr =
+      const_cast<uint8_t*>(TypedArrayAndRabGsabTypedArrayElementsKindShifts());
+  return ExternalReference(reinterpret_cast<Address>(ptr));
+}
+
+ExternalReference
+ExternalReference::typed_array_and_rab_gsab_typed_array_elements_kind_sizes() {
+  uint8_t* ptr =
+      const_cast<uint8_t*>(TypedArrayAndRabGsabTypedArrayElementsKindSizes());
+  return ExternalReference(reinterpret_cast<Address>(ptr));
+}
+
 namespace {

 void StringWriteToFlatOneByte(Address source, uint8_t* sink, int32_t start,
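Exposing the shift and size tables as external references lets generated code turn an elements kind plus index into a byte offset without a runtime call. A standalone sketch with hypothetical table contents:

    // Sketch only: table contents are hypothetical, not V8's real kinds.
    #include <cstdint>

    constexpr uint8_t kShifts[] = {0 /*Int8*/, 1 /*Int16*/, 2 /*Int32*/,
                                   3 /*Float64*/};

    uintptr_t ByteOffset(int elements_kind, uintptr_t index) {
      return index << kShifts[elements_kind];  // one shift per element access
    }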
@@ -1103,6 +1118,9 @@ FUNCTION_REFERENCE(mutable_big_int_absolute_mul_and_canonicalize_function,
 FUNCTION_REFERENCE(mutable_big_int_absolute_div_and_canonicalize_function,
                    MutableBigInt_AbsoluteDivAndCanonicalize)

+FUNCTION_REFERENCE(mutable_big_int_absolute_mod_and_canonicalize_function,
+                   MutableBigInt_AbsoluteModAndCanonicalize)
+
 FUNCTION_REFERENCE(mutable_big_int_bitwise_and_pp_and_canonicalize_function,
                    MutableBigInt_BitwiseAndPosPosAndCanonicalize)
@@ -184,6 +184,8 @@ class StatsCounter;
     "MutableBigInt_AbsoluteMulAndCanonicalize")                              \
   V(mutable_big_int_absolute_div_and_canonicalize_function,                  \
     "MutableBigInt_AbsoluteDivAndCanonicalize")                              \
+  V(mutable_big_int_absolute_mod_and_canonicalize_function,                  \
+    "MutableBigInt_AbsoluteModAndCanonicalize")                              \
   V(mutable_big_int_bitwise_and_pp_and_canonicalize_function,                \
     "MutableBigInt_BitwiseAndPosPosAndCanonicalize")                         \
   V(mutable_big_int_bitwise_and_nn_and_canonicalize_function,                \
@@ -272,7 +274,8 @@ class StatsCounter;
   IF_WASM(V, wasm_memory_copy, "wasm::memory_copy")                          \
   IF_WASM(V, wasm_memory_fill, "wasm::memory_fill")                          \
   IF_WASM(V, wasm_array_copy, "wasm::array_copy")                            \
-  IF_WASM(V, wasm_array_fill_with_zeroes, "wasm::array_fill_with_zeroes")    \
+  IF_WASM(V, wasm_array_fill_with_number_or_null,                            \
+          "wasm::array_fill_with_number_or_null")                            \
   V(address_of_wasm_i8x16_swizzle_mask, "wasm_i8x16_swizzle_mask")           \
   V(address_of_wasm_i8x16_popcnt_mask, "wasm_i8x16_popcnt_mask")             \
   V(address_of_wasm_i8x16_splat_0x01, "wasm_i8x16_splat_0x01")               \
@@ -337,6 +340,10 @@ class StatsCounter;
   V(re_match_for_call_from_js, "IrregexpInterpreter::MatchForCallFromJs")    \
   V(re_experimental_match_for_call_from_js,                                  \
     "ExperimentalRegExp::MatchForCallFromJs")                                \
+  V(typed_array_and_rab_gsab_typed_array_elements_kind_shifts,               \
+    "TypedArrayAndRabGsabTypedArrayElementsKindShifts")                      \
+  V(typed_array_and_rab_gsab_typed_array_elements_kind_sizes,                \
+    "TypedArrayAndRabGsabTypedArrayElementsKindSizes")                       \
   EXTERNAL_REFERENCE_LIST_INTL(V)                                            \
   EXTERNAL_REFERENCE_LIST_SANDBOX(V)
 #ifdef V8_INTL_SUPPORT
@@ -1399,13 +1399,6 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
   DCHECK_EQ(expected_parameter_count, ecx);
   Label regular_invoke;

-  // If the expected parameter count is equal to the adaptor sentinel, no need
-  // to push undefined value as arguments.
-  if (kDontAdaptArgumentsSentinel != 0) {
-    cmp(expected_parameter_count, Immediate(kDontAdaptArgumentsSentinel));
-    j(equal, &regular_invoke, Label::kFar);
-  }
-
   // If overapplication or if the actual argument count is equal to the
   // formal parameter count, no need to push extra undefined values.
   sub(expected_parameter_count, actual_parameter_count);
@@ -187,6 +187,14 @@ StaticCallInterfaceDescriptor<DerivedDescriptor>::GetRegisterParameter(int i) {
   return DerivedDescriptor::registers()[i];
 }

+// static
+template <typename DerivedDescriptor>
+constexpr int
+StaticCallInterfaceDescriptor<DerivedDescriptor>::GetStackParameterIndex(
+    int i) {
+  return i - DerivedDescriptor::GetRegisterParameterCount();
+}
+
 // static
 template <typename DerivedDescriptor>
 constexpr DoubleRegister
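GetStackParameterIndex just rebases a parameter index past the register parameters. A self-contained check of the arithmetic:

    // Sketch only: with three register parameters, parameter 3 is the first
    // stack parameter.
    constexpr int GetStackParameterIndex(int i, int register_parameter_count) {
      return i - register_parameter_count;
    }
    static_assert(GetStackParameterIndex(3, 3) == 0, "first stack param");
    static_assert(GetStackParameterIndex(5, 3) == 2, "third stack param");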
@@ -130,8 +130,6 @@ namespace internal {
   V(Void)                                  \
   V(WasmFloat32ToNumber)                   \
   V(WasmFloat64ToNumber)                   \
-  V(WasmI32AtomicWait32)                   \
-  V(WasmI64AtomicWait32)                   \
   V(WasmSuspend)                           \
   V(WriteBarrier)                          \
   IF_TSAN(V, TSANLoad)                     \
@@ -482,6 +480,7 @@ class StaticCallInterfaceDescriptor : public CallInterfaceDescriptor {
   static constexpr inline int GetStackParameterCount();
   static constexpr inline Register* GetRegisterData();
   static constexpr inline Register GetRegisterParameter(int i);
+  static constexpr inline int GetStackParameterIndex(int i);

   // Interface descriptors don't really support double registers.
   // This reinterprets the i-th register as a double with the same code.
@@ -1982,38 +1981,6 @@ class V8_EXPORT_PRIVATE BigIntToI32PairDescriptor final
   DECLARE_DESCRIPTOR(BigIntToI32PairDescriptor)
 };

-class WasmI32AtomicWait32Descriptor final
-    : public StaticCallInterfaceDescriptor<WasmI32AtomicWait32Descriptor> {
- public:
-  DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValue, kTimeoutLow,
-                               kTimeoutHigh)
-  DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(),  // result 1
-                                    MachineType::Uint32(),  // kAddress
-                                    MachineType::Int32(),   // kExpectedValue
-                                    MachineType::Uint32(),  // kTimeoutLow
-                                    MachineType::Uint32())  // kTimeoutHigh
-  DECLARE_DESCRIPTOR(WasmI32AtomicWait32Descriptor)
-};
-
-class WasmI64AtomicWait32Descriptor final
-    : public StaticCallInterfaceDescriptor<WasmI64AtomicWait32Descriptor> {
- public:
-  DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValueLow, kExpectedValueHigh,
-                               kTimeoutLow, kTimeoutHigh)
-
-  static constexpr bool kNoStackScan = true;
-
-  DEFINE_RESULT_AND_PARAMETER_TYPES(
-      MachineType::Uint32(),  // result 1
-      MachineType::Uint32(),  // kAddress
-      MachineType::Uint32(),  // kExpectedValueLow
-      MachineType::Uint32(),  // kExpectedValueHigh
-      MachineType::Uint32(),  // kTimeoutLow
-      MachineType::Uint32())  // kTimeoutHigh
-
-  DECLARE_DESCRIPTOR(WasmI64AtomicWait32Descriptor)
-};
-
 class CloneObjectWithVectorDescriptor final
     : public StaticCallInterfaceDescriptor<CloneObjectWithVectorDescriptor> {
  public:
@@ -693,7 +693,8 @@ inline Hint NegateHint(Hint hint) { return no_hint; }
 // registers and other constants.

 // Break 0xfffff, reserved for redirected real time call.
-const Instr rtCallRedirInstr = BREAK | call_rt_redirected;
+const Instr rtCallRedirInstr =
+    static_cast<uint32_t>(BREAK) | call_rt_redirected;
 // A nop instruction. (Encoding of addi_w 0 0 0).
 const Instr nopInstr = ADDI_W;
@@ -3033,13 +3033,6 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
   DCHECK_EQ(actual_parameter_count, a0);
   DCHECK_EQ(expected_parameter_count, a2);

-  // If the expected parameter count is equal to the adaptor sentinel, no need
-  // to push undefined value as arguments.
-  if (kDontAdaptArgumentsSentinel != 0) {
-    Branch(&regular_invoke, eq, expected_parameter_count,
-           Operand(kDontAdaptArgumentsSentinel));
-  }
-
   // If overapplication or if the actual argument count is equal to the
   // formal parameter count, no need to push extra undefined values.
   sub_d(expected_parameter_count, expected_parameter_count,
@@ -3551,7 +3544,8 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
     Push(kScratchReg);
   }
 #if V8_ENABLE_WEBASSEMBLY
-  if (type == StackFrame::WASM) Push(kWasmInstanceRegister);
+  if (type == StackFrame::WASM || type == StackFrame::WASM_LIFTOFF_SETUP)
+    Push(kWasmInstanceRegister);
 #endif  // V8_ENABLE_WEBASSEMBLY
 }
@@ -418,88 +418,88 @@ const int32_t kJumpRawMask = 0xf0000000;
 // ----- MIPS Opcodes and Function Fields.
 // We use this presentation to stay close to the table representation in
 // MIPS32 Architecture For Programmers, Volume II: The MIPS32 Instruction Set.
-enum Opcode : uint32_t {
-  SPECIAL = 0U << kOpcodeShift,
-  REGIMM = 1U << kOpcodeShift,
+using Opcode = uint32_t;
+constexpr Opcode SPECIAL = 0U << kOpcodeShift;
+constexpr Opcode REGIMM = 1U << kOpcodeShift;

-  J = ((0U << 3) + 2) << kOpcodeShift,
-  JAL = ((0U << 3) + 3) << kOpcodeShift,
-  BEQ = ((0U << 3) + 4) << kOpcodeShift,
-  BNE = ((0U << 3) + 5) << kOpcodeShift,
-  BLEZ = ((0U << 3) + 6) << kOpcodeShift,
-  BGTZ = ((0U << 3) + 7) << kOpcodeShift,
+constexpr Opcode J = ((0U << 3) + 2) << kOpcodeShift;
+constexpr Opcode JAL = ((0U << 3) + 3) << kOpcodeShift;
+constexpr Opcode BEQ = ((0U << 3) + 4) << kOpcodeShift;
+constexpr Opcode BNE = ((0U << 3) + 5) << kOpcodeShift;
+constexpr Opcode BLEZ = ((0U << 3) + 6) << kOpcodeShift;
+constexpr Opcode BGTZ = ((0U << 3) + 7) << kOpcodeShift;

-  ADDI = ((1U << 3) + 0) << kOpcodeShift,
-  ADDIU = ((1U << 3) + 1) << kOpcodeShift,
-  SLTI = ((1U << 3) + 2) << kOpcodeShift,
-  SLTIU = ((1U << 3) + 3) << kOpcodeShift,
-  ANDI = ((1U << 3) + 4) << kOpcodeShift,
-  ORI = ((1U << 3) + 5) << kOpcodeShift,
-  XORI = ((1U << 3) + 6) << kOpcodeShift,
-  LUI = ((1U << 3) + 7) << kOpcodeShift,  // LUI/AUI family.
-  DAUI = ((3U << 3) + 5) << kOpcodeShift,
+constexpr Opcode ADDI = ((1U << 3) + 0) << kOpcodeShift;
+constexpr Opcode ADDIU = ((1U << 3) + 1) << kOpcodeShift;
+constexpr Opcode SLTI = ((1U << 3) + 2) << kOpcodeShift;
+constexpr Opcode SLTIU = ((1U << 3) + 3) << kOpcodeShift;
+constexpr Opcode ANDI = ((1U << 3) + 4) << kOpcodeShift;
+constexpr Opcode ORI = ((1U << 3) + 5) << kOpcodeShift;
+constexpr Opcode XORI = ((1U << 3) + 6) << kOpcodeShift;
+constexpr Opcode LUI = ((1U << 3) + 7) << kOpcodeShift;  // LUI/AUI family.
+constexpr Opcode DAUI = ((3U << 3) + 5) << kOpcodeShift;

-  BEQC = ((2U << 3) + 0) << kOpcodeShift,
-  COP1 = ((2U << 3) + 1) << kOpcodeShift,  // Coprocessor 1 class.
-  BEQL = ((2U << 3) + 4) << kOpcodeShift,
-  BNEL = ((2U << 3) + 5) << kOpcodeShift,
-  BLEZL = ((2U << 3) + 6) << kOpcodeShift,
-  BGTZL = ((2U << 3) + 7) << kOpcodeShift,
+constexpr Opcode BEQC = ((2U << 3) + 0) << kOpcodeShift;
+constexpr Opcode COP1 = ((2U << 3) + 1)
+                        << kOpcodeShift;  // Coprocessor 1 class.
+constexpr Opcode BEQL = ((2U << 3) + 4) << kOpcodeShift;
+constexpr Opcode BNEL = ((2U << 3) + 5) << kOpcodeShift;
+constexpr Opcode BLEZL = ((2U << 3) + 6) << kOpcodeShift;
+constexpr Opcode BGTZL = ((2U << 3) + 7) << kOpcodeShift;

-  DADDI = ((3U << 3) + 0) << kOpcodeShift,  // This is also BNEC.
-  DADDIU = ((3U << 3) + 1) << kOpcodeShift,
-  LDL = ((3U << 3) + 2) << kOpcodeShift,
-  LDR = ((3U << 3) + 3) << kOpcodeShift,
-  SPECIAL2 = ((3U << 3) + 4) << kOpcodeShift,
-  MSA = ((3U << 3) + 6) << kOpcodeShift,
-  SPECIAL3 = ((3U << 3) + 7) << kOpcodeShift,
+constexpr Opcode DADDI = ((3U << 3) + 0) << kOpcodeShift;  // This is also BNEC.
+constexpr Opcode DADDIU = ((3U << 3) + 1) << kOpcodeShift;
+constexpr Opcode LDL = ((3U << 3) + 2) << kOpcodeShift;
+constexpr Opcode LDR = ((3U << 3) + 3) << kOpcodeShift;
+constexpr Opcode SPECIAL2 = ((3U << 3) + 4) << kOpcodeShift;
+constexpr Opcode MSA = ((3U << 3) + 6) << kOpcodeShift;
+constexpr Opcode SPECIAL3 = ((3U << 3) + 7) << kOpcodeShift;

-  LB = ((4U << 3) + 0) << kOpcodeShift,
-  LH = ((4U << 3) + 1) << kOpcodeShift,
-  LWL = ((4U << 3) + 2) << kOpcodeShift,
-  LW = ((4U << 3) + 3) << kOpcodeShift,
-  LBU = ((4U << 3) + 4) << kOpcodeShift,
-  LHU = ((4U << 3) + 5) << kOpcodeShift,
-  LWR = ((4U << 3) + 6) << kOpcodeShift,
-  LWU = ((4U << 3) + 7) << kOpcodeShift,
+constexpr Opcode LB = ((4U << 3) + 0) << kOpcodeShift;
+constexpr Opcode LH = ((4U << 3) + 1) << kOpcodeShift;
+constexpr Opcode LWL = ((4U << 3) + 2) << kOpcodeShift;
+constexpr Opcode LW = ((4U << 3) + 3) << kOpcodeShift;
+constexpr Opcode LBU = ((4U << 3) + 4) << kOpcodeShift;
+constexpr Opcode LHU = ((4U << 3) + 5) << kOpcodeShift;
+constexpr Opcode LWR = ((4U << 3) + 6) << kOpcodeShift;
+constexpr Opcode LWU = ((4U << 3) + 7) << kOpcodeShift;

-  SB = ((5U << 3) + 0) << kOpcodeShift,
-  SH = ((5U << 3) + 1) << kOpcodeShift,
-  SWL = ((5U << 3) + 2) << kOpcodeShift,
-  SW = ((5U << 3) + 3) << kOpcodeShift,
-  SDL = ((5U << 3) + 4) << kOpcodeShift,
-  SDR = ((5U << 3) + 5) << kOpcodeShift,
-  SWR = ((5U << 3) + 6) << kOpcodeShift,
+constexpr Opcode SB = ((5U << 3) + 0) << kOpcodeShift;
+constexpr Opcode SH = ((5U << 3) + 1) << kOpcodeShift;
+constexpr Opcode SWL = ((5U << 3) + 2) << kOpcodeShift;
+constexpr Opcode SW = ((5U << 3) + 3) << kOpcodeShift;
+constexpr Opcode SDL = ((5U << 3) + 4) << kOpcodeShift;
+constexpr Opcode SDR = ((5U << 3) + 5) << kOpcodeShift;
+constexpr Opcode SWR = ((5U << 3) + 6) << kOpcodeShift;

-  LL = ((6U << 3) + 0) << kOpcodeShift,
-  LWC1 = ((6U << 3) + 1) << kOpcodeShift,
-  BC = ((6U << 3) + 2) << kOpcodeShift,
-  LLD = ((6U << 3) + 4) << kOpcodeShift,
-  LDC1 = ((6U << 3) + 5) << kOpcodeShift,
-  POP66 = ((6U << 3) + 6) << kOpcodeShift,
-  LD = ((6U << 3) + 7) << kOpcodeShift,
+constexpr Opcode LL = ((6U << 3) + 0) << kOpcodeShift;
+constexpr Opcode LWC1 = ((6U << 3) + 1) << kOpcodeShift;
+constexpr Opcode BC = ((6U << 3) + 2) << kOpcodeShift;
+constexpr Opcode LLD = ((6U << 3) + 4) << kOpcodeShift;
+constexpr Opcode LDC1 = ((6U << 3) + 5) << kOpcodeShift;
+constexpr Opcode POP66 = ((6U << 3) + 6) << kOpcodeShift;
+constexpr Opcode LD = ((6U << 3) + 7) << kOpcodeShift;

-  PREF = ((6U << 3) + 3) << kOpcodeShift,
+constexpr Opcode PREF = ((6U << 3) + 3) << kOpcodeShift;

-  SC = ((7U << 3) + 0) << kOpcodeShift,
-  SWC1 = ((7U << 3) + 1) << kOpcodeShift,
-  BALC = ((7U << 3) + 2) << kOpcodeShift,
-  PCREL = ((7U << 3) + 3) << kOpcodeShift,
-  SCD = ((7U << 3) + 4) << kOpcodeShift,
-  SDC1 = ((7U << 3) + 5) << kOpcodeShift,
-  POP76 = ((7U << 3) + 6) << kOpcodeShift,
-  SD = ((7U << 3) + 7) << kOpcodeShift,
+constexpr Opcode SC = ((7U << 3) + 0) << kOpcodeShift;
+constexpr Opcode SWC1 = ((7U << 3) + 1) << kOpcodeShift;
+constexpr Opcode BALC = ((7U << 3) + 2) << kOpcodeShift;
+constexpr Opcode PCREL = ((7U << 3) + 3) << kOpcodeShift;
+constexpr Opcode SCD = ((7U << 3) + 4) << kOpcodeShift;
+constexpr Opcode SDC1 = ((7U << 3) + 5) << kOpcodeShift;
+constexpr Opcode POP76 = ((7U << 3) + 6) << kOpcodeShift;
+constexpr Opcode SD = ((7U << 3) + 7) << kOpcodeShift;

-  COP1X = ((1U << 4) + 3) << kOpcodeShift,
+constexpr Opcode COP1X = ((1U << 4) + 3) << kOpcodeShift;

 // New r6 instruction.
-  POP06 = BLEZ,   // bgeuc/bleuc, blezalc, bgezalc
-  POP07 = BGTZ,   // bltuc/bgtuc, bgtzalc, bltzalc
-  POP10 = ADDI,   // beqzalc, bovc, beqc
-  POP26 = BLEZL,  // bgezc, blezc, bgec/blec
-  POP27 = BGTZL,  // bgtzc, bltzc, bltc/bgtc
-  POP30 = DADDI,  // bnezalc, bnvc, bnec
-};
+constexpr Opcode POP06 = BLEZ;   // bgeuc/bleuc, blezalc, bgezalc
+constexpr Opcode POP07 = BGTZ;   // bltuc/bgtuc, bgtzalc, bltzalc
+constexpr Opcode POP10 = ADDI;   // beqzalc, bovc, beqc
+constexpr Opcode POP26 = BLEZL;  // bgezc, blezc, bgec/blec
+constexpr Opcode POP27 = BGTZL;  // bgtzc, bltzc, bltc/bgtc
+constexpr Opcode POP30 = DADDI;  // bnezalc, bnvc, bnec

 enum SecondaryField : uint32_t {
   // SPECIAL Encoding of Function Field.
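Turning the enum into a uint32_t alias with constexpr constants keeps every opcode usable in plain integer arithmetic without casts, where typed enums would need explicit conversions under newer compilers. A minimal sketch with the same shape (the shift value mirrors the 26-bit MIPS opcode field):

    // Sketch only: same pattern as the hunk above, trimmed to two opcodes.
    #include <cstdint>

    using Opcode = uint32_t;
    constexpr int kOpcodeShift = 26;
    constexpr Opcode SPECIAL = 0U << kOpcodeShift;
    constexpr Opcode REGIMM = 1U << kOpcodeShift;
    constexpr Opcode kCombined = SPECIAL | REGIMM;  // composes with no casts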
@@ -4951,13 +4951,6 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
   DCHECK_EQ(actual_parameter_count, a0);
   DCHECK_EQ(expected_parameter_count, a2);

-  // If the expected parameter count is equal to the adaptor sentinel, no need
-  // to push undefined value as arguments.
-  if (kDontAdaptArgumentsSentinel != 0) {
-    Branch(&regular_invoke, eq, expected_parameter_count,
-           Operand(kDontAdaptArgumentsSentinel));
-  }
-
   // If overapplication or if the actual argument count is equal to the
   // formal parameter count, no need to push extra undefined values.
   Dsubu(expected_parameter_count, expected_parameter_count,
@@ -5460,7 +5453,8 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
     Push(kScratchReg);
   }
 #if V8_ENABLE_WEBASSEMBLY
-  if (type == StackFrame::WASM) Push(kWasmInstanceRegister);
+  if (type == StackFrame::WASM || type == StackFrame::WASM_LIFTOFF_SETUP)
+    Push(kWasmInstanceRegister);
 #endif  // V8_ENABLE_WEBASSEMBLY
 }
@@ -13,123 +13,54 @@
 namespace v8 {
 namespace internal {

-enum class FunctionStatus : int {
-  kPrepareForOptimize = 1 << 0,
-  kMarkForOptimize = 1 << 1,
-  kAllowHeuristicOptimization = 1 << 2,
-};
-
-using FunctionStatusFlags = base::Flags<FunctionStatus>;
-
-void PendingOptimizationTable::PreparedForOptimization(
+void ManualOptimizationTable::MarkFunctionForManualOptimization(
     Isolate* isolate, Handle<JSFunction> function,
-    bool allow_heuristic_optimization) {
+    IsCompiledScope* is_compiled_scope) {
   DCHECK(v8_flags.testing_d8_test_runner);
+  DCHECK(is_compiled_scope->is_compiled());
+  DCHECK(function->has_feedback_vector());

-  FunctionStatusFlags status = FunctionStatus::kPrepareForOptimize;
-  if (allow_heuristic_optimization) {
-    status |= FunctionStatus::kAllowHeuristicOptimization;
-  }
   Handle<SharedFunctionInfo> shared_info(function->shared(), isolate);

-  IsCompiledScope is_compiled_scope;
-  SharedFunctionInfo::EnsureBytecodeArrayAvailable(isolate, shared_info,
-                                                   &is_compiled_scope);
-
   Handle<ObjectHashTable> table =
-      isolate->heap()->pending_optimize_for_test_bytecode().IsUndefined()
+      isolate->heap()->functions_marked_for_manual_optimization().IsUndefined()
          ? ObjectHashTable::New(isolate, 1)
          : handle(ObjectHashTable::cast(
-                      isolate->heap()->pending_optimize_for_test_bytecode()),
+                      isolate->heap()
+                          ->functions_marked_for_manual_optimization()),
                   isolate);
-  Handle<Tuple2> tuple = isolate->factory()->NewTuple2(
-      handle(shared_info->GetBytecodeArray(isolate), isolate),
-      handle(Smi::FromInt(status), isolate), AllocationType::kYoung);
-  table =
-      ObjectHashTable::Put(table, handle(function->shared(), isolate), tuple);
-  isolate->heap()->SetPendingOptimizeForTestBytecode(*table);
+  table = ObjectHashTable::Put(
+      table, shared_info,
+      handle(shared_info->GetBytecodeArray(isolate), isolate));
+  isolate->heap()->SetFunctionsMarkedForManualOptimization(*table);
 }

-bool PendingOptimizationTable::IsHeuristicOptimizationAllowed(
+void ManualOptimizationTable::CheckMarkedForManualOptimization(
     Isolate* isolate, JSFunction function) {
-  DCHECK(v8_flags.testing_d8_test_runner);
-
-  Handle<Object> table =
-      handle(isolate->heap()->pending_optimize_for_test_bytecode(), isolate);
-  Handle<Object> entry =
-      table->IsUndefined()
-          ? handle(ReadOnlyRoots(isolate).the_hole_value(), isolate)
-          : handle(Handle<ObjectHashTable>::cast(table)->Lookup(
-                       handle(function.shared(), isolate)),
-                   isolate);
-  if (entry->IsTheHole()) {
-    return true;
-  }
-  DCHECK(entry->IsTuple2());
-  DCHECK(Handle<Tuple2>::cast(entry)->value2().IsSmi());
-  FunctionStatusFlags status(Smi::ToInt(Handle<Tuple2>::cast(entry)->value2()));
-  return status & FunctionStatus::kAllowHeuristicOptimization;
-}
-
-void PendingOptimizationTable::MarkedForOptimization(
-    Isolate* isolate, Handle<JSFunction> function) {
-  DCHECK(v8_flags.testing_d8_test_runner);
-
-  Handle<Object> table =
-      handle(isolate->heap()->pending_optimize_for_test_bytecode(), isolate);
-  Handle<Object> entry =
-      table->IsUndefined()
-          ? handle(ReadOnlyRoots(isolate).the_hole_value(), isolate)
-          : handle(Handle<ObjectHashTable>::cast(table)->Lookup(
-                       handle(function->shared(), isolate)),
-                   isolate);
-  if (entry->IsTheHole()) {
+  if (!IsMarkedForManualOptimization(isolate, function)) {
     PrintF("Error: Function ");
-    function->ShortPrint();
+    function.ShortPrint();
     PrintF(
         " should be prepared for optimization with "
         "%%PrepareFunctionForOptimization before "
        "%%OptimizeFunctionOnNextCall / %%OptimizeOSR ");
     UNREACHABLE();
   }
-
-  DCHECK(entry->IsTuple2());
-  DCHECK(Handle<Tuple2>::cast(entry)->value2().IsSmi());
-  FunctionStatusFlags status(Smi::ToInt(Handle<Tuple2>::cast(entry)->value2()));
-  status = status.without(FunctionStatus::kPrepareForOptimize) |
-           FunctionStatus::kMarkForOptimize;
-  Handle<Tuple2>::cast(entry)->set_value2(Smi::FromInt(status));
-  table = ObjectHashTable::Put(Handle<ObjectHashTable>::cast(table),
-                               handle(function->shared(), isolate), entry);
-  isolate->heap()->SetPendingOptimizeForTestBytecode(*table);
 }

-void PendingOptimizationTable::FunctionWasOptimized(
-    Isolate* isolate, Handle<JSFunction> function) {
+bool ManualOptimizationTable::IsMarkedForManualOptimization(
+    Isolate* isolate, JSFunction function) {
   DCHECK(v8_flags.testing_d8_test_runner);

-  if (isolate->heap()->pending_optimize_for_test_bytecode().IsUndefined()) {
-    return;
-  }
-
-  Handle<ObjectHashTable> table =
-      handle(ObjectHashTable::cast(
-                 isolate->heap()->pending_optimize_for_test_bytecode()),
-             isolate);
-  Handle<Object> value(table->Lookup(handle(function->shared(), isolate)),
-                       isolate);
-  // Remove only if we have already seen %OptimizeFunctionOnNextCall. If it is
-  // optimized for other reasons, still keep holding the bytecode since we may
-  // optimize it later.
-  if (!value->IsTheHole() &&
-      Smi::cast(Handle<Tuple2>::cast(value)->value2()).value() ==
-          static_cast<int>(FunctionStatus::kMarkForOptimize)) {
-    bool was_present;
-    table = table->Remove(isolate, table, handle(function->shared(), isolate),
-                          &was_present);
-    DCHECK(was_present);
-    isolate->heap()->SetPendingOptimizeForTestBytecode(*table);
-  }
+  Handle<Object> table = handle(
+      isolate->heap()->functions_marked_for_manual_optimization(), isolate);
+  Handle<Object> entry =
+      table->IsUndefined()
+          ? handle(ReadOnlyRoots(isolate).the_hole_value(), isolate)
+          : handle(Handle<ObjectHashTable>::cast(table)->Lookup(
+                       handle(function.shared(), isolate)),
+                   isolate);
+  return !entry->IsTheHole();
 }

 }  // namespace internal
@@ -10,38 +10,32 @@
 namespace v8 {
 namespace internal {

+class IsCompiledScope;
+
 // This class adds the functionality to properly test the optimized code. This
 // is only for use in tests. All these functions should only be called when
 // testing_d8_flag_for_tests is set.
-class PendingOptimizationTable {
+class ManualOptimizationTable {
  public:
   // This function should be called before we mark the function for
-  // optimization. Calling this function ensures that |function| is compiled and
-  // has a feedback vector allocated. This also holds on to the bytecode
-  // strongly in pending optimization table preventing the bytecode to be
-  // flushed.
-  static void PreparedForOptimization(Isolate* isolate,
-                                      Handle<JSFunction> function,
-                                      bool allow_heuristic_optimization);
+  // optimization. It should be called when |function| is already compiled and
+  // has a feedback vector allocated, and it blocks heuristic optimization.
+  //
+  // This also holds on to the bytecode strongly, preventing the bytecode from
+  // being flushed.
+  static void MarkFunctionForManualOptimization(
+      Isolate* isolate, Handle<JSFunction> function,
+      IsCompiledScope* is_compiled_scope);

   // This function should be called when the function is marked for optimization
-  // via the intrinsics. This will update the state of the bytecode array in the
-  // pending optimization table, so that the entry can be removed once the
-  // function is optimized. If the function is already optimized it removes the
-  // entry from the table.
-  static void MarkedForOptimization(Isolate* isolate,
-                                    Handle<JSFunction> function);
+  // via the intrinsics. This will check whether
+  // MarkFunctionForManualOptimization was called with this function.
+  static void CheckMarkedForManualOptimization(Isolate* isolate,
+                                               JSFunction function);

-  // This function should be called once the function is optimized. If there is
-  // an entry in the pending optimization table and it is marked for removal
-  // then this function removes the entry from pending optimization table.
-  static void FunctionWasOptimized(Isolate* isolate,
-                                   Handle<JSFunction> function);
-
-  // This function returns whether a heuristic is allowed to trigger
-  // optimization the function. This mechanism is used in tests to prevent
-  // heuristics from interfering with manually triggered optimization.
-  static bool IsHeuristicOptimizationAllowed(Isolate* isolate,
-                                             JSFunction function);
+  // Returns true if MarkFunctionForManualOptimization was called with this
+  // function.
+  static bool IsMarkedForManualOptimization(Isolate* isolate,
+                                            JSFunction function);
 };
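A sketch of the intended call order for the renamed API, using only names that appear in this patch (not compilable outside the V8 tree):

    // Sketch only: call order; all types come from the patch above.
    void ExampleTestFlow(Isolate* isolate, Handle<JSFunction> f,
                         IsCompiledScope* scope) {
      // %PrepareFunctionForOptimization path:
      ManualOptimizationTable::MarkFunctionForManualOptimization(isolate, f,
                                                                 scope);
      // %OptimizeFunctionOnNextCall path later asserts the mark exists:
      ManualOptimizationTable::CheckMarkedForManualOptimization(isolate, *f);
    }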
@@ -690,11 +690,11 @@ int Assembler::link(Label* L) {
 // Branch instructions.

 void Assembler::bclr(BOfield bo, int condition_bit, LKBit lk) {
-  emit(EXT1 | bo | condition_bit * B16 | BCLRX | lk);
+  emit(EXT1 | static_cast<uint32_t>(bo) | condition_bit * B16 | BCLRX | lk);
 }

 void Assembler::bcctr(BOfield bo, int condition_bit, LKBit lk) {
-  emit(EXT1 | bo | condition_bit * B16 | BCCTRX | lk);
+  emit(EXT1 | static_cast<uint32_t>(bo) | condition_bit * B16 | BCCTRX | lk);
 }

 // Pseudo op - branch to link register

@@ -708,7 +708,8 @@ void Assembler::bctrl() { bcctr(BA, 0, SetLK); }
 void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) {
   int imm16 = branch_offset;
   CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
-  emit(BCX | bo | condition_bit * B16 | (imm16 & kImm16Mask) | lk);
+  emit(BCX | static_cast<uint32_t>(bo) | condition_bit * B16 |
+       (imm16 & kImm16Mask) | lk);
 }

 void Assembler::b(int branch_offset, LKBit lk) {
|
|
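The only change in these two hunks is wrapping bo in static_cast<uint32_t>. The likely motivation, not stated in the diff itself, is that C++20 deprecates bitwise operators between two different enumeration types, and bo | lk mixes BOfield with LKBit and the opcode constants. A self-contained sketch of the pattern (the enumerator values are illustrative, not V8's real encodings):

#include <cstdint>

enum BOfield : uint32_t { BA = 20u << 21 };
enum LKBit : uint32_t { LeaveLK = 0, SetLK = 1 };

uint32_t Encode(BOfield bo, LKBit lk) {
  // bo | lk would apply | to two distinct enum types, which C++20 deprecates
  // (-Wdeprecated-enum-enum-conversion); casting one operand to uint32_t
  // turns the rest of the expression into ordinary integer arithmetic.
  return static_cast<uint32_t>(bo) | lk;
}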
@@ -1524,14 +1524,6 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
   DCHECK_EQ(actual_parameter_count, r3);
   DCHECK_EQ(expected_parameter_count, r5);
 
-  // If the expected parameter count is equal to the adaptor sentinel, no need
-  // to push undefined value as arguments.
-  if (kDontAdaptArgumentsSentinel != 0) {
-    mov(r0, Operand(kDontAdaptArgumentsSentinel));
-    CmpS64(expected_parameter_count, r0);
-    beq(&regular_invoke);
-  }
-
   // If overapplication or if the actual argument count is equal to the
   // formal parameter count, no need to push extra undefined values.
   sub(expected_parameter_count, expected_parameter_count,
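The block deleted here, and again in the riscv, s390, and x64 hunks further down, only ever executed when kDontAdaptArgumentsSentinel was non-zero; deleting it outright suggests the sentinel is now fixed at zero on every configuration, making the guard compile-time dead. A sketch of the invariant the removal relies on (the value is assumed, the diff does not show it):

constexpr uint16_t kDontAdaptArgumentsSentinel = 0;  // assumed current value
static_assert(kDontAdaptArgumentsSentinel == 0,
              "the removed 'if (kDontAdaptArgumentsSentinel != 0)' block "
              "could never run");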
@@ -3730,7 +3722,11 @@ void TurboAssembler::StoreF32LE(DoubleRegister dst, const MemOperand& mem,
   V(I8x16AddSatS, vaddsbs) \
   V(I8x16SubSatS, vsubsbs) \
   V(I8x16AddSatU, vaddubs) \
-  V(I8x16SubSatU, vsububs)
+  V(I8x16SubSatU, vsububs) \
+  V(S128And, vand)         \
+  V(S128Or, vor)           \
+  V(S128Xor, vxor)         \
+  V(S128AndNot, vandc)
 
 #define EMIT_SIMD_BINOP(name, op) \
   void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
@@ -3786,7 +3782,13 @@ SIMD_SHIFT_LIST(EMIT_SIMD_SHIFT)
   V(F32x4Floor, xvrspim)             \
   V(F32x4Trunc, xvrspiz)             \
   V(I64x2Neg, vnegd)                 \
+  V(I64x2SConvertI32x4Low, vupklsw)  \
+  V(I64x2SConvertI32x4High, vupkhsw) \
   V(I32x4Neg, vnegw)                 \
+  V(I32x4SConvertI16x8Low, vupklsh)  \
+  V(I32x4SConvertI16x8High, vupkhsh) \
+  V(I16x8SConvertI8x16Low, vupklsb)  \
+  V(I16x8SConvertI8x16High, vupkhsb) \
   V(I8x16Popcnt, vpopcntb)
 
 #define EMIT_SIMD_UNOP(name, op) \
@@ -3797,6 +3799,94 @@ SIMD_UNOP_LIST(EMIT_SIMD_UNOP)
 #undef EMIT_SIMD_UNOP
 #undef SIMD_UNOP_LIST
 
+#define EXT_MUL(dst_even, dst_odd, mul_even, mul_odd) \
+  mul_even(dst_even, src1, src2);                     \
+  mul_odd(dst_odd, src1, src2);
+#define SIMD_EXT_MUL_LIST(V)                         \
+  V(I32x4ExtMulLowI16x8S, vmulesh, vmulosh, vmrglw)  \
+  V(I32x4ExtMulHighI16x8S, vmulesh, vmulosh, vmrghw) \
+  V(I32x4ExtMulLowI16x8U, vmuleuh, vmulouh, vmrglw)  \
+  V(I32x4ExtMulHighI16x8U, vmuleuh, vmulouh, vmrghw) \
+  V(I16x8ExtMulLowI8x16S, vmulesb, vmulosb, vmrglh)  \
+  V(I16x8ExtMulHighI8x16S, vmulesb, vmulosb, vmrghh) \
+  V(I16x8ExtMulLowI8x16U, vmuleub, vmuloub, vmrglh)  \
+  V(I16x8ExtMulHighI8x16U, vmuleub, vmuloub, vmrghh)
+
+#define EMIT_SIMD_EXT_MUL(name, mul_even, mul_odd, merge)                    \
+  void TurboAssembler::name(Simd128Register dst, Simd128Register src1,       \
+                            Simd128Register src2, Simd128Register scratch) { \
+    EXT_MUL(scratch, dst, mul_even, mul_odd)                                 \
+    merge(dst, scratch, dst);                                                \
+  }
+SIMD_EXT_MUL_LIST(EMIT_SIMD_EXT_MUL)
+#undef EMIT_SIMD_EXT_MUL
+#undef SIMD_EXT_MUL_LIST
+
+#define SIMD_ALL_TRUE_LIST(V) \
+  V(I64x2AllTrue, vcmpgtud)   \
+  V(I32x4AllTrue, vcmpgtuw)   \
+  V(I16x8AllTrue, vcmpgtuh)   \
+  V(I8x16AllTrue, vcmpgtub)
+
+#define EMIT_SIMD_ALL_TRUE(name, op)                              \
+  void TurboAssembler::name(Register dst, Simd128Register src,    \
+                            Register scratch1, Register scratch2, \
+                            Simd128Register scratch3) {           \
+    constexpr uint8_t fxm = 0x2; /* field mask. */                \
+    constexpr int bit_number = 24;                                \
+    li(scratch1, Operand(0));                                     \
+    li(scratch2, Operand(1));                                     \
+    /* Check if all lanes > 0, if not then return false.*/        \
+    vxor(scratch3, scratch3, scratch3);                           \
+    mtcrf(scratch1, fxm); /* Clear cr6.*/                         \
+    op(scratch3, src, scratch3, SetRC);                           \
+    isel(dst, scratch2, scratch1, bit_number);                    \
+  }
+SIMD_ALL_TRUE_LIST(EMIT_SIMD_ALL_TRUE)
+#undef EMIT_SIMD_ALL_TRUE
+#undef SIMD_ALL_TRUE_LIST
+
+void TurboAssembler::I64x2ExtMulLowI32x4S(Simd128Register dst,
+                                          Simd128Register src1,
+                                          Simd128Register src2,
+                                          Simd128Register scratch) {
+  constexpr int lane_width_in_bytes = 8;
+  EXT_MUL(scratch, dst, vmulesw, vmulosw)
+  vextractd(scratch, scratch, Operand(1 * lane_width_in_bytes));
+  vinsertd(dst, scratch, Operand(0));
+}
+
+void TurboAssembler::I64x2ExtMulHighI32x4S(Simd128Register dst,
+                                           Simd128Register src1,
+                                           Simd128Register src2,
+                                           Simd128Register scratch) {
+  constexpr int lane_width_in_bytes = 8;
+  EXT_MUL(scratch, dst, vmulesw, vmulosw)
+  vinsertd(scratch, dst, Operand(1 * lane_width_in_bytes));
+  vor(dst, scratch, scratch);
+}
+
+void TurboAssembler::I64x2ExtMulLowI32x4U(Simd128Register dst,
+                                          Simd128Register src1,
+                                          Simd128Register src2,
+                                          Simd128Register scratch) {
+  constexpr int lane_width_in_bytes = 8;
+  EXT_MUL(scratch, dst, vmuleuw, vmulouw)
+  vextractd(scratch, scratch, Operand(1 * lane_width_in_bytes));
+  vinsertd(dst, scratch, Operand(0));
+}
+
+void TurboAssembler::I64x2ExtMulHighI32x4U(Simd128Register dst,
+                                           Simd128Register src1,
+                                           Simd128Register src2,
+                                           Simd128Register scratch) {
+  constexpr int lane_width_in_bytes = 8;
+  EXT_MUL(scratch, dst, vmuleuw, vmulouw)
+  vinsertd(scratch, dst, Operand(1 * lane_width_in_bytes));
+  vor(dst, scratch, scratch);
+}
+#undef EXT_MUL
+
 void TurboAssembler::LoadSimd128(Simd128Register dst, const MemOperand& mem,
                                  Register scratch) {
   GenerateMemoryOperationRR(dst, mem, lxvx);
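The EXT_MUL helper pairs Power's even-lane and odd-lane widening multiplies and then merges the halves with a vmrgl*/vmrgh* permute (or, for the 64-bit variants above, explicit vextractd/vinsertd moves). A scalar model of what, for example, I32x4ExtMulLowI16x8S computes; this is illustrative code, not V8's:

#include <array>
#include <cstdint>

// Widening multiply of the low four i16 lanes, as in Wasm's
// i32x4.extmul_low_i16x8_s; the High variant uses lanes 4..7 instead.
std::array<int32_t, 4> ExtMulLowS(const std::array<int16_t, 8>& a,
                                  const std::array<int16_t, 8>& b) {
  std::array<int32_t, 4> r{};
  for (int i = 0; i < 4; ++i) {
    r[i] = int32_t{a[i]} * int32_t{b[i]};  // widen first, then multiply
  }
  return r;
}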
@@ -4218,6 +4308,166 @@ void TurboAssembler::I8x16Neg(Simd128Register dst, Simd128Register src,
   vaddubm(dst, scratch, dst);
 }
 
+void TurboAssembler::F64x2Pmin(Simd128Register dst, Simd128Register src1,
+                               Simd128Register src2, Simd128Register scratch) {
+  xvcmpgtdp(kScratchSimd128Reg, src1, src2);
+  vsel(dst, src1, src2, kScratchSimd128Reg);
+}
+
+void TurboAssembler::F64x2Pmax(Simd128Register dst, Simd128Register src1,
+                               Simd128Register src2, Simd128Register scratch) {
+  xvcmpgtdp(kScratchSimd128Reg, src2, src1);
+  vsel(dst, src1, src2, kScratchSimd128Reg);
+}
+
+void TurboAssembler::F32x4Pmin(Simd128Register dst, Simd128Register src1,
+                               Simd128Register src2, Simd128Register scratch) {
+  xvcmpgtsp(kScratchSimd128Reg, src1, src2);
+  vsel(dst, src1, src2, kScratchSimd128Reg);
+}
+
+void TurboAssembler::F32x4Pmax(Simd128Register dst, Simd128Register src1,
+                               Simd128Register src2, Simd128Register scratch) {
+  xvcmpgtsp(kScratchSimd128Reg, src2, src1);
+  vsel(dst, src1, src2, kScratchSimd128Reg);
+}
+
+void TurboAssembler::I16x8SConvertI32x4(Simd128Register dst,
+                                        Simd128Register src1,
+                                        Simd128Register src2) {
+  vpkswss(dst, src2, src1);
+}
+
+void TurboAssembler::I16x8UConvertI32x4(Simd128Register dst,
+                                        Simd128Register src1,
+                                        Simd128Register src2) {
+  vpkswus(dst, src2, src1);
+}
+
+void TurboAssembler::I8x16SConvertI16x8(Simd128Register dst,
+                                        Simd128Register src1,
+                                        Simd128Register src2) {
+  vpkshss(dst, src2, src1);
+}
+
+void TurboAssembler::I8x16UConvertI16x8(Simd128Register dst,
+                                        Simd128Register src1,
+                                        Simd128Register src2) {
+  vpkshus(dst, src2, src1);
+}
+
+void TurboAssembler::F64x2ConvertLowI32x4S(Simd128Register dst,
+                                           Simd128Register src) {
+  vupklsw(dst, src);
+  xvcvsxddp(dst, dst);
+}
+
+void TurboAssembler::F64x2ConvertLowI32x4U(Simd128Register dst,
+                                           Simd128Register src,
+                                           Register scratch1,
+                                           Simd128Register scratch2) {
+  constexpr int lane_width_in_bytes = 8;
+  vupklsw(dst, src);
+  // Zero extend.
+  mov(scratch1, Operand(0xFFFFFFFF));
+  mtvsrd(scratch2, scratch1);
+  vinsertd(scratch2, scratch2, Operand(1 * lane_width_in_bytes));
+  vand(dst, scratch2, dst);
+  xvcvuxddp(dst, dst);
+}
+
+void TurboAssembler::I64x2UConvertI32x4Low(Simd128Register dst,
+                                           Simd128Register src,
+                                           Register scratch1,
+                                           Simd128Register scratch2) {
+  constexpr int lane_width_in_bytes = 8;
+  vupklsw(dst, src);
+  // Zero extend.
+  mov(scratch1, Operand(0xFFFFFFFF));
+  mtvsrd(scratch2, scratch1);
+  vinsertd(scratch2, scratch2, Operand(1 * lane_width_in_bytes));
+  vand(dst, scratch2, dst);
+}
+
+void TurboAssembler::I64x2UConvertI32x4High(Simd128Register dst,
+                                            Simd128Register src,
+                                            Register scratch1,
+                                            Simd128Register scratch2) {
+  constexpr int lane_width_in_bytes = 8;
+  vupkhsw(dst, src);
+  // Zero extend.
+  mov(scratch1, Operand(0xFFFFFFFF));
+  mtvsrd(scratch2, scratch1);
+  vinsertd(scratch2, scratch2, Operand(1 * lane_width_in_bytes));
+  vand(dst, scratch2, dst);
+}
+
+void TurboAssembler::I32x4UConvertI16x8Low(Simd128Register dst,
+                                           Simd128Register src,
+                                           Register scratch1,
+                                           Simd128Register scratch2) {
+  vupklsh(dst, src);
+  // Zero extend.
+  mov(scratch1, Operand(0xFFFF));
+  mtvsrd(scratch2, scratch1);
+  vspltw(scratch2, scratch2, Operand(1));
+  vand(dst, scratch2, dst);
+}
+
+void TurboAssembler::I32x4UConvertI16x8High(Simd128Register dst,
+                                            Simd128Register src,
+                                            Register scratch1,
+                                            Simd128Register scratch2) {
+  vupkhsh(dst, src);
+  // Zero extend.
+  mov(scratch1, Operand(0xFFFF));
+  mtvsrd(scratch2, scratch1);
+  vspltw(scratch2, scratch2, Operand(1));
+  vand(dst, scratch2, dst);
+}
+
+void TurboAssembler::I16x8UConvertI8x16Low(Simd128Register dst,
+                                           Simd128Register src,
+                                           Register scratch1,
+                                           Simd128Register scratch2) {
+  vupklsb(dst, src);
+  // Zero extend.
+  li(scratch1, Operand(0xFF));
+  mtvsrd(scratch2, scratch1);
+  vsplth(scratch2, scratch2, Operand(3));
+  vand(dst, scratch2, dst);
+}
+
+void TurboAssembler::I16x8UConvertI8x16High(Simd128Register dst,
+                                            Simd128Register src,
+                                            Register scratch1,
+                                            Simd128Register scratch2) {
+  vupkhsb(dst, src);
+  // Zero extend.
+  li(scratch1, Operand(0xFF));
+  mtvsrd(scratch2, scratch1);
+  vsplth(scratch2, scratch2, Operand(3));
+  vand(dst, scratch2, dst);
+}
+
+void TurboAssembler::V128AnyTrue(Register dst, Simd128Register src,
+                                 Register scratch1, Register scratch2,
+                                 Simd128Register scratch3) {
+  constexpr uint8_t fxm = 0x2;  // field mask.
+  constexpr int bit_number = 24;
+  li(scratch1, Operand(0));
+  li(scratch2, Operand(1));
+  // Check if both lanes are 0, if so then return false.
+  vxor(scratch3, scratch3, scratch3);
+  mtcrf(scratch1, fxm);  // Clear cr6.
+  vcmpequd(scratch3, src, scratch3, SetRC);
+  isel(dst, scratch1, scratch2, bit_number);
+}
+
+void TurboAssembler::S128Not(Simd128Register dst, Simd128Register src) {
+  vnor(dst, src, src);
+}
+
 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
                                    Register reg4, Register reg5,
                                    Register reg6) {
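The Pmin/Pmax lowerings above implement Wasm's pseudo-minimum and pseudo-maximum rather than IEEE min/max: vsel takes src2 only on lanes where the xvcmpgtdp/xvcmpgtsp compare is true, so NaN inputs and signed-zero ties fall through to src1. A scalar model (illustrative):

// Wasm pmin/pmax select rule: take b only when it is strictly ordered
// less (pmin) or strictly ordered greater (pmax) than a.
double Pmin(double a, double b) { return b < a ? b : a; }
double Pmax(double a, double b) { return a < b ? b : a; }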
@@ -1125,6 +1125,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   V(I16x8SubSatS)       \
   V(I16x8AddSatU)       \
   V(I16x8SubSatU)       \
+  V(I16x8SConvertI32x4) \
+  V(I16x8UConvertI32x4) \
   V(I8x16Add)           \
   V(I8x16Sub)           \
   V(I8x16MinS)          \
@@ -1137,7 +1139,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   V(I8x16AddSatS)       \
   V(I8x16SubSatS)       \
   V(I8x16AddSatU)       \
-  V(I8x16SubSatU)
+  V(I8x16SubSatU)       \
+  V(I8x16SConvertI16x8) \
+  V(I8x16UConvertI16x8) \
+  V(S128And)            \
+  V(S128Or)             \
+  V(S128Xor)            \
+  V(S128AndNot)
 
 #define PROTOTYPE_SIMD_BINOP(name) \
   void name(Simd128Register dst, Simd128Register src1, Simd128Register src2);
@@ -1147,15 +1155,31 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 
 #define SIMD_BINOP_WITH_SCRATCH_LIST(V) \
   V(F64x2Ne)                            \
+  V(F64x2Pmin)                          \
+  V(F64x2Pmax)                          \
   V(F32x4Ne)                            \
+  V(F32x4Pmin)                          \
+  V(F32x4Pmax)                          \
   V(I64x2Ne)                            \
   V(I64x2GeS)                           \
+  V(I64x2ExtMulLowI32x4S)               \
+  V(I64x2ExtMulHighI32x4S)              \
+  V(I64x2ExtMulLowI32x4U)               \
+  V(I64x2ExtMulHighI32x4U)              \
   V(I32x4Ne)                            \
   V(I32x4GeS)                           \
   V(I32x4GeU)                           \
+  V(I32x4ExtMulLowI16x8S)               \
+  V(I32x4ExtMulHighI16x8S)              \
+  V(I32x4ExtMulLowI16x8U)               \
+  V(I32x4ExtMulHighI16x8U)              \
   V(I16x8Ne)                            \
   V(I16x8GeS)                           \
   V(I16x8GeU)                           \
+  V(I16x8ExtMulLowI8x16S)               \
+  V(I16x8ExtMulHighI8x16S)              \
+  V(I16x8ExtMulLowI8x16U)               \
+  V(I16x8ExtMulHighI8x16U)              \
   V(I8x16Ne)                            \
   V(I8x16GeS)                           \
   V(I8x16GeU)
@@ -1204,8 +1228,16 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   V(F32x4Floor)             \
   V(F32x4Trunc)             \
   V(I64x2Neg)               \
+  V(F64x2ConvertLowI32x4S)  \
+  V(I64x2SConvertI32x4Low)  \
+  V(I64x2SConvertI32x4High) \
   V(I32x4Neg)               \
-  V(I8x16Popcnt)
+  V(I32x4SConvertI16x8Low)  \
+  V(I32x4SConvertI16x8High) \
+  V(I16x8SConvertI8x16Low)  \
+  V(I16x8SConvertI8x16High) \
+  V(I8x16Popcnt)            \
+  V(S128Not)
 
 #define PROTOTYPE_SIMD_UNOP(name) \
   void name(Simd128Register dst, Simd128Register src);
@@ -1213,6 +1245,33 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 #undef PROTOTYPE_SIMD_UNOP
 #undef SIMD_UNOP_LIST
 
+#define SIMD_UNOP_WITH_SCRATCH_LIST(V) \
+  V(I64x2Abs)                          \
+  V(I32x4Abs)                          \
+  V(I16x8Abs)                          \
+  V(I16x8Neg)                          \
+  V(I8x16Abs)                          \
+  V(I8x16Neg)
+
+#define PROTOTYPE_SIMD_UNOP_WITH_SCRATCH(name) \
+  void name(Simd128Register dst, Simd128Register src, Simd128Register scratch);
+SIMD_UNOP_WITH_SCRATCH_LIST(PROTOTYPE_SIMD_UNOP_WITH_SCRATCH)
+#undef PROTOTYPE_SIMD_UNOP_WITH_SCRATCH
+#undef SIMD_UNOP_WITH_SCRATCH_LIST
+
+#define SIMD_ALL_TRUE_LIST(V) \
+  V(I64x2AllTrue)             \
+  V(I32x4AllTrue)             \
+  V(I16x8AllTrue)             \
+  V(I8x16AllTrue)
+
+#define PROTOTYPE_SIMD_ALL_TRUE(name)                             \
+  void name(Register dst, Simd128Register src, Register scratch1, \
+            Register scratch2, Simd128Register scratch3);
+SIMD_ALL_TRUE_LIST(PROTOTYPE_SIMD_ALL_TRUE)
+#undef PROTOTYPE_SIMD_ALL_TRUE
+#undef SIMD_ALL_TRUE_LIST
+
 void LoadSimd128(Simd128Register dst, const MemOperand& mem,
                  Register scratch);
 void StoreSimd128(Simd128Register src, const MemOperand& mem,
@@ -1272,18 +1331,22 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
                 Simd128Register scratch1, Simd128Register scratch2);
   void F64x2Max(Simd128Register dst, Simd128Register src1, Simd128Register src2,
                 Simd128Register scratch1, Simd128Register scratch2);
-  void I64x2Abs(Simd128Register dst, Simd128Register src,
-                Simd128Register scratch);
-  void I32x4Abs(Simd128Register dst, Simd128Register src,
-                Simd128Register scratch);
-  void I16x8Abs(Simd128Register dst, Simd128Register src,
-                Simd128Register scratch);
-  void I16x8Neg(Simd128Register dst, Simd128Register src,
-                Simd128Register scratch);
-  void I8x16Abs(Simd128Register dst, Simd128Register src,
-                Simd128Register scratch);
-  void I8x16Neg(Simd128Register dst, Simd128Register src,
-                Simd128Register scratch);
+  void F64x2ConvertLowI32x4U(Simd128Register dst, Simd128Register src,
+                             Register scratch1, Simd128Register scratch2);
+  void I64x2UConvertI32x4Low(Simd128Register dst, Simd128Register src,
+                             Register scratch1, Simd128Register scratch2);
+  void I64x2UConvertI32x4High(Simd128Register dst, Simd128Register src,
+                              Register scratch1, Simd128Register scratch2);
+  void I32x4UConvertI16x8Low(Simd128Register dst, Simd128Register src,
+                             Register scratch1, Simd128Register scratch2);
+  void I32x4UConvertI16x8High(Simd128Register dst, Simd128Register src,
+                              Register scratch1, Simd128Register scratch2);
+  void I16x8UConvertI8x16Low(Simd128Register dst, Simd128Register src,
+                             Register scratch1, Simd128Register scratch2);
+  void I16x8UConvertI8x16High(Simd128Register dst, Simd128Register src,
+                              Register scratch1, Simd128Register scratch2);
+  void V128AnyTrue(Register dst, Simd128Register src, Register scratch1,
+                   Register scratch2, Simd128Register scratch3);
 
  private:
   static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
@@ -17,20 +17,29 @@ namespace internal {
   V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
   V(r24) V(r25) V(r26) V(r27) V(r28) V(r29) V(r30) V(fp)
 
+#define ALWAYS_ALLOCATABLE_GENERAL_REGISTERS(V)           \
+  V(r3)  V(r4)  V(r5)  V(r6)  V(r7)                       \
+  V(r8)  V(r9)  V(r10) V(r14) V(r15)                      \
+  V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
+  V(r24) V(r25) V(r26) V(r30)
+
 #if V8_EMBEDDED_CONSTANT_POOL_BOOL
-#define ALLOCATABLE_GENERAL_REGISTERS(V)                  \
-  V(r3)  V(r4)  V(r5)  V(r6)  V(r7)                       \
-  V(r8)  V(r9)  V(r10) V(r14) V(r15)                      \
-  V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
-  V(r24) V(r25) V(r26) V(r27) V(r30)
+#define MAYBE_ALLOCATEABLE_CONSTANT_POOL_REGISTER(V)
 #else
-#define ALLOCATABLE_GENERAL_REGISTERS(V)                  \
-  V(r3)  V(r4)  V(r5)  V(r6)  V(r7)                       \
-  V(r8)  V(r9)  V(r10) V(r14) V(r15)                      \
-  V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
-  V(r24) V(r25) V(r26) V(r27) V(r28) V(r30)
+#define MAYBE_ALLOCATEABLE_CONSTANT_POOL_REGISTER(V) V(r28)
 #endif
 
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#define MAYBE_ALLOCATABLE_CAGE_REGISTERS(V)
+#else
+#define MAYBE_ALLOCATABLE_CAGE_REGISTERS(V) V(r27)
+#endif
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V)       \
+  ALWAYS_ALLOCATABLE_GENERAL_REGISTERS(V)      \
+  MAYBE_ALLOCATEABLE_CONSTANT_POOL_REGISTER(V) \
+  MAYBE_ALLOCATABLE_CAGE_REGISTERS(V)
+
 #define LOW_DOUBLE_REGISTERS(V) \
   V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
   V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15)
@@ -137,6 +146,11 @@ constexpr Register no_reg = Register::no_reg();
 constexpr Register kConstantPoolRegister = r28;  // Constant pool.
 constexpr Register kRootRegister = r29;          // Roots array pointer.
 constexpr Register cp = r30;                     // JavaScript context pointer.
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+constexpr Register kPtrComprCageBaseRegister = r27;  // callee save
+#else
+constexpr Register kPtrComprCageBaseRegister = kRootRegister;
+#endif
 
 // Returns the number of padding slots needed for stack pointer alignment.
 constexpr int ArgumentPaddingSlots(int argument_count) {
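Splitting the allocatable set into an always-allocatable core plus two conditional one-register lists keeps ALLOCATABLE_GENERAL_REGISTERS a single X-macro whose expansion tracks both build flags: r28 stays reserved when the embedded constant pool is enabled, r27 when it serves as the pointer-compression cage base. A sketch of how such an X-macro list is typically consumed, here counting its members (the consumer macro is hypothetical):

#define PLUS_ONE(reg) +1
constexpr int kNumAllocatableGeneralRegisters =
    0 ALLOCATABLE_GENERAL_REGISTERS(PLUS_ONE);  // tracks the #if configuration
#undef PLUS_ONE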
@@ -4791,12 +4791,6 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
   DCHECK_EQ(actual_parameter_count, a0);
   DCHECK_EQ(expected_parameter_count, a2);
 
-  // If the expected parameter count is equal to the adaptor sentinel, no need
-  // to push undefined value as arguments.
-  if (kDontAdaptArgumentsSentinel != 0) {
-    Branch(&regular_invoke, eq, expected_parameter_count,
-           Operand(kDontAdaptArgumentsSentinel));
-  }
   // If overapplication or if the actual argument count is equal to the
   // formal parameter count, no need to push extra undefined values.
   SubWord(expected_parameter_count, expected_parameter_count,
@@ -5540,7 +5534,8 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
     Push(scratch);
   }
 #if V8_ENABLE_WEBASSEMBLY
-  if (type == StackFrame::WASM) Push(kWasmInstanceRegister);
+  if (type == StackFrame::WASM || type == StackFrame::WASM_LIFTOFF_SETUP)
+    Push(kWasmInstanceRegister);
 #endif  // V8_ENABLE_WEBASSEMBLY
 }
@@ -1707,13 +1707,6 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
   DCHECK_EQ(actual_parameter_count, r2);
   DCHECK_EQ(expected_parameter_count, r4);
 
-  // If the expected parameter count is equal to the adaptor sentinel, no need
-  // to push undefined value as arguments.
-  if (kDontAdaptArgumentsSentinel != 0) {
-    CmpS64(expected_parameter_count, Operand(kDontAdaptArgumentsSentinel));
-    beq(&regular_invoke);
-  }
-
   // If overapplication or if the actual argument count is equal to the
   // formal parameter count, no need to push extra undefined values.
   SubS64(expected_parameter_count, expected_parameter_count,
@@ -359,10 +359,10 @@ class TNode {
  public:
   template <class U,
             typename std::enable_if<is_subtype<U, T>::value, int>::type = 0>
-  TNode(const TNode<U>& other) : node_(other) {
+  TNode(const TNode<U>& other) V8_NOEXCEPT : node_(other) {
     LazyTemplateChecks();
   }
-  TNode(const TNode& other) : node_(other) { LazyTemplateChecks(); }
+  TNode(const TNode& other) V8_NOEXCEPT : node_(other) { LazyTemplateChecks(); }
   TNode() : TNode(nullptr) {}
 
   TNode operator=(TNode other) {
@@ -375,7 +375,7 @@ class TNode {
 
   static TNode UncheckedCast(compiler::Node* node) { return TNode(node); }
 
- private:
+ protected:
   explicit TNode(compiler::Node* node) : node_(node) { LazyTemplateChecks(); }
   // These checks shouldn't be checked before TNode is actually used.
   void LazyTemplateChecks() {
@@ -385,6 +385,21 @@ class TNode {
   compiler::Node* node_;
 };
 
+// SloppyTNode<T> is a variant of TNode<T> and allows implicit casts from
+// Node*. It is intended for function arguments as long as some call sites
+// still use untyped Node* arguments.
+// TODO(turbofan): Delete this class once transition is finished.
+template <class T>
+class SloppyTNode : public TNode<T> {
+ public:
+  SloppyTNode(compiler::Node* node)  // NOLINT(runtime/explicit)
+      : TNode<T>(node) {}
+  template <class U, typename std::enable_if<is_subtype<U, T>::value,
+                                             int>::type = 0>
+  SloppyTNode(const TNode<U>& other) V8_NOEXCEPT  // NOLINT(runtime/explicit)
+      : TNode<T>(other) {}
+};
+
 }  // namespace internal
 }  // namespace v8
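SloppyTNode<T> restores, for the call sites that still need it, the implicit Node* conversion that TNode<T> deliberately lacks; the private-to-protected change in the previous hunk is what lets the subclass reach TNode's Node* constructor. A usage sketch, with a hypothetical function:

void UseValue(SloppyTNode<Object> value);  // hypothetical signature

void Caller(compiler::Node* raw, TNode<Smi> typed) {
  UseValue(raw);    // Node* -> SloppyTNode<Object> via the implicit ctor
  UseValue(typed);  // TNode<Smi> -> SloppyTNode<Object>, Smi being a subtype
}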
@@ -1018,6 +1018,16 @@ void Assembler::near_jmp(intptr_t disp, RelocInfo::Mode rmode) {
   emitl(static_cast<int32_t>(disp));
 }
 
+void Assembler::near_j(Condition cc, intptr_t disp, RelocInfo::Mode rmode) {
+  EnsureSpace ensure_space(this);
+  // 0000 1111 1000 tttn #32-bit disp.
+  emit(0x0F);
+  emit(0x80 | cc);
+  DCHECK(is_int32(disp));
+  if (!RelocInfo::IsNoInfo(rmode)) RecordRelocInfo(rmode);
+  emitl(static_cast<int32_t>(disp));
+}
+
 void Assembler::call(Register adr) {
   EnsureSpace ensure_space(this);
   // Opcode: FF /2 r64.
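near_j mirrors near_jmp but emits the two-byte 0F 8x conditional-jump opcode before the rel32; OR-ing the condition code into 0x80 selects which flag combination the jump tests. For instance, with the 'equal' condition (code 4 in the standard x64 numbering) the opcode bytes would be:

#include <cstdint>
// Illustrative: the bytes preceding the four rel32 displacement bytes.
constexpr uint8_t kJccEqualOpcode[] = {0x0F, 0x80 | 4};  // 0F 84, JE rel32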
@@ -821,6 +821,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   static constexpr int kNearJmpInstrSize = 5;
   void near_call(intptr_t disp, RelocInfo::Mode rmode);
   void near_jmp(intptr_t disp, RelocInfo::Mode rmode);
+  void near_j(Condition cc, intptr_t disp, RelocInfo::Mode rmode);
 
   void call(Handle<CodeT> target,
             RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
@@ -1897,10 +1897,23 @@ void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
 void MacroAssembler::Cmp(Operand dst, Handle<Object> source) {
   if (source->IsSmi()) {
     Cmp(dst, Smi::cast(*source));
+  } else if (root_array_available_ && options().isolate_independent_code) {
+    // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
+    // non-isolate-independent code. In many cases it might be cheaper than
+    // embedding the relocatable value.
+    // TODO(v8:9706): Fix-it! This load will always uncompress the value
+    // even when we are loading a compressed embedded object.
+    IndirectLoadConstant(kScratchRegister, Handle<HeapObject>::cast(source));
+    cmp_tagged(dst, kScratchRegister);
+  } else if (COMPRESS_POINTERS_BOOL) {
+    EmbeddedObjectIndex index =
+        AddEmbeddedObject(Handle<HeapObject>::cast(source));
+    DCHECK(is_uint32(index));
+    cmpl(dst, Immediate(static_cast<int>(index),
+                        RelocInfo::COMPRESSED_EMBEDDED_OBJECT));
   } else {
     Move(kScratchRegister, Handle<HeapObject>::cast(source),
-         COMPRESS_POINTERS_BOOL ? RelocInfo::COMPRESSED_EMBEDDED_OBJECT
-                                : RelocInfo::FULL_EMBEDDED_OBJECT);
+         RelocInfo::FULL_EMBEDDED_OBJECT);
     cmp_tagged(dst, kScratchRegister);
   }
 }
@@ -2081,11 +2094,26 @@ void TurboAssembler::Jump(const ExternalReference& reference) {
 
 void TurboAssembler::Jump(Operand op) { jmp(op); }
 
+void TurboAssembler::Jump(Operand op, Condition cc) {
+  Label skip;
+  j(NegateCondition(cc), &skip, Label::kNear);
+  Jump(op);
+  bind(&skip);
+}
+
 void TurboAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
   Move(kScratchRegister, destination, rmode);
   jmp(kScratchRegister);
 }
 
+void TurboAssembler::Jump(Address destination, RelocInfo::Mode rmode,
+                          Condition cc) {
+  Label skip;
+  j(NegateCondition(cc), &skip, Label::kNear);
+  Jump(destination, rmode);
+  bind(&skip);
+}
+
 void TurboAssembler::Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode) {
   DCHECK_IMPLIES(options().isolate_independent_code,
                  Builtins::IsIsolateIndependentBuiltin(*code_object));
@@ -2104,10 +2132,7 @@ void TurboAssembler::Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode,
                  Builtins::IsIsolateIndependentBuiltin(*code_object));
   Builtin builtin = Builtin::kNoBuiltinId;
   if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin)) {
-    Label skip;
-    j(NegateCondition(cc), &skip, Label::kNear);
-    TailCallBuiltin(builtin);
-    bind(&skip);
+    TailCallBuiltin(builtin, cc);
     return;
   }
   DCHECK(RelocInfo::IsCodeTarget(rmode));
@@ -2217,6 +2242,27 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
   }
 }
 
+void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cc) {
+  ASM_CODE_COMMENT_STRING(this,
+                          CommentForOffHeapTrampoline("tail call", builtin));
+  switch (options().builtin_call_jump_mode) {
+    case BuiltinCallJumpMode::kAbsolute:
+      Jump(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET, cc);
+      break;
+    case BuiltinCallJumpMode::kPCRelative:
+      near_j(cc, static_cast<intptr_t>(builtin), RelocInfo::NEAR_BUILTIN_ENTRY);
+      break;
+    case BuiltinCallJumpMode::kIndirect:
+      Jump(EntryFromBuiltinAsOperand(builtin), cc);
+      break;
+    case BuiltinCallJumpMode::kForMksnapshot: {
+      Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
+      j(cc, code, RelocInfo::CODE_TARGET);
+      break;
+    }
+  }
+}
+
 void TurboAssembler::LoadCodeObjectEntry(Register destination,
                                          Register code_object) {
   ASM_CODE_COMMENT(this);
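Folding the condition into TailCallBuiltin lets each call-jump mode pick its cheapest conditional form: a genuine conditional near jump (near_j) in PC-relative mode, and the NegateCondition/skip-label pattern, via the new conditional Jump overloads, for the absolute and indirect modes. A hypothetical call site, with the builtin and condition as stand-ins:

TailCallBuiltin(Builtin::kAbort, not_equal);  // tail-call only if ZF is clear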
@@ -2297,12 +2343,10 @@ void TurboAssembler::LoadCodeDataContainerCodeNonBuiltin(
     Register destination, Register code_data_container_object) {
   ASM_CODE_COMMENT(this);
   CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
-  // Given the fields layout we can read the Code reference as a full word.
-  static_assert(!V8_EXTERNAL_CODE_SPACE_BOOL ||
-                (CodeDataContainer::kCodeCageBaseUpper32BitsOffset ==
-                 CodeDataContainer::kCodeOffset + kTaggedSize));
+  // Compute the Code object pointer from the code entry point.
   movq(destination, FieldOperand(code_data_container_object,
-                                 CodeDataContainer::kCodeOffset));
+                                 CodeDataContainer::kCodeEntryPointOffset));
+  subq(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
 }
 
 void TurboAssembler::CallCodeDataContainerObject(
@@ -2884,7 +2928,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
   }
 
   Label done;
-  InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
+  InvokePrologue(expected_parameter_count, actual_parameter_count, type);
   // We call indirectly through the code field in the function to
   // allow recompilation to take effect without changing any of the
   // call sites.
@@ -2949,19 +2993,13 @@ void MacroAssembler::StackOverflowCheck(
 
 void MacroAssembler::InvokePrologue(Register expected_parameter_count,
                                     Register actual_parameter_count,
-                                    Label* done, InvokeType type) {
+                                    InvokeType type) {
   ASM_CODE_COMMENT(this);
   if (expected_parameter_count == actual_parameter_count) {
     Move(rax, actual_parameter_count);
     return;
   }
   Label regular_invoke;
-  // If the expected parameter count is equal to the adaptor sentinel, no need
-  // to push undefined value as arguments.
-  if (kDontAdaptArgumentsSentinel != 0) {
-    cmpl(expected_parameter_count, Immediate(kDontAdaptArgumentsSentinel));
-    j(equal, &regular_invoke, Label::kFar);
-  }
 
   // If overapplication or if the actual argument count is equal to the
   // formal parameter count, no need to push extra undefined values.
@@ -390,6 +390,7 @@ class V8_EXPORT_PRIVATE TurboAssembler
   void CallBuiltinByIndex(Register builtin_index);
   void CallBuiltin(Builtin builtin);
   void TailCallBuiltin(Builtin builtin);
+  void TailCallBuiltin(Builtin builtin, Condition cc);
 
   void LoadCodeObjectEntry(Register destination, Register code_object);
   void CallCodeObject(Register code_object);
@@ -418,8 +419,10 @@ class V8_EXPORT_PRIVATE TurboAssembler
   void CodeDataContainerFromCodeT(Register destination, Register codet);
 
   void Jump(Address destination, RelocInfo::Mode rmode);
+  void Jump(Address destination, RelocInfo::Mode rmode, Condition cc);
   void Jump(const ExternalReference& reference);
   void Jump(Operand op);
+  void Jump(Operand op, Condition cc);
   void Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode);
   void Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode, Condition cc);
|
@ -943,8 +946,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
|
||||||
private:
|
private:
|
||||||
// Helper functions for generating invokes.
|
// Helper functions for generating invokes.
|
||||||
void InvokePrologue(Register expected_parameter_count,
|
void InvokePrologue(Register expected_parameter_count,
|
||||||
Register actual_parameter_count, Label* done,
|
Register actual_parameter_count, InvokeType type);
|
||||||
InvokeType type);
|
|
||||||
|
|
||||||
void EnterExitFramePrologue(Register saved_rax_reg,
|
void EnterExitFramePrologue(Register saved_rax_reg,
|
||||||
StackFrame::Type frame_type);
|
StackFrame::Type frame_type);
|
||||||
|
|
|
@@ -15,7 +15,7 @@
 
 #ifdef ENABLE_SLOW_DCHECKS
 #define SLOW_DCHECK(condition) \
-  CHECK(!v8::internal::FLAG_enable_slow_asserts || (condition))
+  CHECK(!v8::internal::v8_flags.enable_slow_asserts || (condition))
 #define SLOW_DCHECK_IMPLIES(lhs, rhs) SLOW_DCHECK(!(lhs) || (rhs))
 #else
 #define SLOW_DCHECK(condition) ((void)0)
@@ -16,13 +16,13 @@ namespace internal {
 
 RwxMemoryWriteScope::RwxMemoryWriteScope(const char* comment) {
   DCHECK(is_key_permissions_initialized_for_current_thread());
-  if (!FLAG_jitless) {
+  if (!v8_flags.jitless) {
     SetWritable();
   }
 }
 
 RwxMemoryWriteScope::~RwxMemoryWriteScope() {
-  if (!FLAG_jitless) {
+  if (!v8_flags.jitless) {
     SetExecutable();
   }
 }
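These last two hunks belong to a broader migration from free FLAG_foo globals to members of a single flags object, so every flag read goes through one instance. A minimal sketch of the pattern; the struct and field layout here are assumptions, not V8's actual definitions:

struct FlagValues {
  bool jitless = false;              // was the global FLAG_jitless
  bool enable_slow_asserts = false;  // was FLAG_enable_slow_asserts
};
inline FlagValues v8_flags;  // single instance, referenced as v8_flags.foo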