File: out/../deps/v8/src/wasm/baseline/liftoff-compiler.cc
Warning: line 1603, column 35: Called function pointer is null (null dereference)
1 | // Copyright 2017 the V8 project authors. All rights reserved. | |||
2 | // Use of this source code is governed by a BSD-style license that can be | |||
3 | // found in the LICENSE file. | |||
4 | ||||
5 | #include "src/wasm/baseline/liftoff-compiler.h" | |||
6 | ||||
7 | #include "src/base/enum-set.h" | |||
8 | #include "src/base/optional.h" | |||
9 | #include "src/base/platform/wrappers.h" | |||
10 | #include "src/codegen/assembler-inl.h" | |||
11 | // TODO(clemensb): Remove dependences on compiler stuff. | |||
12 | #include "src/codegen/external-reference.h" | |||
13 | #include "src/codegen/interface-descriptors-inl.h" | |||
14 | #include "src/codegen/machine-type.h" | |||
15 | #include "src/codegen/macro-assembler-inl.h" | |||
16 | #include "src/compiler/linkage.h" | |||
17 | #include "src/compiler/wasm-compiler.h" | |||
18 | #include "src/logging/counters.h" | |||
19 | #include "src/logging/log.h" | |||
20 | #include "src/objects/smi.h" | |||
21 | #include "src/tracing/trace-event.h" | |||
22 | #include "src/utils/ostreams.h" | |||
23 | #include "src/utils/utils.h" | |||
24 | #include "src/wasm/baseline/liftoff-assembler.h" | |||
25 | #include "src/wasm/baseline/liftoff-register.h" | |||
26 | #include "src/wasm/function-body-decoder-impl.h" | |||
27 | #include "src/wasm/function-compiler.h" | |||
28 | #include "src/wasm/memory-tracing.h" | |||
29 | #include "src/wasm/object-access.h" | |||
30 | #include "src/wasm/simd-shuffle.h" | |||
31 | #include "src/wasm/wasm-debug.h" | |||
32 | #include "src/wasm/wasm-engine.h" | |||
33 | #include "src/wasm/wasm-linkage.h" | |||
34 | #include "src/wasm/wasm-objects.h" | |||
35 | #include "src/wasm/wasm-opcodes-inl.h" | |||
36 | ||||
37 | namespace v8 { | |||
38 | namespace internal { | |||
39 | namespace wasm { | |||
40 | ||||
41 | constexpr auto kRegister = LiftoffAssembler::VarState::kRegister; | |||
42 | constexpr auto kIntConst = LiftoffAssembler::VarState::kIntConst; | |||
43 | constexpr auto kStack = LiftoffAssembler::VarState::kStack; | |||
44 | ||||
45 | namespace { | |||
46 | ||||
47 | #define __ asm_. | |||
48 | ||||
49 | #define TRACE(...) \ | |||
50 | do { \ | |||
51 | if (FLAG_trace_liftoff) PrintF("[liftoff] " __VA_ARGS__); \ | |||
52 | } while (false) | |||
53 | ||||
54 | #define WASM_INSTANCE_OBJECT_FIELD_OFFSET(name) \ | |||
55 | ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset) | |||
56 | ||||
57 | template <int expected_size, int actual_size> | |||
58 | struct assert_field_size { | |||
59 | static_assert(expected_size == actual_size, | |||
60 | "field in WasmInstance does not have the expected size"); | |||
61 | static constexpr int size = actual_size; | |||
62 | }; | |||
63 | ||||
64 | #define WASM_INSTANCE_OBJECT_FIELD_SIZE(name) \ | |||
65 | FIELD_SIZE(WasmInstanceObject::k##name##Offset) | |||
66 | ||||
67 | #define LOAD_INSTANCE_FIELD(dst, name, load_size, pinned) \ | |||
68 | __ LoadFromInstance(dst, LoadInstanceIntoRegister(pinned, dst), \ | |||
69 | WASM_INSTANCE_OBJECT_FIELD_OFFSET(name), \ | |||
70 | assert_field_size<WASM_INSTANCE_OBJECT_FIELD_SIZE(name), \ | |||
71 | load_size>::size); | |||
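| // For example, the stack check below uses | |||
| //   LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kSystemPointerSize, {}); | |||
| // which loads the instance's StackLimitAddress field and statically checks | |||
| // that the field size matches {kSystemPointerSize}. | |||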
72 | ||||
73 | #define LOAD_TAGGED_PTR_INSTANCE_FIELD(dst, name, pinned) \ | |||
74 | static_assert(WASM_INSTANCE_OBJECT_FIELD_SIZE(name) == kTaggedSize, \ | |||
75 | "field in WasmInstance does not have the expected size"); \ | |||
76 | __ LoadTaggedPointerFromInstance(dst, LoadInstanceIntoRegister(pinned, dst), \ | |||
77 | WASM_INSTANCE_OBJECT_FIELD_OFFSET(name)); | |||
78 | ||||
79 | #ifdef V8_CODE_COMMENTS | |||
80 | #define CODE_COMMENT(str) \ | |||
81 | do { \ | |||
82 | __ RecordComment(str); \ | |||
83 | } while (false) | |||
84 | #else | |||
85 | #define CODE_COMMENT(str) ((void)0) | |||
86 | #endif | |||
87 | ||||
88 | constexpr LoadType::LoadTypeValue kPointerLoadType = | |||
89 | kSystemPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load; | |||
90 | ||||
91 | constexpr ValueKind kPointerKind = LiftoffAssembler::kPointerKind; | |||
92 | constexpr ValueKind kSmiKind = LiftoffAssembler::kSmiKind; | |||
93 | constexpr ValueKind kTaggedKind = LiftoffAssembler::kTaggedKind; | |||
94 | ||||
95 | // Used to construct fixed-size signatures: MakeSig::Returns(...).Params(...); | |||
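| // For example, MakeSig::Returns(kPointerKind).Params(kPointerKind, kPointerKind, kPointerKind) | |||
| // describes the {WasmGetOwnProperty} runtime stub call further down. | |||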
96 | using MakeSig = FixedSizeSignature<ValueKind>; | |||
97 | ||||
98 | #if V8_TARGET_ARCH_ARM64 | |||
99 | // On ARM64, the Assembler keeps track of pointers to Labels to resolve | |||
100 | // branches to distant targets. Moving labels would confuse the Assembler, | |||
101 | // thus store the label on the heap and keep a unique_ptr. | |||
102 | class MovableLabel { | |||
103 | public: | |||
104 | MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(MovableLabel); | |||
105 | MovableLabel() : label_(new Label()) {} | |||
106 | ||||
107 | Label* get() { return label_.get(); } | |||
108 | ||||
109 | private: | |||
110 | std::unique_ptr<Label> label_; | |||
111 | }; | |||
112 | #else | |||
113 | // On all other platforms, just store the Label directly. | |||
114 | class MovableLabel { | |||
115 | public: | |||
116 | MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(MovableLabel); | |||
117 | ||||
118 | Label* get() { return &label_; } | |||
119 | ||||
120 | private: | |||
121 | Label label_; | |||
122 | }; | |||
123 | #endif | |||
124 | ||||
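| // On 32-bit targets (kSystemPointerSize == 4), i64 values in the signature are | |||
| // lowered to pairs of i32, so a lowered call descriptor is used there. | |||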
125 | compiler::CallDescriptor* GetLoweredCallDescriptor( | |||
126 | Zone* zone, compiler::CallDescriptor* call_desc) { | |||
127 | return kSystemPointerSize == 4 | |||
128 | ? compiler::GetI32WasmCallDescriptor(zone, call_desc) | |||
129 | : call_desc; | |||
130 | } | |||
131 | ||||
132 | constexpr LiftoffRegList GetGpParamRegisters() { | |||
133 | LiftoffRegList registers; | |||
134 | for (auto reg : kGpParamRegisters) registers.set(reg); | |||
135 | return registers; | |||
136 | } | |||
137 | ||||
138 | constexpr LiftoffCondition GetCompareCondition(WasmOpcode opcode) { | |||
139 | switch (opcode) { | |||
140 | case kExprI32Eq: | |||
141 | return kEqual; | |||
142 | case kExprI32Ne: | |||
143 | return kUnequal; | |||
144 | case kExprI32LtS: | |||
145 | return kSignedLessThan; | |||
146 | case kExprI32LtU: | |||
147 | return kUnsignedLessThan; | |||
148 | case kExprI32GtS: | |||
149 | return kSignedGreaterThan; | |||
150 | case kExprI32GtU: | |||
151 | return kUnsignedGreaterThan; | |||
152 | case kExprI32LeS: | |||
153 | return kSignedLessEqual; | |||
154 | case kExprI32LeU: | |||
155 | return kUnsignedLessEqual; | |||
156 | case kExprI32GeS: | |||
157 | return kSignedGreaterEqual; | |||
158 | case kExprI32GeU: | |||
159 | return kUnsignedGreaterEqual; | |||
160 | default: | |||
161 | UNREACHABLE(); | |||
162 | } | |||
163 | } | |||
164 | ||||
165 | // Builds a {DebugSideTable}. | |||
166 | class DebugSideTableBuilder { | |||
167 | using Entry = DebugSideTable::Entry; | |||
168 | using Value = Entry::Value; | |||
169 | ||||
170 | public: | |||
171 | enum AssumeSpilling { | |||
172 | // All register values will be spilled before the pc covered by the debug | |||
173 | // side table entry. Register slots will be marked as stack slots in the | |||
174 | // generated debug side table entry. | |||
175 | kAssumeSpilling, | |||
176 | // Register slots will be written out as they are. | |||
177 | kAllowRegisters, | |||
178 | // Register slots cannot appear since we already spilled. | |||
179 | kDidSpill | |||
180 | }; | |||
181 | ||||
182 | class EntryBuilder { | |||
183 | public: | |||
184 | explicit EntryBuilder(int pc_offset, int stack_height, | |||
185 | std::vector<Value> changed_values) | |||
186 | : pc_offset_(pc_offset), | |||
187 | stack_height_(stack_height), | |||
188 | changed_values_(std::move(changed_values)) {} | |||
189 | ||||
190 | Entry ToTableEntry() { | |||
191 | return Entry{pc_offset_, stack_height_, std::move(changed_values_)}; | |||
192 | } | |||
193 | ||||
194 | void MinimizeBasedOnPreviousStack(const std::vector<Value>& last_values) { | |||
195 | auto dst = changed_values_.begin(); | |||
196 | auto end = changed_values_.end(); | |||
197 | for (auto src = dst; src != end; ++src) { | |||
198 | if (src->index < static_cast<int>(last_values.size()) && | |||
199 | *src == last_values[src->index]) { | |||
200 | continue; | |||
201 | } | |||
202 | if (dst != src) *dst = *src; | |||
203 | ++dst; | |||
204 | } | |||
205 | changed_values_.erase(dst, end); | |||
206 | } | |||
207 | ||||
208 | int pc_offset() const { return pc_offset_; } | |||
209 | void set_pc_offset(int new_pc_offset) { pc_offset_ = new_pc_offset; } | |||
210 | ||||
211 | private: | |||
212 | int pc_offset_; | |||
213 | int stack_height_; | |||
214 | std::vector<Value> changed_values_; | |||
215 | }; | |||
216 | ||||
217 | // Adds a new entry in regular code. | |||
218 | void NewEntry(int pc_offset, | |||
219 | base::Vector<DebugSideTable::Entry::Value> values) { | |||
220 | entries_.emplace_back(pc_offset, static_cast<int>(values.size()), | |||
221 | GetChangedStackValues(last_values_, values)); | |||
222 | } | |||
223 | ||||
224 | // Adds a new entry for OOL code, and returns a pointer to a builder for | |||
225 | // modifying that entry. | |||
226 | EntryBuilder* NewOOLEntry(base::Vector<DebugSideTable::Entry::Value> values) { | |||
227 | constexpr int kNoPcOffsetYet = -1; | |||
228 | ool_entries_.emplace_back(kNoPcOffsetYet, static_cast<int>(values.size()), | |||
229 | GetChangedStackValues(last_ool_values_, values)); | |||
230 | return &ool_entries_.back(); | |||
231 | } | |||
232 | ||||
233 | void SetNumLocals(int num_locals) { | |||
234 | DCHECK_EQ(-1, num_locals_); | |||
235 | DCHECK_LE(0, num_locals); | |||
236 | num_locals_ = num_locals; | |||
237 | } | |||
238 | ||||
239 | std::unique_ptr<DebugSideTable> GenerateDebugSideTable() { | |||
240 | DCHECK_LE(0, num_locals_); | |||
241 | ||||
242 | // Connect {entries_} and {ool_entries_} by removing redundant stack | |||
243 | // information from the first {ool_entries_} entry (based on | |||
244 | // {last_values_}). | |||
245 | if (!entries_.empty() && !ool_entries_.empty()) { | |||
246 | ool_entries_.front().MinimizeBasedOnPreviousStack(last_values_); | |||
247 | } | |||
248 | ||||
249 | std::vector<Entry> entries; | |||
250 | entries.reserve(entries_.size() + ool_entries_.size()); | |||
251 | for (auto& entry : entries_) entries.push_back(entry.ToTableEntry()); | |||
252 | for (auto& entry : ool_entries_) entries.push_back(entry.ToTableEntry()); | |||
253 | DCHECK(std::is_sorted( | |||
254 | entries.begin(), entries.end(), | |||
255 | [](Entry& a, Entry& b) { return a.pc_offset() < b.pc_offset(); })); | |||
256 | return std::make_unique<DebugSideTable>(num_locals_, std::move(entries)); | |||
257 | } | |||
258 | ||||
259 | private: | |||
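| // Returns only the values that differ from {last_values}, updating | |||
| // {last_values} in place; e.g. if only the top stack slot changed since the | |||
| // previous entry, the result contains just that one value. | |||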
260 | static std::vector<Value> GetChangedStackValues( | |||
261 | std::vector<Value>& last_values, | |||
262 | base::Vector<DebugSideTable::Entry::Value> values) { | |||
263 | std::vector<Value> changed_values; | |||
264 | int old_stack_size = static_cast<int>(last_values.size()); | |||
265 | last_values.resize(values.size()); | |||
266 | ||||
267 | int index = 0; | |||
268 | for (const auto& value : values) { | |||
269 | if (index >= old_stack_size || last_values[index] != value) { | |||
270 | changed_values.push_back(value); | |||
271 | last_values[index] = value; | |||
272 | } | |||
273 | ++index; | |||
274 | } | |||
275 | return changed_values; | |||
276 | } | |||
277 | ||||
278 | int num_locals_ = -1; | |||
279 | // Keep a snapshot of the stack of the last entry, to generate a delta to the | |||
280 | // next entry. | |||
281 | std::vector<Value> last_values_; | |||
282 | std::vector<EntryBuilder> entries_; | |||
283 | // Keep OOL code entries separate so we can do proper delta-encoding (more | |||
284 | // entries might be added between the existing {entries_} and the | |||
285 | // {ool_entries_}). Store the entries in a list so the pointer is not | |||
286 | // invalidated by adding more entries. | |||
287 | std::vector<Value> last_ool_values_; | |||
288 | std::list<EntryBuilder> ool_entries_; | |||
289 | }; | |||
290 | ||||
291 | void CheckBailoutAllowed(LiftoffBailoutReason reason, const char* detail, | |||
292 | const CompilationEnv* env) { | |||
293 | // Decode errors are ok. | |||
294 | if (reason == kDecodeError) return; | |||
295 | ||||
296 | // --liftoff-only ensures that tests actually exercise the Liftoff path | |||
297 | // without bailing out. We also fail for missing CPU support, to avoid | |||
298 | // running any TurboFan code under --liftoff-only. | |||
299 | if (FLAG_liftoff_only) { | |||
300 | FATAL("--liftoff-only: treating bailout as fatal error. Cause: %s", detail); | |||
301 | } | |||
302 | ||||
303 | // Missing CPU features are generally OK, except with --liftoff-only. | |||
304 | if (reason == kMissingCPUFeature) return; | |||
305 | ||||
306 | // If --enable-testing-opcode-in-wasm is set, we are expected to bailout with | |||
307 | // "testing opcode". | |||
308 | if (FLAG_enable_testing_opcode_in_wasm && | |||
309 | strcmp(detail, "testing opcode") == 0) { | |||
310 | return; | |||
311 | } | |||
312 | ||||
313 | // Some externally maintained architectures don't fully implement Liftoff yet. | |||
314 | #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_S390X || \ | |||
315 | V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_LOONG64 | |||
316 | return; | |||
317 | #endif | |||
318 | ||||
319 | #if V8_TARGET_ARCH_ARM | |||
320 | // Allow bailout for missing ARMv7 support. | |||
321 | if (!CpuFeatures::IsSupported(ARMv7) && reason == kUnsupportedArchitecture) { | |||
322 | return; | |||
323 | } | |||
324 | #endif | |||
325 | ||||
326 | #define LIST_FEATURE(name, ...) kFeature_##name, | |||
327 | constexpr WasmFeatures kExperimentalFeatures{ | |||
328 | FOREACH_WASM_EXPERIMENTAL_FEATURE_FLAG(LIST_FEATURE)}; | |||
329 | #undef LIST_FEATURE | |||
330 | ||||
331 | // Bailout is allowed if any experimental feature is enabled. | |||
332 | if (env->enabled_features.contains_any(kExperimentalFeatures)) return; | |||
333 | ||||
334 | // Otherwise, bailout is not allowed. | |||
335 | FATAL("Liftoff bailout should not happen. Cause: %s\n", detail); | |||
336 | } | |||
337 | ||||
338 | class LiftoffCompiler { | |||
339 | public: | |||
340 | // TODO(clemensb): Make this a template parameter. | |||
341 | static constexpr Decoder::ValidateFlag validate = Decoder::kBooleanValidation; | |||
342 | ||||
343 | using Value = ValueBase<validate>; | |||
344 | ||||
345 | struct ElseState { | |||
346 | MovableLabel label; | |||
347 | LiftoffAssembler::CacheState state; | |||
348 | }; | |||
349 | ||||
350 | struct TryInfo { | |||
351 | TryInfo() = default; | |||
352 | LiftoffAssembler::CacheState catch_state; | |||
353 | Label catch_label; | |||
354 | bool catch_reached = false; | |||
355 | bool in_handler = false; | |||
356 | }; | |||
357 | ||||
358 | struct Control : public ControlBase<Value, validate> { | |||
359 | std::unique_ptr<ElseState> else_state; | |||
360 | LiftoffAssembler::CacheState label_state; | |||
361 | MovableLabel label; | |||
362 | std::unique_ptr<TryInfo> try_info; | |||
363 | // Number of exceptions on the stack below this control. | |||
364 | int num_exceptions = 0; | |||
365 | ||||
366 | MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(Control); | |||
367 | ||||
368 | template <typename... Args> | |||
369 | explicit Control(Args&&... args) V8_NOEXCEPT | |||
370 | : ControlBase(std::forward<Args>(args)...) {} | |||
371 | }; | |||
372 | ||||
373 | using FullDecoder = WasmFullDecoder<validate, LiftoffCompiler>; | |||
374 | using ValueKindSig = LiftoffAssembler::ValueKindSig; | |||
375 | ||||
376 | class MostlySmallValueKindSig : public Signature<ValueKind> { | |||
377 | public: | |||
378 | MostlySmallValueKindSig(Zone* zone, const FunctionSig* sig) | |||
379 | : Signature<ValueKind>(sig->return_count(), sig->parameter_count(), | |||
380 | MakeKinds(inline_storage_, zone, sig)) {} | |||
381 | ||||
382 | private: | |||
383 | static constexpr size_t kInlineStorage = 8; | |||
384 | ||||
385 | static ValueKind* MakeKinds(ValueKind* storage, Zone* zone, | |||
386 | const FunctionSig* sig) { | |||
387 | const size_t size = sig->parameter_count() + sig->return_count(); | |||
388 | if (V8_UNLIKELY(size > kInlineStorage)) { | |||
389 | storage = zone->NewArray<ValueKind>(size); | |||
390 | } | |||
391 | std::transform(sig->all().begin(), sig->all().end(), storage, | |||
392 | [](ValueType type) { return type.kind(); }); | |||
393 | return storage; | |||
394 | } | |||
395 | ||||
396 | ValueKind inline_storage_[kInlineStorage]; | |||
397 | }; | |||
398 | ||||
399 | // For debugging, we need to spill registers before a trap or a stack check to | |||
400 | // be able to inspect them. | |||
401 | struct SpilledRegistersForInspection : public ZoneObject { | |||
402 | struct Entry { | |||
403 | int offset; | |||
404 | LiftoffRegister reg; | |||
405 | ValueKind kind; | |||
406 | }; | |||
407 | ZoneVector<Entry> entries; | |||
408 | ||||
409 | explicit SpilledRegistersForInspection(Zone* zone) : entries(zone) {} | |||
410 | }; | |||
411 | ||||
412 | struct OutOfLineSafepointInfo { | |||
413 | ZoneVector<int> slots; | |||
414 | LiftoffRegList spills; | |||
415 | ||||
416 | explicit OutOfLineSafepointInfo(Zone* zone) : slots(zone) {} | |||
417 | }; | |||
418 | ||||
419 | struct OutOfLineCode { | |||
420 | MovableLabel label; | |||
421 | MovableLabel continuation; | |||
422 | WasmCode::RuntimeStubId stub; | |||
423 | WasmCodePosition position; | |||
424 | LiftoffRegList regs_to_save; | |||
425 | Register cached_instance; | |||
426 | OutOfLineSafepointInfo* safepoint_info; | |||
427 | uint32_t pc; // for trap handler. | |||
428 | // These two pointers will only be used for debug code: | |||
429 | SpilledRegistersForInspection* spilled_registers; | |||
430 | DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder; | |||
431 | ||||
432 | // Named constructors: | |||
433 | static OutOfLineCode Trap( | |||
434 | WasmCode::RuntimeStubId s, WasmCodePosition pos, | |||
435 | SpilledRegistersForInspection* spilled_registers, | |||
436 | OutOfLineSafepointInfo* safepoint_info, uint32_t pc, | |||
437 | DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder) { | |||
438 | DCHECK_LT(0, pos); | |||
439 | return { | |||
440 | {}, // label | |||
441 | {}, // continuation | |||
442 | s, // stub | |||
443 | pos, // position | |||
444 | {}, // regs_to_save | |||
445 | no_reg, // cached_instance | |||
446 | safepoint_info, // safepoint_info | |||
447 | pc, // pc | |||
448 | spilled_registers, // spilled_registers | |||
449 | debug_sidetable_entry_builder // debug_side_table_entry_builder | |||
450 | }; | |||
451 | } | |||
452 | static OutOfLineCode StackCheck( | |||
453 | WasmCodePosition pos, LiftoffRegList regs_to_save, | |||
454 | Register cached_instance, SpilledRegistersForInspection* spilled_regs, | |||
455 | OutOfLineSafepointInfo* safepoint_info, | |||
456 | DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder) { | |||
457 | return { | |||
458 | {}, // label | |||
459 | {}, // continuation | |||
460 | WasmCode::kWasmStackGuard, // stub | |||
461 | pos, // position | |||
462 | regs_to_save, // regs_to_save | |||
463 | cached_instance, // cached_instance | |||
464 | safepoint_info, // safepoint_info | |||
465 | 0, // pc | |||
466 | spilled_regs, // spilled_registers | |||
467 | debug_sidetable_entry_builder // debug_side_table_entry_builder | |||
468 | }; | |||
469 | } | |||
470 | static OutOfLineCode TierupCheck( | |||
471 | WasmCodePosition pos, LiftoffRegList regs_to_save, | |||
472 | Register cached_instance, SpilledRegistersForInspection* spilled_regs, | |||
473 | OutOfLineSafepointInfo* safepoint_info, | |||
474 | DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder) { | |||
475 | return { | |||
476 | {}, // label | |||
477 | {}, // continuation | |||
478 | WasmCode::kWasmTriggerTierUp, // stub | |||
479 | pos, // position | |||
480 | regs_to_save, // regs_to_save | |||
481 | cached_instance, // cached_instance | |||
482 | safepoint_info, // safepoint_info | |||
483 | 0, // pc | |||
484 | spilled_regs, // spilled_registers | |||
485 | debug_sidetable_entry_builder // debug_side_table_entry_builder | |||
486 | }; | |||
487 | } | |||
488 | }; | |||
489 | ||||
490 | LiftoffCompiler(compiler::CallDescriptor* call_descriptor, | |||
491 | CompilationEnv* env, Zone* compilation_zone, | |||
492 | std::unique_ptr<AssemblerBuffer> buffer, | |||
493 | DebugSideTableBuilder* debug_sidetable_builder, | |||
494 | ForDebugging for_debugging, int func_index, | |||
495 | base::Vector<const int> breakpoints = {}, | |||
496 | int dead_breakpoint = 0, int32_t* max_steps = nullptr, | |||
497 | int32_t* nondeterminism = nullptr) | |||
498 | : asm_(std::move(buffer)), | |||
499 | descriptor_( | |||
500 | GetLoweredCallDescriptor(compilation_zone, call_descriptor)), | |||
501 | env_(env), | |||
502 | debug_sidetable_builder_(debug_sidetable_builder), | |||
503 | for_debugging_(for_debugging), | |||
504 | func_index_(func_index), | |||
505 | out_of_line_code_(compilation_zone), | |||
506 | source_position_table_builder_(compilation_zone), | |||
507 | protected_instructions_(compilation_zone), | |||
508 | compilation_zone_(compilation_zone), | |||
509 | safepoint_table_builder_(compilation_zone_), | |||
510 | next_breakpoint_ptr_(breakpoints.begin()), | |||
511 | next_breakpoint_end_(breakpoints.end()), | |||
512 | dead_breakpoint_(dead_breakpoint), | |||
513 | handlers_(compilation_zone), | |||
514 | max_steps_(max_steps), | |||
515 | nondeterminism_(nondeterminism) { | |||
516 | if (breakpoints.empty()) { | |||
517 | next_breakpoint_ptr_ = next_breakpoint_end_ = nullptr; | |||
518 | } | |||
519 | } | |||
520 | ||||
521 | bool did_bailout() const { return bailout_reason_ != kSuccess; } | |||
522 | LiftoffBailoutReason bailout_reason() const { return bailout_reason_; } | |||
523 | ||||
524 | void GetCode(CodeDesc* desc) { | |||
525 | asm_.GetCode(nullptr, desc, &safepoint_table_builder_, | |||
526 | handler_table_offset_); | |||
527 | } | |||
528 | ||||
529 | std::unique_ptr<AssemblerBuffer> ReleaseBuffer() { | |||
530 | return asm_.ReleaseBuffer(); | |||
531 | } | |||
532 | ||||
533 | base::OwnedVector<uint8_t> GetSourcePositionTable() { | |||
534 | return source_position_table_builder_.ToSourcePositionTableVector(); | |||
535 | } | |||
536 | ||||
537 | base::OwnedVector<uint8_t> GetProtectedInstructionsData() const { | |||
538 | return base::OwnedVector<uint8_t>::Of(base::Vector<const uint8_t>::cast( | |||
539 | base::VectorOf(protected_instructions_))); | |||
540 | } | |||
541 | ||||
542 | uint32_t GetTotalFrameSlotCountForGC() const { | |||
543 | return __ GetTotalFrameSlotCountForGC(); | |||
544 | } | |||
545 | ||||
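| // Two feedback vector slots are reserved per counted call instruction, hence | |||
| // the factor of two below. | |||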
546 | int GetFeedbackVectorSlots() const { | |||
547 | // The number of instructions is capped by max function size. | |||
548 | STATIC_ASSERT(kV8MaxWasmFunctionSize < std::numeric_limits<int>::max()); | |||
549 | return static_cast<int>(num_call_instructions_) * 2; | |||
550 | } | |||
551 | ||||
552 | void unsupported(FullDecoder* decoder, LiftoffBailoutReason reason, | |||
553 | const char* detail) { | |||
554 | DCHECK_NE(kSuccess, reason); | |||
555 | if (did_bailout()) return; | |||
556 | bailout_reason_ = reason; | |||
557 | TRACE("unsupported: %s\n", detail); | |||
558 | decoder->errorf(decoder->pc_offset(), "unsupported liftoff operation: %s", | |||
559 | detail); | |||
560 | UnuseLabels(decoder); | |||
561 | CheckBailoutAllowed(reason, detail, env_); | |||
562 | } | |||
563 | ||||
564 | bool DidAssemblerBailout(FullDecoder* decoder) { | |||
565 | if (decoder->failed() || !__ did_bailout()) return false; | |||
566 | unsupported(decoder, __ bailout_reason(), __ bailout_detail()); | |||
567 | return true; | |||
568 | } | |||
569 | ||||
570 | V8_INLINE bool CheckSupportedType(FullDecoder* decoder, ValueKind kind, | |||
571 | const char* context) { | |||
572 | if (V8_LIKELY(supported_types_.contains(kind))) return true; | |||
573 | return MaybeBailoutForUnsupportedType(decoder, kind, context); | |||
574 | } | |||
575 | ||||
576 | V8_NOINLINE bool MaybeBailoutForUnsupportedType(FullDecoder* decoder, | |||
577 | ValueKind kind, | |||
578 | const char* context) { | |||
579 | DCHECK(!supported_types_.contains(kind)); | |||
580 | ||||
581 | // Lazily update {supported_types_}; then check again. | |||
582 | if (CpuFeatures::SupportsWasmSimd128()) supported_types_.Add(kS128); | |||
583 | if (supported_types_.contains(kind)) return true; | |||
584 | ||||
585 | LiftoffBailoutReason bailout_reason; | |||
586 | switch (kind) { | |||
587 | case kS128: | |||
588 | bailout_reason = kMissingCPUFeature; | |||
589 | break; | |||
590 | case kRef: | |||
591 | case kOptRef: | |||
592 | case kRtt: | |||
593 | case kI8: | |||
594 | case kI16: | |||
595 | bailout_reason = kGC; | |||
596 | break; | |||
597 | default: | |||
598 | UNREACHABLE(); | |||
599 | } | |||
600 | base::EmbeddedVector<char, 128> buffer; | |||
601 | SNPrintF(buffer, "%s %s", name(kind), context); | |||
602 | unsupported(decoder, bailout_reason, buffer.begin()); | |||
603 | return false; | |||
604 | } | |||
605 | ||||
606 | void UnuseLabels(FullDecoder* decoder) { | |||
607 | #ifdef DEBUG | |||
608 | auto Unuse = [](Label* label) { | |||
609 | label->Unuse(); | |||
610 | label->UnuseNear(); | |||
611 | }; | |||
612 | // Unuse all labels now, otherwise their destructor will fire a DCHECK error | |||
613 | // if they were referenced before. | |||
614 | uint32_t control_depth = decoder ? decoder->control_depth() : 0; | |||
615 | for (uint32_t i = 0; i < control_depth; ++i) { | |||
616 | Control* c = decoder->control_at(i); | |||
617 | Unuse(c->label.get()); | |||
618 | if (c->else_state) Unuse(c->else_state->label.get()); | |||
619 | if (c->try_info != nullptr) Unuse(&c->try_info->catch_label); | |||
620 | } | |||
621 | for (auto& ool : out_of_line_code_) Unuse(ool.label.get()); | |||
622 | #endif | |||
623 | } | |||
624 | ||||
625 | void StartFunction(FullDecoder* decoder) { | |||
626 | if (FLAG_trace_liftoff && !FLAG_trace_wasm_decoder) { | |||
627 | StdoutStream{} << "hint: add --trace-wasm-decoder to also see the wasm " | |||
628 | "instructions being decoded\n"; | |||
629 | } | |||
630 | int num_locals = decoder->num_locals(); | |||
631 | __ set_num_locals(num_locals); | |||
632 | for (int i = 0; i < num_locals; ++i) { | |||
633 | ValueKind kind = decoder->local_type(i).kind(); | |||
634 | __ set_local_kind(i, kind); | |||
635 | } | |||
636 | } | |||
637 | ||||
638 | constexpr static LiftoffRegList RegsUnusedByParams() { | |||
639 | LiftoffRegList regs = kGpCacheRegList; | |||
640 | for (auto reg : kGpParamRegisters) { | |||
641 | regs.clear(reg); | |||
642 | } | |||
643 | return regs; | |||
644 | } | |||
645 | ||||
646 | // Returns the number of inputs processed (1 or 2). | |||
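| // (A kI64 parameter on a 32-bit target is split across two inputs and held in | |||
| // a GP register pair, hence 2.) | |||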
647 | uint32_t ProcessParameter(ValueKind kind, uint32_t input_idx) { | |||
648 | const bool needs_pair = needs_gp_reg_pair(kind); | |||
649 | const ValueKind reg_kind = needs_pair ? kI32 : kind; | |||
650 | const RegClass rc = reg_class_for(reg_kind); | |||
651 | ||||
652 | auto LoadToReg = [this, reg_kind, rc](compiler::LinkageLocation location, | |||
653 | LiftoffRegList pinned) { | |||
654 | if (location.IsRegister()) { | |||
655 | DCHECK(!location.IsAnyRegister()); | |||
656 | return LiftoffRegister::from_external_code(rc, reg_kind, | |||
657 | location.AsRegister()); | |||
658 | } | |||
659 | DCHECK(location.IsCallerFrameSlot()); | |||
660 | // For reference type parameters we have to use registers that were not | |||
661 | // used for parameters because some reference type stack parameters may | |||
662 | // get processed before some value type register parameters. | |||
663 | static constexpr auto kRegsUnusedByParams = RegsUnusedByParams(); | |||
664 | LiftoffRegister reg = is_reference(reg_kind) | |||
665 | ? __ GetUnusedRegister(kRegsUnusedByParams) | |||
666 | : __ GetUnusedRegister(rc, pinned); | |||
667 | __ LoadCallerFrameSlot(reg, -location.AsCallerFrameSlot(), reg_kind); | |||
668 | return reg; | |||
669 | }; | |||
670 | ||||
671 | LiftoffRegister reg = | |||
672 | LoadToReg(descriptor_->GetInputLocation(input_idx), {}); | |||
673 | if (needs_pair) { | |||
674 | LiftoffRegister reg2 = LoadToReg( | |||
675 | descriptor_->GetInputLocation(input_idx + 1), LiftoffRegList{reg}); | |||
676 | reg = LiftoffRegister::ForPair(reg.gp(), reg2.gp()); | |||
677 | } | |||
678 | __ PushRegister(kind, reg); | |||
679 | ||||
680 | return needs_pair ? 2 : 1; | |||
681 | } | |||
682 | ||||
683 | void StackCheck(FullDecoder* decoder, WasmCodePosition position) { | |||
684 | CODE_COMMENT("stack check"); | |||
685 | if (!FLAG_wasm_stack_checks || !env_->runtime_exception_support) return; | |||
686 | ||||
687 | // Loading the limit address can change the stack state, hence do this | |||
688 | // before storing information about registers. | |||
689 | Register limit_address = __ GetUnusedRegister(kGpReg, {}).gp(); | |||
690 | LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kSystemPointerSize, | |||
691 | {}); | |||
692 | ||||
693 | LiftoffRegList regs_to_save = __ cache_state()->used_registers; | |||
694 | // The cached instance will be reloaded separately. | |||
695 | if (__ cache_state()->cached_instance != no_reg) { | |||
696 | DCHECK(regs_to_save.has(__ cache_state()->cached_instance)); | |||
697 | regs_to_save.clear(__ cache_state()->cached_instance); | |||
698 | } | |||
699 | SpilledRegistersForInspection* spilled_regs = nullptr; | |||
700 | ||||
701 | OutOfLineSafepointInfo* safepoint_info = | |||
702 | compilation_zone_->New<OutOfLineSafepointInfo>(compilation_zone_); | |||
703 | __ cache_state()->GetTaggedSlotsForOOLCode( | |||
704 | &safepoint_info->slots, &safepoint_info->spills, | |||
705 | for_debugging_ | |||
706 | ? LiftoffAssembler::CacheState::SpillLocation::kStackSlots | |||
707 | : LiftoffAssembler::CacheState::SpillLocation::kTopOfStack); | |||
708 | if (V8_UNLIKELY(for_debugging_)) { | |||
709 | // When debugging, we do not just push all registers to the stack, but we | |||
710 | // spill them to their proper stack locations such that we can inspect | |||
711 | // them. | |||
712 | // The only exception is the cached memory start, which we just push | |||
713 | // before the stack check and pop afterwards. | |||
714 | regs_to_save = {}; | |||
715 | if (__ cache_state()->cached_mem_start != no_reg) { | |||
716 | regs_to_save.set(__ cache_state()->cached_mem_start); | |||
717 | } | |||
718 | spilled_regs = GetSpilledRegistersForInspection(); | |||
719 | } | |||
720 | out_of_line_code_.push_back(OutOfLineCode::StackCheck( | |||
721 | position, regs_to_save, __ cache_state()->cached_instance, spilled_regs, | |||
722 | safepoint_info, RegisterOOLDebugSideTableEntry(decoder))); | |||
723 | OutOfLineCode& ool = out_of_line_code_.back(); | |||
724 | __ StackCheck(ool.label.get(), limit_address); | |||
725 | __ bind(ool.continuation.get()); | |||
726 | } | |||
727 | ||||
728 | void TierupCheck(FullDecoder* decoder, WasmCodePosition position, | |||
729 | int budget_used) { | |||
730 | // We should always decrement the budget, and we don't expect integer | |||
731 | // overflows in the budget calculation. | |||
732 | DCHECK_LE(1, budget_used); | |||
733 | ||||
734 | if (for_debugging_ != kNoDebugging) return; | |||
735 | CODE_COMMENT("tierup check"); | |||
736 | // We never want to blow the entire budget at once. | |||
737 | const int kMax = FLAG_wasm_tiering_budget / 4; | |||
738 | if (budget_used > kMax) budget_used = kMax; | |||
739 | ||||
740 | LiftoffRegister budget_reg = __ GetUnusedRegister(kGpReg, {}); | |||
741 | __ Fill(budget_reg, liftoff::kTierupBudgetOffset, ValueKind::kI32); | |||
742 | LiftoffRegList regs_to_save = __ cache_state()->used_registers; | |||
743 | // The cached instance will be reloaded separately. | |||
744 | if (__ cache_state()->cached_instance != no_reg) { | |||
745 | DCHECK(regs_to_save.has(__ cache_state()->cached_instance)); | |||
746 | regs_to_save.clear(__ cache_state()->cached_instance); | |||
747 | } | |||
748 | SpilledRegistersForInspection* spilled_regs = nullptr; | |||
749 | ||||
750 | OutOfLineSafepointInfo* safepoint_info = | |||
751 | compilation_zone_->New<OutOfLineSafepointInfo>(compilation_zone_); | |||
752 | __ cache_state()->GetTaggedSlotsForOOLCode( | |||
753 | &safepoint_info->slots, &safepoint_info->spills, | |||
754 | LiftoffAssembler::CacheState::SpillLocation::kTopOfStack); | |||
755 | out_of_line_code_.push_back(OutOfLineCode::TierupCheck( | |||
756 | position, regs_to_save, __ cache_state()->cached_instance, spilled_regs, | |||
757 | safepoint_info, RegisterOOLDebugSideTableEntry(decoder))); | |||
758 | OutOfLineCode& ool = out_of_line_code_.back(); | |||
759 | __ emit_i32_subi_jump_negative(budget_reg.gp(), budget_used, | |||
760 | ool.label.get()); | |||
761 | __ Spill(liftoff::kTierupBudgetOffset, budget_reg, ValueKind::kI32); | |||
762 | __ bind(ool.continuation.get()); | |||
763 | } | |||
764 | ||||
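| // Decides whether locals beyond the parameters start out in stack slots | |||
| // (zero-filled in one pass) instead of as constants; any local that is not | |||
| // i32 or i64 forces spilling, since only integers can be constant-initialized. | |||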
765 | bool SpillLocalsInitially(FullDecoder* decoder, uint32_t num_params) { | |||
766 | int actual_locals = __ num_locals() - num_params; | |||
767 | DCHECK_LE(0, actual_locals); | |||
768 | constexpr int kNumCacheRegisters = kLiftoffAssemblerGpCacheRegs.Count(); | |||
769 | // If we have many locals, we put them on the stack initially. This avoids | |||
770 | // having to spill them on merge points. Use of these initial values should | |||
771 | // be rare anyway. | |||
772 | if (actual_locals > kNumCacheRegisters / 2) return true; | |||
773 | // If there are locals which are not i32 or i64, we also spill all locals, | |||
774 | // because other types cannot be initialized to constants. | |||
775 | for (uint32_t param_idx = num_params; param_idx < __ num_locals(); | |||
776 | ++param_idx) { | |||
777 | ValueKind kind = __ local_kind(param_idx); | |||
778 | if (kind != kI32 && kind != kI64) return true; | |||
779 | } | |||
780 | return false; | |||
781 | } | |||
782 | ||||
783 | void TraceFunctionEntry(FullDecoder* decoder) { | |||
784 | CODE_COMMENT("trace function entry"); | |||
785 | __ SpillAllRegisters(); | |||
786 | source_position_table_builder_.AddPosition( | |||
787 | __ pc_offset(), SourcePosition(decoder->position()), false); | |||
788 | __ CallRuntimeStub(WasmCode::kWasmTraceEnter); | |||
789 | DefineSafepoint(); | |||
790 | } | |||
791 | ||||
792 | bool dynamic_tiering() { | |||
793 | return env_->dynamic_tiering == DynamicTiering::kEnabled && | |||
794 | for_debugging_ == kNoDebugging && | |||
795 | (FLAG_wasm_tier_up_filter == -1 || | |||
796 | FLAG_wasm_tier_up_filter == func_index_); | |||
797 | } | |||
798 | ||||
799 | void StartFunctionBody(FullDecoder* decoder, Control* block) { | |||
800 | for (uint32_t i = 0; i < __ num_locals(); ++i) { | |||
801 | if (!CheckSupportedType(decoder, __ local_kind(i), "param")) return; | |||
802 | } | |||
803 | ||||
804 | // Parameter 0 is the instance parameter. | |||
805 | uint32_t num_params = | |||
806 | static_cast<uint32_t>(decoder->sig_->parameter_count()); | |||
807 | ||||
808 | __ CodeEntry(); | |||
809 | ||||
810 | __ EnterFrame(StackFrame::WASM); | |||
811 | __ set_has_frame(true); | |||
812 | pc_offset_stack_frame_construction_ = __ PrepareStackFrame(); | |||
813 | // {PrepareStackFrame} is the first platform-specific assembler method. | |||
814 | // If this failed, we can bail out immediately, avoiding runtime overhead | |||
815 | // and potential failures because of other unimplemented methods. | |||
816 | // A platform implementing {PrepareStackFrame} must ensure that we can | |||
817 | // finish compilation without errors even if we hit unimplemented | |||
818 | // LiftoffAssembler methods. | |||
819 | if (DidAssemblerBailout(decoder)) return; | |||
820 | ||||
821 | // Input 0 is the call target, the instance is at 1. | |||
822 | constexpr int kInstanceParameterIndex = 1; | |||
823 | // Check that {kWasmInstanceRegister} matches our call descriptor. | |||
824 | DCHECK_EQ(kWasmInstanceRegister, | |||
825 | Register::from_code( | |||
826 | descriptor_->GetInputLocation(kInstanceParameterIndex) | |||
827 | .AsRegister())); | |||
828 | __ cache_state()->SetInstanceCacheRegister(kWasmInstanceRegister); | |||
829 | // Load the feedback vector and cache it in a stack slot. | |||
830 | constexpr LiftoffRegList kGpParamRegisters = GetGpParamRegisters(); | |||
831 | if (FLAG_wasm_speculative_inlining) { | |||
832 | CODE_COMMENT("load feedback vector"); | |||
833 | int declared_func_index = | |||
834 | func_index_ - env_->module->num_imported_functions; | |||
835 | DCHECK_GE(declared_func_index, 0); | |||
836 | LiftoffRegList pinned = kGpParamRegisters; | |||
837 | LiftoffRegister tmp = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
838 | __ LoadTaggedPointerFromInstance( | |||
839 | tmp.gp(), kWasmInstanceRegister, | |||
840 | WASM_INSTANCE_OBJECT_FIELD_OFFSET(FeedbackVectors)); | |||
841 | __ LoadTaggedPointer(tmp.gp(), tmp.gp(), no_reg, | |||
842 | wasm::ObjectAccess::ElementOffsetInTaggedFixedArray( | |||
843 | declared_func_index), | |||
844 | pinned); | |||
845 | __ Spill(liftoff::kFeedbackVectorOffset, tmp, kPointerKind); | |||
846 | } | |||
847 | if (dynamic_tiering()) { | |||
848 | CODE_COMMENT("load tier up budget"); | |||
849 | LiftoffRegList pinned = kGpParamRegisters; | |||
850 | LiftoffRegister tmp = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
851 | LOAD_INSTANCE_FIELD(tmp.gp(), TieringBudgetArray, kSystemPointerSize, | |||
852 | pinned); | |||
853 | uint32_t offset = | |||
854 | kInt32Size * declared_function_index(env_->module, func_index_); | |||
855 | __ Load(tmp, tmp.gp(), no_reg, offset, LoadType::kI32Load, pinned); | |||
856 | __ Spill(liftoff::kTierupBudgetOffset, tmp, ValueKind::kI32); | |||
857 | } | |||
858 | if (for_debugging_) __ ResetOSRTarget(); | |||
859 | ||||
860 | // Process parameters. | |||
861 | if (num_params) CODE_COMMENT("process parameters"); | |||
862 | // Input 0 is the code target, 1 is the instance. First parameter at 2. | |||
863 | uint32_t input_idx = kInstanceParameterIndex + 1; | |||
864 | for (uint32_t param_idx = 0; param_idx < num_params; ++param_idx) { | |||
865 | input_idx += ProcessParameter(__ local_kind(param_idx), input_idx); | |||
866 | } | |||
867 | int params_size = __ TopSpillOffset(); | |||
868 | DCHECK_EQ(input_idx, descriptor_->InputCount()); | |||
869 | ||||
870 | // Initialize locals beyond parameters. | |||
871 | if (num_params < __ num_locals()) CODE_COMMENT("init locals"); | |||
872 | if (SpillLocalsInitially(decoder, num_params)) { | |||
873 | bool has_refs = false; | |||
874 | for (uint32_t param_idx = num_params; param_idx < __ num_locals(); | |||
875 | ++param_idx) { | |||
876 | ValueKind kind = __ local_kind(param_idx); | |||
877 | has_refs |= is_reference(kind); | |||
878 | __ PushStack(kind); | |||
879 | } | |||
880 | int spill_size = __ TopSpillOffset() - params_size; | |||
881 | __ FillStackSlotsWithZero(params_size, spill_size); | |||
882 | ||||
883 | // Initialize all reference type locals with ref.null. | |||
884 | if (has_refs) { | |||
885 | Register null_ref_reg = __ GetUnusedRegister(kGpReg, {}).gp(); | |||
886 | LoadNullValue(null_ref_reg, {}); | |||
887 | for (uint32_t local_index = num_params; local_index < __ num_locals(); | |||
888 | ++local_index) { | |||
889 | ValueKind kind = __ local_kind(local_index); | |||
890 | if (is_reference(kind)) { | |||
891 | __ Spill(__ cache_state()->stack_state[local_index].offset(), | |||
892 | LiftoffRegister(null_ref_reg), kind); | |||
893 | } | |||
894 | } | |||
895 | } | |||
896 | } else { | |||
897 | for (uint32_t param_idx = num_params; param_idx < __ num_locals(); | |||
898 | ++param_idx) { | |||
899 | ValueKind kind = __ local_kind(param_idx); | |||
900 | // Anything which is not i32 or i64 requires spilling. | |||
901 | DCHECK(kind == kI32 || kind == kI64); | |||
902 | __ PushConstant(kind, int32_t{0}); | |||
903 | } | |||
904 | } | |||
905 | ||||
906 | DCHECK_EQ(__ num_locals(), __ cache_state()->stack_height()); | |||
907 | ||||
908 | if (V8_UNLIKELY(debug_sidetable_builder_)) { | |||
909 | debug_sidetable_builder_->SetNumLocals(__ num_locals()); | |||
910 | } | |||
911 | ||||
912 | // The function-prologue stack check is associated with position 0, which | |||
913 | // is never a position of any instruction in the function. | |||
914 | StackCheck(decoder, 0); | |||
915 | ||||
916 | if (FLAG_trace_wasm) TraceFunctionEntry(decoder); | |||
917 | } | |||
918 | ||||
919 | void GenerateOutOfLineCode(OutOfLineCode* ool) { | |||
920 | CODE_COMMENT( | |||
921 | (std::string("OOL: ") + GetRuntimeStubName(ool->stub)).c_str()); | |||
922 | __ bind(ool->label.get()); | |||
923 | const bool is_stack_check = ool->stub == WasmCode::kWasmStackGuard; | |||
924 | const bool is_tierup = ool->stub == WasmCode::kWasmTriggerTierUp; | |||
925 | ||||
926 | // Only memory OOB traps need a {pc}, but not unconditionally. Static OOB | |||
927 | // accesses do not need protected instruction information, hence they also | |||
928 | // do not set {pc}. | |||
929 | DCHECK_IMPLIES(ool->stub != WasmCode::kThrowWasmTrapMemOutOfBounds, | |||
930 | ool->pc == 0); | |||
931 | ||||
932 | if (env_->bounds_checks == kTrapHandler && ool->pc != 0) { | |||
933 | uint32_t pc = static_cast<uint32_t>(__ pc_offset()); | |||
934 | DCHECK_EQ(pc, __ pc_offset()); | |||
935 | protected_instructions_.emplace_back( | |||
936 | trap_handler::ProtectedInstructionData{ool->pc, pc}); | |||
937 | } | |||
938 | ||||
939 | if (!env_->runtime_exception_support) { | |||
940 | // We cannot test calls to the runtime in cctest/test-run-wasm. | |||
941 | // Therefore we emit a call to C here instead of a call to the runtime. | |||
942 | // In this mode, we never generate stack checks. | |||
943 | DCHECK(!is_stack_check); | |||
944 | __ CallTrapCallbackForTesting(); | |||
945 | __ LeaveFrame(StackFrame::WASM); | |||
946 | __ DropStackSlotsAndRet( | |||
947 | static_cast<uint32_t>(descriptor_->ParameterSlotCount())); | |||
948 | return; | |||
949 | } | |||
950 | ||||
951 | if (!ool->regs_to_save.is_empty()) { | |||
952 | __ PushRegisters(ool->regs_to_save); | |||
953 | } | |||
954 | if (V8_UNLIKELY(ool->spilled_registers != nullptr)) { | |||
955 | for (auto& entry : ool->spilled_registers->entries) { | |||
956 | // We should not push and spill the same register. | |||
957 | DCHECK(!ool->regs_to_save.has(entry.reg)); | |||
958 | __ Spill(entry.offset, entry.reg, entry.kind); | |||
959 | } | |||
960 | } | |||
961 | ||||
962 | source_position_table_builder_.AddPosition( | |||
963 | __ pc_offset(), SourcePosition(ool->position), true); | |||
964 | __ CallRuntimeStub(ool->stub); | |||
965 | auto safepoint = safepoint_table_builder_.DefineSafepoint(&asm_); | |||
966 | ||||
967 | if (ool->safepoint_info) { | |||
968 | for (auto index : ool->safepoint_info->slots) { | |||
969 | safepoint.DefineTaggedStackSlot(index); | |||
970 | } | |||
971 | ||||
972 | int total_frame_size = __ GetTotalFrameSize(); | |||
973 | LiftoffRegList gp_regs = ool->regs_to_save & kGpCacheRegList; | |||
974 | // {total_frame_size} is the highest offset from the FP that is used to | |||
975 | // store a value. The offset of the first spill slot should therefore be | |||
976 | // {(total_frame_size / kSystemPointerSize) + 1}. However, spill slots | |||
977 | // don't start at offset '0' but at offset '-1' (or | |||
978 | // {-kSystemPointerSize}). Therefore we have to add another '+ 1' to the | |||
979 | // index of the first spill slot. | |||
980 | int index = (total_frame_size / kSystemPointerSize) + 2; | |||
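| // For example, with total_frame_size == 4 * kSystemPointerSize, the first | |||
| // spilled register is recorded at safepoint index 6. | |||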
981 | ||||
982 | __ RecordSpillsInSafepoint(safepoint, gp_regs, | |||
983 | ool->safepoint_info->spills, index); | |||
984 | } | |||
985 | if (is_tierup) { | |||
986 | // Reset the budget. | |||
987 | __ Spill(liftoff::kTierupBudgetOffset, | |||
988 | WasmValue(FLAG_wasm_tiering_budget)); | |||
989 | } | |||
990 | ||||
991 | DCHECK_EQ(!debug_sidetable_builder_, !ool->debug_sidetable_entry_builder); | |||
992 | if (V8_UNLIKELY(ool->debug_sidetable_entry_builder)) { | |||
993 | ool->debug_sidetable_entry_builder->set_pc_offset(__ pc_offset()); | |||
994 | } | |||
995 | DCHECK_EQ(ool->continuation.get()->is_bound(), is_stack_check || is_tierup); | |||
996 | if (is_stack_check) { | |||
997 | MaybeOSR(); | |||
998 | } | |||
999 | if (!ool->regs_to_save.is_empty()) __ PopRegisters(ool->regs_to_save); | |||
1000 | if (is_stack_check || is_tierup) { | |||
1001 | if (V8_UNLIKELY(ool->spilled_registers != nullptr)) { | |||
1002 | DCHECK(for_debugging_); | |||
1003 | for (auto& entry : ool->spilled_registers->entries) { | |||
1004 | __ Fill(entry.reg, entry.offset, entry.kind); | |||
1005 | } | |||
1006 | } | |||
1007 | if (ool->cached_instance != no_reg) { | |||
1008 | __ LoadInstanceFromFrame(ool->cached_instance); | |||
1009 | } | |||
1010 | __ emit_jump(ool->continuation.get()); | |||
1011 | } else { | |||
1012 | __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap); | |||
1013 | } | |||
1014 | } | |||
1015 | ||||
1016 | void FinishFunction(FullDecoder* decoder) { | |||
1017 | if (DidAssemblerBailout(decoder)) return; | |||
1018 | __ AlignFrameSize(); | |||
1019 | #if DEBUG | |||
1020 | int frame_size = __ GetTotalFrameSize(); | |||
1021 | #endif | |||
1022 | for (OutOfLineCode& ool : out_of_line_code_) { | |||
1023 | GenerateOutOfLineCode(&ool); | |||
1024 | } | |||
1025 | DCHECK_EQ(frame_size, __ GetTotalFrameSize()); | |||
1026 | __ PatchPrepareStackFrame(pc_offset_stack_frame_construction_, | |||
1027 | &safepoint_table_builder_); | |||
1028 | __ FinishCode(); | |||
1029 | safepoint_table_builder_.Emit(&asm_, __ GetTotalFrameSlotCountForGC()); | |||
1030 | // Emit the handler table. | |||
1031 | if (!handlers_.empty()) { | |||
1032 | handler_table_offset_ = HandlerTable::EmitReturnTableStart(&asm_); | |||
1033 | for (auto& handler : handlers_) { | |||
1034 | HandlerTable::EmitReturnEntry(&asm_, handler.pc_offset, | |||
1035 | handler.handler.get()->pos()); | |||
1036 | } | |||
1037 | } | |||
1038 | __ MaybeEmitOutOfLineConstantPool(); | |||
1039 | // The previous calls may have also generated a bailout. | |||
1040 | DidAssemblerBailout(decoder); | |||
1041 | DCHECK_EQ(num_exceptions_, 0); | |||
1042 | } | |||
1043 | ||||
1044 | void OnFirstError(FullDecoder* decoder) { | |||
1045 | if (!did_bailout()) bailout_reason_ = kDecodeError; | |||
1046 | UnuseLabels(decoder); | |||
1047 | asm_.AbortCompilation(); | |||
1048 | } | |||
1049 | ||||
1050 | V8_NOINLINE void EmitDebuggingInfo(FullDecoder* decoder, WasmOpcode opcode) { | |||
1051 | DCHECK(for_debugging_); | |||
1052 | if (!WasmOpcodes::IsBreakable(opcode)) return; | |||
1053 | bool has_breakpoint = false; | |||
1054 | if (next_breakpoint_ptr_) { | |||
1055 | if (*next_breakpoint_ptr_ == 0) { | |||
1056 | // A single breakpoint at offset 0 indicates stepping. | |||
1057 | DCHECK_EQ(next_breakpoint_ptr_ + 1, next_breakpoint_end_); | |||
1058 | has_breakpoint = true; | |||
1059 | } else { | |||
1060 | while (next_breakpoint_ptr_ != next_breakpoint_end_ && | |||
1061 | *next_breakpoint_ptr_ < decoder->position()) { | |||
1062 | // Skip unreachable breakpoints. | |||
1063 | ++next_breakpoint_ptr_; | |||
1064 | } | |||
1065 | if (next_breakpoint_ptr_ == next_breakpoint_end_) { | |||
1066 | next_breakpoint_ptr_ = next_breakpoint_end_ = nullptr; | |||
1067 | } else if (*next_breakpoint_ptr_ == decoder->position()) { | |||
1068 | has_breakpoint = true; | |||
1069 | } | |||
1070 | } | |||
1071 | } | |||
1072 | if (has_breakpoint) { | |||
1073 | CODE_COMMENT("breakpoint"); | |||
1074 | EmitBreakpoint(decoder); | |||
1075 | // Once we emitted an unconditional breakpoint, we don't need to check | |||
1076 | // function entry breaks any more. | |||
1077 | did_function_entry_break_checks_ = true; | |||
1078 | } else if (!did_function_entry_break_checks_) { | |||
1079 | did_function_entry_break_checks_ = true; | |||
1080 | CODE_COMMENT("check function entry break"); | |||
1081 | Label do_break; | |||
1082 | Label no_break; | |||
1083 | Register flag = __ GetUnusedRegister(kGpReg, {}).gp(); | |||
1084 | ||||
1085 | // Check the "hook on function call" flag. If set, trigger a break. | |||
1086 | LOAD_INSTANCE_FIELD(flag, HookOnFunctionCallAddress, kSystemPointerSize, | |||
1087 | {}); | |||
1088 | __ Load(LiftoffRegister{flag}, flag, no_reg, 0, LoadType::kI32Load8U, {}); | |||
1089 | __ emit_cond_jump(kNotEqualZero, &do_break, kI32, flag); | |||
1090 | ||||
1091 | // Check if we should stop on "script entry". | |||
1092 | LOAD_INSTANCE_FIELD(flag, BreakOnEntry, kUInt8Size, {}); | |||
1093 | __ emit_cond_jump(kEqualZero, &no_break, kI32, flag); | |||
1094 | ||||
1095 | __ bind(&do_break); | |||
1096 | EmitBreakpoint(decoder); | |||
1097 | __ bind(&no_break); | |||
1098 | } else if (dead_breakpoint_ == decoder->position()) { | |||
1099 | DCHECK(!next_breakpoint_ptr_ || | |||
1100 | *next_breakpoint_ptr_ != dead_breakpoint_); | |||
1101 | // The top frame is paused at this position, but the breakpoint was | |||
1102 | // removed. Adding a dead breakpoint here ensures that the source | |||
1103 | // position exists, and that the offset to the return address is the | |||
1104 | // same as in the old code. | |||
1105 | CODE_COMMENT("dead breakpoint"); | |||
1106 | Label cont; | |||
1107 | __ emit_jump(&cont); | |||
1108 | EmitBreakpoint(decoder); | |||
1109 | __ bind(&cont); | |||
1110 | } | |||
1111 | if (V8_UNLIKELY(max_steps_ != nullptr)) { | |||
1112 | CODE_COMMENT("check max steps"); | |||
1113 | LiftoffRegList pinned; | |||
1114 | LiftoffRegister max_steps = __ GetUnusedRegister(kGpReg, {}); | |||
1115 | pinned.set(max_steps); | |||
1116 | LiftoffRegister max_steps_addr = __ GetUnusedRegister(kGpReg, pinned); | |||
1117 | pinned.set(max_steps_addr); | |||
1118 | __ LoadConstant( | |||
1119 | max_steps_addr, | |||
1120 | WasmValue::ForUintPtr(reinterpret_cast<uintptr_t>(max_steps_))); | |||
1121 | __ Load(max_steps, max_steps_addr.gp(), no_reg, 0, LoadType::kI32Load, | |||
1122 | pinned); | |||
1123 | Label cont; | |||
1124 | __ emit_i32_cond_jumpi(kUnequal, &cont, max_steps.gp(), 0); | |||
1125 | // Abort. | |||
1126 | Trap(decoder, kTrapUnreachable); | |||
1127 | __ bind(&cont); | |||
1128 | __ emit_i32_subi(max_steps.gp(), max_steps.gp(), 1); | |||
1129 | __ Store(max_steps_addr.gp(), no_reg, 0, max_steps, StoreType::kI32Store, | |||
1130 | pinned); | |||
1131 | } | |||
1132 | } | |||
1133 | ||||
1134 | void NextInstruction(FullDecoder* decoder, WasmOpcode opcode) { | |||
1135 | // Add a single check, so that the fast path can be inlined while | |||
1136 | // {EmitDebuggingInfo} stays outlined. | |||
1137 | if (V8_UNLIKELY(for_debugging_)) EmitDebuggingInfo(decoder, opcode); | |||
1138 | TraceCacheState(decoder); | |||
1139 | SLOW_DCHECK(__ ValidateCacheState()); | |||
1140 | CODE_COMMENT(WasmOpcodes::OpcodeName( | |||
1141 | WasmOpcodes::IsPrefixOpcode(opcode) | |||
1142 | ? decoder->read_prefixed_opcode<Decoder::kFullValidation>( | |||
1143 | decoder->pc()) | |||
1144 | : opcode)); | |||
1145 | } | |||
1146 | ||||
1147 | void EmitBreakpoint(FullDecoder* decoder) { | |||
1148 | DCHECK(for_debugging_); | |||
1149 | source_position_table_builder_.AddPosition( | |||
1150 | __ pc_offset(), SourcePosition(decoder->position()), true); | |||
1151 | __ CallRuntimeStub(WasmCode::kWasmDebugBreak); | |||
1152 | DefineSafepointWithCalleeSavedRegisters(); | |||
1153 | RegisterDebugSideTableEntry(decoder, | |||
1154 | DebugSideTableBuilder::kAllowRegisters); | |||
1155 | MaybeOSR(); | |||
1156 | } | |||
1157 | ||||
1158 | void PushControl(Control* block) { | |||
1159 | // The Liftoff stack includes implicit exception refs stored for catch | |||
1160 | // blocks, so that they can be rethrown. | |||
1161 | block->num_exceptions = num_exceptions_; | |||
1162 | } | |||
1163 | ||||
1164 | void Block(FullDecoder* decoder, Control* block) { PushControl(block); } | |||
1165 | ||||
1166 | void Loop(FullDecoder* decoder, Control* loop) { | |||
1167 | // Before entering a loop, spill all locals to the stack, in order to free | |||
1168 | // the cache registers, and to avoid unnecessarily reloading stack values | |||
1169 | // into registers at branches. | |||
1170 | // TODO(clemensb): Come up with a better strategy here, involving | |||
1171 | // pre-analysis of the function. | |||
1172 | __ SpillLocals(); | |||
1173 | ||||
1174 | __ PrepareLoopArgs(loop->start_merge.arity); | |||
1175 | ||||
1176 | // Loop labels bind at the beginning of the block. | |||
1177 | __ bind(loop->label.get()); | |||
1178 | ||||
1179 | // Save the current cache state for the merge when jumping to this loop. | |||
1180 | loop->label_state.Split(*__ cache_state()); | |||
1181 | ||||
1182 | PushControl(loop); | |||
1183 | ||||
1184 | if (!dynamic_tiering()) { | |||
1185 | // When the budget-based tiering mechanism is enabled, use that to | |||
1186 | // check for interrupt requests; otherwise execute a stack check in the | |||
1187 | // loop header. | |||
1188 | StackCheck(decoder, decoder->position()); | |||
1189 | } | |||
1190 | } | |||
1191 | ||||
1192 | void Try(FullDecoder* decoder, Control* block) { | |||
1193 | block->try_info = std::make_unique<TryInfo>(); | |||
1194 | PushControl(block); | |||
1195 | } | |||
1196 | ||||
1197 | // Load the property in {kReturnRegister0}. | |||
1198 | LiftoffRegister GetExceptionProperty(LiftoffAssembler::VarState& exception, | |||
1199 | RootIndex root_index) { | |||
1200 | DCHECK(root_index == RootIndex::kwasm_exception_tag_symbol || | |||
1201 | root_index == RootIndex::kwasm_exception_values_symbol); | |||
1202 | ||||
1203 | LiftoffRegList pinned; | |||
1204 | LiftoffRegister tag_symbol_reg = | |||
1205 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
1206 | LoadExceptionSymbol(tag_symbol_reg.gp(), pinned, root_index); | |||
1207 | LiftoffRegister context_reg = | |||
1208 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
1209 | LOAD_TAGGED_PTR_INSTANCE_FIELD(context_reg.gp(), NativeContext, pinned); | |||
1210 | ||||
1211 | LiftoffAssembler::VarState tag_symbol(kPointerKind, tag_symbol_reg, 0); | |||
1212 | LiftoffAssembler::VarState context(kPointerKind, context_reg, 0); | |||
1213 | ||||
1214 | CallRuntimeStub(WasmCode::kWasmGetOwnProperty, | |||
1215 | MakeSig::Returns(kPointerKind) | |||
1216 | .Params(kPointerKind, kPointerKind, kPointerKind), | |||
1217 | {exception, tag_symbol, context}, kNoSourcePosition); | |||
1218 | ||||
1219 | return LiftoffRegister(kReturnRegister0); | |||
1220 | } | |||
1221 | ||||
1222 | void CatchException(FullDecoder* decoder, | |||
1223 | const TagIndexImmediate<validate>& imm, Control* block, | |||
1224 | base::Vector<Value> values) { | |||
1225 | DCHECK(block->is_try_catch()); | |||
1226 | __ emit_jump(block->label.get()); | |||
1227 | ||||
1228 | // The catch block is unreachable if nothing in the try block can possibly | |||
1229 | // throw; we only build a landing pad if some instruction in the try block | |||
1230 | // can throw. Otherwise the catch environments remain empty. | |||
1231 | if (!block->try_info->catch_reached) { | |||
1232 | block->reachability = kSpecOnlyReachable; | |||
1233 | return; | |||
1234 | } | |||
1235 | ||||
1236 | // This is the last use of this label. Re-use the field for the label of the | |||
1237 | // next catch block, and jump there if the tag does not match. | |||
1238 | __ bind(&block->try_info->catch_label); | |||
1239 | new (&block->try_info->catch_label) Label(); | |||
1240 | ||||
1241 | __ cache_state()->Split(block->try_info->catch_state); | |||
1242 | ||||
1243 | CODE_COMMENT("load caught exception tag"); | |||
1244 | DCHECK_EQ(__ cache_state()->stack_state.back().kind(), kRef); | |||
1245 | LiftoffRegister caught_tag = | |||
1246 | GetExceptionProperty(__ cache_state()->stack_state.back(), | |||
1247 | RootIndex::kwasm_exception_tag_symbol); | |||
1248 | LiftoffRegList pinned; | |||
1249 | pinned.set(caught_tag); | |||
1250 | ||||
1251 | CODE_COMMENT("load expected exception tag"); | |||
1252 | Register imm_tag = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); | |||
1253 | LOAD_TAGGED_PTR_INSTANCE_FIELD(imm_tag, TagsTable, pinned); | |||
1254 | __ LoadTaggedPointer( | |||
1255 | imm_tag, imm_tag, no_reg, | |||
1256 | wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), {}); | |||
1257 | ||||
1258 | CODE_COMMENT("compare tags"); | |||
1259 | Label caught; | |||
1260 | __ emit_cond_jump(kEqual, &caught, kI32, imm_tag, caught_tag.gp()); | |||
1261 | // The tags don't match, merge the current state into the catch state and | |||
1262 | // jump to the next handler. | |||
1263 | __ MergeFullStackWith(block->try_info->catch_state, *__ cache_state()); | |||
1264 | __ emit_jump(&block->try_info->catch_label); | |||
1265 | ||||
1266 | __ bind(&caught); | |||
1267 | if (!block->try_info->in_handler) { | |||
1268 | block->try_info->in_handler = true; | |||
1269 | num_exceptions_++; | |||
1270 | } | |||
1271 | GetExceptionValues(decoder, __ cache_state()->stack_state.back(), imm.tag); | |||
1272 | } | |||
1273 | ||||
1274 | void Rethrow(FullDecoder* decoder, | |||
1275 | const LiftoffAssembler::VarState& exception) { | |||
1276 | DCHECK_EQ(exception.kind(), kRef); | |||
1277 | CallRuntimeStub(WasmCode::kWasmRethrow, MakeSig::Params(kPointerKind), | |||
1278 | {exception}, decoder->position()); | |||
1279 | } | |||
1280 | ||||
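| // delegate: route exceptions caught in {block} to the try block at {depth}. | |||
| // Delegating past the outermost control rethrows to the caller instead of | |||
| // emitting a landing pad. | |||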
1281 | void Delegate(FullDecoder* decoder, uint32_t depth, Control* block) { | |||
1282 | DCHECK_EQ(block, decoder->control_at(0)); | |||
1283 | Control* target = decoder->control_at(depth); | |||
1284 | DCHECK(block->is_incomplete_try()); | |||
1285 | __ bind(&block->try_info->catch_label); | |||
1286 | if (block->try_info->catch_reached) { | |||
1287 | __ cache_state()->Steal(block->try_info->catch_state); | |||
1288 | if (depth == decoder->control_depth() - 1) { | |||
1289 | // Delegate to the caller, do not emit a landing pad. | |||
1290 | Rethrow(decoder, __ cache_state()->stack_state.back()); | |||
1291 | MaybeOSR(); | |||
1292 | } else { | |||
1293 | DCHECK(target->is_incomplete_try()); | |||
1294 | if (!target->try_info->catch_reached) { | |||
1295 | target->try_info->catch_state.InitMerge( | |||
1296 | *__ cache_state(), __ num_locals(), 1, | |||
1297 | target->stack_depth + target->num_exceptions); | |||
1298 | target->try_info->catch_reached = true; | |||
1299 | } | |||
1300 | __ MergeStackWith(target->try_info->catch_state, 1, | |||
1301 | LiftoffAssembler::kForwardJump); | |||
1302 | __ emit_jump(&target->try_info->catch_label); | |||
1303 | } | |||
1304 | } | |||
1305 | } | |||
1306 | ||||
1307 | void Rethrow(FullDecoder* decoder, Control* try_block) { | |||
1308 | int index = try_block->try_info->catch_state.stack_height() - 1; | |||
1309 | auto& exception = __ cache_state()->stack_state[index]; | |||
1310 | Rethrow(decoder, exception); | |||
1311 | int pc_offset = __ pc_offset(); | |||
1312 | MaybeOSR(); | |||
1313 | EmitLandingPad(decoder, pc_offset); | |||
1314 | } | |||
1315 | ||||
1316 | void CatchAll(FullDecoder* decoder, Control* block) { | |||
1317 | DCHECK(block->is_try_catchall() || block->is_try_catch()); | |||
1318 | DCHECK_EQ(decoder->control_at(0), block); | |||
1319 | ||||
1320 | // The catch block is unreachable if nothing in the try block can possibly | |||
1321 | // throw; we only build a landing pad if some instruction in the try block | |||
1322 | // can throw. Otherwise the catch environments remain empty. | |||
1323 | if (!block->try_info->catch_reached) { | |||
1324 | decoder->SetSucceedingCodeDynamicallyUnreachable(); | |||
1325 | return; | |||
1326 | } | |||
1327 | ||||
1328 | __ bind(&block->try_info->catch_label); | |||
1329 | __ cache_state()->Steal(block->try_info->catch_state); | |||
1330 | if (!block->try_info->in_handler) { | |||
1331 | block->try_info->in_handler = true; | |||
1332 | num_exceptions_++; | |||
1333 | } | |||
1334 | } | |||
1335 | ||||
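| // Emit a branch to {false_dst} that is taken when the i32 condition on top | |||
| // of the stack is zero. A pending i32 comparison or i32.eqz (recorded in | |||
| // {outstanding_op_}) is fused into the branch instead of materializing a | |||
| // boolean value first. | |||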
1336 | void JumpIfFalse(FullDecoder* decoder, Label* false_dst) { | |||
1337 | LiftoffCondition cond = | |||
1338 | test_and_reset_outstanding_op(kExprI32Eqz) ? kNotEqualZero : kEqualZero; | |||
1339 | ||||
1340 | if (!has_outstanding_op()) { | |||
1341 | // Unary comparison. | |||
1342 | Register value = __ PopToRegister().gp(); | |||
1343 | __ emit_cond_jump(cond, false_dst, kI32, value); | |||
1344 | return; | |||
1345 | } | |||
1346 | ||||
1347 | // Binary comparison of i32 values. | |||
1348 | cond = Negate(GetCompareCondition(outstanding_op_)); | |||
1349 | outstanding_op_ = kNoOutstandingOp; | |||
1350 | LiftoffAssembler::VarState rhs_slot = __ cache_state()->stack_state.back(); | |||
1351 | if (rhs_slot.is_const()) { | |||
1352 | // Compare to a constant. | |||
1353 | int32_t rhs_imm = rhs_slot.i32_const(); | |||
1354 | __ cache_state()->stack_state.pop_back(); | |||
1355 | Register lhs = __ PopToRegister().gp(); | |||
1356 | __ emit_i32_cond_jumpi(cond, false_dst, lhs, rhs_imm); | |||
1357 | return; | |||
1358 | } | |||
1359 | ||||
1360 | Register rhs = __ PopToRegister().gp(); | |||
1361 | LiftoffAssembler::VarState lhs_slot = __ cache_state()->stack_state.back(); | |||
1362 | if (lhs_slot.is_const()) { | |||
1363 | // Compare a constant to an arbitrary value. | |||
1364 | int32_t lhs_imm = lhs_slot.i32_const(); | |||
1365 | __ cache_state()->stack_state.pop_back(); | |||
1366 | // Flip the condition, because {lhs} and {rhs} are swapped. | |||
1367 | __ emit_i32_cond_jumpi(Flip(cond), false_dst, rhs, lhs_imm); | |||
1368 | return; | |||
1369 | } | |||
1370 | ||||
1371 | // Compare two arbitrary values. | |||
1372 | Register lhs = __ PopToRegister(LiftoffRegList{rhs}).gp(); | |||
1373 | __ emit_cond_jump(cond, false_dst, kI32, lhs, rhs); | |||
1374 | } | |||
1375 | ||||
1376 | void If(FullDecoder* decoder, const Value& cond, Control* if_block) { | |||
1377 | DCHECK_EQ(if_block, decoder->control_at(0)); | |||
1378 | DCHECK(if_block->is_if()); | |||
1379 | ||||
1380 | // Allocate the else state. | |||
1381 | if_block->else_state = std::make_unique<ElseState>(); | |||
1382 | ||||
1383 | // Test the condition on the value stack, jump to else if zero. | |||
1384 | JumpIfFalse(decoder, if_block->else_state->label.get()); | |||
1385 | ||||
1386 | // Store the state (after popping the value) for executing the else branch. | |||
1387 | if_block->else_state->state.Split(*__ cache_state()); | |||
1388 | ||||
1389 | PushControl(if_block); | |||
1390 | } | |||
1391 | ||||
1392 | void FallThruTo(FullDecoder* decoder, Control* c) { | |||
1393 | if (!c->end_merge.reached) { | |||
1394 | c->label_state.InitMerge(*__ cache_state(), __ num_locals(), | |||
1395 | c->end_merge.arity, | |||
1396 | c->stack_depth + c->num_exceptions); | |||
1397 | } | |||
1398 | DCHECK(!c->is_try_catchall()); | |||
1399 | if (c->is_try_catch()) { | |||
1400 | // Drop the implicit exception ref if any. There may be none if this is a | |||
1401 | // catch-less try block. | |||
1402 | __ MergeStackWith(c->label_state, c->br_merge()->arity, | |||
1403 | LiftoffAssembler::kForwardJump); | |||
1404 | } else { | |||
1405 | __ MergeFullStackWith(c->label_state, *__ cache_state()); | |||
1406 | } | |||
1407 | __ emit_jump(c->label.get()); | |||
1408 | TraceCacheState(decoder); | |||
1409 | } | |||
1410 | ||||
1411 | void FinishOneArmedIf(FullDecoder* decoder, Control* c) { | |||
1412 | DCHECK(c->is_onearmed_if()); | |||
1413 | if (c->end_merge.reached) { | |||
1414 | // Someone already merged to the end of the if. Merge both arms into that. | |||
1415 | if (c->reachable()) { | |||
1416 | // Merge the if state into the end state. | |||
1417 | __ MergeFullStackWith(c->label_state, *__ cache_state()); | |||
1418 | __ emit_jump(c->label.get()); | |||
1419 | } | |||
1420 | // Merge the else state into the end state. | |||
1421 | __ bind(c->else_state->label.get()); | |||
1422 | __ MergeFullStackWith(c->label_state, c->else_state->state); | |||
1423 | __ cache_state()->Steal(c->label_state); | |||
1424 | } else if (c->reachable()) { | |||
1425 | // No merge yet at the end of the if, but we need to create a merge for | |||
1426 | // both arms of this if. Thus init the merge point from the else | |||
1427 | // state, then merge the if state into that. | |||
1428 | DCHECK_EQ(c->start_merge.arity, c->end_merge.arity); | |||
1429 | c->label_state.InitMerge(c->else_state->state, __ num_locals(), | |||
1430 | c->start_merge.arity, | |||
1431 | c->stack_depth + c->num_exceptions); | |||
1432 | __ MergeFullStackWith(c->label_state, *__ cache_state()); | |||
1433 | __ emit_jump(c->label.get()); | |||
1434 | // Merge the else state into the end state. | |||
1435 | __ bind(c->else_state->label.get()); | |||
1436 | __ MergeFullStackWith(c->label_state, c->else_state->state); | |||
1437 | __ cache_state()->Steal(c->label_state); | |||
1438 | } else { | |||
1439 | // No merge needed, just continue with the else state. | |||
1440 | __ bind(c->else_state->label.get()); | |||
1441 | __ cache_state()->Steal(c->else_state->state); | |||
1442 | } | |||
1443 | } | |||
1444 | ||||
1445 | void FinishTry(FullDecoder* decoder, Control* c) { | |||
1446 | DCHECK(c->is_try_catch() || c->is_try_catchall()); | |||
1447 | if (!c->end_merge.reached) { | |||
1448 | if (c->try_info->catch_reached) { | |||
1449 | // Drop the implicit exception ref. | |||
1450 | __ DropValue(__ num_locals() + c->stack_depth + c->num_exceptions); | |||
1451 | } | |||
1452 | // Else we did not enter the catch state, continue with the current state. | |||
1453 | } else { | |||
1454 | if (c->reachable()) { | |||
1455 | __ MergeStackWith(c->label_state, c->br_merge()->arity, | |||
1456 | LiftoffAssembler::kForwardJump); | |||
1457 | } | |||
1458 | __ cache_state()->Steal(c->label_state); | |||
1459 | } | |||
1460 | if (c->try_info->catch_reached) { | |||
1461 | num_exceptions_--; | |||
1462 | } | |||
1463 | } | |||
1464 | ||||
1465 | void PopControl(FullDecoder* decoder, Control* c) { | |||
1466 | if (c->is_loop()) return; // A loop just falls through. | |||
1467 | if (c->is_onearmed_if()) { | |||
1468 | // Special handling for one-armed ifs. | |||
1469 | FinishOneArmedIf(decoder, c); | |||
1470 | } else if (c->is_try_catch() || c->is_try_catchall()) { | |||
1471 | FinishTry(decoder, c); | |||
1472 | } else if (c->end_merge.reached) { | |||
1473 | // There is a merge already. Merge our state into that, then continue with | |||
1474 | // that state. | |||
1475 | if (c->reachable()) { | |||
1476 | __ MergeFullStackWith(c->label_state, *__ cache_state()); | |||
1477 | } | |||
1478 | __ cache_state()->Steal(c->label_state); | |||
1479 | } else { | |||
1480 | // No merge, just continue with our current state. | |||
1481 | } | |||
1482 | ||||
1483 | if (!c->label.get()->is_bound()) __ bind(c->label.get()); | |||
1484 | } | |||
1485 | ||||
1486 | void GenerateCCall(const LiftoffRegister* result_regs, | |||
1487 | const ValueKindSig* sig, ValueKind out_argument_kind, | |||
1488 | const LiftoffRegister* arg_regs, | |||
1489 | ExternalReference ext_ref) { | |||
1490 | // Before making a call, spill all cache registers. | |||
1491 | __ SpillAllRegisters(); | |||
1492 | ||||
1493 | // Store arguments on our stack, then align the stack for the C call. | |||
1494 | int param_bytes = 0; | |||
1495 | for (ValueKind param_kind : sig->parameters()) { | |||
1496 | param_bytes += value_kind_size(param_kind); | |||
1497 | } | |||
1498 | int out_arg_bytes = | |||
1499 | out_argument_kind == kVoid ? 0 : value_kind_size(out_argument_kind); | |||
1500 | int stack_bytes = std::max(param_bytes, out_arg_bytes); | |||
1501 | __ CallC(sig, arg_regs, result_regs, out_argument_kind, stack_bytes, | |||
1502 | ext_ref); | |||
1503 | } | |||
1504 | ||||
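| // {CallEmitFn} dispatches either to a plain callable (e.g. a lambda) or to | |||
| // a member function of the assembler; for member functions, each | |||
| // {LiftoffRegister} argument is wrapped so it implicitly converts to | |||
| // {Register} or {DoubleRegister} as the parameter type requires. | |||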
1505 | template <typename EmitFn, typename... Args> | |||
1506 | typename std::enable_if<!std::is_member_function_pointer<EmitFn>::value>::type | |||
1507 | CallEmitFn(EmitFn fn, Args... args) { | |||
1508 | fn(args...); | |||
1509 | } | |||
1510 | ||||
1511 | template <typename EmitFn, typename... Args> | |||
1512 | typename std::enable_if<std::is_member_function_pointer<EmitFn>::value>::type | |||
1513 | CallEmitFn(EmitFn fn, Args... args) { | |||
1514 | (asm_.*fn)(ConvertAssemblerArg(args)...); | |||
1515 | } | |||
1516 | ||||
1517 | // Wrap a {LiftoffRegister} with implicit conversions to {Register} and | |||
1518 | // {DoubleRegister}. | |||
1519 | struct AssemblerRegisterConverter { | |||
1520 | LiftoffRegister reg; | |||
1521 | operator LiftoffRegister() { return reg; } | |||
1522 | operator Register() { return reg.gp(); } | |||
1523 | operator DoubleRegister() { return reg.fp(); } | |||
1524 | }; | |||
1525 | ||||
1526 | // Convert {LiftoffRegister} to {AssemblerRegisterConverter}, other types stay | |||
1527 | // unchanged. | |||
1528 | template <typename T> | |||
1529 | typename std::conditional<std::is_same<LiftoffRegister, T>::value, | |||
1530 | AssemblerRegisterConverter, T>::type | |||
1531 | ConvertAssemblerArg(T t) { | |||
1532 | return {t}; | |||
1533 | } | |||
1534 | ||||
1535 | template <typename EmitFn, typename ArgType> | |||
1536 | struct EmitFnWithFirstArg { | |||
1537 | EmitFn fn; | |||
1538 | ArgType first_arg; | |||
1539 | }; | |||
1540 | ||||
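| // Bind a fixed first argument (e.g. a {LiftoffCondition}) to an emit | |||
| // function; the {CallEmitFn} overload below unpacks it again. | |||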
1541 | template <typename EmitFn, typename ArgType> | |||
1542 | EmitFnWithFirstArg<EmitFn, ArgType> BindFirst(EmitFn fn, ArgType arg) { | |||
1543 | return {fn, arg}; | |||
1544 | } | |||
1545 | ||||
1546 | template <typename EmitFn, typename T, typename... Args> | |||
1547 | void CallEmitFn(EmitFnWithFirstArg<EmitFn, T> bound_fn, Args... args) { | |||
1548 | CallEmitFn(bound_fn.fn, bound_fn.first_arg, ConvertAssemblerArg(args)...); | |||
1549 | } | |||
1550 | ||||
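| // Pop the operand, emit the unary operation via {fn}, and push the result. | |||
| // When {nondeterminism_} is set, float (or float-lane) results are | |||
| // additionally checked for NaN. | |||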
1551 | template <ValueKind src_kind, ValueKind result_kind, | |||
1552 | ValueKind result_lane_kind = kVoid, class EmitFn> | |||
1553 | void EmitUnOp(EmitFn fn) { | |||
1554 | constexpr RegClass src_rc = reg_class_for(src_kind); | |||
1555 | constexpr RegClass result_rc = reg_class_for(result_kind); | |||
1556 | LiftoffRegister src = __ PopToRegister(); | |||
1557 | LiftoffRegister dst = src_rc == result_rc | |||
1558 | ? __ GetUnusedRegister(result_rc, {src}, {}) | |||
1559 | : __ GetUnusedRegister(result_rc, {}); | |||
1560 | CallEmitFn(fn, dst, src); | |||
1561 | if (V8_UNLIKELY(nondeterminism_)) { | |||
1562 | LiftoffRegList pinned = {dst}; | |||
1563 | if (result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64) { | |||
1564 | CheckNan(dst, pinned, result_kind); | |||
1565 | } else if (result_kind == ValueKind::kS128 && | |||
1566 | (result_lane_kind == kF32 || result_lane_kind == kF64)) { | |||
1567 | CheckS128Nan(dst, pinned, result_lane_kind); | |||
1568 | } | |||
1569 | } | |||
1570 | __ PushRegister(result_kind, dst); | |||
1571 | } | |||
1572 | ||||
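| // Like {EmitUnOp}, but if the inline emitter returns false (no native | |||
| // support), fall back to a C call through {fallback_fn}. | |||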
1573 | template <ValueKind kind> | |||
1574 | void EmitFloatUnOpWithCFallback( | |||
1575 | bool (LiftoffAssembler::*emit_fn)(DoubleRegister, DoubleRegister), | |||
1576 | ExternalReference (*fallback_fn)()) { | |||
1577 | auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) { | |||
1578 | if ((asm_.*emit_fn)(dst.fp(), src.fp())) return; | |||
1579 | ExternalReference ext_ref = fallback_fn(); | |||
1580 | auto sig = MakeSig::Params(kind); | |||
1581 | GenerateCCall(&dst, &sig, kind, &src, ext_ref); | |||
1582 | }; | |||
1583 | EmitUnOp<kind, kind>(emit_with_c_fallback); | |||
1584 | } | |||
1585 | ||||
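| // Emit a numeric conversion. Trapping conversions branch to an out-of-line | |||
| // trap on unrepresentable inputs; conversions without native support call | |||
| // out to C. {fallback_fn} may be nullptr for conversions that are always | |||
| // emitted inline, which is why it is only invoked after | |||
| // {emit_type_conversion} has failed. | |||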
1586 | enum TypeConversionTrapping : bool { kCanTrap = true, kNoTrap = false }; | |||
1587 | template <ValueKind dst_kind, ValueKind src_kind, | |||
1588 | TypeConversionTrapping can_trap> | |||
1589 | void EmitTypeConversion(FullDecoder* decoder, WasmOpcode opcode, | |||
1590 | ExternalReference (*fallback_fn)()) { | |||
1591 | static constexpr RegClass src_rc = reg_class_for(src_kind); | |||
1592 | static constexpr RegClass dst_rc = reg_class_for(dst_kind); | |||
1593 | LiftoffRegister src = __ PopToRegister(); | |||
1594 | LiftoffRegister dst = src_rc == dst_rc | |||
1595 | ? __ GetUnusedRegister(dst_rc, {src}, {}) | |||
1596 | : __ GetUnusedRegister(dst_rc, {}); | |||
1597 | Label* trap = | |||
1598 | can_trap ? AddOutOfLineTrap( | |||
1599 | decoder, WasmCode::kThrowWasmTrapFloatUnrepresentable) | |||
1600 | : nullptr; | |||
1601 | if (!__ emit_type_conversion(opcode, dst, src, trap)) { | |||
1602 | DCHECK_NOT_NULL(fallback_fn); | |||
1603 | ExternalReference ext_ref = fallback_fn(); | |||
| ||||
1604 | if (can_trap) { | |||
1605 | // External references for potentially trapping conversions return int. | |||
1606 | auto sig = MakeSig::Returns(kI32).Params(src_kind); | |||
1607 | LiftoffRegister ret_reg = | |||
1608 | __ GetUnusedRegister(kGpReg, LiftoffRegList{dst}); | |||
1609 | LiftoffRegister dst_regs[] = {ret_reg, dst}; | |||
1610 | GenerateCCall(dst_regs, &sig, dst_kind, &src, ext_ref); | |||
1611 | __ emit_cond_jump(kEqual, trap, kI32, ret_reg.gp()); | |||
1612 | } else { | |||
1613 | ValueKind sig_kinds[] = {src_kind}; | |||
1614 | ValueKindSig sig(0, 1, sig_kinds); | |||
1615 | GenerateCCall(&dst, &sig, dst_kind, &src, ext_ref); | |||
1616 | } | |||
1617 | } | |||
1618 | __ PushRegister(dst_kind, dst); | |||
1619 | } | |||
1620 | ||||
1621 | void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value, | |||
1622 | Value* result) { | |||
1623 | #define CASE_I32_UNOP(opcode, fn) \ | |||
1624 | case kExpr##opcode: \ | |||
1625 | return EmitUnOp<kI32, kI32>(&LiftoffAssembler::emit_##fn); | |||
1626 | #define CASE_I64_UNOP(opcode, fn) \ | |||
1627 | case kExpr##opcode: \ | |||
1628 | return EmitUnOp<kI64, kI64>(&LiftoffAssembler::emit_##fn); | |||
1629 | #define CASE_FLOAT_UNOP(opcode, kind, fn) \ | |||
1630 | case kExpr##opcode: \ | |||
1631 | return EmitUnOp<k##kind, k##kind>(&LiftoffAssembler::emit_##fn); | |||
1632 | #define CASE_FLOAT_UNOP_WITH_CFALLBACK(opcode, kind, fn) \ | |||
1633 | case kExpr##opcode: \ | |||
1634 | return EmitFloatUnOpWithCFallback<k##kind>(&LiftoffAssembler::emit_##fn, \ | |||
1635 | &ExternalReference::wasm_##fn); | |||
1636 | #define CASE_TYPE_CONVERSION(opcode, dst_kind, src_kind, ext_ref, can_trap) \ | |||
1637 | case kExpr##opcode: \ | |||
1638 | return EmitTypeConversion<k##dst_kind, k##src_kind, can_trap>( \ | |||
1639 | decoder, kExpr##opcode, ext_ref); | |||
1640 | switch (opcode) { | |||
| ||||
1641 | CASE_I32_UNOP(I32Clz, i32_clz) | |||
1642 | CASE_I32_UNOP(I32Ctz, i32_ctz) | |||
1643 | CASE_FLOAT_UNOP(F32Abs, F32, f32_abs) | |||
1644 | CASE_FLOAT_UNOP(F32Neg, F32, f32_neg) | |||
1645 | CASE_FLOAT_UNOP_WITH_CFALLBACK(F32Ceil, F32, f32_ceil) | |||
1646 | CASE_FLOAT_UNOP_WITH_CFALLBACK(F32Floor, F32, f32_floor) | |||
1647 | CASE_FLOAT_UNOP_WITH_CFALLBACK(F32Trunc, F32, f32_trunc) | |||
1648 | CASE_FLOAT_UNOP_WITH_CFALLBACK(F32NearestInt, F32, f32_nearest_int) | |||
1649 | CASE_FLOAT_UNOP(F32Sqrt, F32, f32_sqrt) | |||
1650 | CASE_FLOAT_UNOP(F64Abs, F64, f64_abs) | |||
1651 | CASE_FLOAT_UNOP(F64Neg, F64, f64_neg) | |||
1652 | CASE_FLOAT_UNOP_WITH_CFALLBACK(F64Ceil, F64, f64_ceil) | |||
1653 | CASE_FLOAT_UNOP_WITH_CFALLBACK(F64Floor, F64, f64_floor) | |||
1654 | CASE_FLOAT_UNOP_WITH_CFALLBACK(F64Trunc, F64, f64_trunc) | |||
1655 | CASE_FLOAT_UNOP_WITH_CFALLBACK(F64NearestInt, F64, f64_nearest_int) | |||
1656 | CASE_FLOAT_UNOP(F64Sqrt, F64, f64_sqrt) | |||
1657 | CASE_TYPE_CONVERSION(I32ConvertI64, I32, I64, nullptr, kNoTrap) | |||
1658 | CASE_TYPE_CONVERSION(I32SConvertF32, I32, F32, nullptr, kCanTrap) | |||
1659 | CASE_TYPE_CONVERSION(I32UConvertF32, I32, F32, nullptr, kCanTrap) | |||
1660 | CASE_TYPE_CONVERSION(I32SConvertF64, I32, F64, nullptr, kCanTrap) | |||
1661 | CASE_TYPE_CONVERSION(I32UConvertF64, I32, F64, nullptr, kCanTrap) | |||
1662 | CASE_TYPE_CONVERSION(I32ReinterpretF32, I32, F32, nullptr, kNoTrap) | |||
1663 | CASE_TYPE_CONVERSION(I64SConvertI32, I64, I32, nullptr, kNoTrap) | |||
1664 | CASE_TYPE_CONVERSION(I64UConvertI32, I64, I32, nullptr, kNoTrap) | |||
1665 | CASE_TYPE_CONVERSION(I64SConvertF32, I64, F32, | |||
1666 | &ExternalReference::wasm_float32_to_int64, kCanTrap) | |||
1667 | CASE_TYPE_CONVERSION(I64UConvertF32, I64, F32, | |||
1668 | &ExternalReference::wasm_float32_to_uint64, kCanTrap) | |||
1669 | CASE_TYPE_CONVERSION(I64SConvertF64, I64, F64, | |||
1670 | &ExternalReference::wasm_float64_to_int64, kCanTrap) | |||
1671 | CASE_TYPE_CONVERSION(I64UConvertF64, I64, F64, | |||
1672 | &ExternalReference::wasm_float64_to_uint64, kCanTrap) | |||
1673 | CASE_TYPE_CONVERSION(I64ReinterpretF64, I64, F64, nullptr, kNoTrap) | |||
1674 | CASE_TYPE_CONVERSION(F32SConvertI32, F32, I32, nullptr, kNoTrap) | |||
1675 | CASE_TYPE_CONVERSION(F32UConvertI32, F32, I32, nullptr, kNoTrap) | |||
1676 | CASE_TYPE_CONVERSION(F32SConvertI64, F32, I64, | |||
1677 | &ExternalReference::wasm_int64_to_float32, kNoTrap) | |||
1678 | CASE_TYPE_CONVERSION(F32UConvertI64, F32, I64, | |||
1679 | &ExternalReference::wasm_uint64_to_float32, kNoTrap) | |||
1680 | CASE_TYPE_CONVERSION(F32ConvertF64, F32, F64, nullptr, kNoTrap) | |||
1681 | CASE_TYPE_CONVERSION(F32ReinterpretI32, F32, I32, nullptr, kNoTrap) | |||
1682 | CASE_TYPE_CONVERSION(F64SConvertI32, F64, I32, nullptr, kNoTrap) | |||
1683 | CASE_TYPE_CONVERSION(F64UConvertI32, F64, I32, nullptr, kNoTrap) | |||
1684 | CASE_TYPE_CONVERSION(F64SConvertI64, F64, I64, | |||
1685 | &ExternalReference::wasm_int64_to_float64, kNoTrap) | |||
1686 | CASE_TYPE_CONVERSION(F64UConvertI64, F64, I64, | |||
1687 | &ExternalReference::wasm_uint64_to_float64, kNoTrap) | |||
1688 | CASE_TYPE_CONVERSION(F64ConvertF32, F64, F32, nullptr, kNoTrap) | |||
1689 | CASE_TYPE_CONVERSION(F64ReinterpretI64, F64, I64, nullptr, kNoTrap) | |||
1690 | CASE_I32_UNOP(I32SExtendI8, i32_signextend_i8) | |||
1691 | CASE_I32_UNOP(I32SExtendI16, i32_signextend_i16) | |||
1692 | CASE_I64_UNOP(I64SExtendI8, i64_signextend_i8) | |||
1693 | CASE_I64_UNOP(I64SExtendI16, i64_signextend_i16) | |||
1694 | CASE_I64_UNOP(I64SExtendI32, i64_signextend_i32) | |||
1695 | CASE_I64_UNOP(I64Clz, i64_clz) | |||
1696 | CASE_I64_UNOP(I64Ctz, i64_ctz) | |||
1697 | CASE_TYPE_CONVERSION(I32SConvertSatF32, I32, F32, nullptr, kNoTrap) | |||
1698 | CASE_TYPE_CONVERSION(I32UConvertSatF32, I32, F32, nullptr, kNoTrap) | |||
1699 | CASE_TYPE_CONVERSION(I32SConvertSatF64, I32, F64, nullptr, kNoTrap) | |||
1700 | CASE_TYPE_CONVERSION(I32UConvertSatF64, I32, F64, nullptr, kNoTrap) | |||
1701 | CASE_TYPE_CONVERSION(I64SConvertSatF32, I64, F32, | |||
1702 | &ExternalReference::wasm_float32_to_int64_sat, | |||
1703 | kNoTrap) | |||
1704 | CASE_TYPE_CONVERSION(I64UConvertSatF32, I64, F32, | |||
1705 | &ExternalReference::wasm_float32_to_uint64_sat, | |||
1706 | kNoTrap) | |||
1707 | CASE_TYPE_CONVERSION(I64SConvertSatF64, I64, F64, | |||
1708 | &ExternalReference::wasm_float64_to_int64_sat, | |||
1709 | kNoTrap) | |||
1710 | CASE_TYPE_CONVERSION(I64UConvertSatF64, I64, F64, | |||
1711 | &ExternalReference::wasm_float64_to_uint64_sat, | |||
1712 | kNoTrap) | |||
1713 | case kExprI32Eqz: | |||
1714 | DCHECK(decoder->lookahead(0, kExprI32Eqz)); | |||
1715 | if ((decoder->lookahead(1, kExprBrIf) || | |||
1716 | decoder->lookahead(1, kExprIf)) && | |||
1717 | !for_debugging_) { | |||
1718 | DCHECK(!has_outstanding_op()); | |||
1719 | outstanding_op_ = kExprI32Eqz; | |||
1720 | break; | |||
1721 | } | |||
1722 | return EmitUnOp<kI32, kI32>(&LiftoffAssembler::emit_i32_eqz); | |||
1723 | case kExprI64Eqz: | |||
1724 | return EmitUnOp<kI64, kI32>(&LiftoffAssembler::emit_i64_eqz); | |||
1725 | case kExprI32Popcnt: | |||
1726 | return EmitUnOp<kI32, kI32>( | |||
1727 | [=](LiftoffRegister dst, LiftoffRegister src) { | |||
1728 | if (__ emit_i32_popcnt(dst.gp(), src.gp())) return; | |||
1729 | auto sig = MakeSig::Returns(kI32).Params(kI32); | |||
1730 | GenerateCCall(&dst, &sig, kVoid, &src, | |||
1731 | ExternalReference::wasm_word32_popcnt()); | |||
1732 | }); | |||
1733 | case kExprI64Popcnt: | |||
1734 | return EmitUnOp<kI64, kI64>( | |||
1735 | [=](LiftoffRegister dst, LiftoffRegister src) { | |||
1736 | if (__ emit_i64_popcnt(dst, src)) return; | |||
1737 | // The C function returns i32. We will zero-extend later. | |||
1738 | auto sig = MakeSig::Returns(kI32).Params(kI64); | |||
1739 | LiftoffRegister c_call_dst = kNeedI64RegPair ? dst.low() : dst; | |||
1740 | GenerateCCall(&c_call_dst, &sig, kVoid, &src, | |||
1741 | ExternalReference::wasm_word64_popcnt()); | |||
1742 | // Now zero-extend the result to i64. | |||
1743 | __ emit_type_conversion(kExprI64UConvertI32, dst, c_call_dst, | |||
1744 | nullptr); | |||
1745 | }); | |||
1746 | case kExprRefIsNull: | |||
1747 | // We abuse ref.as_non_null, which isn't otherwise used in this switch, as | |||
1748 | // a sentinel for the negation of ref.is_null. | |||
1749 | case kExprRefAsNonNull: { | |||
1750 | LiftoffRegList pinned; | |||
1751 | LiftoffRegister ref = pinned.set(__ PopToRegister()); | |||
1752 | LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned); | |||
1753 | LoadNullValue(null.gp(), pinned); | |||
1754 | // Prefer to overwrite one of the input registers with the result | |||
1755 | // of the comparison. | |||
1756 | LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {ref, null}, {}); | |||
1757 | __ emit_ptrsize_set_cond(opcode == kExprRefIsNull ? kEqual : kUnequal, | |||
1758 | dst.gp(), ref, null); | |||
1759 | __ PushRegister(kI32, dst); | |||
1760 | return; | |||
1761 | } | |||
1762 | default: | |||
1763 | UNREACHABLE(); | |||
1764 | } | |||
1765 | #undef CASE_I32_UNOP | |||
1766 | #undef CASE_I64_UNOP | |||
1767 | #undef CASE_FLOAT_UNOP | |||
1768 | #undef CASE_FLOAT_UNOP_WITH_CFALLBACK | |||
1769 | #undef CASE_TYPE_CONVERSION | |||
1770 | } | |||
1771 | ||||
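| // Variant of {EmitBinOp} for operations with an immediate form: if the RHS | |||
| // on the value stack is a compile-time constant, use {fnImm}, otherwise | |||
| // fall back to the register-register emitter {fn}. | |||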
1772 | template <ValueKind src_kind, ValueKind result_kind, typename EmitFn, | |||
1773 | typename EmitFnImm> | |||
1774 | void EmitBinOpImm(EmitFn fn, EmitFnImm fnImm) { | |||
1775 | static constexpr RegClass src_rc = reg_class_for(src_kind); | |||
1776 | static constexpr RegClass result_rc = reg_class_for(result_kind); | |||
1777 | ||||
1778 | LiftoffAssembler::VarState rhs_slot = __ cache_state()->stack_state.back(); | |||
1779 | // Check if the RHS is an immediate. | |||
1780 | if (rhs_slot.is_const()) { | |||
1781 | __ cache_state()->stack_state.pop_back(); | |||
1782 | int32_t imm = rhs_slot.i32_const(); | |||
1783 | ||||
1784 | LiftoffRegister lhs = __ PopToRegister(); | |||
1785 | // Either reuse {lhs} for {dst}, or choose a register (pair) which does | |||
1786 | // not overlap, for easier code generation. | |||
1787 | LiftoffRegList pinned = {lhs}; | |||
1788 | LiftoffRegister dst = src_rc == result_rc | |||
1789 | ? __ GetUnusedRegister(result_rc, {lhs}, pinned) | |||
1790 | : __ GetUnusedRegister(result_rc, pinned); | |||
1791 | ||||
1792 | CallEmitFn(fnImm, dst, lhs, imm); | |||
1793 | static_assert(result_kind != kF32 && result_kind != kF64, | |||
1794 | "Unhandled nondeterminism for fuzzing."); | |||
1795 | __ PushRegister(result_kind, dst); | |||
1796 | } else { | |||
1797 | // The RHS was not an immediate. | |||
1798 | EmitBinOp<src_kind, result_kind>(fn); | |||
1799 | } | |||
1800 | } | |||
1801 | ||||
1802 | template <ValueKind src_kind, ValueKind result_kind, | |||
1803 | bool swap_lhs_rhs = false, ValueKind result_lane_kind = kVoid, | |||
1804 | typename EmitFn> | |||
1805 | void EmitBinOp(EmitFn fn) { | |||
1806 | static constexpr RegClass src_rc = reg_class_for(src_kind); | |||
1807 | static constexpr RegClass result_rc = reg_class_for(result_kind); | |||
1808 | LiftoffRegister rhs = __ PopToRegister(); | |||
1809 | LiftoffRegister lhs = __ PopToRegister(LiftoffRegList{rhs}); | |||
1810 | LiftoffRegister dst = src_rc == result_rc | |||
1811 | ? __ GetUnusedRegister(result_rc, {lhs, rhs}, {}) | |||
1812 | : __ GetUnusedRegister(result_rc, {}); | |||
1813 | ||||
1814 | if (swap_lhs_rhs) std::swap(lhs, rhs); | |||
1815 | ||||
1816 | CallEmitFn(fn, dst, lhs, rhs); | |||
1817 | if (V8_UNLIKELY(nondeterminism_)) { | |||
1818 | LiftoffRegList pinned = {dst}; | |||
1819 | if (result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64) { | |||
1820 | CheckNan(dst, pinned, result_kind); | |||
1821 | } else if (result_kind == ValueKind::kS128 && | |||
1822 | (result_lane_kind == kF32 || result_lane_kind == kF64)) { | |||
1823 | CheckS128Nan(dst, pinned, result_lane_kind); | |||
1824 | } | |||
1825 | } | |||
1826 | __ PushRegister(result_kind, dst); | |||
1827 | } | |||
1828 | ||||
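| // 64-bit division/remainder via C call. The C function's i32 return value | |||
| // signals failure: it is compared against 0 (division by zero) and, if | |||
| // requested, against -1 (unrepresentable result), jumping to the | |||
| // corresponding out-of-line trap. | |||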
1829 | void EmitDivOrRem64CCall(LiftoffRegister dst, LiftoffRegister lhs, | |||
1830 | LiftoffRegister rhs, ExternalReference ext_ref, | |||
1831 | Label* trap_by_zero, | |||
1832 | Label* trap_unrepresentable = nullptr) { | |||
1833 | // Cannot emit native instructions, build C call. | |||
1834 | LiftoffRegister ret = __ GetUnusedRegister(kGpReg, LiftoffRegList{dst}); | |||
1835 | LiftoffRegister tmp = | |||
1836 | __ GetUnusedRegister(kGpReg, LiftoffRegList{dst, ret}); | |||
1837 | LiftoffRegister arg_regs[] = {lhs, rhs}; | |||
1838 | LiftoffRegister result_regs[] = {ret, dst}; | |||
1839 | auto sig = MakeSig::Returns(kI32).Params(kI64, kI64); | |||
1840 | GenerateCCall(result_regs, &sig, kI64, arg_regs, ext_ref); | |||
1841 | __ LoadConstant(tmp, WasmValue(int32_t{0})); | |||
1842 | __ emit_cond_jump(kEqual, trap_by_zero, kI32, ret.gp(), tmp.gp()); | |||
1843 | if (trap_unrepresentable) { | |||
1844 | __ LoadConstant(tmp, WasmValue(int32_t{-1})); | |||
1845 | __ emit_cond_jump(kEqual, trap_unrepresentable, kI32, ret.gp(), tmp.gp()); | |||
1846 | } | |||
1847 | } | |||
1848 | ||||
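| // i32 comparisons immediately followed by br_if or if are not materialized; | |||
| // the opcode is recorded in {outstanding_op_} so {JumpIfFalse} can fuse the | |||
| // compare with the branch. Otherwise emit a plain set_cond. | |||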
1849 | template <WasmOpcode opcode> | |||
1850 | void EmitI32CmpOp(FullDecoder* decoder) { | |||
1851 | DCHECK(decoder->lookahead(0, opcode)); | |||
1852 | if ((decoder->lookahead(1, kExprBrIf) || decoder->lookahead(1, kExprIf)) && | |||
1853 | !for_debugging_) { | |||
1854 | DCHECK(!has_outstanding_op()); | |||
1855 | outstanding_op_ = opcode; | |||
1856 | return; | |||
1857 | } | |||
1858 | return EmitBinOp<kI32, kI32>(BindFirst(&LiftoffAssembler::emit_i32_set_cond, | |||
1859 | GetCompareCondition(opcode))); | |||
1860 | } | |||
1861 | ||||
1862 | void BinOp(FullDecoder* decoder, WasmOpcode opcode, const Value& lhs, | |||
1863 | const Value& rhs, Value* result) { | |||
1864 | #define CASE_I64_SHIFTOP(opcode, fn) \ | |||
1865 | case kExpr##opcode: \ | |||
1866 | return EmitBinOpImm<kI64, kI64>( \ | |||
1867 | [=](LiftoffRegister dst, LiftoffRegister src, \ | |||
1868 | LiftoffRegister amount) { \ | |||
1869 | __ emit_##fn(dst, src, \ | |||
1870 | amount.is_gp_pair() ? amount.low_gp() : amount.gp()); \ | |||
1871 | }, \ | |||
1872 | &LiftoffAssembler::emit_##fn##i); | |||
1873 | #define CASE_CCALL_BINOP(opcode, kind, ext_ref_fn) \ | |||
1874 | case kExpr##opcode: \ | |||
1875 | return EmitBinOp<k##kind, k##kind>( \ | |||
1876 | [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \ | |||
1877 | LiftoffRegister args[] = {lhs, rhs}; \ | |||
1878 | auto ext_ref = ExternalReference::ext_ref_fn(); \ | |||
1879 | ValueKind sig_kinds[] = {k##kind, k##kind, k##kind}; \ | |||
1880 | const bool out_via_stack = k##kind == kI64; \ | |||
1881 | ValueKindSig sig(out_via_stack ? 0 : 1, 2, sig_kinds); \ | |||
1882 | ValueKind out_arg_kind = out_via_stack ? kI64 : kVoid; \ | |||
1883 | GenerateCCall(&dst, &sig, out_arg_kind, args, ext_ref); \ | |||
1884 | }); | |||
1885 | switch (opcode) { | |||
1886 | case kExprI32Add: | |||
1887 | return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_add, | |||
1888 | &LiftoffAssembler::emit_i32_addi); | |||
1889 | case kExprI32Sub: | |||
1890 | return EmitBinOp<kI32, kI32>(&LiftoffAssembler::emit_i32_sub); | |||
1891 | case kExprI32Mul: | |||
1892 | return EmitBinOp<kI32, kI32>(&LiftoffAssembler::emit_i32_mul); | |||
1893 | case kExprI32And: | |||
1894 | return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_and, | |||
1895 | &LiftoffAssembler::emit_i32_andi); | |||
1896 | case kExprI32Ior: | |||
1897 | return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_or, | |||
1898 | &LiftoffAssembler::emit_i32_ori); | |||
1899 | case kExprI32Xor: | |||
1900 | return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_xor, | |||
1901 | &LiftoffAssembler::emit_i32_xori); | |||
1902 | case kExprI32Eq: | |||
1903 | return EmitI32CmpOp<kExprI32Eq>(decoder); | |||
1904 | case kExprI32Ne: | |||
1905 | return EmitI32CmpOp<kExprI32Ne>(decoder); | |||
1906 | case kExprI32LtS: | |||
1907 | return EmitI32CmpOp<kExprI32LtS>(decoder); | |||
1908 | case kExprI32LtU: | |||
1909 | return EmitI32CmpOp<kExprI32LtU>(decoder); | |||
1910 | case kExprI32GtS: | |||
1911 | return EmitI32CmpOp<kExprI32GtS>(decoder); | |||
1912 | case kExprI32GtU: | |||
1913 | return EmitI32CmpOp<kExprI32GtU>(decoder); | |||
1914 | case kExprI32LeS: | |||
1915 | return EmitI32CmpOp<kExprI32LeS>(decoder); | |||
1916 | case kExprI32LeU: | |||
1917 | return EmitI32CmpOp<kExprI32LeU>(decoder); | |||
1918 | case kExprI32GeS: | |||
1919 | return EmitI32CmpOp<kExprI32GeS>(decoder); | |||
1920 | case kExprI32GeU: | |||
1921 | return EmitI32CmpOp<kExprI32GeU>(decoder); | |||
1922 | case kExprI64Add: | |||
1923 | return EmitBinOpImm<kI64, kI64>(&LiftoffAssembler::emit_i64_add, | |||
1924 | &LiftoffAssembler::emit_i64_addi); | |||
1925 | case kExprI64Sub: | |||
1926 | return EmitBinOp<kI64, kI64>(&LiftoffAssembler::emit_i64_sub); | |||
1927 | case kExprI64Mul: | |||
1928 | return EmitBinOp<kI64, kI64>(&LiftoffAssembler::emit_i64_mul); | |||
1929 | case kExprI64And: | |||
1930 | return EmitBinOpImm<kI64, kI64>(&LiftoffAssembler::emit_i64_and, | |||
1931 | &LiftoffAssembler::emit_i64_andi); | |||
1932 | case kExprI64Ior: | |||
1933 | return EmitBinOpImm<kI64, kI64>(&LiftoffAssembler::emit_i64_or, | |||
1934 | &LiftoffAssembler::emit_i64_ori); | |||
1935 | case kExprI64Xor: | |||
1936 | return EmitBinOpImm<kI64, kI64>(&LiftoffAssembler::emit_i64_xor, | |||
1937 | &LiftoffAssembler::emit_i64_xori); | |||
1938 | case kExprI64Eq: | |||
1939 | return EmitBinOp<kI64, kI32>( | |||
1940 | BindFirst(&LiftoffAssembler::emit_i64_set_cond, kEqual)); | |||
1941 | case kExprI64Ne: | |||
1942 | return EmitBinOp<kI64, kI32>( | |||
1943 | BindFirst(&LiftoffAssembler::emit_i64_set_cond, kUnequal)); | |||
1944 | case kExprI64LtS: | |||
1945 | return EmitBinOp<kI64, kI32>( | |||
1946 | BindFirst(&LiftoffAssembler::emit_i64_set_cond, kSignedLessThan)); | |||
1947 | case kExprI64LtU: | |||
1948 | return EmitBinOp<kI64, kI32>( | |||
1949 | BindFirst(&LiftoffAssembler::emit_i64_set_cond, kUnsignedLessThan)); | |||
1950 | case kExprI64GtS: | |||
1951 | return EmitBinOp<kI64, kI32>(BindFirst( | |||
1952 | &LiftoffAssembler::emit_i64_set_cond, kSignedGreaterThan)); | |||
1953 | case kExprI64GtU: | |||
1954 | return EmitBinOp<kI64, kI32>(BindFirst( | |||
1955 | &LiftoffAssembler::emit_i64_set_cond, kUnsignedGreaterThan)); | |||
1956 | case kExprI64LeS: | |||
1957 | return EmitBinOp<kI64, kI32>( | |||
1958 | BindFirst(&LiftoffAssembler::emit_i64_set_cond, kSignedLessEqual)); | |||
1959 | case kExprI64LeU: | |||
1960 | return EmitBinOp<kI64, kI32>(BindFirst( | |||
1961 | &LiftoffAssembler::emit_i64_set_cond, kUnsignedLessEqual)); | |||
1962 | case kExprI64GeS: | |||
1963 | return EmitBinOp<kI64, kI32>(BindFirst( | |||
1964 | &LiftoffAssembler::emit_i64_set_cond, kSignedGreaterEqual)); | |||
1965 | case kExprI64GeU: | |||
1966 | return EmitBinOp<kI64, kI32>(BindFirst( | |||
1967 | &LiftoffAssembler::emit_i64_set_cond, kUnsignedGreaterEqual)); | |||
1968 | case kExprF32Eq: | |||
1969 | return EmitBinOp<kF32, kI32>( | |||
1970 | BindFirst(&LiftoffAssembler::emit_f32_set_cond, kEqual)); | |||
1971 | case kExprF32Ne: | |||
1972 | return EmitBinOp<kF32, kI32>( | |||
1973 | BindFirst(&LiftoffAssembler::emit_f32_set_cond, kUnequal)); | |||
1974 | case kExprF32Lt: | |||
1975 | return EmitBinOp<kF32, kI32>( | |||
1976 | BindFirst(&LiftoffAssembler::emit_f32_set_cond, kUnsignedLessThan)); | |||
1977 | case kExprF32Gt: | |||
1978 | return EmitBinOp<kF32, kI32>(BindFirst( | |||
1979 | &LiftoffAssembler::emit_f32_set_cond, kUnsignedGreaterThan)); | |||
1980 | case kExprF32Le: | |||
1981 | return EmitBinOp<kF32, kI32>(BindFirst( | |||
1982 | &LiftoffAssembler::emit_f32_set_cond, kUnsignedLessEqual)); | |||
1983 | case kExprF32Ge: | |||
1984 | return EmitBinOp<kF32, kI32>(BindFirst( | |||
1985 | &LiftoffAssembler::emit_f32_set_cond, kUnsignedGreaterEqual)); | |||
1986 | case kExprF64Eq: | |||
1987 | return EmitBinOp<kF64, kI32>( | |||
1988 | BindFirst(&LiftoffAssembler::emit_f64_set_cond, kEqual)); | |||
1989 | case kExprF64Ne: | |||
1990 | return EmitBinOp<kF64, kI32>( | |||
1991 | BindFirst(&LiftoffAssembler::emit_f64_set_cond, kUnequal)); | |||
1992 | case kExprF64Lt: | |||
1993 | return EmitBinOp<kF64, kI32>( | |||
1994 | BindFirst(&LiftoffAssembler::emit_f64_set_cond, kUnsignedLessThan)); | |||
1995 | case kExprF64Gt: | |||
1996 | return EmitBinOp<kF64, kI32>(BindFirst( | |||
1997 | &LiftoffAssembler::emit_f64_set_cond, kUnsignedGreaterThan)); | |||
1998 | case kExprF64Le: | |||
1999 | return EmitBinOp<kF64, kI32>(BindFirst( | |||
2000 | &LiftoffAssembler::emit_f64_set_cond, kUnsignedLessEqual)); | |||
2001 | case kExprF64Ge: | |||
2002 | return EmitBinOp<kF64, kI32>(BindFirst( | |||
2003 | &LiftoffAssembler::emit_f64_set_cond, kUnsignedGreaterEqual)); | |||
2004 | case kExprI32Shl: | |||
2005 | return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_shl, | |||
2006 | &LiftoffAssembler::emit_i32_shli); | |||
2007 | case kExprI32ShrS: | |||
2008 | return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_sar, | |||
2009 | &LiftoffAssembler::emit_i32_sari); | |||
2010 | case kExprI32ShrU: | |||
2011 | return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_shr, | |||
2012 | &LiftoffAssembler::emit_i32_shri); | |||
2013 | CASE_CCALL_BINOP(I32Rol, I32, wasm_word32_rol) | |||
2014 | CASE_CCALL_BINOP(I32Ror, I32, wasm_word32_ror) | |||
2015 | CASE_I64_SHIFTOP(I64Shl, i64_shl) | |||
2016 | CASE_I64_SHIFTOP(I64ShrS, i64_sar) | |||
2017 | CASE_I64_SHIFTOP(I64ShrU, i64_shr) | |||
2018 | CASE_CCALL_BINOP(I64Rol, I64, wasm_word64_rol) | |||
2019 | CASE_CCALL_BINOP(I64Ror, I64, wasm_word64_ror) | |||
2020 | case kExprF32Add: | |||
2021 | return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_add); | |||
2022 | case kExprF32Sub: | |||
2023 | return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_sub); | |||
2024 | case kExprF32Mul: | |||
2025 | return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_mul); | |||
2026 | case kExprF32Div: | |||
2027 | return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_div); | |||
2028 | case kExprF32Min: | |||
2029 | return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_min); | |||
2030 | case kExprF32Max: | |||
2031 | return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_max); | |||
2032 | case kExprF32CopySign: | |||
2033 | return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_copysign); | |||
2034 | case kExprF64Add: | |||
2035 | return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_add); | |||
2036 | case kExprF64Sub: | |||
2037 | return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_sub); | |||
2038 | case kExprF64Mul: | |||
2039 | return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_mul); | |||
2040 | case kExprF64Div: | |||
2041 | return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_div); | |||
2042 | case kExprF64Min: | |||
2043 | return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_min); | |||
2044 | case kExprF64Max: | |||
2045 | return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_max); | |||
2046 | case kExprF64CopySign: | |||
2047 | return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_copysign); | |||
2048 | case kExprI32DivS: | |||
2049 | return EmitBinOp<kI32, kI32>([this, decoder](LiftoffRegister dst, | |||
2050 | LiftoffRegister lhs, | |||
2051 | LiftoffRegister rhs) { | |||
2052 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapDivByZero); | |||
2053 | // Adding the second trap might invalidate the pointer returned for | |||
2054 | // the first one, thus get both pointers afterwards. | |||
2055 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapDivUnrepresentable); | |||
2056 | Label* div_by_zero = out_of_line_code_.end()[-2].label.get(); | |||
2057 | Label* div_unrepresentable = out_of_line_code_.end()[-1].label.get(); | |||
2058 | __ emit_i32_divs(dst.gp(), lhs.gp(), rhs.gp(), div_by_zero, | |||
2059 | div_unrepresentable); | |||
2060 | }); | |||
2061 | case kExprI32DivU: | |||
2062 | return EmitBinOp<kI32, kI32>([this, decoder](LiftoffRegister dst, | |||
2063 | LiftoffRegister lhs, | |||
2064 | LiftoffRegister rhs) { | |||
2065 | Label* div_by_zero = | |||
2066 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapDivByZero); | |||
2067 | __ emit_i32_divu(dst.gp(), lhs.gp(), rhs.gp(), div_by_zero); | |||
2068 | }); | |||
2069 | case kExprI32RemS: | |||
2070 | return EmitBinOp<kI32, kI32>([this, decoder](LiftoffRegister dst, | |||
2071 | LiftoffRegister lhs, | |||
2072 | LiftoffRegister rhs) { | |||
2073 | Label* rem_by_zero = | |||
2074 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapRemByZero); | |||
2075 | __ emit_i32_rems(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero); | |||
2076 | }); | |||
2077 | case kExprI32RemU: | |||
2078 | return EmitBinOp<kI32, kI32>([this, decoder](LiftoffRegister dst, | |||
2079 | LiftoffRegister lhs, | |||
2080 | LiftoffRegister rhs) { | |||
2081 | Label* rem_by_zero = | |||
2082 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapRemByZero); | |||
2083 | __ emit_i32_remu(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero); | |||
2084 | }); | |||
2085 | case kExprI64DivS: | |||
2086 | return EmitBinOp<kI64, kI64>([this, decoder](LiftoffRegister dst, | |||
2087 | LiftoffRegister lhs, | |||
2088 | LiftoffRegister rhs) { | |||
2089 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapDivByZero); | |||
2090 | // Adding the second trap might invalidate the pointer returned for | |||
2091 | // the first one, thus get both pointers afterwards. | |||
2092 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapDivUnrepresentable); | |||
2093 | Label* div_by_zero = out_of_line_code_.end()[-2].label.get(); | |||
2094 | Label* div_unrepresentable = out_of_line_code_.end()[-1].label.get(); | |||
2095 | if (!__ emit_i64_divs(dst, lhs, rhs, div_by_zero, | |||
2096 | div_unrepresentable)) { | |||
2097 | ExternalReference ext_ref = ExternalReference::wasm_int64_div(); | |||
2098 | EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, div_by_zero, | |||
2099 | div_unrepresentable); | |||
2100 | } | |||
2101 | }); | |||
2102 | case kExprI64DivU: | |||
2103 | return EmitBinOp<kI64, kI64>([this, decoder](LiftoffRegister dst, | |||
2104 | LiftoffRegister lhs, | |||
2105 | LiftoffRegister rhs) { | |||
2106 | Label* div_by_zero = | |||
2107 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapDivByZero); | |||
2108 | if (!__ emit_i64_divu(dst, lhs, rhs, div_by_zero)) { | |||
2109 | ExternalReference ext_ref = ExternalReference::wasm_uint64_div(); | |||
2110 | EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, div_by_zero); | |||
2111 | } | |||
2112 | }); | |||
2113 | case kExprI64RemS: | |||
2114 | return EmitBinOp<kI64, kI64>([this, decoder](LiftoffRegister dst, | |||
2115 | LiftoffRegister lhs, | |||
2116 | LiftoffRegister rhs) { | |||
2117 | Label* rem_by_zero = | |||
2118 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapRemByZero); | |||
2119 | if (!__ emit_i64_rems(dst, lhs, rhs, rem_by_zero)) { | |||
2120 | ExternalReference ext_ref = ExternalReference::wasm_int64_mod(); | |||
2121 | EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, rem_by_zero); | |||
2122 | } | |||
2123 | }); | |||
2124 | case kExprI64RemU: | |||
2125 | return EmitBinOp<kI64, kI64>([this, decoder](LiftoffRegister dst, | |||
2126 | LiftoffRegister lhs, | |||
2127 | LiftoffRegister rhs) { | |||
2128 | Label* rem_by_zero = | |||
2129 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapRemByZero); | |||
2130 | if (!__ emit_i64_remu(dst, lhs, rhs, rem_by_zero)) { | |||
2131 | ExternalReference ext_ref = ExternalReference::wasm_uint64_mod(); | |||
2132 | EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, rem_by_zero); | |||
2133 | } | |||
2134 | }); | |||
2135 | case kExprRefEq: { | |||
2136 | return EmitBinOp<kOptRef, kI32>( | |||
2137 | BindFirst(&LiftoffAssembler::emit_ptrsize_set_cond, kEqual)); | |||
2138 | } | |||
2139 | ||||
2140 | default: | |||
2141 | UNREACHABLE(); | |||
2142 | } | |||
2143 | #undef CASE_I64_SHIFTOP | |||
2144 | #undef CASE_CCALL_BINOP | |||
2145 | } | |||
2146 | ||||
2147 | void I32Const(FullDecoder* decoder, Value* result, int32_t value) { | |||
2148 | __ PushConstant(kI32, value); | |||
2149 | } | |||
2150 | ||||
2151 | void I64Const(FullDecoder* decoder, Value* result, int64_t value) { | |||
2152 | // The {VarState} stores constant values as int32_t, thus we only store | |||
2153 | // 64-bit constants in this field if they fit in an int32_t. Larger values | |||
2154 | // cannot be used as immediates anyway, so we can just put them in | |||
2155 | // a register immediately. | |||
2156 | int32_t value_i32 = static_cast<int32_t>(value); | |||
2157 | if (value_i32 == value) { | |||
2158 | __ PushConstant(kI64, value_i32); | |||
2159 | } else { | |||
2160 | LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kI64), {}); | |||
2161 | __ LoadConstant(reg, WasmValue(value)); | |||
2162 | __ PushRegister(kI64, reg); | |||
2163 | } | |||
2164 | } | |||
2165 | ||||
2166 | void F32Const(FullDecoder* decoder, Value* result, float value) { | |||
2167 | LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {}); | |||
2168 | __ LoadConstant(reg, WasmValue(value)); | |||
2169 | __ PushRegister(kF32, reg); | |||
2170 | } | |||
2171 | ||||
2172 | void F64Const(FullDecoder* decoder, Value* result, double value) { | |||
2173 | LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {}); | |||
2174 | __ LoadConstant(reg, WasmValue(value)); | |||
2175 | __ PushRegister(kF64, reg); | |||
2176 | } | |||
2177 | ||||
2178 | void RefNull(FullDecoder* decoder, ValueType type, Value*) { | |||
2179 | LiftoffRegister null = __ GetUnusedRegister(kGpReg, {}); | |||
2180 | LoadNullValue(null.gp(), {}); | |||
2181 | __ PushRegister(type.kind(), null); | |||
2182 | } | |||
2183 | ||||
2184 | void RefFunc(FullDecoder* decoder, uint32_t function_index, Value* result) { | |||
2185 | LiftoffRegister func_index_reg = __ GetUnusedRegister(kGpReg, {}); | |||
2186 | __ LoadConstant(func_index_reg, WasmValue(function_index)); | |||
2187 | LiftoffAssembler::VarState func_index_var(kI32, func_index_reg, 0); | |||
2188 | CallRuntimeStub(WasmCode::kWasmRefFunc, MakeSig::Returns(kRef).Params(kI32), | |||
2189 | {func_index_var}, decoder->position()); | |||
2190 | __ PushRegister(kRef, LiftoffRegister(kReturnRegister0)); | |||
2191 | } | |||
2192 | ||||
2193 | void RefAsNonNull(FullDecoder* decoder, const Value& arg, Value* result) { | |||
2194 | LiftoffRegList pinned; | |||
2195 | LiftoffRegister obj = pinned.set(__ PopToRegister(pinned)); | |||
2196 | MaybeEmitNullCheck(decoder, obj.gp(), pinned, arg.type); | |||
2197 | __ PushRegister(kRef, obj); | |||
2198 | } | |||
2199 | ||||
2200 | void Drop(FullDecoder* decoder) { __ DropValues(1); } | |||
2201 | ||||
2202 | void TraceFunctionExit(FullDecoder* decoder) { | |||
2203 | CODE_COMMENT("trace function exit"); | |||
2204 | // Before making the runtime call, spill all cache registers. | |||
2205 | __ SpillAllRegisters(); | |||
2206 | LiftoffRegList pinned; | |||
2207 | // Get a register to hold the stack slot for the return value. | |||
2208 | LiftoffRegister info = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
2209 | __ AllocateStackSlot(info.gp(), sizeof(int64_t)); | |||
2210 | ||||
2211 | // Store the return value if there is exactly one. Multiple return values | |||
2212 | // are not handled yet. | |||
2213 | size_t num_returns = decoder->sig_->return_count(); | |||
2214 | if (num_returns == 1) { | |||
2215 | ValueKind return_kind = decoder->sig_->GetReturn(0).kind(); | |||
2216 | LiftoffRegister return_reg = | |||
2217 | __ LoadToRegister(__ cache_state()->stack_state.back(), pinned); | |||
2218 | if (is_reference(return_kind)) { | |||
2219 | __ StoreTaggedPointer(info.gp(), no_reg, 0, return_reg, pinned); | |||
2220 | } else { | |||
2221 | __ Store(info.gp(), no_reg, 0, return_reg, | |||
2222 | StoreType::ForValueKind(return_kind), pinned); | |||
2223 | } | |||
2224 | } | |||
2225 | // Put the parameter in its place. | |||
2226 | WasmTraceExitDescriptor descriptor; | |||
2227 | DCHECK_EQ(0, descriptor.GetStackParameterCount()); | |||
2228 | DCHECK_EQ(1, descriptor.GetRegisterParameterCount()); | |||
2229 | Register param_reg = descriptor.GetRegisterParameter(0); | |||
2230 | if (info.gp() != param_reg) { | |||
2231 | __ Move(param_reg, info.gp(), kPointerKind); | |||
2232 | } | |||
2233 | ||||
2234 | source_position_table_builder_.AddPosition( | |||
2235 | __ pc_offset(), SourcePosition(decoder->position()), false); | |||
2236 | __ CallRuntimeStub(WasmCode::kWasmTraceExit); | |||
2237 | DefineSafepoint(); | |||
2238 | ||||
2239 | __ DeallocateStackSlot(sizeof(int64_t)); | |||
2240 | } | |||
2241 | ||||
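| // With dynamic tiering, run the tier-up check once more on exit and write | |||
| // the remaining budget (kept in a dedicated stack slot) back to this | |||
| // function's entry in the per-instance tiering budget array. | |||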
2242 | void TierupCheckOnExit(FullDecoder* decoder) { | |||
2243 | if (!dynamic_tiering()) return; | |||
2244 | TierupCheck(decoder, decoder->position(), __ pc_offset()); | |||
2245 | CODE_COMMENT("update tiering budget"); | |||
2246 | LiftoffRegList pinned; | |||
2247 | LiftoffRegister budget = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
2248 | LiftoffRegister array = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
2249 | LOAD_INSTANCE_FIELD(array.gp(), TieringBudgetArray, kSystemPointerSize, | |||
2250 | pinned); | |||
2251 | uint32_t offset = | |||
2252 | kInt32Size * declared_function_index(env_->module, func_index_); | |||
2253 | __ Fill(budget, liftoff::kTierupBudgetOffset, ValueKind::kI32); | |||
2254 | __ Store(array.gp(), no_reg, offset, budget, StoreType::kI32Store, pinned); | |||
2255 | } | |||
2256 | ||||
2257 | void DoReturn(FullDecoder* decoder, uint32_t /* drop_values */) { | |||
2258 | if (FLAG_trace_wasm) TraceFunctionExit(decoder); | |||
2259 | TierupCheckOnExit(decoder); | |||
2260 | size_t num_returns = decoder->sig_->return_count(); | |||
2261 | if (num_returns > 0) __ MoveToReturnLocations(decoder->sig_, descriptor_); | |||
2262 | __ LeaveFrame(StackFrame::WASM); | |||
2263 | __ DropStackSlotsAndRet( | |||
2264 | static_cast<uint32_t>(descriptor_->ParameterSlotCount())); | |||
2265 | } | |||
2266 | ||||
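| // local.get: push a copy of the local's slot. Register and constant slots | |||
| // are duplicated directly; stack slots are loaded into a fresh register. | |||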
2267 | void LocalGet(FullDecoder* decoder, Value* result, | |||
2268 | const IndexImmediate<validate>& imm) { | |||
2269 | auto local_slot = __ cache_state()->stack_state[imm.index]; | |||
2270 | __ cache_state()->stack_state.emplace_back( | |||
2271 | local_slot.kind(), __ NextSpillOffset(local_slot.kind())); | |||
2272 | auto* slot = &__ cache_state()->stack_state.back(); | |||
2273 | if (local_slot.is_reg()) { | |||
2274 | __ cache_state()->inc_used(local_slot.reg()); | |||
2275 | slot->MakeRegister(local_slot.reg()); | |||
2276 | } else if (local_slot.is_const()) { | |||
2277 | slot->MakeConstant(local_slot.i32_const()); | |||
2278 | } else { | |||
2279 | DCHECK(local_slot.is_stack()); | |||
2280 | auto rc = reg_class_for(local_slot.kind()); | |||
2281 | LiftoffRegister reg = __ GetUnusedRegister(rc, {}); | |||
2282 | __ cache_state()->inc_used(reg); | |||
2283 | slot->MakeRegister(reg); | |||
2284 | __ Fill(reg, local_slot.offset(), local_slot.kind()); | |||
2285 | } | |||
2286 | } | |||
2287 | ||||
2288 | void LocalSetFromStackSlot(LiftoffAssembler::VarState* dst_slot, | |||
2289 | uint32_t local_index) { | |||
2290 | auto& state = *__ cache_state(); | |||
2291 | auto& src_slot = state.stack_state.back(); | |||
2292 | ValueKind kind = dst_slot->kind(); | |||
2293 | if (dst_slot->is_reg()) { | |||
2294 | LiftoffRegister slot_reg = dst_slot->reg(); | |||
2295 | if (state.get_use_count(slot_reg) == 1) { | |||
2296 | __ Fill(dst_slot->reg(), src_slot.offset(), kind); | |||
2297 | return; | |||
2298 | } | |||
2299 | state.dec_used(slot_reg); | |||
2300 | dst_slot->MakeStack(); | |||
2301 | } | |||
2302 | DCHECK(CheckCompatibleStackSlotTypes(kind, __ local_kind(local_index))); | |||
2303 | RegClass rc = reg_class_for(kind); | |||
2304 | LiftoffRegister dst_reg = __ GetUnusedRegister(rc, {}); | |||
2305 | __ Fill(dst_reg, src_slot.offset(), kind); | |||
2306 | *dst_slot = LiftoffAssembler::VarState(kind, dst_reg, dst_slot->offset()); | |||
2307 | __ cache_state()->inc_used(dst_reg); | |||
2308 | } | |||
2309 | ||||
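| // Shared implementation of local.set and local.tee: copy the value on top | |||
| // of the stack into the local's slot, popping it unless {is_tee} is set. | |||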
2310 | void LocalSet(uint32_t local_index, bool is_tee) { | |||
2311 | auto& state = *__ cache_state(); | |||
2312 | auto& source_slot = state.stack_state.back(); | |||
2313 | auto& target_slot = state.stack_state[local_index]; | |||
2314 | switch (source_slot.loc()) { | |||
2315 | case kRegister: | |||
2316 | if (target_slot.is_reg()) state.dec_used(target_slot.reg()); | |||
2317 | target_slot.Copy(source_slot); | |||
2318 | if (is_tee) state.inc_used(target_slot.reg()); | |||
2319 | break; | |||
2320 | case kIntConst: | |||
2321 | if (target_slot.is_reg()) state.dec_used(target_slot.reg()); | |||
2322 | target_slot.Copy(source_slot); | |||
2323 | break; | |||
2324 | case kStack: | |||
2325 | LocalSetFromStackSlot(&target_slot, local_index); | |||
2326 | break; | |||
2327 | } | |||
2328 | if (!is_tee) __ cache_state()->stack_state.pop_back(); | |||
2329 | } | |||
2330 | ||||
2331 | void LocalSet(FullDecoder* decoder, const Value& value, | |||
2332 | const IndexImmediate<validate>& imm) { | |||
2333 | LocalSet(imm.index, false); | |||
2334 | } | |||
2335 | ||||
2336 | void LocalTee(FullDecoder* decoder, const Value& value, Value* result, | |||
2337 | const IndexImmediate<validate>& imm) { | |||
2338 | LocalSet(imm.index, true); | |||
2339 | } | |||
2340 | ||||
2341 | void AllocateLocals(FullDecoder* decoder, base::Vector<Value> local_values) { | |||
2342 | // TODO(7748): Introduce typed functions bailout reason | |||
2343 | unsupported(decoder, kGC, "let"); | |||
2344 | } | |||
2345 | ||||
2346 | void DeallocateLocals(FullDecoder* decoder, uint32_t count) { | |||
2347 | // TODO(7748): Introduce typed functions bailout reason | |||
2348 | unsupported(decoder, kGC, "let"); | |||
2349 | } | |||
2350 | ||||
2351 | Register GetGlobalBaseAndOffset(const WasmGlobal* global, | |||
2352 | LiftoffRegList* pinned, uint32_t* offset) { | |||
2353 | Register addr = pinned->set(__ GetUnusedRegister(kGpReg, {})).gp(); | |||
2354 | if (global->mutability && global->imported) { | |||
2355 | LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kSystemPointerSize, | |||
2356 | *pinned); | |||
2357 | __ Load(LiftoffRegister(addr), addr, no_reg, | |||
2358 | global->index * sizeof(Address), kPointerLoadType, *pinned); | |||
2359 | *offset = 0; | |||
2360 | } else { | |||
2361 | LOAD_INSTANCE_FIELD(addr, GlobalsStart, kSystemPointerSize, *pinned); | |||
2362 | *offset = global->offset; | |||
2363 | } | |||
2364 | return addr; | |||
2365 | } | |||
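// GetGlobalBaseAndOffset (above): non-imported globals live in the untagged
// globals area and are addressed as GlobalsStart + global->offset. Imported
// mutable globals take one extra indirection: their absolute address is loaded
// from the instance's ImportedMutableGlobals array, and the returned offset is
// 0 in that case.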
2366 | ||||
2367 | void GetBaseAndOffsetForImportedMutableExternRefGlobal( | |||
2368 | const WasmGlobal* global, LiftoffRegList* pinned, Register* base, | |||
2369 | Register* offset) { | |||
2370 | Register globals_buffer = | |||
2371 | pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp(); | |||
2372 | LOAD_TAGGED_PTR_INSTANCE_FIELD(globals_buffer, | |||
2373 | ImportedMutableGlobalsBuffers, *pinned); | |||
2374 | *base = globals_buffer; | |||
2375 | __ LoadTaggedPointer( | |||
2376 | *base, globals_buffer, no_reg, | |||
2377 | wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(global->offset), | |||
2378 | *pinned); | |||
2379 | ||||
2380 | // For the offset we need the index of the global in the buffer, and | |||
2381 | // then calculate the actual offset from the index. Load the index from | |||
2382 | // the ImportedMutableGlobals array of the instance. | |||
2383 | Register imported_mutable_globals = | |||
2384 | pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp(); | |||
2385 | ||||
2386 | LOAD_INSTANCE_FIELD(imported_mutable_globals, ImportedMutableGlobals, | |||
2387 | kSystemPointerSize, *pinned); | |||
2388 | *offset = imported_mutable_globals; | |||
2389 | __ Load(LiftoffRegister(*offset), imported_mutable_globals, no_reg, | |||
2390 | global->index * sizeof(Address), | |||
2391 | kSystemPointerSize == 4 ? LoadType::kI32Load : LoadType::kI64Load, | |||
2392 | *pinned); | |||
2393 | __ emit_i32_shli(*offset, *offset, kTaggedSizeLog2); | |||
2394 | __ emit_i32_addi(*offset, *offset, | |||
2395 | wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0)); | |||
2396 | } | |||
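// For imported mutable reference-type globals the value lives in a FixedArray
// buffer. The code above loads that buffer as {base} and turns the element
// index (read from ImportedMutableGlobals) into a byte offset:
// index << kTaggedSizeLog2 plus the FixedArray header size.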
2397 | ||||
2398 | void GlobalGet(FullDecoder* decoder, Value* result, | |||
2399 | const GlobalIndexImmediate<validate>& imm) { | |||
2400 | const auto* global = &env_->module->globals[imm.index]; | |||
2401 | ValueKind kind = global->type.kind(); | |||
2402 | if (!CheckSupportedType(decoder, kind, "global")) { | |||
2403 | return; | |||
2404 | } | |||
2405 | ||||
2406 | if (is_reference(kind)) { | |||
2407 | if (global->mutability && global->imported) { | |||
2408 | LiftoffRegList pinned; | |||
2409 | Register base = no_reg; | |||
2410 | Register offset = no_reg; | |||
2411 | GetBaseAndOffsetForImportedMutableExternRefGlobal(global, &pinned, | |||
2412 | &base, &offset); | |||
2413 | __ LoadTaggedPointer(base, base, offset, 0, pinned); | |||
2414 | __ PushRegister(kind, LiftoffRegister(base)); | |||
2415 | return; | |||
2416 | } | |||
2417 | ||||
2418 | LiftoffRegList pinned; | |||
2419 | Register globals_buffer = | |||
2420 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); | |||
2421 | LOAD_TAGGED_PTR_INSTANCE_FIELD(globals_buffer, TaggedGlobalsBuffer, | |||
2422 | pinned); | |||
2423 | Register value = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); | |||
2424 | __ LoadTaggedPointer(value, globals_buffer, no_reg, | |||
2425 | wasm::ObjectAccess::ElementOffsetInTaggedFixedArray( | |||
2426 | imm.global->offset), | |||
2427 | pinned); | |||
2428 | __ PushRegister(kind, LiftoffRegister(value)); | |||
2429 | return; | |||
2430 | } | |||
2431 | LiftoffRegList pinned; | |||
2432 | uint32_t offset = 0; | |||
2433 | Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset); | |||
2434 | LiftoffRegister value = | |||
2435 | pinned.set(__ GetUnusedRegister(reg_class_for(kind), pinned)); | |||
2436 | LoadType type = LoadType::ForValueKind(kind); | |||
2437 | __ Load(value, addr, no_reg, offset, type, pinned, nullptr, false); | |||
2438 | __ PushRegister(kind, value); | |||
2439 | } | |||
2440 | ||||
2441 | void GlobalSet(FullDecoder* decoder, const Value&, | |||
2442 | const GlobalIndexImmediate<validate>& imm) { | |||
2443 | auto* global = &env_->module->globals[imm.index]; | |||
2444 | ValueKind kind = global->type.kind(); | |||
2445 | if (!CheckSupportedType(decoder, kind, "global")) { | |||
2446 | return; | |||
2447 | } | |||
2448 | ||||
2449 | if (is_reference(kind)) { | |||
2450 | if (global->mutability && global->imported) { | |||
2451 | LiftoffRegList pinned; | |||
2452 | LiftoffRegister value = pinned.set(__ PopToRegister(pinned)); | |||
2453 | Register base = no_reg; | |||
2454 | Register offset = no_reg; | |||
2455 | GetBaseAndOffsetForImportedMutableExternRefGlobal(global, &pinned, | |||
2456 | &base, &offset); | |||
2457 | __ StoreTaggedPointer(base, offset, 0, value, pinned); | |||
2458 | return; | |||
2459 | } | |||
2460 | ||||
2461 | LiftoffRegList pinned; | |||
2462 | Register globals_buffer = | |||
2463 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); | |||
2464 | LOAD_TAGGED_PTR_INSTANCE_FIELD(globals_buffer, TaggedGlobalsBuffer, | |||
2465 | pinned); | |||
2466 | LiftoffRegister value = pinned.set(__ PopToRegister(pinned)); | |||
2467 | __ StoreTaggedPointer(globals_buffer, no_reg, | |||
2468 | wasm::ObjectAccess::ElementOffsetInTaggedFixedArray( | |||
2469 | imm.global->offset), | |||
2470 | value, pinned); | |||
2471 | return; | |||
2472 | } | |||
2473 | LiftoffRegList pinned; | |||
2474 | uint32_t offset = 0; | |||
2475 | Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset); | |||
2476 | LiftoffRegister reg = pinned.set(__ PopToRegister(pinned)); | |||
2477 | StoreType type = StoreType::ForValueKind(kind); | |||
2478 | __ Store(addr, no_reg, offset, reg, type, {}, nullptr, false); | |||
2479 | } | |||
2480 | ||||
2481 | void TableGet(FullDecoder* decoder, const Value&, Value*, | |||
2482 | const IndexImmediate<validate>& imm) { | |||
2483 | LiftoffRegList pinned; | |||
2484 | ||||
2485 | LiftoffRegister table_index_reg = | |||
2486 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
2487 | __ LoadConstant(table_index_reg, WasmValue(imm.index)); | |||
2488 | LiftoffAssembler::VarState table_index(kPointerKind, table_index_reg, 0); | |||
2489 | ||||
2490 | LiftoffAssembler::VarState index = __ cache_state()->stack_state.back(); | |||
2491 | ||||
2492 | ValueKind result_kind = env_->module->tables[imm.index].type.kind(); | |||
2493 | CallRuntimeStub(WasmCode::kWasmTableGet, | |||
2494 | MakeSig::Returns(result_kind).Params(kI32, kI32), | |||
2495 | {table_index, index}, decoder->position()); | |||
2496 | ||||
2497 | // Pop parameters from the value stack. | |||
2498 | __ cache_state()->stack_state.pop_back(1); | |||
2499 | ||||
2500 | RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill); | |||
2501 | ||||
2502 | __ PushRegister(result_kind, LiftoffRegister(kReturnRegister0)); | |||
2503 | } | |||
2504 | ||||
2505 | void TableSet(FullDecoder* decoder, const Value&, const Value&, | |||
2506 | const IndexImmediate<validate>& imm) { | |||
2507 | LiftoffRegList pinned; | |||
2508 | ||||
2509 | LiftoffRegister table_index_reg = | |||
2510 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
2511 | __ LoadConstant(table_index_reg, WasmValue(imm.index)); | |||
2512 | LiftoffAssembler::VarState table_index(kPointerKind, table_index_reg, 0); | |||
2513 | ||||
2514 | LiftoffAssembler::VarState value = __ cache_state()->stack_state.end()[-1]; | |||
2515 | LiftoffAssembler::VarState index = __ cache_state()->stack_state.end()[-2]; | |||
2516 | ||||
2517 | ValueKind table_kind = env_->module->tables[imm.index].type.kind(); | |||
2518 | ||||
2519 | CallRuntimeStub(WasmCode::kWasmTableSet, | |||
2520 | MakeSig::Params(kI32, kI32, table_kind), | |||
2521 | {table_index, index, value}, decoder->position()); | |||
2522 | ||||
2523 | // Pop parameters from the value stack. | |||
2524 | __ cache_state()->stack_state.pop_back(2); | |||
2525 | ||||
2526 | RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill); | |||
2527 | } | |||
2528 | ||||
2529 | WasmCode::RuntimeStubId GetRuntimeStubIdForTrapReason(TrapReason reason) { | |||
2530 | switch (reason) { | |||
2531 | #define RUNTIME_STUB_FOR_TRAP(trap_reason) \ | |||
2532 | case k##trap_reason: \ | |||
2533 | return WasmCode::kThrowWasm##trap_reason; | |||
2534 | ||||
2535 | FOREACH_WASM_TRAPREASON(RUNTIME_STUB_FOR_TRAP) | |||
2536 | #undef RUNTIME_STUB_FOR_TRAP | |||
2537 | default: | |||
2538 | UNREACHABLE(); | |||
2539 | } | |||
2540 | } | |||
2541 | ||||
2542 | void Trap(FullDecoder* decoder, TrapReason reason) { | |||
2543 | Label* trap_label = | |||
2544 | AddOutOfLineTrap(decoder, GetRuntimeStubIdForTrapReason(reason)); | |||
2545 | __ emit_jump(trap_label); | |||
2546 | __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap); | |||
2547 | } | |||
2548 | ||||
2549 | void AssertNull(FullDecoder* decoder, const Value& arg, Value* result) { | |||
2550 | LiftoffRegList pinned; | |||
2551 | LiftoffRegister obj = pinned.set(__ PopToRegister(pinned)); | |||
2552 | Label* trap_label = | |||
2553 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapIllegalCast); | |||
2554 | LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned); | |||
2555 | LoadNullValue(null.gp(), pinned); | |||
2556 | __ emit_cond_jump(kUnequal, trap_label, kOptRef, obj.gp(), null.gp()); | |||
2557 | __ PushRegister(kOptRef, obj); | |||
2558 | } | |||
2559 | ||||
2560 | void NopForTestingUnsupportedInLiftoff(FullDecoder* decoder) { | |||
2561 | unsupported(decoder, kOtherReason, "testing opcode"); | |||
2562 | } | |||
2563 | ||||
2564 | void Select(FullDecoder* decoder, const Value& cond, const Value& fval, | |||
2565 | const Value& tval, Value* result) { | |||
2566 | LiftoffRegList pinned; | |||
2567 | Register condition = pinned.set(__ PopToRegister()).gp(); | |||
2568 | ValueKind kind = __ cache_state()->stack_state.end()[-1].kind(); | |||
2569 | DCHECK(CheckCompatibleStackSlotTypes( | |||
2570 | kind, __ cache_state()->stack_state.end()[-2].kind())); | |||
2571 | LiftoffRegister false_value = pinned.set(__ PopToRegister(pinned)); | |||
2572 | LiftoffRegister true_value = __ PopToRegister(pinned); | |||
2573 | LiftoffRegister dst = __ GetUnusedRegister(true_value.reg_class(), | |||
2574 | {true_value, false_value}, {}); | |||
2575 | if (!__ emit_select(dst, condition, true_value, false_value, kind)) { | |||
2576 | // Emit generic code (using branches) instead. | |||
2577 | Label cont; | |||
2578 | Label case_false; | |||
2579 | __ emit_cond_jump(kEqual, &case_false, kI32, condition); | |||
2580 | if (dst != true_value) __ Move(dst, true_value, kind); | |||
2581 | __ emit_jump(&cont); | |||
2582 | ||||
2583 | __ bind(&case_false); | |||
2584 | if (dst != false_value) __ Move(dst, false_value, kind); | |||
2585 | __ bind(&cont); | |||
2586 | } | |||
2587 | __ PushRegister(kind, dst); | |||
2588 | } | |||
2589 | ||||
2590 | void BrImpl(FullDecoder* decoder, Control* target) { | |||
2591 | if (dynamic_tiering()) { | |||
2592 | if (target->is_loop()) { | |||
2593 | DCHECK(target->label.get()->is_bound()); | |||
2594 | int jump_distance = __ pc_offset() - target->label.get()->pos(); | |||
2595 | // For now we just add one as the cost for the tier up check. We might | |||
2596 | // want to revisit this when tuning tiering budgets later. | |||
2597 | const int kTierUpCheckCost = 1; | |||
2598 | TierupCheck(decoder, decoder->position(), | |||
2599 | jump_distance + kTierUpCheckCost); | |||
2600 | } else { | |||
2601 | // To estimate time spent in this function more accurately, we could | |||
2602 | // increment the tiering budget on forward jumps. However, we don't | |||
2603 | // know the jump distance yet; using a blanket value has been tried | |||
2604 | // and found to not make a difference. | |||
2605 | } | |||
2606 | } | |||
2607 | if (!target->br_merge()->reached) { | |||
2608 | target->label_state.InitMerge( | |||
2609 | *__ cache_state(), __ num_locals(), target->br_merge()->arity, | |||
2610 | target->stack_depth + target->num_exceptions); | |||
2611 | } | |||
2612 | __ MergeStackWith(target->label_state, target->br_merge()->arity, | |||
2613 | target->is_loop() ? LiftoffAssembler::kBackwardJump | |||
2614 | : LiftoffAssembler::kForwardJump); | |||
2615 | __ jmp(target->label.get()); | |||
2616 | } | |||
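// BrImpl (above): under dynamic tiering, a backward branch to a loop header
// first emits a tier-up check charged with the jump distance. The branch
// target's merge state is initialized lazily by the first branch that reaches
// it; the current cache state is then merged into it before the jump.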
2617 | ||||
2618 | void BrOrRet(FullDecoder* decoder, uint32_t depth, | |||
2619 | uint32_t /* drop_values */) { | |||
2620 | BrOrRetImpl(decoder, depth); | |||
2621 | } | |||
2622 | ||||
2623 | void BrOrRetImpl(FullDecoder* decoder, uint32_t depth) { | |||
2624 | if (depth == decoder->control_depth() - 1) { | |||
2625 | DoReturn(decoder, 0); | |||
2626 | } else { | |||
2627 | BrImpl(decoder, decoder->control_at(depth)); | |||
2628 | } | |||
2629 | } | |||
2630 | ||||
2631 | void BrIf(FullDecoder* decoder, const Value& /* cond */, uint32_t depth) { | |||
2632 | // Before branching, materialize all constants. This avoids repeatedly | |||
2633 | // materializing them for each conditional branch. | |||
2634 | // TODO(clemensb): Do the same for br_table. | |||
2635 | if (depth != decoder->control_depth() - 1) { | |||
2636 | __ MaterializeMergedConstants( | |||
2637 | decoder->control_at(depth)->br_merge()->arity); | |||
2638 | } | |||
2639 | ||||
2640 | Label cont_false; | |||
2641 | ||||
2642 | // Test the condition on the value stack, jump to {cont_false} if zero. | |||
2643 | JumpIfFalse(decoder, &cont_false); | |||
2644 | ||||
2645 | // As a quickfix for https://crbug.com/1314184 we store the cache state | |||
2646 | // before calling {BrOrRetImpl} under dynamic tiering, because the tier up | |||
2647 | // check modifies the cache state (GetUnusedRegister, | |||
2648 | // LoadInstanceIntoRegister). | |||
2649 | // TODO(wasm): This causes significant overhead during compilation; try to | |||
2650 | // avoid this, maybe by passing in scratch registers. | |||
2651 | if (dynamic_tiering()) { | |||
2652 | LiftoffAssembler::CacheState old_cache_state; | |||
2653 | old_cache_state.Split(*__ cache_state()); | |||
2654 | BrOrRetImpl(decoder, depth); | |||
2655 | __ cache_state()->Steal(old_cache_state); | |||
2656 | } else { | |||
2657 | BrOrRetImpl(decoder, depth); | |||
2658 | } | |||
2659 | ||||
2660 | __ bind(&cont_false); | |||
2661 | } | |||
2662 | ||||
2663 | // Generate a branch table case, potentially reusing previously generated | |||
2664 | // stack transfer code. | |||
2665 | void GenerateBrCase(FullDecoder* decoder, uint32_t br_depth, | |||
2666 | std::map<uint32_t, MovableLabel>* br_targets) { | |||
2667 | MovableLabel& label = (*br_targets)[br_depth]; | |||
2668 | if (label.get()->is_bound()) { | |||
2669 | __ jmp(label.get()); | |||
2670 | } else { | |||
2671 | __ bind(label.get()); | |||
2672 | BrOrRet(decoder, br_depth, 0); | |||
2673 | } | |||
2674 | } | |||
2675 | ||||
2676 | // Generate a branch table for input in [min, max). | |||
2677 | // TODO(wasm): Generate a real branch table (like TF TableSwitch). | |||
2678 | void GenerateBrTable(FullDecoder* decoder, LiftoffRegister tmp, | |||
2679 | LiftoffRegister value, uint32_t min, uint32_t max, | |||
2680 | BranchTableIterator<validate>* table_iterator, | |||
2681 | std::map<uint32_t, MovableLabel>* br_targets) { | |||
2682 | DCHECK_LT(min, max); | |||
2683 | // Check base case. | |||
2684 | if (max == min + 1) { | |||
2685 | DCHECK_EQ(min, table_iterator->cur_index()); | |||
2686 | GenerateBrCase(decoder, table_iterator->next(), br_targets); | |||
2687 | return; | |||
2688 | } | |||
2689 | ||||
2690 | uint32_t split = min + (max - min) / 2; | |||
2691 | Label upper_half; | |||
2692 | __ LoadConstant(tmp, WasmValue(split)); | |||
2693 | __ emit_cond_jump(kUnsignedGreaterEqual, &upper_half, kI32, value.gp(), | |||
2694 | tmp.gp()); | |||
2695 | // Emit br table for lower half: | |||
2696 | GenerateBrTable(decoder, tmp, value, min, split, table_iterator, | |||
2697 | br_targets); | |||
2698 | __ bind(&upper_half); | |||
2699 | // table_iterator will trigger a DCHECK if we don't stop decoding now. | |||
2700 | if (did_bailout()) return; | |||
2701 | // Emit br table for upper half: | |||
2702 | GenerateBrTable(decoder, tmp, value, split, max, table_iterator, | |||
2703 | br_targets); | |||
2704 | } | |||
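// GenerateBrTable (above) does not emit a real jump table; it recursively
// splits the index range [min, max) in half and emits one unsigned compare per
// level, so dispatch costs O(log n) branches. The stack-transfer code per
// target depth is shared through the {br_targets} map (see GenerateBrCase).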
2705 | ||||
2706 | void BrTable(FullDecoder* decoder, const BranchTableImmediate<validate>& imm, | |||
2707 | const Value& key) { | |||
2708 | LiftoffRegList pinned; | |||
2709 | LiftoffRegister value = pinned.set(__ PopToRegister()); | |||
2710 | BranchTableIterator<validate> table_iterator(decoder, imm); | |||
2711 | std::map<uint32_t, MovableLabel> br_targets; | |||
2712 | ||||
2713 | if (imm.table_count > 0) { | |||
2714 | LiftoffRegister tmp = __ GetUnusedRegister(kGpReg, pinned); | |||
2715 | __ LoadConstant(tmp, WasmValue(uint32_t{imm.table_count})); | |||
2716 | Label case_default; | |||
2717 | __ emit_cond_jump(kUnsignedGreaterEqual, &case_default, kI32, value.gp(), | |||
2718 | tmp.gp()); | |||
2719 | ||||
2720 | GenerateBrTable(decoder, tmp, value, 0, imm.table_count, &table_iterator, | |||
2721 | &br_targets); | |||
2722 | ||||
2723 | __ bind(&case_default); | |||
2724 | // table_iterator will trigger a DCHECK if we don't stop decoding now. | |||
2725 | if (did_bailout()) return; | |||
2726 | } | |||
2727 | ||||
2728 | // Generate the default case. | |||
2729 | GenerateBrCase(decoder, table_iterator.next(), &br_targets); | |||
2730 | DCHECK(!table_iterator.has_next()); | |||
2731 | } | |||
2732 | ||||
2733 | void Else(FullDecoder* decoder, Control* c) { | |||
2734 | if (c->reachable()) { | |||
2735 | if (!c->end_merge.reached) { | |||
2736 | c->label_state.InitMerge(*__ cache_state(), __ num_locals(), | |||
2737 | c->end_merge.arity, | |||
2738 | c->stack_depth + c->num_exceptions); | |||
2739 | } | |||
2740 | __ MergeFullStackWith(c->label_state, *__ cache_state()); | |||
2741 | __ emit_jump(c->label.get()); | |||
2742 | } | |||
2743 | __ bind(c->else_state->label.get()); | |||
2744 | __ cache_state()->Steal(c->else_state->state); | |||
2745 | } | |||
2746 | ||||
2747 | SpilledRegistersForInspection* GetSpilledRegistersForInspection() { | |||
2748 | DCHECK(for_debugging_); | |||
2749 | // If we are generating debugging code, we really need to spill all | |||
2750 | // registers to make them inspectable when stopping at the trap. | |||
2751 | auto* spilled = compilation_zone_->New<SpilledRegistersForInspection>( | |||
2752 | compilation_zone_); | |||
2753 | for (uint32_t i = 0, e = __ cache_state()->stack_height(); i < e; ++i) { | |||
2754 | auto& slot = __ cache_state()->stack_state[i]; | |||
2755 | if (!slot.is_reg()) continue; | |||
2756 | spilled->entries.push_back(SpilledRegistersForInspection::Entry{ | |||
2757 | slot.offset(), slot.reg(), slot.kind()}); | |||
2758 | __ RecordUsedSpillOffset(slot.offset()); | |||
2759 | } | |||
2760 | return spilled; | |||
2761 | } | |||
2762 | ||||
2763 | Label* AddOutOfLineTrap(FullDecoder* decoder, WasmCode::RuntimeStubId stub, | |||
2764 | uint32_t pc = 0) { | |||
2765 | // Only memory OOB traps need a {pc}. | |||
2766 | DCHECK_IMPLIES(stub != WasmCode::kThrowWasmTrapMemOutOfBounds, pc == 0); | |||
2767 | DCHECK(FLAG_wasm_bounds_checks); | |||
2768 | OutOfLineSafepointInfo* safepoint_info = nullptr; | |||
2769 | if (V8_UNLIKELY(for_debugging_)) { | |||
2770 | // Execution does not return after a trap. Therefore we don't have to | |||
2771 | // define a safepoint for traps that would preserve references on the | |||
2772 | // stack. However, if this is debug code, then we have to preserve the | |||
2773 | // references so that they can be inspected. | |||
2774 | safepoint_info = | |||
2775 | compilation_zone_->New<OutOfLineSafepointInfo>(compilation_zone_); | |||
2776 | __ cache_state()->GetTaggedSlotsForOOLCode( | |||
2777 | &safepoint_info->slots, &safepoint_info->spills, | |||
2778 | LiftoffAssembler::CacheState::SpillLocation::kStackSlots); | |||
2779 | } | |||
2780 | out_of_line_code_.push_back(OutOfLineCode::Trap( | |||
2781 | stub, decoder->position(), | |||
2782 | V8_UNLIKELY(for_debugging_) ? GetSpilledRegistersForInspection() | |||
2783 | : nullptr, | |||
2784 | safepoint_info, pc, RegisterOOLDebugSideTableEntry(decoder))); | |||
2785 | return out_of_line_code_.back().label.get(); | |||
2786 | } | |||
2787 | ||||
2788 | enum ForceCheck : bool { kDoForceCheck = true, kDontForceCheck = false }; | |||
2789 | ||||
2790 | // Returns {no_reg} if the memory access is statically known to be out of | |||
2791 | // bounds (a jump to the trap was generated then); return the GP {index} | |||
2792 | // register otherwise (holding the ptrsized index). | |||
2793 | Register BoundsCheckMem(FullDecoder* decoder, uint32_t access_size, | |||
2794 | uint64_t offset, LiftoffRegister index, | |||
2795 | LiftoffRegList pinned, ForceCheck force_check) { | |||
2796 | const bool statically_oob = | |||
2797 | !base::IsInBounds<uintptr_t>(offset, access_size, | |||
2798 | env_->max_memory_size); | |||
2799 | ||||
2800 | // After bounds checking, we know that the index must be ptrsize, hence only | |||
2801 | // look at the lower word on 32-bit systems (the high word is bounds-checked | |||
2802 | // further down). | |||
2803 | Register index_ptrsize = | |||
2804 | kNeedI64RegPair && index.is_gp_pair() ? index.low_gp() : index.gp(); | |||
2805 | ||||
2806 | // Without bounds checks (testing only), just return the ptrsize index. | |||
2807 | if (V8_UNLIKELY(env_->bounds_checks == kNoBoundsChecks)) { | |||
2808 | return index_ptrsize; | |||
2809 | } | |||
2810 | ||||
2811 | // Early return for trap handler. | |||
2812 | DCHECK_IMPLIES(env_->module->is_memory64, | |||
2813 | env_->bounds_checks == kExplicitBoundsChecks); | |||
2814 | if (!force_check && !statically_oob && | |||
2815 | env_->bounds_checks == kTrapHandler) { | |||
2816 | // With trap handlers we should not have a register pair as input (we | |||
2817 | // would only return the lower half). | |||
2818 | DCHECK(index.is_gp()); | |||
2819 | return index_ptrsize; | |||
2820 | } | |||
2821 | ||||
2822 | CODE_COMMENT("bounds check memory"); | |||
2823 | ||||
2824 | // Set {pc} of the OOL code to {0} to avoid generation of protected | |||
2825 | // instruction information (see {GenerateOutOfLineCode}). | |||
2826 | Label* trap_label = | |||
2827 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds, 0); | |||
2828 | ||||
2829 | if (V8_UNLIKELY(statically_oob)) { | |||
2830 | __ emit_jump(trap_label); | |||
2831 | decoder->SetSucceedingCodeDynamicallyUnreachable(); | |||
2832 | return no_reg; | |||
2833 | } | |||
2834 | ||||
2835 | // Convert the index to ptrsize, bounds-checking the high word on 32-bit | |||
2836 | // systems for memory64. | |||
2837 | if (!env_->module->is_memory64) { | |||
2838 | __ emit_u32_to_uintptr(index_ptrsize, index_ptrsize); | |||
2839 | } else if (kSystemPointerSize == kInt32Size) { | |||
2840 | DCHECK_GE(kMaxUInt32, env_->max_memory_size); | |||
2841 | __ emit_cond_jump(kNotEqualZero, trap_label, kI32, index.high_gp()); | |||
2842 | } | |||
2843 | ||||
2844 | uintptr_t end_offset = offset + access_size - 1u; | |||
2845 | ||||
2846 | pinned.set(index_ptrsize); | |||
2847 | LiftoffRegister end_offset_reg = | |||
2848 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
2849 | LiftoffRegister mem_size = __ GetUnusedRegister(kGpReg, pinned); | |||
2850 | LOAD_INSTANCE_FIELD(mem_size.gp(), MemorySize, kSystemPointerSize, pinned); | |||
2851 | ||||
2852 | __ LoadConstant(end_offset_reg, WasmValue::ForUintPtr(end_offset)); | |||
2853 | ||||
2854 | // If the end offset is larger than the smallest memory, dynamically check | |||
2855 | // the end offset against the actual memory size, which is not known at | |||
2856 | // compile time. Otherwise, only one check is required (see below). | |||
2857 | if (end_offset > env_->min_memory_size) { | |||
2858 | __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kPointerKind, | |||
2859 | end_offset_reg.gp(), mem_size.gp()); | |||
2860 | } | |||
2861 | ||||
2862 | // Just reuse the end_offset register for computing the effective size | |||
2863 | // (which is >= 0 because of the check above). | |||
2864 | LiftoffRegister effective_size_reg = end_offset_reg; | |||
2865 | __ emit_ptrsize_sub(effective_size_reg.gp(), mem_size.gp(), | |||
2866 | end_offset_reg.gp()); | |||
2867 | ||||
2868 | __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kPointerKind, | |||
2869 | index_ptrsize, effective_size_reg.gp()); | |||
2870 | return index_ptrsize; | |||
2871 | } | |||
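// The dynamic bounds check emitted above boils down to two unsigned
// comparisons against the current memory size, with
// end_offset = offset + access_size - 1:
//   if (end_offset >= mem_size) trap;
//   if (index >= mem_size - end_offset) trap;
// The first comparison is only emitted when end_offset can exceed the declared
// minimum memory size; otherwise the subtraction cannot underflow.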
2872 | ||||
2873 | void AlignmentCheckMem(FullDecoder* decoder, uint32_t access_size, | |||
2874 | uintptr_t offset, Register index, | |||
2875 | LiftoffRegList pinned) { | |||
2876 | CODE_COMMENT("alignment check"); | |||
2877 | Label* trap_label = | |||
2878 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapUnalignedAccess, 0); | |||
2879 | Register address = __ GetUnusedRegister(kGpReg, pinned).gp(); | |||
2880 | ||||
2881 | const uint32_t align_mask = access_size - 1; | |||
2882 | if ((offset & align_mask) == 0) { | |||
2883 | // If {offset} is aligned, we can produce faster code. | |||
2884 | ||||
2885 | // TODO(ahaas): On Intel, the "test" instruction implicitly computes the | |||
2886 | // AND of two operands. We could introduce a new variant of | |||
2887 | // {emit_cond_jump} to use the "test" instruction without the "and" here. | |||
2888 | // Then we can also avoid using the temp register here. | |||
2889 | __ emit_i32_andi(address, index, align_mask); | |||
2890 | __ emit_cond_jump(kUnequal, trap_label, kI32, address); | |||
2891 | } else { | |||
2892 | // For alignment checks we only look at the lower 32-bits in {offset}. | |||
2893 | __ emit_i32_addi(address, index, static_cast<uint32_t>(offset)); | |||
2894 | __ emit_i32_andi(address, address, align_mask); | |||
2895 | __ emit_cond_jump(kUnequal, trap_label, kI32, address); | |||
2896 | } | |||
2897 | } | |||
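// AlignmentCheckMem (above) traps unless
// ((index + offset) & (access_size - 1)) == 0. When {offset} itself is
// aligned, adding it cannot change the low bits, so the faster path masks the
// index alone.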
2898 | ||||
2899 | void TraceMemoryOperation(bool is_store, MachineRepresentation rep, | |||
2900 | Register index, uintptr_t offset, | |||
2901 | WasmCodePosition position) { | |||
2902 | // Before making the runtime call, spill all cache registers. | |||
2903 | __ SpillAllRegisters(); | |||
2904 | ||||
2905 | LiftoffRegList pinned; | |||
2906 | if (index != no_reg) pinned.set(index); | |||
2907 | // Get one register for computing the effective offset (offset + index). | |||
2908 | LiftoffRegister effective_offset = | |||
2909 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
2910 | DCHECK_GE(kMaxUInt32, offset); | |||
2911 | __ LoadConstant(effective_offset, WasmValue(static_cast<uint32_t>(offset))); | |||
2912 | if (index != no_reg) { | |||
2913 | // TODO(clemensb): Do a 64-bit addition here if memory64 is used. | |||
2914 | __ emit_i32_add(effective_offset.gp(), effective_offset.gp(), index); | |||
2915 | } | |||
2916 | ||||
2917 | // Get a register to hold the stack slot for MemoryTracingInfo. | |||
2918 | LiftoffRegister info = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
2919 | // Allocate stack slot for MemoryTracingInfo. | |||
2920 | __ AllocateStackSlot(info.gp(), sizeof(MemoryTracingInfo)); | |||
2921 | ||||
2922 | // Reuse the {effective_offset} register for all information to be stored in | |||
2923 | // the MemoryTracingInfo struct. | |||
2924 | LiftoffRegister data = effective_offset; | |||
2925 | ||||
2926 | // Now store all information into the MemoryTracingInfo struct. | |||
2927 | if (kSystemPointerSize == 8) { | |||
2928 | // Zero-extend the effective offset to u64. | |||
2929 | CHECK(__ emit_type_conversion(kExprI64UConvertI32, data, effective_offset, | |||
2930 | nullptr)); | |||
2931 | } | |||
2932 | __ Store( | |||
2933 | info.gp(), no_reg, offsetof(MemoryTracingInfo, offset), data, | |||
2934 | kSystemPointerSize == 8 ? StoreType::kI64Store : StoreType::kI32Store, | |||
2935 | pinned); | |||
2936 | __ LoadConstant(data, WasmValue(is_store ? 1 : 0)); | |||
2937 | __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, is_store), data, | |||
2938 | StoreType::kI32Store8, pinned); | |||
2939 | __ LoadConstant(data, WasmValue(static_cast<int>(rep))); | |||
2940 | __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, mem_rep), data, | |||
2941 | StoreType::kI32Store8, pinned); | |||
2942 | ||||
2943 | WasmTraceMemoryDescriptor descriptor; | |||
2944 | DCHECK_EQ(0, descriptor.GetStackParameterCount()); | |||
2945 | DCHECK_EQ(1, descriptor.GetRegisterParameterCount()); | |||
2946 | Register param_reg = descriptor.GetRegisterParameter(0); | |||
2947 | if (info.gp() != param_reg) { | |||
2948 | __ Move(param_reg, info.gp(), kPointerKind); | |||
2949 | } | |||
2950 | ||||
2951 | source_position_table_builder_.AddPosition(__ pc_offset(), | |||
2952 | SourcePosition(position), false); | |||
2953 | __ CallRuntimeStub(WasmCode::kWasmTraceMemory); | |||
2954 | DefineSafepoint(); | |||
2955 | ||||
2956 | __ DeallocateStackSlot(sizeof(MemoryTracingInfo)); | |||
2957 | } | |||
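// TraceMemoryOperation (above) is only reached when FLAG_trace_wasm_memory is
// set. It spills all registers, fills a MemoryTracingInfo{offset, is_store,
// mem_rep} in a freshly allocated stack slot, and calls the WasmTraceMemory
// runtime stub with a pointer to that slot.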
2958 | ||||
2959 | bool IndexStaticallyInBounds(const LiftoffAssembler::VarState& index_slot, | |||
2960 | int access_size, uintptr_t* offset) { | |||
2961 | if (!index_slot.is_const()) return false; | |||
2962 | ||||
2963 | // Potentially zero extend index (which is a 32-bit constant). | |||
2964 | const uintptr_t index = static_cast<uint32_t>(index_slot.i32_const()); | |||
2965 | const uintptr_t effective_offset = index + *offset; | |||
2966 | ||||
2967 | if (effective_offset < index // overflow | |||
2968 | || !base::IsInBounds<uintptr_t>(effective_offset, access_size, | |||
2969 | env_->min_memory_size)) { | |||
2970 | return false; | |||
2971 | } | |||
2972 | ||||
2973 | *offset = effective_offset; | |||
2974 | return true; | |||
2975 | } | |||
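// IndexStaticallyInBounds (above): if the index is a compile-time constant and
// index + offset + access_size fits into the declared minimum memory size, the
// access can never trap. The effective offset is folded into {offset} and the
// caller skips both the index register and the dynamic bounds check.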
2976 | ||||
2977 | Register GetMemoryStart(LiftoffRegList pinned) { | |||
2978 | Register memory_start = __ cache_state()->cached_mem_start; | |||
2979 | if (memory_start == no_reg) { | |||
2980 | memory_start = __ GetUnusedRegister(kGpReg, pinned).gp(); | |||
2981 | LOAD_INSTANCE_FIELD(memory_start, MemoryStart, kSystemPointerSize, | |||
2982 | pinned); | |||
2983 | #ifdef V8_SANDBOXED_POINTERS | |||
2984 | __ DecodeSandboxedPointer(memory_start); | |||
2985 | #endif | |||
2986 | __ cache_state()->SetMemStartCacheRegister(memory_start); | |||
2987 | } | |||
2988 | return memory_start; | |||
2989 | } | |||
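// GetMemoryStart (above) caches the memory start pointer in a register across
// accesses; it only reloads it from the instance (and, with sandboxed
// pointers, decodes it) when no cache register is currently assigned.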
2990 | ||||
2991 | void LoadMem(FullDecoder* decoder, LoadType type, | |||
2992 | const MemoryAccessImmediate<validate>& imm, | |||
2993 | const Value& index_val, Value* result) { | |||
2994 | ValueKind kind = type.value_type().kind(); | |||
2995 | RegClass rc = reg_class_for(kind); | |||
2996 | if (!CheckSupportedType(decoder, kind, "load")) return; | |||
2997 | ||||
2998 | uintptr_t offset = imm.offset; | |||
2999 | Register index = no_reg; | |||
3000 | ||||
3001 | // Only look at the slot, do not pop it yet (will happen in PopToRegister | |||
3002 | // below, if this is not a statically-in-bounds index). | |||
3003 | auto& index_slot = __ cache_state()->stack_state.back(); | |||
3004 | bool i64_offset = index_val.type == kWasmI64; | |||
3005 | if (IndexStaticallyInBounds(index_slot, type.size(), &offset)) { | |||
3006 | __ cache_state()->stack_state.pop_back(); | |||
3007 | CODE_COMMENT("load from memory (constant offset)"); | |||
3008 | LiftoffRegList pinned; | |||
3009 | Register mem = pinned.set(GetMemoryStart(pinned)); | |||
3010 | LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned)); | |||
3011 | __ Load(value, mem, no_reg, offset, type, pinned, nullptr, true, | |||
3012 | i64_offset); | |||
3013 | __ PushRegister(kind, value); | |||
3014 | } else { | |||
3015 | LiftoffRegister full_index = __ PopToRegister(); | |||
3016 | index = BoundsCheckMem(decoder, type.size(), offset, full_index, {}, | |||
3017 | kDontForceCheck); | |||
3018 | if (index == no_reg) return; | |||
3019 | ||||
3020 | CODE_COMMENT("load from memory"); | |||
3021 | LiftoffRegList pinned = {index}; | |||
3022 | ||||
3023 | // Load the memory start address only now to reduce register pressure | |||
3024 | // (important on ia32). | |||
3025 | Register mem = pinned.set(GetMemoryStart(pinned)); | |||
3026 | LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned)); | |||
3027 | ||||
3028 | uint32_t protected_load_pc = 0; | |||
3029 | __ Load(value, mem, index, offset, type, pinned, &protected_load_pc, true, | |||
3030 | i64_offset); | |||
3031 | if (env_->bounds_checks == kTrapHandler) { | |||
3032 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds, | |||
3033 | protected_load_pc); | |||
3034 | } | |||
3035 | __ PushRegister(kind, value); | |||
3036 | } | |||
3037 | ||||
3038 | if (V8_UNLIKELY(FLAG_trace_wasm_memory)) { | |||
3039 | TraceMemoryOperation(false, type.mem_type().representation(), index, | |||
3040 | offset, decoder->position()); | |||
3041 | } | |||
3042 | } | |||
3043 | ||||
3044 | void LoadTransform(FullDecoder* decoder, LoadType type, | |||
3045 | LoadTransformationKind transform, | |||
3046 | const MemoryAccessImmediate<validate>& imm, | |||
3047 | const Value& index_val, Value* result) { | |||
3048 | // LoadTransform requires SIMD support, so check for it here. If | |||
3049 | // unsupported, bailout and let TurboFan lower the code. | |||
3050 | if (!CheckSupportedType(decoder, kS128, "LoadTransform")) { | |||
3051 | return; | |||
3052 | } | |||
3053 | ||||
3054 | LiftoffRegister full_index = __ PopToRegister(); | |||
3055 | // For load splats and load zero, LoadType is the size of the load, and for | |||
3056 | // load extends, LoadType is the size of the lane, and it always loads 8 | |||
3057 | // bytes. | |||
3058 | uint32_t access_size = | |||
3059 | transform == LoadTransformationKind::kExtend ? 8 : type.size(); | |||
3060 | Register index = BoundsCheckMem(decoder, access_size, imm.offset, | |||
3061 | full_index, {}, kDontForceCheck); | |||
3062 | if (index == no_reg) return; | |||
3063 | ||||
3064 | uintptr_t offset = imm.offset; | |||
3065 | LiftoffRegList pinned = {index}; | |||
3066 | CODE_COMMENT("load with transformation"); | |||
3067 | Register addr = GetMemoryStart(pinned); | |||
3068 | LiftoffRegister value = __ GetUnusedRegister(reg_class_for(kS128), {}); | |||
3069 | uint32_t protected_load_pc = 0; | |||
3070 | __ LoadTransform(value, addr, index, offset, type, transform, | |||
3071 | &protected_load_pc); | |||
3072 | ||||
3073 | if (env_->bounds_checks == kTrapHandler) { | |||
3074 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds, | |||
3075 | protected_load_pc); | |||
3076 | } | |||
3077 | __ PushRegister(kS128, value); | |||
3078 | ||||
3079 | if (V8_UNLIKELY(FLAG_trace_wasm_memory)) { | |||
3080 | // Again load extend is different. | |||
3081 | MachineRepresentation mem_rep = | |||
3082 | transform == LoadTransformationKind::kExtend | |||
3083 | ? MachineRepresentation::kWord64 | |||
3084 | : type.mem_type().representation(); | |||
3085 | TraceMemoryOperation(false, mem_rep, index, offset, decoder->position()); | |||
3086 | } | |||
3087 | } | |||
3088 | ||||
3089 | void LoadLane(FullDecoder* decoder, LoadType type, const Value& _value, | |||
3090 | const Value& _index, const MemoryAccessImmediate<validate>& imm, | |||
3091 | const uint8_t laneidx, Value* _result) { | |||
3092 | if (!CheckSupportedType(decoder, kS128, "LoadLane")) { | |||
3093 | return; | |||
3094 | } | |||
3095 | ||||
3096 | LiftoffRegList pinned; | |||
3097 | LiftoffRegister value = pinned.set(__ PopToRegister()); | |||
3098 | LiftoffRegister full_index = __ PopToRegister(); | |||
3099 | Register index = BoundsCheckMem(decoder, type.size(), imm.offset, | |||
3100 | full_index, pinned, kDontForceCheck); | |||
3101 | if (index == no_reg) return; | |||
3102 | ||||
3103 | uintptr_t offset = imm.offset; | |||
3104 | pinned.set(index); | |||
3105 | CODE_COMMENT("load lane"); | |||
3106 | Register addr = GetMemoryStart(pinned); | |||
3107 | LiftoffRegister result = __ GetUnusedRegister(reg_class_for(kS128), {}); | |||
3108 | uint32_t protected_load_pc = 0; | |||
3109 | ||||
3110 | __ LoadLane(result, value, addr, index, offset, type, laneidx, | |||
3111 | &protected_load_pc); | |||
3112 | if (env_->bounds_checks == kTrapHandler) { | |||
3113 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds, | |||
3114 | protected_load_pc); | |||
3115 | } | |||
3116 | ||||
3117 | __ PushRegister(kS128, result); | |||
3118 | ||||
3119 | if (V8_UNLIKELY(FLAG_trace_wasm_memory)) { | |||
3120 | TraceMemoryOperation(false, type.mem_type().representation(), index, | |||
3121 | offset, decoder->position()); | |||
3122 | } | |||
3123 | } | |||
3124 | ||||
3125 | void StoreMem(FullDecoder* decoder, StoreType type, | |||
3126 | const MemoryAccessImmediate<validate>& imm, | |||
3127 | const Value& index_val, const Value& value_val) { | |||
3128 | ValueKind kind = type.value_type().kind(); | |||
3129 | if (!CheckSupportedType(decoder, kind, "store")) return; | |||
3130 | ||||
3131 | LiftoffRegList pinned; | |||
3132 | LiftoffRegister value = pinned.set(__ PopToRegister()); | |||
3133 | ||||
3134 | uintptr_t offset = imm.offset; | |||
3135 | Register index = no_reg; | |||
3136 | ||||
3137 | auto& index_slot = __ cache_state()->stack_state.back(); | |||
3138 | if (IndexStaticallyInBounds(index_slot, type.size(), &offset)) { | |||
3139 | __ cache_state()->stack_state.pop_back(); | |||
3140 | CODE_COMMENT("store to memory (constant offset)"); | |||
3141 | Register mem = pinned.set(GetMemoryStart(pinned)); | |||
3142 | __ Store(mem, no_reg, offset, value, type, pinned, nullptr, true); | |||
3143 | } else { | |||
3144 | LiftoffRegister full_index = __ PopToRegister(pinned); | |||
3145 | index = BoundsCheckMem(decoder, type.size(), imm.offset, full_index, | |||
3146 | pinned, kDontForceCheck); | |||
3147 | if (index == no_reg) return; | |||
3148 | ||||
3149 | pinned.set(index); | |||
3150 | CODE_COMMENT("store to memory"); | |||
3151 | uint32_t protected_store_pc = 0; | |||
3152 | // Load the memory start address only now to reduce register pressure | |||
3153 | // (important on ia32). | |||
3154 | Register mem = pinned.set(GetMemoryStart(pinned)); | |||
3155 | LiftoffRegList outer_pinned; | |||
3156 | if (V8_UNLIKELY(FLAG_trace_wasm_memory)) outer_pinned.set(index); | |||
3157 | __ Store(mem, index, offset, value, type, outer_pinned, | |||
3158 | &protected_store_pc, true); | |||
3159 | if (env_->bounds_checks == kTrapHandler) { | |||
3160 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds, | |||
3161 | protected_store_pc); | |||
3162 | } | |||
3163 | } | |||
3164 | ||||
3165 | if (V8_UNLIKELY(FLAG_trace_wasm_memory)) { | |||
3166 | TraceMemoryOperation(true, type.mem_rep(), index, offset, | |||
3167 | decoder->position()); | |||
3168 | } | |||
3169 | } | |||
3170 | ||||
3171 | void StoreLane(FullDecoder* decoder, StoreType type, | |||
3172 | const MemoryAccessImmediate<validate>& imm, | |||
3173 | const Value& _index, const Value& _value, const uint8_t lane) { | |||
3174 | if (!CheckSupportedType(decoder, kS128, "StoreLane")) return; | |||
3175 | LiftoffRegList pinned; | |||
3176 | LiftoffRegister value = pinned.set(__ PopToRegister()); | |||
3177 | LiftoffRegister full_index = __ PopToRegister(pinned); | |||
3178 | Register index = BoundsCheckMem(decoder, type.size(), imm.offset, | |||
3179 | full_index, pinned, kDontForceCheck); | |||
3180 | if (index == no_reg) return; | |||
3181 | ||||
3182 | uintptr_t offset = imm.offset; | |||
3183 | pinned.set(index); | |||
3184 | CODE_COMMENT("store lane to memory"); | |||
3185 | Register addr = pinned.set(GetMemoryStart(pinned)); | |||
3186 | uint32_t protected_store_pc = 0; | |||
3187 | __ StoreLane(addr, index, offset, value, type, lane, &protected_store_pc); | |||
3188 | if (env_->bounds_checks == kTrapHandler) { | |||
3189 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds, | |||
3190 | protected_store_pc); | |||
3191 | } | |||
3192 | if (V8_UNLIKELY(FLAG_trace_wasm_memory)) { | |||
3193 | TraceMemoryOperation(true, type.mem_rep(), index, offset, | |||
3194 | decoder->position()); | |||
3195 | } | |||
3196 | } | |||
3197 | ||||
3198 | void CurrentMemoryPages(FullDecoder* /* decoder */, Value* /* result */) { | |||
3199 | Register mem_size = __ GetUnusedRegister(kGpReg, {}).gp(); | |||
3200 | LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize, {}); | |||
3201 | __ emit_ptrsize_shri(mem_size, mem_size, kWasmPageSizeLog2); | |||
3202 | LiftoffRegister result{mem_size}; | |||
3203 | if (env_->module->is_memory64 && kNeedI64RegPair) { | |||
3204 | LiftoffRegister high_word = | |||
3205 | __ GetUnusedRegister(kGpReg, LiftoffRegList{mem_size}); | |||
3206 | // The high word is always 0 on 32-bit systems. | |||
3207 | __ LoadConstant(high_word, WasmValue{uint32_t{0}}); | |||
3208 | result = LiftoffRegister::ForPair(mem_size, high_word.gp()); | |||
3209 | } | |||
3210 | __ PushRegister(env_->module->is_memory64 ? kI64 : kI32, result); | |||
3211 | } | |||
3212 | ||||
3213 | void MemoryGrow(FullDecoder* decoder, const Value& value, Value* result_val) { | |||
3214 | // Pop the input, then spill all cache registers to make the runtime call. | |||
3215 | LiftoffRegList pinned; | |||
3216 | LiftoffRegister input = pinned.set(__ PopToRegister()); | |||
3217 | __ SpillAllRegisters(); | |||
3218 | ||||
3219 | LiftoffRegister result = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
3220 | ||||
3221 | Label done; | |||
3222 | ||||
3223 | if (env_->module->is_memory64) { | |||
3224 | // If the high word is not 0, this will always fail (would grow by | |||
3225 | // >=256TB). The int32_t value will be sign-extended below. | |||
3226 | __ LoadConstant(result, WasmValue(int32_t{-1})); | |||
3227 | if (kNeedI64RegPair) { | |||
3228 | __ emit_cond_jump(kUnequal /* neq */, &done, kI32, input.high_gp()); | |||
3229 | input = input.low(); | |||
3230 | } else { | |||
3231 | LiftoffRegister high_word = __ GetUnusedRegister(kGpReg, pinned); | |||
3232 | __ emit_i64_shri(high_word, input, 32); | |||
3233 | __ emit_cond_jump(kUnequal /* neq */, &done, kI32, high_word.gp()); | |||
3234 | } | |||
3235 | } | |||
3236 | ||||
3237 | WasmMemoryGrowDescriptor descriptor; | |||
3238 | DCHECK_EQ(0, descriptor.GetStackParameterCount()); | |||
3239 | DCHECK_EQ(1, descriptor.GetRegisterParameterCount()); | |||
3240 | DCHECK_EQ(machine_type(kI32), descriptor.GetParameterType(0)); | |||
3241 | ||||
3242 | Register param_reg = descriptor.GetRegisterParameter(0); | |||
3243 | if (input.gp() != param_reg) __ Move(param_reg, input.gp(), kI32); | |||
3244 | ||||
3245 | __ CallRuntimeStub(WasmCode::kWasmMemoryGrow); | |||
3246 | DefineSafepoint(); | |||
3247 | RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill); | |||
3248 | ||||
3249 | if (kReturnRegister0 != result.gp()) { | |||
3250 | __ Move(result.gp(), kReturnRegister0, kI32); | |||
3251 | } | |||
3252 | ||||
3253 | __ bind(&done); | |||
3254 | ||||
3255 | if (env_->module->is_memory64) { | |||
3256 | LiftoffRegister result64 = result; | |||
3257 | if (kNeedI64RegPair) result64 = __ GetUnusedRegister(kGpRegPair, pinned); | |||
3258 | __ emit_type_conversion(kExprI64SConvertI32, result64, result, nullptr); | |||
3259 | __ PushRegister(kI64, result64); | |||
3260 | } else { | |||
3261 | __ PushRegister(kI32, result); | |||
3262 | } | |||
3263 | } | |||
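// MemoryGrow (above): the runtime stub takes and returns an i32 page count.
// For memory64, a non-zero high word means growing by at least 2^32 pages,
// which always fails, so -1 is materialized without calling the stub; the i32
// result is sign-extended back to i64 before being pushed.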
3264 | ||||
3265 | base::OwnedVector<DebugSideTable::Entry::Value> | |||
3266 | GetCurrentDebugSideTableEntries( | |||
3267 | FullDecoder* decoder, | |||
3268 | DebugSideTableBuilder::AssumeSpilling assume_spilling) { | |||
3269 | auto& stack_state = __ cache_state()->stack_state; | |||
3270 | auto values = | |||
3271 | base::OwnedVector<DebugSideTable::Entry::Value>::NewForOverwrite( | |||
3272 | stack_state.size()); | |||
3273 | ||||
3274 | // For function calls, the decoder still has the arguments on the stack, but | |||
3275 | // Liftoff already popped them. Hence {decoder->stack_size()} can be bigger | |||
3276 | // than expected. Just ignore that and use the lower part only. | |||
3277 | DCHECK_LE(stack_state.size() - num_exceptions_, | |||
3278 | decoder->num_locals() + decoder->stack_size()); | |||
3279 | int index = 0; | |||
3280 | int decoder_stack_index = decoder->stack_size(); | |||
3281 | // Iterate the operand stack control block by control block, so that we can | |||
3282 | // handle the implicit exception value for try blocks. | |||
3283 | for (int j = decoder->control_depth() - 1; j >= 0; j--) { | |||
3284 | Control* control = decoder->control_at(j); | |||
3285 | Control* next_control = j > 0 ? decoder->control_at(j - 1) : nullptr; | |||
3286 | int end_index = next_control | |||
3287 | ? next_control->stack_depth + __ num_locals() + | |||
3288 | next_control->num_exceptions | |||
3289 | : __ cache_state()->stack_height(); | |||
3290 | bool exception = control->is_try_catch() || control->is_try_catchall(); | |||
3291 | for (; index < end_index; ++index) { | |||
3292 | auto& slot = stack_state[index]; | |||
3293 | auto& value = values[index]; | |||
3294 | value.index = index; | |||
3295 | ValueType type = | |||
3296 | index < static_cast<int>(__ num_locals()) | |||
3297 | ? decoder->local_type(index) | |||
3298 | : exception ? ValueType::Ref(HeapType::kAny, kNonNullable) | |||
3299 | : decoder->stack_value(decoder_stack_index--)->type; | |||
3300 | DCHECK(CheckCompatibleStackSlotTypes(slot.kind(), type.kind())); | |||
3301 | value.type = type; | |||
3302 | switch (slot.loc()) { | |||
3303 | case kIntConst: | |||
3304 | value.storage = DebugSideTable::Entry::kConstant; | |||
3305 | value.i32_const = slot.i32_const(); | |||
3306 | break; | |||
3307 | case kRegister: | |||
3308 | DCHECK_NE(DebugSideTableBuilder::kDidSpill, assume_spilling); | |||
3309 | if (assume_spilling == DebugSideTableBuilder::kAllowRegisters) { | |||
3310 | value.storage = DebugSideTable::Entry::kRegister; | |||
3311 | value.reg_code = slot.reg().liftoff_code(); | |||
3312 | break; | |||
3313 | } | |||
3314 | DCHECK_EQ(DebugSideTableBuilder::kAssumeSpilling, assume_spilling); | |||
3315 | V8_FALLTHROUGH; | |||
3316 | case kStack: | |||
3317 | value.storage = DebugSideTable::Entry::kStack; | |||
3318 | value.stack_offset = slot.offset(); | |||
3319 | break; | |||
3320 | } | |||
3321 | exception = false; | |||
3322 | } | |||
3323 | } | |||
3324 | DCHECK_EQ(values.size(), index); | |||
3325 | return values; | |||
3326 | } | |||
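// GetCurrentDebugSideTableEntries (above) walks the operand stack control
// block by control block so that the implicit exception value of try blocks
// (which has no corresponding decoder stack value) can be typed as a
// non-nullable anyref; all other slots take their type from the locals or the
// decoder's value stack.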
3327 | ||||
3328 | void RegisterDebugSideTableEntry( | |||
3329 | FullDecoder* decoder, | |||
3330 | DebugSideTableBuilder::AssumeSpilling assume_spilling) { | |||
3331 | if (V8_LIKELY(!debug_sidetable_builder_)) return; | |||
3332 | debug_sidetable_builder_->NewEntry( | |||
3333 | __ pc_offset(), | |||
3334 | GetCurrentDebugSideTableEntries(decoder, assume_spilling).as_vector()); | |||
3335 | } | |||
3336 | ||||
3337 | DebugSideTableBuilder::EntryBuilder* RegisterOOLDebugSideTableEntry( | |||
3338 | FullDecoder* decoder) { | |||
3339 | if (V8_LIKELY(!debug_sidetable_builder_)) return nullptr; | |||
3340 | return debug_sidetable_builder_->NewOOLEntry( | |||
3341 | GetCurrentDebugSideTableEntries(decoder, | |||
3342 | DebugSideTableBuilder::kAssumeSpilling) | |||
3343 | .as_vector()); | |||
3344 | } | |||
3345 | ||||
3346 | enum TailCall : bool { kTailCall = true, kNoTailCall = false }; | |||
3347 | ||||
3348 | void CallDirect(FullDecoder* decoder, | |||
3349 | const CallFunctionImmediate<validate>& imm, | |||
3350 | const Value args[], Value[]) { | |||
3351 | CallDirect(decoder, imm, args, nullptr, kNoTailCall); | |||
3352 | } | |||
3353 | ||||
3354 | void CallIndirect(FullDecoder* decoder, const Value& index_val, | |||
3355 | const CallIndirectImmediate<validate>& imm, | |||
3356 | const Value args[], Value returns[]) { | |||
3357 | CallIndirect(decoder, index_val, imm, kNoTailCall); | |||
3358 | } | |||
3359 | ||||
3360 | void CallRef(FullDecoder* decoder, const Value& func_ref, | |||
3361 | const FunctionSig* sig, uint32_t sig_index, const Value args[], | |||
3362 | Value returns[]) { | |||
3363 | CallRef(decoder, func_ref.type, sig, kNoTailCall); | |||
3364 | } | |||
3365 | ||||
3366 | void ReturnCall(FullDecoder* decoder, | |||
3367 | const CallFunctionImmediate<validate>& imm, | |||
3368 | const Value args[]) { | |||
3369 | TierupCheckOnExit(decoder); | |||
3370 | CallDirect(decoder, imm, args, nullptr, kTailCall); | |||
3371 | } | |||
3372 | ||||
3373 | void ReturnCallIndirect(FullDecoder* decoder, const Value& index_val, | |||
3374 | const CallIndirectImmediate<validate>& imm, | |||
3375 | const Value args[]) { | |||
3376 | TierupCheckOnExit(decoder); | |||
3377 | CallIndirect(decoder, index_val, imm, kTailCall); | |||
3378 | } | |||
3379 | ||||
3380 | void ReturnCallRef(FullDecoder* decoder, const Value& func_ref, | |||
3381 | const FunctionSig* sig, uint32_t sig_index, | |||
3382 | const Value args[]) { | |||
3383 | TierupCheckOnExit(decoder); | |||
3384 | CallRef(decoder, func_ref.type, sig, kTailCall); | |||
3385 | } | |||
3386 | ||||
3387 | void BrOnNull(FullDecoder* decoder, const Value& ref_object, uint32_t depth, | |||
3388 | bool pass_null_along_branch, | |||
3389 | Value* /* result_on_fallthrough */) { | |||
3390 | // Before branching, materialize all constants. This avoids repeatedly | |||
3391 | // materializing them for each conditional branch. | |||
3392 | if (depth != decoder->control_depth() - 1) { | |||
3393 | __ MaterializeMergedConstants( | |||
3394 | decoder->control_at(depth)->br_merge()->arity); | |||
3395 | } | |||
3396 | ||||
3397 | Label cont_false; | |||
3398 | LiftoffRegList pinned; | |||
3399 | LiftoffRegister ref = pinned.set(__ PopToRegister(pinned)); | |||
3400 | Register null = __ GetUnusedRegister(kGpReg, pinned).gp(); | |||
3401 | LoadNullValue(null, pinned); | |||
3402 | __ emit_cond_jump(kUnequal, &cont_false, ref_object.type.kind(), ref.gp(), | |||
3403 | null); | |||
3404 | if (pass_null_along_branch) LoadNullValue(null, pinned); | |||
3405 | BrOrRet(decoder, depth, 0); | |||
3406 | __ bind(&cont_false); | |||
3407 | __ PushRegister(kRef, ref); | |||
3408 | } | |||
3409 | ||||
3410 | void BrOnNonNull(FullDecoder* decoder, const Value& ref_object, | |||
3411 | uint32_t depth) { | |||
3412 | // Before branching, materialize all constants. This avoids repeatedly | |||
3413 | // materializing them for each conditional branch. | |||
3414 | if (depth != decoder->control_depth() - 1) { | |||
3415 | __ MaterializeMergedConstants( | |||
3416 | decoder->control_at(depth)->br_merge()->arity); | |||
3417 | } | |||
3418 | ||||
3419 | Label cont_false; | |||
3420 | LiftoffRegList pinned; | |||
3421 | LiftoffRegister ref = pinned.set(__ PopToRegister(pinned)); | |||
3422 | // Put the reference back onto the stack for the branch. | |||
3423 | __ PushRegister(kRef, ref); | |||
3424 | ||||
3425 | Register null = __ GetUnusedRegister(kGpReg, pinned).gp(); | |||
3426 | LoadNullValue(null, pinned); | |||
3427 | __ emit_cond_jump(kEqual, &cont_false, ref_object.type.kind(), ref.gp(), | |||
3428 | null); | |||
3429 | ||||
3430 | BrOrRet(decoder, depth, 0); | |||
3431 | // Drop the reference if we are not branching. | |||
3432 | __ DropValues(1); | |||
3433 | __ bind(&cont_false); | |||
3434 | } | |||
3435 | ||||
3436 | template <ValueKind src_kind, ValueKind result_kind, | |||
3437 | ValueKind result_lane_kind = kVoid, typename EmitFn> | |||
3438 | void EmitTerOp(EmitFn fn) { | |||
3439 | static constexpr RegClass src_rc = reg_class_for(src_kind); | |||
3440 | static constexpr RegClass result_rc = reg_class_for(result_kind); | |||
3441 | LiftoffRegister src3 = __ PopToRegister(); | |||
3442 | LiftoffRegister src2 = __ PopToRegister(LiftoffRegList{src3}); | |||
3443 | LiftoffRegister src1 = __ PopToRegister(LiftoffRegList{src3, src2}); | |||
3444 | // Reusing src1 and src2 will complicate codegen for select for some | |||
3445 | // backends, so we allow only reusing src3 (the mask), and pin src1 and src2. | |||
3446 | LiftoffRegister dst = src_rc == result_rc | |||
3447 | ? __ GetUnusedRegister(result_rc, {src3}, | |||
3448 | LiftoffRegList{src1, src2}) | |||
3449 | : __ GetUnusedRegister(result_rc, {}); | |||
3450 | CallEmitFn(fn, dst, src1, src2, src3); | |||
3451 | if (V8_UNLIKELY(nondeterminism_)) { | |||
3452 | LiftoffRegList pinned = {dst}; | |||
3453 | if (result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64) { | |||
3454 | CheckNan(dst, pinned, result_kind); | |||
3455 | } else if (result_kind == ValueKind::kS128 && | |||
3456 | (result_lane_kind == kF32 || result_lane_kind == kF64)) { | |||
3457 | CheckS128Nan(dst, LiftoffRegList{src1, src2, src3, dst}, | |||
3458 | result_lane_kind); | |||
3459 | } | |||
3460 | } | |||
3461 | __ PushRegister(result_kind, dst); | |||
3462 | } | |||
3463 | ||||
3464 | template <typename EmitFn, typename EmitFnImm> | |||
3465 | void EmitSimdShiftOp(EmitFn fn, EmitFnImm fnImm) { | |||
3466 | static constexpr RegClass result_rc = reg_class_for(kS128); | |||
3467 | ||||
3468 | LiftoffAssembler::VarState rhs_slot = __ cache_state()->stack_state.back(); | |||
3469 | // Check if the RHS is an immediate. | |||
3470 | if (rhs_slot.is_const()) { | |||
3471 | __ cache_state()->stack_state.pop_back(); | |||
3472 | int32_t imm = rhs_slot.i32_const(); | |||
3473 | ||||
3474 | LiftoffRegister operand = __ PopToRegister(); | |||
3475 | LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {}); | |||
3476 | ||||
3477 | CallEmitFn(fnImm, dst, operand, imm); | |||
3478 | __ PushRegister(kS128, dst); | |||
3479 | } else { | |||
3480 | LiftoffRegister count = __ PopToRegister(); | |||
3481 | LiftoffRegister operand = __ PopToRegister(); | |||
3482 | LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {}); | |||
3483 | ||||
3484 | CallEmitFn(fn, dst, operand, count); | |||
3485 | __ PushRegister(kS128, dst); | |||
3486 | } | |||
3487 | } | |||
3488 | ||||
3489 | template <ValueKind result_lane_kind> | |||
3490 | void EmitSimdFloatRoundingOpWithCFallback( | |||
3491 | bool (LiftoffAssembler::*emit_fn)(LiftoffRegister, LiftoffRegister), | |||
3492 | ExternalReference (*ext_ref)()) { | |||
3493 | static constexpr RegClass rc = reg_class_for(kS128); | |||
3494 | LiftoffRegister src = __ PopToRegister(); | |||
3495 | LiftoffRegister dst = __ GetUnusedRegister(rc, {src}, {}); | |||
3496 | if (!(asm_.*emit_fn)(dst, src)) { | |||
3497 | // Return v128 via stack for ARM. | |||
3498 | auto sig_v_s = MakeSig::Params(kS128); | |||
3499 | GenerateCCall(&dst, &sig_v_s, kS128, &src, ext_ref()); | |||
3500 | } | |||
3501 | if (V8_UNLIKELY(nondeterminism_)) { | |||
3502 | LiftoffRegList pinned = {dst}; | |||
3503 | CheckS128Nan(dst, pinned, result_lane_kind); | |||
3504 | } | |||
3505 | __ PushRegister(kS128, dst); | |||
3506 | } | |||
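// The helper above follows a "try the inline emitter, else call out to C"
// pattern: the bool-returning emit_* member reports whether the target has a
// suitable instruction, and the ExternalReference fallback is used otherwise.
// A minimal standalone sketch (plain C++, not V8 API):
//
//   #include <cmath>
//   struct Vec4 { float lane[4]; };
//   inline bool TryEmitCeilInline(Vec4*, const Vec4&) { return false; }
//   inline void CeilFallback(Vec4* dst, const Vec4& src) {
//     for (int i = 0; i < 4; ++i) dst->lane[i] = std::ceil(src.lane[i]);
//   }
//   inline void EmitCeil(Vec4* dst, const Vec4& src) {
//     if (!TryEmitCeilInline(dst, src)) CeilFallback(dst, src);
//   }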
3507 | ||||
3508 | void SimdOp(FullDecoder* decoder, WasmOpcode opcode, base::Vector<Value> args, | |||
3509 | Value* result) { | |||
3510 | if (!CpuFeatures::SupportsWasmSimd128()) { | |||
3511 | return unsupported(decoder, kSimd, "simd"); | |||
3512 | } | |||
3513 | switch (opcode) { | |||
3514 | case wasm::kExprI8x16Swizzle: | |||
3515 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_swizzle); | |||
3516 | case wasm::kExprI8x16Popcnt: | |||
3517 | return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_popcnt); | |||
3518 | case wasm::kExprI8x16Splat: | |||
3519 | return EmitUnOp<kI32, kS128>(&LiftoffAssembler::emit_i8x16_splat); | |||
3520 | case wasm::kExprI16x8Splat: | |||
3521 | return EmitUnOp<kI32, kS128>(&LiftoffAssembler::emit_i16x8_splat); | |||
3522 | case wasm::kExprI32x4Splat: | |||
3523 | return EmitUnOp<kI32, kS128>(&LiftoffAssembler::emit_i32x4_splat); | |||
3524 | case wasm::kExprI64x2Splat: | |||
3525 | return EmitUnOp<kI64, kS128>(&LiftoffAssembler::emit_i64x2_splat); | |||
3526 | case wasm::kExprF32x4Splat: | |||
3527 | return EmitUnOp<kF32, kS128, kF32>(&LiftoffAssembler::emit_f32x4_splat); | |||
3528 | case wasm::kExprF64x2Splat: | |||
3529 | return EmitUnOp<kF64, kS128, kF64>(&LiftoffAssembler::emit_f64x2_splat); | |||
3530 | case wasm::kExprI8x16Eq: | |||
3531 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_eq); | |||
3532 | case wasm::kExprI8x16Ne: | |||
3533 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_ne); | |||
3534 | case wasm::kExprI8x16LtS: | |||
3535 | return EmitBinOp<kS128, kS128, true>( | |||
3536 | &LiftoffAssembler::emit_i8x16_gt_s); | |||
3537 | case wasm::kExprI8x16LtU: | |||
3538 | return EmitBinOp<kS128, kS128, true>( | |||
3539 | &LiftoffAssembler::emit_i8x16_gt_u); | |||
3540 | case wasm::kExprI8x16GtS: | |||
3541 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_gt_s); | |||
3542 | case wasm::kExprI8x16GtU: | |||
3543 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_gt_u); | |||
3544 | case wasm::kExprI8x16LeS: | |||
3545 | return EmitBinOp<kS128, kS128, true>( | |||
3546 | &LiftoffAssembler::emit_i8x16_ge_s); | |||
3547 | case wasm::kExprI8x16LeU: | |||
3548 | return EmitBinOp<kS128, kS128, true>( | |||
3549 | &LiftoffAssembler::emit_i8x16_ge_u); | |||
3550 | case wasm::kExprI8x16GeS: | |||
3551 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_ge_s); | |||
3552 | case wasm::kExprI8x16GeU: | |||
3553 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_ge_u); | |||
3554 | case wasm::kExprI16x8Eq: | |||
3555 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_eq); | |||
3556 | case wasm::kExprI16x8Ne: | |||
3557 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_ne); | |||
3558 | case wasm::kExprI16x8LtS: | |||
3559 | return EmitBinOp<kS128, kS128, true>( | |||
3560 | &LiftoffAssembler::emit_i16x8_gt_s); | |||
3561 | case wasm::kExprI16x8LtU: | |||
3562 | return EmitBinOp<kS128, kS128, true>( | |||
3563 | &LiftoffAssembler::emit_i16x8_gt_u); | |||
3564 | case wasm::kExprI16x8GtS: | |||
3565 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_gt_s); | |||
3566 | case wasm::kExprI16x8GtU: | |||
3567 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_gt_u); | |||
3568 | case wasm::kExprI16x8LeS: | |||
3569 | return EmitBinOp<kS128, kS128, true>( | |||
3570 | &LiftoffAssembler::emit_i16x8_ge_s); | |||
3571 | case wasm::kExprI16x8LeU: | |||
3572 | return EmitBinOp<kS128, kS128, true>( | |||
3573 | &LiftoffAssembler::emit_i16x8_ge_u); | |||
3574 | case wasm::kExprI16x8GeS: | |||
3575 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_ge_s); | |||
3576 | case wasm::kExprI16x8GeU: | |||
3577 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_ge_u); | |||
3578 | case wasm::kExprI32x4Eq: | |||
3579 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_eq); | |||
3580 | case wasm::kExprI32x4Ne: | |||
3581 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_ne); | |||
3582 | case wasm::kExprI32x4LtS: | |||
3583 | return EmitBinOp<kS128, kS128, true>( | |||
3584 | &LiftoffAssembler::emit_i32x4_gt_s); | |||
3585 | case wasm::kExprI32x4LtU: | |||
3586 | return EmitBinOp<kS128, kS128, true>( | |||
3587 | &LiftoffAssembler::emit_i32x4_gt_u); | |||
3588 | case wasm::kExprI32x4GtS: | |||
3589 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_gt_s); | |||
3590 | case wasm::kExprI32x4GtU: | |||
3591 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_gt_u); | |||
3592 | case wasm::kExprI32x4LeS: | |||
3593 | return EmitBinOp<kS128, kS128, true>( | |||
3594 | &LiftoffAssembler::emit_i32x4_ge_s); | |||
3595 | case wasm::kExprI32x4LeU: | |||
3596 | return EmitBinOp<kS128, kS128, true>( | |||
3597 | &LiftoffAssembler::emit_i32x4_ge_u); | |||
3598 | case wasm::kExprI32x4GeS: | |||
3599 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_ge_s); | |||
3600 | case wasm::kExprI32x4GeU: | |||
3601 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_ge_u); | |||
3602 | case wasm::kExprI64x2Eq: | |||
3603 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_eq); | |||
3604 | case wasm::kExprI64x2Ne: | |||
3605 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_ne); | |||
3606 | case wasm::kExprI64x2LtS: | |||
3607 | return EmitBinOp<kS128, kS128, true>( | |||
3608 | &LiftoffAssembler::emit_i64x2_gt_s); | |||
3609 | case wasm::kExprI64x2GtS: | |||
3610 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_gt_s); | |||
3611 | case wasm::kExprI64x2LeS: | |||
3612 | return EmitBinOp<kS128, kS128, true>( | |||
3613 | &LiftoffAssembler::emit_i64x2_ge_s); | |||
3614 | case wasm::kExprI64x2GeS: | |||
3615 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_ge_s); | |||
3616 | case wasm::kExprF32x4Eq: | |||
3617 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_eq); | |||
3618 | case wasm::kExprF32x4Ne: | |||
3619 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_ne); | |||
3620 | case wasm::kExprF32x4Lt: | |||
3621 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_lt); | |||
3622 | case wasm::kExprF32x4Gt: | |||
3623 | return EmitBinOp<kS128, kS128, true>(&LiftoffAssembler::emit_f32x4_lt); | |||
3624 | case wasm::kExprF32x4Le: | |||
3625 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_le); | |||
3626 | case wasm::kExprF32x4Ge: | |||
3627 | return EmitBinOp<kS128, kS128, true>(&LiftoffAssembler::emit_f32x4_le); | |||
3628 | case wasm::kExprF64x2Eq: | |||
3629 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_eq); | |||
3630 | case wasm::kExprF64x2Ne: | |||
3631 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_ne); | |||
3632 | case wasm::kExprF64x2Lt: | |||
3633 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_lt); | |||
3634 | case wasm::kExprF64x2Gt: | |||
3635 | return EmitBinOp<kS128, kS128, true>(&LiftoffAssembler::emit_f64x2_lt); | |||
3636 | case wasm::kExprF64x2Le: | |||
3637 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_le); | |||
3638 | case wasm::kExprF64x2Ge: | |||
3639 | return EmitBinOp<kS128, kS128, true>(&LiftoffAssembler::emit_f64x2_le); | |||
3640 | case wasm::kExprS128Not: | |||
3641 | return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_s128_not); | |||
3642 | case wasm::kExprS128And: | |||
3643 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_s128_and); | |||
3644 | case wasm::kExprS128Or: | |||
3645 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_s128_or); | |||
3646 | case wasm::kExprS128Xor: | |||
3647 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_s128_xor); | |||
3648 | case wasm::kExprS128Select: | |||
3649 | return EmitTerOp<kS128, kS128>(&LiftoffAssembler::emit_s128_select); | |||
3650 | case wasm::kExprI8x16Neg: | |||
3651 | return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_neg); | |||
3652 | case wasm::kExprV128AnyTrue: | |||
3653 | return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v128_anytrue); | |||
3654 | case wasm::kExprI8x16AllTrue: | |||
3655 | return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i8x16_alltrue); | |||
3656 | case wasm::kExprI8x16BitMask: | |||
3657 | return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i8x16_bitmask); | |||
3658 | case wasm::kExprI8x16Shl: | |||
3659 | return EmitSimdShiftOp(&LiftoffAssembler::emit_i8x16_shl, | |||
3660 | &LiftoffAssembler::emit_i8x16_shli); | |||
3661 | case wasm::kExprI8x16ShrS: | |||
3662 | return EmitSimdShiftOp(&LiftoffAssembler::emit_i8x16_shr_s, | |||
3663 | &LiftoffAssembler::emit_i8x16_shri_s); | |||
3664 | case wasm::kExprI8x16ShrU: | |||
3665 | return EmitSimdShiftOp(&LiftoffAssembler::emit_i8x16_shr_u, | |||
3666 | &LiftoffAssembler::emit_i8x16_shri_u); | |||
3667 | case wasm::kExprI8x16Add: | |||
3668 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_add); | |||
3669 | case wasm::kExprI8x16AddSatS: | |||
3670 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_add_sat_s); | |||
3671 | case wasm::kExprI8x16AddSatU: | |||
3672 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_add_sat_u); | |||
3673 | case wasm::kExprI8x16Sub: | |||
3674 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_sub); | |||
3675 | case wasm::kExprI8x16SubSatS: | |||
3676 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_sub_sat_s); | |||
3677 | case wasm::kExprI8x16SubSatU: | |||
3678 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_sub_sat_u); | |||
3679 | case wasm::kExprI8x16MinS: | |||
3680 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_min_s); | |||
3681 | case wasm::kExprI8x16MinU: | |||
3682 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_min_u); | |||
3683 | case wasm::kExprI8x16MaxS: | |||
3684 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_max_s); | |||
3685 | case wasm::kExprI8x16MaxU: | |||
3686 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_max_u); | |||
3687 | case wasm::kExprI16x8Neg: | |||
3688 | return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_neg); | |||
3689 | case wasm::kExprI16x8AllTrue: | |||
3690 | return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i16x8_alltrue); | |||
3691 | case wasm::kExprI16x8BitMask: | |||
3692 | return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i16x8_bitmask); | |||
3693 | case wasm::kExprI16x8Shl: | |||
3694 | return EmitSimdShiftOp(&LiftoffAssembler::emit_i16x8_shl, | |||
3695 | &LiftoffAssembler::emit_i16x8_shli); | |||
3696 | case wasm::kExprI16x8ShrS: | |||
3697 | return EmitSimdShiftOp(&LiftoffAssembler::emit_i16x8_shr_s, | |||
3698 | &LiftoffAssembler::emit_i16x8_shri_s); | |||
3699 | case wasm::kExprI16x8ShrU: | |||
3700 | return EmitSimdShiftOp(&LiftoffAssembler::emit_i16x8_shr_u, | |||
3701 | &LiftoffAssembler::emit_i16x8_shri_u); | |||
3702 | case wasm::kExprI16x8Add: | |||
3703 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_add); | |||
3704 | case wasm::kExprI16x8AddSatS: | |||
3705 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_add_sat_s); | |||
3706 | case wasm::kExprI16x8AddSatU: | |||
3707 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_add_sat_u); | |||
3708 | case wasm::kExprI16x8Sub: | |||
3709 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_sub); | |||
3710 | case wasm::kExprI16x8SubSatS: | |||
3711 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_sub_sat_s); | |||
3712 | case wasm::kExprI16x8SubSatU: | |||
3713 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_sub_sat_u); | |||
3714 | case wasm::kExprI16x8Mul: | |||
3715 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_mul); | |||
3716 | case wasm::kExprI16x8MinS: | |||
3717 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_min_s); | |||
3718 | case wasm::kExprI16x8MinU: | |||
3719 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_min_u); | |||
3720 | case wasm::kExprI16x8MaxS: | |||
3721 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_max_s); | |||
3722 | case wasm::kExprI16x8MaxU: | |||
3723 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_max_u); | |||
3724 | case wasm::kExprI16x8ExtAddPairwiseI8x16S: | |||
3725 | return EmitUnOp<kS128, kS128>( | |||
3726 | &LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s); | |||
3727 | case wasm::kExprI16x8ExtAddPairwiseI8x16U: | |||
3728 | return EmitUnOp<kS128, kS128>( | |||
3729 | &LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u); | |||
3730 | case wasm::kExprI16x8ExtMulLowI8x16S: | |||
3731 | return EmitBinOp<kS128, kS128>( | |||
3732 | &LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s); | |||
3733 | case wasm::kExprI16x8ExtMulLowI8x16U: | |||
3734 | return EmitBinOp<kS128, kS128>( | |||
3735 | &LiftoffAssembler::emit_i16x8_extmul_low_i8x16_u); | |||
3736 | case wasm::kExprI16x8ExtMulHighI8x16S: | |||
3737 | return EmitBinOp<kS128, kS128>( | |||
3738 | &LiftoffAssembler::emit_i16x8_extmul_high_i8x16_s); | |||
3739 | case wasm::kExprI16x8ExtMulHighI8x16U: | |||
3740 | return EmitBinOp<kS128, kS128>( | |||
3741 | &LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u); | |||
3742 | case wasm::kExprI16x8Q15MulRSatS: | |||
3743 | return EmitBinOp<kS128, kS128>( | |||
3744 | &LiftoffAssembler::emit_i16x8_q15mulr_sat_s); | |||
3745 | case wasm::kExprI32x4Neg: | |||
3746 | return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_neg); | |||
3747 | case wasm::kExprI32x4AllTrue: | |||
3748 | return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i32x4_alltrue); | |||
3749 | case wasm::kExprI32x4BitMask: | |||
3750 | return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i32x4_bitmask); | |||
3751 | case wasm::kExprI32x4Shl: | |||
3752 | return EmitSimdShiftOp(&LiftoffAssembler::emit_i32x4_shl, | |||
3753 | &LiftoffAssembler::emit_i32x4_shli); | |||
3754 | case wasm::kExprI32x4ShrS: | |||
3755 | return EmitSimdShiftOp(&LiftoffAssembler::emit_i32x4_shr_s, | |||
3756 | &LiftoffAssembler::emit_i32x4_shri_s); | |||
3757 | case wasm::kExprI32x4ShrU: | |||
3758 | return EmitSimdShiftOp(&LiftoffAssembler::emit_i32x4_shr_u, | |||
3759 | &LiftoffAssembler::emit_i32x4_shri_u); | |||
3760 | case wasm::kExprI32x4Add: | |||
3761 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_add); | |||
3762 | case wasm::kExprI32x4Sub: | |||
3763 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_sub); | |||
3764 | case wasm::kExprI32x4Mul: | |||
3765 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_mul); | |||
3766 | case wasm::kExprI32x4MinS: | |||
3767 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_min_s); | |||
3768 | case wasm::kExprI32x4MinU: | |||
3769 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_min_u); | |||
3770 | case wasm::kExprI32x4MaxS: | |||
3771 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_max_s); | |||
3772 | case wasm::kExprI32x4MaxU: | |||
3773 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_max_u); | |||
3774 | case wasm::kExprI32x4DotI16x8S: | |||
3775 | return EmitBinOp<kS128, kS128>( | |||
3776 | &LiftoffAssembler::emit_i32x4_dot_i16x8_s); | |||
3777 | case wasm::kExprI32x4ExtAddPairwiseI16x8S: | |||
3778 | return EmitUnOp<kS128, kS128>( | |||
3779 | &LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s); | |||
3780 | case wasm::kExprI32x4ExtAddPairwiseI16x8U: | |||
3781 | return EmitUnOp<kS128, kS128>( | |||
3782 | &LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u); | |||
3783 | case wasm::kExprI32x4ExtMulLowI16x8S: | |||
3784 | return EmitBinOp<kS128, kS128>( | |||
3785 | &LiftoffAssembler::emit_i32x4_extmul_low_i16x8_s); | |||
3786 | case wasm::kExprI32x4ExtMulLowI16x8U: | |||
3787 | return EmitBinOp<kS128, kS128>( | |||
3788 | &LiftoffAssembler::emit_i32x4_extmul_low_i16x8_u); | |||
3789 | case wasm::kExprI32x4ExtMulHighI16x8S: | |||
3790 | return EmitBinOp<kS128, kS128>( | |||
3791 | &LiftoffAssembler::emit_i32x4_extmul_high_i16x8_s); | |||
3792 | case wasm::kExprI32x4ExtMulHighI16x8U: | |||
3793 | return EmitBinOp<kS128, kS128>( | |||
3794 | &LiftoffAssembler::emit_i32x4_extmul_high_i16x8_u); | |||
3795 | case wasm::kExprI64x2Neg: | |||
3796 | return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_neg); | |||
3797 | case wasm::kExprI64x2AllTrue: | |||
3798 | return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i64x2_alltrue); | |||
3799 | case wasm::kExprI64x2Shl: | |||
3800 | return EmitSimdShiftOp(&LiftoffAssembler::emit_i64x2_shl, | |||
3801 | &LiftoffAssembler::emit_i64x2_shli); | |||
3802 | case wasm::kExprI64x2ShrS: | |||
3803 | return EmitSimdShiftOp(&LiftoffAssembler::emit_i64x2_shr_s, | |||
3804 | &LiftoffAssembler::emit_i64x2_shri_s); | |||
3805 | case wasm::kExprI64x2ShrU: | |||
3806 | return EmitSimdShiftOp(&LiftoffAssembler::emit_i64x2_shr_u, | |||
3807 | &LiftoffAssembler::emit_i64x2_shri_u); | |||
3808 | case wasm::kExprI64x2Add: | |||
3809 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_add); | |||
3810 | case wasm::kExprI64x2Sub: | |||
3811 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_sub); | |||
3812 | case wasm::kExprI64x2Mul: | |||
3813 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_mul); | |||
3814 | case wasm::kExprI64x2ExtMulLowI32x4S: | |||
3815 | return EmitBinOp<kS128, kS128>( | |||
3816 | &LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s); | |||
3817 | case wasm::kExprI64x2ExtMulLowI32x4U: | |||
3818 | return EmitBinOp<kS128, kS128>( | |||
3819 | &LiftoffAssembler::emit_i64x2_extmul_low_i32x4_u); | |||
3820 | case wasm::kExprI64x2ExtMulHighI32x4S: | |||
3821 | return EmitBinOp<kS128, kS128>( | |||
3822 | &LiftoffAssembler::emit_i64x2_extmul_high_i32x4_s); | |||
3823 | case wasm::kExprI64x2ExtMulHighI32x4U: | |||
3824 | return EmitBinOp<kS128, kS128>( | |||
3825 | &LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u); | |||
3826 | case wasm::kExprI64x2BitMask: | |||
3827 | return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i64x2_bitmask); | |||
3828 | case wasm::kExprI64x2SConvertI32x4Low: | |||
3829 | return EmitUnOp<kS128, kS128>( | |||
3830 | &LiftoffAssembler::emit_i64x2_sconvert_i32x4_low); | |||
3831 | case wasm::kExprI64x2SConvertI32x4High: | |||
3832 | return EmitUnOp<kS128, kS128>( | |||
3833 | &LiftoffAssembler::emit_i64x2_sconvert_i32x4_high); | |||
3834 | case wasm::kExprI64x2UConvertI32x4Low: | |||
3835 | return EmitUnOp<kS128, kS128>( | |||
3836 | &LiftoffAssembler::emit_i64x2_uconvert_i32x4_low); | |||
3837 | case wasm::kExprI64x2UConvertI32x4High: | |||
3838 | return EmitUnOp<kS128, kS128>( | |||
3839 | &LiftoffAssembler::emit_i64x2_uconvert_i32x4_high); | |||
3840 | case wasm::kExprF32x4Abs: | |||
3841 | return EmitUnOp<kS128, kS128, kF32>(&LiftoffAssembler::emit_f32x4_abs); | |||
3842 | case wasm::kExprF32x4Neg: | |||
3843 | return EmitUnOp<kS128, kS128, kF32>(&LiftoffAssembler::emit_f32x4_neg); | |||
3844 | case wasm::kExprF32x4Sqrt: | |||
3845 | return EmitUnOp<kS128, kS128, kF32>(&LiftoffAssembler::emit_f32x4_sqrt); | |||
3846 | case wasm::kExprF32x4Ceil: | |||
3847 | return EmitSimdFloatRoundingOpWithCFallback<kF32>( | |||
3848 | &LiftoffAssembler::emit_f32x4_ceil, | |||
3849 | &ExternalReference::wasm_f32x4_ceil); | |||
3850 | case wasm::kExprF32x4Floor: | |||
3851 | return EmitSimdFloatRoundingOpWithCFallback<kF32>( | |||
3852 | &LiftoffAssembler::emit_f32x4_floor, | |||
3853 | ExternalReference::wasm_f32x4_floor); | |||
3854 | case wasm::kExprF32x4Trunc: | |||
3855 | return EmitSimdFloatRoundingOpWithCFallback<kF32>( | |||
3856 | &LiftoffAssembler::emit_f32x4_trunc, | |||
3857 | ExternalReference::wasm_f32x4_trunc); | |||
3858 | case wasm::kExprF32x4NearestInt: | |||
3859 | return EmitSimdFloatRoundingOpWithCFallback<kF32>( | |||
3860 | &LiftoffAssembler::emit_f32x4_nearest_int, | |||
3861 | ExternalReference::wasm_f32x4_nearest_int); | |||
3862 | case wasm::kExprF32x4Add: | |||
3863 | return EmitBinOp<kS128, kS128, false, kF32>( | |||
3864 | &LiftoffAssembler::emit_f32x4_add); | |||
3865 | case wasm::kExprF32x4Sub: | |||
3866 | return EmitBinOp<kS128, kS128, false, kF32>( | |||
3867 | &LiftoffAssembler::emit_f32x4_sub); | |||
3868 | case wasm::kExprF32x4Mul: | |||
3869 | return EmitBinOp<kS128, kS128, false, kF32>( | |||
3870 | &LiftoffAssembler::emit_f32x4_mul); | |||
3871 | case wasm::kExprF32x4Div: | |||
3872 | return EmitBinOp<kS128, kS128, false, kF32>( | |||
3873 | &LiftoffAssembler::emit_f32x4_div); | |||
3874 | case wasm::kExprF32x4Min: | |||
3875 | return EmitBinOp<kS128, kS128, false, kF32>( | |||
3876 | &LiftoffAssembler::emit_f32x4_min); | |||
3877 | case wasm::kExprF32x4Max: | |||
3878 | return EmitBinOp<kS128, kS128, false, kF32>( | |||
3879 | &LiftoffAssembler::emit_f32x4_max); | |||
3880 | case wasm::kExprF32x4Pmin: | |||
3881 | return EmitBinOp<kS128, kS128, false, kF32>( | |||
3882 | &LiftoffAssembler::emit_f32x4_pmin); | |||
3883 | case wasm::kExprF32x4Pmax: | |||
3884 | return EmitBinOp<kS128, kS128, false, kF32>( | |||
3885 | &LiftoffAssembler::emit_f32x4_pmax); | |||
3886 | case wasm::kExprF64x2Abs: | |||
3887 | return EmitUnOp<kS128, kS128, kF64>(&LiftoffAssembler::emit_f64x2_abs); | |||
3888 | case wasm::kExprF64x2Neg: | |||
3889 | return EmitUnOp<kS128, kS128, kF64>(&LiftoffAssembler::emit_f64x2_neg); | |||
3890 | case wasm::kExprF64x2Sqrt: | |||
3891 | return EmitUnOp<kS128, kS128, kF64>(&LiftoffAssembler::emit_f64x2_sqrt); | |||
3892 | case wasm::kExprF64x2Ceil: | |||
3893 | return EmitSimdFloatRoundingOpWithCFallback<kF64>( | |||
3894 | &LiftoffAssembler::emit_f64x2_ceil, | |||
3895 | &ExternalReference::wasm_f64x2_ceil); | |||
3896 | case wasm::kExprF64x2Floor: | |||
3897 | return EmitSimdFloatRoundingOpWithCFallback<kF64>( | |||
3898 | &LiftoffAssembler::emit_f64x2_floor, | |||
3899 | ExternalReference::wasm_f64x2_floor); | |||
3900 | case wasm::kExprF64x2Trunc: | |||
3901 | return EmitSimdFloatRoundingOpWithCFallback<kF64>( | |||
3902 | &LiftoffAssembler::emit_f64x2_trunc, | |||
3903 | ExternalReference::wasm_f64x2_trunc); | |||
3904 | case wasm::kExprF64x2NearestInt: | |||
3905 | return EmitSimdFloatRoundingOpWithCFallback<kF64>( | |||
3906 | &LiftoffAssembler::emit_f64x2_nearest_int, | |||
3907 | ExternalReference::wasm_f64x2_nearest_int); | |||
3908 | case wasm::kExprF64x2Add: | |||
3909 | return EmitBinOp<kS128, kS128, false, kF64>( | |||
3910 | &LiftoffAssembler::emit_f64x2_add); | |||
3911 | case wasm::kExprF64x2Sub: | |||
3912 | return EmitBinOp<kS128, kS128, false, kF64>( | |||
3913 | &LiftoffAssembler::emit_f64x2_sub); | |||
3914 | case wasm::kExprF64x2Mul: | |||
3915 | return EmitBinOp<kS128, kS128, false, kF64>( | |||
3916 | &LiftoffAssembler::emit_f64x2_mul); | |||
3917 | case wasm::kExprF64x2Div: | |||
3918 | return EmitBinOp<kS128, kS128, false, kF64>( | |||
3919 | &LiftoffAssembler::emit_f64x2_div); | |||
3920 | case wasm::kExprF64x2Min: | |||
3921 | return EmitBinOp<kS128, kS128, false, kF64>( | |||
3922 | &LiftoffAssembler::emit_f64x2_min); | |||
3923 | case wasm::kExprF64x2Max: | |||
3924 | return EmitBinOp<kS128, kS128, false, kF64>( | |||
3925 | &LiftoffAssembler::emit_f64x2_max); | |||
3926 | case wasm::kExprF64x2Pmin: | |||
3927 | return EmitBinOp<kS128, kS128, false, kF64>( | |||
3928 | &LiftoffAssembler::emit_f64x2_pmin); | |||
3929 | case wasm::kExprF64x2Pmax: | |||
3930 | return EmitBinOp<kS128, kS128, false, kF64>( | |||
3931 | &LiftoffAssembler::emit_f64x2_pmax); | |||
3932 | case wasm::kExprI32x4SConvertF32x4: | |||
3933 | return EmitUnOp<kS128, kS128, kF32>( | |||
3934 | &LiftoffAssembler::emit_i32x4_sconvert_f32x4); | |||
3935 | case wasm::kExprI32x4UConvertF32x4: | |||
3936 | return EmitUnOp<kS128, kS128, kF32>( | |||
3937 | &LiftoffAssembler::emit_i32x4_uconvert_f32x4); | |||
3938 | case wasm::kExprF32x4SConvertI32x4: | |||
3939 | return EmitUnOp<kS128, kS128, kF32>( | |||
3940 | &LiftoffAssembler::emit_f32x4_sconvert_i32x4); | |||
3941 | case wasm::kExprF32x4UConvertI32x4: | |||
3942 | return EmitUnOp<kS128, kS128, kF32>( | |||
3943 | &LiftoffAssembler::emit_f32x4_uconvert_i32x4); | |||
3944 | case wasm::kExprI8x16SConvertI16x8: | |||
3945 | return EmitBinOp<kS128, kS128>( | |||
3946 | &LiftoffAssembler::emit_i8x16_sconvert_i16x8); | |||
3947 | case wasm::kExprI8x16UConvertI16x8: | |||
3948 | return EmitBinOp<kS128, kS128>( | |||
3949 | &LiftoffAssembler::emit_i8x16_uconvert_i16x8); | |||
3950 | case wasm::kExprI16x8SConvertI32x4: | |||
3951 | return EmitBinOp<kS128, kS128>( | |||
3952 | &LiftoffAssembler::emit_i16x8_sconvert_i32x4); | |||
3953 | case wasm::kExprI16x8UConvertI32x4: | |||
3954 | return EmitBinOp<kS128, kS128>( | |||
3955 | &LiftoffAssembler::emit_i16x8_uconvert_i32x4); | |||
3956 | case wasm::kExprI16x8SConvertI8x16Low: | |||
3957 | return EmitUnOp<kS128, kS128>( | |||
3958 | &LiftoffAssembler::emit_i16x8_sconvert_i8x16_low); | |||
3959 | case wasm::kExprI16x8SConvertI8x16High: | |||
3960 | return EmitUnOp<kS128, kS128>( | |||
3961 | &LiftoffAssembler::emit_i16x8_sconvert_i8x16_high); | |||
3962 | case wasm::kExprI16x8UConvertI8x16Low: | |||
3963 | return EmitUnOp<kS128, kS128>( | |||
3964 | &LiftoffAssembler::emit_i16x8_uconvert_i8x16_low); | |||
3965 | case wasm::kExprI16x8UConvertI8x16High: | |||
3966 | return EmitUnOp<kS128, kS128>( | |||
3967 | &LiftoffAssembler::emit_i16x8_uconvert_i8x16_high); | |||
3968 | case wasm::kExprI32x4SConvertI16x8Low: | |||
3969 | return EmitUnOp<kS128, kS128>( | |||
3970 | &LiftoffAssembler::emit_i32x4_sconvert_i16x8_low); | |||
3971 | case wasm::kExprI32x4SConvertI16x8High: | |||
3972 | return EmitUnOp<kS128, kS128>( | |||
3973 | &LiftoffAssembler::emit_i32x4_sconvert_i16x8_high); | |||
3974 | case wasm::kExprI32x4UConvertI16x8Low: | |||
3975 | return EmitUnOp<kS128, kS128>( | |||
3976 | &LiftoffAssembler::emit_i32x4_uconvert_i16x8_low); | |||
3977 | case wasm::kExprI32x4UConvertI16x8High: | |||
3978 | return EmitUnOp<kS128, kS128>( | |||
3979 | &LiftoffAssembler::emit_i32x4_uconvert_i16x8_high); | |||
3980 | case wasm::kExprS128AndNot: | |||
3981 | return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_s128_and_not); | |||
3982 | case wasm::kExprI8x16RoundingAverageU: | |||
3983 | return EmitBinOp<kS128, kS128>( | |||
3984 | &LiftoffAssembler::emit_i8x16_rounding_average_u); | |||
3985 | case wasm::kExprI16x8RoundingAverageU: | |||
3986 | return EmitBinOp<kS128, kS128>( | |||
3987 | &LiftoffAssembler::emit_i16x8_rounding_average_u); | |||
3988 | case wasm::kExprI8x16Abs: | |||
3989 | return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_abs); | |||
3990 | case wasm::kExprI16x8Abs: | |||
3991 | return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_abs); | |||
3992 | case wasm::kExprI32x4Abs: | |||
3993 | return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_abs); | |||
3994 | case wasm::kExprI64x2Abs: | |||
3995 | return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_abs); | |||
3996 | case wasm::kExprF64x2ConvertLowI32x4S: | |||
3997 | return EmitUnOp<kS128, kS128, kF64>( | |||
3998 | &LiftoffAssembler::emit_f64x2_convert_low_i32x4_s); | |||
3999 | case wasm::kExprF64x2ConvertLowI32x4U: | |||
4000 | return EmitUnOp<kS128, kS128, kF64>( | |||
4001 | &LiftoffAssembler::emit_f64x2_convert_low_i32x4_u); | |||
4002 | case wasm::kExprF64x2PromoteLowF32x4: | |||
4003 | return EmitUnOp<kS128, kS128, kF64>( | |||
4004 | &LiftoffAssembler::emit_f64x2_promote_low_f32x4); | |||
4005 | case wasm::kExprF32x4DemoteF64x2Zero: | |||
4006 | return EmitUnOp<kS128, kS128, kF32>( | |||
4007 | &LiftoffAssembler::emit_f32x4_demote_f64x2_zero); | |||
4008 | case wasm::kExprI32x4TruncSatF64x2SZero: | |||
4009 | return EmitUnOp<kS128, kS128>( | |||
4010 | &LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero); | |||
4011 | case wasm::kExprI32x4TruncSatF64x2UZero: | |||
4012 | return EmitUnOp<kS128, kS128>( | |||
4013 | &LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero); | |||
4014 | default: | |||
4015 | unsupported(decoder, kSimd, "simd"); | |||
4016 | } | |||
4017 | } | |||
4018 | ||||
4019 | template <ValueKind src_kind, ValueKind result_kind, typename EmitFn> | |||
4020 | void EmitSimdExtractLaneOp(EmitFn fn, | |||
4021 | const SimdLaneImmediate<validate>& imm) { | |||
4022 | static constexpr RegClass src_rc = reg_class_for(src_kind); | |||
4023 | static constexpr RegClass result_rc = reg_class_for(result_kind); | |||
4024 | LiftoffRegister lhs = __ PopToRegister(); | |||
4025 | LiftoffRegister dst = src_rc == result_rc | |||
4026 | ? __ GetUnusedRegister(result_rc, {lhs}, {}) | |||
4027 | : __ GetUnusedRegister(result_rc, {}); | |||
4028 | fn(dst, lhs, imm.lane); | |||
4029 | __ PushRegister(result_kind, dst); | |||
4030 | } | |||
4031 | ||||
4032 | template <ValueKind src2_kind, typename EmitFn> | |||
4033 | void EmitSimdReplaceLaneOp(EmitFn fn, | |||
4034 | const SimdLaneImmediate<validate>& imm) { | |||
4035 | static constexpr RegClass src1_rc = reg_class_for(kS128); | |||
4036 | static constexpr RegClass src2_rc = reg_class_for(src2_kind); | |||
4037 | static constexpr RegClass result_rc = reg_class_for(kS128); | |||
4038 | // On backends which need fp pair, src1_rc and result_rc end up being | |||
4039 | // kFpRegPair, which is != kFpReg, but we still want to pin src2 when it is | |||
4040 | // kFpReg, since it can overlap with those pairs. | |||
4041 | static constexpr bool pin_src2 = kNeedS128RegPair && src2_rc == kFpReg; | |||
4042 | ||||
4043 | // Does not work for arm | |||
4044 | LiftoffRegister src2 = __ PopToRegister(); | |||
4045 | LiftoffRegister src1 = (src1_rc == src2_rc || pin_src2) | |||
4046 | ? __ PopToRegister(LiftoffRegList{src2}) | |||
4047 | : __ PopToRegister(); | |||
4048 | ||||
4049 | LiftoffRegister dst = | |||
4050 | (src2_rc == result_rc || pin_src2) | |||
4051 | ? __ GetUnusedRegister(result_rc, {src1}, LiftoffRegList{src2}) | |||
4052 | : __ GetUnusedRegister(result_rc, {src1}, {}); | |||
4053 | fn(dst, src1, src2, imm.lane); | |||
4054 | __ PushRegister(kS128, dst); | |||
4055 | } | |||
4056 | ||||
4057 | void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode, | |||
4058 | const SimdLaneImmediate<validate>& imm, | |||
4059 | const base::Vector<Value> inputs, Value* result) { | |||
4060 | if (!CpuFeatures::SupportsWasmSimd128()) { | |||
4061 | return unsupported(decoder, kSimd, "simd"); | |||
4062 | } | |||
4063 | switch (opcode) { | |||
4064 | #define CASE_SIMD_EXTRACT_LANE_OP(opcode, kind, fn) \ | |||
4065 | case wasm::kExpr##opcode: \ | |||
4066 | EmitSimdExtractLaneOp<kS128, k##kind>( \ | |||
4067 | [=](LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx) { \ | |||
4068 | __ emit_##fn(dst, lhs, imm_lane_idx); \ | |||
4069 | }, \ | |||
4070 | imm); \ | |||
4071 | break; | |||
4072 | CASE_SIMD_EXTRACT_LANE_OP(I8x16ExtractLaneS, I32, i8x16_extract_lane_s) | |||
4073 | CASE_SIMD_EXTRACT_LANE_OP(I8x16ExtractLaneU, I32, i8x16_extract_lane_u) | |||
4074 | CASE_SIMD_EXTRACT_LANE_OP(I16x8ExtractLaneS, I32, i16x8_extract_lane_s) | |||
4075 | CASE_SIMD_EXTRACT_LANE_OP(I16x8ExtractLaneU, I32, i16x8_extract_lane_u) | |||
4076 | CASE_SIMD_EXTRACT_LANE_OP(I32x4ExtractLane, I32, i32x4_extract_lane) | |||
4077 | CASE_SIMD_EXTRACT_LANE_OP(I64x2ExtractLane, I64, i64x2_extract_lane) | |||
4078 | CASE_SIMD_EXTRACT_LANE_OP(F32x4ExtractLane, F32, f32x4_extract_lane) | |||
4079 | CASE_SIMD_EXTRACT_LANE_OP(F64x2ExtractLane, F64, f64x2_extract_lane) | |||
4080 | #undef CASE_SIMD_EXTRACT_LANE_OP | |||
4081 | #define CASE_SIMD_REPLACE_LANE_OP(opcode, kind, fn) \ | |||
4082 | case wasm::kExpr##opcode: \ | |||
4083 | EmitSimdReplaceLaneOp<k##kind>( \ | |||
4084 | [=](LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, \ | |||
4085 | uint8_t imm_lane_idx) { \ | |||
4086 | __ emit_##fn(dst, src1, src2, imm_lane_idx); \ | |||
4087 | }, \ | |||
4088 | imm); \ | |||
4089 | break; | |||
4090 | CASE_SIMD_REPLACE_LANE_OP(I8x16ReplaceLane, I32, i8x16_replace_lane) | |||
4091 | CASE_SIMD_REPLACE_LANE_OP(I16x8ReplaceLane, I32, i16x8_replace_lane) | |||
4092 | CASE_SIMD_REPLACE_LANE_OP(I32x4ReplaceLane, I32, i32x4_replace_lane) | |||
4093 | CASE_SIMD_REPLACE_LANE_OP(I64x2ReplaceLane, I64, i64x2_replace_lane) | |||
4094 | CASE_SIMD_REPLACE_LANE_OP(F32x4ReplaceLane, F32, f32x4_replace_lane) | |||
4095 | CASE_SIMD_REPLACE_LANE_OP(F64x2ReplaceLane, F64, f64x2_replace_lane) | |||
4096 | #undef CASE_SIMD_REPLACE_LANE_OP | |||
4097 | default: | |||
4098 | unsupported(decoder, kSimd, "simd"); | |||
4099 | } | |||
4100 | } | |||
4101 | ||||
4102 | void S128Const(FullDecoder* decoder, const Simd128Immediate<validate>& imm, | |||
4103 | Value* result) { | |||
4104 | if (!CpuFeatures::SupportsWasmSimd128()) { | |||
4105 | return unsupported(decoder, kSimd, "simd"); | |||
4106 | } | |||
4107 | constexpr RegClass result_rc = reg_class_for(kS128); | |||
4108 | LiftoffRegister dst = __ GetUnusedRegister(result_rc, {}); | |||
4109 | bool all_zeroes = std::all_of(std::begin(imm.value), std::end(imm.value), | |||
4110 | [](uint8_t v) { return v == 0; }); | |||
4111 | bool all_ones = std::all_of(std::begin(imm.value), std::end(imm.value), | |||
4112 | [](uint8_t v) { return v == 0xff; }); | |||
4113 | if (all_zeroes) { | |||
4114 | __ LiftoffAssembler::emit_s128_xor(dst, dst, dst); | |||
4115 | } else if (all_ones) { | |||
4116 | // Any SIMD eq will work, i32x4 is efficient on all archs. | |||
4117 | __ LiftoffAssembler::emit_i32x4_eq(dst, dst, dst); | |||
4118 | } else { | |||
4119 | __ LiftoffAssembler::emit_s128_const(dst, imm.value); | |||
4120 | } | |||
4121 | __ PushRegister(kS128, dst); | |||
4122 | } | |||
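// Standalone illustration of the classification above (plain C++, not V8 API):
// a 16-byte immediate of all 0x00 can be materialized as xor(dst, dst), all
// 0xff as a self-comparison, and anything else needs the full constant.
//
//   #include <algorithm>
//   #include <cstdint>
//   enum class S128ConstKind { kAllZeroes, kAllOnes, kGeneric };
//   inline S128ConstKind ClassifyS128Const(const uint8_t (&value)[16]) {
//     auto all = [&](uint8_t b) {
//       return std::all_of(std::begin(value), std::end(value),
//                          [b](uint8_t v) { return v == b; });
//     };
//     if (all(0x00)) return S128ConstKind::kAllZeroes;
//     if (all(0xff)) return S128ConstKind::kAllOnes;
//     return S128ConstKind::kGeneric;
//   }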
4123 | ||||
4124 | void Simd8x16ShuffleOp(FullDecoder* decoder, | |||
4125 | const Simd128Immediate<validate>& imm, | |||
4126 | const Value& input0, const Value& input1, | |||
4127 | Value* result) { | |||
4128 | if (!CpuFeatures::SupportsWasmSimd128()) { | |||
4129 | return unsupported(decoder, kSimd, "simd"); | |||
4130 | } | |||
4131 | static constexpr RegClass result_rc = reg_class_for(kS128); | |||
4132 | LiftoffRegister rhs = __ PopToRegister(); | |||
4133 | LiftoffRegister lhs = __ PopToRegister(LiftoffRegList{rhs}); | |||
4134 | LiftoffRegister dst = __ GetUnusedRegister(result_rc, {lhs, rhs}, {}); | |||
4135 | ||||
4136 | uint8_t shuffle[kSimd128Size]; | |||
4137 | memcpy(shuffle, imm.value, sizeof(shuffle)); | |||
4138 | bool is_swizzle; | |||
4139 | bool needs_swap; | |||
4140 | wasm::SimdShuffle::CanonicalizeShuffle(lhs == rhs, shuffle, &needs_swap, | |||
4141 | &is_swizzle); | |||
4142 | if (needs_swap) { | |||
4143 | std::swap(lhs, rhs); | |||
4144 | } | |||
4145 | __ LiftoffAssembler::emit_i8x16_shuffle(dst, lhs, rhs, shuffle, is_swizzle); | |||
4146 | __ PushRegister(kS128, dst); | |||
4147 | } | |||
4148 | ||||
4149 | void ToSmi(Register reg) { | |||
4150 | if (COMPRESS_POINTERS_BOOL || kSystemPointerSize == 4) { | |||
4151 | __ emit_i32_shli(reg, reg, kSmiShiftSize + kSmiTagSize); | |||
4152 | } else { | |||
4153 | __ emit_i64_shli(LiftoffRegister{reg}, LiftoffRegister{reg}, | |||
4154 | kSmiShiftSize + kSmiTagSize); | |||
4155 | } | |||
4156 | } | |||
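// Worked example of the Smi tagging performed by ToSmi() (a sketch; the exact
// shift constants are V8's usual configurations and should be treated as an
// assumption here). With pointer compression or on 32-bit targets the payload
// is shifted past the 1-bit tag; with full 64-bit Smis it lives in the upper
// 32 bits of the word.
//
//   #include <cstdint>
//   // kSmiShiftSize == 0, kSmiTagSize == 1:  5 -> 0xA
//   inline uint64_t ToSmiCompressed(uint32_t v) { return uint64_t{v} << 1; }
//   // kSmiShiftSize == 31, kSmiTagSize == 1:  5 -> 0x0000000500000000
//   inline uint64_t ToSmiFull64(uint32_t v) { return uint64_t{v} << 32; }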
4157 | ||||
4158 | void Store32BitExceptionValue(Register values_array, int* index_in_array, | |||
4159 | Register value, LiftoffRegList pinned) { | |||
4160 | LiftoffRegister tmp_reg = __ GetUnusedRegister(kGpReg, pinned); | |||
4161 | // Get the lower half word into tmp_reg and extend to a Smi. | |||
4162 | --*index_in_array; | |||
4163 | __ emit_i32_andi(tmp_reg.gp(), value, 0xffff); | |||
4164 | ToSmi(tmp_reg.gp()); | |||
4165 | __ StoreTaggedPointer( | |||
4166 | values_array, no_reg, | |||
4167 | wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(*index_in_array), | |||
4168 | tmp_reg, pinned, LiftoffAssembler::kSkipWriteBarrier); | |||
4169 | ||||
4170 | // Get the upper half word into tmp_reg and extend to a Smi. | |||
4171 | --*index_in_array; | |||
4172 | __ emit_i32_shri(tmp_reg.gp(), value, 16); | |||
4173 | ToSmi(tmp_reg.gp()); | |||
4174 | __ StoreTaggedPointer( | |||
4175 | values_array, no_reg, | |||
4176 | wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(*index_in_array), | |||
4177 | tmp_reg, pinned, LiftoffAssembler::kSkipWriteBarrier); | |||
4178 | } | |||
4179 | ||||
4180 | void Store64BitExceptionValue(Register values_array, int* index_in_array, | |||
4181 | LiftoffRegister value, LiftoffRegList pinned) { | |||
4182 | if (kNeedI64RegPair) { | |||
4183 | Store32BitExceptionValue(values_array, index_in_array, value.low_gp(), | |||
4184 | pinned); | |||
4185 | Store32BitExceptionValue(values_array, index_in_array, value.high_gp(), | |||
4186 | pinned); | |||
4187 | } else { | |||
4188 | Store32BitExceptionValue(values_array, index_in_array, value.gp(), | |||
4189 | pinned); | |||
4190 | __ emit_i64_shri(value, value, 32); | |||
4191 | Store32BitExceptionValue(values_array, index_in_array, value.gp(), | |||
4192 | pinned); | |||
4193 | } | |||
4194 | } | |||
4195 | ||||
4196 | void Load16BitExceptionValue(LiftoffRegister dst, | |||
4197 | LiftoffRegister values_array, uint32_t* index, | |||
4198 | LiftoffRegList pinned) { | |||
4199 | __ LoadSmiAsInt32( | |||
4200 | dst, values_array.gp(), | |||
4201 | wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(*index), pinned); | |||
4202 | (*index)++; | |||
4203 | } | |||
4204 | ||||
4205 | void Load32BitExceptionValue(Register dst, LiftoffRegister values_array, | |||
4206 | uint32_t* index, LiftoffRegList pinned) { | |||
4207 | LiftoffRegister upper = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
4208 | Load16BitExceptionValue(upper, values_array, index, pinned); | |||
4209 | __ emit_i32_shli(upper.gp(), upper.gp(), 16); | |||
4210 | Load16BitExceptionValue(LiftoffRegister(dst), values_array, index, pinned); | |||
4211 | __ emit_i32_or(dst, upper.gp(), dst); | |||
4212 | } | |||
4213 | ||||
4214 | void Load64BitExceptionValue(LiftoffRegister dst, | |||
4215 | LiftoffRegister values_array, uint32_t* index, | |||
4216 | LiftoffRegList pinned) { | |||
4217 | if (kNeedI64RegPair) { | |||
4218 | Load32BitExceptionValue(dst.high_gp(), values_array, index, pinned); | |||
4219 | Load32BitExceptionValue(dst.low_gp(), values_array, index, pinned); | |||
4220 | } else { | |||
4221 | Load16BitExceptionValue(dst, values_array, index, pinned); | |||
4222 | __ emit_i64_shli(dst, dst, 48); | |||
4223 | LiftoffRegister tmp_reg = | |||
4224 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
4225 | Load16BitExceptionValue(tmp_reg, values_array, index, pinned); | |||
4226 | __ emit_i64_shli(tmp_reg, tmp_reg, 32); | |||
4227 | __ emit_i64_or(dst, tmp_reg, dst); | |||
4228 | Load16BitExceptionValue(tmp_reg, values_array, index, pinned); | |||
4229 | __ emit_i64_shli(tmp_reg, tmp_reg, 16); | |||
4230 | __ emit_i64_or(dst, tmp_reg, dst); | |||
4231 | Load16BitExceptionValue(tmp_reg, values_array, index, pinned); | |||
4232 | __ emit_i64_or(dst, tmp_reg, dst); | |||
4233 | } | |||
4234 | } | |||
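// Round-trip sketch of the encoding used by the Store*/Load*ExceptionValue
// helpers above (plain C++, not V8 API): every 32-bit value occupies two slots
// of the values array, each holding one 16-bit half as a Smi, with the upper
// half ending up at the lower index because the store side walks the index
// backwards.
//
//   #include <cstdint>
//   #include <vector>
//   inline void StoreU32(std::vector<uint16_t>& slots, int* index, uint32_t v) {
//     slots[--*index] = static_cast<uint16_t>(v & 0xffff);  // lower half
//     slots[--*index] = static_cast<uint16_t>(v >> 16);     // upper half
//   }
//   inline uint32_t LoadU32(const std::vector<uint16_t>& slots, uint32_t* index) {
//     uint32_t upper = slots[(*index)++];
//     uint32_t lower = slots[(*index)++];
//     return (upper << 16) | lower;
//   }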
4235 | ||||
4236 | void StoreExceptionValue(ValueType type, Register values_array, | |||
4237 | int* index_in_array, LiftoffRegList pinned) { | |||
4238 | LiftoffRegister value = pinned.set(__ PopToRegister(pinned)); | |||
4239 | switch (type.kind()) { | |||
4240 | case kI32: | |||
4241 | Store32BitExceptionValue(values_array, index_in_array, value.gp(), | |||
4242 | pinned); | |||
4243 | break; | |||
4244 | case kF32: { | |||
4245 | LiftoffRegister gp_reg = | |||
4246 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
4247 | __ emit_type_conversion(kExprI32ReinterpretF32, gp_reg, value, nullptr); | |||
4248 | Store32BitExceptionValue(values_array, index_in_array, gp_reg.gp(), | |||
4249 | pinned); | |||
4250 | break; | |||
4251 | } | |||
4252 | case kI64: | |||
4253 | Store64BitExceptionValue(values_array, index_in_array, value, pinned); | |||
4254 | break; | |||
4255 | case kF64: { | |||
4256 | LiftoffRegister tmp_reg = | |||
4257 | pinned.set(__ GetUnusedRegister(reg_class_for(kI64), pinned)); | |||
4258 | __ emit_type_conversion(kExprI64ReinterpretF64, tmp_reg, value, | |||
4259 | nullptr); | |||
4260 | Store64BitExceptionValue(values_array, index_in_array, tmp_reg, pinned); | |||
4261 | break; | |||
4262 | } | |||
4263 | case kS128: { | |||
4264 | LiftoffRegister tmp_reg = | |||
4265 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
4266 | for (int i : {3, 2, 1, 0}) { | |||
4267 | __ emit_i32x4_extract_lane(tmp_reg, value, i); | |||
4268 | Store32BitExceptionValue(values_array, index_in_array, tmp_reg.gp(), | |||
4269 | pinned); | |||
4270 | } | |||
4271 | break; | |||
4272 | } | |||
4273 | case wasm::kRef: | |||
4274 | case wasm::kOptRef: | |||
4275 | case wasm::kRtt: { | |||
4276 | --(*index_in_array); | |||
4277 | __ StoreTaggedPointer( | |||
4278 | values_array, no_reg, | |||
4279 | wasm::ObjectAccess::ElementOffsetInTaggedFixedArray( | |||
4280 | *index_in_array), | |||
4281 | value, pinned); | |||
4282 | break; | |||
4283 | } | |||
4284 | case wasm::kI8: | |||
4285 | case wasm::kI16: | |||
4286 | case wasm::kVoid: | |||
4287 | case wasm::kBottom: | |||
4288 | UNREACHABLE(); | |||
4289 | } | |||
4290 | } | |||
4291 | ||||
4292 | void LoadExceptionValue(ValueKind kind, LiftoffRegister values_array, | |||
4293 | uint32_t* index, LiftoffRegList pinned) { | |||
4294 | RegClass rc = reg_class_for(kind); | |||
4295 | LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned)); | |||
4296 | switch (kind) { | |||
4297 | case kI32: | |||
4298 | Load32BitExceptionValue(value.gp(), values_array, index, pinned); | |||
4299 | break; | |||
4300 | case kF32: { | |||
4301 | LiftoffRegister tmp_reg = | |||
4302 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
4303 | Load32BitExceptionValue(tmp_reg.gp(), values_array, index, pinned); | |||
4304 | __ emit_type_conversion(kExprF32ReinterpretI32, value, tmp_reg, | |||
4305 | nullptr); | |||
4306 | break; | |||
4307 | } | |||
4308 | case kI64: | |||
4309 | Load64BitExceptionValue(value, values_array, index, pinned); | |||
4310 | break; | |||
4311 | case kF64: { | |||
4312 | RegClass rc_i64 = reg_class_for(kI64); | |||
4313 | LiftoffRegister tmp_reg = | |||
4314 | pinned.set(__ GetUnusedRegister(rc_i64, pinned)); | |||
4315 | Load64BitExceptionValue(tmp_reg, values_array, index, pinned); | |||
4316 | __ emit_type_conversion(kExprF64ReinterpretI64, value, tmp_reg, | |||
4317 | nullptr); | |||
4318 | break; | |||
4319 | } | |||
4320 | case kS128: { | |||
4321 | LiftoffRegister tmp_reg = | |||
4322 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
4323 | Load32BitExceptionValue(tmp_reg.gp(), values_array, index, pinned); | |||
4324 | __ emit_i32x4_splat(value, tmp_reg); | |||
4325 | for (int lane : {1, 2, 3}) { | |||
4326 | Load32BitExceptionValue(tmp_reg.gp(), values_array, index, pinned); | |||
4327 | __ emit_i32x4_replace_lane(value, value, tmp_reg, lane); | |||
4328 | } | |||
4329 | break; | |||
4330 | } | |||
4331 | case wasm::kRef: | |||
4332 | case wasm::kOptRef: | |||
4333 | case wasm::kRtt: { | |||
4334 | __ LoadTaggedPointer( | |||
4335 | value.gp(), values_array.gp(), no_reg, | |||
4336 | wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(*index), | |||
4337 | pinned); | |||
4338 | (*index)++; | |||
4339 | break; | |||
4340 | } | |||
4341 | case wasm::kI8: | |||
4342 | case wasm::kI16: | |||
4343 | case wasm::kVoid: | |||
4344 | case wasm::kBottom: | |||
4345 | UNREACHABLE(); | |||
4346 | } | |||
4347 | __ PushRegister(kind, value); | |||
4348 | } | |||
4349 | ||||
4350 | void GetExceptionValues(FullDecoder* decoder, | |||
4351 | LiftoffAssembler::VarState& exception_var, | |||
4352 | const WasmTag* tag) { | |||
4353 | LiftoffRegList pinned; | |||
4354 | CODE_COMMENT("get exception values"); | |||
4355 | LiftoffRegister values_array = GetExceptionProperty( | |||
4356 | exception_var, RootIndex::kwasm_exception_values_symbol); | |||
4357 | pinned.set(values_array); | |||
4358 | uint32_t index = 0; | |||
4359 | const WasmTagSig* sig = tag->sig; | |||
4360 | for (ValueType param : sig->parameters()) { | |||
4361 | LoadExceptionValue(param.kind(), values_array, &index, pinned); | |||
4362 | } | |||
4363 | DCHECK_EQ(index, WasmExceptionPackage::GetEncodedSize(tag)); | |||
4364 | } | |||
4365 | ||||
4366 | void EmitLandingPad(FullDecoder* decoder, int handler_offset) { | |||
4367 | if (decoder->current_catch() == -1) return; | |||
4368 | MovableLabel handler; | |||
4369 | ||||
4370 | // If we return from the throwing code normally, just skip over the handler. | |||
4371 | Label skip_handler; | |||
4372 | __ emit_jump(&skip_handler); | |||
4373 | ||||
4374 | // Handler: merge into the catch state, and jump to the catch body. | |||
4375 | CODE_COMMENT("-- landing pad --"); | |||
4376 | __ bind(handler.get()); | |||
4377 | __ ExceptionHandler(); | |||
4378 | __ PushException(); | |||
4379 | handlers_.push_back({std::move(handler), handler_offset}); | |||
4380 | Control* current_try = | |||
4381 | decoder->control_at(decoder->control_depth_of_current_catch()); | |||
4382 | DCHECK_NOT_NULL(current_try->try_info); | |||
4383 | if (!current_try->try_info->catch_reached) { | |||
4384 | current_try->try_info->catch_state.InitMerge( | |||
4385 | *__ cache_state(), __ num_locals(), 1, | |||
4386 | current_try->stack_depth + current_try->num_exceptions); | |||
4387 | current_try->try_info->catch_reached = true; | |||
4388 | } | |||
4389 | __ MergeStackWith(current_try->try_info->catch_state, 1, | |||
4390 | LiftoffAssembler::kForwardJump); | |||
4391 | __ emit_jump(&current_try->try_info->catch_label); | |||
4392 | ||||
4393 | __ bind(&skip_handler); | |||
4394 | // Drop the exception. | |||
4395 | __ DropValues(1); | |||
4396 | } | |||
4397 | ||||
4398 | void Throw(FullDecoder* decoder, const TagIndexImmediate<validate>& imm, | |||
4399 | const base::Vector<Value>& /* args */) { | |||
4400 | LiftoffRegList pinned; | |||
4401 | ||||
4402 | // Load the encoded size in a register for the builtin call. | |||
4403 | int encoded_size = WasmExceptionPackage::GetEncodedSize(imm.tag); | |||
4404 | LiftoffRegister encoded_size_reg = | |||
4405 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
4406 | __ LoadConstant(encoded_size_reg, WasmValue(encoded_size)); | |||
4407 | ||||
4408 | // Call the WasmAllocateFixedArray builtin to create the values array. | |||
4409 | CallRuntimeStub(WasmCode::kWasmAllocateFixedArray, | |||
4410 | MakeSig::Returns(kPointerKind).Params(kPointerKind), | |||
4411 | {LiftoffAssembler::VarState{ | |||
4412 | kSmiKind, LiftoffRegister{encoded_size_reg}, 0}}, | |||
4413 | decoder->position()); | |||
4414 | MaybeOSR(); | |||
4415 | ||||
4416 | // The FixedArray for the exception values is now in the first gp return | |||
4417 | // register. | |||
4418 | LiftoffRegister values_array{kReturnRegister0}; | |||
4419 | pinned.set(values_array); | |||
4420 | ||||
4421 | // Now store the exception values in the FixedArray. Do this from last to | |||
4422 | // first value, such that we can just pop them from the value stack. | |||
4423 | CODE_COMMENT("fill values array"); | |||
4424 | int index = encoded_size; | |||
4425 | auto* sig = imm.tag->sig; | |||
4426 | for (size_t param_idx = sig->parameter_count(); param_idx > 0; | |||
4427 | --param_idx) { | |||
4428 | ValueType type = sig->GetParam(param_idx - 1); | |||
4429 | StoreExceptionValue(type, values_array.gp(), &index, pinned); | |||
4430 | } | |||
4431 | DCHECK_EQ(0, index); | |||
4432 | ||||
4433 | // Load the exception tag. | |||
4434 | CODE_COMMENT("load exception tag"); | |||
4435 | LiftoffRegister exception_tag = | |||
4436 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
4437 | LOAD_TAGGED_PTR_INSTANCE_FIELD(exception_tag.gp(), TagsTable, pinned); | |||
4438 | __ LoadTaggedPointer( | |||
4439 | exception_tag.gp(), exception_tag.gp(), no_reg, | |||
4440 | wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), {}); | |||
4441 | ||||
4442 | // Finally, call WasmThrow. | |||
4443 | CallRuntimeStub(WasmCode::kWasmThrow, | |||
4444 | MakeSig::Params(kPointerKind, kPointerKind), | |||
4445 | {LiftoffAssembler::VarState{kPointerKind, exception_tag, 0}, | |||
4446 | LiftoffAssembler::VarState{kPointerKind, values_array, 0}}, | |||
4447 | decoder->position()); | |||
4448 | ||||
4449 | int pc_offset = __ pc_offset(); | |||
4450 | MaybeOSR(); | |||
4451 | EmitLandingPad(decoder, pc_offset); | |||
4452 | } | |||
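// How the encoded size used above comes about, as a standalone sketch (the
// per-kind slot counts are derived from the Store*/Load*ExceptionValue helpers
// in this file, not from WasmExceptionPackage itself, so treat them as an
// assumption): 32-bit values take two 16-bit Smi slots, 64-bit values four,
// s128 eight, and references a single tagged slot.
//
//   inline int EncodedSlotCount(ValueKind kind) {
//     switch (kind) {
//       case kI32: case kF32: return 2;
//       case kI64: case kF64: return 4;
//       case kS128: return 8;
//       case kRef: case kOptRef: return 1;
//       default: return 0;
//     }
//   }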
4453 | ||||
4454 | void AtomicStoreMem(FullDecoder* decoder, StoreType type, | |||
4455 | const MemoryAccessImmediate<validate>& imm) { | |||
4456 | LiftoffRegList pinned; | |||
4457 | LiftoffRegister value = pinned.set(__ PopToRegister()); | |||
4458 | LiftoffRegister full_index = __ PopToRegister(pinned); | |||
4459 | Register index = BoundsCheckMem(decoder, type.size(), imm.offset, | |||
4460 | full_index, pinned, kDoForceCheck); | |||
4461 | if (index == no_reg) return; | |||
4462 | ||||
4463 | pinned.set(index); | |||
4464 | AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned); | |||
4465 | uintptr_t offset = imm.offset; | |||
4466 | CODE_COMMENT("atomic store to memory"); | |||
4467 | Register addr = pinned.set(GetMemoryStart(pinned)); | |||
4468 | LiftoffRegList outer_pinned; | |||
4469 | if (V8_UNLIKELY(FLAG_trace_wasm_memory)) outer_pinned.set(index); | |||
4470 | __ AtomicStore(addr, index, offset, value, type, outer_pinned); | |||
4471 | if (V8_UNLIKELY(FLAG_trace_wasm_memory)) { | |||
4472 | TraceMemoryOperation(true, type.mem_rep(), index, offset, | |||
4473 | decoder->position()); | |||
4474 | } | |||
4475 | } | |||
4476 | ||||
4477 | void AtomicLoadMem(FullDecoder* decoder, LoadType type, | |||
4478 | const MemoryAccessImmediate<validate>& imm) { | |||
4479 | ValueKind kind = type.value_type().kind(); | |||
4480 | LiftoffRegister full_index = __ PopToRegister(); | |||
4481 | Register index = BoundsCheckMem(decoder, type.size(), imm.offset, | |||
4482 | full_index, {}, kDoForceCheck); | |||
4483 | if (index == no_reg) return; | |||
4484 | ||||
4485 | LiftoffRegList pinned = {index}; | |||
4486 | AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned); | |||
4487 | uintptr_t offset = imm.offset; | |||
4488 | CODE_COMMENT("atomic load from memory"); | |||
4489 | Register addr = pinned.set(GetMemoryStart(pinned)); | |||
4490 | RegClass rc = reg_class_for(kind); | |||
4491 | LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned)); | |||
4492 | __ AtomicLoad(value, addr, index, offset, type, pinned); | |||
4493 | __ PushRegister(kind, value); | |||
4494 | ||||
4495 | if (V8_UNLIKELY(FLAG_trace_wasm_memory)) { | |||
4496 | TraceMemoryOperation(false, type.mem_type().representation(), index, | |||
4497 | offset, decoder->position()); | |||
4498 | } | |||
4499 | } | |||
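// Minimal model of the alignment requirement that AlignmentCheckMem enforces
// for atomics (a sketch under the assumption that the check applies to the
// effective address index + offset, as suggested by the arguments passed
// above): the access traps unless that address is a multiple of the access
// size.
//
//   #include <cstdint>
//   inline bool IsNaturallyAligned(uintptr_t index, uintptr_t offset,
//                                  uint32_t access_size) {
//     // access_size is a power of two (1, 2, 4, or 8 bytes).
//     return ((index + offset) & (access_size - 1)) == 0;
//   }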
4500 | ||||
4501 | void AtomicBinop(FullDecoder* decoder, StoreType type, | |||
4502 | const MemoryAccessImmediate<validate>& imm, | |||
4503 | void (LiftoffAssembler::*emit_fn)(Register, Register, | |||
4504 | uintptr_t, LiftoffRegister, | |||
4505 | LiftoffRegister, | |||
4506 | StoreType)) { | |||
4507 | ValueKind result_kind = type.value_type().kind(); | |||
4508 | LiftoffRegList pinned; | |||
4509 | LiftoffRegister value = pinned.set(__ PopToRegister()); | |||
4510 | #ifdef V8_TARGET_ARCH_IA32 | |||
4511 | // We have to reuse the value register as the result register so that we | |||
4512 | // don't run out of registers on ia32. For this we use the value register as | |||
4513 | // the result register if it has no other uses. Otherwise we allocate a new | |||
4514 | // register and release the value register so that it can be spilled. | |||
4515 | LiftoffRegister result = value; | |||
4516 | if (__ cache_state()->is_used(value)) { | |||
4517 | result = pinned.set(__ GetUnusedRegister(value.reg_class(), pinned)); | |||
4518 | __ Move(result, value, result_kind); | |||
4519 | pinned.clear(value); | |||
4520 | value = result; | |||
4521 | } | |||
4522 | #else | |||
4523 | LiftoffRegister result = | |||
4524 | pinned.set(__ GetUnusedRegister(value.reg_class(), pinned)); | |||
4525 | #endif | |||
4526 | LiftoffRegister full_index = __ PopToRegister(pinned); | |||
4527 | Register index = BoundsCheckMem(decoder, type.size(), imm.offset, | |||
4528 | full_index, pinned, kDoForceCheck); | |||
4529 | if (index == no_reg) return; | |||
4530 | ||||
4531 | pinned.set(index); | |||
4532 | AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned); | |||
4533 | ||||
4534 | CODE_COMMENT("atomic binop"); | |||
4535 | uintptr_t offset = imm.offset; | |||
4536 | Register addr = pinned.set(GetMemoryStart(pinned)); | |||
4537 | ||||
4538 | (asm_.*emit_fn)(addr, index, offset, value, result, type); | |||
4539 | __ PushRegister(result_kind, result); | |||
4540 | } | |||
4541 | ||||
4542 | void AtomicCompareExchange(FullDecoder* decoder, StoreType type, | |||
4543 | const MemoryAccessImmediate<validate>& imm) { | |||
4544 | #ifdef V8_TARGET_ARCH_IA32 | |||
4545 | // On ia32 we don't have enough registers to first pop all the values off | |||
4546 | // the stack and then start with the code generation. Instead we do the | |||
4547 | // complete address calculation first, so that the address only needs a | |||
4548 | // single register. Afterwards we load all remaining values into the | |||
4549 | // other registers. | |||
4550 | LiftoffRegister full_index = __ PeekToRegister(2, {}); | |||
4551 | Register index = BoundsCheckMem(decoder, type.size(), imm.offset, | |||
4552 | full_index, {}, kDoForceCheck); | |||
4553 | if (index == no_reg) return; | |||
4554 | LiftoffRegList pinned = {index}; | |||
4555 | AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned); | |||
4556 | ||||
4557 | uintptr_t offset = imm.offset; | |||
4558 | Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); | |||
4559 | LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned); | |||
4560 | #ifdef V8_SANDBOXED_POINTERS | |||
4561 | __ DecodeSandboxedPointer(addr); | |||
4562 | #endif | |||
4563 | __ emit_i32_add(addr, addr, index); | |||
4564 | pinned.clear(LiftoffRegister(index)); | |||
4565 | LiftoffRegister new_value = pinned.set(__ PopToRegister(pinned)); | |||
4566 | LiftoffRegister expected = pinned.set(__ PopToRegister(pinned)); | |||
4567 | ||||
4568 | // Pop the index from the stack. | |||
4569 | __ DropValues(1); | |||
4570 | ||||
4571 | LiftoffRegister result = expected; | |||
4572 | if (__ cache_state()->is_used(result)) __ SpillRegister(result); | |||
4573 | ||||
4574 | // We already added the index to addr, so we can just pass no_reg to the | |||
4575 | // assembler now. | |||
4576 | __ AtomicCompareExchange(addr, no_reg, offset, expected, new_value, result, | |||
4577 | type); | |||
4578 | __ PushRegister(type.value_type().kind(), result); | |||
4579 | return; | |||
4580 | #else | |||
4581 | ValueKind result_kind = type.value_type().kind(); | |||
4582 | LiftoffRegList pinned; | |||
4583 | LiftoffRegister new_value = pinned.set(__ PopToRegister()); | |||
4584 | LiftoffRegister expected = pinned.set(__ PopToRegister(pinned)); | |||
4585 | LiftoffRegister full_index = __ PopToRegister(pinned); | |||
4586 | Register index = BoundsCheckMem(decoder, type.size(), imm.offset, | |||
4587 | full_index, pinned, kDoForceCheck); | |||
4588 | if (index == no_reg) return; | |||
4589 | pinned.set(index); | |||
4590 | AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned); | |||
4591 | ||||
4592 | uintptr_t offset = imm.offset; | |||
4593 | Register addr = pinned.set(GetMemoryStart(pinned)); | |||
4594 | LiftoffRegister result = | |||
4595 | pinned.set(__ GetUnusedRegister(reg_class_for(result_kind), pinned)); | |||
4596 | ||||
4597 | __ AtomicCompareExchange(addr, index, offset, expected, new_value, result, | |||
4598 | type); | |||
4599 | __ PushRegister(result_kind, result); | |||
4600 | #endif | |||
4601 | } | |||
4602 | ||||
4603 | void CallRuntimeStub(WasmCode::RuntimeStubId stub_id, const ValueKindSig& sig, | |||
4604 | std::initializer_list<LiftoffAssembler::VarState> params, | |||
4605 | int position) { | |||
4606 | CODE_COMMENT( | |||
4607 | (std::string{"call builtin: "} + GetRuntimeStubName(stub_id)).c_str()); | |||
4608 | auto interface_descriptor = Builtins::CallInterfaceDescriptorFor( | |||
4609 | RuntimeStubIdToBuiltinName(stub_id)); | |||
4610 | auto* call_descriptor = compiler::Linkage::GetStubCallDescriptor( | |||
4611 | compilation_zone_, // zone | |||
4612 | interface_descriptor, // descriptor | |||
4613 | interface_descriptor.GetStackParameterCount(), // stack parameter count | |||
4614 | compiler::CallDescriptor::kNoFlags, // flags | |||
4615 | compiler::Operator::kNoProperties, // properties | |||
4616 | StubCallMode::kCallWasmRuntimeStub); // stub call mode | |||
4617 | ||||
4618 | __ PrepareBuiltinCall(&sig, call_descriptor, params); | |||
4619 | if (position != kNoSourcePosition) { | |||
4620 | source_position_table_builder_.AddPosition( | |||
4621 | __ pc_offset(), SourcePosition(position), true); | |||
4622 | } | |||
4623 | __ CallRuntimeStub(stub_id); | |||
4624 | DefineSafepoint(); | |||
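     | // Note (added for clarity): the safepoint records which stack slots hold | |||
     | // tagged values at this call, so the GC can walk the frame while the stub | |||
     | // runs. | |||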
4625 | } | |||
4626 | ||||
4627 | void AtomicWait(FullDecoder* decoder, ValueKind kind, | |||
4628 | const MemoryAccessImmediate<validate>& imm) { | |||
4629 | LiftoffRegister full_index = __ PeekToRegister(2, {}); | |||
4630 | Register index_reg = | |||
4631 | BoundsCheckMem(decoder, value_kind_size(kind), imm.offset, full_index, | |||
4632 | {}, kDoForceCheck); | |||
4633 | if (index_reg == no_reg) return; | |||
4634 | LiftoffRegList pinned = {index_reg}; | |||
4635 | AlignmentCheckMem(decoder, value_kind_size(kind), imm.offset, index_reg, | |||
4636 | pinned); | |||
4637 | ||||
4638 | uintptr_t offset = imm.offset; | |||
4639 | Register index_plus_offset = | |||
4640 | __ cache_state()->is_used(LiftoffRegister(index_reg)) | |||
4641 | ? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp() | |||
4642 | : index_reg; | |||
4643 | // TODO(clemensb): Skip this if memory is 64 bit. | |||
4644 | __ emit_ptrsize_zeroextend_i32(index_plus_offset, index_reg); | |||
4645 | if (offset) { | |||
4646 | __ emit_ptrsize_addi(index_plus_offset, index_plus_offset, offset); | |||
4647 | } | |||
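     | // {index_plus_offset} now holds the zero-extended index plus the static | |||
     | // offset, i.e. the combined value that is passed as the first argument to | |||
     | // the wait builtin below. | |||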
4648 | ||||
4649 | LiftoffAssembler::VarState timeout = | |||
4650 | __ cache_state()->stack_state.end()[-1]; | |||
4651 | LiftoffAssembler::VarState expected_value = | |||
4652 | __ cache_state()->stack_state.end()[-2]; | |||
4653 | LiftoffAssembler::VarState index = __ cache_state()->stack_state.end()[-3]; | |||
4654 | ||||
4655 | // We have to set the correct register for the index. | |||
4656 | index.MakeRegister(LiftoffRegister(index_plus_offset)); | |||
4657 | ||||
4658 | static constexpr WasmCode::RuntimeStubId kTargets[2][2]{ | |||
4659 | // 64 bit systems (kNeedI64RegPair == false): | |||
4660 | {WasmCode::kWasmI64AtomicWait64, WasmCode::kWasmI32AtomicWait64}, | |||
4661 | // 32 bit systems (kNeedI64RegPair == true): | |||
4662 | {WasmCode::kWasmI64AtomicWait32, WasmCode::kWasmI32AtomicWait32}}; | |||
4663 | auto target = kTargets[kNeedI64RegPair][kind == kI32]; | |||
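     | // E.g. on a 64-bit target (kNeedI64RegPair == false), an i32.atomic.wait | |||
     | // selects kWasmI32AtomicWait64 from the table above. | |||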
4664 | ||||
4665 | CallRuntimeStub(target, MakeSig::Params(kPointerKind, kind, kI64), | |||
4666 | {index, expected_value, timeout}, decoder->position()); | |||
4667 | // Pop parameters from the value stack. | |||
4668 | __ DropValues(3); | |||
4669 | ||||
4670 | RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill); | |||
4671 | ||||
4672 | __ PushRegister(kI32, LiftoffRegister(kReturnRegister0)); | |||
4673 | } | |||
4674 | ||||
4675 | void AtomicNotify(FullDecoder* decoder, | |||
4676 | const MemoryAccessImmediate<validate>& imm) { | |||
4677 | LiftoffRegister full_index = __ PeekToRegister(1, {}); | |||
4678 | Register index_reg = BoundsCheckMem(decoder, kInt32Size, imm.offset, | |||
4679 | full_index, {}, kDoForceCheck); | |||
4680 | if (index_reg == no_reg) return; | |||
4681 | LiftoffRegList pinned = {index_reg}; | |||
4682 | AlignmentCheckMem(decoder, kInt32Size, imm.offset, index_reg, pinned); | |||
4683 | ||||
4684 | uintptr_t offset = imm.offset; | |||
4685 | Register index_plus_offset = | |||
4686 | __ cache_state()->is_used(LiftoffRegister(index_reg)) | |||
4687 | ? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp() | |||
4688 | : index_reg; | |||
4689 | // TODO(clemensb): Skip this if memory is 64 bit. | |||
4690 | __ emit_ptrsize_zeroextend_i32(index_plus_offset, index_reg); | |||
4691 | if (offset) { | |||
4692 | __ emit_ptrsize_addi(index_plus_offset, index_plus_offset, offset); | |||
4693 | } | |||
4694 | ||||
4695 | LiftoffAssembler::VarState count = __ cache_state()->stack_state.end()[-1]; | |||
4696 | LiftoffAssembler::VarState index = __ cache_state()->stack_state.end()[-2]; | |||
4697 | index.MakeRegister(LiftoffRegister(index_plus_offset)); | |||
4698 | ||||
4699 | CallRuntimeStub(WasmCode::kWasmAtomicNotify, | |||
4700 | MakeSig::Returns(kI32).Params(kPointerKind, kI32), | |||
4701 | {index, count}, decoder->position()); | |||
4702 | // Pop parameters from the value stack. | |||
4703 | __ DropValues(2); | |||
4704 | ||||
4705 | RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill); | |||
4706 | ||||
4707 | __ PushRegister(kI32, LiftoffRegister(kReturnRegister0)); | |||
4708 | } | |||
4709 | ||||
4710 | #define ATOMIC_STORE_LIST(V) \ | |||
4711 | V(I32AtomicStore, kI32Store) \ | |||
4712 | V(I64AtomicStore, kI64Store) \ | |||
4713 | V(I32AtomicStore8U, kI32Store8) \ | |||
4714 | V(I32AtomicStore16U, kI32Store16) \ | |||
4715 | V(I64AtomicStore8U, kI64Store8) \ | |||
4716 | V(I64AtomicStore16U, kI64Store16) \ | |||
4717 | V(I64AtomicStore32U, kI64Store32) | |||
4718 | ||||
4719 | #define ATOMIC_LOAD_LIST(V) \ | |||
4720 | V(I32AtomicLoad, kI32Load) \ | |||
4721 | V(I64AtomicLoad, kI64Load) \ | |||
4722 | V(I32AtomicLoad8U, kI32Load8U) \ | |||
4723 | V(I32AtomicLoad16U, kI32Load16U) \ | |||
4724 | V(I64AtomicLoad8U, kI64Load8U) \ | |||
4725 | V(I64AtomicLoad16U, kI64Load16U) \ | |||
4726 | V(I64AtomicLoad32U, kI64Load32U) | |||
4727 | ||||
4728 | #define ATOMIC_BINOP_INSTRUCTION_LIST(V) \ | |||
4729 | V(Add, I32AtomicAdd, kI32Store) \ | |||
4730 | V(Add, I64AtomicAdd, kI64Store) \ | |||
4731 | V(Add, I32AtomicAdd8U, kI32Store8) \ | |||
4732 | V(Add, I32AtomicAdd16U, kI32Store16) \ | |||
4733 | V(Add, I64AtomicAdd8U, kI64Store8) \ | |||
4734 | V(Add, I64AtomicAdd16U, kI64Store16) \ | |||
4735 | V(Add, I64AtomicAdd32U, kI64Store32) \ | |||
4736 | V(Sub, I32AtomicSub, kI32Store) \ | |||
4737 | V(Sub, I64AtomicSub, kI64Store) \ | |||
4738 | V(Sub, I32AtomicSub8U, kI32Store8) \ | |||
4739 | V(Sub, I32AtomicSub16U, kI32Store16) \ | |||
4740 | V(Sub, I64AtomicSub8U, kI64Store8) \ | |||
4741 | V(Sub, I64AtomicSub16U, kI64Store16) \ | |||
4742 | V(Sub, I64AtomicSub32U, kI64Store32) \ | |||
4743 | V(And, I32AtomicAnd, kI32Store) \ | |||
4744 | V(And, I64AtomicAnd, kI64Store) \ | |||
4745 | V(And, I32AtomicAnd8U, kI32Store8) \ | |||
4746 | V(And, I32AtomicAnd16U, kI32Store16) \ | |||
4747 | V(And, I64AtomicAnd8U, kI64Store8) \ | |||
4748 | V(And, I64AtomicAnd16U, kI64Store16) \ | |||
4749 | V(And, I64AtomicAnd32U, kI64Store32) \ | |||
4750 | V(Or, I32AtomicOr, kI32Store) \ | |||
4751 | V(Or, I64AtomicOr, kI64Store) \ | |||
4752 | V(Or, I32AtomicOr8U, kI32Store8) \ | |||
4753 | V(Or, I32AtomicOr16U, kI32Store16) \ | |||
4754 | V(Or, I64AtomicOr8U, kI64Store8) \ | |||
4755 | V(Or, I64AtomicOr16U, kI64Store16) \ | |||
4756 | V(Or, I64AtomicOr32U, kI64Store32) \ | |||
4757 | V(Xor, I32AtomicXor, kI32Store) \ | |||
4758 | V(Xor, I64AtomicXor, kI64Store) \ | |||
4759 | V(Xor, I32AtomicXor8U, kI32Store8) \ | |||
4760 | V(Xor, I32AtomicXor16U, kI32Store16) \ | |||
4761 | V(Xor, I64AtomicXor8U, kI64Store8) \ | |||
4762 | V(Xor, I64AtomicXor16U, kI64Store16) \ | |||
4763 | V(Xor, I64AtomicXor32U, kI64Store32) \ | |||
4764 | V(Exchange, I32AtomicExchange, kI32Store) \ | |||
4765 | V(Exchange, I64AtomicExchange, kI64Store) \ | |||
4766 | V(Exchange, I32AtomicExchange8U, kI32Store8) \ | |||
4767 | V(Exchange, I32AtomicExchange16U, kI32Store16) \ | |||
4768 | V(Exchange, I64AtomicExchange8U, kI64Store8) \ | |||
4769 | V(Exchange, I64AtomicExchange16U, kI64Store16) \ | |||
4770 | V(Exchange, I64AtomicExchange32U, kI64Store32) | |||
4771 | ||||
4772 | #define ATOMIC_COMPARE_EXCHANGE_LIST(V) \ | |||
4773 | V(I32AtomicCompareExchange, kI32Store) \ | |||
4774 | V(I64AtomicCompareExchange, kI64Store) \ | |||
4775 | V(I32AtomicCompareExchange8U, kI32Store8) \ | |||
4776 | V(I32AtomicCompareExchange16U, kI32Store16) \ | |||
4777 | V(I64AtomicCompareExchange8U, kI64Store8) \ | |||
4778 | V(I64AtomicCompareExchange16U, kI64Store16) \ | |||
4779 | V(I64AtomicCompareExchange32U, kI64Store32) | |||
4780 | ||||
4781 | void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, | |||
4782 | base::Vector<Value> args, | |||
4783 | const MemoryAccessImmediate<validate>& imm, Value* result) { | |||
4784 | switch (opcode) { | |||
4785 | #define ATOMIC_STORE_OP(name, type) \ | |||
4786 | case wasm::kExpr##name: \ | |||
4787 | AtomicStoreMem(decoder, StoreType::type, imm); \ | |||
4788 | break; | |||
4789 | ||||
4790 | ATOMIC_STORE_LIST(ATOMIC_STORE_OP) | |||
4791 | #undef ATOMIC_STORE_OP | |||
4792 | ||||
4793 | #define ATOMIC_LOAD_OP(name, type) \ | |||
4794 | case wasm::kExpr##name: \ | |||
4795 | AtomicLoadMem(decoder, LoadType::type, imm); \ | |||
4796 | break; | |||
4797 | ||||
4798 | ATOMIC_LOAD_LIST(ATOMIC_LOAD_OP) | |||
4799 | #undef ATOMIC_LOAD_OP | |||
4800 | ||||
4801 | #define ATOMIC_BINOP_OP(op, name, type) \ | |||
4802 | case wasm::kExpr##name: \ | |||
4803 | AtomicBinop(decoder, StoreType::type, imm, &LiftoffAssembler::Atomic##op); \ | |||
4804 | break; | |||
4805 | ||||
4806 | ATOMIC_BINOP_INSTRUCTION_LIST(ATOMIC_BINOP_OP) | |||
4807 | #undef ATOMIC_BINOP_OP | |||
4808 | ||||
4809 | #define ATOMIC_COMPARE_EXCHANGE_OP(name, type) \ | |||
4810 | case wasm::kExpr##name: \ | |||
4811 | AtomicCompareExchange(decoder, StoreType::type, imm); \ | |||
4812 | break; | |||
4813 | ||||
4814 | ATOMIC_COMPARE_EXCHANGE_LIST(ATOMIC_COMPARE_EXCHANGE_OP) | |||
4815 | #undef ATOMIC_COMPARE_EXCHANGE_OP | |||
4816 | ||||
4817 | case kExprI32AtomicWait: | |||
4818 | AtomicWait(decoder, kI32, imm); | |||
4819 | break; | |||
4820 | case kExprI64AtomicWait: | |||
4821 | AtomicWait(decoder, kI64, imm); | |||
4822 | break; | |||
4823 | case kExprAtomicNotify: | |||
4824 | AtomicNotify(decoder, imm); | |||
4825 | break; | |||
4826 | default: | |||
4827 | unsupported(decoder, kAtomics, "atomicop"); | |||
4828 | } | |||
4829 | } | |||
4830 | ||||
4831 | #undef ATOMIC_STORE_LIST | |||
4832 | #undef ATOMIC_LOAD_LIST | |||
4833 | #undef ATOMIC_BINOP_INSTRUCTION_LIST | |||
4834 | #undef ATOMIC_COMPARE_EXCHANGE_LIST | |||
4835 | ||||
4836 | void AtomicFence(FullDecoder* decoder) { __ AtomicFence(); } | |||
4837 | ||||
4838 | // Pop a memtype (i32 or i64 depending on {WasmModule::is_memory64}) to a | |||
4839 | // register, updating {*high_word} to contain the ORed combination of all | |||
4840 | // popped high words. Returns the ptr-sized register holding the popped value. | |||
4841 | LiftoffRegister PopMemTypeToRegister(FullDecoder* decoder, | |||
4842 | Register* high_word, | |||
4843 | LiftoffRegList* pinned) { | |||
4844 | LiftoffRegister reg = __ PopToRegister(*pinned); | |||
4845 | LiftoffRegister intptr_reg = reg; | |||
4846 | // For memory32 on 64-bit hosts, zero-extend. | |||
4847 | if (kSystemPointerSize == kInt64Size && !env_->module->is_memory64) { | |||
4848 | // Only overwrite {reg} if it's not used otherwise. | |||
4849 | if (pinned->has(reg) || __ cache_state()->is_used(reg)) { | |||
4850 | intptr_reg = __ GetUnusedRegister(kGpReg, *pinned); | |||
4851 | } | |||
4852 | __ emit_u32_to_uintptr(intptr_reg.gp(), reg.gp()); | |||
4853 | } | |||
4854 | // For memory32 or memory64 on 64-bit, we are done here. | |||
4855 | if (kSystemPointerSize == kInt64Size || !env_->module->is_memory64) { | |||
4856 | pinned->set(intptr_reg); | |||
4857 | return intptr_reg; | |||
4858 | } | |||
4859 | ||||
4860 | // For memory64 on 32-bit systems, combine all high words for a zero-check | |||
4861 | // and only use the low words afterwards. This keeps the register pressure | |||
4862 | // manageable. | |||
4863 | DCHECK_GE(kMaxUInt32, env_->max_memory_size); | |||
4864 | pinned->set(reg.low()); | |||
4865 | if (*high_word == no_reg) { | |||
4866 | // Choose a register to hold the (combination of) high word(s). It cannot | |||
4867 | // be one of the pinned registers, and it cannot be used in the value | |||
4868 | // stack. | |||
4869 | *high_word = | |||
4870 | pinned->has(reg.high()) | |||
4871 | ? __ GetUnusedRegister(kGpReg, *pinned).gp() | |||
4872 | : __ GetUnusedRegister(kGpReg, {reg.high()}, *pinned).gp(); | |||
4873 | pinned->set(*high_word); | |||
4874 | if (*high_word != reg.high_gp()) { | |||
4875 | __ Move(*high_word, reg.high_gp(), kI32); | |||
4876 | } | |||
4877 | } else if (*high_word != reg.high_gp()) { | |||
4878 | // Combine the new high word into existing high words. | |||
4879 | __ emit_i32_or(*high_word, *high_word, reg.high_gp()); | |||
4880 | } | |||
4881 | return reg.low(); | |||
4882 | } | |||
4883 | ||||
4884 | void MemoryInit(FullDecoder* decoder, | |||
4885 | const MemoryInitImmediate<validate>& imm, const Value&, | |||
4886 | const Value&, const Value&) { | |||
4887 | Register mem_offsets_high_word = no_reg; | |||
4888 | LiftoffRegList pinned; | |||
4889 | LiftoffRegister size = pinned.set(__ PopToRegister()); | |||
4890 | LiftoffRegister src = pinned.set(__ PopToRegister(pinned)); | |||
4891 | LiftoffRegister dst = | |||
4892 | PopMemTypeToRegister(decoder, &mem_offsets_high_word, &pinned); | |||
4893 | ||||
4894 | Register instance = __ cache_state()->cached_instance; | |||
4895 | if (instance == no_reg) { | |||
4896 | instance = __ GetUnusedRegister(kGpReg, pinned).gp(); | |||
4897 | __ LoadInstanceFromFrame(instance); | |||
4898 | } | |||
4899 | pinned.set(instance); | |||
4900 | ||||
4901 | // Only allocate the OOB code now, so the state of the stack is reflected | |||
4902 | // correctly. | |||
4903 | Label* trap_label = | |||
4904 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds); | |||
4905 | if (mem_offsets_high_word != no_reg) { | |||
4906 | // If any high word has bits set, jump to the OOB trap. | |||
4907 | __ emit_cond_jump(kNotEqualZero, trap_label, kI32, mem_offsets_high_word); | |||
4908 | pinned.clear(mem_offsets_high_word); | |||
4909 | } | |||
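     | // Any set bit in a high word means the 64-bit operand is >= 4GiB, which | |||
     | // can never be in bounds here (see the DCHECK on max_memory_size in | |||
     | // PopMemTypeToRegister). | |||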
4910 | ||||
4911 | LiftoffRegister segment_index = | |||
4912 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
4913 | __ LoadConstant(segment_index, WasmValue(imm.data_segment.index)); | |||
4914 | ||||
4915 | auto sig = MakeSig::Returns(kI32).Params(kPointerKind, kPointerKind, kI32, | |||
4916 | kI32, kI32); | |||
4917 | LiftoffRegister args[] = {LiftoffRegister(instance), dst, src, | |||
4918 | segment_index, size}; | |||
4919 | // We don't need the instance anymore after the call. We can use the | |||
4920 | // register for the result. | |||
4921 | LiftoffRegister result(instance); | |||
4922 | GenerateCCall(&result, &sig, kVoid, args, | |||
4923 | ExternalReference::wasm_memory_init()); | |||
4924 | __ emit_cond_jump(kEqual, trap_label, kI32, result.gp()); | |||
4925 | } | |||
4926 | ||||
4927 | void DataDrop(FullDecoder* decoder, const IndexImmediate<validate>& imm) { | |||
4928 | LiftoffRegList pinned; | |||
4929 | ||||
4930 | Register seg_size_array = | |||
4931 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); | |||
4932 | LOAD_INSTANCE_FIELD(seg_size_array, DataSegmentSizes, kSystemPointerSize, | |||
4933 | pinned); | |||
4934 | ||||
4935 | LiftoffRegister seg_index = | |||
4936 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
4937 | // Scale the seg_index for the array access. | |||
4938 | __ LoadConstant(seg_index, | |||
4939 | WasmValue(imm.index << value_kind_size_log2(kI32))); | |||
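     | // Worked example (added for clarity): the segment sizes are stored as | |||
     | // 32-bit values, so segment index 3 becomes byte offset 3 << 2 == 12. | |||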
4940 | ||||
4941 | // Set the length of the segment to '0' to drop it. | |||
4942 | LiftoffRegister null_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
4943 | __ LoadConstant(null_reg, WasmValue(0)); | |||
4944 | __ Store(seg_size_array, seg_index.gp(), 0, null_reg, StoreType::kI32Store, | |||
4945 | pinned); | |||
4946 | } | |||
4947 | ||||
4948 | void MemoryCopy(FullDecoder* decoder, | |||
4949 | const MemoryCopyImmediate<validate>& imm, const Value&, | |||
4950 | const Value&, const Value&) { | |||
4951 | Register mem_offsets_high_word = no_reg; | |||
4952 | LiftoffRegList pinned; | |||
4953 | LiftoffRegister size = pinned.set( | |||
4954 | PopMemTypeToRegister(decoder, &mem_offsets_high_word, &pinned)); | |||
4955 | LiftoffRegister src = pinned.set( | |||
4956 | PopMemTypeToRegister(decoder, &mem_offsets_high_word, &pinned)); | |||
4957 | LiftoffRegister dst = pinned.set( | |||
4958 | PopMemTypeToRegister(decoder, &mem_offsets_high_word, &pinned)); | |||
4959 | ||||
4960 | Register instance = __ cache_state()->cached_instance; | |||
4961 | if (instance == no_reg) { | |||
4962 | instance = __ GetUnusedRegister(kGpReg, pinned).gp(); | |||
4963 | __ LoadInstanceFromFrame(instance); | |||
4964 | } | |||
4965 | ||||
4966 | // Only allocate the OOB code now, so the state of the stack is reflected | |||
4967 | // correctly. | |||
4968 | Label* trap_label = | |||
4969 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds); | |||
4970 | if (mem_offsets_high_word != no_reg) { | |||
4971 | // If any high word has bits set, jump to the OOB trap. | |||
4972 | __ emit_cond_jump(kNotEqualZero, trap_label, kI32, mem_offsets_high_word); | |||
4973 | } | |||
4974 | ||||
4975 | auto sig = MakeSig::Returns(kI32).Params(kPointerKind, kPointerKind, | |||
4976 | kPointerKind, kPointerKind); | |||
4977 | LiftoffRegister args[] = {LiftoffRegister(instance), dst, src, size}; | |||
4978 | // We don't need the instance anymore after the call. We can use the | |||
4979 | // register for the result. | |||
4980 | LiftoffRegister result(instance); | |||
4981 | GenerateCCall(&result, &sig, kVoid, args, | |||
4982 | ExternalReference::wasm_memory_copy()); | |||
4983 | __ emit_cond_jump(kEqual, trap_label, kI32, result.gp()); | |||
4984 | } | |||
4985 | ||||
4986 | void MemoryFill(FullDecoder* decoder, | |||
4987 | const MemoryIndexImmediate<validate>& imm, const Value&, | |||
4988 | const Value&, const Value&) { | |||
4989 | Register mem_offsets_high_word = no_reg; | |||
4990 | LiftoffRegList pinned; | |||
4991 | LiftoffRegister size = pinned.set( | |||
4992 | PopMemTypeToRegister(decoder, &mem_offsets_high_word, &pinned)); | |||
4993 | LiftoffRegister value = pinned.set(__ PopToRegister(pinned)); | |||
4994 | LiftoffRegister dst = pinned.set( | |||
4995 | PopMemTypeToRegister(decoder, &mem_offsets_high_word, &pinned)); | |||
4996 | ||||
4997 | Register instance = __ cache_state()->cached_instance; | |||
4998 | if (instance == no_reg) { | |||
4999 | instance = __ GetUnusedRegister(kGpReg, pinned).gp(); | |||
5000 | __ LoadInstanceFromFrame(instance); | |||
5001 | } | |||
5002 | ||||
5003 | // Only allocate the OOB code now, so the state of the stack is reflected | |||
5004 | // correctly. | |||
5005 | Label* trap_label = | |||
5006 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds); | |||
5007 | if (mem_offsets_high_word != no_reg) { | |||
5008 | // If any high word has bits set, jump to the OOB trap. | |||
5009 | __ emit_cond_jump(kNotEqualZero, trap_label, kI32, mem_offsets_high_word); | |||
5010 | } | |||
5011 | ||||
5012 | auto sig = MakeSig::Returns(kI32).Params(kPointerKind, kPointerKind, kI32, | |||
5013 | kPointerKind); | |||
5014 | LiftoffRegister args[] = {LiftoffRegister(instance), dst, value, size}; | |||
5015 | // We don't need the instance anymore after the call. We can use the | |||
5016 | // register for the result. | |||
5017 | LiftoffRegister result(instance); | |||
5018 | GenerateCCall(&result, &sig, kVoid, args, | |||
5019 | ExternalReference::wasm_memory_fill()); | |||
5020 | __ emit_cond_jump(kEqual, trap_label, kI32, result.gp()); | |||
5021 | } | |||
5022 | ||||
5023 | void LoadSmi(LiftoffRegister reg, int value) { | |||
5024 | Address smi_value = Smi::FromInt(value).ptr(); | |||
5025 | using smi_type = std::conditional_t<kSmiKind == kI32, int32_t, int64_t>; | |||
5026 | __ LoadConstant(reg, WasmValue{static_cast<smi_type>(smi_value)}); | |||
5027 | } | |||
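     | // Worked example (added for clarity): with 31-bit Smis, Smi::FromInt(3) | |||
     | // encodes as 3 << 1 == 6; with 32-bit Smis on 64-bit targets the payload | |||
     | // lives in the upper word, i.e. 3LL << 32. | |||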
5028 | ||||
5029 | void TableInit(FullDecoder* decoder, const TableInitImmediate<validate>& imm, | |||
5030 | base::Vector<Value> args) { | |||
5031 | LiftoffRegList pinned; | |||
5032 | LiftoffRegister table_index_reg = | |||
5033 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
5034 | ||||
5035 | LoadSmi(table_index_reg, imm.table.index); | |||
5036 | LiftoffAssembler::VarState table_index(kPointerKind, table_index_reg, 0); | |||
5037 | ||||
5038 | LiftoffRegister segment_index_reg = | |||
5039 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
5040 | LoadSmi(segment_index_reg, imm.element_segment.index); | |||
5041 | LiftoffAssembler::VarState segment_index(kPointerKind, segment_index_reg, | |||
5042 | 0); | |||
5043 | ||||
5044 | LiftoffAssembler::VarState size = __ cache_state()->stack_state.end()[-1]; | |||
5045 | LiftoffAssembler::VarState src = __ cache_state()->stack_state.end()[-2]; | |||
5046 | LiftoffAssembler::VarState dst = __ cache_state()->stack_state.end()[-3]; | |||
5047 | ||||
5048 | CallRuntimeStub(WasmCode::kWasmTableInit, | |||
5049 | MakeSig::Params(kI32, kI32, kI32, kSmiKind, kSmiKind), | |||
5050 | {dst, src, size, table_index, segment_index}, | |||
5051 | decoder->position()); | |||
5052 | ||||
5053 | // Pop parameters from the value stack. | |||
5054 | __ cache_state()->stack_state.pop_back(3); | |||
5055 | ||||
5056 | RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill); | |||
5057 | } | |||
5058 | ||||
5059 | void ElemDrop(FullDecoder* decoder, const IndexImmediate<validate>& imm) { | |||
5060 | LiftoffRegList pinned; | |||
5061 | Register dropped_elem_segments = | |||
5062 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); | |||
5063 | LOAD_INSTANCE_FIELD(dropped_elem_segments, DroppedElemSegments, | |||
5064 | kSystemPointerSize, pinned); | |||
5065 | ||||
5066 | LiftoffRegister seg_index = | |||
5067 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
5068 | __ LoadConstant(seg_index, WasmValue(imm.index)); | |||
5069 | ||||
5070 | // Mark the segment as dropped by setting its value in the dropped | |||
5071 | // segments list to 1. | |||
5072 | LiftoffRegister one_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
5073 | __ LoadConstant(one_reg, WasmValue(1)); | |||
5074 | __ Store(dropped_elem_segments, seg_index.gp(), 0, one_reg, | |||
5075 | StoreType::kI32Store8, pinned); | |||
5076 | } | |||
5077 | ||||
5078 | void TableCopy(FullDecoder* decoder, const TableCopyImmediate<validate>& imm, | |||
5079 | base::Vector<Value> args) { | |||
5080 | LiftoffRegList pinned; | |||
5081 | ||||
5082 | LiftoffRegister table_dst_index_reg = | |||
5083 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
5084 | LoadSmi(table_dst_index_reg, imm.table_dst.index); | |||
5085 | LiftoffAssembler::VarState table_dst_index(kPointerKind, | |||
5086 | table_dst_index_reg, 0); | |||
5087 | ||||
5088 | LiftoffRegister table_src_index_reg = | |||
5089 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
5090 | LoadSmi(table_src_index_reg, imm.table_src.index); | |||
5091 | LiftoffAssembler::VarState table_src_index(kPointerKind, | |||
5092 | table_src_index_reg, 0); | |||
5093 | ||||
5094 | LiftoffAssembler::VarState size = __ cache_state()->stack_state.end()[-1]; | |||
5095 | LiftoffAssembler::VarState src = __ cache_state()->stack_state.end()[-2]; | |||
5096 | LiftoffAssembler::VarState dst = __ cache_state()->stack_state.end()[-3]; | |||
5097 | ||||
5098 | CallRuntimeStub(WasmCode::kWasmTableCopy, | |||
5099 | MakeSig::Params(kI32, kI32, kI32, kSmiKind, kSmiKind), | |||
5100 | {dst, src, size, table_dst_index, table_src_index}, | |||
5101 | decoder->position()); | |||
5102 | ||||
5103 | // Pop parameters from the value stack. | |||
5104 | __ cache_state()->stack_state.pop_back(3); | |||
5105 | ||||
5106 | RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill); | |||
5107 | } | |||
5108 | ||||
5109 | void TableGrow(FullDecoder* decoder, const IndexImmediate<validate>& imm, | |||
5110 | const Value&, const Value&, Value* result) { | |||
5111 | LiftoffRegList pinned; | |||
5112 | ||||
5113 | LiftoffRegister table_index_reg = | |||
5114 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
5115 | LoadSmi(table_index_reg, imm.index); | |||
5116 | LiftoffAssembler::VarState table_index(kPointerKind, table_index_reg, 0); | |||
5117 | ||||
5118 | LiftoffAssembler::VarState delta = __ cache_state()->stack_state.end()[-1]; | |||
5119 | LiftoffAssembler::VarState value = __ cache_state()->stack_state.end()[-2]; | |||
5120 | ||||
5121 | CallRuntimeStub( | |||
5122 | WasmCode::kWasmTableGrow, | |||
5123 | MakeSig::Returns(kSmiKind).Params(kSmiKind, kI32, kTaggedKind), | |||
5124 | {table_index, delta, value}, decoder->position()); | |||
5125 | ||||
5126 | // Pop parameters from the value stack. | |||
5127 | __ cache_state()->stack_state.pop_back(2); | |||
5128 | ||||
5129 | RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill); | |||
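     | // The stub returns its result as a Smi (see Returns(kSmiKind) above), so | |||
     | // convert it to an untagged i32 before pushing it. | |||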
5130 | __ SmiToInt32(kReturnRegister0); | |||
5131 | __ PushRegister(kI32, LiftoffRegister(kReturnRegister0)); | |||
5132 | } | |||
5133 | ||||
5134 | void TableSize(FullDecoder* decoder, const IndexImmediate<validate>& imm, | |||
5135 | Value*) { | |||
5136 | // We have to look up instance->tables[table_index].length. | |||
5137 | ||||
5138 | LiftoffRegList pinned; | |||
5139 | // Load the instance's tables array. | |||
5140 | Register tables = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); | |||
5141 | LOAD_TAGGED_PTR_INSTANCE_FIELD(tables, Tables, pinned); | |||
5142 | ||||
5143 | Register table = tables; | |||
5144 | __ LoadTaggedPointer( | |||
5145 | table, tables, no_reg, | |||
5146 | ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), pinned); | |||
5147 | ||||
5148 | int length_field_size = WasmTableObject::kCurrentLengthOffsetEnd - | |||
5149 | WasmTableObject::kCurrentLengthOffset + 1; | |||
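     | // The current length is stored as a Smi, so this is the size of a tagged | |||
     | // field (4 bytes with pointer compression, 8 without); hence the SmiUntag | |||
     | // below. | |||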
5150 | ||||
5151 | Register result = table; | |||
5152 | __ Load(LiftoffRegister(result), table, no_reg, | |||
5153 | wasm::ObjectAccess::ToTagged(WasmTableObject::kCurrentLengthOffset), | |||
5154 | length_field_size == 4 ? LoadType::kI32Load : LoadType::kI64Load, | |||
5155 | pinned); | |||
5156 | ||||
5157 | __ SmiUntag(result); | |||
5158 | __ PushRegister(kI32, LiftoffRegister(result)); | |||
5159 | } | |||
5160 | ||||
5161 | void TableFill(FullDecoder* decoder, const IndexImmediate<validate>& imm, | |||
5162 | const Value&, const Value&, const Value&) { | |||
5163 | LiftoffRegList pinned; | |||
5164 | ||||
5165 | LiftoffRegister table_index_reg = | |||
5166 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
5167 | LoadSmi(table_index_reg, imm.index); | |||
5168 | LiftoffAssembler::VarState table_index(kPointerKind, table_index_reg, 0); | |||
5169 | ||||
5170 | LiftoffAssembler::VarState count = __ cache_state()->stack_state.end()[-1]; | |||
5171 | LiftoffAssembler::VarState value = __ cache_state()->stack_state.end()[-2]; | |||
5172 | LiftoffAssembler::VarState start = __ cache_state()->stack_state.end()[-3]; | |||
5173 | ||||
5174 | CallRuntimeStub(WasmCode::kWasmTableFill, | |||
5175 | MakeSig::Params(kSmiKind, kI32, kI32, kTaggedKind), | |||
5176 | {table_index, start, count, value}, decoder->position()); | |||
5177 | ||||
5178 | // Pop parameters from the value stack. | |||
5179 | __ cache_state()->stack_state.pop_back(3); | |||
5180 | ||||
5181 | RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill); | |||
5182 | } | |||
5183 | ||||
5184 | void StructNew(FullDecoder* decoder, | |||
5185 | const StructIndexImmediate<validate>& imm, const Value& rtt, | |||
5186 | bool initial_values_on_stack) { | |||
5187 | LiftoffAssembler::VarState rtt_value = | |||
5188 | __ cache_state()->stack_state.end()[-1]; | |||
5189 | CallRuntimeStub(WasmCode::kWasmAllocateStructWithRtt, | |||
5190 | MakeSig::Returns(kRef).Params(rtt.type.kind()), {rtt_value}, | |||
5191 | decoder->position()); | |||
5192 | // Drop the RTT. | |||
5193 | __ cache_state()->stack_state.pop_back(1); | |||
5194 | ||||
5195 | LiftoffRegister obj(kReturnRegister0); | |||
5196 | LiftoffRegList pinned = {obj}; | |||
5197 | for (uint32_t i = imm.struct_type->field_count(); i > 0;) { | |||
5198 | i--; | |||
5199 | int offset = StructFieldOffset(imm.struct_type, i); | |||
5200 | ValueKind field_kind = imm.struct_type->field(i).kind(); | |||
5201 | LiftoffRegister value = initial_values_on_stack | |||
5202 | ? pinned.set(__ PopToRegister(pinned)) | |||
5203 | : pinned.set(__ GetUnusedRegister( | |||
5204 | reg_class_for(field_kind), pinned)); | |||
5205 | if (!initial_values_on_stack) { | |||
5206 | if (!CheckSupportedType(decoder, field_kind, "default value")) return; | |||
5207 | SetDefaultValue(value, field_kind, pinned); | |||
5208 | } | |||
5209 | StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_kind); | |||
5210 | pinned.clear(value); | |||
5211 | } | |||
5212 | // If this assert fails, then initialization of the padding field might be | |||
5213 | // necessary. | |||
5214 | static_assert(Heap::kMinObjectSizeInTaggedWords == 2 && | |||
5215 | WasmStruct::kHeaderSize == 2 * kTaggedSize, | |||
5216 | "empty struct might require initialization of padding field"); | |||
5217 | __ PushRegister(kRef, obj); | |||
5218 | } | |||
5219 | ||||
5220 | void StructNewWithRtt(FullDecoder* decoder, | |||
5221 | const StructIndexImmediate<validate>& imm, | |||
5222 | const Value& rtt, const Value args[], Value* result) { | |||
5223 | StructNew(decoder, imm, rtt, true); | |||
5224 | } | |||
5225 | ||||
5226 | void StructNewDefault(FullDecoder* decoder, | |||
5227 | const StructIndexImmediate<validate>& imm, | |||
5228 | const Value& rtt, Value* result) { | |||
5229 | StructNew(decoder, imm, rtt, false); | |||
5230 | } | |||
5231 | ||||
5232 | void StructGet(FullDecoder* decoder, const Value& struct_obj, | |||
5233 | const FieldImmediate<validate>& field, bool is_signed, | |||
5234 | Value* result) { | |||
5235 | const StructType* struct_type = field.struct_imm.struct_type; | |||
5236 | ValueKind field_kind = struct_type->field(field.field_imm.index).kind(); | |||
5237 | if (!CheckSupportedType(decoder, field_kind, "field load")) return; | |||
5238 | int offset = StructFieldOffset(struct_type, field.field_imm.index); | |||
5239 | LiftoffRegList pinned; | |||
5240 | LiftoffRegister obj = pinned.set(__ PopToRegister(pinned)); | |||
5241 | MaybeEmitNullCheck(decoder, obj.gp(), pinned, struct_obj.type); | |||
5242 | LiftoffRegister value = | |||
5243 | __ GetUnusedRegister(reg_class_for(field_kind), pinned); | |||
5244 | LoadObjectField(value, obj.gp(), no_reg, offset, field_kind, is_signed, | |||
5245 | pinned); | |||
5246 | __ PushRegister(unpacked(field_kind), value); | |||
5247 | } | |||
5248 | ||||
5249 | void StructSet(FullDecoder* decoder, const Value& struct_obj, | |||
5250 | const FieldImmediate<validate>& field, | |||
5251 | const Value& field_value) { | |||
5252 | const StructType* struct_type = field.struct_imm.struct_type; | |||
5253 | ValueKind field_kind = struct_type->field(field.field_imm.index).kind(); | |||
5254 | int offset = StructFieldOffset(struct_type, field.field_imm.index); | |||
5255 | LiftoffRegList pinned; | |||
5256 | LiftoffRegister value = pinned.set(__ PopToRegister(pinned)); | |||
5257 | LiftoffRegister obj = pinned.set(__ PopToRegister(pinned)); | |||
5258 | MaybeEmitNullCheck(decoder, obj.gp(), pinned, struct_obj.type); | |||
5259 | StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_kind); | |||
5260 | } | |||
5261 | ||||
5262 | void ArrayNew(FullDecoder* decoder, const ArrayIndexImmediate<validate>& imm, | |||
5263 | ValueKind rtt_kind, bool initial_value_on_stack) { | |||
5264 | // Max length check. | |||
5265 | { | |||
5266 | LiftoffRegister length = | |||
5267 | __ LoadToRegister(__ cache_state()->stack_state.end()[-2], {}); | |||
5268 | Label* trap_label = | |||
5269 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapArrayTooLarge); | |||
5270 | __ emit_i32_cond_jumpi(kUnsignedGreaterThan, trap_label, length.gp(), | |||
5271 | WasmArray::MaxLength(imm.array_type)); | |||
5272 | } | |||
5273 | ValueKind elem_kind = imm.array_type->element_type().kind(); | |||
5274 | int elem_size = value_kind_size(elem_kind); | |||
5275 | // Allocate the array. | |||
5276 | { | |||
5277 | LiftoffRegister elem_size_reg = __ GetUnusedRegister(kGpReg, {}); | |||
5278 | LiftoffAssembler::VarState rtt_var = | |||
5279 | __ cache_state()->stack_state.end()[-1]; | |||
5280 | LiftoffAssembler::VarState length_var = | |||
5281 | __ cache_state()->stack_state.end()[-2]; | |||
5282 | __ LoadConstant(elem_size_reg, WasmValue(elem_size)); | |||
5283 | LiftoffAssembler::VarState elem_size_var(kI32, elem_size_reg, 0); | |||
5284 | ||||
5285 | WasmCode::RuntimeStubId stub_id = | |||
5286 | initial_value_on_stack | |||
5287 | ? WasmCode::kWasmAllocateArray_Uninitialized | |||
5288 | : is_reference(elem_kind) ? WasmCode::kWasmAllocateArray_InitNull | |||
5289 | : WasmCode::kWasmAllocateArray_InitZero; | |||
5290 | CallRuntimeStub( | |||
5291 | stub_id, MakeSig::Returns(kRef).Params(rtt_kind, kI32, kI32), | |||
5292 | {rtt_var, length_var, elem_size_var}, decoder->position()); | |||
5293 | // Drop the RTT. | |||
5294 | __ cache_state()->stack_state.pop_back(1); | |||
5295 | } | |||
5296 | ||||
5297 | LiftoffRegister obj(kReturnRegister0); | |||
5298 | if (initial_value_on_stack) { | |||
5299 | LiftoffRegList pinned = {obj}; | |||
5300 | LiftoffRegister length = pinned.set(__ PopToModifiableRegister(pinned)); | |||
5301 | LiftoffRegister value = pinned.set(__ PopToRegister(pinned)); | |||
5302 | ||||
5303 | // Initialize the array's elements. | |||
5304 | LiftoffRegister offset = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
5305 | __ LoadConstant( | |||
5306 | offset, | |||
5307 | WasmValue(wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize))); | |||
5308 | LiftoffRegister end_offset = length; | |||
5309 | if (value_kind_size_log2(elem_kind) != 0) { | |||
5310 | __ emit_i32_shli(end_offset.gp(), length.gp(), | |||
5311 | value_kind_size_log2(elem_kind)); | |||
5312 | } | |||
5313 | __ emit_i32_add(end_offset.gp(), end_offset.gp(), offset.gp()); | |||
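     | // {offset} now points at the first element slot and {end_offset} just past | |||
     | // the last one; the loop below stores {value} into each slot in between. | |||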
5314 | Label loop, done; | |||
5315 | __ bind(&loop); | |||
5316 | __ emit_cond_jump(kUnsignedGreaterEqual, &done, kI32, offset.gp(), | |||
5317 | end_offset.gp()); | |||
5318 | StoreObjectField(obj.gp(), offset.gp(), 0, value, pinned, elem_kind); | |||
5319 | __ emit_i32_addi(offset.gp(), offset.gp(), elem_size); | |||
5320 | __ emit_jump(&loop); | |||
5321 | ||||
5322 | __ bind(&done); | |||
5323 | } else { | |||
5324 | if (!CheckSupportedType(decoder, elem_kind, "default value")) return; | |||
5325 | // Drop the length. | |||
5326 | __ cache_state()->stack_state.pop_back(1); | |||
5327 | } | |||
5328 | __ PushRegister(kRef, obj); | |||
5329 | } | |||
5330 | ||||
5331 | void ArrayNewWithRtt(FullDecoder* decoder, | |||
5332 | const ArrayIndexImmediate<validate>& imm, | |||
5333 | const Value& length_value, const Value& initial_value, | |||
5334 | const Value& rtt, Value* result) { | |||
5335 | ArrayNew(decoder, imm, rtt.type.kind(), true); | |||
5336 | } | |||
5337 | ||||
5338 | void ArrayNewDefault(FullDecoder* decoder, | |||
5339 | const ArrayIndexImmediate<validate>& imm, | |||
5340 | const Value& length, const Value& rtt, Value* result) { | |||
5341 | ArrayNew(decoder, imm, rtt.type.kind(), false); | |||
5342 | } | |||
5343 | ||||
5344 | void ArrayGet(FullDecoder* decoder, const Value& array_obj, | |||
5345 | const ArrayIndexImmediate<validate>& imm, | |||
5346 | const Value& index_val, bool is_signed, Value* result) { | |||
5347 | LiftoffRegList pinned; | |||
5348 | LiftoffRegister index = pinned.set(__ PopToModifiableRegister(pinned)); | |||
5349 | LiftoffRegister array = pinned.set(__ PopToRegister(pinned)); | |||
5350 | MaybeEmitNullCheck(decoder, array.gp(), pinned, array_obj.type); | |||
5351 | BoundsCheckArray(decoder, array, index, pinned); | |||
5352 | ValueKind elem_kind = imm.array_type->element_type().kind(); | |||
5353 | if (!CheckSupportedType(decoder, elem_kind, "array load")) return; | |||
5354 | int elem_size_shift = value_kind_size_log2(elem_kind); | |||
5355 | if (elem_size_shift != 0) { | |||
5356 | __ emit_i32_shli(index.gp(), index.gp(), elem_size_shift); | |||
5357 | } | |||
5358 | LiftoffRegister value = | |||
5359 | __ GetUnusedRegister(reg_class_for(elem_kind), pinned); | |||
5360 | LoadObjectField(value, array.gp(), index.gp(), | |||
5361 | wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize), | |||
5362 | elem_kind, is_signed, pinned); | |||
5363 | __ PushRegister(unpacked(elem_kind), value); | |||
5364 | } | |||
5365 | ||||
5366 | void ArraySet(FullDecoder* decoder, const Value& array_obj, | |||
5367 | const ArrayIndexImmediate<validate>& imm, | |||
5368 | const Value& index_val, const Value& value_val) { | |||
5369 | LiftoffRegList pinned; | |||
5370 | LiftoffRegister value = pinned.set(__ PopToRegister(pinned)); | |||
5371 | DCHECK_EQ(reg_class_for(imm.array_type->element_type().kind()), | |||
5372 | value.reg_class()); | |||
5373 | LiftoffRegister index = pinned.set(__ PopToModifiableRegister(pinned)); | |||
5374 | LiftoffRegister array = pinned.set(__ PopToRegister(pinned)); | |||
5375 | MaybeEmitNullCheck(decoder, array.gp(), pinned, array_obj.type); | |||
5376 | BoundsCheckArray(decoder, array, index, pinned); | |||
5377 | ValueKind elem_kind = imm.array_type->element_type().kind(); | |||
5378 | int elem_size_shift = value_kind_size_log2(elem_kind); | |||
5379 | if (elem_size_shift != 0) { | |||
5380 | __ emit_i32_shli(index.gp(), index.gp(), elem_size_shift); | |||
5381 | } | |||
5382 | StoreObjectField(array.gp(), index.gp(), | |||
5383 | wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize), | |||
5384 | value, pinned, elem_kind); | |||
5385 | } | |||
5386 | ||||
5387 | void ArrayLen(FullDecoder* decoder, const Value& array_obj, Value* result) { | |||
5388 | LiftoffRegList pinned; | |||
5389 | LiftoffRegister obj = pinned.set(__ PopToRegister(pinned)); | |||
5390 | MaybeEmitNullCheck(decoder, obj.gp(), pinned, array_obj.type); | |||
5391 | LiftoffRegister len = __ GetUnusedRegister(kGpReg, pinned); | |||
5392 | int kLengthOffset = wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset); | |||
5393 | LoadObjectField(len, obj.gp(), no_reg, kLengthOffset, kI32, false, pinned); | |||
5394 | __ PushRegister(kI32, len); | |||
5395 | } | |||
5396 | ||||
5397 | void ArrayCopy(FullDecoder* decoder, const Value& dst, const Value& dst_index, | |||
5398 | const Value& src, const Value& src_index, | |||
5399 | const Value& length) { | |||
5400 | // TODO(7748): Unify implementation with TF: Implement this with | |||
5401 | // GenerateCCall. Remove runtime function and builtin in wasm.tq. | |||
5402 | CallRuntimeStub(FLAG_experimental_wasm_skip_bounds_checks | |||
5403 | ? WasmCode::kWasmArrayCopy | |||
5404 | : WasmCode::kWasmArrayCopyWithChecks, | |||
5405 | MakeSig::Params(kI32, kI32, kI32, kOptRef, kOptRef), | |||
5406 | // Builtin parameter order: | |||
5407 | // [dst_index, src_index, length, dst, src]. | |||
5408 | {__ cache_state()->stack_state.end()[-4], | |||
5409 | __ cache_state()->stack_state.end()[-2], | |||
5410 | __ cache_state()->stack_state.end()[-1], | |||
5411 | __ cache_state()->stack_state.end()[-5], | |||
5412 | __ cache_state()->stack_state.end()[-3]}, | |||
5413 | decoder->position()); | |||
5414 | __ cache_state()->stack_state.pop_back(5); | |||
5415 | } | |||
5416 | ||||
5417 | void ArrayInit(FullDecoder* decoder, const ArrayIndexImmediate<validate>& imm, | |||
5418 | const base::Vector<Value>& elements, const Value& rtt, | |||
5419 | Value* result) { | |||
5420 | ValueKind rtt_kind = rtt.type.kind(); | |||
5421 | ValueKind elem_kind = imm.array_type->element_type().kind(); | |||
5422 | // Allocate the array. | |||
5423 | { | |||
5424 | LiftoffRegList pinned; | |||
5425 | LiftoffRegister elem_size_reg = | |||
5426 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
5427 | ||||
5428 | __ LoadConstant(elem_size_reg, WasmValue(value_kind_size(elem_kind))); | |||
5429 | LiftoffAssembler::VarState elem_size_var(kI32, elem_size_reg, 0); | |||
5430 | ||||
5431 | LiftoffRegister length_reg = | |||
5432 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
5433 | __ LoadConstant(length_reg, | |||
5434 | WasmValue(static_cast<int32_t>(elements.size()))); | |||
5435 | LiftoffAssembler::VarState length_var(kI32, length_reg, 0); | |||
5436 | ||||
5437 | LiftoffAssembler::VarState rtt_var = | |||
5438 | __ cache_state()->stack_state.end()[-1]; | |||
5439 | ||||
5440 | CallRuntimeStub(WasmCode::kWasmAllocateArray_Uninitialized, | |||
5441 | MakeSig::Returns(kRef).Params(rtt_kind, kI32, kI32), | |||
5442 | {rtt_var, length_var, elem_size_var}, | |||
5443 | decoder->position()); | |||
5444 | // Drop the RTT. | |||
5445 | __ DropValues(1); | |||
5446 | } | |||
5447 | ||||
5448 | // Initialize the array with stack arguments. | |||
5449 | LiftoffRegister array(kReturnRegister0); | |||
5450 | if (!CheckSupportedType(decoder, elem_kind, "array.init")) return; | |||
5451 | for (int i = static_cast<int>(elements.size()) - 1; i >= 0; i--) { | |||
5452 | LiftoffRegList pinned = {array}; | |||
5453 | LiftoffRegister element = pinned.set(__ PopToRegister(pinned)); | |||
5454 | LiftoffRegister offset_reg = | |||
5455 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
5456 | __ LoadConstant(offset_reg, | |||
5457 | WasmValue(i << value_kind_size_log2(elem_kind))); | |||
5458 | StoreObjectField(array.gp(), offset_reg.gp(), | |||
5459 | wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize), | |||
5460 | element, pinned, elem_kind); | |||
5461 | } | |||
5462 | ||||
5463 | // Push the array onto the stack. | |||
5464 | __ PushRegister(kRef, array); | |||
5465 | } | |||
5466 | ||||
5467 | void ArrayInitFromData(FullDecoder* decoder, | |||
5468 | const ArrayIndexImmediate<validate>& array_imm, | |||
5469 | const IndexImmediate<validate>& data_segment, | |||
5470 | const Value& /* offset */, const Value& /* length */, | |||
5471 | const Value& /* rtt */, Value* /* result */) { | |||
5472 | LiftoffRegList pinned; | |||
5473 | LiftoffRegister data_segment_reg = | |||
5474 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
5475 | __ LoadConstant(data_segment_reg, | |||
5476 | WasmValue(static_cast<int32_t>(data_segment.index))); | |||
5477 | LiftoffAssembler::VarState data_segment_var(kI32, data_segment_reg, 0); | |||
5478 | ||||
5479 | CallRuntimeStub(WasmCode::kWasmArrayInitFromData, | |||
5480 | MakeSig::Returns(kRef).Params(kI32, kI32, kI32, kRtt), | |||
5481 | { | |||
5482 | data_segment_var, | |||
5483 | __ cache_state()->stack_state.end()[-3], // offset | |||
5484 | __ cache_state()->stack_state.end()[-2], // length | |||
5485 | __ cache_state()->stack_state.end()[-1] // rtt | |||
5486 | }, | |||
5487 | decoder->position()); | |||
5488 | ||||
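     | // The builtin signals failure by returning one of two Smi error codes | |||
     | // instead of an array; compare against both and trap accordingly. | |||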
5489 | LiftoffRegister result(kReturnRegister0); | |||
5490 | // Reuse the data segment register for error handling. | |||
5491 | LiftoffRegister error_smi = data_segment_reg; | |||
5492 | LoadSmi(error_smi, kArrayInitFromDataArrayTooLargeErrorCode); | |||
5493 | Label* trap_label_array_too_large = | |||
5494 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapArrayTooLarge); | |||
5495 | __ emit_cond_jump(kEqual, trap_label_array_too_large, kRef, result.gp(), | |||
5496 | error_smi.gp()); | |||
5497 | LoadSmi(error_smi, kArrayInitFromDataSegmentOutOfBoundsErrorCode); | |||
5498 | Label* trap_label_segment_out_of_bounds = AddOutOfLineTrap( | |||
5499 | decoder, WasmCode::kThrowWasmTrapDataSegmentOutOfBounds); | |||
5500 | __ emit_cond_jump(kEqual, trap_label_segment_out_of_bounds, kRef, | |||
5501 | result.gp(), error_smi.gp()); | |||
5502 | ||||
5503 | __ PushRegister(kRef, result); | |||
5504 | } | |||
5505 | ||||
5506 | // 1 bit Smi tag, 31 bits Smi shift, 1 bit i31ref high-bit truncation. | |||
5507 | constexpr static int kI31To32BitSmiShift = 33; | |||
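     | // In other words: shifting left by 33 and arithmetically right by 33 | |||
     | // round-trips the low 31 bits of the input with sign extension (I31GetS); | |||
     | // the logical right shift in I31GetU zero-extends instead. | |||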
5508 | ||||
5509 | void I31New(FullDecoder* decoder, const Value& input, Value* result) { | |||
5510 | LiftoffRegister src = __ PopToRegister(); | |||
5511 | LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {src}, {}); | |||
5512 | if (SmiValuesAre31Bits()) { | |||
5513 | STATIC_ASSERT(kSmiTag == 0); | |||
5514 | __ emit_i32_shli(dst.gp(), src.gp(), kSmiTagSize); | |||
5515 | } else { | |||
5516 | DCHECK(SmiValuesAre32Bits()); | |||
5517 | __ emit_i64_shli(dst, src, kI31To32BitSmiShift); | |||
5518 | } | |||
5519 | __ PushRegister(kRef, dst); | |||
5520 | } | |||
5521 | ||||
5522 | void I31GetS(FullDecoder* decoder, const Value& input, Value* result) { | |||
5523 | LiftoffRegister src = __ PopToRegister(); | |||
5524 | LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {src}, {}); | |||
5525 | if (SmiValuesAre31Bits()) { | |||
5526 | __ emit_i32_sari(dst.gp(), src.gp(), kSmiTagSize); | |||
5527 | } else { | |||
5528 | DCHECK(SmiValuesAre32Bits()); | |||
5529 | __ emit_i64_sari(dst, src, kI31To32BitSmiShift); | |||
5530 | } | |||
5531 | __ PushRegister(kI32, dst); | |||
5532 | } | |||
5533 | ||||
5534 | void I31GetU(FullDecoder* decoder, const Value& input, Value* result) { | |||
5535 | LiftoffRegister src = __ PopToRegister(); | |||
5536 | LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {src}, {}); | |||
5537 | if (SmiValuesAre31Bits()) { | |||
5538 | __ emit_i32_shri(dst.gp(), src.gp(), kSmiTagSize); | |||
5539 | } else { | |||
5540 | DCHECK(SmiValuesAre32Bits()); | |||
5541 | __ emit_i64_shri(dst, src, kI31To32BitSmiShift); | |||
5542 | } | |||
5543 | __ PushRegister(kI32, dst); | |||
5544 | } | |||
5545 | ||||
5546 | void RttCanon(FullDecoder* decoder, uint32_t type_index, Value* result) { | |||
5547 | LiftoffRegister rtt = __ GetUnusedRegister(kGpReg, {}); | |||
5548 | LOAD_TAGGED_PTR_INSTANCE_FIELD(rtt.gp(), ManagedObjectMaps, {}); | |||
5549 | __ LoadTaggedPointer( | |||
5550 | rtt.gp(), rtt.gp(), no_reg, | |||
5551 | wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(type_index), {}); | |||
5552 | __ PushRegister(kRtt, rtt); | |||
5553 | } | |||
5554 | ||||
5555 | enum NullSucceeds : bool { // -- | |||
5556 | kNullSucceeds = true, | |||
5557 | kNullFails = false | |||
5558 | }; | |||
5559 | ||||
5560 | // Falls through on match (=successful type check). | |||
5561 | // Returns the register containing the object. | |||
5562 | LiftoffRegister SubtypeCheck(FullDecoder* decoder, const Value& obj, | |||
5563 | const Value& rtt, Label* no_match, | |||
5564 | NullSucceeds null_succeeds, | |||
5565 | LiftoffRegList pinned = {}, | |||
5566 | Register opt_scratch = no_reg) { | |||
5567 | Label match; | |||
5568 | LiftoffRegister rtt_reg = pinned.set(__ PopToRegister(pinned)); | |||
5569 | LiftoffRegister obj_reg = pinned.set(__ PopToRegister(pinned)); | |||
5570 | ||||
5571 | // Reserve all temporary registers up front, so that the cache state | |||
5572 | // tracking doesn't get confused by the following conditional jumps. | |||
5573 | LiftoffRegister tmp1 = | |||
5574 | opt_scratch != no_reg | |||
5575 | ? LiftoffRegister(opt_scratch) | |||
5576 | : pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
5577 | LiftoffRegister tmp2 = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
5578 | if (obj.type.is_nullable()) { | |||
5579 | LoadNullValue(tmp1.gp(), pinned); | |||
5580 | __ emit_cond_jump(kEqual, null_succeeds ? &match : no_match, | |||
5581 | obj.type.kind(), obj_reg.gp(), tmp1.gp()); | |||
5582 | } | |||
5583 | ||||
5584 | __ LoadMap(tmp1.gp(), obj_reg.gp()); | |||
5585 | // {tmp1} now holds the object's map. | |||
5586 | ||||
5587 | // Check for rtt equality first; if the maps differ, fall back to the | |||
5588 | // constant-time supertype check below. | |||
5589 | __ emit_cond_jump(kEqual, &match, rtt.type.kind(), tmp1.gp(), rtt_reg.gp()); | |||
5590 | ||||
5591 | // Constant-time subtyping check: load exactly one candidate RTT from the | |||
5592 | // supertypes list. | |||
5593 | // Step 1: load the WasmTypeInfo into {tmp1}. | |||
5594 | constexpr int kTypeInfoOffset = wasm::ObjectAccess::ToTagged( | |||
5595 | Map::kConstructorOrBackPointerOrNativeContextOffset); | |||
5596 | __ LoadTaggedPointer(tmp1.gp(), tmp1.gp(), no_reg, kTypeInfoOffset, pinned); | |||
5597 | // Step 2: load the supertypes list into {tmp1}. | |||
5598 | constexpr int kSuperTypesOffset = | |||
5599 | wasm::ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset); | |||
5600 | __ LoadTaggedPointer(tmp1.gp(), tmp1.gp(), no_reg, kSuperTypesOffset, | |||
5601 | pinned); | |||
5602 | // Step 3: check the list's length if needed. | |||
5603 | uint32_t rtt_depth = | |||
5604 | GetSubtypingDepth(decoder->module_, rtt.type.ref_index()); | |||
5605 | if (rtt_depth >= kMinimumSupertypeArraySize) { | |||
5606 | LiftoffRegister list_length = tmp2; | |||
5607 | __ LoadFixedArrayLengthAsInt32(list_length, tmp1.gp(), pinned); | |||
5608 | __ emit_i32_cond_jumpi(kUnsignedLessEqual, no_match, list_length.gp(), | |||
5609 | rtt_depth); | |||
5610 | } | |||
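     | // (If rtt_depth is below kMinimumSupertypeArraySize, the supertypes list | |||
     | // is assumed to always be at least that long, so the length check can be | |||
     | // skipped.) | |||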
5611 | // Step 4: load the candidate list slot into {tmp1}, and compare it. | |||
5612 | __ LoadTaggedPointer( | |||
5613 | tmp1.gp(), tmp1.gp(), no_reg, | |||
5614 | wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(rtt_depth), pinned); | |||
5615 | __ emit_cond_jump(kUnequal, no_match, rtt.type.kind(), tmp1.gp(), | |||
5616 | rtt_reg.gp()); | |||
5617 | ||||
5618 | // Fall through to {match}. | |||
5619 | __ bind(&match); | |||
5620 | return obj_reg; | |||
5621 | } | |||
5622 | ||||
5623 | void RefTest(FullDecoder* decoder, const Value& obj, const Value& rtt, | |||
5624 | Value* /* result_val */) { | |||
5625 | Label return_false, done; | |||
5626 | LiftoffRegList pinned; | |||
5627 | LiftoffRegister result = pinned.set(__ GetUnusedRegister(kGpReg, {})); | |||
5628 | ||||
5629 | SubtypeCheck(decoder, obj, rtt, &return_false, kNullFails, pinned, | |||
5630 | result.gp()); | |||
5631 | ||||
5632 | __ LoadConstant(result, WasmValue(1)); | |||
5633 | // TODO(jkummerow): Emit near jumps on platforms where it's more efficient. | |||
5634 | __ emit_jump(&done); | |||
5635 | ||||
5636 | __ bind(&return_false); | |||
5637 | __ LoadConstant(result, WasmValue(0)); | |||
5638 | __ bind(&done); | |||
5639 | __ PushRegister(kI32, result); | |||
5640 | } | |||
5641 | ||||
5642 | void RefCast(FullDecoder* decoder, const Value& obj, const Value& rtt, | |||
5643 | Value* result) { | |||
5644 | if (FLAG_experimental_wasm_assume_ref_cast_succeeds) { | |||
5645 | // Just drop the rtt. | |||
5646 | __ DropValues(1); | |||
5647 | } else { | |||
5648 | Label* trap_label = | |||
5649 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapIllegalCast); | |||
5650 | LiftoffRegister obj_reg = | |||
5651 | SubtypeCheck(decoder, obj, rtt, trap_label, kNullSucceeds); | |||
5652 | __ PushRegister(obj.type.kind(), obj_reg); | |||
5653 | } | |||
5654 | } | |||
5655 | ||||
5656 | void BrOnCast(FullDecoder* decoder, const Value& obj, const Value& rtt, | |||
5657 | Value* /* result_on_branch */, uint32_t depth) { | |||
5658 | // Before branching, materialize all constants. This avoids repeatedly | |||
5659 | // materializing them for each conditional branch. | |||
5660 | if (depth != decoder->control_depth() - 1) { | |||
5661 | __ MaterializeMergedConstants( | |||
5662 | decoder->control_at(depth)->br_merge()->arity); | |||
5663 | } | |||
5664 | ||||
5665 | Label cont_false; | |||
5666 | LiftoffRegister obj_reg = | |||
5667 | SubtypeCheck(decoder, obj, rtt, &cont_false, kNullFails); | |||
5668 | ||||
5669 | __ PushRegister(rtt.type.is_bottom() ? kBottom : obj.type.kind(), obj_reg); | |||
5670 | BrOrRet(decoder, depth, 0); | |||
5671 | ||||
5672 | __ bind(&cont_false); | |||
5673 | // Drop the branch's value, restore original value. | |||
5674 | Drop(decoder); | |||
5675 | __ PushRegister(obj.type.kind(), obj_reg); | |||
5676 | } | |||
5677 | ||||
5678 | void BrOnCastFail(FullDecoder* decoder, const Value& obj, const Value& rtt, | |||
5679 | Value* /* result_on_fallthrough */, uint32_t depth) { | |||
5680 | // Before branching, materialize all constants. This avoids repeatedly | |||
5681 | // materializing them for each conditional branch. | |||
5682 | if (depth != decoder->control_depth() - 1) { | |||
5683 | __ MaterializeMergedConstants( | |||
5684 | decoder->control_at(depth)->br_merge()->arity); | |||
5685 | } | |||
5686 | ||||
5687 | Label cont_branch, fallthrough; | |||
5688 | LiftoffRegister obj_reg = | |||
5689 | SubtypeCheck(decoder, obj, rtt, &cont_branch, kNullFails); | |||
5690 | __ PushRegister(obj.type.kind(), obj_reg); | |||
5691 | __ emit_jump(&fallthrough); | |||
5692 | ||||
5693 | __ bind(&cont_branch); | |||
5694 | BrOrRet(decoder, depth, 0); | |||
5695 | ||||
5696 | __ bind(&fallthrough); | |||
5697 | } | |||
5698 | ||||
5699 | // Abstract type checkers. They all return the object register and fall | |||
5700 | // through to match. | |||
5701 | LiftoffRegister DataCheck(const Value& obj, Label* no_match, | |||
5702 | LiftoffRegList pinned, Register opt_scratch) { | |||
5703 | TypeCheckRegisters registers = | |||
5704 | TypeCheckPrelude(obj, no_match, pinned, opt_scratch); | |||
5705 | EmitDataRefCheck(registers.map_reg.gp(), no_match, registers.tmp_reg, | |||
5706 | pinned); | |||
5707 | return registers.obj_reg; | |||
5708 | } | |||
5709 | ||||
5710 | LiftoffRegister ArrayCheck(const Value& obj, Label* no_match, | |||
5711 | LiftoffRegList pinned, Register opt_scratch) { | |||
5712 | TypeCheckRegisters registers = | |||
5713 | TypeCheckPrelude(obj, no_match, pinned, opt_scratch); | |||
5714 | __ Load(registers.map_reg, registers.map_reg.gp(), no_reg, | |||
5715 | wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset), | |||
5716 | LoadType::kI32Load16U, pinned); | |||
5717 | __ emit_i32_cond_jumpi(kUnequal, no_match, registers.map_reg.gp(), | |||
5718 | WASM_ARRAY_TYPE); | |||
5719 | return registers.obj_reg; | |||
5720 | } | |||
5721 | ||||
5722 | LiftoffRegister FuncCheck(const Value& obj, Label* no_match, | |||
5723 | LiftoffRegList pinned, Register opt_scratch) { | |||
5724 | TypeCheckRegisters registers = | |||
5725 | TypeCheckPrelude(obj, no_match, pinned, opt_scratch); | |||
5726 | __ Load(registers.map_reg, registers.map_reg.gp(), no_reg, | |||
5727 | wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset), | |||
5728 | LoadType::kI32Load16U, pinned); | |||
5729 | __ emit_i32_cond_jumpi(kUnequal, no_match, registers.map_reg.gp(), | |||
5730 | WASM_INTERNAL_FUNCTION_TYPE); | |||
5731 | return registers.obj_reg; | |||
5732 | } | |||
5733 | ||||
5734 | LiftoffRegister I31Check(const Value& object, Label* no_match, | |||
5735 | LiftoffRegList pinned, Register opt_scratch) { | |||
5736 | LiftoffRegister obj_reg = pinned.set(__ PopToRegister(pinned)); | |||
5737 | ||||
5738 | __ emit_smi_check(obj_reg.gp(), no_match, LiftoffAssembler::kJumpOnNotSmi); | |||
5739 | ||||
5740 | return obj_reg; | |||
5741 | } | |||
5742 | ||||
5743 | using TypeChecker = LiftoffRegister (LiftoffCompiler::*)( | |||
5744 | const Value& obj, Label* no_match, LiftoffRegList pinned, | |||
5745 | Register opt_scratch); | |||
5746 | ||||
5747 | template <TypeChecker type_checker> | |||
5748 | void AbstractTypeCheck(const Value& object) { | |||
5749 | Label match, no_match, done; | |||
5750 | LiftoffRegList pinned; | |||
5751 | LiftoffRegister result = pinned.set(__ GetUnusedRegister(kGpReg, {})); | |||
5752 | ||||
5753 | (this->*type_checker)(object, &no_match, pinned, result.gp()); | |||
5754 | ||||
5755 | __ bind(&match); | |||
5756 | __ LoadConstant(result, WasmValue(1)); | |||
5757 | // TODO(jkummerow): Emit near jumps on platforms where it's more efficient. | |||
5758 | __ emit_jump(&done); | |||
5759 | ||||
5760 | __ bind(&no_match); | |||
5761 | __ LoadConstant(result, WasmValue(0)); | |||
5762 | __ bind(&done); | |||
5763 | __ PushRegister(kI32, result); | |||
5764 | } | |||
5765 | ||||
5766 | void RefIsData(FullDecoder* /* decoder */, const Value& object, | |||
5767 | Value* /* result_val */) { | |||
5768 | return AbstractTypeCheck<&LiftoffCompiler::DataCheck>(object); | |||
5769 | } | |||
5770 | ||||
5771 | void RefIsFunc(FullDecoder* /* decoder */, const Value& object, | |||
5772 | Value* /* result_val */) { | |||
5773 | return AbstractTypeCheck<&LiftoffCompiler::FuncCheck>(object); | |||
5774 | } | |||
5775 | ||||
5776 | void RefIsArray(FullDecoder* /* decoder */, const Value& object, | |||
5777 | Value* /* result_val */) { | |||
5778 | return AbstractTypeCheck<&LiftoffCompiler::ArrayCheck>(object); | |||
5779 | } | |||
5780 | ||||
5781 | void RefIsI31(FullDecoder* decoder, const Value& object, | |||
5782 | Value* /* result */) { | |||
5783 | return AbstractTypeCheck<&LiftoffCompiler::I31Check>(object); | |||
5784 | } | |||
5785 | ||||
5786 | template <TypeChecker type_checker> | |||
5787 | void AbstractTypeCast(const Value& object, FullDecoder* decoder, | |||
5788 | ValueKind result_kind) { | |||
5789 | Label* trap_label = | |||
5790 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapIllegalCast); | |||
5791 | Label match; | |||
5792 | LiftoffRegister obj_reg = | |||
5793 | (this->*type_checker)(object, trap_label, {}, no_reg); | |||
5794 | __ bind(&match); | |||
5795 | __ PushRegister(result_kind, obj_reg); | |||
5796 | } | |||
5797 | ||||
5798 | void RefAsData(FullDecoder* decoder, const Value& object, | |||
5799 | Value* /* result */) { | |||
5800 | return AbstractTypeCast<&LiftoffCompiler::DataCheck>(object, decoder, kRef); | |||
5801 | } | |||
5802 | ||||
5803 | void RefAsFunc(FullDecoder* decoder, const Value& object, | |||
5804 | Value* /* result */) { | |||
5805 | return AbstractTypeCast<&LiftoffCompiler::FuncCheck>(object, decoder, kRef); | |||
5806 | } | |||
5807 | ||||
5808 | void RefAsI31(FullDecoder* decoder, const Value& object, Value* result) { | |||
5809 | return AbstractTypeCast<&LiftoffCompiler::I31Check>(object, decoder, kRef); | |||
5810 | } | |||
5811 | ||||
5812 | void RefAsArray(FullDecoder* decoder, const Value& object, Value* result) { | |||
5813 | return AbstractTypeCast<&LiftoffCompiler::ArrayCheck>(object, decoder, | |||
5814 | kRef); | |||
5815 | } | |||
5816 | ||||
5817 | template <TypeChecker type_checker> | |||
5818 | void BrOnAbstractType(const Value& object, FullDecoder* decoder, | |||
5819 | uint32_t br_depth) { | |||
5820 | // Before branching, materialize all constants. This avoids repeatedly | |||
5821 | // materializing them for each conditional branch. | |||
5822 | if (br_depth != decoder->control_depth() - 1) { | |||
5823 | __ MaterializeMergedConstants( | |||
5824 | decoder->control_at(br_depth)->br_merge()->arity); | |||
5825 | } | |||
5826 | ||||
5827 | Label no_match; | |||
5828 | LiftoffRegister obj_reg = | |||
5829 | (this->*type_checker)(object, &no_match, {}, no_reg); | |||
5830 | ||||
5831 | __ PushRegister(kRef, obj_reg); | |||
5832 | BrOrRet(decoder, br_depth, 0); | |||
5833 | ||||
5834 | __ bind(&no_match); | |||
5835 | } | |||
5836 | ||||
5837 | template <TypeChecker type_checker> | |||
5838 | void BrOnNonAbstractType(const Value& object, FullDecoder* decoder, | |||
5839 | uint32_t br_depth) { | |||
5840 | // Before branching, materialize all constants. This avoids repeatedly | |||
5841 | // materializing them for each conditional branch. | |||
5842 | if (br_depth != decoder->control_depth() - 1) { | |||
5843 | __ MaterializeMergedConstants( | |||
5844 | decoder->control_at(br_depth)->br_merge()->arity); | |||
5845 | } | |||
5846 | ||||
5847 | Label no_match, end; | |||
5848 | LiftoffRegister obj_reg = | |||
5849 | (this->*type_checker)(object, &no_match, {}, no_reg); | |||
5850 | __ PushRegister(kRef, obj_reg); | |||
5851 | __ emit_jump(&end); | |||
5852 | ||||
5853 | __ bind(&no_match); | |||
5854 | BrOrRet(decoder, br_depth, 0); | |||
5855 | ||||
5856 | __ bind(&end); | |||
5857 | } | |||
5858 | ||||
5859 | void BrOnData(FullDecoder* decoder, const Value& object, | |||
5860 | Value* /* value_on_branch */, uint32_t br_depth) { | |||
5861 | return BrOnAbstractType<&LiftoffCompiler::DataCheck>(object, decoder, | |||
5862 | br_depth); | |||
5863 | } | |||
5864 | ||||
5865 | void BrOnFunc(FullDecoder* decoder, const Value& object, | |||
5866 | Value* /* value_on_branch */, uint32_t br_depth) { | |||
5867 | return BrOnAbstractType<&LiftoffCompiler::FuncCheck>(object, decoder, | |||
5868 | br_depth); | |||
5869 | } | |||
5870 | ||||
5871 | void BrOnI31(FullDecoder* decoder, const Value& object, | |||
5872 | Value* /* value_on_branch */, uint32_t br_depth) { | |||
5873 | return BrOnAbstractType<&LiftoffCompiler::I31Check>(object, decoder, | |||
5874 | br_depth); | |||
5875 | } | |||
5876 | ||||
5877 | void BrOnArray(FullDecoder* decoder, const Value& object, | |||
5878 | Value* /* value_on_branch */, uint32_t br_depth) { | |||
5879 | return BrOnAbstractType<&LiftoffCompiler::ArrayCheck>(object, decoder, | |||
5880 | br_depth); | |||
5881 | } | |||
5882 | ||||
5883 | void BrOnNonData(FullDecoder* decoder, const Value& object, | |||
5884 | Value* /* value_on_branch */, uint32_t br_depth) { | |||
5885 | return BrOnNonAbstractType<&LiftoffCompiler::DataCheck>(object, decoder, | |||
5886 | br_depth); | |||
5887 | } | |||
5888 | ||||
5889 | void BrOnNonFunc(FullDecoder* decoder, const Value& object, | |||
5890 | Value* /* value_on_branch */, uint32_t br_depth) { | |||
5891 | return BrOnNonAbstractType<&LiftoffCompiler::FuncCheck>(object, decoder, | |||
5892 | br_depth); | |||
5893 | } | |||
5894 | ||||
5895 | void BrOnNonI31(FullDecoder* decoder, const Value& object, | |||
5896 | Value* /* value_on_branch */, uint32_t br_depth) { | |||
5897 | return BrOnNonAbstractType<&LiftoffCompiler::I31Check>(object, decoder, | |||
5898 | br_depth); | |||
5899 | } | |||
5900 | ||||
5901 | void BrOnNonArray(FullDecoder* decoder, const Value& object, | |||
5902 | Value* /* value_on_branch */, uint32_t br_depth) { | |||
5903 | return BrOnNonAbstractType<&LiftoffCompiler::ArrayCheck>(object, decoder, | |||
5904 | br_depth); | |||
5905 | } | |||
5906 | ||||
5907 | void Forward(FullDecoder* decoder, const Value& from, Value* to) { | |||
5908 | // Nothing to do here. | |||
5909 | } | |||
5910 | ||||
5911 | private: | |||
5912 | void CallDirect(FullDecoder* decoder, | |||
5913 | const CallFunctionImmediate<validate>& imm, | |||
5914 | const Value args[], Value returns[], TailCall tail_call) { | |||
5915 | MostlySmallValueKindSig sig(compilation_zone_, imm.sig); | |||
5916 | for (ValueKind ret : sig.returns()) { | |||
5917 | if (!CheckSupportedType(decoder, ret, "return")) return; | |||
5918 | } | |||
5919 | ||||
5920 | auto call_descriptor = | |||
5921 | compiler::GetWasmCallDescriptor(compilation_zone_, imm.sig); | |||
5922 | call_descriptor = | |||
5923 | GetLoweredCallDescriptor(compilation_zone_, call_descriptor); | |||
5924 | ||||
5925 | // One slot would be enough for call_direct, but would make index | |||
5926 | // computations much more complicated. | |||
5927 | uintptr_t vector_slot = num_call_instructions_ * 2; | |||
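// For illustration (hypothetical numbers, matching the *2 scaling above):
// each call site reserves two feedback slots, so the k-th call instruction
// in this function uses vector slots 2*k and 2*k+1, e.g. the third call
// (num_call_instructions_ == 2) starts at slot 4.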
5928 | if (FLAG_wasm_speculative_inlining) { | |||
5929 | base::MutexGuard mutex_guard(&decoder->module_->type_feedback.mutex); | |||
5930 | decoder->module_->type_feedback.feedback_for_function[func_index_] | |||
5931 | .positions[decoder->position()] = | |||
5932 | static_cast<int>(num_call_instructions_); | |||
5933 | num_call_instructions_++; | |||
5934 | } | |||
5935 | ||||
5936 | if (imm.index < env_->module->num_imported_functions) { | |||
5937 | // A direct call to an imported function. | |||
5938 | LiftoffRegList pinned; | |||
5939 | Register tmp = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); | |||
5940 | Register target = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); | |||
5941 | ||||
5942 | Register imported_targets = tmp; | |||
5943 | LOAD_INSTANCE_FIELD(imported_targets, ImportedFunctionTargets, | |||
5944 | kSystemPointerSize, pinned); | |||
5945 | __ Load(LiftoffRegister(target), imported_targets, no_reg, | |||
5946 | imm.index * sizeof(Address), kPointerLoadType, pinned); | |||
5947 | ||||
5948 | Register imported_function_refs = tmp; | |||
5949 | LOAD_TAGGED_PTR_INSTANCE_FIELD(imported_function_refs, | |||
5950 | ImportedFunctionRefs, pinned); | |||
5951 | Register imported_function_ref = tmp; | |||
5952 | __ LoadTaggedPointer( | |||
5953 | imported_function_ref, imported_function_refs, no_reg, | |||
5954 | ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), pinned); | |||
5955 | ||||
5956 | Register* explicit_instance = &imported_function_ref; | |||
5957 | __ PrepareCall(&sig, call_descriptor, &target, explicit_instance); | |||
5958 | if (tail_call) { | |||
5959 | __ PrepareTailCall( | |||
5960 | static_cast<int>(call_descriptor->ParameterSlotCount()), | |||
5961 | static_cast<int>( | |||
5962 | call_descriptor->GetStackParameterDelta(descriptor_))); | |||
5963 | __ TailCallIndirect(target); | |||
5964 | } else { | |||
5965 | source_position_table_builder_.AddPosition( | |||
5966 | __ pc_offset(), SourcePosition(decoder->position()), true); | |||
5967 | __ CallIndirect(&sig, call_descriptor, target); | |||
5968 | FinishCall(decoder, &sig, call_descriptor); | |||
5969 | } | |||
5970 | } else { | |||
5971 | // Inlining direct calls isn't speculative, but existence of the | |||
5972 | // feedback vector currently depends on this flag. | |||
5973 | if (FLAG_wasm_speculative_inlining) { | |||
5974 | LiftoffRegister vector = __ GetUnusedRegister(kGpReg, {}); | |||
5975 | __ Fill(vector, liftoff::kFeedbackVectorOffset, kPointerKind); | |||
5976 | __ IncrementSmi(vector, | |||
5977 | wasm::ObjectAccess::ElementOffsetInTaggedFixedArray( | |||
5978 | static_cast<int>(vector_slot))); | |||
5979 | } | |||
5980 | // A direct call within this module just gets the current instance. | |||
5981 | __ PrepareCall(&sig, call_descriptor); | |||
5982 | // Just encode the function index. This will be patched at instantiation. | |||
5983 | Address addr = static_cast<Address>(imm.index); | |||
5984 | if (tail_call) { | |||
5985 | DCHECK(descriptor_->CanTailCall(call_descriptor)); | |||
5986 | __ PrepareTailCall( | |||
5987 | static_cast<int>(call_descriptor->ParameterSlotCount()), | |||
5988 | static_cast<int>( | |||
5989 | call_descriptor->GetStackParameterDelta(descriptor_))); | |||
5990 | __ TailCallNativeWasmCode(addr); | |||
5991 | } else { | |||
5992 | source_position_table_builder_.AddPosition( | |||
5993 | __ pc_offset(), SourcePosition(decoder->position()), true); | |||
5994 | __ CallNativeWasmCode(addr); | |||
5995 | FinishCall(decoder, &sig, call_descriptor); | |||
5996 | } | |||
5997 | } | |||
5998 | } | |||
5999 | ||||
6000 | void CallIndirect(FullDecoder* decoder, const Value& index_val, | |||
6001 | const CallIndirectImmediate<validate>& imm, | |||
6002 | TailCall tail_call) { | |||
6003 | MostlySmallValueKindSig sig(compilation_zone_, imm.sig); | |||
6004 | for (ValueKind ret : sig.returns()) { | |||
6005 | if (!CheckSupportedType(decoder, ret, "return")) return; | |||
6006 | } | |||
6007 | ||||
6008 | // Pop the index. We'll modify the register's contents later. | |||
6009 | Register index = __ PopToModifiableRegister().gp(); | |||
6010 | ||||
6011 | LiftoffRegList pinned = {index}; | |||
6012 | // Get three temporary registers. | |||
6013 | Register table = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); | |||
6014 | Register tmp_const = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); | |||
6015 | Register scratch = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); | |||
6016 | Register indirect_function_table = no_reg; | |||
6017 | if (imm.table_imm.index != 0) { | |||
6018 | Register indirect_function_tables = | |||
6019 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); | |||
6020 | LOAD_TAGGED_PTR_INSTANCE_FIELD(indirect_function_tables, | |||
6021 | IndirectFunctionTables, pinned); | |||
6022 | ||||
6023 | indirect_function_table = indirect_function_tables; | |||
6024 | __ LoadTaggedPointer( | |||
6025 | indirect_function_table, indirect_function_tables, no_reg, | |||
6026 | ObjectAccess::ElementOffsetInTaggedFixedArray(imm.table_imm.index), | |||
6027 | pinned); | |||
6028 | } | |||
6029 | ||||
6030 | // Bounds check against the table size. | |||
6031 | Label* invalid_func_label = | |||
6032 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapTableOutOfBounds); | |||
6033 | ||||
6034 | uint32_t canonical_sig_num = | |||
6035 | env_->module->canonicalized_type_ids[imm.sig_imm.index]; | |||
6036 | DCHECK_GE(canonical_sig_num, 0); | |||
6037 | DCHECK_GE(kMaxInt, canonical_sig_num); | |||
6038 | ||||
6039 | // Compare against table size stored in | |||
6040 | // {instance->indirect_function_table_size}. | |||
6041 | if (imm.table_imm.index == 0) { | |||
6042 | LOAD_INSTANCE_FIELD(tmp_const, IndirectFunctionTableSize, kUInt32Size, | |||
6043 | pinned); | |||
6044 | } else { | |||
6045 | __ Load( | |||
6046 | LiftoffRegister(tmp_const), indirect_function_table, no_reg, | |||
6047 | wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kSizeOffset), | |||
6048 | LoadType::kI32Load, pinned); | |||
6049 | } | |||
6050 | __ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kI32, index, | |||
6051 | tmp_const); | |||
6052 | ||||
6053 | CODE_COMMENT("Check indirect call signature"); | |||
6054 | // Load the signature from {instance->ift_sig_ids[key]} | |||
6055 | if (imm.table_imm.index == 0) { | |||
6056 | LOAD_INSTANCE_FIELD(table, IndirectFunctionTableSigIds, | |||
6057 | kSystemPointerSize, pinned); | |||
6058 | } else { | |||
6059 | __ Load(LiftoffRegister(table), indirect_function_table, no_reg, | |||
6060 | wasm::ObjectAccess::ToTagged( | |||
6061 | WasmIndirectFunctionTable::kSigIdsOffset), | |||
6062 | kPointerLoadType, pinned); | |||
6063 | } | |||
6064 | // Shift {index} by 2 (multiply by 4) to represent kInt32Size items. | |||
6065 | STATIC_ASSERT((1 << 2) == kInt32Size); | |||
6066 | __ emit_i32_shli(index, index, 2); | |||
6067 | __ Load(LiftoffRegister(scratch), table, index, 0, LoadType::kI32Load, | |||
6068 | pinned); | |||
6069 | ||||
6070 | // Compare against expected signature. | |||
6071 | __ LoadConstant(LiftoffRegister(tmp_const), WasmValue(canonical_sig_num)); | |||
6072 | ||||
6073 | Label* sig_mismatch_label = | |||
6074 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapFuncSigMismatch); | |||
6075 | __ emit_cond_jump(kUnequal, sig_mismatch_label, kPointerKind, scratch, | |||
6076 | tmp_const); | |||
6077 | ||||
6078 | // At this point {index} has already been multiplied by 4. | |||
6079 | CODE_COMMENT("Execute indirect call"); | |||
6080 | if (kTaggedSize != kInt32Size) { | |||
6081 | DCHECK_EQ(kTaggedSize, kInt32Size * 2); | |||
6082 | // Multiply {index} by another 2 to represent kTaggedSize items. | |||
6083 | __ emit_i32_add(index, index, index); | |||
6084 | } | |||
6085 | // At this point {index} has already been multiplied by kTaggedSize. | |||
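// Worked example of the scaling above (assuming a regular 64-bit build):
// with pointer compression, kTaggedSize == kInt32Size == 4, so the extra
// doubling is skipped and {index} stays at entry * 4; without pointer
// compression, kTaggedSize == 8 and {index} has just been doubled to
// entry * 8.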
6086 | ||||
6087 | // Load the instance from {instance->ift_instances[key]} | |||
6088 | if (imm.table_imm.index == 0) { | |||
6089 | LOAD_TAGGED_PTR_INSTANCE_FIELD(table, IndirectFunctionTableRefs, pinned); | |||
6090 | } else { | |||
6091 | __ LoadTaggedPointer( | |||
6092 | table, indirect_function_table, no_reg, | |||
6093 | wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kRefsOffset), | |||
6094 | pinned); | |||
6095 | } | |||
6096 | __ LoadTaggedPointer(tmp_const, table, index, | |||
6097 | ObjectAccess::ElementOffsetInTaggedFixedArray(0), | |||
6098 | pinned); | |||
6099 | ||||
6100 | if (kTaggedSize != kSystemPointerSize) { | |||
6101 | DCHECK_EQ(kSystemPointerSize, kTaggedSize * 2); | |||
6102 | // Multiply {index} by another 2 to represent kSystemPointerSize items. | |||
6103 | __ emit_i32_add(index, index, index); | |||
6104 | } | |||
6105 | // At this point {index} has already been multiplied by kSystemPointerSize. | |||
6106 | ||||
6107 | Register* explicit_instance = &tmp_const; | |||
6108 | ||||
6109 | // Load the target from {instance->ift_targets[key]} | |||
6110 | if (imm.table_imm.index == 0) { | |||
6111 | LOAD_INSTANCE_FIELD(table, IndirectFunctionTableTargets, | |||
6112 | kSystemPointerSize, pinned); | |||
6113 | } else { | |||
6114 | __ Load(LiftoffRegister(table), indirect_function_table, no_reg, | |||
6115 | wasm::ObjectAccess::ToTagged( | |||
6116 | WasmIndirectFunctionTable::kTargetsOffset), | |||
6117 | kPointerLoadType, pinned); | |||
6118 | } | |||
6119 | __ Load(LiftoffRegister(scratch), table, index, 0, kPointerLoadType, | |||
6120 | pinned); | |||
6121 | ||||
6122 | auto call_descriptor = | |||
6123 | compiler::GetWasmCallDescriptor(compilation_zone_, imm.sig); | |||
6124 | call_descriptor = | |||
6125 | GetLoweredCallDescriptor(compilation_zone_, call_descriptor); | |||
6126 | ||||
6127 | Register target = scratch; | |||
6128 | __ PrepareCall(&sig, call_descriptor, &target, explicit_instance); | |||
6129 | if (tail_call) { | |||
6130 | __ PrepareTailCall( | |||
6131 | static_cast<int>(call_descriptor->ParameterSlotCount()), | |||
6132 | static_cast<int>( | |||
6133 | call_descriptor->GetStackParameterDelta(descriptor_))); | |||
6134 | __ TailCallIndirect(target); | |||
6135 | } else { | |||
6136 | source_position_table_builder_.AddPosition( | |||
6137 | __ pc_offset(), SourcePosition(decoder->position()), true); | |||
6138 | __ CallIndirect(&sig, call_descriptor, target); | |||
6139 | ||||
6140 | FinishCall(decoder, &sig, call_descriptor); | |||
6141 | } | |||
6142 | } | |||
6143 | ||||
6144 | void CallRef(FullDecoder* decoder, ValueType func_ref_type, | |||
6145 | const FunctionSig* type_sig, TailCall tail_call) { | |||
6146 | MostlySmallValueKindSig sig(compilation_zone_, type_sig); | |||
6147 | for (ValueKind ret : sig.returns()) { | |||
6148 | if (!CheckSupportedType(decoder, ret, "return")) return; | |||
6149 | } | |||
6150 | compiler::CallDescriptor* call_descriptor = | |||
6151 | compiler::GetWasmCallDescriptor(compilation_zone_, type_sig); | |||
6152 | call_descriptor = | |||
6153 | GetLoweredCallDescriptor(compilation_zone_, call_descriptor); | |||
6154 | ||||
6155 | Register target_reg = no_reg, instance_reg = no_reg; | |||
6156 | ||||
6157 | if (FLAG_wasm_speculative_inlining) { | |||
6158 | ValueKind kIntPtrKind = kPointerKind; | |||
6159 | ||||
6160 | LiftoffRegList pinned; | |||
6161 | LiftoffRegister vector = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
6162 | LiftoffAssembler::VarState funcref = | |||
6163 | __ cache_state()->stack_state.end()[-1]; | |||
6164 | if (funcref.is_reg()) pinned.set(funcref.reg()); | |||
6165 | __ Fill(vector, liftoff::kFeedbackVectorOffset, kPointerKind); | |||
6166 | LiftoffAssembler::VarState vector_var(kPointerKind, vector, 0); | |||
6167 | LiftoffRegister index = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
6168 | uintptr_t vector_slot = num_call_instructions_ * 2; | |||
6169 | { | |||
6170 | base::MutexGuard mutex_guard(&decoder->module_->type_feedback.mutex); | |||
6171 | decoder->module_->type_feedback.feedback_for_function[func_index_] | |||
6172 | .positions[decoder->position()] = | |||
6173 | static_cast<int>(num_call_instructions_); | |||
6174 | } | |||
6175 | num_call_instructions_++; | |||
6176 | __ LoadConstant(index, WasmValue::ForUintPtr(vector_slot)); | |||
6177 | LiftoffAssembler::VarState index_var(kIntPtrKind, index, 0); | |||
6178 | ||||
6179 | // CallRefIC(vector: FixedArray, index: intptr, | |||
6180 | // funcref: WasmInternalFunction) | |||
6181 | CallRuntimeStub(WasmCode::kCallRefIC, | |||
6182 | MakeSig::Returns(kPointerKind, kPointerKind) | |||
6183 | .Params(kPointerKind, kIntPtrKind, kPointerKind), | |||
6184 | {vector_var, index_var, funcref}, decoder->position()); | |||
6185 | ||||
6186 | __ cache_state()->stack_state.pop_back(1); // Drop funcref. | |||
6187 | target_reg = LiftoffRegister(kReturnRegister0).gp(); | |||
6188 | instance_reg = LiftoffRegister(kReturnRegister1).gp(); | |||
6189 | ||||
6190 | } else { // FLAG_wasm_speculative_inlining | |||
6191 | // Non-feedback-collecting version. | |||
6192 | // Executing a write barrier needs temp registers; doing this on a | |||
6193 | // conditional branch confuses the LiftoffAssembler's register management. | |||
6194 | // Spill everything up front to work around that. | |||
6195 | __ SpillAllRegisters(); | |||
6196 | ||||
6197 | // We limit ourselves to four registers: | |||
6198 | // (1) func_data, initially reused for func_ref. | |||
6199 | // (2) instance, initially used as temp. | |||
6200 | // (3) target, initially used as temp. | |||
6201 | // (4) temp. | |||
6202 | LiftoffRegList pinned; | |||
6203 | LiftoffRegister func_ref = pinned.set(__ PopToModifiableRegister(pinned)); | |||
6204 | MaybeEmitNullCheck(decoder, func_ref.gp(), pinned, func_ref_type); | |||
6205 | LiftoffRegister instance = | |||
6206 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
6207 | LiftoffRegister target = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
6208 | LiftoffRegister temp = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
6209 | ||||
6210 | // Load "ref" (instance or WasmApiFunctionRef) and target. | |||
6211 | __ LoadTaggedPointer( | |||
6212 | instance.gp(), func_ref.gp(), no_reg, | |||
6213 | wasm::ObjectAccess::ToTagged(WasmInternalFunction::kRefOffset), | |||
6214 | pinned); | |||
6215 | ||||
6216 | #ifdef V8_SANDBOXED_EXTERNAL_POINTERS | |||
6217 | LOAD_INSTANCE_FIELD(temp.gp(), IsolateRoot, kSystemPointerSize, pinned); | |||
6218 | __ LoadExternalPointer(target.gp(), func_ref.gp(), | |||
6219 | WasmInternalFunction::kForeignAddressOffset, | |||
6220 | kForeignForeignAddressTag, temp.gp()); | |||
6221 | #else | |||
6222 | __ Load(target, func_ref.gp(), no_reg, | |||
6223 | wasm::ObjectAccess::ToTagged( | |||
6224 | WasmInternalFunction::kForeignAddressOffset), | |||
6225 | kPointerLoadType, pinned); | |||
6226 | #endif | |||
6227 | ||||
6228 | Label perform_call; | |||
6229 | ||||
6230 | LiftoffRegister null_address = temp; | |||
6231 | __ LoadConstant(null_address, WasmValue::ForUintPtr(0)); | |||
6232 | __ emit_cond_jump(kUnequal, &perform_call, kRef, target.gp(), | |||
6233 | null_address.gp()); | |||
6234 | // The cached target can only be null for WasmJSFunctions. | |||
6235 | __ LoadTaggedPointer( | |||
6236 | target.gp(), func_ref.gp(), no_reg, | |||
6237 | wasm::ObjectAccess::ToTagged(WasmInternalFunction::kCodeOffset), | |||
6238 | pinned); | |||
6239 | #ifdef V8_EXTERNAL_CODE_SPACE | |||
6240 | __ LoadCodeDataContainerEntry(target.gp(), target.gp()); | |||
6241 | #else | |||
6242 | __ emit_ptrsize_addi(target.gp(), target.gp(), | |||
6243 | wasm::ObjectAccess::ToTagged(Code::kHeaderSize)); | |||
6244 | #endif | |||
6245 | // Fall through to {perform_call}. | |||
6246 | ||||
6247 | __ bind(&perform_call); | |||
6248 | // Now the call target is in {target}, and the right instance object | |||
6249 | // is in {instance}. | |||
6250 | target_reg = target.gp(); | |||
6251 | instance_reg = instance.gp(); | |||
6252 | } // FLAG_wasm_speculative_inlining | |||
6253 | ||||
6254 | __ PrepareCall(&sig, call_descriptor, &target_reg, &instance_reg); | |||
6255 | if (tail_call) { | |||
6256 | __ PrepareTailCall( | |||
6257 | static_cast<int>(call_descriptor->ParameterSlotCount()), | |||
6258 | static_cast<int>( | |||
6259 | call_descriptor->GetStackParameterDelta(descriptor_))); | |||
6260 | __ TailCallIndirect(target_reg); | |||
6261 | } else { | |||
6262 | source_position_table_builder_.AddPosition( | |||
6263 | __ pc_offset(), SourcePosition(decoder->position()), true); | |||
6264 | __ CallIndirect(&sig, call_descriptor, target_reg); | |||
6265 | ||||
6266 | FinishCall(decoder, &sig, call_descriptor); | |||
6267 | } | |||
6268 | } | |||
6269 | ||||
6270 | void LoadNullValue(Register null, LiftoffRegList pinned) { | |||
6271 | LOAD_INSTANCE_FIELD(null, IsolateRoot, kSystemPointerSize, pinned); | |||
6272 | __ LoadFullPointer(null, null, | |||
6273 | IsolateData::root_slot_offset(RootIndex::kNullValue)); | |||
6274 | } | |||
6275 | ||||
6276 | void LoadExceptionSymbol(Register dst, LiftoffRegList pinned, | |||
6277 | RootIndex root_index) { | |||
6278 | LOAD_INSTANCE_FIELD(dst, IsolateRoot, kSystemPointerSize, pinned); | |||
6279 | uint32_t offset_imm = IsolateData::root_slot_offset(root_index); | |||
6280 | __ LoadFullPointer(dst, dst, offset_imm); | |||
6281 | } | |||
6282 | ||||
6283 | void MaybeEmitNullCheck(FullDecoder* decoder, Register object, | |||
6284 | LiftoffRegList pinned, ValueType type) { | |||
6285 | if (FLAG_experimental_wasm_skip_null_checks || !type.is_nullable()) return; | |||
6286 | Label* trap_label = | |||
6287 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapNullDereference); | |||
6288 | LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned); | |||
6289 | LoadNullValue(null.gp(), pinned); | |||
6290 | __ emit_cond_jump(LiftoffCondition::kEqual, trap_label, kOptRef, object, | |||
6291 | null.gp()); | |||
6292 | } | |||
6293 | ||||
6294 | void BoundsCheckArray(FullDecoder* decoder, LiftoffRegister array, | |||
6295 | LiftoffRegister index, LiftoffRegList pinned) { | |||
6296 | if (V8_UNLIKELY(FLAG_experimental_wasm_skip_bounds_checks)) return; | |||
6297 | Label* trap_label = | |||
6298 | AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapArrayOutOfBounds); | |||
6299 | LiftoffRegister length = __ GetUnusedRegister(kGpReg, pinned); | |||
6300 | constexpr int kLengthOffset = | |||
6301 | wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset); | |||
6302 | __ Load(length, array.gp(), no_reg, kLengthOffset, LoadType::kI32Load, | |||
6303 | pinned); | |||
6304 | __ emit_cond_jump(LiftoffCondition::kUnsignedGreaterEqual, trap_label, kI32, | |||
6305 | index.gp(), length.gp()); | |||
6306 | } | |||
6307 | ||||
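// Helper for the struct accessors below. For example (illustrative): field 0
// sits at WasmStruct::kHeaderSize + 0, and ObjectAccess::ToTagged() adjusts
// the raw offset so that adding it to the tagged object pointer addresses the
// field directly.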
6308 | int StructFieldOffset(const StructType* struct_type, int field_index) { | |||
6309 | return wasm::ObjectAccess::ToTagged(WasmStruct::kHeaderSize + | |||
6310 | struct_type->field_offset(field_index)); | |||
6311 | } | |||
6312 | ||||
6313 | void LoadObjectField(LiftoffRegister dst, Register src, Register offset_reg, | |||
6314 | int offset, ValueKind kind, bool is_signed, | |||
6315 | LiftoffRegList pinned) { | |||
6316 | if (is_reference(kind)) { | |||
6317 | __ LoadTaggedPointer(dst.gp(), src, offset_reg, offset, pinned); | |||
6318 | } else { | |||
6319 | // Primitive kind. | |||
6320 | LoadType load_type = LoadType::ForValueKind(kind, is_signed); | |||
6321 | __ Load(dst, src, offset_reg, offset, load_type, pinned); | |||
6322 | } | |||
6323 | } | |||
6324 | ||||
6325 | void StoreObjectField(Register obj, Register offset_reg, int offset, | |||
6326 | LiftoffRegister value, LiftoffRegList pinned, | |||
6327 | ValueKind kind) { | |||
6328 | if (is_reference(kind)) { | |||
6329 | __ StoreTaggedPointer(obj, offset_reg, offset, value, pinned); | |||
6330 | } else { | |||
6331 | // Primitive kind. | |||
6332 | StoreType store_type = StoreType::ForValueKind(kind); | |||
6333 | __ Store(obj, offset_reg, offset, value, store_type, pinned); | |||
6334 | } | |||
6335 | } | |||
6336 | ||||
6337 | void SetDefaultValue(LiftoffRegister reg, ValueKind kind, | |||
6338 | LiftoffRegList pinned) { | |||
6339 | DCHECK(is_defaultable(kind)); | |||
6340 | switch (kind) { | |||
6341 | case kI8: | |||
6342 | case kI16: | |||
6343 | case kI32: | |||
6344 | return __ LoadConstant(reg, WasmValue(int32_t{0})); | |||
6345 | case kI64: | |||
6346 | return __ LoadConstant(reg, WasmValue(int64_t{0})); | |||
6347 | case kF32: | |||
6348 | return __ LoadConstant(reg, WasmValue(float{0.0})); | |||
6349 | case kF64: | |||
6350 | return __ LoadConstant(reg, WasmValue(double{0.0})); | |||
6351 | case kS128: | |||
6352 | DCHECK(CpuFeatures::SupportsWasmSimd128()); | |||
6353 | return __ emit_s128_xor(reg, reg, reg); | |||
6354 | case kOptRef: | |||
6355 | return LoadNullValue(reg.gp(), pinned); | |||
6356 | case kRtt: | |||
6357 | case kVoid: | |||
6358 | case kBottom: | |||
6359 | case kRef: | |||
6360 | UNREACHABLE(); | |||
6361 | } | |||
6362 | } | |||
6363 | ||||
6364 | struct TypeCheckRegisters { | |||
6365 | LiftoffRegister obj_reg, map_reg, tmp_reg; | |||
6366 | }; | |||
6367 | ||||
6368 | TypeCheckRegisters TypeCheckPrelude(const Value& obj, Label* no_match, | |||
6369 | LiftoffRegList pinned, | |||
6370 | Register opt_scratch) { | |||
6371 | LiftoffRegister obj_reg = pinned.set(__ PopToRegister(pinned)); | |||
6372 | ||||
6373 | // Reserve all temporary registers up front, so that the cache state | |||
6374 | // tracking doesn't get confused by the following conditional jumps. | |||
6375 | LiftoffRegister map_reg = | |||
6376 | opt_scratch != no_reg | |||
6377 | ? LiftoffRegister(opt_scratch) | |||
6378 | : pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
6379 | LiftoffRegister tmp_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
6380 | ||||
6381 | if (obj.type.is_nullable()) { | |||
6382 | LoadNullValue(map_reg.gp(), pinned); | |||
6383 | __ emit_cond_jump(kEqual, no_match, kOptRef, obj_reg.gp(), map_reg.gp()); | |||
6384 | } | |||
6385 | ||||
6386 | __ emit_smi_check(obj_reg.gp(), no_match, LiftoffAssembler::kJumpOnSmi); | |||
6387 | ||||
6388 | __ LoadMap(map_reg.gp(), obj_reg.gp()); | |||
6389 | ||||
6390 | return {obj_reg, map_reg, tmp_reg}; | |||
6391 | } | |||
6392 | ||||
6393 | void EmitDataRefCheck(Register map, Label* not_data_ref, LiftoffRegister tmp, | |||
6394 | LiftoffRegList pinned) { | |||
6395 | constexpr int kInstanceTypeOffset = | |||
6396 | wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset); | |||
6397 | __ Load(tmp, map, no_reg, kInstanceTypeOffset, LoadType::kI32Load16U, | |||
6398 | pinned); | |||
6399 | // We're going to test a range of WasmObject instance types with a single | |||
6400 | // unsigned comparison. | |||
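// For example (illustrative): after subtracting FIRST_WASM_OBJECT_TYPE,
// every instance type in [FIRST_WASM_OBJECT_TYPE, LAST_WASM_OBJECT_TYPE]
// maps to [0, LAST - FIRST], while smaller types wrap around to large
// unsigned values, so the single kUnsignedGreaterThan check below rejects
// both out-of-range directions.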
6401 | __ emit_i32_subi(tmp.gp(), tmp.gp(), FIRST_WASM_OBJECT_TYPE); | |||
6402 | __ emit_i32_cond_jumpi(kUnsignedGreaterThan, not_data_ref, tmp.gp(), | |||
6403 | LAST_WASM_OBJECT_TYPE - FIRST_WASM_OBJECT_TYPE); | |||
6404 | } | |||
6405 | ||||
6406 | void MaybeOSR() { | |||
6407 | if (V8_UNLIKELY(for_debugging_)) { | |||
6408 | __ MaybeOSR(); | |||
6409 | } | |||
6410 | } | |||
6411 | ||||
6412 | void FinishCall(FullDecoder* decoder, ValueKindSig* sig, | |||
6413 | compiler::CallDescriptor* call_descriptor) { | |||
6414 | DefineSafepoint(); | |||
6415 | RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill); | |||
6416 | int pc_offset = __ pc_offset(); | |||
6417 | MaybeOSR(); | |||
6418 | EmitLandingPad(decoder, pc_offset); | |||
6419 | __ FinishCall(sig, call_descriptor); | |||
6420 | } | |||
6421 | ||||
6422 | void CheckNan(LiftoffRegister src, LiftoffRegList pinned, ValueKind kind) { | |||
6423 | DCHECK(kind == ValueKind::kF32 || kind == ValueKind::kF64); | |||
6424 | auto nondeterminism_addr = __ GetUnusedRegister(kGpReg, pinned); | |||
6425 | __ LoadConstant( | |||
6426 | nondeterminism_addr, | |||
6427 | WasmValue::ForUintPtr(reinterpret_cast<uintptr_t>(nondeterminism_))); | |||
6428 | __ emit_set_if_nan(nondeterminism_addr.gp(), src.fp(), kind); | |||
6429 | } | |||
6430 | ||||
6431 | void CheckS128Nan(LiftoffRegister dst, LiftoffRegList pinned, | |||
6432 | ValueKind lane_kind) { | |||
6433 | RegClass rc = reg_class_for(kS128); | |||
6434 | LiftoffRegister tmp_gp = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
6435 | LiftoffRegister tmp_s128 = pinned.set(__ GetUnusedRegister(rc, pinned)); | |||
6436 | LiftoffRegister nondeterminism_addr = | |||
6437 | pinned.set(__ GetUnusedRegister(kGpReg, pinned)); | |||
6438 | __ LoadConstant( | |||
6439 | nondeterminism_addr, | |||
6440 | WasmValue::ForUintPtr(reinterpret_cast<uintptr_t>(nondeterminism_))); | |||
6441 | __ emit_s128_set_if_nan(nondeterminism_addr.gp(), dst, tmp_gp.gp(), | |||
6442 | tmp_s128, lane_kind); | |||
6443 | } | |||
6444 | ||||
6445 | bool has_outstanding_op() const { | |||
6446 | return outstanding_op_ != kNoOutstandingOp; | |||
6447 | } | |||
6448 | ||||
6449 | bool test_and_reset_outstanding_op(WasmOpcode opcode) { | |||
6450 | DCHECK_NE(kNoOutstandingOp, opcode); | |||
6451 | if (outstanding_op_ != opcode) return false; | |||
6452 | outstanding_op_ = kNoOutstandingOp; | |||
6453 | return true; | |||
6454 | } | |||
6455 | ||||
6456 | void TraceCacheState(FullDecoder* decoder) const { | |||
6457 | if (!FLAG_trace_liftoff) return; | |||
6458 | StdoutStream os; | |||
6459 | for (int control_depth = decoder->control_depth() - 1; control_depth >= -1; | |||
6460 | --control_depth) { | |||
6461 | auto* cache_state = | |||
6462 | control_depth == -1 ? __ cache_state() | |||
6463 | : &decoder->control_at(control_depth) | |||
6464 | ->label_state; | |||
6465 | os << PrintCollection(cache_state->stack_state); | |||
6466 | if (control_depth != -1) PrintF("; "); | |||
6467 | } | |||
6468 | os << "\n"; | |||
6469 | } | |||
6470 | ||||
6471 | void DefineSafepoint() { | |||
6472 | auto safepoint = safepoint_table_builder_.DefineSafepoint(&asm_); | |||
6473 | __ cache_state()->DefineSafepoint(safepoint); | |||
6474 | } | |||
6475 | ||||
6476 | void DefineSafepointWithCalleeSavedRegisters() { | |||
6477 | auto safepoint = safepoint_table_builder_.DefineSafepoint(&asm_); | |||
6478 | __ cache_state()->DefineSafepointWithCalleeSavedRegisters(safepoint); | |||
6479 | } | |||
6480 | ||||
6481 | Register LoadInstanceIntoRegister(LiftoffRegList pinned, Register fallback) { | |||
6482 | Register instance = __ cache_state()->cached_instance; | |||
6483 | if (instance == no_reg) { | |||
6484 | instance = __ cache_state()->TrySetCachedInstanceRegister( | |||
6485 | pinned | LiftoffRegList{fallback}); | |||
6486 | if (instance == no_reg) instance = fallback; | |||
6487 | __ LoadInstanceFromFrame(instance); | |||
6488 | } | |||
6489 | return instance; | |||
6490 | } | |||
6491 | ||||
6492 | static constexpr WasmOpcode kNoOutstandingOp = kExprUnreachable; | |||
6493 | static constexpr base::EnumSet<ValueKind> kUnconditionallySupported{ | |||
6494 | // MVP: | |||
6495 | kI32, kI64, kF32, kF64, | |||
6496 | // Extern ref: | |||
6497 | kRef, kOptRef, kRtt, kI8, kI16}; | |||
6498 | ||||
6499 | LiftoffAssembler asm_; | |||
6500 | ||||
6501 | // Used for merging code generation of subsequent operations (via look-ahead). | |||
6502 | // Set by the first opcode, reset by the second. | |||
6503 | WasmOpcode outstanding_op_ = kNoOutstandingOp; | |||
6504 | ||||
6505 | // {supported_types_} is updated in {MaybeBailoutForUnsupportedType}. | |||
6506 | base::EnumSet<ValueKind> supported_types_ = kUnconditionallySupported; | |||
6507 | compiler::CallDescriptor* const descriptor_; | |||
6508 | CompilationEnv* const env_; | |||
6509 | DebugSideTableBuilder* const debug_sidetable_builder_; | |||
6510 | const ForDebugging for_debugging_; | |||
6511 | LiftoffBailoutReason bailout_reason_ = kSuccess; | |||
6512 | const int func_index_; | |||
6513 | ZoneVector<OutOfLineCode> out_of_line_code_; | |||
6514 | SourcePositionTableBuilder source_position_table_builder_; | |||
6515 | ZoneVector<trap_handler::ProtectedInstructionData> protected_instructions_; | |||
6516 | // Zone used to store information during compilation. The result will be | |||
6517 | // stored independently, such that this zone can die together with the | |||
6518 | // LiftoffCompiler after compilation. | |||
6519 | Zone* compilation_zone_; | |||
6520 | SafepointTableBuilder safepoint_table_builder_; | |||
6521 | // The pc offset of the instructions to reserve the stack frame. Needed to | |||
6522 | // patch the actually needed stack size in the end. | |||
6523 | uint32_t pc_offset_stack_frame_construction_ = 0; | |||
6524 | // For emitting breakpoint, we store a pointer to the position of the next | |||
6525 | // breakpoint, and a pointer after the list of breakpoints as end marker. | |||
6526 | // A single breakpoint at offset 0 indicates that we should prepare the | |||
6527 | // function for stepping by flooding it with breakpoints. | |||
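// For example (illustrative): a breakpoint list of {0} floods the function,
// whereas a list of concrete offsets is walked via {next_breakpoint_ptr_}
// until it reaches {next_breakpoint_end_}.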
6528 | const int* next_breakpoint_ptr_ = nullptr; | |||
6529 | const int* next_breakpoint_end_ = nullptr; | |||
6530 | ||||
6531 | // Introduce a dead breakpoint to ensure that the calculation of the return | |||
6532 | // address in OSR is correct. | |||
6533 | int dead_breakpoint_ = 0; | |||
6534 | ||||
6535 | // Remember whether we already did the function-entry break checks (for "hook | |||
6536 | // on function call" and "break on entry", a.k.a. instrumentation breakpoint). | |||
6537 | // This happens at the first breakable opcode in the function (if compiling for debugging). | |||
6538 | bool did_function_entry_break_checks_ = false; | |||
6539 | ||||
6540 | struct HandlerInfo { | |||
6541 | MovableLabel handler; | |||
6542 | int pc_offset; | |||
6543 | }; | |||
6544 | ||||
6545 | ZoneVector<HandlerInfo> handlers_; | |||
6546 | int handler_table_offset_ = Assembler::kNoHandlerTable; | |||
6547 | ||||
6548 | // Current number of exception refs on the stack. | |||
6549 | int num_exceptions_ = 0; | |||
6550 | ||||
6551 | // Number of feedback-collecting call instructions encountered. While | |||
6552 | // compiling, also index of the next such instruction. Used for indexing type | |||
6553 | // feedback. | |||
6554 | uintptr_t num_call_instructions_ = 0; | |||
6555 | ||||
6556 | int32_t* max_steps_; | |||
6557 | int32_t* nondeterminism_; | |||
6558 | ||||
6559 | DISALLOW_IMPLICIT_CONSTRUCTORS(LiftoffCompiler); | |||
6560 | }; | |||
6561 | ||||
6562 | // static | |||
6563 | constexpr WasmOpcode LiftoffCompiler::kNoOutstandingOp; | |||
6564 | // static | |||
6565 | constexpr base::EnumSet<ValueKind> LiftoffCompiler::kUnconditionallySupported; | |||
6566 | ||||
6567 | } // namespace | |||
6568 | ||||
6569 | WasmCompilationResult ExecuteLiftoffCompilation( | |||
6570 | CompilationEnv* env, const FunctionBody& func_body, int func_index, | |||
6571 | ForDebugging for_debugging, const LiftoffOptions& compiler_options) { | |||
6572 | base::TimeTicks start_time; | |||
6573 | if (V8_UNLIKELY(FLAG_trace_wasm_compilation_times)) { | |||
6574 | start_time = base::TimeTicks::Now(); | |||
6575 | } | |||
6576 | int func_body_size = static_cast<int>(func_body.end - func_body.start); | |||
6577 | TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"), | |||
6578 | "wasm.CompileBaseline", "funcIndex", func_index, "bodySize", | |||
6579 | func_body_size); | |||
6580 | ||||
6581 | Zone zone(GetWasmEngine()->allocator(), "LiftoffCompilationZone"); | |||
6582 | auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, func_body.sig); | |||
6583 | size_t code_size_estimate = | |||
6584 | WasmCodeManager::EstimateLiftoffCodeSize(func_body_size); | |||
6585 | // Allocate the initial buffer a bit bigger to avoid reallocation during code | |||
6586 | // generation. Overflows when casting to int are fine, as we will allocate at | |||
6587 | // least {AssemblerBase::kMinimalBufferSize} anyway, so in the worst case we | |||
6588 | // have to grow more often. | |||
6589 | int initial_buffer_size = static_cast<int>(128 + code_size_estimate * 4 / 3); | |||
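// For example (hypothetical numbers): a code size estimate of 3072 bytes
// yields an initial buffer of 128 + 3072 * 4 / 3 = 4224 bytes.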
6590 | std::unique_ptr<DebugSideTableBuilder> debug_sidetable_builder; | |||
6591 | if (compiler_options.debug_sidetable) { | |||
6592 | debug_sidetable_builder = std::make_unique<DebugSideTableBuilder>(); | |||
6593 | } | |||
6594 | DCHECK_IMPLIES(compiler_options.max_steps, for_debugging == kForDebugging); | |||
6595 | WasmFeatures unused_detected_features; | |||
6596 | WasmFullDecoder<Decoder::kBooleanValidation, LiftoffCompiler> decoder( | |||
6597 | &zone, env->module, env->enabled_features, | |||
6598 | compiler_options.detected_features ? compiler_options.detected_features | |||
6599 | : &unused_detected_features, | |||
6600 | func_body, call_descriptor, env, &zone, | |||
6601 | NewAssemblerBuffer(initial_buffer_size), debug_sidetable_builder.get(), | |||
6602 | for_debugging, func_index, compiler_options.breakpoints, | |||
6603 | compiler_options.dead_breakpoint, compiler_options.max_steps, | |||
6604 | compiler_options.nondeterminism); | |||
6605 | decoder.Decode(); | |||
6606 | LiftoffCompiler* compiler = &decoder.interface(); | |||
6607 | if (decoder.failed()) compiler->OnFirstError(&decoder); | |||
6608 | ||||
6609 | if (auto* counters = compiler_options.counters) { | |||
6610 | // Check that the histogram for the bailout reasons has the correct size. | |||
6611 | DCHECK_EQ(0, counters->liftoff_bailout_reasons()->min()); | |||
6612 | DCHECK_EQ(kNumBailoutReasons - 1, | |||
6613 | counters->liftoff_bailout_reasons()->max()); | |||
6614 | DCHECK_EQ(kNumBailoutReasons, | |||
6615 | counters->liftoff_bailout_reasons()->num_buckets()); | |||
6616 | // Register the bailout reason (can also be {kSuccess}). | |||
6617 | counters->liftoff_bailout_reasons()->AddSample( | |||
6618 | static_cast<int>(compiler->bailout_reason())); | |||
6619 | } | |||
6620 | ||||
6621 | if (compiler->did_bailout()) return WasmCompilationResult{}; | |||
6622 | ||||
6623 | WasmCompilationResult result; | |||
6624 | compiler->GetCode(&result.code_desc); | |||
6625 | result.instr_buffer = compiler->ReleaseBuffer(); | |||
6626 | result.source_positions = compiler->GetSourcePositionTable(); | |||
6627 | result.protected_instructions_data = compiler->GetProtectedInstructionsData(); | |||
6628 | result.frame_slot_count = compiler->GetTotalFrameSlotCountForGC(); | |||
6629 | auto* lowered_call_desc = GetLoweredCallDescriptor(&zone, call_descriptor); | |||
6630 | result.tagged_parameter_slots = lowered_call_desc->GetTaggedParameterSlots(); | |||
6631 | result.func_index = func_index; | |||
6632 | result.result_tier = ExecutionTier::kLiftoff; | |||
6633 | result.for_debugging = for_debugging; | |||
6634 | if (auto* debug_sidetable = compiler_options.debug_sidetable) { | |||
6635 | *debug_sidetable = debug_sidetable_builder->GenerateDebugSideTable(); | |||
6636 | } | |||
6637 | result.feedback_vector_slots = compiler->GetFeedbackVectorSlots(); | |||
6638 | ||||
6639 | if (V8_UNLIKELY(FLAG_trace_wasm_compilation_times)) { | |||
6640 | base::TimeDelta time = base::TimeTicks::Now() - start_time; | |||
6641 | int codesize = result.code_desc.body_size(); | |||
6642 | StdoutStream{} << "Compiled function " | |||
6643 | << reinterpret_cast<const void*>(env->module) << "#" | |||
6644 | << func_index << " using Liftoff, took " | |||
6645 | << time.InMilliseconds() << " ms and " | |||
6646 | << zone.allocation_size() << " bytes; bodysize " | |||
6647 | << func_body_size << " codesize " << codesize << std::endl; | |||
6648 | } | |||
6649 | ||||
6650 | DCHECK(result.succeeded()); | |||
6651 | return result; | |||
6652 | } | |||
6653 | ||||
6654 | std::unique_ptr<DebugSideTable> GenerateLiftoffDebugSideTable( | |||
6655 | const WasmCode* code) { | |||
6656 | auto* native_module = code->native_module(); | |||
6657 | auto* function = &native_module->module()->functions[code->index()]; | |||
6658 | ModuleWireBytes wire_bytes{native_module->wire_bytes()}; | |||
6659 | base::Vector<const byte> function_bytes = | |||
6660 | wire_bytes.GetFunctionBytes(function); | |||
6661 | CompilationEnv env = native_module->CreateCompilationEnv(); | |||
6662 | FunctionBody func_body{function->sig, 0, function_bytes.begin(), | |||
6663 | function_bytes.end()}; | |||
6664 | ||||
6665 | Zone zone(GetWasmEngine()->allocator(), "LiftoffDebugSideTableZone"); | |||
6666 | auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, function->sig); | |||
6667 | DebugSideTableBuilder debug_sidetable_builder; | |||
6668 | WasmFeatures detected; | |||
6669 | constexpr int kSteppingBreakpoints[] = {0}; | |||
6670 | DCHECK(code->for_debugging() == kForDebugging || | |||
6671 | code->for_debugging() == kForStepping); | |||
6672 | base::Vector<const int> breakpoints = | |||
6673 | code->for_debugging() == kForStepping | |||
6674 | ? base::ArrayVector(kSteppingBreakpoints) | |||
6675 | : base::Vector<const int>{}; | |||
6676 | WasmFullDecoder<Decoder::kBooleanValidation, LiftoffCompiler> decoder( | |||
6677 | &zone, native_module->module(), env.enabled_features, &detected, | |||
6678 | func_body, call_descriptor, &env, &zone, | |||
6679 | NewAssemblerBuffer(AssemblerBase::kDefaultBufferSize), | |||
6680 | &debug_sidetable_builder, code->for_debugging(), code->index(), | |||
6681 | breakpoints); | |||
6682 | decoder.Decode(); | |||
6683 | DCHECK(decoder.ok())((void) 0); | |||
6684 | DCHECK(!decoder.interface().did_bailout())((void) 0); | |||
6685 | return debug_sidetable_builder.GenerateDebugSideTable(); | |||
6686 | } | |||
6687 | ||||
6688 | #undef __ | |||
6689 | #undef TRACE | |||
6690 | #undef WASM_INSTANCE_OBJECT_FIELD_OFFSET | |||
6691 | #undef WASM_INSTANCE_OBJECT_FIELD_SIZE | |||
6692 | #undef LOAD_INSTANCE_FIELD | |||
6693 | #undef LOAD_TAGGED_PTR_INSTANCE_FIELD | |||
6694 | #undef CODE_COMMENT | |||
6695 | ||||
6696 | } // namespace wasm | |||
6697 | } // namespace internal | |||
6698 | } // namespace v8 |