File: | out/../deps/v8/src/compiler/backend/instruction-selector.cc |
Warning: | line 1096, column 5 Value stored to 'frame_state_entries' is never read |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | // Copyright 2014 the V8 project authors. All rights reserved. |
2 | // Use of this source code is governed by a BSD-style license that can be |
3 | // found in the LICENSE file. |
4 | |
5 | #include "src/compiler/backend/instruction-selector.h" |
6 | |
7 | #include <limits> |
8 | |
9 | #include "src/base/iterator.h" |
10 | #include "src/base/platform/wrappers.h" |
11 | #include "src/codegen/assembler-inl.h" |
12 | #include "src/codegen/interface-descriptors-inl.h" |
13 | #include "src/codegen/tick-counter.h" |
14 | #include "src/common/globals.h" |
15 | #include "src/compiler/backend/instruction-selector-impl.h" |
16 | #include "src/compiler/compiler-source-position-table.h" |
17 | #include "src/compiler/js-heap-broker.h" |
18 | #include "src/compiler/node-matchers.h" |
19 | #include "src/compiler/node-properties.h" |
20 | #include "src/compiler/pipeline.h" |
21 | #include "src/compiler/schedule.h" |
22 | #include "src/compiler/state-values-utils.h" |
23 | #include "src/deoptimizer/deoptimizer.h" |
24 | |
25 | #if V8_ENABLE_WEBASSEMBLY1 |
26 | #include "src/wasm/simd-shuffle.h" |
27 | #endif // V8_ENABLE_WEBASSEMBLY |
28 | |
29 | namespace v8 { |
30 | namespace internal { |
31 | namespace compiler { |
32 | |
// Converts a NumberConstant node into the corresponding Smi.
// The node must hold a whole number that fits in 32 bits; the CHECK verifies
// that the double -> int32 truncation was lossless.
Smi NumberConstantToSmi(Node* node) {
  DCHECK_EQ(node->opcode(), IrOpcode::kNumberConstant);
  const double d = OpParameter<double>(node->op());
  Smi smi = Smi::FromInt(static_cast<int32_t>(d));
  CHECK_EQ(smi.value(), d);
  return smi;
}
40 | |
// Constructs an instruction selector for one compilation unit.
// Per-node side tables (defined_/used_/effect_level_/virtual_registers_)
// are sized up-front from {node_count} and indexed by NodeId.
InstructionSelector::InstructionSelector(
    Zone* zone, size_t node_count, Linkage* linkage,
    InstructionSequence* sequence, Schedule* schedule,
    SourcePositionTable* source_positions, Frame* frame,
    EnableSwitchJumpTable enable_switch_jump_table, TickCounter* tick_counter,
    JSHeapBroker* broker, size_t* max_unoptimized_frame_height,
    size_t* max_pushed_argument_count, SourcePositionMode source_position_mode,
    Features features, EnableScheduling enable_scheduling,
    EnableRootsRelativeAddressing enable_roots_relative_addressing,
    EnableTraceTurboJson trace_turbo)
    : zone_(zone),
      linkage_(linkage),
      sequence_(sequence),
      source_positions_(source_positions),
      source_position_mode_(source_position_mode),
      features_(features),
      schedule_(schedule),
      current_block_(nullptr),
      // Selected instructions are buffered here before being flushed to the
      // sequence in SelectInstructions().
      instructions_(zone),
      // Continuation scratch vectors live in the sequence's zone because the
      // operands they hold end up in instructions owned by {sequence}.
      continuation_inputs_(sequence->zone()),
      continuation_outputs_(sequence->zone()),
      continuation_temps_(sequence->zone()),
      defined_(node_count, false, zone),
      used_(node_count, false, zone),
      effect_level_(node_count, 0, zone),
      virtual_registers_(node_count,
                         InstructionOperand::kInvalidVirtualRegister, zone),
      virtual_register_rename_(zone),
      // The scheduler is created lazily in SelectInstructions() when enabled.
      scheduler_(nullptr),
      enable_scheduling_(enable_scheduling),
      enable_roots_relative_addressing_(enable_roots_relative_addressing),
      enable_switch_jump_table_(enable_switch_jump_table),
      state_values_cache_(zone),
      frame_(frame),
      instruction_selection_failed_(false),
      instr_origins_(sequence->zone()),
      trace_turbo_(trace_turbo),
      tick_counter_(tick_counter),
      broker_(broker),
      // Out-parameters accumulated during selection for the caller.
      max_unoptimized_frame_height_(max_unoptimized_frame_height),
      max_pushed_argument_count_(max_pushed_argument_count)
#if V8_TARGET_ARCH_64_BIT
      ,
      phi_states_(node_count, Upper32BitsState::kNotYetChecked, zone)
#endif
{
  DCHECK_EQ(*max_unoptimized_frame_height, 0);  // Caller-initialized.

  instructions_.reserve(node_count);
  continuation_inputs_.reserve(5);
  continuation_outputs_.reserve(2);

  if (trace_turbo_ == kEnableTraceTurboJson) {
    instr_origins_.assign(node_count, {-1, 0});
  }
}
97 | |
// Top-level driver: selects instructions for the whole schedule and flushes
// them into the InstructionSequence. Returns false if selection failed
// (e.g. an instruction exceeded operand-count limits).
bool InstructionSelector::SelectInstructions() {
  // Mark the inputs of all phis in loop headers as used, so the values
  // flowing around the back edge are guaranteed to be materialized.
  BasicBlockVector* blocks = schedule()->rpo_order();
  for (auto const block : *blocks) {
    if (!block->IsLoopHeader()) continue;
    DCHECK_LE(2u, block->PredecessorCount());
    for (Node* const phi : *block) {
      if (phi->opcode() != IrOpcode::kPhi) continue;

      // Mark all inputs as used.
      for (Node* const input : phi->inputs()) {
        MarkAsUsed(input);
      }
    }
  }

  // Visit each basic block in post order (reverse RPO), so uses are seen
  // before definitions, enabling instruction covering.
  for (auto i = blocks->rbegin(); i != blocks->rend(); ++i) {
    VisitBlock(*i);
    if (instruction_selection_failed()) return false;
  }

  // Schedule the selected instructions.
  if (UseInstructionScheduling()) {
    scheduler_ = zone()->New<InstructionScheduler>(zone(), sequence());
  }

  for (auto const block : *blocks) {
    InstructionBlock* instruction_block =
        sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
    for (size_t i = 0; i < instruction_block->phis().size(); i++) {
      UpdateRenamesInPhi(instruction_block->PhiAt(i));
    }
    // Instructions were buffered in reverse within each block, so
    // {code_start} is the highest index and {code_end} the lowest; walk
    // backwards to restore program order. The instruction at {end} is the
    // block terminator.
    size_t end = instruction_block->code_end();
    size_t start = instruction_block->code_start();
    DCHECK_LE(end, start);
    StartBlock(RpoNumber::FromInt(block->rpo_number()));
    if (end != start) {
      while (start-- > end + 1) {
        UpdateRenames(instructions_[start]);
        AddInstruction(instructions_[start]);
      }
      UpdateRenames(instructions_[end]);
      AddTerminator(instructions_[end]);
    }
    EndBlock(RpoNumber::FromInt(block->rpo_number()));
  }
#if DEBUG
  sequence()->ValidateSSA();
#endif
  return true;
}
150 | |
151 | void InstructionSelector::StartBlock(RpoNumber rpo) { |
152 | if (UseInstructionScheduling()) { |
153 | DCHECK_NOT_NULL(scheduler_)((void) 0); |
154 | scheduler_->StartBlock(rpo); |
155 | } else { |
156 | sequence()->StartBlock(rpo); |
157 | } |
158 | } |
159 | |
160 | void InstructionSelector::EndBlock(RpoNumber rpo) { |
161 | if (UseInstructionScheduling()) { |
162 | DCHECK_NOT_NULL(scheduler_)((void) 0); |
163 | scheduler_->EndBlock(rpo); |
164 | } else { |
165 | sequence()->EndBlock(rpo); |
166 | } |
167 | } |
168 | |
169 | void InstructionSelector::AddTerminator(Instruction* instr) { |
170 | if (UseInstructionScheduling()) { |
171 | DCHECK_NOT_NULL(scheduler_)((void) 0); |
172 | scheduler_->AddTerminator(instr); |
173 | } else { |
174 | sequence()->AddInstruction(instr); |
175 | } |
176 | } |
177 | |
178 | void InstructionSelector::AddInstruction(Instruction* instr) { |
179 | if (UseInstructionScheduling()) { |
180 | DCHECK_NOT_NULL(scheduler_)((void) 0); |
181 | scheduler_->AddInstruction(instr); |
182 | } else { |
183 | sequence()->AddInstruction(instr); |
184 | } |
185 | } |
186 | |
187 | Instruction* InstructionSelector::Emit(InstructionCode opcode, |
188 | InstructionOperand output, |
189 | size_t temp_count, |
190 | InstructionOperand* temps) { |
191 | size_t output_count = output.IsInvalid() ? 0 : 1; |
192 | return Emit(opcode, output_count, &output, 0, nullptr, temp_count, temps); |
193 | } |
194 | |
195 | Instruction* InstructionSelector::Emit(InstructionCode opcode, |
196 | InstructionOperand output, |
197 | InstructionOperand a, size_t temp_count, |
198 | InstructionOperand* temps) { |
199 | size_t output_count = output.IsInvalid() ? 0 : 1; |
200 | return Emit(opcode, output_count, &output, 1, &a, temp_count, temps); |
201 | } |
202 | |
203 | Instruction* InstructionSelector::Emit(InstructionCode opcode, |
204 | InstructionOperand output, |
205 | InstructionOperand a, |
206 | InstructionOperand b, size_t temp_count, |
207 | InstructionOperand* temps) { |
208 | size_t output_count = output.IsInvalid() ? 0 : 1; |
209 | InstructionOperand inputs[] = {a, b}; |
210 | size_t input_count = arraysize(inputs)(sizeof(ArraySizeHelper(inputs))); |
211 | return Emit(opcode, output_count, &output, input_count, inputs, temp_count, |
212 | temps); |
213 | } |
214 | |
215 | Instruction* InstructionSelector::Emit(InstructionCode opcode, |
216 | InstructionOperand output, |
217 | InstructionOperand a, |
218 | InstructionOperand b, |
219 | InstructionOperand c, size_t temp_count, |
220 | InstructionOperand* temps) { |
221 | size_t output_count = output.IsInvalid() ? 0 : 1; |
222 | InstructionOperand inputs[] = {a, b, c}; |
223 | size_t input_count = arraysize(inputs)(sizeof(ArraySizeHelper(inputs))); |
224 | return Emit(opcode, output_count, &output, input_count, inputs, temp_count, |
225 | temps); |
226 | } |
227 | |
228 | Instruction* InstructionSelector::Emit( |
229 | InstructionCode opcode, InstructionOperand output, InstructionOperand a, |
230 | InstructionOperand b, InstructionOperand c, InstructionOperand d, |
231 | size_t temp_count, InstructionOperand* temps) { |
232 | size_t output_count = output.IsInvalid() ? 0 : 1; |
233 | InstructionOperand inputs[] = {a, b, c, d}; |
234 | size_t input_count = arraysize(inputs)(sizeof(ArraySizeHelper(inputs))); |
235 | return Emit(opcode, output_count, &output, input_count, inputs, temp_count, |
236 | temps); |
237 | } |
238 | |
239 | Instruction* InstructionSelector::Emit( |
240 | InstructionCode opcode, InstructionOperand output, InstructionOperand a, |
241 | InstructionOperand b, InstructionOperand c, InstructionOperand d, |
242 | InstructionOperand e, size_t temp_count, InstructionOperand* temps) { |
243 | size_t output_count = output.IsInvalid() ? 0 : 1; |
244 | InstructionOperand inputs[] = {a, b, c, d, e}; |
245 | size_t input_count = arraysize(inputs)(sizeof(ArraySizeHelper(inputs))); |
246 | return Emit(opcode, output_count, &output, input_count, inputs, temp_count, |
247 | temps); |
248 | } |
249 | |
250 | Instruction* InstructionSelector::Emit( |
251 | InstructionCode opcode, InstructionOperand output, InstructionOperand a, |
252 | InstructionOperand b, InstructionOperand c, InstructionOperand d, |
253 | InstructionOperand e, InstructionOperand f, size_t temp_count, |
254 | InstructionOperand* temps) { |
255 | size_t output_count = output.IsInvalid() ? 0 : 1; |
256 | InstructionOperand inputs[] = {a, b, c, d, e, f}; |
257 | size_t input_count = arraysize(inputs)(sizeof(ArraySizeHelper(inputs))); |
258 | return Emit(opcode, output_count, &output, input_count, inputs, temp_count, |
259 | temps); |
260 | } |
261 | |
// General Emit(): allocates a new Instruction in the instruction zone and
// buffers it. Returns nullptr (and latches the failure flag) if any operand
// count exceeds the per-instruction encoding limits.
Instruction* InstructionSelector::Emit(
    InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
    size_t input_count, InstructionOperand* inputs, size_t temp_count,
    InstructionOperand* temps) {
  if (output_count >= Instruction::kMaxOutputCount ||
      input_count >= Instruction::kMaxInputCount ||
      temp_count >= Instruction::kMaxTempCount) {
    // Too many operands to encode; abort selection rather than miscompile.
    set_instruction_selection_failed();
    return nullptr;
  }

  Instruction* instr =
      Instruction::New(instruction_zone(), opcode, output_count, outputs,
                       input_count, inputs, temp_count, temps);
  return Emit(instr);
}
278 | |
// Buffers {instr} for the current block. Instructions are flushed to the
// InstructionSequence later, in SelectInstructions().
Instruction* InstructionSelector::Emit(Instruction* instr) {
  instructions_.push_back(instr);
  return instr;
}
283 | |
// Returns true if {node} can be "covered" by (folded into) the instruction
// selected for {user}, instead of being emitted as its own instruction.
bool InstructionSelector::CanCover(Node* user, Node* node) const {
  // 1. Both {user} and {node} must be in the same basic block.
  if (schedule()->block(node) != current_block_) {
    return false;
  }
  // 2. Pure {node}s must be owned by the {user}.
  if (node->op()->HasProperty(Operator::kPure)) {
    return node->OwnedBy(user);
  }
  // 3. Impure {node}s must match the effect level of {user}, otherwise an
  //    intervening effectful operation could observe the difference.
  if (GetEffectLevel(node) != current_effect_level_) {
    return false;
  }
  // 4. Only {node} must have value edges pointing to {user}; any other value
  //    use would still need the node materialized separately.
  for (Edge const edge : node->use_edges()) {
    if (edge.from() != user && NodeProperties::IsValueEdge(edge)) {
      return false;
    }
  }
  return true;
}
305 | |
306 | bool InstructionSelector::IsOnlyUserOfNodeInSameBlock(Node* user, |
307 | Node* node) const { |
308 | BasicBlock* bb_user = schedule()->block(user); |
309 | BasicBlock* bb_node = schedule()->block(node); |
310 | if (bb_user != bb_node) return false; |
311 | for (Edge const edge : node->use_edges()) { |
312 | Node* from = edge.from(); |
313 | if ((from != user) && (schedule()->block(from) == bb_user)) { |
314 | return false; |
315 | } |
316 | } |
317 | return true; |
318 | } |
319 | |
320 | void InstructionSelector::UpdateRenames(Instruction* instruction) { |
321 | for (size_t i = 0; i < instruction->InputCount(); i++) { |
322 | TryRename(instruction->InputAt(i)); |
323 | } |
324 | } |
325 | |
326 | void InstructionSelector::UpdateRenamesInPhi(PhiInstruction* phi) { |
327 | for (size_t i = 0; i < phi->operands().size(); i++) { |
328 | int vreg = phi->operands()[i]; |
329 | int renamed = GetRename(vreg); |
330 | if (vreg != renamed) { |
331 | phi->RenameInput(i, renamed); |
332 | } |
333 | } |
334 | } |
335 | |
// Follows the rename chain starting at {virtual_register} and returns the
// final replacement register. Registers beyond the table's size, or entries
// marked invalid, terminate the chain (no rename recorded).
int InstructionSelector::GetRename(int virtual_register) {
  int rename = virtual_register;
  while (true) {
    // No table entry for this register: it was never renamed.
    if (static_cast<size_t>(rename) >= virtual_register_rename_.size()) break;
    int next = virtual_register_rename_[rename];
    if (next == InstructionOperand::kInvalidVirtualRegister) {
      break;
    }
    // Renames may themselves be renamed; keep following the chain.
    rename = next;
  }
  return rename;
}
348 | |
349 | void InstructionSelector::TryRename(InstructionOperand* op) { |
350 | if (!op->IsUnallocated()) return; |
351 | UnallocatedOperand* unalloc = UnallocatedOperand::cast(op); |
352 | int vreg = unalloc->virtual_register(); |
353 | int rename = GetRename(vreg); |
354 | if (rename != vreg) { |
355 | *unalloc = UnallocatedOperand(*unalloc, rename); |
356 | } |
357 | } |
358 | |
359 | void InstructionSelector::SetRename(const Node* node, const Node* rename) { |
360 | int vreg = GetVirtualRegister(node); |
361 | if (static_cast<size_t>(vreg) >= virtual_register_rename_.size()) { |
362 | int invalid = InstructionOperand::kInvalidVirtualRegister; |
363 | virtual_register_rename_.resize(vreg + 1, invalid); |
364 | } |
365 | virtual_register_rename_[vreg] = GetVirtualRegister(rename); |
366 | } |
367 | |
// Returns the virtual register assigned to {node}, allocating a fresh one
// from the sequence on first request (lazy assignment).
int InstructionSelector::GetVirtualRegister(const Node* node) {
  DCHECK_NOT_NULL(node);
  size_t const id = node->id();
  DCHECK_LT(id, virtual_registers_.size());
  int virtual_register = virtual_registers_[id];
  if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
    // First use of this node: assign and memoize a new virtual register.
    virtual_register = sequence()->NextVirtualRegister();
    virtual_registers_[id] = virtual_register;
  }
  return virtual_register;
}
379 | |
380 | const std::map<NodeId, int> InstructionSelector::GetVirtualRegistersForTesting() |
381 | const { |
382 | std::map<NodeId, int> virtual_registers; |
383 | for (size_t n = 0; n < virtual_registers_.size(); ++n) { |
384 | if (virtual_registers_[n] != InstructionOperand::kInvalidVirtualRegister) { |
385 | NodeId const id = static_cast<NodeId>(n); |
386 | virtual_registers.insert(std::make_pair(id, virtual_registers_[n])); |
387 | } |
388 | } |
389 | return virtual_registers; |
390 | } |
391 | |
392 | bool InstructionSelector::IsDefined(Node* node) const { |
393 | DCHECK_NOT_NULL(node)((void) 0); |
394 | size_t const id = node->id(); |
395 | DCHECK_LT(id, defined_.size())((void) 0); |
396 | return defined_[id]; |
397 | } |
398 | |
399 | void InstructionSelector::MarkAsDefined(Node* node) { |
400 | DCHECK_NOT_NULL(node)((void) 0); |
401 | size_t const id = node->id(); |
402 | DCHECK_LT(id, defined_.size())((void) 0); |
403 | defined_[id] = true; |
404 | } |
405 | |
// Returns whether {node}'s result is needed, i.e. whether an instruction
// must be emitted for it. Non-eliminatable nodes are always "used".
bool InstructionSelector::IsUsed(Node* node) const {
  DCHECK_NOT_NULL(node);
  // TODO(bmeurer): This is a terrible monster hack, but we have to make sure
  // that the Retain is actually emitted, otherwise the GC will mess up.
  if (node->opcode() == IrOpcode::kRetain) return true;
  if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
  size_t const id = node->id();
  DCHECK_LT(id, used_.size());
  return used_[id];
}
416 | |
417 | void InstructionSelector::MarkAsUsed(Node* node) { |
418 | DCHECK_NOT_NULL(node)((void) 0); |
419 | size_t const id = node->id(); |
420 | DCHECK_LT(id, used_.size())((void) 0); |
421 | used_[id] = true; |
422 | } |
423 | |
424 | int InstructionSelector::GetEffectLevel(Node* node) const { |
425 | DCHECK_NOT_NULL(node)((void) 0); |
426 | size_t const id = node->id(); |
427 | DCHECK_LT(id, effect_level_.size())((void) 0); |
428 | return effect_level_[id]; |
429 | } |
430 | |
431 | int InstructionSelector::GetEffectLevel(Node* node, |
432 | FlagsContinuation* cont) const { |
433 | return cont->IsBranch() |
434 | ? GetEffectLevel( |
435 | cont->true_block()->PredecessorAt(0)->control_input()) |
436 | : GetEffectLevel(node); |
437 | } |
438 | |
439 | void InstructionSelector::SetEffectLevel(Node* node, int effect_level) { |
440 | DCHECK_NOT_NULL(node)((void) 0); |
441 | size_t const id = node->id(); |
442 | DCHECK_LT(id, effect_level_.size())((void) 0); |
443 | effect_level_[id] = effect_level; |
444 | } |
445 | |
// Decides whether {reference} can be addressed as a fixed offset from the
// roots register instead of being materialized as a full-width constant.
bool InstructionSelector::CanAddressRelativeToRootsRegister(
    const ExternalReference& reference) const {
  // There are three things to consider here:
  // 1. CanUseRootsRegister: Is kRootRegister initialized?
  const bool root_register_is_available_and_initialized = CanUseRootsRegister();
  if (!root_register_is_available_and_initialized) return false;

  // 2. enable_roots_relative_addressing_: Can we address everything on the heap
  //    through the root register, i.e. are root-relative addresses to arbitrary
  //    addresses guaranteed not to change between code generation and
  //    execution?
  const bool all_root_relative_offsets_are_constant =
      (enable_roots_relative_addressing_ == kEnableRootsRelativeAddressing);
  if (all_root_relative_offsets_are_constant) return true;

  // 3. IsAddressableThroughRootRegister: Is the target address guaranteed to
  //    have a fixed root-relative offset? If so, we can ignore 2.
  const bool this_root_relative_offset_is_constant =
      TurboAssemblerBase::IsAddressableThroughRootRegister(isolate(),
                                                           reference);
  return this_root_relative_offset_is_constant;
}
468 | |
469 | bool InstructionSelector::CanUseRootsRegister() const { |
470 | return linkage()->GetIncomingDescriptor()->flags() & |
471 | CallDescriptor::kCanUseRoots; |
472 | } |
473 | |
474 | void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep, |
475 | const InstructionOperand& op) { |
476 | UnallocatedOperand unalloc = UnallocatedOperand::cast(op); |
477 | sequence()->MarkAsRepresentation(rep, unalloc.virtual_register()); |
478 | } |
479 | |
480 | void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep, |
481 | Node* node) { |
482 | sequence()->MarkAsRepresentation(rep, GetVirtualRegister(node)); |
483 | } |
484 | |
485 | namespace { |
486 | |
// Produces the instruction operand used to describe {input} in deopt frame
// state info. Constants become immediates; everything else becomes a slot or
// register use according to {kind}. An invalid (default-constructed) operand
// signals an optimized-out / impossible value to the deoptimizer.
InstructionOperand OperandForDeopt(Isolate* isolate, OperandGenerator* g,
                                   Node* input, FrameStateInputKind kind,
                                   MachineRepresentation rep) {
  if (rep == MachineRepresentation::kNone) {
    return g->TempImmediate(FrameStateDescriptor::kImpossibleValue);
  }

  switch (input->opcode()) {
    case IrOpcode::kInt32Constant:
    case IrOpcode::kInt64Constant:
    case IrOpcode::kFloat32Constant:
    case IrOpcode::kFloat64Constant:
    case IrOpcode::kDelayedStringConstant:
      return g->UseImmediate(input);
    case IrOpcode::kNumberConstant:
      if (rep == MachineRepresentation::kWord32) {
        // A Word32-typed number must be Smi-sized; encode the Smi bits
        // directly as an immediate.
        Smi smi = NumberConstantToSmi(input);
        return g->UseImmediate(static_cast<int32_t>(smi.ptr()));
      } else {
        return g->UseImmediate(input);
      }
    case IrOpcode::kCompressedHeapConstant:
    case IrOpcode::kHeapConstant: {
      if (!CanBeTaggedOrCompressedPointer(rep)) {
        // If we have inconsistent static and dynamic types, e.g. if we
        // smi-check a string, we can get here with a heap object that
        // says it is a smi. In that case, we return an invalid instruction
        // operand, which will be interpreted as an optimized-out value.

        // TODO(jarin) Ideally, we should turn the current instruction
        // into an abort (we should never execute it).
        return InstructionOperand();
      }

      Handle<HeapObject> constant = HeapConstantOf(input->op());
      RootIndex root_index;
      if (isolate->roots_table().IsRootHandle(constant, &root_index) &&
          root_index == RootIndex::kOptimizedOut) {
        // For an optimized-out object we return an invalid instruction
        // operand, so that we take the fast path for optimized-out values.
        return InstructionOperand();
      }

      return g->UseImmediate(input);
    }
    case IrOpcode::kArgumentsElementsState:
    case IrOpcode::kArgumentsLengthState:
    case IrOpcode::kObjectState:
    case IrOpcode::kTypedObjectState:
      // These are handled by AddOperandToStateValueDescriptor, never here.
      UNREACHABLE();
    default:
      switch (kind) {
        case FrameStateInputKind::kStackSlot:
          return g->UseUniqueSlot(input);
        case FrameStateInputKind::kAny:
          // Currently deopts "wrap" other operations, so the deopt's inputs
          // are potentially needed until the end of the deoptimising code.
          return g->UseAnyAtEnd(input);
      }
  }
  UNREACHABLE();
}
549 | |
550 | } // namespace |
551 | |
// Tracks objects appearing in deopt frame states so duplicated objects are
// emitted once and then referenced by id. Ids are assigned in insertion
// order, matching the deoptimizer's running object count.
class StateObjectDeduplicator {
 public:
  explicit StateObjectDeduplicator(Zone* zone) : objects_(zone) {}
  // Sentinel returned by GetObjectId() when the object was not seen before.
  static const size_t kNotDuplicated = SIZE_MAX;

  // Returns the id of a previously inserted object equal to {node}, or
  // kNotDuplicated if this is its first appearance.
  size_t GetObjectId(Node* node) {
    DCHECK(node->opcode() == IrOpcode::kTypedObjectState ||
           node->opcode() == IrOpcode::kObjectId ||
           node->opcode() == IrOpcode::kArgumentsElementsState);
    for (size_t i = 0; i < objects_.size(); ++i) {
      if (objects_[i] == node) return i;
      // ObjectId nodes are the Turbofan way to express objects with the same
      // identity in the deopt info. So they should always be mapped to
      // previously appearing TypedObjectState nodes.
      if (HasObjectId(objects_[i]) && HasObjectId(node) &&
          ObjectIdOf(objects_[i]->op()) == ObjectIdOf(node->op())) {
        return i;
      }
    }
    // A bare ObjectId must always match an earlier entry; only state nodes
    // may legitimately be new here.
    DCHECK(node->opcode() == IrOpcode::kTypedObjectState ||
           node->opcode() == IrOpcode::kArgumentsElementsState);
    return kNotDuplicated;
  }

  // Appends {node} and returns its newly assigned id.
  size_t InsertObject(Node* node) {
    DCHECK(node->opcode() == IrOpcode::kTypedObjectState ||
           node->opcode() == IrOpcode::kObjectId ||
           node->opcode() == IrOpcode::kArgumentsElementsState);
    size_t id = objects_.size();
    objects_.push_back(node);
    return id;
  }

  size_t size() const { return objects_.size(); }

 private:
  // True for node kinds that carry an explicit object identity.
  static bool HasObjectId(Node* node) {
    return node->opcode() == IrOpcode::kTypedObjectState ||
           node->opcode() == IrOpcode::kObjectId;
  }

  // Insertion-ordered list of seen objects; index == object id.
  ZoneVector<Node*> objects_;
};
595 | |
// Returns the number of instruction operands added to inputs.
// Translates one frame-state input {input} into state-value descriptor
// entries (and, for plain values, an instruction operand). Recurses into
// object states, using {deduplicator} to collapse repeated objects.
size_t InstructionSelector::AddOperandToStateValueDescriptor(
    StateValueList* values, InstructionOperandVector* inputs,
    OperandGenerator* g, StateObjectDeduplicator* deduplicator, Node* input,
    MachineType type, FrameStateInputKind kind, Zone* zone) {
  DCHECK_NOT_NULL(input);
  switch (input->opcode()) {
    case IrOpcode::kArgumentsElementsState: {
      values->PushArgumentsElements(ArgumentsStateTypeOf(input->op()));
      // The elements backing store of an arguments object participates in the
      // duplicate object counting, but can itself never appear duplicated.
      DCHECK_EQ(StateObjectDeduplicator::kNotDuplicated,
                deduplicator->GetObjectId(input));
      deduplicator->InsertObject(input);
      return 0;
    }
    case IrOpcode::kArgumentsLengthState: {
      values->PushArgumentsLength();
      return 0;
    }
    case IrOpcode::kObjectState:
      // Untyped object states must have been typed before selection.
      UNREACHABLE();
    case IrOpcode::kTypedObjectState:
    case IrOpcode::kObjectId: {
      size_t id = deduplicator->GetObjectId(input);
      if (id == StateObjectDeduplicator::kNotDuplicated) {
        // First occurrence: register the object, then recurse into its
        // fields under a nested state-value list.
        DCHECK_EQ(IrOpcode::kTypedObjectState, input->opcode());
        size_t entries = 0;
        id = deduplicator->InsertObject(input);
        StateValueList* nested = values->PushRecursiveField(zone, id);
        int const input_count = input->op()->ValueInputCount();
        ZoneVector<MachineType> const* types = MachineTypesOf(input->op());
        for (int i = 0; i < input_count; ++i) {
          entries += AddOperandToStateValueDescriptor(
              nested, inputs, g, deduplicator, input->InputAt(i), types->at(i),
              kind, zone);
        }
        return entries;
      } else {
        // Deoptimizer counts duplicate objects for the running id, so we have
        // to push the input again.
        deduplicator->InsertObject(input);
        values->PushDuplicate(id);
        return 0;
      }
    }
    default: {
      // Plain value: materialize an operand for the deoptimizer.
      InstructionOperand op =
          OperandForDeopt(isolate(), g, input, kind, type.representation());
      if (op.kind() == InstructionOperand::INVALID) {
        // Invalid operand means the value is impossible or optimized-out.
        values->PushOptimizedOut();
        return 0;
      } else {
        inputs->push_back(op);
        values->PushPlain(type);
        return 1;
      }
    }
  }
}
657 | |
// Snapshot of the operands and state-value slice produced for one StateValues
// node, so identical frame-state subtrees can be replayed cheaply instead of
// being reprocessed.
struct InstructionSelector::CachedStateValues : public ZoneObject {
 public:
  // Captures everything appended to {inputs}/{values} since the given start
  // offsets (i.e. the contribution of a single StateValues node).
  CachedStateValues(Zone* zone, StateValueList* values, size_t values_start,
                    InstructionOperandVector* inputs, size_t inputs_start)
      : inputs_(inputs->begin() + inputs_start, inputs->end(), zone),
        values_(values->MakeSlice(values_start)) {}

  // Replays the snapshot into {inputs}/{values}; returns the number of
  // instruction operands appended.
  size_t Emit(InstructionOperandVector* inputs, StateValueList* values) {
    inputs->insert(inputs->end(), inputs_.begin(), inputs_.end());
    values->PushCachedSlice(values_);
    return inputs_.size();
  }

 private:
  InstructionOperandVector inputs_;
  StateValueList::Slice values_;
};
675 | |
// Helper that watches a StateValues translation in progress and, if caching
// is valid afterwards, packages the produced span into a CachedStateValues.
class InstructionSelector::CachedStateValuesBuilder {
 public:
  // Records the current sizes of all three streams so the contribution of
  // the upcoming translation can be sliced out later.
  explicit CachedStateValuesBuilder(StateValueList* values,
                                    InstructionOperandVector* inputs,
                                    StateObjectDeduplicator* deduplicator)
      : values_(values),
        inputs_(inputs),
        deduplicator_(deduplicator),
        values_start_(values->size()),
        nested_start_(values->nested_count()),
        inputs_start_(inputs->size()),
        deduplicator_start_(deduplicator->size()) {}

  // We can only build a CachedStateValues for a StateValue if it didn't update
  // any of the ids in the deduplicator.
  bool CanCache() const { return deduplicator_->size() == deduplicator_start_; }

  // Builds the cache entry; only valid when CanCache() holds.
  InstructionSelector::CachedStateValues* Build(Zone* zone) {
    DCHECK(CanCache());
    DCHECK(values_->nested_count() == nested_start_);
    return zone->New<InstructionSelector::CachedStateValues>(
        zone, values_, values_start_, inputs_, inputs_start_);
  }

 private:
  StateValueList* values_;
  InstructionOperandVector* inputs_;
  StateObjectDeduplicator* deduplicator_;
  size_t values_start_;       // values_->size() at construction.
  size_t nested_start_;       // values_->nested_count() at construction.
  size_t inputs_start_;       // inputs_->size() at construction.
  size_t deduplicator_start_; // deduplicator_->size() at construction.
};
709 | |
// Translates one StateValues {node} into descriptor entries and operands,
// memoizing the result keyed on (node, kind). Returns the number of
// instruction operands appended to {inputs}.
size_t InstructionSelector::AddInputsToFrameStateDescriptor(
    StateValueList* values, InstructionOperandVector* inputs,
    OperandGenerator* g, StateObjectDeduplicator* deduplicator, Node* node,
    FrameStateInputKind kind, Zone* zone) {
  // StateValues are often shared across different nodes, and processing them is
  // expensive, so cache the result of processing a StateValue so that we can
  // quickly copy the result if we see it again.
  FrameStateInput key(node, kind);
  auto cache_entry = state_values_cache_.find(key);
  if (cache_entry != state_values_cache_.end()) {
    // Entry found in cache, emit cached version.
    return cache_entry->second->Emit(inputs, values);
  } else {
    // Not found in cache, generate and then store in cache if possible.
    size_t entries = 0;
    CachedStateValuesBuilder cache_builder(values, inputs, deduplicator);
    StateValuesAccess::iterator it = StateValuesAccess(node).begin();
    // Take advantage of sparse nature of StateValuesAccess to skip over
    // multiple empty nodes at once pushing repeated OptimizedOuts all in one
    // go.
    while (!it.done()) {
      values->PushOptimizedOut(it.AdvanceTillNotEmpty());
      if (it.done()) break;
      StateValuesAccess::TypedNode input_node = *it;
      entries += AddOperandToStateValueDescriptor(values, inputs, g,
                                                  deduplicator, input_node.node,
                                                  input_node.type, kind, zone);
      ++it;
    }
    if (cache_builder.CanCache()) {
      // Use this->zone() to build the cache entry in the instruction selector's
      // zone rather than the more long-lived instruction zone.
      state_values_cache_.emplace(key, cache_builder.Build(this->zone()));
    }
    return entries;
  }
}
747 | |
// Returns the number of instruction operands added to inputs.
// Flattens a whole FrameState (including its chain of outer states, emitted
// first) into {inputs} and the descriptor's state-value list, in the order
// the deoptimizer expects: function, parameters, context, locals, stack.
size_t InstructionSelector::AddInputsToFrameStateDescriptor(
    FrameStateDescriptor* descriptor, FrameState state, OperandGenerator* g,
    StateObjectDeduplicator* deduplicator, InstructionOperandVector* inputs,
    FrameStateInputKind kind, Zone* zone) {
  size_t entries = 0;
  size_t initial_size = inputs->size();
  USE(initial_size);  // initial_size is only used for debug.

  // Outer (caller) frame states come first, recursively.
  if (descriptor->outer_state()) {
    entries += AddInputsToFrameStateDescriptor(
        descriptor->outer_state(), FrameState{state.outer_frame_state()}, g,
        deduplicator, inputs, kind, zone);
  }

  Node* parameters = state.parameters();
  Node* locals = state.locals();
  Node* stack = state.stack();
  Node* context = state.context();
  Node* function = state.function();

  // The descriptor's static counts must match the actual state inputs.
  DCHECK_EQ(descriptor->parameters_count(),
            StateValuesAccess(parameters).size());
  DCHECK_EQ(descriptor->locals_count(), StateValuesAccess(locals).size());
  DCHECK_EQ(descriptor->stack_count(), StateValuesAccess(stack).size());

  StateValueList* values_descriptor = descriptor->GetStateValueDescriptors();

  DCHECK_EQ(values_descriptor->size(), 0u);
  values_descriptor->ReserveSize(descriptor->GetSize());

  // The closure is always materialized in a stack slot.
  DCHECK_NOT_NULL(function);
  entries += AddOperandToStateValueDescriptor(
      values_descriptor, inputs, g, deduplicator, function,
      MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);

  entries += AddInputsToFrameStateDescriptor(
      values_descriptor, inputs, g, deduplicator, parameters, kind, zone);

  if (descriptor->HasContext()) {
    DCHECK_NOT_NULL(context);
    entries += AddOperandToStateValueDescriptor(
        values_descriptor, inputs, g, deduplicator, context,
        MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);
  }

  entries += AddInputsToFrameStateDescriptor(values_descriptor, inputs, g,
                                             deduplicator, locals, kind, zone);
  entries += AddInputsToFrameStateDescriptor(values_descriptor, inputs, g,
                                             deduplicator, stack, kind, zone);
  DCHECK_EQ(initial_size + entries, inputs->size());
  return entries;
}
801 | |
802 | Instruction* InstructionSelector::EmitWithContinuation( |
803 | InstructionCode opcode, InstructionOperand a, FlagsContinuation* cont) { |
804 | return EmitWithContinuation(opcode, 0, nullptr, 1, &a, cont); |
805 | } |
806 | |
807 | Instruction* InstructionSelector::EmitWithContinuation( |
808 | InstructionCode opcode, InstructionOperand a, InstructionOperand b, |
809 | FlagsContinuation* cont) { |
810 | InstructionOperand inputs[] = {a, b}; |
811 | return EmitWithContinuation(opcode, 0, nullptr, arraysize(inputs)(sizeof(ArraySizeHelper(inputs))), inputs, |
812 | cont); |
813 | } |
814 | |
815 | Instruction* InstructionSelector::EmitWithContinuation( |
816 | InstructionCode opcode, InstructionOperand a, InstructionOperand b, |
817 | InstructionOperand c, FlagsContinuation* cont) { |
818 | InstructionOperand inputs[] = {a, b, c}; |
819 | return EmitWithContinuation(opcode, 0, nullptr, arraysize(inputs)(sizeof(ArraySizeHelper(inputs))), inputs, |
820 | cont); |
821 | } |
822 | |
823 | Instruction* InstructionSelector::EmitWithContinuation( |
824 | InstructionCode opcode, size_t output_count, InstructionOperand* outputs, |
825 | size_t input_count, InstructionOperand* inputs, FlagsContinuation* cont) { |
826 | return EmitWithContinuation(opcode, output_count, outputs, input_count, |
827 | inputs, 0, nullptr, cont); |
828 | } |
829 | |
// Emits an instruction whose flags output is consumed by {cont}. The
// continuation kind (branch/deoptimize/set/select/trap/none) determines which
// extra operands are appended to the instruction before emission.
Instruction* InstructionSelector::EmitWithContinuation(
    InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
    size_t input_count, InstructionOperand* inputs, size_t temp_count,
    InstructionOperand* temps, FlagsContinuation* cont) {
  OperandGenerator g(this);

  // Fold the continuation's condition/mode bits into the opcode.
  opcode = cont->Encode(opcode);

  // The continuation_* members are reusable scratch vectors; resize(0) clears
  // them while retaining capacity across calls.
  continuation_inputs_.resize(0);
  for (size_t i = 0; i < input_count; i++) {
    continuation_inputs_.push_back(inputs[i]);
  }

  continuation_outputs_.resize(0);
  for (size_t i = 0; i < output_count; i++) {
    continuation_outputs_.push_back(outputs[i]);
  }

  continuation_temps_.resize(0);
  for (size_t i = 0; i < temp_count; i++) {
    continuation_temps_.push_back(temps[i]);
  }

  if (cont->IsBranch()) {
    // Branch: append the true/false target labels as extra inputs.
    continuation_inputs_.push_back(g.Label(cont->true_block()));
    continuation_inputs_.push_back(g.Label(cont->false_block()));
  } else if (cont->IsDeoptimize()) {
    // Deoptimize: record where the frame-state inputs start in the input
    // list, then append the deoptimization entry id and frame-state operands.
    int immediate_args_count = 0;
    opcode |= DeoptImmedArgsCountField::encode(immediate_args_count) |
              DeoptFrameStateOffsetField::encode(static_cast<int>(input_count));
    AppendDeoptimizeArguments(&continuation_inputs_, cont->reason(),
                              cont->node_id(), cont->feedback(),
                              FrameState{cont->frame_state()});
  } else if (cont->IsSet()) {
    // Set: materialize the flag into a register output.
    continuation_outputs_.push_back(g.DefineAsRegister(cont->result()));
  } else if (cont->IsSelect()) {
    // The {Select} should put one of two values into the output register,
    // depending on the result of the condition. The two result values are in
    // the last two input slots, the {false_value} in {input_count - 2}, and the
    // true_value in {input_count - 1}. The other inputs are used for the
    // condition.
    AddOutputToSelectContinuation(&g, static_cast<int>(input_count) - 2,
                                  cont->result());
  } else if (cont->IsTrap()) {
    // Trap: append the trap id as an immediate input.
    int trap_id = static_cast<int>(cont->trap_id());
    continuation_inputs_.push_back(g.UseImmediate(trap_id));
  } else {
    DCHECK(cont->IsNone());
  }

  // Take addresses only when the vectors are non-empty; &front() on an empty
  // vector would be undefined behavior.
  size_t const emit_inputs_size = continuation_inputs_.size();
  auto* emit_inputs =
      emit_inputs_size ? &continuation_inputs_.front() : nullptr;
  size_t const emit_outputs_size = continuation_outputs_.size();
  auto* emit_outputs =
      emit_outputs_size ? &continuation_outputs_.front() : nullptr;
  size_t const emit_temps_size = continuation_temps_.size();
  auto* emit_temps = emit_temps_size ? &continuation_temps_.front() : nullptr;
  return Emit(opcode, emit_outputs_size, emit_outputs, emit_inputs_size,
              emit_inputs, emit_temps_size, emit_temps);
}
891 | |
// Appends the operands describing an eager deoptimization to {args}: first an
// immediate holding the deoptimization entry id registered with the
// instruction sequence, then the flattened frame-state inputs.
void InstructionSelector::AppendDeoptimizeArguments(
    InstructionOperandVector* args, DeoptimizeReason reason, NodeId node_id,
    FeedbackSource const& feedback, FrameState frame_state) {
  OperandGenerator g(this);
  FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state);
  int const state_id = sequence()->AddDeoptimizationEntry(
      descriptor, DeoptimizeKind::kEager, reason, node_id, feedback);
  args->push_back(g.TempImmediate(state_id));
  // The deduplicator lives in the instruction zone so duplicated objects are
  // tracked for the lifetime of the generated code's metadata.
  StateObjectDeduplicator deduplicator(instruction_zone());
  AddInputsToFrameStateDescriptor(descriptor, frame_state, &g, &deduplicator,
                                  args, FrameStateInputKind::kAny,
                                  instruction_zone());
}
905 | |
// An internal helper class for generating the operands to calls.
// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
struct CallBuffer {
  CallBuffer(Zone* zone, const CallDescriptor* call_descriptor,
             FrameStateDescriptor* frame_state)
      : descriptor(call_descriptor),
        frame_state_descriptor(frame_state),
        output_nodes(zone),
        outputs(zone),
        instruction_args(zone),
        pushed_nodes(zone) {
    // Pre-size the vectors from the descriptor so filling them in
    // InitializeCallBuffer does not reallocate.
    output_nodes.reserve(call_descriptor->ReturnCount());
    outputs.reserve(call_descriptor->ReturnCount());
    pushed_nodes.reserve(input_count());
    instruction_args.reserve(input_count() + frame_state_value_count());
  }

  const CallDescriptor* descriptor;         // signature of the call
  FrameStateDescriptor* frame_state_descriptor;  // nullptr if no frame state
  ZoneVector<PushParameter> output_nodes;   // call results (per return slot)
  InstructionOperandVector outputs;         // live output operands
  InstructionOperandVector instruction_args;  // operands passed to the call
  ZoneVector<PushParameter> pushed_nodes;   // args pushed before the call

  size_t input_count() const { return descriptor->InputCount(); }

  size_t frame_state_count() const { return descriptor->FrameStateCount(); }

  // Number of instruction operands the frame state contributes, or 0 when
  // there is none.
  size_t frame_state_value_count() const {
    return (frame_state_descriptor == nullptr)
               ? 0
               : (frame_state_descriptor->GetTotalSize() +
                  1);  // Include deopt id.
  }
};
942 | |
943 | // TODO(bmeurer): Get rid of the CallBuffer business and make |
944 | // InstructionSelector::VisitCall platform independent instead. |
945 | void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer, |
946 | CallBufferFlags flags, |
947 | int stack_param_delta) { |
948 | OperandGenerator g(this); |
949 | size_t ret_count = buffer->descriptor->ReturnCount(); |
950 | bool is_tail_call = (flags & kCallTail) != 0; |
951 | DCHECK_LE(call->op()->ValueOutputCount(), ret_count)((void) 0); |
952 | DCHECK_EQ(((void) 0) |
953 | call->op()->ValueInputCount(),((void) 0) |
954 | static_cast<int>(buffer->input_count() + buffer->frame_state_count()))((void) 0); |
955 | |
956 | if (ret_count > 0) { |
957 | // Collect the projections that represent multiple outputs from this call. |
958 | if (ret_count == 1) { |
959 | PushParameter result = {call, buffer->descriptor->GetReturnLocation(0)}; |
960 | buffer->output_nodes.push_back(result); |
961 | } else { |
962 | buffer->output_nodes.resize(ret_count); |
963 | for (size_t i = 0; i < ret_count; ++i) { |
964 | LinkageLocation location = buffer->descriptor->GetReturnLocation(i); |
965 | buffer->output_nodes[i] = PushParameter(nullptr, location); |
966 | } |
967 | for (Edge const edge : call->use_edges()) { |
968 | if (!NodeProperties::IsValueEdge(edge)) continue; |
969 | Node* node = edge.from(); |
970 | DCHECK_EQ(IrOpcode::kProjection, node->opcode())((void) 0); |
971 | size_t const index = ProjectionIndexOf(node->op()); |
972 | |
973 | DCHECK_LT(index, buffer->output_nodes.size())((void) 0); |
974 | DCHECK(!buffer->output_nodes[index].node)((void) 0); |
975 | buffer->output_nodes[index].node = node; |
976 | } |
977 | |
978 | frame_->EnsureReturnSlots( |
979 | static_cast<int>(buffer->descriptor->ReturnSlotCount())); |
980 | } |
981 | |
982 | // Filter out the outputs that aren't live because no projection uses them. |
983 | size_t outputs_needed_by_framestate = |
984 | buffer->frame_state_descriptor == nullptr |
985 | ? 0 |
986 | : buffer->frame_state_descriptor->state_combine() |
987 | .ConsumedOutputCount(); |
988 | for (size_t i = 0; i < buffer->output_nodes.size(); i++) { |
989 | bool output_is_live = buffer->output_nodes[i].node != nullptr || |
990 | i < outputs_needed_by_framestate; |
991 | if (output_is_live) { |
992 | LinkageLocation location = buffer->output_nodes[i].location; |
993 | MachineRepresentation rep = location.GetType().representation(); |
994 | |
995 | Node* output = buffer->output_nodes[i].node; |
996 | InstructionOperand op = output == nullptr |
997 | ? g.TempLocation(location) |
998 | : g.DefineAsLocation(output, location); |
999 | MarkAsRepresentation(rep, op); |
1000 | |
1001 | if (!UnallocatedOperand::cast(op).HasFixedSlotPolicy()) { |
1002 | buffer->outputs.push_back(op); |
1003 | buffer->output_nodes[i].node = nullptr; |
1004 | } |
1005 | } |
1006 | } |
1007 | } |
1008 | |
1009 | // The first argument is always the callee code. |
1010 | Node* callee = call->InputAt(0); |
1011 | bool call_code_immediate = (flags & kCallCodeImmediate) != 0; |
1012 | bool call_address_immediate = (flags & kCallAddressImmediate) != 0; |
1013 | bool call_use_fixed_target_reg = (flags & kCallFixedTargetRegister) != 0; |
1014 | switch (buffer->descriptor->kind()) { |
1015 | case CallDescriptor::kCallCodeObject: |
1016 | buffer->instruction_args.push_back( |
1017 | (call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant) |
1018 | ? g.UseImmediate(callee) |
1019 | : call_use_fixed_target_reg |
1020 | ? g.UseFixed(callee, kJavaScriptCallCodeStartRegister) |
1021 | : g.UseRegister(callee)); |
1022 | break; |
1023 | case CallDescriptor::kCallAddress: |
1024 | buffer->instruction_args.push_back( |
1025 | (call_address_immediate && |
1026 | callee->opcode() == IrOpcode::kExternalConstant) |
1027 | ? g.UseImmediate(callee) |
1028 | : call_use_fixed_target_reg |
1029 | ? g.UseFixed(callee, kJavaScriptCallCodeStartRegister) |
1030 | : g.UseRegister(callee)); |
1031 | break; |
1032 | #if V8_ENABLE_WEBASSEMBLY1 |
1033 | case CallDescriptor::kCallWasmCapiFunction: |
1034 | case CallDescriptor::kCallWasmFunction: |
1035 | case CallDescriptor::kCallWasmImportWrapper: |
1036 | buffer->instruction_args.push_back( |
1037 | (call_address_immediate && |
1038 | (callee->opcode() == IrOpcode::kRelocatableInt64Constant || |
1039 | callee->opcode() == IrOpcode::kRelocatableInt32Constant)) |
1040 | ? g.UseImmediate(callee) |
1041 | : call_use_fixed_target_reg |
1042 | ? g.UseFixed(callee, kJavaScriptCallCodeStartRegister) |
1043 | : g.UseRegister(callee)); |
1044 | break; |
1045 | #endif // V8_ENABLE_WEBASSEMBLY |
1046 | case CallDescriptor::kCallBuiltinPointer: |
1047 | // The common case for builtin pointers is to have the target in a |
1048 | // register. If we have a constant, we use a register anyway to simplify |
1049 | // related code. |
1050 | buffer->instruction_args.push_back( |
1051 | call_use_fixed_target_reg |
1052 | ? g.UseFixed(callee, kJavaScriptCallCodeStartRegister) |
1053 | : g.UseRegister(callee)); |
1054 | break; |
1055 | case CallDescriptor::kCallJSFunction: |
1056 | buffer->instruction_args.push_back( |
1057 | g.UseLocation(callee, buffer->descriptor->GetInputLocation(0))); |
1058 | break; |
1059 | } |
1060 | DCHECK_EQ(1u, buffer->instruction_args.size())((void) 0); |
1061 | |
1062 | // If the call needs a frame state, we insert the state information as |
1063 | // follows (n is the number of value inputs to the frame state): |
1064 | // arg 1 : deoptimization id. |
1065 | // arg 2 - arg (n + 2) : value inputs to the frame state. |
1066 | size_t frame_state_entries = 0; |
1067 | USE(frame_state_entries)do { ::v8::base::Use unused_tmp_array_for_use_macro[]{frame_state_entries }; (void)unused_tmp_array_for_use_macro; } while (false); // frame_state_entries is only used for debug. |
1068 | if (buffer->frame_state_descriptor != nullptr) { |
1069 | FrameState frame_state{ |
1070 | call->InputAt(static_cast<int>(buffer->descriptor->InputCount()))}; |
1071 | |
1072 | // If it was a syntactic tail call we need to drop the current frame and |
1073 | // all the frames on top of it that are either an arguments adaptor frame |
1074 | // or a tail caller frame. |
1075 | if (is_tail_call) { |
1076 | frame_state = FrameState{NodeProperties::GetFrameStateInput(frame_state)}; |
1077 | buffer->frame_state_descriptor = |
1078 | buffer->frame_state_descriptor->outer_state(); |
1079 | while (buffer->frame_state_descriptor != nullptr && |
1080 | buffer->frame_state_descriptor->type() == |
1081 | FrameStateType::kArgumentsAdaptor) { |
1082 | frame_state = |
1083 | FrameState{NodeProperties::GetFrameStateInput(frame_state)}; |
1084 | buffer->frame_state_descriptor = |
1085 | buffer->frame_state_descriptor->outer_state(); |
1086 | } |
1087 | } |
1088 | |
1089 | int const state_id = sequence()->AddDeoptimizationEntry( |
1090 | buffer->frame_state_descriptor, DeoptimizeKind::kLazy, |
1091 | DeoptimizeReason::kUnknown, call->id(), FeedbackSource()); |
1092 | buffer->instruction_args.push_back(g.TempImmediate(state_id)); |
1093 | |
1094 | StateObjectDeduplicator deduplicator(instruction_zone()); |
1095 | |
1096 | frame_state_entries = |
Value stored to 'frame_state_entries' is never read | |
1097 | 1 + AddInputsToFrameStateDescriptor( |
1098 | buffer->frame_state_descriptor, frame_state, &g, &deduplicator, |
1099 | &buffer->instruction_args, FrameStateInputKind::kStackSlot, |
1100 | instruction_zone()); |
1101 | |
1102 | DCHECK_EQ(1 + frame_state_entries, buffer->instruction_args.size())((void) 0); |
1103 | } |
1104 | |
1105 | size_t input_count = static_cast<size_t>(buffer->input_count()); |
1106 | |
1107 | // Split the arguments into pushed_nodes and instruction_args. Pushed |
1108 | // arguments require an explicit push instruction before the call and do |
1109 | // not appear as arguments to the call. Everything else ends up |
1110 | // as an InstructionOperand argument to the call. |
1111 | auto iter(call->inputs().begin()); |
1112 | size_t pushed_count = 0; |
1113 | for (size_t index = 0; index < input_count; ++iter, ++index) { |
1114 | DCHECK(iter != call->inputs().end())((void) 0); |
1115 | DCHECK_NE(IrOpcode::kFrameState, (*iter)->op()->opcode())((void) 0); |
1116 | if (index == 0) continue; // The first argument (callee) is already done. |
1117 | |
1118 | LinkageLocation location = buffer->descriptor->GetInputLocation(index); |
1119 | if (is_tail_call) { |
1120 | location = LinkageLocation::ConvertToTailCallerLocation( |
1121 | location, stack_param_delta); |
1122 | } |
1123 | InstructionOperand op = g.UseLocation(*iter, location); |
1124 | UnallocatedOperand unallocated = UnallocatedOperand::cast(op); |
1125 | if (unallocated.HasFixedSlotPolicy() && !is_tail_call) { |
1126 | int stack_index = buffer->descriptor->GetStackIndexFromSlot( |
1127 | unallocated.fixed_slot_index()); |
1128 | // This can insert empty slots before stack_index and will insert enough |
1129 | // slots after stack_index to store the parameter. |
1130 | if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) { |
1131 | int num_slots = location.GetSizeInPointers(); |
1132 | buffer->pushed_nodes.resize(stack_index + num_slots); |
1133 | } |
1134 | PushParameter param = {*iter, location}; |
1135 | buffer->pushed_nodes[stack_index] = param; |
1136 | pushed_count++; |
1137 | } else { |
1138 | buffer->instruction_args.push_back(op); |
1139 | } |
1140 | } |
1141 | DCHECK_EQ(input_count, buffer->instruction_args.size() + pushed_count -((void) 0) |
1142 | frame_state_entries)((void) 0); |
1143 | USE(pushed_count)do { ::v8::base::Use unused_tmp_array_for_use_macro[]{pushed_count }; (void)unused_tmp_array_for_use_macro; } while (false); |
1144 | if (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACKtrue && is_tail_call && |
1145 | stack_param_delta != 0) { |
1146 | // For tail calls that change the size of their parameter list and keep |
1147 | // their return address on the stack, move the return address to just above |
1148 | // the parameters. |
1149 | LinkageLocation saved_return_location = |
1150 | LinkageLocation::ForSavedCallerReturnAddress(); |
1151 | InstructionOperand return_address = |
1152 | g.UsePointerLocation(LinkageLocation::ConvertToTailCallerLocation( |
1153 | saved_return_location, stack_param_delta), |
1154 | saved_return_location); |
1155 | buffer->instruction_args.push_back(return_address); |
1156 | } |
1157 | } |
1158 | |
1159 | bool InstructionSelector::IsSourcePositionUsed(Node* node) { |
1160 | return (source_position_mode_ == kAllSourcePositions || |
1161 | node->opcode() == IrOpcode::kCall || |
1162 | node->opcode() == IrOpcode::kTrapIf || |
1163 | node->opcode() == IrOpcode::kTrapUnless || |
1164 | node->opcode() == IrOpcode::kProtectedLoad || |
1165 | node->opcode() == IrOpcode::kProtectedStore); |
1166 | } |
1167 | |
// Selects instructions for one basic block: assigns effect levels to the
// block's nodes, visits the control node and then the body nodes in reverse
// order (emitting each node's instructions and reversing them into place),
// and finally records the block's instruction range.
void InstructionSelector::VisitBlock(BasicBlock* block) {
  DCHECK(!current_block_);
  current_block_ = block;
  auto current_num_instructions = [&] {
    DCHECK_GE(kMaxInt, instructions_.size());
    return static_cast<int>(instructions_.size());
  };
  // Instructions are appended after this point; since the block is processed
  // bottom-up, this index ends up being the block's code *end*.
  int current_block_end = current_num_instructions();

  // Assign an effect level to every node; the level is bumped after each
  // effecting operation (stores, calls, atomics, memory barriers) so loads
  // can tell whether an effecting op occurred between them and their use.
  int effect_level = 0;
  for (Node* const node : *block) {
    SetEffectLevel(node, effect_level);
    current_effect_level_ = effect_level;
    if (node->opcode() == IrOpcode::kStore ||
        node->opcode() == IrOpcode::kUnalignedStore ||
        node->opcode() == IrOpcode::kCall ||
        node->opcode() == IrOpcode::kProtectedStore ||
#define ADD_EFFECT_FOR_ATOMIC_OP(Opcode) \
  node->opcode() == IrOpcode::k##Opcode ||
        MACHINE_ATOMIC_OP_LIST(ADD_EFFECT_FOR_ATOMIC_OP)
#undef ADD_EFFECT_FOR_ATOMIC_OP
        node->opcode() == IrOpcode::kMemoryBarrier) {
      ++effect_level;
    }
  }

  // We visit the control first, then the nodes in the block, so the block's
  // control input should be on the same effect level as the last node.
  if (block->control_input() != nullptr) {
    SetEffectLevel(block->control_input(), effect_level);
    current_effect_level_ = effect_level;
  }

  // Reverses the instructions emitted for {node} into final order and
  // attaches a source position to the last one if needed. Returns false when
  // selection has failed and the caller should bail out.
  auto FinishEmittedInstructions = [&](Node* node, int instruction_start) {
    if (instruction_selection_failed()) return false;
    if (current_num_instructions() == instruction_start) return true;
    std::reverse(instructions_.begin() + instruction_start,
                 instructions_.end());
    if (!node) return true;
    if (!source_positions_) return true;
    SourcePosition source_position = source_positions_->GetSourcePosition(node);
    if (source_position.IsKnown() && IsSourcePositionUsed(node)) {
      sequence()->SetSourcePosition(instructions_.back(), source_position);
    }
    return true;
  };

  // Generate code for the block control "top down", but schedule the code
  // "bottom up".
  VisitControl(block);
  if (!FinishEmittedInstructions(block->control_input(), current_block_end)) {
    return;
  }

  // Visit code in reverse control flow order, because architecture-specific
  // matching may cover more than one node at a time.
  for (auto node : base::Reversed(*block)) {
    int current_node_end = current_num_instructions();
    // Skip nodes that are unused or already defined.
    if (IsUsed(node) && !IsDefined(node)) {
      // Generate code for this node "top down", but schedule the code "bottom
      // up".
      VisitNode(node);
      if (!FinishEmittedInstructions(node, current_node_end)) return;
    }
    if (trace_turbo_ == kEnableTraceTurboJson) {
      instr_origins_[node->id()] = {current_num_instructions(),
                                    current_node_end};
    }
  }

  // We're done with the block.
  InstructionBlock* instruction_block =
      sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
  if (current_num_instructions() == current_block_end) {
    // Avoid empty block: insert a {kArchNop} instruction.
    Emit(Instruction::New(sequence()->zone(), kArchNop));
  }
  // Note: instructions were emitted in reverse, so "start" is the current
  // (higher) index and "end" is where we began.
  instruction_block->set_code_start(current_num_instructions());
  instruction_block->set_code_end(current_block_end);
  current_block_ = nullptr;
}
1250 | |
// Dispatches on the block's control kind (goto, call, tail call, branch,
// switch, return, deoptimize, throw) and emits the corresponding control
// instruction(s).
void InstructionSelector::VisitControl(BasicBlock* block) {
#ifdef DEBUG
  // SSA deconstruction requires targets of branches not to have phis.
  // Edge split form guarantees this property, but is more strict.
  if (block->SuccessorCount() > 1) {
    for (BasicBlock* const successor : block->successors()) {
      for (Node* const node : *successor) {
        if (IrOpcode::IsPhiOpcode(node->opcode())) {
          std::ostringstream str;
          str << "You might have specified merged variables for a label with "
              << "only one predecessor." << std::endl
              << "# Current Block: " << *successor << std::endl
              << "# Node: " << *node;
          FATAL("%s", str.str().c_str());
        }
      }
    }
  }
#endif

  Node* input = block->control_input();
  int instruction_end = static_cast<int>(instructions_.size());
  switch (block->control()) {
    case BasicBlock::kGoto:
      VisitGoto(block->SuccessorAt(0));
      break;
    case BasicBlock::kCall: {
      DCHECK_EQ(IrOpcode::kCall, input->opcode());
      // A call with an exception handler: successor 0 is the normal
      // continuation, successor 1 the exception handler.
      BasicBlock* success = block->SuccessorAt(0);
      BasicBlock* exception = block->SuccessorAt(1);
      VisitCall(input, exception);
      VisitGoto(success);
      break;
    }
    case BasicBlock::kTailCall: {
      DCHECK_EQ(IrOpcode::kTailCall, input->opcode());
      VisitTailCall(input);
      break;
    }
    case BasicBlock::kBranch: {
      DCHECK_EQ(IrOpcode::kBranch, input->opcode());
      BasicBlock* tbranch = block->SuccessorAt(0);
      BasicBlock* fbranch = block->SuccessorAt(1);
      // Degenerate branch with identical targets reduces to a goto.
      if (tbranch == fbranch) {
        VisitGoto(tbranch);
      } else {
        VisitBranch(input, tbranch, fbranch);
      }
      break;
    }
    case BasicBlock::kSwitch: {
      DCHECK_EQ(IrOpcode::kSwitch, input->opcode());
      // Last successor must be {IfDefault}.
      BasicBlock* default_branch = block->successors().back();
      DCHECK_EQ(IrOpcode::kIfDefault, default_branch->front()->opcode());
      // All other successors must be {IfValue}s; collect their case values
      // along with the overall [min, max] range for table-switch decisions.
      int32_t min_value = std::numeric_limits<int32_t>::max();
      int32_t max_value = std::numeric_limits<int32_t>::min();
      size_t case_count = block->SuccessorCount() - 1;
      ZoneVector<CaseInfo> cases(case_count, zone());
      for (size_t i = 0; i < case_count; ++i) {
        BasicBlock* branch = block->SuccessorAt(i);
        const IfValueParameters& p = IfValueParametersOf(branch->front()->op());
        cases[i] = CaseInfo{p.value(), p.comparison_order(), branch};
        if (min_value > p.value()) min_value = p.value();
        if (max_value < p.value()) max_value = p.value();
      }
      SwitchInfo sw(cases, min_value, max_value, default_branch);
      VisitSwitch(input, sw);
      break;
    }
    case BasicBlock::kReturn: {
      DCHECK_EQ(IrOpcode::kReturn, input->opcode());
      VisitReturn(input);
      break;
    }
    case BasicBlock::kDeoptimize: {
      DeoptimizeParameters p = DeoptimizeParametersOf(input->op());
      FrameState value{input->InputAt(0)};
      VisitDeoptimize(p.reason(), input->id(), p.feedback(), value);
      break;
    }
    case BasicBlock::kThrow:
      DCHECK_EQ(IrOpcode::kThrow, input->opcode());
      VisitThrow(input);
      break;
    case BasicBlock::kNone: {
      // Exit block doesn't have control.
      DCHECK_NULL(input);
      break;
    }
    default:
      UNREACHABLE();
  }
  if (trace_turbo_ == kEnableTraceTurboJson && input) {
    int instruction_start = static_cast<int>(instructions_.size());
    instr_origins_[input->id()] = {instruction_start, instruction_end};
  }
}
1350 | |
1351 | void InstructionSelector::MarkPairProjectionsAsWord32(Node* node) { |
1352 | Node* projection0 = NodeProperties::FindProjection(node, 0); |
1353 | if (projection0) { |
1354 | MarkAsWord32(projection0); |
1355 | } |
1356 | Node* projection1 = NodeProperties::FindProjection(node, 1); |
1357 | if (projection1) { |
1358 | MarkAsWord32(projection1); |
1359 | } |
1360 | } |
1361 | |
1362 | void InstructionSelector::VisitNode(Node* node) { |
1363 | tick_counter_->TickAndMaybeEnterSafepoint(); |
1364 | DCHECK_NOT_NULL(schedule()->block(node))((void) 0); // should only use scheduled nodes. |
1365 | switch (node->opcode()) { |
1366 | case IrOpcode::kStart: |
1367 | case IrOpcode::kLoop: |
1368 | case IrOpcode::kEnd: |
1369 | case IrOpcode::kBranch: |
1370 | case IrOpcode::kIfTrue: |
1371 | case IrOpcode::kIfFalse: |
1372 | case IrOpcode::kIfSuccess: |
1373 | case IrOpcode::kSwitch: |
1374 | case IrOpcode::kIfValue: |
1375 | case IrOpcode::kIfDefault: |
1376 | case IrOpcode::kEffectPhi: |
1377 | case IrOpcode::kMerge: |
1378 | case IrOpcode::kTerminate: |
1379 | case IrOpcode::kBeginRegion: |
1380 | // No code needed for these graph artifacts. |
1381 | return; |
1382 | case IrOpcode::kIfException: |
1383 | return MarkAsTagged(node), VisitIfException(node); |
1384 | case IrOpcode::kFinishRegion: |
1385 | return MarkAsTagged(node), VisitFinishRegion(node); |
1386 | case IrOpcode::kParameter: { |
1387 | // Parameters should always be scheduled to the first block. |
1388 | DCHECK_EQ(schedule()->block(node)->rpo_number(), 0)((void) 0); |
1389 | MachineType type = |
1390 | linkage()->GetParameterType(ParameterIndexOf(node->op())); |
1391 | MarkAsRepresentation(type.representation(), node); |
1392 | return VisitParameter(node); |
1393 | } |
1394 | case IrOpcode::kOsrValue: |
1395 | return MarkAsTagged(node), VisitOsrValue(node); |
1396 | case IrOpcode::kPhi: { |
1397 | MachineRepresentation rep = PhiRepresentationOf(node->op()); |
1398 | if (rep == MachineRepresentation::kNone) return; |
1399 | MarkAsRepresentation(rep, node); |
1400 | return VisitPhi(node); |
1401 | } |
1402 | case IrOpcode::kProjection: |
1403 | return VisitProjection(node); |
1404 | case IrOpcode::kInt32Constant: |
1405 | case IrOpcode::kInt64Constant: |
1406 | case IrOpcode::kTaggedIndexConstant: |
1407 | case IrOpcode::kExternalConstant: |
1408 | case IrOpcode::kRelocatableInt32Constant: |
1409 | case IrOpcode::kRelocatableInt64Constant: |
1410 | return VisitConstant(node); |
1411 | case IrOpcode::kFloat32Constant: |
1412 | return MarkAsFloat32(node), VisitConstant(node); |
1413 | case IrOpcode::kFloat64Constant: |
1414 | return MarkAsFloat64(node), VisitConstant(node); |
1415 | case IrOpcode::kHeapConstant: |
1416 | return MarkAsTagged(node), VisitConstant(node); |
1417 | case IrOpcode::kCompressedHeapConstant: |
1418 | return MarkAsCompressed(node), VisitConstant(node); |
1419 | case IrOpcode::kNumberConstant: { |
1420 | double value = OpParameter<double>(node->op()); |
1421 | if (!IsSmiDouble(value)) MarkAsTagged(node); |
1422 | return VisitConstant(node); |
1423 | } |
1424 | case IrOpcode::kDelayedStringConstant: |
1425 | return MarkAsTagged(node), VisitConstant(node); |
1426 | case IrOpcode::kCall: |
1427 | return VisitCall(node); |
1428 | case IrOpcode::kDeoptimizeIf: |
1429 | return VisitDeoptimizeIf(node); |
1430 | case IrOpcode::kDeoptimizeUnless: |
1431 | return VisitDeoptimizeUnless(node); |
1432 | case IrOpcode::kTrapIf: |
1433 | return VisitTrapIf(node, TrapIdOf(node->op())); |
1434 | case IrOpcode::kTrapUnless: |
1435 | return VisitTrapUnless(node, TrapIdOf(node->op())); |
1436 | case IrOpcode::kFrameState: |
1437 | case IrOpcode::kStateValues: |
1438 | case IrOpcode::kObjectState: |
1439 | return; |
1440 | case IrOpcode::kAbortCSADcheck: |
1441 | VisitAbortCSADcheck(node); |
1442 | return; |
1443 | case IrOpcode::kDebugBreak: |
1444 | VisitDebugBreak(node); |
1445 | return; |
1446 | case IrOpcode::kUnreachable: |
1447 | VisitUnreachable(node); |
1448 | return; |
1449 | case IrOpcode::kStaticAssert: |
1450 | VisitStaticAssert(node); |
1451 | return; |
1452 | case IrOpcode::kDeadValue: |
1453 | VisitDeadValue(node); |
1454 | return; |
1455 | case IrOpcode::kComment: |
1456 | VisitComment(node); |
1457 | return; |
1458 | case IrOpcode::kRetain: |
1459 | VisitRetain(node); |
1460 | return; |
1461 | case IrOpcode::kLoad: |
1462 | case IrOpcode::kLoadImmutable: { |
1463 | LoadRepresentation type = LoadRepresentationOf(node->op()); |
1464 | MarkAsRepresentation(type.representation(), node); |
1465 | return VisitLoad(node); |
1466 | } |
1467 | case IrOpcode::kLoadTransform: { |
1468 | MarkAsRepresentation(MachineRepresentation::kSimd128, node); |
1469 | return VisitLoadTransform(node); |
1470 | } |
1471 | case IrOpcode::kLoadLane: { |
1472 | MarkAsRepresentation(MachineRepresentation::kSimd128, node); |
1473 | return VisitLoadLane(node); |
1474 | } |
1475 | case IrOpcode::kStore: |
1476 | return VisitStore(node); |
1477 | case IrOpcode::kProtectedStore: |
1478 | return VisitProtectedStore(node); |
1479 | case IrOpcode::kStoreLane: { |
1480 | MarkAsRepresentation(MachineRepresentation::kSimd128, node); |
1481 | return VisitStoreLane(node); |
1482 | } |
1483 | case IrOpcode::kWord32And: |
1484 | return MarkAsWord32(node), VisitWord32And(node); |
1485 | case IrOpcode::kWord32Or: |
1486 | return MarkAsWord32(node), VisitWord32Or(node); |
1487 | case IrOpcode::kWord32Xor: |
1488 | return MarkAsWord32(node), VisitWord32Xor(node); |
1489 | case IrOpcode::kWord32Shl: |
1490 | return MarkAsWord32(node), VisitWord32Shl(node); |
1491 | case IrOpcode::kWord32Shr: |
1492 | return MarkAsWord32(node), VisitWord32Shr(node); |
1493 | case IrOpcode::kWord32Sar: |
1494 | return MarkAsWord32(node), VisitWord32Sar(node); |
1495 | case IrOpcode::kWord32Rol: |
1496 | return MarkAsWord32(node), VisitWord32Rol(node); |
1497 | case IrOpcode::kWord32Ror: |
1498 | return MarkAsWord32(node), VisitWord32Ror(node); |
1499 | case IrOpcode::kWord32Equal: |
1500 | return VisitWord32Equal(node); |
1501 | case IrOpcode::kWord32Clz: |
1502 | return MarkAsWord32(node), VisitWord32Clz(node); |
1503 | case IrOpcode::kWord32Ctz: |
1504 | return MarkAsWord32(node), VisitWord32Ctz(node); |
1505 | case IrOpcode::kWord32ReverseBits: |
1506 | return MarkAsWord32(node), VisitWord32ReverseBits(node); |
1507 | case IrOpcode::kWord32ReverseBytes: |
1508 | return MarkAsWord32(node), VisitWord32ReverseBytes(node); |
1509 | case IrOpcode::kInt32AbsWithOverflow: |
1510 | return MarkAsWord32(node), VisitInt32AbsWithOverflow(node); |
1511 | case IrOpcode::kWord32Popcnt: |
1512 | return MarkAsWord32(node), VisitWord32Popcnt(node); |
1513 | case IrOpcode::kWord64Popcnt: |
1514 | return MarkAsWord32(node), VisitWord64Popcnt(node); |
1515 | case IrOpcode::kWord32Select: |
1516 | return MarkAsWord32(node), VisitSelect(node); |
1517 | case IrOpcode::kWord64And: |
1518 | return MarkAsWord64(node), VisitWord64And(node); |
1519 | case IrOpcode::kWord64Or: |
1520 | return MarkAsWord64(node), VisitWord64Or(node); |
1521 | case IrOpcode::kWord64Xor: |
1522 | return MarkAsWord64(node), VisitWord64Xor(node); |
1523 | case IrOpcode::kWord64Shl: |
1524 | return MarkAsWord64(node), VisitWord64Shl(node); |
1525 | case IrOpcode::kWord64Shr: |
1526 | return MarkAsWord64(node), VisitWord64Shr(node); |
1527 | case IrOpcode::kWord64Sar: |
1528 | return MarkAsWord64(node), VisitWord64Sar(node); |
1529 | case IrOpcode::kWord64Rol: |
1530 | return MarkAsWord64(node), VisitWord64Rol(node); |
1531 | case IrOpcode::kWord64Ror: |
1532 | return MarkAsWord64(node), VisitWord64Ror(node); |
1533 | case IrOpcode::kWord64Clz: |
1534 | return MarkAsWord64(node), VisitWord64Clz(node); |
1535 | case IrOpcode::kWord64Ctz: |
1536 | return MarkAsWord64(node), VisitWord64Ctz(node); |
1537 | case IrOpcode::kWord64ReverseBits: |
1538 | return MarkAsWord64(node), VisitWord64ReverseBits(node); |
1539 | case IrOpcode::kWord64ReverseBytes: |
1540 | return MarkAsWord64(node), VisitWord64ReverseBytes(node); |
1541 | case IrOpcode::kSimd128ReverseBytes: |
1542 | return MarkAsSimd128(node), VisitSimd128ReverseBytes(node); |
1543 | case IrOpcode::kInt64AbsWithOverflow: |
1544 | return MarkAsWord64(node), VisitInt64AbsWithOverflow(node); |
1545 | case IrOpcode::kWord64Equal: |
1546 | return VisitWord64Equal(node); |
1547 | case IrOpcode::kWord64Select: |
1548 | return MarkAsWord64(node), VisitSelect(node); |
1549 | case IrOpcode::kInt32Add: |
1550 | return MarkAsWord32(node), VisitInt32Add(node); |
1551 | case IrOpcode::kInt32AddWithOverflow: |
1552 | return MarkAsWord32(node), VisitInt32AddWithOverflow(node); |
1553 | case IrOpcode::kInt32Sub: |
1554 | return MarkAsWord32(node), VisitInt32Sub(node); |
1555 | case IrOpcode::kInt32SubWithOverflow: |
1556 | return VisitInt32SubWithOverflow(node); |
1557 | case IrOpcode::kInt32Mul: |
1558 | return MarkAsWord32(node), VisitInt32Mul(node); |
1559 | case IrOpcode::kInt32MulWithOverflow: |
1560 | return MarkAsWord32(node), VisitInt32MulWithOverflow(node); |
1561 | case IrOpcode::kInt32MulHigh: |
1562 | return VisitInt32MulHigh(node); |
1563 | case IrOpcode::kInt32Div: |
1564 | return MarkAsWord32(node), VisitInt32Div(node); |
1565 | case IrOpcode::kInt32Mod: |
1566 | return MarkAsWord32(node), VisitInt32Mod(node); |
1567 | case IrOpcode::kInt32LessThan: |
1568 | return VisitInt32LessThan(node); |
1569 | case IrOpcode::kInt32LessThanOrEqual: |
1570 | return VisitInt32LessThanOrEqual(node); |
1571 | case IrOpcode::kUint32Div: |
1572 | return MarkAsWord32(node), VisitUint32Div(node); |
1573 | case IrOpcode::kUint32LessThan: |
1574 | return VisitUint32LessThan(node); |
1575 | case IrOpcode::kUint32LessThanOrEqual: |
1576 | return VisitUint32LessThanOrEqual(node); |
1577 | case IrOpcode::kUint32Mod: |
1578 | return MarkAsWord32(node), VisitUint32Mod(node); |
1579 | case IrOpcode::kUint32MulHigh: |
1580 | return VisitUint32MulHigh(node); |
1581 | case IrOpcode::kInt64Add: |
1582 | return MarkAsWord64(node), VisitInt64Add(node); |
1583 | case IrOpcode::kInt64AddWithOverflow: |
1584 | return MarkAsWord64(node), VisitInt64AddWithOverflow(node); |
1585 | case IrOpcode::kInt64Sub: |
1586 | return MarkAsWord64(node), VisitInt64Sub(node); |
1587 | case IrOpcode::kInt64SubWithOverflow: |
1588 | return MarkAsWord64(node), VisitInt64SubWithOverflow(node); |
1589 | case IrOpcode::kInt64Mul: |
1590 | return MarkAsWord64(node), VisitInt64Mul(node); |
1591 | case IrOpcode::kInt64Div: |
1592 | return MarkAsWord64(node), VisitInt64Div(node); |
1593 | case IrOpcode::kInt64Mod: |
1594 | return MarkAsWord64(node), VisitInt64Mod(node); |
1595 | case IrOpcode::kInt64LessThan: |
1596 | return VisitInt64LessThan(node); |
1597 | case IrOpcode::kInt64LessThanOrEqual: |
1598 | return VisitInt64LessThanOrEqual(node); |
1599 | case IrOpcode::kUint64Div: |
1600 | return MarkAsWord64(node), VisitUint64Div(node); |
1601 | case IrOpcode::kUint64LessThan: |
1602 | return VisitUint64LessThan(node); |
1603 | case IrOpcode::kUint64LessThanOrEqual: |
1604 | return VisitUint64LessThanOrEqual(node); |
1605 | case IrOpcode::kUint64Mod: |
1606 | return MarkAsWord64(node), VisitUint64Mod(node); |
1607 | case IrOpcode::kBitcastTaggedToWord: |
1608 | case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits: |
1609 | return MarkAsRepresentation(MachineType::PointerRepresentation(), node), |
1610 | VisitBitcastTaggedToWord(node); |
1611 | case IrOpcode::kBitcastWordToTagged: |
1612 | return MarkAsTagged(node), VisitBitcastWordToTagged(node); |
1613 | case IrOpcode::kBitcastWordToTaggedSigned: |
1614 | return MarkAsRepresentation(MachineRepresentation::kTaggedSigned, node), |
1615 | EmitIdentity(node); |
1616 | case IrOpcode::kChangeFloat32ToFloat64: |
1617 | return MarkAsFloat64(node), VisitChangeFloat32ToFloat64(node); |
1618 | case IrOpcode::kChangeInt32ToFloat64: |
1619 | return MarkAsFloat64(node), VisitChangeInt32ToFloat64(node); |
1620 | case IrOpcode::kChangeInt64ToFloat64: |
1621 | return MarkAsFloat64(node), VisitChangeInt64ToFloat64(node); |
1622 | case IrOpcode::kChangeUint32ToFloat64: |
1623 | return MarkAsFloat64(node), VisitChangeUint32ToFloat64(node); |
1624 | case IrOpcode::kChangeFloat64ToInt32: |
1625 | return MarkAsWord32(node), VisitChangeFloat64ToInt32(node); |
1626 | case IrOpcode::kChangeFloat64ToInt64: |
1627 | return MarkAsWord64(node), VisitChangeFloat64ToInt64(node); |
1628 | case IrOpcode::kChangeFloat64ToUint32: |
1629 | return MarkAsWord32(node), VisitChangeFloat64ToUint32(node); |
1630 | case IrOpcode::kChangeFloat64ToUint64: |
1631 | return MarkAsWord64(node), VisitChangeFloat64ToUint64(node); |
    case IrOpcode::kFloat64SilenceNaN:
      MarkAsFloat64(node);
      // Only emit NaN-silencing code when the input can actually produce a
      // signaling NaN; otherwise the value is passed through unchanged via
      // an identity move.
      if (CanProduceSignalingNaN(node->InputAt(0))) {
        return VisitFloat64SilenceNaN(node);
      } else {
        return EmitIdentity(node);
      }
1639 | case IrOpcode::kTruncateFloat64ToInt64: |
1640 | return MarkAsWord64(node), VisitTruncateFloat64ToInt64(node); |
1641 | case IrOpcode::kTruncateFloat64ToUint32: |
1642 | return MarkAsWord32(node), VisitTruncateFloat64ToUint32(node); |
1643 | case IrOpcode::kTruncateFloat32ToInt32: |
1644 | return MarkAsWord32(node), VisitTruncateFloat32ToInt32(node); |
1645 | case IrOpcode::kTruncateFloat32ToUint32: |
1646 | return MarkAsWord32(node), VisitTruncateFloat32ToUint32(node); |
1647 | case IrOpcode::kTryTruncateFloat32ToInt64: |
1648 | return MarkAsWord64(node), VisitTryTruncateFloat32ToInt64(node); |
1649 | case IrOpcode::kTryTruncateFloat64ToInt64: |
1650 | return MarkAsWord64(node), VisitTryTruncateFloat64ToInt64(node); |
1651 | case IrOpcode::kTryTruncateFloat32ToUint64: |
1652 | return MarkAsWord64(node), VisitTryTruncateFloat32ToUint64(node); |
1653 | case IrOpcode::kTryTruncateFloat64ToUint64: |
1654 | return MarkAsWord64(node), VisitTryTruncateFloat64ToUint64(node); |
1655 | case IrOpcode::kBitcastWord32ToWord64: |
1656 | return MarkAsWord64(node), VisitBitcastWord32ToWord64(node); |
1657 | case IrOpcode::kChangeInt32ToInt64: |
1658 | return MarkAsWord64(node), VisitChangeInt32ToInt64(node); |
1659 | case IrOpcode::kChangeUint32ToUint64: |
1660 | return MarkAsWord64(node), VisitChangeUint32ToUint64(node); |
1661 | case IrOpcode::kTruncateFloat64ToFloat32: |
1662 | return MarkAsFloat32(node), VisitTruncateFloat64ToFloat32(node); |
1663 | case IrOpcode::kTruncateFloat64ToWord32: |
1664 | return MarkAsWord32(node), VisitTruncateFloat64ToWord32(node); |
1665 | case IrOpcode::kTruncateInt64ToInt32: |
1666 | return MarkAsWord32(node), VisitTruncateInt64ToInt32(node); |
1667 | case IrOpcode::kRoundFloat64ToInt32: |
1668 | return MarkAsWord32(node), VisitRoundFloat64ToInt32(node); |
1669 | case IrOpcode::kRoundInt64ToFloat32: |
1670 | return MarkAsFloat32(node), VisitRoundInt64ToFloat32(node); |
1671 | case IrOpcode::kRoundInt32ToFloat32: |
1672 | return MarkAsFloat32(node), VisitRoundInt32ToFloat32(node); |
1673 | case IrOpcode::kRoundInt64ToFloat64: |
1674 | return MarkAsFloat64(node), VisitRoundInt64ToFloat64(node); |
1675 | case IrOpcode::kBitcastFloat32ToInt32: |
1676 | return MarkAsWord32(node), VisitBitcastFloat32ToInt32(node); |
1677 | case IrOpcode::kRoundUint32ToFloat32: |
1678 | return MarkAsFloat32(node), VisitRoundUint32ToFloat32(node); |
1679 | case IrOpcode::kRoundUint64ToFloat32: |
1680 | return MarkAsFloat64(node), VisitRoundUint64ToFloat32(node); |
1681 | case IrOpcode::kRoundUint64ToFloat64: |
1682 | return MarkAsFloat64(node), VisitRoundUint64ToFloat64(node); |
1683 | case IrOpcode::kBitcastFloat64ToInt64: |
1684 | return MarkAsWord64(node), VisitBitcastFloat64ToInt64(node); |
1685 | case IrOpcode::kBitcastInt32ToFloat32: |
1686 | return MarkAsFloat32(node), VisitBitcastInt32ToFloat32(node); |
1687 | case IrOpcode::kBitcastInt64ToFloat64: |
1688 | return MarkAsFloat64(node), VisitBitcastInt64ToFloat64(node); |
1689 | case IrOpcode::kFloat32Add: |
1690 | return MarkAsFloat32(node), VisitFloat32Add(node); |
1691 | case IrOpcode::kFloat32Sub: |
1692 | return MarkAsFloat32(node), VisitFloat32Sub(node); |
1693 | case IrOpcode::kFloat32Neg: |
1694 | return MarkAsFloat32(node), VisitFloat32Neg(node); |
1695 | case IrOpcode::kFloat32Mul: |
1696 | return MarkAsFloat32(node), VisitFloat32Mul(node); |
1697 | case IrOpcode::kFloat32Div: |
1698 | return MarkAsFloat32(node), VisitFloat32Div(node); |
1699 | case IrOpcode::kFloat32Abs: |
1700 | return MarkAsFloat32(node), VisitFloat32Abs(node); |
1701 | case IrOpcode::kFloat32Sqrt: |
1702 | return MarkAsFloat32(node), VisitFloat32Sqrt(node); |
1703 | case IrOpcode::kFloat32Equal: |
1704 | return VisitFloat32Equal(node); |
1705 | case IrOpcode::kFloat32LessThan: |
1706 | return VisitFloat32LessThan(node); |
1707 | case IrOpcode::kFloat32LessThanOrEqual: |
1708 | return VisitFloat32LessThanOrEqual(node); |
1709 | case IrOpcode::kFloat32Max: |
1710 | return MarkAsFloat32(node), VisitFloat32Max(node); |
1711 | case IrOpcode::kFloat32Min: |
1712 | return MarkAsFloat32(node), VisitFloat32Min(node); |
1713 | case IrOpcode::kFloat32Select: |
1714 | return MarkAsFloat32(node), VisitSelect(node); |
1715 | case IrOpcode::kFloat64Add: |
1716 | return MarkAsFloat64(node), VisitFloat64Add(node); |
1717 | case IrOpcode::kFloat64Sub: |
1718 | return MarkAsFloat64(node), VisitFloat64Sub(node); |
1719 | case IrOpcode::kFloat64Neg: |
1720 | return MarkAsFloat64(node), VisitFloat64Neg(node); |
1721 | case IrOpcode::kFloat64Mul: |
1722 | return MarkAsFloat64(node), VisitFloat64Mul(node); |
1723 | case IrOpcode::kFloat64Div: |
1724 | return MarkAsFloat64(node), VisitFloat64Div(node); |
1725 | case IrOpcode::kFloat64Mod: |
1726 | return MarkAsFloat64(node), VisitFloat64Mod(node); |
1727 | case IrOpcode::kFloat64Min: |
1728 | return MarkAsFloat64(node), VisitFloat64Min(node); |
1729 | case IrOpcode::kFloat64Max: |
1730 | return MarkAsFloat64(node), VisitFloat64Max(node); |
1731 | case IrOpcode::kFloat64Abs: |
1732 | return MarkAsFloat64(node), VisitFloat64Abs(node); |
1733 | case IrOpcode::kFloat64Acos: |
1734 | return MarkAsFloat64(node), VisitFloat64Acos(node); |
1735 | case IrOpcode::kFloat64Acosh: |
1736 | return MarkAsFloat64(node), VisitFloat64Acosh(node); |
1737 | case IrOpcode::kFloat64Asin: |
1738 | return MarkAsFloat64(node), VisitFloat64Asin(node); |
1739 | case IrOpcode::kFloat64Asinh: |
1740 | return MarkAsFloat64(node), VisitFloat64Asinh(node); |
1741 | case IrOpcode::kFloat64Atan: |
1742 | return MarkAsFloat64(node), VisitFloat64Atan(node); |
1743 | case IrOpcode::kFloat64Atanh: |
1744 | return MarkAsFloat64(node), VisitFloat64Atanh(node); |
1745 | case IrOpcode::kFloat64Atan2: |
1746 | return MarkAsFloat64(node), VisitFloat64Atan2(node); |
1747 | case IrOpcode::kFloat64Cbrt: |
1748 | return MarkAsFloat64(node), VisitFloat64Cbrt(node); |
1749 | case IrOpcode::kFloat64Cos: |
1750 | return MarkAsFloat64(node), VisitFloat64Cos(node); |
1751 | case IrOpcode::kFloat64Cosh: |
1752 | return MarkAsFloat64(node), VisitFloat64Cosh(node); |
1753 | case IrOpcode::kFloat64Exp: |
1754 | return MarkAsFloat64(node), VisitFloat64Exp(node); |
1755 | case IrOpcode::kFloat64Expm1: |
1756 | return MarkAsFloat64(node), VisitFloat64Expm1(node); |
1757 | case IrOpcode::kFloat64Log: |
1758 | return MarkAsFloat64(node), VisitFloat64Log(node); |
1759 | case IrOpcode::kFloat64Log1p: |
1760 | return MarkAsFloat64(node), VisitFloat64Log1p(node); |
1761 | case IrOpcode::kFloat64Log10: |
1762 | return MarkAsFloat64(node), VisitFloat64Log10(node); |
1763 | case IrOpcode::kFloat64Log2: |
1764 | return MarkAsFloat64(node), VisitFloat64Log2(node); |
1765 | case IrOpcode::kFloat64Pow: |
1766 | return MarkAsFloat64(node), VisitFloat64Pow(node); |
1767 | case IrOpcode::kFloat64Sin: |
1768 | return MarkAsFloat64(node), VisitFloat64Sin(node); |
1769 | case IrOpcode::kFloat64Sinh: |
1770 | return MarkAsFloat64(node), VisitFloat64Sinh(node); |
1771 | case IrOpcode::kFloat64Sqrt: |
1772 | return MarkAsFloat64(node), VisitFloat64Sqrt(node); |
1773 | case IrOpcode::kFloat64Tan: |
1774 | return MarkAsFloat64(node), VisitFloat64Tan(node); |
1775 | case IrOpcode::kFloat64Tanh: |
1776 | return MarkAsFloat64(node), VisitFloat64Tanh(node); |
1777 | case IrOpcode::kFloat64Equal: |
1778 | return VisitFloat64Equal(node); |
1779 | case IrOpcode::kFloat64LessThan: |
1780 | return VisitFloat64LessThan(node); |
1781 | case IrOpcode::kFloat64LessThanOrEqual: |
1782 | return VisitFloat64LessThanOrEqual(node); |
1783 | case IrOpcode::kFloat64Select: |
1784 | return MarkAsFloat64(node), VisitSelect(node); |
1785 | case IrOpcode::kFloat32RoundDown: |
1786 | return MarkAsFloat32(node), VisitFloat32RoundDown(node); |
1787 | case IrOpcode::kFloat64RoundDown: |
1788 | return MarkAsFloat64(node), VisitFloat64RoundDown(node); |
1789 | case IrOpcode::kFloat32RoundUp: |
1790 | return MarkAsFloat32(node), VisitFloat32RoundUp(node); |
1791 | case IrOpcode::kFloat64RoundUp: |
1792 | return MarkAsFloat64(node), VisitFloat64RoundUp(node); |
1793 | case IrOpcode::kFloat32RoundTruncate: |
1794 | return MarkAsFloat32(node), VisitFloat32RoundTruncate(node); |
1795 | case IrOpcode::kFloat64RoundTruncate: |
1796 | return MarkAsFloat64(node), VisitFloat64RoundTruncate(node); |
1797 | case IrOpcode::kFloat64RoundTiesAway: |
1798 | return MarkAsFloat64(node), VisitFloat64RoundTiesAway(node); |
1799 | case IrOpcode::kFloat32RoundTiesEven: |
1800 | return MarkAsFloat32(node), VisitFloat32RoundTiesEven(node); |
1801 | case IrOpcode::kFloat64RoundTiesEven: |
1802 | return MarkAsFloat64(node), VisitFloat64RoundTiesEven(node); |
1803 | case IrOpcode::kFloat64ExtractLowWord32: |
1804 | return MarkAsWord32(node), VisitFloat64ExtractLowWord32(node); |
1805 | case IrOpcode::kFloat64ExtractHighWord32: |
1806 | return MarkAsWord32(node), VisitFloat64ExtractHighWord32(node); |
1807 | case IrOpcode::kFloat64InsertLowWord32: |
1808 | return MarkAsFloat64(node), VisitFloat64InsertLowWord32(node); |
1809 | case IrOpcode::kFloat64InsertHighWord32: |
1810 | return MarkAsFloat64(node), VisitFloat64InsertHighWord32(node); |
1811 | case IrOpcode::kStackSlot: |
1812 | return VisitStackSlot(node); |
1813 | case IrOpcode::kStackPointerGreaterThan: |
1814 | return VisitStackPointerGreaterThan(node); |
1815 | case IrOpcode::kLoadStackCheckOffset: |
1816 | return VisitLoadStackCheckOffset(node); |
1817 | case IrOpcode::kLoadFramePointer: |
1818 | return VisitLoadFramePointer(node); |
1819 | case IrOpcode::kLoadParentFramePointer: |
1820 | return VisitLoadParentFramePointer(node); |
1821 | case IrOpcode::kUnalignedLoad: { |
1822 | LoadRepresentation type = LoadRepresentationOf(node->op()); |
1823 | MarkAsRepresentation(type.representation(), node); |
1824 | return VisitUnalignedLoad(node); |
1825 | } |
1826 | case IrOpcode::kUnalignedStore: |
1827 | return VisitUnalignedStore(node); |
1828 | case IrOpcode::kInt32PairAdd: |
1829 | MarkAsWord32(node); |
1830 | MarkPairProjectionsAsWord32(node); |
1831 | return VisitInt32PairAdd(node); |
1832 | case IrOpcode::kInt32PairSub: |
1833 | MarkAsWord32(node); |
1834 | MarkPairProjectionsAsWord32(node); |
1835 | return VisitInt32PairSub(node); |
1836 | case IrOpcode::kInt32PairMul: |
1837 | MarkAsWord32(node); |
1838 | MarkPairProjectionsAsWord32(node); |
1839 | return VisitInt32PairMul(node); |
1840 | case IrOpcode::kWord32PairShl: |
1841 | MarkAsWord32(node); |
1842 | MarkPairProjectionsAsWord32(node); |
1843 | return VisitWord32PairShl(node); |
1844 | case IrOpcode::kWord32PairShr: |
1845 | MarkAsWord32(node); |
1846 | MarkPairProjectionsAsWord32(node); |
1847 | return VisitWord32PairShr(node); |
1848 | case IrOpcode::kWord32PairSar: |
1849 | MarkAsWord32(node); |
1850 | MarkPairProjectionsAsWord32(node); |
1851 | return VisitWord32PairSar(node); |
1852 | case IrOpcode::kMemoryBarrier: |
1853 | return VisitMemoryBarrier(node); |
1854 | case IrOpcode::kWord32AtomicLoad: { |
1855 | AtomicLoadParameters params = AtomicLoadParametersOf(node->op()); |
1856 | LoadRepresentation type = params.representation(); |
1857 | MarkAsRepresentation(type.representation(), node); |
1858 | return VisitWord32AtomicLoad(node); |
1859 | } |
1860 | case IrOpcode::kWord64AtomicLoad: { |
1861 | AtomicLoadParameters params = AtomicLoadParametersOf(node->op()); |
1862 | LoadRepresentation type = params.representation(); |
1863 | MarkAsRepresentation(type.representation(), node); |
1864 | return VisitWord64AtomicLoad(node); |
1865 | } |
1866 | case IrOpcode::kWord32AtomicStore: |
1867 | return VisitWord32AtomicStore(node); |
1868 | case IrOpcode::kWord64AtomicStore: |
1869 | return VisitWord64AtomicStore(node); |
1870 | case IrOpcode::kWord32AtomicPairStore: |
1871 | return VisitWord32AtomicPairStore(node); |
1872 | case IrOpcode::kWord32AtomicPairLoad: { |
1873 | MarkAsWord32(node); |
1874 | MarkPairProjectionsAsWord32(node); |
1875 | return VisitWord32AtomicPairLoad(node); |
1876 | } |
1877 | #define ATOMIC_CASE(name, rep) \ |
1878 | case IrOpcode::k##rep##Atomic##name: { \ |
1879 | MachineType type = AtomicOpType(node->op()); \ |
1880 | MarkAsRepresentation(type.representation(), node); \ |
1881 | return Visit##rep##Atomic##name(node); \ |
1882 | } |
1883 | ATOMIC_CASE(Add, Word32) |
1884 | ATOMIC_CASE(Add, Word64) |
1885 | ATOMIC_CASE(Sub, Word32) |
1886 | ATOMIC_CASE(Sub, Word64) |
1887 | ATOMIC_CASE(And, Word32) |
1888 | ATOMIC_CASE(And, Word64) |
1889 | ATOMIC_CASE(Or, Word32) |
1890 | ATOMIC_CASE(Or, Word64) |
1891 | ATOMIC_CASE(Xor, Word32) |
1892 | ATOMIC_CASE(Xor, Word64) |
1893 | ATOMIC_CASE(Exchange, Word32) |
1894 | ATOMIC_CASE(Exchange, Word64) |
1895 | ATOMIC_CASE(CompareExchange, Word32) |
1896 | ATOMIC_CASE(CompareExchange, Word64) |
1897 | #undef ATOMIC_CASE |
1898 | #define ATOMIC_CASE(name) \ |
1899 | case IrOpcode::kWord32AtomicPair##name: { \ |
1900 | MarkAsWord32(node); \ |
1901 | MarkPairProjectionsAsWord32(node); \ |
1902 | return VisitWord32AtomicPair##name(node); \ |
1903 | } |
1904 | ATOMIC_CASE(Add) |
1905 | ATOMIC_CASE(Sub) |
1906 | ATOMIC_CASE(And) |
1907 | ATOMIC_CASE(Or) |
1908 | ATOMIC_CASE(Xor) |
1909 | ATOMIC_CASE(Exchange) |
1910 | ATOMIC_CASE(CompareExchange) |
1911 | #undef ATOMIC_CASE |
1912 | case IrOpcode::kProtectedLoad: { |
1913 | LoadRepresentation type = LoadRepresentationOf(node->op()); |
1914 | MarkAsRepresentation(type.representation(), node); |
1915 | return VisitProtectedLoad(node); |
1916 | } |
1917 | case IrOpcode::kSignExtendWord8ToInt32: |
1918 | return MarkAsWord32(node), VisitSignExtendWord8ToInt32(node); |
1919 | case IrOpcode::kSignExtendWord16ToInt32: |
1920 | return MarkAsWord32(node), VisitSignExtendWord16ToInt32(node); |
1921 | case IrOpcode::kSignExtendWord8ToInt64: |
1922 | return MarkAsWord64(node), VisitSignExtendWord8ToInt64(node); |
1923 | case IrOpcode::kSignExtendWord16ToInt64: |
1924 | return MarkAsWord64(node), VisitSignExtendWord16ToInt64(node); |
1925 | case IrOpcode::kSignExtendWord32ToInt64: |
1926 | return MarkAsWord64(node), VisitSignExtendWord32ToInt64(node); |
1927 | case IrOpcode::kUnsafePointerAdd: |
1928 | MarkAsRepresentation(MachineType::PointerRepresentation(), node); |
1929 | return VisitUnsafePointerAdd(node); |
1930 | case IrOpcode::kF64x2Splat: |
1931 | return MarkAsSimd128(node), VisitF64x2Splat(node); |
1932 | case IrOpcode::kF64x2ExtractLane: |
1933 | return MarkAsFloat64(node), VisitF64x2ExtractLane(node); |
1934 | case IrOpcode::kF64x2ReplaceLane: |
1935 | return MarkAsSimd128(node), VisitF64x2ReplaceLane(node); |
1936 | case IrOpcode::kF64x2Abs: |
1937 | return MarkAsSimd128(node), VisitF64x2Abs(node); |
1938 | case IrOpcode::kF64x2Neg: |
1939 | return MarkAsSimd128(node), VisitF64x2Neg(node); |
1940 | case IrOpcode::kF64x2Sqrt: |
1941 | return MarkAsSimd128(node), VisitF64x2Sqrt(node); |
1942 | case IrOpcode::kF64x2Add: |
1943 | return MarkAsSimd128(node), VisitF64x2Add(node); |
1944 | case IrOpcode::kF64x2Sub: |
1945 | return MarkAsSimd128(node), VisitF64x2Sub(node); |
1946 | case IrOpcode::kF64x2Mul: |
1947 | return MarkAsSimd128(node), VisitF64x2Mul(node); |
1948 | case IrOpcode::kF64x2Div: |
1949 | return MarkAsSimd128(node), VisitF64x2Div(node); |
1950 | case IrOpcode::kF64x2Min: |
1951 | return MarkAsSimd128(node), VisitF64x2Min(node); |
1952 | case IrOpcode::kF64x2Max: |
1953 | return MarkAsSimd128(node), VisitF64x2Max(node); |
1954 | case IrOpcode::kF64x2Eq: |
1955 | return MarkAsSimd128(node), VisitF64x2Eq(node); |
1956 | case IrOpcode::kF64x2Ne: |
1957 | return MarkAsSimd128(node), VisitF64x2Ne(node); |
1958 | case IrOpcode::kF64x2Lt: |
1959 | return MarkAsSimd128(node), VisitF64x2Lt(node); |
1960 | case IrOpcode::kF64x2Le: |
1961 | return MarkAsSimd128(node), VisitF64x2Le(node); |
1962 | case IrOpcode::kF64x2Qfma: |
1963 | return MarkAsSimd128(node), VisitF64x2Qfma(node); |
1964 | case IrOpcode::kF64x2Qfms: |
1965 | return MarkAsSimd128(node), VisitF64x2Qfms(node); |
1966 | case IrOpcode::kF64x2Pmin: |
1967 | return MarkAsSimd128(node), VisitF64x2Pmin(node); |
1968 | case IrOpcode::kF64x2Pmax: |
1969 | return MarkAsSimd128(node), VisitF64x2Pmax(node); |
1970 | case IrOpcode::kF64x2Ceil: |
1971 | return MarkAsSimd128(node), VisitF64x2Ceil(node); |
1972 | case IrOpcode::kF64x2Floor: |
1973 | return MarkAsSimd128(node), VisitF64x2Floor(node); |
1974 | case IrOpcode::kF64x2Trunc: |
1975 | return MarkAsSimd128(node), VisitF64x2Trunc(node); |
1976 | case IrOpcode::kF64x2NearestInt: |
1977 | return MarkAsSimd128(node), VisitF64x2NearestInt(node); |
1978 | case IrOpcode::kF64x2ConvertLowI32x4S: |
1979 | return MarkAsSimd128(node), VisitF64x2ConvertLowI32x4S(node); |
1980 | case IrOpcode::kF64x2ConvertLowI32x4U: |
1981 | return MarkAsSimd128(node), VisitF64x2ConvertLowI32x4U(node); |
1982 | case IrOpcode::kF64x2PromoteLowF32x4: |
1983 | return MarkAsSimd128(node), VisitF64x2PromoteLowF32x4(node); |
1984 | case IrOpcode::kF32x4Splat: |
1985 | return MarkAsSimd128(node), VisitF32x4Splat(node); |
1986 | case IrOpcode::kF32x4ExtractLane: |
1987 | return MarkAsFloat32(node), VisitF32x4ExtractLane(node); |
1988 | case IrOpcode::kF32x4ReplaceLane: |
1989 | return MarkAsSimd128(node), VisitF32x4ReplaceLane(node); |
1990 | case IrOpcode::kF32x4SConvertI32x4: |
1991 | return MarkAsSimd128(node), VisitF32x4SConvertI32x4(node); |
1992 | case IrOpcode::kF32x4UConvertI32x4: |
1993 | return MarkAsSimd128(node), VisitF32x4UConvertI32x4(node); |
1994 | case IrOpcode::kF32x4Abs: |
1995 | return MarkAsSimd128(node), VisitF32x4Abs(node); |
1996 | case IrOpcode::kF32x4Neg: |
1997 | return MarkAsSimd128(node), VisitF32x4Neg(node); |
1998 | case IrOpcode::kF32x4Sqrt: |
1999 | return MarkAsSimd128(node), VisitF32x4Sqrt(node); |
2000 | case IrOpcode::kF32x4RecipApprox: |
2001 | return MarkAsSimd128(node), VisitF32x4RecipApprox(node); |
2002 | case IrOpcode::kF32x4RecipSqrtApprox: |
2003 | return MarkAsSimd128(node), VisitF32x4RecipSqrtApprox(node); |
2004 | case IrOpcode::kF32x4Add: |
2005 | return MarkAsSimd128(node), VisitF32x4Add(node); |
2006 | case IrOpcode::kF32x4Sub: |
2007 | return MarkAsSimd128(node), VisitF32x4Sub(node); |
2008 | case IrOpcode::kF32x4Mul: |
2009 | return MarkAsSimd128(node), VisitF32x4Mul(node); |
2010 | case IrOpcode::kF32x4Div: |
2011 | return MarkAsSimd128(node), VisitF32x4Div(node); |
2012 | case IrOpcode::kF32x4Min: |
2013 | return MarkAsSimd128(node), VisitF32x4Min(node); |
2014 | case IrOpcode::kF32x4Max: |
2015 | return MarkAsSimd128(node), VisitF32x4Max(node); |
2016 | case IrOpcode::kF32x4Eq: |
2017 | return MarkAsSimd128(node), VisitF32x4Eq(node); |
2018 | case IrOpcode::kF32x4Ne: |
2019 | return MarkAsSimd128(node), VisitF32x4Ne(node); |
2020 | case IrOpcode::kF32x4Lt: |
2021 | return MarkAsSimd128(node), VisitF32x4Lt(node); |
2022 | case IrOpcode::kF32x4Le: |
2023 | return MarkAsSimd128(node), VisitF32x4Le(node); |
2024 | case IrOpcode::kF32x4Qfma: |
2025 | return MarkAsSimd128(node), VisitF32x4Qfma(node); |
2026 | case IrOpcode::kF32x4Qfms: |
2027 | return MarkAsSimd128(node), VisitF32x4Qfms(node); |
2028 | case IrOpcode::kF32x4Pmin: |
2029 | return MarkAsSimd128(node), VisitF32x4Pmin(node); |
2030 | case IrOpcode::kF32x4Pmax: |
2031 | return MarkAsSimd128(node), VisitF32x4Pmax(node); |
2032 | case IrOpcode::kF32x4Ceil: |
2033 | return MarkAsSimd128(node), VisitF32x4Ceil(node); |
2034 | case IrOpcode::kF32x4Floor: |
2035 | return MarkAsSimd128(node), VisitF32x4Floor(node); |
2036 | case IrOpcode::kF32x4Trunc: |
2037 | return MarkAsSimd128(node), VisitF32x4Trunc(node); |
2038 | case IrOpcode::kF32x4NearestInt: |
2039 | return MarkAsSimd128(node), VisitF32x4NearestInt(node); |
2040 | case IrOpcode::kF32x4DemoteF64x2Zero: |
2041 | return MarkAsSimd128(node), VisitF32x4DemoteF64x2Zero(node); |
2042 | case IrOpcode::kI64x2Splat: |
2043 | return MarkAsSimd128(node), VisitI64x2Splat(node); |
2044 | case IrOpcode::kI64x2SplatI32Pair: |
2045 | return MarkAsSimd128(node), VisitI64x2SplatI32Pair(node); |
2046 | case IrOpcode::kI64x2ExtractLane: |
2047 | return MarkAsWord64(node), VisitI64x2ExtractLane(node); |
2048 | case IrOpcode::kI64x2ReplaceLane: |
2049 | return MarkAsSimd128(node), VisitI64x2ReplaceLane(node); |
2050 | case IrOpcode::kI64x2ReplaceLaneI32Pair: |
2051 | return MarkAsSimd128(node), VisitI64x2ReplaceLaneI32Pair(node); |
2052 | case IrOpcode::kI64x2Abs: |
2053 | return MarkAsSimd128(node), VisitI64x2Abs(node); |
2054 | case IrOpcode::kI64x2Neg: |
2055 | return MarkAsSimd128(node), VisitI64x2Neg(node); |
2056 | case IrOpcode::kI64x2SConvertI32x4Low: |
2057 | return MarkAsSimd128(node), VisitI64x2SConvertI32x4Low(node); |
2058 | case IrOpcode::kI64x2SConvertI32x4High: |
2059 | return MarkAsSimd128(node), VisitI64x2SConvertI32x4High(node); |
2060 | case IrOpcode::kI64x2UConvertI32x4Low: |
2061 | return MarkAsSimd128(node), VisitI64x2UConvertI32x4Low(node); |
2062 | case IrOpcode::kI64x2UConvertI32x4High: |
2063 | return MarkAsSimd128(node), VisitI64x2UConvertI32x4High(node); |
2064 | case IrOpcode::kI64x2BitMask: |
2065 | return MarkAsWord32(node), VisitI64x2BitMask(node); |
2066 | case IrOpcode::kI64x2Shl: |
2067 | return MarkAsSimd128(node), VisitI64x2Shl(node); |
2068 | case IrOpcode::kI64x2ShrS: |
2069 | return MarkAsSimd128(node), VisitI64x2ShrS(node); |
2070 | case IrOpcode::kI64x2Add: |
2071 | return MarkAsSimd128(node), VisitI64x2Add(node); |
2072 | case IrOpcode::kI64x2Sub: |
2073 | return MarkAsSimd128(node), VisitI64x2Sub(node); |
2074 | case IrOpcode::kI64x2Mul: |
2075 | return MarkAsSimd128(node), VisitI64x2Mul(node); |
2076 | case IrOpcode::kI64x2Eq: |
2077 | return MarkAsSimd128(node), VisitI64x2Eq(node); |
2078 | case IrOpcode::kI64x2Ne: |
2079 | return MarkAsSimd128(node), VisitI64x2Ne(node); |
2080 | case IrOpcode::kI64x2GtS: |
2081 | return MarkAsSimd128(node), VisitI64x2GtS(node); |
2082 | case IrOpcode::kI64x2GeS: |
2083 | return MarkAsSimd128(node), VisitI64x2GeS(node); |
2084 | case IrOpcode::kI64x2ShrU: |
2085 | return MarkAsSimd128(node), VisitI64x2ShrU(node); |
2086 | case IrOpcode::kI64x2ExtMulLowI32x4S: |
2087 | return MarkAsSimd128(node), VisitI64x2ExtMulLowI32x4S(node); |
2088 | case IrOpcode::kI64x2ExtMulHighI32x4S: |
2089 | return MarkAsSimd128(node), VisitI64x2ExtMulHighI32x4S(node); |
2090 | case IrOpcode::kI64x2ExtMulLowI32x4U: |
2091 | return MarkAsSimd128(node), VisitI64x2ExtMulLowI32x4U(node); |
2092 | case IrOpcode::kI64x2ExtMulHighI32x4U: |
2093 | return MarkAsSimd128(node), VisitI64x2ExtMulHighI32x4U(node); |
2094 | case IrOpcode::kI32x4Splat: |
2095 | return MarkAsSimd128(node), VisitI32x4Splat(node); |
2096 | case IrOpcode::kI32x4ExtractLane: |
2097 | return MarkAsWord32(node), VisitI32x4ExtractLane(node); |
2098 | case IrOpcode::kI32x4ReplaceLane: |
2099 | return MarkAsSimd128(node), VisitI32x4ReplaceLane(node); |
2100 | case IrOpcode::kI32x4SConvertF32x4: |
2101 | return MarkAsSimd128(node), VisitI32x4SConvertF32x4(node); |
2102 | case IrOpcode::kI32x4SConvertI16x8Low: |
2103 | return MarkAsSimd128(node), VisitI32x4SConvertI16x8Low(node); |
2104 | case IrOpcode::kI32x4SConvertI16x8High: |
2105 | return MarkAsSimd128(node), VisitI32x4SConvertI16x8High(node); |
2106 | case IrOpcode::kI32x4Neg: |
2107 | return MarkAsSimd128(node), VisitI32x4Neg(node); |
2108 | case IrOpcode::kI32x4Shl: |
2109 | return MarkAsSimd128(node), VisitI32x4Shl(node); |
2110 | case IrOpcode::kI32x4ShrS: |
2111 | return MarkAsSimd128(node), VisitI32x4ShrS(node); |
2112 | case IrOpcode::kI32x4Add: |
2113 | return MarkAsSimd128(node), VisitI32x4Add(node); |
2114 | case IrOpcode::kI32x4Sub: |
2115 | return MarkAsSimd128(node), VisitI32x4Sub(node); |
2116 | case IrOpcode::kI32x4Mul: |
2117 | return MarkAsSimd128(node), VisitI32x4Mul(node); |
2118 | case IrOpcode::kI32x4MinS: |
2119 | return MarkAsSimd128(node), VisitI32x4MinS(node); |
2120 | case IrOpcode::kI32x4MaxS: |
2121 | return MarkAsSimd128(node), VisitI32x4MaxS(node); |
2122 | case IrOpcode::kI32x4Eq: |
2123 | return MarkAsSimd128(node), VisitI32x4Eq(node); |
2124 | case IrOpcode::kI32x4Ne: |
2125 | return MarkAsSimd128(node), VisitI32x4Ne(node); |
2126 | case IrOpcode::kI32x4GtS: |
2127 | return MarkAsSimd128(node), VisitI32x4GtS(node); |
2128 | case IrOpcode::kI32x4GeS: |
2129 | return MarkAsSimd128(node), VisitI32x4GeS(node); |
2130 | case IrOpcode::kI32x4UConvertF32x4: |
2131 | return MarkAsSimd128(node), VisitI32x4UConvertF32x4(node); |
2132 | case IrOpcode::kI32x4UConvertI16x8Low: |
2133 | return MarkAsSimd128(node), VisitI32x4UConvertI16x8Low(node); |
2134 | case IrOpcode::kI32x4UConvertI16x8High: |
2135 | return MarkAsSimd128(node), VisitI32x4UConvertI16x8High(node); |
2136 | case IrOpcode::kI32x4ShrU: |
2137 | return MarkAsSimd128(node), VisitI32x4ShrU(node); |
2138 | case IrOpcode::kI32x4MinU: |
2139 | return MarkAsSimd128(node), VisitI32x4MinU(node); |
2140 | case IrOpcode::kI32x4MaxU: |
2141 | return MarkAsSimd128(node), VisitI32x4MaxU(node); |
2142 | case IrOpcode::kI32x4GtU: |
2143 | return MarkAsSimd128(node), VisitI32x4GtU(node); |
2144 | case IrOpcode::kI32x4GeU: |
2145 | return MarkAsSimd128(node), VisitI32x4GeU(node); |
2146 | case IrOpcode::kI32x4Abs: |
2147 | return MarkAsSimd128(node), VisitI32x4Abs(node); |
2148 | case IrOpcode::kI32x4BitMask: |
2149 | return MarkAsWord32(node), VisitI32x4BitMask(node); |
2150 | case IrOpcode::kI32x4DotI16x8S: |
2151 | return MarkAsSimd128(node), VisitI32x4DotI16x8S(node); |
2152 | case IrOpcode::kI32x4ExtMulLowI16x8S: |
2153 | return MarkAsSimd128(node), VisitI32x4ExtMulLowI16x8S(node); |
2154 | case IrOpcode::kI32x4ExtMulHighI16x8S: |
2155 | return MarkAsSimd128(node), VisitI32x4ExtMulHighI16x8S(node); |
2156 | case IrOpcode::kI32x4ExtMulLowI16x8U: |
2157 | return MarkAsSimd128(node), VisitI32x4ExtMulLowI16x8U(node); |
2158 | case IrOpcode::kI32x4ExtMulHighI16x8U: |
2159 | return MarkAsSimd128(node), VisitI32x4ExtMulHighI16x8U(node); |
2160 | case IrOpcode::kI32x4ExtAddPairwiseI16x8S: |
2161 | return MarkAsSimd128(node), VisitI32x4ExtAddPairwiseI16x8S(node); |
2162 | case IrOpcode::kI32x4ExtAddPairwiseI16x8U: |
2163 | return MarkAsSimd128(node), VisitI32x4ExtAddPairwiseI16x8U(node); |
2164 | case IrOpcode::kI32x4TruncSatF64x2SZero: |
2165 | return MarkAsSimd128(node), VisitI32x4TruncSatF64x2SZero(node); |
2166 | case IrOpcode::kI32x4TruncSatF64x2UZero: |
2167 | return MarkAsSimd128(node), VisitI32x4TruncSatF64x2UZero(node); |
2168 | case IrOpcode::kI16x8Splat: |
2169 | return MarkAsSimd128(node), VisitI16x8Splat(node); |
2170 | case IrOpcode::kI16x8ExtractLaneU: |
2171 | return MarkAsWord32(node), VisitI16x8ExtractLaneU(node); |
2172 | case IrOpcode::kI16x8ExtractLaneS: |
2173 | return MarkAsWord32(node), VisitI16x8ExtractLaneS(node); |
2174 | case IrOpcode::kI16x8ReplaceLane: |
2175 | return MarkAsSimd128(node), VisitI16x8ReplaceLane(node); |
2176 | case IrOpcode::kI16x8SConvertI8x16Low: |
2177 | return MarkAsSimd128(node), VisitI16x8SConvertI8x16Low(node); |
2178 | case IrOpcode::kI16x8SConvertI8x16High: |
2179 | return MarkAsSimd128(node), VisitI16x8SConvertI8x16High(node); |
2180 | case IrOpcode::kI16x8Neg: |
2181 | return MarkAsSimd128(node), VisitI16x8Neg(node); |
2182 | case IrOpcode::kI16x8Shl: |
2183 | return MarkAsSimd128(node), VisitI16x8Shl(node); |
2184 | case IrOpcode::kI16x8ShrS: |
2185 | return MarkAsSimd128(node), VisitI16x8ShrS(node); |
2186 | case IrOpcode::kI16x8SConvertI32x4: |
2187 | return MarkAsSimd128(node), VisitI16x8SConvertI32x4(node); |
2188 | case IrOpcode::kI16x8Add: |
2189 | return MarkAsSimd128(node), VisitI16x8Add(node); |
2190 | case IrOpcode::kI16x8AddSatS: |
2191 | return MarkAsSimd128(node), VisitI16x8AddSatS(node); |
2192 | case IrOpcode::kI16x8Sub: |
2193 | return MarkAsSimd128(node), VisitI16x8Sub(node); |
2194 | case IrOpcode::kI16x8SubSatS: |
2195 | return MarkAsSimd128(node), VisitI16x8SubSatS(node); |
2196 | case IrOpcode::kI16x8Mul: |
2197 | return MarkAsSimd128(node), VisitI16x8Mul(node); |
2198 | case IrOpcode::kI16x8MinS: |
2199 | return MarkAsSimd128(node), VisitI16x8MinS(node); |
2200 | case IrOpcode::kI16x8MaxS: |
2201 | return MarkAsSimd128(node), VisitI16x8MaxS(node); |
2202 | case IrOpcode::kI16x8Eq: |
2203 | return MarkAsSimd128(node), VisitI16x8Eq(node); |
2204 | case IrOpcode::kI16x8Ne: |
2205 | return MarkAsSimd128(node), VisitI16x8Ne(node); |
2206 | case IrOpcode::kI16x8GtS: |
2207 | return MarkAsSimd128(node), VisitI16x8GtS(node); |
2208 | case IrOpcode::kI16x8GeS: |
2209 | return MarkAsSimd128(node), VisitI16x8GeS(node); |
2210 | case IrOpcode::kI16x8UConvertI8x16Low: |
2211 | return MarkAsSimd128(node), VisitI16x8UConvertI8x16Low(node); |
2212 | case IrOpcode::kI16x8UConvertI8x16High: |
2213 | return MarkAsSimd128(node), VisitI16x8UConvertI8x16High(node); |
2214 | case IrOpcode::kI16x8ShrU: |
2215 | return MarkAsSimd128(node), VisitI16x8ShrU(node); |
2216 | case IrOpcode::kI16x8UConvertI32x4: |
2217 | return MarkAsSimd128(node), VisitI16x8UConvertI32x4(node); |
2218 | case IrOpcode::kI16x8AddSatU: |
2219 | return MarkAsSimd128(node), VisitI16x8AddSatU(node); |
2220 | case IrOpcode::kI16x8SubSatU: |
2221 | return MarkAsSimd128(node), VisitI16x8SubSatU(node); |
2222 | case IrOpcode::kI16x8MinU: |
2223 | return MarkAsSimd128(node), VisitI16x8MinU(node); |
2224 | case IrOpcode::kI16x8MaxU: |
2225 | return MarkAsSimd128(node), VisitI16x8MaxU(node); |
2226 | case IrOpcode::kI16x8GtU: |
2227 | return MarkAsSimd128(node), VisitI16x8GtU(node); |
2228 | case IrOpcode::kI16x8GeU: |
2229 | return MarkAsSimd128(node), VisitI16x8GeU(node); |
2230 | case IrOpcode::kI16x8RoundingAverageU: |
2231 | return MarkAsSimd128(node), VisitI16x8RoundingAverageU(node); |
2232 | case IrOpcode::kI16x8Q15MulRSatS: |
2233 | return MarkAsSimd128(node), VisitI16x8Q15MulRSatS(node); |
2234 | case IrOpcode::kI16x8Abs: |
2235 | return MarkAsSimd128(node), VisitI16x8Abs(node); |
2236 | case IrOpcode::kI16x8BitMask: |
2237 | return MarkAsWord32(node), VisitI16x8BitMask(node); |
2238 | case IrOpcode::kI16x8ExtMulLowI8x16S: |
2239 | return MarkAsSimd128(node), VisitI16x8ExtMulLowI8x16S(node); |
2240 | case IrOpcode::kI16x8ExtMulHighI8x16S: |
2241 | return MarkAsSimd128(node), VisitI16x8ExtMulHighI8x16S(node); |
2242 | case IrOpcode::kI16x8ExtMulLowI8x16U: |
2243 | return MarkAsSimd128(node), VisitI16x8ExtMulLowI8x16U(node); |
2244 | case IrOpcode::kI16x8ExtMulHighI8x16U: |
2245 | return MarkAsSimd128(node), VisitI16x8ExtMulHighI8x16U(node); |
2246 | case IrOpcode::kI16x8ExtAddPairwiseI8x16S: |
2247 | return MarkAsSimd128(node), VisitI16x8ExtAddPairwiseI8x16S(node); |
2248 | case IrOpcode::kI16x8ExtAddPairwiseI8x16U: |
2249 | return MarkAsSimd128(node), VisitI16x8ExtAddPairwiseI8x16U(node); |
2250 | case IrOpcode::kI8x16Splat: |
2251 | return MarkAsSimd128(node), VisitI8x16Splat(node); |
2252 | case IrOpcode::kI8x16ExtractLaneU: |
2253 | return MarkAsWord32(node), VisitI8x16ExtractLaneU(node); |
2254 | case IrOpcode::kI8x16ExtractLaneS: |
2255 | return MarkAsWord32(node), VisitI8x16ExtractLaneS(node); |
2256 | case IrOpcode::kI8x16ReplaceLane: |
2257 | return MarkAsSimd128(node), VisitI8x16ReplaceLane(node); |
2258 | case IrOpcode::kI8x16Neg: |
2259 | return MarkAsSimd128(node), VisitI8x16Neg(node); |
2260 | case IrOpcode::kI8x16Shl: |
2261 | return MarkAsSimd128(node), VisitI8x16Shl(node); |
2262 | case IrOpcode::kI8x16ShrS: |
2263 | return MarkAsSimd128(node), VisitI8x16ShrS(node); |
2264 | case IrOpcode::kI8x16SConvertI16x8: |
2265 | return MarkAsSimd128(node), VisitI8x16SConvertI16x8(node); |
2266 | case IrOpcode::kI8x16Add: |
2267 | return MarkAsSimd128(node), VisitI8x16Add(node); |
2268 | case IrOpcode::kI8x16AddSatS: |
2269 | return MarkAsSimd128(node), VisitI8x16AddSatS(node); |
2270 | case IrOpcode::kI8x16Sub: |
2271 | return MarkAsSimd128(node), VisitI8x16Sub(node); |
2272 | case IrOpcode::kI8x16SubSatS: |
2273 | return MarkAsSimd128(node), VisitI8x16SubSatS(node); |
2274 | case IrOpcode::kI8x16MinS: |
2275 | return MarkAsSimd128(node), VisitI8x16MinS(node); |
2276 | case IrOpcode::kI8x16MaxS: |
2277 | return MarkAsSimd128(node), VisitI8x16MaxS(node); |
2278 | case IrOpcode::kI8x16Eq: |
2279 | return MarkAsSimd128(node), VisitI8x16Eq(node); |
2280 | case IrOpcode::kI8x16Ne: |
2281 | return MarkAsSimd128(node), VisitI8x16Ne(node); |
2282 | case IrOpcode::kI8x16GtS: |
2283 | return MarkAsSimd128(node), VisitI8x16GtS(node); |
2284 | case IrOpcode::kI8x16GeS: |
2285 | return MarkAsSimd128(node), VisitI8x16GeS(node); |
2286 | case IrOpcode::kI8x16ShrU: |
2287 | return MarkAsSimd128(node), VisitI8x16ShrU(node); |
2288 | case IrOpcode::kI8x16UConvertI16x8: |
2289 | return MarkAsSimd128(node), VisitI8x16UConvertI16x8(node); |
2290 | case IrOpcode::kI8x16AddSatU: |
2291 | return MarkAsSimd128(node), VisitI8x16AddSatU(node); |
2292 | case IrOpcode::kI8x16SubSatU: |
2293 | return MarkAsSimd128(node), VisitI8x16SubSatU(node); |
2294 | case IrOpcode::kI8x16MinU: |
2295 | return MarkAsSimd128(node), VisitI8x16MinU(node); |
2296 | case IrOpcode::kI8x16MaxU: |
2297 | return MarkAsSimd128(node), VisitI8x16MaxU(node); |
2298 | case IrOpcode::kI8x16GtU: |
2299 | return MarkAsSimd128(node), VisitI8x16GtU(node); |
2300 | case IrOpcode::kI8x16GeU: |
2301 | return MarkAsSimd128(node), VisitI8x16GeU(node); |
2302 | case IrOpcode::kI8x16RoundingAverageU: |
2303 | return MarkAsSimd128(node), VisitI8x16RoundingAverageU(node); |
2304 | case IrOpcode::kI8x16Popcnt: |
2305 | return MarkAsSimd128(node), VisitI8x16Popcnt(node); |
2306 | case IrOpcode::kI8x16Abs: |
2307 | return MarkAsSimd128(node), VisitI8x16Abs(node); |
2308 | case IrOpcode::kI8x16BitMask: |
2309 | return MarkAsWord32(node), VisitI8x16BitMask(node); |
2310 | case IrOpcode::kS128Const: |
2311 | return MarkAsSimd128(node), VisitS128Const(node); |
2312 | case IrOpcode::kS128Zero: |
2313 | return MarkAsSimd128(node), VisitS128Zero(node); |
2314 | case IrOpcode::kS128And: |
2315 | return MarkAsSimd128(node), VisitS128And(node); |
2316 | case IrOpcode::kS128Or: |
2317 | return MarkAsSimd128(node), VisitS128Or(node); |
2318 | case IrOpcode::kS128Xor: |
2319 | return MarkAsSimd128(node), VisitS128Xor(node); |
2320 | case IrOpcode::kS128Not: |
2321 | return MarkAsSimd128(node), VisitS128Not(node); |
2322 | case IrOpcode::kS128Select: |
2323 | return MarkAsSimd128(node), VisitS128Select(node); |
2324 | case IrOpcode::kS128AndNot: |
2325 | return MarkAsSimd128(node), VisitS128AndNot(node); |
2326 | case IrOpcode::kI8x16Swizzle: |
2327 | return MarkAsSimd128(node), VisitI8x16Swizzle(node); |
2328 | case IrOpcode::kI8x16Shuffle: |
2329 | return MarkAsSimd128(node), VisitI8x16Shuffle(node); |
2330 | case IrOpcode::kV128AnyTrue: |
2331 | return MarkAsWord32(node), VisitV128AnyTrue(node); |
2332 | case IrOpcode::kI64x2AllTrue: |
2333 | return MarkAsWord32(node), VisitI64x2AllTrue(node); |
2334 | case IrOpcode::kI32x4AllTrue: |
2335 | return MarkAsWord32(node), VisitI32x4AllTrue(node); |
2336 | case IrOpcode::kI16x8AllTrue: |
2337 | return MarkAsWord32(node), VisitI16x8AllTrue(node); |
2338 | case IrOpcode::kI8x16AllTrue: |
2339 | return MarkAsWord32(node), VisitI8x16AllTrue(node); |
2340 | case IrOpcode::kI8x16RelaxedLaneSelect: |
2341 | return MarkAsSimd128(node), VisitI8x16RelaxedLaneSelect(node); |
2342 | case IrOpcode::kI16x8RelaxedLaneSelect: |
2343 | return MarkAsSimd128(node), VisitI16x8RelaxedLaneSelect(node); |
2344 | case IrOpcode::kI32x4RelaxedLaneSelect: |
2345 | return MarkAsSimd128(node), VisitI32x4RelaxedLaneSelect(node); |
2346 | case IrOpcode::kI64x2RelaxedLaneSelect: |
2347 | return MarkAsSimd128(node), VisitI64x2RelaxedLaneSelect(node); |
2348 | case IrOpcode::kF32x4RelaxedMin: |
2349 | return MarkAsSimd128(node), VisitF32x4RelaxedMin(node); |
2350 | case IrOpcode::kF32x4RelaxedMax: |
2351 | return MarkAsSimd128(node), VisitF32x4RelaxedMax(node); |
2352 | case IrOpcode::kF64x2RelaxedMin: |
2353 | return MarkAsSimd128(node), VisitF64x2RelaxedMin(node); |
2354 | case IrOpcode::kF64x2RelaxedMax: |
2355 | return MarkAsSimd128(node), VisitF64x2RelaxedMax(node); |
2356 | case IrOpcode::kI32x4RelaxedTruncF64x2SZero: |
2357 | return MarkAsSimd128(node), VisitI32x4RelaxedTruncF64x2SZero(node); |
2358 | case IrOpcode::kI32x4RelaxedTruncF64x2UZero: |
2359 | return MarkAsSimd128(node), VisitI32x4RelaxedTruncF64x2UZero(node); |
2360 | case IrOpcode::kI32x4RelaxedTruncF32x4S: |
2361 | return MarkAsSimd128(node), VisitI32x4RelaxedTruncF32x4S(node); |
2362 | case IrOpcode::kI32x4RelaxedTruncF32x4U: |
2363 | return MarkAsSimd128(node), VisitI32x4RelaxedTruncF32x4U(node); |
2364 | default: |
2365 | FATAL("Unexpected operator #%d:%s @ node #%d", node->opcode(),V8_Fatal("Unexpected operator #%d:%s @ node #%d", node->opcode (), node->op()->mnemonic(), node->id()) |
2366 | node->op()->mnemonic(), node->id())V8_Fatal("Unexpected operator #%d:%s @ node #%d", node->opcode (), node->op()->mnemonic(), node->id()); |
2367 | } |
2368 | } |
2369 | |
2370 | void InstructionSelector::VisitStackPointerGreaterThan(Node* node) { |
2371 | FlagsContinuation cont = |
2372 | FlagsContinuation::ForSet(kStackPointerGreaterThanCondition, node); |
2373 | VisitStackPointerGreaterThan(node, &cont); |
2374 | } |
2375 | |
2376 | void InstructionSelector::VisitLoadStackCheckOffset(Node* node) { |
2377 | OperandGenerator g(this); |
2378 | Emit(kArchStackCheckOffset, g.DefineAsRegister(node)); |
2379 | } |
2380 | |
2381 | void InstructionSelector::VisitLoadFramePointer(Node* node) { |
2382 | OperandGenerator g(this); |
2383 | Emit(kArchFramePointer, g.DefineAsRegister(node)); |
2384 | } |
2385 | |
2386 | void InstructionSelector::VisitLoadParentFramePointer(Node* node) { |
2387 | OperandGenerator g(this); |
2388 | Emit(kArchParentFramePointer, g.DefineAsRegister(node)); |
2389 | } |
2390 | |
2391 | void InstructionSelector::VisitFloat64Acos(Node* node) { |
2392 | VisitFloat64Ieee754Unop(node, kIeee754Float64Acos); |
2393 | } |
2394 | |
2395 | void InstructionSelector::VisitFloat64Acosh(Node* node) { |
2396 | VisitFloat64Ieee754Unop(node, kIeee754Float64Acosh); |
2397 | } |
2398 | |
2399 | void InstructionSelector::VisitFloat64Asin(Node* node) { |
2400 | VisitFloat64Ieee754Unop(node, kIeee754Float64Asin); |
2401 | } |
2402 | |
2403 | void InstructionSelector::VisitFloat64Asinh(Node* node) { |
2404 | VisitFloat64Ieee754Unop(node, kIeee754Float64Asinh); |
2405 | } |
2406 | |
2407 | void InstructionSelector::VisitFloat64Atan(Node* node) { |
2408 | VisitFloat64Ieee754Unop(node, kIeee754Float64Atan); |
2409 | } |
2410 | |
2411 | void InstructionSelector::VisitFloat64Atanh(Node* node) { |
2412 | VisitFloat64Ieee754Unop(node, kIeee754Float64Atanh); |
2413 | } |
2414 | |
2415 | void InstructionSelector::VisitFloat64Atan2(Node* node) { |
2416 | VisitFloat64Ieee754Binop(node, kIeee754Float64Atan2); |
2417 | } |
2418 | |
2419 | void InstructionSelector::VisitFloat64Cbrt(Node* node) { |
2420 | VisitFloat64Ieee754Unop(node, kIeee754Float64Cbrt); |
2421 | } |
2422 | |
2423 | void InstructionSelector::VisitFloat64Cos(Node* node) { |
2424 | VisitFloat64Ieee754Unop(node, kIeee754Float64Cos); |
2425 | } |
2426 | |
2427 | void InstructionSelector::VisitFloat64Cosh(Node* node) { |
2428 | VisitFloat64Ieee754Unop(node, kIeee754Float64Cosh); |
2429 | } |
2430 | |
2431 | void InstructionSelector::VisitFloat64Exp(Node* node) { |
2432 | VisitFloat64Ieee754Unop(node, kIeee754Float64Exp); |
2433 | } |
2434 | |
2435 | void InstructionSelector::VisitFloat64Expm1(Node* node) { |
2436 | VisitFloat64Ieee754Unop(node, kIeee754Float64Expm1); |
2437 | } |
2438 | |
2439 | void InstructionSelector::VisitFloat64Log(Node* node) { |
2440 | VisitFloat64Ieee754Unop(node, kIeee754Float64Log); |
2441 | } |
2442 | |
2443 | void InstructionSelector::VisitFloat64Log1p(Node* node) { |
2444 | VisitFloat64Ieee754Unop(node, kIeee754Float64Log1p); |
2445 | } |
2446 | |
2447 | void InstructionSelector::VisitFloat64Log2(Node* node) { |
2448 | VisitFloat64Ieee754Unop(node, kIeee754Float64Log2); |
2449 | } |
2450 | |
2451 | void InstructionSelector::VisitFloat64Log10(Node* node) { |
2452 | VisitFloat64Ieee754Unop(node, kIeee754Float64Log10); |
2453 | } |
2454 | |
2455 | void InstructionSelector::VisitFloat64Pow(Node* node) { |
2456 | VisitFloat64Ieee754Binop(node, kIeee754Float64Pow); |
2457 | } |
2458 | |
2459 | void InstructionSelector::VisitFloat64Sin(Node* node) { |
2460 | VisitFloat64Ieee754Unop(node, kIeee754Float64Sin); |
2461 | } |
2462 | |
2463 | void InstructionSelector::VisitFloat64Sinh(Node* node) { |
2464 | VisitFloat64Ieee754Unop(node, kIeee754Float64Sinh); |
2465 | } |
2466 | |
2467 | void InstructionSelector::VisitFloat64Tan(Node* node) { |
2468 | VisitFloat64Ieee754Unop(node, kIeee754Float64Tan); |
2469 | } |
2470 | |
2471 | void InstructionSelector::VisitFloat64Tanh(Node* node) { |
2472 | VisitFloat64Ieee754Unop(node, kIeee754Float64Tanh); |
2473 | } |
2474 | |
2475 | void InstructionSelector::EmitTableSwitch( |
2476 | const SwitchInfo& sw, InstructionOperand const& index_operand) { |
2477 | OperandGenerator g(this); |
2478 | size_t input_count = 2 + sw.value_range(); |
2479 | DCHECK_LE(sw.value_range(), std::numeric_limits<size_t>::max() - 2)((void) 0); |
2480 | auto* inputs = zone()->NewArray<InstructionOperand>(input_count); |
2481 | inputs[0] = index_operand; |
2482 | InstructionOperand default_operand = g.Label(sw.default_branch()); |
2483 | std::fill(&inputs[1], &inputs[input_count], default_operand); |
2484 | for (const CaseInfo& c : sw.CasesUnsorted()) { |
2485 | size_t value = c.value - sw.min_value(); |
2486 | DCHECK_LE(0u, value)((void) 0); |
2487 | DCHECK_LT(value + 2, input_count)((void) 0); |
2488 | inputs[value + 2] = g.Label(c.branch); |
2489 | } |
2490 | Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr); |
2491 | } |
2492 | |
2493 | void InstructionSelector::EmitBinarySearchSwitch( |
2494 | const SwitchInfo& sw, InstructionOperand const& value_operand) { |
2495 | OperandGenerator g(this); |
2496 | size_t input_count = 2 + sw.case_count() * 2; |
2497 | DCHECK_LE(sw.case_count(), (std::numeric_limits<size_t>::max() - 2) / 2)((void) 0); |
2498 | auto* inputs = zone()->NewArray<InstructionOperand>(input_count); |
2499 | inputs[0] = value_operand; |
2500 | inputs[1] = g.Label(sw.default_branch()); |
2501 | std::vector<CaseInfo> cases = sw.CasesSortedByValue(); |
2502 | for (size_t index = 0; index < cases.size(); ++index) { |
2503 | const CaseInfo& c = cases[index]; |
2504 | inputs[index * 2 + 2 + 0] = g.TempImmediate(c.value); |
2505 | inputs[index * 2 + 2 + 1] = g.Label(c.branch); |
2506 | } |
2507 | Emit(kArchBinarySearchSwitch, 0, nullptr, input_count, inputs, 0, nullptr); |
2508 | } |
2509 | |
2510 | void InstructionSelector::VisitBitcastTaggedToWord(Node* node) { |
2511 | EmitIdentity(node); |
2512 | } |
2513 | |
2514 | void InstructionSelector::VisitBitcastWordToTagged(Node* node) { |
2515 | OperandGenerator g(this); |
2516 | Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(node->InputAt(0))); |
2517 | } |
2518 | |
2519 | // 32 bit targets do not implement the following instructions. |
#if V8_TARGET_ARCH_32_BIT
// 64-bit scalar visitors must never be reached on 32-bit targets.
void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64Or(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64Xor(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64Shl(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64Shr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64Sar(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64Rol(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64Ror(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64Clz(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64Ctz(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64Popcnt(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64Equal(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitInt64Sub(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt64Div(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt64LessThan(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitUint64Div(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt64Mod(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitUint64LessThan(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitUint64Mod(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitChangeInt64ToFloat64(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
  UNIMPLEMENTED();
}
#endif  // V8_TARGET_ARCH_32_BIT
2668 | |
2669 | // 64 bit targets do not implement the following instructions. |
#if V8_TARGET_ARCH_64_BIT
// Int32-pair visitors must never be reached on 64-bit targets.
void InstructionSelector::VisitInt32PairAdd(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt32PairSub(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt32PairMul(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord32PairShl(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
#endif  // V8_TARGET_ARCH_64_BIT
2683 | |
2684 | #if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS |
2685 | void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) { |
2686 | UNIMPLEMENTED()V8_Fatal("unimplemented code"); |
2687 | } |
2688 | |
2689 | void InstructionSelector::VisitWord32AtomicPairStore(Node* node) { |
2690 | UNIMPLEMENTED()V8_Fatal("unimplemented code"); |
2691 | } |
2692 | |
2693 | void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) { |
2694 | UNIMPLEMENTED()V8_Fatal("unimplemented code"); |
2695 | } |
2696 | |
2697 | void InstructionSelector::VisitWord32AtomicPairSub(Node* node) { |
2698 | UNIMPLEMENTED()V8_Fatal("unimplemented code"); |
2699 | } |
2700 | |
2701 | void InstructionSelector::VisitWord32AtomicPairAnd(Node* node) { |
2702 | UNIMPLEMENTED()V8_Fatal("unimplemented code"); |
2703 | } |
2704 | |
2705 | void InstructionSelector::VisitWord32AtomicPairOr(Node* node) { |
2706 | UNIMPLEMENTED()V8_Fatal("unimplemented code"); |
2707 | } |
2708 | |
2709 | void InstructionSelector::VisitWord32AtomicPairXor(Node* node) { |
2710 | UNIMPLEMENTED()V8_Fatal("unimplemented code"); |
2711 | } |
2712 | |
2713 | void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) { |
2714 | UNIMPLEMENTED()V8_Fatal("unimplemented code"); |
2715 | } |
2716 | |
2717 | void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) { |
2718 | UNIMPLEMENTED()V8_Fatal("unimplemented code"); |
2719 | } |
2720 | #endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS |
2721 | |
2722 | #if !V8_TARGET_ARCH_X641 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \ |
2723 | !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64 && \ |
2724 | !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64 |
2725 | void InstructionSelector::VisitWord64AtomicLoad(Node* node) { UNIMPLEMENTED()V8_Fatal("unimplemented code"); } |
2726 | |
2727 | void InstructionSelector::VisitWord64AtomicStore(Node* node) { |
2728 | UNIMPLEMENTED()V8_Fatal("unimplemented code"); |
2729 | } |
2730 | |
2731 | void InstructionSelector::VisitWord64AtomicAdd(Node* node) { UNIMPLEMENTED()V8_Fatal("unimplemented code"); } |
2732 | |
2733 | void InstructionSelector::VisitWord64AtomicSub(Node* node) { UNIMPLEMENTED()V8_Fatal("unimplemented code"); } |
2734 | |
2735 | void InstructionSelector::VisitWord64AtomicAnd(Node* node) { UNIMPLEMENTED()V8_Fatal("unimplemented code"); } |
2736 | |
2737 | void InstructionSelector::VisitWord64AtomicOr(Node* node) { UNIMPLEMENTED()V8_Fatal("unimplemented code"); } |
2738 | |
2739 | void InstructionSelector::VisitWord64AtomicXor(Node* node) { UNIMPLEMENTED()V8_Fatal("unimplemented code"); } |
2740 | |
2741 | void InstructionSelector::VisitWord64AtomicExchange(Node* node) { |
2742 | UNIMPLEMENTED()V8_Fatal("unimplemented code"); |
2743 | } |
2744 | |
2745 | void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) { |
2746 | UNIMPLEMENTED()V8_Fatal("unimplemented code"); |
2747 | } |
2748 | #endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_PPC64 |
2749 | // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390 && |
2750 | // !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64 |
2751 | |
2752 | #if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM |
2753 | // This is only needed on 32-bit to split the 64-bit value into two operands. |
2754 | void InstructionSelector::VisitI64x2SplatI32Pair(Node* node) { |
2755 | UNIMPLEMENTED()V8_Fatal("unimplemented code"); |
2756 | } |
2757 | void InstructionSelector::VisitI64x2ReplaceLaneI32Pair(Node* node) { |
2758 | UNIMPLEMENTED()V8_Fatal("unimplemented code"); |
2759 | } |
2760 | #endif // !V8_TARGET_ARCH_IA32 |
2761 | |
2762 | #if !V8_TARGET_ARCH_X641 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64 |
2763 | #if !V8_TARGET_ARCH_ARM64 |
2764 | #if !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_LOONG64 && !V8_TARGET_ARCH_RISCV64 |
2765 | void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED()V8_Fatal("unimplemented code"); } |
2766 | void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED()V8_Fatal("unimplemented code"); } |
2767 | void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED()V8_Fatal("unimplemented code"); } |
2768 | #endif // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_LOONG64 && |
2769 | // !V8_TARGET_ARCH_RISCV64 |
2770 | #endif // !V8_TARGET_ARCH_ARM64 |
2771 | #endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64 |
2772 | |
2773 | #if !V8_TARGET_ARCH_X641 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64 && \ |
2774 | !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_RISCV64 |
// Fused multiply-add/subtract (QFMA/QFMS) stubs for targets without support.
2775 | void InstructionSelector::VisitF64x2Qfma(Node* node) { UNIMPLEMENTED()V8_Fatal("unimplemented code"); } |
2776 | void InstructionSelector::VisitF64x2Qfms(Node* node) { UNIMPLEMENTED()V8_Fatal("unimplemented code"); } |
2777 | void InstructionSelector::VisitF32x4Qfma(Node* node) { UNIMPLEMENTED()V8_Fatal("unimplemented code"); } |
2778 | void InstructionSelector::VisitF32x4Qfms(Node* node) { UNIMPLEMENTED()V8_Fatal("unimplemented code"); } |
2779 | #endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64 |
2780 | // && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_IA32 && |
2781 | // !V8_TARGET_ARCH_RISCV64 |
2782 | |
2783 | #if !V8_TARGET_ARCH_X641 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64 && \ |
2784 | !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_ARM |
// Relaxed-SIMD operation stubs for targets that do not implement the
// relaxed-simd proposal; they must never be reached on those targets.
2785 | void InstructionSelector::VisitI8x16RelaxedLaneSelect(Node* node) { |
2786 |   UNIMPLEMENTED()V8_Fatal("unimplemented code"); |
2787 | } |
2788 | void InstructionSelector::VisitI16x8RelaxedLaneSelect(Node* node) { |
2789 |   UNIMPLEMENTED()V8_Fatal("unimplemented code"); |
2790 | } |
2791 | void InstructionSelector::VisitI32x4RelaxedLaneSelect(Node* node) { |
2792 |   UNIMPLEMENTED()V8_Fatal("unimplemented code"); |
2793 | } |
2794 | void InstructionSelector::VisitI64x2RelaxedLaneSelect(Node* node) { |
2795 |   UNIMPLEMENTED()V8_Fatal("unimplemented code"); |
2796 | } |
2797 | void InstructionSelector::VisitF32x4RelaxedMin(Node* node) { UNIMPLEMENTED()V8_Fatal("unimplemented code"); } |
2798 | void InstructionSelector::VisitF32x4RelaxedMax(Node* node) { UNIMPLEMENTED()V8_Fatal("unimplemented code"); } |
2799 | void InstructionSelector::VisitF64x2RelaxedMin(Node* node) { UNIMPLEMENTED()V8_Fatal("unimplemented code"); } |
2800 | void InstructionSelector::VisitF64x2RelaxedMax(Node* node) { UNIMPLEMENTED()V8_Fatal("unimplemented code"); } |
2801 | void InstructionSelector::VisitI32x4RelaxedTruncF64x2SZero(Node* node) { |
2802 |   UNIMPLEMENTED()V8_Fatal("unimplemented code"); |
2803 | } |
2804 | void InstructionSelector::VisitI32x4RelaxedTruncF64x2UZero(Node* node) { |
2805 |   UNIMPLEMENTED()V8_Fatal("unimplemented code"); |
2806 | } |
2807 | void InstructionSelector::VisitI32x4RelaxedTruncF32x4S(Node* node) { |
2808 |   UNIMPLEMENTED()V8_Fatal("unimplemented code"); |
2809 | } |
2810 | void InstructionSelector::VisitI32x4RelaxedTruncF32x4U(Node* node) { |
2811 |   UNIMPLEMENTED()V8_Fatal("unimplemented code"); |
2812 | } |
2813 | #endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64 |
2814 | // && !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_ARM |
2815 | |
2816 | #if !V8_TARGET_ARCH_X641 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64 && \ |
2817 | !V8_TARGET_ARCH_RISCV64 |
2818 | #endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64 |
2819 | // && !V8_TARGET_ARCH_RISCV64 |
2820 | |
2821 | void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); } |
2822 | |
// Defines the incoming parameter at the location assigned by the linkage.
// A kArchNop is emitted so the parameter's live range has a defining
// instruction; parameters with a secondary location get a dual definition.
2823 | void InstructionSelector::VisitParameter(Node* node) { |
2824 |   OperandGenerator g(this); |
2825 |   int index = ParameterIndexOf(node->op()); |
2826 |   InstructionOperand op = |
2827 |       linkage()->ParameterHasSecondaryLocation(index) |
2828 |           ? g.DefineAsDualLocation( |
2829 |                 node, linkage()->GetParameterLocation(index), |
2830 |                 linkage()->GetParameterSecondaryLocation(index)) |
2831 |           : g.DefineAsLocation(node, linkage()->GetParameterLocation(index)); |
2832 |  |
2833 |   Emit(kArchNop, op); |
2834 | } |
2835 | |
2836 | namespace { |
2837 | |
// The caught exception object is delivered in the first return register as a
// tagged pointer.
2838 | LinkageLocation ExceptionLocation() { |
2839 |   return LinkageLocation::ForRegister(kReturnRegister0.code(), |
2840 |                                       MachineType::TaggedPointer()); |
2841 | } |
2842 | |
// Packs the low bits of the call-descriptor flags into the instruction's
// MiscField. Only flags that fit in MiscField survive (masked by kMax).
2843 | constexpr InstructionCode EncodeCallDescriptorFlags( |
2844 |     InstructionCode opcode, CallDescriptor::Flags flags) { |
2845 |   // Note: Not all bits of `flags` are preserved. |
2846 |   STATIC_ASSERT(CallDescriptor::kFlagsBitsEncodedInInstructionCode ==static_assert(CallDescriptor::kFlagsBitsEncodedInInstructionCode == MiscField::kSize, "CallDescriptor::kFlagsBitsEncodedInInstructionCode == MiscField::kSize" ) |
2847 |                 MiscField::kSize)static_assert(CallDescriptor::kFlagsBitsEncodedInInstructionCode == MiscField::kSize, "CallDescriptor::kFlagsBitsEncodedInInstructionCode == MiscField::kSize" ); |
2848 |   DCHECK(Instruction::IsCallWithDescriptorFlags(opcode))((void) 0); |
2849 |   return opcode | MiscField::encode(flags & MiscField::kMax); |
2850 | } |
2851 | |
2852 | } // namespace |
2853 | |
// An IfException projection receives the thrown exception in the return
// register (see ExceptionLocation). Input 1 must be the throwing call.
2854 | void InstructionSelector::VisitIfException(Node* node) { |
2855 |   OperandGenerator g(this); |
2856 |   DCHECK_EQ(IrOpcode::kCall, node->InputAt(1)->opcode())((void) 0); |
2857 |   Emit(kArchNop, g.DefineAsLocation(node, ExceptionLocation())); |
2858 | } |
2859 | |
// Defines an on-stack-replacement value at the location the linkage assigns
// to its OSR index; kArchNop merely gives the live range a definition.
2860 | void InstructionSelector::VisitOsrValue(Node* node) { |
2861 |   OperandGenerator g(this); |
2862 |   int index = OsrValueIndexOf(node->op()); |
2863 |   Emit(kArchNop, |
2864 |        g.DefineAsLocation(node, linkage()->GetOsrValueLocation(index))); |
2865 | } |
2866 | |
// Builds a PhiInstruction for this phi (one slot per predecessor) and
// attaches it to the current instruction block. Each input is marked used and
// wired up via its virtual register.
2867 | void InstructionSelector::VisitPhi(Node* node) { |
2868 |   const int input_count = node->op()->ValueInputCount(); |
2869 |   DCHECK_EQ(input_count, current_block_->PredecessorCount())((void) 0); |
2870 |   PhiInstruction* phi = instruction_zone()->New<PhiInstruction>( |
2871 |       instruction_zone(), GetVirtualRegister(node), |
2872 |       static_cast<size_t>(input_count)); |
2873 |   sequence() |
2874 |       ->InstructionBlockAt(RpoNumber::FromInt(current_block_->rpo_number())) |
2875 |       ->AddPhi(phi); |
2876 |   for (int i = 0; i < input_count; ++i) { |
2877 |     Node* const input = node->InputAt(i); |
2878 |     MarkAsUsed(input); |
2879 |     phi->SetInput(static_cast<size_t>(i), GetVirtualRegister(input)); |
2880 |   } |
2881 | } |
2882 | |
// Handles Projection nodes over multi-output operations. For the listed
// two-result opcodes, projection 0 is defined same-as-first over the value;
// projection 1 only marks the value as used. Projections over any other
// opcode are ignored here.
2883 | void InstructionSelector::VisitProjection(Node* node) { |
2884 |   OperandGenerator g(this); |
2885 |   Node* value = node->InputAt(0); |
2886 |   switch (value->opcode()) { |
2887 |     case IrOpcode::kInt32AddWithOverflow: |
2888 |     case IrOpcode::kInt32SubWithOverflow: |
2889 |     case IrOpcode::kInt32MulWithOverflow: |
2890 |     case IrOpcode::kInt64AddWithOverflow: |
2891 |     case IrOpcode::kInt64SubWithOverflow: |
2892 |     case IrOpcode::kTryTruncateFloat32ToInt64: |
2893 |     case IrOpcode::kTryTruncateFloat64ToInt64: |
2894 |     case IrOpcode::kTryTruncateFloat32ToUint64: |
2895 |     case IrOpcode::kTryTruncateFloat64ToUint64: |
2896 |     case IrOpcode::kInt32PairAdd: |
2897 |     case IrOpcode::kInt32PairSub: |
2898 |     case IrOpcode::kInt32PairMul: |
2899 |     case IrOpcode::kWord32PairShl: |
2900 |     case IrOpcode::kWord32PairShr: |
2901 |     case IrOpcode::kWord32PairSar: |
2902 |     case IrOpcode::kInt32AbsWithOverflow: |
2903 |     case IrOpcode::kInt64AbsWithOverflow: |
2904 |       if (ProjectionIndexOf(node->op()) == 0u) { |
2905 |         Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value)); |
2906 |       } else { |
2907 |         DCHECK_EQ(1u, ProjectionIndexOf(node->op()))((void) 0); |
2908 |         MarkAsUsed(value); |
2909 |       } |
2910 |       break; |
2911 |     default: |
2912 |       break; |
2913 |   } |
2914 | } |
2915 | |
2916 | void InstructionSelector::VisitConstant(Node* node) { |
2917 |   // We must emit a NOP here because every live range needs a defining |
2918 |   // instruction in the register allocator. |
2919 |   OperandGenerator g(this); |
2920 |   Emit(kArchNop, g.DefineAsConstant(node)); |
2921 | } |
2922 | |
// Records the largest number of arguments pushed by any call in the function;
// the backend sizes the outgoing-argument stack area from this maximum.
2923 | void InstructionSelector::UpdateMaxPushedArgumentCount(size_t count) { |
2924 |   *max_pushed_argument_count_ = std::max(count, *max_pushed_argument_count_); |
2925 | } |
2926 | |
// Lowers a Call node: optionally saves/restores caller-saved registers,
// builds a CallBuffer for inputs/outputs and any frame state, threads the
// exception-handler label through the instruction args, chooses the arch
// opcode from the call-descriptor kind, and emits the call.
2927 | void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) { |
2928 |   OperandGenerator g(this); |
2929 |   auto call_descriptor = CallDescriptorOf(node->op()); |
2930 |   SaveFPRegsMode mode = call_descriptor->NeedsCallerSavedFPRegisters() |
2931 |                             ? SaveFPRegsMode::kSave |
2932 |                             : SaveFPRegsMode::kIgnore; |
2933 | |
      // Bracket the call with explicit save/restore when the descriptor says
      // caller-saved registers must be preserved across it.
2934 |   if (call_descriptor->NeedsCallerSavedRegisters()) { |
2935 |     Emit(kArchSaveCallerRegisters | MiscField::encode(static_cast<int>(mode)), |
2936 |          g.NoOutput()); |
2937 |   } |
2938 | |
      // The frame-state input, when present, sits directly after the regular
      // call inputs.
2939 |   FrameStateDescriptor* frame_state_descriptor = nullptr; |
2940 |   if (call_descriptor->NeedsFrameState()) { |
2941 |     frame_state_descriptor = GetFrameStateDescriptor(FrameState{ |
2942 |         node->InputAt(static_cast<int>(call_descriptor->InputCount()))}); |
2943 |   } |
2944 | |
2945 |   CallBuffer buffer(zone(), call_descriptor, frame_state_descriptor); |
2946 |   CallDescriptor::Flags flags = call_descriptor->flags(); |
2947 | |
2948 |   // Compute InstructionOperands for inputs and outputs. |
2949 |   // TODO(turbofan): on some architectures it's probably better to use |
2950 |   // the code object in a register if there are multiple uses of it. |
2951 |   // Improve constant pool and the heuristics in the register allocator |
2952 |   // for where to emit constants. |
2953 |   CallBufferFlags call_buffer_flags(kCallCodeImmediate | kCallAddressImmediate); |
2954 |   InitializeCallBuffer(node, &buffer, call_buffer_flags); |
2955 | |
2956 |   EmitPrepareArguments(&buffer.pushed_nodes, call_descriptor, node); |
2957 |   UpdateMaxPushedArgumentCount(buffer.pushed_nodes.size()); |
2958 | |
2959 |   // Pass label of exception handler block. |
2960 |   if (handler) { |
2961 |     DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode())((void) 0); |
2962 |     flags |= CallDescriptor::kHasExceptionHandler; |
2963 |     buffer.instruction_args.push_back(g.Label(handler)); |
2964 |   } |
2965 | |
2966 |   // Select the appropriate opcode based on the call type. |
2967 |   InstructionCode opcode; |
2968 |   switch (call_descriptor->kind()) { |
2969 |     case CallDescriptor::kCallAddress: { |
          // C calls encode the GP/FP parameter counts in the instruction so
          // the code generator can set up the C ABI.
2970 |       int gp_param_count = |
2971 |           static_cast<int>(call_descriptor->GPParameterCount()); |
2972 |       int fp_param_count = |
2973 |           static_cast<int>(call_descriptor->FPParameterCount()); |
2974 | #if ABI_USES_FUNCTION_DESCRIPTORS |
2975 |       // Highest fp_param_count bit is used on AIX to indicate if a CFunction |
2976 |       // call has function descriptor or not. |
2977 |       STATIC_ASSERT(FPParamField::kSize == kHasFunctionDescriptorBitShift + 1)static_assert(FPParamField::kSize == kHasFunctionDescriptorBitShift + 1, "FPParamField::kSize == kHasFunctionDescriptorBitShift + 1" ); |
2978 |       if (!call_descriptor->NoFunctionDescriptor()) { |
2979 |         fp_param_count |= 1 << kHasFunctionDescriptorBitShift; |
2980 |       } |
2981 | #endif |
2982 |       opcode = kArchCallCFunction | ParamField::encode(gp_param_count) | |
2983 |                FPParamField::encode(fp_param_count); |
2984 |       break; |
2985 |     } |
2986 |     case CallDescriptor::kCallCodeObject: |
2987 |       opcode = EncodeCallDescriptorFlags(kArchCallCodeObject, flags); |
2988 |       break; |
2989 |     case CallDescriptor::kCallJSFunction: |
2990 |       opcode = EncodeCallDescriptorFlags(kArchCallJSFunction, flags); |
2991 |       break; |
2992 | #if V8_ENABLE_WEBASSEMBLY1 |
2993 |     case CallDescriptor::kCallWasmCapiFunction: |
2994 |     case CallDescriptor::kCallWasmFunction: |
2995 |     case CallDescriptor::kCallWasmImportWrapper: |
2996 |       opcode = EncodeCallDescriptorFlags(kArchCallWasmFunction, flags); |
2997 |       break; |
2998 | #endif // V8_ENABLE_WEBASSEMBLY |
2999 |     case CallDescriptor::kCallBuiltinPointer: |
3000 |       opcode = EncodeCallDescriptorFlags(kArchCallBuiltinPointer, flags); |
3001 |       break; |
3002 |   } |
3003 | |
3004 |   // Emit the call instruction. |
3005 |   size_t const output_count = buffer.outputs.size(); |
3006 |   auto* outputs = output_count ? &buffer.outputs.front() : nullptr; |
3007 |   Instruction* call_instr = |
3008 |       Emit(opcode, output_count, outputs, buffer.instruction_args.size(), |
3009 |            &buffer.instruction_args.front()); |
3010 |   if (instruction_selection_failed()) return; |
3011 |   call_instr->MarkAsCall(); |
3012 | |
3013 |   EmitPrepareResults(&(buffer.output_nodes), call_descriptor, node); |
3014 | |
3015 |   if (call_descriptor->NeedsCallerSavedRegisters()) { |
3016 |     Emit( |
3017 |         kArchRestoreCallerRegisters | MiscField::encode(static_cast<int>(mode)), |
3018 |         g.NoOutput()); |
3019 |   } |
3020 | } |
3021 | |
// Lowers a TailCall node: validates caller/callee compatibility, computes the
// stack-parameter delta between the two frames, fills a CallBuffer, chooses
// the tail-call opcode from the descriptor kind, and emits
// kArchPrepareTailCall followed by the tail-call instruction with two extra
// immediates (padding offset and first unused slot offset).
3022 | void InstructionSelector::VisitTailCall(Node* node) { |
3023 |   OperandGenerator g(this); |
3024 | |
3025 |   auto caller = linkage()->GetIncomingDescriptor(); |
3026 |   auto callee = CallDescriptorOf(node->op()); |
3027 |   DCHECK(caller->CanTailCall(callee))((void) 0); |
3028 |   const int stack_param_delta = callee->GetStackParameterDelta(caller); |
3029 |   CallBuffer buffer(zone(), callee, nullptr); |
3030 | |
3031 |   // Compute InstructionOperands for inputs and outputs. |
3032 |   CallBufferFlags flags(kCallCodeImmediate | kCallTail); |
3033 |   if (IsTailCallAddressImmediate()) { |
3034 |     flags |= kCallAddressImmediate; |
3035 |   } |
3036 |   if (callee->flags() & CallDescriptor::kFixedTargetRegister) { |
3037 |     flags |= kCallFixedTargetRegister; |
3038 |   } |
3039 |   InitializeCallBuffer(node, &buffer, flags, stack_param_delta); |
3040 |   UpdateMaxPushedArgumentCount(stack_param_delta); |
3041 | |
3042 |   // Select the appropriate opcode based on the call type. |
3043 |   InstructionCode opcode; |
3044 |   InstructionOperandVector temps(zone()); |
3045 |   switch (callee->kind()) { |
3046 |     case CallDescriptor::kCallCodeObject: |
3047 |       opcode = kArchTailCallCodeObject; |
3048 |       break; |
3049 |     case CallDescriptor::kCallAddress: |
3050 |       DCHECK(!caller->IsJSFunctionCall())((void) 0); |
3051 |       opcode = kArchTailCallAddress; |
3052 |       break; |
3053 | #if V8_ENABLE_WEBASSEMBLY1 |
3054 |     case CallDescriptor::kCallWasmFunction: |
3055 |       DCHECK(!caller->IsJSFunctionCall())((void) 0); |
3056 |       opcode = kArchTailCallWasm; |
3057 |       break; |
3058 | #endif // V8_ENABLE_WEBASSEMBLY |
3059 |     default: |
3060 |       UNREACHABLE()V8_Fatal("unreachable code"); |
3061 |   } |
3062 |   opcode = EncodeCallDescriptorFlags(opcode, callee->flags()); |
3063 | |
3064 |   Emit(kArchPrepareTailCall, g.NoOutput()); |
3065 | |
3066 |   // Add an immediate operand that represents the offset to the first slot that |
3067 |   // is unused with respect to the stack pointer that has been updated for the |
3068 |   // tail call instruction. Backends that pad arguments can write the padding |
3069 |   // value at this offset from the stack. |
3070 |   const int optional_padding_offset = |
3071 |       callee->GetOffsetToFirstUnusedStackSlot() - 1; |
3072 |   buffer.instruction_args.push_back(g.TempImmediate(optional_padding_offset)); |
3073 | |
3074 |   const int first_unused_slot_offset = |
3075 |       kReturnAddressStackSlotCount + stack_param_delta; |
3076 |   buffer.instruction_args.push_back(g.TempImmediate(first_unused_slot_offset)); |
3077 | |
3078 |   // Emit the tailcall instruction. |
3079 |   Emit(opcode, 0, nullptr, buffer.instruction_args.size(), |
3080 |        &buffer.instruction_args.front(), temps.size(), |
3081 |        temps.empty() ? nullptr : &temps.front()); |
3082 | } |
3083 | |
// Emits an unconditional jump to the target block's label.
3084 | void InstructionSelector::VisitGoto(BasicBlock* target) { |
3085 |   // jump to the next block. |
3086 |   OperandGenerator g(this); |
3087 |   Emit(kArchJmp, g.NoOutput(), g.Label(target)); |
3088 | } |
3089 | |
// Emits kArchRet. Slot 0 is the pop count (immediate when it is a small
// constant, otherwise a register); the remaining inputs are placed at the
// return locations the linkage assigns. With zero declared returns only the
// pop count is passed.
3090 | void InstructionSelector::VisitReturn(Node* ret) { |
3091 |   OperandGenerator g(this); |
3092 |   const int input_count = linkage()->GetIncomingDescriptor()->ReturnCount() == 0 |
3093 |                               ? 1 |
3094 |                               : ret->op()->ValueInputCount(); |
3095 |   DCHECK_GE(input_count, 1)((void) 0); |
3096 |   auto value_locations = zone()->NewArray<InstructionOperand>(input_count); |
3097 |   Node* pop_count = ret->InputAt(0); |
3098 |   value_locations[0] = (pop_count->opcode() == IrOpcode::kInt32Constant || |
3099 |                         pop_count->opcode() == IrOpcode::kInt64Constant) |
3100 |                            ? g.UseImmediate(pop_count) |
3101 |                            : g.UseRegister(pop_count); |
3102 |   for (int i = 1; i < input_count; ++i) { |
3103 |     value_locations[i] = |
3104 |         g.UseLocation(ret->InputAt(i), linkage()->GetReturnLocation(i - 1)); |
3105 |   } |
3106 |   Emit(kArchRet, 0, nullptr, input_count, value_locations); |
3107 | } |
3108 | |
// Lowers a branch on a word-compare-against-zero of its condition input,
// using a kNotEqual continuation into the true/false blocks.
3109 | void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch, |
3110 |                                       BasicBlock* fbranch) { |
3111 |   FlagsContinuation cont = |
3112 |       FlagsContinuation::ForBranch(kNotEqual, tbranch, fbranch); |
3113 |   VisitWordCompareZero(branch, branch->InputAt(0), &cont); |
3114 | } |
3115 | |
// The following visitors all lower to a compare-against-zero of input 0 with
// a flags continuation; only the continuation kind differs:
//   DeoptimizeIf/Unless -> deopt continuation (kNotEqual / kEqual),
//   Select              -> select continuation,
//   TrapIf/Unless       -> trap continuation (kNotEqual / kEqual).
3116 | void InstructionSelector::VisitDeoptimizeIf(Node* node) { |
3117 |   DeoptimizeParameters p = DeoptimizeParametersOf(node->op()); |
3118 |   FlagsContinuation cont = FlagsContinuation::ForDeoptimize( |
3119 |       kNotEqual, p.reason(), node->id(), p.feedback(), |
3120 |       FrameState{node->InputAt(1)}); |
3121 |   VisitWordCompareZero(node, node->InputAt(0), &cont); |
3122 | } |
3123 | |
3124 | void InstructionSelector::VisitDeoptimizeUnless(Node* node) { |
3125 |   DeoptimizeParameters p = DeoptimizeParametersOf(node->op()); |
3126 |   FlagsContinuation cont = FlagsContinuation::ForDeoptimize( |
3127 |       kEqual, p.reason(), node->id(), p.feedback(), |
3128 |       FrameState{node->InputAt(1)}); |
3129 |   VisitWordCompareZero(node, node->InputAt(0), &cont); |
3130 | } |
3131 | |
3132 | void InstructionSelector::VisitSelect(Node* node) { |
3133 |   FlagsContinuation cont = |
3134 |       FlagsContinuation::ForSelect(kNotEqual, node, |
3135 |                                    node->InputAt(1), node->InputAt(2)); |
3136 |   VisitWordCompareZero(node, node->InputAt(0), &cont); |
3137 | } |
3138 | |
3139 | void InstructionSelector::VisitTrapIf(Node* node, TrapId trap_id) { |
3140 |   FlagsContinuation cont = |
3141 |       FlagsContinuation::ForTrap(kNotEqual, trap_id, node->InputAt(1)); |
3142 |   VisitWordCompareZero(node, node->InputAt(0), &cont); |
3143 | } |
3144 | |
3145 | void InstructionSelector::VisitTrapUnless(Node* node, TrapId trap_id) { |
3146 |   FlagsContinuation cont = |
3147 |       FlagsContinuation::ForTrap(kEqual, trap_id, node->InputAt(1)); |
3148 |   VisitWordCompareZero(node, node->InputAt(0), &cont); |
3149 | } |
3150 | |
// Makes `node` an alias of its first input: no instruction is emitted, the
// input is marked used and the node is renamed onto it.
3151 | void InstructionSelector::EmitIdentity(Node* node) { |
3152 |   MarkAsUsed(node->InputAt(0)); |
3153 |   SetRename(node, node->InputAt(0)); |
3154 | } |
3155 | |
// Emits an unconditional kArchDeoptimize with the reason, node id, feedback
// and frame state packed into the instruction arguments.
3156 | void InstructionSelector::VisitDeoptimize(DeoptimizeReason reason, |
3157 |                                           NodeId node_id, |
3158 |                                           FeedbackSource const& feedback, |
3159 |                                           FrameState frame_state) { |
3160 |   InstructionOperandVector args(instruction_zone()); |
3161 |   AppendDeoptimizeArguments(&args, reason, node_id, feedback, frame_state); |
3162 |   Emit(kArchDeoptimize, 0, nullptr, args.size(), &args.front(), 0, nullptr); |
3163 | } |
3164 | |
// Throw ends the block with a terminator; DebugBreak and Unreachable both
// lower to a kArchDebugBreak trap instruction.
3165 | void InstructionSelector::VisitThrow(Node* node) { |
3166 |   OperandGenerator g(this); |
3167 |   Emit(kArchThrowTerminator, g.NoOutput()); |
3168 | } |
3169 | |
3170 | void InstructionSelector::VisitDebugBreak(Node* node) { |
3171 |   OperandGenerator g(this); |
3172 |   Emit(kArchDebugBreak, g.NoOutput()); |
3173 | } |
3174 | |
3175 | void InstructionSelector::VisitUnreachable(Node* node) { |
3176 |   OperandGenerator g(this); |
3177 |   Emit(kArchDebugBreak, g.NoOutput()); |
3178 | } |
3179 | |
// A StaticAssert surviving to instruction selection means the optimizer could
// not prove it true: print the asserted subgraph and abort with its source.
3180 | void InstructionSelector::VisitStaticAssert(Node* node) { |
3181 |   Node* asserted = node->InputAt(0); |
3182 |   UnparkedScopeIfNeeded scope(broker_); |
3183 |   AllowHandleDereference allow_handle_dereference; |
3184 |   asserted->Print(4); |
3185 |   FATAL(V8_Fatal("Expected Turbofan static assert to hold, but got non-true input:\n %s" , StaticAssertSourceOf(node->op())) |
3186 |       "Expected Turbofan static assert to hold, but got non-true input:\n %s",V8_Fatal("Expected Turbofan static assert to hold, but got non-true input:\n %s" , StaticAssertSourceOf(node->op())) |
3187 |       StaticAssertSourceOf(node->op()))V8_Fatal("Expected Turbofan static assert to hold, but got non-true input:\n %s" , StaticAssertSourceOf(node->op())); |
3188 | } |
3189 | |
// A DeadValue should never be executed; it is defined as a constant of the
// recorded representation behind a kArchDebugBreak trap.
3190 | void InstructionSelector::VisitDeadValue(Node* node) { |
3191 |   OperandGenerator g(this); |
3192 |   MarkAsRepresentation(DeadValueRepresentationOf(node->op()), node); |
3193 |   Emit(kArchDebugBreak, g.DefineAsConstant(node)); |
3194 | } |
3195 | |
// Emits a kArchComment whose single immediate operand carries the comment.
3196 | void InstructionSelector::VisitComment(Node* node) { |
3197 |   OperandGenerator g(this); |
3198 |   InstructionOperand operand(g.UseImmediate(node)); |
3199 |   Emit(kArchComment, 0, nullptr, 1, &operand); |
3200 | } |
3201 | |
// Pointer arithmetic lowers to the integer add of the target's word size.
3202 | void InstructionSelector::VisitUnsafePointerAdd(Node* node) { |
3203 | #if V8_TARGET_ARCH_64_BIT1 |
3204 |   VisitInt64Add(node); |
3205 | #else   // V8_TARGET_ARCH_64_BIT |
3206 |   VisitInt32Add(node); |
3207 | #endif  // V8_TARGET_ARCH_64_BIT |
3208 | } |
3209 | |
// Retain keeps its input alive: a kArchNop with a use-any operand gives the
// value a use without producing anything.
3210 | void InstructionSelector::VisitRetain(Node* node) { |
3211 |   OperandGenerator g(this); |
3212 |   Emit(kArchNop, g.NoOutput(), g.UseAny(node->InputAt(0))); |
3213 | } |
3214 | |
// Conservatively answers whether `node` may produce a signaling NaN; only
// float64 add/sub/mul are currently known not to.
3215 | bool InstructionSelector::CanProduceSignalingNaN(Node* node) { |
3216 |   // TODO(jarin) Improve the heuristic here. |
3217 |   if (node->opcode() == IrOpcode::kFloat64Add || |
3218 |       node->opcode() == IrOpcode::kFloat64Sub || |
3219 |       node->opcode() == IrOpcode::kFloat64Mul) { |
3220 |     return false; |
3221 |   } |
3222 |   return true; |
3223 | } |
3224 | |
3225 | #if V8_TARGET_ARCH_64_BIT1 |
3226 | bool InstructionSelector::ZeroExtendsWord32ToWord64(Node* node, |
3227 |                                                     int recursion_depth) { |
3228 |   // To compute whether a Node sets its upper 32 bits to zero, there are three |
3229 |   // cases. |
3230 |   // 1. Phi node, with a computed result already available in phi_states_: |
3231 |   //    Read the value from phi_states_. |
3232 |   // 2. Phi node, with no result available in phi_states_ yet: |
3233 |   //    Recursively check its inputs, and store the result in phi_states_. |
3234 |   // 3. Anything else: |
3235 |   //    Call the architecture-specific ZeroExtendsWord32ToWord64NoPhis. |
3236 | |
3237 |   // Limit recursion depth to avoid the possibility of stack overflow on very |
3238 |   // large functions. |
3239 |   const int kMaxRecursionDepth = 100; |
3240 | |
3241 |   if (node->opcode() == IrOpcode::kPhi) { |
3242 |     Upper32BitsState current = phi_states_[node->id()]; |
3243 |     if (current != Upper32BitsState::kNotYetChecked) { |
3244 |       return current == Upper32BitsState::kUpperBitsGuaranteedZero; |
3245 |     } |
3246 | |
3247 |     // If further recursion is prevented, we can't make any assumptions about |
3248 |     // the output of this phi node. |
3249 |     if (recursion_depth >= kMaxRecursionDepth) { |
3250 |       return false; |
3251 |     } |
3252 | |
3253 |     // Mark the current node so that we skip it if we recursively visit it |
3254 |     // again. Or, said differently, we compute a largest fixed-point so we can |
3255 |     // be optimistic when we hit cycles. |
3256 |     phi_states_[node->id()] = Upper32BitsState::kUpperBitsGuaranteedZero; |
3257 | |
      // A phi zero-extends iff every one of its value inputs does; any failing
      // input pessimizes the cached state back to kNoGuarantee.
3258 |     int input_count = node->op()->ValueInputCount(); |
3259 |     for (int i = 0; i < input_count; ++i) { |
3260 |       Node* input = NodeProperties::GetValueInput(node, i); |
3261 |       if (!ZeroExtendsWord32ToWord64(input, recursion_depth + 1)) { |
3262 |         phi_states_[node->id()] = Upper32BitsState::kNoGuarantee; |
3263 |         return false; |
3264 |       } |
3265 |     } |
3266 | |
3267 |     return true; |
3268 |   } |
3269 |   return ZeroExtendsWord32ToWord64NoPhis(node); |
3270 | } |
3271 | #endif // V8_TARGET_ARCH_64_BIT |
3272 | |
3273 | namespace { |
3274 | |
// Recursively builds a FrameStateDescriptor chain for a FrameState node,
// walking outer frame states first. Unoptimized-function frames reserve one
// extra stack slot; JS-to-Wasm continuations get a specialized descriptor
// carrying the wasm signature.
3275 | FrameStateDescriptor* GetFrameStateDescriptorInternal(Zone* zone, |
3276 |                                                       FrameState state) { |
3277 |   DCHECK_EQ(IrOpcode::kFrameState, state->opcode())((void) 0); |
3278 |   DCHECK_EQ(FrameState::kFrameStateInputCount, state->InputCount())((void) 0); |
3279 |   const FrameStateInfo& state_info = FrameStateInfoOf(state->op()); |
3280 |   int parameters = state_info.parameter_count(); |
3281 |   int locals = state_info.local_count(); |
3282 |   int stack = state_info.type() == FrameStateType::kUnoptimizedFunction ? 1 : 0; |
3283 | |
3284 |   FrameStateDescriptor* outer_state = nullptr; |
3285 |   if (state.outer_frame_state()->opcode() == IrOpcode::kFrameState) { |
3286 |     outer_state = GetFrameStateDescriptorInternal( |
3287 |         zone, FrameState{state.outer_frame_state()}); |
3288 |   } |
3289 | |
3290 | #if V8_ENABLE_WEBASSEMBLY1 |
3291 |   if (state_info.type() == FrameStateType::kJSToWasmBuiltinContinuation) { |
3292 |     auto function_info = static_cast<const JSToWasmFrameStateFunctionInfo*>( |
3293 |         state_info.function_info()); |
3294 |     return zone->New<JSToWasmFrameStateDescriptor>( |
3295 |         zone, state_info.type(), state_info.bailout_id(), |
3296 |         state_info.state_combine(), parameters, locals, stack, |
3297 |         state_info.shared_info(), outer_state, function_info->signature()); |
3298 |   } |
3299 | #endif // V8_ENABLE_WEBASSEMBLY |
3300 | |
3301 |   return zone->New<FrameStateDescriptor>( |
3302 |       zone, state_info.type(), state_info.bailout_id(), |
3303 |       state_info.state_combine(), parameters, locals, stack, |
3304 |       state_info.shared_info(), outer_state); |
3305 | } |
3306 | |
3307 | } // namespace |
3308 | |
// Builds the descriptor chain for `state` and tracks the largest
// conservative unoptimized-frame size seen, which the backend uses for
// stack-check sizing.
3309 | FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor( |
3310 |     FrameState state) { |
3311 |   auto* desc = GetFrameStateDescriptorInternal(instruction_zone(), state); |
3312 |   *max_unoptimized_frame_height_ = |
3313 |       std::max(*max_unoptimized_frame_height_, |
3314 |                desc->total_conservative_frame_size_in_bytes()); |
3315 |   return desc; |
3316 | } |
3317 | |
3318 | #if V8_ENABLE_WEBASSEMBLY1 |
// Canonicalizes a 16-byte shuffle immediate: reads the raw indices, lets the
// shared SimdShuffle helper decide whether the inputs must be swapped and
// whether the shuffle is really a swizzle (both inputs the same register),
// then applies the swap / input duplication on the node.
3319 | void InstructionSelector::CanonicalizeShuffle(Node* node, uint8_t* shuffle, |
3320 |                                               bool* is_swizzle) { |
3321 |   // Get raw shuffle indices. |
3322 |   memcpy(shuffle, S128ImmediateParameterOf(node->op()).data(), kSimd128Size); |
3323 |   bool needs_swap; |
3324 |   bool inputs_equal = GetVirtualRegister(node->InputAt(0)) == |
3325 |                       GetVirtualRegister(node->InputAt(1)); |
3326 |   wasm::SimdShuffle::CanonicalizeShuffle(inputs_equal, shuffle, &needs_swap, |
3327 |                                          is_swizzle); |
3328 |   if (needs_swap) { |
3329 |     SwapShuffleInputs(node); |
3330 |   } |
3331 |   // Duplicate the first input; for some shuffles on some architectures, it's |
3332 |   // easiest to implement a swizzle as a shuffle so it might be used. |
3333 |   if (*is_swizzle) { |
3334 |     node->ReplaceInput(1, node->InputAt(0)); |
3335 |   } |
3336 | } |
3337 | |
3338 | // static |
// Exchanges the two vector inputs of a shuffle node in place.
3339 | void InstructionSelector::SwapShuffleInputs(Node* node) { |
3340 |   Node* input0 = node->InputAt(0); |
3341 |   Node* input1 = node->InputAt(1); |
3342 |   node->ReplaceInput(0, input1); |
3343 |   node->ReplaceInput(1, input0); |
3344 | } |
3345 | #endif // V8_ENABLE_WEBASSEMBLY |
3346 | |
3347 | } // namespace compiler |
3348 | } // namespace internal |
3349 | } // namespace v8 |