File: out/../deps/v8/src/interpreter/bytecode-array-writer.cc
Warning: line 440, column 5: Value stored to 'jump_bytecode' is never read

// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/interpreter/bytecode-array-writer.h"

#include "src/api/api-inl.h"
#include "src/heap/local-factory-inl.h"
#include "src/interpreter/bytecode-jump-table.h"
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-node.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecode-source-info.h"
#include "src/interpreter/constant-array-builder.h"
#include "src/interpreter/handler-table-builder.h"
#include "src/objects/objects-inl.h"

namespace v8 {
namespace internal {
namespace interpreter {

STATIC_CONST_MEMBER_DEFINITION const size_t
    BytecodeArrayWriter::kMaxSizeOfPackedBytecode;

BytecodeArrayWriter::BytecodeArrayWriter(
    Zone* zone, ConstantArrayBuilder* constant_array_builder,
    SourcePositionTableBuilder::RecordingMode source_position_mode)
    : bytecodes_(zone),
      unbound_jumps_(0),
      source_position_table_builder_(zone, source_position_mode),
      constant_array_builder_(constant_array_builder),
      last_bytecode_(Bytecode::kIllegal),
      last_bytecode_offset_(0),
      last_bytecode_had_source_info_(false),
      elide_noneffectful_bytecodes_(FLAG_ignition_elide_noneffectful_bytecodes),
      exit_seen_in_block_(false) {
  bytecodes_.reserve(512);  // Derived via experimentation.
}
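
// Illustrative call sequence (hypothetical driver code, not part of this
// file), assuming a zone, builder, node and label set up elsewhere:
//
//   BytecodeArrayWriter writer(zone, &constant_array_builder, mode);
//   writer.Write(&node);                   // straight-line bytecode
//   writer.WriteJump(&jump_node, &label);  // forward jump, operand reserved
//   writer.BindLabel(&label);              // patches the reserved operand
//   Handle<BytecodeArray> array = writer.ToBytecodeArray(
//       isolate, register_count, parameter_count, handler_table);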

template <typename IsolateT>
Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
    IsolateT* isolate, int register_count, int parameter_count,
    Handle<ByteArray> handler_table) {
  DCHECK_EQ(0, unbound_jumps_);

  int bytecode_size = static_cast<int>(bytecodes()->size());
  int frame_size = register_count * kSystemPointerSize;
  Handle<FixedArray> constant_pool =
      constant_array_builder()->ToFixedArray(isolate);
  Handle<BytecodeArray> bytecode_array = isolate->factory()->NewBytecodeArray(
      bytecode_size, &bytecodes()->front(), frame_size, parameter_count,
      constant_pool);
  bytecode_array->set_handler_table(*handler_table);
  return bytecode_array;
}

template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
    Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
        Isolate* isolate, int register_count, int parameter_count,
        Handle<ByteArray> handler_table);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
    Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
        LocalIsolate* isolate, int register_count, int parameter_count,
        Handle<ByteArray> handler_table);

template <typename IsolateT>
Handle<ByteArray> BytecodeArrayWriter::ToSourcePositionTable(
    IsolateT* isolate) {
  DCHECK(!source_position_table_builder_.Lazy());
  Handle<ByteArray> source_position_table =
      source_position_table_builder_.Omit()
          ? isolate->factory()->empty_byte_array()
          : source_position_table_builder_.ToSourcePositionTable(isolate);
  return source_position_table;
}

template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
    Handle<ByteArray> BytecodeArrayWriter::ToSourcePositionTable(
        Isolate* isolate);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
    Handle<ByteArray> BytecodeArrayWriter::ToSourcePositionTable(
        LocalIsolate* isolate);

#ifdef DEBUG
int BytecodeArrayWriter::CheckBytecodeMatches(BytecodeArray bytecode) {
  bool mismatches = false;
  int bytecode_size = static_cast<int>(bytecodes()->size());
  const byte* bytecode_ptr = &bytecodes()->front();
  if (bytecode_size != bytecode.length()) mismatches = true;

  // If there's a mismatch only in the length of the bytecode (very unlikely)
  // then the first mismatch will be the first extra bytecode.
  int first_mismatch = std::min(bytecode_size, bytecode.length());
  for (int i = 0; i < first_mismatch; ++i) {
    if (bytecode_ptr[i] != bytecode.get(i)) {
      mismatches = true;
      first_mismatch = i;
      break;
    }
  }

  if (mismatches) {
    return first_mismatch;
  }
  return -1;
}
#endif
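
// CheckBytecodeMatches (above, DEBUG only) returns -1 when the written
// bytecodes agree with |bytecode| byte for byte, and otherwise the offset of
// the first differing (or, on a pure length mismatch, first extra) byte.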

void BytecodeArrayWriter::Write(BytecodeNode* node) {
  DCHECK(!Bytecodes::IsJump(node->bytecode()));

  if (exit_seen_in_block_) return;  // Don't emit dead code.
  UpdateExitSeenInBlock(node->bytecode());
  MaybeElideLastBytecode(node->bytecode(), node->source_info().is_valid());

  UpdateSourcePositionTable(node);
  EmitBytecode(node);
}

void BytecodeArrayWriter::WriteJump(BytecodeNode* node, BytecodeLabel* label) {
  DCHECK(Bytecodes::IsForwardJump(node->bytecode()));

  if (exit_seen_in_block_) return;  // Don't emit dead code.
  UpdateExitSeenInBlock(node->bytecode());
  MaybeElideLastBytecode(node->bytecode(), node->source_info().is_valid());

  UpdateSourcePositionTable(node);
  EmitJump(node, label);
}

void BytecodeArrayWriter::WriteJumpLoop(BytecodeNode* node,
                                        BytecodeLoopHeader* loop_header) {
  DCHECK_EQ(node->bytecode(), Bytecode::kJumpLoop);

  if (exit_seen_in_block_) return;  // Don't emit dead code.
  UpdateExitSeenInBlock(node->bytecode());
  MaybeElideLastBytecode(node->bytecode(), node->source_info().is_valid());

  UpdateSourcePositionTable(node);
  EmitJumpLoop(node, loop_header);
}

void BytecodeArrayWriter::WriteSwitch(BytecodeNode* node,
                                      BytecodeJumpTable* jump_table) {
  DCHECK(Bytecodes::IsSwitch(node->bytecode()));

  if (exit_seen_in_block_) return;  // Don't emit dead code.
  UpdateExitSeenInBlock(node->bytecode());
  MaybeElideLastBytecode(node->bytecode(), node->source_info().is_valid());

  UpdateSourcePositionTable(node);
  EmitSwitch(node, jump_table);
}
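
// A note on the dead-code guard shared by the four writers above: once
// UpdateExitSeenInBlock has seen an exit (e.g. Return or an unconditional
// Jump), every later node is silently dropped until StartBasicBlock resets
// the flag, so for example a LdaSmi emitted right after a Return never
// reaches the bytecode stream.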

void BytecodeArrayWriter::BindLabel(BytecodeLabel* label) {
  DCHECK(label->has_referrer_jump());
  size_t current_offset = bytecodes()->size();
  // Update the jump instruction's location.
  PatchJump(current_offset, label->jump_offset());
  label->bind();
  StartBasicBlock();
}

void BytecodeArrayWriter::BindLoopHeader(BytecodeLoopHeader* loop_header) {
  size_t current_offset = bytecodes()->size();
  loop_header->bind_to(current_offset);
  // Don't start a basic block when the entire loop is dead.
  if (exit_seen_in_block_) return;
  StartBasicBlock();
}

void BytecodeArrayWriter::BindJumpTableEntry(BytecodeJumpTable* jump_table,
                                             int case_value) {
  DCHECK(!jump_table->is_bound(case_value));

  size_t current_offset = bytecodes()->size();
  size_t relative_jump = current_offset - jump_table->switch_bytecode_offset();

  constant_array_builder()->SetJumpTableSmi(
      jump_table->ConstantPoolEntryFor(case_value),
      Smi::FromInt(static_cast<int>(relative_jump)));
  jump_table->mark_bound(case_value);

  StartBasicBlock();
}
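
// The table entry is a Smi in the constant pool holding the forward distance
// from the switch bytecode to this case target, so the dispatch target can
// be computed as the switch's offset plus the stored delta.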

void BytecodeArrayWriter::BindHandlerTarget(
    HandlerTableBuilder* handler_table_builder, int handler_id) {
  size_t current_offset = bytecodes()->size();
  StartBasicBlock();
  handler_table_builder->SetHandlerTarget(handler_id, current_offset);
}

void BytecodeArrayWriter::BindTryRegionStart(
    HandlerTableBuilder* handler_table_builder, int handler_id) {
  size_t current_offset = bytecodes()->size();
  // Try blocks don't have to be in a separate basic block, but we do have to
  // invalidate the bytecode to avoid eliding it and changing the offset.
  InvalidateLastBytecode();
  handler_table_builder->SetTryRegionStart(handler_id, current_offset);
}

void BytecodeArrayWriter::BindTryRegionEnd(
    HandlerTableBuilder* handler_table_builder, int handler_id) {
  // Try blocks don't have to be in a separate basic block, but we do have to
  // invalidate the bytecode to avoid eliding it and changing the offset.
  InvalidateLastBytecode();
  size_t current_offset = bytecodes()->size();
  handler_table_builder->SetTryRegionEnd(handler_id, current_offset);
}

void BytecodeArrayWriter::SetFunctionEntrySourcePosition(int position) {
  bool is_statement = false;
  source_position_table_builder_.AddPosition(
      kFunctionEntryBytecodeOffset, SourcePosition(position), is_statement);
}

void BytecodeArrayWriter::StartBasicBlock() {
  InvalidateLastBytecode();
  exit_seen_in_block_ = false;
}

void BytecodeArrayWriter::UpdateSourcePositionTable(
    const BytecodeNode* const node) {
  int bytecode_offset = static_cast<int>(bytecodes()->size());
  const BytecodeSourceInfo& source_info = node->source_info();
  if (source_info.is_valid()) {
    source_position_table_builder()->AddPosition(
        bytecode_offset, SourcePosition(source_info.source_position()),
        source_info.is_statement());
  }
}

void BytecodeArrayWriter::UpdateExitSeenInBlock(Bytecode bytecode) {
  switch (bytecode) {
    case Bytecode::kReturn:
    case Bytecode::kThrow:
    case Bytecode::kReThrow:
    case Bytecode::kAbort:
    case Bytecode::kJump:
    case Bytecode::kJumpLoop:
    case Bytecode::kJumpConstant:
    case Bytecode::kSuspendGenerator:
      exit_seen_in_block_ = true;
      break;
    default:
      break;
  }
}

void BytecodeArrayWriter::MaybeElideLastBytecode(Bytecode next_bytecode,
                                                 bool has_source_info) {
  if (!elide_noneffectful_bytecodes_) return;

  // If the last bytecode loaded the accumulator without any external effect,
  // and the next bytecode clobbers this load without reading the accumulator,
  // then the previous bytecode can be elided as it has no effect.
  if (Bytecodes::IsAccumulatorLoadWithoutEffects(last_bytecode_) &&
      Bytecodes::GetImplicitRegisterUse(next_bytecode) ==
          ImplicitRegisterUse::kWriteAccumulator &&
      (!last_bytecode_had_source_info_ || !has_source_info)) {
    DCHECK_GT(bytecodes()->size(), last_bytecode_offset_);
    bytecodes()->resize(last_bytecode_offset_);
    // If the last bytecode had source info we will transfer the source info
    // to this bytecode.
    has_source_info |= last_bytecode_had_source_info_;
  }
  last_bytecode_ = next_bytecode;
  last_bytecode_had_source_info_ = has_source_info;
  last_bytecode_offset_ = bytecodes()->size();
}
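
// Worked example of the elision above: LdaUndefined immediately followed by
// LdaSmi [1]. The first load writes the accumulator with no other effect and
// the second clobbers it without reading it, so the resize() drops the
// LdaUndefined bytes and only LdaSmi [1] is emitted.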

void BytecodeArrayWriter::InvalidateLastBytecode() {
  last_bytecode_ = Bytecode::kIllegal;
}

void BytecodeArrayWriter::EmitBytecode(const BytecodeNode* const node) {
  DCHECK_NE(node->bytecode(), Bytecode::kIllegal);

  Bytecode bytecode = node->bytecode();
  OperandScale operand_scale = node->operand_scale();

  if (operand_scale != OperandScale::kSingle) {
    Bytecode prefix = Bytecodes::OperandScaleToPrefixBytecode(operand_scale);
    bytecodes()->push_back(Bytecodes::ToByte(prefix));
  }
  bytecodes()->push_back(Bytecodes::ToByte(bytecode));

  const uint32_t* const operands = node->operands();
  const int operand_count = node->operand_count();
  const OperandSize* operand_sizes =
      Bytecodes::GetOperandSizes(bytecode, operand_scale);
  for (int i = 0; i < operand_count; ++i) {
    switch (operand_sizes[i]) {
      case OperandSize::kNone:
        UNREACHABLE();
      case OperandSize::kByte:
        bytecodes()->push_back(static_cast<uint8_t>(operands[i]));
        break;
      case OperandSize::kShort: {
        uint16_t operand = static_cast<uint16_t>(operands[i]);
        const uint8_t* raw_operand = reinterpret_cast<const uint8_t*>(&operand);
        bytecodes()->push_back(raw_operand[0]);
        bytecodes()->push_back(raw_operand[1]);
        break;
      }
      case OperandSize::kQuad: {
        const uint8_t* raw_operand =
            reinterpret_cast<const uint8_t*>(&operands[i]);
        bytecodes()->push_back(raw_operand[0]);
        bytecodes()->push_back(raw_operand[1]);
        bytecodes()->push_back(raw_operand[2]);
        bytecodes()->push_back(raw_operand[3]);
        break;
      }
    }
  }
}
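
// Encoding produced by EmitBytecode: [scaling prefix?][opcode][operands],
// where each operand is written as the raw bytes of its uint16_t/uint32_t
// value in host byte order; e.g. a kShort operand 0x1234 comes out as the
// bytes 0x34 0x12 on a little-endian host.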

// static
Bytecode GetJumpWithConstantOperand(Bytecode jump_bytecode) {
  switch (jump_bytecode) {
    case Bytecode::kJump:
      return Bytecode::kJumpConstant;
    case Bytecode::kJumpIfTrue:
      return Bytecode::kJumpIfTrueConstant;
    case Bytecode::kJumpIfFalse:
      return Bytecode::kJumpIfFalseConstant;
    case Bytecode::kJumpIfToBooleanTrue:
      return Bytecode::kJumpIfToBooleanTrueConstant;
    case Bytecode::kJumpIfToBooleanFalse:
      return Bytecode::kJumpIfToBooleanFalseConstant;
    case Bytecode::kJumpIfNull:
      return Bytecode::kJumpIfNullConstant;
    case Bytecode::kJumpIfNotNull:
      return Bytecode::kJumpIfNotNullConstant;
    case Bytecode::kJumpIfUndefined:
      return Bytecode::kJumpIfUndefinedConstant;
    case Bytecode::kJumpIfNotUndefined:
      return Bytecode::kJumpIfNotUndefinedConstant;
    case Bytecode::kJumpIfUndefinedOrNull:
      return Bytecode::kJumpIfUndefinedOrNullConstant;
    case Bytecode::kJumpIfJSReceiver:
      return Bytecode::kJumpIfJSReceiverConstant;
    default:
      UNREACHABLE();
  }
}
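
// Each forward jump with an immediate operand has a *Constant twin that
// reads its target from the constant pool instead; the patching code below
// rewrites a jump to its twin when the final delta no longer fits the bytes
// reserved for the immediate.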

void BytecodeArrayWriter::PatchJumpWith8BitOperand(size_t jump_location,
                                                   int delta) {
  Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes()->at(jump_location));
  DCHECK(Bytecodes::IsForwardJump(jump_bytecode));
  DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
  DCHECK_EQ(Bytecodes::GetOperandType(jump_bytecode, 0), OperandType::kUImm);
  DCHECK_GT(delta, 0);
  size_t operand_location = jump_location + 1;
  DCHECK_EQ(bytecodes()->at(operand_location), k8BitJumpPlaceholder);
  if (Bytecodes::ScaleForUnsignedOperand(delta) == OperandScale::kSingle) {
    // The jump fits within the range of a UImm8 operand, so cancel
    // the reservation and jump directly.
    constant_array_builder()->DiscardReservedEntry(OperandSize::kByte);
    bytecodes()->at(operand_location) = static_cast<uint8_t>(delta);
  } else {
    // The jump does not fit within the range of a UImm8 operand, so
    // commit the reservation by putting the offset into the constant pool,
    // and update the jump instruction and operand.
    size_t entry = constant_array_builder()->CommitReservedEntry(
        OperandSize::kByte, Smi::FromInt(delta));
    DCHECK_EQ(Bytecodes::SizeForUnsignedOperand(static_cast<uint32_t>(entry)),
              OperandSize::kByte);
    jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
    bytecodes()->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
    bytecodes()->at(operand_location) = static_cast<uint8_t>(entry);
  }
}
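
// Example for the 8-bit case above: a final delta of 100 fits a UImm8, so
// the reserved pool slot is discarded and 100 overwrites the placeholder; a
// delta of 300 does not fit, so Smi(300) is committed to the pool and a
// kJump, say, is rewritten to kJumpConstant with the pool index as operand.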

void BytecodeArrayWriter::PatchJumpWith16BitOperand(size_t jump_location,
                                                    int delta) {
  Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes()->at(jump_location));
  DCHECK(Bytecodes::IsForwardJump(jump_bytecode));
  DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
  DCHECK_EQ(Bytecodes::GetOperandType(jump_bytecode, 0), OperandType::kUImm);
  DCHECK_GT(delta, 0);
  size_t operand_location = jump_location + 1;
  uint8_t operand_bytes[2];
  if (Bytecodes::ScaleForUnsignedOperand(delta) <= OperandScale::kDouble) {
    // The jump fits within the range of a UImm16 operand, so cancel
    // the reservation and jump directly.
    constant_array_builder()->DiscardReservedEntry(OperandSize::kShort);
    base::WriteUnalignedValue<uint16_t>(
        reinterpret_cast<Address>(operand_bytes), static_cast<uint16_t>(delta));
  } else {
    // The jump does not fit within the range of a UImm16 operand, so
    // commit the reservation by putting the offset into the constant pool,
    // and update the jump instruction and operand.
    size_t entry = constant_array_builder()->CommitReservedEntry(
        OperandSize::kShort, Smi::FromInt(delta));
    jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
    bytecodes()->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
    base::WriteUnalignedValue<uint16_t>(
        reinterpret_cast<Address>(operand_bytes), static_cast<uint16_t>(entry));
  }
  DCHECK(bytecodes()->at(operand_location) == k8BitJumpPlaceholder &&
         bytecodes()->at(operand_location + 1) == k8BitJumpPlaceholder);
  bytecodes()->at(operand_location++) = operand_bytes[0];
  bytecodes()->at(operand_location) = operand_bytes[1];
}

void BytecodeArrayWriter::PatchJumpWith32BitOperand(size_t jump_location,
                                                    int delta) {
  DCHECK(Bytecodes::IsJumpImmediate(
      Bytecodes::FromByte(bytecodes()->at(jump_location))));
  constant_array_builder()->DiscardReservedEntry(OperandSize::kQuad);
  uint8_t operand_bytes[4];
  base::WriteUnalignedValue<uint32_t>(reinterpret_cast<Address>(operand_bytes),
                                      static_cast<uint32_t>(delta));
  size_t operand_location = jump_location + 1;
  DCHECK(bytecodes()->at(operand_location) == k8BitJumpPlaceholder &&
         bytecodes()->at(operand_location + 1) == k8BitJumpPlaceholder &&
         bytecodes()->at(operand_location + 2) == k8BitJumpPlaceholder &&
         bytecodes()->at(operand_location + 3) == k8BitJumpPlaceholder);
  bytecodes()->at(operand_location++) = operand_bytes[0];
  bytecodes()->at(operand_location++) = operand_bytes[1];
  bytecodes()->at(operand_location++) = operand_bytes[2];
  bytecodes()->at(operand_location) = operand_bytes[3];
}

void BytecodeArrayWriter::PatchJump(size_t jump_target, size_t jump_location) {
  Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes()->at(jump_location));
  int delta = static_cast<int>(jump_target - jump_location);
  int prefix_offset = 0;
  OperandScale operand_scale = OperandScale::kSingle;
  if (Bytecodes::IsPrefixScalingBytecode(jump_bytecode)) {
    // If a prefix scaling bytecode is emitted, the target offset is one
    // less than in the case of no prefix scaling bytecode.
    delta -= 1;
    prefix_offset = 1;
    operand_scale = Bytecodes::PrefixBytecodeToOperandScale(jump_bytecode);
    // Analyzer: "Value stored to 'jump_bytecode' is never read" (line 440).
    // The only later read is the DCHECK below, which compiles to a no-op in
    // release builds, so the store is dead there but used in debug builds.
    jump_bytecode =
        Bytecodes::FromByte(bytecodes()->at(jump_location + prefix_offset));
  }

  DCHECK(Bytecodes::IsJump(jump_bytecode));
  switch (operand_scale) {
    case OperandScale::kSingle:
      PatchJumpWith8BitOperand(jump_location, delta);
      break;
    case OperandScale::kDouble:
      PatchJumpWith16BitOperand(jump_location + prefix_offset, delta);
      break;
    case OperandScale::kQuadruple:
      PatchJumpWith32BitOperand(jump_location + prefix_offset, delta);
      break;
    default:
      UNREACHABLE();
  }
  unbound_jumps_--;
}
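
// Worked example of the prefix handling: for [kWide][kJump][op op] at
// offset 10 targeting offset 300, delta starts as 300 - 10 = 290 and drops
// to 289 because the distance is measured from the kJump opcode at offset
// 11, one byte past the prefix; PatchJumpWith16BitOperand then patches at
// jump_location 11, and 11 + 289 = 300.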

void BytecodeArrayWriter::EmitJumpLoop(BytecodeNode* node,
                                       BytecodeLoopHeader* loop_header) {
  DCHECK_EQ(node->bytecode(), Bytecode::kJumpLoop);
  DCHECK_EQ(0u, node->operand(0));

  size_t current_offset = bytecodes()->size();

  CHECK_GE(current_offset, loop_header->offset());
  CHECK_LE(current_offset, static_cast<size_t>(kMaxUInt32));
  // Label has been bound already so this is a backwards jump.
  uint32_t delta =
      static_cast<uint32_t>(current_offset - loop_header->offset());
  OperandScale operand_scale = Bytecodes::ScaleForUnsignedOperand(delta);
  if (operand_scale > OperandScale::kSingle) {
    // Adjust for scaling byte prefix for wide jump offset.
    delta += 1;
  }
  node->update_operand0(delta);
  EmitBytecode(node);
}
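
// Worked example: with the loop header bound at offset 100 and the JumpLoop
// being emitted at offset 400, delta is 300 and needs a wide prefix, so 301
// is recorded; consistent with the +1 above, the distance is taken from the
// JumpLoop opcode at offset 401, and 401 - 301 = 100.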

void BytecodeArrayWriter::EmitJump(BytecodeNode* node, BytecodeLabel* label) {
  DCHECK(Bytecodes::IsForwardJump(node->bytecode()));
  DCHECK_EQ(0u, node->operand(0));

  size_t current_offset = bytecodes()->size();

  // The label has not yet been bound so this is a forward reference
  // that will be patched when the label is bound. We create a
  // reservation in the constant pool so the jump can be patched
  // when the label is bound. The reservation means the maximum size
  // of the operand for the constant is known and the jump can
  // be emitted into the bytecode stream with space for the operand.
  unbound_jumps_++;
  label->set_referrer(current_offset);
  OperandSize reserved_operand_size =
      constant_array_builder()->CreateReservedEntry();
  DCHECK_NE(Bytecode::kJumpLoop, node->bytecode());
  switch (reserved_operand_size) {
    case OperandSize::kNone:
      UNREACHABLE();
    case OperandSize::kByte:
      node->update_operand0(k8BitJumpPlaceholder);
      break;
    case OperandSize::kShort:
      node->update_operand0(k16BitJumpPlaceholder);
      break;
    case OperandSize::kQuad:
      node->update_operand0(k32BitJumpPlaceholder);
      break;
  }
  EmitBytecode(node);
}

void BytecodeArrayWriter::EmitSwitch(BytecodeNode* node,
                                     BytecodeJumpTable* jump_table) {
  DCHECK(Bytecodes::IsSwitch(node->bytecode()));

  size_t current_offset = bytecodes()->size();
  if (node->operand_scale() > OperandScale::kSingle) {
    // Adjust for scaling byte prefix.
    current_offset += 1;
  }
  jump_table->set_switch_bytecode_offset(current_offset);

  EmitBytecode(node);
}
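
// Note that the offset recorded here deliberately skips the scaling prefix,
// so the deltas later stored by BindJumpTableEntry are relative to the
// switch opcode itself, matching the jump-distance convention above.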

}  // namespace interpreter
}  // namespace internal
}  // namespace v8