| File: | out/../deps/v8/src/compiler/code-assembler.cc |
| Warning: | line 364, column 5 Value stored to 'node' is never read |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
| 1 | // Copyright 2015 the V8 project authors. All rights reserved. |
| 2 | // Use of this source code is governed by a BSD-style license that can be |
| 3 | // found in the LICENSE file. |
| 4 | |
| 5 | #include "src/compiler/code-assembler.h" |
| 6 | |
| 7 | #include <ostream> |
| 8 | |
| 9 | #include "src/base/bits.h" |
| 10 | #include "src/codegen/code-factory.h" |
| 11 | #include "src/codegen/interface-descriptors-inl.h" |
| 12 | #include "src/codegen/machine-type.h" |
| 13 | #include "src/codegen/macro-assembler.h" |
| 14 | #include "src/compiler/backend/instruction-selector.h" |
| 15 | #include "src/compiler/graph.h" |
| 16 | #include "src/compiler/js-graph.h" |
| 17 | #include "src/compiler/linkage.h" |
| 18 | #include "src/compiler/node-matchers.h" |
| 19 | #include "src/compiler/pipeline.h" |
| 20 | #include "src/compiler/raw-machine-assembler.h" |
| 21 | #include "src/compiler/schedule.h" |
| 22 | #include "src/execution/frames.h" |
| 23 | #include "src/handles/handles-inl.h" |
| 24 | #include "src/heap/factory-inl.h" |
| 25 | #include "src/interpreter/bytecodes.h" |
| 26 | #include "src/numbers/conversions-inl.h" |
| 27 | #include "src/objects/smi.h" |
| 28 | #include "src/utils/memcopy.h" |
| 29 | #include "src/zone/zone.h" |
| 30 | |
| 31 | namespace v8 { |
| 32 | namespace internal { |
| 33 | |
// Out-of-line definitions for the static constexpr members declared in
// machine-type.h (required prior to C++17 inline variables).
constexpr MachineType MachineTypeOf<Smi>::value;
constexpr MachineType MachineTypeOf<Object>::value;
constexpr MachineType MachineTypeOf<MaybeObject>::value;
| 37 | |
| 38 | namespace compiler { |
| 39 | |
// Compile-time sanity checks that the TNode subtyping lattice behaves as
// intended: Number converts up to Object and to the Smi|HeapObject union,
// but the union does not convert down to Number.
static_assert(std::is_convertible<TNode<Number>, TNode<Object>>::value,
              "test subtyping");
static_assert(
    std::is_convertible<TNode<Number>, TNode<UnionT<Smi, HeapObject>>>::value,
    "test subtyping");
static_assert(
    !std::is_convertible<TNode<UnionT<Smi, HeapObject>>, TNode<Number>>::value,
    "test subtyping");
| 48 | |
// Constructs state for a stub with an explicit call interface descriptor;
// delegates to the CallDescriptor-taking constructor below.
CodeAssemblerState::CodeAssemblerState(
    Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
    CodeKind kind, const char* name, Builtin builtin)
    // TODO(rmcilroy): Should we use Linkage::GetBytecodeDispatchDescriptor for
    // bytecode handlers?
    : CodeAssemblerState(
          isolate, zone,
          Linkage::GetStubCallDescriptor(
              zone, descriptor, descriptor.GetStackParameterCount(),
              CallDescriptor::kNoFlags, Operator::kNoProperties),
          kind, name, builtin) {}
| 60 | |
// Constructs state for a JS-linkage builtin with |parameter_count| JS
// parameters; delegates to the CallDescriptor-taking constructor below.
CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
                                       int parameter_count, CodeKind kind,
                                       const char* name, Builtin builtin)
    : CodeAssemblerState(
          isolate, zone,
          Linkage::GetJSCallDescriptor(zone, false, parameter_count,
                                       CallDescriptor::kCanUseRoots),
          kind, name, builtin) {}
| 69 | |
// Main constructor: sets up the RawMachineAssembler (and its graph) plus the
// JSGraph wrapper that the code-generation pipeline consumes. Both are
// allocated in |zone|, which must outlive this state.
CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
                                       CallDescriptor* call_descriptor,
                                       CodeKind kind, const char* name,
                                       Builtin builtin)
    : raw_assembler_(new RawMachineAssembler(
          isolate, zone->New<Graph>(zone), call_descriptor,
          MachineType::PointerRepresentation(),
          InstructionSelector::SupportedMachineOperatorFlags(),
          InstructionSelector::AlignmentRequirements())),
      kind_(kind),
      name_(name),
      builtin_(builtin),
      code_generated_(false),
      variables_(zone),
      jsgraph_(zone->New<JSGraph>(
          isolate, raw_assembler_->graph(), raw_assembler_->common(),
          zone->New<JSOperatorBuilder>(zone), raw_assembler_->simplified(),
          raw_assembler_->machine())) {}
| 88 | |
| 89 | CodeAssemblerState::~CodeAssemblerState() = default; |
| 90 | |
// Number of parameters of the generated code, as recorded in the call
// descriptor (includes the receiver for JS linkage).
int CodeAssemblerState::parameter_count() const {
  return static_cast<int>(raw_assembler_->call_descriptor()->ParameterCount());
}
| 94 | |
| 95 | CodeAssembler::~CodeAssembler() = default; |
| 96 | |
#if DEBUG
// Debug aid: dumps the basic block currently under construction to |os|.
void CodeAssemblerState::PrintCurrentBlock(std::ostream& os) {
  raw_assembler_->PrintCurrentBlock(os);
}
#endif
| 102 | |
| 103 | bool CodeAssemblerState::InsideBlock() { return raw_assembler_->InsideBlock(); } |
| 104 | |
// Records the originating source location (and a message) for debug builds;
// a no-op in release builds.
void CodeAssemblerState::SetInitialDebugInformation(const char* msg,
                                                    const char* file,
                                                    int line) {
#if DEBUG
  AssemblerDebugInfo debug_info = {msg, file, line};
  raw_assembler_->SetCurrentExternalSourcePosition({file, line});
  raw_assembler_->SetInitialDebugInformation(debug_info);
#endif  // DEBUG
}
| 114 | |
| 115 | class BreakOnNodeDecorator final : public GraphDecorator { |
| 116 | public: |
| 117 | explicit BreakOnNodeDecorator(NodeId node_id) : node_id_(node_id) {} |
| 118 | |
| 119 | void Decorate(Node* node) final { |
| 120 | if (node->id() == node_id_) { |
| 121 | base::OS::DebugBreak(); |
| 122 | } |
| 123 | } |
| 124 | |
| 125 | private: |
| 126 | NodeId node_id_; |
| 127 | }; |
| 128 | |
| 129 | void CodeAssembler::BreakOnNode(int node_id) { |
| 130 | Graph* graph = raw_assembler()->graph(); |
| 131 | Zone* zone = graph->zone(); |
| 132 | GraphDecorator* decorator = |
| 133 | zone->New<BreakOnNodeDecorator>(static_cast<NodeId>(node_id)); |
| 134 | graph->AddDecorator(decorator); |
| 135 | } |
| 136 | |
// Registers hooks invoked around every generated call (see CallPrologue /
// CallEpilogue). Both slots must currently be empty.
void CodeAssembler::RegisterCallGenerationCallbacks(
    const CodeAssemblerCallback& call_prologue,
    const CodeAssemblerCallback& call_epilogue) {
  // The callback can be registered only once.
  DCHECK(!state_->call_prologue_);
  DCHECK(!state_->call_epilogue_);
  state_->call_prologue_ = call_prologue;
  state_->call_epilogue_ = call_epilogue;
}
| 146 | |
| 147 | void CodeAssembler::UnregisterCallGenerationCallbacks() { |
| 148 | state_->call_prologue_ = nullptr; |
| 149 | state_->call_epilogue_ = nullptr; |
| 150 | } |
| 151 | |
// Runs the registered pre-call hook, if any.
void CodeAssembler::CallPrologue() {
  if (state_->call_prologue_) {
    state_->call_prologue_();
  }
}
| 157 | |
// Runs the registered post-call hook, if any.
void CodeAssembler::CallEpilogue() {
  if (state_->call_epilogue_) {
    state_->call_epilogue_();
  }
}
| 163 | |
// True if the target implicitly masks 32-bit shift counts (so counts >= 32
// need no explicit masking).
bool CodeAssembler::Word32ShiftIsSafe() const {
  return raw_assembler()->machine()->Word32ShiftIsSafe();
}
| 167 | |
// static
// Runs the code-stub pipeline over the assembled graph and returns the
// resulting Code object. May be called at most once per state.
Handle<Code> CodeAssembler::GenerateCode(
    CodeAssemblerState* state, const AssemblerOptions& options,
    const ProfileDataFromFile* profile_data) {
  DCHECK(!state->code_generated_);

  RawMachineAssembler* rasm = state->raw_assembler_.get();

  Handle<Code> code;
  // Exporting hands the graph over to the optimizer; the RawMachineAssembler
  // must not be used for further assembly afterwards.
  Graph* graph = rasm->ExportForOptimization();

  code = Pipeline::GenerateCodeForCodeStub(
             rasm->isolate(), rasm->call_descriptor(), graph, state->jsgraph_,
             rasm->source_positions(), state->kind_, state->name_,
             state->builtin_, options, profile_data)
             .ToHandleChecked();

  state->code_generated_ = true;
  return code;
}
| 188 | |
| 189 | bool CodeAssembler::Is64() const { return raw_assembler()->machine()->Is64(); } |
| 190 | bool CodeAssembler::Is32() const { return raw_assembler()->machine()->Is32(); } |
| 191 | |
// Feature probe: target support for Float64RoundUp (ceil).
bool CodeAssembler::IsFloat64RoundUpSupported() const {
  return raw_assembler()->machine()->Float64RoundUp().IsSupported();
}
| 195 | |
// Feature probe: target support for Float64RoundDown (floor).
bool CodeAssembler::IsFloat64RoundDownSupported() const {
  return raw_assembler()->machine()->Float64RoundDown().IsSupported();
}
| 199 | |
// Feature probe: target support for Float64RoundTiesEven.
bool CodeAssembler::IsFloat64RoundTiesEvenSupported() const {
  return raw_assembler()->machine()->Float64RoundTiesEven().IsSupported();
}
| 203 | |
// Feature probe: target support for Float64RoundTruncate.
bool CodeAssembler::IsFloat64RoundTruncateSupported() const {
  return raw_assembler()->machine()->Float64RoundTruncate().IsSupported();
}
| 207 | |
// Feature probe: target support for Int32AbsWithOverflow.
bool CodeAssembler::IsInt32AbsWithOverflowSupported() const {
  return raw_assembler()->machine()->Int32AbsWithOverflow().IsSupported();
}
| 211 | |
// Feature probe: target support for Int64AbsWithOverflow.
bool CodeAssembler::IsInt64AbsWithOverflowSupported() const {
  return raw_assembler()->machine()->Int64AbsWithOverflow().IsSupported();
}
| 215 | |
// Pointer-width dispatch of the AbsWithOverflow probe above.
bool CodeAssembler::IsIntPtrAbsWithOverflowSupported() const {
  return Is64() ? IsInt64AbsWithOverflowSupported()
                : IsInt32AbsWithOverflowSupported();
}
| 220 | |
// Feature probe: target support for a 32-bit population-count instruction.
bool CodeAssembler::IsWord32PopcntSupported() const {
  return raw_assembler()->machine()->Word32Popcnt().IsSupported();
}
| 224 | |
// Feature probe: target support for a 64-bit population-count instruction.
bool CodeAssembler::IsWord64PopcntSupported() const {
  return raw_assembler()->machine()->Word64Popcnt().IsSupported();
}
| 228 | |
// Feature probe: target support for 32-bit count-trailing-zeros.
bool CodeAssembler::IsWord32CtzSupported() const {
  return raw_assembler()->machine()->Word32Ctz().IsSupported();
}
| 232 | |
// Feature probe: target support for 64-bit count-trailing-zeros.
bool CodeAssembler::IsWord64CtzSupported() const {
  return raw_assembler()->machine()->Word64Ctz().IsSupported();
}
| 236 | |
#ifdef DEBUG
// Debug-only check that |node| is a strong (non-weak) reference: aborts at
// runtime with |location| in the message if the weak tag bits are set.
void CodeAssembler::GenerateCheckMaybeObjectIsObject(TNode<MaybeObject> node,
                                                     const char* location) {
  Label ok(this);
  // Pass if (word & kHeapObjectTagMask) != kWeakHeapObjectTag, i.e. the value
  // is not a weak reference.
  GotoIf(WordNotEqual(WordAnd(BitcastMaybeObjectToWord(node),
                              IntPtrConstant(kHeapObjectTagMask)),
                      IntPtrConstant(kWeakHeapObjectTag)),
         &ok);
  base::EmbeddedVector<char, 1024> message;
  SNPrintF(message, "no Object: %s", location);
  TNode<String> message_node = StringConstant(message.begin());
  // This somewhat misuses the AbortCSADcheck runtime function. This will print
  // "abort: CSA_DCHECK failed: <message>", which is good enough.
  AbortCSADcheck(message_node);
  Unreachable();
  Bind(&ok);
}
#endif
| 255 | |
// Emits (or reuses) a cached 32-bit integer constant node.
TNode<Int32T> CodeAssembler::Int32Constant(int32_t value) {
  return UncheckedCast<Int32T>(jsgraph()->Int32Constant(value));
}
| 259 | |
// Emits (or reuses) a cached 64-bit integer constant node.
TNode<Int64T> CodeAssembler::Int64Constant(int64_t value) {
  return UncheckedCast<Int64T>(jsgraph()->Int64Constant(value));
}
| 263 | |
// Emits (or reuses) a cached pointer-width integer constant node.
TNode<IntPtrT> CodeAssembler::IntPtrConstant(intptr_t value) {
  return UncheckedCast<IntPtrT>(jsgraph()->IntPtrConstant(value));
}
| 267 | |
// Emits a TaggedIndex constant; |value| must fit the TaggedIndex range.
TNode<TaggedIndex> CodeAssembler::TaggedIndexConstant(intptr_t value) {
  DCHECK(TaggedIndex::IsValid(value));
  return UncheckedCast<TaggedIndex>(raw_assembler()->IntPtrConstant(value));
}
| 272 | |
// Emits a Number constant: a Smi when |value| is a Smi-representable integer,
// otherwise an eagerly allocated HeapNumber.
TNode<Number> CodeAssembler::NumberConstant(double value) {
  int smi_value;
  if (DoubleToSmiInteger(value, &smi_value)) {
    return UncheckedCast<Number>(SmiConstant(smi_value));
  } else {
    // We allocate the heap number constant eagerly at this point instead of
    // deferring allocation to code generation
    // (see AllocateAndInstallRequestedHeapObjects) since that makes it easier
    // to generate constant lookups for embedded builtins.
    return UncheckedCast<Number>(HeapConstant(
        isolate()->factory()->NewHeapNumberForCodeAssembler(value)));
  }
}
| 286 | |
// Emits a Smi constant as a bitcast of its raw tagged word value; the bitcast
// keeps the machine-level constant and the tagged type distinct in the graph.
TNode<Smi> CodeAssembler::SmiConstant(Smi value) {
  return UncheckedCast<Smi>(BitcastWordToTaggedSigned(
      IntPtrConstant(static_cast<intptr_t>(value.ptr()))));
}
| 291 | |
// Convenience overload: tags the int and delegates to SmiConstant(Smi).
TNode<Smi> CodeAssembler::SmiConstant(int value) {
  return SmiConstant(Smi::FromInt(value));
}
| 295 | |
// Emits a heap-object constant without narrowing its static type.
TNode<HeapObject> CodeAssembler::UntypedHeapConstant(
    Handle<HeapObject> object) {
  return UncheckedCast<HeapObject>(jsgraph()->HeapConstant(object));
}
| 300 | |
// Emits a constant for the internalized copy of the one-byte C string |str|.
TNode<String> CodeAssembler::StringConstant(const char* str) {
  Handle<String> internalized_string =
      factory()->InternalizeString(base::OneByteVector(str));
  return UncheckedCast<String>(HeapConstant(internalized_string));
}
| 306 | |
// Emits the canonical true/false oddball for |value|.
TNode<Oddball> CodeAssembler::BooleanConstant(bool value) {
  Handle<Object> object = isolate()->factory()->ToBoolean(value);
  return UncheckedCast<Oddball>(
      jsgraph()->HeapConstant(Handle<HeapObject>::cast(object)));
}
| 312 | |
// Emits a constant node for an external (C++-side) reference.
TNode<ExternalReference> CodeAssembler::ExternalConstant(
    ExternalReference address) {
  return UncheckedCast<ExternalReference>(
      raw_assembler()->ExternalConstant(address));
}
| 318 | |
// Emits a 32-bit float constant (note: the double argument is narrowed).
TNode<Float32T> CodeAssembler::Float32Constant(double value) {
  return UncheckedCast<Float32T>(jsgraph()->Float32Constant(value));
}
| 322 | |
// Emits a 64-bit float constant.
TNode<Float64T> CodeAssembler::Float64Constant(double value) {
  return UncheckedCast<Float64T>(jsgraph()->Float64Constant(value));
}
| 326 | |
// True if |node| is the constant offset of an object's map field.
bool CodeAssembler::IsMapOffsetConstant(Node* node) {
  return raw_assembler()->IsMapOffsetConstant(node);
}
| 330 | |
| 331 | bool CodeAssembler::TryToInt32Constant(TNode<IntegralT> node, |
| 332 | int32_t* out_value) { |
| 333 | { |
| 334 | Int64Matcher m(node); |
| 335 | if (m.HasResolvedValue() && |
| 336 | m.IsInRange(std::numeric_limits<int32_t>::min(), |
| 337 | std::numeric_limits<int32_t>::max())) { |
| 338 | *out_value = static_cast<int32_t>(m.ResolvedValue()); |
| 339 | return true; |
| 340 | } |
| 341 | } |
| 342 | |
| 343 | { |
| 344 | Int32Matcher m(node); |
| 345 | if (m.HasResolvedValue()) { |
| 346 | *out_value = m.ResolvedValue(); |
| 347 | return true; |
| 348 | } |
| 349 | } |
| 350 | |
| 351 | return false; |
| 352 | } |
| 353 | |
| 354 | bool CodeAssembler::TryToInt64Constant(TNode<IntegralT> node, |
| 355 | int64_t* out_value) { |
| 356 | Int64Matcher m(node); |
| 357 | if (m.HasResolvedValue()) *out_value = m.ResolvedValue(); |
| 358 | return m.HasResolvedValue(); |
| 359 | } |
| 360 | |
| 361 | bool CodeAssembler::TryToSmiConstant(TNode<Smi> tnode, Smi* out_value) { |
| 362 | Node* node = tnode; |
| 363 | if (node->opcode() == IrOpcode::kBitcastWordToTaggedSigned) { |
| 364 | node = node->InputAt(0); |
Value stored to 'node' is never read | |
| 365 | } |
| 366 | return TryToSmiConstant(ReinterpretCast<IntPtrT>(tnode), out_value); |
| 367 | } |
| 368 | |
// Attempts to resolve an integral |node| to a constant and reinterpret that
// word as a Smi. The word must carry a valid Smi bit pattern.
bool CodeAssembler::TryToSmiConstant(TNode<IntegralT> node, Smi* out_value) {
  IntPtrMatcher m(node);
  if (m.HasResolvedValue()) {
    intptr_t value = m.ResolvedValue();
    // Make sure that the value is actually a smi
    CHECK_EQ(0, value & ((static_cast<intptr_t>(1) << kSmiShiftSize) - 1));
    *out_value = Smi(static_cast<Address>(value));
    return true;
  }
  return false;
}
| 380 | |
| 381 | bool CodeAssembler::TryToIntPtrConstant(TNode<Smi> tnode, intptr_t* out_value) { |
| 382 | Node* node = tnode; |
| 383 | if (node->opcode() == IrOpcode::kBitcastWordToTaggedSigned || |
| 384 | node->opcode() == IrOpcode::kBitcastWordToTagged) { |
| 385 | node = node->InputAt(0); |
| 386 | } |
| 387 | return TryToIntPtrConstant(ReinterpretCast<IntPtrT>(tnode), out_value); |
| 388 | } |
| 389 | |
| 390 | bool CodeAssembler::TryToIntPtrConstant(TNode<IntegralT> node, |
| 391 | intptr_t* out_value) { |
| 392 | IntPtrMatcher m(node); |
| 393 | if (m.HasResolvedValue()) *out_value = m.ResolvedValue(); |
| 394 | return m.HasResolvedValue(); |
| 395 | } |
| 396 | |
// True if |node| is statically known to be the undefined oddball.
bool CodeAssembler::IsUndefinedConstant(TNode<Object> node) {
  compiler::HeapObjectMatcher m(node);
  return m.Is(isolate()->factory()->undefined_value());
}
| 401 | |
// True if |node| is statically known to be the null oddball.
bool CodeAssembler::IsNullConstant(TNode<Object> node) {
  compiler::HeapObjectMatcher m(node);
  return m.Is(isolate()->factory()->null_value());
}
| 406 | |
| 407 | Node* CodeAssembler::UntypedParameter(int index) { |
| 408 | if (index == kTargetParameterIndex) return raw_assembler()->TargetParameter(); |
| 409 | return raw_assembler()->Parameter(index); |
| 410 | } |
| 411 | |
// True when this assembler builds code with JS-call linkage.
bool CodeAssembler::IsJSFunctionCall() const {
  auto call_descriptor = raw_assembler()->call_descriptor();
  return call_descriptor->IsJSFunctionCall();
}
| 416 | |
// Returns the context parameter of a JS-linkage function; only valid when
// IsJSFunctionCall() holds.
TNode<Context> CodeAssembler::GetJSContextParameter() {
  auto call_descriptor = raw_assembler()->call_descriptor();
  DCHECK(call_descriptor->IsJSFunctionCall());
  return Parameter<Context>(Linkage::GetJSCallContextParamIndex(
      static_cast<int>(call_descriptor->JSParameterCount())));
}
| 423 | |
// Returns a single tagged value; the call descriptor must agree.
void CodeAssembler::Return(TNode<Object> value) {
  DCHECK_EQ(1, raw_assembler()->call_descriptor()->ReturnCount());
  DCHECK(raw_assembler()->call_descriptor()->GetReturnType(0).IsTagged());
  return raw_assembler()->Return(value);
}
| 429 | |
// Returns two tagged values; the call descriptor must agree.
void CodeAssembler::Return(TNode<Object> value1, TNode<Object> value2) {
  DCHECK_EQ(2, raw_assembler()->call_descriptor()->ReturnCount());
  DCHECK(raw_assembler()->call_descriptor()->GetReturnType(0).IsTagged());
  DCHECK(raw_assembler()->call_descriptor()->GetReturnType(1).IsTagged());
  return raw_assembler()->Return(value1, value2);
}
| 436 | |
// Returns three tagged values; the call descriptor must agree.
void CodeAssembler::Return(TNode<Object> value1, TNode<Object> value2,
                           TNode<Object> value3) {
  DCHECK_EQ(3, raw_assembler()->call_descriptor()->ReturnCount());
  DCHECK(raw_assembler()->call_descriptor()->GetReturnType(0).IsTagged());
  DCHECK(raw_assembler()->call_descriptor()->GetReturnType(1).IsTagged());
  DCHECK(raw_assembler()->call_descriptor()->GetReturnType(2).IsTagged());
  return raw_assembler()->Return(value1, value2, value3);
}
| 445 | |
// Returns a single untagged int32 value; the call descriptor must agree.
void CodeAssembler::Return(TNode<Int32T> value) {
  DCHECK_EQ(1, raw_assembler()->call_descriptor()->ReturnCount());
  DCHECK_EQ(MachineType::Int32(),
            raw_assembler()->call_descriptor()->GetReturnType(0));
  return raw_assembler()->Return(value);
}
| 452 | |
// Returns a single untagged uint32 value; the call descriptor must agree.
void CodeAssembler::Return(TNode<Uint32T> value) {
  DCHECK_EQ(1, raw_assembler()->call_descriptor()->ReturnCount());
  DCHECK_EQ(MachineType::Uint32(),
            raw_assembler()->call_descriptor()->GetReturnType(0));
  return raw_assembler()->Return(value);
}
| 459 | |
// Returns a single pointer-width word; the call descriptor must agree.
void CodeAssembler::Return(TNode<WordT> value) {
  DCHECK_EQ(1, raw_assembler()->call_descriptor()->ReturnCount());
  DCHECK_EQ(
      MachineType::PointerRepresentation(),
      raw_assembler()->call_descriptor()->GetReturnType(0).representation());
  return raw_assembler()->Return(value);
}
| 467 | |
// Returns a single float32 value; the call descriptor must agree.
void CodeAssembler::Return(TNode<Float32T> value) {
  DCHECK_EQ(1, raw_assembler()->call_descriptor()->ReturnCount());
  DCHECK_EQ(MachineType::Float32(),
            raw_assembler()->call_descriptor()->GetReturnType(0));
  return raw_assembler()->Return(value);
}
| 474 | |
// Returns a single float64 value; the call descriptor must agree.
void CodeAssembler::Return(TNode<Float64T> value) {
  DCHECK_EQ(1, raw_assembler()->call_descriptor()->ReturnCount());
  DCHECK_EQ(MachineType::Float64(),
            raw_assembler()->call_descriptor()->GetReturnType(0));
  return raw_assembler()->Return(value);
}
| 481 | |
// Returns two pointer-width words; the call descriptor must agree.
void CodeAssembler::Return(TNode<WordT> value1, TNode<WordT> value2) {
  DCHECK_EQ(2, raw_assembler()->call_descriptor()->ReturnCount());
  DCHECK_EQ(
      MachineType::PointerRepresentation(),
      raw_assembler()->call_descriptor()->GetReturnType(0).representation());
  DCHECK_EQ(
      MachineType::PointerRepresentation(),
      raw_assembler()->call_descriptor()->GetReturnType(1).representation());
  return raw_assembler()->Return(value1, value2);
}
| 492 | |
// Returns a word followed by a tagged value; the call descriptor must agree.
void CodeAssembler::Return(TNode<WordT> value1, TNode<Object> value2) {
  DCHECK_EQ(2, raw_assembler()->call_descriptor()->ReturnCount());
  DCHECK_EQ(
      MachineType::PointerRepresentation(),
      raw_assembler()->call_descriptor()->GetReturnType(0).representation());
  DCHECK(raw_assembler()->call_descriptor()->GetReturnType(1).IsTagged());
  return raw_assembler()->Return(value1, value2);
}
| 501 | |
// Returns |value| after popping |pop| additional stack arguments (used by
// builtins with a dynamic argument count).
void CodeAssembler::PopAndReturn(Node* pop, Node* value) {
  DCHECK_EQ(1, raw_assembler()->call_descriptor()->ReturnCount());
  return raw_assembler()->PopAndReturn(pop, value);
}
| 506 | |
| 507 | void CodeAssembler::ReturnIf(TNode<BoolT> condition, TNode<Object> value) { |
| 508 | Label if_return(this), if_continue(this); |
| 509 | Branch(condition, &if_return, &if_continue); |
| 510 | Bind(&if_return); |
| 511 | Return(value); |
| 512 | Bind(&if_continue); |
| 513 | } |
| 514 | |
// Emits a runtime abort carrying |message| (a String node).
void CodeAssembler::AbortCSADcheck(Node* message) {
  raw_assembler()->AbortCSADcheck(message);
}
| 518 | |
| 519 | void CodeAssembler::DebugBreak() { raw_assembler()->DebugBreak(); } |
| 520 | |
// Marks the current position as unreachable; the DebugBreak makes it trap
// loudly if control ever does get here.
void CodeAssembler::Unreachable() {
  DebugBreak();
  raw_assembler()->Unreachable();
}
| 525 | |
// Attaches a code comment; a no-op unless --code-comments is enabled.
void CodeAssembler::Comment(std::string str) {
  if (!FLAG_code_comments) return;
  raw_assembler()->Comment(str);
}
| 530 | |
// Emits an assertion the optimizer must prove true at compile time.
void CodeAssembler::StaticAssert(TNode<BoolT> value, const char* source) {
  raw_assembler()->StaticAssert(value, source);
}
| 534 | |
// Updates the external (file/line) source position for subsequent nodes.
void CodeAssembler::SetSourcePosition(const char* file, int line) {
  raw_assembler()->SetCurrentExternalSourcePosition({file, line});
}
| 538 | |
// Pushes the current source position onto the macro-call stack (used to build
// macro inlining traces for diagnostics).
void CodeAssembler::PushSourcePosition() {
  auto position = raw_assembler()->GetCurrentExternalSourcePosition();
  state_->macro_call_stack_.push_back(position);
}
| 543 | |
// Pops the most recent entry from the macro-call stack.
void CodeAssembler::PopSourcePosition() {
  state_->macro_call_stack_.pop_back();
}
| 547 | |
// Read-only view of the current macro-call position stack.
const std::vector<FileAndLine>& CodeAssembler::GetMacroSourcePositionStack()
    const {
  return state_->macro_call_stack_;
}
| 552 | |
| 553 | void CodeAssembler::Bind(Label* label) { return label->Bind(); } |
| 554 | |
| 555 | #if DEBUG |
| 556 | void CodeAssembler::Bind(Label* label, AssemblerDebugInfo debug_info) { |
| 557 | return label->Bind(debug_info); |
| 558 | } |
| 559 | #endif // DEBUG |
| 560 | |
// Loads the current frame pointer.
TNode<RawPtrT> CodeAssembler::LoadFramePointer() {
  return UncheckedCast<RawPtrT>(raw_assembler()->LoadFramePointer());
}
| 564 | |
// Loads the caller's frame pointer.
TNode<RawPtrT> CodeAssembler::LoadParentFramePointer() {
  return UncheckedCast<RawPtrT>(raw_assembler()->LoadParentFramePointer());
}
| 568 | |
// Defines all trivial binary-operation wrappers: each op forwards to the
// RawMachineAssembler node of the same name and unchecked-casts the untyped
// result to its declared result type. (The macro expansion, shown inlined in
// the analyzer view, covers the full CODE_ASSEMBLER_BINARY_OP_LIST.)
#define DEFINE_CODE_ASSEMBLER_BINARY_OP(name, ResType, Arg1Type, Arg2Type)  \
  TNode<ResType> CodeAssembler::name(TNode<Arg1Type> a, TNode<Arg2Type> b) { \
    return UncheckedCast<ResType>(raw_assembler()->name(a, b));              \
  }
CODE_ASSEMBLER_BINARY_OP_LIST(DEFINE_CODE_ASSEMBLER_BINARY_OP)
#undef DEFINE_CODE_ASSEMBLER_BINARY_OP
| 575 | |
| 576 | TNode<WordT> CodeAssembler::WordShl(TNode<WordT> value, int shift) { |
| 577 | return (shift != 0) ? WordShl(value, IntPtrConstant(shift)) : value; |
| 578 | } |
| 579 | |
| 580 | TNode<WordT> CodeAssembler::WordShr(TNode<WordT> value, int shift) { |
| 581 | return (shift != 0) ? WordShr(value, IntPtrConstant(shift)) : value; |
| 582 | } |
| 583 | |
| 584 | TNode<WordT> CodeAssembler::WordSar(TNode<WordT> value, int shift) { |
| 585 | return (shift != 0) ? WordSar(value, IntPtrConstant(shift)) : value; |
| 586 | } |
| 587 | |
| 588 | TNode<Word32T> CodeAssembler::Word32Shr(TNode<Word32T> value, int shift) { |
| 589 | return (shift != 0) ? Word32Shr(value, Int32Constant(shift)) : value; |
| 590 | } |
| 591 | |
| 592 | TNode<Word32T> CodeAssembler::Word32Sar(TNode<Word32T> value, int shift) { |
| 593 | return (shift != 0) ? Word32Sar(value, Int32Constant(shift)) : value; |
| 594 | } |
| 595 | |
// Defines a comparison operation that constant-folds when possible: if both
// operands can be resolved to compile-time constants (via |ToConstant|), the
// result is materialized directly as a BoolConstant; otherwise a machine
// comparison node is emitted.
#define CODE_ASSEMBLER_COMPARE(Name, ArgT, VarT, ToConstant, op)          \
  TNode<BoolT> CodeAssembler::Name(TNode<ArgT> left, TNode<ArgT> right) { \
    VarT lhs, rhs;                                                        \
    if (ToConstant(left, &lhs) && ToConstant(right, &rhs)) {              \
      return BoolConstant(lhs op rhs);                                    \
    }                                                                     \
    return UncheckedCast<BoolT>(raw_assembler()->Name(left, right));      \
  }
| 604 | |
// Instantiate the constant-folding equality comparisons for pointer-sized,
// 32-bit, and 64-bit operands.
CODE_ASSEMBLER_COMPARE(IntPtrEqual, WordT, intptr_t, TryToIntPtrConstant, ==)
CODE_ASSEMBLER_COMPARE(WordEqual, WordT, intptr_t, TryToIntPtrConstant, ==)
CODE_ASSEMBLER_COMPARE(WordNotEqual, WordT, intptr_t, TryToIntPtrConstant, !=)
CODE_ASSEMBLER_COMPARE(Word32Equal, Word32T, int32_t, TryToInt32Constant, ==)
CODE_ASSEMBLER_COMPARE(Word32NotEqual, Word32T, int32_t, TryToInt32Constant, !=)
CODE_ASSEMBLER_COMPARE(Word64Equal, Word64T, int64_t, TryToInt64Constant, ==)
CODE_ASSEMBLER_COMPARE(Word64NotEqual, Word64T, int64_t, TryToInt64Constant, !=)
#undef CODE_ASSEMBLER_COMPARE
| 613 | |
// Zero-extends a 32-bit value to pointer width. On 32-bit targets the value
// is already word-sized, so it is only reinterpreted.
TNode<UintPtrT> CodeAssembler::ChangeUint32ToWord(TNode<Word32T> value) {
  if (raw_assembler()->machine()->Is64()) {
    return UncheckedCast<UintPtrT>(
        raw_assembler()->ChangeUint32ToUint64(value));
  }
  return ReinterpretCast<UintPtrT>(value);
}
| 621 | |
// Sign-extends a 32-bit value to pointer width. On 32-bit targets the value
// is already word-sized, so it is only reinterpreted.
TNode<IntPtrT> CodeAssembler::ChangeInt32ToIntPtr(TNode<Word32T> value) {
  if (raw_assembler()->machine()->Is64()) {
    return UncheckedCast<IntPtrT>(raw_assembler()->ChangeInt32ToInt64(value));
  }
  return ReinterpretCast<IntPtrT>(value);
}
| 628 | |
// Converts a float64 to a pointer-sized signed integer, using the 64-bit or
// 32-bit machine conversion depending on the target word size.
TNode<IntPtrT> CodeAssembler::ChangeFloat64ToIntPtr(TNode<Float64T> value) {
  if (raw_assembler()->machine()->Is64()) {
    return UncheckedCast<IntPtrT>(raw_assembler()->ChangeFloat64ToInt64(value));
  }
  return UncheckedCast<IntPtrT>(raw_assembler()->ChangeFloat64ToInt32(value));
}
| 635 | |
// Converts a float64 to a pointer-sized unsigned integer, using the 64-bit or
// 32-bit machine conversion depending on the target word size.
TNode<UintPtrT> CodeAssembler::ChangeFloat64ToUintPtr(TNode<Float64T> value) {
  if (raw_assembler()->machine()->Is64()) {
    return UncheckedCast<UintPtrT>(
        raw_assembler()->ChangeFloat64ToUint64(value));
  }
  return UncheckedCast<UintPtrT>(raw_assembler()->ChangeFloat64ToUint32(value));
}
| 643 | |
// Converts a pointer-sized unsigned integer to float64.
TNode<Float64T> CodeAssembler::ChangeUintPtrToFloat64(TNode<UintPtrT> value) {
  if (raw_assembler()->machine()->Is64()) {
    // TODO(turbofan): Maybe we should introduce a ChangeUint64ToFloat64
    // machine operator to TurboFan here?
    return UncheckedCast<Float64T>(
        raw_assembler()->RoundUint64ToFloat64(value));
  }
  return UncheckedCast<Float64T>(raw_assembler()->ChangeUint32ToFloat64(value));
}
| 653 | |
// Converts a pointer-sized signed integer to float64. On 64-bit targets the
// conversion may round (float64 has only 53 mantissa bits); on 32-bit targets
// it is exact.
TNode<Float64T> CodeAssembler::RoundIntPtrToFloat64(Node* value) {
  if (raw_assembler()->machine()->Is64()) {
    return UncheckedCast<Float64T>(raw_assembler()->RoundInt64ToFloat64(value));
  }
  return UncheckedCast<Float64T>(raw_assembler()->ChangeInt32ToFloat64(value));
}
| 660 | |
// Truncates a float32 to int32; out-of-range inputs produce the minimum
// int32 value (kSetOverflowToMin) rather than an unspecified result.
TNode<Int32T> CodeAssembler::TruncateFloat32ToInt32(TNode<Float32T> value) {
  return UncheckedCast<Int32T>(raw_assembler()->TruncateFloat32ToInt32(
      value, TruncateKind::kSetOverflowToMin));
}
// Defines every single-operand operation listed in
// CODE_ASSEMBLER_UNARY_OP_LIST as a thin typed wrapper over the
// corresponding raw-machine-assembler node constructor.
#define DEFINE_CODE_ASSEMBLER_UNARY_OP(name, ResType, ArgType) \
  TNode<ResType> CodeAssembler::name(TNode<ArgType> a) {       \
    return UncheckedCast<ResType>(raw_assembler()->name(a));   \
  }
CODE_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_ASSEMBLER_UNARY_OP)
#undef DEFINE_CODE_ASSEMBLER_UNARY_OP
| 671 | |
// Emits an untyped load of |type| from |base| (offset 0).
Node* CodeAssembler::Load(MachineType type, Node* base) {
  return raw_assembler()->Load(type, base);
}
| 675 | |
// Emits an untyped load of |type| from |base| + |offset|.
Node* CodeAssembler::Load(MachineType type, Node* base, Node* offset) {
  return raw_assembler()->Load(type, base, offset);
}
| 679 | |
// Loads a full (uncompressed) tagged value from |base| and reinterprets the
// raw word as a tagged Object.
TNode<Object> CodeAssembler::LoadFullTagged(Node* base) {
  return BitcastWordToTagged(Load<RawPtrT>(base));
}
| 683 | |
// Loads a full (uncompressed) tagged value from |base| + |offset|. Loading a
// map word this way is disallowed (it may be packed); see the DCHECK.
TNode<Object> CodeAssembler::LoadFullTagged(Node* base, TNode<IntPtrT> offset) {
  // Please use LoadFromObject(MachineType::MapInHeader(), object,
  // IntPtrConstant(-kHeapObjectTag)) instead.
  DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
  return BitcastWordToTagged(Load<RawPtrT>(base, offset));
}
| 690 | |
// Emits an atomic load of |type| with the given memory |order| from
// |base| + |offset|. Map words must not be loaded this way.
Node* CodeAssembler::AtomicLoad(MachineType type, AtomicMemoryOrder order,
                                TNode<RawPtrT> base, TNode<WordT> offset) {
  DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
  return raw_assembler()->AtomicLoad(AtomicLoadParameters(type, order), base,
                                     offset);
}
| 697 | |
// Emits a 64-bit atomic load; always uses the Uint64 machine type, the
// template parameter only selects the signedness of the result wrapper.
// Explicitly instantiated below for AtomicInt64 and AtomicUint64.
template <class Type>
TNode<Type> CodeAssembler::AtomicLoad64(AtomicMemoryOrder order,
                                        TNode<RawPtrT> base,
                                        TNode<WordT> offset) {
  return UncheckedCast<Type>(raw_assembler()->AtomicLoad64(
      AtomicLoadParameters(MachineType::Uint64(), order), base, offset));
}

template TNode<AtomicInt64> CodeAssembler::AtomicLoad64<AtomicInt64>(
    AtomicMemoryOrder order, TNode<RawPtrT> base, TNode<WordT> offset);
template TNode<AtomicUint64> CodeAssembler::AtomicLoad64<AtomicUint64>(
    AtomicMemoryOrder order, TNode<RawPtrT> base, TNode<WordT> offset);
| 710 | |
// Loads |type| from a tagged |object| at |offset| (tag-adjusted by the
// caller), via the raw assembler's object-aware load.
Node* CodeAssembler::LoadFromObject(MachineType type, TNode<Object> object,
                                    TNode<IntPtrT> offset) {
  return raw_assembler()->LoadFromObject(type, object, offset);
}
| 715 | |
#ifdef V8_MAP_PACKING
// Packs a map pointer into its in-heap map-word representation by XOR-ing
// with the map-word mask (see Internals::kMapWordXorMask).
Node* CodeAssembler::PackMapWord(Node* value) {
  TNode<IntPtrT> map_word =
      BitcastTaggedToWordForTagAndSmiBits(UncheckedCast<AnyTaggedT>(value));
  TNode<WordT> packed = WordXor(UncheckedCast<WordT>(map_word),
                                IntPtrConstant(Internals::kMapWordXorMask));
  return BitcastWordToTaggedSigned(packed);
}
#endif
| 725 | |
// Loads a root as a map word: with map packing enabled the root map constant
// is packed first; otherwise this is a plain root load.
TNode<AnyTaggedT> CodeAssembler::LoadRootMapWord(RootIndex root_index) {
#ifdef V8_MAP_PACKING
  Handle<Object> root = isolate()->root_handle(root_index);
  Node* map = HeapConstant(Handle<Map>::cast(root));
  map = PackMapWord(map);
  return ReinterpretCast<AnyTaggedT>(map);
#else
  return LoadRoot(root_index);
#endif
}
| 736 | |
// Loads a root value. Immortal-immovable roots are embedded directly as
// constants; all other roots are loaded from the isolate's roots table at
// runtime.
TNode<Object> CodeAssembler::LoadRoot(RootIndex root_index) {
  if (RootsTable::IsImmortalImmovable(root_index)) {
    Handle<Object> root = isolate()->root_handle(root_index);
    if (root->IsSmi()) {
      return SmiConstant(Smi::cast(*root));
    } else {
      return HeapConstant(Handle<HeapObject>::cast(root));
    }
  }

  // TODO(jgruber): In theory we could generate better code for this by
  // letting the macro assembler decide how to load from the roots list. In most
  // cases, it would boil down to loading from a fixed kRootRegister offset.
  TNode<ExternalReference> isolate_root =
      ExternalConstant(ExternalReference::isolate_root(isolate()));
  int offset = IsolateData::root_slot_offset(root_index);
  return UncheckedCast<Object>(
      LoadFullTagged(isolate_root, IntPtrConstant(offset)));
}
| 756 | |
// Emits a load that tolerates unaligned addresses.
Node* CodeAssembler::UnalignedLoad(MachineType type, TNode<RawPtrT> base,
                                   TNode<WordT> offset) {
  return raw_assembler()->UnalignedLoad(type, static_cast<Node*>(base), offset);
}
| 761 | |
// Stores a tagged |value| at |base| (offset 0) with a full write barrier.
void CodeAssembler::Store(Node* base, Node* value) {
  raw_assembler()->Store(MachineRepresentation::kTagged, base, value,
                         kFullWriteBarrier);
}
| 766 | |
// Stores |value| into |object| at |offset|, translating the requested
// write-barrier policy into the corresponding WriteBarrierKind. For kNone
// on a representation that could hold a tagged pointer, an asserting
// no-write-barrier kind is used so debug builds can catch misuse.
void CodeAssembler::StoreToObject(MachineRepresentation rep,
                                  TNode<Object> object, TNode<IntPtrT> offset,
                                  Node* value,
                                  StoreToObjectWriteBarrier write_barrier) {
  WriteBarrierKind write_barrier_kind;
  switch (write_barrier) {
    case StoreToObjectWriteBarrier::kFull:
      write_barrier_kind = WriteBarrierKind::kFullWriteBarrier;
      break;
    case StoreToObjectWriteBarrier::kMap:
      write_barrier_kind = WriteBarrierKind::kMapWriteBarrier;
      break;
    case StoreToObjectWriteBarrier::kNone:
      if (CanBeTaggedPointer(rep)) {
        write_barrier_kind = WriteBarrierKind::kAssertNoWriteBarrier;
      } else {
        write_barrier_kind = WriteBarrierKind::kNoWriteBarrier;
      }
      break;
  }
  raw_assembler()->StoreToObject(rep, object, offset, value,
                                 write_barrier_kind);
}
| 790 | |
// Stores |value| into a field of |object| at the constant |offset| with a
// full write barrier.
void CodeAssembler::OptimizedStoreField(MachineRepresentation rep,
                                        TNode<HeapObject> object, int offset,
                                        Node* value) {
  raw_assembler()->OptimizedStoreField(rep, object, offset, value,
                                       WriteBarrierKind::kFullWriteBarrier);
}
| 797 | |
// Field store without a write barrier; debug builds assert that no barrier
// would have been required.
void CodeAssembler::OptimizedStoreFieldAssertNoWriteBarrier(
    MachineRepresentation rep, TNode<HeapObject> object, int offset,
    Node* value) {
  raw_assembler()->OptimizedStoreField(rep, object, offset, value,
                                       WriteBarrierKind::kAssertNoWriteBarrier);
}
| 804 | |
// Field store with the write barrier unconditionally omitted; the caller is
// responsible for GC safety.
void CodeAssembler::OptimizedStoreFieldUnsafeNoWriteBarrier(
    MachineRepresentation rep, TNode<HeapObject> object, int offset,
    Node* value) {
  raw_assembler()->OptimizedStoreField(rep, object, offset, value,
                                       WriteBarrierKind::kNoWriteBarrier);
}
| 811 | |
// Stores |map| into |object|'s map slot via the dedicated map-store node
// (handles map packing and the map write barrier).
void CodeAssembler::OptimizedStoreMap(TNode<HeapObject> object,
                                      TNode<Map> map) {
  raw_assembler()->OptimizedStoreMap(object, map);
}
| 816 | |
// Stores a tagged |value| at |base| + |offset| with a full write barrier.
// Storing to the map slot this way is disallowed.
void CodeAssembler::Store(Node* base, Node* offset, Node* value) {
  // Please use OptimizedStoreMap(base, value) instead.
  DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
  raw_assembler()->Store(MachineRepresentation::kTagged, base, offset, value,
                         kFullWriteBarrier);
}
| 823 | |
// Stores an ephemeron (weak-table) key with the dedicated ephemeron-key
// write barrier.
void CodeAssembler::StoreEphemeronKey(Node* base, Node* offset, Node* value) {
  DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
  raw_assembler()->Store(MachineRepresentation::kTagged, base, offset, value,
                         kEphemeronKeyWriteBarrier);
}
| 829 | |
// Barrier-free store at |base|; for representations that may hold tagged
// pointers, debug builds assert that no barrier was actually needed.
void CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
                                        Node* value) {
  raw_assembler()->Store(
      rep, base, value,
      CanBeTaggedPointer(rep) ? kAssertNoWriteBarrier : kNoWriteBarrier);
}
| 836 | |
// Barrier-free store at |base| + |offset|; see the single-operand overload
// for the barrier-assertion policy. Map-slot stores are disallowed.
void CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
                                        Node* offset, Node* value) {
  // Please use OptimizedStoreMap(base, value) instead.
  DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
  raw_assembler()->Store(
      rep, base, offset, value,
      CanBeTaggedPointer(rep) ? kAssertNoWriteBarrier : kNoWriteBarrier);
}
| 845 | |
// Barrier-free store with no debug assertion — caller guarantees GC safety.
void CodeAssembler::UnsafeStoreNoWriteBarrier(MachineRepresentation rep,
                                              Node* base, Node* value) {
  raw_assembler()->Store(rep, base, value, kNoWriteBarrier);
}
| 850 | |
// Barrier-free store at |base| + |offset| with no debug assertion — caller
// guarantees GC safety. Map-slot stores are disallowed.
void CodeAssembler::UnsafeStoreNoWriteBarrier(MachineRepresentation rep,
                                              Node* base, Node* offset,
                                              Node* value) {
  // Please use OptimizedStoreMap(base, value) instead.
  DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
  raw_assembler()->Store(rep, base, offset, value, kNoWriteBarrier);
}
| 858 | |
// Stores a full (uncompressed) tagged value at |base| without a write
// barrier, by bitcasting it to a raw machine word first.
void CodeAssembler::StoreFullTaggedNoWriteBarrier(TNode<RawPtrT> base,
                                                  TNode<Object> tagged_value) {
  StoreNoWriteBarrier(MachineType::PointerRepresentation(), base,
                      BitcastTaggedToWord(tagged_value));
}
| 864 | |
// Stores a full (uncompressed) tagged value at |base| + |offset| without a
// write barrier. Map-slot stores are disallowed.
void CodeAssembler::StoreFullTaggedNoWriteBarrier(TNode<RawPtrT> base,
                                                  TNode<IntPtrT> offset,
                                                  TNode<Object> tagged_value) {
  // Please use OptimizedStoreMap(base, tagged_value) instead.
  DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
  StoreNoWriteBarrier(MachineType::PointerRepresentation(), base, offset,
                      BitcastTaggedToWord(tagged_value));
}
| 873 | |
// Emits an atomic store of a (up to 32-bit) value with the given memory
// |order|; no write barrier. Map-slot stores are disallowed.
void CodeAssembler::AtomicStore(MachineRepresentation rep,
                                AtomicMemoryOrder order, TNode<RawPtrT> base,
                                TNode<WordT> offset, TNode<Word32T> value) {
  DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
  raw_assembler()->AtomicStore(
      AtomicStoreParameters(rep, WriteBarrierKind::kNoWriteBarrier, order),
      base, offset, value);
}
| 882 | |
// Emits a 64-bit atomic store; |value_high| carries the upper word on
// 32-bit targets. No write barrier.
void CodeAssembler::AtomicStore64(AtomicMemoryOrder order, TNode<RawPtrT> base,
                                  TNode<WordT> offset, TNode<UintPtrT> value,
                                  TNode<UintPtrT> value_high) {
  raw_assembler()->AtomicStore64(
      AtomicStoreParameters(MachineRepresentation::kWord64,
                            WriteBarrierKind::kNoWriteBarrier, order),
      base, offset, value, value_high);
}
| 891 | |
// For each atomic read-modify-write operation this defines:
//  - a 32-bit variant returning the previous value as Word32T, and
//  - a templated 64-bit variant (value split into low/high words for 32-bit
//    targets), explicitly instantiated for AtomicInt64 and AtomicUint64.
#define ATOMIC_FUNCTION(name)                                                 \
  TNode<Word32T> CodeAssembler::Atomic##name(                                 \
      MachineType type, TNode<RawPtrT> base, TNode<UintPtrT> offset,          \
      TNode<Word32T> value) {                                                 \
    return UncheckedCast<Word32T>(                                            \
        raw_assembler()->Atomic##name(type, base, offset, value));            \
  }                                                                           \
  template <class Type>                                                       \
  TNode<Type> CodeAssembler::Atomic##name##64(                                \
      TNode<RawPtrT> base, TNode<UintPtrT> offset, TNode<UintPtrT> value,     \
      TNode<UintPtrT> value_high) {                                           \
    return UncheckedCast<Type>(                                               \
        raw_assembler()->Atomic##name##64(base, offset, value, value_high));  \
  }                                                                           \
  template TNode<AtomicInt64> CodeAssembler::Atomic##name##64 < AtomicInt64 > \
  (TNode<RawPtrT> base, TNode<UintPtrT> offset, TNode<UintPtrT> value,        \
   TNode<UintPtrT> value_high);                                               \
  template TNode<AtomicUint64> CodeAssembler::Atomic##name##64 <              \
      AtomicUint64 > (TNode<RawPtrT> base, TNode<UintPtrT> offset,            \
                      TNode<UintPtrT> value, TNode<UintPtrT> value_high);
ATOMIC_FUNCTION(Add)
ATOMIC_FUNCTION(Sub)
ATOMIC_FUNCTION(And)
ATOMIC_FUNCTION(Or)
ATOMIC_FUNCTION(Xor)
ATOMIC_FUNCTION(Exchange)
#undef ATOMIC_FUNCTION
| 919 | |
// Emits a (up to 32-bit) atomic compare-and-exchange; returns the value
// observed at the location before the operation.
TNode<Word32T> CodeAssembler::AtomicCompareExchange(MachineType type,
                                                    TNode<RawPtrT> base,
                                                    TNode<WordT> offset,
                                                    TNode<Word32T> old_value,
                                                    TNode<Word32T> new_value) {
  return UncheckedCast<Word32T>(raw_assembler()->AtomicCompareExchange(
      type, base, offset, old_value, new_value));
}
| 928 | |
// 64-bit atomic compare-and-exchange; old/new values are split into low and
// high words for 32-bit targets. Note the raw assembler groups each value's
// low and high halves together, hence the argument reordering below.
// Explicitly instantiated for AtomicInt64 and AtomicUint64.
template <class Type>
TNode<Type> CodeAssembler::AtomicCompareExchange64(
    TNode<RawPtrT> base, TNode<WordT> offset, TNode<UintPtrT> old_value,
    TNode<UintPtrT> new_value, TNode<UintPtrT> old_value_high,
    TNode<UintPtrT> new_value_high) {
  // This uses Uint64() intentionally: AtomicCompareExchange is not implemented
  // for Int64(), which is fine because the machine instruction only cares
  // about words.
  return UncheckedCast<Type>(raw_assembler()->AtomicCompareExchange64(
      base, offset, old_value, old_value_high, new_value, new_value_high));
}

template TNode<AtomicInt64> CodeAssembler::AtomicCompareExchange64<AtomicInt64>(
    TNode<RawPtrT> base, TNode<WordT> offset, TNode<UintPtrT> old_value,
    TNode<UintPtrT> new_value, TNode<UintPtrT> old_value_high,
    TNode<UintPtrT> new_value_high);
template TNode<AtomicUint64>
CodeAssembler::AtomicCompareExchange64<AtomicUint64>(
    TNode<RawPtrT> base, TNode<WordT> offset, TNode<UintPtrT> old_value,
    TNode<UintPtrT> new_value, TNode<UintPtrT> old_value_high,
    TNode<UintPtrT> new_value_high);
| 950 | |
// Stores |value| into the isolate's roots table at |root_index|. Only
// mutable roots may be written; immortal-immovable roots are constants.
void CodeAssembler::StoreRoot(RootIndex root_index, TNode<Object> value) {
  DCHECK(!RootsTable::IsImmortalImmovable(root_index));
  TNode<ExternalReference> isolate_root =
      ExternalConstant(ExternalReference::isolate_root(isolate()));
  int offset = IsolateData::root_slot_offset(root_index);
  StoreFullTaggedNoWriteBarrier(isolate_root, IntPtrConstant(offset), value);
}
| 958 | |
// Extracts output |index| from a multi-output node (e.g. the value/overflow
// pair of a *WithOverflow operation).
Node* CodeAssembler::Projection(int index, Node* value) {
  DCHECK_LT(index, value->op()->ValueOutputCount());
  return raw_assembler()->Projection(index, value);
}
| 963 | |
// Emits an inline (optimized) allocation of |size| bytes in the given
// allocation space.
TNode<HeapObject> CodeAssembler::OptimizedAllocate(
    TNode<IntPtrT> size, AllocationType allocation,
    AllowLargeObjects allow_large_objects) {
  return UncheckedCast<HeapObject>(raw_assembler()->OptimizedAllocate(
      size, allocation, allow_large_objects));
}
| 970 | |
// Wires a freshly emitted, potentially-throwing call |node| into the
// innermost active exception handler. Control flow is split into a success
// continuation and a deferred exception continuation; on the exception path
// the thrown value is forwarded to the handler label.
void CodeAssembler::HandleException(Node* node) {
  // No handler installed: the exception propagates out normally.
  if (state_->exception_handler_labels_.size() == 0) return;
  CodeAssemblerExceptionHandlerLabel* label =
      state_->exception_handler_labels_.back();

  // Calls that cannot throw need no IfSuccess/IfException projections.
  if (node->op()->HasProperty(Operator::kNoThrow)) {
    return;
  }

  Label success(this), exception(this, Label::kDeferred);
  success.MergeVariables();
  exception.MergeVariables();

  raw_assembler()->Continuations(node, success.label_, exception.label_);

  Bind(&exception);
  const Operator* op = raw_assembler()->common()->IfException();
  Node* exception_value = raw_assembler()->AddNode(op, node, node);
  label->AddInputs({UncheckedCast<Object>(exception_value)});
  Goto(label->plain_label());

  // Continue code generation on the success path.
  Bind(&success);
  raw_assembler()->AddNode(raw_assembler()->common()->IfSuccess(), node);
}
| 995 | |
| 996 | namespace { |
| 997 | template <size_t kMaxSize> |
| 998 | class NodeArray { |
| 999 | public: |
| 1000 | void Add(Node* node) { |
| 1001 | DCHECK_GT(kMaxSize, size())((void) 0); |
| 1002 | *ptr_++ = node; |
| 1003 | } |
| 1004 | |
| 1005 | Node* const* data() const { return arr_; } |
| 1006 | int size() const { return static_cast<int>(ptr_ - arr_); } |
| 1007 | |
| 1008 | private: |
| 1009 | Node* arr_[kMaxSize]; |
| 1010 | Node** ptr_ = arr_; |
| 1011 | }; |
| 1012 | } // namespace |
| 1013 | |
// Emits a call to runtime function |function| through the CEntry stub.
// The call inputs are: centry, the explicit args, the runtime entry point,
// the argument count and the context — matching the runtime call linkage.
// Returns the call node (the runtime result).
Node* CodeAssembler::CallRuntimeImpl(
    Runtime::FunctionId function, TNode<Object> context,
    std::initializer_list<TNode<Object>> args) {
  int result_size = Runtime::FunctionForId(function)->result_size;
  TNode<CodeT> centry =
      HeapConstant(CodeFactory::RuntimeCEntry(isolate(), result_size));
  constexpr size_t kMaxNumArgs = 6;
  DCHECK_GE(kMaxNumArgs, args.size());
  int argc = static_cast<int>(args.size());
  // Non-allocating runtime functions get the kNoAllocate descriptor flag.
  auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
      zone(), function, argc, Operator::kNoProperties,
      Runtime::MayAllocate(function) ? CallDescriptor::kNoFlags
                                     : CallDescriptor::kNoAllocate);

  TNode<ExternalReference> ref =
      ExternalConstant(ExternalReference::Create(function));
  TNode<Int32T> arity = Int32Constant(argc);

  // +4: centry target, runtime entry, arity, context.
  NodeArray<kMaxNumArgs + 4> inputs;
  inputs.Add(centry);
  for (auto arg : args) inputs.Add(arg);
  inputs.Add(ref);
  inputs.Add(arity);
  inputs.Add(context);

  CallPrologue();
  Node* return_value =
      raw_assembler()->CallN(call_descriptor, inputs.size(), inputs.data());
  HandleException(return_value);
  CallEpilogue();
  return return_value;
}
| 1046 | |
// Emits a tail call to runtime function |function| through the CEntry stub;
// |arity| is passed explicitly by the caller. Input layout mirrors
// CallRuntimeImpl. Does not return to the generated code.
void CodeAssembler::TailCallRuntimeImpl(
    Runtime::FunctionId function, TNode<Int32T> arity, TNode<Object> context,
    std::initializer_list<TNode<Object>> args) {
  int result_size = Runtime::FunctionForId(function)->result_size;
  TNode<CodeT> centry =
      HeapConstant(CodeFactory::RuntimeCEntry(isolate(), result_size));
  constexpr size_t kMaxNumArgs = 6;
  DCHECK_GE(kMaxNumArgs, args.size());
  int argc = static_cast<int>(args.size());
  auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
      zone(), function, argc, Operator::kNoProperties,
      CallDescriptor::kNoFlags);

  TNode<ExternalReference> ref =
      ExternalConstant(ExternalReference::Create(function));

  // +4: centry target, runtime entry, arity, context.
  NodeArray<kMaxNumArgs + 4> inputs;
  inputs.Add(centry);
  for (auto arg : args) inputs.Add(arg);
  inputs.Add(ref);
  inputs.Add(arity);
  inputs.Add(context);

  raw_assembler()->TailCallN(call_descriptor, inputs.size(), inputs.data());
}
| 1072 | |
// Emits a call to a stub described by |descriptor|. |inputs| contains the
// target, the arguments, and (if the descriptor uses one) the context;
// arguments beyond the register parameters are passed on the stack.
Node* CodeAssembler::CallStubN(StubCallMode call_mode,
                               const CallInterfaceDescriptor& descriptor,
                               int input_count, Node* const* inputs) {
  DCHECK(call_mode == StubCallMode::kCallCodeObject ||
         call_mode == StubCallMode::kCallBuiltinPointer);

  // implicit nodes are target and optionally context.
  int implicit_nodes = descriptor.HasContextParameter() ? 2 : 1;
  DCHECK_LE(implicit_nodes, input_count);
  int argc = input_count - implicit_nodes;
#ifdef DEBUG
  if (descriptor.AllowVarArgs()) {
    DCHECK_LE(descriptor.GetParameterCount(), argc);
  } else {
    DCHECK_EQ(descriptor.GetParameterCount(), argc);
  }
#endif
  // Extra arguments not mentioned in the descriptor are passed on the stack.
  int stack_parameter_count = argc - descriptor.GetRegisterParameterCount();
  DCHECK_LE(descriptor.GetStackParameterCount(), stack_parameter_count);

  auto call_descriptor = Linkage::GetStubCallDescriptor(
      zone(), descriptor, stack_parameter_count, CallDescriptor::kNoFlags,
      Operator::kNoProperties, call_mode);

  CallPrologue();
  Node* return_value =
      raw_assembler()->CallN(call_descriptor, input_count, inputs);
  HandleException(return_value);
  CallEpilogue();
  return return_value;
}
| 1105 | |
// Emits a tail call to stub |target|. The argument count must match the
// descriptor exactly; the context is appended only if the descriptor
// declares a context parameter.
void CodeAssembler::TailCallStubImpl(const CallInterfaceDescriptor& descriptor,
                                     TNode<CodeT> target, TNode<Object> context,
                                     std::initializer_list<Node*> args) {
  constexpr size_t kMaxNumArgs = 11;
  DCHECK_GE(kMaxNumArgs, args.size());
  DCHECK_EQ(descriptor.GetParameterCount(), args.size());
  auto call_descriptor = Linkage::GetStubCallDescriptor(
      zone(), descriptor, descriptor.GetStackParameterCount(),
      CallDescriptor::kNoFlags, Operator::kNoProperties);

  // +2: target and (optional) context.
  NodeArray<kMaxNumArgs + 2> inputs;
  inputs.Add(target);
  for (auto arg : args) inputs.Add(arg);
  if (descriptor.HasContextParameter()) {
    inputs.Add(context);
  }

  raw_assembler()->TailCallN(call_descriptor, inputs.size(), inputs.data());
}
| 1125 | |
// Assembles the input array (target, args, optional context) for a stub
// call and delegates to CallStubN.
Node* CodeAssembler::CallStubRImpl(StubCallMode call_mode,
                                   const CallInterfaceDescriptor& descriptor,
                                   TNode<Object> target, TNode<Object> context,
                                   std::initializer_list<Node*> args) {
  DCHECK(call_mode == StubCallMode::kCallCodeObject ||
         call_mode == StubCallMode::kCallBuiltinPointer);

  constexpr size_t kMaxNumArgs = 10;
  DCHECK_GE(kMaxNumArgs, args.size());

  // +2: target and (optional) context.
  NodeArray<kMaxNumArgs + 2> inputs;
  inputs.Add(target);
  for (auto arg : args) inputs.Add(arg);
  if (descriptor.HasContextParameter()) {
    inputs.Add(context);
  }

  return CallStubN(call_mode, descriptor, inputs.size(), inputs.data());
}
| 1145 | |
// Emits a call through a JS calling-convention stub. Input order follows the
// JS trampoline convention: target, function, optional new_target, arity,
// then the explicit arguments and (if declared) the context.
Node* CodeAssembler::CallJSStubImpl(const CallInterfaceDescriptor& descriptor,
                                    TNode<Object> target, TNode<Object> context,
                                    TNode<Object> function,
                                    base::Optional<TNode<Object>> new_target,
                                    TNode<Int32T> arity,
                                    std::initializer_list<Node*> args) {
  constexpr size_t kMaxNumArgs = 10;
  DCHECK_GE(kMaxNumArgs, args.size());
  // +5: target, function, new_target, arity, context.
  NodeArray<kMaxNumArgs + 5> inputs;
  inputs.Add(target);
  inputs.Add(function);
  if (new_target) {
    inputs.Add(*new_target);
  }
  inputs.Add(arity);
  for (auto arg : args) inputs.Add(arg);
  if (descriptor.HasContextParameter()) {
    inputs.Add(context);
  }
  return CallStubN(StubCallMode::kCallCodeObject, descriptor, inputs.size(),
                   inputs.data());
}
| 1168 | |
// Emits a tail call to a stub that itself ends in a bytecode dispatch.
// Unlike TailCallStubImpl, extra arguments beyond the descriptor's
// parameters are allowed and are passed on the stack; the context is always
// appended.
void CodeAssembler::TailCallStubThenBytecodeDispatchImpl(
    const CallInterfaceDescriptor& descriptor, Node* target, Node* context,
    std::initializer_list<Node*> args) {
  constexpr size_t kMaxNumArgs = 6;
  DCHECK_GE(kMaxNumArgs, args.size());

  DCHECK_LE(descriptor.GetParameterCount(), args.size());
  int argc = static_cast<int>(args.size());
  // Extra arguments not mentioned in the descriptor are passed on the stack.
  int stack_parameter_count = argc - descriptor.GetRegisterParameterCount();
  DCHECK_LE(descriptor.GetStackParameterCount(), stack_parameter_count);
  auto call_descriptor = Linkage::GetStubCallDescriptor(
      zone(), descriptor, stack_parameter_count, CallDescriptor::kNoFlags,
      Operator::kNoProperties);

  // +2: target and context.
  NodeArray<kMaxNumArgs + 2> inputs;
  inputs.Add(target);
  for (auto arg : args) inputs.Add(arg);
  inputs.Add(context);

  raw_assembler()->TailCallN(call_descriptor, inputs.size(), inputs.data());
}
| 1191 | |
// Tail-calls {target} through the special bytecode-dispatch call descriptor
// (no code object involved). The number of {args} must match the
// descriptor's parameter count exactly.
template <class... TArgs>
void CodeAssembler::TailCallBytecodeDispatch(
    const CallInterfaceDescriptor& descriptor, TNode<RawPtrT> target,
    TArgs... args) {
  DCHECK_EQ(descriptor.GetParameterCount(), sizeof...(args))((void) 0);
  auto call_descriptor = Linkage::GetBytecodeDispatchCallDescriptor(
      zone(), descriptor, descriptor.GetStackParameterCount());

  // Input layout expected by TailCallN: target followed by the parameters.
  Node* nodes[] = {target, args...};
  CHECK_EQ(descriptor.GetParameterCount() + 1, arraysize(nodes))do { bool _cmp = ::v8::base::CmpEQImpl< typename ::v8::base ::pass_value_or_ref<decltype(descriptor.GetParameterCount( ) + 1)>::type, typename ::v8::base::pass_value_or_ref<decltype ((sizeof(ArraySizeHelper(nodes))))>::type>((descriptor. GetParameterCount() + 1), ((sizeof(ArraySizeHelper(nodes))))) ; do { if ((__builtin_expect(!!(!(_cmp)), 0))) { V8_Fatal("Check failed: %s." , "descriptor.GetParameterCount() + 1" " " "==" " " "(sizeof(ArraySizeHelper(nodes)))" ); } } while (false); } while (false);
  raw_assembler()->TailCallN(call_descriptor, arraysize(nodes)(sizeof(ArraySizeHelper(nodes))), nodes);
}
| 1204 | |
// Instantiate TailCallBytecodeDispatch() for argument counts used by
// CSA-generated code. The four arguments presumably correspond to the
// interpreter dispatch signature (accumulator, bytecode offset, bytecode
// array, dispatch table) — confirm against the interpreter descriptor.
template V8_EXPORT_PRIVATE void CodeAssembler::TailCallBytecodeDispatch(
    const CallInterfaceDescriptor& descriptor, TNode<RawPtrT> target,
    TNode<Object>, TNode<IntPtrT>, TNode<BytecodeArray>,
    TNode<ExternalReference>);
| 1211 | |
// Tail-calls {code} using the JS trampoline calling convention with a fixed
// target register; {function}, {new_target} and {arg_count} are the
// trampoline descriptor's parameters.
void CodeAssembler::TailCallJSCode(TNode<CodeT> code, TNode<Context> context,
                                   TNode<JSFunction> function,
                                   TNode<Object> new_target,
                                   TNode<Int32T> arg_count) {
  JSTrampolineDescriptor descriptor;
  auto call_descriptor = Linkage::GetStubCallDescriptor(
      zone(), descriptor, descriptor.GetStackParameterCount(),
      CallDescriptor::kFixedTargetRegister, Operator::kNoProperties);

  // Input layout expected by TailCallN: code target, descriptor parameters,
  // then the context (hence the "+ 2" below: target and context).
  Node* nodes[] = {code, function, new_target, arg_count, context};
  CHECK_EQ(descriptor.GetParameterCount() + 2, arraysize(nodes))do { bool _cmp = ::v8::base::CmpEQImpl< typename ::v8::base ::pass_value_or_ref<decltype(descriptor.GetParameterCount( ) + 2)>::type, typename ::v8::base::pass_value_or_ref<decltype ((sizeof(ArraySizeHelper(nodes))))>::type>((descriptor. GetParameterCount() + 2), ((sizeof(ArraySizeHelper(nodes))))) ; do { if ((__builtin_expect(!!(!(_cmp)), 0))) { V8_Fatal("Check failed: %s." , "descriptor.GetParameterCount() + 2" " " "==" " " "(sizeof(ArraySizeHelper(nodes)))" ); } } while (false); } while (false);
  raw_assembler()->TailCallN(call_descriptor, arraysize(nodes)(sizeof(ArraySizeHelper(nodes))), nodes);
}
| 1225 | |
| 1226 | Node* CodeAssembler::CallCFunctionN(Signature<MachineType>* signature, |
| 1227 | int input_count, Node* const* inputs) { |
| 1228 | auto call_descriptor = Linkage::GetSimplifiedCDescriptor(zone(), signature); |
| 1229 | return raw_assembler()->CallN(call_descriptor, input_count, inputs); |
| 1230 | } |
| 1231 | |
// Calls the C function {function} with {args}. {return_type} is optional —
// presumably empty for void-returning functions (confirm against
// RawMachineAssembler::CallCFunction). Pure delegation.
Node* CodeAssembler::CallCFunction(
    Node* function, base::Optional<MachineType> return_type,
    std::initializer_list<CodeAssembler::CFunctionArg> args) {
  return raw_assembler()->CallCFunction(function, return_type, args);
}
| 1237 | |
// Like CallCFunction, but emitted without going through a function
// descriptor (see RawMachineAssembler for the exact calling-convention
// implications). Pure delegation.
Node* CodeAssembler::CallCFunctionWithoutFunctionDescriptor(
    Node* function, MachineType return_type,
    std::initializer_list<CodeAssembler::CFunctionArg> args) {
  return raw_assembler()->CallCFunctionWithoutFunctionDescriptor(
      function, return_type, args);
}
| 1244 | |
// Variant of CallCFunction that saves and restores caller-saved registers
// around the call; {mode} selects whether FP registers are included.
Node* CodeAssembler::CallCFunctionWithCallerSavedRegisters(
    Node* function, MachineType return_type, SaveFPRegsMode mode,
    std::initializer_list<CodeAssembler::CFunctionArg> args) {
  // The result must fit into a pointer-sized register.
  DCHECK(return_type.LessThanOrEqualPointerSize())((void) 0);
  return raw_assembler()->CallCFunctionWithCallerSavedRegisters(
      function, return_type, mode, args);
}
| 1252 | |
// Emits an unconditional jump to {label}, recording the current variable
// values on this control-flow edge first so they can be merged into phis.
void CodeAssembler::Goto(Label* label) {
  label->MergeVariables();
  raw_assembler()->Goto(label->label_);
}
| 1257 | |
| 1258 | void CodeAssembler::GotoIf(TNode<IntegralT> condition, Label* true_label) { |
| 1259 | Label false_label(this); |
| 1260 | Branch(condition, true_label, &false_label); |
| 1261 | Bind(&false_label); |
| 1262 | } |
| 1263 | |
| 1264 | void CodeAssembler::GotoIfNot(TNode<IntegralT> condition, Label* false_label) { |
| 1265 | Label true_label(this); |
| 1266 | Branch(condition, &true_label, false_label); |
| 1267 | Bind(&true_label); |
| 1268 | } |
| 1269 | |
| 1270 | void CodeAssembler::Branch(TNode<IntegralT> condition, Label* true_label, |
| 1271 | Label* false_label) { |
| 1272 | int32_t constant; |
| 1273 | if (TryToInt32Constant(condition, &constant)) { |
| 1274 | if ((true_label->is_used() || true_label->is_bound()) && |
| 1275 | (false_label->is_used() || false_label->is_bound())) { |
| 1276 | return Goto(constant ? true_label : false_label); |
| 1277 | } |
| 1278 | } |
| 1279 | true_label->MergeVariables(); |
| 1280 | false_label->MergeVariables(); |
| 1281 | return raw_assembler()->Branch(condition, true_label->label_, |
| 1282 | false_label->label_); |
| 1283 | } |
| 1284 | |
| 1285 | void CodeAssembler::Branch(TNode<BoolT> condition, |
| 1286 | const std::function<void()>& true_body, |
| 1287 | const std::function<void()>& false_body) { |
| 1288 | int32_t constant; |
| 1289 | if (TryToInt32Constant(condition, &constant)) { |
| 1290 | return constant ? true_body() : false_body(); |
| 1291 | } |
| 1292 | |
| 1293 | Label vtrue(this), vfalse(this); |
| 1294 | Branch(condition, &vtrue, &vfalse); |
| 1295 | |
| 1296 | Bind(&vtrue); |
| 1297 | true_body(); |
| 1298 | |
| 1299 | Bind(&vfalse); |
| 1300 | false_body(); |
| 1301 | } |
| 1302 | |
| 1303 | void CodeAssembler::Branch(TNode<BoolT> condition, Label* true_label, |
| 1304 | const std::function<void()>& false_body) { |
| 1305 | int32_t constant; |
| 1306 | if (TryToInt32Constant(condition, &constant)) { |
| 1307 | return constant ? Goto(true_label) : false_body(); |
| 1308 | } |
| 1309 | |
| 1310 | Label vfalse(this); |
| 1311 | Branch(condition, true_label, &vfalse); |
| 1312 | Bind(&vfalse); |
| 1313 | false_body(); |
| 1314 | } |
| 1315 | |
| 1316 | void CodeAssembler::Branch(TNode<BoolT> condition, |
| 1317 | const std::function<void()>& true_body, |
| 1318 | Label* false_label) { |
| 1319 | int32_t constant; |
| 1320 | if (TryToInt32Constant(condition, &constant)) { |
| 1321 | return constant ? true_body() : Goto(false_label); |
| 1322 | } |
| 1323 | |
| 1324 | Label vtrue(this); |
| 1325 | Branch(condition, &vtrue, false_label); |
| 1326 | Bind(&vtrue); |
| 1327 | true_body(); |
| 1328 | } |
| 1329 | |
| 1330 | void CodeAssembler::Switch(Node* index, Label* default_label, |
| 1331 | const int32_t* case_values, Label** case_labels, |
| 1332 | size_t case_count) { |
| 1333 | RawMachineLabel** labels = zone()->NewArray<RawMachineLabel*>(case_count); |
| 1334 | for (size_t i = 0; i < case_count; ++i) { |
| 1335 | labels[i] = case_labels[i]->label_; |
| 1336 | case_labels[i]->MergeVariables(); |
| 1337 | } |
| 1338 | default_label->MergeVariables(); |
| 1339 | return raw_assembler()->Switch(index, default_label->label_, case_values, |
| 1340 | labels, case_count); |
| 1341 | } |
| 1342 | |
// Whether the target machine supports unaligned loads of {rep}.
bool CodeAssembler::UnalignedLoadSupported(MachineRepresentation rep) const {
  return raw_assembler()->machine()->UnalignedLoadSupported(rep);
}
// Whether the target machine supports unaligned stores of {rep}.
bool CodeAssembler::UnalignedStoreSupported(MachineRepresentation rep) const {
  return raw_assembler()->machine()->UnalignedStoreSupported(rep);
}
| 1349 | |
// RawMachineAssembler delegate helpers:
Isolate* CodeAssembler::isolate() const { return raw_assembler()->isolate(); }

Factory* CodeAssembler::factory() const { return isolate()->factory(); }

Zone* CodeAssembler::zone() const { return raw_assembler()->zone(); }

// True while at least one exception handler label is on the handler stack
// (see Push/PopExceptionHandler).
bool CodeAssembler::IsExceptionHandlerActive() const {
  return state_->exception_handler_labels_.size() != 0;
}

RawMachineAssembler* CodeAssembler::raw_assembler() const {
  return state_->raw_assembler_.get();
}

JSGraph* CodeAssembler::jsgraph() const { return state_->jsgraph_; }
| 1366 | |
// The core implementation of Variable is stored through an indirection so
// that it can outlive the often block-scoped Variable declarations. This is
// needed to ensure that variable binding and merging through phis can
// properly be verified.
class CodeAssemblerVariable::Impl : public ZoneObject {
 public:
  explicit Impl(MachineRepresentation rep, CodeAssemblerState::VariableId id)
      :
#if DEBUG
        debug_info_(AssemblerDebugInfo(nullptr, nullptr, -1)),
#endif
        value_(nullptr),
        rep_(rep),
        var_id_(id) {
  }

#if DEBUG
  AssemblerDebugInfo debug_info() const { return debug_info_; }
  void set_debug_info(AssemblerDebugInfo debug_info) {
    debug_info_ = debug_info;
  }

  AssemblerDebugInfo debug_info_;
#endif  // DEBUG
  // Order by creation id so ordered containers of Impl iterate
  // deterministically (see ImplComparator below).
  bool operator<(const CodeAssemblerVariable::Impl& other) const {
    return var_id_ < other.var_id_;
  }
  Node* value_;                            // Currently bound value; nullptr while unbound.
  MachineRepresentation rep_;              // Machine representation of the value.
  CodeAssemblerState::VariableId var_id_;  // Id from CodeAssemblerState::NextVariableId().
};
| 1398 | |
| 1399 | bool CodeAssemblerVariable::ImplComparator::operator()( |
| 1400 | const CodeAssemblerVariable::Impl* a, |
| 1401 | const CodeAssemblerVariable::Impl* b) const { |
| 1402 | return *a < *b; |
| 1403 | } |
| 1404 | |
// Creates an unbound variable of representation {rep} and registers it with
// the assembler state so labels can track and merge it.
CodeAssemblerVariable::CodeAssemblerVariable(CodeAssembler* assembler,
                                             MachineRepresentation rep)
    : impl_(assembler->zone()->New<Impl>(rep,
                                         assembler->state()->NextVariableId())),
      state_(assembler->state()) {
  state_->variables_.insert(impl_);
}

// Convenience overload: creates the variable and immediately binds it to
// {initial_value}.
CodeAssemblerVariable::CodeAssemblerVariable(CodeAssembler* assembler,
                                             MachineRepresentation rep,
                                             Node* initial_value)
    : CodeAssemblerVariable(assembler, rep) {
  Bind(initial_value);
}
| 1419 | |
#if DEBUG
// Debug-only constructors additionally record the declaration site
// ({debug_info}) so diagnostics can name the variable.
CodeAssemblerVariable::CodeAssemblerVariable(CodeAssembler* assembler,
                                             AssemblerDebugInfo debug_info,
                                             MachineRepresentation rep)
    : impl_(assembler->zone()->New<Impl>(rep,
                                         assembler->state()->NextVariableId())),
      state_(assembler->state()) {
  impl_->set_debug_info(debug_info);
  state_->variables_.insert(impl_);
}

CodeAssemblerVariable::CodeAssemblerVariable(CodeAssembler* assembler,
                                             AssemblerDebugInfo debug_info,
                                             MachineRepresentation rep,
                                             Node* initial_value)
    : CodeAssemblerVariable(assembler, debug_info, rep) {
  // NOTE(review): the delegated-to constructor already stored {debug_info};
  // this second call is redundant but harmless.
  impl_->set_debug_info(debug_info);
  Bind(initial_value);
}
#endif  // DEBUG
| 1440 | |
CodeAssemblerVariable::~CodeAssemblerVariable() {
  // Deregister from the state; the Impl is zone-allocated and stays alive.
  state_->variables_.erase(impl_);
}

// Binds the variable to {value} for the current block.
void CodeAssemblerVariable::Bind(Node* value) { impl_->value_ = value; }
| 1446 | |
// Returns the currently bound value. In debug builds, aborts with a
// diagnostic when the variable is unbound or when we are not inside a block
// (both indicate a CSA programming error).
Node* CodeAssemblerVariable::value() const {
#if DEBUG
  if (!IsBound()) {
    std::stringstream str;
    str << "#Use of unbound variable:"
        << "#\n Variable: " << *this << "#\n Current Block: ";
    state_->PrintCurrentBlock(str);
    FATAL("%s", str.str().c_str())V8_Fatal("%s", str.str().c_str());
  }
  if (!state_->InsideBlock()) {
    std::stringstream str;
    str << "#Accessing variable value outside a block:"
        << "#\n Variable: " << *this;
    FATAL("%s", str.str().c_str())V8_Fatal("%s", str.str().c_str());
  }
#endif  // DEBUG
  return impl_->value_;
}
| 1465 | |
// Machine representation the variable was declared with.
MachineRepresentation CodeAssemblerVariable::rep() const { return impl_->rep_; }

// A variable counts as bound once it holds a non-null value.
bool CodeAssemblerVariable::IsBound() const { return impl_->value_ != nullptr; }
| 1469 | |
| 1470 | std::ostream& operator<<(std::ostream& os, |
| 1471 | const CodeAssemblerVariable::Impl& impl) { |
| 1472 | #if DEBUG |
| 1473 | AssemblerDebugInfo info = impl.debug_info(); |
| 1474 | if (info.name) os << "V" << info; |
| 1475 | #endif // DEBUG |
| 1476 | return os; |
| 1477 | } |
| 1478 | |
| 1479 | std::ostream& operator<<(std::ostream& os, |
| 1480 | const CodeAssemblerVariable& variable) { |
| 1481 | os << *variable.impl_; |
| 1482 | return os; |
| 1483 | } |
| 1484 | |
| 1485 | CodeAssemblerLabel::CodeAssemblerLabel(CodeAssembler* assembler, |
| 1486 | size_t vars_count, |
| 1487 | CodeAssemblerVariable* const* vars, |
| 1488 | CodeAssemblerLabel::Type type) |
| 1489 | : bound_(false), |
| 1490 | merge_count_(0), |
| 1491 | state_(assembler->state()), |
| 1492 | label_(nullptr) { |
| 1493 | label_ = assembler->zone()->New<RawMachineLabel>( |
| 1494 | type == kDeferred ? RawMachineLabel::kDeferred |
| 1495 | : RawMachineLabel::kNonDeferred); |
| 1496 | for (size_t i = 0; i < vars_count; ++i) { |
| 1497 | variable_phis_[vars[i]->impl_] = nullptr; |
| 1498 | } |
| 1499 | } |
| 1500 | |
// label_ is zone-allocated, so its destructor must be invoked explicitly —
// the zone reclaims memory without running destructors.
CodeAssemblerLabel::~CodeAssemblerLabel() { label_->~RawMachineLabel(); }
| 1502 | |
// Called on every control-flow edge into this label. Snapshots the current
// value of every live variable into {variable_merges_}; if the label is
// already bound (phis exist), appends the value as a new phi input instead.
void CodeAssemblerLabel::MergeVariables() {
  ++merge_count_;
  for (CodeAssemblerVariable::Impl* var : state_->variables_) {
    size_t count = 0;
    Node* node = var->value_;
    if (node != nullptr) {
      auto i = variable_merges_.find(var);
      if (i != variable_merges_.end()) {
        i->second.push_back(node);
        count = i->second.size();
      } else {
        count = 1;
        variable_merges_[var] = std::vector<Node*>(1, node);
      }
    }
    // If the following asserts, then you've jumped to a label without a bound
    // variable along that path that expects to merge its value into a phi.
    // This can also occur if a label is bound that is never jumped to.
    DCHECK(variable_phis_.find(var) == variable_phis_.end() ||((void) 0)
           count == merge_count_)((void) 0);
    USE(count)do { ::v8::base::Use unused_tmp_array_for_use_macro[]{count}; (void)unused_tmp_array_for_use_macro; } while (false);

    // If the label is already bound, we already know the set of variables to
    // merge and phi nodes have already been created.
    if (bound_) {
      auto phi = variable_phis_.find(var);
      if (phi != variable_phis_.end()) {
        DCHECK_NOT_NULL(phi->second)((void) 0);
        state_->raw_assembler_->AppendPhiInput(phi->second, node);
      } else {
        auto i = variable_merges_.find(var);
        if (i != variable_merges_.end()) {
          // If the following assert fires, then you've declared a variable that
          // has the same bound value along all paths up until the point you
          // bound this label, but then later merged a path with a new value for
          // the variable after the label bind (it's not possible to add phis to
          // the bound label after the fact, just make sure to list the variable
          // in the label's constructor's list of merged variables).
#if DEBUG
          if (find_if(i->second.begin(), i->second.end(),
                      [node](Node* e) -> bool { return node != e; }) !=
              i->second.end()) {
            std::stringstream str;
            str << "Unmerged variable found when jumping to block. \n"
                << "# Variable: " << *var;
            // NOTE(review): {bound_} is necessarily true here (checked by the
            // enclosing if), so this inner test is redundant.
            if (bound_) {
              str << "\n# Target block: " << *label_->block();
            }
            str << "\n# Current Block: ";
            state_->PrintCurrentBlock(str);
            FATAL("%s", str.str().c_str())V8_Fatal("%s", str.str().c_str());
          }
#endif  // DEBUG
        }
      }
    }
  }
}
| 1561 | |
#if DEBUG
// Debug-only Bind: records {debug_info} (the C++ Bind call site) and
// optionally emits it as an external source position. A label may only be
// bound once; double-binding aborts with a diagnostic.
void CodeAssemblerLabel::Bind(AssemblerDebugInfo debug_info) {
  if (bound_) {
    std::stringstream str;
    str << "Cannot bind the same label twice:"
        << "\n# current: " << debug_info
        << "\n# previous: " << *label_->block();
    FATAL("%s", str.str().c_str())V8_Fatal("%s", str.str().c_str());
  }
  if (FLAG_enable_source_at_csa_bind) {
    state_->raw_assembler_->SetCurrentExternalSourcePosition(
        {debug_info.file, debug_info.line});
  }
  state_->raw_assembler_->Bind(label_, debug_info);
  UpdateVariablesAfterBind();
}
#endif  // DEBUG
| 1579 | |
// Binds the label at the current position and resolves variable merges; a
// label may be bound at most once.
void CodeAssemblerLabel::Bind() {
  DCHECK(!bound_)((void) 0);
  state_->raw_assembler_->Bind(label_);
  UpdateVariablesAfterBind();
}
| 1585 | |
// Runs when the label is bound: determines which merged variables need a phi
// (those with differing values across incoming edges), creates the phis, and
// rebinds every variable to its post-merge value (phi, common value, or
// nullptr when no consistent value exists).
void CodeAssemblerLabel::UpdateVariablesAfterBind() {
  // Make sure that all variables that have changed along any path up to this
  // point are marked as merge variables.
  for (auto var : state_->variables_) {
    Node* shared_value = nullptr;
    auto i = variable_merges_.find(var);
    if (i != variable_merges_.end()) {
      for (auto value : i->second) {
        DCHECK_NOT_NULL(value)((void) 0);
        if (value != shared_value) {
          if (shared_value == nullptr) {
            shared_value = value;
          } else {
            // Differing values across edges: request a phi for this variable.
            variable_phis_[var] = nullptr;
          }
        }
      }
    }
  }

  for (auto var : variable_phis_) {
    CodeAssemblerVariable::Impl* var_impl = var.first;
    auto i = variable_merges_.find(var_impl);
#if DEBUG
    bool not_found = i == variable_merges_.end();
    // NOTE(review): "beeing" below is a typo in the emitted message text;
    // left untouched here since it is a runtime string.
    if (not_found || i->second.size() != merge_count_) {
      std::stringstream str;
      str << "A variable that has been marked as beeing merged at the label"
          << "\n# doesn't have a bound value along all of the paths that "
          << "\n# have been merged into the label up to this point."
          << "\n#"
          << "\n# This can happen in the following cases:"
          << "\n# - By explicitly marking it so in the label constructor"
          << "\n# - By having seen different bound values at branches"
          << "\n#"
          << "\n# Merge count: expected=" << merge_count_
          << " vs. found=" << (not_found ? 0 : i->second.size())
          << "\n# Variable: " << *var_impl
          << "\n# Current Block: " << *label_->block();
      FATAL("%s", str.str().c_str())V8_Fatal("%s", str.str().c_str());
    }
#endif  // DEBUG
    // Create the phi over the values merged along all incoming edges.
    Node* phi = state_->raw_assembler_->Phi(
        var.first->rep_, static_cast<int>(merge_count_), &(i->second[0]));
    variable_phis_[var_impl] = phi;
  }

  // Bind all variables to a merge phi, the common value along all paths or
  // null.
  for (auto var : state_->variables_) {
    auto i = variable_phis_.find(var);
    if (i != variable_phis_.end()) {
      var->value_ = i->second;
    } else {
      auto j = variable_merges_.find(var);
      if (j != variable_merges_.end() && j->second.size() == merge_count_) {
        var->value_ = j->second.back();
      } else {
        // No consistent value at this point: leave the variable unbound.
        var->value_ = nullptr;
      }
    }
  }

  bound_ = true;
}
| 1651 | |
// Records one set of phi inputs for a jump to this parameterized label. If
// the phis already exist (CreatePhis has run), append to them directly;
// otherwise buffer the inputs in {phi_inputs_} until CreatePhis() runs.
void CodeAssemblerParameterizedLabelBase::AddInputs(std::vector<Node*> inputs) {
  if (!phi_nodes_.empty()) {
    DCHECK_EQ(inputs.size(), phi_nodes_.size())((void) 0);
    for (size_t i = 0; i < inputs.size(); ++i) {
      // We use {nullptr} as a sentinel for an uninitialized value.
      if (phi_nodes_[i] == nullptr) continue;
      state_->raw_assembler_->AppendPhiInput(phi_nodes_[i], inputs[i]);
    }
  } else {
    DCHECK_EQ(inputs.size(), phi_inputs_.size())((void) 0);
    for (size_t i = 0; i < inputs.size(); ++i) {
      phi_inputs_[i].push_back(inputs[i]);
    }
  }
}
| 1667 | |
| 1668 | Node* CodeAssemblerParameterizedLabelBase::CreatePhi( |
| 1669 | MachineRepresentation rep, const std::vector<Node*>& inputs) { |
| 1670 | for (Node* input : inputs) { |
| 1671 | // We use {nullptr} as a sentinel for an uninitialized value. We must not |
| 1672 | // create phi nodes for these. |
| 1673 | if (input == nullptr) return nullptr; |
| 1674 | } |
| 1675 | return state_->raw_assembler_->Phi(rep, static_cast<int>(inputs.size()), |
| 1676 | &inputs.front()); |
| 1677 | } |
| 1678 | |
// Creates one phi per label parameter, using the buffered inputs from
// AddInputs() and the given machine {representations}. Must run exactly
// once, on a label that has been jumped to at least once.
const std::vector<Node*>& CodeAssemblerParameterizedLabelBase::CreatePhis(
    std::vector<MachineRepresentation> representations) {
  DCHECK(is_used())((void) 0);
  DCHECK(phi_nodes_.empty())((void) 0);
  phi_nodes_.reserve(phi_inputs_.size());
  DCHECK_EQ(representations.size(), phi_inputs_.size())((void) 0);
  for (size_t i = 0; i < phi_inputs_.size(); ++i) {
    phi_nodes_.push_back(CreatePhi(representations[i], phi_inputs_[i]));
  }
  return phi_nodes_;
}
| 1690 | |
// Pushes {label} as the innermost active exception handler.
void CodeAssemblerState::PushExceptionHandler(
    CodeAssemblerExceptionHandlerLabel* label) {
  exception_handler_labels_.push_back(label);
}

// Pops the innermost exception handler (LIFO with PushExceptionHandler).
void CodeAssemblerState::PopExceptionHandler() {
  exception_handler_labels_.pop_back();
}
| 1699 | |
// Installs {label} as the exception handler for the duration of this scope.
// A null {label} makes the whole scope a no-op.
ScopedExceptionHandler::ScopedExceptionHandler(
    CodeAssembler* assembler, CodeAssemblerExceptionHandlerLabel* label)
    : has_handler_(label != nullptr),
      assembler_(assembler),
      compatibility_label_(nullptr),
      exception_(nullptr) {
  if (has_handler_) {
    assembler_->state()->PushExceptionHandler(label);
  }
}
| 1710 | |
// Compatibility variant: exceptions are routed to a plain {label}, with the
// thrown value stored into {exception} (when non-null). Internally creates a
// deferred handler label; the destructor wires it up to {label}.
ScopedExceptionHandler::ScopedExceptionHandler(
    CodeAssembler* assembler, CodeAssemblerLabel* label,
    TypedCodeAssemblerVariable<Object>* exception)
    : has_handler_(label != nullptr),
      assembler_(assembler),
      compatibility_label_(label),
      exception_(exception) {
  if (has_handler_) {
    label_ = std::make_unique<CodeAssemblerExceptionHandlerLabel>(
        assembler, CodeAssemblerLabel::kDeferred);
    assembler_->state()->PushExceptionHandler(label_.get());
  }
}
| 1724 | |
| 1725 | ScopedExceptionHandler::~ScopedExceptionHandler() { |
| 1726 | if (has_handler_) { |
| 1727 | assembler_->state()->PopExceptionHandler(); |
| 1728 | } |
| 1729 | if (label_ && label_->is_used()) { |
| 1730 | CodeAssembler::Label skip(assembler_); |
| 1731 | bool inside_block = assembler_->state()->InsideBlock(); |
| 1732 | if (inside_block) { |
| 1733 | assembler_->Goto(&skip); |
| 1734 | } |
| 1735 | TNode<Object> e; |
| 1736 | assembler_->Bind(label_.get(), &e); |
| 1737 | if (exception_ != nullptr) *exception_ = e; |
| 1738 | assembler_->Goto(compatibility_label_); |
| 1739 | if (inside_block) { |
| 1740 | assembler_->Bind(&skip); |
| 1741 | } |
| 1742 | } |
| 1743 | } |
| 1744 | |
| 1745 | } // namespace compiler |
| 1746 | |
| 1747 | } // namespace internal |
| 1748 | } // namespace v8 |