1// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/compiler/runtime_api.h"
6#include "vm/globals.h"
7
8// For `AllocateObjectInstr::WillAllocateNewOrRemembered`
9// For `GenericCheckBoundInstr::UseUnboxedRepresentation`
10#include "vm/compiler/backend/il.h"
11
12#define SHOULD_NOT_INCLUDE_RUNTIME
13
14#include "vm/compiler/backend/locations.h"
15#include "vm/compiler/stub_code_compiler.h"
16
17#if defined(TARGET_ARCH_X64)
18
19#include "vm/class_id.h"
20#include "vm/code_entry_kind.h"
21#include "vm/compiler/api/type_check_mode.h"
22#include "vm/compiler/assembler/assembler.h"
23#include "vm/constants.h"
24#include "vm/instructions.h"
25#include "vm/static_type_exactness_state.h"
26#include "vm/tags.h"
27
28#define __ assembler->
29
30namespace dart {
31
32DEFINE_FLAG(bool, inline_alloc, true, "Inline allocation of objects.");
33DEFINE_FLAG(bool,
34 use_slow_path,
35 false,
36 "Set to true for debugging & verifying the slow paths.");
37DECLARE_FLAG(bool, precompiled_mode);
38
39namespace compiler {
40
// Ensures that [RAX] is a new object; if not, it will be added to the
// remembered set via a leaf runtime call.
43//
44// WARNING: This might clobber all registers except for [RAX], [THR] and [FP].
45// The caller should simply call LeaveStubFrame() and return.
46static void EnsureIsNewOrRemembered(Assembler* assembler,
47 bool preserve_registers = true) {
48 // If the object is not remembered we call a leaf-runtime to add it to the
49 // remembered set.
50 Label done;
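  // New-space objects are distinguished from old-space objects by a bit in
  // their tagged address; if that bit is set the object is new and no
  // remembered-set entry is needed.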
51 __ testq(RAX, Immediate(1 << target::ObjectAlignment::kNewObjectBitPosition));
52 __ BranchIf(NOT_ZERO, &done);
53
54 if (preserve_registers) {
55 __ EnterCallRuntimeFrame(0);
56 } else {
57 __ ReserveAlignedFrameSpace(0);
58 }
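  // Pass the object (RAX) and the current thread to the leaf runtime entry,
  // which adds the object to the store buffer if it is not already remembered.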
59 __ movq(CallingConventions::kArg1Reg, RAX);
60 __ movq(CallingConventions::kArg2Reg, THR);
61 __ CallRuntime(kEnsureRememberedAndMarkingDeferredRuntimeEntry, 2);
62 if (preserve_registers) {
63 __ LeaveCallRuntimeFrame();
64 }
65
66 __ Bind(&done);
67}
68
69// Input parameters:
70// RSP : points to return address.
71// RSP + 8 : address of last argument in argument array.
72// RSP + 8*R10 : address of first argument in argument array.
73// RSP + 8*R10 + 8 : address of return value.
74// RBX : address of the runtime function to call.
75// R10 : number of arguments to the call.
76// Must preserve callee saved registers R12 and R13.
77void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
78 const intptr_t thread_offset = target::NativeArguments::thread_offset();
79 const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
80 const intptr_t argv_offset = target::NativeArguments::argv_offset();
81 const intptr_t retval_offset = target::NativeArguments::retval_offset();
82
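  // Load this stub's own Code object first so that the frame set up by
  // EnterStubFrame below carries a valid PC marker for stack walking.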
83 __ movq(CODE_REG,
84 Address(THR, target::Thread::call_to_runtime_stub_offset()));
85 __ EnterStubFrame();
86
87 // Save exit frame information to enable stack walking as we are about
88 // to transition to Dart VM C++ code.
89 __ movq(Address(THR, target::Thread::top_exit_frame_info_offset()), RBP);
90
91 // Mark that the thread exited generated code through a runtime call.
92 __ movq(Address(THR, target::Thread::exit_through_ffi_offset()),
93 Immediate(target::Thread::exit_through_runtime_call()));
94
95#if defined(DEBUG)
96 {
97 Label ok;
98 // Check that we are always entering from Dart code.
99 __ movq(RAX, Immediate(VMTag::kDartCompiledTagId));
100 __ cmpq(RAX, Assembler::VMTagAddress());
101 __ j(EQUAL, &ok, Assembler::kNearJump);
102 __ Stop("Not coming from Dart code.");
103 __ Bind(&ok);
104 }
105#endif
106
107 // Mark that the thread is executing VM code.
108 __ movq(Assembler::VMTagAddress(), RBX);
109
110 // Reserve space for arguments and align frame before entering C++ world.
111 __ subq(RSP, Immediate(target::NativeArguments::StructSize()));
112 if (OS::ActivationFrameAlignment() > 1) {
113 __ andq(RSP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
114 }
115
116 // Pass target::NativeArguments structure by value and call runtime.
117 __ movq(Address(RSP, thread_offset), THR); // Set thread in NativeArgs.
118 // There are no runtime calls to closures, so we do not need to set the tag
119 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_.
120 __ movq(Address(RSP, argc_tag_offset),
121 R10); // Set argc in target::NativeArguments.
122 // Compute argv.
123 __ leaq(RAX,
124 Address(RBP, R10, TIMES_8,
125 target::frame_layout.param_end_from_fp * target::kWordSize));
126 __ movq(Address(RSP, argv_offset),
127 RAX); // Set argv in target::NativeArguments.
128 __ addq(RAX,
129 Immediate(1 * target::kWordSize)); // Retval is next to 1st argument.
130 __ movq(Address(RSP, retval_offset),
131 RAX); // Set retval in target::NativeArguments.
132#if defined(TARGET_OS_WINDOWS)
133 ASSERT(target::NativeArguments::StructSize() >
134 CallingConventions::kRegisterTransferLimit);
135 __ movq(CallingConventions::kArg1Reg, RSP);
136#endif
137 __ CallCFunction(RBX);
138
139 // Mark that the thread is executing Dart code.
140 __ movq(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));
141
142 // Mark that the thread has not exited generated Dart code.
143 __ movq(Address(THR, target::Thread::exit_through_ffi_offset()),
144 Immediate(0));
145
146 // Reset exit frame information in Isolate's mutator thread structure.
147 __ movq(Address(THR, target::Thread::top_exit_frame_info_offset()),
148 Immediate(0));
149
150 // Restore the global object pool after returning from runtime (old space is
151 // moving, so the GOP could have been relocated).
152 if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
153 __ movq(PP, Address(THR, target::Thread::global_object_pool_offset()));
154 }
155
156 __ LeaveStubFrame();
157
158 // The following return can jump to a lazy-deopt stub, which assumes RAX
159 // contains a return value and will save it in a GC-visible way. We therefore
160 // have to ensure RAX does not contain any garbage value left from the C
161 // function we called (which has return type "void").
162 // (See GenerateDeoptimizationSequence::saved_result_slot_from_fp.)
163 __ xorq(RAX, RAX);
164 __ ret();
165}
166
167static void GenerateSharedStubGeneric(
168 Assembler* assembler,
169 bool save_fpu_registers,
170 intptr_t self_code_stub_offset_from_thread,
171 bool allow_return,
172 std::function<void()> perform_runtime_call) {
173 // We want the saved registers to appear like part of the caller's frame, so
174 // we push them before calling EnterStubFrame.
175 __ PushRegisters(kDartAvailableCpuRegs,
176 save_fpu_registers ? kAllFpuRegistersList : 0);
177
178 const intptr_t kSavedCpuRegisterSlots =
179 Utils::CountOneBitsWord(kDartAvailableCpuRegs);
180 const intptr_t kSavedFpuRegisterSlots =
181 save_fpu_registers
182 ? kNumberOfFpuRegisters * kFpuRegisterSize / target::kWordSize
183 : 0;
184 const intptr_t kAllSavedRegistersSlots =
185 kSavedCpuRegisterSlots + kSavedFpuRegisterSlots;
186
187 // Copy down the return address so the stack layout is correct.
188 __ pushq(Address(RSP, kAllSavedRegistersSlots * target::kWordSize));
189 __ movq(CODE_REG, Address(THR, self_code_stub_offset_from_thread));
190 __ EnterStubFrame();
191 perform_runtime_call();
192 if (!allow_return) {
193 __ Breakpoint();
194 return;
195 }
196 __ LeaveStubFrame();
197 // Copy up the return address (in case it was changed).
198 __ popq(TMP);
199 __ movq(Address(RSP, kAllSavedRegistersSlots * target::kWordSize), TMP);
200 __ PopRegisters(kDartAvailableCpuRegs,
201 save_fpu_registers ? kAllFpuRegistersList : 0);
202 __ ret();
203}
204
205static void GenerateSharedStub(Assembler* assembler,
206 bool save_fpu_registers,
207 const RuntimeEntry* target,
208 intptr_t self_code_stub_offset_from_thread,
209 bool allow_return,
210 bool store_runtime_result_in_rax = false) {
211 auto perform_runtime_call = [&]() {
212 if (store_runtime_result_in_rax) {
213 __ PushImmediate(Immediate(0));
214 }
215 __ CallRuntime(*target, /*argument_count=*/0);
216 if (store_runtime_result_in_rax) {
217 __ PopRegister(RAX);
218 __ movq(Address(RBP,
219 target::kWordSize *
220 StubCodeCompiler::WordOffsetFromFpToCpuRegister(RAX)),
221 RAX);
222 }
223 };
224 GenerateSharedStubGeneric(assembler, save_fpu_registers,
225 self_code_stub_offset_from_thread, allow_return,
226 perform_runtime_call);
227}
228
229void StubCodeCompiler::GenerateEnterSafepointStub(Assembler* assembler) {
230 RegisterSet all_registers;
231 all_registers.AddAllGeneralRegisters();
232 __ PushRegisters(all_registers.cpu_registers(),
233 all_registers.fpu_registers());
234
235 __ EnterFrame(0);
236 __ ReserveAlignedFrameSpace(0);
237 __ movq(RAX, Address(THR, kEnterSafepointRuntimeEntry.OffsetFromThread()));
238 __ CallCFunction(RAX);
239 __ LeaveFrame();
240
241 __ PopRegisters(all_registers.cpu_registers(), all_registers.fpu_registers());
242 __ ret();
243}
244
245void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
246 RegisterSet all_registers;
247 all_registers.AddAllGeneralRegisters();
248 __ PushRegisters(all_registers.cpu_registers(),
249 all_registers.fpu_registers());
250
251 __ EnterFrame(0);
252 __ ReserveAlignedFrameSpace(0);
253
254 // Set the execution state to VM while waiting for the safepoint to end.
255 // This isn't strictly necessary but enables tests to check that we're not
256 // in native code anymore. See tests/ffi/function_gc_test.dart for example.
257 __ movq(Address(THR, target::Thread::execution_state_offset()),
258 Immediate(target::Thread::vm_execution_state()));
259
260 __ movq(RAX, Address(THR, kExitSafepointRuntimeEntry.OffsetFromThread()));
261 __ CallCFunction(RAX);
262 __ LeaveFrame();
263
264 __ PopRegisters(all_registers.cpu_registers(), all_registers.fpu_registers());
265 __ ret();
266}
267
268// Calls native code within a safepoint.
269//
270// On entry:
271// Stack: arguments set up and aligned for native call, excl. shadow space
272// RBX = target address to call
273//
274// On exit:
275// Stack pointer lowered by shadow space
276// RBX, R12 clobbered
277void StubCodeCompiler::GenerateCallNativeThroughSafepointStub(
278 Assembler* assembler) {
279 __ movq(R12, compiler::Immediate(target::Thread::exit_through_ffi()));
280 __ TransitionGeneratedToNative(RBX, FPREG, R12,
281 /*enter_safepoint=*/true);
282
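  // Pop the return address into R12 so that the stack seen by the native
  // callee starts at the outgoing arguments; it is pushed back before
  // returning.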
283 __ popq(R12);
284 __ CallCFunction(RBX);
285
286 __ TransitionNativeToGenerated(/*leave_safepoint=*/true);
287
288 // Faster than jmp because it doesn't confuse the branch predictor.
289 __ pushq(R12);
290 __ ret();
291}
292
293#if !defined(DART_PRECOMPILER)
294void StubCodeCompiler::GenerateJITCallbackTrampolines(
295 Assembler* assembler,
296 intptr_t next_callback_id) {
297 Label done;
298
299 // RAX is volatile and not used for passing any arguments.
300 COMPILE_ASSERT(!IsCalleeSavedRegister(RAX) && !IsArgumentRegister(RAX));
301
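  // Emit one tiny trampoline per callback ID on this page: each loads its ID
  // into RAX and jumps to the shared stub emitted below.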
302 for (intptr_t i = 0;
303 i < NativeCallbackTrampolines::NumCallbackTrampolinesPerPage(); ++i) {
304 __ movq(RAX, compiler::Immediate(next_callback_id + i));
305 __ jmp(&done);
306 }
307
308 ASSERT_EQUAL(__ CodeSize(),
309 kNativeCallbackTrampolineSize *
310 NativeCallbackTrampolines::NumCallbackTrampolinesPerPage());
311
312 __ Bind(&done);
313
314 const intptr_t shared_stub_start = __ CodeSize();
315
316 // Save THR which is callee-saved.
317 __ pushq(THR);
318
319 // 2 = THR & return address
320 COMPILE_ASSERT(2 == StubCodeCompiler::kNativeCallbackTrampolineStackDelta);
321
322 // Save the callback ID.
323 __ pushq(RAX);
324
325 // Save all registers which might hold arguments.
326 __ PushRegisters(CallingConventions::kArgumentRegisters,
327 CallingConventions::kFpuArgumentRegisters);
328
329 // Load the thread, verify the callback ID and exit the safepoint.
330 //
331 // We exit the safepoint inside DLRT_GetThreadForNativeCallbackTrampoline
332 // in order to save code size on this shared stub.
333 {
334 __ EnterFrame(0);
335 __ ReserveAlignedFrameSpace(0);
336
337 COMPILE_ASSERT(RAX != CallingConventions::kArg1Reg);
338 __ movq(CallingConventions::kArg1Reg, RAX);
339 __ movq(RAX, compiler::Immediate(reinterpret_cast<int64_t>(
340 DLRT_GetThreadForNativeCallbackTrampoline)));
341 __ CallCFunction(RAX);
342 __ movq(THR, RAX);
343
344 __ LeaveFrame();
345 }
346
347 // Restore the arguments.
348 __ PopRegisters(CallingConventions::kArgumentRegisters,
349 CallingConventions::kFpuArgumentRegisters);
350
351 // Restore the callback ID.
352 __ popq(RAX);
353
354 // Current state:
355 //
356 // Stack:
357 // <old stack (arguments)>
358 // <return address>
359 // <saved THR>
360 //
361 // Registers: Like entry, except RAX == callback_id and THR == thread
362 // All argument registers are untouched.
363
364 COMPILE_ASSERT(!IsCalleeSavedRegister(TMP) && !IsArgumentRegister(TMP));
365
366 // Load the target from the thread.
367 __ movq(TMP, compiler::Address(
368 THR, compiler::target::Thread::callback_code_offset()));
369 __ movq(TMP, compiler::FieldAddress(
370 TMP, compiler::target::GrowableObjectArray::data_offset()));
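  // The thread's callback code is a GrowableObjectArray; index its backing
  // Array with the callback ID in RAX to reach the target Code object.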
371 __ movq(TMP, __ ElementAddressForRegIndex(
372 /*external=*/false,
373 /*array_cid=*/kArrayCid,
374 /*index, smi-tagged=*/compiler::target::kWordSize * 2,
375 /*index_unboxed=*/false,
376 /*array=*/TMP,
377 /*index=*/RAX));
378 __ movq(TMP, compiler::FieldAddress(
379 TMP, compiler::target::Code::entry_point_offset()));
380
381 // On entry to the function, there will be two extra slots on the stack:
382 // the saved THR and the return address. The target will know to skip them.
383 __ call(TMP);
384
385 // EnterSafepoint takes care to not clobber *any* registers (besides TMP).
386 __ EnterSafepoint();
387
388 // Restore THR (callee-saved).
389 __ popq(THR);
390
391 __ ret();
392
393 // 'kNativeCallbackSharedStubSize' is an upper bound because the exact
394 // instruction size can vary slightly based on OS calling conventions.
395 ASSERT((__ CodeSize() - shared_stub_start) <= kNativeCallbackSharedStubSize);
396 ASSERT(__ CodeSize() <= VirtualMemory::PageSize());
397
398#if defined(DEBUG)
399 while (__ CodeSize() < VirtualMemory::PageSize()) {
400 __ Breakpoint();
401 }
402#endif
403}
404#endif // !defined(DART_PRECOMPILER)
405
406// RBX: The extracted method.
407// RDX: The type_arguments_field_offset (or 0)
408void StubCodeCompiler::GenerateBuildMethodExtractorStub(
409 Assembler* assembler,
410 const Object& closure_allocation_stub,
411 const Object& context_allocation_stub) {
412 const intptr_t kReceiverOffsetInWords =
413 target::frame_layout.param_end_from_fp + 1;
414
415 __ EnterStubFrame();
416
417 // Push type_arguments vector (or null)
418 Label no_type_args;
419 __ movq(RCX, Address(THR, target::Thread::object_null_offset()));
420 __ cmpq(RDX, Immediate(0));
421 __ j(EQUAL, &no_type_args, Assembler::kNearJump);
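  // Load the receiver and read its type arguments vector; RDX holds the
  // type_arguments_field_offset in bytes (see the header comment above).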
422 __ movq(RAX, Address(RBP, target::kWordSize * kReceiverOffsetInWords));
423 __ movq(RCX, Address(RAX, RDX, TIMES_1, 0));
424 __ Bind(&no_type_args);
425 __ pushq(RCX);
426
427 // Push extracted method.
428 __ pushq(RBX);
429
430 // Allocate context.
431 {
432 Label done, slow_path;
433 __ TryAllocateArray(kContextCid, target::Context::InstanceSize(1),
434 &slow_path, Assembler::kFarJump,
435 RAX, // instance
436 RSI, // end address
437 RDI);
438 __ movq(RSI, Address(THR, target::Thread::object_null_offset()));
439 __ movq(FieldAddress(RAX, target::Context::parent_offset()), RSI);
440 __ movq(FieldAddress(RAX, target::Context::num_variables_offset()),
441 Immediate(1));
442 __ jmp(&done);
443
444 __ Bind(&slow_path);
445
446 __ LoadImmediate(/*num_vars=*/R10, Immediate(1));
447 __ LoadObject(CODE_REG, context_allocation_stub);
448 __ call(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
449
450 __ Bind(&done);
451 }
452
453 // Store receiver in context
454 __ movq(RSI, Address(RBP, target::kWordSize * kReceiverOffsetInWords));
455 __ StoreIntoObject(
456 RAX, FieldAddress(RAX, target::Context::variable_offset(0)), RSI);
457
458 // Push context.
459 __ pushq(RAX);
460
461 // Allocate closure.
462 __ LoadObject(CODE_REG, closure_allocation_stub);
463 __ call(FieldAddress(
464 CODE_REG, target::Code::entry_point_offset(CodeEntryKind::kUnchecked)));
465
466 // Populate closure object.
467 __ popq(RCX); // Pop context.
468 __ StoreIntoObject(RAX, FieldAddress(RAX, target::Closure::context_offset()),
469 RCX);
470 __ popq(RCX); // Pop extracted method.
471 __ StoreIntoObjectNoBarrier(
472 RAX, FieldAddress(RAX, target::Closure::function_offset()), RCX);
473 __ popq(RCX); // Pop type argument vector.
474 __ StoreIntoObjectNoBarrier(
475 RAX,
476 FieldAddress(RAX, target::Closure::instantiator_type_arguments_offset()),
477 RCX);
478 __ LoadObject(RCX, EmptyTypeArguments());
479 __ StoreIntoObjectNoBarrier(
480 RAX, FieldAddress(RAX, target::Closure::delayed_type_arguments_offset()),
481 RCX);
482
483 __ LeaveStubFrame();
484 __ Ret();
485}
486
487void StubCodeCompiler::GenerateDispatchTableNullErrorStub(
488 Assembler* assembler) {
489 __ EnterStubFrame();
490 __ CallRuntime(kNullErrorRuntimeEntry, /*argument_count=*/0);
491 // The NullError runtime entry does not return.
492 __ Breakpoint();
493}
494
495void StubCodeCompiler::GenerateNullErrorSharedWithoutFPURegsStub(
496 Assembler* assembler) {
497 GenerateSharedStub(
498 assembler, /*save_fpu_registers=*/false, &kNullErrorRuntimeEntry,
499 target::Thread::null_error_shared_without_fpu_regs_stub_offset(),
500 /*allow_return=*/false);
501}
502
503void StubCodeCompiler::GenerateNullErrorSharedWithFPURegsStub(
504 Assembler* assembler) {
505 GenerateSharedStub(
506 assembler, /*save_fpu_registers=*/true, &kNullErrorRuntimeEntry,
507 target::Thread::null_error_shared_with_fpu_regs_stub_offset(),
508 /*allow_return=*/false);
509}
510
511void StubCodeCompiler::GenerateNullArgErrorSharedWithoutFPURegsStub(
512 Assembler* assembler) {
513 GenerateSharedStub(
514 assembler, /*save_fpu_registers=*/false, &kArgumentNullErrorRuntimeEntry,
515 target::Thread::null_arg_error_shared_without_fpu_regs_stub_offset(),
516 /*allow_return=*/false);
517}
518
519void StubCodeCompiler::GenerateNullArgErrorSharedWithFPURegsStub(
520 Assembler* assembler) {
521 GenerateSharedStub(
522 assembler, /*save_fpu_registers=*/true, &kArgumentNullErrorRuntimeEntry,
523 target::Thread::null_arg_error_shared_with_fpu_regs_stub_offset(),
524 /*allow_return=*/false);
525}
526
527void StubCodeCompiler::GenerateNullCastErrorSharedWithoutFPURegsStub(
528 Assembler* assembler) {
529 GenerateSharedStub(
530 assembler, /*save_fpu_registers=*/false, &kNullCastErrorRuntimeEntry,
531 target::Thread::null_cast_error_shared_without_fpu_regs_stub_offset(),
532 /*allow_return=*/false);
533}
534
535void StubCodeCompiler::GenerateNullCastErrorSharedWithFPURegsStub(
536 Assembler* assembler) {
537 GenerateSharedStub(
538 assembler, /*save_fpu_registers=*/true, &kNullCastErrorRuntimeEntry,
539 target::Thread::null_cast_error_shared_with_fpu_regs_stub_offset(),
540 /*allow_return=*/false);
541}
542
543static void GenerateRangeError(Assembler* assembler, bool with_fpu_regs) {
544 auto perform_runtime_call = [&]() {
545 // If the generated code has unboxed index/length we need to box them before
546 // calling the runtime entry.
547 if (GenericCheckBoundInstr::UseUnboxedRepresentation()) {
548 Label length, smi_case;
549
550 // The user-controlled index might not fit into a Smi.
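      // Smi-tag the index by adding the register to itself; on overflow the
      // value does not fit in a Smi and must be boxed in a mint below.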
551 __ addq(RangeErrorABI::kIndexReg, RangeErrorABI::kIndexReg);
552 __ BranchIf(NO_OVERFLOW, &length);
553 {
        // Allocate a mint, reload the two registers, and populate the mint.
555 __ PushImmediate(Immediate(0));
556 __ CallRuntime(kAllocateMintRuntimeEntry, /*argument_count=*/0);
557 __ PopRegister(RangeErrorABI::kIndexReg);
558 __ movq(
559 TMP,
560 Address(RBP, target::kWordSize *
561 StubCodeCompiler::WordOffsetFromFpToCpuRegister(
562 RangeErrorABI::kIndexReg)));
563 __ movq(FieldAddress(RangeErrorABI::kIndexReg,
564 target::Mint::value_offset()),
565 TMP);
566 __ movq(
567 RangeErrorABI::kLengthReg,
568 Address(RBP, target::kWordSize *
569 StubCodeCompiler::WordOffsetFromFpToCpuRegister(
570 RangeErrorABI::kLengthReg)));
571 }
572
      // Length is guaranteed to be in positive Smi range (it comes from a load
      // of a VM-recognized array).
575 __ Bind(&length);
576 __ SmiTag(RangeErrorABI::kLengthReg);
577 }
578 __ PushRegister(RangeErrorABI::kLengthReg);
579 __ PushRegister(RangeErrorABI::kIndexReg);
580 __ CallRuntime(kRangeErrorRuntimeEntry, /*argument_count=*/2);
581 __ Breakpoint();
582 };
583
584 GenerateSharedStubGeneric(
585 assembler, /*save_fpu_registers=*/with_fpu_regs,
586 with_fpu_regs
587 ? target::Thread::range_error_shared_with_fpu_regs_stub_offset()
588 : target::Thread::range_error_shared_without_fpu_regs_stub_offset(),
589 /*allow_return=*/false, perform_runtime_call);
590}
591
592void StubCodeCompiler::GenerateRangeErrorSharedWithoutFPURegsStub(
593 Assembler* assembler) {
594 GenerateRangeError(assembler, /*with_fpu_regs=*/false);
595}
596
597void StubCodeCompiler::GenerateRangeErrorSharedWithFPURegsStub(
598 Assembler* assembler) {
599 GenerateRangeError(assembler, /*with_fpu_regs=*/true);
600}
601
602void StubCodeCompiler::GenerateStackOverflowSharedWithoutFPURegsStub(
603 Assembler* assembler) {
604 GenerateSharedStub(
605 assembler, /*save_fpu_registers=*/false, &kStackOverflowRuntimeEntry,
606 target::Thread::stack_overflow_shared_without_fpu_regs_stub_offset(),
607 /*allow_return=*/true);
608}
609
610void StubCodeCompiler::GenerateStackOverflowSharedWithFPURegsStub(
611 Assembler* assembler) {
612 GenerateSharedStub(
613 assembler, /*save_fpu_registers=*/true, &kStackOverflowRuntimeEntry,
614 target::Thread::stack_overflow_shared_with_fpu_regs_stub_offset(),
615 /*allow_return=*/true);
616}
617
618// Input parameters:
619// RSP : points to return address.
620// RSP + 8 : address of return value.
621// RAX : address of first argument in argument array.
622// RBX : address of the native function to call.
623// R10 : argc_tag including number of arguments and function kind.
624static void GenerateCallNativeWithWrapperStub(Assembler* assembler,
625 Address wrapper_address) {
626 const intptr_t native_args_struct_offset = 0;
627 const intptr_t thread_offset =
628 target::NativeArguments::thread_offset() + native_args_struct_offset;
629 const intptr_t argc_tag_offset =
630 target::NativeArguments::argc_tag_offset() + native_args_struct_offset;
631 const intptr_t argv_offset =
632 target::NativeArguments::argv_offset() + native_args_struct_offset;
633 const intptr_t retval_offset =
634 target::NativeArguments::retval_offset() + native_args_struct_offset;
635
636 __ EnterStubFrame();
637
638 // Save exit frame information to enable stack walking as we are about
639 // to transition to native code.
640 __ movq(Address(THR, target::Thread::top_exit_frame_info_offset()), RBP);
641
642 // Mark that the thread exited generated code through a runtime call.
643 __ movq(Address(THR, target::Thread::exit_through_ffi_offset()),
644 Immediate(target::Thread::exit_through_runtime_call()));
645
646#if defined(DEBUG)
647 {
648 Label ok;
649 // Check that we are always entering from Dart code.
650 __ movq(R8, Immediate(VMTag::kDartCompiledTagId));
651 __ cmpq(R8, Assembler::VMTagAddress());
652 __ j(EQUAL, &ok, Assembler::kNearJump);
653 __ Stop("Not coming from Dart code.");
654 __ Bind(&ok);
655 }
656#endif
657
658 // Mark that the thread is executing native code.
659 __ movq(Assembler::VMTagAddress(), RBX);
660
661 // Reserve space for the native arguments structure passed on the stack (the
662 // outgoing pointer parameter to the native arguments structure is passed in
663 // RDI) and align frame before entering the C++ world.
664 __ subq(RSP, Immediate(target::NativeArguments::StructSize()));
665 if (OS::ActivationFrameAlignment() > 1) {
666 __ andq(RSP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
667 }
668
669 // Pass target::NativeArguments structure by value and call native function.
670 __ movq(Address(RSP, thread_offset), THR); // Set thread in NativeArgs.
671 __ movq(Address(RSP, argc_tag_offset),
672 R10); // Set argc in target::NativeArguments.
673 __ movq(Address(RSP, argv_offset),
674 RAX); // Set argv in target::NativeArguments.
675 __ leaq(RAX,
676 Address(RBP, 2 * target::kWordSize)); // Compute return value addr.
677 __ movq(Address(RSP, retval_offset),
678 RAX); // Set retval in target::NativeArguments.
679
680 // Pass the pointer to the target::NativeArguments.
681 __ movq(CallingConventions::kArg1Reg, RSP);
682 // Pass pointer to function entrypoint.
683 __ movq(CallingConventions::kArg2Reg, RBX);
684
685 __ movq(RAX, wrapper_address);
686 __ CallCFunction(RAX);
687
688 // Mark that the thread is executing Dart code.
689 __ movq(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));
690
691 // Mark that the thread has not exited generated Dart code.
692 __ movq(Address(THR, target::Thread::exit_through_ffi_offset()),
693 Immediate(0));
694
695 // Reset exit frame information in Isolate's mutator thread structure.
696 __ movq(Address(THR, target::Thread::top_exit_frame_info_offset()),
697 Immediate(0));
698
699 // Restore the global object pool after returning from runtime (old space is
700 // moving, so the GOP could have been relocated).
701 if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
702 __ movq(PP, Address(THR, target::Thread::global_object_pool_offset()));
703 }
704
705 __ LeaveStubFrame();
706 __ ret();
707}
708
709void StubCodeCompiler::GenerateCallNoScopeNativeStub(Assembler* assembler) {
710 GenerateCallNativeWithWrapperStub(
711 assembler,
712 Address(THR,
713 target::Thread::no_scope_native_wrapper_entry_point_offset()));
714}
715
716void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
717 GenerateCallNativeWithWrapperStub(
718 assembler,
719 Address(THR,
720 target::Thread::auto_scope_native_wrapper_entry_point_offset()));
721}
722
723// Input parameters:
724// RSP : points to return address.
725// RSP + 8 : address of return value.
726// RAX : address of first argument in argument array.
727// RBX : address of the native function to call.
728// R10 : argc_tag including number of arguments and function kind.
729void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
730 GenerateCallNativeWithWrapperStub(
731 assembler,
732 Address(THR,
733 target::Thread::bootstrap_native_wrapper_entry_point_offset()));
734}
735
736// Input parameters:
737// R10: arguments descriptor array.
738void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
739 __ EnterStubFrame();
740 __ pushq(R10); // Preserve arguments descriptor array.
741 // Setup space on stack for return value.
742 __ pushq(Immediate(0));
743 __ CallRuntime(kPatchStaticCallRuntimeEntry, 0);
744 __ popq(CODE_REG); // Get Code object result.
745 __ popq(R10); // Restore arguments descriptor array.
746 // Remove the stub frame as we are about to jump to the dart function.
747 __ LeaveStubFrame();
748
749 __ movq(RBX, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
750 __ jmp(RBX);
751}
752
753// Called from a static call only when an invalid code has been entered
754// (invalid because its function was optimized or deoptimized).
755// R10: arguments descriptor array.
756void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
757 Label monomorphic;
758 __ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
759
760 // This was a static call.
761 // Load code pointer to this stub from the thread:
  // The one that is passed in is not correct: it points to the code object
  // that needs to be replaced.
764 __ movq(CODE_REG,
765 Address(THR, target::Thread::fix_callers_target_code_offset()));
766 __ EnterStubFrame();
767 __ pushq(R10); // Preserve arguments descriptor array.
768 // Setup space on stack for return value.
769 __ pushq(Immediate(0));
770 __ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
771 __ popq(CODE_REG); // Get Code object.
772 __ popq(R10); // Restore arguments descriptor array.
773 __ movq(RAX, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
774 __ LeaveStubFrame();
775 __ jmp(RAX);
776 __ int3();
777
778 __ Bind(&monomorphic);
779 // This was a switchable call.
780 // Load code pointer to this stub from the thread:
  // The one that is passed in is not correct: it points to the code object
  // that needs to be replaced.
783 __ movq(CODE_REG,
784 Address(THR, target::Thread::fix_callers_target_code_offset()));
785 __ EnterStubFrame();
786 __ pushq(RBX); // Preserve cache (guarded CID as Smi).
787 __ pushq(RDX); // Preserve receiver.
788 __ pushq(Immediate(0)); // Result slot.
789 __ CallRuntime(kFixCallersTargetMonomorphicRuntimeEntry, 0);
790 __ popq(CODE_REG); // Get Code object.
791 __ popq(RDX); // Restore receiver.
792 __ popq(RBX); // Restore cache (guarded CID as Smi).
793 __ movq(RAX, FieldAddress(CODE_REG, target::Code::entry_point_offset(
794 CodeEntryKind::kMonomorphic)));
795 __ LeaveStubFrame();
796 __ jmp(RAX);
797 __ int3();
798}
799
800// Called from object allocate instruction when the allocation stub has been
801// disabled.
802void StubCodeCompiler::GenerateFixAllocationStubTargetStub(
803 Assembler* assembler) {
804 // Load code pointer to this stub from the thread:
  // The one that is passed in is not correct: it points to the code object
  // that needs to be replaced.
807 __ movq(CODE_REG,
808 Address(THR, target::Thread::fix_allocation_stub_code_offset()));
809 __ EnterStubFrame();
810 // Setup space on stack for return value.
811 __ pushq(Immediate(0));
812 __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
813 __ popq(CODE_REG); // Get Code object.
814 __ movq(RAX, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
815 __ LeaveStubFrame();
816 __ jmp(RAX);
817 __ int3();
818}
819
820// Input parameters:
821// R10: smi-tagged argument count, may be zero.
822// RBP[target::frame_layout.param_end_from_fp + 1]: last argument.
823static void PushArrayOfArguments(Assembler* assembler) {
824 __ LoadObject(R12, NullObject());
825 // Allocate array to store arguments of caller.
826 __ movq(RBX, R12); // Null element type for raw Array.
827 __ Call(StubCodeAllocateArray());
828 __ SmiUntag(R10);
829 // RAX: newly allocated array.
830 // R10: length of the array (was preserved by the stub).
831 __ pushq(RAX); // Array is in RAX and on top of stack.
832 __ leaq(R12,
833 Address(RBP, R10, TIMES_8,
834 target::frame_layout.param_end_from_fp * target::kWordSize));
835 __ leaq(RBX, FieldAddress(RAX, target::Array::data_offset()));
836 // R12: address of first argument on stack.
837 // RBX: address of first argument in array.
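  // Copy the arguments from the caller's frame into the array: R12 walks down
  // the stack while RBX walks up the array's data area.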
838 Label loop, loop_condition;
839#if defined(DEBUG)
840 static const bool kJumpLength = Assembler::kFarJump;
841#else
842 static const bool kJumpLength = Assembler::kNearJump;
843#endif // DEBUG
844 __ jmp(&loop_condition, kJumpLength);
845 __ Bind(&loop);
846 __ movq(RDI, Address(R12, 0));
  // A generational barrier is needed; the array is not necessarily in new space.
848 __ StoreIntoObject(RAX, Address(RBX, 0), RDI);
849 __ addq(RBX, Immediate(target::kWordSize));
850 __ subq(R12, Immediate(target::kWordSize));
851 __ Bind(&loop_condition);
852 __ decq(R10);
853 __ j(POSITIVE, &loop, Assembler::kNearJump);
854}
855
// Used by eager and lazy deoptimization. Preserve result in RAX if necessary.
// This stub translates an optimized frame into an unoptimized frame. The
// optimized frame can contain values in registers and on the stack, while the
// unoptimized frame contains all values on the stack.
// Deoptimization occurs in the following steps:
// - Push all registers that can contain values.
// - Call a C routine to copy the stack and saved registers into a temporary
//   buffer.
// - Adjust the caller's frame to the correct unoptimized frame size.
// - Fill the unoptimized frame.
// - Materialize objects that require allocation (e.g. Double instances).
// GC can occur only after the frame is fully rewritten.
// Stack after EnterDartFrame(0, PP, kNoRegister) below:
//   +------------------+
//   | Saved PP         | <- PP
//   +------------------+
//   | PC marker        | <- TOS
//   +------------------+
//   | Saved FP         | <- FP of stub
//   +------------------+
//   | return-address   |  (deoptimization point)
//   +------------------+
//   | Saved CODE_REG   |
//   +------------------+
//   | ...              | <- SP of optimized frame
//
// Parts of this code cannot trigger GC, while other parts can.
882static void GenerateDeoptimizationSequence(Assembler* assembler,
883 DeoptStubKind kind) {
884 // DeoptimizeCopyFrame expects a Dart frame, i.e. EnterDartFrame(0), but there
885 // is no need to set the correct PC marker or load PP, since they get patched.
886 __ EnterStubFrame();
887
888 // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry
889 // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
890 const intptr_t saved_result_slot_from_fp =
891 target::frame_layout.first_local_from_fp + 1 -
892 (kNumberOfCpuRegisters - RAX);
893 const intptr_t saved_exception_slot_from_fp =
894 target::frame_layout.first_local_from_fp + 1 -
895 (kNumberOfCpuRegisters - RAX);
896 const intptr_t saved_stacktrace_slot_from_fp =
897 target::frame_layout.first_local_from_fp + 1 -
898 (kNumberOfCpuRegisters - RDX);
899 // Result in RAX is preserved as part of pushing all registers below.
900
901 // Push registers in their enumeration order: lowest register number at
902 // lowest address.
903 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; i--) {
904 if (i == CODE_REG) {
905 // Save the original value of CODE_REG pushed before invoking this stub
906 // instead of the value used to call this stub.
907 __ pushq(Address(RBP, 2 * target::kWordSize));
908 } else {
909 __ pushq(static_cast<Register>(i));
910 }
911 }
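  // Save all XMM registers below the saved CPU registers (the stack grows
  // down, so they end up at lower addresses in the register block).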
912 __ subq(RSP, Immediate(kNumberOfXmmRegisters * kFpuRegisterSize));
913 intptr_t offset = 0;
914 for (intptr_t reg_idx = 0; reg_idx < kNumberOfXmmRegisters; ++reg_idx) {
915 XmmRegister xmm_reg = static_cast<XmmRegister>(reg_idx);
916 __ movups(Address(RSP, offset), xmm_reg);
917 offset += kFpuRegisterSize;
918 }
919
920 // Pass address of saved registers block.
921 __ movq(CallingConventions::kArg1Reg, RSP);
922 bool is_lazy =
923 (kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow);
924 __ movq(CallingConventions::kArg2Reg, Immediate(is_lazy ? 1 : 0));
925 __ ReserveAlignedFrameSpace(0); // Ensure stack is aligned before the call.
926 __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2);
927 // Result (RAX) is stack-size (FP - SP) in bytes.
928
929 if (kind == kLazyDeoptFromReturn) {
930 // Restore result into RBX temporarily.
931 __ movq(RBX, Address(RBP, saved_result_slot_from_fp * target::kWordSize));
932 } else if (kind == kLazyDeoptFromThrow) {
933 // Restore result into RBX temporarily.
934 __ movq(RBX,
935 Address(RBP, saved_exception_slot_from_fp * target::kWordSize));
936 __ movq(RDX,
937 Address(RBP, saved_stacktrace_slot_from_fp * target::kWordSize));
938 }
939
940 // There is a Dart Frame on the stack. We must restore PP and leave frame.
941 __ RestoreCodePointer();
942 __ LeaveStubFrame();
943
944 __ popq(RCX); // Preserve return address.
945 __ movq(RSP, RBP); // Discard optimized frame.
946 __ subq(RSP, RAX); // Reserve space for deoptimized frame.
947 __ pushq(RCX); // Restore return address.
948
949 // DeoptimizeFillFrame expects a Dart frame, i.e. EnterDartFrame(0), but there
950 // is no need to set the correct PC marker or load PP, since they get patched.
951 __ EnterStubFrame();
952
953 if (kind == kLazyDeoptFromReturn) {
954 __ pushq(RBX); // Preserve result as first local.
955 } else if (kind == kLazyDeoptFromThrow) {
956 __ pushq(RBX); // Preserve exception as first local.
957 __ pushq(RDX); // Preserve stacktrace as second local.
958 }
959 __ ReserveAlignedFrameSpace(0);
960 // Pass last FP as a parameter.
961 __ movq(CallingConventions::kArg1Reg, RBP);
962 __ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1);
963 if (kind == kLazyDeoptFromReturn) {
964 // Restore result into RBX.
965 __ movq(RBX, Address(RBP, target::frame_layout.first_local_from_fp *
966 target::kWordSize));
967 } else if (kind == kLazyDeoptFromThrow) {
968 // Restore exception into RBX.
969 __ movq(RBX, Address(RBP, target::frame_layout.first_local_from_fp *
970 target::kWordSize));
971 // Restore stacktrace into RDX.
972 __ movq(RDX, Address(RBP, (target::frame_layout.first_local_from_fp - 1) *
973 target::kWordSize));
974 }
975 // Code above cannot cause GC.
976 // There is a Dart Frame on the stack. We must restore PP and leave frame.
977 __ RestoreCodePointer();
978 __ LeaveStubFrame();
979
980 // Frame is fully rewritten at this point and it is safe to perform a GC.
981 // Materialize any objects that were deferred by FillFrame because they
982 // require allocation.
983 // Enter stub frame with loading PP. The caller's PP is not materialized yet.
984 __ EnterStubFrame();
985 if (kind == kLazyDeoptFromReturn) {
986 __ pushq(RBX); // Preserve result, it will be GC-d here.
987 } else if (kind == kLazyDeoptFromThrow) {
988 __ pushq(RBX); // Preserve exception.
989 __ pushq(RDX); // Preserve stacktrace.
990 }
991 __ pushq(Immediate(target::ToRawSmi(0))); // Space for the result.
992 __ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0);
993 // Result tells stub how many bytes to remove from the expression stack
994 // of the bottom-most frame. They were used as materialization arguments.
995 __ popq(RBX);
996 __ SmiUntag(RBX);
997 if (kind == kLazyDeoptFromReturn) {
998 __ popq(RAX); // Restore result.
999 } else if (kind == kLazyDeoptFromThrow) {
1000 __ popq(RDX); // Restore stacktrace.
1001 __ popq(RAX); // Restore exception.
1002 }
1003 __ LeaveStubFrame();
1004
1005 __ popq(RCX); // Pop return address.
1006 __ addq(RSP, RBX); // Remove materialization arguments.
1007 __ pushq(RCX); // Push return address.
1008 // The caller is responsible for emitting the return instruction.
1009}
1010
1011// RAX: result, must be preserved
1012void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
1013 Assembler* assembler) {
1014 // Push zap value instead of CODE_REG for lazy deopt.
1015 __ pushq(Immediate(kZapCodeReg));
1016 // Return address for "call" to deopt stub.
1017 __ pushq(Immediate(kZapReturnAddress));
1018 __ movq(CODE_REG,
1019 Address(THR, target::Thread::lazy_deopt_from_return_stub_offset()));
1020 GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
1021 __ ret();
1022}
1023
1024// RAX: exception, must be preserved
1025// RDX: stacktrace, must be preserved
1026void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub(
1027 Assembler* assembler) {
1028 // Push zap value instead of CODE_REG for lazy deopt.
1029 __ pushq(Immediate(kZapCodeReg));
1030 // Return address for "call" to deopt stub.
1031 __ pushq(Immediate(kZapReturnAddress));
1032 __ movq(CODE_REG,
1033 Address(THR, target::Thread::lazy_deopt_from_throw_stub_offset()));
1034 GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
1035 __ ret();
1036}
1037
1038void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) {
1039 __ popq(TMP);
1040 __ pushq(CODE_REG);
1041 __ pushq(TMP);
1042 __ movq(CODE_REG, Address(THR, target::Thread::deoptimize_stub_offset()));
1043 GenerateDeoptimizationSequence(assembler, kEagerDeopt);
1044 __ ret();
1045}
1046
1047// Input:
1048// RBX - icdata/megamorphic_cache
1049// RDI - arguments descriptor size
1050static void GenerateNoSuchMethodDispatcherBody(Assembler* assembler,
1051 Register receiver_reg) {
1052 __ pushq(Immediate(0)); // Setup space on stack for result.
1053 __ pushq(receiver_reg); // Receiver.
1054 __ pushq(RBX); // ICData/MegamorphicCache.
1055 __ pushq(R10); // Arguments descriptor array.
1056
1057 // Adjust arguments count.
1058 __ cmpq(
1059 FieldAddress(R10, target::ArgumentsDescriptor::type_args_len_offset()),
1060 Immediate(0));
1061 __ movq(R10, RDI);
1062 Label args_count_ok;
1063 __ j(EQUAL, &args_count_ok, Assembler::kNearJump);
1064 __ addq(R10, Immediate(target::ToRawSmi(1))); // Include the type arguments.
1065 __ Bind(&args_count_ok);
1066
1067 // R10: Smi-tagged arguments array length.
1068 PushArrayOfArguments(assembler);
1069 const intptr_t kNumArgs = 4;
1070 __ CallRuntime(kNoSuchMethodFromCallStubRuntimeEntry, kNumArgs);
1071 __ Drop(4);
1072 __ popq(RAX); // Return value.
1073 __ LeaveStubFrame();
1074 __ ret();
1075}
1076
1077// Input:
1078// RBX - icdata/megamorphic_cache
1079// R10 - argument descriptor
1080static void GenerateDispatcherCode(Assembler* assembler,
1081 Label* call_target_function) {
1082 __ Comment("NoSuchMethodDispatch");
1083 // When lazily generated invocation dispatchers are disabled, the
1084 // miss-handler may return null.
1085 __ CompareObject(RAX, NullObject());
1086 __ j(NOT_EQUAL, call_target_function);
1087
1088 __ EnterStubFrame();
1089 // Load the receiver.
1090 __ movq(RDI, FieldAddress(R10, target::ArgumentsDescriptor::size_offset()));
1091 __ movq(RAX,
1092 Address(RBP, RDI, TIMES_HALF_WORD_SIZE,
1093 target::frame_layout.param_end_from_fp * target::kWordSize));
1094
1095 GenerateNoSuchMethodDispatcherBody(assembler, /*receiver_reg=*/RAX);
1096}
1097
1098// Input:
1099// RBX - icdata/megamorphic_cache
1100// RDX - receiver
1101void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
1102 Assembler* assembler) {
1103 __ EnterStubFrame();
1104
1105 __ movq(R10, FieldAddress(
1106 RBX, target::CallSiteData::arguments_descriptor_offset()));
1107 __ movq(RDI, FieldAddress(R10, target::ArgumentsDescriptor::size_offset()));
1108
1109 GenerateNoSuchMethodDispatcherBody(assembler, /*receiver_reg=*/RDX);
1110}
1111
1112// Called for inline allocation of arrays.
1113// Input parameters:
1114// R10 : Array length as Smi.
1115// RBX : array element type (either NULL or an instantiated type).
1116// NOTE: R10 cannot be clobbered here as the caller relies on it being saved.
1117// The newly allocated object is returned in RAX.
1118void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
1119 if (!FLAG_use_slow_path) {
1120 Label slow_case;
    // Compute the size to be allocated; it is based on the array length
    // and is computed as:
    //   RoundedAllocationSize(
    //       (array_length * target::kWordSize) + target::Array::header_size()).
1125 __ movq(RDI, R10); // Array Length.
1126 // Check that length is a positive Smi.
1127 __ testq(RDI, Immediate(kSmiTagMask));
1128 __ j(NOT_ZERO, &slow_case);
1129
1130 __ cmpq(RDI, Immediate(0));
1131 __ j(LESS, &slow_case);
1132 // Check for maximum allowed length.
1133 const Immediate& max_len =
1134 Immediate(target::ToRawSmi(target::Array::kMaxNewSpaceElements));
1135 __ cmpq(RDI, max_len);
1136 __ j(GREATER, &slow_case);
1137
1138 // Check for allocation tracing.
1139 NOT_IN_PRODUCT(
1140 __ MaybeTraceAllocation(kArrayCid, &slow_case, Assembler::kFarJump));
1141
1142 const intptr_t fixed_size_plus_alignment_padding =
1143 target::Array::header_size() +
1144 target::ObjectAlignment::kObjectAlignment - 1;
1145 // RDI is a Smi.
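    // With a one-bit Smi tag, scaling the Smi-tagged length by 4 yields
    // length * 8 bytes (length * kWordSize) before adding the fixed header
    // size and alignment padding.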
1146 __ leaq(RDI, Address(RDI, TIMES_4, fixed_size_plus_alignment_padding));
1147 ASSERT(kSmiTagShift == 1);
1148 __ andq(RDI, Immediate(-target::ObjectAlignment::kObjectAlignment));
1149
1150 const intptr_t cid = kArrayCid;
1151 __ movq(RAX, Address(THR, target::Thread::top_offset()));
1152
1153 // RDI: allocation size.
1154 __ movq(RCX, RAX);
1155 __ addq(RCX, RDI);
1156 __ j(CARRY, &slow_case);
1157
1158 // Check if the allocation fits into the remaining space.
1159 // RAX: potential new object start.
1160 // RCX: potential next object start.
1161 // RDI: allocation size.
1162 __ cmpq(RCX, Address(THR, target::Thread::end_offset()));
1163 __ j(ABOVE_EQUAL, &slow_case);
1164
    // Successfully allocated the object(s); now update top to point to the
    // next object start and initialize the object.
1167 __ movq(Address(THR, target::Thread::top_offset()), RCX);
1168 __ addq(RAX, Immediate(kHeapObjectTag));
1169
1170 // Initialize the tags.
1171 // RAX: new object start as a tagged pointer.
1172 // RDI: allocation size.
1173 {
1174 Label size_tag_overflow, done;
1175 __ cmpq(RDI, Immediate(target::ObjectLayout::kSizeTagMaxSizeTag));
1176 __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
1177 __ shlq(RDI, Immediate(target::ObjectLayout::kTagBitsSizeTagPos -
1178 target::ObjectAlignment::kObjectAlignmentLog2));
1179 __ jmp(&done, Assembler::kNearJump);
1180
1181 __ Bind(&size_tag_overflow);
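      // The allocation is too large for the inline size tag; store zero so the
      // GC computes the size from the array length instead.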
1182 __ LoadImmediate(RDI, Immediate(0));
1183 __ Bind(&done);
1184
1185 // Get the class index and insert it into the tags.
1186 uint32_t tags = target::MakeTagWordForNewSpaceObject(cid, 0);
1187 __ orq(RDI, Immediate(tags));
1188 __ movq(FieldAddress(RAX, target::Array::tags_offset()), RDI); // Tags.
1189 }
1190
1191 // RAX: new object start as a tagged pointer.
1192 // Store the type argument field.
1193 // No generational barrier needed, since we store into a new object.
1194 __ StoreIntoObjectNoBarrier(
1195 RAX, FieldAddress(RAX, target::Array::type_arguments_offset()), RBX);
1196
1197 // Set the length field.
1198 __ StoreIntoObjectNoBarrier(
1199 RAX, FieldAddress(RAX, target::Array::length_offset()), R10);
1200
1201 // Initialize all array elements to raw_null.
1202 // RAX: new object start as a tagged pointer.
1203 // RCX: new object end address.
1204 // RDI: iterator which initially points to the start of the variable
1205 // data area to be initialized.
1206 __ LoadObject(R12, NullObject());
1207 __ leaq(RDI, FieldAddress(RAX, target::Array::header_size()));
1208 Label done;
1209 Label init_loop;
1210 __ Bind(&init_loop);
1211 __ cmpq(RDI, RCX);
1212#if defined(DEBUG)
1213 static const bool kJumpLength = Assembler::kFarJump;
1214#else
1215 static const bool kJumpLength = Assembler::kNearJump;
1216#endif // DEBUG
1217 __ j(ABOVE_EQUAL, &done, kJumpLength);
1218 // No generational barrier needed, since we are storing null.
1219 __ StoreIntoObjectNoBarrier(RAX, Address(RDI, 0), R12);
1220 __ addq(RDI, Immediate(target::kWordSize));
1221 __ jmp(&init_loop, kJumpLength);
1222 __ Bind(&done);
1223 __ ret(); // returns the newly allocated object in RAX.
1224
1225 // Unable to allocate the array using the fast inline code, just call
1226 // into the runtime.
1227 __ Bind(&slow_case);
1228 }
1229 // Create a stub frame as we are pushing some objects on the stack before
1230 // calling into the runtime.
1231 __ EnterStubFrame();
1232 // Setup space on stack for return value.
1233 __ pushq(Immediate(0));
1234 __ pushq(R10); // Array length as Smi.
1235 __ pushq(RBX); // Element type.
1236 __ CallRuntime(kAllocateArrayRuntimeEntry, 2);
1237 __ popq(RAX); // Pop element type argument.
1238 __ popq(R10); // Pop array length argument.
1239 __ popq(RAX); // Pop return value from return slot.
1240
  // Write-barrier elimination might be enabled for this array (depending on
  // the array length). To be sure, we check whether the allocated object is in
  // old space and, if so, call a leaf runtime entry to add it to the
  // remembered set.
1244 EnsureIsNewOrRemembered(assembler);
1245
1246 __ LeaveStubFrame();
1247 __ ret();
1248}
1249
1250void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub(
1251 Assembler* assembler) {
  // For test purposes, call the allocation stub without attempting inline
  // allocation.
1253 if (!FLAG_use_slow_path) {
1254 Label slow_case;
1255 __ TryAllocate(compiler::MintClass(), &slow_case, /*near_jump=*/true,
1256 AllocateMintABI::kResultReg, AllocateMintABI::kTempReg);
1257 __ Ret();
1258
1259 __ Bind(&slow_case);
1260 }
1261 COMPILE_ASSERT(AllocateMintABI::kResultReg == RAX);
1262 GenerateSharedStub(assembler, /*save_fpu_registers=*/true,
1263 &kAllocateMintRuntimeEntry,
1264 target::Thread::allocate_mint_with_fpu_regs_stub_offset(),
1265 /*allow_return=*/true,
1266 /*store_runtime_result_in_rax=*/true);
1267}
1268
1269void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
1270 Assembler* assembler) {
  // For test purposes, call the allocation stub without attempting inline
  // allocation.
1272 if (!FLAG_use_slow_path) {
1273 Label slow_case;
1274 __ TryAllocate(compiler::MintClass(), &slow_case, /*near_jump=*/true,
1275 AllocateMintABI::kResultReg, AllocateMintABI::kTempReg);
1276 __ Ret();
1277
1278 __ Bind(&slow_case);
1279 }
1280 COMPILE_ASSERT(AllocateMintABI::kResultReg == RAX);
1281 GenerateSharedStub(
1282 assembler, /*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
1283 target::Thread::allocate_mint_without_fpu_regs_stub_offset(),
1284 /*allow_return=*/true,
1285 /*store_runtime_result_in_rax=*/true);
1286}
1287
1288// Called when invoking Dart code from C++ (VM code).
1289// Input parameters:
1290// RSP : points to return address.
1291// RDI : target code
1292// RSI : arguments descriptor array.
1293// RDX : arguments array.
1294// RCX : current thread.
1295void StubCodeCompiler::GenerateInvokeDartCodeStub(Assembler* assembler) {
1296 __ pushq(Address(RSP, 0)); // Marker for the profiler.
1297 __ EnterFrame(0);
1298
1299 const Register kTargetCodeReg = CallingConventions::kArg1Reg;
1300 const Register kArgDescReg = CallingConventions::kArg2Reg;
1301 const Register kArgsReg = CallingConventions::kArg3Reg;
1302 const Register kThreadReg = CallingConventions::kArg4Reg;
1303
1304 // Push code object to PC marker slot.
1305 __ pushq(Address(kThreadReg, target::Thread::invoke_dart_code_stub_offset()));
1306
1307 // At this point, the stack looks like:
1308 // | stub code object
1309 // | saved RBP | <-- RBP
1310 // | saved PC (return to DartEntry::InvokeFunction) |
1311
1312 const intptr_t kInitialOffset = 2;
1313 // Save arguments descriptor array, later replaced by Smi argument count.
1314 const intptr_t kArgumentsDescOffset = -(kInitialOffset)*target::kWordSize;
1315 __ pushq(kArgDescReg);
1316
1317 // Save C++ ABI callee-saved registers.
1318 __ PushRegisters(CallingConventions::kCalleeSaveCpuRegisters,
1319 CallingConventions::kCalleeSaveXmmRegisters);
1320
1321 // If any additional (or fewer) values are pushed, the offsets in
1322 // target::frame_layout.exit_link_slot_from_entry_fp will need to be changed.
1323
1324 // Set up THR, which caches the current thread in Dart code.
1325 if (THR != kThreadReg) {
1326 __ movq(THR, kThreadReg);
1327 }
1328
1329#if defined(USING_SHADOW_CALL_STACK)
1330#error Unimplemented
1331#endif
1332
1333 // Save the current VMTag on the stack.
1334 __ movq(RAX, Assembler::VMTagAddress());
1335 __ pushq(RAX);
1336
1337 // Save top resource and top exit frame info. Use RAX as a temporary register.
1338 // StackFrameIterator reads the top exit frame info saved in this frame.
1339 __ movq(RAX, Address(THR, target::Thread::top_resource_offset()));
1340 __ pushq(RAX);
1341 __ movq(Address(THR, target::Thread::top_resource_offset()), Immediate(0));
1342
1343 __ movq(RAX, Address(THR, target::Thread::exit_through_ffi_offset()));
1344 __ pushq(RAX);
1345 __ movq(Address(THR, target::Thread::exit_through_ffi_offset()),
1346 Immediate(0));
1347
1348 __ movq(RAX, Address(THR, target::Thread::top_exit_frame_info_offset()));
1349 __ pushq(RAX);
1350
1351 // The constant target::frame_layout.exit_link_slot_from_entry_fp must be kept
1352 // in sync with the code above.
1353 __ EmitEntryFrameVerification();
1354
1355 __ movq(Address(THR, target::Thread::top_exit_frame_info_offset()),
1356 Immediate(0));
1357
1358 // Mark that the thread is executing Dart code. Do this after initializing the
1359 // exit link for the profiler.
1360 __ movq(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));
1361
1362 // Load arguments descriptor array into R10, which is passed to Dart code.
1363 __ movq(R10, Address(kArgDescReg, VMHandles::kOffsetOfRawPtrInHandle));
1364
1365 // Push arguments. At this point we only need to preserve kTargetCodeReg.
1366 ASSERT(kTargetCodeReg != RDX);
1367
1368 // Load number of arguments into RBX and adjust count for type arguments.
1369 __ movq(RBX, FieldAddress(R10, target::ArgumentsDescriptor::count_offset()));
1370 __ cmpq(
1371 FieldAddress(R10, target::ArgumentsDescriptor::type_args_len_offset()),
1372 Immediate(0));
1373 Label args_count_ok;
1374 __ j(EQUAL, &args_count_ok, Assembler::kNearJump);
1375 __ addq(RBX, Immediate(target::ToRawSmi(1))); // Include the type arguments.
1376 __ Bind(&args_count_ok);
1377 // Save number of arguments as Smi on stack, replacing saved ArgumentsDesc.
1378 __ movq(Address(RBP, kArgumentsDescOffset), RBX);
1379 __ SmiUntag(RBX);
1380
1381 // Compute address of 'arguments array' data area into RDX.
1382 __ movq(RDX, Address(kArgsReg, VMHandles::kOffsetOfRawPtrInHandle));
1383 __ leaq(RDX, FieldAddress(RDX, target::Array::data_offset()));
1384
1385 // Set up arguments for the Dart call.
1386 Label push_arguments;
1387 Label done_push_arguments;
1388 __ j(ZERO, &done_push_arguments, Assembler::kNearJump);
1389 __ LoadImmediate(RAX, Immediate(0));
1390 __ Bind(&push_arguments);
1391 __ pushq(Address(RDX, RAX, TIMES_8, 0));
1392 __ incq(RAX);
1393 __ cmpq(RAX, RBX);
1394 __ j(LESS, &push_arguments, Assembler::kNearJump);
1395 __ Bind(&done_push_arguments);
1396
1397 // Call the Dart code entrypoint.
1398 if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
1399 __ movq(PP, Address(THR, target::Thread::global_object_pool_offset()));
1400 } else {
1401 __ xorq(PP, PP); // GC-safe value into PP.
1402 }
1403 __ movq(CODE_REG,
1404 Address(kTargetCodeReg, VMHandles::kOffsetOfRawPtrInHandle));
1405 __ movq(kTargetCodeReg,
1406 FieldAddress(CODE_REG, target::Code::entry_point_offset()));
1407 __ call(kTargetCodeReg); // R10 is the arguments descriptor array.
1408
1409 // Read the saved number of passed arguments as Smi.
1410 __ movq(RDX, Address(RBP, kArgumentsDescOffset));
1411
1412 // Get rid of arguments pushed on the stack.
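  // RDX is Smi-tagged, so scaling by 4 removes count * 8 bytes, i.e. one word
  // per pushed argument.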
1413 __ leaq(RSP, Address(RSP, RDX, TIMES_4, 0)); // RDX is a Smi.
1414
1415 // Restore the saved top exit frame info and top resource back into the
1416 // Isolate structure.
1417 __ popq(Address(THR, target::Thread::top_exit_frame_info_offset()));
1418 __ popq(Address(THR, target::Thread::exit_through_ffi_offset()));
1419 __ popq(Address(THR, target::Thread::top_resource_offset()));
1420
1421 // Restore the current VMTag from the stack.
1422 __ popq(Assembler::VMTagAddress());
1423
1424#if defined(USING_SHADOW_CALL_STACK)
1425#error Unimplemented
1426#endif
1427
1428 // Restore C++ ABI callee-saved registers.
1429 __ PopRegisters(CallingConventions::kCalleeSaveCpuRegisters,
1430 CallingConventions::kCalleeSaveXmmRegisters);
1431 __ set_constant_pool_allowed(false);
1432
1433 // Restore the frame pointer.
1434 __ LeaveFrame();
1435 __ popq(RCX);
1436
1437 __ ret();
1438}
1439
1440// Called when invoking compiled Dart code from interpreted Dart code.
1441// Input parameters:
1442// RSP : points to return address.
1443// RDI : target raw code
1444// RSI : arguments raw descriptor array.
1445// RDX : address of first argument.
1446// RCX : current thread.
1447void StubCodeCompiler::GenerateInvokeDartCodeFromBytecodeStub(
1448 Assembler* assembler) {
1449 if (FLAG_precompiled_mode) {
1450 __ Stop("Not using interpreter");
1451 return;
1452 }
1453
1454 __ pushq(Address(RSP, 0)); // Marker for the profiler.
1455 __ EnterFrame(0);
1456
1457 const Register kTargetCodeReg = CallingConventions::kArg1Reg;
1458 const Register kArgDescReg = CallingConventions::kArg2Reg;
1459 const Register kArg0Reg = CallingConventions::kArg3Reg;
1460 const Register kThreadReg = CallingConventions::kArg4Reg;
1461
1462 // Push code object to PC marker slot.
1463 __ pushq(
1464 Address(kThreadReg,
1465 target::Thread::invoke_dart_code_from_bytecode_stub_offset()));
1466
1467 // At this point, the stack looks like:
1468 // | stub code object
1469 // | saved RBP | <-- RBP
1470 // | saved PC (return to interpreter's InvokeCompiled) |
1471
1472 const intptr_t kInitialOffset = 2;
1473 // Save arguments descriptor array, later replaced by Smi argument count.
1474 const intptr_t kArgumentsDescOffset = -(kInitialOffset)*target::kWordSize;
1475 __ pushq(kArgDescReg);
1476
1477 // Save C++ ABI callee-saved registers.
1478 __ PushRegisters(CallingConventions::kCalleeSaveCpuRegisters,
1479 CallingConventions::kCalleeSaveXmmRegisters);
1480
1481 // If any additional (or fewer) values are pushed, the offsets in
1482 // target::frame_layout.exit_link_slot_from_entry_fp will need to be changed.
1483
1484 // Set up THR, which caches the current thread in Dart code.
1485 if (THR != kThreadReg) {
1486 __ movq(THR, kThreadReg);
1487 }
1488
1489#if defined(USING_SHADOW_CALL_STACK)
1490#error Unimplemented
1491#endif
1492
1493 // Save the current VMTag on the stack.
1494 __ movq(RAX, Assembler::VMTagAddress());
1495 __ pushq(RAX);
1496
1497 // Save top resource and top exit frame info. Use RAX as a temporary register.
1498 // StackFrameIterator reads the top exit frame info saved in this frame.
1499 __ movq(RAX, Address(THR, target::Thread::top_resource_offset()));
1500 __ pushq(RAX);
1501 __ movq(Address(THR, target::Thread::top_resource_offset()), Immediate(0));
1502
1503 __ movq(RAX, Address(THR, target::Thread::exit_through_ffi_offset()));
1504 __ pushq(RAX);
1505 __ movq(Address(THR, target::Thread::exit_through_ffi_offset()),
1506 Immediate(0));
1507
1508 __ movq(RAX, Address(THR, target::Thread::top_exit_frame_info_offset()));
1509 __ pushq(RAX);
1510 __ movq(Address(THR, target::Thread::top_exit_frame_info_offset()),
1511 Immediate(0));
1512
1513// The constant target::frame_layout.exit_link_slot_from_entry_fp must be kept
1514// in sync with the code below.
1515#if defined(DEBUG)
1516 {
1517 Label ok;
1518 __ leaq(RAX,
1519 Address(RBP, target::frame_layout.exit_link_slot_from_entry_fp *
1520 target::kWordSize));
1521 __ cmpq(RAX, RSP);
1522 __ j(EQUAL, &ok);
1523 __ Stop("target::frame_layout.exit_link_slot_from_entry_fp mismatch");
1524 __ Bind(&ok);
1525 }
1526#endif
1527
1528 // Mark that the thread is executing Dart code. Do this after initializing the
1529 // exit link for the profiler.
1530 __ movq(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));
1531
1532 // Load arguments descriptor array into R10, which is passed to Dart code.
1533 __ movq(R10, kArgDescReg);
1534
1535 // Push arguments. At this point we only need to preserve kTargetCodeReg.
1536 ASSERT(kTargetCodeReg != RDX);
1537
1538 // Load number of arguments into RBX and adjust count for type arguments.
1539 __ movq(RBX, FieldAddress(R10, target::ArgumentsDescriptor::count_offset()));
1540 __ cmpq(
1541 FieldAddress(R10, target::ArgumentsDescriptor::type_args_len_offset()),
1542 Immediate(0));
1543 Label args_count_ok;
1544 __ j(EQUAL, &args_count_ok, Assembler::kNearJump);
1545 __ addq(RBX, Immediate(target::ToRawSmi(1))); // Include the type arguments.
1546 __ Bind(&args_count_ok);
1547 // Save number of arguments as Smi on stack, replacing saved ArgumentsDesc.
1548 __ movq(Address(RBP, kArgumentsDescOffset), RBX);
1549 __ SmiUntag(RBX);
1550
1551 // Compute address of first argument into RDX.
1552 if (kArg0Reg != RDX) { // Different registers on WIN64.
1553 __ movq(RDX, kArg0Reg);
1554 }
1555
1556 // Set up arguments for the Dart call.
1557 Label push_arguments;
1558 Label done_push_arguments;
1559 __ j(ZERO, &done_push_arguments, Assembler::kNearJump);
1560 __ LoadImmediate(RAX, Immediate(0));
1561 __ Bind(&push_arguments);
1562 __ pushq(Address(RDX, RAX, TIMES_8, 0));
1563 __ incq(RAX);
1564 __ cmpq(RAX, RBX);
1565 __ j(LESS, &push_arguments, Assembler::kNearJump);
1566 __ Bind(&done_push_arguments);
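  // The loop above is, roughly, the following sketch:
  //   for (intptr_t i = 0; i < argc; i++) Push(argv[i]);
  // with RDX acting as argv, RBX as the untagged argument count, and RAX as
  // the loop index.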
1567
1568 // Call the Dart code entrypoint.
1569 __ xorq(PP, PP); // GC-safe value into PP.
1570 __ movq(CODE_REG, kTargetCodeReg);
1571 __ movq(kTargetCodeReg,
1572 FieldAddress(CODE_REG, target::Code::entry_point_offset()));
1573 __ call(kTargetCodeReg); // R10 is the arguments descriptor array.
1574
1575 // Read the saved number of passed arguments as Smi.
1576 __ movq(RDX, Address(RBP, kArgumentsDescOffset));
1577
1578 // Get rid of arguments pushed on the stack.
1579 __ leaq(RSP, Address(RSP, RDX, TIMES_4, 0)); // RDX is a Smi.
1580
  // Restore the saved top exit frame info and top resource back into the
  // Thread structure.
1583 __ popq(Address(THR, target::Thread::top_exit_frame_info_offset()));
1584 __ popq(Address(THR, target::Thread::exit_through_ffi_offset()));
1585 __ popq(Address(THR, target::Thread::top_resource_offset()));
1586
1587 // Restore the current VMTag from the stack.
1588 __ popq(Assembler::VMTagAddress());
1589
1590#if defined(USING_SHADOW_CALL_STACK)
1591#error Unimplemented
1592#endif
1593
1594 // Restore C++ ABI callee-saved registers.
1595 __ PopRegisters(CallingConventions::kCalleeSaveCpuRegisters,
1596 CallingConventions::kCalleeSaveXmmRegisters);
1597 __ set_constant_pool_allowed(false);
1598
1599 // Restore the frame pointer.
1600 __ LeaveFrame();
1601 __ popq(RCX);
1602
1603 __ ret();
1604}
1605
// Helper stub that allocates the space for a Context object.
// It does not initialize the fields of the context.
// Input:
// R10: number of context variables.
// Output:
// RAX: new, uninitialized RawContext object.
// Clobbered:
// R13
1614static void GenerateAllocateContextSpaceStub(Assembler* assembler,
1615 Label* slow_case) {
1616 // First compute the rounded instance size.
1617 // R10: number of context variables.
1618 intptr_t fixed_size_plus_alignment_padding =
1619 (target::Context::header_size() +
1620 target::ObjectAlignment::kObjectAlignment - 1);
1621 __ leaq(R13, Address(R10, TIMES_8, fixed_size_plus_alignment_padding));
1622 __ andq(R13, Immediate(-target::ObjectAlignment::kObjectAlignment));
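  // Worked example (assuming 16-byte object alignment and 8-byte words): for
  // R10 == 3 context variables the two instructions above compute
  //   R13 = (header_size + 3 * 8 + 15) & ~15,
  // i.e. the raw instance size rounded up to the next alignment boundary.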
1623
1624 // Check for allocation tracing.
1625 NOT_IN_PRODUCT(
1626 __ MaybeTraceAllocation(kContextCid, slow_case, Assembler::kFarJump));
1627
1628 // Now allocate the object.
1629 // R10: number of context variables.
1630 __ movq(RAX, Address(THR, target::Thread::top_offset()));
1631 __ addq(R13, RAX);
1632 // Check if the allocation fits into the remaining space.
1633 // RAX: potential new object.
1634 // R13: potential next object start.
1635 // R10: number of context variables.
1636 __ cmpq(R13, Address(THR, target::Thread::end_offset()));
1637 __ j(ABOVE_EQUAL, slow_case);
1638
1639 // Successfully allocated the object, now update top to point to
1640 // next object start and initialize the object.
1641 // RAX: new object.
1642 // R13: next object start.
1643 // R10: number of context variables.
1644 __ movq(Address(THR, target::Thread::top_offset()), R13);
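  // In C-like pseudocode, the bump allocation above is roughly this sketch:
  //   uword obj = thread->top_;
  //   uword new_top = obj + rounded_size;
  //   if (new_top >= thread->end_) goto slow_case;
  //   thread->top_ = new_top;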
1645 // R13: Size of allocation in bytes.
1646 __ subq(R13, RAX);
1647 __ addq(RAX, Immediate(kHeapObjectTag));
1648 // Generate isolate-independent code to allow sharing between isolates.
1649
1650 // Calculate the size tag.
1651 // RAX: new object.
1652 // R10: number of context variables.
1653 {
1654 Label size_tag_overflow, done;
1655 __ leaq(R13, Address(R10, TIMES_8, fixed_size_plus_alignment_padding));
1656 __ andq(R13, Immediate(-target::ObjectAlignment::kObjectAlignment));
1657 __ cmpq(R13, Immediate(target::ObjectLayout::kSizeTagMaxSizeTag));
1658 __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
1659 __ shlq(R13, Immediate(target::ObjectLayout::kTagBitsSizeTagPos -
1660 target::ObjectAlignment::kObjectAlignmentLog2));
1661 __ jmp(&done);
1662
1663 __ Bind(&size_tag_overflow);
1664 // Set overflow size tag value.
1665 __ LoadImmediate(R13, Immediate(0));
1666
1667 __ Bind(&done);
1668 // RAX: new object.
1669 // R10: number of context variables.
1670 // R13: size and bit tags.
1671 uint32_t tags = target::MakeTagWordForNewSpaceObject(kContextCid, 0);
1672 __ orq(R13, Immediate(tags));
1673 __ movq(FieldAddress(RAX, target::Object::tags_offset()), R13); // Tags.
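    // Descriptive note: MakeTagWordForNewSpaceObject(kContextCid, 0)
    // contributes the class id and the default GC bits for a new-space
    // object; its size argument is 0 because the size tag was computed
    // dynamically above (and is left as 0 when the size is too large to fit,
    // in which case the size is derived from num_variables instead).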
1674 }
1675
  // Set up the number of context variables field.
1677 // RAX: new object.
1678 // R10: number of context variables as integer value (not object).
1679 __ movq(FieldAddress(RAX, target::Context::num_variables_offset()), R10);
1680}
1681
1682// Called for inline allocation of contexts.
1683// Input:
1684// R10: number of context variables.
1685// Output:
1686// RAX: new allocated RawContext object.
1687// Clobbered:
1688// R9, R13
1689void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
1690 __ LoadObject(R9, NullObject());
1691 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1692 Label slow_case;
1693
1694 GenerateAllocateContextSpaceStub(assembler, &slow_case);
1695
    // Set up the parent field.
    // RAX: new object.
    // R9: Parent object, initialized to null.
1699 // No generational barrier needed, since we are storing null.
1700 __ StoreIntoObjectNoBarrier(
1701 RAX, FieldAddress(RAX, target::Context::parent_offset()), R9);
1702
1703 // Initialize the context variables.
1704 // RAX: new object.
1705 // R10: number of context variables.
1706 {
1707 Label loop, entry;
1708 __ leaq(R13, FieldAddress(RAX, target::Context::variable_offset(0)));
1709#if defined(DEBUG)
1710 static const bool kJumpLength = Assembler::kFarJump;
1711#else
1712 static const bool kJumpLength = Assembler::kNearJump;
1713#endif // DEBUG
1714 __ jmp(&entry, kJumpLength);
1715 __ Bind(&loop);
1716 __ decq(R10);
1717 // No generational barrier needed, since we are storing null.
1718 __ StoreIntoObjectNoBarrier(RAX, Address(R13, R10, TIMES_8, 0), R9);
1719 __ Bind(&entry);
1720 __ cmpq(R10, Immediate(0));
1721 __ j(NOT_EQUAL, &loop, Assembler::kNearJump);
1722 }
1723
1724 // Done allocating and initializing the context.
1725 // RAX: new object.
1726 __ ret();
1727
1728 __ Bind(&slow_case);
1729 }
1730 // Create a stub frame.
1731 __ EnterStubFrame();
  __ pushq(R9);  // Set up space on the stack for the return value.
1733 __ SmiTag(R10);
1734 __ pushq(R10); // Push number of context variables.
1735 __ CallRuntime(kAllocateContextRuntimeEntry, 1); // Allocate context.
1736 __ popq(RAX); // Pop number of context variables argument.
1737 __ popq(RAX); // Pop the new context object.
1738 // Write-barrier elimination might be enabled for this context (depending on
1739 // the size). To be sure we will check if the allocated object is in old
1740 // space and if so call a leaf runtime to add it to the remembered set.
1741 EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
1742
1743 // RAX: new object
1744 // Restore the frame pointer.
1745 __ LeaveStubFrame();
1746
1747 __ ret();
1748}
1749
1750// Called for inline clone of contexts.
1751// Input:
1752// R9: context to clone.
1753// Output:
1754// RAX: new allocated RawContext object.
1755// Clobbered:
1756// R10, R13
1757void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
1758 {
1759 Label slow_case;
1760
    // Load the number of context variables (int32_t) from the existing
    // context.
1762 __ movsxd(R10, FieldAddress(R9, target::Context::num_variables_offset()));
1763
1764 // Allocate new context of same size.
1765 GenerateAllocateContextSpaceStub(assembler, &slow_case);
1766
1767 // Load parent in the existing context.
1768 __ movq(R13, FieldAddress(R9, target::Context::parent_offset()));
    // Set up the parent field.
    // RAX: new object.
    // R13: parent of the existing context.
1772 __ StoreIntoObjectNoBarrier(
1773 RAX, FieldAddress(RAX, target::Context::parent_offset()), R13);
1774
1775 // Clone the context variables.
1776 // RAX: new context clone.
1777 // R10: number of context variables.
1778 {
1779 Label loop, entry;
1780 __ jmp(&entry, Assembler::kNearJump);
1781 __ Bind(&loop);
1782 __ decq(R10);
1783 __ movq(R13, FieldAddress(R9, R10, TIMES_8,
1784 target::Context::variable_offset(0)));
1785 __ StoreIntoObjectNoBarrier(
1786 RAX,
1787 FieldAddress(RAX, R10, TIMES_8, target::Context::variable_offset(0)),
1788 R13);
1789 __ Bind(&entry);
1790 __ cmpq(R10, Immediate(0));
1791 __ j(NOT_EQUAL, &loop, Assembler::kNearJump);
1792 }
1793
1794 // Done allocating and initializing the context.
1795 // RAX: new object.
1796 __ ret();
1797
1798 __ Bind(&slow_case);
1799 }
1800
1801 // Create a stub frame.
1802 __ EnterStubFrame();
1803
1804 __ PushObject(NullObject()); // Make space on stack for the return value.
1805 __ pushq(R9); // Push context.
1806 __ CallRuntime(kCloneContextRuntimeEntry, 1); // Clone context.
1807 __ popq(RAX); // Pop context argument.
1808 __ popq(RAX); // Pop the new context object.
1809
1810 // Write-barrier elimination might be enabled for this context (depending on
1811 // the size). To be sure we will check if the allocated object is in old
1812 // space and if so call a leaf runtime to add it to the remembered set.
1813 EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
1814
1815 // RAX: new object
1816 // Restore the frame pointer.
1817 __ LeaveStubFrame();
1818
1819 __ ret();
1820}
1821
1822void StubCodeCompiler::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
1823 for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
1824 if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
1825
1826 Register reg = static_cast<Register>(i);
1827 intptr_t start = __ CodeSize();
1828 __ pushq(kWriteBarrierObjectReg);
1829 __ movq(kWriteBarrierObjectReg, reg);
1830 __ call(Address(THR, target::Thread::write_barrier_entry_point_offset()));
1831 __ popq(kWriteBarrierObjectReg);
1832 __ ret();
1833 intptr_t end = __ CodeSize();
1834
1835 RELEASE_ASSERT(end - start == kStoreBufferWrapperSize);
1836 }
1837}
1838
1839// Helper stub to implement Assembler::StoreIntoObject/Array.
1840// Input parameters:
1841// RDX: Object (old)
1842// RAX: Value (old or new)
1843// R13: Slot
1844// If RAX is new, add RDX to the store buffer. Otherwise RAX is old, mark RAX
1845// and add it to the mark list.
1846COMPILE_ASSERT(kWriteBarrierObjectReg == RDX);
1847COMPILE_ASSERT(kWriteBarrierValueReg == RAX);
1848COMPILE_ASSERT(kWriteBarrierSlotReg == R13);
1849static void GenerateWriteBarrierStubHelper(Assembler* assembler,
1850 Address stub_code,
1851 bool cards) {
1852 Label add_to_mark_stack, remember_card;
1853 __ testq(RAX, Immediate(1 << target::ObjectAlignment::kNewObjectBitPosition));
1854 __ j(ZERO, &add_to_mark_stack);
1855
1856 if (cards) {
1857 __ movl(TMP, FieldAddress(RDX, target::Object::tags_offset()));
1858 __ testl(TMP, Immediate(1 << target::ObjectLayout::kCardRememberedBit));
1859 __ j(NOT_ZERO, &remember_card, Assembler::kFarJump);
1860 } else {
1861#if defined(DEBUG)
1862 Label ok;
1863 __ movl(TMP, FieldAddress(RDX, target::Object::tags_offset()));
1864 __ testl(TMP, Immediate(1 << target::ObjectLayout::kCardRememberedBit));
1865 __ j(ZERO, &ok, Assembler::kFarJump);
1866 __ Stop("Wrong barrier");
1867 __ Bind(&ok);
1868#endif
1869 }
1870
  // Update the tags to record that this object has been remembered.
1872 // Note that we use 32 bit operations here to match the size of the
1873 // background sweeper which is also manipulating this 32 bit word.
1874 // RDX: Address being stored
1875 // RAX: Current tag value
1876 // lock+andl is an atomic read-modify-write.
1877 __ lock();
1878 __ andl(FieldAddress(RDX, target::Object::tags_offset()),
1879 Immediate(~(1 << target::ObjectLayout::kOldAndNotRememberedBit)));
1880
1881 // Save registers being destroyed.
1882 __ pushq(RAX);
1883 __ pushq(RCX);
1884
1885 // Load the StoreBuffer block out of the thread. Then load top_ out of the
1886 // StoreBufferBlock and add the address to the pointers_.
1887 // RDX: Address being stored
1888 __ movq(RAX, Address(THR, target::Thread::store_buffer_block_offset()));
1889 __ movl(RCX, Address(RAX, target::StoreBufferBlock::top_offset()));
1890 __ movq(
1891 Address(RAX, RCX, TIMES_8, target::StoreBufferBlock::pointers_offset()),
1892 RDX);
1893
1894 // Increment top_ and check for overflow.
1895 // RCX: top_
1896 // RAX: StoreBufferBlock
1897 Label overflow;
1898 __ incq(RCX);
1899 __ movl(Address(RAX, target::StoreBufferBlock::top_offset()), RCX);
1900 __ cmpl(RCX, Immediate(target::StoreBufferBlock::kSize));
1901 // Restore values.
1902 __ popq(RCX);
1903 __ popq(RAX);
1904 __ j(EQUAL, &overflow, Assembler::kNearJump);
1905 __ ret();
1906
1907 // Handle overflow: Call the runtime leaf function.
1908 __ Bind(&overflow);
  // Set up frame, push callee-saved registers.
1910 __ pushq(CODE_REG);
1911 __ movq(CODE_REG, stub_code);
1912 __ EnterCallRuntimeFrame(0);
1913 __ movq(CallingConventions::kArg1Reg, THR);
1914 __ CallRuntime(kStoreBufferBlockProcessRuntimeEntry, 1);
1915 __ LeaveCallRuntimeFrame();
1916 __ popq(CODE_REG);
1917 __ ret();
1918
1919 __ Bind(&add_to_mark_stack);
1920 __ pushq(RAX); // Spill.
1921 __ pushq(RCX); // Spill.
1922 __ movq(TMP, RAX); // RAX is fixed implicit operand of CAS.
1923
1924 // Atomically clear kOldAndNotMarkedBit.
1925 // Note that we use 32 bit operations here to match the size of the
1926 // background marker which is also manipulating this 32 bit word.
1927 Label retry, lost_race, marking_overflow;
1928 __ movl(RAX, FieldAddress(TMP, target::Object::tags_offset()));
1929 __ Bind(&retry);
1930 __ movl(RCX, RAX);
1931 __ testl(RCX, Immediate(1 << target::ObjectLayout::kOldAndNotMarkedBit));
1932 __ j(ZERO, &lost_race); // Marked by another thread.
1933 __ andl(RCX, Immediate(~(1 << target::ObjectLayout::kOldAndNotMarkedBit)));
1934 __ LockCmpxchgl(FieldAddress(TMP, target::Object::tags_offset()), RCX);
1935 __ j(NOT_EQUAL, &retry, Assembler::kNearJump);
1936
1937 __ movq(RAX, Address(THR, target::Thread::marking_stack_block_offset()));
1938 __ movl(RCX, Address(RAX, target::MarkingStackBlock::top_offset()));
1939 __ movq(
1940 Address(RAX, RCX, TIMES_8, target::MarkingStackBlock::pointers_offset()),
1941 TMP);
1942 __ incq(RCX);
1943 __ movl(Address(RAX, target::MarkingStackBlock::top_offset()), RCX);
1944 __ cmpl(RCX, Immediate(target::MarkingStackBlock::kSize));
1945 __ popq(RCX); // Unspill.
1946 __ popq(RAX); // Unspill.
1947 __ j(EQUAL, &marking_overflow, Assembler::kNearJump);
1948 __ ret();
1949
1950 __ Bind(&marking_overflow);
1951 __ pushq(CODE_REG);
1952 __ movq(CODE_REG, stub_code);
1953 __ EnterCallRuntimeFrame(0);
1954 __ movq(CallingConventions::kArg1Reg, THR);
1955 __ CallRuntime(kMarkingStackBlockProcessRuntimeEntry, 1);
1956 __ LeaveCallRuntimeFrame();
1957 __ popq(CODE_REG);
1958 __ ret();
1959
1960 __ Bind(&lost_race);
1961 __ popq(RCX); // Unspill.
1962 __ popq(RAX); // Unspill.
1963 __ ret();
1964
1965 if (cards) {
1966 Label remember_card_slow;
1967
1968 // Get card table.
1969 __ Bind(&remember_card);
1970 __ movq(TMP, RDX); // Object.
1971 __ andq(TMP, Immediate(target::kOldPageMask)); // OldPage.
1972 __ cmpq(Address(TMP, target::OldPage::card_table_offset()), Immediate(0));
1973 __ j(EQUAL, &remember_card_slow, Assembler::kNearJump);
1974
1975 // Dirty the card.
1976 __ subq(R13, TMP); // Offset in page.
1977 __ movq(TMP,
1978 Address(TMP, target::OldPage::card_table_offset())); // Card table.
1979 __ shrq(
1980 R13,
1981 Immediate(target::OldPage::kBytesPerCardLog2)); // Index in card table.
1982 __ movb(Address(TMP, R13, TIMES_1, 0), Immediate(1));
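    // Illustration (assuming, say, 512-byte cards, i.e. kBytesPerCardLog2 ==
    // 9): a slot 0x1234 bytes into the page dirties card 0x1234 >> 9 == 9 by
    // storing 1 into that byte of the card table.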
1983 __ ret();
1984
1985 // Card table not yet allocated.
1986 __ Bind(&remember_card_slow);
1987 __ pushq(CODE_REG);
1988 __ movq(CODE_REG, stub_code);
1989 __ EnterCallRuntimeFrame(0);
1990 __ movq(CallingConventions::kArg1Reg, RDX);
1991 __ movq(CallingConventions::kArg2Reg, R13);
1992 __ CallRuntime(kRememberCardRuntimeEntry, 2);
1993 __ LeaveCallRuntimeFrame();
1994 __ popq(CODE_REG);
1995 __ ret();
1996 }
1997}
1998
1999void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) {
2000 GenerateWriteBarrierStubHelper(
2001 assembler, Address(THR, target::Thread::write_barrier_code_offset()),
2002 false);
2003}
2004
2005void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
2006 GenerateWriteBarrierStubHelper(
2007 assembler,
2008 Address(THR, target::Thread::array_write_barrier_code_offset()), true);
2009}
2010
2011static void GenerateAllocateObjectHelper(Assembler* assembler,
2012 bool is_cls_parameterized) {
2013 // Note: Keep in sync with calling function.
2014 // kAllocationStubTypeArgumentsReg = RDX
2015 const Register kTagsReg = R8;
2016
2017 {
2018 Label slow_case;
2019 const Register kNewTopReg = R9;
2020
2021 // Allocate the object and update top to point to
2022 // next object start and initialize the allocated object.
2023 {
2024 const Register kInstanceSizeReg = RSI;
2025
2026 __ ExtractInstanceSizeFromTags(kInstanceSizeReg, kTagsReg);
2027
2028 __ movq(RAX, Address(THR, target::Thread::top_offset()));
2029 __ leaq(kNewTopReg, Address(RAX, kInstanceSizeReg, TIMES_1, 0));
2030 // Check if the allocation fits into the remaining space.
2031 __ cmpq(kNewTopReg, Address(THR, target::Thread::end_offset()));
2032 __ j(ABOVE_EQUAL, &slow_case);
2033
2034 __ movq(Address(THR, target::Thread::top_offset()), kNewTopReg);
2035 } // kInstanceSizeReg = RSI
2036
2037 // Set the tags.
2038 // 64 bit store also zeros the identity hash field.
2039 __ movq(Address(RAX, target::Object::tags_offset()), kTagsReg);
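    // Descriptive note: on x64 the tags occupy the low 32 bits of the header
    // word and the identity hash the high 32 bits; since kTagsReg holds a
    // zero-extended 32-bit tag value, this single 64-bit store initializes
    // both at once.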
2040
2041 __ addq(RAX, Immediate(kHeapObjectTag));
2042
2043 // Initialize the remaining words of the object.
2044 {
2045 const Register kNextFieldReg = RDI;
2046 __ leaq(kNextFieldReg,
2047 FieldAddress(RAX, target::Instance::first_field_offset()));
2048
2049 const Register kNullReg = R10;
2050 __ LoadObject(kNullReg, NullObject());
2051
2052 // Loop until the whole object is initialized.
2053 Label init_loop;
2054 Label done;
2055 __ Bind(&init_loop);
2056 __ cmpq(kNextFieldReg, kNewTopReg);
2057#if defined(DEBUG)
2058 static const bool kJumpLength = Assembler::kFarJump;
2059#else
2060 static const bool kJumpLength = Assembler::kNearJump;
2061#endif // DEBUG
2062 __ j(ABOVE_EQUAL, &done, kJumpLength);
2063 __ StoreIntoObjectNoBarrier(RAX, Address(kNextFieldReg, 0), kNullReg);
2064 __ addq(kNextFieldReg, Immediate(target::kWordSize));
2065 __ jmp(&init_loop, Assembler::kNearJump);
2066 __ Bind(&done);
2067 } // kNextFieldReg = RDI, kNullReg = R10
2068
2069 if (is_cls_parameterized) {
2070 Label not_parameterized_case;
2071
2072 const Register kClsIdReg = R9;
2073 const Register kTypeOffsetReg = RDI;
2074
2075 __ ExtractClassIdFromTags(kClsIdReg, kTagsReg);
2076
2077 // Load class' type_arguments_field offset in words.
2078 __ LoadClassById(kTypeOffsetReg, kClsIdReg);
2079 __ movl(
2080 kTypeOffsetReg,
2081 FieldAddress(kTypeOffsetReg,
2082 target::Class::
2083 host_type_arguments_field_offset_in_words_offset()));
2084
2085 // Set the type arguments in the new object.
2086 __ StoreIntoObject(RAX, FieldAddress(RAX, kTypeOffsetReg, TIMES_8, 0),
2087 kAllocationStubTypeArgumentsReg);
2088
2089 __ Bind(&not_parameterized_case);
2090 } // kTypeOffsetReg = RDI;
2091
2092 __ ret();
2093
2094 __ Bind(&slow_case);
2095 } // kNewTopReg = R9;
2096
2097 // Fall back on slow case:
2098 if (!is_cls_parameterized) {
2099 __ LoadObject(kAllocationStubTypeArgumentsReg, NullObject());
2100 }
2101 // Tail call to generic allocation stub.
2102 __ jmp(
2103 Address(THR, target::Thread::allocate_object_slow_entry_point_offset()));
2104}
2105
2106// Called for inline allocation of objects (any class).
2107void StubCodeCompiler::GenerateAllocateObjectStub(Assembler* assembler) {
2108 GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/false);
2109}
2110
2111void StubCodeCompiler::GenerateAllocateObjectParameterizedStub(
2112 Assembler* assembler) {
2113 GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/true);
2114}
2115
2116void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
2117 // Note: Keep in sync with calling stub.
2118 // kAllocationStubTypeArgumentsReg = RDX
2119 const Register kTagsToClsIdReg = R8;
2120
2121 if (!FLAG_use_bare_instructions) {
2122 __ movq(CODE_REG,
2123 Address(THR, target::Thread::call_to_runtime_stub_offset()));
2124 }
2125
2126 __ ExtractClassIdFromTags(kTagsToClsIdReg, kTagsToClsIdReg);
2127
2128 // Create a stub frame.
2129 // Ensure constant pool is allowed so we can e.g. load class object.
2130 __ EnterStubFrame();
2131
  // Set up space on the stack for the return value.
2133 __ LoadObject(RAX, NullObject());
2134 __ pushq(RAX);
2135
2136 // Push class of object to be allocated.
2137 __ LoadClassById(RAX, kTagsToClsIdReg);
2138 __ pushq(RAX);
2139
2140 // Must be Object::null() if non-parameterized class.
2141 __ pushq(kAllocationStubTypeArgumentsReg);
2142
2143 __ CallRuntime(kAllocateObjectRuntimeEntry, 2);
2144
2145 __ popq(RAX); // Pop argument (type arguments of object).
2146 __ popq(RAX); // Pop argument (class of object).
2147 __ popq(RAX); // Pop result (newly allocated object).
2148
  // Write-barrier elimination is enabled for [cls] and we therefore need to
  // ensure that the object is in new-space or has the remembered bit set.
2151 EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
2152
2153 // RAX: new object
2154 // Restore the frame pointer.
2155 __ LeaveStubFrame();
2156
2157 __ ret();
2158}
2159
2160// Called for inline allocation of objects.
2161void StubCodeCompiler::GenerateAllocationStubForClass(
2162 Assembler* assembler,
2163 UnresolvedPcRelativeCalls* unresolved_calls,
2164 const Class& cls,
2165 const Code& allocate_object,
2166 const Code& allocat_object_parametrized) {
2167 static_assert(kAllocationStubTypeArgumentsReg == RDX,
2168 "Adjust register allocation in the AllocationStub");
2169
2170 classid_t cls_id = target::Class::GetId(cls);
2171 ASSERT(cls_id != kIllegalCid);
2172
2173 RELEASE_ASSERT(AllocateObjectInstr::WillAllocateNewOrRemembered(cls));
2174
2175 const intptr_t cls_type_arg_field_offset =
2176 target::Class::TypeArgumentsFieldOffset(cls);
2177
2178 // The generated code is different if the class is parameterized.
2179 const bool is_cls_parameterized = target::Class::NumTypeArguments(cls) > 0;
2180 ASSERT(!is_cls_parameterized ||
2181 cls_type_arg_field_offset != target::Class::kNoTypeArguments);
2182
2183 const intptr_t instance_size = target::Class::GetInstanceSize(cls);
2184 ASSERT(instance_size > 0);
2185 // User-defined classes should always be allocatable in new space.
2186 RELEASE_ASSERT(target::Heap::IsAllocatableInNewSpace(instance_size));
2187
2188 const uint32_t tags =
2189 target::MakeTagWordForNewSpaceObject(cls_id, instance_size);
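  // Sketch: unlike the Context stub above, the size here is a compile-time
  // constant, so the tag word (class id, size tag, and new-space GC bits) can
  // be baked into a single immediate and simply stored by the fast path.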
2190
2191 // Note: Keep in sync with helper function.
2192 // kAllocationStubTypeArgumentsReg = RDX
2193 const Register kTagsReg = R8;
2194
2195 __ movq(kTagsReg, Immediate(tags));
2196
2197 // Load the appropriate generic alloc. stub.
2198 if (!FLAG_use_slow_path && FLAG_inline_alloc &&
2199 !target::Class::TraceAllocation(cls) &&
2200 target::SizeFitsInSizeTag(instance_size)) {
2201 if (is_cls_parameterized) {
2202 // TODO(41974): Assign all allocation stubs to the root loading unit?
2203 if (false &&
2204 !IsSameObject(NullObject(),
2205 CastHandle<Object>(allocat_object_parametrized))) {
2206 __ GenerateUnRelocatedPcRelativeTailCall();
2207 unresolved_calls->Add(new UnresolvedPcRelativeCall(
2208 __ CodeSize(), allocat_object_parametrized, /*is_tail_call=*/true));
2209 } else {
2210 __ jmp(Address(THR,
2211 target::Thread::
2212 allocate_object_parameterized_entry_point_offset()));
2213 }
2214 } else {
2215 // TODO(41974): Assign all allocation stubs to the root loading unit?
2216 if (false &&
2217 !IsSameObject(NullObject(), CastHandle<Object>(allocate_object))) {
2218 __ GenerateUnRelocatedPcRelativeTailCall();
2219 unresolved_calls->Add(new UnresolvedPcRelativeCall(
2220 __ CodeSize(), allocate_object, /*is_tail_call=*/true));
2221 } else {
2222 __ jmp(
2223 Address(THR, target::Thread::allocate_object_entry_point_offset()));
2224 }
2225 }
2226 } else {
2227 if (!is_cls_parameterized) {
2228 __ LoadObject(kAllocationStubTypeArgumentsReg, NullObject());
2229 }
2230 __ jmp(Address(THR,
2231 target::Thread::allocate_object_slow_entry_point_offset()));
2232 }
2233}
2234
2235// Called for invoking "dynamic noSuchMethod(Invocation invocation)" function
2236// from the entry code of a dart function after an error in passed argument
2237// name or number is detected.
2238// Input parameters:
2239// RSP : points to return address.
2240// RSP + 8 : address of last argument.
2241// R10 : arguments descriptor array.
2242void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
2243 Assembler* assembler) {
2244 __ EnterStubFrame();
2245
2246 // Load the receiver.
2247 __ movq(R13, FieldAddress(R10, target::ArgumentsDescriptor::size_offset()));
2248 __ movq(RAX,
2249 Address(RBP, R13, TIMES_4,
2250 target::frame_layout.param_end_from_fp * target::kWordSize));
2251
2252 // Load the function.
2253 __ movq(RBX, FieldAddress(RAX, target::Closure::function_offset()));
2254
2255 __ pushq(Immediate(0)); // Result slot.
2256 __ pushq(RAX); // Receiver.
2257 __ pushq(RBX); // Function.
2258 __ pushq(R10); // Arguments descriptor array.
2259
2260 // Adjust arguments count.
2261 __ cmpq(
2262 FieldAddress(R10, target::ArgumentsDescriptor::type_args_len_offset()),
2263 Immediate(0));
2264 __ movq(R10, R13);
2265 Label args_count_ok;
2266 __ j(EQUAL, &args_count_ok, Assembler::kNearJump);
2267 __ addq(R10, Immediate(target::ToRawSmi(1))); // Include the type arguments.
2268 __ Bind(&args_count_ok);
2269
2270 // R10: Smi-tagged arguments array length.
2271 PushArrayOfArguments(assembler);
2272
2273 const intptr_t kNumArgs = 4;
2274 __ CallRuntime(kNoSuchMethodFromPrologueRuntimeEntry, kNumArgs);
2275 // noSuchMethod on closures always throws an error, so it will never return.
2276 __ int3();
2277}
2278
2279// Cannot use function object from ICData as it may be the inlined
2280// function and not the top-scope function.
2281void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
2282 Assembler* assembler) {
2283 if (FLAG_precompiled_mode) {
2284 __ Breakpoint();
2285 return;
2286 }
2287 Register ic_reg = RBX;
2288 Register func_reg = RDI;
2289 if (FLAG_trace_optimized_ic_calls) {
2290 __ EnterStubFrame();
    __ pushq(func_reg);  // Preserve.
2292 __ pushq(ic_reg); // Preserve.
2293 __ pushq(ic_reg); // Argument.
2294 __ pushq(func_reg); // Argument.
2295 __ CallRuntime(kTraceICCallRuntimeEntry, 2);
    __ popq(RAX);  // Discard argument.
    __ popq(RAX);  // Discard argument.
2298 __ popq(ic_reg); // Restore.
2299 __ popq(func_reg); // Restore.
2300 __ LeaveStubFrame();
2301 }
2302 __ incl(FieldAddress(func_reg, target::Function::usage_counter_offset()));
2303}
2304
2305// Loads function into 'temp_reg', preserves 'ic_reg'.
2306void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
2307 Register temp_reg) {
2308 if (FLAG_precompiled_mode) {
2309 __ Breakpoint();
2310 return;
2311 }
2312 if (FLAG_optimization_counter_threshold >= 0) {
2313 Register ic_reg = RBX;
2314 Register func_reg = temp_reg;
2315 ASSERT(ic_reg != func_reg);
2316 __ Comment("Increment function counter");
2317 __ movq(func_reg, FieldAddress(ic_reg, target::ICData::owner_offset()));
2318 __ incl(FieldAddress(func_reg, target::Function::usage_counter_offset()));
2319 }
2320}
2321
2322// Note: RBX must be preserved.
2323// Attempt a quick Smi operation for known operations ('kind'). The ICData
2324// must have been primed with a Smi/Smi check that will be used for counting
2325// the invocations.
2326static void EmitFastSmiOp(Assembler* assembler,
2327 Token::Kind kind,
2328 intptr_t num_args,
2329 Label* not_smi_or_overflow) {
2330 __ Comment("Fast Smi op");
2331 ASSERT(num_args == 2);
2332 __ movq(RAX, Address(RSP, +2 * target::kWordSize)); // Left.
2333 __ movq(RCX, Address(RSP, +1 * target::kWordSize)); // Right
2334 __ movq(R13, RCX);
2335 __ orq(R13, RAX);
2336 __ testq(R13, Immediate(kSmiTagMask));
2337 __ j(NOT_ZERO, not_smi_or_overflow);
2338 switch (kind) {
2339 case Token::kADD: {
2340 __ addq(RAX, RCX);
2341 __ j(OVERFLOW, not_smi_or_overflow);
2342 break;
2343 }
2344 case Token::kLT: {
2345 __ cmpq(RAX, RCX);
2346 __ setcc(GREATER_EQUAL, ByteRegisterOf(RAX));
2347 __ movzxb(RAX, RAX); // RAX := RAX < RCX ? 0 : 1
2348 __ movq(RAX,
2349 Address(THR, RAX, TIMES_8, target::Thread::bool_true_offset()));
2350 ASSERT(target::Thread::bool_true_offset() + 8 ==
2351 target::Thread::bool_false_offset());
2352 break;
2353 }
2354 case Token::kEQ: {
2355 __ cmpq(RAX, RCX);
2356 __ setcc(NOT_EQUAL, ByteRegisterOf(RAX));
2357 __ movzxb(RAX, RAX); // RAX := RAX == RCX ? 0 : 1
2358 __ movq(RAX,
2359 Address(THR, RAX, TIMES_8, target::Thread::bool_true_offset()));
2360 ASSERT(target::Thread::bool_true_offset() + 8 ==
2361 target::Thread::bool_false_offset());
2362 break;
2363 }
2364 default:
2365 UNIMPLEMENTED();
2366 }
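  // Note on the comparison cases above: setcc materializes the negated
  // condition as 0 or 1 in RAX, and because Thread::bool_false_offset() ==
  // Thread::bool_true_offset() + 8 (asserted above), the indexed load yields
  // true for 0 and false for 1.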
2367
2368 // RBX: IC data object (preserved).
2369 __ movq(R13, FieldAddress(RBX, target::ICData::entries_offset()));
2370 // R13: ic_data_array with check entries: classes and target functions.
2371 __ leaq(R13, FieldAddress(R13, target::Array::data_offset()));
2372// R13: points directly to the first ic data array element.
2373#if defined(DEBUG)
2374 // Check that first entry is for Smi/Smi.
2375 Label error, ok;
2376 const Immediate& imm_smi_cid = Immediate(target::ToRawSmi(kSmiCid));
2377 __ cmpq(Address(R13, 0 * target::kWordSize), imm_smi_cid);
2378 __ j(NOT_EQUAL, &error, Assembler::kNearJump);
2379 __ cmpq(Address(R13, 1 * target::kWordSize), imm_smi_cid);
2380 __ j(EQUAL, &ok, Assembler::kNearJump);
2381 __ Bind(&error);
2382 __ Stop("Incorrect IC data");
2383 __ Bind(&ok);
2384#endif
2385
2386 if (FLAG_optimization_counter_threshold >= 0) {
2387 const intptr_t count_offset =
2388 target::ICData::CountIndexFor(num_args) * target::kWordSize;
2389 // Update counter, ignore overflow.
2390 __ addq(Address(R13, count_offset), Immediate(target::ToRawSmi(1)));
2391 }
2392
2393 __ ret();
2394}
2395
2396// Saves the offset of the target entry-point (from the Function) into R8.
2397//
2398// Must be the first code generated, since any code before will be skipped in
2399// the unchecked entry-point.
2400static void GenerateRecordEntryPoint(Assembler* assembler) {
2401 Label done;
2402 __ movq(R8,
2403 Immediate(target::Function::entry_point_offset() - kHeapObjectTag));
2404 __ jmp(&done);
2405 __ BindUncheckedEntryPoint();
2406 __ movq(R8, Immediate(target::Function::entry_point_offset(
2407 CodeEntryKind::kUnchecked) -
2408 kHeapObjectTag));
2409 __ Bind(&done);
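  // R8 now holds Function::entry_point_offset() (or the unchecked variant),
  // minus kHeapObjectTag, so a later `addq(R8, <function>); jmp(Address(R8,
  // 0))` dispatches through whichever entry point was recorded.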
2410}
2411
2412// Generate inline cache check for 'num_args'.
2413// RDX: receiver (if instance call)
2414// RBX: ICData
2415// RSP[0]: return address
2416// Control flow:
2417// - If receiver is null -> jump to IC miss.
2418// - If receiver is Smi -> load Smi class.
2419// - If receiver is not-Smi -> load receiver's class.
2420// - Check if 'num_args' (including receiver) match any IC data group.
2421// - Match found -> jump to target.
2422// - Match not found -> jump to IC miss.
2423void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
2424 Assembler* assembler,
2425 intptr_t num_args,
2426 const RuntimeEntry& handle_ic_miss,
2427 Token::Kind kind,
2428 Optimized optimized,
2429 CallType type,
2430 Exactness exactness) {
2431 if (FLAG_precompiled_mode) {
2432 __ Breakpoint();
2433 return;
2434 }
2435
2436 const bool save_entry_point = kind == Token::kILLEGAL;
2437 if (save_entry_point) {
2438 GenerateRecordEntryPoint(assembler);
2439 }
2440
2441 if (optimized == kOptimized) {
2442 GenerateOptimizedUsageCounterIncrement(assembler);
2443 } else {
2444 GenerateUsageCounterIncrement(assembler, /* scratch */ RCX);
2445 }
2446
2447 ASSERT(num_args == 1 || num_args == 2);
2448#if defined(DEBUG)
2449 {
2450 Label ok;
2451 // Check that the IC data array has NumArgsTested() == num_args.
2452 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
2453 __ movl(RCX, FieldAddress(RBX, target::ICData::state_bits_offset()));
2454 ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed.
2455 __ andq(RCX, Immediate(target::ICData::NumArgsTestedMask()));
2456 __ cmpq(RCX, Immediate(num_args));
2457 __ j(EQUAL, &ok, Assembler::kNearJump);
2458 __ Stop("Incorrect stub for IC data");
2459 __ Bind(&ok);
2460 }
2461#endif // DEBUG
2462
2463#if !defined(PRODUCT)
2464 Label stepping, done_stepping;
2465 if (optimized == kUnoptimized) {
2466 __ Comment("Check single stepping");
2467 __ LoadIsolate(RAX);
2468 __ cmpb(Address(RAX, target::Isolate::single_step_offset()), Immediate(0));
2469 __ j(NOT_EQUAL, &stepping);
2470 __ Bind(&done_stepping);
2471 }
2472#endif
2473
2474 Label not_smi_or_overflow;
2475 if (kind != Token::kILLEGAL) {
2476 EmitFastSmiOp(assembler, kind, num_args, &not_smi_or_overflow);
2477 }
2478 __ Bind(&not_smi_or_overflow);
2479
2480 __ Comment("Extract ICData initial values and receiver cid");
2481 // RBX: IC data object (preserved).
2482 __ movq(R13, FieldAddress(RBX, target::ICData::entries_offset()));
2483 // R13: ic_data_array with check entries: classes and target functions.
2484 __ leaq(R13, FieldAddress(R13, target::Array::data_offset()));
2485 // R13: points directly to the first ic data array element.
2486
2487 if (type == kInstanceCall) {
2488 __ LoadTaggedClassIdMayBeSmi(RAX, RDX);
2489 __ movq(R10, FieldAddress(
2490 RBX, target::CallSiteData::arguments_descriptor_offset()));
2491 if (num_args == 2) {
2492 __ movq(RCX,
2493 FieldAddress(R10, target::ArgumentsDescriptor::count_offset()));
2494 __ movq(R9, Address(RSP, RCX, TIMES_4, -target::kWordSize));
2495 __ LoadTaggedClassIdMayBeSmi(RCX, R9);
2496 }
2497 } else {
2498 __ movq(R10, FieldAddress(
2499 RBX, target::CallSiteData::arguments_descriptor_offset()));
2500 __ movq(RCX,
2501 FieldAddress(R10, target::ArgumentsDescriptor::count_offset()));
2502 __ movq(RDX, Address(RSP, RCX, TIMES_4, 0));
2503 __ LoadTaggedClassIdMayBeSmi(RAX, RDX);
2504 if (num_args == 2) {
2505 __ movq(R9, Address(RSP, RCX, TIMES_4, -target::kWordSize));
2506 __ LoadTaggedClassIdMayBeSmi(RCX, R9);
2507 }
2508 }
2509 // RAX: first argument class ID as Smi.
2510 // RCX: second argument class ID as Smi.
2511 // R10: args descriptor
2512
2513 // Loop that checks if there is an IC data match.
2514 Label loop, found, miss;
2515 __ Comment("ICData loop");
2516
  // The generic stub, which is generated only once, gets more loop unrolling
  // than the specialized ones.
2518 const bool optimize = kind == Token::kILLEGAL;
2519 const intptr_t target_offset =
2520 target::ICData::TargetIndexFor(num_args) * target::kWordSize;
2521 const intptr_t count_offset =
2522 target::ICData::CountIndexFor(num_args) * target::kWordSize;
2523 const intptr_t exactness_offset =
2524 target::ICData::ExactnessIndexFor(num_args) * target::kWordSize;
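  // Descriptive note: each check entry in the IC data array is laid out as
  //   [cid_0, (cid_1 if num_args == 2), target, count, (exactness)],
  // so the three offsets above address fields of the entry that R13 currently
  // points at.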
2525
2526 __ Bind(&loop);
2527 for (int unroll = optimize ? 4 : 2; unroll >= 0; unroll--) {
2528 Label update;
2529 __ movq(R9, Address(R13, 0));
2530 __ cmpq(RAX, R9); // Class id match?
2531 if (num_args == 2) {
2532 __ j(NOT_EQUAL, &update); // Continue.
2533 __ movq(R9, Address(R13, target::kWordSize));
2534 // R9: next class ID to check (smi).
2535 __ cmpq(RCX, R9); // Class id match?
2536 }
2537 __ j(EQUAL, &found); // Break.
2538
2539 __ Bind(&update);
2540
2541 const intptr_t entry_size = target::ICData::TestEntryLengthFor(
2542 num_args, exactness == kCheckExactness) *
2543 target::kWordSize;
2544 __ addq(R13, Immediate(entry_size)); // Next entry.
2545
2546 __ cmpq(R9, Immediate(target::ToRawSmi(kIllegalCid))); // Done?
2547 if (unroll == 0) {
2548 __ j(NOT_EQUAL, &loop);
2549 } else {
2550 __ j(EQUAL, &miss);
2551 }
2552 }
2553
2554 __ Bind(&miss);
2555 __ Comment("IC miss");
2556 // Compute address of arguments (first read number of arguments from
2557 // arguments descriptor array and then compute address on the stack).
2558 __ movq(RAX, FieldAddress(R10, target::ArgumentsDescriptor::count_offset()));
2559 __ leaq(RAX, Address(RSP, RAX, TIMES_4, 0)); // RAX is Smi.
2560 __ EnterStubFrame();
2561 if (save_entry_point) {
2562 __ SmiTag(R8); // Entry-point offset is not Smi.
2563 __ pushq(R8); // Preserve entry point.
2564 }
2565 __ pushq(R10); // Preserve arguments descriptor array.
2566 __ pushq(RBX); // Preserve IC data object.
2567 __ pushq(Immediate(0)); // Result slot.
2568 // Push call arguments.
2569 for (intptr_t i = 0; i < num_args; i++) {
2570 __ movq(RCX, Address(RAX, -target::kWordSize * i));
2571 __ pushq(RCX);
2572 }
2573 __ pushq(RBX); // Pass IC data object.
2574 __ CallRuntime(handle_ic_miss, num_args + 1);
2575 // Remove the call arguments pushed earlier, including the IC data object.
2576 for (intptr_t i = 0; i < num_args + 1; i++) {
2577 __ popq(RAX);
2578 }
2579 __ popq(RAX); // Pop returned function object into RAX.
2580 __ popq(RBX); // Restore IC data array.
2581 __ popq(R10); // Restore arguments descriptor array.
2582 if (save_entry_point) {
2583 __ popq(R8); // Restore entry point.
2584 __ SmiUntag(R8); // Entry-point offset is not Smi.
2585 }
2586 __ RestoreCodePointer();
2587 __ LeaveStubFrame();
2588 Label call_target_function;
2589 if (!FLAG_lazy_dispatchers) {
2590 GenerateDispatcherCode(assembler, &call_target_function);
2591 } else {
2592 __ jmp(&call_target_function);
2593 }
2594
2595 __ Bind(&found);
2596 // R13: Pointer to an IC data check group.
2597 Label call_target_function_through_unchecked_entry;
2598 if (exactness == kCheckExactness) {
2599 Label exactness_ok;
2600 ASSERT(num_args == 1);
2601 __ movq(RAX, Address(R13, exactness_offset));
2602 __ cmpq(RAX, Immediate(target::ToRawSmi(
2603 StaticTypeExactnessState::HasExactSuperType().Encode())));
2604 __ j(LESS, &exactness_ok);
2605 __ j(EQUAL, &call_target_function_through_unchecked_entry);
2606
2607 // Check trivial exactness.
2608 // Note: ICDataLayout::receivers_static_type_ is guaranteed to be not null
2609 // because we only emit calls to this stub when it is not null.
2610 __ movq(RCX,
2611 FieldAddress(RBX, target::ICData::receivers_static_type_offset()));
2612 __ movq(RCX, FieldAddress(RCX, target::Type::arguments_offset()));
2613 // RAX contains an offset to type arguments in words as a smi,
2614 // hence TIMES_4. RDX is guaranteed to be non-smi because it is expected
2615 // to have type arguments.
2616 __ cmpq(RCX, FieldAddress(RDX, RAX, TIMES_4, 0));
2617 __ j(EQUAL, &call_target_function_through_unchecked_entry);
2618
2619 // Update exactness state (not-exact anymore).
2620 __ movq(Address(R13, exactness_offset),
2621 Immediate(target::ToRawSmi(
2622 StaticTypeExactnessState::NotExact().Encode())));
2623 __ Bind(&exactness_ok);
2624 }
2625 __ movq(RAX, Address(R13, target_offset));
2626
2627 if (FLAG_optimization_counter_threshold >= 0) {
2628 __ Comment("Update ICData counter");
2629 // Ignore overflow.
2630 __ addq(Address(R13, count_offset), Immediate(target::ToRawSmi(1)));
2631 }
2632
2633 __ Comment("Call target (via specified entry point)");
2634 __ Bind(&call_target_function);
2635 // RAX: Target function.
2636 __ movq(CODE_REG, FieldAddress(RAX, target::Function::code_offset()));
2637 if (save_entry_point) {
2638 __ addq(R8, RAX);
2639 __ jmp(Address(R8, 0));
2640 } else {
2641 __ jmp(FieldAddress(RAX, target::Function::entry_point_offset()));
2642 }
2643
2644 if (exactness == kCheckExactness) {
2645 __ Bind(&call_target_function_through_unchecked_entry);
2646 if (FLAG_optimization_counter_threshold >= 0) {
2647 __ Comment("Update ICData counter");
2648 // Ignore overflow.
2649 __ addq(Address(R13, count_offset), Immediate(target::ToRawSmi(1)));
2650 }
2651 __ Comment("Call target (via unchecked entry point)");
2652 __ movq(RAX, Address(R13, target_offset));
2653 __ movq(CODE_REG, FieldAddress(RAX, target::Function::code_offset()));
2654 __ jmp(FieldAddress(
2655 RAX, target::Function::entry_point_offset(CodeEntryKind::kUnchecked)));
2656 }
2657
2658#if !defined(PRODUCT)
2659 if (optimized == kUnoptimized) {
2660 __ Bind(&stepping);
2661 __ EnterStubFrame();
2662 if (type == kInstanceCall) {
2663 __ pushq(RDX); // Preserve receiver.
2664 }
2665 __ pushq(RBX); // Preserve ICData.
2666 if (save_entry_point) {
2667 __ SmiTag(R8); // Entry-point offset is not Smi.
2668 __ pushq(R8); // Preserve entry point.
2669 }
2670 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2671 if (save_entry_point) {
2672 __ popq(R8); // Restore entry point.
2673 __ SmiUntag(R8);
2674 }
2675 __ popq(RBX); // Restore ICData.
2676 if (type == kInstanceCall) {
2677 __ popq(RDX); // Restore receiver.
2678 }
2679 __ RestoreCodePointer();
2680 __ LeaveStubFrame();
2681 __ jmp(&done_stepping);
2682 }
2683#endif
2684}
2685
2686// RDX: receiver
2687// RBX: ICData
2688// RSP[0]: return address
2689void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub(
2690 Assembler* assembler) {
2691 GenerateNArgsCheckInlineCacheStub(
2692 assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
2693 kUnoptimized, kInstanceCall, kIgnoreExactness);
2694}
2695
2696// RDX: receiver
2697// RBX: ICData
2698// RSP[0]: return address
2699void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub(
2700 Assembler* assembler) {
2701 GenerateNArgsCheckInlineCacheStub(
2702 assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
2703 kUnoptimized, kInstanceCall, kCheckExactness);
2704}
2705
2706// RDX: receiver
2707// RBX: ICData
2708// RSP[0]: return address
2709void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub(
2710 Assembler* assembler) {
2711 GenerateNArgsCheckInlineCacheStub(
2712 assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2713 kUnoptimized, kInstanceCall, kIgnoreExactness);
2714}
2715
2716// RDX: receiver
2717// RBX: ICData
2718// RSP[0]: return address
2719void StubCodeCompiler::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
2720 GenerateNArgsCheckInlineCacheStub(
2721 assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD,
2722 kUnoptimized, kInstanceCall, kIgnoreExactness);
2723}
2724
2725// RDX: receiver
2726// RBX: ICData
2727// RSP[0]: return address
2728void StubCodeCompiler::GenerateSmiLessInlineCacheStub(Assembler* assembler) {
2729 GenerateNArgsCheckInlineCacheStub(
2730 assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT,
2731 kUnoptimized, kInstanceCall, kIgnoreExactness);
2732}
2733
2734// RDX: receiver
2735// RBX: ICData
2736// RSP[0]: return address
2737void StubCodeCompiler::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
2738 GenerateNArgsCheckInlineCacheStub(
2739 assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ,
2740 kUnoptimized, kInstanceCall, kIgnoreExactness);
2741}
2742
2743// RDX: receiver
2744// RBX: ICData
2745// RDI: Function
2746// RSP[0]: return address
2747void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub(
2748 Assembler* assembler) {
2749 GenerateNArgsCheckInlineCacheStub(
2750 assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
2751 kOptimized, kInstanceCall, kIgnoreExactness);
2752}
2753
2754// RDX: receiver
2755// RBX: ICData
2756// RDI: Function
2757// RSP[0]: return address
2758void StubCodeCompiler::
2759 GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub(
2760 Assembler* assembler) {
2761 GenerateNArgsCheckInlineCacheStub(
2762 assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
2763 kOptimized, kInstanceCall, kCheckExactness);
2764}
2765
2766// RDX: receiver
2767// RBX: ICData
2768// RDI: Function
2769// RSP[0]: return address
2770void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub(
2771 Assembler* assembler) {
2772 GenerateNArgsCheckInlineCacheStub(
2773 assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2774 kOptimized, kInstanceCall, kIgnoreExactness);
2775}
2776
2777// RBX: ICData
2778// RSP[0]: return address
2779void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
2780 Assembler* assembler) {
2781 GenerateRecordEntryPoint(assembler);
2782 GenerateUsageCounterIncrement(assembler, /* scratch */ RCX);
2783#if defined(DEBUG)
2784 {
2785 Label ok;
2786 // Check that the IC data array has NumArgsTested() == 0.
2787 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
2788 __ movl(RCX, FieldAddress(RBX, target::ICData::state_bits_offset()));
2789 ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed.
2790 __ andq(RCX, Immediate(target::ICData::NumArgsTestedMask()));
2791 __ cmpq(RCX, Immediate(0));
2792 __ j(EQUAL, &ok, Assembler::kNearJump);
2793 __ Stop("Incorrect IC data for unoptimized static call");
2794 __ Bind(&ok);
2795 }
2796#endif // DEBUG
2797
2798#if !defined(PRODUCT)
2799 // Check single stepping.
2800 Label stepping, done_stepping;
2801 __ LoadIsolate(RAX);
2802 __ movzxb(RAX, Address(RAX, target::Isolate::single_step_offset()));
2803 __ cmpq(RAX, Immediate(0));
2804#if defined(DEBUG)
2805 static const bool kJumpLength = Assembler::kFarJump;
2806#else
2807 static const bool kJumpLength = Assembler::kNearJump;
2808#endif // DEBUG
2809 __ j(NOT_EQUAL, &stepping, kJumpLength);
2810 __ Bind(&done_stepping);
2811#endif
2812
2813 // RBX: IC data object (preserved).
2814 __ movq(R12, FieldAddress(RBX, target::ICData::entries_offset()));
2815 // R12: ic_data_array with entries: target functions and count.
2816 __ leaq(R12, FieldAddress(R12, target::Array::data_offset()));
2817 // R12: points directly to the first ic data array element.
2818 const intptr_t target_offset =
2819 target::ICData::TargetIndexFor(0) * target::kWordSize;
2820 const intptr_t count_offset =
2821 target::ICData::CountIndexFor(0) * target::kWordSize;
2822
2823 if (FLAG_optimization_counter_threshold >= 0) {
2824 // Increment count for this call, ignore overflow.
2825 __ addq(Address(R12, count_offset), Immediate(target::ToRawSmi(1)));
2826 }
2827
2828 // Load arguments descriptor into R10.
2829 __ movq(R10, FieldAddress(
2830 RBX, target::CallSiteData::arguments_descriptor_offset()));
2831
2832 // Get function and call it, if possible.
2833 __ movq(RAX, Address(R12, target_offset));
2834 __ movq(CODE_REG, FieldAddress(RAX, target::Function::code_offset()));
2835
2836 __ addq(R8, RAX);
2837 __ jmp(Address(R8, 0));
2838
2839#if !defined(PRODUCT)
2840 __ Bind(&stepping);
2841 __ EnterStubFrame();
2842 __ pushq(RBX); // Preserve IC data object.
2843 __ SmiTag(R8); // Entry-point is not Smi.
2844 __ pushq(R8); // Preserve entry-point.
2845 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2846 __ popq(R8); // Restore entry-point.
2847 __ SmiUntag(R8);
2848 __ popq(RBX);
2849 __ RestoreCodePointer();
2850 __ LeaveStubFrame();
2851 __ jmp(&done_stepping, Assembler::kNearJump);
2852#endif
2853}
2854
2855// RBX: ICData
2856// RSP[0]: return address
2857void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub(
2858 Assembler* assembler) {
2859 GenerateNArgsCheckInlineCacheStub(
2860 assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
2861 kUnoptimized, kStaticCall, kIgnoreExactness);
2862}
2863
2864// RBX: ICData
2865// RSP[0]: return address
2866void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub(
2867 Assembler* assembler) {
2868 GenerateNArgsCheckInlineCacheStub(
2869 assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2870 kUnoptimized, kStaticCall, kIgnoreExactness);
2871}
2872
2873// Stub for compiling a function and jumping to the compiled code.
2874// R10: Arguments descriptor.
2875// RAX: Function.
2876void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
2877 __ EnterStubFrame();
2878 __ pushq(R10); // Preserve arguments descriptor array.
2879 __ pushq(RAX); // Pass function.
2880 __ CallRuntime(kCompileFunctionRuntimeEntry, 1);
2881 __ popq(RAX); // Restore function.
2882 __ popq(R10); // Restore arguments descriptor array.
2883 __ LeaveStubFrame();
2884
2885 // When using the interpreter, the function's code may now point to the
2886 // InterpretCall stub. Make sure RAX, R10, and RBX are preserved.
2887 __ movq(CODE_REG, FieldAddress(RAX, target::Function::code_offset()));
2888 __ movq(RCX, FieldAddress(RAX, target::Function::entry_point_offset()));
2889 __ jmp(RCX);
2890}
2891
2892// Stub for interpreting a function call.
2893// R10: Arguments descriptor.
2894// RAX: Function.
2895void StubCodeCompiler::GenerateInterpretCallStub(Assembler* assembler) {
2896 if (FLAG_precompiled_mode) {
2897 __ Stop("Not using interpreter");
2898 return;
2899 }
2900
2901 __ EnterStubFrame();
2902
2903#if defined(DEBUG)
2904 {
2905 Label ok;
2906 // Check that we are always entering from Dart code.
2907 __ movq(R8, Immediate(VMTag::kDartCompiledTagId));
2908 __ cmpq(R8, Assembler::VMTagAddress());
2909 __ j(EQUAL, &ok, Assembler::kNearJump);
2910 __ Stop("Not coming from Dart code.");
2911 __ Bind(&ok);
2912 }
2913#endif
2914
2915 // Adjust arguments count for type arguments vector.
2916 __ movq(R11, FieldAddress(R10, target::ArgumentsDescriptor::count_offset()));
2917 __ SmiUntag(R11);
2918 __ cmpq(
2919 FieldAddress(R10, target::ArgumentsDescriptor::type_args_len_offset()),
2920 Immediate(0));
2921 Label args_count_ok;
2922 __ j(EQUAL, &args_count_ok, Assembler::kNearJump);
2923 __ incq(R11);
2924 __ Bind(&args_count_ok);
2925
2926 // Compute argv.
2927 __ leaq(R12,
2928 Address(RBP, R11, TIMES_8,
2929 target::frame_layout.param_end_from_fp * target::kWordSize));
2930
2931 // Indicate decreasing memory addresses of arguments with negative argc.
2932 __ negq(R11);
2933
2934 // Reserve shadow space for args and align frame before entering C++ world.
2935 __ subq(RSP, Immediate(5 * target::kWordSize));
2936 if (OS::ActivationFrameAlignment() > 1) {
2937 __ andq(RSP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
2938 }
2939
2940 __ movq(CallingConventions::kArg1Reg, RAX); // Function.
2941 __ movq(CallingConventions::kArg2Reg, R10); // Arguments descriptor.
2942 __ movq(CallingConventions::kArg3Reg, R11); // Negative argc.
2943 __ movq(CallingConventions::kArg4Reg, R12); // Argv.
2944
2945#if defined(TARGET_OS_WINDOWS)
2946 __ movq(Address(RSP, 0 * target::kWordSize), THR); // Thread.
2947#else
2948 __ movq(CallingConventions::kArg5Reg, THR); // Thread.
2949#endif
2950 // Save exit frame information to enable stack walking as we are about
2951 // to transition to Dart VM C++ code.
2952 __ movq(Address(THR, target::Thread::top_exit_frame_info_offset()), RBP);
2953
2954 // Mark that the thread exited generated code through a runtime call.
2955 __ movq(Address(THR, target::Thread::exit_through_ffi_offset()),
2956 Immediate(target::Thread::exit_through_runtime_call()));
2957
2958 // Mark that the thread is executing VM code.
2959 __ movq(RAX,
2960 Address(THR, target::Thread::interpret_call_entry_point_offset()));
2961 __ movq(Assembler::VMTagAddress(), RAX);
2962
2963 __ call(RAX);
2964
2965 // Mark that the thread is executing Dart code.
2966 __ movq(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));
2967
2968 // Mark that the thread has not exited generated Dart code.
2969 __ movq(Address(THR, target::Thread::exit_through_ffi_offset()),
2970 Immediate(0));
2971
2972 // Reset exit frame information in Isolate's mutator thread structure.
2973 __ movq(Address(THR, target::Thread::top_exit_frame_info_offset()),
2974 Immediate(0));
2975
2976 __ LeaveStubFrame();
2977 __ ret();
2978}
2979
2980// RBX: Contains an ICData.
2981// TOS(0): return address (Dart code).
2982void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) {
2983#if defined(PRODUCT)
2984 __ Stop("No debugging in PRODUCT mode");
2985#else
2986 __ EnterStubFrame();
2987 __ pushq(RDX); // Preserve receiver.
2988 __ pushq(RBX); // Preserve IC data.
2989 __ pushq(Immediate(0)); // Result slot.
2990 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
2991 __ popq(CODE_REG); // Original stub.
2992 __ popq(RBX); // Restore IC data.
2993 __ popq(RDX); // Restore receiver.
2994 __ LeaveStubFrame();
2995
2996 __ movq(RAX, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
2997 __ jmp(RAX); // Jump to original stub.
2998#endif // defined(PRODUCT)
2999}
3000
3001void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub(
3002 Assembler* assembler) {
3003#if defined(PRODUCT)
3004 __ Stop("No debugging in PRODUCT mode");
3005#else
3006 __ EnterStubFrame();
3007 __ pushq(RDX); // Preserve receiver.
3008 __ pushq(RBX); // Preserve IC data.
3009 __ pushq(Immediate(0)); // Result slot.
3010 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
3011 __ popq(CODE_REG); // Original stub.
3012 __ popq(RBX); // Restore IC data.
3013 __ popq(RDX); // Restore receiver.
3014 __ LeaveStubFrame();
3015
3016 __ movq(RAX, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
3017 __ jmp(RAX); // Jump to original stub.
3018#endif // defined(PRODUCT)
3019}
3020
3021// TOS(0): return address (Dart code).
3022void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
3023#if defined(PRODUCT)
3024 __ Stop("No debugging in PRODUCT mode");
3025#else
3026 __ EnterStubFrame();
3027 __ pushq(Immediate(0)); // Result slot.
3028 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
3029 __ popq(CODE_REG); // Original stub.
3030 __ LeaveStubFrame();
3031
3032 __ movq(RAX, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
3033 __ jmp(RAX); // Jump to original stub.
3034#endif // defined(PRODUCT)
3035}
3036
3037// Called only from unoptimized code.
3038void StubCodeCompiler::GenerateDebugStepCheckStub(Assembler* assembler) {
3039#if defined(PRODUCT)
3040 __ Stop("No debugging in PRODUCT mode");
3041#else
3042 // Check single stepping.
3043 Label stepping, done_stepping;
3044 __ LoadIsolate(RAX);
3045 __ movzxb(RAX, Address(RAX, target::Isolate::single_step_offset()));
3046 __ cmpq(RAX, Immediate(0));
3047 __ j(NOT_EQUAL, &stepping, Assembler::kNearJump);
3048 __ Bind(&done_stepping);
3049 __ ret();
3050
3051 __ Bind(&stepping);
3052 __ EnterStubFrame();
3053 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
3054 __ LeaveStubFrame();
3055 __ jmp(&done_stepping, Assembler::kNearJump);
3056#endif // defined(PRODUCT)
3057}
3058
3059// Used to check class and type arguments. Arguments passed in registers:
3060//
3061// Inputs:
3062// - R9 : RawSubtypeTestCache
3063// - RAX : instance to test against.
//   - RDX : instantiator type arguments (only read when n >= 4).
//   - RCX : function type arguments (only read when n >= 4).
3066//
3067// - TOS + 0: return address.
3068//
3069// Preserves R9/RAX/RCX/RDX, RBX.
3070//
3071// Result in R8: null -> not found, otherwise result (true or false).
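//
// A note on the cache layout, as read by the loop below: the backing array
// holds entries of target::SubtypeTestCache::kTestEntryLength words each,
// roughly (instance cid-or-function, instance type arguments, instantiator
// type arguments, function type arguments, parent function type arguments,
// delayed type arguments, test result), terminated by an entry whose first
// word is null. The loop scans linearly until it either finds a matching
// entry or hits that null sentinel.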
3072static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
3073 ASSERT(n == 1 || n == 2 || n == 4 || n == 6);
3074
3075 const Register kInstanceCidOrFunction = R10;
3076 const Register kInstanceInstantiatorTypeArgumentsReg = R13;
3077 const Register kInstanceParentFunctionTypeArgumentsReg = PP;
3078 const Register kInstanceDelayedFunctionTypeArgumentsReg = CODE_REG;
3079
3080 const Register kNullReg = R8;
3081
3082 __ LoadObject(kNullReg, NullObject());
3083
  // Free up these two registers so they can be used for the 6-value test.
3085 if (n >= 6) {
3086 __ pushq(kInstanceParentFunctionTypeArgumentsReg);
3087 __ pushq(kInstanceDelayedFunctionTypeArgumentsReg);
3088 }
3089
  // Loop initialization (hoisted up here to avoid issuing all of the
  // dependent loads back to back).
3092
3093 // We avoid a load-acquire barrier here by relying on the fact that all other
3094 // loads from the array are data-dependent loads.
3095 __ movq(RSI, FieldAddress(TypeTestABI::kSubtypeTestCacheReg,
3096 target::SubtypeTestCache::cache_offset()));
3097 __ addq(RSI, Immediate(target::Array::data_offset() - kHeapObjectTag));
3098
3099 Label loop, not_closure;
3100 if (n >= 4) {
3101 __ LoadClassIdMayBeSmi(kInstanceCidOrFunction, TypeTestABI::kInstanceReg);
3102 } else {
3103 __ LoadClassId(kInstanceCidOrFunction, TypeTestABI::kInstanceReg);
3104 }
3105 __ cmpq(kInstanceCidOrFunction, Immediate(kClosureCid));
3106 __ j(NOT_EQUAL, &not_closure, Assembler::kNearJump);
3107
3108 // Closure handling.
3109 {
3110 __ movq(kInstanceCidOrFunction,
3111 FieldAddress(TypeTestABI::kInstanceReg,
3112 target::Closure::function_offset()));
3113 if (n >= 2) {
3114 __ movq(
3115 kInstanceInstantiatorTypeArgumentsReg,
3116 FieldAddress(TypeTestABI::kInstanceReg,
3117 target::Closure::instantiator_type_arguments_offset()));
3118 if (n >= 6) {
3119 ASSERT(n == 6);
3120 __ movq(
3121 kInstanceParentFunctionTypeArgumentsReg,
3122 FieldAddress(TypeTestABI::kInstanceReg,
3123 target::Closure::function_type_arguments_offset()));
3124 __ movq(kInstanceDelayedFunctionTypeArgumentsReg,
3125 FieldAddress(TypeTestABI::kInstanceReg,
3126 target::Closure::delayed_type_arguments_offset()));
3127 }
3128 }
3129 __ jmp(&loop, Assembler::kNearJump);
3130 }
3131
3132 // Non-Closure handling.
3133 {
3134 __ Bind(&not_closure);
3135 if (n >= 2) {
3136 Label has_no_type_arguments;
3137 __ LoadClassById(RDI, kInstanceCidOrFunction);
3138 __ movq(kInstanceInstantiatorTypeArgumentsReg, kNullReg);
3139 __ movl(RDI,
3140 FieldAddress(
3141 RDI, target::Class::
3142 host_type_arguments_field_offset_in_words_offset()));
3143 __ cmpl(RDI, Immediate(target::Class::kNoTypeArguments));
3144 __ j(EQUAL, &has_no_type_arguments, Assembler::kNearJump);
3145 __ movq(kInstanceInstantiatorTypeArgumentsReg,
3146 FieldAddress(TypeTestABI::kInstanceReg, RDI, TIMES_8, 0));
3147 __ Bind(&has_no_type_arguments);
3148
3149 if (n >= 6) {
3150 __ movq(kInstanceParentFunctionTypeArgumentsReg, kNullReg);
3151 __ movq(kInstanceDelayedFunctionTypeArgumentsReg, kNullReg);
3152 }
3153 }
3154 __ SmiTag(kInstanceCidOrFunction);
3155 }
3156
3157 Label found, not_found, next_iteration;
3158
3159 // Loop header.
3160 __ Bind(&loop);
3161 __ movq(
3162 RDI,
3163 Address(RSI, target::kWordSize *
3164 target::SubtypeTestCache::kInstanceClassIdOrFunction));
3165 __ cmpq(RDI, kNullReg);
3166 __ j(EQUAL, &not_found, Assembler::kNearJump);
3167 __ cmpq(RDI, kInstanceCidOrFunction);
3168 if (n == 1) {
3169 __ j(EQUAL, &found, Assembler::kNearJump);
3170 } else {
3171 __ j(NOT_EQUAL, &next_iteration, Assembler::kNearJump);
3172 __ cmpq(kInstanceInstantiatorTypeArgumentsReg,
3173 Address(RSI, target::kWordSize *
3174 target::SubtypeTestCache::kInstanceTypeArguments));
3175 if (n == 2) {
3176 __ j(EQUAL, &found, Assembler::kNearJump);
3177 } else {
3178 __ j(NOT_EQUAL, &next_iteration, Assembler::kNearJump);
3179 __ cmpq(
3180 TypeTestABI::kInstantiatorTypeArgumentsReg,
3181 Address(RSI,
3182 target::kWordSize *
3183 target::SubtypeTestCache::kInstantiatorTypeArguments));
3184 __ j(NOT_EQUAL, &next_iteration, Assembler::kNearJump);
3185 __ cmpq(
3186 TypeTestABI::kFunctionTypeArgumentsReg,
3187 Address(RSI, target::kWordSize *
3188 target::SubtypeTestCache::kFunctionTypeArguments));
3189
3190 if (n == 4) {
3191 __ j(EQUAL, &found, Assembler::kNearJump);
3192 } else {
3193 ASSERT(n == 6);
3194 __ j(NOT_EQUAL, &next_iteration, Assembler::kNearJump);
3195
3196 __ cmpq(kInstanceParentFunctionTypeArgumentsReg,
3197 Address(RSI, target::kWordSize *
3198 target::SubtypeTestCache::
3199 kInstanceParentFunctionTypeArguments));
3200 __ j(NOT_EQUAL, &next_iteration, Assembler::kNearJump);
3201 __ cmpq(kInstanceDelayedFunctionTypeArgumentsReg,
3202 Address(RSI, target::kWordSize *
3203 target::SubtypeTestCache::
3204 kInstanceDelayedFunctionTypeArguments));
3205 __ j(EQUAL, &found, Assembler::kNearJump);
3206 }
3207 }
3208 }
3209
3210 __ Bind(&next_iteration);
3211 __ addq(RSI, Immediate(target::kWordSize *
3212 target::SubtypeTestCache::kTestEntryLength));
3213 __ jmp(&loop, Assembler::kNearJump);
3214
3215 __ Bind(&found);
3216 __ movq(R8, Address(RSI, target::kWordSize *
3217 target::SubtypeTestCache::kTestResult));
3218 if (n >= 6) {
3219 __ popq(kInstanceDelayedFunctionTypeArgumentsReg);
3220 __ popq(kInstanceParentFunctionTypeArgumentsReg);
3221 }
3222 __ ret();
3223
3224 __ Bind(&not_found);
3225 if (n >= 6) {
3226 __ popq(kInstanceDelayedFunctionTypeArgumentsReg);
3227 __ popq(kInstanceParentFunctionTypeArgumentsReg);
3228 }
3229 __ ret();
3230}
3231
3232// See comment on [GenerateSubtypeNTestCacheStub].
3233void StubCodeCompiler::GenerateSubtype1TestCacheStub(Assembler* assembler) {
3234 GenerateSubtypeNTestCacheStub(assembler, 1);
3235}
3236
3237// See comment on [GenerateSubtypeNTestCacheStub].
3238void StubCodeCompiler::GenerateSubtype2TestCacheStub(Assembler* assembler) {
3239 GenerateSubtypeNTestCacheStub(assembler, 2);
3240}
3241
3242// See comment on [GenerateSubtypeNTestCacheStub].
3243void StubCodeCompiler::GenerateSubtype4TestCacheStub(Assembler* assembler) {
3244 GenerateSubtypeNTestCacheStub(assembler, 4);
3245}
3246
3247// See comment on [GenerateSubtypeNTestCacheStub].
3248void StubCodeCompiler::GenerateSubtype6TestCacheStub(Assembler* assembler) {
3249 GenerateSubtypeNTestCacheStub(assembler, 6);
3250}
3251
3252// Used to test whether a given value is of a given type (different variants,
3253// all have the same calling convention).
3254//
3255// Inputs:
3256// - R9 : RawSubtypeTestCache
3257// - RAX : instance to test against.
3258// - RDX : instantiator type arguments (if needed).
3259// - RCX : function type arguments (if needed).
3260//
3261// - RBX : type to test against.
3262// - R10 : name of destination variable.
3263//
3264// Preserves R9/RAX/RCX/RDX, RBX, R10.
3265//
// Note of warning: The caller will not populate CODE_REG, so we have no
// access to the object pool.
3268void StubCodeCompiler::GenerateDefaultTypeTestStub(Assembler* assembler) {
3269 __ movq(CODE_REG, Address(THR, target::Thread::slow_type_test_stub_offset()));
3270 __ jmp(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
3271}
3272
3273// Used instead of DefaultTypeTestStub when null is assignable.
3274void StubCodeCompiler::GenerateDefaultNullableTypeTestStub(
3275 Assembler* assembler) {
3276 Label done;
3277
3278 // Fast case for 'null'.
3279 __ CompareObject(TypeTestABI::kInstanceReg, NullObject());
3280 __ BranchIf(EQUAL, &done);
3281
3282 __ movq(CODE_REG, Address(THR, target::Thread::slow_type_test_stub_offset()));
3283 __ jmp(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
3284
3285 __ Bind(&done);
3286 __ Ret();
3287}
3288
3289void StubCodeCompiler::GenerateTopTypeTypeTestStub(Assembler* assembler) {
3290 __ Ret();
3291}
3292
3293void StubCodeCompiler::GenerateUnreachableTypeTestStub(Assembler* assembler) {
3294 __ Breakpoint();
3295}
3296
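// Helper that calls the TypeCheck runtime entry with the arguments expected by
// the type-testing stubs: a result slot followed by (instance, dst_type,
// instantiator type arguments, function type arguments, dst_name, subtype test
// cache, mode). All TypeTestABI registers are restored before returning.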
3297static void InvokeTypeCheckFromTypeTestStub(Assembler* assembler,
3298 TypeCheckMode mode) {
3299 __ PushObject(NullObject()); // Make room for result.
3300 __ pushq(TypeTestABI::kInstanceReg);
3301 __ pushq(TypeTestABI::kDstTypeReg);
3302 __ pushq(TypeTestABI::kInstantiatorTypeArgumentsReg);
3303 __ pushq(TypeTestABI::kFunctionTypeArgumentsReg);
  __ PushObject(NullObject());  // dst_name (null).
3305 __ pushq(TypeTestABI::kSubtypeTestCacheReg);
3306 __ PushImmediate(Immediate(target::ToRawSmi(mode)));
3307 __ CallRuntime(kTypeCheckRuntimeEntry, 7);
3308 __ Drop(1); // mode
3309 __ popq(TypeTestABI::kSubtypeTestCacheReg);
  __ Drop(1);  // dst_name.
3311 __ popq(TypeTestABI::kFunctionTypeArgumentsReg);
3312 __ popq(TypeTestABI::kInstantiatorTypeArgumentsReg);
3313 __ popq(TypeTestABI::kDstTypeReg);
3314 __ popq(TypeTestABI::kInstanceReg);
3315 __ Drop(1); // Discard return value.
3316}
3317
3318void StubCodeCompiler::GenerateLazySpecializeTypeTestStub(
3319 Assembler* assembler) {
3320 __ movq(
3321 CODE_REG,
3322 Address(THR, target::Thread::lazy_specialize_type_test_stub_offset()));
3323 __ EnterStubFrame();
3324 InvokeTypeCheckFromTypeTestStub(assembler, kTypeCheckFromLazySpecializeStub);
3325 __ LeaveStubFrame();
3326 __ Ret();
3327}
3328
3329// Used instead of LazySpecializeTypeTestStub when null is assignable.
3330void StubCodeCompiler::GenerateLazySpecializeNullableTypeTestStub(
3331 Assembler* assembler) {
3332 Label done;
3333
3334 // Fast case for 'null'.
3335 __ CompareObject(TypeTestABI::kInstanceReg, NullObject());
3336 __ BranchIf(EQUAL, &done);
3337
3338 __ movq(
3339 CODE_REG,
3340 Address(THR, target::Thread::lazy_specialize_type_test_stub_offset()));
3341 __ EnterStubFrame();
3342 InvokeTypeCheckFromTypeTestStub(assembler, kTypeCheckFromLazySpecializeStub);
3343 __ LeaveStubFrame();
3344
3345 __ Bind(&done);
3346 __ Ret();
3347}
3348
3349void StubCodeCompiler::GenerateSlowTypeTestStub(Assembler* assembler) {
3350 Label done, call_runtime;
3351
3352 if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
3353 __ movq(CODE_REG,
3354 Address(THR, target::Thread::slow_type_test_stub_offset()));
3355 }
3356 __ EnterStubFrame();
3357
  // If the subtype-cache is null, it needs to be lazily created by the runtime.
3359 __ CompareObject(TypeTestABI::kSubtypeTestCacheReg, NullObject());
3360 __ BranchIf(EQUAL, &call_runtime);
3361
3362 const Register kTmp = RDI;
3363
  // If this is not a [Type] object, take the complex case (6-value cache).
3365 Label is_simple_case, is_complex_case;
3366 __ LoadClassId(kTmp, TypeTestABI::kDstTypeReg);
3367 __ cmpq(kTmp, Immediate(kTypeCid));
3368 __ BranchIf(NOT_EQUAL, &is_complex_case);
3369
3370 // Check whether this [Type] is instantiated/uninstantiated.
3371 __ cmpb(
3372 FieldAddress(TypeTestABI::kDstTypeReg, target::Type::type_state_offset()),
3373 Immediate(target::AbstractTypeLayout::kTypeStateFinalizedInstantiated));
3374 __ BranchIf(NOT_EQUAL, &is_complex_case);
3375
3376 // Check whether this [Type] is a function type.
3377 __ movq(kTmp, FieldAddress(TypeTestABI::kDstTypeReg,
3378 target::Type::signature_offset()));
3379 __ CompareObject(kTmp, NullObject());
3380 __ BranchIf(NOT_EQUAL, &is_complex_case);
3381
3382 // This [Type] could be a FutureOr. Subtype2TestCache does not support Smi.
3383 __ BranchIfSmi(TypeTestABI::kInstanceReg, &is_complex_case);
3384
3385 // Fall through to &is_simple_case
3386
3387 __ Bind(&is_simple_case);
3388 {
3389 __ Call(StubCodeSubtype2TestCache());
3390 __ CompareObject(R8, CastHandle<Object>(TrueObject()));
3391 __ BranchIf(EQUAL, &done); // Cache said: yes.
3392 __ Jump(&call_runtime);
3393 }
3394
3395 __ Bind(&is_complex_case);
3396 {
3397 __ Call(StubCodeSubtype6TestCache());
3398 __ CompareObject(R8, CastHandle<Object>(TrueObject()));
3399 __ BranchIf(EQUAL, &done); // Cache said: yes.
    // Fall through to &call_runtime.
3401 }
3402
3403 __ Bind(&call_runtime);
3404
3405 InvokeTypeCheckFromTypeTestStub(assembler, kTypeCheckFromSlowStub);
3406
3407 __ Bind(&done);
3408 __ LeaveStubFrame();
3409 __ Ret();
3410}
3411
// Return the current stack pointer address (the value RSP had just before this
// stub was called), used for stack alignment checks.
3414// TOS + 0: return address
3415// Result in RAX.
3416void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
3417 __ leaq(RAX, Address(RSP, target::kWordSize));
3418 __ ret();
3419}
3420
3421// Jump to a frame on the call stack.
3422// TOS + 0: return address
3423// Arg1: program counter
3424// Arg2: stack pointer
// Arg3: frame pointer
3426// Arg4: thread
3427// No Result.
3428void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) {
3429 __ movq(THR, CallingConventions::kArg4Reg);
3430 __ movq(RBP, CallingConventions::kArg3Reg);
3431 __ movq(RSP, CallingConventions::kArg2Reg);
3432#if defined(USING_SHADOW_CALL_STACK)
3433#error Unimplemented
3434#endif
3435 Label exit_through_non_ffi;
  // Check if we exited generated code through FFI. If so, do the transition.
3437 __ cmpq(compiler::Address(
3438 THR, compiler::target::Thread::exit_through_ffi_offset()),
3439 compiler::Immediate(target::Thread::exit_through_ffi()));
3440 __ j(NOT_EQUAL, &exit_through_non_ffi, compiler::Assembler::kNearJump);
3441 __ TransitionNativeToGenerated(/*leave_safepoint=*/true);
3442 __ Bind(&exit_through_non_ffi);
3443
3444 // Set the tag.
3445 __ movq(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));
3446 // Clear top exit frame.
3447 __ movq(Address(THR, target::Thread::top_exit_frame_info_offset()),
3448 Immediate(0));
3449 // Restore the pool pointer.
3450 __ RestoreCodePointer();
3451 if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
3452 __ movq(PP, Address(THR, target::Thread::global_object_pool_offset()));
3453 } else {
3454 __ LoadPoolPointer(PP);
3455 }
3456 __ jmp(CallingConventions::kArg1Reg); // Jump to program counter.
3457}
3458
3459// Run an exception handler. Execution comes from JumpToFrame stub.
3460//
3461// The arguments are stored in the Thread object.
3462// No result.
3463void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
3464 ASSERT(kExceptionObjectReg == RAX);
3465 ASSERT(kStackTraceObjectReg == RDX);
3466 __ movq(CallingConventions::kArg1Reg,
3467 Address(THR, target::Thread::resume_pc_offset()));
3468
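  // Load the null object into TMP; it is used below to clear the thread's
  // active exception and active stacktrace fields after they have been read.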
3469 word offset_from_thread = 0;
3470 bool ok = target::CanLoadFromThread(NullObject(), &offset_from_thread);
3471 ASSERT(ok);
3472 __ movq(TMP, Address(THR, offset_from_thread));
3473
3474 // Load the exception from the current thread.
3475 Address exception_addr(THR, target::Thread::active_exception_offset());
3476 __ movq(kExceptionObjectReg, exception_addr);
3477 __ movq(exception_addr, TMP);
3478
3479 // Load the stacktrace from the current thread.
3480 Address stacktrace_addr(THR, target::Thread::active_stacktrace_offset());
3481 __ movq(kStackTraceObjectReg, stacktrace_addr);
3482 __ movq(stacktrace_addr, TMP);
3483
3484 __ jmp(CallingConventions::kArg1Reg); // Jump to continuation point.
3485}
3486
3487// Deoptimize a frame on the call stack before rewinding.
3488// The arguments are stored in the Thread object.
3489// No result.
3490void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
3491 // Push zap value instead of CODE_REG.
3492 __ pushq(Immediate(kZapCodeReg));
3493
3494 // Push the deopt pc.
3495 __ pushq(Address(THR, target::Thread::resume_pc_offset()));
3496#if defined(USING_SHADOW_CALL_STACK)
3497#error Unimplemented
3498#endif
3499 GenerateDeoptimizationSequence(assembler, kEagerDeopt);
3500
3501 // After we have deoptimized, jump to the correct frame.
3502 __ EnterStubFrame();
3503 __ CallRuntime(kRewindPostDeoptRuntimeEntry, 0);
3504 __ LeaveStubFrame();
3505 __ int3();
3506}
3507
// Calls the runtime to optimize the given function.
3509// RDI: function to be reoptimized.
3510// R10: argument descriptor (preserved).
3511void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
3512 __ movq(CODE_REG, Address(THR, target::Thread::optimize_stub_offset()));
3513 __ EnterStubFrame();
3514 __ pushq(R10); // Preserve args descriptor.
3515 __ pushq(Immediate(0)); // Result slot.
3516 __ pushq(RDI); // Arg0: function to optimize
3517 __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
3518 __ popq(RAX); // Discard argument.
3519 __ popq(RAX); // Get Code object.
3520 __ popq(R10); // Restore argument descriptor.
3521 __ LeaveStubFrame();
3522 __ movq(CODE_REG, FieldAddress(RAX, target::Function::code_offset()));
3523 __ movq(RCX, FieldAddress(RAX, target::Function::entry_point_offset()));
3524 __ jmp(RCX);
3525 __ int3();
3526}
3527
// Does an identity check (are the object references equal or not) with special
// checks for boxed numbers.
// Left and right are pushed on the stack.
// Returns with ZF set on equality.
3532// Note: A Mint cannot contain a value that would fit in Smi.
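// Background: identical() must give value-based answers for boxed doubles and
// mints, which may be boxed as distinct heap objects even when their values
// are equal, so for those class ids the stub falls back to a bitwise value
// comparison instead of a reference comparison.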
3533static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
3534 const Register left,
3535 const Register right) {
3536 Label reference_compare, done, check_mint;
  // If either of the arguments is a Smi, do a reference compare.
3538 __ testq(left, Immediate(kSmiTagMask));
3539 __ j(ZERO, &reference_compare);
3540 __ testq(right, Immediate(kSmiTagMask));
3541 __ j(ZERO, &reference_compare);
3542
3543 // Value compare for two doubles.
3544 __ CompareClassId(left, kDoubleCid);
3545 __ j(NOT_EQUAL, &check_mint, Assembler::kNearJump);
3546 __ CompareClassId(right, kDoubleCid);
3547 __ j(NOT_EQUAL, &done, Assembler::kFarJump);
3548
3549 // Double values bitwise compare.
3550 __ movq(left, FieldAddress(left, target::Double::value_offset()));
3551 __ cmpq(left, FieldAddress(right, target::Double::value_offset()));
3552 __ jmp(&done, Assembler::kFarJump);
3553
3554 __ Bind(&check_mint);
3555 __ CompareClassId(left, kMintCid);
3556 __ j(NOT_EQUAL, &reference_compare, Assembler::kNearJump);
3557 __ CompareClassId(right, kMintCid);
3558 __ j(NOT_EQUAL, &done, Assembler::kFarJump);
3559 __ movq(left, FieldAddress(left, target::Mint::value_offset()));
3560 __ cmpq(left, FieldAddress(right, target::Mint::value_offset()));
3561 __ jmp(&done, Assembler::kFarJump);
3562
3563 __ Bind(&reference_compare);
3564 __ cmpq(left, right);
3565 __ Bind(&done);
3566}
3567
3568// Called only from unoptimized code. All relevant registers have been saved.
3569// TOS + 0: return address
3570// TOS + 1: right argument.
3571// TOS + 2: left argument.
3572// Returns ZF set.
3573void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub(
3574 Assembler* assembler) {
3575#if !defined(PRODUCT)
3576 // Check single stepping.
3577 Label stepping, done_stepping;
3578 __ LoadIsolate(RAX);
3579 __ movzxb(RAX, Address(RAX, target::Isolate::single_step_offset()));
3580 __ cmpq(RAX, Immediate(0));
3581 __ j(NOT_EQUAL, &stepping);
3582 __ Bind(&done_stepping);
3583#endif
3584
3585 const Register left = RAX;
3586 const Register right = RDX;
3587
3588 __ movq(left, Address(RSP, 2 * target::kWordSize));
3589 __ movq(right, Address(RSP, 1 * target::kWordSize));
3590 GenerateIdenticalWithNumberCheckStub(assembler, left, right);
3591 __ ret();
3592
3593#if !defined(PRODUCT)
3594 __ Bind(&stepping);
3595 __ EnterStubFrame();
3596 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
3597 __ RestoreCodePointer();
3598 __ LeaveStubFrame();
3599 __ jmp(&done_stepping);
3600#endif
3601}
3602
3603// Called from optimized code only.
3604// TOS + 0: return address
3605// TOS + 1: right argument.
3606// TOS + 2: left argument.
3607// Returns ZF set.
3608void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
3609 Assembler* assembler) {
3610 const Register left = RAX;
3611 const Register right = RDX;
3612
3613 __ movq(left, Address(RSP, 2 * target::kWordSize));
3614 __ movq(right, Address(RSP, 1 * target::kWordSize));
3615 GenerateIdenticalWithNumberCheckStub(assembler, left, right);
3616 __ ret();
3617}
3618
3619// Called from megamorphic calls.
3620// RDX: receiver (passed to target)
3621// RBX: target::MegamorphicCache (preserved)
3622// Passed to target:
3623// CODE_REG: target Code
3624// R10: arguments descriptor
3625void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
3626 // Jump if receiver is a smi.
3627 Label smi_case;
3628 __ testq(RDX, Immediate(kSmiTagMask));
3629 // Jump out of line for smi case.
3630 __ j(ZERO, &smi_case, Assembler::kNearJump);
3631
3632 // Loads the cid of the object.
3633 __ LoadClassId(RAX, RDX);
3634
3635 Label cid_loaded;
3636 __ Bind(&cid_loaded);
3637 __ movq(R9, FieldAddress(RBX, target::MegamorphicCache::mask_offset()));
3638 __ movq(RDI, FieldAddress(RBX, target::MegamorphicCache::buckets_offset()));
3639 // R9: mask as a smi.
3640 // RDI: cache buckets array.
3641
3642 // Tag cid as a smi.
3643 __ addq(RAX, RAX);
3644
3645 // Compute the table index.
3646 ASSERT(target::MegamorphicCache::kSpreadFactor == 7);
  // Use leaq and subq to multiply by 7 (== 8 - 1).
3648 __ leaq(RCX, Address(RAX, TIMES_8, 0));
3649 __ subq(RCX, RAX);
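  // The probing below is, roughly, the following open-addressing scheme (with
  // `cid` the receiver's class id, `mask` the table mask, and `buckets` an
  // array of (cid, target) pairs):
  //
  //   i = (cid * kSpreadFactor) & mask;
  //   while (buckets[i].cid != cid) {
  //     if (buckets[i].cid == kIllegalCid) goto miss;
  //     i = (i + 1) & mask;
  //   }
  //   tail-call buckets[i].target;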
3650
3651 Label loop;
3652 __ Bind(&loop);
3653 __ andq(RCX, R9);
3654
3655 const intptr_t base = target::Array::data_offset();
3656 // RCX is smi tagged, but table entries are two words, so TIMES_8.
3657 Label probe_failed;
3658 __ cmpq(RAX, FieldAddress(RDI, RCX, TIMES_8, base));
3659 __ j(NOT_EQUAL, &probe_failed, Assembler::kNearJump);
3660
3661 Label load_target;
3662 __ Bind(&load_target);
3663 // Call the target found in the cache. For a class id match, this is a
3664 // proper target for the given name and arguments descriptor. If the
3665 // illegal class id was found, the target is a cache miss handler that can
3666 // be invoked as a normal Dart function.
3667 const auto target_address =
3668 FieldAddress(RDI, RCX, TIMES_8, base + target::kWordSize);
3669 if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
3670 __ movq(R10, FieldAddress(
3671 RBX, target::CallSiteData::arguments_descriptor_offset()));
3672 __ jmp(target_address);
3673 } else {
3674 __ movq(RAX, target_address);
3675 __ movq(R10, FieldAddress(
3676 RBX, target::CallSiteData::arguments_descriptor_offset()));
3677 __ movq(RCX, FieldAddress(RAX, target::Function::entry_point_offset()));
3678 __ movq(CODE_REG, FieldAddress(RAX, target::Function::code_offset()));
3679 __ jmp(RCX);
3680 }
3681
3682 // Probe failed, check if it is a miss.
3683 __ Bind(&probe_failed);
3684 __ cmpq(FieldAddress(RDI, RCX, TIMES_8, base),
3685 Immediate(target::ToRawSmi(kIllegalCid)));
3686 Label miss;
3687 __ j(ZERO, &miss, Assembler::kNearJump);
3688
3689 // Try next entry in the table.
3690 __ AddImmediate(RCX, Immediate(target::ToRawSmi(1)));
3691 __ jmp(&loop);
3692
3693 // Load cid for the Smi case.
3694 __ Bind(&smi_case);
3695 __ movq(RAX, Immediate(kSmiCid));
3696 __ jmp(&cid_loaded);
3697
3698 __ Bind(&miss);
3699 GenerateSwitchableCallMissStub(assembler);
3700}
3701
3702// Input:
3703// RBX - icdata
3704// RDX - receiver object
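//
// The IC entries array is scanned linearly: a receiver-cid match tail-calls
// the cached target, and the kIllegalCid sentinel (encoded as Smi 0)
// terminates the scan and dispatches to the isolate's IC miss handler.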
3705void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
3706 Label loop, found, miss;
3707 __ movq(R13, FieldAddress(RBX, target::ICData::entries_offset()));
3708 __ movq(R10, FieldAddress(
3709 RBX, target::CallSiteData::arguments_descriptor_offset()));
3710 __ leaq(R13, FieldAddress(R13, target::Array::data_offset()));
3711 // R13: first IC entry
3712 __ LoadTaggedClassIdMayBeSmi(RAX, RDX);
3713 // RAX: receiver cid as Smi
3714
3715 __ Bind(&loop);
3716 __ movq(R9, Address(R13, 0));
3717 __ cmpq(RAX, R9);
3718 __ j(EQUAL, &found, Assembler::kNearJump);
3719
3720 ASSERT(target::ToRawSmi(kIllegalCid) == 0);
3721 __ testq(R9, R9);
3722 __ j(ZERO, &miss, Assembler::kNearJump);
3723
3724 const intptr_t entry_length =
3725 target::ICData::TestEntryLengthFor(1, /*tracking_exactness=*/false) *
3726 target::kWordSize;
3727 __ addq(R13, Immediate(entry_length)); // Next entry.
3728 __ jmp(&loop);
3729
3730 __ Bind(&found);
3731 const intptr_t code_offset =
3732 target::ICData::CodeIndexFor(1) * target::kWordSize;
3733 const intptr_t entry_offset =
3734 target::ICData::EntryPointIndexFor(1) * target::kWordSize;
3735 if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
3736 __ movq(CODE_REG, Address(R13, code_offset));
3737 }
3738 __ jmp(Address(R13, entry_offset));
3739
3740 __ Bind(&miss);
3741 __ LoadIsolate(RAX);
3742 __ movq(CODE_REG, Address(RAX, target::Isolate::ic_miss_code_offset()));
3743 __ movq(RCX, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
3744 __ jmp(RCX);
3745}
3746
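// Register usage (as read by the code below):
//  RDX: receiver
//  RBX: MonomorphicSmiableCall holding the expected receiver cid and the
//       target to call on a match. A cid mismatch transfers control to the
//       switchable call miss handler.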
3747void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub(
3748 Assembler* assembler) {
3749 Label have_cid, miss;
3750
3751 __ movq(RAX, Immediate(kSmiCid));
3752 __ movzxw(
3753 RCX,
3754 FieldAddress(RBX, target::MonomorphicSmiableCall::expected_cid_offset()));
3755 __ testq(RDX, Immediate(kSmiTagMask));
3756 __ j(ZERO, &have_cid, Assembler::kNearJump);
3757 __ LoadClassId(RAX, RDX);
3758 __ Bind(&have_cid);
3759 __ cmpq(RAX, RCX);
3760 __ j(NOT_EQUAL, &miss, Assembler::kNearJump);
3761 if (FLAG_use_bare_instructions) {
3762 __ jmp(
3763 FieldAddress(RBX, target::MonomorphicSmiableCall::entrypoint_offset()));
3764 } else {
3765 __ movq(CODE_REG,
3766 FieldAddress(RBX, target::MonomorphicSmiableCall::target_offset()));
3767 __ jmp(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
3768 }
3769
3770 __ Bind(&miss);
3771 __ jmp(Address(THR, target::Thread::switchable_call_miss_entry_offset()));
3772}
3773
3774// Called from switchable IC calls.
3775// RDX: receiver
3776void StubCodeCompiler::GenerateSwitchableCallMissStub(Assembler* assembler) {
3777 __ movq(CODE_REG,
3778 Address(THR, target::Thread::switchable_call_miss_stub_offset()));
3779 __ EnterStubFrame();
3780 __ pushq(RDX); // Preserve receiver.
3781
3782 __ pushq(Immediate(0)); // Result slot.
3783 __ pushq(Immediate(0)); // Arg0: stub out.
3784 __ pushq(RDX); // Arg1: Receiver
3785 __ CallRuntime(kSwitchableCallMissRuntimeEntry, 2);
3786 __ popq(RBX);
3787 __ popq(CODE_REG); // result = stub
3788 __ popq(RBX); // result = IC
3789
3790 __ popq(RDX); // Restore receiver.
3791 __ LeaveStubFrame();
3792
3793 __ movq(RCX, FieldAddress(CODE_REG, target::Code::entry_point_offset(
3794 CodeEntryKind::kNormal)));
3795 __ jmp(RCX);
3796}
3797
3798// Called from switchable IC calls.
3799// RDX: receiver
3800// RBX: SingleTargetCache
// Passed to target:
3802// CODE_REG: target Code object
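//
// The cache holds an inclusive [lower_limit, upper_limit] class-id range: if
// the receiver's cid falls inside it, control jumps straight to the cached
// entry point; otherwise the switchable call miss handler is invoked.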
3803void StubCodeCompiler::GenerateSingleTargetCallStub(Assembler* assembler) {
3804 Label miss;
3805 __ LoadClassIdMayBeSmi(RAX, RDX);
3806 __ movzxw(R9,
3807 FieldAddress(RBX, target::SingleTargetCache::lower_limit_offset()));
3808 __ movzxw(R10,
3809 FieldAddress(RBX, target::SingleTargetCache::upper_limit_offset()));
3810 __ cmpq(RAX, R9);
3811 __ j(LESS, &miss, Assembler::kNearJump);
3812 __ cmpq(RAX, R10);
3813 __ j(GREATER, &miss, Assembler::kNearJump);
3814 __ movq(RCX,
3815 FieldAddress(RBX, target::SingleTargetCache::entry_point_offset()));
3816 __ movq(CODE_REG,
3817 FieldAddress(RBX, target::SingleTargetCache::target_offset()));
3818 __ jmp(RCX);
3819
3820 __ Bind(&miss);
3821 __ EnterStubFrame();
3822 __ pushq(RDX); // Preserve receiver.
3823
3824 __ pushq(Immediate(0)); // Result slot.
3825 __ pushq(Immediate(0)); // Arg0: stub out
3826 __ pushq(RDX); // Arg1: Receiver
3827 __ CallRuntime(kSwitchableCallMissRuntimeEntry, 2);
3828 __ popq(RBX);
3829 __ popq(CODE_REG); // result = stub
3830 __ popq(RBX); // result = IC
3831
3832 __ popq(RDX); // Restore receiver.
3833 __ LeaveStubFrame();
3834
3835 __ movq(RCX, FieldAddress(CODE_REG, target::Code::entry_point_offset(
3836 CodeEntryKind::kMonomorphic)));
3837 __ jmp(RCX);
3838}
3839
3840void StubCodeCompiler::GenerateFrameAwaitingMaterializationStub(
3841 Assembler* assembler) {
3842 __ int3();
3843}
3844
3845void StubCodeCompiler::GenerateAsynchronousGapMarkerStub(Assembler* assembler) {
3846 __ int3();
3847}
3848
3849void StubCodeCompiler::GenerateNotLoadedStub(Assembler* assembler) {
3850 __ EnterStubFrame();
3851 __ CallRuntime(kNotLoadedRuntimeEntry, 0);
3852 __ int3();
3853}
3854
3855// Instantiate type arguments from instantiator and function type args.
3856// RBX: uninstantiated type arguments.
3857// RDX: instantiator type arguments.
3858// RCX: function type arguments.
3859// Returns instantiated type arguments in RAX.
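//
// The instantiations cache attached to the uninstantiated type arguments is a
// flat array of (instantiator TAV, function TAV, instantiated TAV) entries of
// Instantiation::kSizeInWords words each, terminated by a kNoInstantiator
// sentinel in the instantiator slot. The lookup below is, roughly:
//
//   for (entry = cache; entry[kInstantiatorTypeArgsIndex] != kNoInstantiator;
//        entry += Instantiation::kSizeInWords) {
//     if (entry[kInstantiatorTypeArgsIndex] == instantiator_tav &&
//         entry[kFunctionTypeArgsIndex] == function_tav) {
//       return entry[kInstantiatedTypeArgsIndex];
//     }
//   }
//   // Not found: instantiate via the runtime.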
3860void StubCodeCompiler::GenerateInstantiateTypeArgumentsStub(
3861 Assembler* assembler) {
  // Look up the cache before calling the runtime.
3863 __ movq(RAX, compiler::FieldAddress(
3864 InstantiationABI::kUninstantiatedTypeArgumentsReg,
3865 target::TypeArguments::instantiations_offset()));
3866 __ leaq(RAX, compiler::FieldAddress(RAX, Array::data_offset()));
3867
3868 // The instantiations cache is initialized with Object::zero_array() and is
3869 // therefore guaranteed to contain kNoInstantiator. No length check needed.
3870 compiler::Label loop, next, found, call_runtime;
3871 __ Bind(&loop);
3872
  // Use a load-acquire to test for the sentinel; if we find a non-sentinel,
  // it is safe to access the other entries. If we find the sentinel, go to
  // the runtime.
3875 __ LoadAcquire(RDI, RAX,
3876 TypeArguments::Instantiation::kInstantiatorTypeArgsIndex *
3877 target::kWordSize);
3878 __ CompareImmediate(RDI, Smi::RawValue(TypeArguments::kNoInstantiator));
3879 __ j(EQUAL, &call_runtime, compiler::Assembler::kNearJump);
3880
3881 __ cmpq(RDI, InstantiationABI::kInstantiatorTypeArgumentsReg);
3882 __ j(NOT_EQUAL, &next, compiler::Assembler::kNearJump);
3883 __ movq(R10, compiler::Address(
3884 RAX, TypeArguments::Instantiation::kFunctionTypeArgsIndex *
3885 target::kWordSize));
3886 __ cmpq(R10, InstantiationABI::kFunctionTypeArgumentsReg);
3887 __ j(EQUAL, &found, compiler::Assembler::kNearJump);
3888 __ Bind(&next);
3889 __ addq(RAX, compiler::Immediate(TypeArguments::Instantiation::kSizeInWords *
3890 target::kWordSize));
3891 __ jmp(&loop);
3892
3893 // Instantiate non-null type arguments.
3894 // A runtime call to instantiate the type arguments is required.
3895 __ Bind(&call_runtime);
3896 __ EnterStubFrame();
3897 __ PushObject(Object::null_object()); // Make room for the result.
3898 __ pushq(InstantiationABI::kUninstantiatedTypeArgumentsReg);
3899 __ pushq(InstantiationABI::kInstantiatorTypeArgumentsReg);
3900 __ pushq(InstantiationABI::kFunctionTypeArgumentsReg);
3901 __ CallRuntime(kInstantiateTypeArgumentsRuntimeEntry, 3);
3902 __ Drop(3); // Drop 2 type vectors, and uninstantiated type.
3903 __ popq(InstantiationABI::kResultTypeArgumentsReg);
3904 __ LeaveStubFrame();
3905 __ ret();
3906
3907 __ Bind(&found);
3908 __ movq(InstantiationABI::kResultTypeArgumentsReg,
3909 compiler::Address(
3910 RAX, TypeArguments::Instantiation::kInstantiatedTypeArgsIndex *
3911 target::kWordSize));
3912 __ ret();
3913}
3914
3915void StubCodeCompiler::
3916 GenerateInstantiateTypeArgumentsMayShareInstantiatorTAStub(
3917 Assembler* assembler) {
  // Return the instantiator type arguments if their nullability is compatible
  // for sharing; otherwise proceed to the instantiation cache lookup.
3920 compiler::Label cache_lookup;
3921 __ movq(RAX, compiler::FieldAddress(
3922 InstantiationABI::kUninstantiatedTypeArgumentsReg,
3923 target::TypeArguments::nullability_offset()));
3924 __ movq(RDI, compiler::FieldAddress(
3925 InstantiationABI::kInstantiatorTypeArgumentsReg,
3926 target::TypeArguments::nullability_offset()));
3927 __ andq(RDI, RAX);
3928 __ cmpq(RDI, RAX);
3929 __ j(NOT_EQUAL, &cache_lookup, compiler::Assembler::kNearJump);
3930 __ movq(InstantiationABI::kResultTypeArgumentsReg,
3931 InstantiationABI::kInstantiatorTypeArgumentsReg);
3932 __ ret();
3933
3934 __ Bind(&cache_lookup);
3935 GenerateInstantiateTypeArgumentsStub(assembler);
3936}
3937
3938void StubCodeCompiler::GenerateInstantiateTypeArgumentsMayShareFunctionTAStub(
3939 Assembler* assembler) {
  // Return the function type arguments if their nullability is compatible for
  // sharing; otherwise proceed to the instantiation cache lookup.
3942 compiler::Label cache_lookup;
3943 __ movq(RAX, compiler::FieldAddress(
3944 InstantiationABI::kUninstantiatedTypeArgumentsReg,
3945 target::TypeArguments::nullability_offset()));
3946 __ movq(RDI,
3947 compiler::FieldAddress(InstantiationABI::kFunctionTypeArgumentsReg,
3948 target::TypeArguments::nullability_offset()));
3949 __ andq(RDI, RAX);
3950 __ cmpq(RDI, RAX);
3951 __ j(NOT_EQUAL, &cache_lookup, compiler::Assembler::kNearJump);
3952 __ movq(InstantiationABI::kResultTypeArgumentsReg,
3953 InstantiationABI::kFunctionTypeArgumentsReg);
3954 __ ret();
3955
3956 __ Bind(&cache_lookup);
3957 GenerateInstantiateTypeArgumentsStub(assembler);
3958}
3959
3960} // namespace compiler
3961
3962} // namespace dart
3963
3964#endif // defined(TARGET_ARCH_X64)
3965