1// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/compiler/runtime_api.h"
6#include "vm/globals.h"
7
8// For `AllocateObjectInstr::WillAllocateNewOrRemembered`
9// For `GenericCheckBoundInstr::UseUnboxedRepresentation`
10#include "vm/compiler/backend/il.h"
11
12#define SHOULD_NOT_INCLUDE_RUNTIME
13
14#include "vm/compiler/stub_code_compiler.h"
15
16#if defined(TARGET_ARCH_ARM)
17
18#include "vm/class_id.h"
19#include "vm/code_entry_kind.h"
20#include "vm/compiler/api/type_check_mode.h"
21#include "vm/compiler/assembler/assembler.h"
22#include "vm/compiler/backend/locations.h"
23#include "vm/constants.h"
24#include "vm/instructions.h"
25#include "vm/static_type_exactness_state.h"
26#include "vm/tags.h"
27
28#define __ assembler->
29
30namespace dart {
31
32DEFINE_FLAG(bool, inline_alloc, true, "Inline allocation of objects.");
33DEFINE_FLAG(bool,
34 use_slow_path,
35 false,
36 "Set to true for debugging & verifying the slow paths.");
37DECLARE_FLAG(bool, precompiled_mode);
38
39namespace compiler {
40
41// Ensures that [R0] is a new object, if not it will be added to the remembered
42// set via a leaf runtime call.
43//
44// WARNING: This might clobber all registers except for [R0], [THR] and [FP].
45// The caller should simply call LeaveStubFrame() and return.
46static void EnsureIsNewOrRemembered(Assembler* assembler,
47 bool preserve_registers = true) {
48 // If the object is not remembered we call a leaf-runtime to add it to the
49 // remembered set.
50 Label done;
51 __ tst(R0, Operand(1 << target::ObjectAlignment::kNewObjectBitPosition));
52 __ BranchIf(NOT_ZERO, &done);
53
54 if (preserve_registers) {
55 __ EnterCallRuntimeFrame(0);
56 } else {
57 __ ReserveAlignedFrameSpace(0);
58 }
59 // [R0] already contains first argument.
60 __ mov(R1, Operand(THR));
61 __ CallRuntime(kEnsureRememberedAndMarkingDeferredRuntimeEntry, 2);
62 if (preserve_registers) {
63 __ LeaveCallRuntimeFrame();
64 }
65
66 __ Bind(&done);
67}
68
69// Input parameters:
70// LR : return address.
71// SP : address of last argument in argument array.
72// SP + 4*R4 - 4 : address of first argument in argument array.
73// SP + 4*R4 : address of return value.
74// R9 : address of the runtime function to call.
75// R4 : number of arguments to the call.
76void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
77 const intptr_t thread_offset = target::NativeArguments::thread_offset();
78 const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
79 const intptr_t argv_offset = target::NativeArguments::argv_offset();
80 const intptr_t retval_offset = target::NativeArguments::retval_offset();
81
82 __ ldr(CODE_REG, Address(THR, target::Thread::call_to_runtime_stub_offset()));
83 __ EnterStubFrame();
84
85 // Save exit frame information to enable stack walking as we are about
86 // to transition to Dart VM C++ code.
87 __ StoreToOffset(kWord, FP, THR,
88 target::Thread::top_exit_frame_info_offset());
89
90 // Mark that the thread exited generated code through a runtime call.
91 __ LoadImmediate(R8, target::Thread::exit_through_runtime_call());
92 __ StoreToOffset(kWord, R8, THR, target::Thread::exit_through_ffi_offset());
93
94#if defined(DEBUG)
95 {
96 Label ok;
97 // Check that we are always entering from Dart code.
98 __ LoadFromOffset(kWord, R8, THR, target::Thread::vm_tag_offset());
99 __ CompareImmediate(R8, VMTag::kDartCompiledTagId);
100 __ b(&ok, EQ);
101 __ Stop("Not coming from Dart code.");
102 __ Bind(&ok);
103 }
104#endif
105
106 // Mark that the thread is executing VM code.
107 __ StoreToOffset(kWord, R9, THR, target::Thread::vm_tag_offset());
108
109 // Reserve space for arguments and align frame before entering C++ world.
110 // target::NativeArguments are passed in registers.
111 ASSERT(target::NativeArguments::StructSize() == 4 * target::kWordSize);
112 __ ReserveAlignedFrameSpace(0);
113
114 // Pass target::NativeArguments structure by value and call runtime.
115 // Registers R0, R1, R2, and R3 are used.
116
117 ASSERT(thread_offset == 0 * target::kWordSize);
118 // Set thread in NativeArgs.
119 __ mov(R0, Operand(THR));
120
121 // There are no runtime calls to closures, so we do not need to set the tag
122 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_.
123 ASSERT(argc_tag_offset == 1 * target::kWordSize);
124 __ mov(R1, Operand(R4)); // Set argc in target::NativeArguments.
125
126 ASSERT(argv_offset == 2 * target::kWordSize);
127 __ add(R2, FP, Operand(R4, LSL, 2)); // Compute argv.
128 // Set argv in target::NativeArguments.
129 __ AddImmediate(R2,
130 target::frame_layout.param_end_from_fp * target::kWordSize);
131
132 ASSERT(retval_offset == 3 * target::kWordSize);
133 __ add(R3, R2,
134 Operand(target::kWordSize)); // Retval is next to 1st argument.
135
136 // Call runtime or redirection via simulator.
137 __ blx(R9);
138
139 // Mark that the thread is executing Dart code.
140 __ LoadImmediate(R2, VMTag::kDartCompiledTagId);
141 __ StoreToOffset(kWord, R2, THR, target::Thread::vm_tag_offset());
142
143 // Mark that the thread has not exited generated Dart code.
144 __ LoadImmediate(R2, 0);
145 __ StoreToOffset(kWord, R2, THR, target::Thread::exit_through_ffi_offset());
146
147 // Reset exit frame information in Isolate's mutator thread structure.
148 __ StoreToOffset(kWord, R2, THR,
149 target::Thread::top_exit_frame_info_offset());
150
151 // Restore the global object pool after returning from runtime (old space is
152 // moving, so the GOP could have been relocated).
153 if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
154 __ SetupGlobalPoolAndDispatchTable();
155 }
156
157 __ LeaveStubFrame();
158
159 // The following return can jump to a lazy-deopt stub, which assumes R0
160 // contains a return value and will save it in a GC-visible way. We therefore
161 // have to ensure R0 does not contain any garbage value left from the C
162 // function we called (which has return type "void").
163 // (See GenerateDeoptimizationSequence::saved_result_slot_from_fp.)
164 __ LoadImmediate(R0, 0);
165 __ Ret();
166}
167
168void GenerateSharedStubGeneric(Assembler* assembler,
169 bool save_fpu_registers,
170 intptr_t self_code_stub_offset_from_thread,
171 bool allow_return,
172 std::function<void()> perform_runtime_call) {
173 // If the target CPU does not support VFP the caller should always use the
174 // non-FPU stub.
175 if (save_fpu_registers && !TargetCPUFeatures::vfp_supported()) {
176 __ Breakpoint();
177 return;
178 }
179
180 // We want the saved registers to appear like part of the caller's frame, so
181 // we push them before calling EnterStubFrame.
182 RegisterSet all_registers;
183 all_registers.AddAllNonReservedRegisters(save_fpu_registers);
184
185 // To make the stack map calculation architecture independent we do the same
186 // as on intel.
187 __ Push(LR);
188 __ PushRegisters(all_registers);
189 __ ldr(CODE_REG, Address(THR, self_code_stub_offset_from_thread));
190 __ EnterStubFrame();
191 perform_runtime_call();
192 if (!allow_return) {
193 __ Breakpoint();
194 return;
195 }
196 __ LeaveStubFrame();
197 __ PopRegisters(all_registers);
198 __ Drop(1); // We use the LR restored via LeaveStubFrame.
199 __ bx(LR);
200}
201
202static void GenerateSharedStub(Assembler* assembler,
203 bool save_fpu_registers,
204 const RuntimeEntry* target,
205 intptr_t self_code_stub_offset_from_thread,
206 bool allow_return,
207 bool store_runtime_result_in_r0 = false) {
208 ASSERT(!store_runtime_result_in_r0 || allow_return);
209 auto perform_runtime_call = [&]() {
210 if (store_runtime_result_in_r0) {
211 __ PushRegister(LR);
212 }
213 __ CallRuntime(*target, /*argument_count=*/0);
214 if (store_runtime_result_in_r0) {
215 __ PopRegister(R0);
216 __ str(
217 R0,
218 Address(FP, target::kWordSize *
219 StubCodeCompiler::WordOffsetFromFpToCpuRegister(R0)));
220 }
221 };
222 GenerateSharedStubGeneric(assembler, save_fpu_registers,
223 self_code_stub_offset_from_thread, allow_return,
224 perform_runtime_call);
225}
226
227// R1: The extracted method.
228// R4: The type_arguments_field_offset (or 0)
229// SP+0: The object from which we are tearing a method off.
230void StubCodeCompiler::GenerateBuildMethodExtractorStub(
231 Assembler* assembler,
232 const Object& closure_allocation_stub,
233 const Object& context_allocation_stub) {
234 const intptr_t kReceiverOffset = target::frame_layout.param_end_from_fp + 1;
235
236 __ EnterStubFrame();
237
238 // Build type_arguments vector (or null)
239 __ cmp(R4, Operand(0));
240 __ ldr(R3, Address(THR, target::Thread::object_null_offset()), EQ);
241 __ ldr(R0, Address(FP, kReceiverOffset * target::kWordSize), NE);
242 __ ldr(R3, Address(R0, R4), NE);
243
244 // Push type arguments & extracted method.
245 __ PushList(1 << R3 | 1 << R1);
246
247 // Allocate context.
248 {
249 Label done, slow_path;
250 __ TryAllocateArray(kContextCid, target::Context::InstanceSize(1),
251 &slow_path,
252 R0, // instance
253 R1, // end address
254 R2, R3);
255 __ ldr(R1, Address(THR, target::Thread::object_null_offset()));
256 __ str(R1, FieldAddress(R0, target::Context::parent_offset()));
257 __ LoadImmediate(R1, 1);
258 __ str(R1, FieldAddress(R0, target::Context::num_variables_offset()));
259 __ b(&done);
260
261 __ Bind(&slow_path);
262
263 __ LoadImmediate(/*num_vars=*/R1, 1);
264 __ LoadObject(CODE_REG, context_allocation_stub);
265 __ ldr(R0, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
266 __ blx(R0);
267
268 __ Bind(&done);
269 }
270
271 // Store receiver in context
272 __ ldr(R1, Address(FP, target::kWordSize * kReceiverOffset));
273 __ StoreIntoObject(R0, FieldAddress(R0, target::Context::variable_offset(0)),
274 R1);
275
276 // Push context.
277 __ Push(R0);
278
279 // Allocate closure.
280 __ LoadObject(CODE_REG, closure_allocation_stub);
281 __ ldr(R1, FieldAddress(CODE_REG, target::Code::entry_point_offset(
282 CodeEntryKind::kUnchecked)));
283 __ blx(R1);
284
285 // Populate closure object.
286 __ Pop(R1); // Pop context.
287 __ StoreIntoObject(R0, FieldAddress(R0, target::Closure::context_offset()),
288 R1);
289 __ PopList(1 << R3 | 1 << R1); // Pop type arguments & extracted method.
290 __ StoreIntoObjectNoBarrier(
291 R0, FieldAddress(R0, target::Closure::function_offset()), R1);
292 __ StoreIntoObjectNoBarrier(
293 R0,
294 FieldAddress(R0, target::Closure::instantiator_type_arguments_offset()),
295 R3);
296 __ LoadObject(R1, EmptyTypeArguments());
297 __ StoreIntoObjectNoBarrier(
298 R0, FieldAddress(R0, target::Closure::delayed_type_arguments_offset()),
299 R1);
300
301 __ LeaveStubFrame();
302 __ Ret();
303}
304
305void StubCodeCompiler::GenerateEnterSafepointStub(Assembler* assembler) {
306 RegisterSet all_registers;
307 all_registers.AddAllGeneralRegisters();
308 __ PushRegisters(all_registers);
309
310 __ EnterFrame((1 << FP) | (1 << LR), 0);
311 __ ReserveAlignedFrameSpace(0);
312 __ ldr(R0, Address(THR, kEnterSafepointRuntimeEntry.OffsetFromThread()));
313 __ blx(R0);
314 __ LeaveFrame((1 << FP) | (1 << LR), 0);
315
316 __ PopRegisters(all_registers);
317 __ Ret();
318}
319
320void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
321 RegisterSet all_registers;
322 all_registers.AddAllGeneralRegisters();
323 __ PushRegisters(all_registers);
324
325 __ EnterFrame((1 << FP) | (1 << LR), 0);
326 __ ReserveAlignedFrameSpace(0);
327
328 // Set the execution state to VM while waiting for the safepoint to end.
329 // This isn't strictly necessary but enables tests to check that we're not
330 // in native code anymore. See tests/ffi/function_gc_test.dart for example.
331 __ LoadImmediate(R0, target::Thread::vm_execution_state());
332 __ str(R0, Address(THR, target::Thread::execution_state_offset()));
333
334 __ ldr(R0, Address(THR, kExitSafepointRuntimeEntry.OffsetFromThread()));
335 __ blx(R0);
336 __ LeaveFrame((1 << FP) | (1 << LR), 0);
337
338 __ PopRegisters(all_registers);
339 __ Ret();
340}
341
342// Call a native function within a safepoint.
343//
344// On entry:
345// Stack: set up for call, incl. alignment
346// R8: target to call
347//
348// On exit:
349// Stack: preserved
350// NOTFP, R4: clobbered, although normally callee-saved
351void StubCodeCompiler::GenerateCallNativeThroughSafepointStub(
352 Assembler* assembler) {
353 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R4)) != 0);
354
355 // TransitionGeneratedToNative might clobber LR if it takes the slow path.
356 __ mov(R4, Operand(LR));
357
358 __ LoadImmediate(R9, target::Thread::exit_through_ffi());
359 __ TransitionGeneratedToNative(R8, FPREG, R9 /*volatile*/, NOTFP,
360 /*enter_safepoint=*/true);
361
362 __ blx(R8);
363
364 __ TransitionNativeToGenerated(R9 /*volatile*/, NOTFP,
365 /*exit_safepoint=*/true);
366
367 __ bx(R4);
368}
369
370#if !defined(DART_PRECOMPILER)
371void StubCodeCompiler::GenerateJITCallbackTrampolines(
372 Assembler* assembler,
373 intptr_t next_callback_id) {
374#if defined(USING_SIMULATOR)
375 // TODO(37299): FFI is not support in SIMARM.
376 __ Breakpoint();
377#else
378 Label done;
379
380 // TMP is volatile and not used for passing any arguments.
381 COMPILE_ASSERT(!IsCalleeSavedRegister(TMP) && !IsArgumentRegister(TMP));
382
383 for (intptr_t i = 0;
384 i < NativeCallbackTrampolines::NumCallbackTrampolinesPerPage(); ++i) {
385 // We don't use LoadImmediate because we need the trampoline size to be
386 // fixed independently of the callback ID.
387 //
388 // PC points two instructions ahead of the current one -- directly where we
389 // store the callback ID.
390 __ ldr(TMP, Address(PC, 0));
391 __ b(&done);
392 __ Emit(next_callback_id + i);
393 }
394
395 ASSERT(__ CodeSize() ==
396 kNativeCallbackTrampolineSize *
397 NativeCallbackTrampolines::NumCallbackTrampolinesPerPage());
398
399 __ Bind(&done);
400
401 const intptr_t shared_stub_start = __ CodeSize();
402
403 // Save THR (callee-saved), R4 & R5 (temporaries, callee-saved), and LR.
404 COMPILE_ASSERT(StubCodeCompiler::kNativeCallbackTrampolineStackDelta == 4);
405 __ PushList((1 << LR) | (1 << THR) | (1 << R4) | (1 << R5));
406
407 // Don't rely on TMP being preserved by assembler macros anymore.
408 __ mov(R4, Operand(TMP));
409
410 COMPILE_ASSERT(IsCalleeSavedRegister(R4));
411 COMPILE_ASSERT(!IsArgumentRegister(THR));
412
413 RegisterSet argument_registers;
414 argument_registers.AddAllArgumentRegisters();
415 __ PushRegisters(argument_registers);
416
417 // Load the thread, verify the callback ID and exit the safepoint.
418 //
419 // We exit the safepoint inside DLRT_GetThreadForNativeCallbackTrampoline
420 // in order to safe code size on this shared stub.
421 {
422 __ EnterFrame(1 << FP, 0);
423 __ ReserveAlignedFrameSpace(0);
424
425 __ mov(R0, Operand(R4));
426
427 // Since DLRT_GetThreadForNativeCallbackTrampoline can theoretically be
428 // loaded anywhere, we use the same trick as before to ensure a predictable
429 // instruction sequence.
430 Label call;
431 __ ldr(R1, Address(PC, 0));
432 __ b(&call);
433 __ Emit(
434 reinterpret_cast<intptr_t>(&DLRT_GetThreadForNativeCallbackTrampoline));
435
436 __ Bind(&call);
437 __ blx(R1);
438 __ mov(THR, Operand(R0));
439
440 __ LeaveFrame(1 << FP);
441 }
442
443 __ PopRegisters(argument_registers);
444
445 COMPILE_ASSERT(!IsArgumentRegister(R8));
446
447 // Load the code object.
448 __ LoadFromOffset(kWord, R5, THR,
449 compiler::target::Thread::callback_code_offset());
450 __ LoadFieldFromOffset(kWord, R5, R5,
451 compiler::target::GrowableObjectArray::data_offset());
452 __ ldr(R5, __ ElementAddressForRegIndex(
453 /*is_load=*/true,
454 /*external=*/false,
455 /*array_cid=*/kArrayCid,
456 /*index_scale, smi-tagged=*/compiler::target::kWordSize * 2,
457 /*index_unboxed=*/false,
458 /*array=*/R5,
459 /*index=*/R4));
460 __ LoadFieldFromOffset(kWord, R5, R5,
461 compiler::target::Code::entry_point_offset());
462
463 // On entry to the function, there will be four extra slots on the stack:
464 // saved THR, R4, R5 and the return address. The target will know to skip
465 // them.
466 __ blx(R5);
467
468 // EnterSafepoint clobbers R4, R5 and TMP, all saved or volatile.
469 __ EnterSafepoint(R4, R5);
470
471 // Returns.
472 __ PopList((1 << PC) | (1 << THR) | (1 << R4) | (1 << R5));
473
474 ASSERT((__ CodeSize() - shared_stub_start) == kNativeCallbackSharedStubSize);
475 ASSERT(__ CodeSize() <= VirtualMemory::PageSize());
476
477#if defined(DEBUG)
478 while (__ CodeSize() < VirtualMemory::PageSize()) {
479 __ Breakpoint();
480 }
481#endif
482#endif
483}
484#endif // !defined(DART_PRECOMPILER)
485
486void StubCodeCompiler::GenerateDispatchTableNullErrorStub(
487 Assembler* assembler) {
488 __ EnterStubFrame();
489 __ CallRuntime(kNullErrorRuntimeEntry, /*argument_count=*/0);
490 // The NullError runtime entry does not return.
491 __ Breakpoint();
492}
493
494void StubCodeCompiler::GenerateNullErrorSharedWithoutFPURegsStub(
495 Assembler* assembler) {
496 GenerateSharedStub(
497 assembler, /*save_fpu_registers=*/false, &kNullErrorRuntimeEntry,
498 target::Thread::null_error_shared_without_fpu_regs_stub_offset(),
499 /*allow_return=*/false);
500}
501
502void StubCodeCompiler::GenerateNullErrorSharedWithFPURegsStub(
503 Assembler* assembler) {
504 GenerateSharedStub(
505 assembler, /*save_fpu_registers=*/true, &kNullErrorRuntimeEntry,
506 target::Thread::null_error_shared_with_fpu_regs_stub_offset(),
507 /*allow_return=*/false);
508}
509
510void StubCodeCompiler::GenerateNullArgErrorSharedWithoutFPURegsStub(
511 Assembler* assembler) {
512 GenerateSharedStub(
513 assembler, /*save_fpu_registers=*/false, &kArgumentNullErrorRuntimeEntry,
514 target::Thread::null_arg_error_shared_without_fpu_regs_stub_offset(),
515 /*allow_return=*/false);
516}
517
518void StubCodeCompiler::GenerateNullArgErrorSharedWithFPURegsStub(
519 Assembler* assembler) {
520 GenerateSharedStub(
521 assembler, /*save_fpu_registers=*/true, &kArgumentNullErrorRuntimeEntry,
522 target::Thread::null_arg_error_shared_with_fpu_regs_stub_offset(),
523 /*allow_return=*/false);
524}
525
526void StubCodeCompiler::GenerateNullCastErrorSharedWithoutFPURegsStub(
527 Assembler* assembler) {
528 GenerateSharedStub(
529 assembler, /*save_fpu_registers=*/false, &kNullCastErrorRuntimeEntry,
530 target::Thread::null_cast_error_shared_without_fpu_regs_stub_offset(),
531 /*allow_return=*/false);
532}
533
534void StubCodeCompiler::GenerateNullCastErrorSharedWithFPURegsStub(
535 Assembler* assembler) {
536 GenerateSharedStub(
537 assembler, /*save_fpu_registers=*/true, &kNullCastErrorRuntimeEntry,
538 target::Thread::null_cast_error_shared_with_fpu_regs_stub_offset(),
539 /*allow_return=*/false);
540}
541
542static void GenerateRangeError(Assembler* assembler, bool with_fpu_regs) {
543 auto perform_runtime_call = [&]() {
544 ASSERT(!GenericCheckBoundInstr::UseUnboxedRepresentation());
545 __ PushRegister(RangeErrorABI::kLengthReg);
546 __ PushRegister(RangeErrorABI::kIndexReg);
547 __ CallRuntime(kRangeErrorRuntimeEntry, /*argument_count=*/2);
548 __ Breakpoint();
549 };
550
551 GenerateSharedStubGeneric(
552 assembler, /*save_fpu_registers=*/with_fpu_regs,
553 with_fpu_regs
554 ? target::Thread::range_error_shared_with_fpu_regs_stub_offset()
555 : target::Thread::range_error_shared_without_fpu_regs_stub_offset(),
556 /*allow_return=*/false, perform_runtime_call);
557}
558
559void StubCodeCompiler::GenerateRangeErrorSharedWithoutFPURegsStub(
560 Assembler* assembler) {
561 GenerateRangeError(assembler, /*with_fpu_regs=*/false);
562}
563
564void StubCodeCompiler::GenerateRangeErrorSharedWithFPURegsStub(
565 Assembler* assembler) {
566 GenerateRangeError(assembler, /*with_fpu_regs=*/true);
567}
568
569void StubCodeCompiler::GenerateStackOverflowSharedWithoutFPURegsStub(
570 Assembler* assembler) {
571 GenerateSharedStub(
572 assembler, /*save_fpu_registers=*/false, &kStackOverflowRuntimeEntry,
573 target::Thread::stack_overflow_shared_without_fpu_regs_stub_offset(),
574 /*allow_return=*/true);
575}
576
577void StubCodeCompiler::GenerateStackOverflowSharedWithFPURegsStub(
578 Assembler* assembler) {
579 GenerateSharedStub(
580 assembler, /*save_fpu_registers=*/true, &kStackOverflowRuntimeEntry,
581 target::Thread::stack_overflow_shared_with_fpu_regs_stub_offset(),
582 /*allow_return=*/true);
583}
584
585// Input parameters:
586// LR : return address.
587// SP : address of return value.
588// R9 : address of the native function to call.
589// R2 : address of first argument in argument array.
590// R1 : argc_tag including number of arguments and function kind.
591static void GenerateCallNativeWithWrapperStub(Assembler* assembler,
592 Address wrapper) {
593 const intptr_t thread_offset = target::NativeArguments::thread_offset();
594 const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
595 const intptr_t argv_offset = target::NativeArguments::argv_offset();
596 const intptr_t retval_offset = target::NativeArguments::retval_offset();
597
598 __ EnterStubFrame();
599
600 // Save exit frame information to enable stack walking as we are about
601 // to transition to native code.
602 __ StoreToOffset(kWord, FP, THR,
603 target::Thread::top_exit_frame_info_offset());
604
605 // Mark that the thread exited generated code through a runtime call.
606 __ LoadImmediate(R8, target::Thread::exit_through_runtime_call());
607 __ StoreToOffset(kWord, R8, THR, target::Thread::exit_through_ffi_offset());
608
609#if defined(DEBUG)
610 {
611 Label ok;
612 // Check that we are always entering from Dart code.
613 __ LoadFromOffset(kWord, R8, THR, target::Thread::vm_tag_offset());
614 __ CompareImmediate(R8, VMTag::kDartCompiledTagId);
615 __ b(&ok, EQ);
616 __ Stop("Not coming from Dart code.");
617 __ Bind(&ok);
618 }
619#endif
620
621 // Mark that the thread is executing native code.
622 __ StoreToOffset(kWord, R9, THR, target::Thread::vm_tag_offset());
623
624 // Reserve space for the native arguments structure passed on the stack (the
625 // outgoing pointer parameter to the native arguments structure is passed in
626 // R0) and align frame before entering the C++ world.
627 __ ReserveAlignedFrameSpace(target::NativeArguments::StructSize());
628
629 // Initialize target::NativeArguments structure and call native function.
630 // Registers R0, R1, R2, and R3 are used.
631
632 ASSERT(thread_offset == 0 * target::kWordSize);
633 // Set thread in NativeArgs.
634 __ mov(R0, Operand(THR));
635
636 // There are no native calls to closures, so we do not need to set the tag
637 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_.
638 ASSERT(argc_tag_offset == 1 * target::kWordSize);
639 // Set argc in target::NativeArguments: R1 already contains argc.
640
641 ASSERT(argv_offset == 2 * target::kWordSize);
642 // Set argv in target::NativeArguments: R2 already contains argv.
643
644 // Set retval in NativeArgs.
645 ASSERT(retval_offset == 3 * target::kWordSize);
646 __ add(R3, FP, Operand(2 * target::kWordSize));
647
648 // Passing the structure by value as in runtime calls would require changing
649 // Dart API for native functions.
650 // For now, space is reserved on the stack and we pass a pointer to it.
651 __ stm(IA, SP, (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3));
652 __ mov(R0, Operand(SP)); // Pass the pointer to the target::NativeArguments.
653
654 __ mov(R1, Operand(R9)); // Pass the function entrypoint to call.
655
656 // Call native function invocation wrapper or redirection via simulator.
657 __ ldr(LR, wrapper);
658 __ blx(LR);
659
660 // Mark that the thread is executing Dart code.
661 __ LoadImmediate(R2, VMTag::kDartCompiledTagId);
662 __ StoreToOffset(kWord, R2, THR, target::Thread::vm_tag_offset());
663
664 // Mark that the thread has not exited generated Dart code.
665 __ LoadImmediate(R2, 0);
666 __ StoreToOffset(kWord, R2, THR, target::Thread::exit_through_ffi_offset());
667
668 // Reset exit frame information in Isolate's mutator thread structure.
669 __ StoreToOffset(kWord, R2, THR,
670 target::Thread::top_exit_frame_info_offset());
671
672 // Restore the global object pool after returning from runtime (old space is
673 // moving, so the GOP could have been relocated).
674 if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
675 __ SetupGlobalPoolAndDispatchTable();
676 }
677
678 __ LeaveStubFrame();
679 __ Ret();
680}
681
682void StubCodeCompiler::GenerateCallNoScopeNativeStub(Assembler* assembler) {
683 GenerateCallNativeWithWrapperStub(
684 assembler,
685 Address(THR,
686 target::Thread::no_scope_native_wrapper_entry_point_offset()));
687}
688
689void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
690 GenerateCallNativeWithWrapperStub(
691 assembler,
692 Address(THR,
693 target::Thread::auto_scope_native_wrapper_entry_point_offset()));
694}
695
696// Input parameters:
697// LR : return address.
698// SP : address of return value.
699// R9 : address of the native function to call.
700// R2 : address of first argument in argument array.
701// R1 : argc_tag including number of arguments and function kind.
702void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
703 GenerateCallNativeWithWrapperStub(
704 assembler,
705 Address(THR,
706 target::Thread::bootstrap_native_wrapper_entry_point_offset()));
707}
708
709// Input parameters:
710// R4: arguments descriptor array.
711void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
712 // Create a stub frame as we are pushing some objects on the stack before
713 // calling into the runtime.
714 __ EnterStubFrame();
715 // Setup space on stack for return value and preserve arguments descriptor.
716 __ LoadImmediate(R0, 0);
717 __ PushList((1 << R0) | (1 << R4));
718 __ CallRuntime(kPatchStaticCallRuntimeEntry, 0);
719 // Get Code object result and restore arguments descriptor array.
720 __ PopList((1 << R0) | (1 << R4));
721 // Remove the stub frame.
722 __ LeaveStubFrame();
723 // Jump to the dart function.
724 __ mov(CODE_REG, Operand(R0));
725 __ Branch(FieldAddress(R0, target::Code::entry_point_offset()));
726}
727
728// Called from a static call only when an invalid code has been entered
729// (invalid because its function was optimized or deoptimized).
730// R4: arguments descriptor array.
731void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
732 Label monomorphic;
733 __ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
734
735 // Load code pointer to this stub from the thread:
736 // The one that is passed in, is not correct - it points to the code object
737 // that needs to be replaced.
738 __ ldr(CODE_REG,
739 Address(THR, target::Thread::fix_callers_target_code_offset()));
740 // Create a stub frame as we are pushing some objects on the stack before
741 // calling into the runtime.
742 __ EnterStubFrame();
743 // Setup space on stack for return value and preserve arguments descriptor.
744 __ LoadImmediate(R0, 0);
745 __ PushList((1 << R0) | (1 << R4));
746 __ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
747 // Get Code object result and restore arguments descriptor array.
748 __ PopList((1 << R0) | (1 << R4));
749 // Remove the stub frame.
750 __ LeaveStubFrame();
751 // Jump to the dart function.
752 __ mov(CODE_REG, Operand(R0));
753 __ Branch(FieldAddress(R0, target::Code::entry_point_offset()));
754
755 __ Bind(&monomorphic);
756 // Load code pointer to this stub from the thread:
757 // The one that is passed in, is not correct - it points to the code object
758 // that needs to be replaced.
759 __ ldr(CODE_REG,
760 Address(THR, target::Thread::fix_callers_target_code_offset()));
761 // Create a stub frame as we are pushing some objects on the stack before
762 // calling into the runtime.
763 __ EnterStubFrame();
764 __ LoadImmediate(R1, 0);
765 __ Push(R9); // Preserve cache (guarded CID as Smi).
766 __ Push(R0); // Preserve receiver.
767 __ Push(R1);
768 __ CallRuntime(kFixCallersTargetMonomorphicRuntimeEntry, 0);
769 __ Pop(CODE_REG);
770 __ Pop(R0); // Restore receiver.
771 __ Pop(R9); // Restore cache (guarded CID as Smi).
772 // Remove the stub frame.
773 __ LeaveStubFrame();
774 // Jump to the dart function.
775 __ Branch(FieldAddress(
776 CODE_REG, target::Code::entry_point_offset(CodeEntryKind::kMonomorphic)));
777}
778
779// Called from object allocate instruction when the allocation stub has been
780// disabled.
781void StubCodeCompiler::GenerateFixAllocationStubTargetStub(
782 Assembler* assembler) {
783 // Load code pointer to this stub from the thread:
784 // The one that is passed in, is not correct - it points to the code object
785 // that needs to be replaced.
786 __ ldr(CODE_REG,
787 Address(THR, target::Thread::fix_allocation_stub_code_offset()));
788 __ EnterStubFrame();
789 // Setup space on stack for return value.
790 __ LoadImmediate(R0, 0);
791 __ Push(R0);
792 __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
793 // Get Code object result.
794 __ Pop(R0);
795 // Remove the stub frame.
796 __ LeaveStubFrame();
797 // Jump to the dart function.
798 __ mov(CODE_REG, Operand(R0));
799 __ Branch(FieldAddress(R0, target::Code::entry_point_offset()));
800}
801
802// Input parameters:
803// R2: smi-tagged argument count, may be zero.
804// FP[target::frame_layout.param_end_from_fp + 1]: last argument.
805static void PushArrayOfArguments(Assembler* assembler) {
806 // Allocate array to store arguments of caller.
807 __ LoadObject(R1, NullObject());
808 // R1: null element type for raw Array.
809 // R2: smi-tagged argument count, may be zero.
810 __ BranchLink(StubCodeAllocateArray());
811 // R0: newly allocated array.
812 // R2: smi-tagged argument count, may be zero (was preserved by the stub).
813 __ Push(R0); // Array is in R0 and on top of stack.
814 __ AddImmediate(R1, FP,
815 target::frame_layout.param_end_from_fp * target::kWordSize);
816 __ AddImmediate(R3, R0, target::Array::data_offset() - kHeapObjectTag);
817 // Copy arguments from stack to array (starting at the end).
818 // R1: address just beyond last argument on stack.
819 // R3: address of first argument in array.
820 Label enter;
821 __ b(&enter);
822 Label loop;
823 __ Bind(&loop);
824 __ ldr(R8, Address(R1, target::kWordSize, Address::PreIndex));
825 // Generational barrier is needed, array is not necessarily in new space.
826 __ StoreIntoObject(R0, Address(R3, R2, LSL, 1), R8);
827 __ Bind(&enter);
828 __ subs(R2, R2, Operand(target::ToRawSmi(1))); // R2 is Smi.
829 __ b(&loop, PL);
830}
831
832// Used by eager and lazy deoptimization. Preserve result in R0 if necessary.
833// This stub translates optimized frame into unoptimized frame. The optimized
834// frame can contain values in registers and on stack, the unoptimized
835// frame contains all values on stack.
836// Deoptimization occurs in following steps:
837// - Push all registers that can contain values.
838// - Call C routine to copy the stack and saved registers into temporary buffer.
839// - Adjust caller's frame to correct unoptimized frame size.
840// - Fill the unoptimized frame.
841// - Materialize objects that require allocation (e.g. Double instances).
842// GC can occur only after frame is fully rewritten.
843// Stack after EnterFrame(...) below:
844// +------------------+
845// | Saved PP | <- TOS
846// +------------------+
847// | Saved FP | <- FP of stub
848// +------------------+
849// | Saved LR | (deoptimization point)
850// +------------------+
851// | pc marker |
852// +------------------+
853// | Saved CODE_REG |
854// +------------------+
855// | ... | <- SP of optimized frame
856//
857// Parts of the code cannot GC, part of the code can GC.
858static void GenerateDeoptimizationSequence(Assembler* assembler,
859 DeoptStubKind kind) {
860 // DeoptimizeCopyFrame expects a Dart frame, i.e. EnterDartFrame(0), but there
861 // is no need to set the correct PC marker or load PP, since they get patched.
862 __ EnterDartFrame(0);
863 __ LoadPoolPointer();
864
865 // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry
866 // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
867 const intptr_t saved_result_slot_from_fp =
868 target::frame_layout.first_local_from_fp + 1 -
869 (kNumberOfCpuRegisters - R0);
870 const intptr_t saved_exception_slot_from_fp =
871 target::frame_layout.first_local_from_fp + 1 -
872 (kNumberOfCpuRegisters - R0);
873 const intptr_t saved_stacktrace_slot_from_fp =
874 target::frame_layout.first_local_from_fp + 1 -
875 (kNumberOfCpuRegisters - R1);
876 // Result in R0 is preserved as part of pushing all registers below.
877
878 // Push registers in their enumeration order: lowest register number at
879 // lowest address.
880 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
881 if (i == CODE_REG) {
882 // Save the original value of CODE_REG pushed before invoking this stub
883 // instead of the value used to call this stub.
884 __ ldr(IP, Address(FP, 2 * target::kWordSize));
885 __ Push(IP);
886 } else if (i == SP) {
887 // Push(SP) has unpredictable behavior.
888 __ mov(IP, Operand(SP));
889 __ Push(IP);
890 } else {
891 __ Push(static_cast<Register>(i));
892 }
893 }
894
895 if (TargetCPUFeatures::vfp_supported()) {
896 ASSERT(kFpuRegisterSize == 4 * target::kWordSize);
897 if (kNumberOfDRegisters > 16) {
898 __ vstmd(DB_W, SP, D16, kNumberOfDRegisters - 16);
899 __ vstmd(DB_W, SP, D0, 16);
900 } else {
901 __ vstmd(DB_W, SP, D0, kNumberOfDRegisters);
902 }
903 } else {
904 __ AddImmediate(SP, -kNumberOfFpuRegisters * kFpuRegisterSize);
905 }
906
907 __ mov(R0, Operand(SP)); // Pass address of saved registers block.
908 bool is_lazy =
909 (kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow);
910 __ mov(R1, Operand(is_lazy ? 1 : 0));
911 __ ReserveAlignedFrameSpace(0);
912 __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2);
913 // Result (R0) is stack-size (FP - SP) in bytes.
914
915 if (kind == kLazyDeoptFromReturn) {
916 // Restore result into R1 temporarily.
917 __ ldr(R1, Address(FP, saved_result_slot_from_fp * target::kWordSize));
918 } else if (kind == kLazyDeoptFromThrow) {
919 // Restore result into R1 temporarily.
920 __ ldr(R1, Address(FP, saved_exception_slot_from_fp * target::kWordSize));
921 __ ldr(R2, Address(FP, saved_stacktrace_slot_from_fp * target::kWordSize));
922 }
923
924 __ RestoreCodePointer();
925 __ LeaveDartFrame();
926 __ sub(SP, FP, Operand(R0));
927
928 // DeoptimizeFillFrame expects a Dart frame, i.e. EnterDartFrame(0), but there
929 // is no need to set the correct PC marker or load PP, since they get patched.
930 __ EnterStubFrame();
931 __ mov(R0, Operand(FP)); // Get last FP address.
932 if (kind == kLazyDeoptFromReturn) {
933 __ Push(R1); // Preserve result as first local.
934 } else if (kind == kLazyDeoptFromThrow) {
935 __ Push(R1); // Preserve exception as first local.
936 __ Push(R2); // Preserve stacktrace as second local.
937 }
938 __ ReserveAlignedFrameSpace(0);
939 __ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1); // Pass last FP in R0.
940 if (kind == kLazyDeoptFromReturn) {
941 // Restore result into R1.
942 __ ldr(R1, Address(FP, target::frame_layout.first_local_from_fp *
943 target::kWordSize));
944 } else if (kind == kLazyDeoptFromThrow) {
945 // Restore result into R1.
946 __ ldr(R1, Address(FP, target::frame_layout.first_local_from_fp *
947 target::kWordSize));
948 __ ldr(R2, Address(FP, (target::frame_layout.first_local_from_fp - 1) *
949 target::kWordSize));
950 }
951 // Code above cannot cause GC.
952 __ RestoreCodePointer();
953 __ LeaveStubFrame();
954
955 // Frame is fully rewritten at this point and it is safe to perform a GC.
956 // Materialize any objects that were deferred by FillFrame because they
957 // require allocation.
958 // Enter stub frame with loading PP. The caller's PP is not materialized yet.
959 __ EnterStubFrame();
960 if (kind == kLazyDeoptFromReturn) {
961 __ Push(R1); // Preserve result, it will be GC-d here.
962 } else if (kind == kLazyDeoptFromThrow) {
963 __ Push(R1); // Preserve exception, it will be GC-d here.
964 __ Push(R2); // Preserve stacktrace, it will be GC-d here.
965 }
966 __ PushObject(NullObject()); // Space for the result.
967 __ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0);
968 // Result tells stub how many bytes to remove from the expression stack
969 // of the bottom-most frame. They were used as materialization arguments.
970 __ Pop(R2);
971 if (kind == kLazyDeoptFromReturn) {
972 __ Pop(R0); // Restore result.
973 } else if (kind == kLazyDeoptFromThrow) {
974 __ Pop(R1); // Restore stacktrace.
975 __ Pop(R0); // Restore exception.
976 }
977 __ LeaveStubFrame();
978 // Remove materialization arguments.
979 __ add(SP, SP, Operand(R2, ASR, kSmiTagSize));
980 // The caller is responsible for emitting the return instruction.
981}
982
983// R0: result, must be preserved
984void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
985 Assembler* assembler) {
986 // Push zap value instead of CODE_REG for lazy deopt.
987 __ LoadImmediate(IP, kZapCodeReg);
988 __ Push(IP);
989 // Return address for "call" to deopt stub.
990 __ LoadImmediate(LR, kZapReturnAddress);
991 __ ldr(CODE_REG,
992 Address(THR, target::Thread::lazy_deopt_from_return_stub_offset()));
993 GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
994 __ Ret();
995}
996
997// R0: exception, must be preserved
998// R1: stacktrace, must be preserved
999void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub(
1000 Assembler* assembler) {
1001 // Push zap value instead of CODE_REG for lazy deopt.
1002 __ LoadImmediate(IP, kZapCodeReg);
1003 __ Push(IP);
1004 // Return address for "call" to deopt stub.
1005 __ LoadImmediate(LR, kZapReturnAddress);
1006 __ ldr(CODE_REG,
1007 Address(THR, target::Thread::lazy_deopt_from_throw_stub_offset()));
1008 GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
1009 __ Ret();
1010}
1011
1012void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) {
1013 __ Push(CODE_REG);
1014 __ ldr(CODE_REG, Address(THR, target::Thread::deoptimize_stub_offset()));
1015 GenerateDeoptimizationSequence(assembler, kEagerDeopt);
1016 __ Ret();
1017}
1018
1019// R9: ICData/MegamorphicCache
1020static void GenerateNoSuchMethodDispatcherBody(Assembler* assembler) {
1021 __ EnterStubFrame();
1022
1023 __ ldr(R4,
1024 FieldAddress(R9, target::CallSiteData::arguments_descriptor_offset()));
1025
1026 // Load the receiver.
1027 __ ldr(R2, FieldAddress(R4, target::ArgumentsDescriptor::size_offset()));
1028 __ add(IP, FP, Operand(R2, LSL, 1)); // R2 is Smi.
1029 __ ldr(R8, Address(IP, target::frame_layout.param_end_from_fp *
1030 target::kWordSize));
1031 __ LoadImmediate(IP, 0);
1032 __ Push(IP); // Result slot.
1033 __ Push(R8); // Receiver.
1034 __ Push(R9); // ICData/MegamorphicCache.
1035 __ Push(R4); // Arguments descriptor.
1036
1037 // Adjust arguments count.
1038 __ ldr(R3,
1039 FieldAddress(R4, target::ArgumentsDescriptor::type_args_len_offset()));
1040 __ cmp(R3, Operand(0));
1041 __ AddImmediate(R2, R2, target::ToRawSmi(1),
1042 NE); // Include the type arguments.
1043
1044 // R2: Smi-tagged arguments array length.
1045 PushArrayOfArguments(assembler);
1046 const intptr_t kNumArgs = 4;
1047 __ CallRuntime(kNoSuchMethodFromCallStubRuntimeEntry, kNumArgs);
1048 __ Drop(4);
1049 __ Pop(R0); // Return value.
1050 __ LeaveStubFrame();
1051 __ Ret();
1052}
1053
1054static void GenerateDispatcherCode(Assembler* assembler,
1055 Label* call_target_function) {
1056 __ Comment("NoSuchMethodDispatch");
1057 // When lazily generated invocation dispatchers are disabled, the
1058 // miss-handler may return null.
1059 __ CompareObject(R0, NullObject());
1060 __ b(call_target_function, NE);
1061
1062 GenerateNoSuchMethodDispatcherBody(assembler);
1063}
1064
1065// Input:
1066// R4 - arguments descriptor
1067// R9 - icdata/megamorphic_cache
1068void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
1069 Assembler* assembler) {
1070 GenerateNoSuchMethodDispatcherBody(assembler);
1071}
1072
1073// Called for inline allocation of arrays.
1074// Input parameters:
1075// LR: return address.
1076// R1: array element type (either NULL or an instantiated type).
1077// R2: array length as Smi (must be preserved).
1078// The newly allocated object is returned in R0.
1079void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
1080 if (!FLAG_use_slow_path) {
1081 Label slow_case;
1082 // Compute the size to be allocated, it is based on the array length
1083 // and is computed as:
1084 // RoundedAllocationSize(
1085 // (array_length * kwordSize) + target::Array::header_size()).
1086 __ mov(R3, Operand(R2)); // Array length.
1087 // Check that length is a positive Smi.
1088 __ tst(R3, Operand(kSmiTagMask));
1089 __ b(&slow_case, NE);
1090
1091 __ cmp(R3, Operand(0));
1092 __ b(&slow_case, LT);
1093
1094 // Check for maximum allowed length.
1095 const intptr_t max_len =
1096 target::ToRawSmi(target::Array::kMaxNewSpaceElements);
1097 __ CompareImmediate(R3, max_len);
1098 __ b(&slow_case, GT);
1099
1100 const intptr_t cid = kArrayCid;
1101 NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R4, cid));
1102 NOT_IN_PRODUCT(__ MaybeTraceAllocation(R4, &slow_case));
1103
1104 const intptr_t fixed_size_plus_alignment_padding =
1105 target::Array::header_size() +
1106 target::ObjectAlignment::kObjectAlignment - 1;
1107 __ LoadImmediate(R9, fixed_size_plus_alignment_padding);
1108 __ add(R9, R9, Operand(R3, LSL, 1)); // R3 is a Smi.
1109 ASSERT(kSmiTagShift == 1);
1110 __ bic(R9, R9, Operand(target::ObjectAlignment::kObjectAlignment - 1));
1111
1112 // R9: Allocation size.
1113 // Potential new object start.
1114 __ ldr(R0, Address(THR, target::Thread::top_offset()));
1115 __ adds(R3, R0, Operand(R9)); // Potential next object start.
1116 __ b(&slow_case, CS); // Branch if unsigned overflow.
1117
1118 // Check if the allocation fits into the remaining space.
1119 // R0: potential new object start.
1120 // R3: potential next object start.
1121 // R9: allocation size.
1122 __ ldr(TMP, Address(THR, target::Thread::end_offset()));
1123 __ cmp(R3, Operand(TMP));
1124 __ b(&slow_case, CS);
1125
1126 // Successfully allocated the object(s), now update top to point to
1127 // next object start and initialize the object.
1128 __ str(R3, Address(THR, target::Thread::top_offset()));
1129 __ add(R0, R0, Operand(kHeapObjectTag));
1130
1131 // Initialize the tags.
1132 // R0: new object start as a tagged pointer.
1133 // R3: new object end address.
1134 // R9: allocation size.
1135 {
1136 const intptr_t shift = target::ObjectLayout::kTagBitsSizeTagPos -
1137 target::ObjectAlignment::kObjectAlignmentLog2;
1138
1139 __ CompareImmediate(R9, target::ObjectLayout::kSizeTagMaxSizeTag);
1140 __ mov(R8, Operand(R9, LSL, shift), LS);
1141 __ mov(R8, Operand(0), HI);
1142
1143 // Get the class index and insert it into the tags.
1144 // R8: size and bit tags.
1145 const uint32_t tags =
1146 target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
1147 __ LoadImmediate(TMP, tags);
1148 __ orr(R8, R8, Operand(TMP));
1149 __ str(R8,
1150 FieldAddress(R0, target::Array::tags_offset())); // Store tags.
1151 }
1152
1153 // R0: new object start as a tagged pointer.
1154 // R3: new object end address.
1155 // Store the type argument field.
1156 __ StoreIntoObjectNoBarrier(
1157 R0, FieldAddress(R0, target::Array::type_arguments_offset()), R1);
1158
1159 // Set the length field.
1160 __ StoreIntoObjectNoBarrier(
1161 R0, FieldAddress(R0, target::Array::length_offset()), R2);
1162
1163 // Initialize all array elements to raw_null.
1164 // R0: new object start as a tagged pointer.
1165 // R8, R9: null
1166 // R4: iterator which initially points to the start of the variable
1167 // data area to be initialized.
1168 // R3: new object end address.
1169 // R9: allocation size.
1170
1171 __ LoadObject(R8, NullObject());
1172 __ mov(R9, Operand(R8));
1173 __ AddImmediate(R4, R0, target::Array::header_size() - kHeapObjectTag);
1174 __ InitializeFieldsNoBarrier(R0, R4, R3, R8, R9);
1175 __ Ret(); // Returns the newly allocated object in R0.
1176 // Unable to allocate the array using the fast inline code, just call
1177 // into the runtime.
1178 __ Bind(&slow_case);
1179 }
1180
1181 // Create a stub frame as we are pushing some objects on the stack before
1182 // calling into the runtime.
1183 __ EnterStubFrame();
1184 __ LoadImmediate(TMP, 0);
1185 // Setup space on stack for return value.
1186 // Push array length as Smi and element type.
1187 __ PushList((1 << R1) | (1 << R2) | (1 << IP));
1188 __ CallRuntime(kAllocateArrayRuntimeEntry, 2);
1189 // Pop arguments; result is popped in IP.
1190 __ PopList((1 << R1) | (1 << R2) | (1 << IP)); // R2 is restored.
1191 __ mov(R0, Operand(IP));
1192
1193 // Write-barrier elimination might be enabled for this array (depending on the
1194 // array length). To be sure we will check if the allocated object is in old
1195 // space and if so call a leaf runtime to add it to the remembered set.
1196 EnsureIsNewOrRemembered(assembler);
1197
1198 __ LeaveStubFrame();
1199 __ Ret();
1200}
1201
1202// Called for allocation of Mint.
1203void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub(
1204 Assembler* assembler) {
1205 // For test purpose call allocation stub without inline allocation attempt.
1206 if (!FLAG_use_slow_path) {
1207 Label slow_case;
1208 __ TryAllocate(compiler::MintClass(), &slow_case,
1209 AllocateMintABI::kResultReg, AllocateMintABI::kTempReg);
1210 __ Ret();
1211
1212 __ Bind(&slow_case);
1213 }
1214 COMPILE_ASSERT(AllocateMintABI::kResultReg == R0);
1215 GenerateSharedStub(assembler, /*save_fpu_registers=*/true,
1216 &kAllocateMintRuntimeEntry,
1217 target::Thread::allocate_mint_with_fpu_regs_stub_offset(),
1218 /*allow_return=*/true,
1219 /*store_runtime_result_in_r0=*/true);
1220}
1221
1222// Called for allocation of Mint.
1223void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
1224 Assembler* assembler) {
1225 // For test purpose call allocation stub without inline allocation attempt.
1226 if (!FLAG_use_slow_path) {
1227 Label slow_case;
1228 __ TryAllocate(compiler::MintClass(), &slow_case,
1229 AllocateMintABI::kResultReg, AllocateMintABI::kTempReg);
1230 __ Ret();
1231
1232 __ Bind(&slow_case);
1233 }
1234 COMPILE_ASSERT(AllocateMintABI::kResultReg == R0);
1235 GenerateSharedStub(
1236 assembler, /*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
1237 target::Thread::allocate_mint_without_fpu_regs_stub_offset(),
1238 /*allow_return=*/true,
1239 /*store_runtime_result_in_r0=*/true);
1240}
1241
1242// Called when invoking Dart code from C++ (VM code).
1243// Input parameters:
1244// LR : points to return address.
1245// R0 : code object of the Dart function to call.
1246// R1 : arguments descriptor array.
1247// R2 : arguments array.
1248// R3 : current thread.
1249void StubCodeCompiler::GenerateInvokeDartCodeStub(Assembler* assembler) {
1250 __ Push(LR); // Marker for the profiler.
1251 __ EnterFrame((1 << FP) | (1 << LR), 0);
1252
1253 // Push code object to PC marker slot.
1254 __ ldr(IP, Address(R3, target::Thread::invoke_dart_code_stub_offset()));
1255 __ Push(IP);
1256
1257 __ PushNativeCalleeSavedRegisters();
1258
1259 // Set up THR, which caches the current thread in Dart code.
1260 if (THR != R3) {
1261 __ mov(THR, Operand(R3));
1262 }
1263
1264#if defined(USING_SHADOW_CALL_STACK)
1265#error Unimplemented
1266#endif
1267
1268 // Save the current VMTag on the stack.
1269 __ LoadFromOffset(kWord, R9, THR, target::Thread::vm_tag_offset());
1270 __ Push(R9);
1271
1272 // Save top resource and top exit frame info. Use R4-6 as temporary registers.
1273 // StackFrameIterator reads the top exit frame info saved in this frame.
1274 __ LoadFromOffset(kWord, R4, THR, target::Thread::top_resource_offset());
1275 __ Push(R4);
1276 __ LoadImmediate(R8, 0);
1277 __ StoreToOffset(kWord, R8, THR, target::Thread::top_resource_offset());
1278
1279 __ LoadFromOffset(kWord, R8, THR, target::Thread::exit_through_ffi_offset());
1280 __ Push(R8);
1281 __ LoadImmediate(R8, 0);
1282 __ StoreToOffset(kWord, R8, THR, target::Thread::exit_through_ffi_offset());
1283
1284 __ LoadFromOffset(kWord, R9, THR,
1285 target::Thread::top_exit_frame_info_offset());
1286 __ StoreToOffset(kWord, R8, THR,
1287 target::Thread::top_exit_frame_info_offset());
1288
1289 // target::frame_layout.exit_link_slot_from_entry_fp must be kept in sync
1290 // with the code below.
1291#if defined(TARGET_OS_MACOS) || defined(TARGET_OS_MACOS_IOS)
1292 ASSERT(target::frame_layout.exit_link_slot_from_entry_fp == -27);
1293#else
1294 ASSERT(target::frame_layout.exit_link_slot_from_entry_fp == -28);
1295#endif
1296 __ Push(R9);
1297
1298 __ EmitEntryFrameVerification(R9);
1299
1300 // Mark that the thread is executing Dart code. Do this after initializing the
1301 // exit link for the profiler.
1302 __ LoadImmediate(R9, VMTag::kDartCompiledTagId);
1303 __ StoreToOffset(kWord, R9, THR, target::Thread::vm_tag_offset());
1304
1305 // Load arguments descriptor array into R4, which is passed to Dart code.
1306 __ ldr(R4, Address(R1, target::VMHandles::kOffsetOfRawPtrInHandle));
1307
1308 // Load number of arguments into R9 and adjust count for type arguments.
1309 __ ldr(R3,
1310 FieldAddress(R4, target::ArgumentsDescriptor::type_args_len_offset()));
1311 __ ldr(R9, FieldAddress(R4, target::ArgumentsDescriptor::count_offset()));
1312 __ cmp(R3, Operand(0));
1313 __ AddImmediate(R9, R9, target::ToRawSmi(1),
1314 NE); // Include the type arguments.
1315 __ SmiUntag(R9);
1316
1317 // Compute address of 'arguments array' data area into R2.
1318 __ ldr(R2, Address(R2, target::VMHandles::kOffsetOfRawPtrInHandle));
1319 __ AddImmediate(R2, target::Array::data_offset() - kHeapObjectTag);
1320
1321 // Set up arguments for the Dart call.
1322 Label push_arguments;
1323 Label done_push_arguments;
1324 __ CompareImmediate(R9, 0); // check if there are arguments.
1325 __ b(&done_push_arguments, EQ);
1326 __ LoadImmediate(R1, 0);
1327 __ Bind(&push_arguments);
1328 __ ldr(R3, Address(R2));
1329 __ Push(R3);
1330 __ AddImmediate(R2, target::kWordSize);
1331 __ AddImmediate(R1, 1);
1332 __ cmp(R1, Operand(R9));
1333 __ b(&push_arguments, LT);
1334 __ Bind(&done_push_arguments);
1335
1336 // Call the Dart code entrypoint.
1337 if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
1338 __ SetupGlobalPoolAndDispatchTable();
1339 } else {
1340 __ LoadImmediate(PP, 0); // GC safe value into PP.
1341 }
1342 __ ldr(CODE_REG, Address(R0, target::VMHandles::kOffsetOfRawPtrInHandle));
1343 __ ldr(R0, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
1344 __ blx(R0); // R4 is the arguments descriptor array.
1345
1346 // Get rid of arguments pushed on the stack.
1347 __ AddImmediate(
1348 SP, FP,
1349 target::frame_layout.exit_link_slot_from_entry_fp * target::kWordSize);
1350
1351 // Restore the saved top exit frame info and top resource back into the
1352 // Isolate structure. Uses R9 as a temporary register for this.
1353 __ Pop(R9);
1354 __ StoreToOffset(kWord, R9, THR,
1355 target::Thread::top_exit_frame_info_offset());
1356 __ Pop(R9);
1357 __ StoreToOffset(kWord, R9, THR, target::Thread::exit_through_ffi_offset());
1358 __ Pop(R9);
1359 __ StoreToOffset(kWord, R9, THR, target::Thread::top_resource_offset());
1360
1361 // Restore the current VMTag from the stack.
1362 __ Pop(R4);
1363 __ StoreToOffset(kWord, R4, THR, target::Thread::vm_tag_offset());
1364
1365#if defined(USING_SHADOW_CALL_STACK)
1366#error Unimplemented
1367#endif
1368
1369 __ PopNativeCalleeSavedRegisters();
1370
1371 __ set_constant_pool_allowed(false);
1372
1373 // Restore the frame pointer and return.
1374 __ LeaveFrame((1 << FP) | (1 << LR));
1375 __ Drop(1);
1376 __ Ret();
1377}
1378
1379// Called when invoking compiled Dart code from interpreted Dart code.
1380// Input parameters:
1381// LR : points to return address.
1382// R0 : raw code object of the Dart function to call.
1383// R1 : arguments raw descriptor array.
1384// R2 : address of first argument.
1385// R3 : current thread.
1386void StubCodeCompiler::GenerateInvokeDartCodeFromBytecodeStub(
1387 Assembler* assembler) {
1388 if (FLAG_precompiled_mode) {
1389 __ Stop("Not using interpreter");
1390 return;
1391 }
1392
1393 __ Push(LR); // Marker for the profiler.
1394 __ EnterFrame((1 << FP) | (1 << LR), 0);
1395
1396 // Push code object to PC marker slot.
1397 __ ldr(IP,
1398 Address(R3,
1399 target::Thread::invoke_dart_code_from_bytecode_stub_offset()));
1400 __ Push(IP);
1401
1402 // Save new context and C++ ABI callee-saved registers.
1403 __ PushList(kAbiPreservedCpuRegs);
1404
1405 const DRegister firstd = EvenDRegisterOf(kAbiFirstPreservedFpuReg);
1406 if (TargetCPUFeatures::vfp_supported()) {
1407 ASSERT(2 * kAbiPreservedFpuRegCount < 16);
1408 // Save FPU registers. 2 D registers per Q register.
1409 __ vstmd(DB_W, SP, firstd, 2 * kAbiPreservedFpuRegCount);
1410 } else {
1411 __ sub(SP, SP, Operand(kAbiPreservedFpuRegCount * kFpuRegisterSize));
1412 }
1413
1414 // Set up THR, which caches the current thread in Dart code.
1415 if (THR != R3) {
1416 __ mov(THR, Operand(R3));
1417 }
1418
1419#if defined(USING_SHADOW_CALL_STACK)
1420#error Unimplemented
1421#endif
1422
1423 // Save the current VMTag on the stack.
1424 __ LoadFromOffset(kWord, R9, THR, target::Thread::vm_tag_offset());
1425 __ Push(R9);
1426
1427 // Save top resource and top exit frame info. Use R4-6 as temporary registers.
1428 // StackFrameIterator reads the top exit frame info saved in this frame.
1429 __ LoadFromOffset(kWord, R4, THR, target::Thread::top_resource_offset());
1430 __ Push(R4);
1431 __ LoadImmediate(R8, 0);
1432 __ StoreToOffset(kWord, R8, THR, target::Thread::top_resource_offset());
1433
1434 __ LoadFromOffset(kWord, R8, THR, target::Thread::exit_through_ffi_offset());
1435 __ Push(R8);
1436 __ LoadImmediate(R8, 0);
1437 __ StoreToOffset(kWord, R8, THR, target::Thread::exit_through_ffi_offset());
1438
1439 __ LoadFromOffset(kWord, R9, THR,
1440 target::Thread::top_exit_frame_info_offset());
1441 __ StoreToOffset(kWord, R8, THR,
1442 target::Thread::top_exit_frame_info_offset());
1443
1444 // target::frame_layout.exit_link_slot_from_entry_fp must be kept in sync
1445 // with the code below.
1446#if defined(TARGET_OS_MACOS) || defined(TARGET_OS_MACOS_IOS)
1447 ASSERT(target::frame_layout.exit_link_slot_from_entry_fp == -27);
1448#else
1449 ASSERT(target::frame_layout.exit_link_slot_from_entry_fp == -28);
1450#endif
1451 __ Push(R9);
1452
1453 // Mark that the thread is executing Dart code. Do this after initializing the
1454 // exit link for the profiler.
1455 __ LoadImmediate(R9, VMTag::kDartCompiledTagId);
1456 __ StoreToOffset(kWord, R9, THR, target::Thread::vm_tag_offset());
1457
1458 // Load arguments descriptor array into R4, which is passed to Dart code.
1459 __ mov(R4, Operand(R1));
1460
1461 // Load number of arguments into R9 and adjust count for type arguments.
1462 __ ldr(R3,
1463 FieldAddress(R4, target::ArgumentsDescriptor::type_args_len_offset()));
1464 __ ldr(R9, FieldAddress(R4, target::ArgumentsDescriptor::count_offset()));
1465 __ cmp(R3, Operand(0));
1466 __ AddImmediate(R9, R9, target::ToRawSmi(1),
1467 NE); // Include the type arguments.
1468 __ SmiUntag(R9);
1469
1470 // R2 points to first argument.
1471 // Set up arguments for the Dart call.
1472 Label push_arguments;
1473 Label done_push_arguments;
1474 __ CompareImmediate(R9, 0); // check if there are arguments.
1475 __ b(&done_push_arguments, EQ);
1476 __ LoadImmediate(R1, 0);
1477 __ Bind(&push_arguments);
1478 __ ldr(R3, Address(R2));
1479 __ Push(R3);
1480 __ AddImmediate(R2, target::kWordSize);
1481 __ AddImmediate(R1, 1);
1482 __ cmp(R1, Operand(R9));
1483 __ b(&push_arguments, LT);
1484 __ Bind(&done_push_arguments);
1485
1486 // Call the Dart code entrypoint.
1487 __ LoadImmediate(PP, 0); // GC safe value into PP.
1488 __ mov(CODE_REG, Operand(R0));
1489 __ ldr(R0, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
1490 __ blx(R0); // R4 is the arguments descriptor array.
1491
1492 // Get rid of arguments pushed on the stack.
1493 __ AddImmediate(
1494 SP, FP,
1495 target::frame_layout.exit_link_slot_from_entry_fp * target::kWordSize);
1496
1497 // Restore the saved top exit frame info and top resource back into the
1498 // Isolate structure. Uses R9 as a temporary register for this.
1499 __ Pop(R9);
1500 __ StoreToOffset(kWord, R9, THR,
1501 target::Thread::top_exit_frame_info_offset());
1502 __ Pop(R9);
1503 __ StoreToOffset(kWord, R9, THR, target::Thread::exit_through_ffi_offset());
1504 __ Pop(R9);
1505 __ StoreToOffset(kWord, R9, THR, target::Thread::top_resource_offset());
1506
1507 // Restore the current VMTag from the stack.
1508 __ Pop(R4);
1509 __ StoreToOffset(kWord, R4, THR, target::Thread::vm_tag_offset());
1510
1511 // Restore C++ ABI callee-saved registers.
1512 if (TargetCPUFeatures::vfp_supported()) {
1513 // Restore FPU registers. 2 D registers per Q register.
1514 __ vldmd(IA_W, SP, firstd, 2 * kAbiPreservedFpuRegCount);
1515 } else {
1516 __ AddImmediate(SP, kAbiPreservedFpuRegCount * kFpuRegisterSize);
1517 }
1518
1519#if defined(USING_SHADOW_CALL_STACK)
1520#error Unimplemented
1521#endif
1522
1523 // Restore CPU registers.
1524 __ PopList(kAbiPreservedCpuRegs);
1525 __ set_constant_pool_allowed(false);
1526
1527 // Restore the frame pointer and return.
1528 __ LeaveFrame((1 << FP) | (1 << LR));
1529 __ Drop(1);
1530 __ Ret();
1531}
1532
1533// Helper to generate space allocation of context stub.
1534// This does not initialise the fields of the context.
1535// Input:
1536// R1: number of context variables.
1537// Output:
1538// R0: new allocated RawContext object.
1539// Clobbered:
1540// R2, R3, R8, R9
1541static void GenerateAllocateContext(Assembler* assembler, Label* slow_case) {
1542 // First compute the rounded instance size.
1543 // R1: number of context variables.
1544 const intptr_t fixed_size_plus_alignment_padding =
1545 target::Context::header_size() +
1546 target::ObjectAlignment::kObjectAlignment - 1;
1547 __ LoadImmediate(R2, fixed_size_plus_alignment_padding);
1548 __ add(R2, R2, Operand(R1, LSL, 2));
1549 ASSERT(kSmiTagShift == 1);
1550 __ bic(R2, R2, Operand(target::ObjectAlignment::kObjectAlignment - 1));
1551
1552 NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R8, kContextCid));
1553 NOT_IN_PRODUCT(__ MaybeTraceAllocation(R8, slow_case));
1554 // Now allocate the object.
1555 // R1: number of context variables.
1556 // R2: object size.
1557 __ ldr(R0, Address(THR, target::Thread::top_offset()));
1558 __ add(R3, R2, Operand(R0));
1559 // Check if the allocation fits into the remaining space.
1560 // R0: potential new object.
1561 // R1: number of context variables.
1562 // R2: object size.
1563 // R3: potential next object start.
1564 __ ldr(IP, Address(THR, target::Thread::end_offset()));
1565 __ cmp(R3, Operand(IP));
1566 __ b(slow_case, CS); // Branch if unsigned higher or equal.
1567
1568 // Successfully allocated the object, now update top to point to
1569 // next object start and initialize the object.
1570 // R0: new object start (untagged).
1571 // R1: number of context variables.
1572 // R2: object size.
1573 // R3: next object start.
1574 __ str(R3, Address(THR, target::Thread::top_offset()));
1575 __ add(R0, R0, Operand(kHeapObjectTag));
1576
1577 // Calculate the size tag.
1578 // R0: new object (tagged).
1579 // R1: number of context variables.
1580 // R2: object size.
1581 // R3: next object start.
1582 const intptr_t shift = target::ObjectLayout::kTagBitsSizeTagPos -
1583 target::ObjectAlignment::kObjectAlignmentLog2;
1584 __ CompareImmediate(R2, target::ObjectLayout::kSizeTagMaxSizeTag);
1585 // If no size tag overflow, shift R2 left, else set R2 to zero.
1586 __ mov(R9, Operand(R2, LSL, shift), LS);
1587 __ mov(R9, Operand(0), HI);
1588
1589 // Get the class index and insert it into the tags.
1590 // R9: size and bit tags.
1591 const uint32_t tags =
1592 target::MakeTagWordForNewSpaceObject(kContextCid, /*instance_size=*/0);
1593
1594 __ LoadImmediate(IP, tags);
1595 __ orr(R9, R9, Operand(IP));
1596 __ str(R9, FieldAddress(R0, target::Object::tags_offset()));
1597
1598 // Setup up number of context variables field.
1599 // R0: new object.
1600 // R1: number of context variables as integer value (not object).
1601 // R2: object size.
1602 // R3: next object start.
1603 __ str(R1, FieldAddress(R0, target::Context::num_variables_offset()));
1604}
1605
1606// Called for inline allocation of contexts.
1607// Input:
1608// R1: number of context variables.
1609// Output:
1610// R0: new allocated RawContext object.
1611// Clobbered:
1612// Potentially any since is can go to runtime.
1613void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
1614 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1615 Label slow_case;
1616
1617 GenerateAllocateContext(assembler, &slow_case);
1618
1619 // Setup the parent field.
1620 // R0: new object.
1621 // R2: object size.
1622 // R3: next object start.
1623 __ LoadObject(R8, NullObject());
1624 __ MoveRegister(R9, R8); // Needed for InitializeFieldsNoBarrier.
1625 __ StoreIntoObjectNoBarrier(
1626 R0, FieldAddress(R0, target::Context::parent_offset()), R8);
1627
1628 // Initialize the context variables.
1629 // R0: new object.
1630 // R2: object size.
1631 // R3: next object start.
1632 // R8, R9: raw null.
1633 __ AddImmediate(R1, R0,
1634 target::Context::variable_offset(0) - kHeapObjectTag);
1635 __ InitializeFieldsNoBarrier(R0, R1, R3, R8, R9);
1636
1637 // Done allocating and initializing the context.
1638 // R0: new object.
1639 __ Ret();
1640
1641 __ Bind(&slow_case);
1642 }
1643
1644 // Create a stub frame as we are pushing some objects on the stack before
1645 // calling into the runtime.
1646 __ EnterStubFrame();
1647 // Setup space on stack for return value.
1648 __ LoadImmediate(R2, 0);
1649 __ SmiTag(R1);
1650 __ PushList((1 << R1) | (1 << R2));
1651 __ CallRuntime(kAllocateContextRuntimeEntry, 1); // Allocate context.
1652 __ Drop(1); // Pop number of context variables argument.
1653 __ Pop(R0); // Pop the new context object.
1654
1655 // Write-barrier elimination might be enabled for this context (depending on
1656 // the size). To be sure we will check if the allocated object is in old
1657 // space and if so call a leaf runtime to add it to the remembered set.
1658 EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
1659
1660 // R0: new object
1661 // Restore the frame pointer.
1662 __ LeaveStubFrame();
1663
1664 __ Ret();
1665}
1666
1667// Called for clone of contexts.
1668// Input:
1669// R4: context variable to clone.
1670// Output:
1671// R0: new allocated RawContext object.
1672// Clobbered:
1673// Potentially any since it can go to runtime.
1674void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
1675 {
1676 Label slow_case;
1677
1678 // Load num. variable in the existing context.
1679 __ ldr(R1, FieldAddress(R4, target::Context::num_variables_offset()));
1680
1681 GenerateAllocateContext(assembler, &slow_case);
1682
1683 // Load parent in the existing context.
1684 __ ldr(R2, FieldAddress(R4, target::Context::parent_offset()));
1685 // Setup the parent field.
1686 // R0: new object.
1687 __ StoreIntoObjectNoBarrier(
1688 R0, FieldAddress(R0, target::Context::parent_offset()), R2);
1689
1690 // Clone the context variables.
1691 // R0: new object.
1692 // R1: number of context variables.
1693 {
1694 Label loop, done;
1695 __ AddImmediate(R2, R0,
1696 target::Context::variable_offset(0) - kHeapObjectTag);
1697 __ AddImmediate(R3, R4,
1698 target::Context::variable_offset(0) - kHeapObjectTag);
1699
1700 __ Bind(&loop);
1701 __ subs(R1, R1, Operand(1));
1702 __ b(&done, MI);
1703
1704 __ ldr(R9, Address(R3, R1, LSL, target::kWordSizeLog2));
1705 __ str(R9, Address(R2, R1, LSL, target::kWordSizeLog2));
1706
1707 __ b(&loop, NE); // Loop if R1 not zero.
1708
1709 __ Bind(&done);
1710 }
1711
1712 // Done allocating and initializing the context.
1713 // R0: new object.
1714 __ Ret();
1715
1716 __ Bind(&slow_case);
1717 }
1718
1719 // Create a stub frame as we are pushing some objects on the stack before
1720 // calling into the runtime.
1721 __ EnterStubFrame();
1722 // Setup space on stack for return value.
1723 __ LoadImmediate(R0, 0);
1724 __ PushRegisterPair(R4, R0);
1725 __ CallRuntime(kCloneContextRuntimeEntry, 1); // Clone context.
1726 // R4: Pop number of context variables argument.
1727 // R0: Pop the new context object.
1728 __ PopRegisterPair(R4, R0);
1729
1730 // Write-barrier elimination might be enabled for this context (depending on
1731 // the size). To be sure we will check if the allocated object is in old
1732 // space and if so call a leaf runtime to add it to the remembered set.
1733 EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
1734
1735 // R0: new object
1736 // Restore the frame pointer.
1737 __ LeaveStubFrame();
1738 __ Ret();
1739}
1740
1741void StubCodeCompiler::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
1742 RegList saved = (1 << LR) | (1 << kWriteBarrierObjectReg);
1743 for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
1744 if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
1745
1746 Register reg = static_cast<Register>(i);
1747 intptr_t start = __ CodeSize();
1748 __ PushList(saved);
1749 __ mov(kWriteBarrierObjectReg, Operand(reg));
1750 __ ldr(LR,
1751 Address(THR, target::Thread::write_barrier_entry_point_offset()));
1752 __ blx(LR);
1753 __ PopList(saved);
1754 __ bx(LR);
1755 intptr_t end = __ CodeSize();
1756
1757 RELEASE_ASSERT(end - start == kStoreBufferWrapperSize);
1758 }
1759}
1760
1761// Helper stub to implement Assembler::StoreIntoObject.
1762// Input parameters:
1763// R1: Object (old)
1764// R0: Value (old or new)
1765// R9: Slot
1766// If R0 is new, add R1 to the store buffer. Otherwise R0 is old, mark R0
1767// and add it to the mark list.
1768COMPILE_ASSERT(kWriteBarrierObjectReg == R1);
1769COMPILE_ASSERT(kWriteBarrierValueReg == R0);
1770COMPILE_ASSERT(kWriteBarrierSlotReg == R9);
1771static void GenerateWriteBarrierStubHelper(Assembler* assembler,
1772 Address stub_code,
1773 bool cards) {
1774 Label add_to_mark_stack, remember_card;
1775 __ tst(R0, Operand(1 << target::ObjectAlignment::kNewObjectBitPosition));
1776 __ b(&add_to_mark_stack, ZERO);
1777
1778 if (cards) {
1779 __ ldr(TMP, FieldAddress(R1, target::Object::tags_offset()));
1780 __ tst(TMP, Operand(1 << target::ObjectLayout::kCardRememberedBit));
1781 __ b(&remember_card, NOT_ZERO);
1782 } else {
1783#if defined(DEBUG)
1784 Label ok;
1785 __ ldr(TMP, FieldAddress(R1, target::Object::tags_offset()));
1786 __ tst(TMP, Operand(1 << target::ObjectLayout::kCardRememberedBit));
1787 __ b(&ok, ZERO);
1788 __ Stop("Wrong barrier");
1789 __ Bind(&ok);
1790#endif
1791 }
1792
1793 // Save values being destroyed.
1794 __ PushList((1 << R2) | (1 << R3) | (1 << R4));
1795
1796 // Atomically set the remembered bit of the object header.
1797 ASSERT(target::Object::tags_offset() == 0);
1798 __ sub(R3, R1, Operand(kHeapObjectTag));
1799 // R3: Untagged address of header word (ldrex/strex do not support offsets).
1800 Label retry;
1801 __ Bind(&retry);
1802 __ ldrex(R2, R3);
1803 __ bic(R2, R2, Operand(1 << target::ObjectLayout::kOldAndNotRememberedBit));
1804 __ strex(R4, R2, R3);
1805 __ cmp(R4, Operand(1));
1806 __ b(&retry, EQ);
1807
1808 // Load the StoreBuffer block out of the thread. Then load top_ out of the
1809 // StoreBufferBlock and add the address to the pointers_.
1810 __ ldr(R4, Address(THR, target::Thread::store_buffer_block_offset()));
1811 __ ldr(R2, Address(R4, target::StoreBufferBlock::top_offset()));
1812 __ add(R3, R4, Operand(R2, LSL, target::kWordSizeLog2));
1813 __ str(R1, Address(R3, target::StoreBufferBlock::pointers_offset()));
1814
1815 // Increment top_ and check for overflow.
1816 // R2: top_.
1817 // R4: StoreBufferBlock.
1818 Label overflow;
1819 __ add(R2, R2, Operand(1));
1820 __ str(R2, Address(R4, target::StoreBufferBlock::top_offset()));
1821 __ CompareImmediate(R2, target::StoreBufferBlock::kSize);
1822 // Restore values.
1823 __ PopList((1 << R2) | (1 << R3) | (1 << R4));
1824 __ b(&overflow, EQ);
1825 __ Ret();
1826
1827 // Handle overflow: Call the runtime leaf function.
1828 __ Bind(&overflow);
1829 // Setup frame, push callee-saved registers.
1830
1831 __ Push(CODE_REG);
1832 __ ldr(CODE_REG, stub_code);
1833 __ EnterCallRuntimeFrame(0 * target::kWordSize);
1834 __ mov(R0, Operand(THR));
1835 __ CallRuntime(kStoreBufferBlockProcessRuntimeEntry, 1);
1836 // Restore callee-saved registers, tear down frame.
1837 __ LeaveCallRuntimeFrame();
1838 __ Pop(CODE_REG);
1839 __ Ret();
1840
1841 __ Bind(&add_to_mark_stack);
1842 __ PushList((1 << R2) | (1 << R3) | (1 << R4)); // Spill.
1843
1844 Label marking_retry, lost_race, marking_overflow;
1845 // Atomically clear kOldAndNotMarkedBit.
1846 ASSERT(target::Object::tags_offset() == 0);
1847 __ sub(R3, R0, Operand(kHeapObjectTag));
1848 // R3: Untagged address of header word (ldrex/strex do not support offsets).
1849 __ Bind(&marking_retry);
1850 __ ldrex(R2, R3);
1851 __ tst(R2, Operand(1 << target::ObjectLayout::kOldAndNotMarkedBit));
1852 __ b(&lost_race, ZERO);
1853 __ bic(R2, R2, Operand(1 << target::ObjectLayout::kOldAndNotMarkedBit));
1854 __ strex(R4, R2, R3);
1855 __ cmp(R4, Operand(1));
1856 __ b(&marking_retry, EQ);
1857
1858 __ ldr(R4, Address(THR, target::Thread::marking_stack_block_offset()));
1859 __ ldr(R2, Address(R4, target::MarkingStackBlock::top_offset()));
1860 __ add(R3, R4, Operand(R2, LSL, target::kWordSizeLog2));
1861 __ str(R0, Address(R3, target::MarkingStackBlock::pointers_offset()));
1862 __ add(R2, R2, Operand(1));
1863 __ str(R2, Address(R4, target::MarkingStackBlock::top_offset()));
1864 __ CompareImmediate(R2, target::MarkingStackBlock::kSize);
1865 __ PopList((1 << R4) | (1 << R2) | (1 << R3)); // Unspill.
1866 __ b(&marking_overflow, EQ);
1867 __ Ret();
1868
1869 __ Bind(&marking_overflow);
1870 __ Push(CODE_REG);
1871 __ ldr(CODE_REG, stub_code);
1872 __ EnterCallRuntimeFrame(0 * target::kWordSize);
1873 __ mov(R0, Operand(THR));
1874 __ CallRuntime(kMarkingStackBlockProcessRuntimeEntry, 1);
1875 __ LeaveCallRuntimeFrame();
1876 __ Pop(CODE_REG);
1877 __ Ret();
1878
1879 __ Bind(&lost_race);
1880 __ PopList((1 << R2) | (1 << R3) | (1 << R4)); // Unspill.
1881 __ Ret();
1882
1883 if (cards) {
1884 Label remember_card_slow;
1885
1886 // Get card table.
1887 __ Bind(&remember_card);
1888 __ AndImmediate(TMP, R1, target::kOldPageMask); // OldPage.
1889 __ ldr(TMP,
1890 Address(TMP, target::OldPage::card_table_offset())); // Card table.
1891 __ cmp(TMP, Operand(0));
1892 __ b(&remember_card_slow, EQ);
1893
1894 // Dirty the card.
1895 __ AndImmediate(TMP, R1, target::kOldPageMask); // OldPage.
1896 __ sub(R9, R9, Operand(TMP)); // Offset in page.
1897 __ ldr(TMP,
1898 Address(TMP, target::OldPage::card_table_offset())); // Card table.
1899 __ add(TMP, TMP,
1900 Operand(R9, LSR,
1901 target::OldPage::kBytesPerCardLog2)); // Card address.
1902 __ strb(R1,
1903 Address(TMP, 0)); // Low byte of R0 is non-zero from object tag.
1904 __ Ret();
1905
1906 // Card table not yet allocated.
1907 __ Bind(&remember_card_slow);
1908 __ Push(CODE_REG);
1909 __ Push(R0);
1910 __ Push(R1);
1911 __ ldr(CODE_REG, stub_code);
1912 __ mov(R0, Operand(R1)); // Arg0 = Object
1913 __ mov(R1, Operand(R9)); // Arg1 = Slot
1914 __ EnterCallRuntimeFrame(0);
1915 __ CallRuntime(kRememberCardRuntimeEntry, 2);
1916 __ LeaveCallRuntimeFrame();
1917 __ Pop(R1);
1918 __ Pop(R0);
1919 __ Pop(CODE_REG);
1920 __ Ret();
1921 }
1922}
1923
1924void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) {
1925 GenerateWriteBarrierStubHelper(
1926 assembler, Address(THR, target::Thread::write_barrier_code_offset()),
1927 false);
1928}
1929
1930void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
1931 GenerateWriteBarrierStubHelper(
1932 assembler,
1933 Address(THR, target::Thread::array_write_barrier_code_offset()), true);
1934}
1935
1936static void GenerateAllocateObjectHelper(Assembler* assembler,
1937 bool is_cls_parameterized) {
1938 const Register kInstanceReg = R0;
1939 // R1
1940 const Register kTagsReg = R2;
1941 // kAllocationStubTypeArgumentsReg = R3
1942
1943 {
1944 Label slow_case;
1945
1946 const Register kNewTopReg = R8;
1947
1948 // Bump allocation.
1949 {
1950 const Register kEndReg = R1;
1951 const Register kInstanceSizeReg = R9;
1952
1953 __ ExtractInstanceSizeFromTags(kInstanceSizeReg, kTagsReg);
1954
1955 // Load two words from Thread::top: top and end.
1956 // kInstanceReg: potential next object start.
1957 __ ldrd(kInstanceReg, kEndReg, THR, target::Thread::top_offset());
1958
1959 __ add(kNewTopReg, kInstanceReg, Operand(kInstanceSizeReg));
1960
1961 __ CompareRegisters(kEndReg, kNewTopReg);
1962 __ b(&slow_case, UNSIGNED_LESS_EQUAL);
1963
1964 // Successfully allocated the object, now update top to point to
1965 // next object start and store the class in the class field of object.
1966 __ str(kNewTopReg, Address(THR, target::Thread::top_offset()));
1967 } // kEndReg = R1, kInstanceSizeReg = R9
1968
1969 // Tags.
1970 __ str(kTagsReg, Address(kInstanceReg, target::Object::tags_offset()));
1971
1972 // Initialize the remaining words of the object.
1973 {
1974 const Register kFieldReg = R1;
1975 const Register kNullReg = R9;
1976
1977 __ LoadObject(kNullReg, NullObject());
1978
1979 __ AddImmediate(kFieldReg, kInstanceReg,
1980 target::Instance::first_field_offset());
1981 Label done, init_loop;
1982 __ Bind(&init_loop);
1983 __ CompareRegisters(kFieldReg, kNewTopReg);
1984 __ b(&done, UNSIGNED_GREATER_EQUAL);
1985 __ str(kNullReg,
1986 Address(kFieldReg, target::kWordSize, Address::PostIndex));
1987 __ b(&init_loop);
1988
1989 __ Bind(&done);
1990 } // kFieldReg = R1, kNullReg = R9
1991
1992 // Store parameterized type.
1993 if (is_cls_parameterized) {
1994 Label not_parameterized_case;
1995
1996 const Register kClsIdReg = R2;
1997 const Register kTypeOffestReg = R9;
1998
1999 __ ExtractClassIdFromTags(kClsIdReg, kTagsReg);
2000
2001 // Load class' type_arguments_field offset in words.
2002 __ LoadClassById(kTypeOffestReg, kClsIdReg);
2003 __ ldr(
2004 kTypeOffestReg,
2005 FieldAddress(kTypeOffestReg,
2006 target::Class::
2007 host_type_arguments_field_offset_in_words_offset()));
2008
2009 // Set the type arguments in the new object.
2010 __ StoreIntoObjectNoBarrier(
2011 kInstanceReg,
2012 Address(kInstanceReg, kTypeOffestReg, LSL, target::kWordSizeLog2),
2013 kAllocationStubTypeArgumentsReg);
2014
2015 __ Bind(&not_parameterized_case);
2016 } // kClsIdReg = R1, kTypeOffestReg = R9
2017
2018 __ AddImmediate(kInstanceReg, kInstanceReg, kHeapObjectTag);
2019
2020 __ Ret();
2021
2022 __ Bind(&slow_case);
2023 } // kNewTopReg = R8
2024
2025 // Fall back on slow case:
2026 {
2027 const Register kStubReg = R8;
2028
2029 if (!is_cls_parameterized) {
2030 __ LoadObject(kAllocationStubTypeArgumentsReg, NullObject());
2031 }
2032
2033 // Tail call to generic allocation stub.
2034 __ ldr(kStubReg,
2035 Address(THR,
2036 target::Thread::allocate_object_slow_entry_point_offset()));
2037 __ bx(kStubReg);
2038 } // kStubReg = R8
2039}
2040
2041// Called for inline allocation of objects (any class).
2042void StubCodeCompiler::GenerateAllocateObjectStub(Assembler* assembler) {
2043 GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/false);
2044}
2045
2046void StubCodeCompiler::GenerateAllocateObjectParameterizedStub(
2047 Assembler* assembler) {
2048 GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/true);
2049}
2050
2051void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
2052 const Register kInstanceReg = R0;
2053 const Register kClsReg = R1;
2054 const Register kTagsReg = R2;
2055 // kAllocationStubTypeArgumentsReg = R3
2056
2057 if (!FLAG_use_bare_instructions) {
2058 __ ldr(CODE_REG,
2059 Address(THR, target::Thread::call_to_runtime_stub_offset()));
2060 }
2061
2062 // Create a stub frame as we are pushing some objects on the stack before
2063 // calling into the runtime.
2064 __ EnterStubFrame();
2065
2066 __ ExtractClassIdFromTags(kInstanceReg, kTagsReg);
2067 __ LoadClassById(kClsReg, kInstanceReg);
2068
2069 __ LoadObject(kInstanceReg, NullObject());
2070
2071 // Pushes result slot, then parameter class.
2072 __ PushRegisterPair(kClsReg, kInstanceReg);
2073
2074 // Should be Object::null() if class is non-parameterized.
2075 __ Push(kAllocationStubTypeArgumentsReg);
2076
2077 __ CallRuntime(kAllocateObjectRuntimeEntry, 2);
2078
2079 // Load result off the stack into result register.
2080 __ ldr(kInstanceReg, Address(SP, 2 * target::kWordSize));
2081
2082 // Write-barrier elimination is enabled for [cls] and we therefore need to
2083 // ensure that the object is in new-space or has remembered bit set.
2084 EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
2085
2086 __ LeaveDartFrameAndReturn();
2087}
2088
2089// Called for inline allocation of objects.
2090void StubCodeCompiler::GenerateAllocationStubForClass(
2091 Assembler* assembler,
2092 UnresolvedPcRelativeCalls* unresolved_calls,
2093 const Class& cls,
2094 const Code& allocate_object,
2095 const Code& allocat_object_parametrized) {
2096 classid_t cls_id = target::Class::GetId(cls);
2097 ASSERT(cls_id != kIllegalCid);
2098
2099 RELEASE_ASSERT(AllocateObjectInstr::WillAllocateNewOrRemembered(cls));
2100
2101 // The generated code is different if the class is parameterized.
2102 const bool is_cls_parameterized = target::Class::NumTypeArguments(cls) > 0;
2103 ASSERT(!is_cls_parameterized || target::Class::TypeArgumentsFieldOffset(
2104 cls) != target::Class::kNoTypeArguments);
2105
2106 const intptr_t instance_size = target::Class::GetInstanceSize(cls);
2107 ASSERT(instance_size > 0);
2108 RELEASE_ASSERT(target::Heap::IsAllocatableInNewSpace(instance_size));
2109
2110 const uint32_t tags =
2111 target::MakeTagWordForNewSpaceObject(cls_id, instance_size);
2112
2113 // Note: Keep in sync with helper function.
2114 // kInstanceReg = R0
2115 const Register kTagsReg = R2;
2116 // kAllocationStubTypeArgumentsReg = R3
2117
2118 __ LoadImmediate(kTagsReg, tags);
2119
2120 if (!FLAG_use_slow_path && FLAG_inline_alloc &&
2121 !target::Class::TraceAllocation(cls) &&
2122 target::SizeFitsInSizeTag(instance_size)) {
2123 if (is_cls_parameterized) {
2124 // TODO(41974): Assign all allocation stubs to the root loading unit?
2125 if (false &&
2126 !IsSameObject(NullObject(),
2127 CastHandle<Object>(allocat_object_parametrized))) {
2128 __ GenerateUnRelocatedPcRelativeTailCall();
2129 unresolved_calls->Add(new UnresolvedPcRelativeCall(
2130 __ CodeSize(), allocat_object_parametrized, /*is_tail_call=*/true));
2131 } else {
2132 __ ldr(PC,
2133 Address(THR,
2134 target::Thread::
2135 allocate_object_parameterized_entry_point_offset()));
2136 }
2137 } else {
2138 // TODO(41974): Assign all allocation stubs to the root loading unit?
2139 if (false &&
2140 !IsSameObject(NullObject(), CastHandle<Object>(allocate_object))) {
2141 __ GenerateUnRelocatedPcRelativeTailCall();
2142 unresolved_calls->Add(new UnresolvedPcRelativeCall(
2143 __ CodeSize(), allocate_object, /*is_tail_call=*/true));
2144 } else {
2145 __ ldr(
2146 PC,
2147 Address(THR, target::Thread::allocate_object_entry_point_offset()));
2148 }
2149 }
2150 } else {
2151 if (!is_cls_parameterized) {
2152 __ LoadObject(kAllocationStubTypeArgumentsReg, NullObject());
2153 }
2154 __ ldr(PC,
2155 Address(THR,
2156 target::Thread::allocate_object_slow_entry_point_offset()));
2157 }
2158}
2159
2160// Called for invoking "dynamic noSuchMethod(Invocation invocation)" function
2161// from the entry code of a dart function after an error in passed argument
2162// name or number is detected.
2163// Input parameters:
2164// LR : return address.
2165// SP : address of last argument.
2166// R4: arguments descriptor array.
2167void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
2168 Assembler* assembler) {
2169 __ EnterStubFrame();
2170
2171 // Load the receiver.
2172 __ ldr(R2, FieldAddress(R4, target::ArgumentsDescriptor::count_offset()));
2173 __ add(IP, FP, Operand(R2, LSL, 1)); // R2 is Smi.
2174 __ ldr(R8, Address(IP, target::frame_layout.param_end_from_fp *
2175 target::kWordSize));
2176
2177 // Load the function.
2178 __ ldr(R6, FieldAddress(R8, target::Closure::function_offset()));
2179
2180 // Push space for the return value.
2181 // Push the receiver.
2182 // Push arguments descriptor array.
2183 __ LoadImmediate(IP, 0);
2184 __ PushList((1 << R4) | (1 << R6) | (1 << R8) | (1 << IP));
2185
2186 // Adjust arguments count.
2187 __ ldr(R3,
2188 FieldAddress(R4, target::ArgumentsDescriptor::type_args_len_offset()));
2189 __ cmp(R3, Operand(0));
2190 __ AddImmediate(R2, R2, target::ToRawSmi(1),
2191 NE); // Include the type arguments.
2192
2193 // R2: Smi-tagged arguments array length.
2194 PushArrayOfArguments(assembler);
2195
2196 const intptr_t kNumArgs = 4;
2197 __ CallRuntime(kNoSuchMethodFromPrologueRuntimeEntry, kNumArgs);
2198 // noSuchMethod on closures always throws an error, so it will never return.
2199 __ bkpt(0);
2200}
2201
2202// R8: function object.
2203// R9: inline cache data object.
2204// Cannot use function object from ICData as it may be the inlined
2205// function and not the top-scope function.
2206void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
2207 Assembler* assembler) {
2208 Register ic_reg = R9;
2209 Register func_reg = R8;
2210 if (FLAG_precompiled_mode) {
2211 __ Breakpoint();
2212 return;
2213 }
2214 if (FLAG_trace_optimized_ic_calls) {
2215 __ EnterStubFrame();
2216 __ PushList((1 << R9) | (1 << R8)); // Preserve.
2217 __ Push(ic_reg); // Argument.
2218 __ Push(func_reg); // Argument.
2219 __ CallRuntime(kTraceICCallRuntimeEntry, 2);
2220 __ Drop(2); // Discard argument;
2221 __ PopList((1 << R9) | (1 << R8)); // Restore.
2222 __ LeaveStubFrame();
2223 }
2224 __ ldr(TMP, FieldAddress(func_reg, target::Function::usage_counter_offset()));
2225 __ add(TMP, TMP, Operand(1));
2226 __ str(TMP, FieldAddress(func_reg, target::Function::usage_counter_offset()));
2227}
2228
2229// Loads function into 'temp_reg'.
2230void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
2231 Register temp_reg) {
2232 if (FLAG_precompiled_mode) {
2233 __ Breakpoint();
2234 return;
2235 }
2236 if (FLAG_optimization_counter_threshold >= 0) {
2237 Register ic_reg = R9;
2238 Register func_reg = temp_reg;
2239 ASSERT(temp_reg == R8);
2240 __ Comment("Increment function counter");
2241 __ ldr(func_reg, FieldAddress(ic_reg, target::ICData::owner_offset()));
2242 __ ldr(TMP,
2243 FieldAddress(func_reg, target::Function::usage_counter_offset()));
2244 __ add(TMP, TMP, Operand(1));
2245 __ str(TMP,
2246 FieldAddress(func_reg, target::Function::usage_counter_offset()));
2247 }
2248}
2249
2250// Note: R9 must be preserved.
2251// Attempt a quick Smi operation for known operations ('kind'). The ICData
2252// must have been primed with a Smi/Smi check that will be used for counting
2253// the invocations.
2254static void EmitFastSmiOp(Assembler* assembler,
2255 Token::Kind kind,
2256 intptr_t num_args,
2257 Label* not_smi_or_overflow) {
2258 __ Comment("Fast Smi op");
2259 __ ldr(R0, Address(SP, 1 * target::kWordSize)); // Left.
2260 __ ldr(R1, Address(SP, 0 * target::kWordSize)); // Right.
2261 __ orr(TMP, R0, Operand(R1));
2262 __ tst(TMP, Operand(kSmiTagMask));
2263 __ b(not_smi_or_overflow, NE);
2264 switch (kind) {
2265 case Token::kADD: {
2266 __ adds(R0, R1, Operand(R0)); // Adds.
2267 __ b(not_smi_or_overflow, VS); // Branch if overflow.
2268 break;
2269 }
2270 case Token::kLT: {
2271 __ cmp(R0, Operand(R1));
2272 __ LoadObject(R0, CastHandle<Object>(TrueObject()), LT);
2273 __ LoadObject(R0, CastHandle<Object>(FalseObject()), GE);
2274 break;
2275 }
2276 case Token::kEQ: {
2277 __ cmp(R0, Operand(R1));
2278 __ LoadObject(R0, CastHandle<Object>(TrueObject()), EQ);
2279 __ LoadObject(R0, CastHandle<Object>(FalseObject()), NE);
2280 break;
2281 }
2282 default:
2283 UNIMPLEMENTED();
2284 }
2285 // R9: IC data object (preserved).
2286 __ ldr(R8, FieldAddress(R9, target::ICData::entries_offset()));
2287 // R8: ic_data_array with check entries: classes and target functions.
2288 __ AddImmediate(R8, target::Array::data_offset() - kHeapObjectTag);
2289// R8: points directly to the first ic data array element.
2290#if defined(DEBUG)
2291 // Check that first entry is for Smi/Smi.
2292 Label error, ok;
2293 const intptr_t imm_smi_cid = target::ToRawSmi(kSmiCid);
2294 __ ldr(R1, Address(R8, 0));
2295 __ CompareImmediate(R1, imm_smi_cid);
2296 __ b(&error, NE);
2297 __ ldr(R1, Address(R8, target::kWordSize));
2298 __ CompareImmediate(R1, imm_smi_cid);
2299 __ b(&ok, EQ);
2300 __ Bind(&error);
2301 __ Stop("Incorrect IC data");
2302 __ Bind(&ok);
2303#endif
2304 if (FLAG_optimization_counter_threshold >= 0) {
2305 // Update counter, ignore overflow.
2306 const intptr_t count_offset =
2307 target::ICData::CountIndexFor(num_args) * target::kWordSize;
2308 __ LoadFromOffset(kWord, R1, R8, count_offset);
2309 __ adds(R1, R1, Operand(target::ToRawSmi(1)));
2310 __ StoreIntoSmiField(Address(R8, count_offset), R1);
2311 }
2312 __ Ret();
2313}
2314
2315// Saves the offset of the target entry-point (from the Function) into R3.
2316//
2317// Must be the first code generated, since any code before will be skipped in
2318// the unchecked entry-point.
2319static void GenerateRecordEntryPoint(Assembler* assembler) {
2320 Label done;
2321 __ mov(R3, Operand(target::Function::entry_point_offset() - kHeapObjectTag));
2322 __ b(&done);
2323 __ BindUncheckedEntryPoint();
2324 __ mov(
2325 R3,
2326 Operand(target::Function::entry_point_offset(CodeEntryKind::kUnchecked) -
2327 kHeapObjectTag));
2328 __ Bind(&done);
2329}
2330
2331// Generate inline cache check for 'num_args'.
2332// R0: receiver (if instance call)
2333// R9: ICData
2334// LR: return address
2335// Control flow:
2336// - If receiver is null -> jump to IC miss.
2337// - If receiver is Smi -> load Smi class.
2338// - If receiver is not-Smi -> load receiver's class.
2339// - Check if 'num_args' (including receiver) match any IC data group.
2340// - Match found -> jump to target.
2341// - Match not found -> jump to IC miss.
2342void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
2343 Assembler* assembler,
2344 intptr_t num_args,
2345 const RuntimeEntry& handle_ic_miss,
2346 Token::Kind kind,
2347 Optimized optimized,
2348 CallType type,
2349 Exactness exactness) {
2350 if (FLAG_precompiled_mode) {
2351 __ Breakpoint();
2352 return;
2353 }
2354
2355 const bool save_entry_point = kind == Token::kILLEGAL;
2356 if (save_entry_point) {
2357 GenerateRecordEntryPoint(assembler);
2358 }
2359
2360 if (optimized == kOptimized) {
2361 GenerateOptimizedUsageCounterIncrement(assembler);
2362 } else {
2363 GenerateUsageCounterIncrement(assembler, /* scratch */ R8);
2364 }
2365
2366 ASSERT(exactness == kIgnoreExactness); // Unimplemented.
2367 __ CheckCodePointer();
2368 ASSERT(num_args == 1 || num_args == 2);
2369#if defined(DEBUG)
2370 {
2371 Label ok;
2372 // Check that the IC data array has NumArgsTested() == num_args.
2373 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
2374 __ ldr(R8, FieldAddress(R9, target::ICData::state_bits_offset()));
2375 ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed.
2376 __ and_(R8, R8, Operand(target::ICData::NumArgsTestedMask()));
2377 __ CompareImmediate(R8, num_args);
2378 __ b(&ok, EQ);
2379 __ Stop("Incorrect stub for IC data");
2380 __ Bind(&ok);
2381 }
2382#endif // DEBUG
2383
2384#if !defined(PRODUCT)
2385 Label stepping, done_stepping;
2386 if (optimized == kUnoptimized) {
2387 __ Comment("Check single stepping");
2388 __ LoadIsolate(R8);
2389 __ ldrb(R8, Address(R8, target::Isolate::single_step_offset()));
2390 __ CompareImmediate(R8, 0);
2391 __ b(&stepping, NE);
2392 __ Bind(&done_stepping);
2393 }
2394#endif
2395
2396 Label not_smi_or_overflow;
2397 if (kind != Token::kILLEGAL) {
2398 EmitFastSmiOp(assembler, kind, num_args, &not_smi_or_overflow);
2399 }
2400 __ Bind(&not_smi_or_overflow);
2401
2402 __ Comment("Extract ICData initial values and receiver cid");
2403 // R9: IC data object (preserved).
2404 __ ldr(R8, FieldAddress(R9, target::ICData::entries_offset()));
2405 // R8: ic_data_array with check entries: classes and target functions.
2406 const int kIcDataOffset = target::Array::data_offset() - kHeapObjectTag;
2407 // R8: points at the IC data array.
2408
2409 if (type == kInstanceCall) {
2410 __ LoadTaggedClassIdMayBeSmi(R0, R0);
2411 __ ldr(R4, FieldAddress(
2412 R9, target::CallSiteData::arguments_descriptor_offset()));
2413 if (num_args == 2) {
2414 __ ldr(R1, FieldAddress(R4, target::ArgumentsDescriptor::count_offset()));
2415 __ sub(R1, R1, Operand(target::ToRawSmi(2)));
2416 __ ldr(R1, Address(SP, R1, LSL, 1)); // R1 (argument_count - 2) is Smi.
2417 __ LoadTaggedClassIdMayBeSmi(R1, R1);
2418 }
2419 } else {
2420 // Load arguments descriptor into R4.
2421 __ ldr(R4, FieldAddress(
2422 R9, target::CallSiteData::arguments_descriptor_offset()));
2423
2424 // Get the receiver's class ID (first read number of arguments from
2425 // arguments descriptor array and then access the receiver from the stack).
2426 __ ldr(R1, FieldAddress(R4, target::ArgumentsDescriptor::count_offset()));
2427 __ sub(R1, R1, Operand(target::ToRawSmi(1)));
2428 // R1: argument_count - 1 (smi).
2429
2430 __ ldr(R0, Address(SP, R1, LSL, 1)); // R1 (argument_count - 1) is Smi.
2431 __ LoadTaggedClassIdMayBeSmi(R0, R0);
2432
2433 if (num_args == 2) {
2434 __ sub(R1, R1, Operand(target::ToRawSmi(1)));
2435 __ ldr(R1, Address(SP, R1, LSL, 1)); // R1 (argument_count - 2) is Smi.
2436 __ LoadTaggedClassIdMayBeSmi(R1, R1);
2437 }
2438 }
2439 // R0: first argument class ID as Smi.
2440 // R1: second argument class ID as Smi.
2441 // R4: args descriptor
2442
2443 // Loop that checks if there is an IC data match.
2444 Label loop, found, miss;
2445 __ Comment("ICData loop");
2446
2447 // We unroll the generic one that is generated once more than the others.
2448 const bool optimize = kind == Token::kILLEGAL;
2449
2450 __ Bind(&loop);
2451 for (int unroll = optimize ? 4 : 2; unroll >= 0; unroll--) {
2452 Label update;
2453
2454 __ ldr(R2, Address(R8, kIcDataOffset));
2455 __ cmp(R0, Operand(R2)); // Class id match?
2456 if (num_args == 2) {
2457 __ b(&update, NE); // Continue.
2458 __ ldr(R2, Address(R8, kIcDataOffset + target::kWordSize));
2459 __ cmp(R1, Operand(R2)); // Class id match?
2460 }
2461 __ b(&found, EQ); // Break.
2462
2463 __ Bind(&update);
2464
2465 const intptr_t entry_size = target::ICData::TestEntryLengthFor(
2466 num_args, exactness == kCheckExactness) *
2467 target::kWordSize;
2468 __ AddImmediate(R8, entry_size); // Next entry.
2469
2470 __ CompareImmediate(R2, target::ToRawSmi(kIllegalCid)); // Done?
2471 if (unroll == 0) {
2472 __ b(&loop, NE);
2473 } else {
2474 __ b(&miss, EQ);
2475 }
2476 }
2477
2478 __ Bind(&miss);
2479 __ Comment("IC miss");
2480 // Compute address of arguments.
2481 __ ldr(R1, FieldAddress(R4, target::ArgumentsDescriptor::count_offset()));
2482 __ sub(R1, R1, Operand(target::ToRawSmi(1)));
2483 // R1: argument_count - 1 (smi).
2484 __ add(R1, SP, Operand(R1, LSL, 1)); // R1 is Smi.
2485 // R1: address of receiver.
2486 // Create a stub frame as we are pushing some objects on the stack before
2487 // calling into the runtime.
2488 __ EnterStubFrame();
2489 __ LoadImmediate(R0, 0);
2490 // Preserve IC data object and arguments descriptor array and
2491 // setup space on stack for result (target code object).
2492 RegList regs = (1 << R0) | (1 << R4) | (1 << R9);
2493 if (save_entry_point) {
2494 __ SmiTag(R3);
2495 regs |= 1 << R3;
2496 }
2497 __ PushList(regs);
2498 // Push call arguments.
2499 for (intptr_t i = 0; i < num_args; i++) {
2500 __ LoadFromOffset(kWord, TMP, R1, -i * target::kWordSize);
2501 __ Push(TMP);
2502 }
2503 // Pass IC data object.
2504 __ Push(R9);
2505 __ CallRuntime(handle_ic_miss, num_args + 1);
2506 // Remove the call arguments pushed earlier, including the IC data object.
2507 __ Drop(num_args + 1);
2508 // Pop returned function object into R0.
2509 // Restore arguments descriptor array and IC data array.
2510 __ PopList(regs);
2511 if (save_entry_point) {
2512 __ SmiUntag(R3);
2513 }
2514 __ RestoreCodePointer();
2515 __ LeaveStubFrame();
2516 Label call_target_function;
2517 if (!FLAG_lazy_dispatchers) {
2518 GenerateDispatcherCode(assembler, &call_target_function);
2519 } else {
2520 __ b(&call_target_function);
2521 }
2522
2523 __ Bind(&found);
2524 // R8: pointer to an IC data check group.
2525 const intptr_t target_offset =
2526 target::ICData::TargetIndexFor(num_args) * target::kWordSize;
2527 const intptr_t count_offset =
2528 target::ICData::CountIndexFor(num_args) * target::kWordSize;
2529 __ LoadFromOffset(kWord, R0, R8, kIcDataOffset + target_offset);
2530
2531 if (FLAG_optimization_counter_threshold >= 0) {
2532 __ Comment("Update caller's counter");
2533 __ LoadFromOffset(kWord, R1, R8, kIcDataOffset + count_offset);
2534 // Ignore overflow.
2535 __ adds(R1, R1, Operand(target::ToRawSmi(1)));
2536 __ StoreIntoSmiField(Address(R8, kIcDataOffset + count_offset), R1);
2537 }
2538
2539 __ Comment("Call target");
2540 __ Bind(&call_target_function);
2541 // R0: target function.
2542 __ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
2543
2544 if (save_entry_point) {
2545 __ Branch(Address(R0, R3));
2546 } else {
2547 __ Branch(FieldAddress(R0, target::Function::entry_point_offset()));
2548 }
2549
2550#if !defined(PRODUCT)
2551 if (optimized == kUnoptimized) {
2552 __ Bind(&stepping);
2553 __ EnterStubFrame();
2554 if (type == kInstanceCall) {
2555 __ Push(R0); // Preserve receiver.
2556 }
2557 RegList regs = 1 << R9;
2558 if (save_entry_point) {
2559 regs |= 1 << R3;
2560 __ SmiTag(R3); // Entry-point is not Smi.
2561 }
2562 __ PushList(regs); // Preserve IC data and entry-point.
2563 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2564 __ PopList(regs); // Restore IC data and entry-point
2565 if (save_entry_point) {
2566 __ SmiUntag(R3);
2567 }
2568 if (type == kInstanceCall) {
2569 __ Pop(R0);
2570 }
2571 __ RestoreCodePointer();
2572 __ LeaveStubFrame();
2573 __ b(&done_stepping);
2574 }
2575#endif
2576}
2577
2578// R0: receiver
2579// R9: ICData
2580// LR: return address
2581void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub(
2582 Assembler* assembler) {
2583 GenerateNArgsCheckInlineCacheStub(
2584 assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
2585 kUnoptimized, kInstanceCall, kIgnoreExactness);
2586}
2587
2588// R0: receiver
2589// R9: ICData
2590// LR: return address
2591void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub(
2592 Assembler* assembler) {
2593 __ Stop("Unimplemented");
2594}
2595
2596// R0: receiver
2597// R9: ICData
2598// LR: return address
2599void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub(
2600 Assembler* assembler) {
2601 GenerateNArgsCheckInlineCacheStub(
2602 assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2603 kUnoptimized, kInstanceCall, kIgnoreExactness);
2604}
2605
2606// R0: receiver
2607// R9: ICData
2608// LR: return address
2609void StubCodeCompiler::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
2610 GenerateNArgsCheckInlineCacheStub(
2611 assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD,
2612 kUnoptimized, kInstanceCall, kIgnoreExactness);
2613}
2614
2615// R0: receiver
2616// R9: ICData
2617// LR: return address
2618void StubCodeCompiler::GenerateSmiLessInlineCacheStub(Assembler* assembler) {
2619 GenerateNArgsCheckInlineCacheStub(
2620 assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT,
2621 kUnoptimized, kInstanceCall, kIgnoreExactness);
2622}
2623
2624// R0: receiver
2625// R9: ICData
2626// LR: return address
2627void StubCodeCompiler::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
2628 GenerateNArgsCheckInlineCacheStub(
2629 assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ,
2630 kUnoptimized, kInstanceCall, kIgnoreExactness);
2631}
2632
2633// R0: receiver
2634// R9: ICData
2635// R8: Function
2636// LR: return address
2637void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub(
2638 Assembler* assembler) {
2639 GenerateNArgsCheckInlineCacheStub(
2640 assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
2641 kOptimized, kInstanceCall, kIgnoreExactness);
2642}
2643
2644// R0: receiver
2645// R9: ICData
2646// R8: Function
2647// LR: return address
2648void StubCodeCompiler::
2649 GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub(
2650 Assembler* assembler) {
2651 __ Stop("Unimplemented");
2652}
2653
2654// R0: receiver
2655// R9: ICData
2656// R8: Function
2657// LR: return address
2658void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub(
2659 Assembler* assembler) {
2660 GenerateNArgsCheckInlineCacheStub(
2661 assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2662 kOptimized, kInstanceCall, kIgnoreExactness);
2663}
2664
2665// R9: ICData
2666// LR: return address
2667void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
2668 Assembler* assembler) {
2669 GenerateRecordEntryPoint(assembler);
2670 GenerateUsageCounterIncrement(assembler, /* scratch */ R8);
2671#if defined(DEBUG)
2672 {
2673 Label ok;
2674 // Check that the IC data array has NumArgsTested() == 0.
2675 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
2676 __ ldr(R8, FieldAddress(R9, target::ICData::state_bits_offset()));
2677 ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed.
2678 __ and_(R8, R8, Operand(target::ICData::NumArgsTestedMask()));
2679 __ CompareImmediate(R8, 0);
2680 __ b(&ok, EQ);
2681 __ Stop("Incorrect IC data for unoptimized static call");
2682 __ Bind(&ok);
2683 }
2684#endif // DEBUG
2685
2686#if !defined(PRODUCT)
2687 // Check single stepping.
2688 Label stepping, done_stepping;
2689 __ LoadIsolate(R8);
2690 __ ldrb(R8, Address(R8, target::Isolate::single_step_offset()));
2691 __ CompareImmediate(R8, 0);
2692 __ b(&stepping, NE);
2693 __ Bind(&done_stepping);
2694#endif
2695
2696 // R9: IC data object (preserved).
2697 __ ldr(R8, FieldAddress(R9, target::ICData::entries_offset()));
2698 // R8: ic_data_array with entries: target functions and count.
2699 __ AddImmediate(R8, target::Array::data_offset() - kHeapObjectTag);
2700 // R8: points directly to the first ic data array element.
2701 const intptr_t target_offset =
2702 target::ICData::TargetIndexFor(0) * target::kWordSize;
2703 const intptr_t count_offset =
2704 target::ICData::CountIndexFor(0) * target::kWordSize;
2705
2706 if (FLAG_optimization_counter_threshold >= 0) {
2707 // Increment count for this call, ignore overflow.
2708 __ LoadFromOffset(kWord, R1, R8, count_offset);
2709 __ adds(R1, R1, Operand(target::ToRawSmi(1)));
2710 __ StoreIntoSmiField(Address(R8, count_offset), R1);
2711 }
2712
2713 // Load arguments descriptor into R4.
2714 __ ldr(R4,
2715 FieldAddress(R9, target::CallSiteData::arguments_descriptor_offset()));
2716
2717 // Get function and call it, if possible.
2718 __ LoadFromOffset(kWord, R0, R8, target_offset);
2719 __ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
2720
2721 __ Branch(Address(R0, R3));
2722
2723#if !defined(PRODUCT)
2724 __ Bind(&stepping);
2725 __ EnterStubFrame();
2726 __ SmiTag(R3); // Entry-point is not Smi.
2727 __ PushList((1 << R9) | (1 << R3)); // Preserve IC data and entry-point.
2728 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2729 __ PopList((1 << R9) | (1 << R3));
2730 __ SmiUntag(R3);
2731 __ RestoreCodePointer();
2732 __ LeaveStubFrame();
2733 __ b(&done_stepping);
2734#endif
2735}
2736
2737// R9: ICData
2738// LR: return address
2739void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub(
2740 Assembler* assembler) {
2741 GenerateUsageCounterIncrement(assembler, /* scratch */ R8);
2742 GenerateNArgsCheckInlineCacheStub(
2743 assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
2744 kUnoptimized, kStaticCall, kIgnoreExactness);
2745}
2746
2747// R9: ICData
2748// LR: return address
2749void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub(
2750 Assembler* assembler) {
2751 GenerateUsageCounterIncrement(assembler, /* scratch */ R8);
2752 GenerateNArgsCheckInlineCacheStub(
2753 assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2754 kUnoptimized, kStaticCall, kIgnoreExactness);
2755}
2756
2757// Stub for compiling a function and jumping to the compiled code.
2758// R4: Arguments descriptor.
2759// R0: Function.
2760void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
2761 __ EnterStubFrame();
2762 __ PushList((1 << R0) | (1 << R4)); // Preserve arg desc, pass function.
2763 __ CallRuntime(kCompileFunctionRuntimeEntry, 1);
2764 __ PopList((1 << R0) | (1 << R4));
2765 __ LeaveStubFrame();
2766
2767 // When using the interpreter, the function's code may now point to the
2768 // InterpretCall stub. Make sure R0, R4 and R9 are preserved.
2769 __ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
2770 __ Branch(FieldAddress(R0, target::Function::entry_point_offset()));
2771}
2772
2773// Stub for interpreting a function call.
2774// R4: Arguments descriptor.
2775// R0: Function.
2776void StubCodeCompiler::GenerateInterpretCallStub(Assembler* assembler) {
2777 if (FLAG_precompiled_mode) {
2778 __ Stop("Not using interpreter");
2779 return;
2780 }
2781 __ EnterStubFrame();
2782
2783#if defined(DEBUG)
2784 {
2785 Label ok;
2786 // Check that we are always entering from Dart code.
2787 __ LoadFromOffset(kWord, R8, THR, target::Thread::vm_tag_offset());
2788 __ CompareImmediate(R8, VMTag::kDartCompiledTagId);
2789 __ b(&ok, EQ);
2790 __ Stop("Not coming from Dart code.");
2791 __ Bind(&ok);
2792 }
2793#endif
2794
2795 // Adjust arguments count for type arguments vector.
2796 __ LoadFieldFromOffset(kWord, R2, R4,
2797 target::ArgumentsDescriptor::count_offset());
2798 __ SmiUntag(R2);
2799 __ LoadFieldFromOffset(kWord, R1, R4,
2800 target::ArgumentsDescriptor::type_args_len_offset());
2801 __ cmp(R1, Operand(0));
2802 __ AddImmediate(R2, R2, 1, NE); // Include the type arguments.
2803
2804 // Compute argv.
2805 __ mov(R3, Operand(R2, LSL, 2));
2806 __ add(R3, FP, Operand(R3));
2807 __ AddImmediate(R3,
2808 target::frame_layout.param_end_from_fp * target::kWordSize);
2809
2810 // Indicate decreasing memory addresses of arguments with negative argc.
2811 __ rsb(R2, R2, Operand(0));
2812
2813 // Align frame before entering C++ world. Fifth argument passed on the stack.
2814 __ ReserveAlignedFrameSpace(1 * target::kWordSize);
2815
2816 // Pass arguments in registers.
2817 // R0: Function.
2818 __ mov(R1, Operand(R4)); // Arguments descriptor.
2819 // R2: Negative argc.
2820 // R3: Argv.
2821 __ str(THR, Address(SP, 0)); // Fifth argument: Thread.
2822
2823 // Save exit frame information to enable stack walking as we are about
2824 // to transition to Dart VM C++ code.
2825 __ StoreToOffset(kWord, FP, THR,
2826 target::Thread::top_exit_frame_info_offset());
2827
2828 // Mark that the thread exited generated code through a runtime call.
2829 __ LoadImmediate(R5, target::Thread::exit_through_runtime_call());
2830 __ StoreToOffset(kWord, R5, THR, target::Thread::exit_through_ffi_offset());
2831
2832 // Mark that the thread is executing VM code.
2833 __ LoadFromOffset(kWord, R5, THR,
2834 target::Thread::interpret_call_entry_point_offset());
2835 __ StoreToOffset(kWord, R5, THR, target::Thread::vm_tag_offset());
2836
2837 __ blx(R5);
2838
2839 // Mark that the thread is executing Dart code.
2840 __ LoadImmediate(R2, VMTag::kDartCompiledTagId);
2841 __ StoreToOffset(kWord, R2, THR, target::Thread::vm_tag_offset());
2842
2843 // Mark that the thread has not exited generated Dart code.
2844 __ LoadImmediate(R2, 0);
2845 __ StoreToOffset(kWord, R2, THR, target::Thread::exit_through_ffi_offset());
2846
2847 // Reset exit frame information in Isolate's mutator thread structure.
2848 __ StoreToOffset(kWord, R2, THR,
2849 target::Thread::top_exit_frame_info_offset());
2850
2851 __ LeaveStubFrame();
2852 __ Ret();
2853}
2854
2855// R9: Contains an ICData.
2856void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) {
2857#if defined(PRODUCT)
2858 __ Stop("No debugging in PRODUCT mode");
2859#else
2860 __ EnterStubFrame();
2861 __ Push(R0); // Preserve receiver.
2862 __ Push(R9); // Preserve IC data.
2863 __ PushImmediate(0); // Space for result.
2864 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
2865 __ Pop(CODE_REG); // Original stub.
2866 __ Pop(R9); // Restore IC data.
2867 __ Pop(R0); // Restore receiver.
2868 __ LeaveStubFrame();
2869 __ Branch(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
2870#endif // defined(PRODUCT)
2871}
2872
2873void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub(
2874 Assembler* assembler) {
2875#if defined(PRODUCT)
2876 __ Stop("No debugging in PRODUCT mode");
2877#else
2878 __ EnterStubFrame();
2879 __ Push(R9); // Preserve IC data.
2880 __ PushImmediate(0); // Space for result.
2881 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
2882 __ Pop(CODE_REG); // Original stub.
2883 __ Pop(R9); // Restore IC data.
2884 __ LeaveStubFrame();
2885 __ Branch(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
2886#endif // defined(PRODUCT)
2887}
2888
2889void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
2890#if defined(PRODUCT)
2891 __ Stop("No debugging in PRODUCT mode");
2892#else
2893 __ EnterStubFrame();
2894 __ LoadImmediate(R0, 0);
2895 // Make room for result.
2896 __ PushList((1 << R0));
2897 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
2898 __ PopList((1 << CODE_REG));
2899 __ LeaveStubFrame();
2900 __ Branch(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
2901#endif // defined(PRODUCT)
2902}
2903
2904// Called only from unoptimized code. All relevant registers have been saved.
2905void StubCodeCompiler::GenerateDebugStepCheckStub(Assembler* assembler) {
2906#if defined(PRODUCT)
2907 __ Stop("No debugging in PRODUCT mode");
2908#else
2909 // Check single stepping.
2910 Label stepping, done_stepping;
2911 __ LoadIsolate(R1);
2912 __ ldrb(R1, Address(R1, target::Isolate::single_step_offset()));
2913 __ CompareImmediate(R1, 0);
2914 __ b(&stepping, NE);
2915 __ Bind(&done_stepping);
2916 __ Ret();
2917
2918 __ Bind(&stepping);
2919 __ EnterStubFrame();
2920 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2921 __ LeaveStubFrame();
2922 __ b(&done_stepping);
2923#endif // defined(PRODUCT)
2924}
2925
2926// Used to check class and type arguments. Arguments passed in registers:
2927// LR: return address.
2928// R0: instance (must be preserved).
2929// R2: instantiator type arguments (only if n >= 4, can be raw_null).
2930// R1: function type arguments (only if n >= 4, can be raw_null).
2931// R3: target::SubtypeTestCache.
2932//
2933// Preserves R0/R2.
2934// Preserves NOTFP with bare instructions and CODE_REG without.
2935//
2936// Result in R1: null -> not found, otherwise result (true or false).
2937static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
2938 ASSERT(n == 1 || n == 2 || n == 4 || n == 6);
2939
2940 const Register kInstanceCidOrFunction = R8;
2941 const Register kInstanceInstantiatorTypeArgumentsReg = R4;
2942 const Register kInstanceDelayedFunctionTypeArgumentsReg = PP;
2943
2944 Register kInstanceParentFunctionTypeArgumentsReg;
2945 Register kNullReg;
2946 if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
2947 // NOTFP must be preserved, but CODE_REG can be freely used.
2948 kInstanceParentFunctionTypeArgumentsReg = NOTFP;
2949 kNullReg = CODE_REG;
2950 } else {
2951 // CODE_REG must be preserved, but NOTFP can be freely used.
2952 kInstanceParentFunctionTypeArgumentsReg = CODE_REG;
2953 kNullReg = NOTFP;
2954 }
2955
2956 __ LoadObject(kNullReg, NullObject());
2957
2958 // Free up these 2 registers to be used for 6-value test.
2959 if (n >= 6) {
2960 __ PushList(1 << kInstanceParentFunctionTypeArgumentsReg |
2961 1 << kInstanceDelayedFunctionTypeArgumentsReg);
2962 }
2963
2964 // Loop initialization (moved up here to avoid having all dependent loads
2965 // after each other).
2966
2967 // We avoid a load-acquire barrier here by relying on the fact that all other
2968 // loads from the array are data-dependent loads.
2969 __ ldr(TypeTestABI::kSubtypeTestCacheReg,
2970 FieldAddress(TypeTestABI::kSubtypeTestCacheReg,
2971 target::SubtypeTestCache::cache_offset()));
2972 __ AddImmediate(TypeTestABI::kSubtypeTestCacheReg,
2973 target::Array::data_offset() - kHeapObjectTag);
2974
2975 Label loop, not_closure;
2976 if (n >= 4) {
2977 __ LoadClassIdMayBeSmi(kInstanceCidOrFunction, TypeTestABI::kInstanceReg);
2978 } else {
2979 __ LoadClassId(kInstanceCidOrFunction, TypeTestABI::kInstanceReg);
2980 }
2981 __ CompareImmediate(kInstanceCidOrFunction, kClosureCid);
2982 __ b(&not_closure, NE);
2983
2984 // Closure handling.
2985 {
2986 __ ldr(kInstanceCidOrFunction,
2987 FieldAddress(TypeTestABI::kInstanceReg,
2988 target::Closure::function_offset()));
2989 if (n >= 2) {
2990 __ ldr(
2991 kInstanceInstantiatorTypeArgumentsReg,
2992 FieldAddress(TypeTestABI::kInstanceReg,
2993 target::Closure::instantiator_type_arguments_offset()));
2994 if (n >= 6) {
2995 ASSERT(n == 6);
2996 __ ldr(kInstanceParentFunctionTypeArgumentsReg,
2997 FieldAddress(TypeTestABI::kInstanceReg,
2998 target::Closure::function_type_arguments_offset()));
2999 __ ldr(kInstanceDelayedFunctionTypeArgumentsReg,
3000 FieldAddress(TypeTestABI::kInstanceReg,
3001 target::Closure::delayed_type_arguments_offset()));
3002 }
3003 }
3004 __ b(&loop);
3005 }
3006
3007 // Non-Closure handling.
3008 {
3009 __ Bind(&not_closure);
3010 if (n >= 2) {
3011 Label has_no_type_arguments;
3012 __ LoadClassById(R9, kInstanceCidOrFunction);
3013 __ mov(kInstanceInstantiatorTypeArgumentsReg, Operand(kNullReg));
3014 __ ldr(R9,
3015 FieldAddress(
3016 R9, target::Class::
3017 host_type_arguments_field_offset_in_words_offset()));
3018 __ CompareImmediate(R9, target::Class::kNoTypeArguments);
3019 __ b(&has_no_type_arguments, EQ);
3020 __ add(R9, TypeTestABI::kInstanceReg, Operand(R9, LSL, 2));
3021 __ ldr(kInstanceInstantiatorTypeArgumentsReg, FieldAddress(R9, 0));
3022 __ Bind(&has_no_type_arguments);
3023
3024 if (n >= 6) {
3025 __ mov(kInstanceParentFunctionTypeArgumentsReg, Operand(kNullReg));
3026 __ mov(kInstanceDelayedFunctionTypeArgumentsReg, Operand(kNullReg));
3027 }
3028 }
3029 __ SmiTag(kInstanceCidOrFunction);
3030 }
3031
3032 Label found, not_found, next_iteration;
3033
3034 // Loop header.
3035 __ Bind(&loop);
3036 __ ldr(R9, Address(TypeTestABI::kSubtypeTestCacheReg,
3037 target::kWordSize *
3038 target::SubtypeTestCache::kInstanceClassIdOrFunction));
3039 __ cmp(R9, Operand(kNullReg));
3040 __ b(&not_found, EQ);
3041 __ cmp(R9, Operand(kInstanceCidOrFunction));
3042 if (n == 1) {
3043 __ b(&found, EQ);
3044 } else {
3045 __ b(&next_iteration, NE);
3046 __ ldr(R9, Address(TypeTestABI::kSubtypeTestCacheReg,
3047 target::kWordSize *
3048 target::SubtypeTestCache::kInstanceTypeArguments));
3049 __ cmp(R9, Operand(kInstanceInstantiatorTypeArgumentsReg));
3050 if (n == 2) {
3051 __ b(&found, EQ);
3052 } else {
3053 __ b(&next_iteration, NE);
3054 __ ldr(R9,
3055 Address(TypeTestABI::kSubtypeTestCacheReg,
3056 target::kWordSize *
3057 target::SubtypeTestCache::kInstantiatorTypeArguments));
3058 __ cmp(R9, Operand(TypeTestABI::kInstantiatorTypeArgumentsReg));
3059 __ b(&next_iteration, NE);
3060 __ ldr(R9, Address(TypeTestABI::kSubtypeTestCacheReg,
3061 target::kWordSize *
3062 target::SubtypeTestCache::kFunctionTypeArguments));
3063 __ cmp(R9, Operand(TypeTestABI::kFunctionTypeArgumentsReg));
3064 if (n == 4) {
3065 __ b(&found, EQ);
3066 } else {
3067 ASSERT(n == 6);
3068 __ b(&next_iteration, NE);
3069
3070 __ ldr(R9, Address(TypeTestABI::kSubtypeTestCacheReg,
3071 target::kWordSize *
3072 target::SubtypeTestCache::
3073 kInstanceParentFunctionTypeArguments));
3074 __ cmp(R9, Operand(kInstanceParentFunctionTypeArgumentsReg));
3075 __ b(&next_iteration, NE);
3076
3077 __ ldr(R9, Address(TypeTestABI::kSubtypeTestCacheReg,
3078 target::kWordSize *
3079 target::SubtypeTestCache::
3080 kInstanceDelayedFunctionTypeArguments));
3081 __ cmp(R9, Operand(kInstanceDelayedFunctionTypeArgumentsReg));
3082 __ b(&found, EQ);
3083 }
3084 }
3085 }
3086 __ Bind(&next_iteration);
3087 __ AddImmediate(
3088 TypeTestABI::kSubtypeTestCacheReg,
3089 target::kWordSize * target::SubtypeTestCache::kTestEntryLength);
3090 __ b(&loop);
3091
3092 __ Bind(&found);
3093 __ ldr(R1,
3094 Address(TypeTestABI::kSubtypeTestCacheReg,
3095 target::kWordSize * target::SubtypeTestCache::kTestResult));
3096 if (n >= 6) {
3097 __ PopList(1 << kInstanceParentFunctionTypeArgumentsReg |
3098 1 << kInstanceDelayedFunctionTypeArgumentsReg);
3099 }
3100 __ Ret();
3101
3102 __ Bind(&not_found);
3103 __ mov(R1, Operand(kNullReg));
3104 if (n >= 6) {
3105 __ PopList(1 << kInstanceParentFunctionTypeArgumentsReg |
3106 1 << kInstanceDelayedFunctionTypeArgumentsReg);
3107 }
3108 __ Ret();
3109}
3110
3111// See comment on [GenerateSubtypeNTestCacheStub].
3112void StubCodeCompiler::GenerateSubtype1TestCacheStub(Assembler* assembler) {
3113 GenerateSubtypeNTestCacheStub(assembler, 1);
3114}
3115
3116// See comment on [GenerateSubtypeNTestCacheStub].
3117void StubCodeCompiler::GenerateSubtype2TestCacheStub(Assembler* assembler) {
3118 GenerateSubtypeNTestCacheStub(assembler, 2);
3119}
3120
3121// See comment on [GenerateSubtypeNTestCacheStub].
3122void StubCodeCompiler::GenerateSubtype4TestCacheStub(Assembler* assembler) {
3123 GenerateSubtypeNTestCacheStub(assembler, 4);
3124}
3125
3126// See comment on [GenerateSubtypeNTestCacheStub].
3127void StubCodeCompiler::GenerateSubtype6TestCacheStub(Assembler* assembler) {
3128 GenerateSubtypeNTestCacheStub(assembler, 6);
3129}
3130
3131// Used to test whether a given value is of a given type (different variants,
3132// all have the same calling convention).
3133//
3134// Inputs:
3135// - R0 : instance to test against.
3136// - R2 : instantiator type arguments (if needed).
3137// - R1 : function type arguments (if needed).
3138//
3139// - R3 : subtype test cache.
3140//
3141// - R8 : type to test against.
3142// - R4 : name of destination variable.
3143//
3144// Preserves R0/R2.
3145//
3146// Note of warning: The caller will not populate CODE_REG and we have therefore
3147// no access to the pool.
3148void StubCodeCompiler::GenerateDefaultTypeTestStub(Assembler* assembler) {
3149 __ ldr(CODE_REG, Address(THR, target::Thread::slow_type_test_stub_offset()));
3150 __ Branch(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
3151}
3152
3153// Used instead of DefaultTypeTestStub when null is assignable.
3154void StubCodeCompiler::GenerateDefaultNullableTypeTestStub(
3155 Assembler* assembler) {
3156 Label done;
3157
3158 // Fast case for 'null'.
3159 __ CompareObject(TypeTestABI::kInstanceReg, NullObject());
3160 __ BranchIf(EQUAL, &done);
3161
3162 __ ldr(CODE_REG, Address(THR, target::Thread::slow_type_test_stub_offset()));
3163 __ Branch(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
3164
3165 __ Bind(&done);
3166 __ Ret();
3167}
3168
3169void StubCodeCompiler::GenerateTopTypeTypeTestStub(Assembler* assembler) {
3170 __ Ret();
3171}
3172
3173void StubCodeCompiler::GenerateUnreachableTypeTestStub(Assembler* assembler) {
3174 __ Breakpoint();
3175}
3176
3177static void InvokeTypeCheckFromTypeTestStub(Assembler* assembler,
3178 TypeCheckMode mode) {
3179 __ PushObject(NullObject()); // Make room for result.
3180 __ Push(TypeTestABI::kInstanceReg);
3181 __ Push(TypeTestABI::kDstTypeReg);
3182 __ Push(TypeTestABI::kInstantiatorTypeArgumentsReg);
3183 __ Push(TypeTestABI::kFunctionTypeArgumentsReg);
3184 __ PushObject(NullObject());
3185 __ Push(TypeTestABI::kSubtypeTestCacheReg);
3186 __ PushImmediate(target::ToRawSmi(mode));
3187 __ CallRuntime(kTypeCheckRuntimeEntry, 7);
3188 __ Drop(1); // mode
3189 __ Pop(TypeTestABI::kSubtypeTestCacheReg);
3190 __ Drop(1); // dst_name
3191 __ Pop(TypeTestABI::kFunctionTypeArgumentsReg);
3192 __ Pop(TypeTestABI::kInstantiatorTypeArgumentsReg);
3193 __ Pop(TypeTestABI::kDstTypeReg);
3194 __ Pop(TypeTestABI::kInstanceReg);
3195 __ Drop(1); // Discard return value.
3196}
3197
3198void StubCodeCompiler::GenerateLazySpecializeTypeTestStub(
3199 Assembler* assembler) {
3200 __ ldr(CODE_REG,
3201 Address(THR, target::Thread::lazy_specialize_type_test_stub_offset()));
3202 __ EnterStubFrame();
3203 InvokeTypeCheckFromTypeTestStub(assembler, kTypeCheckFromLazySpecializeStub);
3204 __ LeaveStubFrame();
3205 __ Ret();
3206}
3207
3208// Used instead of LazySpecializeTypeTestStub when null is assignable.
3209void StubCodeCompiler::GenerateLazySpecializeNullableTypeTestStub(
3210 Assembler* assembler) {
3211 Label done;
3212
3213 __ CompareObject(TypeTestABI::kInstanceReg, NullObject());
3214 __ BranchIf(EQUAL, &done);
3215
3216 __ ldr(CODE_REG,
3217 Address(THR, target::Thread::lazy_specialize_type_test_stub_offset()));
3218 __ EnterStubFrame();
3219 InvokeTypeCheckFromTypeTestStub(assembler, kTypeCheckFromLazySpecializeStub);
3220 __ LeaveStubFrame();
3221
3222 __ Bind(&done);
3223 __ Ret();
3224}
3225
3226void StubCodeCompiler::GenerateSlowTypeTestStub(Assembler* assembler) {
3227 Label done, call_runtime;
3228
3229 if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
3230 __ ldr(CODE_REG,
3231 Address(THR, target::Thread::slow_type_test_stub_offset()));
3232 }
3233 __ EnterStubFrame();
3234
3235 // If the subtype-cache is null, it needs to be lazily-created by the runtime.
3236 __ CompareObject(TypeTestABI::kSubtypeTestCacheReg, NullObject());
3237 __ BranchIf(EQUAL, &call_runtime);
3238
3239 const Register kTmp = R9;
3240
3241 // If this is not a [Type] object, we'll go to the runtime.
3242 Label is_simple_case, is_complex_case;
3243 __ LoadClassId(kTmp, TypeTestABI::kDstTypeReg);
3244 __ cmp(kTmp, Operand(kTypeCid));
3245 __ BranchIf(NOT_EQUAL, &is_complex_case);
3246
3247 // Check whether this [Type] is instantiated/uninstantiated.
3248 __ ldrb(kTmp, FieldAddress(TypeTestABI::kDstTypeReg,
3249 target::Type::type_state_offset()));
3250 __ cmp(kTmp,
3251 Operand(target::AbstractTypeLayout::kTypeStateFinalizedInstantiated));
3252 __ BranchIf(NOT_EQUAL, &is_complex_case);
3253
3254 // Check whether this [Type] is a function type.
3255 __ ldr(kTmp, FieldAddress(TypeTestABI::kDstTypeReg,
3256 target::Type::signature_offset()));
3257 __ CompareObject(kTmp, NullObject());
3258 __ BranchIf(NOT_EQUAL, &is_complex_case);
3259
3260 // This [Type] could be a FutureOr. Subtype2TestCache does not support Smi.
3261 __ BranchIfSmi(TypeTestABI::kInstanceReg, &is_complex_case);
3262
3263 // Fall through to &is_simple_case
3264
3265 const intptr_t kRegsToSave = (1 << TypeTestABI::kSubtypeTestCacheReg) |
3266 (1 << TypeTestABI::kDstTypeReg) |
3267 (1 << TypeTestABI::kFunctionTypeArgumentsReg);
3268
3269 __ Bind(&is_simple_case);
3270 {
3271 __ PushList(kRegsToSave);
3272 __ BranchLink(StubCodeSubtype2TestCache());
3273 __ CompareObject(R1, CastHandle<Object>(TrueObject()));
3274 __ PopList(kRegsToSave);
3275 __ BranchIf(EQUAL, &done); // Cache said: yes.
3276 __ Jump(&call_runtime);
3277 }
3278
3279 __ Bind(&is_complex_case);
3280 {
3281 __ PushList(kRegsToSave);
3282 __ BranchLink(StubCodeSubtype6TestCache());
3283 __ CompareObject(R1, CastHandle<Object>(TrueObject()));
3284 __ PopList(kRegsToSave);
3285 __ BranchIf(EQUAL, &done); // Cache said: yes.
3286 // Fall through to runtime_call
3287 }
3288
3289 __ Bind(&call_runtime);
3290
3291 InvokeTypeCheckFromTypeTestStub(assembler, kTypeCheckFromSlowStub);
3292
3293 __ Bind(&done);
3294 __ LeaveStubFrame();
3295 __ Ret();
3296}
3297
3298// Return the current stack pointer address, used to do stack alignment checks.
3299void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
3300 __ mov(R0, Operand(SP));
3301 __ Ret();
3302}
3303
3304// Jump to a frame on the call stack.
3305// LR: return address.
3306// R0: program_counter.
3307// R1: stack_pointer.
3308// R2: frame_pointer.
3309// R3: thread.
3310// Does not return.
3311void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) {
3312 ASSERT(kExceptionObjectReg == R0);
3313 ASSERT(kStackTraceObjectReg == R1);
3314 __ mov(IP, Operand(R1)); // Copy Stack pointer into IP.
3315 __ mov(LR, Operand(R0)); // Program counter.
3316 __ mov(THR, Operand(R3)); // Thread.
3317 __ mov(FP, Operand(R2)); // Frame_pointer.
3318 __ mov(SP, Operand(IP)); // Set Stack pointer.
3319#if defined(USING_SHADOW_CALL_STACK)
3320#error Unimplemented
3321#endif
3322 Label exit_through_non_ffi;
3323 Register tmp1 = R0, tmp2 = R1;
3324 // Check if we exited generated from FFI. If so do transition.
3325 __ LoadFromOffset(kWord, tmp1, THR,
3326 compiler::target::Thread::exit_through_ffi_offset());
3327 __ LoadImmediate(tmp2, target::Thread::exit_through_ffi());
3328 __ cmp(tmp1, Operand(tmp2));
3329 __ b(&exit_through_non_ffi, NE);
3330 __ TransitionNativeToGenerated(tmp1, tmp2,
3331 /*leave_safepoint=*/true);
3332 __ Bind(&exit_through_non_ffi);
3333
3334 // Set the tag.
3335 __ LoadImmediate(R2, VMTag::kDartCompiledTagId);
3336 __ StoreToOffset(kWord, R2, THR, target::Thread::vm_tag_offset());
3337 // Clear top exit frame.
3338 __ LoadImmediate(R2, 0);
3339 __ StoreToOffset(kWord, R2, THR,
3340 target::Thread::top_exit_frame_info_offset());
3341 // Restore the pool pointer.
3342 __ RestoreCodePointer();
3343 if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
3344 __ SetupGlobalPoolAndDispatchTable();
3345 __ set_constant_pool_allowed(true);
3346 } else {
3347 __ LoadPoolPointer();
3348 }
3349 __ bx(LR); // Jump to continuation point.
3350}
3351
3352// Run an exception handler. Execution comes from JumpToFrame
3353// stub or from the simulator.
3354//
3355// The arguments are stored in the Thread object.
3356// Does not return.
3357void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
3358 __ LoadFromOffset(kWord, LR, THR, target::Thread::resume_pc_offset());
3359
3360 word offset_from_thread = 0;
3361 bool ok = target::CanLoadFromThread(NullObject(), &offset_from_thread);
3362 ASSERT(ok);
3363 __ LoadFromOffset(kWord, R2, THR, offset_from_thread);
3364
3365 // Exception object.
3366 __ LoadFromOffset(kWord, R0, THR, target::Thread::active_exception_offset());
3367 __ StoreToOffset(kWord, R2, THR, target::Thread::active_exception_offset());
3368
3369 // StackTrace object.
3370 __ LoadFromOffset(kWord, R1, THR, target::Thread::active_stacktrace_offset());
3371 __ StoreToOffset(kWord, R2, THR, target::Thread::active_stacktrace_offset());
3372
3373 __ bx(LR); // Jump to the exception handler code.
3374}
3375
3376// Deoptimize a frame on the call stack before rewinding.
3377// The arguments are stored in the Thread object.
3378// No result.
3379void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
3380 // Push zap value instead of CODE_REG.
3381 __ LoadImmediate(IP, kZapCodeReg);
3382 __ Push(IP);
3383
3384 // Load the deopt pc into LR.
3385 __ LoadFromOffset(kWord, LR, THR, target::Thread::resume_pc_offset());
3386 GenerateDeoptimizationSequence(assembler, kEagerDeopt);
3387
3388 // After we have deoptimized, jump to the correct frame.
3389 __ EnterStubFrame();
3390 __ CallRuntime(kRewindPostDeoptRuntimeEntry, 0);
3391 __ LeaveStubFrame();
3392 __ bkpt(0);
3393}
3394
3395// Calls to the runtime to optimize the given function.
3396// R8: function to be reoptimized.
3397// R4: argument descriptor (preserved).
3398void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
3399 __ ldr(CODE_REG, Address(THR, target::Thread::optimize_stub_offset()));
3400 __ EnterStubFrame();
3401 __ Push(R4);
3402 __ LoadImmediate(IP, 0);
3403 __ Push(IP); // Setup space on stack for return value.
3404 __ Push(R8);
3405 __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
3406 __ Pop(R0); // Discard argument.
3407 __ Pop(R0); // Get Function object
3408 __ Pop(R4); // Restore argument descriptor.
3409 __ LeaveStubFrame();
3410 __ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
3411 __ Branch(FieldAddress(R0, target::Function::entry_point_offset()));
3412 __ bkpt(0);
3413}
3414
3415// Does identical check (object references are equal or not equal) with special
3416// checks for boxed numbers.
3417// LR: return address.
3418// Return Zero condition flag set if equal.
3419// Note: A Mint cannot contain a value that would fit in Smi.
3420static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
3421 const Register left,
3422 const Register right,
3423 const Register temp) {
3424 Label reference_compare, done, check_mint;
3425 // If any of the arguments is Smi do reference compare.
3426 __ tst(left, Operand(kSmiTagMask));
3427 __ b(&reference_compare, EQ);
3428 __ tst(right, Operand(kSmiTagMask));
3429 __ b(&reference_compare, EQ);
3430
3431 // Value compare for two doubles.
3432 __ CompareClassId(left, kDoubleCid, temp);
3433 __ b(&check_mint, NE);
3434 __ CompareClassId(right, kDoubleCid, temp);
3435 __ b(&done, NE);
3436
3437 // Double values bitwise compare.
3438 __ ldr(temp, FieldAddress(left, target::Double::value_offset() +
3439 0 * target::kWordSize));
3440 __ ldr(IP, FieldAddress(right, target::Double::value_offset() +
3441 0 * target::kWordSize));
3442 __ cmp(temp, Operand(IP));
3443 __ b(&done, NE);
3444 __ ldr(temp, FieldAddress(left, target::Double::value_offset() +
3445 1 * target::kWordSize));
3446 __ ldr(IP, FieldAddress(right, target::Double::value_offset() +
3447 1 * target::kWordSize));
3448 __ cmp(temp, Operand(IP));
3449 __ b(&done);
3450
3451 __ Bind(&check_mint);
3452 __ CompareClassId(left, kMintCid, temp);
3453 __ b(&reference_compare, NE);
3454 __ CompareClassId(right, kMintCid, temp);
3455 __ b(&done, NE);
3456 __ ldr(temp, FieldAddress(
3457 left, target::Mint::value_offset() + 0 * target::kWordSize));
3458 __ ldr(IP, FieldAddress(
3459 right, target::Mint::value_offset() + 0 * target::kWordSize));
3460 __ cmp(temp, Operand(IP));
3461 __ b(&done, NE);
3462 __ ldr(temp, FieldAddress(
3463 left, target::Mint::value_offset() + 1 * target::kWordSize));
3464 __ ldr(IP, FieldAddress(
3465 right, target::Mint::value_offset() + 1 * target::kWordSize));
3466 __ cmp(temp, Operand(IP));
3467 __ b(&done);
3468
3469 __ Bind(&reference_compare);
3470 __ cmp(left, Operand(right));
3471 __ Bind(&done);
3472}
3473
3474// Called only from unoptimized code. All relevant registers have been saved.
3475// LR: return address.
3476// SP + 4: left operand.
3477// SP + 0: right operand.
3478// Return Zero condition flag set if equal.
3479void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub(
3480 Assembler* assembler) {
3481#if !defined(PRODUCT)
3482 // Check single stepping.
3483 Label stepping, done_stepping;
3484 __ LoadIsolate(R1);
3485 __ ldrb(R1, Address(R1, target::Isolate::single_step_offset()));
3486 __ CompareImmediate(R1, 0);
3487 __ b(&stepping, NE);
3488 __ Bind(&done_stepping);
3489#endif
3490
3491 const Register temp = R2;
3492 const Register left = R1;
3493 const Register right = R0;
3494 __ ldr(left, Address(SP, 1 * target::kWordSize));
3495 __ ldr(right, Address(SP, 0 * target::kWordSize));
3496 GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp);
3497 __ Ret();
3498
3499#if !defined(PRODUCT)
3500 __ Bind(&stepping);
3501 __ EnterStubFrame();
3502 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
3503 __ RestoreCodePointer();
3504 __ LeaveStubFrame();
3505 __ b(&done_stepping);
3506#endif
3507}
3508
3509// Called from optimized code only.
3510// LR: return address.
3511// SP + 4: left operand.
3512// SP + 0: right operand.
3513// Return Zero condition flag set if equal.
3514void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
3515 Assembler* assembler) {
3516 const Register temp = R2;
3517 const Register left = R1;
3518 const Register right = R0;
3519 __ ldr(left, Address(SP, 1 * target::kWordSize));
3520 __ ldr(right, Address(SP, 0 * target::kWordSize));
3521 GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp);
3522 __ Ret();
3523}
3524
3525// Called from megamorphic calls.
3526// R0: receiver
3527// R9: MegamorphicCache (preserved)
3528// Passed to target:
3529// R0: function
3530// R4: arguments descriptor
3531// CODE_REG: target Code
3532void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
3533 __ LoadTaggedClassIdMayBeSmi(R8, R0);
3534 // R8: receiver cid as Smi.
3535 __ ldr(R2, FieldAddress(R9, target::MegamorphicCache::buckets_offset()));
3536 __ ldr(R1, FieldAddress(R9, target::MegamorphicCache::mask_offset()));
3537 // R2: cache buckets array.
3538 // R1: mask as a smi.
3539
3540 // Compute the table index.
3541 ASSERT(target::MegamorphicCache::kSpreadFactor == 7);
3542 // Use reverse subtract to multiply with 7 == 8 - 1.
3543 __ rsb(R3, R8, Operand(R8, LSL, 3));
3544 // R3: probe.
3545 Label loop;
3546 __ Bind(&loop);
3547 __ and_(R3, R3, Operand(R1));
3548
3549 const intptr_t base = target::Array::data_offset();
3550 // R3 is smi tagged, but table entries are two words, so LSL 2.
3551 Label probe_failed;
3552 __ add(IP, R2, Operand(R3, LSL, 2));
3553 __ ldr(R6, FieldAddress(IP, base));
3554 __ cmp(R6, Operand(R8));
3555 __ b(&probe_failed, NE);
3556
3557 Label load_target;
3558 __ Bind(&load_target);
3559 // Call the target found in the cache. For a class id match, this is a
3560 // proper target for the given name and arguments descriptor. If the
3561 // illegal class id was found, the target is a cache miss handler that can
3562 // be invoked as a normal Dart function.
3563 const auto target_address = FieldAddress(IP, base + target::kWordSize);
3564 if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
3565 __ ldr(
3566 ARGS_DESC_REG,
3567 FieldAddress(R9, target::CallSiteData::arguments_descriptor_offset()));
3568 __ Branch(target_address);
3569 } else {
3570 __ ldr(R0, target_address);
3571 __ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
3572 __ ldr(
3573 ARGS_DESC_REG,
3574 FieldAddress(R9, target::CallSiteData::arguments_descriptor_offset()));
3575 __ Branch(FieldAddress(R0, target::Function::entry_point_offset()));
3576 }
3577
3578 // Probe failed, check if it is a miss.
3579 __ Bind(&probe_failed);
3580 ASSERT(kIllegalCid == 0);
3581 __ tst(R6, Operand(R6));
3582 Label miss;
3583 __ b(&miss, EQ); // branch if miss.
3584
3585 // Try next entry in the table.
3586 __ AddImmediate(R3, target::ToRawSmi(1));
3587 __ b(&loop);
3588
3589 __ Bind(&miss);
3590 GenerateSwitchableCallMissStub(assembler);
3591}
3592
3593void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
3594 Label loop, found, miss;
3595 __ ldr(R8, FieldAddress(R9, target::ICData::entries_offset()));
3596 __ ldr(R4,
3597 FieldAddress(R9, target::CallSiteData::arguments_descriptor_offset()));
3598 __ AddImmediate(R8, target::Array::data_offset() - kHeapObjectTag);
3599 // R8: first IC entry
3600 __ LoadTaggedClassIdMayBeSmi(R1, R0);
3601 // R1: receiver cid as Smi
3602
3603 __ Bind(&loop);
3604 __ ldr(R2, Address(R8, 0));
3605 __ cmp(R1, Operand(R2));
3606 __ b(&found, EQ);
3607 __ CompareImmediate(R2, target::ToRawSmi(kIllegalCid));
3608 __ b(&miss, EQ);
3609
3610 const intptr_t entry_length =
3611 target::ICData::TestEntryLengthFor(1, /*tracking_exactness=*/false) *
3612 target::kWordSize;
3613 __ AddImmediate(R8, entry_length); // Next entry.
3614 __ b(&loop);
3615
3616 __ Bind(&found);
3617 const intptr_t code_offset =
3618 target::ICData::CodeIndexFor(1) * target::kWordSize;
3619 const intptr_t entry_offset =
3620 target::ICData::EntryPointIndexFor(1) * target::kWordSize;
3621 if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
3622 __ ldr(CODE_REG, Address(R8, code_offset));
3623 }
3624 __ Branch(Address(R8, entry_offset));
3625
3626 __ Bind(&miss);
3627 __ LoadIsolate(R2);
3628 __ ldr(CODE_REG, Address(R2, target::Isolate::ic_miss_code_offset()));
3629 __ Branch(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
3630}
3631
3632// Implement the monomorphic entry check for call-sites where the receiver
3633// might be a Smi.
3634//
3635// R0: receiver
3636// R9: MonomorphicSmiableCall object
3637//
3638// R2, R3: clobbered
3639void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub(
3640 Assembler* assembler) {
3641 __ LoadClassIdMayBeSmi(IP, R0);
3642
3643 // expected_cid_ should come right after target_
3644 ASSERT(target::MonomorphicSmiableCall::expected_cid_offset() ==
3645 target::MonomorphicSmiableCall::target_offset() + target::kWordSize);
3646 // entrypoint_ should come right after expected_cid_
3647 ASSERT(target::MonomorphicSmiableCall::entrypoint_offset() ==
3648 target::MonomorphicSmiableCall::expected_cid_offset() +
3649 target::kWordSize);
3650
3651 if (FLAG_use_bare_instructions) {
3652 // Simultaneously load the expected cid into R2 and the entrypoint into R3.
3653 __ ldrd(
3654 R2, R3, R9,
3655 target::MonomorphicSmiableCall::expected_cid_offset() - kHeapObjectTag);
3656 __ cmp(R2, Operand(IP));
3657 __ Branch(Address(THR, target::Thread::switchable_call_miss_entry_offset()),
3658 NE);
3659 __ bx(R3);
3660 } else {
3661 // Simultaneously load the target into R2 and the expected cid into R3.
3662 __ ldrd(R2, R3, R9,
3663 target::MonomorphicSmiableCall::target_offset() - kHeapObjectTag);
3664 __ mov(CODE_REG, Operand(R2));
3665 __ cmp(R3, Operand(IP));
3666 __ Branch(Address(THR, target::Thread::switchable_call_miss_entry_offset()),
3667 NE);
3668 __ LoadField(IP, FieldAddress(R2, target::Code::entry_point_offset()));
3669 __ bx(IP);
3670 }
3671}
3672
3673static void CallSwitchableCallMissRuntimeEntry(Assembler* assembler,
3674 Register receiver_reg) {
3675 __ LoadImmediate(IP, 0);
3676 __ Push(IP); // Result slot
3677 __ Push(IP); // Arg0: stub out
3678 __ Push(receiver_reg); // Arg1: Receiver
3679 __ CallRuntime(kSwitchableCallMissRuntimeEntry, 2);
3680 __ Pop(R0); // Get the receiver
3681 __ Pop(CODE_REG); // result = stub
3682 __ Pop(R9); // result = IC
3683}
3684
3685// Called from switchable IC calls.
3686// R0: receiver
3687void StubCodeCompiler::GenerateSwitchableCallMissStub(Assembler* assembler) {
3688 __ ldr(CODE_REG,
3689 Address(THR, target::Thread::switchable_call_miss_stub_offset()));
3690 __ EnterStubFrame();
3691 CallSwitchableCallMissRuntimeEntry(assembler, /*receiver_reg=*/R0);
3692 __ LeaveStubFrame();
3693
3694 __ Branch(FieldAddress(
3695 CODE_REG, target::Code::entry_point_offset(CodeEntryKind::kNormal)));
3696}
3697
3698// Called from switchable IC calls.
3699// R0: receiver
3700// R9: SingleTargetCache
3701// Passed to target:
3702// CODE_REG: target Code object
3703void StubCodeCompiler::GenerateSingleTargetCallStub(Assembler* assembler) {
3704 Label miss;
3705 __ LoadClassIdMayBeSmi(R1, R0);
3706 __ ldrh(R2,
3707 FieldAddress(R9, target::SingleTargetCache::lower_limit_offset()));
3708 __ ldrh(R3,
3709 FieldAddress(R9, target::SingleTargetCache::upper_limit_offset()));
3710
3711 __ cmp(R1, Operand(R2));
3712 __ b(&miss, LT);
3713 __ cmp(R1, Operand(R3));
3714 __ b(&miss, GT);
3715
3716 __ ldr(CODE_REG,
3717 FieldAddress(R9, target::SingleTargetCache::target_offset()));
3718 __ Branch(FieldAddress(R9, target::SingleTargetCache::entry_point_offset()));
3719
3720 __ Bind(&miss);
3721 __ EnterStubFrame();
3722 CallSwitchableCallMissRuntimeEntry(assembler, /*receiver_reg=*/R0);
3723 __ LeaveStubFrame();
3724
3725 __ Branch(FieldAddress(
3726 CODE_REG, target::Code::entry_point_offset(CodeEntryKind::kMonomorphic)));
3727}
3728
3729void StubCodeCompiler::GenerateFrameAwaitingMaterializationStub(
3730 Assembler* assembler) {
3731 __ bkpt(0);
3732}
3733
3734void StubCodeCompiler::GenerateAsynchronousGapMarkerStub(Assembler* assembler) {
3735 __ bkpt(0);
3736}
3737
3738void StubCodeCompiler::GenerateNotLoadedStub(Assembler* assembler) {
3739 __ EnterStubFrame();
3740 __ CallRuntime(kNotLoadedRuntimeEntry, 0);
3741 __ bkpt(0);
3742}
3743
3744// Instantiate type arguments from instantiator and function type args.
3745// R3 uninstantiated type arguments.
3746// R2 instantiator type arguments.
3747// R1: function type arguments.
3748// Returns instantiated type arguments in R0.
3749void StubCodeCompiler::GenerateInstantiateTypeArgumentsStub(
3750 Assembler* assembler) {
3751 // Lookup cache before calling runtime.
3752 __ ldr(R0, compiler::FieldAddress(
3753 InstantiationABI::kUninstantiatedTypeArgumentsReg,
3754 target::TypeArguments::instantiations_offset()));
3755 __ AddImmediate(R0, compiler::target::Array::data_offset() - kHeapObjectTag);
3756 // The instantiations cache is initialized with Object::zero_array() and is
3757 // therefore guaranteed to contain kNoInstantiator. No length check needed.
3758 compiler::Label loop, next, found, call_runtime;
3759 __ Bind(&loop);
3760
3761 // Use load-acquire to test for sentinel, if we found non-sentinel it is safe
3762 // to access the other entries. If we found a sentinel we go to runtime.
3763 __ LoadAcquire(R4, R0,
3764 TypeArguments::Instantiation::kInstantiatorTypeArgsIndex *
3765 target::kWordSize);
3766 __ CompareImmediate(R4, Smi::RawValue(TypeArguments::kNoInstantiator));
3767 __ b(&call_runtime, EQ);
3768
3769 __ cmp(R4,
3770 compiler::Operand(InstantiationABI::kInstantiatorTypeArgumentsReg));
3771 __ b(&next, NE);
3772 __ ldr(IP, compiler::Address(
3773 R0, TypeArguments::Instantiation::kFunctionTypeArgsIndex *
3774 target::kWordSize));
3775 __ cmp(IP, compiler::Operand(InstantiationABI::kFunctionTypeArgumentsReg));
3776 __ b(&found, EQ);
3777 __ Bind(&next);
3778 __ AddImmediate(
3779 R0, TypeArguments::Instantiation::kSizeInWords * target::kWordSize);
3780 __ b(&loop);
3781
3782 // Instantiate non-null type arguments.
3783 // A runtime call to instantiate the type arguments is required.
3784 __ Bind(&call_runtime);
3785 __ EnterStubFrame();
3786 __ PushObject(Object::null_object()); // Make room for the result.
3787 static_assert((InstantiationABI::kUninstantiatedTypeArgumentsReg >
3788 InstantiationABI::kInstantiatorTypeArgumentsReg) &&
3789 (InstantiationABI::kInstantiatorTypeArgumentsReg >
3790 InstantiationABI::kFunctionTypeArgumentsReg),
3791 "Should be ordered to push arguments with one instruction");
3792 __ PushList((1 << InstantiationABI::kUninstantiatedTypeArgumentsReg) |
3793 (1 << InstantiationABI::kInstantiatorTypeArgumentsReg) |
3794 (1 << InstantiationABI::kFunctionTypeArgumentsReg));
3795 __ CallRuntime(kInstantiateTypeArgumentsRuntimeEntry, 3);
3796 __ Drop(3); // Drop 2 type vectors, and uninstantiated type.
3797 __ Pop(InstantiationABI::kResultTypeArgumentsReg);
3798 __ LeaveStubFrame();
3799 __ Ret();
3800
3801 __ Bind(&found);
3802 __ ldr(InstantiationABI::kResultTypeArgumentsReg,
3803 compiler::Address(
3804 R0, TypeArguments::Instantiation::kInstantiatedTypeArgsIndex *
3805 target::kWordSize));
3806 __ Ret();
3807}
3808
3809void StubCodeCompiler::
3810 GenerateInstantiateTypeArgumentsMayShareInstantiatorTAStub(
3811 Assembler* assembler) {
3812 // Return the instantiator type arguments if its nullability is compatible for
3813 // sharing, otherwise proceed to instantiation cache lookup.
3814 compiler::Label cache_lookup;
3815 __ ldr(R0, compiler::FieldAddress(
3816 InstantiationABI::kUninstantiatedTypeArgumentsReg,
3817 target::TypeArguments::nullability_offset()));
3818 __ ldr(R4,
3819 compiler::FieldAddress(InstantiationABI::kInstantiatorTypeArgumentsReg,
3820 target::TypeArguments::nullability_offset()));
3821 __ and_(R4, R4, Operand(R0));
3822 __ cmp(R4, Operand(R0));
3823 __ b(&cache_lookup, NE);
3824 __ mov(InstantiationABI::kResultTypeArgumentsReg,
3825 Operand(InstantiationABI::kInstantiatorTypeArgumentsReg));
3826 __ Ret();
3827
3828 __ Bind(&cache_lookup);
3829 GenerateInstantiateTypeArgumentsStub(assembler);
3830}
3831
3832void StubCodeCompiler::GenerateInstantiateTypeArgumentsMayShareFunctionTAStub(
3833 Assembler* assembler) {
3834 // Return the function type arguments if its nullability is compatible for
3835 // sharing, otherwise proceed to instantiation cache lookup.
3836 compiler::Label cache_lookup;
3837 __ ldr(R0, compiler::FieldAddress(
3838 InstantiationABI::kUninstantiatedTypeArgumentsReg,
3839 target::TypeArguments::nullability_offset()));
3840 __ ldr(R4,
3841 compiler::FieldAddress(InstantiationABI::kFunctionTypeArgumentsReg,
3842 target::TypeArguments::nullability_offset()));
3843 __ and_(R4, R4, Operand(R0));
3844 __ cmp(R4, Operand(R0));
3845 __ b(&cache_lookup, NE);
3846 __ mov(InstantiationABI::kResultTypeArgumentsReg,
3847 Operand(InstantiationABI::kFunctionTypeArgumentsReg));
3848 __ Ret();
3849
3850 __ Bind(&cache_lookup);
3851 GenerateInstantiateTypeArgumentsStub(assembler);
3852}
3853
3854} // namespace compiler
3855
3856} // namespace dart
3857
3858#endif // defined(TARGET_ARCH_ARM)
3859