1 | // Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file |
2 | // for details. All rights reserved. Use of this source code is governed by a |
3 | // BSD-style license that can be found in the LICENSE file. |
4 | |
5 | #include "vm/globals.h" |
6 | |
7 | // For `AllocateObjectInstr::WillAllocateNewOrRemembered` |
8 | // For `GenericCheckBoundInstr::UseUnboxedRepresentation` |
9 | #include "vm/compiler/backend/il.h" |
10 | |
11 | #define SHOULD_NOT_INCLUDE_RUNTIME |
12 | |
13 | #include "vm/compiler/stub_code_compiler.h" |
14 | |
15 | #if defined(TARGET_ARCH_ARM64) |
16 | |
17 | #include "vm/class_id.h" |
18 | #include "vm/code_entry_kind.h" |
19 | #include "vm/compiler/api/type_check_mode.h" |
20 | #include "vm/compiler/assembler/assembler.h" |
21 | #include "vm/compiler/backend/locations.h" |
22 | #include "vm/constants.h" |
23 | #include "vm/instructions.h" |
24 | #include "vm/static_type_exactness_state.h" |
25 | #include "vm/tags.h" |
26 | |
27 | #define __ assembler-> |
28 | |
29 | namespace dart { |
30 | |
DEFINE_FLAG(bool, inline_alloc, true, "Inline allocation of objects.");
32 | DEFINE_FLAG(bool, |
33 | use_slow_path, |
34 | false, |
35 | "Set to true for debugging & verifying the slow paths." ); |
36 | DECLARE_FLAG(bool, precompiled_mode); |
37 | |
38 | namespace compiler { |
39 | |
// Ensures that [R0] is a new object; if it is not, it will be added to the
// remembered set via a leaf runtime call.
42 | // |
43 | // WARNING: This might clobber all registers except for [R0], [THR] and [FP]. |
44 | // The caller should simply call LeaveStubFrame() and return. |
45 | static void EnsureIsNewOrRemembered(Assembler* assembler, |
46 | bool preserve_registers = true) { |
// If the object is not remembered, we call a leaf runtime entry to add it to
// the remembered set.
49 | Label done; |
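// New-space objects are distinguishable by their address: the bit at
// kNewObjectBitPosition is set for new-space allocations, so tbnz (test bit
// and branch if non-zero) skips the runtime call when the object is new.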
50 | __ tbnz(&done, R0, target::ObjectAlignment::kNewObjectBitPosition); |
51 | |
52 | if (preserve_registers) { |
53 | __ EnterCallRuntimeFrame(0); |
54 | } else { |
55 | __ ReserveAlignedFrameSpace(0); |
56 | } |
57 | // [R0] already contains first argument. |
58 | __ mov(R1, THR); |
59 | __ CallRuntime(kEnsureRememberedAndMarkingDeferredRuntimeEntry, 2); |
60 | if (preserve_registers) { |
61 | __ LeaveCallRuntimeFrame(); |
62 | } |
63 | |
64 | __ Bind(&done); |
65 | } |
66 | |
67 | // Input parameters: |
68 | // LR : return address. |
69 | // SP : address of last argument in argument array. |
70 | // SP + 8*R4 - 8 : address of first argument in argument array. |
71 | // SP + 8*R4 : address of return value. |
72 | // R5 : address of the runtime function to call. |
73 | // R4 : number of arguments to the call. |
74 | void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) { |
75 | const intptr_t thread_offset = target::NativeArguments::thread_offset(); |
76 | const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset(); |
77 | const intptr_t argv_offset = target::NativeArguments::argv_offset(); |
78 | const intptr_t retval_offset = target::NativeArguments::retval_offset(); |
79 | |
80 | __ Comment("CallToRuntimeStub" ); |
81 | __ ldr(CODE_REG, Address(THR, target::Thread::call_to_runtime_stub_offset())); |
82 | __ SetPrologueOffset(); |
83 | __ EnterStubFrame(); |
84 | |
85 | // Save exit frame information to enable stack walking as we are about |
86 | // to transition to Dart VM C++ code. |
87 | __ StoreToOffset(FP, THR, target::Thread::top_exit_frame_info_offset()); |
88 | |
89 | // Mark that the thread exited generated code through a runtime call. |
90 | __ LoadImmediate(R8, target::Thread::exit_through_runtime_call()); |
91 | __ StoreToOffset(R8, THR, target::Thread::exit_through_ffi_offset()); |
92 | |
93 | #if defined(DEBUG) |
94 | { |
95 | Label ok; |
96 | // Check that we are always entering from Dart code. |
97 | __ LoadFromOffset(R8, THR, target::Thread::vm_tag_offset()); |
98 | __ CompareImmediate(R8, VMTag::kDartCompiledTagId); |
99 | __ b(&ok, EQ); |
100 | __ Stop("Not coming from Dart code." ); |
101 | __ Bind(&ok); |
102 | } |
103 | #endif |
104 | |
105 | // Mark that the thread is executing VM code. |
106 | __ StoreToOffset(R5, THR, target::Thread::vm_tag_offset()); |
107 | |
108 | // Reserve space for arguments and align frame before entering C++ world. |
109 | // target::NativeArguments are passed in registers. |
110 | __ Comment("align stack" ); |
111 | // Reserve space for arguments. |
112 | ASSERT(target::NativeArguments::StructSize() == 4 * target::kWordSize); |
113 | __ ReserveAlignedFrameSpace(target::NativeArguments::StructSize()); |
114 | |
115 | // Pass target::NativeArguments structure by value and call runtime. |
116 | // Registers R0, R1, R2, and R3 are used. |
117 | |
118 | ASSERT(thread_offset == 0 * target::kWordSize); |
119 | // Set thread in NativeArgs. |
120 | __ mov(R0, THR); |
121 | |
122 | // There are no runtime calls to closures, so we do not need to set the tag |
123 | // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. |
124 | ASSERT(argc_tag_offset == 1 * target::kWordSize); |
125 | __ mov(R1, R4); // Set argc in target::NativeArguments. |
126 | |
127 | ASSERT(argv_offset == 2 * target::kWordSize); |
128 | __ add(R2, ZR, Operand(R4, LSL, 3)); |
129 | __ add(R2, FP, Operand(R2)); // Compute argv. |
130 | // Set argv in target::NativeArguments. |
131 | __ AddImmediate(R2, |
132 | target::frame_layout.param_end_from_fp * target::kWordSize); |
133 | |
134 | ASSERT(retval_offset == 3 * target::kWordSize); |
135 | __ AddImmediate(R3, R2, target::kWordSize); |
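// R3 now points one word above the first argument, i.e. at the caller's
// return-value slot (SP + 8*R4 in the header comment above).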
136 | |
137 | __ StoreToOffset(R0, SP, thread_offset); |
138 | __ StoreToOffset(R1, SP, argc_tag_offset); |
139 | __ StoreToOffset(R2, SP, argv_offset); |
140 | __ StoreToOffset(R3, SP, retval_offset); |
141 | __ mov(R0, SP); // Pass the pointer to the target::NativeArguments. |
142 | |
143 | // We are entering runtime code, so the C stack pointer must be restored from |
144 | // the stack limit to the top of the stack. We cache the stack limit address |
145 | // in a callee-saved register. |
146 | __ mov(R25, CSP); |
147 | __ mov(CSP, SP); |
148 | |
149 | __ blr(R5); |
150 | __ Comment("CallToRuntimeStub return" ); |
151 | |
152 | // Restore SP and CSP. |
153 | __ mov(SP, CSP); |
154 | __ mov(CSP, R25); |
155 | |
// Refresh pinned register values (incl. write barrier mask and null object).
157 | __ RestorePinnedRegisters(); |
158 | |
// The return value, if any, was stored through the retval pointer, next to
// the first argument.
// Mark that the thread is executing Dart code.
161 | __ LoadImmediate(R2, VMTag::kDartCompiledTagId); |
162 | __ StoreToOffset(R2, THR, target::Thread::vm_tag_offset()); |
163 | |
164 | // Mark that the thread has not exited generated Dart code. |
165 | __ LoadImmediate(R2, 0); |
166 | __ StoreToOffset(R2, THR, target::Thread::exit_through_ffi_offset()); |
167 | |
168 | // Reset exit frame information in Isolate's mutator thread structure. |
169 | __ StoreToOffset(ZR, THR, target::Thread::top_exit_frame_info_offset()); |
170 | |
171 | // Restore the global object pool after returning from runtime (old space is |
172 | // moving, so the GOP could have been relocated). |
173 | if (FLAG_precompiled_mode && FLAG_use_bare_instructions) { |
174 | __ SetupGlobalPoolAndDispatchTable(); |
175 | } |
176 | |
177 | __ LeaveStubFrame(); |
178 | |
179 | // The following return can jump to a lazy-deopt stub, which assumes R0 |
180 | // contains a return value and will save it in a GC-visible way. We therefore |
181 | // have to ensure R0 does not contain any garbage value left from the C |
182 | // function we called (which has return type "void"). |
183 | // (See GenerateDeoptimizationSequence::saved_result_slot_from_fp.) |
184 | __ LoadImmediate(R0, 0); |
185 | __ ret(); |
186 | } |
187 | |
188 | static void GenerateSharedStubGeneric( |
189 | Assembler* assembler, |
190 | bool save_fpu_registers, |
191 | intptr_t self_code_stub_offset_from_thread, |
192 | bool allow_return, |
193 | std::function<void()> perform_runtime_call) { |
194 | // We want the saved registers to appear like part of the caller's frame, so |
195 | // we push them before calling EnterStubFrame. |
196 | RegisterSet all_registers; |
197 | all_registers.AddAllNonReservedRegisters(save_fpu_registers); |
198 | |
// To make the stack map calculation architecture-independent, we do the same
// as on Intel.
201 | __ Push(LR); |
202 | __ PushRegisters(all_registers); |
203 | __ ldr(CODE_REG, Address(THR, self_code_stub_offset_from_thread)); |
204 | __ EnterStubFrame(); |
205 | perform_runtime_call(); |
206 | if (!allow_return) { |
207 | __ Breakpoint(); |
208 | return; |
209 | } |
210 | __ LeaveStubFrame(); |
211 | __ PopRegisters(all_registers); |
212 | __ Drop(1); // We use the LR restored via LeaveStubFrame. |
213 | __ ret(LR); |
214 | } |
215 | |
216 | static void GenerateSharedStub(Assembler* assembler, |
217 | bool save_fpu_registers, |
218 | const RuntimeEntry* target, |
219 | intptr_t self_code_stub_offset_from_thread, |
220 | bool allow_return, |
221 | bool store_runtime_result_in_r0 = false) { |
222 | ASSERT(!store_runtime_result_in_r0 || allow_return); |
223 | auto perform_runtime_call = [&]() { |
224 | if (store_runtime_result_in_r0) { |
225 | __ PushRegister(NULL_REG); |
226 | } |
227 | __ CallRuntime(*target, /*argument_count=*/0); |
228 | if (store_runtime_result_in_r0) { |
229 | __ PopRegister(R0); |
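// Write the result into R0's spill slot inside the register block pushed by
// GenerateSharedStubGeneric, so that the later PopRegisters restores it
// into R0.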
230 | __ str( |
231 | R0, |
232 | Address(FP, target::kWordSize * |
233 | StubCodeCompiler::WordOffsetFromFpToCpuRegister(R0))); |
234 | } |
235 | }; |
236 | GenerateSharedStubGeneric(assembler, save_fpu_registers, |
237 | self_code_stub_offset_from_thread, allow_return, |
238 | perform_runtime_call); |
239 | } |
240 | |
241 | void StubCodeCompiler::GenerateEnterSafepointStub(Assembler* assembler) { |
242 | RegisterSet all_registers; |
243 | all_registers.AddAllGeneralRegisters(); |
244 | |
245 | __ EnterFrame(0); |
246 | __ PushRegisters(all_registers); |
247 | |
248 | __ mov(CALLEE_SAVED_TEMP, CSP); |
249 | __ mov(CALLEE_SAVED_TEMP2, SP); |
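// Dart code runs on SP while C code runs on CSP; stash both stack pointers
// in callee-saved temps so they survive the C call below.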
250 | __ ReserveAlignedFrameSpace(0); |
251 | __ mov(CSP, SP); |
252 | |
253 | __ ldr(R0, Address(THR, kEnterSafepointRuntimeEntry.OffsetFromThread())); |
254 | __ blr(R0); |
255 | |
256 | __ mov(SP, CALLEE_SAVED_TEMP2); |
257 | __ mov(CSP, CALLEE_SAVED_TEMP); |
258 | |
259 | __ PopRegisters(all_registers); |
260 | __ LeaveFrame(); |
261 | |
262 | __ Ret(); |
263 | } |
264 | |
265 | void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) { |
266 | RegisterSet all_registers; |
267 | all_registers.AddAllGeneralRegisters(); |
268 | |
269 | __ EnterFrame(0); |
270 | __ PushRegisters(all_registers); |
271 | |
272 | __ mov(CALLEE_SAVED_TEMP, CSP); |
273 | __ mov(CALLEE_SAVED_TEMP2, SP); |
274 | __ ReserveAlignedFrameSpace(0); |
275 | __ mov(CSP, SP); |
276 | |
277 | // Set the execution state to VM while waiting for the safepoint to end. |
278 | // This isn't strictly necessary but enables tests to check that we're not |
279 | // in native code anymore. See tests/ffi/function_gc_test.dart for example. |
280 | __ LoadImmediate(R0, target::Thread::vm_execution_state()); |
281 | __ str(R0, Address(THR, target::Thread::execution_state_offset())); |
282 | |
283 | __ ldr(R0, Address(THR, kExitSafepointRuntimeEntry.OffsetFromThread())); |
284 | __ blr(R0); |
285 | |
286 | __ mov(SP, CALLEE_SAVED_TEMP2); |
287 | __ mov(CSP, CALLEE_SAVED_TEMP); |
288 | |
289 | __ PopRegisters(all_registers); |
290 | __ LeaveFrame(); |
291 | |
292 | __ Ret(); |
293 | } |
294 | |
295 | // Calls native code within a safepoint. |
296 | // |
297 | // On entry: |
298 | // R8: target to call |
299 | // Stack: set up for native call (SP), aligned, CSP < SP |
300 | // |
301 | // On exit: |
302 | // R19: clobbered, although normally callee-saved |
303 | // Stack: preserved, CSP == SP |
304 | void StubCodeCompiler::GenerateCallNativeThroughSafepointStub( |
305 | Assembler* assembler) { |
306 | COMPILE_ASSERT((1 << R19) & kAbiPreservedCpuRegs); |
307 | |
308 | __ mov(R19, LR); |
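// Stash LR in callee-saved R19; the transition code and the native call
// below clobber LR (hence the "R19: clobbered" note in the header comment).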
309 | __ LoadImmediate(R9, target::Thread::exit_through_ffi()); |
310 | __ TransitionGeneratedToNative(R8, FPREG, R9 /*volatile*/, |
311 | /*enter_safepoint=*/true); |
312 | __ mov(R25, CSP); |
313 | __ mov(CSP, SP); |
314 | |
315 | #if defined(DEBUG) |
// Check CSP alignment (CSP == SP at this point, so checking SP suffices).
317 | __ andi(R10 /*volatile*/, SP, |
318 | Immediate(~(OS::ActivationFrameAlignment() - 1))); |
319 | __ cmp(R10, Operand(SP)); |
320 | Label done; |
321 | __ b(&done, EQ); |
322 | __ Breakpoint(); |
323 | __ Bind(&done); |
324 | #endif |
325 | |
326 | __ blr(R8); |
327 | |
328 | __ mov(SP, CSP); |
329 | __ mov(CSP, R25); |
330 | |
331 | __ TransitionNativeToGenerated(R9, /*leave_safepoint=*/true); |
332 | __ ret(R19); |
333 | } |
334 | |
335 | #if !defined(DART_PRECOMPILER) |
336 | void StubCodeCompiler::GenerateJITCallbackTrampolines( |
337 | Assembler* assembler, |
338 | intptr_t next_callback_id) { |
339 | #if !defined(HOST_ARCH_ARM64) |
// TODO(37299): FFI is not supported in SIMARM64.
341 | __ Breakpoint(); |
342 | #else |
343 | Label done; |
344 | |
345 | // R8 is volatile and not used for passing any arguments. |
346 | COMPILE_ASSERT(!IsCalleeSavedRegister(R8) && !IsArgumentRegister(R8)); |
347 | for (intptr_t i = 0; |
348 | i < NativeCallbackTrampolines::NumCallbackTrampolinesPerPage(); ++i) { |
// We don't use LoadImmediate because we need the trampoline size to be
// fixed independently of the callback ID.
//
// Instead we paste the callback ID directly into the code and load it
// PC-relative.
354 | __ ldr(R8, compiler::Address::PC(2 * Instr::kInstrSize)); |
355 | __ b(&done); |
356 | __ Emit(next_callback_id + i); |
357 | } |
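// Each trampoline is thus three 32-bit words (the PC-relative load, which
// reads the ID word two instructions ahead; the branch to the shared part;
// and the callback ID itself) - the fixed size asserted below. The 64-bit
// load also picks up whatever follows the ID word in its upper half; that
// is masked off with uxtw in the shared part.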
358 | |
359 | ASSERT(__ CodeSize() == |
360 | kNativeCallbackTrampolineSize * |
361 | NativeCallbackTrampolines::NumCallbackTrampolinesPerPage()); |
362 | |
363 | __ Bind(&done); |
364 | |
365 | const intptr_t shared_stub_start = __ CodeSize(); |
366 | |
367 | // The load of the callback ID might have incorrect higher-order bits, since |
368 | // we only emit a 32-bit callback ID. |
369 | __ uxtw(R8, R8); |
370 | |
// Save THR (callee-saved) and LR on the real C stack (CSP). This keeps it
// aligned.
373 | COMPILE_ASSERT(StubCodeCompiler::kNativeCallbackTrampolineStackDelta == 2); |
374 | __ stp(THR, LR, Address(CSP, -2 * target::kWordSize, Address::PairPreIndex)); |
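// (stp with pre-index decrements CSP by two words before storing the pair,
// keeping CSP 16-byte aligned as the C ABI requires.)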
375 | |
376 | COMPILE_ASSERT(!IsArgumentRegister(THR)); |
377 | |
378 | RegisterSet all_registers; |
379 | all_registers.AddAllArgumentRegisters(); |
380 | |
381 | // The call below might clobber R8 (volatile, holding callback_id). |
382 | all_registers.Add(Location::RegisterLocation(R8)); |
383 | |
384 | // Load the thread, verify the callback ID and exit the safepoint. |
385 | // |
386 | // We exit the safepoint inside DLRT_GetThreadForNativeCallbackTrampoline |
// in order to save code size on this shared stub.
388 | { |
389 | __ mov(SP, CSP); |
390 | |
391 | __ EnterFrame(0); |
392 | __ PushRegisters(all_registers); |
393 | |
394 | __ EnterFrame(0); |
395 | __ ReserveAlignedFrameSpace(0); |
396 | |
397 | __ mov(CSP, SP); |
398 | |
399 | // Since DLRT_GetThreadForNativeCallbackTrampoline can theoretically be |
400 | // loaded anywhere, we use the same trick as before to ensure a predictable |
401 | // instruction sequence. |
402 | Label call; |
403 | __ mov(R0, R8); |
404 | __ ldr(R1, compiler::Address::PC(2 * Instr::kInstrSize)); |
405 | __ b(&call); |
406 | |
407 | __ Emit64( |
408 | reinterpret_cast<int64_t>(&DLRT_GetThreadForNativeCallbackTrampoline)); |
409 | |
410 | __ Bind(&call); |
411 | __ blr(R1); |
412 | __ mov(THR, R0); |
413 | |
414 | __ LeaveFrame(); |
415 | |
416 | __ PopRegisters(all_registers); |
417 | __ LeaveFrame(); |
418 | |
419 | __ mov(CSP, SP); |
420 | } |
421 | |
422 | COMPILE_ASSERT(!IsCalleeSavedRegister(R9) && !IsArgumentRegister(R9)); |
423 | |
424 | // Load the code object. |
425 | __ LoadFromOffset(R9, THR, compiler::target::Thread::callback_code_offset()); |
426 | __ LoadFieldFromOffset(R9, R9, |
427 | compiler::target::GrowableObjectArray::data_offset()); |
428 | __ ldr(R9, __ ElementAddressForRegIndex( |
429 | /*external=*/false, |
430 | /*array_cid=*/kArrayCid, |
431 | /*index, smi-tagged=*/compiler::target::kWordSize * 2, |
432 | /*index_unboxed=*/false, |
433 | /*array=*/R9, |
434 | /*index=*/R8, |
435 | /*temp=*/TMP)); |
436 | __ LoadFieldFromOffset(R9, R9, compiler::target::Code::entry_point_offset()); |
437 | |
438 | // Clobbers all volatile registers, including the callback ID in R8. |
439 | // Resets CSP and SP, important for EnterSafepoint below. |
440 | __ blr(R9); |
441 | |
442 | // EnterSafepoint clobbers TMP, TMP2 and R8 -- all volatile and not holding |
443 | // return values. |
444 | __ EnterSafepoint(/*scratch=*/R8); |
445 | |
446 | // Pop LR and THR from the real stack (CSP). |
447 | __ ldp(THR, LR, Address(CSP, 2 * target::kWordSize, Address::PairPostIndex)); |
448 | |
449 | __ ret(); |
450 | |
451 | ASSERT((__ CodeSize() - shared_stub_start) == kNativeCallbackSharedStubSize); |
452 | ASSERT(__ CodeSize() <= VirtualMemory::PageSize()); |
453 | |
454 | #if defined(DEBUG) |
455 | while (__ CodeSize() < VirtualMemory::PageSize()) { |
456 | __ Breakpoint(); |
457 | } |
458 | #endif |
459 | #endif // !defined(HOST_ARCH_ARM64) |
460 | } |
461 | #endif // !defined(DART_PRECOMPILER) |
462 | |
463 | // R1: The extracted method. |
464 | // R4: The type_arguments_field_offset (or 0) |
465 | void StubCodeCompiler::GenerateBuildMethodExtractorStub( |
466 | Assembler* assembler, |
467 | const Object& closure_allocation_stub, |
468 | const Object& context_allocation_stub) { |
469 | const intptr_t kReceiverOffset = target::frame_layout.param_end_from_fp + 1; |
470 | |
471 | __ EnterStubFrame(); |
472 | |
473 | // Build type_arguments vector (or null) |
474 | Label no_type_args; |
475 | __ ldr(R3, Address(THR, target::Thread::object_null_offset()), kDoubleWord); |
476 | __ cmp(R4, Operand(0)); |
477 | __ b(&no_type_args, EQ); |
478 | __ ldr(R0, Address(FP, kReceiverOffset * target::kWordSize)); |
479 | __ ldr(R3, Address(R0, R4)); |
480 | __ Bind(&no_type_args); |
481 | |
482 | // Push type arguments & extracted method. |
483 | __ PushPair(R3, R1); |
484 | |
485 | // Allocate context. |
486 | { |
487 | Label done, slow_path; |
488 | __ TryAllocateArray(kContextCid, target::Context::InstanceSize(1), |
489 | &slow_path, |
490 | R0, // instance |
491 | R1, // end address |
492 | R2, R3); |
493 | __ ldr(R1, Address(THR, target::Thread::object_null_offset())); |
494 | __ str(R1, FieldAddress(R0, target::Context::parent_offset())); |
495 | __ LoadImmediate(R1, 1); |
496 | __ str(R1, FieldAddress(R0, target::Context::num_variables_offset())); |
497 | __ b(&done); |
498 | |
499 | __ Bind(&slow_path); |
500 | |
501 | __ LoadImmediate(/*num_vars=*/R1, 1); |
502 | __ LoadObject(CODE_REG, context_allocation_stub); |
503 | __ ldr(R0, FieldAddress(CODE_REG, target::Code::entry_point_offset())); |
504 | __ blr(R0); |
505 | |
506 | __ Bind(&done); |
507 | } |
508 | |
509 | // Store receiver in context |
510 | __ ldr(R1, Address(FP, target::kWordSize * kReceiverOffset)); |
511 | __ StoreIntoObject(R0, FieldAddress(R0, target::Context::variable_offset(0)), |
512 | R1); |
513 | |
514 | // Push context. |
515 | __ Push(R0); |
516 | |
517 | // Allocate closure. |
518 | __ LoadObject(CODE_REG, closure_allocation_stub); |
519 | __ ldr(R1, FieldAddress(CODE_REG, target::Code::entry_point_offset( |
520 | CodeEntryKind::kUnchecked))); |
521 | __ blr(R1); |
522 | |
523 | // Populate closure object. |
524 | __ Pop(R1); // Pop context. |
525 | __ StoreIntoObject(R0, FieldAddress(R0, target::Closure::context_offset()), |
526 | R1); |
527 | __ PopPair(R3, R1); // Pop type arguments & extracted method. |
528 | __ StoreIntoObjectNoBarrier( |
529 | R0, FieldAddress(R0, target::Closure::function_offset()), R1); |
530 | __ StoreIntoObjectNoBarrier( |
531 | R0, |
532 | FieldAddress(R0, target::Closure::instantiator_type_arguments_offset()), |
533 | R3); |
534 | __ LoadObject(R1, EmptyTypeArguments()); |
535 | __ StoreIntoObjectNoBarrier( |
536 | R0, FieldAddress(R0, target::Closure::delayed_type_arguments_offset()), |
537 | R1); |
538 | |
539 | __ LeaveStubFrame(); |
540 | __ Ret(); |
541 | } |
542 | |
543 | void StubCodeCompiler::GenerateDispatchTableNullErrorStub( |
544 | Assembler* assembler) { |
545 | __ EnterStubFrame(); |
546 | __ CallRuntime(kNullErrorRuntimeEntry, /*argument_count=*/0); |
547 | // The NullError runtime entry does not return. |
548 | __ Breakpoint(); |
549 | } |
550 | |
551 | void StubCodeCompiler::GenerateNullErrorSharedWithoutFPURegsStub( |
552 | Assembler* assembler) { |
553 | GenerateSharedStub( |
554 | assembler, /*save_fpu_registers=*/false, &kNullErrorRuntimeEntry, |
555 | target::Thread::null_error_shared_without_fpu_regs_stub_offset(), |
556 | /*allow_return=*/false); |
557 | } |
558 | |
559 | void StubCodeCompiler::GenerateNullErrorSharedWithFPURegsStub( |
560 | Assembler* assembler) { |
561 | GenerateSharedStub( |
562 | assembler, /*save_fpu_registers=*/true, &kNullErrorRuntimeEntry, |
563 | target::Thread::null_error_shared_with_fpu_regs_stub_offset(), |
564 | /*allow_return=*/false); |
565 | } |
566 | |
567 | void StubCodeCompiler::GenerateNullArgErrorSharedWithoutFPURegsStub( |
568 | Assembler* assembler) { |
569 | GenerateSharedStub( |
570 | assembler, /*save_fpu_registers=*/false, &kArgumentNullErrorRuntimeEntry, |
571 | target::Thread::null_arg_error_shared_without_fpu_regs_stub_offset(), |
572 | /*allow_return=*/false); |
573 | } |
574 | |
575 | void StubCodeCompiler::GenerateNullArgErrorSharedWithFPURegsStub( |
576 | Assembler* assembler) { |
577 | GenerateSharedStub( |
578 | assembler, /*save_fpu_registers=*/true, &kArgumentNullErrorRuntimeEntry, |
579 | target::Thread::null_arg_error_shared_with_fpu_regs_stub_offset(), |
580 | /*allow_return=*/false); |
581 | } |
582 | |
583 | void StubCodeCompiler::GenerateNullCastErrorSharedWithoutFPURegsStub( |
584 | Assembler* assembler) { |
585 | GenerateSharedStub( |
586 | assembler, /*save_fpu_registers=*/false, &kNullCastErrorRuntimeEntry, |
587 | target::Thread::null_cast_error_shared_without_fpu_regs_stub_offset(), |
588 | /*allow_return=*/false); |
589 | } |
590 | |
591 | void StubCodeCompiler::GenerateNullCastErrorSharedWithFPURegsStub( |
592 | Assembler* assembler) { |
593 | GenerateSharedStub( |
594 | assembler, /*save_fpu_registers=*/true, &kNullCastErrorRuntimeEntry, |
595 | target::Thread::null_cast_error_shared_with_fpu_regs_stub_offset(), |
596 | /*allow_return=*/false); |
597 | } |
598 | |
599 | static void GenerateRangeError(Assembler* assembler, bool with_fpu_regs) { |
600 | auto perform_runtime_call = [&]() { |
// If the generated code has an unboxed index/length, we need to box them
// before calling the runtime entry.
603 | if (GenericCheckBoundInstr::UseUnboxedRepresentation()) { |
604 | Label length, smi_case; |
605 | |
606 | // The user-controlled index might not fit into a Smi. |
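// (Smi-tagging is a left shift by one; the adds below performs the same
// doubling and sets the overflow flag if the value does not fit in a Smi.)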
607 | __ adds(RangeErrorABI::kIndexReg, RangeErrorABI::kIndexReg, |
608 | compiler::Operand(RangeErrorABI::kIndexReg)); |
609 | __ BranchIf(NO_OVERFLOW, &length); |
610 | { |
// Allocate a mint, reload the two registers, and populate the mint.
612 | __ PushRegister(NULL_REG); |
613 | __ CallRuntime(kAllocateMintRuntimeEntry, /*argument_count=*/0); |
614 | __ PopRegister(RangeErrorABI::kIndexReg); |
615 | __ ldr(TMP, |
616 | Address(FP, target::kWordSize * |
617 | StubCodeCompiler::WordOffsetFromFpToCpuRegister( |
618 | RangeErrorABI::kIndexReg))); |
619 | __ str(TMP, FieldAddress(RangeErrorABI::kIndexReg, |
620 | target::Mint::value_offset())); |
621 | __ ldr(RangeErrorABI::kLengthReg, |
622 | Address(FP, target::kWordSize * |
623 | StubCodeCompiler::WordOffsetFromFpToCpuRegister( |
624 | RangeErrorABI::kLengthReg))); |
625 | } |
626 | |
// Length is guaranteed to be in positive Smi range (it comes from a load
// of a VM-recognized array).
629 | __ Bind(&length); |
630 | __ SmiTag(RangeErrorABI::kLengthReg); |
631 | } |
632 | __ PushRegister(RangeErrorABI::kLengthReg); |
633 | __ PushRegister(RangeErrorABI::kIndexReg); |
634 | __ CallRuntime(kRangeErrorRuntimeEntry, /*argument_count=*/2); |
635 | __ Breakpoint(); |
636 | }; |
637 | |
638 | GenerateSharedStubGeneric( |
639 | assembler, /*save_fpu_registers=*/with_fpu_regs, |
640 | with_fpu_regs |
641 | ? target::Thread::range_error_shared_with_fpu_regs_stub_offset() |
642 | : target::Thread::range_error_shared_without_fpu_regs_stub_offset(), |
643 | /*allow_return=*/false, perform_runtime_call); |
644 | } |
645 | |
646 | void StubCodeCompiler::GenerateRangeErrorSharedWithoutFPURegsStub( |
647 | Assembler* assembler) { |
648 | GenerateRangeError(assembler, /*with_fpu_regs=*/false); |
649 | } |
650 | |
651 | void StubCodeCompiler::GenerateRangeErrorSharedWithFPURegsStub( |
652 | Assembler* assembler) { |
653 | GenerateRangeError(assembler, /*with_fpu_regs=*/true); |
654 | } |
655 | |
656 | void StubCodeCompiler::GenerateStackOverflowSharedWithoutFPURegsStub( |
657 | Assembler* assembler) { |
658 | GenerateSharedStub( |
659 | assembler, /*save_fpu_registers=*/false, &kStackOverflowRuntimeEntry, |
660 | target::Thread::stack_overflow_shared_without_fpu_regs_stub_offset(), |
661 | /*allow_return=*/true); |
662 | } |
663 | |
664 | void StubCodeCompiler::GenerateStackOverflowSharedWithFPURegsStub( |
665 | Assembler* assembler) { |
666 | GenerateSharedStub( |
667 | assembler, /*save_fpu_registers=*/true, &kStackOverflowRuntimeEntry, |
668 | target::Thread::stack_overflow_shared_with_fpu_regs_stub_offset(), |
669 | /*allow_return=*/true); |
670 | } |
671 | |
672 | // Input parameters: |
673 | // LR : return address. |
674 | // SP : address of return value. |
675 | // R5 : address of the native function to call. |
676 | // R2 : address of first argument in argument array. |
677 | // R1 : argc_tag including number of arguments and function kind. |
678 | static void GenerateCallNativeWithWrapperStub(Assembler* assembler, |
679 | Address wrapper) { |
680 | const intptr_t thread_offset = target::NativeArguments::thread_offset(); |
681 | const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset(); |
682 | const intptr_t argv_offset = target::NativeArguments::argv_offset(); |
683 | const intptr_t retval_offset = target::NativeArguments::retval_offset(); |
684 | |
685 | __ EnterStubFrame(); |
686 | |
687 | // Save exit frame information to enable stack walking as we are about |
688 | // to transition to native code. |
689 | __ StoreToOffset(FP, THR, target::Thread::top_exit_frame_info_offset()); |
690 | |
691 | // Mark that the thread exited generated code through a runtime call. |
692 | __ LoadImmediate(R6, target::Thread::exit_through_runtime_call()); |
693 | __ StoreToOffset(R6, THR, target::Thread::exit_through_ffi_offset()); |
694 | |
695 | #if defined(DEBUG) |
696 | { |
697 | Label ok; |
698 | // Check that we are always entering from Dart code. |
699 | __ LoadFromOffset(R6, THR, target::Thread::vm_tag_offset()); |
700 | __ CompareImmediate(R6, VMTag::kDartCompiledTagId); |
701 | __ b(&ok, EQ); |
702 | __ Stop("Not coming from Dart code." ); |
703 | __ Bind(&ok); |
704 | } |
705 | #endif |
706 | |
707 | // Mark that the thread is executing native code. |
708 | __ StoreToOffset(R5, THR, target::Thread::vm_tag_offset()); |
709 | |
710 | // Reserve space for the native arguments structure passed on the stack (the |
711 | // outgoing pointer parameter to the native arguments structure is passed in |
712 | // R0) and align frame before entering the C++ world. |
713 | __ ReserveAlignedFrameSpace(target::NativeArguments::StructSize()); |
714 | |
715 | // Initialize target::NativeArguments structure and call native function. |
716 | // Registers R0, R1, R2, and R3 are used. |
717 | |
718 | ASSERT(thread_offset == 0 * target::kWordSize); |
719 | // Set thread in NativeArgs. |
720 | __ mov(R0, THR); |
721 | |
722 | // There are no native calls to closures, so we do not need to set the tag |
723 | // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. |
724 | ASSERT(argc_tag_offset == 1 * target::kWordSize); |
725 | // Set argc in target::NativeArguments: R1 already contains argc. |
726 | |
727 | ASSERT(argv_offset == 2 * target::kWordSize); |
728 | // Set argv in target::NativeArguments: R2 already contains argv. |
729 | |
730 | // Set retval in NativeArgs. |
731 | ASSERT(retval_offset == 3 * target::kWordSize); |
732 | __ AddImmediate(R3, FP, 2 * target::kWordSize); |
733 | |
734 | // Passing the structure by value as in runtime calls would require changing |
735 | // Dart API for native functions. |
736 | // For now, space is reserved on the stack and we pass a pointer to it. |
737 | __ StoreToOffset(R0, SP, thread_offset); |
738 | __ StoreToOffset(R1, SP, argc_tag_offset); |
739 | __ StoreToOffset(R2, SP, argv_offset); |
740 | __ StoreToOffset(R3, SP, retval_offset); |
741 | __ mov(R0, SP); // Pass the pointer to the target::NativeArguments. |
742 | |
// We are entering runtime code, so the C stack pointer must be restored from
// the stack limit to the top of the stack. We cache the stack limit address
// in a callee-saved register.
746 | __ mov(R25, CSP); |
747 | __ mov(CSP, SP); |
748 | |
749 | __ mov(R1, R5); // Pass the function entrypoint to call. |
750 | |
751 | // Call native function invocation wrapper or redirection via simulator. |
752 | __ ldr(LR, wrapper); |
753 | __ blr(LR); |
754 | |
755 | // Restore SP and CSP. |
756 | __ mov(SP, CSP); |
757 | __ mov(CSP, R25); |
758 | |
// Refresh pinned register values (incl. write barrier mask and null object).
760 | __ RestorePinnedRegisters(); |
761 | |
762 | // Mark that the thread is executing Dart code. |
763 | __ LoadImmediate(R2, VMTag::kDartCompiledTagId); |
764 | __ StoreToOffset(R2, THR, target::Thread::vm_tag_offset()); |
765 | |
766 | // Mark that the thread has not exited generated Dart code. |
767 | __ LoadImmediate(R2, 0); |
768 | __ StoreToOffset(R2, THR, target::Thread::exit_through_ffi_offset()); |
769 | |
770 | // Reset exit frame information in Isolate's mutator thread structure. |
771 | __ StoreToOffset(ZR, THR, target::Thread::top_exit_frame_info_offset()); |
772 | |
773 | // Restore the global object pool after returning from runtime (old space is |
774 | // moving, so the GOP could have been relocated). |
775 | if (FLAG_precompiled_mode && FLAG_use_bare_instructions) { |
776 | __ SetupGlobalPoolAndDispatchTable(); |
777 | } |
778 | |
779 | __ LeaveStubFrame(); |
780 | __ ret(); |
781 | } |
782 | |
783 | void StubCodeCompiler::GenerateCallNoScopeNativeStub(Assembler* assembler) { |
784 | GenerateCallNativeWithWrapperStub( |
785 | assembler, |
786 | Address(THR, |
787 | target::Thread::no_scope_native_wrapper_entry_point_offset())); |
788 | } |
789 | |
790 | void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) { |
791 | GenerateCallNativeWithWrapperStub( |
792 | assembler, |
793 | Address(THR, |
794 | target::Thread::auto_scope_native_wrapper_entry_point_offset())); |
795 | } |
796 | |
797 | // Input parameters: |
798 | // LR : return address. |
799 | // SP : address of return value. |
800 | // R5 : address of the native function to call. |
801 | // R2 : address of first argument in argument array. |
802 | // R1 : argc_tag including number of arguments and function kind. |
803 | void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) { |
804 | GenerateCallNativeWithWrapperStub( |
805 | assembler, |
806 | Address(THR, |
807 | target::Thread::bootstrap_native_wrapper_entry_point_offset())); |
808 | } |
809 | |
810 | // Input parameters: |
811 | // R4: arguments descriptor array. |
812 | void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) { |
813 | // Create a stub frame as we are pushing some objects on the stack before |
814 | // calling into the runtime. |
815 | __ EnterStubFrame(); |
// Set up space on the stack for the return value and preserve the arguments
// descriptor.
817 | __ Push(R4); |
818 | __ Push(ZR); |
819 | __ CallRuntime(kPatchStaticCallRuntimeEntry, 0); |
820 | // Get Code object result and restore arguments descriptor array. |
821 | __ Pop(CODE_REG); |
822 | __ Pop(R4); |
823 | // Remove the stub frame. |
824 | __ LeaveStubFrame(); |
825 | // Jump to the dart function. |
826 | __ LoadFieldFromOffset(R0, CODE_REG, target::Code::entry_point_offset()); |
827 | __ br(R0); |
828 | } |
829 | |
// Called from a static call only when invalid code has been entered (invalid
// because its function was optimized or deoptimized).
832 | // R4: arguments descriptor array. |
833 | void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) { |
834 | Label monomorphic; |
835 | __ BranchOnMonomorphicCheckedEntryJIT(&monomorphic); |
836 | |
// Load the code pointer of this stub from the thread: the one that was
// passed in is not correct - it points to the code object that needs to be
// replaced.
840 | __ ldr(CODE_REG, |
841 | Address(THR, target::Thread::fix_callers_target_code_offset())); |
842 | // Create a stub frame as we are pushing some objects on the stack before |
843 | // calling into the runtime. |
844 | __ EnterStubFrame(); |
// Set up space on the stack for the return value and preserve the arguments
// descriptor.
846 | __ Push(R4); |
847 | __ Push(ZR); |
848 | __ CallRuntime(kFixCallersTargetRuntimeEntry, 0); |
849 | // Get Code object result and restore arguments descriptor array. |
850 | __ Pop(CODE_REG); |
851 | __ Pop(R4); |
852 | // Remove the stub frame. |
853 | __ LeaveStubFrame(); |
854 | // Jump to the dart function. |
855 | __ LoadFieldFromOffset(R0, CODE_REG, target::Code::entry_point_offset()); |
856 | __ br(R0); |
857 | |
858 | __ Bind(&monomorphic); |
// Load the code pointer of this stub from the thread: the one that was
// passed in is not correct - it points to the code object that needs to be
// replaced.
862 | __ ldr(CODE_REG, |
863 | Address(THR, target::Thread::fix_callers_target_code_offset())); |
864 | // Create a stub frame as we are pushing some objects on the stack before |
865 | // calling into the runtime. |
866 | __ EnterStubFrame(); |
867 | __ Push(R5); // Preserve cache (guarded CID as Smi). |
868 | __ Push(R0); // Preserve receiver. |
869 | __ Push(ZR); |
870 | __ CallRuntime(kFixCallersTargetMonomorphicRuntimeEntry, 0); |
871 | __ Pop(CODE_REG); |
872 | __ Pop(R0); // Restore receiver. |
873 | __ Pop(R5); // Restore cache (guarded CID as Smi). |
874 | // Remove the stub frame. |
875 | __ LeaveStubFrame(); |
876 | // Jump to the dart function. |
877 | __ LoadFieldFromOffset( |
878 | R1, CODE_REG, |
879 | target::Code::entry_point_offset(CodeEntryKind::kMonomorphic)); |
880 | __ br(R1); |
881 | } |
882 | |
// Called from the object allocation instruction when the allocation stub has
// been disabled.
885 | void StubCodeCompiler::GenerateFixAllocationStubTargetStub( |
886 | Assembler* assembler) { |
// Load the code pointer of this stub from the thread: the one that was
// passed in is not correct - it points to the code object that needs to be
// replaced.
890 | __ ldr(CODE_REG, |
891 | Address(THR, target::Thread::fix_allocation_stub_code_offset())); |
892 | __ EnterStubFrame(); |
// Set up space on the stack for the return value.
894 | __ Push(ZR); |
895 | __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0); |
896 | // Get Code object result. |
897 | __ Pop(CODE_REG); |
898 | // Remove the stub frame. |
899 | __ LeaveStubFrame(); |
900 | // Jump to the dart function. |
901 | __ LoadFieldFromOffset(R0, CODE_REG, target::Code::entry_point_offset()); |
902 | __ br(R0); |
903 | } |
904 | |
905 | // Input parameters: |
906 | // R2: smi-tagged argument count, may be zero. |
907 | // FP[target::frame_layout.param_end_from_fp + 1]: last argument. |
908 | static void PushArrayOfArguments(Assembler* assembler) { |
909 | // Allocate array to store arguments of caller. |
910 | __ LoadObject(R1, NullObject()); |
911 | // R1: null element type for raw Array. |
912 | // R2: smi-tagged argument count, may be zero. |
913 | __ BranchLink(StubCodeAllocateArray()); |
914 | // R0: newly allocated array. |
915 | // R2: smi-tagged argument count, may be zero (was preserved by the stub). |
916 | __ Push(R0); // Array is in R0 and on top of stack. |
917 | __ add(R1, FP, Operand(R2, LSL, 2)); |
918 | __ AddImmediate(R1, |
919 | target::frame_layout.param_end_from_fp * target::kWordSize); |
920 | __ AddImmediate(R3, R0, target::Array::data_offset() - kHeapObjectTag); |
921 | // R1: address of first argument on stack. |
922 | // R3: address of first argument in array. |
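// The loop below copies the arguments in order: R1 walks down the stack from
// the first argument while R3 walks up through the array. R2 is Smi-tagged,
// so it is decremented by target::ToRawSmi(1) per element.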
923 | |
924 | Label loop, loop_exit; |
925 | __ Bind(&loop); |
926 | __ CompareRegisters(R2, ZR); |
927 | __ b(&loop_exit, LE); |
928 | __ ldr(R7, Address(R1)); |
929 | __ AddImmediate(R1, -target::kWordSize); |
930 | __ AddImmediate(R3, target::kWordSize); |
931 | __ AddImmediate(R2, R2, -target::ToRawSmi(1)); |
932 | __ StoreIntoObject(R0, Address(R3, -target::kWordSize), R7); |
933 | __ b(&loop); |
934 | __ Bind(&loop_exit); |
935 | } |
936 | |
// Used by eager and lazy deoptimization. Preserve result in R0 if necessary.
// This stub translates an optimized frame into an unoptimized frame. The
// optimized frame can contain values in registers and on the stack; the
// unoptimized frame contains all values on the stack.
// Deoptimization occurs in the following steps:
// - Push all registers that can contain values.
// - Call a C routine to copy the stack and saved registers into a temporary
//   buffer.
// - Adjust the caller's frame to the correct unoptimized frame size.
// - Fill the unoptimized frame.
// - Materialize objects that require allocation (e.g. Double instances).
// GC can occur only after the frame is fully rewritten.
// Stack after TagAndPushPP() below:
//   +------------------+
//   | Saved PP         | <- PP
//   +------------------+
//   | PC marker        | <- TOS
//   +------------------+
//   | Saved FP         | <- FP of stub
//   +------------------+
//   | return-address   |  (deoptimization point)
//   +------------------+
//   | Saved CODE_REG   |
//   +------------------+
//   | ...              | <- SP of optimized frame
961 | // |
// Parts of this code cannot trigger GC, while other parts can.
963 | static void GenerateDeoptimizationSequence(Assembler* assembler, |
964 | DeoptStubKind kind) { |
965 | // DeoptimizeCopyFrame expects a Dart frame, i.e. EnterDartFrame(0), but there |
966 | // is no need to set the correct PC marker or load PP, since they get patched. |
967 | __ EnterStubFrame(); |
968 | |
969 | // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry |
970 | // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls. |
971 | const intptr_t saved_result_slot_from_fp = |
972 | target::frame_layout.first_local_from_fp + 1 - |
973 | (kNumberOfCpuRegisters - R0); |
974 | const intptr_t saved_exception_slot_from_fp = |
975 | target::frame_layout.first_local_from_fp + 1 - |
976 | (kNumberOfCpuRegisters - R0); |
977 | const intptr_t saved_stacktrace_slot_from_fp = |
978 | target::frame_layout.first_local_from_fp + 1 - |
979 | (kNumberOfCpuRegisters - R1); |
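// Note that the result and exception slots coincide: both values arrive in
// R0. The stacktrace arrives in R1, whose slot is one word above R0's.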
980 | // Result in R0 is preserved as part of pushing all registers below. |
981 | |
982 | // Push registers in their enumeration order: lowest register number at |
983 | // lowest address. |
984 | for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; i--) { |
985 | const Register r = static_cast<Register>(i); |
986 | if (r == CODE_REG) { |
987 | // Save the original value of CODE_REG pushed before invoking this stub |
988 | // instead of the value used to call this stub. |
989 | COMPILE_ASSERT(R25 > CODE_REG); |
990 | __ ldr(R25, Address(FP, 2 * target::kWordSize)); |
991 | __ str(R25, Address(SP, -1 * target::kWordSize, Address::PreIndex)); |
992 | } else if (r == R15) { |
993 | // Because we save registers in decreasing order, IP0 will already be |
994 | // saved. |
995 | COMPILE_ASSERT(IP0 == R16); |
996 | __ mov(IP0, R15); |
997 | __ str(IP0, Address(SP, -1 * target::kWordSize, Address::PreIndex)); |
998 | } else { |
999 | __ str(r, Address(SP, -1 * target::kWordSize, Address::PreIndex)); |
1000 | } |
1001 | } |
1002 | |
1003 | for (intptr_t reg_idx = kNumberOfVRegisters - 1; reg_idx >= 0; reg_idx--) { |
1004 | VRegister vreg = static_cast<VRegister>(reg_idx); |
1005 | __ PushQuad(vreg); |
1006 | } |
1007 | |
1008 | __ mov(R0, SP); // Pass address of saved registers block. |
1009 | bool is_lazy = |
1010 | (kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow); |
1011 | __ LoadImmediate(R1, is_lazy ? 1 : 0); |
1012 | __ ReserveAlignedFrameSpace(0); |
1013 | __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2); |
1014 | // Result (R0) is stack-size (FP - SP) in bytes. |
1015 | |
1016 | if (kind == kLazyDeoptFromReturn) { |
1017 | // Restore result into R1 temporarily. |
1018 | __ LoadFromOffset(R1, FP, saved_result_slot_from_fp * target::kWordSize); |
1019 | } else if (kind == kLazyDeoptFromThrow) { |
// Restore exception into R1 and stacktrace into R2 temporarily.
1021 | __ LoadFromOffset(R1, FP, saved_exception_slot_from_fp * target::kWordSize); |
1022 | __ LoadFromOffset(R2, FP, |
1023 | saved_stacktrace_slot_from_fp * target::kWordSize); |
1024 | } |
1025 | |
1026 | // There is a Dart Frame on the stack. We must restore PP and leave frame. |
1027 | __ RestoreCodePointer(); |
1028 | __ LeaveStubFrame(); |
1029 | __ sub(SP, FP, Operand(R0)); |
1030 | |
1031 | // DeoptimizeFillFrame expects a Dart frame, i.e. EnterDartFrame(0), but there |
1032 | // is no need to set the correct PC marker or load PP, since they get patched. |
1033 | __ EnterStubFrame(); |
1034 | |
1035 | if (kind == kLazyDeoptFromReturn) { |
1036 | __ Push(R1); // Preserve result as first local. |
1037 | } else if (kind == kLazyDeoptFromThrow) { |
1038 | __ Push(R1); // Preserve exception as first local. |
1039 | __ Push(R2); // Preserve stacktrace as second local. |
1040 | } |
1041 | __ ReserveAlignedFrameSpace(0); |
1042 | __ mov(R0, FP); // Pass last FP as parameter in R0. |
1043 | __ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1); |
1044 | if (kind == kLazyDeoptFromReturn) { |
1045 | // Restore result into R1. |
1046 | __ LoadFromOffset( |
1047 | R1, FP, target::frame_layout.first_local_from_fp * target::kWordSize); |
1048 | } else if (kind == kLazyDeoptFromThrow) { |
// Restore exception into R1 and stacktrace into R2.
1050 | __ LoadFromOffset( |
1051 | R1, FP, target::frame_layout.first_local_from_fp * target::kWordSize); |
1052 | __ LoadFromOffset( |
1053 | R2, FP, |
1054 | (target::frame_layout.first_local_from_fp - 1) * target::kWordSize); |
1055 | } |
1056 | // Code above cannot cause GC. |
1057 | // There is a Dart Frame on the stack. We must restore PP and leave frame. |
1058 | __ RestoreCodePointer(); |
1059 | __ LeaveStubFrame(); |
1060 | |
1061 | // Frame is fully rewritten at this point and it is safe to perform a GC. |
1062 | // Materialize any objects that were deferred by FillFrame because they |
1063 | // require allocation. |
// Enter a stub frame, loading PP; the caller's PP is not materialized yet.
1065 | __ EnterStubFrame(); |
1066 | if (kind == kLazyDeoptFromReturn) { |
1067 | __ Push(R1); // Preserve result, it will be GC-d here. |
1068 | } else if (kind == kLazyDeoptFromThrow) { |
1069 | __ Push(R1); // Preserve exception, it will be GC-d here. |
1070 | __ Push(R2); // Preserve stacktrace, it will be GC-d here. |
1071 | } |
1072 | |
1073 | __ Push(ZR); // Space for the result. |
1074 | __ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0); |
1075 | // Result tells stub how many bytes to remove from the expression stack |
1076 | // of the bottom-most frame. They were used as materialization arguments. |
1077 | __ Pop(R2); |
1078 | __ SmiUntag(R2); |
1079 | if (kind == kLazyDeoptFromReturn) { |
1080 | __ Pop(R0); // Restore result. |
1081 | } else if (kind == kLazyDeoptFromThrow) { |
1082 | __ Pop(R1); // Restore stacktrace. |
1083 | __ Pop(R0); // Restore exception. |
1084 | } |
1085 | __ LeaveStubFrame(); |
1086 | // Remove materialization arguments. |
1087 | __ add(SP, SP, Operand(R2)); |
1088 | // The caller is responsible for emitting the return instruction. |
1089 | } |
1090 | |
1091 | // R0: result, must be preserved |
1092 | void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub( |
1093 | Assembler* assembler) { |
1094 | // Push zap value instead of CODE_REG for lazy deopt. |
1095 | __ LoadImmediate(TMP, kZapCodeReg); |
1096 | __ Push(TMP); |
1097 | // Return address for "call" to deopt stub. |
1098 | __ LoadImmediate(LR, kZapReturnAddress); |
1099 | __ ldr(CODE_REG, |
1100 | Address(THR, target::Thread::lazy_deopt_from_return_stub_offset())); |
1101 | GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn); |
1102 | __ ret(); |
1103 | } |
1104 | |
1105 | // R0: exception, must be preserved |
1106 | // R1: stacktrace, must be preserved |
1107 | void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub( |
1108 | Assembler* assembler) { |
1109 | // Push zap value instead of CODE_REG for lazy deopt. |
1110 | __ LoadImmediate(TMP, kZapCodeReg); |
1111 | __ Push(TMP); |
1112 | // Return address for "call" to deopt stub. |
1113 | __ LoadImmediate(LR, kZapReturnAddress); |
1114 | __ ldr(CODE_REG, |
1115 | Address(THR, target::Thread::lazy_deopt_from_throw_stub_offset())); |
1116 | GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow); |
1117 | __ ret(); |
1118 | } |
1119 | |
1120 | void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) { |
1121 | __ Push(CODE_REG); |
1122 | __ ldr(CODE_REG, Address(THR, target::Thread::deoptimize_stub_offset())); |
1123 | GenerateDeoptimizationSequence(assembler, kEagerDeopt); |
1124 | __ ret(); |
1125 | } |
1126 | |
1127 | // R5: ICData/MegamorphicCache |
1128 | static void GenerateNoSuchMethodDispatcherBody(Assembler* assembler) { |
1129 | __ EnterStubFrame(); |
1130 | |
1131 | __ ldr(R4, |
1132 | FieldAddress(R5, target::CallSiteData::arguments_descriptor_offset())); |
1133 | |
1134 | // Load the receiver. |
1135 | __ LoadFieldFromOffset(R2, R4, target::ArgumentsDescriptor::size_offset()); |
1136 | __ add(TMP, FP, Operand(R2, LSL, 2)); // R2 is Smi. |
1137 | __ LoadFromOffset(R6, TMP, |
1138 | target::frame_layout.param_end_from_fp * target::kWordSize); |
1139 | __ Push(ZR); // Result slot. |
1140 | __ Push(R6); // Receiver. |
1141 | __ Push(R5); // ICData/MegamorphicCache. |
1142 | __ Push(R4); // Arguments descriptor. |
1143 | |
// Adjust the argument count.
1145 | __ LoadFieldFromOffset(R3, R4, |
1146 | target::ArgumentsDescriptor::type_args_len_offset()); |
1147 | __ AddImmediate(TMP, R2, 1); // Include the type arguments. |
1148 | __ cmp(R3, Operand(0)); |
1149 | __ csinc(R2, R2, TMP, EQ); // R2 <- (R3 == 0) ? R2 : TMP + 1 (R2 : R2 + 2). |
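// csinc Rd, Rn, Rm, cond computes Rd = cond ? Rn : Rm + 1. TMP is R2 + 1, so
// the not-equal case yields R2 + 2 raw; since R2 is Smi-tagged, that is
// exactly one extra slot for the type arguments vector.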
1150 | |
1151 | // R2: Smi-tagged arguments array length. |
1152 | PushArrayOfArguments(assembler); |
1153 | const intptr_t kNumArgs = 4; |
1154 | __ CallRuntime(kNoSuchMethodFromCallStubRuntimeEntry, kNumArgs); |
1155 | __ Drop(4); |
1156 | __ Pop(R0); // Return value. |
1157 | __ LeaveStubFrame(); |
1158 | __ ret(); |
1159 | } |
1160 | |
1161 | static void GenerateDispatcherCode(Assembler* assembler, |
1162 | Label* call_target_function) { |
1163 | __ Comment("NoSuchMethodDispatch" ); |
1164 | // When lazily generated invocation dispatchers are disabled, the |
1165 | // miss-handler may return null. |
1166 | __ CompareObject(R0, NullObject()); |
1167 | __ b(call_target_function, NE); |
1168 | |
1169 | GenerateNoSuchMethodDispatcherBody(assembler); |
1170 | } |
1171 | |
1172 | // Input: |
1173 | // R4 - arguments descriptor |
1174 | // R5 - icdata/megamorphic_cache |
1175 | void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub( |
1176 | Assembler* assembler) { |
1177 | GenerateNoSuchMethodDispatcherBody(assembler); |
1178 | } |
1179 | |
1180 | // Called for inline allocation of arrays. |
1181 | // Input parameters: |
1182 | // LR: return address. |
1183 | // R2: array length as Smi. |
1184 | // R1: array element type (either NULL or an instantiated type). |
1185 | // NOTE: R2 cannot be clobbered here as the caller relies on it being saved. |
1186 | // The newly allocated object is returned in R0. |
1187 | void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) { |
1188 | if (!FLAG_use_slow_path) { |
1189 | Label slow_case; |
// Compute the size to be allocated: it is based on the array length
// and is computed as:
// RoundedAllocationSize(
//     (array_length * kWordSize) + target::Array::header_size()).
// Check that length is a Smi.
1195 | __ tsti(R2, Immediate(kSmiTagMask)); |
1196 | __ b(&slow_case, NE); |
1197 | |
1198 | __ cmp(R2, Operand(0)); |
1199 | __ b(&slow_case, LT); |
1200 | |
1201 | // Check for maximum allowed length. |
1202 | const intptr_t max_len = |
1203 | target::ToRawSmi(target::Array::kMaxNewSpaceElements); |
1204 | __ CompareImmediate(R2, max_len); |
1205 | __ b(&slow_case, GT); |
1206 | |
1207 | const intptr_t cid = kArrayCid; |
1208 | NOT_IN_PRODUCT(__ MaybeTraceAllocation(kArrayCid, R4, &slow_case)); |
1209 | |
1210 | // Calculate and align allocation size. |
1211 | // Load new object start and calculate next object start. |
1212 | // R1: array element type. |
1213 | // R2: array length as Smi. |
1214 | __ ldr(R0, Address(THR, target::Thread::top_offset())); |
1215 | intptr_t fixed_size_plus_alignment_padding = |
1216 | target::Array::header_size() + |
1217 | target::ObjectAlignment::kObjectAlignment - 1; |
1218 | __ LoadImmediate(R3, fixed_size_plus_alignment_padding); |
1219 | __ add(R3, R3, Operand(R2, LSL, 2)); // R2 is Smi. |
1220 | ASSERT(kSmiTagShift == 1); |
1221 | __ andi(R3, R3, |
1222 | Immediate(~(target::ObjectAlignment::kObjectAlignment - 1))); |
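// R2 is Smi-tagged (length << 1), so shifting it left by 2 more yields
// length * 8 = length * kWordSize bytes of payload. Adding
// kObjectAlignment - 1 above and masking here rounds the size up to the
// object alignment.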
1223 | // R0: potential new object start. |
1224 | // R3: object size in bytes. |
1225 | __ adds(R7, R3, Operand(R0)); |
1226 | __ b(&slow_case, CS); // Branch if unsigned overflow. |
1227 | |
1228 | // Check if the allocation fits into the remaining space. |
1229 | // R0: potential new object start. |
1230 | // R1: array element type. |
1231 | // R2: array length as Smi. |
1232 | // R3: array size. |
1233 | // R7: potential next object start. |
1234 | __ LoadFromOffset(TMP, THR, target::Thread::end_offset()); |
1235 | __ CompareRegisters(R7, TMP); |
1236 | __ b(&slow_case, CS); // Branch if unsigned higher or equal. |
1237 | |
1238 | // Successfully allocated the object(s), now update top to point to |
1239 | // next object start and initialize the object. |
1240 | // R0: potential new object start. |
1241 | // R3: array size. |
1242 | // R7: potential next object start. |
1243 | __ str(R7, Address(THR, target::Thread::top_offset())); |
1244 | __ add(R0, R0, Operand(kHeapObjectTag)); |
1245 | |
1246 | // R0: new object start as a tagged pointer. |
1247 | // R1: array element type. |
1248 | // R2: array length as Smi. |
1249 | // R3: array size. |
1250 | // R7: new object end address. |
1251 | |
1252 | // Store the type argument field. |
1253 | __ StoreIntoObjectOffsetNoBarrier( |
1254 | R0, target::Array::type_arguments_offset(), R1); |
1255 | |
1256 | // Set the length field. |
1257 | __ StoreIntoObjectOffsetNoBarrier(R0, target::Array::length_offset(), R2); |
1258 | |
1259 | // Calculate the size tag. |
1260 | // R0: new object start as a tagged pointer. |
1261 | // R2: array length as Smi. |
1262 | // R3: array size. |
1263 | // R7: new object end address. |
1264 | const intptr_t shift = target::ObjectLayout::kTagBitsSizeTagPos - |
1265 | target::ObjectAlignment::kObjectAlignmentLog2; |
1266 | __ CompareImmediate(R3, target::ObjectLayout::kSizeTagMaxSizeTag); |
// If the size fits in the size tag (LS), select the shifted size; otherwise
// (HI) select zero.
1268 | __ LslImmediate(TMP, R3, shift); |
1269 | __ csel(R1, TMP, R1, LS); |
1270 | __ csel(R1, ZR, R1, HI); |
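// Net effect: R1 = (size <= kSizeTagMaxSizeTag) ? size << shift : 0. A zero
// size tag makes the GC fetch the instance size from the class instead.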
1271 | |
1272 | // Get the class index and insert it into the tags. |
1273 | const uint32_t tags = |
1274 | target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0); |
1275 | |
1276 | __ LoadImmediate(TMP, tags); |
1277 | __ orr(R1, R1, Operand(TMP)); |
1278 | __ StoreFieldToOffset(R1, R0, target::Array::tags_offset()); |
1279 | |
1280 | // Initialize all array elements to raw_null. |
1281 | // R0: new object start as a tagged pointer. |
1282 | // R7: new object end address. |
1283 | // R2: array length as Smi. |
1284 | __ AddImmediate(R1, R0, target::Array::data_offset() - kHeapObjectTag); |
1285 | // R1: iterator which initially points to the start of the variable |
1286 | // data area to be initialized. |
1287 | Label loop, done; |
1288 | __ Bind(&loop); |
1289 | // TODO(cshapiro): StoreIntoObjectNoBarrier |
1290 | __ CompareRegisters(R1, R7); |
1291 | __ b(&done, CS); |
1292 | __ str(NULL_REG, Address(R1)); // Store if unsigned lower. |
1293 | __ AddImmediate(R1, target::kWordSize); |
1294 | __ b(&loop); // Loop until R1 == R7. |
1295 | __ Bind(&done); |
1296 | |
1297 | // Done allocating and initializing the array. |
1298 | // R0: new object. |
// R2: array length as Smi (preserved for the caller).
1300 | __ ret(); |
1301 | |
// Unable to allocate the array using the fast inline code; just call
// into the runtime.
1304 | __ Bind(&slow_case); |
1305 | } |
1306 | // Create a stub frame as we are pushing some objects on the stack before |
1307 | // calling into the runtime. |
1308 | __ EnterStubFrame(); |
// Set up space on the stack for the return value.
1310 | // Push array length as Smi and element type. |
1311 | __ Push(ZR); |
1312 | __ Push(R2); |
1313 | __ Push(R1); |
1314 | __ CallRuntime(kAllocateArrayRuntimeEntry, 2); |
// Pop arguments; the result is popped into R0.
1316 | __ Pop(R1); |
1317 | __ Pop(R2); |
1318 | __ Pop(R0); |
1319 | |
// Write-barrier elimination might be enabled for this array (depending on the
// array length). To be safe, we check whether the allocated object is in old
// space and, if so, call a leaf runtime entry to add it to the remembered set.
1323 | EnsureIsNewOrRemembered(assembler); |
1324 | |
1325 | __ LeaveStubFrame(); |
1326 | __ ret(); |
1327 | } |
1328 | |
1329 | void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub( |
1330 | Assembler* assembler) { |
// For testing purposes (FLAG_use_slow_path), call the allocation stub
// without the inline allocation attempt.
1332 | if (!FLAG_use_slow_path) { |
1333 | Label slow_case; |
1334 | __ TryAllocate(compiler::MintClass(), &slow_case, |
1335 | AllocateMintABI::kResultReg, AllocateMintABI::kTempReg); |
1336 | __ Ret(); |
1337 | |
1338 | __ Bind(&slow_case); |
1339 | } |
1340 | COMPILE_ASSERT(AllocateMintABI::kResultReg == R0); |
1341 | GenerateSharedStub(assembler, /*save_fpu_registers=*/true, |
1342 | &kAllocateMintRuntimeEntry, |
1343 | target::Thread::allocate_mint_with_fpu_regs_stub_offset(), |
1344 | /*allow_return=*/true, |
1345 | /*store_runtime_result_in_r0=*/true); |
1346 | } |
1347 | |
1348 | void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub( |
1349 | Assembler* assembler) { |
// For testing purposes (FLAG_use_slow_path), call the allocation stub
// without the inline allocation attempt.
1351 | if (!FLAG_use_slow_path) { |
1352 | Label slow_case; |
1353 | __ TryAllocate(compiler::MintClass(), &slow_case, |
1354 | AllocateMintABI::kResultReg, AllocateMintABI::kTempReg); |
1355 | __ Ret(); |
1356 | |
1357 | __ Bind(&slow_case); |
1358 | } |
1359 | COMPILE_ASSERT(AllocateMintABI::kResultReg == R0); |
1360 | GenerateSharedStub( |
1361 | assembler, /*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry, |
1362 | target::Thread::allocate_mint_without_fpu_regs_stub_offset(), |
1363 | /*allow_return=*/true, |
1364 | /*store_runtime_result_in_r0=*/true); |
1365 | } |
1366 | |
1367 | // Called when invoking Dart code from C++ (VM code). |
1368 | // Input parameters: |
1369 | // LR : points to return address. |
1370 | // R0 : code object of the Dart function to call. |
1371 | // R1 : arguments descriptor array. |
1372 | // R2 : arguments array. |
1373 | // R3 : current thread. |
1374 | void StubCodeCompiler::GenerateInvokeDartCodeStub(Assembler* assembler) { |
1375 | __ Comment("InvokeDartCodeStub" ); |
1376 | |
1377 | // Copy the C stack pointer (CSP/R31) into the stack pointer we'll actually |
1378 | // use to access the stack (SP/R15) and set the C stack pointer to near the |
1379 | // stack limit, loaded from the Thread held in R3, to prevent signal handlers |
  // from overwriting Dart frames.
1381 | __ mov(SP, CSP); |
1382 | __ SetupCSPFromThread(R3); |
1383 | __ Push(LR); // Marker for the profiler. |
1384 | __ EnterFrame(0); |
1385 | |
1386 | // Push code object to PC marker slot. |
1387 | __ ldr(TMP, Address(R3, target::Thread::invoke_dart_code_stub_offset())); |
1388 | __ Push(TMP); |
1389 | |
1390 | #if defined(TARGET_OS_FUCHSIA) |
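  // R18 holds the shadow-call-stack pointer on Fuchsia; stash it in the
  // Thread before running Dart code.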
1391 | __ str(R18, Address(R3, target::Thread::saved_shadow_call_stack_offset())); |
1392 | #elif defined(USING_SHADOW_CALL_STACK) |
1393 | #error Unimplemented |
1394 | #endif |
1395 | |
1396 | __ PushNativeCalleeSavedRegisters(); |
1397 | |
1398 | // Set up THR, which caches the current thread in Dart code. |
1399 | if (THR != R3) { |
1400 | __ mov(THR, R3); |
1401 | } |
1402 | |
  // Refresh the pinned register values (incl. the write-barrier mask and the
  // null object).
1404 | __ RestorePinnedRegisters(); |
1405 | |
1406 | // Save the current VMTag on the stack. |
1407 | __ LoadFromOffset(R4, THR, target::Thread::vm_tag_offset()); |
1408 | __ Push(R4); |
1409 | |
1410 | // Save top resource and top exit frame info. Use R6 as a temporary register. |
1411 | // StackFrameIterator reads the top exit frame info saved in this frame. |
1412 | __ LoadFromOffset(R6, THR, target::Thread::top_resource_offset()); |
1413 | __ StoreToOffset(ZR, THR, target::Thread::top_resource_offset()); |
1414 | __ Push(R6); |
1415 | |
1416 | __ LoadFromOffset(R6, THR, target::Thread::exit_through_ffi_offset()); |
1417 | __ Push(R6); |
1418 | __ LoadImmediate(R6, 0); |
1419 | __ StoreToOffset(R6, THR, target::Thread::exit_through_ffi_offset()); |
1420 | |
1421 | __ LoadFromOffset(R6, THR, target::Thread::top_exit_frame_info_offset()); |
1422 | __ StoreToOffset(ZR, THR, target::Thread::top_exit_frame_info_offset()); |
1423 | // target::frame_layout.exit_link_slot_from_entry_fp must be kept in sync |
1424 | // with the code below. |
1425 | #if defined(TARGET_OS_FUCHSIA) |
1426 | ASSERT(target::frame_layout.exit_link_slot_from_entry_fp == -24); |
1427 | #else |
1428 | ASSERT(target::frame_layout.exit_link_slot_from_entry_fp == -23); |
1429 | #endif |
1430 | __ Push(R6); |
1431 | |
1432 | // Mark that the thread is executing Dart code. Do this after initializing the |
1433 | // exit link for the profiler. |
1434 | __ LoadImmediate(R6, VMTag::kDartCompiledTagId); |
1435 | __ StoreToOffset(R6, THR, target::Thread::vm_tag_offset()); |
1436 | |
1437 | // Load arguments descriptor array into R4, which is passed to Dart code. |
1438 | __ LoadFromOffset(R4, R1, VMHandles::kOffsetOfRawPtrInHandle); |
1439 | |
1440 | // Load number of arguments into R5 and adjust count for type arguments. |
1441 | __ LoadFieldFromOffset(R5, R4, target::ArgumentsDescriptor::count_offset()); |
1442 | __ LoadFieldFromOffset(R3, R4, |
1443 | target::ArgumentsDescriptor::type_args_len_offset()); |
1444 | __ AddImmediate(TMP, R5, 1); // Include the type arguments. |
1445 | __ cmp(R3, Operand(0)); |
  __ csinc(R5, R5, TMP, EQ);  // R5 <- (R3 == 0) ? R5 : TMP + 1 (= R5 + 2).
1447 | __ SmiUntag(R5); |
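  // R5 now holds the untagged argument count, plus one for the type-argument
  // vector when it is present.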
1448 | |
1449 | // Compute address of 'arguments array' data area into R2. |
1450 | __ LoadFromOffset(R2, R2, VMHandles::kOffsetOfRawPtrInHandle); |
1451 | __ AddImmediate(R2, target::Array::data_offset() - kHeapObjectTag); |
1452 | |
1453 | // Set up arguments for the Dart call. |
1454 | Label push_arguments; |
1455 | Label done_push_arguments; |
1456 | __ cmp(R5, Operand(0)); |
1457 | __ b(&done_push_arguments, EQ); // check if there are arguments. |
1458 | __ LoadImmediate(R1, 0); |
1459 | __ Bind(&push_arguments); |
1460 | __ ldr(R3, Address(R2)); |
1461 | __ Push(R3); |
1462 | __ add(R1, R1, Operand(1)); |
1463 | __ add(R2, R2, Operand(target::kWordSize)); |
1464 | __ cmp(R1, Operand(R5)); |
1465 | __ b(&push_arguments, LT); |
1466 | __ Bind(&done_push_arguments); |
1467 | |
1468 | if (FLAG_precompiled_mode && FLAG_use_bare_instructions) { |
1469 | __ SetupGlobalPoolAndDispatchTable(); |
1470 | } else { |
    // We now load the pool pointer (PP) with a GC-safe value as we are about
    // to invoke Dart code. We don't need a real object pool here.
    // Smi zero does not work because ARM64 assumes PP to be untagged.
1474 | __ LoadObject(PP, NullObject()); |
1475 | } |
1476 | |
1477 | // Call the Dart code entrypoint. |
1478 | __ ldr(CODE_REG, Address(R0, VMHandles::kOffsetOfRawPtrInHandle)); |
1479 | __ ldr(R0, FieldAddress(CODE_REG, target::Code::entry_point_offset())); |
1480 | __ blr(R0); // R4 is the arguments descriptor array. |
1481 | __ Comment("InvokeDartCodeStub return" ); |
1482 | |
1483 | // Get rid of arguments pushed on the stack. |
1484 | __ AddImmediate( |
1485 | SP, FP, |
1486 | target::frame_layout.exit_link_slot_from_entry_fp * target::kWordSize); |
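  // SP now points at the exit-link slot saved above; the pops below read the
  // saved values in reverse order of the pushes.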
1487 | |
  // Restore the saved top exit frame info and top resource back into the
  // Thread structure. Uses R6 as a temporary register for this.
1490 | __ Pop(R6); |
1491 | __ StoreToOffset(R6, THR, target::Thread::top_exit_frame_info_offset()); |
1492 | __ Pop(R6); |
1493 | __ StoreToOffset(R6, THR, target::Thread::exit_through_ffi_offset()); |
1494 | __ Pop(R6); |
1495 | __ StoreToOffset(R6, THR, target::Thread::top_resource_offset()); |
1496 | |
1497 | // Restore the current VMTag from the stack. |
1498 | __ Pop(R4); |
1499 | __ StoreToOffset(R4, THR, target::Thread::vm_tag_offset()); |
1500 | |
1501 | #if defined(TARGET_OS_FUCHSIA) |
1502 | __ mov(R3, THR); |
1503 | #endif |
1504 | |
1505 | __ PopNativeCalleeSavedRegisters(); // Clobbers THR |
1506 | |
1507 | #if defined(TARGET_OS_FUCHSIA) |
1508 | __ str(R18, Address(R3, target::Thread::saved_shadow_call_stack_offset())); |
1509 | #elif defined(USING_SHADOW_CALL_STACK) |
1510 | #error Unimplemented |
1511 | #endif |
1512 | |
1513 | // Restore the frame pointer and C stack pointer and return. |
1514 | __ LeaveFrame(); |
1515 | __ Drop(1); |
1516 | __ RestoreCSP(); |
1517 | __ ret(); |
1518 | } |
1519 | |
1520 | // Called when invoking compiled Dart code from interpreted Dart code. |
1521 | // Input parameters: |
1522 | // LR : points to return address. |
1523 | // R0 : raw code object of the Dart function to call. |
// R1 : raw arguments descriptor array.
1525 | // R2 : address of first argument. |
1526 | // R3 : current thread. |
1527 | void StubCodeCompiler::GenerateInvokeDartCodeFromBytecodeStub( |
1528 | Assembler* assembler) { |
1529 | if (FLAG_precompiled_mode) { |
1530 | __ Stop("Not using interpreter" ); |
1531 | return; |
1532 | } |
1533 | |
1534 | // Copy the C stack pointer (CSP/R31) into the stack pointer we'll actually |
1535 | // use to access the stack (SP/R15) and set the C stack pointer to near the |
1536 | // stack limit, loaded from the Thread held in R3, to prevent signal handlers |
  // from overwriting Dart frames.
1538 | __ mov(SP, CSP); |
1539 | __ SetupCSPFromThread(R3); |
1540 | __ Push(LR); // Marker for the profiler. |
1541 | __ EnterFrame(0); |
1542 | |
1543 | // Push code object to PC marker slot. |
1544 | __ ldr(TMP, |
1545 | Address(R3, |
1546 | target::Thread::invoke_dart_code_from_bytecode_stub_offset())); |
1547 | __ Push(TMP); |
1548 | |
1549 | #if defined(TARGET_OS_FUCHSIA) |
1550 | __ str(R18, Address(R3, target::Thread::saved_shadow_call_stack_offset())); |
1551 | #elif defined(USING_SHADOW_CALL_STACK) |
1552 | #error Unimplemented |
1553 | #endif |
1554 | |
1555 | __ PushNativeCalleeSavedRegisters(); |
1556 | |
1557 | // Set up THR, which caches the current thread in Dart code. |
1558 | if (THR != R3) { |
1559 | __ mov(THR, R3); |
1560 | } |
1561 | |
  // Refresh the pinned register values (incl. the write-barrier mask and the
  // null object).
1563 | __ RestorePinnedRegisters(); |
1564 | |
1565 | // Save the current VMTag on the stack. |
1566 | __ LoadFromOffset(R4, THR, target::Thread::vm_tag_offset()); |
1567 | __ Push(R4); |
1568 | |
1569 | // Save top resource and top exit frame info. Use R6 as a temporary register. |
1570 | // StackFrameIterator reads the top exit frame info saved in this frame. |
1571 | __ LoadFromOffset(R6, THR, target::Thread::top_resource_offset()); |
1572 | __ StoreToOffset(ZR, THR, target::Thread::top_resource_offset()); |
1573 | __ Push(R6); |
1574 | |
1575 | __ LoadFromOffset(R6, THR, target::Thread::exit_through_ffi_offset()); |
1576 | __ Push(R6); |
1577 | __ LoadImmediate(R6, 0); |
1578 | __ StoreToOffset(R6, THR, target::Thread::exit_through_ffi_offset()); |
1579 | |
1580 | __ LoadFromOffset(R6, THR, target::Thread::top_exit_frame_info_offset()); |
1581 | __ StoreToOffset(ZR, THR, target::Thread::top_exit_frame_info_offset()); |
1582 | // target::frame_layout.exit_link_slot_from_entry_fp must be kept in sync |
1583 | // with the code below. |
1584 | #if defined(TARGET_OS_FUCHSIA) |
1585 | ASSERT(target::frame_layout.exit_link_slot_from_entry_fp == -24); |
1586 | #else |
1587 | ASSERT(target::frame_layout.exit_link_slot_from_entry_fp == -23); |
1588 | #endif |
1589 | __ Push(R6); |
1590 | |
1591 | // Mark that the thread is executing Dart code. Do this after initializing the |
1592 | // exit link for the profiler. |
1593 | __ LoadImmediate(R6, VMTag::kDartCompiledTagId); |
1594 | __ StoreToOffset(R6, THR, target::Thread::vm_tag_offset()); |
1595 | |
1596 | // Load arguments descriptor array into R4, which is passed to Dart code. |
1597 | __ mov(R4, R1); |
1598 | |
1599 | // Load number of arguments into R5 and adjust count for type arguments. |
1600 | __ LoadFieldFromOffset(R5, R4, target::ArgumentsDescriptor::count_offset()); |
1601 | __ LoadFieldFromOffset(R3, R4, |
1602 | target::ArgumentsDescriptor::type_args_len_offset()); |
1603 | __ AddImmediate(TMP, R5, 1); // Include the type arguments. |
1604 | __ cmp(R3, Operand(0)); |
  __ csinc(R5, R5, TMP, EQ);  // R5 <- (R3 == 0) ? R5 : TMP + 1 (= R5 + 2).
1606 | __ SmiUntag(R5); |
1607 | |
1608 | // R2 points to first argument. |
1609 | // Set up arguments for the Dart call. |
1610 | Label push_arguments; |
1611 | Label done_push_arguments; |
1612 | __ cmp(R5, Operand(0)); |
1613 | __ b(&done_push_arguments, EQ); // check if there are arguments. |
1614 | __ LoadImmediate(R1, 0); |
1615 | __ Bind(&push_arguments); |
1616 | __ ldr(R3, Address(R2)); |
1617 | __ Push(R3); |
1618 | __ add(R1, R1, Operand(1)); |
1619 | __ add(R2, R2, Operand(target::kWordSize)); |
1620 | __ cmp(R1, Operand(R5)); |
1621 | __ b(&push_arguments, LT); |
1622 | __ Bind(&done_push_arguments); |
1623 | |
  // We now load the pool pointer (PP) with a GC-safe value as we are about
  // to invoke Dart code. We don't need a real object pool here.
  // Smi zero does not work because ARM64 assumes PP to be untagged.
1627 | __ LoadObject(PP, NullObject()); |
1628 | |
1629 | // Call the Dart code entrypoint. |
1630 | __ mov(CODE_REG, R0); |
1631 | __ ldr(R0, FieldAddress(CODE_REG, target::Code::entry_point_offset())); |
1632 | __ blr(R0); // R4 is the arguments descriptor array. |
1633 | |
1634 | // Get rid of arguments pushed on the stack. |
1635 | __ AddImmediate( |
1636 | SP, FP, |
1637 | target::frame_layout.exit_link_slot_from_entry_fp * target::kWordSize); |
1638 | |
  // Restore the saved top exit frame info and top resource back into the
  // Thread structure. Uses R6 as a temporary register for this.
1641 | __ Pop(R6); |
1642 | __ StoreToOffset(R6, THR, target::Thread::top_exit_frame_info_offset()); |
1643 | __ Pop(R6); |
1644 | __ StoreToOffset(R6, THR, target::Thread::exit_through_ffi_offset()); |
1645 | __ Pop(R6); |
1646 | __ StoreToOffset(R6, THR, target::Thread::top_resource_offset()); |
1647 | |
1648 | // Restore the current VMTag from the stack. |
1649 | __ Pop(R4); |
1650 | __ StoreToOffset(R4, THR, target::Thread::vm_tag_offset()); |
1651 | |
1652 | #if defined(TARGET_OS_FUCHSIA) |
1653 | __ mov(R3, THR); |
1654 | #endif |
1655 | |
1656 | __ PopNativeCalleeSavedRegisters(); // Clobbers THR |
1657 | |
1658 | #if defined(TARGET_OS_FUCHSIA) |
1659 | __ str(R18, Address(R3, target::Thread::saved_shadow_call_stack_offset())); |
1660 | #elif defined(USING_SHADOW_CALL_STACK) |
1661 | #error Unimplemented |
1662 | #endif |
1663 | |
1664 | // Restore the frame pointer and C stack pointer and return. |
1665 | __ LeaveFrame(); |
1666 | __ Drop(1); |
1667 | __ RestoreCSP(); |
1668 | __ ret(); |
1669 | } |
1670 | |
// Helper that allocates the space for a Context object.
// It does not initialize the fields of the context.
// Input:
//   R1: number of context variables.
// Output:
//   R0: newly allocated RawContext object.
1677 | // Clobbered: |
1678 | // R2, R3, R4, TMP |
1679 | static void GenerateAllocateContextSpaceStub(Assembler* assembler, |
1680 | Label* slow_case) { |
1681 | // First compute the rounded instance size. |
1682 | // R1: number of context variables. |
1683 | intptr_t fixed_size_plus_alignment_padding = |
1684 | target::Context::header_size() + |
1685 | target::ObjectAlignment::kObjectAlignment - 1; |
1686 | __ LoadImmediate(R2, fixed_size_plus_alignment_padding); |
1687 | __ add(R2, R2, Operand(R1, LSL, 3)); |
1688 | ASSERT(kSmiTagShift == 1); |
1689 | __ andi(R2, R2, Immediate(~(target::ObjectAlignment::kObjectAlignment - 1))); |
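  // R2: allocation size = header size + 8 bytes per variable, rounded up to
  // the object alignment.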
1690 | |
1691 | NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, R4, slow_case)); |
1692 | // Now allocate the object. |
1693 | // R1: number of context variables. |
1694 | // R2: object size. |
1695 | __ ldr(R0, Address(THR, target::Thread::top_offset())); |
1696 | __ add(R3, R2, Operand(R0)); |
1697 | // Check if the allocation fits into the remaining space. |
1698 | // R0: potential new object. |
1699 | // R1: number of context variables. |
1700 | // R2: object size. |
1701 | // R3: potential next object start. |
1702 | __ ldr(TMP, Address(THR, target::Thread::end_offset())); |
1703 | __ CompareRegisters(R3, TMP); |
1704 | __ b(slow_case, CS); // Branch if unsigned higher or equal. |
1705 | |
1706 | // Successfully allocated the object, now update top to point to |
1707 | // next object start and initialize the object. |
1708 | // R0: new object. |
1709 | // R1: number of context variables. |
1710 | // R2: object size. |
1711 | // R3: next object start. |
1712 | __ str(R3, Address(THR, target::Thread::top_offset())); |
1713 | __ add(R0, R0, Operand(kHeapObjectTag)); |
1714 | |
1715 | // Calculate the size tag. |
1716 | // R0: new object. |
1717 | // R1: number of context variables. |
1718 | // R2: object size. |
1719 | const intptr_t shift = target::ObjectLayout::kTagBitsSizeTagPos - |
1720 | target::ObjectAlignment::kObjectAlignmentLog2; |
1721 | __ CompareImmediate(R2, target::ObjectLayout::kSizeTagMaxSizeTag); |
1722 | // If no size tag overflow, shift R2 left, else set R2 to zero. |
1723 | __ LslImmediate(TMP, R2, shift); |
1724 | __ csel(R2, TMP, R2, LS); |
1725 | __ csel(R2, ZR, R2, HI); |
1726 | |
1727 | // Get the class index and insert it into the tags. |
1728 | // R2: size and bit tags. |
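  // A Context's size is not known statically, so pass instance_size = 0 and
  // OR in the size tag computed above instead.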
1729 | const uint32_t tags = |
1730 | target::MakeTagWordForNewSpaceObject(kContextCid, /*instance_size=*/0); |
1731 | |
1732 | __ LoadImmediate(TMP, tags); |
1733 | __ orr(R2, R2, Operand(TMP)); |
1734 | __ StoreFieldToOffset(R2, R0, target::Object::tags_offset()); |
1735 | |
  // Set up the number-of-context-variables field.
1737 | // R0: new object. |
1738 | // R1: number of context variables as integer value (not object). |
1739 | __ StoreFieldToOffset(R1, R0, target::Context::num_variables_offset()); |
1740 | } |
1741 | |
1742 | // Called for inline allocation of contexts. |
1743 | // Input: |
1744 | // R1: number of context variables. |
1745 | // Output: |
//   R0: newly allocated RawContext object.
1747 | // Clobbered: |
1748 | // R2, R3, R4, TMP |
1749 | void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) { |
1750 | if (!FLAG_use_slow_path && FLAG_inline_alloc) { |
1751 | Label slow_case; |
1752 | |
1753 | GenerateAllocateContextSpaceStub(assembler, &slow_case); |
1754 | |
    // Set up the parent field.
1756 | // R0: new object. |
1757 | // R1: number of context variables. |
1758 | __ LoadObject(R2, NullObject()); |
1759 | __ StoreFieldToOffset(R2, R0, target::Context::parent_offset()); |
1760 | |
1761 | // Initialize the context variables. |
1762 | // R0: new object. |
1763 | // R1: number of context variables. |
1764 | // R2: raw null. |
1765 | { |
1766 | Label loop, done; |
1767 | __ AddImmediate(R3, R0, |
1768 | target::Context::variable_offset(0) - kHeapObjectTag); |
1769 | __ Bind(&loop); |
1770 | __ subs(R1, R1, Operand(1)); |
1771 | __ b(&done, MI); |
1772 | __ str(R2, Address(R3, R1, UXTX, Address::Scaled)); |
1773 | __ b(&loop, NE); // Loop if R1 not zero. |
1774 | __ Bind(&done); |
1775 | } |
1776 | |
1777 | // Done allocating and initializing the context. |
1778 | // R0: new object. |
1779 | __ ret(); |
1780 | |
1781 | __ Bind(&slow_case); |
1782 | } |
1783 | // Create a stub frame as we are pushing some objects on the stack before |
1784 | // calling into the runtime. |
1785 | __ EnterStubFrame(); |
  // Set up space on the stack for the return value.
1787 | __ SmiTag(R1); |
1788 | __ PushObject(NullObject()); |
1789 | __ Push(R1); |
1790 | __ CallRuntime(kAllocateContextRuntimeEntry, 1); // Allocate context. |
1791 | __ Drop(1); // Pop number of context variables argument. |
1792 | __ Pop(R0); // Pop the new context object. |
1793 | |
1794 | // Write-barrier elimination might be enabled for this context (depending on |
1795 | // the size). To be sure we will check if the allocated object is in old |
1796 | // space and if so call a leaf runtime to add it to the remembered set. |
1797 | EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false); |
1798 | |
1799 | // R0: new object |
1800 | // Restore the frame pointer. |
1801 | __ LeaveStubFrame(); |
1802 | |
1803 | __ ret(); |
1804 | } |
1805 | |
1806 | // Called for clone of contexts. |
1807 | // Input: |
//   R5: context to clone.
1809 | // Output: |
//   R0: newly allocated RawContext object.
1811 | // Clobbered: |
1812 | // R1, (R2), R3, R4, (TMP) |
1813 | void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) { |
1814 | { |
1815 | Label slow_case; |
1816 | |
    // Load the number of variables (int32) from the existing context.
1818 | __ ldr(R1, FieldAddress(R5, target::Context::num_variables_offset()), |
1819 | kWord); |
1820 | |
1821 | GenerateAllocateContextSpaceStub(assembler, &slow_case); |
1822 | |
1823 | // Load parent in the existing context. |
1824 | __ ldr(R3, FieldAddress(R5, target::Context::parent_offset())); |
    // Set up the parent field.
1826 | // R0: new context. |
1827 | __ StoreIntoObjectNoBarrier( |
1828 | R0, FieldAddress(R0, target::Context::parent_offset()), R3); |
1829 | |
1830 | // Clone the context variables. |
1831 | // R0: new context. |
1832 | // R1: number of context variables. |
1833 | { |
1834 | Label loop, done; |
1835 | // R3: Variable array address, new context. |
1836 | __ AddImmediate(R3, R0, |
1837 | target::Context::variable_offset(0) - kHeapObjectTag); |
1838 | // R4: Variable array address, old context. |
1839 | __ AddImmediate(R4, R5, |
1840 | target::Context::variable_offset(0) - kHeapObjectTag); |
1841 | |
1842 | __ Bind(&loop); |
1843 | __ subs(R1, R1, Operand(1)); |
1844 | __ b(&done, MI); |
1845 | |
1846 | __ ldr(R5, Address(R4, R1, UXTX, Address::Scaled)); |
1847 | __ str(R5, Address(R3, R1, UXTX, Address::Scaled)); |
1848 | __ b(&loop, NE); // Loop if R1 not zero. |
1849 | |
1850 | __ Bind(&done); |
1851 | } |
1852 | |
1853 | // Done allocating and initializing the context. |
1854 | // R0: new object. |
1855 | __ ret(); |
1856 | |
1857 | __ Bind(&slow_case); |
1858 | } |
1859 | |
1860 | // Create a stub frame as we are pushing some objects on the stack before |
1861 | // calling into the runtime. |
1862 | __ EnterStubFrame(); |
  // Set up space on the stack for the return value and push the context.
1864 | __ PushPair(R5, NULL_REG); |
1865 | __ CallRuntime(kCloneContextRuntimeEntry, 1); // Clone context. |
  // Pop the context argument and the new context object.
1868 | __ PopPair(R1, R0); |
1869 | |
1870 | // Write-barrier elimination might be enabled for this context (depending on |
1871 | // the size). To be sure we will check if the allocated object is in old |
1872 | // space and if so call a leaf runtime to add it to the remembered set. |
1873 | EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false); |
1874 | |
1875 | // R0: new object |
1876 | // Restore the frame pointer. |
1877 | __ LeaveStubFrame(); |
1878 | __ ret(); |
1879 | } |
1880 | |
1881 | void StubCodeCompiler::GenerateWriteBarrierWrappersStub(Assembler* assembler) { |
1882 | for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) { |
1883 | if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue; |
1884 | |
1885 | Register reg = static_cast<Register>(i); |
1886 | intptr_t start = __ CodeSize(); |
1887 | __ Push(LR); |
1888 | __ Push(kWriteBarrierObjectReg); |
1889 | __ mov(kWriteBarrierObjectReg, reg); |
1890 | __ ldr(LR, |
1891 | Address(THR, target::Thread::write_barrier_entry_point_offset())); |
1892 | __ blr(LR); |
1893 | __ Pop(kWriteBarrierObjectReg); |
1894 | __ Pop(LR); |
1895 | __ ret(LR); |
1896 | intptr_t end = __ CodeSize(); |
1897 | |
1898 | RELEASE_ASSERT(end - start == kStoreBufferWrapperSize); |
1899 | } |
1900 | } |
1901 | |
1902 | // Helper stub to implement Assembler::StoreIntoObject/Array. |
1903 | // Input parameters: |
1904 | // R1: Object (old) |
1905 | // R0: Value (old or new) |
1906 | // R25: Slot |
// If R0 is new, add R1 to the store buffer. Otherwise R0 is old, so mark R0
// and add it to the marking stack.
1909 | COMPILE_ASSERT(kWriteBarrierObjectReg == R1); |
1910 | COMPILE_ASSERT(kWriteBarrierValueReg == R0); |
1911 | COMPILE_ASSERT(kWriteBarrierSlotReg == R25); |
1912 | static void GenerateWriteBarrierStubHelper(Assembler* assembler, |
1913 | Address stub_code, |
1914 | bool cards) { |
1915 | Label add_to_mark_stack, remember_card; |
1916 | __ tbz(&add_to_mark_stack, R0, |
1917 | target::ObjectAlignment::kNewObjectBitPosition); |
1918 | |
1919 | if (cards) { |
1920 | __ LoadFieldFromOffset(TMP, R1, target::Object::tags_offset(), kWord); |
1921 | __ tbnz(&remember_card, TMP, target::ObjectLayout::kCardRememberedBit); |
1922 | } else { |
1923 | #if defined(DEBUG) |
1924 | Label ok; |
1925 | __ LoadFieldFromOffset(TMP, R1, target::Object::tags_offset(), kWord); |
1926 | __ tbz(&ok, TMP, target::ObjectLayout::kCardRememberedBit); |
1927 | __ Stop("Wrong barrier" ); |
1928 | __ Bind(&ok); |
1929 | #endif |
1930 | } |
1931 | |
1932 | // Save values being destroyed. |
1933 | __ Push(R2); |
1934 | __ Push(R3); |
1935 | __ Push(R4); |
1936 | |
1937 | // Atomically set the remembered bit of the object header. |
1938 | ASSERT(target::Object::tags_offset() == 0); |
1939 | __ sub(R3, R1, Operand(kHeapObjectTag)); |
1940 | // R3: Untagged address of header word (ldxr/stxr do not support offsets). |
1941 | // Note that we use 32 bit operations here to match the size of the |
1942 | // background sweeper which is also manipulating this 32 bit word. |
1943 | Label retry; |
1944 | __ Bind(&retry); |
1945 | __ ldxr(R2, R3, kWord); |
1946 | __ AndImmediate(R2, R2, |
1947 | ~(1 << target::ObjectLayout::kOldAndNotRememberedBit)); |
1948 | __ stxr(R4, R2, R3, kWord); |
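  // R4 is 0 if the store-exclusive succeeded, 1 if the exclusive monitor was
  // lost (another thread touched the header word); retry in that case.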
1949 | __ cbnz(&retry, R4); |
1950 | |
1951 | // Load the StoreBuffer block out of the thread. Then load top_ out of the |
1952 | // StoreBufferBlock and add the address to the pointers_. |
1953 | __ LoadFromOffset(R4, THR, target::Thread::store_buffer_block_offset()); |
1954 | __ LoadFromOffset(R2, R4, target::StoreBufferBlock::top_offset(), |
1955 | kUnsignedWord); |
1956 | __ add(R3, R4, Operand(R2, LSL, target::kWordSizeLog2)); |
1957 | __ StoreToOffset(R1, R3, target::StoreBufferBlock::pointers_offset()); |
1958 | |
1959 | // Increment top_ and check for overflow. |
1960 | // R2: top_. |
1961 | // R4: StoreBufferBlock. |
1962 | Label overflow; |
1963 | __ add(R2, R2, Operand(1)); |
1964 | __ StoreToOffset(R2, R4, target::StoreBufferBlock::top_offset(), |
1965 | kUnsignedWord); |
1966 | __ CompareImmediate(R2, target::StoreBufferBlock::kSize); |
1967 | // Restore values. |
1968 | __ Pop(R4); |
1969 | __ Pop(R3); |
1970 | __ Pop(R2); |
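  // The pops above do not alter the condition flags, so EQ still reflects
  // the comparison of top_ against kSize.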
1971 | __ b(&overflow, EQ); |
1972 | __ ret(); |
1973 | |
1974 | // Handle overflow: Call the runtime leaf function. |
1975 | __ Bind(&overflow); |
  // Set up the frame and push callee-saved registers.
1977 | |
1978 | __ Push(CODE_REG); |
1979 | __ ldr(CODE_REG, stub_code); |
1980 | __ EnterCallRuntimeFrame(0 * target::kWordSize); |
1981 | __ mov(R0, THR); |
1982 | __ CallRuntime(kStoreBufferBlockProcessRuntimeEntry, 1); |
1983 | // Restore callee-saved registers, tear down frame. |
1984 | __ LeaveCallRuntimeFrame(); |
1985 | __ Pop(CODE_REG); |
1986 | __ ret(); |
1987 | |
1988 | __ Bind(&add_to_mark_stack); |
1989 | __ Push(R2); // Spill. |
1990 | __ Push(R3); // Spill. |
1991 | __ Push(R4); // Spill. |
1992 | |
1993 | // Atomically clear kOldAndNotMarkedBit. |
1994 | // Note that we use 32 bit operations here to match the size of the |
1995 | // background sweeper which is also manipulating this 32 bit word. |
1996 | Label marking_retry, lost_race, marking_overflow; |
1997 | ASSERT(target::Object::tags_offset() == 0); |
1998 | __ sub(R3, R0, Operand(kHeapObjectTag)); |
1999 | // R3: Untagged address of header word (ldxr/stxr do not support offsets). |
2000 | __ Bind(&marking_retry); |
2001 | __ ldxr(R2, R3, kWord); |
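  // If the bit is already clear, another thread marked this object first;
  // take the lost_race exit below.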
2002 | __ tbz(&lost_race, R2, target::ObjectLayout::kOldAndNotMarkedBit); |
2003 | __ AndImmediate(R2, R2, ~(1 << target::ObjectLayout::kOldAndNotMarkedBit)); |
2004 | __ stxr(R4, R2, R3, kWord); |
2005 | __ cbnz(&marking_retry, R4); |
2006 | |
2007 | __ LoadFromOffset(R4, THR, target::Thread::marking_stack_block_offset()); |
2008 | __ LoadFromOffset(R2, R4, target::MarkingStackBlock::top_offset(), |
2009 | kUnsignedWord); |
2010 | __ add(R3, R4, Operand(R2, LSL, target::kWordSizeLog2)); |
2011 | __ StoreToOffset(R0, R3, target::MarkingStackBlock::pointers_offset()); |
2012 | __ add(R2, R2, Operand(1)); |
2013 | __ StoreToOffset(R2, R4, target::MarkingStackBlock::top_offset(), |
2014 | kUnsignedWord); |
2015 | __ CompareImmediate(R2, target::MarkingStackBlock::kSize); |
2016 | __ Pop(R4); // Unspill. |
2017 | __ Pop(R3); // Unspill. |
2018 | __ Pop(R2); // Unspill. |
2019 | __ b(&marking_overflow, EQ); |
2020 | __ ret(); |
2021 | |
2022 | __ Bind(&marking_overflow); |
2023 | __ Push(CODE_REG); |
2024 | __ ldr(CODE_REG, stub_code); |
2025 | __ EnterCallRuntimeFrame(0 * target::kWordSize); |
2026 | __ mov(R0, THR); |
2027 | __ CallRuntime(kMarkingStackBlockProcessRuntimeEntry, 1); |
2028 | __ LeaveCallRuntimeFrame(); |
2029 | __ Pop(CODE_REG); |
2030 | __ ret(); |
2031 | |
2032 | __ Bind(&lost_race); |
2033 | __ Pop(R4); // Unspill. |
2034 | __ Pop(R3); // Unspill. |
2035 | __ Pop(R2); // Unspill. |
2036 | __ ret(); |
2037 | |
2038 | if (cards) { |
2039 | Label remember_card_slow; |
2040 | |
2041 | // Get card table. |
2042 | __ Bind(&remember_card); |
2043 | __ AndImmediate(TMP, R1, target::kOldPageMask); // OldPage. |
2044 | __ ldr(TMP, |
2045 | Address(TMP, target::OldPage::card_table_offset())); // Card table. |
2046 | __ cbz(&remember_card_slow, TMP); |
2047 | |
2048 | // Dirty the card. |
2049 | __ AndImmediate(TMP, R1, target::kOldPageMask); // OldPage. |
2050 | __ sub(R25, R25, Operand(TMP)); // Offset in page. |
2051 | __ ldr(TMP, |
2052 | Address(TMP, target::OldPage::card_table_offset())); // Card table. |
2053 | __ add(TMP, TMP, |
2054 | Operand(R25, LSR, |
2055 | target::OldPage::kBytesPerCardLog2)); // Card address. |
2056 | __ str(R1, Address(TMP, 0), |
2057 | kUnsignedByte); // Low byte of R1 is non-zero from object tag. |
2058 | __ ret(); |
2059 | |
2060 | // Card table not yet allocated. |
2061 | __ Bind(&remember_card_slow); |
2062 | __ Push(CODE_REG); |
2063 | __ PushPair(R0, R1); |
2064 | __ ldr(CODE_REG, stub_code); |
2065 | __ mov(R0, R1); // Arg0 = Object |
2066 | __ mov(R1, R25); // Arg1 = Slot |
2067 | __ EnterCallRuntimeFrame(0); |
2068 | __ CallRuntime(kRememberCardRuntimeEntry, 2); |
2069 | __ LeaveCallRuntimeFrame(); |
2070 | __ PopPair(R0, R1); |
2071 | __ Pop(CODE_REG); |
2072 | __ ret(); |
2073 | } |
2074 | } |
2075 | |
2076 | void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) { |
2077 | GenerateWriteBarrierStubHelper( |
2078 | assembler, Address(THR, target::Thread::write_barrier_code_offset()), |
2079 | false); |
2080 | } |
2081 | |
2082 | void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) { |
2083 | GenerateWriteBarrierStubHelper( |
2084 | assembler, |
2085 | Address(THR, target::Thread::array_write_barrier_code_offset()), true); |
2086 | } |
2087 | |
2088 | static void GenerateAllocateObjectHelper(Assembler* assembler, |
2089 | bool is_cls_parameterized) { |
2090 | const Register kInstanceReg = R0; |
2091 | // kAllocationStubTypeArgumentsReg = R1 |
2092 | const Register kTagsReg = R2; |
2093 | |
2094 | { |
2095 | Label slow_case; |
2096 | |
2097 | const Register kNewTopReg = R3; |
2098 | |
2099 | // Bump allocation. |
2100 | { |
2101 | const Register kInstanceSizeReg = R4; |
2102 | const Register kEndReg = R5; |
2103 | |
2104 | __ ExtractInstanceSizeFromTags(kInstanceSizeReg, kTagsReg); |
2105 | |
2106 | // Load two words from Thread::top: top and end. |
      // kInstanceReg: potential new object start.
2108 | __ ldp(kInstanceReg, kEndReg, |
2109 | Address(THR, target::Thread::top_offset(), Address::PairOffset)); |
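      // Loading the pair at top_offset gives both top_ and end_, relying on
      // end_ being laid out immediately after top_ in the Thread.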
2110 | |
2111 | __ add(kNewTopReg, kInstanceReg, Operand(kInstanceSizeReg)); |
2112 | |
2113 | __ CompareRegisters(kEndReg, kNewTopReg); |
2114 | __ b(&slow_case, UNSIGNED_LESS_EQUAL); |
2115 | |
2116 | // Successfully allocated the object, now update top to point to |
2117 | // next object start and store the class in the class field of object. |
2118 | __ str(kNewTopReg, Address(THR, target::Thread::top_offset())); |
2119 | } // kInstanceSizeReg = R4, kEndReg = R5 |
2120 | |
2121 | // Tags. |
2122 | __ str(kTagsReg, Address(kInstanceReg, target::Object::tags_offset())); |
2123 | |
2124 | // Initialize the remaining words of the object. |
2125 | { |
2126 | const Register kFieldReg = R4; |
2127 | |
2128 | __ AddImmediate(kFieldReg, kInstanceReg, |
2129 | target::Instance::first_field_offset()); |
2130 | Label done, init_loop; |
2131 | __ Bind(&init_loop); |
2132 | __ CompareRegisters(kFieldReg, kNewTopReg); |
2133 | __ b(&done, UNSIGNED_GREATER_EQUAL); |
2134 | __ str(NULL_REG, |
2135 | Address(kFieldReg, target::kWordSize, Address::PostIndex)); |
2136 | __ b(&init_loop); |
2137 | |
2138 | __ Bind(&done); |
2139 | } // kFieldReg = R4 |
2140 | |
2141 | if (is_cls_parameterized) { |
2142 | Label not_parameterized_case; |
2143 | |
2144 | const Register kClsIdReg = R4; |
      const Register kTypeOffsetReg = R5;
2146 | |
2147 | __ ExtractClassIdFromTags(kClsIdReg, kTagsReg); |
2148 | |
2149 | // Load class' type_arguments_field offset in words. |
      __ LoadClassById(kTypeOffsetReg, kClsIdReg);
      __ ldr(
          kTypeOffsetReg,
          FieldAddress(kTypeOffsetReg,
                       target::Class::
                           host_type_arguments_field_offset_in_words_offset()),
          kWord);
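      // The loaded offset is in words; the scaled (times-8) addressing below
      // converts it to a byte offset.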
2157 | |
2158 | // Set the type arguments in the new object. |
2159 | __ StoreIntoObjectNoBarrier( |
2160 | kInstanceReg, |
          Address(kInstanceReg, kTypeOffsetReg, UXTX, Address::Scaled),
2162 | kAllocationStubTypeArgumentsReg); |
2163 | |
2164 | __ Bind(¬_parameterized_case); |
    }  // kClsIdReg = R4, kTypeOffsetReg = R5
2166 | |
2167 | __ AddImmediate(kInstanceReg, kInstanceReg, kHeapObjectTag); |
2168 | |
2169 | __ ret(); |
2170 | |
2171 | __ Bind(&slow_case); |
2172 | } // kNewTopReg = R3 |
2173 | |
2174 | // Fall back on slow case: |
2175 | if (!is_cls_parameterized) { |
2176 | __ mov(kAllocationStubTypeArgumentsReg, NULL_REG); |
2177 | } |
2178 | // Tail call to generic allocation stub. |
2179 | __ ldr( |
2180 | R3, |
2181 | Address(THR, target::Thread::allocate_object_slow_entry_point_offset())); |
2182 | __ br(R3); |
2183 | } |
2184 | |
2185 | // Called for inline allocation of objects (any class). |
2186 | void StubCodeCompiler::GenerateAllocateObjectStub(Assembler* assembler) { |
2187 | GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/false); |
2188 | } |
2189 | |
2190 | void StubCodeCompiler::GenerateAllocateObjectParameterizedStub( |
2191 | Assembler* assembler) { |
2192 | GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/true); |
2193 | } |
2194 | |
2195 | void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) { |
2196 | const Register kInstanceReg = R0; |
2197 | // kAllocationStubTypeArgumentsReg = R1 |
2198 | const Register kTagsToClsIdReg = R2; |
2199 | |
2200 | if (!FLAG_use_bare_instructions) { |
2201 | __ ldr(CODE_REG, |
2202 | Address(THR, target::Thread::call_to_runtime_stub_offset())); |
2203 | } |
2204 | |
2205 | __ ExtractClassIdFromTags(kTagsToClsIdReg, kTagsToClsIdReg); |
2206 | |
2207 | // Create a stub frame as we are pushing some objects on the stack before |
2208 | // calling into the runtime. |
2209 | __ EnterStubFrame(); |
2210 | |
2211 | __ LoadClassById(R0, kTagsToClsIdReg); |
2212 | __ PushPair(R0, NULL_REG); // Pushes result slot, then class object. |
2213 | |
2214 | // Should be Object::null() if class is non-parameterized. |
2215 | __ Push(kAllocationStubTypeArgumentsReg); |
2216 | |
2217 | __ CallRuntime(kAllocateObjectRuntimeEntry, 2); |
2218 | |
2219 | // Load result off the stack into result register. |
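  // The result slot sits two words up: the class object and the type
  // arguments were pushed on top of it.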
2220 | __ ldr(kInstanceReg, Address(SP, 2 * target::kWordSize)); |
2221 | |
  // Write-barrier elimination is enabled for [cls], so we must ensure that
  // the object is in new-space or has its remembered bit set.
2224 | EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false); |
2225 | |
2226 | __ LeaveStubFrame(); |
2227 | |
2228 | __ ret(); |
2229 | } |
2230 | |
2231 | // Called for inline allocation of objects. |
2232 | void StubCodeCompiler::GenerateAllocationStubForClass( |
2233 | Assembler* assembler, |
2234 | UnresolvedPcRelativeCalls* unresolved_calls, |
2235 | const Class& cls, |
2236 | const Code& allocate_object, |
2237 | const Code& allocat_object_parametrized) { |
2238 | static_assert(kAllocationStubTypeArgumentsReg == R1, |
2239 | "Adjust register allocation in the AllocationStub" ); |
2240 | |
2241 | classid_t cls_id = target::Class::GetId(cls); |
2242 | ASSERT(cls_id != kIllegalCid); |
2243 | |
2244 | RELEASE_ASSERT(AllocateObjectInstr::WillAllocateNewOrRemembered(cls)); |
2245 | |
2246 | // The generated code is different if the class is parameterized. |
2247 | const bool is_cls_parameterized = target::Class::NumTypeArguments(cls) > 0; |
2248 | ASSERT(!is_cls_parameterized || target::Class::TypeArgumentsFieldOffset( |
2249 | cls) != target::Class::kNoTypeArguments); |
2250 | |
2251 | const intptr_t instance_size = target::Class::GetInstanceSize(cls); |
2252 | ASSERT(instance_size > 0); |
2253 | RELEASE_ASSERT(target::Heap::IsAllocatableInNewSpace(instance_size)); |
2254 | |
2255 | const uint32_t tags = |
2256 | target::MakeTagWordForNewSpaceObject(cls_id, instance_size); |
2257 | |
2258 | // Note: Keep in sync with helper function. |
2259 | // kInstanceReg = R0 |
2260 | // kAllocationStubTypeArgumentsReg = R1 |
2261 | const Register kTagsReg = R2; |
2262 | |
2263 | __ LoadImmediate(kTagsReg, tags); |
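  // The tag word (class id, instance size, GC bits) is a compile-time
  // constant here, so the shared allocation stubs only have to bump-allocate
  // and store it.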
2264 | |
2265 | if (!FLAG_use_slow_path && FLAG_inline_alloc && |
2266 | !target::Class::TraceAllocation(cls) && |
2267 | target::SizeFitsInSizeTag(instance_size)) { |
2268 | if (is_cls_parameterized) { |
2269 | // TODO(41974): Assign all allocation stubs to the root loading unit? |
2270 | if (false && |
2271 | !IsSameObject(NullObject(), |
2272 | CastHandle<Object>(allocat_object_parametrized))) { |
2273 | __ GenerateUnRelocatedPcRelativeTailCall(); |
2274 | unresolved_calls->Add(new UnresolvedPcRelativeCall( |
            __ CodeSize(), allocate_object_parameterized,
            /*is_tail_call=*/true));
2276 | } else { |
2277 | __ ldr(R4, |
2278 | Address(THR, |
2279 | target::Thread:: |
2280 | allocate_object_parameterized_entry_point_offset())); |
2281 | __ br(R4); |
2282 | } |
2283 | } else { |
2284 | // TODO(41974): Assign all allocation stubs to the root loading unit? |
2285 | if (false && |
2286 | !IsSameObject(NullObject(), CastHandle<Object>(allocate_object))) { |
2287 | __ GenerateUnRelocatedPcRelativeTailCall(); |
2288 | unresolved_calls->Add(new UnresolvedPcRelativeCall( |
2289 | __ CodeSize(), allocate_object, /*is_tail_call=*/true)); |
2290 | } else { |
2291 | __ ldr( |
2292 | R4, |
2293 | Address(THR, target::Thread::allocate_object_entry_point_offset())); |
2294 | __ br(R4); |
2295 | } |
2296 | } |
2297 | } else { |
2298 | if (!is_cls_parameterized) { |
2299 | __ LoadObject(kAllocationStubTypeArgumentsReg, NullObject()); |
2300 | } |
2301 | __ ldr(R4, |
2302 | Address(THR, |
2303 | target::Thread::allocate_object_slow_entry_point_offset())); |
2304 | __ br(R4); |
2305 | } |
2306 | } |
2307 | |
// Called to invoke the "dynamic noSuchMethod(Invocation invocation)" function
// from the entry code of a Dart function after a mismatch in the number or
// names of the passed arguments is detected.
2311 | // Input parameters: |
2312 | // LR : return address. |
2313 | // SP : address of last argument. |
2314 | // R4: arguments descriptor array. |
2315 | void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub( |
2316 | Assembler* assembler) { |
2317 | __ EnterStubFrame(); |
2318 | |
2319 | // Load the receiver. |
2320 | __ LoadFieldFromOffset(R2, R4, target::ArgumentsDescriptor::size_offset()); |
2321 | __ add(TMP, FP, Operand(R2, LSL, 2)); // R2 is Smi. |
2322 | __ LoadFromOffset(R6, TMP, |
2323 | target::frame_layout.param_end_from_fp * target::kWordSize); |
2324 | |
2325 | // Load the function. |
2326 | __ LoadFieldFromOffset(TMP, R6, target::Closure::function_offset()); |
2327 | |
2328 | __ Push(ZR); // Result slot. |
2329 | __ Push(R6); // Receiver. |
2330 | __ Push(TMP); // Function |
2331 | __ Push(R4); // Arguments descriptor. |
2332 | |
2333 | // Adjust arguments count. |
2334 | __ LoadFieldFromOffset(R3, R4, |
2335 | target::ArgumentsDescriptor::type_args_len_offset()); |
2336 | __ AddImmediate(TMP, R2, 1); // Include the type arguments. |
2337 | __ cmp(R3, Operand(0)); |
  __ csinc(R2, R2, TMP, EQ);  // R2 <- (R3 == 0) ? R2 : TMP + 1 (= R2 + 2).
2339 | |
2340 | // R2: Smi-tagged arguments array length. |
2341 | PushArrayOfArguments(assembler); |
2342 | |
2343 | const intptr_t kNumArgs = 4; |
2344 | __ CallRuntime(kNoSuchMethodFromPrologueRuntimeEntry, kNumArgs); |
2345 | // noSuchMethod on closures always throws an error, so it will never return. |
2346 | __ brk(0); |
2347 | } |
2348 | |
2349 | // R6: function object. |
2350 | // R5: inline cache data object. |
2351 | // Cannot use function object from ICData as it may be the inlined |
2352 | // function and not the top-scope function. |
2353 | void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement( |
2354 | Assembler* assembler) { |
2355 | Register ic_reg = R5; |
2356 | Register func_reg = R6; |
2357 | if (FLAG_precompiled_mode) { |
2358 | __ Breakpoint(); |
2359 | return; |
2360 | } |
2361 | if (FLAG_trace_optimized_ic_calls) { |
2362 | __ EnterStubFrame(); |
2363 | __ Push(R6); // Preserve. |
2364 | __ Push(R5); // Preserve. |
2365 | __ Push(ic_reg); // Argument. |
2366 | __ Push(func_reg); // Argument. |
2367 | __ CallRuntime(kTraceICCallRuntimeEntry, 2); |
    __ Drop(2);  // Discard arguments.
2369 | __ Pop(R5); // Restore. |
2370 | __ Pop(R6); // Restore. |
2371 | __ LeaveStubFrame(); |
2372 | } |
2373 | __ LoadFieldFromOffset(R7, func_reg, target::Function::usage_counter_offset(), |
2374 | kWord); |
2375 | __ add(R7, R7, Operand(1)); |
2376 | __ StoreFieldToOffset(R7, func_reg, target::Function::usage_counter_offset(), |
2377 | kWord); |
2378 | } |
2379 | |
2380 | // Loads function into 'temp_reg'. |
2381 | void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler, |
2382 | Register temp_reg) { |
2383 | if (FLAG_precompiled_mode) { |
2384 | __ Breakpoint(); |
2385 | return; |
2386 | } |
2387 | if (FLAG_optimization_counter_threshold >= 0) { |
2388 | Register ic_reg = R5; |
2389 | Register func_reg = temp_reg; |
2390 | ASSERT(temp_reg == R6); |
2391 | __ Comment("Increment function counter" ); |
2392 | __ LoadFieldFromOffset(func_reg, ic_reg, target::ICData::owner_offset()); |
2393 | __ LoadFieldFromOffset(R7, func_reg, |
2394 | target::Function::usage_counter_offset(), kWord); |
2395 | __ AddImmediate(R7, 1); |
2396 | __ StoreFieldToOffset(R7, func_reg, |
2397 | target::Function::usage_counter_offset(), kWord); |
2398 | } |
2399 | } |
2400 | |
2401 | // Note: R5 must be preserved. |
2402 | // Attempt a quick Smi operation for known operations ('kind'). The ICData |
2403 | // must have been primed with a Smi/Smi check that will be used for counting |
2404 | // the invocations. |
2405 | static void EmitFastSmiOp(Assembler* assembler, |
2406 | Token::Kind kind, |
2407 | intptr_t num_args, |
2408 | Label* not_smi_or_overflow) { |
2409 | __ Comment("Fast Smi op" ); |
2410 | __ ldr(R0, Address(SP, +1 * target::kWordSize)); // Left. |
2411 | __ ldr(R1, Address(SP, +0 * target::kWordSize)); // Right. |
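  // OR the operands so a single tag test catches a non-Smi in either one:
  // the OR has the heap-object tag bit set iff at least one operand does.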
2412 | __ orr(TMP, R0, Operand(R1)); |
2413 | __ BranchIfNotSmi(TMP, not_smi_or_overflow); |
2414 | switch (kind) { |
2415 | case Token::kADD: { |
      __ adds(R0, R1, Operand(R0));   // Add, setting flags.
      __ b(not_smi_or_overflow, VS);  // Branch on overflow.
2418 | break; |
2419 | } |
2420 | case Token::kLT: { |
2421 | __ CompareRegisters(R0, R1); |
2422 | __ LoadObject(R0, CastHandle<Object>(TrueObject())); |
2423 | __ LoadObject(R1, CastHandle<Object>(FalseObject())); |
2424 | __ csel(R0, R0, R1, LT); |
2425 | break; |
2426 | } |
2427 | case Token::kEQ: { |
2428 | __ CompareRegisters(R0, R1); |
2429 | __ LoadObject(R0, CastHandle<Object>(TrueObject())); |
2430 | __ LoadObject(R1, CastHandle<Object>(FalseObject())); |
2431 | __ csel(R0, R0, R1, EQ); |
2432 | break; |
2433 | } |
2434 | default: |
2435 | UNIMPLEMENTED(); |
2436 | } |
2437 | |
2438 | // R5: IC data object (preserved). |
2439 | __ LoadFieldFromOffset(R6, R5, target::ICData::entries_offset()); |
2440 | // R6: ic_data_array with check entries: classes and target functions. |
2441 | __ AddImmediate(R6, target::Array::data_offset() - kHeapObjectTag); |
2442 | // R6: points directly to the first ic data array element. |
2443 | #if defined(DEBUG) |
2444 | // Check that first entry is for Smi/Smi. |
2445 | Label error, ok; |
2446 | const intptr_t imm_smi_cid = target::ToRawSmi(kSmiCid); |
2447 | __ ldr(R1, Address(R6, 0)); |
2448 | __ CompareImmediate(R1, imm_smi_cid); |
2449 | __ b(&error, NE); |
2450 | __ ldr(R1, Address(R6, target::kWordSize)); |
2451 | __ CompareImmediate(R1, imm_smi_cid); |
2452 | __ b(&ok, EQ); |
2453 | __ Bind(&error); |
2454 | __ Stop("Incorrect IC data" ); |
2455 | __ Bind(&ok); |
2456 | #endif |
2457 | if (FLAG_optimization_counter_threshold >= 0) { |
2458 | const intptr_t count_offset = |
2459 | target::ICData::CountIndexFor(num_args) * target::kWordSize; |
2460 | // Update counter, ignore overflow. |
2461 | __ LoadFromOffset(R1, R6, count_offset); |
2462 | __ adds(R1, R1, Operand(target::ToRawSmi(1))); |
2463 | __ StoreToOffset(R1, R6, count_offset); |
2464 | } |
2465 | |
2466 | __ ret(); |
2467 | } |
2468 | |
2469 | // Saves the offset of the target entry-point (from the Function) into R8. |
2470 | // |
2471 | // Must be the first code generated, since any code before will be skipped in |
2472 | // the unchecked entry-point. |
2473 | static void GenerateRecordEntryPoint(Assembler* assembler) { |
2474 | Label done; |
2475 | __ LoadImmediate(R8, target::Function::entry_point_offset() - kHeapObjectTag); |
2476 | __ b(&done); |
2477 | __ BindUncheckedEntryPoint(); |
2478 | __ LoadImmediate( |
2479 | R8, target::Function::entry_point_offset(CodeEntryKind::kUnchecked) - |
2480 | kHeapObjectTag); |
2481 | __ Bind(&done); |
2482 | } |
2483 | |
2484 | // Generate inline cache check for 'num_args'. |
2485 | // R0: receiver (if instance call) |
2486 | // R5: ICData |
2487 | // LR: return address |
2488 | // Control flow: |
2489 | // - If receiver is null -> jump to IC miss. |
2490 | // - If receiver is Smi -> load Smi class. |
2491 | // - If receiver is not-Smi -> load receiver's class. |
2492 | // - Check if 'num_args' (including receiver) match any IC data group. |
2493 | // - Match found -> jump to target. |
2494 | // - Match not found -> jump to IC miss. |
2495 | void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub( |
2496 | Assembler* assembler, |
2497 | intptr_t num_args, |
2498 | const RuntimeEntry& handle_ic_miss, |
2499 | Token::Kind kind, |
2500 | Optimized optimized, |
2501 | CallType type, |
2502 | Exactness exactness) { |
2503 | const bool save_entry_point = kind == Token::kILLEGAL; |
2504 | if (FLAG_precompiled_mode) { |
2505 | __ Breakpoint(); |
2506 | return; |
2507 | } |
2508 | |
2509 | if (save_entry_point) { |
2510 | GenerateRecordEntryPoint(assembler); |
2511 | } |
2512 | |
2513 | if (optimized == kOptimized) { |
2514 | GenerateOptimizedUsageCounterIncrement(assembler); |
2515 | } else { |
    GenerateUsageCounterIncrement(assembler, /*temp_reg=*/R6);
2517 | } |
2518 | |
2519 | ASSERT(exactness == kIgnoreExactness); // Unimplemented. |
2520 | ASSERT(num_args == 1 || num_args == 2); |
2521 | #if defined(DEBUG) |
2522 | { |
2523 | Label ok; |
2524 | // Check that the IC data array has NumArgsTested() == num_args. |
2525 | // 'NumArgsTested' is stored in the least significant bits of 'state_bits'. |
2526 | __ LoadFromOffset(R6, R5, |
2527 | target::ICData::state_bits_offset() - kHeapObjectTag, |
2528 | kUnsignedWord); |
2529 | ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed. |
2530 | __ andi(R6, R6, Immediate(target::ICData::NumArgsTestedMask())); |
2531 | __ CompareImmediate(R6, num_args); |
2532 | __ b(&ok, EQ); |
2533 | __ Stop("Incorrect stub for IC data" ); |
2534 | __ Bind(&ok); |
2535 | } |
2536 | #endif // DEBUG |
2537 | |
2538 | #if !defined(PRODUCT) |
2539 | Label stepping, done_stepping; |
2540 | if (optimized == kUnoptimized) { |
2541 | __ Comment("Check single stepping" ); |
2542 | __ LoadIsolate(R6); |
2543 | __ LoadFromOffset(R6, R6, target::Isolate::single_step_offset(), |
2544 | kUnsignedByte); |
2545 | __ CompareRegisters(R6, ZR); |
2546 | __ b(&stepping, NE); |
2547 | __ Bind(&done_stepping); |
2548 | } |
2549 | #endif |
2550 | |
2551 | Label not_smi_or_overflow; |
2552 | if (kind != Token::kILLEGAL) { |
2553 | EmitFastSmiOp(assembler, kind, num_args, ¬_smi_or_overflow); |
2554 | } |
2555 | __ Bind(¬_smi_or_overflow); |
2556 | |
2557 | __ Comment("Extract ICData initial values and receiver cid" ); |
2558 | // R5: IC data object (preserved). |
2559 | __ LoadFieldFromOffset(R6, R5, target::ICData::entries_offset()); |
2560 | // R6: ic_data_array with check entries: classes and target functions. |
2561 | __ AddImmediate(R6, target::Array::data_offset() - kHeapObjectTag); |
2562 | // R6: points directly to the first ic data array element. |
2563 | |
2564 | if (type == kInstanceCall) { |
2565 | __ LoadTaggedClassIdMayBeSmi(R0, R0); |
2566 | __ LoadFieldFromOffset(R4, R5, |
2567 | target::CallSiteData::arguments_descriptor_offset()); |
2568 | if (num_args == 2) { |
2569 | __ LoadFieldFromOffset(R7, R4, |
2570 | target::ArgumentsDescriptor::count_offset()); |
2571 | __ SmiUntag(R7); // Untag so we can use the LSL 3 addressing mode. |
2572 | __ sub(R7, R7, Operand(2)); |
2573 | // R1 <- [SP + (R1 << 3)] |
2574 | __ ldr(R1, Address(SP, R7, UXTX, Address::Scaled)); |
2575 | __ LoadTaggedClassIdMayBeSmi(R1, R1); |
2576 | } |
2577 | } else { |
2578 | __ LoadFieldFromOffset(R4, R5, |
2579 | target::CallSiteData::arguments_descriptor_offset()); |
2580 | // Get the receiver's class ID (first read number of arguments from |
2581 | // arguments descriptor array and then access the receiver from the stack). |
2582 | __ LoadFieldFromOffset(R7, R4, target::ArgumentsDescriptor::count_offset()); |
2583 | __ SmiUntag(R7); // Untag so we can use the LSL 3 addressing mode. |
2584 | __ sub(R7, R7, Operand(1)); |
2585 | // R0 <- [SP + (R7 << 3)] |
2586 | __ ldr(R0, Address(SP, R7, UXTX, Address::Scaled)); |
2587 | __ LoadTaggedClassIdMayBeSmi(R0, R0); |
2588 | if (num_args == 2) { |
2589 | __ AddImmediate(R1, R7, -1); |
2590 | // R1 <- [SP + (R1 << 3)] |
2591 | __ ldr(R1, Address(SP, R1, UXTX, Address::Scaled)); |
2592 | __ LoadTaggedClassIdMayBeSmi(R1, R1); |
2593 | } |
2594 | } |
2595 | // R0: first argument class ID as Smi. |
2596 | // R1: second argument class ID as Smi. |
2597 | // R4: args descriptor |
2598 | |
  // We unroll the checking loop more for the generic stub, which is generated
  // only once, than for the others.
2600 | const bool optimize = kind == Token::kILLEGAL; |
2601 | |
2602 | // Loop that checks if there is an IC data match. |
2603 | Label loop, found, miss; |
2604 | __ Comment("ICData loop" ); |
2605 | |
2606 | __ Bind(&loop); |
2607 | for (int unroll = optimize ? 4 : 2; unroll >= 0; unroll--) { |
2608 | Label update; |
2609 | |
2610 | __ LoadFromOffset(R2, R6, 0); |
2611 | __ CompareRegisters(R0, R2); // Class id match? |
2612 | if (num_args == 2) { |
2613 | __ b(&update, NE); // Continue. |
2614 | __ LoadFromOffset(R2, R6, target::kWordSize); |
2615 | __ CompareRegisters(R1, R2); // Class id match? |
2616 | } |
2617 | __ b(&found, EQ); // Break. |
2618 | |
2619 | __ Bind(&update); |
2620 | |
2621 | const intptr_t entry_size = target::ICData::TestEntryLengthFor( |
2622 | num_args, exactness == kCheckExactness) * |
2623 | target::kWordSize; |
2624 | __ AddImmediate(R6, entry_size); // Next entry. |
2625 | |
2626 | __ CompareImmediate(R2, target::ToRawSmi(kIllegalCid)); // Done? |
2627 | if (unroll == 0) { |
2628 | __ b(&loop, NE); |
2629 | } else { |
2630 | __ b(&miss, EQ); |
2631 | } |
2632 | } |
2633 | |
2634 | __ Bind(&miss); |
2635 | __ Comment("IC miss" ); |
2636 | |
2637 | // Compute address of arguments. |
2638 | __ LoadFieldFromOffset(R7, R4, target::ArgumentsDescriptor::count_offset()); |
2639 | __ SmiUntag(R7); // Untag so we can use the LSL 3 addressing mode. |
2640 | __ sub(R7, R7, Operand(1)); |
2641 | // R7: argument_count - 1 (untagged). |
2642 | // R7 <- SP + (R7 << 3) |
2643 | __ add(R7, SP, Operand(R7, UXTX, 3)); // R7 is Untagged. |
2644 | // R7: address of receiver. |
2645 | // Create a stub frame as we are pushing some objects on the stack before |
2646 | // calling into the runtime. |
2647 | __ EnterStubFrame(); |
  // Preserve the IC data object and the arguments descriptor array.
2650 | __ Push(R4); // Preserve arguments descriptor array. |
2651 | __ Push(R5); // Preserve IC Data. |
2652 | if (save_entry_point) { |
2653 | __ SmiTag(R8); |
2654 | __ Push(R8); |
2655 | } |
  // Set up space on the stack for the result (target code object).
2657 | __ Push(ZR); |
2658 | // Push call arguments. |
2659 | for (intptr_t i = 0; i < num_args; i++) { |
2660 | __ LoadFromOffset(TMP, R7, -i * target::kWordSize); |
2661 | __ Push(TMP); |
2662 | } |
2663 | // Pass IC data object. |
2664 | __ Push(R5); |
2665 | __ CallRuntime(handle_ic_miss, num_args + 1); |
2666 | // Remove the call arguments pushed earlier, including the IC data object. |
2667 | __ Drop(num_args + 1); |
  // Pop the returned function object into R0, then restore the IC data and
  // the arguments descriptor array.
  __ Pop(R0);
2671 | if (save_entry_point) { |
2672 | __ Pop(R8); |
2673 | __ SmiUntag(R8); |
2674 | } |
2675 | __ Pop(R5); // Restore IC Data. |
2676 | __ Pop(R4); // Restore arguments descriptor array. |
2677 | __ RestoreCodePointer(); |
2678 | __ LeaveStubFrame(); |
2679 | Label call_target_function; |
2680 | if (!FLAG_lazy_dispatchers) { |
2681 | GenerateDispatcherCode(assembler, &call_target_function); |
2682 | } else { |
2683 | __ b(&call_target_function); |
2684 | } |
2685 | |
2686 | __ Bind(&found); |
2687 | __ Comment("Update caller's counter" ); |
2688 | // R6: pointer to an IC data check group. |
2689 | const intptr_t target_offset = |
2690 | target::ICData::TargetIndexFor(num_args) * target::kWordSize; |
2691 | const intptr_t count_offset = |
2692 | target::ICData::CountIndexFor(num_args) * target::kWordSize; |
2693 | __ LoadFromOffset(R0, R6, target_offset); |
2694 | |
2695 | if (FLAG_optimization_counter_threshold >= 0) { |
2696 | // Update counter, ignore overflow. |
2697 | __ LoadFromOffset(R1, R6, count_offset); |
2698 | __ adds(R1, R1, Operand(target::ToRawSmi(1))); |
2699 | __ StoreToOffset(R1, R6, count_offset); |
2700 | } |
2701 | |
2702 | __ Comment("Call target" ); |
2703 | __ Bind(&call_target_function); |
2704 | // R0: target function. |
2705 | __ LoadFieldFromOffset(CODE_REG, R0, target::Function::code_offset()); |
2706 | if (save_entry_point) { |
2707 | __ add(R2, R0, Operand(R8)); |
2708 | __ ldr(R2, Address(R2, 0)); |
2709 | } else { |
2710 | __ LoadFieldFromOffset(R2, R0, target::Function::entry_point_offset()); |
2711 | } |
2712 | __ br(R2); |
2713 | |
2714 | #if !defined(PRODUCT) |
2715 | if (optimized == kUnoptimized) { |
2716 | __ Bind(&stepping); |
2717 | __ EnterStubFrame(); |
2718 | if (type == kInstanceCall) { |
2719 | __ Push(R0); // Preserve receiver. |
2720 | } |
2721 | if (save_entry_point) { |
2722 | __ SmiTag(R8); |
2723 | __ Push(R8); |
2724 | } |
2725 | __ Push(R5); // Preserve IC data. |
2726 | __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); |
2727 | __ Pop(R5); |
2728 | if (save_entry_point) { |
2729 | __ Pop(R8); |
2730 | __ SmiUntag(R8); |
2731 | } |
2732 | if (type == kInstanceCall) { |
2733 | __ Pop(R0); |
2734 | } |
2735 | __ RestoreCodePointer(); |
2736 | __ LeaveStubFrame(); |
2737 | __ b(&done_stepping); |
2738 | } |
2739 | #endif |
2740 | } |
2741 | |
2742 | // R0: receiver |
2743 | // R5: ICData |
2744 | // LR: return address |
2745 | void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub( |
2746 | Assembler* assembler) { |
2747 | GenerateNArgsCheckInlineCacheStub( |
2748 | assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, |
2749 | kUnoptimized, kInstanceCall, kIgnoreExactness); |
2750 | } |
2751 | |
2752 | // R0: receiver |
2753 | // R5: ICData |
2754 | // LR: return address |
2755 | void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub( |
2756 | Assembler* assembler) { |
2757 | __ Stop("Unimplemented" ); |
2758 | } |
2759 | |
2760 | // R0: receiver |
2761 | // R5: ICData |
2762 | // LR: return address |
2763 | void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub( |
2764 | Assembler* assembler) { |
2765 | GenerateNArgsCheckInlineCacheStub( |
2766 | assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL, |
2767 | kUnoptimized, kInstanceCall, kIgnoreExactness); |
2768 | } |
2769 | |
2770 | // R0: receiver |
2771 | // R5: ICData |
2772 | // LR: return address |
2773 | void StubCodeCompiler::GenerateSmiAddInlineCacheStub(Assembler* assembler) { |
2774 | GenerateNArgsCheckInlineCacheStub( |
2775 | assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD, |
2776 | kUnoptimized, kInstanceCall, kIgnoreExactness); |
2777 | } |
2778 | |
2779 | // R0: receiver |
2780 | // R5: ICData |
2781 | // LR: return address |
2782 | void StubCodeCompiler::GenerateSmiLessInlineCacheStub(Assembler* assembler) { |
2783 | GenerateNArgsCheckInlineCacheStub( |
2784 | assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT, |
2785 | kUnoptimized, kInstanceCall, kIgnoreExactness); |
2786 | } |
2787 | |
2788 | // R0: receiver |
2789 | // R5: ICData |
2790 | // LR: return address |
2791 | void StubCodeCompiler::GenerateSmiEqualInlineCacheStub(Assembler* assembler) { |
2792 | GenerateNArgsCheckInlineCacheStub( |
2793 | assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ, |
2794 | kUnoptimized, kInstanceCall, kIgnoreExactness); |
2795 | } |
2796 | |
2797 | // R0: receiver |
2798 | // R5: ICData |
2799 | // R6: Function |
2800 | // LR: return address |
2801 | void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub( |
2802 | Assembler* assembler) { |
2803 | GenerateNArgsCheckInlineCacheStub( |
2804 | assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, |
2805 | kOptimized, kInstanceCall, kIgnoreExactness); |
2806 | } |
2807 | |
2808 | // R0: receiver |
2809 | // R5: ICData |
2810 | // R6: Function |
2811 | // LR: return address |
2812 | void StubCodeCompiler:: |
2813 | GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub( |
2814 | Assembler* assembler) { |
2815 | __ Stop("Unimplemented" ); |
2816 | } |
2817 | |
2818 | // R0: receiver |
2819 | // R5: ICData |
2820 | // R6: Function |
2821 | // LR: return address |
2822 | void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub( |
2823 | Assembler* assembler) { |
2824 | GenerateNArgsCheckInlineCacheStub( |
2825 | assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL, |
2826 | kOptimized, kInstanceCall, kIgnoreExactness); |
2827 | } |
2828 | |
2829 | // R5: ICData |
2830 | // LR: return address |
2831 | void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub( |
2832 | Assembler* assembler) { |
2833 | GenerateRecordEntryPoint(assembler); |
2834 | GenerateUsageCounterIncrement(assembler, /* scratch */ R6); |
2835 | #if defined(DEBUG) |
2836 | { |
2837 | Label ok; |
2838 | // Check that the IC data array has NumArgsTested() == 0. |
2839 | // 'NumArgsTested' is stored in the least significant bits of 'state_bits'. |
2840 | __ LoadFromOffset(R6, R5, |
2841 | target::ICData::state_bits_offset() - kHeapObjectTag, |
2842 | kUnsignedWord); |
2843 | ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed. |
2844 | __ andi(R6, R6, Immediate(target::ICData::NumArgsTestedMask())); |
2845 | __ CompareImmediate(R6, 0); |
2846 | __ b(&ok, EQ); |
2847 | __ Stop("Incorrect IC data for unoptimized static call" ); |
2848 | __ Bind(&ok); |
2849 | } |
2850 | #endif // DEBUG |
2851 | |
2852 | // Check single stepping. |
2853 | #if !defined(PRODUCT) |
2854 | Label stepping, done_stepping; |
2855 | __ LoadIsolate(R6); |
2856 | __ LoadFromOffset(R6, R6, target::Isolate::single_step_offset(), |
2857 | kUnsignedByte); |
2858 | __ CompareImmediate(R6, 0); |
2859 | __ b(&stepping, NE); |
2860 | __ Bind(&done_stepping); |
2861 | #endif |
2862 | |
2863 | // R5: IC data object (preserved). |
2864 | __ LoadFieldFromOffset(R6, R5, target::ICData::entries_offset()); |
2865 | // R6: ic_data_array with entries: target functions and count. |
2866 | __ AddImmediate(R6, target::Array::data_offset() - kHeapObjectTag); |
2867 | // R6: points directly to the first ic data array element. |
2868 | const intptr_t target_offset = |
2869 | target::ICData::TargetIndexFor(0) * target::kWordSize; |
2870 | const intptr_t count_offset = |
2871 | target::ICData::CountIndexFor(0) * target::kWordSize; |
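  // A zero-args-tested ICData has a single entry holding just the target
  // function and its call count, at the fixed indices computed above.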
2872 | |
2873 | if (FLAG_optimization_counter_threshold >= 0) { |
2874 | // Increment count for this call, ignore overflow. |
2875 | __ LoadFromOffset(R1, R6, count_offset); |
2876 | __ adds(R1, R1, Operand(target::ToRawSmi(1))); |
2877 | __ StoreToOffset(R1, R6, count_offset); |
2878 | } |
2879 | |
2880 | // Load arguments descriptor into R4. |
2881 | __ LoadFieldFromOffset(R4, R5, |
2882 | target::CallSiteData::arguments_descriptor_offset()); |
2883 | |
2884 | // Get function and call it, if possible. |
2885 | __ LoadFromOffset(R0, R6, target_offset); |
2886 | __ LoadFieldFromOffset(CODE_REG, R0, target::Function::code_offset()); |
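  // R8 holds the entry point offset recorded by GenerateRecordEntryPoint
  // above, so R0 + R8 addresses the selected entry point slot of the target
  // function.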
2887 | __ add(R2, R0, Operand(R8)); |
2888 | __ ldr(R2, Address(R2, 0)); |
2889 | __ br(R2); |
2890 | |
2891 | #if !defined(PRODUCT) |
2892 | __ Bind(&stepping); |
2893 | __ EnterStubFrame(); |
2894 | __ Push(R5); // Preserve IC data. |
2895 | __ SmiTag(R8); |
2896 | __ Push(R8); |
2897 | __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); |
2898 | __ Pop(R8); |
2899 | __ SmiUntag(R8); |
2900 | __ Pop(R5); |
2901 | __ RestoreCodePointer(); |
2902 | __ LeaveStubFrame(); |
2903 | __ b(&done_stepping); |
2904 | #endif |
2905 | } |
2906 | |
2907 | // R5: ICData |
2908 | // LR: return address |
2909 | void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub( |
2910 | Assembler* assembler) { |
2911 | GenerateUsageCounterIncrement(assembler, /* scratch */ R6); |
2912 | GenerateNArgsCheckInlineCacheStub( |
2913 | assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, |
2914 | kUnoptimized, kStaticCall, kIgnoreExactness); |
2915 | } |
2916 | |
2917 | // R5: ICData |
2918 | // LR: return address |
2919 | void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub( |
2920 | Assembler* assembler) { |
2921 | GenerateUsageCounterIncrement(assembler, /* scratch */ R6); |
2922 | GenerateNArgsCheckInlineCacheStub( |
2923 | assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL, |
2924 | kUnoptimized, kStaticCall, kIgnoreExactness); |
2925 | } |
2926 | |
2927 | // Stub for compiling a function and jumping to the compiled code. |
2928 | // R4: Arguments descriptor. |
2929 | // R0: Function. |
2930 | void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) { |
2931 | // Preserve arg desc. |
2932 | __ EnterStubFrame(); |
2933 | __ Push(R4); // Save arg. desc. |
2934 | __ Push(R0); // Pass function. |
2935 | __ CallRuntime(kCompileFunctionRuntimeEntry, 1); |
2936 | __ Pop(R0); // Restore argument. |
2937 | __ Pop(R4); // Restore arg desc. |
2938 | __ LeaveStubFrame(); |
2939 | |
2940 | // When using the interpreter, the function's code may now point to the |
2941 | // InterpretCall stub. Make sure R0, R4, and R5 are preserved. |
2942 | __ LoadFieldFromOffset(CODE_REG, R0, target::Function::code_offset()); |
2943 | __ LoadFieldFromOffset(R2, R0, target::Function::entry_point_offset()); |
2944 | __ br(R2); |
2945 | } |
2946 | |
2947 | // Stub for interpreting a function call. |
2948 | // R4: Arguments descriptor. |
2949 | // R0: Function. |
2950 | void StubCodeCompiler::GenerateInterpretCallStub(Assembler* assembler) { |
2951 | if (FLAG_precompiled_mode) { |
2952 | __ Stop("Not using interpreter" ); |
2953 | return; |
2954 | } |
2955 | |
2956 | __ SetPrologueOffset(); |
2957 | __ EnterStubFrame(); |
2958 | |
2959 | #if defined(DEBUG) |
2960 | { |
2961 | Label ok; |
2962 | // Check that we are always entering from Dart code. |
2963 | __ LoadFromOffset(R8, THR, target::Thread::vm_tag_offset()); |
2964 | __ CompareImmediate(R8, VMTag::kDartCompiledTagId); |
2965 | __ b(&ok, EQ); |
2966 | __ Stop("Not coming from Dart code." ); |
2967 | __ Bind(&ok); |
2968 | } |
2969 | #endif |
2970 | |
2971 | // Adjust arguments count for type arguments vector. |
2972 | __ LoadFieldFromOffset(R2, R4, target::ArgumentsDescriptor::count_offset()); |
2973 | __ SmiUntag(R2); |
2974 | __ LoadFieldFromOffset(R1, R4, |
2975 | target::ArgumentsDescriptor::type_args_len_offset()); |
2976 | __ cmp(R1, Operand(0)); |
2977 | __ csinc(R2, R2, R2, EQ); // R2 <- (R1 == 0) ? R2 : R2 + 1. |
2978 | |
2979 | // Compute argv. |
2980 | __ add(R3, ZR, Operand(R2, LSL, 3)); |
2981 | __ add(R3, FP, Operand(R3)); |
2982 | __ AddImmediate(R3, |
2983 | target::frame_layout.param_end_from_fp * target::kWordSize); |
2984 | |
2985 | // Indicate decreasing memory addresses of arguments with negative argc. |
2986 | __ neg(R2, R2); |
2987 | |
2988 | // Align frame before entering C++ world. No shadow stack space required. |
2989 | __ ReserveAlignedFrameSpace(0 * target::kWordSize); |
2990 | |
2991 | // Pass arguments in registers. |
2992 | // R0: Function. |
2993 | __ mov(R1, R4); // Arguments descriptor. |
2994 | // R2: Negative argc. |
2995 | // R3: Argv. |
2996 | __ mov(R4, THR); // Thread. |
2997 | |
2998 | // Save exit frame information to enable stack walking as we are about |
2999 | // to transition to Dart VM C++ code. |
3000 | __ StoreToOffset(FP, THR, target::Thread::top_exit_frame_info_offset()); |
3001 | |
3002 | // Mark that the thread exited generated code through a runtime call. |
3003 | __ LoadImmediate(R5, target::Thread::exit_through_runtime_call()); |
3004 | __ StoreToOffset(R5, THR, target::Thread::exit_through_ffi_offset()); |
3005 | |
3006 | // Mark that the thread is executing VM code. |
3007 | __ LoadFromOffset(R5, THR, |
3008 | target::Thread::interpret_call_entry_point_offset()); |
3009 | __ StoreToOffset(R5, THR, target::Thread::vm_tag_offset()); |
3010 | |
3011 | // We are entering runtime code, so the C stack pointer must be restored from |
3012 | // the stack limit to the top of the stack. We cache the stack limit address |
3013 | // in a callee-saved register. |
3014 | __ mov(R25, CSP); |
3015 | __ mov(CSP, SP); |
3016 | |
3017 | __ blr(R5); |
3018 | |
3019 | // Restore SP and CSP. |
3020 | __ mov(SP, CSP); |
3021 | __ mov(CSP, R25); |
3022 | |
  // Refresh pinned register values (incl. write barrier mask and null object).
3024 | __ RestorePinnedRegisters(); |
3025 | |
3026 | // Mark that the thread is executing Dart code. |
3027 | __ LoadImmediate(R2, VMTag::kDartCompiledTagId); |
3028 | __ StoreToOffset(R2, THR, target::Thread::vm_tag_offset()); |
3029 | |
3030 | // Mark that the thread has not exited generated Dart code. |
3031 | __ StoreToOffset(ZR, THR, target::Thread::exit_through_ffi_offset()); |
3032 | |
3033 | // Reset exit frame information in Isolate's mutator thread structure. |
3034 | __ StoreToOffset(ZR, THR, target::Thread::top_exit_frame_info_offset()); |
3035 | |
3036 | __ LeaveStubFrame(); |
3037 | __ ret(); |
3038 | } |
3039 | |
3040 | // R5: Contains an ICData. |
3041 | void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) { |
3042 | #if defined(PRODUCT) |
3043 | __ Stop("No debugging in PRODUCT mode" ); |
3044 | #else |
3045 | __ EnterStubFrame(); |
3046 | __ Push(R0); // Preserve receiver. |
3047 | __ Push(R5); // Preserve IC data. |
3048 | __ Push(ZR); // Space for result. |
3049 | __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0); |
3050 | __ Pop(CODE_REG); // Original stub. |
3051 | __ Pop(R5); // Restore IC data. |
3052 | __ Pop(R0); // Restore receiver. |
3053 | __ LeaveStubFrame(); |
3054 | __ LoadFieldFromOffset(TMP, CODE_REG, target::Code::entry_point_offset()); |
3055 | __ br(TMP); |
3056 | #endif // defined(PRODUCT) |
3057 | } |
3058 | |
3059 | void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub( |
3060 | Assembler* assembler) { |
3061 | #if defined(PRODUCT) |
3062 | __ Stop("No debugging in PRODUCT mode" ); |
3063 | #else |
3064 | __ EnterStubFrame(); |
3065 | __ Push(R5); // Preserve IC data. |
3066 | __ Push(ZR); // Space for result. |
3067 | __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0); |
3068 | __ Pop(CODE_REG); // Original stub. |
3069 | __ Pop(R5); // Restore IC data. |
3070 | __ LeaveStubFrame(); |
3071 | __ LoadFieldFromOffset(TMP, CODE_REG, target::Code::entry_point_offset()); |
3072 | __ br(TMP); |
3073 | #endif // defined(PRODUCT) |
3074 | } |
3075 | |
3076 | void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) { |
3077 | #if defined(PRODUCT) |
3078 | __ Stop("No debugging in PRODUCT mode" ); |
3079 | #else |
3080 | __ EnterStubFrame(); |
3081 | __ Push(ZR); // Space for result. |
3082 | __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0); |
3083 | __ Pop(CODE_REG); |
3084 | __ LeaveStubFrame(); |
3085 | __ LoadFieldFromOffset(R0, CODE_REG, target::Code::entry_point_offset()); |
3086 | __ br(R0); |
3087 | #endif // defined(PRODUCT) |
3088 | } |
3089 | |
3090 | // Called only from unoptimized code. All relevant registers have been saved. |
3091 | void StubCodeCompiler::GenerateDebugStepCheckStub(Assembler* assembler) { |
3092 | #if defined(PRODUCT) |
3093 | __ Stop("No debugging in PRODUCT mode" ); |
3094 | #else |
3095 | // Check single stepping. |
3096 | Label stepping, done_stepping; |
3097 | __ LoadIsolate(R1); |
3098 | __ LoadFromOffset(R1, R1, target::Isolate::single_step_offset(), |
3099 | kUnsignedByte); |
3100 | __ CompareImmediate(R1, 0); |
3101 | __ b(&stepping, NE); |
3102 | __ Bind(&done_stepping); |
3103 | __ ret(); |
3104 | |
3105 | __ Bind(&stepping); |
3106 | __ EnterStubFrame(); |
3107 | __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); |
3108 | __ LeaveStubFrame(); |
3109 | __ b(&done_stepping); |
3110 | #endif // defined(PRODUCT) |
3111 | } |
3112 | |
3113 | // Used to check class and type arguments. Arguments passed in registers: |
3114 | // LR: return address. |
3115 | // R0: instance (must be preserved). |
// R2: instantiator type arguments (only used if n >= 4, can be raw_null).
// R1: function type arguments (only used if n >= 4, can be raw_null).
3118 | // R3: target::SubtypeTestCache. |
3119 | // |
3120 | // Preserves R0/R2/R8. |
3121 | // |
3122 | // Result in R1: null -> not found, otherwise result (true or false). |
3123 | static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) { |
3124 | ASSERT(n == 1 || n == 2 || n == 4 || n == 6); |
3125 | |
3126 | const Register kInstanceCidOrFunction = R6; |
3127 | const Register kInstanceInstantiatorTypeArgumentsReg = R4; |
3128 | const Register kInstanceParentFunctionTypeArgumentsReg = R9; |
3129 | const Register kInstanceDelayedFunctionTypeArgumentsReg = R10; |
3130 | |
3131 | const Register kNullReg = R7; |
3132 | |
3133 | __ LoadObject(kNullReg, NullObject()); |
3134 | |
  // Loop initialization (moved up here to avoid having all the dependent
  // loads immediately after one another).
3137 | |
3138 | // We avoid a load-acquire barrier here by relying on the fact that all other |
3139 | // loads from the array are data-dependent loads. |
3140 | __ ldr(TypeTestABI::kSubtypeTestCacheReg, |
3141 | FieldAddress(TypeTestABI::kSubtypeTestCacheReg, |
3142 | target::SubtypeTestCache::cache_offset())); |
3143 | __ AddImmediate(TypeTestABI::kSubtypeTestCacheReg, |
3144 | target::Array::data_offset() - kHeapObjectTag); |
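  // Each cache entry occupies kTestEntryLength words, laid out per the
  // target::SubtypeTestCache indices used in the loop below: the instance
  // cid (as a Smi) or closure function, the instance type arguments, the
  // instantiator and function type arguments, the parent and delayed function
  // type arguments, and finally the test result. A null in the first slot
  // terminates the cache; the loop compares only the first n inputs.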
3145 | |
3146 | Label loop, not_closure; |
3147 | if (n >= 4) { |
    __ LoadClassIdMayBeSmi(kInstanceCidOrFunction, TypeTestABI::kInstanceReg);
3150 | } else { |
3151 | __ LoadClassId(kInstanceCidOrFunction, TypeTestABI::kInstanceReg); |
3152 | } |
3153 | __ CompareImmediate(kInstanceCidOrFunction, kClosureCid); |
3154 | __ b(¬_closure, NE); |
3155 | |
3156 | // Closure handling. |
3157 | { |
3158 | __ ldr(kInstanceCidOrFunction, |
3159 | FieldAddress(TypeTestABI::kInstanceReg, |
3160 | target::Closure::function_offset())); |
3161 | if (n >= 2) { |
3162 | __ ldr( |
3163 | kInstanceInstantiatorTypeArgumentsReg, |
3164 | FieldAddress(TypeTestABI::kInstanceReg, |
3165 | target::Closure::instantiator_type_arguments_offset())); |
3166 | if (n >= 6) { |
3167 | ASSERT(n == 6); |
3168 | __ ldr(kInstanceParentFunctionTypeArgumentsReg, |
3169 | FieldAddress(TypeTestABI::kInstanceReg, |
3170 | target::Closure::function_type_arguments_offset())); |
3171 | __ ldr(kInstanceDelayedFunctionTypeArgumentsReg, |
3172 | FieldAddress(TypeTestABI::kInstanceReg, |
3173 | target::Closure::delayed_type_arguments_offset())); |
3174 | } |
3175 | } |
3176 | __ b(&loop); |
3177 | } |
3178 | |
3179 | // Non-Closure handling. |
3180 | { |
3181 | __ Bind(¬_closure); |
3182 | if (n >= 2) { |
3183 | Label has_no_type_arguments; |
3184 | __ LoadClassById(R5, kInstanceCidOrFunction); |
3185 | __ mov(kInstanceInstantiatorTypeArgumentsReg, kNullReg); |
3186 | __ LoadFieldFromOffset( |
3187 | R5, R5, |
3188 | target::Class::host_type_arguments_field_offset_in_words_offset(), |
3189 | kWord); |
3190 | __ CompareImmediate(R5, target::Class::kNoTypeArguments); |
3191 | __ b(&has_no_type_arguments, EQ); |
3192 | __ add(R5, TypeTestABI::kInstanceReg, Operand(R5, LSL, 3)); |
3193 | __ ldr(kInstanceInstantiatorTypeArgumentsReg, FieldAddress(R5, 0)); |
3194 | __ Bind(&has_no_type_arguments); |
3195 | |
3196 | if (n >= 6) { |
3197 | __ mov(kInstanceParentFunctionTypeArgumentsReg, kNullReg); |
3198 | __ mov(kInstanceDelayedFunctionTypeArgumentsReg, kNullReg); |
3199 | } |
3200 | } |
3201 | __ SmiTag(kInstanceCidOrFunction); |
3202 | } |
3203 | |
3204 | Label found, not_found, next_iteration; |
3205 | |
3206 | // Loop header |
3207 | __ Bind(&loop); |
3208 | __ ldr(R5, Address(TypeTestABI::kSubtypeTestCacheReg, |
3209 | target::kWordSize * |
3210 | target::SubtypeTestCache::kInstanceClassIdOrFunction)); |
3211 | __ cmp(R5, Operand(kNullReg)); |
3212 | __ b(¬_found, EQ); |
3213 | __ cmp(R5, Operand(kInstanceCidOrFunction)); |
3214 | if (n == 1) { |
3215 | __ b(&found, EQ); |
3216 | } else { |
3217 | __ b(&next_iteration, NE); |
3218 | __ ldr(R5, Address(TypeTestABI::kSubtypeTestCacheReg, |
3219 | target::kWordSize * |
3220 | target::SubtypeTestCache::kInstanceTypeArguments)); |
3221 | __ cmp(R5, Operand(kInstanceInstantiatorTypeArgumentsReg)); |
3222 | if (n == 2) { |
3223 | __ b(&found, EQ); |
3224 | } else { |
3225 | __ b(&next_iteration, NE); |
3226 | __ ldr(R5, |
3227 | Address(TypeTestABI::kSubtypeTestCacheReg, |
3228 | target::kWordSize * |
3229 | target::SubtypeTestCache::kInstantiatorTypeArguments)); |
3230 | __ cmp(R5, Operand(TypeTestABI::kInstantiatorTypeArgumentsReg)); |
3231 | __ b(&next_iteration, NE); |
3232 | __ ldr(R5, Address(TypeTestABI::kSubtypeTestCacheReg, |
3233 | target::kWordSize * |
3234 | target::SubtypeTestCache::kFunctionTypeArguments)); |
3235 | __ cmp(R5, Operand(TypeTestABI::kFunctionTypeArgumentsReg)); |
3236 | if (n == 4) { |
3237 | __ b(&found, EQ); |
3238 | } else { |
3239 | ASSERT(n == 6); |
3240 | __ b(&next_iteration, NE); |
3241 | |
3242 | __ ldr(R5, Address(TypeTestABI::kSubtypeTestCacheReg, |
3243 | target::kWordSize * |
3244 | target::SubtypeTestCache:: |
3245 | kInstanceParentFunctionTypeArguments)); |
3246 | __ cmp(R5, Operand(kInstanceParentFunctionTypeArgumentsReg)); |
3247 | __ b(&next_iteration, NE); |
3248 | |
3249 | __ ldr(R5, Address(TypeTestABI::kSubtypeTestCacheReg, |
3250 | target::kWordSize * |
3251 | target::SubtypeTestCache:: |
3252 | kInstanceDelayedFunctionTypeArguments)); |
3253 | __ cmp(R5, Operand(kInstanceDelayedFunctionTypeArgumentsReg)); |
3254 | __ b(&found, EQ); |
3255 | } |
3256 | } |
3257 | } |
3258 | __ Bind(&next_iteration); |
3259 | __ AddImmediate( |
3260 | TypeTestABI::kSubtypeTestCacheReg, |
3261 | target::kWordSize * target::SubtypeTestCache::kTestEntryLength); |
3262 | __ b(&loop); |
3263 | |
3264 | __ Bind(&found); |
3265 | __ ldr(R1, |
3266 | Address(TypeTestABI::kSubtypeTestCacheReg, |
3267 | target::kWordSize * target::SubtypeTestCache::kTestResult)); |
3268 | __ ret(); |
3269 | |
3270 | __ Bind(¬_found); |
3271 | __ mov(R1, kNullReg); |
3272 | __ ret(); |
3273 | } |
3274 | |
3275 | // See comment on [GenerateSubtypeNTestCacheStub]. |
3276 | void StubCodeCompiler::GenerateSubtype1TestCacheStub(Assembler* assembler) { |
3277 | GenerateSubtypeNTestCacheStub(assembler, 1); |
3278 | } |
3279 | |
3280 | // See comment on [GenerateSubtypeNTestCacheStub]. |
3281 | void StubCodeCompiler::GenerateSubtype2TestCacheStub(Assembler* assembler) { |
3282 | GenerateSubtypeNTestCacheStub(assembler, 2); |
3283 | } |
3284 | |
3285 | // See comment on [GenerateSubtypeNTestCacheStub]. |
3286 | void StubCodeCompiler::GenerateSubtype4TestCacheStub(Assembler* assembler) { |
3287 | GenerateSubtypeNTestCacheStub(assembler, 4); |
3288 | } |
3289 | |
3290 | // See comment on [GenerateSubtypeNTestCacheStub]. |
3291 | void StubCodeCompiler::GenerateSubtype6TestCacheStub(Assembler* assembler) { |
3292 | GenerateSubtypeNTestCacheStub(assembler, 6); |
3293 | } |
3294 | |
3295 | // Used to test whether a given value is of a given type (different variants, |
3296 | // all have the same calling convention). |
3297 | // |
3298 | // Inputs: |
3299 | // - R0 : instance to test against. |
3300 | // - R2 : instantiator type arguments (if needed). |
3301 | // - R1 : function type arguments (if needed). |
3302 | // |
3303 | // - R3 : subtype test cache. |
3304 | // |
3305 | // - R8 : type to test against. |
3306 | // - R4 : name of destination variable. |
3307 | // |
3308 | // Preserves R0/R2. |
3309 | // |
// Note of warning: The caller will not populate CODE_REG, so we have no
// access to the object pool.
3312 | void StubCodeCompiler::GenerateDefaultTypeTestStub(Assembler* assembler) { |
3313 | // Tail call the [SubtypeTestCache]-based implementation. |
3314 | __ ldr(CODE_REG, Address(THR, target::Thread::slow_type_test_stub_offset())); |
3315 | __ ldr(R9, FieldAddress(CODE_REG, target::Code::entry_point_offset())); |
3316 | __ br(R9); |
3317 | } |
3318 | |
3319 | // Used instead of DefaultTypeTestStub when null is assignable. |
3320 | void StubCodeCompiler::GenerateDefaultNullableTypeTestStub( |
3321 | Assembler* assembler) { |
3322 | Label done; |
3323 | |
3324 | // Fast case for 'null'. |
3325 | __ CompareObject(TypeTestABI::kInstanceReg, NullObject()); |
3326 | __ BranchIf(EQUAL, &done); |
3327 | |
3328 | // Tail call the [SubtypeTestCache]-based implementation. |
3329 | __ ldr(CODE_REG, Address(THR, target::Thread::slow_type_test_stub_offset())); |
3330 | __ ldr(R9, FieldAddress(CODE_REG, target::Code::entry_point_offset())); |
3331 | __ br(R9); |
3332 | |
3333 | __ Bind(&done); |
3334 | __ Ret(); |
3335 | } |
3336 | |
3337 | void StubCodeCompiler::GenerateTopTypeTypeTestStub(Assembler* assembler) { |
3338 | __ Ret(); |
3339 | } |
3340 | |
3341 | void StubCodeCompiler::GenerateUnreachableTypeTestStub(Assembler* assembler) { |
3342 | __ Breakpoint(); |
3343 | } |
3344 | |
3345 | static void InvokeTypeCheckFromTypeTestStub(Assembler* assembler, |
3346 | TypeCheckMode mode) { |
3347 | __ PushObject(NullObject()); // Make room for result. |
3348 | __ Push(TypeTestABI::kInstanceReg); |
3349 | __ Push(TypeTestABI::kDstTypeReg); |
3350 | __ Push(TypeTestABI::kInstantiatorTypeArgumentsReg); |
3351 | __ Push(TypeTestABI::kFunctionTypeArgumentsReg); |
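  // The null pushed here occupies the dst_name argument slot; it is dropped
  // again after the call.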
3352 | __ PushObject(NullObject()); |
3353 | __ Push(TypeTestABI::kSubtypeTestCacheReg); |
3354 | __ PushImmediate(target::ToRawSmi(mode)); |
3355 | __ CallRuntime(kTypeCheckRuntimeEntry, 7); |
3356 | __ Drop(1); // mode |
3357 | __ Pop(TypeTestABI::kSubtypeTestCacheReg); |
3358 | __ Drop(1); // dst_name |
3359 | __ Pop(TypeTestABI::kFunctionTypeArgumentsReg); |
3360 | __ Pop(TypeTestABI::kInstantiatorTypeArgumentsReg); |
3361 | __ Pop(TypeTestABI::kDstTypeReg); |
3362 | __ Pop(TypeTestABI::kInstanceReg); |
3363 | __ Drop(1); // Discard return value. |
3364 | } |
3365 | |
3366 | void StubCodeCompiler::GenerateLazySpecializeTypeTestStub( |
3367 | Assembler* assembler) { |
3368 | __ ldr(CODE_REG, |
3369 | Address(THR, target::Thread::lazy_specialize_type_test_stub_offset())); |
3370 | __ EnterStubFrame(); |
3371 | InvokeTypeCheckFromTypeTestStub(assembler, kTypeCheckFromLazySpecializeStub); |
3372 | __ LeaveStubFrame(); |
3373 | __ Ret(); |
3374 | } |
3375 | |
3376 | // Used instead of LazySpecializeTypeTestStub when null is assignable. |
3377 | void StubCodeCompiler::GenerateLazySpecializeNullableTypeTestStub( |
3378 | Assembler* assembler) { |
3379 | Label done; |
3380 | |
3381 | __ CompareObject(TypeTestABI::kInstanceReg, NullObject()); |
3382 | __ BranchIf(EQUAL, &done); |
3383 | |
3384 | __ ldr(CODE_REG, |
3385 | Address(THR, target::Thread::lazy_specialize_type_test_stub_offset())); |
3386 | __ EnterStubFrame(); |
3387 | InvokeTypeCheckFromTypeTestStub(assembler, kTypeCheckFromLazySpecializeStub); |
3388 | __ LeaveStubFrame(); |
3389 | |
3390 | __ Bind(&done); |
3391 | __ Ret(); |
3392 | } |
3393 | |
3394 | void StubCodeCompiler::GenerateSlowTypeTestStub(Assembler* assembler) { |
3395 | Label done, call_runtime; |
3396 | |
3397 | if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) { |
3398 | __ ldr(CODE_REG, |
3399 | Address(THR, target::Thread::slow_type_test_stub_offset())); |
3400 | } |
3401 | __ EnterStubFrame(); |
3402 | |
3403 | // If the subtype-cache is null, it needs to be lazily-created by the runtime. |
3404 | __ CompareObject(TypeTestABI::kSubtypeTestCacheReg, NullObject()); |
3405 | __ BranchIf(EQUAL, &call_runtime); |
3406 | |
3407 | const Register kTmp = R9; |
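  // Pick a cache flavour: an instantiated, non-function Type tested against a
  // non-Smi instance only needs the 2-input cache (is_simple_case); anything
  // else uses the 6-input cache (is_complex_case) before falling back to the
  // runtime.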
3408 | |
  // If this is not a [Type] object, we'll take the complex path.
3410 | Label is_simple_case, is_complex_case; |
3411 | __ LoadClassId(kTmp, TypeTestABI::kDstTypeReg); |
3412 | __ cmp(kTmp, Operand(kTypeCid)); |
3413 | __ BranchIf(NOT_EQUAL, &is_complex_case); |
3414 | |
3415 | // Check whether this [Type] is instantiated/uninstantiated. |
3416 | __ ldr( |
3417 | kTmp, |
3418 | FieldAddress(TypeTestABI::kDstTypeReg, target::Type::type_state_offset()), |
3419 | kByte); |
3420 | __ cmp(kTmp, |
3421 | Operand(target::AbstractTypeLayout::kTypeStateFinalizedInstantiated)); |
3422 | __ BranchIf(NOT_EQUAL, &is_complex_case); |
3423 | |
3424 | // Check whether this [Type] is a function type. |
3425 | __ ldr(kTmp, FieldAddress(TypeTestABI::kDstTypeReg, |
3426 | target::Type::signature_offset())); |
3427 | __ CompareObject(kTmp, NullObject()); |
3428 | __ BranchIf(NOT_EQUAL, &is_complex_case); |
3429 | |
3430 | // This [Type] could be a FutureOr. Subtype2TestCache does not support Smi. |
3431 | __ BranchIfSmi(TypeTestABI::kInstanceReg, &is_complex_case); |
3432 | |
3433 | // Fall through to &is_simple_case |
3434 | |
3435 | __ Bind(&is_simple_case); |
3436 | { |
3437 | __ PushPair(TypeTestABI::kFunctionTypeArgumentsReg, |
3438 | TypeTestABI::kSubtypeTestCacheReg); |
3439 | __ BranchLink(StubCodeSubtype2TestCache()); |
3440 | __ CompareObject(R1, CastHandle<Object>(TrueObject())); |
3441 | __ PopPair(TypeTestABI::kFunctionTypeArgumentsReg, |
3442 | TypeTestABI::kSubtypeTestCacheReg); |
3443 | __ BranchIf(EQUAL, &done); // Cache said: yes. |
3444 | __ Jump(&call_runtime); |
3445 | } |
3446 | |
3447 | __ Bind(&is_complex_case); |
3448 | { |
3449 | __ PushPair(TypeTestABI::kFunctionTypeArgumentsReg, |
3450 | TypeTestABI::kSubtypeTestCacheReg); |
3451 | __ BranchLink(StubCodeSubtype6TestCache()); |
3452 | __ CompareObject(R1, CastHandle<Object>(TrueObject())); |
3453 | __ PopPair(TypeTestABI::kFunctionTypeArgumentsReg, |
3454 | TypeTestABI::kSubtypeTestCacheReg); |
3455 | __ BranchIf(EQUAL, &done); // Cache said: yes. |
    // Fall through to call_runtime.
3457 | } |
3458 | |
3459 | __ Bind(&call_runtime); |
3460 | |
3461 | InvokeTypeCheckFromTypeTestStub(assembler, kTypeCheckFromSlowStub); |
3462 | |
3463 | __ Bind(&done); |
3464 | __ LeaveStubFrame(); |
3465 | __ Ret(); |
3466 | } |
3467 | |
3468 | void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) { |
3469 | __ mov(R0, CSP); |
3470 | __ ret(); |
3471 | } |
3472 | |
3473 | // Jump to a frame on the call stack. |
3474 | // LR: return address. |
3475 | // R0: program_counter. |
3476 | // R1: stack_pointer. |
3477 | // R2: frame_pointer. |
3478 | // R3: thread. |
3479 | // Does not return. |
3480 | void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) { |
3481 | ASSERT(kExceptionObjectReg == R0); |
3482 | ASSERT(kStackTraceObjectReg == R1); |
3483 | __ mov(LR, R0); // Program counter. |
3484 | __ mov(SP, R1); // Stack pointer. |
3485 | __ mov(FP, R2); // Frame_pointer. |
3486 | __ mov(THR, R3); |
3487 | __ SetupCSPFromThread(THR); |
3488 | #if defined(TARGET_OS_FUCHSIA) |
3489 | __ ldr(R18, Address(THR, target::Thread::saved_shadow_call_stack_offset())); |
3490 | #elif defined(USING_SHADOW_CALL_STACK) |
3491 | #error Unimplemented |
3492 | #endif |
3493 | Label exit_through_non_ffi; |
3494 | Register tmp1 = R0, tmp2 = R1; |
  // Check if we exited generated code through FFI. If so, do the transition.
3496 | __ LoadFromOffset(tmp1, THR, |
3497 | compiler::target::Thread::exit_through_ffi_offset()); |
3498 | __ LoadImmediate(tmp2, target::Thread::exit_through_ffi()); |
3499 | __ cmp(tmp1, Operand(tmp2)); |
3500 | __ b(&exit_through_non_ffi, NE); |
3501 | __ TransitionNativeToGenerated(tmp1, /*leave_safepoint=*/true); |
3502 | __ Bind(&exit_through_non_ffi); |
3503 | |
  // Refresh pinned register values (incl. write barrier mask and null object).
3505 | __ RestorePinnedRegisters(); |
3506 | // Set the tag. |
3507 | __ LoadImmediate(R2, VMTag::kDartCompiledTagId); |
3508 | __ StoreToOffset(R2, THR, target::Thread::vm_tag_offset()); |
3509 | // Clear top exit frame. |
3510 | __ StoreToOffset(ZR, THR, target::Thread::top_exit_frame_info_offset()); |
3511 | // Restore the pool pointer. |
3512 | __ RestoreCodePointer(); |
3513 | if (FLAG_precompiled_mode && FLAG_use_bare_instructions) { |
3514 | __ SetupGlobalPoolAndDispatchTable(); |
3515 | } else { |
3516 | __ LoadPoolPointer(); |
3517 | } |
3518 | __ ret(); // Jump to continuation point. |
3519 | } |
3520 | |
3521 | // Run an exception handler. Execution comes from JumpToFrame |
3522 | // stub or from the simulator. |
3523 | // |
3524 | // The arguments are stored in the Thread object. |
3525 | // Does not return. |
3526 | void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) { |
3527 | __ LoadFromOffset(LR, THR, target::Thread::resume_pc_offset()); |
3528 | |
3529 | word offset_from_thread = 0; |
3530 | bool ok = target::CanLoadFromThread(NullObject(), &offset_from_thread); |
3531 | ASSERT(ok); |
3532 | __ LoadFromOffset(R2, THR, offset_from_thread); |
3533 | |
3534 | // Exception object. |
3535 | __ LoadFromOffset(R0, THR, target::Thread::active_exception_offset()); |
3536 | __ StoreToOffset(R2, THR, target::Thread::active_exception_offset()); |
3537 | |
3538 | // StackTrace object. |
3539 | __ LoadFromOffset(R1, THR, target::Thread::active_stacktrace_offset()); |
3540 | __ StoreToOffset(R2, THR, target::Thread::active_stacktrace_offset()); |
3541 | |
3542 | __ ret(); // Jump to the exception handler code. |
3543 | } |
3544 | |
3545 | // Deoptimize a frame on the call stack before rewinding. |
3546 | // The arguments are stored in the Thread object. |
3547 | // No result. |
3548 | void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) { |
3549 | // Push zap value instead of CODE_REG. |
3550 | __ LoadImmediate(TMP, kZapCodeReg); |
3551 | __ Push(TMP); |
3552 | |
3553 | // Load the deopt pc into LR. |
3554 | __ LoadFromOffset(LR, THR, target::Thread::resume_pc_offset()); |
3555 | GenerateDeoptimizationSequence(assembler, kEagerDeopt); |
3556 | |
3557 | // After we have deoptimized, jump to the correct frame. |
3558 | __ EnterStubFrame(); |
3559 | __ CallRuntime(kRewindPostDeoptRuntimeEntry, 0); |
3560 | __ LeaveStubFrame(); |
3561 | __ brk(0); |
3562 | } |
3563 | |
// Calls the runtime to optimize the given function.
3565 | // R6: function to be re-optimized. |
3566 | // R4: argument descriptor (preserved). |
3567 | void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) { |
3568 | __ LoadFromOffset(CODE_REG, THR, target::Thread::optimize_stub_offset()); |
3569 | __ EnterStubFrame(); |
3570 | __ Push(R4); |
3571 | // Setup space on stack for the return value. |
3572 | __ Push(ZR); |
3573 | __ Push(R6); |
3574 | __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1); |
3575 | __ Pop(R0); // Discard argument. |
3576 | __ Pop(R0); // Get Function object |
3577 | __ Pop(R4); // Restore argument descriptor. |
3578 | __ LoadFieldFromOffset(CODE_REG, R0, target::Function::code_offset()); |
3579 | __ LoadFieldFromOffset(R1, R0, target::Function::entry_point_offset()); |
3580 | __ LeaveStubFrame(); |
3581 | __ br(R1); |
3582 | __ brk(0); |
3583 | } |
3584 | |
// Does an identity check (are the object references equal or not?) with
// special checks for boxed numbers.
// Left and right are pushed on the stack.
// Returns with the Zero condition flag set if equal.
// Note: a Mint cannot contain a value that would fit in a Smi.
3590 | static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler, |
3591 | const Register left, |
3592 | const Register right) { |
3593 | Label reference_compare, done, check_mint; |
3594 | // If any of the arguments is Smi do reference compare. |
3595 | __ BranchIfSmi(left, &reference_compare); |
3596 | __ BranchIfSmi(right, &reference_compare); |
3597 | |
3598 | // Value compare for two doubles. |
3599 | __ CompareClassId(left, kDoubleCid); |
3600 | __ b(&check_mint, NE); |
3601 | __ CompareClassId(right, kDoubleCid); |
3602 | __ b(&done, NE); |
3603 | |
3604 | // Double values bitwise compare. |
3605 | __ LoadFieldFromOffset(left, left, target::Double::value_offset()); |
3606 | __ LoadFieldFromOffset(right, right, target::Double::value_offset()); |
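  // Branch to the register compare below, which now compares the raw 64-bit
  // double payloads rather than the object references.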
3607 | __ b(&reference_compare); |
3608 | |
3609 | __ Bind(&check_mint); |
3610 | __ CompareClassId(left, kMintCid); |
3611 | __ b(&reference_compare, NE); |
3612 | __ CompareClassId(right, kMintCid); |
3613 | __ b(&done, NE); |
3614 | __ LoadFieldFromOffset(left, left, target::Mint::value_offset()); |
3615 | __ LoadFieldFromOffset(right, right, target::Mint::value_offset()); |
3616 | |
3617 | __ Bind(&reference_compare); |
3618 | __ CompareRegisters(left, right); |
3619 | __ Bind(&done); |
3620 | } |
3621 | |
3622 | // Called only from unoptimized code. All relevant registers have been saved. |
3623 | // LR: return address. |
// SP + 8: left operand.
3625 | // SP + 0: right operand. |
3626 | // Return Zero condition flag set if equal. |
3627 | void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub( |
3628 | Assembler* assembler) { |
3629 | #if !defined(PRODUCT) |
3630 | // Check single stepping. |
3631 | Label stepping, done_stepping; |
3632 | __ LoadIsolate(R1); |
3633 | __ LoadFromOffset(R1, R1, target::Isolate::single_step_offset(), |
3634 | kUnsignedByte); |
3635 | __ CompareImmediate(R1, 0); |
3636 | __ b(&stepping, NE); |
3637 | __ Bind(&done_stepping); |
3638 | #endif |
3639 | |
3640 | const Register left = R1; |
3641 | const Register right = R0; |
3642 | __ LoadFromOffset(left, SP, 1 * target::kWordSize); |
3643 | __ LoadFromOffset(right, SP, 0 * target::kWordSize); |
3644 | GenerateIdenticalWithNumberCheckStub(assembler, left, right); |
3645 | __ ret(); |
3646 | |
3647 | #if !defined(PRODUCT) |
3648 | __ Bind(&stepping); |
3649 | __ EnterStubFrame(); |
3650 | __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); |
3651 | __ RestoreCodePointer(); |
3652 | __ LeaveStubFrame(); |
3653 | __ b(&done_stepping); |
3654 | #endif |
3655 | } |
3656 | |
3657 | // Called from optimized code only. |
3658 | // LR: return address. |
// SP + 8: left operand.
3660 | // SP + 0: right operand. |
3661 | // Return Zero condition flag set if equal. |
3662 | void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub( |
3663 | Assembler* assembler) { |
3664 | const Register left = R1; |
3665 | const Register right = R0; |
3666 | __ LoadFromOffset(left, SP, 1 * target::kWordSize); |
3667 | __ LoadFromOffset(right, SP, 0 * target::kWordSize); |
3668 | GenerateIdenticalWithNumberCheckStub(assembler, left, right); |
3669 | __ ret(); |
3670 | } |
3671 | |
3672 | // Called from megamorphic call sites. |
3673 | // R0: receiver (passed to target) |
3674 | // R5: MegamorphicCache (preserved) |
3675 | // Passed to target: |
3676 | // R0: receiver |
3677 | // CODE_REG: target Code |
3678 | // R4: arguments descriptor |
3679 | // R5: MegamorphicCache |
3680 | void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) { |
3681 | // Jump if receiver is a smi. |
3682 | Label smi_case; |
3683 | __ BranchIfSmi(R0, &smi_case); |
3684 | |
3685 | // Loads the cid of the object. |
3686 | __ LoadClassId(R8, R0); |
3687 | |
3688 | Label cid_loaded; |
3689 | __ Bind(&cid_loaded); |
3690 | __ ldr(R2, FieldAddress(R5, target::MegamorphicCache::buckets_offset())); |
3691 | __ ldr(R1, FieldAddress(R5, target::MegamorphicCache::mask_offset())); |
3692 | // R2: cache buckets array. |
3693 | // R1: mask as a smi. |
3694 | |
3695 | // Make the cid into a smi. |
3696 | __ SmiTag(R8); |
3697 | // R8: class ID of the receiver (smi). |
3698 | |
3699 | // Compute the table index. |
3700 | ASSERT(target::MegamorphicCache::kSpreadFactor == 7); |
3701 | // Use lsl and sub to multiply with 7 == 8 - 1. |
3702 | __ LslImmediate(R3, R8, 3); |
3703 | __ sub(R3, R3, Operand(R8)); |
3704 | // R3: probe. |
3705 | Label loop; |
3706 | __ Bind(&loop); |
3707 | __ and_(R3, R3, Operand(R1)); |
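  // R3: Smi-tagged probe index wrapped into the table by the mask; on a
  // failed probe the code below advances by one entry (linear probing).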
3708 | |
3709 | const intptr_t base = target::Array::data_offset(); |
3710 | // R3 is smi tagged, but table entries are 16 bytes, so LSL 3. |
3711 | __ add(TMP, R2, Operand(R3, LSL, 3)); |
3712 | __ ldr(R6, FieldAddress(TMP, base)); |
3713 | Label probe_failed; |
3714 | __ CompareRegisters(R6, R8); |
3715 | __ b(&probe_failed, NE); |
3716 | |
3717 | Label load_target; |
3718 | __ Bind(&load_target); |
3719 | // Call the target found in the cache. For a class id match, this is a |
3720 | // proper target for the given name and arguments descriptor. If the |
3721 | // illegal class id was found, the target is a cache miss handler that can |
3722 | // be invoked as a normal Dart function. |
3723 | const auto target_address = FieldAddress(TMP, base + target::kWordSize); |
3724 | if (FLAG_precompiled_mode && FLAG_use_bare_instructions) { |
3725 | __ ldr(R1, target_address); |
3726 | __ ldr( |
3727 | ARGS_DESC_REG, |
3728 | FieldAddress(R5, target::CallSiteData::arguments_descriptor_offset())); |
3729 | } else { |
3730 | __ ldr(R0, target_address); |
3731 | __ ldr(R1, FieldAddress(R0, target::Function::entry_point_offset())); |
3732 | __ ldr( |
3733 | ARGS_DESC_REG, |
3734 | FieldAddress(R5, target::CallSiteData::arguments_descriptor_offset())); |
3735 | __ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset())); |
3736 | } |
3737 | __ br(R1); |
3738 | |
3739 | // Probe failed, check if it is a miss. |
3740 | __ Bind(&probe_failed); |
3741 | ASSERT(kIllegalCid == 0); |
3742 | __ tst(R6, Operand(R6)); |
3743 | Label miss; |
3744 | __ b(&miss, EQ); // branch if miss. |
3745 | |
  // Try the next entry in the table.
3747 | __ AddImmediate(R3, target::ToRawSmi(1)); |
3748 | __ b(&loop); |
3749 | |
3750 | // Load cid for the Smi case. |
3751 | __ Bind(&smi_case); |
3752 | __ LoadImmediate(R8, kSmiCid); |
3753 | __ b(&cid_loaded); |
3754 | |
3755 | __ Bind(&miss); |
3756 | GenerateSwitchableCallMissStub(assembler); |
3757 | } |
3758 | |
3759 | // Input: |
3760 | // R0 - receiver |
3761 | // R5 - icdata |
3762 | void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) { |
3763 | Label loop, found, miss; |
3764 | __ ldr(R8, FieldAddress(R5, target::ICData::entries_offset())); |
3765 | __ ldr(R4, |
3766 | FieldAddress(R5, target::CallSiteData::arguments_descriptor_offset())); |
3767 | __ AddImmediate(R8, target::Array::data_offset() - kHeapObjectTag); |
3768 | // R8: first IC entry |
3769 | __ LoadTaggedClassIdMayBeSmi(R1, R0); |
3770 | // R1: receiver cid as Smi |
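  // Each IC entry begins with the receiver cid (as a Smi) and carries the
  // target's entry point and Code object at the fixed indices used below;
  // an entry with kIllegalCid terminates the list.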
3771 | |
3772 | __ Bind(&loop); |
3773 | __ ldr(R2, Address(R8, 0)); |
3774 | __ cmp(R1, Operand(R2)); |
3775 | __ b(&found, EQ); |
3776 | __ CompareImmediate(R2, target::ToRawSmi(kIllegalCid)); |
3777 | __ b(&miss, EQ); |
3778 | |
3779 | const intptr_t entry_length = |
3780 | target::ICData::TestEntryLengthFor(1, /*tracking_exactness=*/false) * |
3781 | target::kWordSize; |
3782 | __ AddImmediate(R8, entry_length); // Next entry. |
3783 | __ b(&loop); |
3784 | |
3785 | __ Bind(&found); |
3786 | const intptr_t code_offset = |
3787 | target::ICData::CodeIndexFor(1) * target::kWordSize; |
3788 | const intptr_t entry_offset = |
3789 | target::ICData::EntryPointIndexFor(1) * target::kWordSize; |
3790 | __ ldr(R1, Address(R8, entry_offset)); |
3791 | if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) { |
3792 | __ ldr(CODE_REG, Address(R8, code_offset)); |
3793 | } |
3794 | __ br(R1); |
3795 | |
3796 | __ Bind(&miss); |
3797 | __ LoadIsolate(R2); |
3798 | __ ldr(CODE_REG, Address(R2, target::Isolate::ic_miss_code_offset())); |
3799 | __ ldr(R1, FieldAddress(CODE_REG, target::Code::entry_point_offset())); |
3800 | __ br(R1); |
3801 | } |
3802 | |
3803 | // Implement the monomorphic entry check for call-sites where the receiver |
3804 | // might be a Smi. |
3805 | // |
3806 | // R0: receiver |
3807 | // R5: MonomorphicSmiableCall object |
3808 | // |
3809 | // R1: clobbered |
3810 | void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub( |
3811 | Assembler* assembler) { |
3812 | Label miss; |
3813 | __ LoadClassIdMayBeSmi(IP0, R0); |
3814 | |
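  // In bare-instructions mode no Code object needs to be set up for the
  // callee, so only the expected cid and entry point are loaded; otherwise
  // CODE_REG must also be populated.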
3815 | if (FLAG_use_bare_instructions) { |
3816 | __ LoadField( |
3817 | IP1, FieldAddress( |
3818 | R5, target::MonomorphicSmiableCall::expected_cid_offset())); |
3819 | __ LoadField( |
3820 | R1, |
3821 | FieldAddress(R5, target::MonomorphicSmiableCall::entrypoint_offset())); |
3822 | __ cmp(IP0, Operand(IP1)); |
3823 | __ b(&miss, NE); |
3824 | __ br(R1); |
3825 | } else { |
3826 | __ LoadField( |
3827 | IP1, FieldAddress( |
3828 | R5, target::MonomorphicSmiableCall::expected_cid_offset())); |
3829 | __ LoadField( |
3830 | CODE_REG, |
3831 | FieldAddress(R5, target::MonomorphicSmiableCall::target_offset())); |
3832 | __ LoadField( |
3833 | R1, |
3834 | FieldAddress(R5, target::MonomorphicSmiableCall::entrypoint_offset())); |
3835 | __ cmp(IP0, Operand(IP1)); |
3836 | __ b(&miss, NE); |
3837 | __ br(R1); |
3838 | } |
3839 | |
3840 | __ Bind(&miss); |
3841 | __ ldr(IP0, |
3842 | Address(THR, target::Thread::switchable_call_miss_entry_offset())); |
3843 | __ br(IP0); |
3844 | } |
3845 | |
3846 | // Called from switchable IC calls. |
3847 | // R0: receiver |
3848 | void StubCodeCompiler::GenerateSwitchableCallMissStub(Assembler* assembler) { |
3849 | __ ldr(CODE_REG, |
3850 | Address(THR, target::Thread::switchable_call_miss_stub_offset())); |
3851 | __ EnterStubFrame(); |
3852 | __ Push(R0); // Preserve receiver. |
3853 | |
3854 | __ Push(ZR); // Result slot. |
3855 | __ Push(ZR); // Arg0: stub out. |
3856 | __ Push(R0); // Arg1: Receiver |
3857 | __ CallRuntime(kSwitchableCallMissRuntimeEntry, 2); |
3858 | __ Drop(1); |
3859 | __ Pop(CODE_REG); // result = stub |
3860 | __ Pop(R5); // result = IC |
3861 | |
3862 | __ Pop(R0); // Restore receiver. |
3863 | __ LeaveStubFrame(); |
3864 | |
3865 | __ ldr(R1, FieldAddress(CODE_REG, target::Code::entry_point_offset( |
3866 | CodeEntryKind::kNormal))); |
3867 | __ br(R1); |
3868 | } |
3869 | |
3870 | // Called from switchable IC calls. |
3871 | // R0: receiver |
3872 | // R5: SingleTargetCache |
3873 | // Passed to target: |
3874 | // CODE_REG: target Code object |
3875 | void StubCodeCompiler::GenerateSingleTargetCallStub(Assembler* assembler) { |
3876 | Label miss; |
3877 | __ LoadClassIdMayBeSmi(R1, R0); |
3878 | __ ldr(R2, FieldAddress(R5, target::SingleTargetCache::lower_limit_offset()), |
3879 | kUnsignedHalfword); |
3880 | __ ldr(R3, FieldAddress(R5, target::SingleTargetCache::upper_limit_offset()), |
3881 | kUnsignedHalfword); |
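  // The cache encodes an inclusive [lower_limit, upper_limit] class id range;
  // any receiver cid within the range hits the single cached target.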
3882 | |
3883 | __ cmp(R1, Operand(R2)); |
3884 | __ b(&miss, LT); |
3885 | __ cmp(R1, Operand(R3)); |
3886 | __ b(&miss, GT); |
3887 | |
3888 | __ ldr(R1, FieldAddress(R5, target::SingleTargetCache::entry_point_offset())); |
3889 | __ ldr(CODE_REG, |
3890 | FieldAddress(R5, target::SingleTargetCache::target_offset())); |
3891 | __ br(R1); |
3892 | |
3893 | __ Bind(&miss); |
3894 | __ EnterStubFrame(); |
3895 | __ Push(R0); // Preserve receiver. |
3896 | |
3897 | __ Push(ZR); // Result slot. |
3898 | __ Push(ZR); // Arg0: Stub out. |
3899 | __ Push(R0); // Arg1: Receiver |
3900 | __ CallRuntime(kSwitchableCallMissRuntimeEntry, 2); |
3901 | __ Drop(1); |
3902 | __ Pop(CODE_REG); // result = stub |
3903 | __ Pop(R5); // result = IC |
3904 | |
3905 | __ Pop(R0); // Restore receiver. |
3906 | __ LeaveStubFrame(); |
3907 | |
3908 | __ ldr(R1, FieldAddress(CODE_REG, target::Code::entry_point_offset( |
3909 | CodeEntryKind::kMonomorphic))); |
3910 | __ br(R1); |
3911 | } |
3912 | |
3913 | void StubCodeCompiler::GenerateFrameAwaitingMaterializationStub( |
3914 | Assembler* assembler) { |
3915 | __ brk(0); |
3916 | } |
3917 | |
3918 | void StubCodeCompiler::GenerateAsynchronousGapMarkerStub(Assembler* assembler) { |
3919 | __ brk(0); |
3920 | } |
3921 | |
3922 | void StubCodeCompiler::GenerateNotLoadedStub(Assembler* assembler) { |
3923 | __ EnterStubFrame(); |
3924 | __ CallRuntime(kNotLoadedRuntimeEntry, 0); |
3925 | __ brk(0); |
3926 | } |
3927 | |
3928 | // Instantiate type arguments from instantiator and function type args. |
// R3: uninstantiated type arguments.
// R2: instantiator type arguments.
3931 | // R1: function type arguments. |
3932 | // Returns instantiated type arguments in R0. |
3933 | void StubCodeCompiler::GenerateInstantiateTypeArgumentsStub( |
3934 | Assembler* assembler) { |
3935 | // Lookup cache before calling runtime. |
3936 | __ LoadFieldFromOffset(R0, InstantiationABI::kUninstantiatedTypeArgumentsReg, |
3937 | target::TypeArguments::instantiations_offset()); |
3938 | __ AddImmediate(R0, Array::data_offset() - kHeapObjectTag); |
3939 | // The instantiations cache is initialized with Object::zero_array() and is |
3940 | // therefore guaranteed to contain kNoInstantiator. No length check needed. |
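  // Each cache entry is an (instantiator type args, function type args,
  // instantiated type args) triple, per the Instantiation indices used below;
  // kNoInstantiator in the first slot marks the end of the used entries.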
3941 | compiler::Label loop, next, found, call_runtime; |
3942 | __ Bind(&loop); |
3943 | |
  // Use load-acquire to test for the sentinel: if we find a non-sentinel
  // value, it is safe to access the other entries; if we find the sentinel,
  // we go to the runtime.
3946 | __ LoadAcquire(R5, R0, |
3947 | TypeArguments::Instantiation::kInstantiatorTypeArgsIndex * |
3948 | target::kWordSize); |
3949 | __ CompareImmediate(R5, Smi::RawValue(TypeArguments::kNoInstantiator)); |
3950 | __ b(&call_runtime, EQ); |
3951 | |
3952 | __ CompareRegisters(R5, InstantiationABI::kInstantiatorTypeArgumentsReg); |
3953 | __ b(&next, NE); |
3954 | __ LoadFromOffset( |
3955 | R4, R0, |
3956 | TypeArguments::Instantiation::kFunctionTypeArgsIndex * target::kWordSize); |
3957 | __ CompareRegisters(R4, InstantiationABI::kFunctionTypeArgumentsReg); |
3958 | __ b(&found, EQ); |
3959 | __ Bind(&next); |
3960 | __ AddImmediate( |
3961 | R0, TypeArguments::Instantiation::kSizeInWords * target::kWordSize); |
3962 | __ b(&loop); |
3963 | |
3964 | // Instantiate non-null type arguments. |
3965 | // A runtime call to instantiate the type arguments is required. |
3966 | __ Bind(&call_runtime); |
3967 | __ EnterStubFrame(); |
3968 | __ PushPair(InstantiationABI::kUninstantiatedTypeArgumentsReg, NULL_REG); |
3969 | __ PushPair(InstantiationABI::kFunctionTypeArgumentsReg, |
3970 | InstantiationABI::kInstantiatorTypeArgumentsReg); |
3971 | __ CallRuntime(kInstantiateTypeArgumentsRuntimeEntry, 3); |
  __ Drop(3); // Drop the 2 type argument vectors and the uninstantiated type.
3973 | __ Pop(InstantiationABI::kResultTypeArgumentsReg); |
3974 | __ LeaveStubFrame(); |
3975 | __ Ret(); |
3976 | |
3977 | __ Bind(&found); |
3978 | __ LoadFromOffset(InstantiationABI::kResultTypeArgumentsReg, R0, |
3979 | TypeArguments::Instantiation::kInstantiatedTypeArgsIndex * |
3980 | target::kWordSize); |
3981 | __ Ret(); |
3982 | } |
3983 | |
3984 | void StubCodeCompiler:: |
3985 | GenerateInstantiateTypeArgumentsMayShareInstantiatorTAStub( |
3986 | Assembler* assembler) { |
  // Return the instantiator type arguments if their nullability is compatible
  // for sharing; otherwise proceed to the instantiation cache lookup.
3989 | compiler::Label cache_lookup; |
3990 | __ LoadFieldFromOffset(R0, InstantiationABI::kUninstantiatedTypeArgumentsReg, |
3991 | target::TypeArguments::nullability_offset()); |
3992 | __ LoadFieldFromOffset(R4, InstantiationABI::kInstantiatorTypeArgumentsReg, |
3993 | target::TypeArguments::nullability_offset()); |
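  // Sharing is allowed when (instantiator & uninstantiated) == uninstantiated,
  // i.e. every nullability bit required by the uninstantiated vector is also
  // set in the instantiator's vector.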
3994 | __ and_(R4, R4, Operand(R0)); |
3995 | __ cmp(R4, Operand(R0)); |
3996 | __ b(&cache_lookup, NE); |
3997 | __ mov(InstantiationABI::kResultTypeArgumentsReg, |
3998 | InstantiationABI::kInstantiatorTypeArgumentsReg); |
3999 | __ Ret(); |
4000 | |
4001 | __ Bind(&cache_lookup); |
4002 | GenerateInstantiateTypeArgumentsStub(assembler); |
4003 | } |
4004 | |
4005 | void StubCodeCompiler::GenerateInstantiateTypeArgumentsMayShareFunctionTAStub( |
4006 | Assembler* assembler) { |
  // Return the function type arguments if their nullability is compatible for
  // sharing; otherwise proceed to the instantiation cache lookup.
4009 | compiler::Label cache_lookup; |
4010 | __ LoadFieldFromOffset(R0, InstantiationABI::kUninstantiatedTypeArgumentsReg, |
4011 | target::TypeArguments::nullability_offset()); |
4012 | __ LoadFieldFromOffset(R4, InstantiationABI::kFunctionTypeArgumentsReg, |
4013 | target::TypeArguments::nullability_offset()); |
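  // Same nullability superset check as in the instantiator-sharing stub
  // above, applied to the function type arguments.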
4014 | __ and_(R4, R4, Operand(R0)); |
4015 | __ cmp(R4, Operand(R0)); |
4016 | __ b(&cache_lookup, NE); |
4017 | __ mov(InstantiationABI::kResultTypeArgumentsReg, |
4018 | InstantiationABI::kFunctionTypeArgumentsReg); |
4019 | __ Ret(); |
4020 | |
4021 | __ Bind(&cache_lookup); |
4022 | GenerateInstantiateTypeArgumentsStub(assembler); |
4023 | } |
4024 | |
4025 | } // namespace compiler |
4026 | |
4027 | } // namespace dart |
4028 | |
4029 | #endif // defined(TARGET_ARCH_ARM64) |
4030 | |