// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"

// For `AllocateObjectInstr::WillAllocateNewOrRemembered`
#include "vm/compiler/backend/il.h"

#define SHOULD_NOT_INCLUDE_RUNTIME

#include "vm/compiler/stub_code_compiler.h"

#if defined(TARGET_ARCH_IA32)

#include "vm/class_id.h"
#include "vm/code_entry_kind.h"
#include "vm/compiler/api/type_check_mode.h"
#include "vm/compiler/assembler/assembler.h"
#include "vm/compiler/backend/locations.h"
#include "vm/constants.h"
#include "vm/instructions.h"
#include "vm/static_type_exactness_state.h"
#include "vm/tags.h"

#define __ assembler->

namespace dart {

DEFINE_FLAG(bool, inline_alloc, true, "Inline allocation of objects.");
DEFINE_FLAG(bool,
            use_slow_path,
            false,
            "Set to true for debugging & verifying the slow paths.");

namespace compiler {

// Ensures that [EAX] is a new object; if it is not, it will be added to the
// remembered set via a leaf runtime call.
//
// WARNING: This might clobber all registers except for [EAX], [THR] and [FP].
// The caller should simply call LeaveFrame() and return.
static void EnsureIsNewOrRemembered(Assembler* assembler,
                                    bool preserve_registers = true) {
  // If the object is not remembered we call a leaf runtime to add it to the
  // remembered set.
  Label done;
  __ testl(EAX, Immediate(1 << target::ObjectAlignment::kNewObjectBitPosition));
  __ BranchIf(NOT_ZERO, &done);

  if (preserve_registers) {
    __ EnterCallRuntimeFrame(2 * target::kWordSize);
  } else {
    __ ReserveAlignedFrameSpace(2 * target::kWordSize);
  }
  __ movl(Address(ESP, 1 * target::kWordSize), THR);
  __ movl(Address(ESP, 0 * target::kWordSize), EAX);
  __ CallRuntime(kEnsureRememberedAndMarkingDeferredRuntimeEntry, 2);
  if (preserve_registers) {
    __ LeaveCallRuntimeFrame();
  }

  __ Bind(&done);
}
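
// Typical use (see GenerateAllocateArrayStub and GenerateAllocateContextStub
// below), sketched here for illustration only -- the actual call sites differ
// slightly in how they pop their arguments:
//
//   __ CallRuntime(kAllocateArrayRuntimeEntry, 2);
//   ...                                  // Pop arguments.
//   __ popl(EAX);                        // Newly allocated object.
//   EnsureIsNewOrRemembered(assembler);  // Before LeaveFrame()/ret().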

// Input parameters:
//   ESP : points to return address.
//   ESP + 4 : address of last argument in argument array.
//   ESP + 4*EDX : address of first argument in argument array.
//   ESP + 4*EDX + 4 : address of return value.
//   ECX : address of the runtime function to call.
//   EDX : number of arguments to the call.
// Must preserve callee saved registers EDI and EBX.
void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
  const intptr_t thread_offset = target::NativeArguments::thread_offset();
  const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
  const intptr_t argv_offset = target::NativeArguments::argv_offset();
  const intptr_t retval_offset = target::NativeArguments::retval_offset();

  __ movl(CODE_REG,
          Address(THR, target::Thread::call_to_runtime_stub_offset()));
  __ EnterStubFrame();

  // Save exit frame information to enable stack walking as we are about
  // to transition to Dart VM C++ code.
  __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()), EBP);

  // Mark that the thread exited generated code through a runtime call.
  __ movl(Address(THR, target::Thread::exit_through_ffi_offset()),
          Immediate(target::Thread::exit_through_runtime_call()));

#if defined(DEBUG)
  {
    Label ok;
    // Check that we are always entering from Dart code.
    __ cmpl(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));
    __ j(EQUAL, &ok, Assembler::kNearJump);
    __ Stop("Not coming from Dart code.");
    __ Bind(&ok);
  }
#endif

  // Mark that the thread is executing VM code.
  __ movl(Assembler::VMTagAddress(), ECX);

  // Reserve space for arguments and align frame before entering C++ world.
  __ AddImmediate(
      ESP,
      Immediate(-static_cast<int32_t>(target::NativeArguments::StructSize())));
  if (OS::ActivationFrameAlignment() > 1) {
    __ andl(ESP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
  }

  // Pass NativeArguments structure by value and call runtime.
  __ movl(Address(ESP, thread_offset), THR);  // Set thread in NativeArgs.
  // There are no runtime calls to closures, so we do not need to set the tag
  // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_.
  __ movl(Address(ESP, argc_tag_offset), EDX);  // Set argc in NativeArguments.
  // Compute argv.
  __ leal(EAX,
          Address(EBP, EDX, TIMES_4,
                  target::frame_layout.param_end_from_fp * target::kWordSize));
  __ movl(Address(ESP, argv_offset), EAX);  // Set argv in NativeArguments.
  __ addl(EAX,
          Immediate(1 * target::kWordSize));  // Retval is next to 1st argument.
  __ movl(Address(ESP, retval_offset), EAX);  // Set retval in NativeArguments.
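  // For illustration: with EDX == 2 arguments, argv now points at
  // FP + (param_end_from_fp + 2) * kWordSize, i.e. at the first
  // (highest-address) argument, and retval is the stack slot just above it.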
  __ call(ECX);

  __ movl(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));

  // Mark that the thread has not exited generated Dart code.
  __ movl(Address(THR, target::Thread::exit_through_ffi_offset()),
          Immediate(0));

  // Reset exit frame information in Isolate's mutator thread structure.
  __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()),
          Immediate(0));

  __ LeaveFrame();

  // The following return can jump to a lazy-deopt stub, which assumes EAX
  // contains a return value and will save it in a GC-visible way. We therefore
  // have to ensure EAX does not contain any garbage value left from the C
  // function we called (which has return type "void").
  // (See GenerateDeoptimizationSequence::saved_result_slot_from_fp.)
  __ xorl(EAX, EAX);
  __ ret();
}

void StubCodeCompiler::GenerateEnterSafepointStub(Assembler* assembler) {
  __ pushal();
  __ subl(SPREG, Immediate(8));
  __ movsd(Address(SPREG, 0), XMM0);

  __ EnterFrame(0);
  __ ReserveAlignedFrameSpace(0);
  __ movl(EAX, Address(THR, kEnterSafepointRuntimeEntry.OffsetFromThread()));
  __ call(EAX);
  __ LeaveFrame();

  __ movsd(XMM0, Address(SPREG, 0));
  __ addl(SPREG, Immediate(8));
  __ popal();
  __ ret();
}

void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
  __ pushal();
  __ subl(SPREG, Immediate(8));
  __ movsd(Address(SPREG, 0), XMM0);

  __ EnterFrame(0);
  __ ReserveAlignedFrameSpace(0);

  // Set the execution state to VM while waiting for the safepoint to end.
  // This isn't strictly necessary but enables tests to check that we're not
  // in native code anymore. See tests/ffi/function_gc_test.dart for an
  // example.
  __ movl(Address(THR, target::Thread::execution_state_offset()),
          Immediate(target::Thread::vm_execution_state()));

  __ movl(EAX, Address(THR, kExitSafepointRuntimeEntry.OffsetFromThread()));
  __ call(EAX);
  __ LeaveFrame();

  __ movsd(XMM0, Address(SPREG, 0));
  __ addl(SPREG, Immediate(8));
  __ popal();
  __ ret();
}

// Calls a native function inside a safepoint.
//
// On entry:
//   Stack: set up for native call
//   EAX: target to call
//
// On exit:
//   Stack: preserved
//   EBX: clobbered (even though it's normally callee-saved)
void StubCodeCompiler::GenerateCallNativeThroughSafepointStub(
    Assembler* assembler) {
  __ popl(EBX);

  __ movl(ECX, compiler::Immediate(target::Thread::exit_through_ffi()));
  __ TransitionGeneratedToNative(EAX, FPREG, ECX /*volatile*/,
                                 /*enter_safepoint=*/true);
  __ call(EAX);
  __ TransitionNativeToGenerated(ECX /*volatile*/, /*leave_safepoint=*/true);

  __ jmp(EBX);
}
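
// Note the pattern above: the stub pops its own return address into EBX,
// performs the native call between the two thread-state transitions, and
// finally jumps back through EBX. This is why EBX is documented as clobbered
// even though the C ABI treats it as callee-saved.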

void StubCodeCompiler::GenerateJITCallbackTrampolines(
    Assembler* assembler,
    intptr_t next_callback_id) {
  Label done;

  // EAX is volatile and doesn't hold any arguments.
  COMPILE_ASSERT(!IsArgumentRegister(EAX) && !IsCalleeSavedRegister(EAX));

  for (intptr_t i = 0;
       i < NativeCallbackTrampolines::NumCallbackTrampolinesPerPage(); ++i) {
    __ movl(EAX, compiler::Immediate(next_callback_id + i));
    __ jmp(&done);
  }

  ASSERT(__ CodeSize() ==
         kNativeCallbackTrampolineSize *
             NativeCallbackTrampolines::NumCallbackTrampolinesPerPage());
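
  // Layout of the trampoline page, for illustration
  // (N = NumCallbackTrampolinesPerPage()):
  //
  //   trampoline 0:   movl EAX, id + 0;     jmp done
  //   trampoline 1:   movl EAX, id + 1;     jmp done
  //   ...
  //   trampoline N-1: movl EAX, id + N - 1; jmp done
  //   done:           <shared stub below>
  //
  // Each entry occupies exactly kNativeCallbackTrampolineSize bytes, which is
  // what the ASSERT above checks.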

  __ Bind(&done);

  const intptr_t shared_stub_start = __ CodeSize();

  // Save THR which is callee-saved.
  __ pushl(THR);

  // THR & return address.
  COMPILE_ASSERT(StubCodeCompiler::kNativeCallbackTrampolineStackDelta == 2);

  // Load the thread, verify the callback ID and exit the safepoint.
  //
  // We exit the safepoint inside DLRT_GetThreadForNativeCallbackTrampoline
  // in order to save code size on this shared stub.
  {
    __ EnterFrame(0);
    __ ReserveAlignedFrameSpace(compiler::target::kWordSize);

    __ movl(compiler::Address(SPREG, 0), EAX);
    __ movl(EAX, compiler::Immediate(reinterpret_cast<intptr_t>(
                     DLRT_GetThreadForNativeCallbackTrampoline)));
    __ call(EAX);
    __ movl(THR, EAX);
    __ movl(EAX, compiler::Address(SPREG, 0));

    __ LeaveFrame();
  }

  COMPILE_ASSERT(!IsCalleeSavedRegister(ECX) && !IsArgumentRegister(ECX));
  COMPILE_ASSERT(ECX != THR);

  // Load the target from the thread.
  __ movl(ECX, compiler::Address(
                   THR, compiler::target::Thread::callback_code_offset()));
  __ movl(ECX, compiler::FieldAddress(
                   ECX, compiler::target::GrowableObjectArray::data_offset()));
  __ movl(ECX, __ ElementAddressForRegIndex(
                   /*external=*/false,
                   /*array_cid=*/kArrayCid,
                   /*index, smi-tagged=*/compiler::target::kWordSize * 2,
                   /*index_unboxed=*/false,
                   /*array=*/ECX,
                   /*index=*/EAX));
  __ movl(ECX, compiler::FieldAddress(
                   ECX, compiler::target::Code::entry_point_offset()));

  // On entry to the function, there will be two extra slots on the stack:
  // the saved THR and the return address. The target will know to skip them.
  __ call(ECX);

  // EnterSafepoint takes care to not clobber *any* registers (besides
  // scratch).
  __ EnterSafepoint(/*scratch=*/ECX);

  // Restore THR (callee-saved).
  __ popl(THR);

  __ ret();

  // 'kNativeCallbackSharedStubSize' is an upper bound because the exact
  // instruction size can vary slightly based on OS calling conventions.
  ASSERT((__ CodeSize() - shared_stub_start) <= kNativeCallbackSharedStubSize);
  ASSERT(__ CodeSize() <= VirtualMemory::PageSize());

#if defined(DEBUG)
  while (__ CodeSize() < VirtualMemory::PageSize()) {
    __ Breakpoint();
  }
#endif
}

void StubCodeCompiler::GenerateDispatchTableNullErrorStub(
    Assembler* assembler) {
  // Only used in AOT.
  __ Breakpoint();
}

void StubCodeCompiler::GenerateNullErrorSharedWithoutFPURegsStub(
    Assembler* assembler) {
  __ Breakpoint();
}

void StubCodeCompiler::GenerateNullErrorSharedWithFPURegsStub(
    Assembler* assembler) {
  __ Breakpoint();
}

void StubCodeCompiler::GenerateNullArgErrorSharedWithoutFPURegsStub(
    Assembler* assembler) {
  __ Breakpoint();
}

void StubCodeCompiler::GenerateNullArgErrorSharedWithFPURegsStub(
    Assembler* assembler) {
  __ Breakpoint();
}

void StubCodeCompiler::GenerateNullCastErrorSharedWithoutFPURegsStub(
    Assembler* assembler) {
  __ Breakpoint();
}

void StubCodeCompiler::GenerateNullCastErrorSharedWithFPURegsStub(
    Assembler* assembler) {
  __ Breakpoint();
}

void StubCodeCompiler::GenerateRangeErrorSharedWithoutFPURegsStub(
    Assembler* assembler) {
  __ Breakpoint();
}

void StubCodeCompiler::GenerateRangeErrorSharedWithFPURegsStub(
    Assembler* assembler) {
  __ Breakpoint();
}

void StubCodeCompiler::GenerateStackOverflowSharedWithoutFPURegsStub(
    Assembler* assembler) {
  // TODO(sjindel): implement.
  __ Breakpoint();
}

void StubCodeCompiler::GenerateStackOverflowSharedWithFPURegsStub(
    Assembler* assembler) {
  // TODO(sjindel): implement.
  __ Breakpoint();
}

// Input parameters:
//   ESP : points to return address.
//   ESP + 4 : address of return value.
//   EAX : address of first argument in argument array.
//   ECX : address of the native function to call.
//   EDX : argc_tag including number of arguments and function kind.
static void GenerateCallNativeWithWrapperStub(Assembler* assembler,
                                              Address wrapper_address) {
  const intptr_t native_args_struct_offset =
      target::NativeEntry::kNumCallWrapperArguments * target::kWordSize;
  const intptr_t thread_offset =
      target::NativeArguments::thread_offset() + native_args_struct_offset;
  const intptr_t argc_tag_offset =
      target::NativeArguments::argc_tag_offset() + native_args_struct_offset;
  const intptr_t argv_offset =
      target::NativeArguments::argv_offset() + native_args_struct_offset;
  const intptr_t retval_offset =
      target::NativeArguments::retval_offset() + native_args_struct_offset;

  __ EnterStubFrame();

  // Save exit frame information to enable stack walking as we are about
  // to transition to Dart VM code.
  __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()), EBP);

  // Mark that the thread exited generated code through a runtime call.
  __ movl(Address(THR, target::Thread::exit_through_ffi_offset()),
          Immediate(target::Thread::exit_through_runtime_call()));

#if defined(DEBUG)
  {
    Label ok;
    // Check that we are always entering from Dart code.
    __ cmpl(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));
    __ j(EQUAL, &ok, Assembler::kNearJump);
    __ Stop("Not coming from Dart code.");
    __ Bind(&ok);
  }
#endif

  // Mark that the thread is executing native code.
  __ movl(Assembler::VMTagAddress(), ECX);

  // Reserve space for the native arguments structure, the outgoing parameters
  // (pointer to the native arguments structure, the C function entry point)
  // and align frame before entering the C++ world.
  __ AddImmediate(
      ESP,
      Immediate(-static_cast<int32_t>(target::NativeArguments::StructSize()) -
                (2 * target::kWordSize)));
  if (OS::ActivationFrameAlignment() > 1) {
    __ andl(ESP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
  }

  // Pass NativeArguments structure by value and call native function.
  __ movl(Address(ESP, thread_offset), THR);    // Set thread in NativeArgs.
  __ movl(Address(ESP, argc_tag_offset), EDX);  // Set argc in NativeArguments.
  __ movl(Address(ESP, argv_offset), EAX);      // Set argv in NativeArguments.
  __ leal(EAX,
          Address(EBP, 2 * target::kWordSize));  // Compute return value addr.
  __ movl(Address(ESP, retval_offset), EAX);  // Set retval in NativeArguments.
  __ leal(
      EAX,
      Address(ESP, 2 * target::kWordSize));  // Pointer to the NativeArguments.
  __ movl(Address(ESP, 0), EAX);  // Pass the pointer to the NativeArguments.

  __ movl(Address(ESP, target::kWordSize), ECX);  // Function to call.
  __ call(wrapper_address);

  __ movl(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));

  // Mark that the thread has not exited generated Dart code.
  __ movl(Address(THR, target::Thread::exit_through_ffi_offset()),
          Immediate(0));

  // Reset exit frame information in Isolate's mutator thread structure.
  __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()),
          Immediate(0));

  __ LeaveFrame();
  __ ret();
}

void StubCodeCompiler::GenerateCallNoScopeNativeStub(Assembler* assembler) {
  GenerateCallNativeWithWrapperStub(
      assembler,
      Address(THR,
              target::Thread::no_scope_native_wrapper_entry_point_offset()));
}

void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
  GenerateCallNativeWithWrapperStub(
      assembler,
      Address(THR,
              target::Thread::auto_scope_native_wrapper_entry_point_offset()));
}

// Input parameters:
//   ESP : points to return address.
//   ESP + 4 : address of return value.
//   EAX : address of first argument in argument array.
//   ECX : address of the native function to call.
//   EDX : argc_tag including number of arguments and function kind.
void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
  GenerateCallNativeWithWrapperStub(
      assembler,
      Address(THR,
              target::Thread::bootstrap_native_wrapper_entry_point_offset()));
}

// Input parameters:
//   EDX: arguments descriptor array.
void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
  __ EnterStubFrame();
  __ pushl(EDX);           // Preserve arguments descriptor array.
  __ pushl(Immediate(0));  // Set up space on stack for return value.
  __ CallRuntime(kPatchStaticCallRuntimeEntry, 0);
  __ popl(EAX);  // Get Code object result.
  __ popl(EDX);  // Restore arguments descriptor array.
  // Remove the stub frame as we are about to jump to the Dart function.
  __ LeaveFrame();

  __ jmp(FieldAddress(EAX, target::Code::entry_point_offset()));
}

// Called from a static call only when invalid code has been entered
// (invalid because its function was optimized or deoptimized).
// EDX: arguments descriptor array.
void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
  Label monomorphic;
  __ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);

  // This was a static call.
  __ EnterStubFrame();
  __ pushl(EDX);           // Preserve arguments descriptor array.
  __ pushl(Immediate(0));  // Set up space on stack for return value.
  __ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
  __ popl(EAX);  // Get Code object.
  __ popl(EDX);  // Restore arguments descriptor array.
  __ movl(EAX, FieldAddress(EAX, target::Code::entry_point_offset()));
  __ LeaveFrame();
  __ jmp(EAX);
  __ int3();

  __ Bind(&monomorphic);
  // This was a switchable call.
  __ EnterStubFrame();
  __ pushl(ECX);           // Preserve cache (guarded CID as Smi).
  __ pushl(EBX);           // Preserve receiver.
  __ pushl(Immediate(0));  // Result slot.
  __ CallRuntime(kFixCallersTargetMonomorphicRuntimeEntry, 0);
  __ popl(CODE_REG);  // Get Code object.
  __ popl(EBX);       // Restore receiver.
  __ popl(ECX);       // Restore cache (guarded CID as Smi).
  __ movl(EAX, FieldAddress(CODE_REG, target::Code::entry_point_offset(
                                          CodeEntryKind::kMonomorphic)));
  __ LeaveFrame();
  __ jmp(EAX);
  __ int3();
}

// Called from the object allocation instruction when the allocation stub has
// been disabled.
void StubCodeCompiler::GenerateFixAllocationStubTargetStub(
    Assembler* assembler) {
  __ EnterStubFrame();
  __ pushl(Immediate(0));  // Set up space on stack for return value.
  __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
  __ popl(EAX);  // Get Code object.
  __ movl(EAX, FieldAddress(EAX, target::Code::entry_point_offset()));
  __ LeaveFrame();
  __ jmp(EAX);
  __ int3();
}

// Input parameters:
//   EDX: smi-tagged argument count, may be zero.
//   EBP[target::frame_layout.param_end_from_fp + 1]: last argument.
// Uses EAX, EBX, ECX, EDX, EDI.
static void PushArrayOfArguments(Assembler* assembler) {
  // Allocate array to store arguments of caller.
  const Immediate& raw_null = Immediate(target::ToRawPointer(NullObject()));
  __ movl(ECX, raw_null);  // Null element type for raw Array.
  __ Call(StubCodeAllocateArray());
  __ SmiUntag(EDX);
  // EAX: newly allocated array.
  // EDX: length of the array (was preserved by the stub).
  __ pushl(EAX);  // Array is in EAX and on top of stack.
  __ leal(EBX,
          Address(EBP, EDX, TIMES_4,
                  target::frame_layout.param_end_from_fp * target::kWordSize));
  __ leal(ECX, FieldAddress(EAX, target::Array::data_offset()));
  // EBX: address of first argument on stack.
  // ECX: address of first argument in array.
  Label loop, loop_condition;
  __ jmp(&loop_condition, Assembler::kNearJump);
  __ Bind(&loop);
  __ movl(EDI, Address(EBX, 0));
  // A generational barrier is needed; the array is not necessarily in new
  // space.
  __ StoreIntoObject(EAX, Address(ECX, 0), EDI);
  __ AddImmediate(ECX, Immediate(target::kWordSize));
  __ AddImmediate(EBX, Immediate(-target::kWordSize));
  __ Bind(&loop_condition);
  __ decl(EDX);
  __ j(POSITIVE, &loop, Assembler::kNearJump);
}
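
// For illustration: with EDX == Smi(3), the loop above copies three caller
// arguments into array slots 0..2. EBX starts at the address of the first
// argument (the highest stack address) and walks down, while ECX walks up
// through the array data, so argument i ends up in array[i].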

// Used by eager and lazy deoptimization. Preserve result in EAX if necessary.
// This stub translates an optimized frame into an unoptimized frame. The
// optimized frame can contain values in registers and on the stack; the
// unoptimized frame contains all values on the stack.
// Deoptimization occurs in the following steps:
// - Push all registers that can contain values.
// - Call C routine to copy the stack and saved registers into temporary buffer.
// - Adjust caller's frame to correct unoptimized frame size.
// - Fill the unoptimized frame.
// - Materialize objects that require allocation (e.g. Double instances).
// GC can occur only after the frame is fully rewritten.
// Stack after EnterDartFrame(0) below:
//   +------------------+
//   | PC marker        |  <- TOS
//   +------------------+
//   | Saved FP         |  <- FP of stub
//   +------------------+
//   | return-address   |  (deoptimization point)
//   +------------------+
//   | ...              |  <- SP of optimized frame
//
// Parts of the code cannot GC, while other parts can.
static void GenerateDeoptimizationSequence(Assembler* assembler,
                                           DeoptStubKind kind) {
  // Leaf runtime function DeoptimizeCopyFrame expects a Dart frame.
  __ EnterDartFrame(0);
  // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry
  // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
  const intptr_t saved_result_slot_from_fp =
      target::frame_layout.first_local_from_fp + 1 -
      (kNumberOfCpuRegisters - EAX);
  const intptr_t saved_exception_slot_from_fp =
      target::frame_layout.first_local_from_fp + 1 -
      (kNumberOfCpuRegisters - EAX);
  const intptr_t saved_stacktrace_slot_from_fp =
      target::frame_layout.first_local_from_fp + 1 -
      (kNumberOfCpuRegisters - EDX);
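  // For illustration: on IA32 kNumberOfCpuRegisters is 8 and EAX is register
  // number 0, so saved_result_slot_from_fp is first_local_from_fp - 7, i.e.
  // the slot of the last register pushed by the loop below (registers are
  // pushed from the highest register number down to EAX).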
  // Result in EAX is preserved as part of pushing all registers below.

  // Push registers in their enumeration order: lowest register number at
  // lowest address.
  for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; i--) {
    if (i == CODE_REG) {
      // Save the original value of CODE_REG pushed before invoking this stub
      // instead of the value used to call this stub.
      __ pushl(Address(EBP, 2 * target::kWordSize));
    } else {
      __ pushl(static_cast<Register>(i));
    }
  }
  __ subl(ESP, Immediate(kNumberOfXmmRegisters * kFpuRegisterSize));
  intptr_t offset = 0;
  for (intptr_t reg_idx = 0; reg_idx < kNumberOfXmmRegisters; ++reg_idx) {
    XmmRegister xmm_reg = static_cast<XmmRegister>(reg_idx);
    __ movups(Address(ESP, offset), xmm_reg);
    offset += kFpuRegisterSize;
  }

  __ movl(ECX, ESP);  // Preserve saved registers block.
  __ ReserveAlignedFrameSpace(2 * target::kWordSize);
  __ movl(Address(ESP, 0 * target::kWordSize),
          ECX);  // Start of register block.
  bool is_lazy =
      (kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow);
  __ movl(Address(ESP, 1 * target::kWordSize), Immediate(is_lazy ? 1 : 0));
  __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2);
  // Result (EAX) is stack-size (FP - SP) in bytes.

  if (kind == kLazyDeoptFromReturn) {
    // Restore result into EBX temporarily.
    __ movl(EBX, Address(EBP, saved_result_slot_from_fp * target::kWordSize));
  } else if (kind == kLazyDeoptFromThrow) {
    // Restore exception into EBX and stacktrace into ECX temporarily.
    __ movl(EBX,
            Address(EBP, saved_exception_slot_from_fp * target::kWordSize));
    __ movl(ECX,
            Address(EBP, saved_stacktrace_slot_from_fp * target::kWordSize));
  }

  __ LeaveFrame();
  __ popl(EDX);       // Preserve return address.
  __ movl(ESP, EBP);  // Discard optimized frame.
  __ subl(ESP, EAX);  // Reserve space for deoptimized frame.
  __ pushl(EDX);      // Restore return address.

  // Leaf runtime function DeoptimizeFillFrame expects a Dart frame.
  __ EnterDartFrame(0);
  if (kind == kLazyDeoptFromReturn) {
    __ pushl(EBX);  // Preserve result as first local.
  } else if (kind == kLazyDeoptFromThrow) {
    __ pushl(EBX);  // Preserve exception as first local.
    __ pushl(ECX);  // Preserve stacktrace as second local.
  }
  __ ReserveAlignedFrameSpace(1 * target::kWordSize);
  __ movl(Address(ESP, 0), EBP);  // Pass last FP as parameter on stack.
  __ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1);
  if (kind == kLazyDeoptFromReturn) {
    // Restore result into EBX.
    __ movl(EBX, Address(EBP, target::frame_layout.first_local_from_fp *
                                  target::kWordSize));
  } else if (kind == kLazyDeoptFromThrow) {
    // Restore exception into EBX and stacktrace into ECX.
    __ movl(EBX, Address(EBP, target::frame_layout.first_local_from_fp *
                                  target::kWordSize));
    __ movl(ECX, Address(EBP, (target::frame_layout.first_local_from_fp - 1) *
                                  target::kWordSize));
  }
  // Code above cannot cause GC.
  __ LeaveFrame();

  // Frame is fully rewritten at this point and it is safe to perform a GC.
  // Materialize any objects that were deferred by FillFrame because they
  // require allocation.
  __ EnterStubFrame();
  if (kind == kLazyDeoptFromReturn) {
    __ pushl(EBX);  // Preserve result; it will be GC-d here.
  } else if (kind == kLazyDeoptFromThrow) {
    __ pushl(EBX);  // Preserve exception; it will be GC-d here.
    __ pushl(ECX);  // Preserve stacktrace; it will be GC-d here.
  }
  __ pushl(Immediate(target::ToRawSmi(0)));  // Space for the result.
  __ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0);
  // Result tells stub how many bytes to remove from the expression stack
  // of the bottom-most frame. They were used as materialization arguments.
  __ popl(EBX);
  __ SmiUntag(EBX);
  if (kind == kLazyDeoptFromReturn) {
    __ popl(EAX);  // Restore result.
  } else if (kind == kLazyDeoptFromThrow) {
    __ popl(EDX);  // Restore stacktrace.
    __ popl(EAX);  // Restore exception.
  }
  __ LeaveFrame();

  __ popl(ECX);       // Pop return address.
  __ addl(ESP, EBX);  // Remove materialization arguments.
  __ pushl(ECX);      // Push return address.
  // The caller is responsible for emitting the return instruction.
}

// EAX: result, must be preserved
void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
    Assembler* assembler) {
  // Return address for "call" to deopt stub.
  __ pushl(Immediate(kZapReturnAddress));
  GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
  __ ret();
}

// EAX: exception, must be preserved
// EDX: stacktrace, must be preserved
void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub(
    Assembler* assembler) {
  // Return address for "call" to deopt stub.
  __ pushl(Immediate(kZapReturnAddress));
  GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
  __ ret();
}

void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) {
  GenerateDeoptimizationSequence(assembler, kEagerDeopt);
  __ ret();
}

static void GenerateNoSuchMethodDispatcherCode(Assembler* assembler) {
  __ EnterStubFrame();
  __ movl(EDX, FieldAddress(
                   ECX, target::CallSiteData::arguments_descriptor_offset()));

  // Load the receiver.
  __ movl(EDI, FieldAddress(EDX, target::ArgumentsDescriptor::size_offset()));
  __ movl(EAX,
          Address(EBP, EDI, TIMES_HALF_WORD_SIZE,
                  target::frame_layout.param_end_from_fp * target::kWordSize));
  __ pushl(Immediate(0));  // Set up space on stack for result.
  __ pushl(EAX);           // Receiver.
  __ pushl(ECX);           // ICData/MegamorphicCache.
  __ pushl(EDX);           // Arguments descriptor array.

  // Adjust arguments count.
  __ cmpl(
      FieldAddress(EDX, target::ArgumentsDescriptor::type_args_len_offset()),
      Immediate(0));
  __ movl(EDX, EDI);
  Label args_count_ok;
  __ j(EQUAL, &args_count_ok, Assembler::kNearJump);
  __ addl(EDX, Immediate(target::ToRawSmi(1)));  // Include the type arguments.
  __ Bind(&args_count_ok);

  // EDX: Smi-tagged arguments array length.
  PushArrayOfArguments(assembler);
  const intptr_t kNumArgs = 4;
  __ CallRuntime(kNoSuchMethodFromCallStubRuntimeEntry, kNumArgs);
  __ Drop(4);
  __ popl(EAX);  // Return value.
  __ LeaveFrame();
  __ ret();
}

static void GenerateDispatcherCode(Assembler* assembler,
                                   Label* call_target_function) {
  __ Comment("NoSuchMethodDispatch");
  // When lazily generated invocation dispatchers are disabled, the
  // miss-handler may return null.
  const Immediate& raw_null = Immediate(target::ToRawPointer(NullObject()));
  __ cmpl(EAX, raw_null);
  __ j(NOT_EQUAL, call_target_function);
  GenerateNoSuchMethodDispatcherCode(assembler);
}

void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
    Assembler* assembler) {
  GenerateNoSuchMethodDispatcherCode(assembler);
}

// Called for inline allocation of arrays.
// Input parameters:
//   EDX : Array length as Smi (must be preserved).
//   ECX : array element type (either NULL or an instantiated type).
// Uses EAX, EBX, ECX, EDI as temporary registers.
// The newly allocated object is returned in EAX.
void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
  Label slow_case;
  // Compute the size to be allocated; it is based on the array length
  // and is computed as:
  // RoundedAllocationSize(
  //     (array_length * kWordSize) + target::Array::header_size()).
  // Assert that length is a Smi.
  __ testl(EDX, Immediate(kSmiTagMask));

  if (!FLAG_use_slow_path) {
    __ j(NOT_ZERO, &slow_case);

    __ cmpl(EDX, Immediate(0));
    __ j(LESS, &slow_case);

    // Check for maximum allowed length.
    const Immediate& max_len =
        Immediate(target::ToRawSmi(target::Array::kMaxNewSpaceElements));
    __ cmpl(EDX, max_len);
    __ j(GREATER, &slow_case);

    NOT_IN_PRODUCT(__ MaybeTraceAllocation(kArrayCid, EAX, &slow_case,
                                           Assembler::kFarJump));

    const intptr_t fixed_size_plus_alignment_padding =
        target::Array::header_size() +
        target::ObjectAlignment::kObjectAlignment - 1;
    // EDX is Smi.
    __ leal(EBX, Address(EDX, TIMES_2, fixed_size_plus_alignment_padding));
    ASSERT(kSmiTagShift == 1);
    __ andl(EBX, Immediate(-target::ObjectAlignment::kObjectAlignment));
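
    // For illustration: a Smi-tagged length is length << 1, so scaling EDX by
    // TIMES_2 yields length * kWordSize (length * 4 on IA32); adding
    // header_size plus (kObjectAlignment - 1) and masking with
    // -kObjectAlignment rounds the allocation size up to the alignment
    // boundary.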

    // ECX: array element type.
    // EDX: array length as Smi.
    // EBX: allocation size.

    const intptr_t cid = kArrayCid;
    __ movl(EAX, Address(THR, target::Thread::top_offset()));
    __ addl(EBX, EAX);
    __ j(CARRY, &slow_case);

    // Check if the allocation fits into the remaining space.
    // EAX: potential new object start.
    // EBX: potential next object start.
    // ECX: array element type.
    // EDX: array length as Smi.
    __ cmpl(EBX, Address(THR, target::Thread::end_offset()));
    __ j(ABOVE_EQUAL, &slow_case);

    // Successfully allocated the object(s); now update top to point to
    // next object start and initialize the object.
    __ movl(Address(THR, target::Thread::top_offset()), EBX);
    __ subl(EBX, EAX);
    __ addl(EAX, Immediate(kHeapObjectTag));

    // Initialize the tags.
    // EAX: new object start as a tagged pointer.
    // EBX: allocation size.
    // ECX: array element type.
    // EDX: array length as Smi.
    {
      Label size_tag_overflow, done;
      __ movl(EDI, EBX);
      __ cmpl(EDI, Immediate(target::ObjectLayout::kSizeTagMaxSizeTag));
      __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
      __ shll(EDI, Immediate(target::ObjectLayout::kTagBitsSizeTagPos -
                             target::ObjectAlignment::kObjectAlignmentLog2));
      __ jmp(&done, Assembler::kNearJump);

      __ Bind(&size_tag_overflow);
      __ movl(EDI, Immediate(0));
      __ Bind(&done);

      // Get the class index and insert it into the tags.
      uint32_t tags = target::MakeTagWordForNewSpaceObject(cid, 0);
      __ orl(EDI, Immediate(tags));
      __ movl(FieldAddress(EAX, target::Object::tags_offset()), EDI);  // Tags.
    }
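
    // Size-tag encoding, for illustration: the byte size is stored in the tag
    // word expressed in allocation units (kObjectAlignment bytes). A size too
    // large for the tag field is encoded as 0, in which case the size is
    // looked up from the class instead.
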
    // EAX: new object start as a tagged pointer.
    // EBX: allocation size.
    // ECX: array element type.
    // EDX: array length as Smi (preserved).
    // Store the type argument field.
    // No generational barrier needed, since we store into a new object.
    __ StoreIntoObjectNoBarrier(
        EAX, FieldAddress(EAX, target::Array::type_arguments_offset()), ECX);

    // Set the length field.
    __ StoreIntoObjectNoBarrier(
        EAX, FieldAddress(EAX, target::Array::length_offset()), EDX);

    // Initialize all array elements to raw_null.
    // EAX: new object start as a tagged pointer.
    // EBX: allocation size.
    // EDI: iterator which initially points to the start of the variable
    //      data area to be initialized.
    // ECX: array element type.
    // EDX: array length as Smi.
    __ leal(EBX, FieldAddress(EAX, EBX, TIMES_1, 0));
    __ leal(EDI, FieldAddress(EAX, target::Array::header_size()));
    Label done;
    Label init_loop;
    __ Bind(&init_loop);
    __ cmpl(EDI, EBX);
    __ j(ABOVE_EQUAL, &done, Assembler::kNearJump);
    // No generational barrier needed, since we are storing null.
    __ StoreIntoObjectNoBarrier(EAX, Address(EDI, 0), NullObject());
    __ addl(EDI, Immediate(target::kWordSize));
    __ jmp(&init_loop, Assembler::kNearJump);
    __ Bind(&done);
    __ ret();  // Returns the newly allocated object in EAX.

    // Unable to allocate the array using the fast inline code; just call
    // into the runtime.
    __ Bind(&slow_case);
  }
  // Create a stub frame as we are pushing some objects on the stack before
  // calling into the runtime.
  __ EnterStubFrame();
  __ pushl(Immediate(0));  // Set up space on stack for return value.
  __ pushl(EDX);           // Array length as Smi.
  __ pushl(ECX);           // Element type.
  __ CallRuntime(kAllocateArrayRuntimeEntry, 2);
  __ popl(EAX);  // Pop element type argument.
  __ popl(EDX);  // Pop array length argument (preserved).
  __ popl(EAX);  // Pop return value from return slot.

  // Write-barrier elimination might be enabled for this array (depending on
  // the array length). To be sure we check whether the allocated object is in
  // old space and, if so, call a leaf runtime to add it to the remembered set.
  EnsureIsNewOrRemembered(assembler);

  __ LeaveFrame();
  __ ret();
}

// Called when invoking Dart code from C++ (VM code).
// Input parameters:
//   ESP : points to return address.
//   ESP + 4 : code object of the Dart function to call.
//   ESP + 8 : arguments descriptor array.
//   ESP + 12 : arguments array.
//   ESP + 16 : current thread.
// Uses EAX, EDX, ECX, EDI as temporary registers.
void StubCodeCompiler::GenerateInvokeDartCodeStub(Assembler* assembler) {
  const intptr_t kTargetCodeOffset = 3 * target::kWordSize;
  const intptr_t kArgumentsDescOffset = 4 * target::kWordSize;
  const intptr_t kArgumentsOffset = 5 * target::kWordSize;
  const intptr_t kThreadOffset = 6 * target::kWordSize;
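
  // Why the first parameter is at 3 * kWordSize, for illustration: below we
  // push a copy of the return address as a profiler marker and EnterFrame
  // pushes the caller's EBP, so relative to the new EBP the layout is:
  //   EBP + 0  : saved EBP
  //   EBP + 4  : duplicated return address (profiler marker)
  //   EBP + 8  : original return address
  //   EBP + 12 : code object (kTargetCodeOffset), and so on.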

  __ pushl(Address(ESP, 0));  // Marker for the profiler.
  __ EnterFrame(0);

  // Push code object to PC marker slot.
  __ movl(EAX, Address(EBP, kThreadOffset));
  __ pushl(Address(EAX, target::Thread::invoke_dart_code_stub_offset()));

  // Save C++ ABI callee-saved registers.
  __ pushl(EBX);
  __ pushl(ESI);
  __ pushl(EDI);

  // Set up THR, which caches the current thread in Dart code.
  __ movl(THR, EAX);

#if defined(USING_SHADOW_CALL_STACK)
#error Unimplemented
#endif

  // Save the current VMTag on the stack.
  __ movl(ECX, Assembler::VMTagAddress());
  __ pushl(ECX);

  // Save top resource and top exit frame info. Use EDX as a temporary
  // register. StackFrameIterator reads the top exit frame info saved in this
  // frame.
  __ movl(EDX, Address(THR, target::Thread::top_resource_offset()));
  __ pushl(EDX);
  __ movl(Address(THR, target::Thread::top_resource_offset()), Immediate(0));
  __ movl(EAX, Address(THR, target::Thread::exit_through_ffi_offset()));
  __ pushl(EAX);
  __ movl(Address(THR, target::Thread::exit_through_ffi_offset()),
          Immediate(0));
  // The constant target::frame_layout.exit_link_slot_from_entry_fp must be
  // kept in sync with the code below.
  ASSERT(target::frame_layout.exit_link_slot_from_entry_fp == -8);
  __ movl(EDX, Address(THR, target::Thread::top_exit_frame_info_offset()));
  __ pushl(EDX);
  __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()),
          Immediate(0));

  // In debug mode, verify that we've pushed the top exit frame info at the
  // correct offset from FP.
  __ EmitEntryFrameVerification();

  // Mark that the thread is executing Dart code. Do this after initializing
  // the exit link for the profiler.
  __ movl(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));

  // Load arguments descriptor array into EDX.
  __ movl(EDX, Address(EBP, kArgumentsDescOffset));
  __ movl(EDX, Address(EDX, VMHandles::kOffsetOfRawPtrInHandle));

  // Load number of arguments into EBX and adjust count for type arguments.
  __ movl(EBX, FieldAddress(EDX, target::ArgumentsDescriptor::count_offset()));
  __ cmpl(
      FieldAddress(EDX, target::ArgumentsDescriptor::type_args_len_offset()),
      Immediate(0));
  Label args_count_ok;
  __ j(EQUAL, &args_count_ok, Assembler::kNearJump);
  __ addl(EBX, Immediate(target::ToRawSmi(1)));  // Include the type arguments.
  __ Bind(&args_count_ok);
  // Save number of arguments as Smi on stack, replacing ArgumentsDesc.
  __ movl(Address(EBP, kArgumentsDescOffset), EBX);
  __ SmiUntag(EBX);

  // Set up arguments for the Dart call.
  Label push_arguments;
  Label done_push_arguments;
  __ testl(EBX, EBX);  // Check if there are arguments.
  __ j(ZERO, &done_push_arguments, Assembler::kNearJump);
  __ movl(EAX, Immediate(0));

  // Compute address of 'arguments array' data area into EDI.
  __ movl(EDI, Address(EBP, kArgumentsOffset));
  __ movl(EDI, Address(EDI, VMHandles::kOffsetOfRawPtrInHandle));
  __ leal(EDI, FieldAddress(EDI, target::Array::data_offset()));

  __ Bind(&push_arguments);
  __ movl(ECX, Address(EDI, EAX, TIMES_4, 0));
  __ pushl(ECX);
  __ incl(EAX);
  __ cmpl(EAX, EBX);
  __ j(LESS, &push_arguments, Assembler::kNearJump);
  __ Bind(&done_push_arguments);

  // Call the Dart code entry point.
  __ movl(EAX, Address(EBP, kTargetCodeOffset));
  __ movl(EAX, Address(EAX, VMHandles::kOffsetOfRawPtrInHandle));
  __ call(FieldAddress(EAX, target::Code::entry_point_offset()));

  // Read the saved number of passed arguments as Smi.
  __ movl(EDX, Address(EBP, kArgumentsDescOffset));
  // Get rid of arguments pushed on the stack.
  __ leal(ESP, Address(ESP, EDX, TIMES_2, 0));  // EDX is a Smi.

  // Restore the saved top exit frame info and top resource back into the
  // Isolate structure.
  __ popl(Address(THR, target::Thread::top_exit_frame_info_offset()));
  __ popl(Address(THR, target::Thread::exit_through_ffi_offset()));
  __ popl(Address(THR, target::Thread::top_resource_offset()));

  // Restore the current VMTag from the stack.
  __ popl(Assembler::VMTagAddress());

#if defined(USING_SHADOW_CALL_STACK)
#error Unimplemented
#endif

  // Restore C++ ABI callee-saved registers.
  __ popl(EDI);
  __ popl(ESI);
  __ popl(EBX);

  // Restore the frame pointer.
  __ LeaveFrame();
  __ popl(ECX);  // Discard the profiler marker.

  __ ret();
}

// Called when invoking compiled Dart code from interpreted Dart code.
// Input parameters:
//   ESP : points to return address.
//   ESP + 4 : target raw code.
//   ESP + 8 : raw arguments descriptor array.
//   ESP + 12 : address of first argument.
//   ESP + 16 : current thread.
void StubCodeCompiler::GenerateInvokeDartCodeFromBytecodeStub(
    Assembler* assembler) {
  const intptr_t kTargetCodeOffset = 3 * target::kWordSize;
  const intptr_t kArgumentsDescOffset = 4 * target::kWordSize;
  const intptr_t kArgumentsOffset = 5 * target::kWordSize;
  const intptr_t kThreadOffset = 6 * target::kWordSize;

  __ pushl(Address(ESP, 0));  // Marker for the profiler.
  __ EnterFrame(0);

  // Push code object to PC marker slot.
  __ movl(EAX, Address(EBP, kThreadOffset));
  __ pushl(Address(EAX, target::Thread::invoke_dart_code_stub_offset()));

  // Save C++ ABI callee-saved registers.
  __ pushl(EBX);
  __ pushl(ESI);
  __ pushl(EDI);

  // Set up THR, which caches the current thread in Dart code.
  __ movl(THR, EAX);

#if defined(USING_SHADOW_CALL_STACK)
#error Unimplemented
#endif

  // Save the current VMTag on the stack.
  __ movl(ECX, Assembler::VMTagAddress());
  __ pushl(ECX);

  // Save top resource and top exit frame info. Use EDX as a temporary
  // register. StackFrameIterator reads the top exit frame info saved in this
  // frame.
  __ movl(EDX, Address(THR, target::Thread::top_resource_offset()));
  __ pushl(EDX);
  __ movl(Address(THR, target::Thread::top_resource_offset()), Immediate(0));

  __ movl(EAX, Address(THR, target::Thread::exit_through_ffi_offset()));
  __ pushl(EAX);
  __ movl(Address(THR, target::Thread::exit_through_ffi_offset()),
          Immediate(0));

  // The constant target::frame_layout.exit_link_slot_from_entry_fp must be
  // kept in sync with the code below.
  ASSERT(target::frame_layout.exit_link_slot_from_entry_fp == -8);
  __ movl(EDX, Address(THR, target::Thread::top_exit_frame_info_offset()));
  __ pushl(EDX);
  __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()),
          Immediate(0));

  // Mark that the thread is executing Dart code. Do this after initializing
  // the exit link for the profiler.
  __ movl(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));

  // Load arguments descriptor array into EDX.
  __ movl(EDX, Address(EBP, kArgumentsDescOffset));

  // Load number of arguments into EBX and adjust count for type arguments.
  __ movl(EBX, FieldAddress(EDX, target::ArgumentsDescriptor::count_offset()));
  __ cmpl(
      FieldAddress(EDX, target::ArgumentsDescriptor::type_args_len_offset()),
      Immediate(0));
  Label args_count_ok;
  __ j(EQUAL, &args_count_ok, Assembler::kNearJump);
  __ addl(EBX, Immediate(target::ToRawSmi(1)));  // Include the type arguments.
  __ Bind(&args_count_ok);
  // Save number of arguments as Smi on stack, replacing ArgumentsDesc.
  __ movl(Address(EBP, kArgumentsDescOffset), EBX);
  __ SmiUntag(EBX);

  // Set up arguments for the Dart call.
  Label push_arguments;
  Label done_push_arguments;
  __ testl(EBX, EBX);  // Check if there are arguments.
  __ j(ZERO, &done_push_arguments, Assembler::kNearJump);
  __ movl(EAX, Immediate(0));

  // Compute address of 'arguments array' data area into EDI.
  __ movl(EDI, Address(EBP, kArgumentsOffset));

  __ Bind(&push_arguments);
  __ movl(ECX, Address(EDI, EAX, TIMES_4, 0));
  __ pushl(ECX);
  __ incl(EAX);
  __ cmpl(EAX, EBX);
  __ j(LESS, &push_arguments, Assembler::kNearJump);
  __ Bind(&done_push_arguments);

  // Call the Dart code entry point.
  __ movl(EAX, Address(EBP, kTargetCodeOffset));
  __ call(FieldAddress(EAX, target::Code::entry_point_offset()));

  // Read the saved number of passed arguments as Smi.
  __ movl(EDX, Address(EBP, kArgumentsDescOffset));
  // Get rid of arguments pushed on the stack.
  __ leal(ESP, Address(ESP, EDX, TIMES_2, 0));  // EDX is a Smi.

  // Restore the saved top exit frame info and top resource back into the
  // Isolate structure.
  __ popl(Address(THR, target::Thread::top_exit_frame_info_offset()));
  __ popl(Address(THR, target::Thread::exit_through_ffi_offset()));
  __ popl(Address(THR, target::Thread::top_resource_offset()));

  // Restore the current VMTag from the stack.
  __ popl(Assembler::VMTagAddress());

#if defined(USING_SHADOW_CALL_STACK)
#error Unimplemented
#endif

  // Restore C++ ABI callee-saved registers.
  __ popl(EDI);
  __ popl(ESI);
  __ popl(EBX);

  // Restore the frame pointer.
  __ LeaveFrame();
  __ popl(ECX);  // Discard the profiler marker.

  __ ret();
}

// Helper to generate the space-allocation part of the context stubs.
// This does not initialize the fields of the context.
// Input:
//   EDX: number of context variables.
// Output:
//   EAX: newly allocated RawContext object.
// Clobbered:
//   EBX
static void GenerateAllocateContextSpaceStub(Assembler* assembler,
                                             Label* slow_case) {
  // First compute the rounded instance size.
  // EDX: number of context variables.
  intptr_t fixed_size_plus_alignment_padding =
      (target::Context::header_size() +
       target::ObjectAlignment::kObjectAlignment - 1);
  __ leal(EBX, Address(EDX, TIMES_4, fixed_size_plus_alignment_padding));
  __ andl(EBX, Immediate(-target::ObjectAlignment::kObjectAlignment));
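
  // For illustration: EDX holds the untagged number of context variables, so
  // scaling by TIMES_4 gives num_variables * kWordSize; adding the header and
  // alignment padding and masking rounds the size up to kObjectAlignment.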

  NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, EAX, slow_case,
                                         Assembler::kFarJump));

  // Now allocate the object.
  // EDX: number of context variables.
  __ movl(EAX, Address(THR, target::Thread::top_offset()));
  __ addl(EBX, EAX);
  // Check if the allocation fits into the remaining space.
  // EAX: potential new object.
  // EBX: potential next object start.
  // EDX: number of context variables.
  __ cmpl(EBX, Address(THR, target::Thread::end_offset()));
#if defined(DEBUG)
  static const bool kJumpLength = Assembler::kFarJump;
#else
  static const bool kJumpLength = Assembler::kNearJump;
#endif  // DEBUG
  __ j(ABOVE_EQUAL, slow_case, kJumpLength);

  // Successfully allocated the object; now update top to point to
  // next object start and initialize the object.
  // EAX: new object.
  // EBX: next object start.
  // EDX: number of context variables.
  __ movl(Address(THR, target::Thread::top_offset()), EBX);
  // EBX: size of allocation in bytes.
  __ subl(EBX, EAX);
  __ addl(EAX, Immediate(kHeapObjectTag));
  // Generate isolate-independent code to allow sharing between isolates.

  // Calculate the size tag.
  // EAX: new object.
  // EDX: number of context variables.
  {
    Label size_tag_overflow, done;
    __ leal(EBX, Address(EDX, TIMES_4, fixed_size_plus_alignment_padding));
    __ andl(EBX, Immediate(-target::ObjectAlignment::kObjectAlignment));
    __ cmpl(EBX, Immediate(target::ObjectLayout::kSizeTagMaxSizeTag));
    __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
    __ shll(EBX, Immediate(target::ObjectLayout::kTagBitsSizeTagPos -
                           target::ObjectAlignment::kObjectAlignmentLog2));
    __ jmp(&done);

    __ Bind(&size_tag_overflow);
    // Set overflow size tag value.
    __ movl(EBX, Immediate(0));

    __ Bind(&done);
    // EAX: new object.
    // EDX: number of context variables.
    // EBX: size and bit tags.
    uint32_t tags = target::MakeTagWordForNewSpaceObject(kContextCid, 0);
    __ orl(EBX, Immediate(tags));
    __ movl(FieldAddress(EAX, target::Object::tags_offset()), EBX);  // Tags.
  }

  // Set up the number of context variables field.
  // EAX: new object.
  // EDX: number of context variables as integer value (not object).
  __ movl(FieldAddress(EAX, target::Context::num_variables_offset()), EDX);
}

// Called for inline allocation of contexts.
// Input:
//   EDX: number of context variables.
// Output:
//   EAX: newly allocated RawContext object.
// Clobbered:
//   EBX, EDX
void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
    Label slow_case;

    GenerateAllocateContextSpaceStub(assembler, &slow_case);

    // Set up the parent field.
    // EAX: new object.
    // EDX: number of context variables.
    // No generational barrier needed, since we are storing null.
    __ StoreIntoObjectNoBarrier(
        EAX, FieldAddress(EAX, target::Context::parent_offset()), NullObject());

    // Initialize the context variables.
    // EAX: new object.
    // EDX: number of context variables.
    {
      Label loop, entry;
      __ leal(EBX, FieldAddress(EAX, target::Context::variable_offset(0)));

      __ jmp(&entry, Assembler::kNearJump);
      __ Bind(&loop);
      __ decl(EDX);
      // No generational barrier needed, since we are storing null.
      __ StoreIntoObjectNoBarrier(EAX, Address(EBX, EDX, TIMES_4, 0),
                                  NullObject());
      __ Bind(&entry);
      __ cmpl(EDX, Immediate(0));
      __ j(NOT_EQUAL, &loop, Assembler::kNearJump);
    }

    // Done allocating and initializing the context.
    // EAX: new object.
    __ ret();

    __ Bind(&slow_case);
  }
  // Create a stub frame as we are pushing some objects on the stack before
  // calling into the runtime.
  __ EnterStubFrame();
  __ pushl(Immediate(0));  // Set up space on stack for return value.
  __ SmiTag(EDX);
  __ pushl(EDX);
  __ CallRuntime(kAllocateContextRuntimeEntry, 1);  // Allocate context.
  __ popl(EAX);  // Pop number of context variables argument.
  __ popl(EAX);  // Pop the new context object.

  // Write-barrier elimination might be enabled for this context (depending on
  // the size). To be sure we check whether the allocated object is in old
  // space and, if so, call a leaf runtime to add it to the remembered set.
  EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);

  // EAX: new object
  // Restore the frame pointer.
  __ LeaveFrame();

  __ ret();
}
1330
1331// Called for clone of contexts.
1332// Input:
1333// ECX: context variable.
1334// Output:
1335// EAX: new allocated RawContext object.
1336// Clobbered:
1337// EBX, ECX, EDX
1338void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
1339 {
1340 Label slow_case;
1341
1342 // Load num. variable in the existing context.
1343 __ movl(EDX, FieldAddress(ECX, target::Context::num_variables_offset()));
1344
1345 GenerateAllocateContextSpaceStub(assembler, &slow_case);
1346
1347 // Setup the parent field.
1348 // EAX: new object.
1349 // ECX: old object to clone.
1350 __ movl(EBX, FieldAddress(ECX, target::Context::parent_offset()));
1351 __ StoreIntoObjectNoBarrier(
1352 EAX, FieldAddress(EAX, target::Context::parent_offset()), EBX);
1353
1354 // Initialize the context variables.
1355 // EAX: new context.
1356 // ECX: context to clone.
1357 // EDX: number of context variables.
1358 {
1359 Label loop, entry;
1360 __ jmp(&entry, Assembler::kNearJump);
1361
1362 __ Bind(&loop);
1363 __ decl(EDX);
1364
1365 __ movl(EBX, FieldAddress(ECX, EDX, TIMES_4,
1366 target::Context::variable_offset(0)));
1367 __ StoreIntoObjectNoBarrier(
1368 EAX,
1369 FieldAddress(EAX, EDX, TIMES_4, target::Context::variable_offset(0)),
1370 EBX);
1371
1372 __ Bind(&entry);
1373 __ cmpl(EDX, Immediate(0));
1374 __ j(NOT_EQUAL, &loop, Assembler::kNearJump);
1375 }
1376
1377 // Done allocating and initializing the context.
1378 // EAX: new object.
1379 __ ret();
1380
1381 __ Bind(&slow_case);
1382 }
1383
1384 // Create a stub frame as we are pushing some objects on the stack before
1385 // calling into the runtime.
1386 __ EnterStubFrame();
1387 __ pushl(Immediate(0)); // Setup space on stack for return value.
1388 __ pushl(ECX);
1389 __ CallRuntime(kCloneContextRuntimeEntry, 1); // Allocate context.
1390 __ popl(EAX); // Pop number of context variables argument.
1391 __ popl(EAX); // Pop the new context object.
1392
1393 // Write-barrier elimination might be enabled for this context (depending on
1394 // the size). To be sure we will check if the allocated object is in old
1395 // space and if so call a leaf runtime to add it to the remembered set.
1396 EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
1397
1398 // EAX: new object
1399 // Restore the frame pointer.
1400 __ LeaveFrame();
1401 __ ret();
1402}

void StubCodeCompiler::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
  // Not used on IA32.
  __ Breakpoint();
}

// Helper stub to implement Assembler::StoreIntoObject/Array.
// Input parameters:
//   EDX: Object (old)
//   EDI: Slot
// If EDX is not remembered, mark as remembered and add to the store buffer.
COMPILE_ASSERT(kWriteBarrierObjectReg == EDX);
COMPILE_ASSERT(kWriteBarrierValueReg == kNoRegister);
COMPILE_ASSERT(kWriteBarrierSlotReg == EDI);
static void GenerateWriteBarrierStubHelper(Assembler* assembler,
                                           Address stub_code,
                                           bool cards) {
  Label remember_card;

  // Save values being destroyed.
  __ pushl(EAX);
  __ pushl(ECX);

  Label add_to_buffer;
  // Check whether this object has already been remembered. Skip adding to the
  // store buffer if the object is in the store buffer already.
  // Spilled: EAX, ECX
  // EDX: Address being stored
  __ movl(EAX, FieldAddress(EDX, target::Object::tags_offset()));
  __ testl(EAX, Immediate(1 << target::ObjectLayout::kOldAndNotRememberedBit));
  __ j(NOT_EQUAL, &add_to_buffer, Assembler::kNearJump);
  __ popl(ECX);
  __ popl(EAX);
  __ ret();

  // Update the tags to record that this object has been remembered.
  // EDX: Address being stored
  // EAX: Current tag value
  __ Bind(&add_to_buffer);

  if (cards) {
    // Check if this object is using remembered cards.
    __ testl(EAX, Immediate(1 << target::ObjectLayout::kCardRememberedBit));
    __ j(NOT_EQUAL, &remember_card, Assembler::kFarJump);  // Unlikely.
  } else {
#if defined(DEBUG)
    Label ok;
    __ testl(EAX, Immediate(1 << target::ObjectLayout::kCardRememberedBit));
    __ j(ZERO, &ok, Assembler::kFarJump);  // Unlikely.
    __ Stop("Wrong barrier");
    __ Bind(&ok);
#endif
  }

  // lock+andl is an atomic read-modify-write.
  __ lock();
  __ andl(FieldAddress(EDX, target::Object::tags_offset()),
          Immediate(~(1 << target::ObjectLayout::kOldAndNotRememberedBit)));

  // Load the StoreBuffer block out of the thread. Then load top_ out of the
  // StoreBufferBlock and add the address to the pointers_.
  // Spilled: EAX, ECX
  // EDX: Address being stored
  __ movl(EAX, Address(THR, target::Thread::store_buffer_block_offset()));
  __ movl(ECX, Address(EAX, target::StoreBufferBlock::top_offset()));
  __ movl(
      Address(EAX, ECX, TIMES_4, target::StoreBufferBlock::pointers_offset()),
      EDX);
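
  // For illustration: the two loads and the store above implement
  //   block->pointers_[block->top_] = object;
  // where block is the thread's current StoreBufferBlock.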

  // Increment top_ and check for overflow.
  // Spilled: EAX, ECX
  // ECX: top_
  // EAX: StoreBufferBlock
  Label overflow;
  __ incl(ECX);
  __ movl(Address(EAX, target::StoreBufferBlock::top_offset()), ECX);
  __ cmpl(ECX, Immediate(target::StoreBufferBlock::kSize));
  // Restore values.
  // Spilled: EAX, ECX
  __ popl(ECX);
  __ popl(EAX);
  __ j(EQUAL, &overflow, Assembler::kNearJump);
  __ ret();

  // Handle overflow: Call the runtime leaf function.
  __ Bind(&overflow);
  // Set up the frame and push callee-saved registers.

  __ EnterCallRuntimeFrame(1 * target::kWordSize);
  __ movl(Address(ESP, 0), THR);  // Pass the thread as the only argument.
  __ CallRuntime(kStoreBufferBlockProcessRuntimeEntry, 1);
  // Restore callee-saved registers, tear down frame.
  __ LeaveCallRuntimeFrame();
  __ ret();

  if (cards) {
    Label remember_card_slow;

    // Get card table.
    __ Bind(&remember_card);
    __ movl(EAX, EDX);                              // Object.
    __ andl(EAX, Immediate(target::kOldPageMask));  // OldPage.
    __ cmpl(Address(EAX, target::OldPage::card_table_offset()), Immediate(0));
    __ j(EQUAL, &remember_card_slow, Assembler::kNearJump);

    // Dirty the card.
    __ subl(EDI, EAX);  // Offset in page.
    __ movl(EAX,
            Address(EAX, target::OldPage::card_table_offset()));  // Card table.
    __ shrl(
        EDI,
        Immediate(target::OldPage::kBytesPerCardLog2));  // Index in card table.
    __ movb(Address(EAX, EDI, TIMES_1, 0), Immediate(1));
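
    // For illustration: after the subtraction EDI holds the slot's byte
    // offset within the old page, so shifting right by kBytesPerCardLog2
    // turns it into an index into the page's card table, where one byte is
    // dirtied per card.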
1516 __ popl(ECX);
1517 __ popl(EAX);
1518 __ ret();
1519
1520 // Card table not yet allocated.
1521 __ Bind(&remember_card_slow);
1522 __ EnterCallRuntimeFrame(2 * target::kWordSize);
1523 __ movl(Address(ESP, 0 * target::kWordSize), EDX); // Object
1524 __ movl(Address(ESP, 1 * target::kWordSize), EDI); // Slot
1525 __ CallRuntime(kRememberCardRuntimeEntry, 2);
1526 __ LeaveCallRuntimeFrame();
1527 __ popl(ECX);
1528 __ popl(EAX);
1529 __ ret();
1530 }
1531}
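
// For reference, the store-buffer fast path above corresponds roughly to the
// following C++ (a sketch only; the helper names are illustrative, not the
// actual runtime code):
//
//   if (object->tags() & kOldAndNotRememberedBit) {
//     AtomicClearTagBit(object, kOldAndNotRememberedBit);  // lock+andl
//     StoreBufferBlock* block = thread->store_buffer_block();
//     block->pointers_[block->top_++] = object;
//     if (block->top_ == StoreBufferBlock::kSize) {
//       StoreBufferBlockProcess(thread);  // runtime leaf call
//     }
//   }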
1532
1533void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) {
1534 GenerateWriteBarrierStubHelper(
1535 assembler, Address(THR, target::Thread::write_barrier_code_offset()),
1536 false);
1537}
1538
1539void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
1540 GenerateWriteBarrierStubHelper(
1541 assembler,
1542 Address(THR, target::Thread::array_write_barrier_code_offset()), true);
1543}
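
// The plain write-barrier stub is used for stores into objects covered by the
// store buffer alone; the array variant additionally handles card-remembered
// objects (large arrays), for which dirtying the card covering the updated
// slot replaces adding the object to the store buffer.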
1544
1545void StubCodeCompiler::GenerateAllocateObjectStub(Assembler* assembler) {
1546 __ int3();
1547}
1548
1549void StubCodeCompiler::GenerateAllocateObjectParameterizedStub(
1550 Assembler* assembler) {
1551 __ int3();
1552}
1553
1554void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
1555 __ int3();
1556}
1557
1558// Called for inline allocation of objects.
1559// Input parameters:
1560// ESP : points to return address.
1561// kAllocationStubTypeArgumentsReg (EDX) : type arguments object
1562// (only if class is parameterized).
1563// Uses EAX, EBX, ECX, EDX, EDI as temporary registers.
1564// Returns patch_code_pc offset where patching code for disabling the stub
1565// has been generated (similar to regularly generated Dart code).
1566void StubCodeCompiler::GenerateAllocationStubForClass(
1567 Assembler* assembler,
1568 UnresolvedPcRelativeCalls* unresolved_calls,
1569 const Class& cls,
1570 const Code& allocate_object,
    const Code& allocate_object_parametrized) {
1572 const Immediate& raw_null = Immediate(target::ToRawPointer(NullObject()));
1573 // The generated code is different if the class is parameterized.
1574 const bool is_cls_parameterized = target::Class::NumTypeArguments(cls) > 0;
1575 ASSERT(!is_cls_parameterized || target::Class::TypeArgumentsFieldOffset(
1576 cls) != target::Class::kNoTypeArguments);
1577 // kInlineInstanceSize is a constant used as a threshold for determining
1578 // when the object initialization should be done as a loop or as
1579 // straight line code.
1580 const int kInlineInstanceSize = 12; // In words.
1581 const intptr_t instance_size = target::Class::GetInstanceSize(cls);
1582 ASSERT(instance_size > 0);
1583
1584 // EDX: instantiated type arguments (if is_cls_parameterized).
1585 static_assert(kAllocationStubTypeArgumentsReg == EDX,
1586 "Adjust register allocation in the AllocationStub");
1587
1588 if (!FLAG_use_slow_path && FLAG_inline_alloc &&
1589 target::Heap::IsAllocatableInNewSpace(instance_size) &&
1590 !target::Class::TraceAllocation(cls)) {
1591 Label slow_case;
    // Allocate the object, update top to point to the next object start,
    // and initialize the allocated object.
1594 // EDX: instantiated type arguments (if is_cls_parameterized).
1595 __ movl(EAX, Address(THR, target::Thread::top_offset()));
1596 __ leal(EBX, Address(EAX, instance_size));
1597 // Check if the allocation fits into the remaining space.
1598 // EAX: potential new object start.
1599 // EBX: potential next object start.
1600 __ cmpl(EBX, Address(THR, target::Thread::end_offset()));
1601 __ j(ABOVE_EQUAL, &slow_case);
1602 __ movl(Address(THR, target::Thread::top_offset()), EBX);
1603
1604 // EAX: new object start (untagged).
1605 // EBX: next object start.
1606 // EDX: new object type arguments (if is_cls_parameterized).
1607 // Set the tags.
1608 ASSERT(target::Class::GetId(cls) != kIllegalCid);
1609 uint32_t tags = target::MakeTagWordForNewSpaceObject(
1610 target::Class::GetId(cls), instance_size);
1611 __ movl(Address(EAX, target::Object::tags_offset()), Immediate(tags));
1612 __ addl(EAX, Immediate(kHeapObjectTag));
1613
1614 // Initialize the remaining words of the object.
1615
1616 // EAX: new object (tagged).
1617 // EBX: next object start.
1618 // EDX: new object type arguments (if is_cls_parameterized).
1619 // First try inlining the initialization without a loop.
1620 if (instance_size < (kInlineInstanceSize * target::kWordSize)) {
1621 // Check if the object contains any non-header fields.
1622 // Small objects are initialized using a consecutive set of writes.
1623 for (intptr_t current_offset = target::Instance::first_field_offset();
1624 current_offset < instance_size;
1625 current_offset += target::kWordSize) {
1626 __ StoreIntoObjectNoBarrier(EAX, FieldAddress(EAX, current_offset),
1627 NullObject());
1628 }
1629 } else {
1630 __ leal(ECX, FieldAddress(EAX, target::Instance::first_field_offset()));
1631 // Loop until the whole object is initialized.
1632 // EAX: new object (tagged).
1633 // EBX: next object start.
1634 // ECX: next word to be initialized.
1635 // EDX: new object type arguments (if is_cls_parameterized).
1636 Label init_loop;
1637 Label done;
1638 __ Bind(&init_loop);
1639 __ cmpl(ECX, EBX);
1640 __ j(ABOVE_EQUAL, &done, Assembler::kNearJump);
1641 __ StoreIntoObjectNoBarrier(EAX, Address(ECX, 0), NullObject());
1642 __ addl(ECX, Immediate(target::kWordSize));
1643 __ jmp(&init_loop, Assembler::kNearJump);
1644 __ Bind(&done);
1645 }
1646 if (is_cls_parameterized) {
1647 // EAX: new object (tagged).
1648 // EDX: new object type arguments.
1649 // Set the type arguments in the new object.
1650 const intptr_t offset = target::Class::TypeArgumentsFieldOffset(cls);
1651 __ StoreIntoObjectNoBarrier(EAX, FieldAddress(EAX, offset),
1652 kAllocationStubTypeArgumentsReg);
1653 }
1654 // Done allocating and initializing the instance.
1655 // EAX: new object (tagged).
1656 __ ret();
1657
1658 __ Bind(&slow_case);
1659 }
1660 // If is_cls_parameterized:
1661 // EDX: new object type arguments.
1662 // Create a stub frame as we are pushing some objects on the stack before
1663 // calling into the runtime.
1664 __ EnterStubFrame();
  __ pushl(raw_null);  // Set up space on stack for the return value.
1666 __ PushObject(
1667 CastHandle<Object>(cls)); // Push class of object to be allocated.
1668 if (is_cls_parameterized) {
1669 // Push type arguments of object to be allocated.
1670 __ pushl(kAllocationStubTypeArgumentsReg);
1671 } else {
1672 __ pushl(raw_null); // Push null type arguments.
1673 }
1674 __ CallRuntime(kAllocateObjectRuntimeEntry, 2); // Allocate object.
1675 __ popl(EAX); // Pop argument (type arguments of object).
1676 __ popl(EAX); // Pop argument (class of object).
1677 __ popl(EAX); // Pop result (newly allocated object).
1678
1679 if (AllocateObjectInstr::WillAllocateNewOrRemembered(cls)) {
    // Write-barrier elimination is enabled for [cls], and we therefore need
    // to ensure that the object is in new-space or has its remembered bit
    // set.
1682 EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
1683 }
1684
1685 // EAX: new object
1686 // Restore the frame pointer.
1687 __ LeaveFrame();
1688 __ ret();
1689}
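
// For reference, the inline allocation fast path above is standard
// bump-pointer allocation in new-space, roughly (a sketch, not actual
// runtime code):
//
//   uword top = thread->top();
//   if (top + instance_size < thread->end()) {
//     thread->set_top(top + instance_size);
//     *reinterpret_cast<uint32_t*>(top) = tags;  // header word
//     result = top + kHeapObjectTag;             // tag the pointer
//     // ... null-initialize fields, store type arguments ...
//   } else {
//     // Slow path: AllocateObject runtime entry.
//   }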
1690
// Called to invoke the "dynamic noSuchMethod(Invocation invocation)" function
// from the entry code of a Dart function after an error in a passed argument
// name or count has been detected.
1694// Input parameters:
1695// ESP : points to return address.
1696// ESP + 4 : address of last argument.
1697// EDX : arguments descriptor array.
1698// Uses EAX, EBX, EDI as temporary registers.
1699void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
1700 Assembler* assembler) {
1701 __ EnterStubFrame();
1702
1703 // Load the receiver.
1704 __ movl(EDI, FieldAddress(EDX, target::ArgumentsDescriptor::size_offset()));
1705 __ movl(EAX,
1706 Address(EBP, EDI, TIMES_2,
1707 target::frame_layout.param_end_from_fp * target::kWordSize));
1708
1709 // Load the function.
1710 __ movl(EBX, FieldAddress(EAX, target::Closure::function_offset()));
1711
  __ pushl(Immediate(0));  // Make room for the result from noSuchMethod.
1713 __ pushl(EAX); // Receiver.
1714 __ pushl(EBX); // Function.
1715 __ pushl(EDX); // Arguments descriptor array.
1716
1717 // Adjust arguments count.
1718 __ cmpl(
1719 FieldAddress(EDX, target::ArgumentsDescriptor::type_args_len_offset()),
1720 Immediate(0));
1721 __ movl(EDX, EDI);
1722 Label args_count_ok;
1723 __ j(EQUAL, &args_count_ok, Assembler::kNearJump);
1724 __ addl(EDX, Immediate(target::ToRawSmi(1))); // Include the type arguments.
1725 __ Bind(&args_count_ok);
1726
1727 // EDX: Smi-tagged arguments array length.
1728 PushArrayOfArguments(assembler);
1729
1730 const intptr_t kNumArgs = 4;
1731 __ CallRuntime(kNoSuchMethodFromPrologueRuntimeEntry, kNumArgs);
1732 // noSuchMethod on closures always throws an error, so it will never return.
1733 __ int3();
1734}
1735
1736// Cannot use function object from ICData as it may be the inlined
1737// function and not the top-scope function.
1738void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
1739 Assembler* assembler) {
1740 Register ic_reg = ECX;
1741 Register func_reg = EAX;
1742 if (FLAG_trace_optimized_ic_calls) {
1743 __ EnterStubFrame();
    __ pushl(func_reg);  // Preserve.
    __ pushl(ic_reg);    // Preserve.
    __ pushl(ic_reg);    // Argument.
    __ pushl(func_reg);  // Argument.
    __ CallRuntime(kTraceICCallRuntimeEntry, 2);
    __ popl(EAX);  // Discard argument.
    __ popl(EAX);  // Discard argument.
1751 __ popl(ic_reg); // Restore.
1752 __ popl(func_reg); // Restore.
1753 __ LeaveFrame();
1754 }
1755 __ incl(FieldAddress(func_reg, target::Function::usage_counter_offset()));
1756}
1757
1758// Loads function into 'temp_reg'.
1759void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
1760 Register temp_reg) {
1761 if (FLAG_optimization_counter_threshold >= 0) {
1762 Register ic_reg = ECX;
1763 Register func_reg = temp_reg;
1764 ASSERT(ic_reg != func_reg);
1765 __ Comment("Increment function counter");
1766 __ movl(func_reg, FieldAddress(ic_reg, target::ICData::owner_offset()));
1767 __ incl(FieldAddress(func_reg, target::Function::usage_counter_offset()));
1768 }
1769}
1770
1771// Note: ECX must be preserved.
1772// Attempt a quick Smi operation for known operations ('kind'). The ICData
1773// must have been primed with a Smi/Smi check that will be used for counting
1774// the invocations.
1775static void EmitFastSmiOp(Assembler* assembler,
1776 Token::Kind kind,
1777 intptr_t num_args,
1778 Label* not_smi_or_overflow) {
1779 __ Comment("Fast Smi op");
1780 ASSERT(num_args == 2);
1781 __ movl(EAX, Address(ESP, +2 * target::kWordSize)); // Left
1782 __ movl(EDI, Address(ESP, +1 * target::kWordSize)); // Right
1783 __ movl(EBX, EDI);
1784 __ orl(EBX, EAX);
1785 __ testl(EBX, Immediate(kSmiTagMask));
1786 __ j(NOT_ZERO, not_smi_or_overflow, Assembler::kNearJump);
1787 switch (kind) {
1788 case Token::kADD: {
1789 __ addl(EAX, EDI);
1790 __ j(OVERFLOW, not_smi_or_overflow, Assembler::kNearJump);
1791 break;
1792 }
1793 case Token::kLT: {
1795 __ cmpl(EAX, EDI);
1796 __ setcc(GREATER_EQUAL, AL);
1797 __ movzxb(EAX, AL); // EAX := EAX < EDI ? 0 : 1
1798 __ movl(EAX,
1799 Address(THR, EAX, TIMES_4, target::Thread::bool_true_offset()));
1800 ASSERT(target::Thread::bool_true_offset() + 4 ==
1801 target::Thread::bool_false_offset());
1802 break;
1803 }
1804 case Token::kEQ: {
1806 __ cmpl(EAX, EDI);
1807 __ setcc(NOT_EQUAL, AL);
1808 __ movzxb(EAX, AL); // EAX := EAX == EDI ? 0 : 1
1809 __ movl(EAX,
1810 Address(THR, EAX, TIMES_4, target::Thread::bool_true_offset()));
1811 ASSERT(target::Thread::bool_true_offset() + 4 ==
1812 target::Thread::bool_false_offset());
1813 break;
1814 }
1815 default:
1816 UNIMPLEMENTED();
1817 }
1818
1819 // ECX: IC data object.
1820 __ movl(EBX, FieldAddress(ECX, target::ICData::entries_offset()));
1821 // EBX: ic_data_array with check entries: classes and target functions.
1822 __ leal(EBX, FieldAddress(EBX, target::Array::data_offset()));
1823#if defined(DEBUG)
1824 // Check that first entry is for Smi/Smi.
1825 Label error, ok;
1826 const Immediate& imm_smi_cid = Immediate(target::ToRawSmi(kSmiCid));
1827 __ cmpl(Address(EBX, 0 * target::kWordSize), imm_smi_cid);
1828 __ j(NOT_EQUAL, &error, Assembler::kNearJump);
1829 __ cmpl(Address(EBX, 1 * target::kWordSize), imm_smi_cid);
1830 __ j(EQUAL, &ok, Assembler::kNearJump);
1831 __ Bind(&error);
1832 __ Stop("Incorrect IC data");
1833 __ Bind(&ok);
1834#endif
1835 if (FLAG_optimization_counter_threshold >= 0) {
1836 const intptr_t count_offset =
1837 target::ICData::CountIndexFor(num_args) * target::kWordSize;
1838 // Update counter, ignore overflow.
1839 __ addl(Address(EBX, count_offset), Immediate(target::ToRawSmi(1)));
1840 }
1841 __ ret();
1842}
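
// Note on the comparison cases above: the Bool result is selected without
// branching. setcc materializes the *negated* condition into AL (0 or 1),
// and that value indexes a two-entry table of cached Bools in the Thread,
// relying on the asserted layout bool_false_offset == bool_true_offset + 4:
//   AL == 0  ->  Thread::bool_true_offset()   (condition held)
//   AL == 1  ->  Thread::bool_false_offset()  (condition failed)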
1843
1844// Generate inline cache check for 'num_args'.
1845// EBX: receiver (if instance call)
1846// ECX: ICData
1847// ESP[0]: return address
1848// Control flow:
1849// - If receiver is null -> jump to IC miss.
1850// - If receiver is Smi -> load Smi class.
1851// - If receiver is not-Smi -> load receiver's class.
1852// - Check if 'num_args' (including receiver) match any IC data group.
1853// - Match found -> jump to target.
1854// - Match not found -> jump to IC miss.
1855void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
1856 Assembler* assembler,
1857 intptr_t num_args,
1858 const RuntimeEntry& handle_ic_miss,
1859 Token::Kind kind,
1860 Optimized optimized,
1861 CallType type,
1862 Exactness exactness) {
1863 GenerateNArgsCheckInlineCacheStubForEntryKind(
1864 assembler, num_args, handle_ic_miss, kind, optimized, type, exactness,
1865 CodeEntryKind::kNormal);
1866 __ BindUncheckedEntryPoint();
1867 GenerateNArgsCheckInlineCacheStubForEntryKind(
1868 assembler, num_args, handle_ic_miss, kind, optimized, type, exactness,
1869 CodeEntryKind::kUnchecked);
1870}
1871
1872void StubCodeCompiler::GenerateNArgsCheckInlineCacheStubForEntryKind(
1873 Assembler* assembler,
1874 intptr_t num_args,
1875 const RuntimeEntry& handle_ic_miss,
1876 Token::Kind kind,
1877 Optimized optimized,
1878 CallType type,
1879 Exactness exactness,
1880 CodeEntryKind entry_kind) {
1881 if (optimized == kOptimized) {
1882 GenerateOptimizedUsageCounterIncrement(assembler);
1883 } else {
1884 GenerateUsageCounterIncrement(assembler, /* scratch */ EAX);
1885 }
1886
1887 ASSERT(exactness == kIgnoreExactness); // Unimplemented.
1888 ASSERT(num_args == 1 || num_args == 2);
1889#if defined(DEBUG)
1890 {
1891 Label ok;
1892 // Check that the IC data array has NumArgsTested() == num_args.
1893 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
1894 __ movl(EAX, FieldAddress(ECX, target::ICData::state_bits_offset()));
1895 ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed.
1896 __ andl(EAX, Immediate(target::ICData::NumArgsTestedMask()));
1897 __ cmpl(EAX, Immediate(num_args));
1898 __ j(EQUAL, &ok, Assembler::kNearJump);
1899 __ Stop("Incorrect stub for IC data");
1900 __ Bind(&ok);
1901 }
1902#endif // DEBUG
1903
1904#if !defined(PRODUCT)
1905 Label stepping, done_stepping;
1906 if (optimized == kUnoptimized) {
1907 __ Comment("Check single stepping");
1908 __ LoadIsolate(EAX);
1909 __ cmpb(Address(EAX, target::Isolate::single_step_offset()), Immediate(0));
1910 __ j(NOT_EQUAL, &stepping);
1911 __ Bind(&done_stepping);
1912 }
1913#endif
1914 Label not_smi_or_overflow;
1915 if (kind != Token::kILLEGAL) {
1916 EmitFastSmiOp(assembler, kind, num_args, &not_smi_or_overflow);
1917 }
1918 __ Bind(&not_smi_or_overflow);
1919
1920 __ Comment("Extract ICData initial values and receiver cid");
1921 // ECX: IC data object (preserved).
1922 // Load arguments descriptor into EDX.
1923 __ movl(EDX, FieldAddress(
1924 ECX, target::CallSiteData::arguments_descriptor_offset()));
1925 // Loop that checks if there is an IC data match.
1926 Label loop, found, miss;
1927 // ECX: IC data object (preserved).
1928 __ movl(EBX, FieldAddress(ECX, target::ICData::entries_offset()));
1929 // EBX: ic_data_array with check entries: classes and target functions.
1930 __ leal(EBX, FieldAddress(EBX, target::Array::data_offset()));
1931 // EBX: points directly to the first ic data array element.
1932
  // Load the Smi-tagged argument count from the descriptor into EAX. In the
  // 1-argument case this is the last time we need it, and we reuse EAX for
  // the class IDs from the IC descriptor. In the 2-argument case we keep the
  // argument count in EAX to locate both arguments on the stack.
1937 __ movl(EAX, FieldAddress(EDX, target::ArgumentsDescriptor::count_offset()));
1938 if (num_args == 1) {
1939 // Load receiver into EDI.
1940 __ movl(EDI,
1941 Address(ESP, EAX, TIMES_2, 0)); // EAX (argument count) is Smi.
1942 __ LoadTaggedClassIdMayBeSmi(EAX, EDI);
1943 // EAX: receiver class ID as Smi.
1944 }
1945
1946 __ Comment("ICData loop");
1947
  // We unroll the generic stub (kind == Token::kILLEGAL), which is generated
  // only once, more than the specialized ones.
1949 bool optimize = kind == Token::kILLEGAL;
1950 const intptr_t target_offset =
1951 target::ICData::TargetIndexFor(num_args) * target::kWordSize;
1952 const intptr_t count_offset =
1953 target::ICData::CountIndexFor(num_args) * target::kWordSize;
1954 const intptr_t entry_size = target::ICData::TestEntryLengthFor(
1955 num_args, exactness == kCheckExactness) *
1956 target::kWordSize;
1957
1958 __ Bind(&loop);
1959 for (int unroll = optimize ? 4 : 2; unroll >= 0; unroll--) {
1960 Label update;
1961 if (num_args == 1) {
1962 __ movl(EDI, Address(EBX, 0));
1963 __ cmpl(EDI, EAX); // Class id match?
1964 __ j(EQUAL, &found); // Break.
1965 __ addl(EBX, Immediate(entry_size)); // Next entry.
1966 __ cmpl(EDI, Immediate(target::ToRawSmi(kIllegalCid))); // Done?
1967 } else {
1968 ASSERT(num_args == 2);
1969 // Load receiver into EDI.
1970 __ movl(EDI, Address(ESP, EAX, TIMES_2, 0));
1971 __ LoadTaggedClassIdMayBeSmi(EDI, EDI);
1972 __ cmpl(EDI, Address(EBX, 0)); // Class id match?
1973 __ j(NOT_EQUAL, &update); // Continue.
1974
1975 // Load second argument into EDI.
1976 __ movl(EDI, Address(ESP, EAX, TIMES_2, -target::kWordSize));
1977 __ LoadTaggedClassIdMayBeSmi(EDI, EDI);
1978 __ cmpl(EDI, Address(EBX, target::kWordSize)); // Class id match?
1979 __ j(EQUAL, &found); // Break.
1980
1981 __ Bind(&update);
1982 __ addl(EBX, Immediate(entry_size)); // Next entry.
1983 __ cmpl(Address(EBX, -entry_size),
1984 Immediate(target::ToRawSmi(kIllegalCid))); // Done?
1985 }
1986
1987 if (unroll == 0) {
1988 __ j(NOT_EQUAL, &loop);
1989 } else {
1990 __ j(EQUAL, &miss);
1991 }
1992 }
1993
1994 __ Bind(&miss);
1995 __ Comment("IC miss");
1996 // Compute address of arguments (first read number of arguments from
1997 // arguments descriptor array and then compute address on the stack).
1998 __ movl(EAX, FieldAddress(EDX, target::ArgumentsDescriptor::count_offset()));
1999 __ leal(EAX, Address(ESP, EAX, TIMES_2, 0)); // EAX is Smi.
2000 // Create a stub frame as we are pushing some objects on the stack before
2001 // calling into the runtime.
2002 __ EnterStubFrame();
2003 __ pushl(EDX); // Preserve arguments descriptor array.
2004 __ pushl(ECX); // Preserve IC data object.
2005 __ pushl(Immediate(0)); // Result slot.
2006 // Push call arguments.
2007 for (intptr_t i = 0; i < num_args; i++) {
2008 __ movl(EBX, Address(EAX, -target::kWordSize * i));
2009 __ pushl(EBX);
2010 }
2011 __ pushl(ECX); // Pass IC data object.
2012 __ CallRuntime(handle_ic_miss, num_args + 1);
2013 // Remove the call arguments pushed earlier, including the IC data object.
2014 for (intptr_t i = 0; i < num_args + 1; i++) {
2015 __ popl(EAX);
2016 }
2017 __ popl(EAX); // Pop returned function object into EAX.
2018 __ popl(ECX); // Restore IC data array.
2019 __ popl(EDX); // Restore arguments descriptor array.
2020 __ LeaveFrame();
2021 Label call_target_function;
2022 if (!FLAG_lazy_dispatchers) {
2023 GenerateDispatcherCode(assembler, &call_target_function);
2024 } else {
2025 __ jmp(&call_target_function);
2026 }
2027
2028 __ Bind(&found);
2029
2030 // EBX: Pointer to an IC data check group.
2031 if (FLAG_optimization_counter_threshold >= 0) {
2032 __ Comment("Update caller's counter");
2033 // Ignore overflow.
2034 __ addl(Address(EBX, count_offset), Immediate(target::ToRawSmi(1)));
2035 }
2036
2037 __ movl(EAX, Address(EBX, target_offset));
2038 __ Bind(&call_target_function);
2039 __ Comment("Call target");
2040 // EAX: Target function.
2041 __ jmp(FieldAddress(EAX, target::Function::entry_point_offset(entry_kind)));
2042
2043#if !defined(PRODUCT)
2044 if (optimized == kUnoptimized) {
2045 __ Bind(&stepping);
2046 __ EnterStubFrame();
2047 __ pushl(EBX); // Preserve receiver.
2048 __ pushl(ECX); // Preserve ICData.
2049 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2050 __ popl(ECX); // Restore ICData.
2051 __ popl(EBX); // Restore receiver.
2052 __ LeaveFrame();
2053 __ jmp(&done_stepping);
2054 }
2055#endif
2056}
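
// For reference, the IC data entries array scanned above is a flat array of
// fixed-size check groups (a sketch for num_args == 1 without an exactness
// slot; TargetIndexFor()/CountIndexFor() are authoritative):
//   entries[i + 0] : receiver class id as a Smi (kIllegalCid ends the array)
//   entries[i + 1] : target Function
//   entries[i + 2] : invocation count as a Smi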
2057
2058// EBX: receiver
2059// ECX: ICData
2060// ESP[0]: return address
2061void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub(
2062 Assembler* assembler) {
2063 GenerateNArgsCheckInlineCacheStub(
2064 assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
2065 kUnoptimized, kInstanceCall, kIgnoreExactness);
2066}
2067
2068// EBX: receiver
2069// ECX: ICData
2070// ESP[0]: return address
2071void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub(
2072 Assembler* assembler) {
2073 __ Stop("Unimplemented");
2074}
2075
2076void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub(
2077 Assembler* assembler) {
2078 __ Stop("Unimplemented");
2079}
2080
2081void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
2082 Assembler* assembler) {
2083 __ Stop("Unimplemented");
2084}
2085
2086// EBX: receiver
2087// ECX: ICData
2088// ESP[0]: return address
2089void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub(
2090 Assembler* assembler) {
2091 GenerateNArgsCheckInlineCacheStub(
2092 assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2093 kUnoptimized, kInstanceCall, kIgnoreExactness);
2094}
2095
2096// EBX: receiver
2097// ECX: ICData
2098// ESP[0]: return address
2099void StubCodeCompiler::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
2100 GenerateNArgsCheckInlineCacheStub(
2101 assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD,
2102 kUnoptimized, kInstanceCall, kIgnoreExactness);
2103}
2104
2105// EBX: receiver
2106// ECX: ICData
2107// ESP[0]: return address
2108void StubCodeCompiler::GenerateSmiLessInlineCacheStub(Assembler* assembler) {
2109 GenerateNArgsCheckInlineCacheStub(
2110 assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT,
2111 kUnoptimized, kInstanceCall, kIgnoreExactness);
2112}
2113
2114// EBX: receiver
2115// ECX: ICData
2116// ESP[0]: return address
2117void StubCodeCompiler::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
2118 GenerateNArgsCheckInlineCacheStub(
2119 assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ,
2120 kUnoptimized, kInstanceCall, kIgnoreExactness);
2121}
2122
2123// EBX: receiver
2124// ECX: ICData
2125// EAX: Function
2126// ESP[0]: return address
2127void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub(
2128 Assembler* assembler) {
2129 GenerateNArgsCheckInlineCacheStub(
2130 assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
2131 kOptimized, kInstanceCall, kIgnoreExactness);
2132}
2133
2134// EBX: receiver
2135// ECX: ICData
2136// EAX: Function
2137// ESP[0]: return address
2138void StubCodeCompiler::
2139 GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub(
2140 Assembler* assembler) {
2141 __ Stop("Unimplemented");
2142}
2143
2144// EBX: receiver
2145// ECX: ICData
2146// EAX: Function
2147// ESP[0]: return address
2148void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub(
2149 Assembler* assembler) {
2150 GenerateNArgsCheckInlineCacheStub(
2151 assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2152 kOptimized, kInstanceCall, kIgnoreExactness);
2153}
2154
2155// ECX: ICData
2156// ESP[0]: return address
2157static void GenerateZeroArgsUnoptimizedStaticCallForEntryKind(
2158 Assembler* assembler,
2159 CodeEntryKind entry_kind) {
2160 StubCodeCompiler::GenerateUsageCounterIncrement(assembler, /* scratch */ EAX);
2161
2162#if defined(DEBUG)
2163 {
2164 Label ok;
2165 // Check that the IC data array has NumArgsTested() == num_args.
2166 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
2167 __ movl(EBX, FieldAddress(ECX, target::ICData::state_bits_offset()));
2168 ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed.
2169 __ andl(EBX, Immediate(target::ICData::NumArgsTestedMask()));
2170 __ cmpl(EBX, Immediate(0));
2171 __ j(EQUAL, &ok, Assembler::kNearJump);
2172 __ Stop("Incorrect IC data for unoptimized static call");
2173 __ Bind(&ok);
2174 }
2175#endif // DEBUG
2176
2177#if !defined(PRODUCT)
2178 // Check single stepping.
2179 Label stepping, done_stepping;
2180 __ LoadIsolate(EAX);
2181 __ cmpb(Address(EAX, target::Isolate::single_step_offset()), Immediate(0));
2182 __ j(NOT_EQUAL, &stepping, Assembler::kNearJump);
2183 __ Bind(&done_stepping);
2184#endif
2185
2186 // ECX: IC data object (preserved).
2187 __ movl(EBX, FieldAddress(ECX, target::ICData::entries_offset()));
2188 // EBX: ic_data_array with entries: target functions and count.
2189 __ leal(EBX, FieldAddress(EBX, target::Array::data_offset()));
2190 // EBX: points directly to the first ic data array element.
2191 const intptr_t target_offset =
2192 target::ICData::TargetIndexFor(0) * target::kWordSize;
2193 const intptr_t count_offset =
2194 target::ICData::CountIndexFor(0) * target::kWordSize;
2195
2196 if (FLAG_optimization_counter_threshold >= 0) {
2197 // Increment count for this call, ignore overflow.
2198 __ addl(Address(EBX, count_offset), Immediate(target::ToRawSmi(1)));
2199 }
2200
2201 // Load arguments descriptor into EDX.
2202 __ movl(EDX, FieldAddress(
2203 ECX, target::CallSiteData::arguments_descriptor_offset()));
2204
2205 // Get function and call it, if possible.
2206 __ movl(EAX, Address(EBX, target_offset));
2207 __ jmp(FieldAddress(EAX, target::Function::entry_point_offset(entry_kind)));
2208
2209#if !defined(PRODUCT)
2210 __ Bind(&stepping);
2211 __ EnterStubFrame();
2212 __ pushl(ECX);
2213 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2214 __ popl(ECX);
2215 __ LeaveFrame();
2216 __ jmp(&done_stepping, Assembler::kNearJump);
2217#endif
2218}
2219
2220void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
2221 Assembler* assembler) {
2222 GenerateZeroArgsUnoptimizedStaticCallForEntryKind(assembler,
2223 CodeEntryKind::kNormal);
2224 __ BindUncheckedEntryPoint();
2225 GenerateZeroArgsUnoptimizedStaticCallForEntryKind(assembler,
2226 CodeEntryKind::kUnchecked);
2227}
2228
2229// ECX: ICData
2230// ESP[0]: return address
2231void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub(
2232 Assembler* assembler) {
2233 GenerateNArgsCheckInlineCacheStub(
2234 assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2235 kUnoptimized, kStaticCall, kIgnoreExactness);
2236}
2237
2238// ECX: ICData
2239// ESP[0]: return address
2240void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub(
2241 Assembler* assembler) {
2242 GenerateNArgsCheckInlineCacheStub(
2243 assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2244 kUnoptimized, kStaticCall, kIgnoreExactness);
2245}
2246
2247// Stub for compiling a function and jumping to the compiled code.
2248// EDX: Arguments descriptor.
2249// EAX: Function.
2250void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
2251 __ EnterStubFrame();
2252 __ pushl(EDX); // Preserve arguments descriptor array.
2253 __ pushl(EAX); // Pass function.
2254 __ CallRuntime(kCompileFunctionRuntimeEntry, 1);
2255 __ popl(EAX); // Restore function.
2256 __ popl(EDX); // Restore arguments descriptor array.
2257 __ LeaveFrame();
2258
2259 // When using the interpreter, the function's code may now point to the
2260 // InterpretCall stub. Make sure EAX, ECX, and EDX are preserved.
2261 __ jmp(FieldAddress(EAX, target::Function::entry_point_offset()));
2262}
2263
2264// Stub for interpreting a function call.
2265// EDX: Arguments descriptor.
2266// EAX: Function.
2267void StubCodeCompiler::GenerateInterpretCallStub(Assembler* assembler) {
2268 __ EnterStubFrame();
2269
2270#if defined(DEBUG)
2271 {
2272 Label ok;
2273 // Check that we are always entering from Dart code.
2274 __ cmpl(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));
2275 __ j(EQUAL, &ok, Assembler::kNearJump);
2276 __ Stop("Not coming from Dart code.");
2277 __ Bind(&ok);
2278 }
2279#endif
2280
2281 // Adjust arguments count for type arguments vector.
2282 __ movl(ECX, FieldAddress(EDX, target::ArgumentsDescriptor::count_offset()));
2283 __ SmiUntag(ECX);
2284 __ cmpl(
2285 FieldAddress(EDX, target::ArgumentsDescriptor::type_args_len_offset()),
2286 Immediate(0));
2287 Label args_count_ok;
2288 __ j(EQUAL, &args_count_ok, Assembler::kNearJump);
2289 __ incl(ECX);
2290 __ Bind(&args_count_ok);
2291
2292 // Compute argv.
2293 __ leal(EBX,
2294 Address(EBP, ECX, TIMES_4,
2295 target::frame_layout.param_end_from_fp * target::kWordSize));
2296
2297 // Indicate decreasing memory addresses of arguments with negative argc.
2298 __ negl(ECX);
2299
2300 __ pushl(THR); // Arg 4: Thread.
2301 __ pushl(EBX); // Arg 3: Argv.
2302 __ pushl(ECX); // Arg 2: Negative argc.
2303 __ pushl(EDX); // Arg 1: Arguments descriptor
2304 __ pushl(EAX); // Arg 0: Function
2305
2306 // Save exit frame information to enable stack walking as we are about
2307 // to transition to Dart VM C++ code.
2308 __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()), EBP);
2309
2310 // Mark that the thread exited generated code through a runtime call.
2311 __ movl(Address(THR, target::Thread::exit_through_ffi_offset()),
2312 Immediate(target::Thread::exit_through_runtime_call()));
2313
2314 // Mark that the thread is executing VM code.
2315 __ movl(EAX,
2316 Address(THR, target::Thread::interpret_call_entry_point_offset()));
2317 __ movl(Assembler::VMTagAddress(), EAX);
2318
2319 __ call(EAX);
2320
2321 __ Drop(5);
2322
2323 // Mark that the thread is executing Dart code.
2324 __ movl(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));
2325
2326 // Mark that the thread has not exited generated Dart code.
2327 __ movl(Address(THR, target::Thread::exit_through_ffi_offset()),
2328 Immediate(0));
2329
2330 // Reset exit frame information in Isolate's mutator thread structure.
2331 __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()),
2332 Immediate(0));
2333
2334 __ LeaveFrame();
2335 __ ret();
2336}
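
// For reference, the cdecl call above corresponds to a C entry point of
// roughly this shape (a sketch; the exact signature lives with the
// interpreter, not here):
//
//   result = InterpretCall(function, arguments_descriptor, -argc, argv,
//                          thread);
//
// argv points at the last-pushed (highest-address) argument and argc is
// negated, so the callee walks the arguments at decreasing addresses.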
2337
2338// ECX: Contains an ICData.
2339void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) {
2340#if defined(PRODUCT)
2341 __ Stop("No debugging in PRODUCT mode");
2342#else
2343 __ EnterStubFrame();
2344 __ pushl(EBX); // Preserve receiver.
2345 __ pushl(ECX); // Preserve ICData.
2346 __ pushl(Immediate(0)); // Room for result.
2347 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
2348 __ popl(EAX); // Code of original stub.
2349 __ popl(ECX); // Restore ICData.
2350 __ popl(EBX); // Restore receiver.
2351 __ LeaveFrame();
2352 // Jump to original stub.
2353 __ jmp(FieldAddress(EAX, target::Code::entry_point_offset()));
2354#endif // defined(PRODUCT)
2355}
2356
2357void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub(
2358 Assembler* assembler) {
2359#if defined(PRODUCT)
2360 __ Stop("No debugging in PRODUCT mode");
2361#else
2362 __ EnterStubFrame();
2363 __ pushl(ECX); // Preserve ICData.
2364 __ pushl(Immediate(0)); // Room for result.
2365 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
2366 __ popl(EAX); // Code of original stub.
2367 __ popl(ECX); // Restore ICData.
2368 __ LeaveFrame();
2369 // Jump to original stub.
2370 __ jmp(FieldAddress(EAX, target::Code::entry_point_offset()));
2371#endif // defined(PRODUCT)
2372}
2373
2374void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
2375#if defined(PRODUCT)
2376 __ Stop("No debugging in PRODUCT mode");
2377#else
2378 __ EnterStubFrame();
  // Make room for the result: the debugger stub returns the address of the
  // unpatched runtime stub.
  __ pushl(Immediate(0));  // Room for result.
2382 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
2383 __ popl(EAX); // Code of the original stub
2384 __ LeaveFrame();
2385 // Jump to original stub.
2386 __ jmp(FieldAddress(EAX, target::Code::entry_point_offset()));
2387#endif // defined(PRODUCT)
2388}
2389
2390// Called only from unoptimized code.
2391void StubCodeCompiler::GenerateDebugStepCheckStub(Assembler* assembler) {
2392#if defined(PRODUCT)
2393 __ Stop("No debugging in PRODUCT mode");
2394#else
2395 // Check single stepping.
2396 Label stepping, done_stepping;
2397 __ LoadIsolate(EAX);
2398 __ movzxb(EAX, Address(EAX, target::Isolate::single_step_offset()));
2399 __ cmpl(EAX, Immediate(0));
2400 __ j(NOT_EQUAL, &stepping, Assembler::kNearJump);
2401 __ Bind(&done_stepping);
2402 __ ret();
2403
2404 __ Bind(&stepping);
2405 __ EnterStubFrame();
2406 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2407 __ LeaveFrame();
2408 __ jmp(&done_stepping, Assembler::kNearJump);
2409#endif // defined(PRODUCT)
2410}
2411
// Used to check class and type arguments. Arguments passed on stack:
// TOS + 0: return address.
// TOS + 1: function type arguments (only used if n >= 4, can be raw_null).
// TOS + 2: instantiator type arguments (only used if n >= 4, can be raw_null).
// TOS + 3: instance.
// TOS + 4: SubtypeTestCache.
2418// Result in ECX: null -> not found, otherwise result (true or false).
2419static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
2420 ASSERT(n == 1 || n == 2 || n == 4 || n == 6);
2421
  static const intptr_t kFunctionTypeArgumentsInBytes = 1 * target::kWordSize;
  static const intptr_t kInstantiatorTypeArgumentsInBytes =
      2 * target::kWordSize;
  static const intptr_t kInstanceOffsetInBytes = 3 * target::kWordSize;
  static const intptr_t kCacheOffsetInBytes = 4 * target::kWordSize;
2426
2427 const Register kInstanceCidOrFunction = ECX;
2428 const Register kInstanceInstantiatorTypeArgumentsReg = EBX;
2429
2430 const auto& raw_null = Immediate(target::ToRawPointer(NullObject()));
2431
2432 __ movl(TypeTestABI::kInstanceReg, Address(ESP, kInstanceOffsetInBytes));
2433
  // Loop initialization (moved up here so that the dependent loads are not
  // issued back-to-back).
2436 __ movl(EDX, Address(ESP, kCacheOffsetInBytes));
2437 // We avoid a load-acquire barrier here by relying on the fact that all other
2438 // loads from the array are data-dependent loads.
2439 __ movl(EDX, FieldAddress(EDX, target::SubtypeTestCache::cache_offset()));
2440 __ addl(EDX, Immediate(target::Array::data_offset() - kHeapObjectTag));
2441
2442 Label loop, not_closure;
2443 if (n >= 4) {
2444 __ LoadClassIdMayBeSmi(kInstanceCidOrFunction, TypeTestABI::kInstanceReg);
2445 } else {
2446 __ LoadClassId(kInstanceCidOrFunction, TypeTestABI::kInstanceReg);
2447 }
2448 __ cmpl(kInstanceCidOrFunction, Immediate(kClosureCid));
2449 __ j(NOT_EQUAL, &not_closure, Assembler::kNearJump);
2450
2451 // Closure handling.
2452 {
2453 __ movl(kInstanceCidOrFunction,
2454 FieldAddress(TypeTestABI::kInstanceReg,
2455 target::Closure::function_offset()));
2456 if (n >= 2) {
2457 __ movl(
2458 kInstanceInstantiatorTypeArgumentsReg,
2459 FieldAddress(TypeTestABI::kInstanceReg,
2460 target::Closure::instantiator_type_arguments_offset()));
2461 if (n >= 6) {
2462 __ pushl(
2463 FieldAddress(TypeTestABI::kInstanceReg,
2464 target::Closure::delayed_type_arguments_offset()));
2465 __ pushl(
2466 FieldAddress(TypeTestABI::kInstanceReg,
2467 target::Closure::function_type_arguments_offset()));
2468 }
2469 }
2470 __ jmp(&loop, Assembler::kNearJump);
2471 }
2472
2473 // Non-Closure handling.
2474 {
2475 __ Bind(&not_closure);
2476 if (n >= 2) {
2477 Label has_no_type_arguments;
2478 __ LoadClassById(EDI, kInstanceCidOrFunction);
2479 __ movl(kInstanceInstantiatorTypeArgumentsReg, raw_null);
2480 __ movl(EDI,
2481 FieldAddress(
2482 EDI, target::Class::
2483 host_type_arguments_field_offset_in_words_offset()));
2484 __ cmpl(EDI, Immediate(target::Class::kNoTypeArguments));
2485 __ j(EQUAL, &has_no_type_arguments, Assembler::kNearJump);
2486 __ movl(kInstanceInstantiatorTypeArgumentsReg,
2487 FieldAddress(TypeTestABI::kInstanceReg, EDI, TIMES_4, 0));
2488 __ Bind(&has_no_type_arguments);
2489
2490 if (n >= 6) {
2491 __ pushl(raw_null); // delayed.
2492 __ pushl(raw_null); // function.
2493 }
2494 }
2495 __ SmiTag(kInstanceCidOrFunction);
2496 }
2497
2498 const intptr_t kInstanceParentFunctionTypeArgumentsFromSp = 0;
2499 const intptr_t kInstanceDelayedFunctionTypeArgumentsFromSp =
2500 target::kWordSize;
2501 const intptr_t args_offset = n >= 6 ? 2 * target::kWordSize : 0;
2502
2503 Label found, not_found, next_iteration;
2504
2505 // Loop header.
2506 __ Bind(&loop);
2507 __ movl(
2508 EDI,
2509 Address(EDX, target::kWordSize *
2510 target::SubtypeTestCache::kInstanceClassIdOrFunction));
2511 __ cmpl(EDI, raw_null);
2512 __ j(EQUAL, &not_found, Assembler::kNearJump);
2513 __ cmpl(EDI, kInstanceCidOrFunction);
2514 if (n == 1) {
2515 __ j(EQUAL, &found, Assembler::kNearJump);
2516 } else {
2517 __ j(NOT_EQUAL, &next_iteration, Assembler::kNearJump);
2518 __ cmpl(kInstanceInstantiatorTypeArgumentsReg,
2519 Address(EDX, target::kWordSize *
2520 target::SubtypeTestCache::kInstanceTypeArguments));
2521 if (n == 2) {
2522 __ j(EQUAL, &found, Assembler::kNearJump);
2523 } else {
2524 __ j(NOT_EQUAL, &next_iteration, Assembler::kNearJump);
2525 __ movl(
2526 EDI,
2527 Address(EDX,
2528 target::kWordSize *
2529 target::SubtypeTestCache::kInstantiatorTypeArguments));
2530 __ cmpl(EDI,
2531 Address(ESP, args_offset + kInstantiatorTypeArgumentsInBytes));
2532 __ j(NOT_EQUAL, &next_iteration, Assembler::kNearJump);
2533 __ movl(
2534 EDI,
2535 Address(EDX, target::kWordSize *
2536 target::SubtypeTestCache::kFunctionTypeArguments));
2537 __ cmpl(EDI, Address(ESP, args_offset + kFunctionTypeArgumentsInBytes));
2538 if (n == 4) {
2539 __ j(EQUAL, &found, Assembler::kNearJump);
2540 } else {
2541 ASSERT(n == 6);
2542 __ j(NOT_EQUAL, &next_iteration, Assembler::kNearJump);
2543
2544 __ movl(EDI,
2545 Address(EDX, target::kWordSize *
2546 target::SubtypeTestCache::
2547 kInstanceParentFunctionTypeArguments));
2548 __ cmpl(EDI, Address(ESP, kInstanceParentFunctionTypeArgumentsFromSp));
2549 __ j(NOT_EQUAL, &next_iteration, Assembler::kNearJump);
2550 __ movl(EDI,
2551 Address(EDX, target::kWordSize *
2552 target::SubtypeTestCache::
2553 kInstanceDelayedFunctionTypeArguments));
2554 __ cmpl(EDI, Address(ESP, kInstanceDelayedFunctionTypeArgumentsFromSp));
2555 __ j(EQUAL, &found, Assembler::kNearJump);
2556 }
2557 }
2558 }
2559 __ Bind(&next_iteration);
2560 __ addl(EDX, Immediate(target::kWordSize *
2561 target::SubtypeTestCache::kTestEntryLength));
2562 __ jmp(&loop, Assembler::kNearJump);
2563
2564 __ Bind(&found);
2565 __ movl(ECX, Address(EDX, target::kWordSize *
2566 target::SubtypeTestCache::kTestResult));
2567 if (n == 6) {
2568 __ Drop(2);
2569 }
2570 __ ret();
2571
2572 __ Bind(&not_found);
2573 __ movl(ECX, raw_null);
2574 if (n == 6) {
2575 __ Drop(2);
2576 }
2577 __ ret();
2578}
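
// For reference, each cache row probed above occupies
// target::SubtypeTestCache::kTestEntryLength words, in this slot order
// (a sketch; the symbolic indices used above are authoritative):
//   [ instance class id or closure function,
//     instance type arguments,
//     instantiator type arguments,
//     function type arguments,
//     instance parent function type arguments,
//     instance delayed function type arguments,
//     test result ]
// A null in the first slot terminates the cache.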
2579
2580// See comment on [GenerateSubtypeNTestCacheStub].
2581void StubCodeCompiler::GenerateSubtype1TestCacheStub(Assembler* assembler) {
2582 GenerateSubtypeNTestCacheStub(assembler, 1);
2583}
2584
2585// See comment on [GenerateSubtypeNTestCacheStub].
2586void StubCodeCompiler::GenerateSubtype2TestCacheStub(Assembler* assembler) {
2587 GenerateSubtypeNTestCacheStub(assembler, 2);
2588}
2589
2590// See comment on [GenerateSubtypeNTestCacheStub].
2591void StubCodeCompiler::GenerateSubtype4TestCacheStub(Assembler* assembler) {
2592 GenerateSubtypeNTestCacheStub(assembler, 4);
2593}
2594
2595// See comment on [GenerateSubtypeNTestCacheStub].
2596void StubCodeCompiler::GenerateSubtype6TestCacheStub(Assembler* assembler) {
2597 GenerateSubtypeNTestCacheStub(assembler, 6);
2598}
2599
2600void StubCodeCompiler::GenerateDefaultTypeTestStub(Assembler* assembler) {
2601 // Not implemented on ia32.
2602 __ Breakpoint();
2603}
2604
2605void StubCodeCompiler::GenerateDefaultNullableTypeTestStub(
2606 Assembler* assembler) {
2607 // Not implemented on ia32.
2608 __ Breakpoint();
2609}
2610
2611void StubCodeCompiler::GenerateTopTypeTypeTestStub(Assembler* assembler) {
2612 // Not implemented on ia32.
2613 __ Breakpoint();
2614}
2615
2616void StubCodeCompiler::GenerateUnreachableTypeTestStub(Assembler* assembler) {
2617 // Not implemented on ia32.
2618 __ Breakpoint();
2619}
2620
2621void StubCodeCompiler::GenerateLazySpecializeTypeTestStub(
2622 Assembler* assembler) {
2623 // Not implemented on ia32.
2624 __ Breakpoint();
2625}
2626
2627void StubCodeCompiler::GenerateLazySpecializeNullableTypeTestStub(
2628 Assembler* assembler) {
2629 // Not implemented on ia32.
2630 __ Breakpoint();
2631}
2632
2633void StubCodeCompiler::GenerateSlowTypeTestStub(Assembler* assembler) {
2634 // Not implemented on ia32.
2635 __ Breakpoint();
2636}
2637
2638// Return the current stack pointer address, used to do stack alignment checks.
2639// TOS + 0: return address
2640// Result in EAX.
2641void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
2642 __ leal(EAX, Address(ESP, target::kWordSize));
2643 __ ret();
2644}
2645
2646// Jump to a frame on the call stack.
2647// TOS + 0: return address
2648// TOS + 1: program_counter
2649// TOS + 2: stack_pointer
2650// TOS + 3: frame_pointer
2651// TOS + 4: thread
2652// No Result.
2653void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) {
2654 __ movl(THR, Address(ESP, 4 * target::kWordSize)); // Load target thread.
2655 __ movl(EBP,
2656 Address(ESP, 3 * target::kWordSize)); // Load target frame_pointer.
2657 __ movl(EBX,
2658 Address(ESP, 1 * target::kWordSize)); // Load target PC into EBX.
2659 __ movl(ESP,
2660 Address(ESP, 2 * target::kWordSize)); // Load target stack_pointer.
2661#if defined(USING_SHADOW_CALL_STACK)
2662#error Unimplemented
2663#endif
2664
2665 Label exit_through_non_ffi;
  // Check if we exited generated code through FFI. If so, do the transition.
2667 __ cmpl(compiler::Address(
2668 THR, compiler::target::Thread::exit_through_ffi_offset()),
2669 compiler::Immediate(target::Thread::exit_through_ffi()));
2670 __ j(NOT_EQUAL, &exit_through_non_ffi, compiler::Assembler::kNearJump);
2671 __ TransitionNativeToGenerated(ECX, /*leave_safepoint=*/true);
2672 __ Bind(&exit_through_non_ffi);
2673
2674 // Set tag.
2675 __ movl(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));
2676 // Clear top exit frame.
2677 __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()),
2678 Immediate(0));
2679 __ jmp(EBX); // Jump to the exception handler code.
2680}
2681
2682// Run an exception handler. Execution comes from JumpToFrame stub.
2683//
2684// The arguments are stored in the Thread object.
2685// No result.
2686void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
2687 ASSERT(kExceptionObjectReg == EAX);
2688 ASSERT(kStackTraceObjectReg == EDX);
2689 __ movl(EBX, Address(THR, target::Thread::resume_pc_offset()));
2690
2691 ASSERT(target::CanLoadFromThread(NullObject()));
2692 __ movl(ECX, Address(THR, target::Thread::OffsetFromThread(NullObject())));
2693
2694 // Load the exception from the current thread.
2695 Address exception_addr(THR, target::Thread::active_exception_offset());
2696 __ movl(kExceptionObjectReg, exception_addr);
2697 __ movl(exception_addr, ECX);
2698
2699 // Load the stacktrace from the current thread.
2700 Address stacktrace_addr(THR, target::Thread::active_stacktrace_offset());
2701 __ movl(kStackTraceObjectReg, stacktrace_addr);
2702 __ movl(stacktrace_addr, ECX);
2703
2704 __ jmp(EBX); // Jump to continuation point.
2705}
2706
2707// Deoptimize a frame on the call stack before rewinding.
2708// The arguments are stored in the Thread object.
2709// No result.
2710void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
2711 // Push the deopt pc.
2712 __ pushl(Address(THR, target::Thread::resume_pc_offset()));
2713 GenerateDeoptimizationSequence(assembler, kEagerDeopt);
2714
2715 // After we have deoptimized, jump to the correct frame.
2716 __ EnterStubFrame();
2717 __ CallRuntime(kRewindPostDeoptRuntimeEntry, 0);
2718 __ LeaveFrame();
2719 __ int3();
2720}
2721
// Calls the runtime to optimize the given function.
// EBX: function to be re-optimized.
// EDX: argument descriptor (preserved).
2725void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
2726 __ movl(CODE_REG, Address(THR, target::Thread::optimize_stub_offset()));
2727 __ EnterStubFrame();
2728 __ pushl(EDX);
  __ pushl(Immediate(0));  // Set up space on stack for the return value.
  __ pushl(EBX);
  __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
  __ popl(EAX);  // Discard argument.
  __ popl(EAX);  // Get Function object.
  __ popl(EDX);  // Restore argument descriptor.
2735 __ LeaveFrame();
2736 __ movl(CODE_REG, FieldAddress(EAX, target::Function::code_offset()));
2737 __ jmp(FieldAddress(EAX, target::Function::entry_point_offset()));
2738 __ int3();
2739}
2740
// Does an identical check (object references are equal or not equal) with
// special checks for boxed numbers.
// Returns with ZF set.
// Note: A Mint cannot contain a value that would fit in a Smi.
2745static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
2746 const Register left,
2747 const Register right,
2748 const Register temp) {
2749 Label reference_compare, done, check_mint;
2750 // If any of the arguments is Smi do reference compare.
2751 __ testl(left, Immediate(kSmiTagMask));
2752 __ j(ZERO, &reference_compare, Assembler::kNearJump);
2753 __ testl(right, Immediate(kSmiTagMask));
2754 __ j(ZERO, &reference_compare, Assembler::kNearJump);
2755
2756 // Value compare for two doubles.
2757 __ CompareClassId(left, kDoubleCid, temp);
2758 __ j(NOT_EQUAL, &check_mint, Assembler::kNearJump);
2759 __ CompareClassId(right, kDoubleCid, temp);
2760 __ j(NOT_EQUAL, &done, Assembler::kNearJump);
2761
2762 // Double values bitwise compare.
2763 __ movl(temp, FieldAddress(left, target::Double::value_offset() +
2764 0 * target::kWordSize));
2765 __ cmpl(temp, FieldAddress(right, target::Double::value_offset() +
2766 0 * target::kWordSize));
2767 __ j(NOT_EQUAL, &done, Assembler::kNearJump);
2768 __ movl(temp, FieldAddress(left, target::Double::value_offset() +
2769 1 * target::kWordSize));
2770 __ cmpl(temp, FieldAddress(right, target::Double::value_offset() +
2771 1 * target::kWordSize));
2772 __ jmp(&done, Assembler::kNearJump);
2773
2774 __ Bind(&check_mint);
2775 __ CompareClassId(left, kMintCid, temp);
2776 __ j(NOT_EQUAL, &reference_compare, Assembler::kNearJump);
2777 __ CompareClassId(right, kMintCid, temp);
2778 __ j(NOT_EQUAL, &done, Assembler::kNearJump);
2779 __ movl(temp, FieldAddress(left, target::Mint::value_offset() +
2780 0 * target::kWordSize));
2781 __ cmpl(temp, FieldAddress(right, target::Mint::value_offset() +
2782 0 * target::kWordSize));
2783 __ j(NOT_EQUAL, &done, Assembler::kNearJump);
2784 __ movl(temp, FieldAddress(left, target::Mint::value_offset() +
2785 1 * target::kWordSize));
2786 __ cmpl(temp, FieldAddress(right, target::Mint::value_offset() +
2787 1 * target::kWordSize));
2788 __ jmp(&done, Assembler::kNearJump);
2789
2790 __ Bind(&reference_compare);
2791 __ cmpl(left, right);
2792 __ Bind(&done);
2793}
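
// For reference, the check above is roughly (a sketch in C++ terms; the
// Bits64/Value64 helpers are illustrative):
//
//   if (left->IsSmi() || right->IsSmi()) return left == right;
//   if (left->IsDouble()) return right->IsDouble() &&
//                                Bits64(left) == Bits64(right);
//   if (left->IsMint())   return right->IsMint() &&
//                                Value64(left) == Value64(right);
//   return left == right;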
2794
2795// Called only from unoptimized code. All relevant registers have been saved.
2796// TOS + 0: return address
2797// TOS + 1: right argument.
2798// TOS + 2: left argument.
2799// Returns ZF set.
2800void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub(
2801 Assembler* assembler) {
2802#if !defined(PRODUCT)
2803 // Check single stepping.
2804 Label stepping, done_stepping;
2805 __ LoadIsolate(EAX);
2806 __ movzxb(EAX, Address(EAX, target::Isolate::single_step_offset()));
2807 __ cmpl(EAX, Immediate(0));
2808 __ j(NOT_EQUAL, &stepping);
2809 __ Bind(&done_stepping);
2810#endif
2811
2812 const Register left = EAX;
2813 const Register right = EDX;
2814 const Register temp = ECX;
2815 __ movl(left, Address(ESP, 2 * target::kWordSize));
2816 __ movl(right, Address(ESP, 1 * target::kWordSize));
2817 GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp);
2818 __ ret();
2819
2820#if !defined(PRODUCT)
2821 __ Bind(&stepping);
2822 __ EnterStubFrame();
2823 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2824 __ LeaveFrame();
2825 __ jmp(&done_stepping);
2826#endif
2827}
2828
2829// Called from optimized code only.
2830// TOS + 0: return address
2831// TOS + 1: right argument.
2832// TOS + 2: left argument.
2833// Returns ZF set.
2834void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
2835 Assembler* assembler) {
2836 const Register left = EAX;
2837 const Register right = EDX;
2838 const Register temp = ECX;
2839 __ movl(left, Address(ESP, 2 * target::kWordSize));
2840 __ movl(right, Address(ESP, 1 * target::kWordSize));
2841 GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp);
2842 __ ret();
2843}
2844
2845// Called from megamorphic calls.
2846// EBX: receiver (passed to target)
2847// ECX: target::MegamorphicCache (preserved)
2848// Passed to target:
2849// EBX: target entry point
2850// EDX: argument descriptor
2851void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
  // Jump out of line if the receiver (in EBX) is a Smi.
  Label smi_case;
  __ testl(EBX, Immediate(kSmiTagMask));
  __ j(ZERO, &smi_case, Assembler::kNearJump);
2858
2859 // Loads the cid of the instance.
2860 __ LoadClassId(EAX, EBX);
2861
2862 Label cid_loaded;
2863 __ Bind(&cid_loaded);
2864 __ pushl(EBX); // save receiver
2865 __ movl(EBX, FieldAddress(ECX, target::MegamorphicCache::mask_offset()));
2866 __ movl(EDI, FieldAddress(ECX, target::MegamorphicCache::buckets_offset()));
2867 // EDI: cache buckets array.
2868 // EBX: mask as a smi.
2869
2870 // Tag cid as a smi.
2871 __ addl(EAX, EAX);
2872
2873 // Compute the table index.
2874 ASSERT(target::MegamorphicCache::kSpreadFactor == 7);
  // Use leal and subl to multiply by 7 (== 8 - 1).
2876 __ leal(EDX, Address(EAX, TIMES_8, 0));
2877 __ subl(EDX, EAX);
2878
2879 Label loop;
2880 __ Bind(&loop);
2881 __ andl(EDX, EBX);
2882
2883 const intptr_t base = target::Array::data_offset();
2884 Label probe_failed;
  // EDX is Smi-tagged (already multiplied by 2), and table entries are two
  // words each, hence TIMES_4.
2886 __ cmpl(EAX, FieldAddress(EDI, EDX, TIMES_4, base));
2887 __ j(NOT_EQUAL, &probe_failed, Assembler::kNearJump);
2888
2889 Label load_target;
2890 __ Bind(&load_target);
2891 // Call the target found in the cache. For a class id match, this is a
2892 // proper target for the given name and arguments descriptor. If the
2893 // illegal class id was found, the target is a cache miss handler that can
2894 // be invoked as a normal Dart function.
2895 __ movl(EAX, FieldAddress(EDI, EDX, TIMES_4, base + target::kWordSize));
2896 __ movl(EDX, FieldAddress(
2897 ECX, target::CallSiteData::arguments_descriptor_offset()));
2898 __ popl(EBX); // restore receiver
2899 __ jmp(FieldAddress(EAX, target::Function::entry_point_offset()));
2900
2901 __ Bind(&probe_failed);
2902 // Probe failed, check if it is a miss.
2903 __ cmpl(FieldAddress(EDI, EDX, TIMES_4, base),
2904 Immediate(target::ToRawSmi(kIllegalCid)));
2905 Label miss;
2906 __ j(ZERO, &miss, Assembler::kNearJump);
2907
2908 // Try next entry in the table.
2909 __ AddImmediate(EDX, Immediate(target::ToRawSmi(1)));
2910 __ jmp(&loop);
2911
2912 // Load cid for the Smi case.
2913 __ Bind(&smi_case);
2914 __ movl(EAX, Immediate(kSmiCid));
2915 __ jmp(&cid_loaded);
2916
2917 __ Bind(&miss);
2918 __ popl(EBX); // restore receiver
2919 GenerateSwitchableCallMissStub(assembler);
2920}
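
// For reference, the probing loop above implements open addressing with
// linear probing over (cid, target) pairs (a sketch):
//
//   index = (cid * kSpreadFactor) & mask;              // kSpreadFactor == 7
//   while (buckets[index].cid != cid) {
//     if (buckets[index].cid == kIllegalCid) goto miss;  // runtime lookup
//     index = (index + 1) & mask;
//   }
//   call buckets[index].target;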
2921
2922void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
2923 __ int3(); // AOT only.
2924}
2925
2926void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub(
2927 Assembler* assembler) {
2928 __ int3(); // AOT only.
2929}
2930
2931// Called from switchable IC calls.
2932// EBX: receiver
2933void StubCodeCompiler::GenerateSwitchableCallMissStub(Assembler* assembler) {
2934 __ movl(CODE_REG,
2935 Address(THR, target::Thread::switchable_call_miss_stub_offset()));
2936 __ EnterStubFrame();
2937 __ pushl(EBX); // Preserve receiver.
2938
2939 __ pushl(Immediate(0)); // Result slot.
2940 __ pushl(Immediate(0)); // Arg0: stub out.
2941 __ pushl(EBX); // Arg1: Receiver
2942 __ CallRuntime(kSwitchableCallMissRuntimeEntry, 2);
  __ popl(ECX);       // Discard the receiver argument.
  __ popl(CODE_REG);  // result = stub.
  __ popl(ECX);       // result = IC data.
2946
2947 __ popl(EBX); // Restore receiver.
2948 __ LeaveFrame();
2949
2950 __ movl(EAX, FieldAddress(CODE_REG, target::Code::entry_point_offset(
2951 CodeEntryKind::kNormal)));
2952 __ jmp(EAX);
2953}
2954
2955void StubCodeCompiler::GenerateSingleTargetCallStub(Assembler* assembler) {
2956 __ int3(); // AOT only.
2957}
2958
2959void StubCodeCompiler::GenerateFrameAwaitingMaterializationStub(
2960 Assembler* assembler) {
2961 __ int3(); // Marker stub.
2962}
2963
2964void StubCodeCompiler::GenerateAsynchronousGapMarkerStub(Assembler* assembler) {
2965 __ int3(); // Marker stub.
2966}
2967
2968void StubCodeCompiler::GenerateNotLoadedStub(Assembler* assembler) {
2969 __ EnterStubFrame();
2970 __ CallRuntime(kNotLoadedRuntimeEntry, 0);
2971 __ int3();
2972}
2973
2974// Instantiate type arguments from instantiator and function type args.
2975// EBX: uninstantiated type arguments.
2976// EDX: instantiator type arguments.
2977// ECX: function type arguments.
2978// Returns instantiated type arguments in EAX.
2979void StubCodeCompiler::GenerateInstantiateTypeArgumentsStub(
2980 Assembler* assembler) {
  // Look up the cache before calling the runtime.
2982 __ pushl(InstantiationABI::kUninstantiatedTypeArgumentsReg); // Preserve reg.
2983 __ movl(EAX, compiler::FieldAddress(
2984 InstantiationABI::kUninstantiatedTypeArgumentsReg,
2985 target::TypeArguments::instantiations_offset()));
2986 __ leal(EAX, compiler::FieldAddress(EAX, Array::data_offset()));
2987 // The instantiations cache is initialized with Object::zero_array() and is
2988 // therefore guaranteed to contain kNoInstantiator. No length check needed.
2989 compiler::Label loop, next, found, call_runtime;
2990 __ Bind(&loop);
2991
  // Use load-acquire to test for the sentinel; if we find a non-sentinel it
  // is safe to access the other entries. If we find the sentinel, go to
  // runtime.
2994 __ LoadAcquire(EDI, EAX,
2995 TypeArguments::Instantiation::kInstantiatorTypeArgsIndex *
2996 target::kWordSize);
2997 __ CompareImmediate(EDI, Smi::RawValue(TypeArguments::kNoInstantiator));
2998 __ j(EQUAL, &call_runtime, compiler::Assembler::kNearJump);
2999
3000 __ cmpl(EDI, InstantiationABI::kInstantiatorTypeArgumentsReg);
3001 __ j(NOT_EQUAL, &next, compiler::Assembler::kNearJump);
3002 __ movl(EBX, compiler::Address(
3003 EAX, TypeArguments::Instantiation::kFunctionTypeArgsIndex *
3004 target::kWordSize));
3005 __ cmpl(EBX, InstantiationABI::kFunctionTypeArgumentsReg);
3006 __ j(EQUAL, &found, compiler::Assembler::kNearJump);
3007 __ Bind(&next);
3008 __ addl(EAX, compiler::Immediate(TypeArguments::Instantiation::kSizeInWords *
3009 target::kWordSize));
3010 __ jmp(&loop, compiler::Assembler::kNearJump);
3011
3012 // Instantiate non-null type arguments.
3013 // A runtime call to instantiate the type arguments is required.
3014 __ Bind(&call_runtime);
3015 __ popl(InstantiationABI::kUninstantiatedTypeArgumentsReg); // Restore reg.
3016 __ EnterStubFrame();
3017 __ PushObject(Object::null_object()); // Make room for the result.
3018 __ pushl(InstantiationABI::kUninstantiatedTypeArgumentsReg);
3019 __ pushl(InstantiationABI::kInstantiatorTypeArgumentsReg);
3020 __ pushl(InstantiationABI::kFunctionTypeArgumentsReg);
3021 __ CallRuntime(kInstantiateTypeArgumentsRuntimeEntry, 3);
  __ Drop(3);  // Drop both type argument vectors and the uninstantiated args.
3023 __ popl(InstantiationABI::kResultTypeArgumentsReg);
3024 __ LeaveFrame();
3025 __ ret();
3026
3027 __ Bind(&found);
3028 __ popl(InstantiationABI::kUninstantiatedTypeArgumentsReg); // Drop reg.
3029 __ movl(InstantiationABI::kResultTypeArgumentsReg,
3030 compiler::Address(
3031 EAX, TypeArguments::Instantiation::kInstantiatedTypeArgsIndex *
3032 target::kWordSize));
3033 __ ret();
3034}
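
// For reference, the instantiations_ cache scanned above is a flat array of
// TypeArguments::Instantiation::kSizeInWords-word tuples (a sketch):
//   [ instantiator type arguments,
//     function type arguments,
//     instantiated type arguments ]
// A Smi kNoInstantiator in the first slot terminates the cache, which is why
// no explicit length check is needed.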
3035
3036void StubCodeCompiler::
3037 GenerateInstantiateTypeArgumentsMayShareInstantiatorTAStub(
3038 Assembler* assembler) {
  // Return the instantiator type arguments if their nullability is compatible
  // for sharing; otherwise fall through to the instantiation cache lookup.
3041 compiler::Label cache_lookup;
3042 __ movl(EAX, compiler::FieldAddress(
3043 InstantiationABI::kUninstantiatedTypeArgumentsReg,
3044 target::TypeArguments::nullability_offset()));
3045 __ movl(EDI, compiler::FieldAddress(
3046 InstantiationABI::kInstantiatorTypeArgumentsReg,
3047 target::TypeArguments::nullability_offset()));
3048 __ andl(EDI, EAX);
3049 __ cmpl(EDI, EAX);
3050 __ j(NOT_EQUAL, &cache_lookup, compiler::Assembler::kNearJump);
3051 __ movl(InstantiationABI::kResultTypeArgumentsReg,
3052 InstantiationABI::kInstantiatorTypeArgumentsReg);
3053 __ ret();
3054
3055 __ Bind(&cache_lookup);
3056 GenerateInstantiateTypeArgumentsStub(assembler);
3057}
3058
3059void StubCodeCompiler::GenerateInstantiateTypeArgumentsMayShareFunctionTAStub(
3060 Assembler* assembler) {
  // Return the function type arguments if their nullability is compatible
  // for sharing; otherwise fall through to the instantiation cache lookup.
3063 compiler::Label cache_lookup;
3064 __ movl(EAX, compiler::FieldAddress(
3065 InstantiationABI::kUninstantiatedTypeArgumentsReg,
3066 target::TypeArguments::nullability_offset()));
3067 __ movl(EDI,
3068 compiler::FieldAddress(InstantiationABI::kFunctionTypeArgumentsReg,
3069 target::TypeArguments::nullability_offset()));
3070 __ andl(EDI, EAX);
3071 __ cmpl(EDI, EAX);
3072 __ j(NOT_EQUAL, &cache_lookup, compiler::Assembler::kNearJump);
3073 __ movl(InstantiationABI::kResultTypeArgumentsReg,
3074 InstantiationABI::kFunctionTypeArgumentsReg);
3075 __ ret();
3076
3077 __ Bind(&cache_lookup);
3078 GenerateInstantiateTypeArgumentsStub(assembler);
3079}
3080
3081} // namespace compiler
3082
3083} // namespace dart
3084
3085#endif // defined(TARGET_ARCH_IA32)
3086