// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#if !defined(DART_PRECOMPILED_RUNTIME)

#include "vm/deferred_objects.h"

#include "vm/code_patcher.h"
#include "vm/compiler/jit/compiler.h"
#include "vm/deopt_instructions.h"
#include "vm/flags.h"
#include "vm/object.h"

namespace dart {

DECLARE_FLAG(bool, trace_deoptimization);
DECLARE_FLAG(bool, trace_deoptimization_verbose);

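// Boxes the unboxed double value recorded during deoptimization into a
// heap-allocated Double and stores its pointer into the destination frame
// slot.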
void DeferredDouble::Materialize(DeoptContext* deopt_context) {
  DoublePtr* double_slot = reinterpret_cast<DoublePtr*>(slot());
  *double_slot = Double::New(value());

  if (FLAG_trace_deoptimization_verbose) {
    OS::PrintErr("materializing double at %" Px ": %g\n",
                 reinterpret_cast<uword>(slot()), value());
  }
}

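// Boxes a 64-bit integer value into a Mint and stores it into the
// destination frame slot; values that fit in a Smi are never deferred, as
// the assert below checks.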
void DeferredMint::Materialize(DeoptContext* deopt_context) {
  MintPtr* mint_slot = reinterpret_cast<MintPtr*>(slot());
  ASSERT(!Smi::IsValid(value()));
  Mint& mint = Mint::Handle();
  mint ^= Integer::New(value());
  *mint_slot = mint.raw();

  if (FLAG_trace_deoptimization_verbose) {
    OS::PrintErr("materializing mint at %" Px ": %" Pd64 "\n",
                 reinterpret_cast<uword>(slot()), value());
  }
}

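// Boxes an unboxed Float32x4 SIMD value into a heap object and stores it
// into the destination frame slot.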
void DeferredFloat32x4::Materialize(DeoptContext* deopt_context) {
  Float32x4Ptr* float32x4_slot = reinterpret_cast<Float32x4Ptr*>(slot());
  Float32x4Ptr raw_float32x4 = Float32x4::New(value());
  *float32x4_slot = raw_float32x4;

  if (FLAG_trace_deoptimization_verbose) {
    float x = raw_float32x4->ptr()->x();
    float y = raw_float32x4->ptr()->y();
    float z = raw_float32x4->ptr()->z();
    float w = raw_float32x4->ptr()->w();
    OS::PrintErr("materializing Float32x4 at %" Px ": %g,%g,%g,%g\n",
                 reinterpret_cast<uword>(slot()), x, y, z, w);
  }
}

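// Boxes an unboxed Float64x2 SIMD value into a heap object and stores it
// into the destination frame slot.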
void DeferredFloat64x2::Materialize(DeoptContext* deopt_context) {
  Float64x2Ptr* float64x2_slot = reinterpret_cast<Float64x2Ptr*>(slot());
  Float64x2Ptr raw_float64x2 = Float64x2::New(value());
  *float64x2_slot = raw_float64x2;

  if (FLAG_trace_deoptimization_verbose) {
    double x = raw_float64x2->ptr()->x();
    double y = raw_float64x2->ptr()->y();
    OS::PrintErr("materializing Float64x2 at %" Px ": %g,%g\n",
                 reinterpret_cast<uword>(slot()), x, y);
  }
}

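// Boxes an unboxed Int32x4 SIMD value into a heap object and stores it
// into the destination frame slot.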
void DeferredInt32x4::Materialize(DeoptContext* deopt_context) {
  Int32x4Ptr* int32x4_slot = reinterpret_cast<Int32x4Ptr*>(slot());
  Int32x4Ptr raw_int32x4 = Int32x4::New(value());
  *int32x4_slot = raw_int32x4;

  if (FLAG_trace_deoptimization_verbose) {
    uint32_t x = raw_int32x4->ptr()->x();
    uint32_t y = raw_int32x4->ptr()->y();
    uint32_t z = raw_int32x4->ptr()->z();
    uint32_t w = raw_int32x4->ptr()->w();
    OS::PrintErr("materializing Int32x4 at %" Px ": %x,%x,%x,%x\n",
                 reinterpret_cast<uword>(slot()), x, y, z, w);
  }
}

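// Stores a pointer to a deferred object into the destination frame slot.
// The referenced object is allocated lazily by DeferredObject::object();
// its fields are filled in separately (see DeferredObject::Fill below).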
void DeferredObjectRef::Materialize(DeoptContext* deopt_context) {
  DeferredObject* obj = deopt_context->GetDeferredObject(index());
  *slot() = obj->object();
  if (FLAG_trace_deoptimization_verbose) {
    const Class& cls = Class::Handle(Isolate::Current()->class_table()->At(
        Object::Handle(obj->object()).GetClassId()));
    OS::PrintErr("writing instance of class %s ref at %" Px ".\n",
                 cls.ToCString(), reinterpret_cast<uword>(slot()));
  }
}

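// Rewrites a return-address slot so that execution resumes at the deopt
// descriptor with the matching deopt id in the function's unoptimized code.
// It also records the deoptimization reason: either in the IC data of the
// call site that triggered the deopt, or as a per-function flag that
// suppresses the optimization that caused it.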
void DeferredRetAddr::Materialize(DeoptContext* deopt_context) {
  Thread* thread = deopt_context->thread();
  Zone* zone = deopt_context->zone();
  Function& function = Function::Handle(zone);
  function ^= deopt_context->ObjectAt(index_);
  const Error& error =
      Error::Handle(zone, Compiler::EnsureUnoptimizedCode(thread, function));
  if (!error.IsNull()) {
    Exceptions::PropagateError(error);
  }
  const Code& code = Code::Handle(zone, function.unoptimized_code());

  uword continue_at_pc =
      code.GetPcForDeoptId(deopt_id_, PcDescriptorsLayout::kDeopt);
  if (continue_at_pc == 0) {
    FATAL2("Can't locate continuation PC for deoptid %" Pd " within %s\n",
           deopt_id_, function.ToFullyQualifiedCString());
  }
  uword* dest_addr = reinterpret_cast<uword*>(slot());
  *dest_addr = continue_at_pc;

  if (FLAG_trace_deoptimization_verbose) {
    OS::PrintErr("materializing return addr at 0x%" Px ": 0x%" Px "\n",
                 reinterpret_cast<uword>(slot()), continue_at_pc);
  }

  uword pc = code.GetPcForDeoptId(deopt_id_, PcDescriptorsLayout::kIcCall);
  if (pc != 0) {
    // If the deoptimization happened at an IC call, update the IC data
    // to avoid repeated deoptimization at the same site next time around.
    // We cannot use CodePatcher::GetInstanceCallAt because the call site
    // may have switched from referencing an ICData to a target Code or
    // MegamorphicCache.
    ICData& ic_data = ICData::Handle(zone, function.FindICData(deopt_id_));
    ic_data.AddDeoptReason(deopt_context->deopt_reason());
    // Propagate the reason to all ICData-s with the same deopt_id, since
    // only unoptimized-code ICData (IC calls) are propagated.
    function.SetDeoptReasonForAll(ic_data.deopt_id(),
                                  deopt_context->deopt_reason());
  } else {
    if (deopt_context->HasDeoptFlag(ICData::kHoisted)) {
      // Prevent excessive deoptimization.
      function.SetProhibitsHoistingCheckClass(true);
    }

    if (deopt_context->HasDeoptFlag(ICData::kGeneralized)) {
      function.SetProhibitsBoundsCheckGeneralization(true);
    }
  }
}

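// Materializes the PC-marker slot of a deoptimized frame: installs the
// function's unoptimized code object, bumps the deoptimization counter,
// resets the usage counter, and switches the function back to its
// unoptimized code.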
void DeferredPcMarker::Materialize(DeoptContext* deopt_context) {
  Thread* thread = deopt_context->thread();
  Zone* zone = deopt_context->zone();
  uword* dest_addr = reinterpret_cast<uword*>(slot());
  Function& function = Function::Handle(zone);
  function ^= deopt_context->ObjectAt(index_);
  ASSERT(!function.IsNull());
  const Error& error =
      Error::Handle(zone, Compiler::EnsureUnoptimizedCode(thread, function));
  if (!error.IsNull()) {
    Exceptions::PropagateError(error);
  }
  const Code& code = Code::Handle(zone, function.unoptimized_code());
  ASSERT(!code.IsNull());
  ASSERT(function.HasCode());
  *reinterpret_cast<ObjectPtr*>(dest_addr) = code.raw();

  if (FLAG_trace_deoptimization_verbose) {
    THR_Print("materializing pc marker at 0x%" Px ": %s, %s\n",
              reinterpret_cast<uword>(slot()), code.ToCString(),
              function.ToCString());
  }

  // Increment the deoptimization counter. This effectively increments the
  // counter of each function occurring in the optimized frame.
  if (deopt_context->deoptimizing_code()) {
    function.set_deoptimization_counter(function.deoptimization_counter() + 1);
  }
  if (FLAG_trace_deoptimization || FLAG_trace_deoptimization_verbose) {
    THR_Print("Deoptimizing '%s' (count %d)\n",
              function.ToFullyQualifiedCString(),
              function.deoptimization_counter());
  }
  // Clear the usage counter so that the function is reoptimized only after
  // more feedback has been collected.
  function.SetUsageCounter(0);
  if (function.HasOptimizedCode()) {
    function.SwitchToUnoptimizedCode();
  }
}

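// Materializes the pool-pointer (PP) slot of a deoptimized frame with the
// object pool of the function's unoptimized code.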
void DeferredPp::Materialize(DeoptContext* deopt_context) {
  Thread* thread = deopt_context->thread();
  Zone* zone = deopt_context->zone();
  Function& function = Function::Handle(zone);
  function ^= deopt_context->ObjectAt(index_);
  ASSERT(!function.IsNull());
  const Error& error =
      Error::Handle(zone, Compiler::EnsureUnoptimizedCode(thread, function));
  if (!error.IsNull()) {
    Exceptions::PropagateError(error);
  }
  const Code& code = Code::Handle(zone, function.unoptimized_code());
  ASSERT(!code.IsNull());
  ASSERT(code.GetObjectPool() != Object::null());
  *slot() = code.GetObjectPool();

  if (FLAG_trace_deoptimization_verbose) {
    OS::PrintErr("materializing pp at 0x%" Px ": 0x%" Px "\n",
                 reinterpret_cast<uword>(slot()),
                 static_cast<uword>(code.GetObjectPool()));
  }
}

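// Returns the materialized object, allocating it on first use. Only
// allocation happens here; the object's fields are initialized separately
// by Fill().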
ObjectPtr DeferredObject::object() {
  if (object_ == NULL) {
    Create();
  }
  return object_->raw();
}

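// Allocates the object (a Context or a plain instance) without initializing
// its fields. Allocation is split from field initialization (see Fill below)
// so that materialized objects can reference each other (via
// DeferredObjectRef) before their contents are written.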
void DeferredObject::Create() {
  if (object_ != NULL) {
    return;
  }

  Class& cls = Class::Handle();
  cls ^= GetClass();

  if (cls.raw() == Object::context_class()) {
    intptr_t num_variables = Smi::Cast(Object::Handle(GetLength())).Value();
    if (FLAG_trace_deoptimization_verbose) {
      OS::PrintErr("materializing context of length %" Pd " (%" Px ", %" Pd
                   " vars)\n",
                   num_variables, reinterpret_cast<uword>(args_),
                   field_count_);
    }
    object_ = &Context::ZoneHandle(Context::New(num_variables));

  } else {
    if (FLAG_trace_deoptimization_verbose) {
      OS::PrintErr("materializing instance of %s (%" Px ", %" Pd " fields)\n",
                   cls.ToCString(), reinterpret_cast<uword>(args_),
                   field_count_);
    }

    object_ = &Instance::ZoneHandle(Instance::New(cls));
  }
}

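// Converts a byte offset within a Context object into the index of the
// corresponding context variable.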
static intptr_t ToContextIndex(intptr_t offset_in_bytes) {
  intptr_t result = (offset_in_bytes - Context::variable_offset(0)) / kWordSize;
  ASSERT(result >= 0);
  return result;
}

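// Initializes the fields of the materialized object from the deferred
// (field offset, value) pairs. Contexts are handled specially: the parent
// link and the context variables are written through the Context API.
// Regular instances are filled via the class's offset-to-field map; offsets
// with no corresponding field (e.g. the type arguments vector or
// _ByteDataView internals, per the comment below) are written directly at
// the given offset.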
void DeferredObject::Fill() {
  Create();  // Ensure instance is created.

  Class& cls = Class::Handle();
  cls ^= GetClass();

  if (cls.raw() == Object::context_class()) {
    const Context& context = Context::Cast(*object_);

    Smi& offset = Smi::Handle();
    Object& value = Object::Handle();

    for (intptr_t i = 0; i < field_count_; i++) {
      offset ^= GetFieldOffset(i);
      if (offset.Value() == Context::parent_offset()) {
        // Copy parent.
        Context& parent = Context::Handle();
        parent ^= GetValue(i);
        context.set_parent(parent);
        if (FLAG_trace_deoptimization_verbose) {
          OS::PrintErr("    ctx@parent (offset %" Pd ") <- %s\n",
                       offset.Value(), parent.ToCString());
        }
      } else {
        intptr_t context_index = ToContextIndex(offset.Value());
        value = GetValue(i);
        context.SetAt(context_index, value);
        if (FLAG_trace_deoptimization_verbose) {
          OS::PrintErr("    ctx@%" Pd " (offset %" Pd ") <- %s\n",
                       context_index, offset.Value(), value.ToCString());
        }
      }
    }
  } else {
    const Instance& obj = Instance::Cast(*object_);

    Smi& offset = Smi::Handle();
    Field& field = Field::Handle();
    Object& value = Object::Handle();
    const Array& offset_map = Array::Handle(cls.OffsetToFieldMap());

    for (intptr_t i = 0; i < field_count_; i++) {
      offset ^= GetFieldOffset(i);
      field ^= offset_map.At(offset.Value() / kWordSize);
      value = GetValue(i);
      if (!field.IsNull()) {
        obj.SetField(field, value);
        if (FLAG_trace_deoptimization_verbose) {
          OS::PrintErr("    %s <- %s\n",
                       String::Handle(field.name()).ToCString(),
                       value.ToCString());
        }
      } else {
        // In addition to the type arguments vector we can also have lazy
        // materialization of e.g. _ByteDataView objects which don't have
        // explicit fields in Dart (all accesses to the fields are done via
        // recognized native methods).
        ASSERT(offset.Value() < cls.host_instance_size());
        obj.SetFieldAtOffset(offset.Value(), value);
        if (FLAG_trace_deoptimization_verbose) {
          OS::PrintErr("    null Field @ offset(%" Pd ") <- %s\n",
                       offset.Value(), value.ToCString());
        }
      }
    }
  }
}

}  // namespace dart

#endif  // !defined(DART_PRECOMPILED_RUNTIME)