// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/compiler/assembler/assembler_base.h"

#include "platform/utils.h"
#include "vm/cpu.h"
#include "vm/heap/heap.h"
#include "vm/memory_region.h"
#include "vm/os.h"
#include "vm/zone.h"

namespace dart {

DEFINE_FLAG(bool,
            check_code_pointer,
            false,
19 | "Verify instructions offset in code object." |
20 | "NOTE: This breaks the profiler." ); |
#if defined(TARGET_ARCH_ARM)
DEFINE_FLAG(bool, use_far_branches, false, "Enable far branches for ARM.");
#endif

namespace compiler {

AssemblerBase::~AssemblerBase() {}

intptr_t AssemblerBase::InsertAlignedRelocation(BSS::Relocation reloc) {
  // We cannot put a relocation at the very start (it's not a valid
  // instruction)!
  ASSERT(CodeSize() != 0);

  // Align to a target word boundary.
  const intptr_t offset =
      Utils::RoundUp(CodeSize(), compiler::target::kWordSize);

  while (CodeSize() < offset) {
    Breakpoint();
  }
  ASSERT(CodeSize() == offset);

  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  buffer_.Emit<compiler::target::word>(BSS::RelocationIndex(reloc) *
                                       compiler::target::kWordSize);

  ASSERT(CodeSize() == (offset + compiler::target::kWordSize));

  return offset;
}
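
// Usage sketch (hypothetical call site, not from this file): after at least
// one instruction has been emitted, a backend can reserve a BSS relocation
// slot and remember the returned offset:
//
//   __ LoadImmediate(R0, 0);  // Any instruction, so CodeSize() != 0.
//   const intptr_t slot = assembler->InsertAlignedRelocation(reloc);
//
// The padding emitted between the old CodeSize() and `slot` consists of
// breakpoint instructions, so falling through into the relocation word traps
// instead of executing data.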

#if defined(DEBUG)
static void InitializeMemoryWithBreakpoints(uword data, intptr_t length) {
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
  ASSERT(Utils::IsAligned(data, 4));
  ASSERT(Utils::IsAligned(length, 4));
  const uword end = data + length;
  while (data < end) {
    *reinterpret_cast<int32_t*>(data) = Instr::kBreakPointInstruction;
    data += 4;
  }
#else
  memset(reinterpret_cast<void*>(data), Instr::kBreakPointInstruction, length);
#endif
}
#endif

static uword NewContents(intptr_t capacity) {
  Zone* zone = Thread::Current()->zone();
  uword result = zone->AllocUnsafe(capacity);
#if defined(DEBUG)
  // Initialize the buffer with kBreakPointInstruction to force a break
  // point if we ever execute an uninitialized part of the code buffer.
  InitializeMemoryWithBreakpoints(result, capacity);
#endif
  return result;
}

#if defined(DEBUG)
AssemblerBuffer::EnsureCapacity::EnsureCapacity(AssemblerBuffer* buffer) {
  if (buffer->cursor() >= buffer->limit()) buffer->ExtendCapacity();
  // In debug mode, we save the assembler buffer along with the gap
  // size before we start emitting to the buffer. This allows us to
  // check that any single generated instruction doesn't overflow the
  // limit implied by the minimum gap size.
  buffer_ = buffer;
  gap_ = ComputeGap();
  // Make sure that extending the capacity leaves a big enough gap
  // for any kind of instruction.
  ASSERT(gap_ >= kMinimumGap);
  // Mark the buffer as having ensured the capacity.
  ASSERT(!buffer->HasEnsuredCapacity());  // Cannot nest.
  buffer->has_ensured_capacity_ = true;
}

AssemblerBuffer::EnsureCapacity::~EnsureCapacity() {
  // Unmark the buffer, so we cannot emit after this.
  buffer_->has_ensured_capacity_ = false;
  // Make sure the generated instruction doesn't take up more
  // space than the minimum gap.
  intptr_t delta = gap_ - ComputeGap();
  ASSERT(delta <= kMinimumGap);
}
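
// Sketch of the intended emit pattern (mirrors the architecture-specific
// assemblers; `nop` and the opcode byte are illustrative): every
// instruction-sized emit is wrapped in an EnsureCapacity scope so the gap
// checks above can fire in DEBUG mode.
//
//   void Assembler::nop() {
//     AssemblerBuffer::EnsureCapacity ensured(&buffer_);
//     EmitUint8(0x90);  // Must stay within kMinimumGap bytes.
//   }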
#endif

AssemblerBuffer::AssemblerBuffer()
    : pointer_offsets_(new ZoneGrowableArray<intptr_t>(16)) {
  static const intptr_t kInitialBufferCapacity = 4 * KB;
  contents_ = NewContents(kInitialBufferCapacity);
  cursor_ = contents_;
  limit_ = ComputeLimit(contents_, kInitialBufferCapacity);
  fixup_ = NULL;
#if defined(DEBUG)
  has_ensured_capacity_ = false;
  fixups_processed_ = false;
#endif

  // Verify internal state.
  ASSERT(Capacity() == kInitialBufferCapacity);
  ASSERT(Size() == 0);
}

AssemblerBuffer::~AssemblerBuffer() {}

void AssemblerBuffer::ProcessFixups(const MemoryRegion& region) {
  AssemblerFixup* fixup = fixup_;
  while (fixup != NULL) {
    fixup->Process(region, fixup->position());
    fixup = fixup->previous();
  }
}

void AssemblerBuffer::FinalizeInstructions(const MemoryRegion& instructions) {
  // Copy the instructions from the buffer.
  MemoryRegion from(reinterpret_cast<void*>(contents()), Size());
  instructions.CopyFrom(0, from);

  // Process fixups in the instructions.
  ProcessFixups(instructions);
#if defined(DEBUG)
  fixups_processed_ = true;
#endif
}
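
// Typical finalization flow (sketch; the concrete caller is the code
// installation path, which allocates the final instructions memory):
//
//   MemoryRegion region(payload_address, buffer.Size());  // Hypothetical.
//   buffer.FinalizeInstructions(region);
//
// Note the ordering: the bytes are copied first and the fixups are applied to
// the destination region, so the zone-allocated scratch buffer itself is
// never patched.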

void AssemblerBuffer::ExtendCapacity() {
  intptr_t old_size = Size();
  intptr_t old_capacity = Capacity();
  intptr_t new_capacity =
      Utils::Minimum(old_capacity * 2, old_capacity + 1 * MB);
  if (new_capacity < old_capacity) {
    FATAL("Unexpected overflow in AssemblerBuffer::ExtendCapacity");
  }

  // Allocate the new data area and copy contents of the old one to it.
  uword new_contents = NewContents(new_capacity);
  memmove(reinterpret_cast<void*>(new_contents),
          reinterpret_cast<void*>(contents_), old_size);

  // Compute the relocation delta and switch to the new contents area.
  intptr_t delta = new_contents - contents_;
  contents_ = new_contents;

  // Update the cursor and recompute the limit.
  cursor_ += delta;
  limit_ = ComputeLimit(new_contents, new_capacity);

  // Verify internal state.
  ASSERT(Capacity() == new_capacity);
  ASSERT(Size() == old_size);
}
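
// Growth policy, worked through: Utils::Minimum(2 * old, old + 1 * MB) picks
// 2 * old while old < 1 MB and old + 1 MB afterwards, so a buffer grows
// 4 KB -> 8 KB -> ... -> 1 MB -> 2 MB -> 3 MB -> ..., i.e. geometrically for
// small methods while bounding the over-allocation for very large ones at
// 1 MB.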

class PatchCodeWithHandle : public AssemblerFixup {
 public:
  PatchCodeWithHandle(ZoneGrowableArray<intptr_t>* pointer_offsets,
                      const Object& object)
      : pointer_offsets_(pointer_offsets), object_(object) {}

  void Process(const MemoryRegion& region, intptr_t position) {
    // Patch the handle into the code. Once the instructions are installed
    // into a raw code object and the pointer offsets are set up, the handle
    // is resolved.
    region.StoreUnaligned<const Object*>(position, &object_);
    pointer_offsets_->Add(position);
  }

  virtual bool IsPointerOffset() const { return true; }

 private:
  ZoneGrowableArray<intptr_t>* pointer_offsets_;
  const Object& object_;
};

intptr_t AssemblerBuffer::CountPointerOffsets() const {
  intptr_t count = 0;
  AssemblerFixup* current = fixup_;
  while (current != NULL) {
    if (current->IsPointerOffset()) ++count;
    current = current->previous_;
  }
  return count;
}

#if defined(TARGET_ARCH_IA32)
void AssemblerBuffer::EmitObject(const Object& object) {
  // Since we are going to store the handle as part of the fixup information
  // the handle needs to be a zone handle.
  ASSERT(IsNotTemporaryScopedHandle(object));
  ASSERT(IsInOldSpace(object));
  EmitFixup(new PatchCodeWithHandle(pointer_offsets_, object));
  cursor_ += target::kWordSize;  // Reserve space for pointer.
}
#endif
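
// Sketch of what EmitObject produces (IA32 only): the word reserved above is
// later patched by PatchCodeWithHandle::Process to hold a raw object pointer
// embedded directly in the instruction stream, e.g. as the immediate of a
// `movl` loading a constant. The recorded pointer offsets end up in the Code
// object so the GC can find and update these embedded pointers.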

// Shared macros are implemented here.
void AssemblerBase::Unimplemented(const char* message) {
  const char* format = "Unimplemented: %s";
  const intptr_t len = Utils::SNPrint(NULL, 0, format, message);
  char* buffer = reinterpret_cast<char*>(malloc(len + 1));
  Utils::SNPrint(buffer, len + 1, format, message);
  Stop(buffer);
}

void AssemblerBase::Untested(const char* message) {
  const char* format = "Untested: %s";
  const intptr_t len = Utils::SNPrint(NULL, 0, format, message);
  char* buffer = reinterpret_cast<char*>(malloc(len + 1));
  Utils::SNPrint(buffer, len + 1, format, message);
  Stop(buffer);
}

void AssemblerBase::Unreachable(const char* message) {
  const char* format = "Unreachable: %s";
  const intptr_t len = Utils::SNPrint(NULL, 0, format, message);
  char* buffer = reinterpret_cast<char*>(malloc(len + 1));
  Utils::SNPrint(buffer, len + 1, format, message);
  Stop(buffer);
}

void AssemblerBase::Comment(const char* format, ...) {
  if (EmittingComments()) {
    char buffer[1024];

    va_list args;
    va_start(args, format);
    Utils::VSNPrint(buffer, sizeof(buffer), format, args);
    va_end(args);

    comments_.Add(
        new CodeComment(buffer_.GetPosition(), AllocateString(buffer)));
  }
}

bool AssemblerBase::EmittingComments() {
  return FLAG_code_comments || FLAG_disassemble || FLAG_disassemble_optimized;
}
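
// Usage sketch: comments are printf-style annotations attached to the current
// buffer position (the ToCString() call is illustrative):
//
//   __ Comment("Allocate instance of %s", cls.ToCString());
//
// They are only recorded when one of the flags above is set, and the
// disassembler prints them interleaved with the generated instructions.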

void AssemblerBase::Stop(const char* message) {
  Comment("Stop: %s", message);
  Breakpoint();
}

intptr_t ObjIndexPair::Hashcode(Key key) {
  if (key.type() != ObjectPoolBuilderEntry::kTaggedObject) {
    return key.raw_value_;
  }

  return ObjectHash(*key.obj_);
}
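
// Note on the split above: non-tagged entries (immediates, native entry
// points) hash and compare by their raw bits, while tagged objects hash via
// ObjectHash, so two handles denoting the same constant can still share one
// pool slot.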

void ObjectPoolBuilder::Reset() {
  // Null out the handles we've accumulated.
  for (intptr_t i = 0; i < object_pool_.length(); ++i) {
    if (object_pool_[i].type() == ObjectPoolBuilderEntry::kTaggedObject) {
      SetToNull(const_cast<Object*>(object_pool_[i].obj_));
      SetToNull(const_cast<Object*>(object_pool_[i].equivalence_));
    }
  }

  object_pool_.Clear();
  object_pool_index_table_.Clear();
}

intptr_t ObjectPoolBuilder::AddObject(
    const Object& obj,
    ObjectPoolBuilderEntry::Patchability patchable) {
  ASSERT(IsNotTemporaryScopedHandle(obj));
  return AddObject(ObjectPoolBuilderEntry(&obj, patchable));
}

intptr_t ObjectPoolBuilder::AddImmediate(uword imm) {
  return AddObject(
      ObjectPoolBuilderEntry(imm, ObjectPoolBuilderEntry::kImmediate,
                             ObjectPoolBuilderEntry::kNotPatchable));
}

intptr_t ObjectPoolBuilder::AddObject(ObjectPoolBuilderEntry entry) {
  ASSERT((entry.type() != ObjectPoolBuilderEntry::kTaggedObject) ||
         (IsNotTemporaryScopedHandle(*entry.obj_) &&
          (entry.equivalence_ == NULL ||
           IsNotTemporaryScopedHandle(*entry.equivalence_))));

  if (entry.type() == ObjectPoolBuilderEntry::kTaggedObject) {
    // If the owner of the object pool wrapper specified a zone to use, copy
    // the handles into that zone before storing them in the pool.
    if (zone_ != NULL) {
      entry.obj_ = &NewZoneHandle(zone_, *entry.obj_);
      if (entry.equivalence_ != NULL) {
        entry.equivalence_ = &NewZoneHandle(zone_, *entry.equivalence_);
      }
    }
  }

  const intptr_t idx = base_index_ + object_pool_.length();
  object_pool_.Add(entry);
  if (entry.patchable() == ObjectPoolBuilderEntry::kNotPatchable) {
    // The object isn't patchable. Record the index for fast lookup.
    object_pool_index_table_.Insert(ObjIndexPair(entry, idx));
  }
  return idx;
}
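
// Worked example (sketch, assuming base_index_ == 0): AddObject always
// appends, so
//
//   builder.AddImmediate(42);  // -> index 0 (also recorded in the table).
//   builder.AddImmediate(42);  // -> index 1; no deduplication here.
//
// Callers that want sharing must go through FindObject/FindImmediate below,
// which consult the index table first.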

intptr_t ObjectPoolBuilder::FindObject(ObjectPoolBuilderEntry entry) {
  // If the object is not patchable, check if we've already got it in the
  // object pool.
  if (entry.patchable() == ObjectPoolBuilderEntry::kNotPatchable) {
    // First check in the parent pool if we have one.
    if (parent_ != nullptr) {
      const intptr_t idx =
          parent_->object_pool_index_table_.LookupValue(entry);
      if (idx != ObjIndexPair::kNoIndex) {
        return idx;
      }
    }

    const intptr_t idx = object_pool_index_table_.LookupValue(entry);
    if (idx != ObjIndexPair::kNoIndex) {
      return idx;
    }
  }
  return AddObject(entry);
}
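
// Usage sketch: FindObject/FindImmediate are the deduplicating entry points:
//
//   const intptr_t idx = builder.FindImmediate(42);
//   ASSERT(builder.FindImmediate(42) == idx);  // Reuses the same slot.
//
// Patchable entries always get a fresh slot: their contents may be rewritten
// after code installation, so they cannot be shared.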

intptr_t ObjectPoolBuilder::FindObject(
    const Object& obj,
    ObjectPoolBuilderEntry::Patchability patchable) {
  return FindObject(ObjectPoolBuilderEntry(&obj, patchable));
}

intptr_t ObjectPoolBuilder::FindObject(const Object& obj,
                                       const Object& equivalence) {
  return FindObject(ObjectPoolBuilderEntry(
      &obj, &equivalence, ObjectPoolBuilderEntry::kNotPatchable));
}

intptr_t ObjectPoolBuilder::FindImmediate(uword imm) {
  return FindObject(
      ObjectPoolBuilderEntry(imm, ObjectPoolBuilderEntry::kImmediate,
                             ObjectPoolBuilderEntry::kNotPatchable));
}

intptr_t ObjectPoolBuilder::FindNativeFunction(
    const ExternalLabel* label,
    ObjectPoolBuilderEntry::Patchability patchable) {
  return FindObject(ObjectPoolBuilderEntry(
      label->address(), ObjectPoolBuilderEntry::kNativeFunction, patchable));
}

intptr_t ObjectPoolBuilder::FindNativeFunctionWrapper(
    const ExternalLabel* label,
    ObjectPoolBuilderEntry::Patchability patchable) {
  return FindObject(ObjectPoolBuilderEntry(
      label->address(), ObjectPoolBuilderEntry::kNativeFunctionWrapper,
      patchable));
}

bool ObjectPoolBuilder::TryCommitToParent() {
  ASSERT(parent_ != nullptr);
  if (parent_->CurrentLength() != base_index_) {
    return false;
  }
  for (intptr_t i = 0; i < object_pool_.length(); i++) {
    intptr_t idx = parent_->AddObject(object_pool_[i]);
    ASSERT(idx == (base_index_ + i));
  }
  return true;
}
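
// Commit protocol (sketch): a child builder records the parent's length as
// base_index_ when it is set up. TryCommitToParent can only append its
// entries if the parent has not grown in the meantime; otherwise the child's
// indices, already baked into the generated code, would no longer line up,
// and the caller has to handle the failure (e.g. by regenerating against the
// grown parent pool).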

}  // namespace compiler

}  // namespace dart