// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/class_table.h"

#include <limits>
#include <memory>

#include "platform/atomic.h"
#include "vm/flags.h"
#include "vm/growable_array.h"
#include "vm/heap/heap.h"
#include "vm/object.h"
#include "vm/object_graph.h"
#include "vm/raw_object.h"
#include "vm/visitor.h"

namespace dart {

DEFINE_FLAG(bool, print_class_table, false, "Print initial class table.");

SharedClassTable::SharedClassTable()
    : top_(kNumPredefinedCids),
      capacity_(0),
      old_tables_(new MallocGrowableArray<void*>()) {
  if (Dart::vm_isolate() == NULL) {
    ASSERT(kInitialCapacity >= kNumPredefinedCids);
    capacity_ = kInitialCapacity;
    // Note that [calloc] will zero-initialize the memory.
    table_.store(reinterpret_cast<RelaxedAtomic<intptr_t>*>(
        calloc(capacity_, sizeof(RelaxedAtomic<intptr_t>))));
  } else {
    // Duplicate the class table from the VM isolate.
    auto vm_shared_class_table =
        Dart::vm_isolate()->group()->shared_class_table();
    capacity_ = vm_shared_class_table->capacity_;
    // Note that [calloc] will zero-initialize the memory.
    RelaxedAtomic<intptr_t>* table = reinterpret_cast<RelaxedAtomic<intptr_t>*>(
        calloc(capacity_, sizeof(RelaxedAtomic<intptr_t>)));
    // The following cids don't have a corresponding class object in Dart code.
    // We therefore need to initialize them eagerly.
    for (intptr_t i = kObjectCid; i < kInstanceCid; i++) {
      table[i] = vm_shared_class_table->SizeAt(i);
    }
    table[kTypeArgumentsCid] = vm_shared_class_table->SizeAt(kTypeArgumentsCid);
    table[kFreeListElement] = vm_shared_class_table->SizeAt(kFreeListElement);
    table[kForwardingCorpse] = vm_shared_class_table->SizeAt(kForwardingCorpse);
    table[kDynamicCid] = vm_shared_class_table->SizeAt(kDynamicCid);
    table[kVoidCid] = vm_shared_class_table->SizeAt(kVoidCid);
    table_.store(table);
  }
#if defined(SUPPORT_UNBOXED_INSTANCE_FIELDS)
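  // [unboxed_fields_map_] maps a class id to a bitmap describing which of the
  // instance's fields are stored unboxed (see [UnboxedFieldBitmap]).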
  // Note that [calloc] will zero-initialize the memory.
  unboxed_fields_map_ = static_cast<UnboxedFieldBitmap*>(
      calloc(capacity_, sizeof(UnboxedFieldBitmap)));
#endif  // defined(SUPPORT_UNBOXED_INSTANCE_FIELDS)
#ifndef PRODUCT
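  // [trace_allocation_table_] keeps one byte per class id recording whether
  // allocations of that class are currently being traced.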
  // Note that [calloc] will zero-initialize the memory.
  trace_allocation_table_.store(
      static_cast<uint8_t*>(calloc(capacity_, sizeof(uint8_t))));
#endif  // !PRODUCT
}

SharedClassTable::~SharedClassTable() {
  if (old_tables_ != NULL) {
    FreeOldTables();
    delete old_tables_;
  }
  free(table_.load());
  free(unboxed_fields_map_);

  NOT_IN_PRODUCT(free(trace_allocation_table_.load()));
}

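// Note: the isolate keeps a cached copy of the table pointer so the table can
// be reached without going through the ClassTable object (presumably for fast
// access from generated code); it must be kept in sync whenever the table
// pointer changes.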
void ClassTable::set_table(ClassPtr* table) {
  Isolate* isolate = Isolate::Current();
  ASSERT(isolate != nullptr);
  table_.store(table);
  isolate->set_cached_class_table_table(table);
}

ClassTable::ClassTable(SharedClassTable* shared_class_table)
    : top_(kNumPredefinedCids),
      capacity_(0),
      tlc_top_(0),
      tlc_capacity_(0),
      table_(nullptr),
      tlc_table_(nullptr),
      old_class_tables_(new MallocGrowableArray<ClassPtr*>()),
      shared_class_table_(shared_class_table) {
  if (Dart::vm_isolate() == NULL) {
    ASSERT(kInitialCapacity >= kNumPredefinedCids);
    capacity_ = kInitialCapacity;
    // Note that [calloc] will zero-initialize the memory.
    // Don't use set_table because the caller is supposed to set up the
    // isolate's cached copy when constructing the ClassTable. Isolate::Current
    // might not be available at this point yet.
    table_.store(static_cast<ClassPtr*>(calloc(capacity_, sizeof(ClassPtr))));
  } else {
    // Duplicate the class table from the VM isolate.
    ClassTable* vm_class_table = Dart::vm_isolate()->class_table();
    capacity_ = vm_class_table->capacity_;
    // Note that [calloc] will zero-initialize the memory.
    ClassPtr* table =
        static_cast<ClassPtr*>(calloc(capacity_, sizeof(ClassPtr)));
    // The following cids don't have a corresponding class object in Dart code.
    // We therefore need to initialize them eagerly.
    for (intptr_t i = kObjectCid; i < kInstanceCid; i++) {
      table[i] = vm_class_table->At(i);
    }
    table[kTypeArgumentsCid] = vm_class_table->At(kTypeArgumentsCid);
    table[kFreeListElement] = vm_class_table->At(kFreeListElement);
    table[kForwardingCorpse] = vm_class_table->At(kForwardingCorpse);
    table[kDynamicCid] = vm_class_table->At(kDynamicCid);
    table[kVoidCid] = vm_class_table->At(kVoidCid);
    // Don't use set_table because the caller is supposed to set up the
    // isolate's cached copy when constructing the ClassTable. Isolate::Current
    // might not be available at this point yet.
    table_.store(table);
  }
}

ClassTable::~ClassTable() {
  if (old_class_tables_ != nullptr) {
    FreeOldTables();
    delete old_class_tables_;
  }
  free(table_.load());
  free(tlc_table_.load());
}

void ClassTable::AddOldTable(ClassPtr* old_class_table) {
  ASSERT(Thread::Current()->IsMutatorThread());
  old_class_tables_->Add(old_class_table);
}

void ClassTable::FreeOldTables() {
  while (old_class_tables_->length() > 0) {
    free(old_class_tables_->RemoveLast());
  }
}

void SharedClassTable::AddOldTable(intptr_t* old_table) {
  ASSERT(Thread::Current()->IsMutatorThread());
  old_tables_->Add(old_table);
}

void SharedClassTable::FreeOldTables() {
  while (old_tables_->length() > 0) {
    free(old_tables_->RemoveLast());
  }
}

void ClassTable::Register(const Class& cls) {
  ASSERT(Thread::Current()->IsMutatorThread());

  const classid_t cid = cls.id();
  ASSERT(!IsTopLevelCid(cid));

  // During the transition period we would like [SharedClassTable] to operate
  // in parallel to [ClassTable].

  const intptr_t instance_size =
      cls.is_abstract() ? 0 : Class::host_instance_size(cls.raw());

  const intptr_t expected_cid =
      shared_class_table_->Register(cid, instance_size);
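  // [expected_cid] is checked against the final class id below to keep the
  // two tables in sync.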

  if (cid != kIllegalCid) {
    ASSERT(cid > 0 && cid < kNumPredefinedCids && cid < top_);
    ASSERT(table_.load()[cid] == nullptr);
    table_.load()[cid] = cls.raw();
  } else {
    if (top_ == capacity_) {
      const intptr_t new_capacity = capacity_ + kCapacityIncrement;
      Grow(new_capacity);
    }
    ASSERT(top_ < capacity_);
    cls.set_id(top_);
    table_.load()[top_] = cls.raw();
    top_++;  // Increment next index.
  }
  ASSERT(expected_cid == cls.id());
}

void ClassTable::RegisterTopLevel(const Class& cls) {
  if (top_ >= std::numeric_limits<classid_t>::max()) {
    FATAL1("Fatal error in ClassTable::RegisterTopLevel: invalid index %" Pd
           "\n",
           top_);
  }

  ASSERT(Thread::Current()->IsMutatorThread());

  const intptr_t index = cls.id();
  ASSERT(index == kIllegalCid);

  if (tlc_top_ == tlc_capacity_) {
    const intptr_t new_capacity = tlc_capacity_ + kCapacityIncrement;
    GrowTopLevel(new_capacity);
  }
  ASSERT(tlc_top_ < tlc_capacity_);
  cls.set_id(ClassTable::CidFromTopLevelIndex(tlc_top_));
  tlc_table_.load()[tlc_top_] = cls.raw();
  tlc_top_++;  // Increment next index.
}

intptr_t SharedClassTable::Register(intptr_t index, intptr_t size) {
  if (!Class::is_valid_id(top_)) {
    FATAL1("Fatal error in SharedClassTable::Register: invalid index %" Pd "\n",
           top_);
  }

  ASSERT(Thread::Current()->IsMutatorThread());
  if (index != kIllegalCid) {
    // We are registering the size of a predefined class.
    ASSERT(index > 0 && index < kNumPredefinedCids);
    SetSizeAt(index, size);
    return index;
  } else {
    ASSERT(size == 0);
    if (top_ == capacity_) {
      const intptr_t new_capacity = capacity_ + kCapacityIncrement;
      Grow(new_capacity);
    }
    ASSERT(top_ < capacity_);
    table_.load()[top_] = size;
    return top_++;  // Increment next index.
  }
}

void ClassTable::AllocateIndex(intptr_t index) {
  if (IsTopLevelCid(index)) {
    AllocateTopLevelIndex(index);
    return;
  }

  // This is called by a snapshot reader.
  shared_class_table_->AllocateIndex(index);
  ASSERT(Class::is_valid_id(index));

  if (index >= capacity_) {
    const intptr_t new_capacity = index + kCapacityIncrement;
    Grow(new_capacity);
  }

  ASSERT(table_.load()[index] == nullptr);
  if (index >= top_) {
    top_ = index + 1;
  }

  ASSERT(top_ == shared_class_table_->top_);
  ASSERT(capacity_ == shared_class_table_->capacity_);
}

void ClassTable::AllocateTopLevelIndex(intptr_t cid) {
  ASSERT(IsTopLevelCid(cid));
  const intptr_t tlc_index = IndexFromTopLevelCid(cid);

  if (tlc_index >= tlc_capacity_) {
    const intptr_t new_capacity = tlc_index + kCapacityIncrement;
    GrowTopLevel(new_capacity);
  }

  ASSERT(tlc_table_.load()[tlc_index] == nullptr);
  if (tlc_index >= tlc_top_) {
    tlc_top_ = tlc_index + 1;
  }
}

void ClassTable::Grow(intptr_t new_capacity) {
  ASSERT(new_capacity > capacity_);

  auto old_table = table_.load();
  auto new_table = static_cast<ClassPtr*>(
      malloc(new_capacity * sizeof(ClassPtr)));  // NOLINT
  intptr_t i;
  for (i = 0; i < capacity_; i++) {
    // Don't use memmove, which changes this from a relaxed atomic operation
    // to a non-atomic operation.
    new_table[i] = old_table[i];
  }
  for (; i < new_capacity; i++) {
    // Don't use memset, which changes this from a relaxed atomic operation
    // to a non-atomic operation.
    new_table[i] = 0;
  }
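  // Defer freeing the old table: other threads may still be reading from it
  // concurrently. It stays alive in [old_class_tables_] until it can be
  // safely freed (see FreeOldTables).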
  old_class_tables_->Add(old_table);
  set_table(new_table);

  capacity_ = new_capacity;
}

void ClassTable::GrowTopLevel(intptr_t new_capacity) {
  ASSERT(new_capacity > tlc_capacity_);

  auto old_table = tlc_table_.load();
  auto new_table = static_cast<ClassPtr*>(
      malloc(new_capacity * sizeof(ClassPtr)));  // NOLINT
  intptr_t i;
  for (i = 0; i < tlc_capacity_; i++) {
    // Don't use memmove, which changes this from a relaxed atomic operation
    // to a non-atomic operation.
    new_table[i] = old_table[i];
  }
  for (; i < new_capacity; i++) {
    // Don't use memset, which changes this from a relaxed atomic operation
    // to a non-atomic operation.
    new_table[i] = 0;
  }
  old_class_tables_->Add(old_table);

  tlc_table_.store(new_table);
  tlc_capacity_ = new_capacity;
}

void SharedClassTable::AllocateIndex(intptr_t index) {
  // This is called by a snapshot reader.
  ASSERT(Class::is_valid_id(index));

  if (index >= capacity_) {
    const intptr_t new_capacity = index + kCapacityIncrement;
    Grow(new_capacity);
  }

  ASSERT(table_.load()[index] == 0);
  if (index >= top_) {
    top_ = index + 1;
  }
}

void SharedClassTable::Grow(intptr_t new_capacity) {
  ASSERT(new_capacity >= capacity_);

  RelaxedAtomic<intptr_t>* old_table = table_.load();
  RelaxedAtomic<intptr_t>* new_table =
      reinterpret_cast<RelaxedAtomic<intptr_t>*>(
          malloc(new_capacity * sizeof(RelaxedAtomic<intptr_t>)));  // NOLINT

  intptr_t i;
  for (i = 0; i < capacity_; i++) {
    // Don't use memmove, which changes this from a relaxed atomic operation
    // to a non-atomic operation.
    new_table[i] = old_table[i];
  }
  for (; i < new_capacity; i++) {
    // Don't use memset, which changes this from a relaxed atomic operation
    // to a non-atomic operation.
    new_table[i] = 0;
  }

#if !defined(PRODUCT)
  auto old_trace_table = trace_allocation_table_.load();
  auto new_trace_table =
      static_cast<uint8_t*>(malloc(new_capacity * sizeof(uint8_t)));  // NOLINT
  for (i = 0; i < capacity_; i++) {
    // Don't use memmove, which changes this from a relaxed atomic operation
    // to a non-atomic operation.
    new_trace_table[i] = old_trace_table[i];
  }
  for (; i < new_capacity; i++) {
    // Don't use memset, which changes this from a relaxed atomic operation
    // to a non-atomic operation.
    new_trace_table[i] = 0;
  }
#endif

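  // As in ClassTable::Grow, the old tables are kept alive rather than freed
  // immediately, since concurrent readers may still hold pointers into them.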
  old_tables_->Add(old_table);
  table_.store(new_table);
  NOT_IN_PRODUCT(old_tables_->Add(old_trace_table));
  NOT_IN_PRODUCT(trace_allocation_table_.store(new_trace_table));

#if defined(SUPPORT_UNBOXED_INSTANCE_FIELDS)
  auto old_unboxed_fields_map = unboxed_fields_map_;
  auto new_unboxed_fields_map = static_cast<UnboxedFieldBitmap*>(
      malloc(new_capacity * sizeof(UnboxedFieldBitmap)));
  for (i = 0; i < capacity_; i++) {
    // Don't use memmove, which changes this from a relaxed atomic operation
    // to a non-atomic operation.
    new_unboxed_fields_map[i] = old_unboxed_fields_map[i];
  }
  for (; i < new_capacity; i++) {
    // Don't use memset, which changes this from a relaxed atomic operation
    // to a non-atomic operation.
    new_unboxed_fields_map[i] = UnboxedFieldBitmap(0);
  }
  old_tables_->Add(old_unboxed_fields_map);
  unboxed_fields_map_ = new_unboxed_fields_map;
#endif  // defined(SUPPORT_UNBOXED_INSTANCE_FIELDS)

  capacity_ = new_capacity;
}

void ClassTable::Unregister(intptr_t cid) {
  ASSERT(!IsTopLevelCid(cid));
  shared_class_table_->Unregister(cid);
  table_.load()[cid] = nullptr;
}

void ClassTable::UnregisterTopLevel(intptr_t cid) {
  ASSERT(IsTopLevelCid(cid));
  const intptr_t tlc_index = IndexFromTopLevelCid(cid);
  tlc_table_.load()[tlc_index] = nullptr;
}

void SharedClassTable::Unregister(intptr_t index) {
  table_.load()[index] = 0;
#if defined(SUPPORT_UNBOXED_INSTANCE_FIELDS)
  unboxed_fields_map_[index].Reset();
#endif  // defined(SUPPORT_UNBOXED_INSTANCE_FIELDS)
}

void ClassTable::Remap(intptr_t* old_to_new_cid) {
  ASSERT(Thread::Current()->IsAtSafepoint());
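  // All mutators are stopped at this safepoint, so a plain (non-atomic)
  // memmove over the table is safe here, unlike in Grow.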
  const intptr_t num_cids = NumCids();
  std::unique_ptr<ClassPtr[]> cls_by_old_cid(new ClassPtr[num_cids]);
  auto* table = table_.load();
  memmove(cls_by_old_cid.get(), table, sizeof(ClassPtr) * num_cids);
  for (intptr_t i = 0; i < num_cids; i++) {
    table[old_to_new_cid[i]] = cls_by_old_cid[i];
  }
}

void SharedClassTable::Remap(intptr_t* old_to_new_cid) {
  ASSERT(Thread::Current()->IsAtSafepoint());
  const intptr_t num_cids = NumCids();
  std::unique_ptr<intptr_t[]> size_by_old_cid(new intptr_t[num_cids]);
  auto* table = table_.load();
  for (intptr_t i = 0; i < num_cids; i++) {
    size_by_old_cid[i] = table[i];
  }
  for (intptr_t i = 0; i < num_cids; i++) {
    table[old_to_new_cid[i]] = size_by_old_cid[i];
  }

#if defined(SUPPORT_UNBOXED_INSTANCE_FIELDS)
  std::unique_ptr<UnboxedFieldBitmap[]> unboxed_fields_by_old_cid(
      new UnboxedFieldBitmap[num_cids]);
  for (intptr_t i = 0; i < num_cids; i++) {
    unboxed_fields_by_old_cid[i] = unboxed_fields_map_[i];
  }
  for (intptr_t i = 0; i < num_cids; i++) {
    unboxed_fields_map_[old_to_new_cid[i]] = unboxed_fields_by_old_cid[i];
  }
#endif  // defined(SUPPORT_UNBOXED_INSTANCE_FIELDS)
}

void ClassTable::VisitObjectPointers(ObjectPointerVisitor* visitor) {
  ASSERT(visitor != NULL);
  visitor->set_gc_root_type("class table");
  if (top_ != 0) {
    auto* table = table_.load();
    ObjectPtr* from = reinterpret_cast<ObjectPtr*>(&table[0]);
    ObjectPtr* to = reinterpret_cast<ObjectPtr*>(&table[top_ - 1]);
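    // Note: [to] points at the last element, i.e. VisitPointers visits the
    // inclusive range [from, to].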
    visitor->VisitPointers(from, to);
  }
  if (tlc_top_ != 0) {
    auto* tlc_table = tlc_table_.load();
    ObjectPtr* from = reinterpret_cast<ObjectPtr*>(&tlc_table[0]);
    ObjectPtr* to = reinterpret_cast<ObjectPtr*>(&tlc_table[tlc_top_ - 1]);
    visitor->VisitPointers(from, to);
  }
  visitor->clear_gc_root_type();
}

void ClassTable::CopySizesFromClassObjects() {
  ASSERT(kIllegalCid == 0);
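  // SetAt recomputes each class's instance size from its class object and
  // propagates it to the shared class table (see ClassTable::SetAt), so this
  // seemingly redundant store refreshes the size information.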
  for (intptr_t i = 1; i < top_; i++) {
    SetAt(i, At(i));
  }
}

void ClassTable::Validate() {
  Class& cls = Class::Handle();
  for (intptr_t cid = kNumPredefinedCids; cid < top_; cid++) {
    // Some of the class table entries may be NULL as we create some
    // top level classes but do not add them to the list of anonymous
    // classes in a library if there are no top level fields or functions.
    // Since there are no references to these top level classes, they are
    // not written into a full snapshot and will not be recreated when
    // we read back the full snapshot. These class slots end up with NULL
    // entries.
    if (HasValidClassAt(cid)) {
      cls = At(cid);
      ASSERT(cls.IsClass());
      ASSERT(cls.id() == cid);
    }
  }
}

void ClassTable::Print() {
  Class& cls = Class::Handle();
  String& name = String::Handle();

  for (intptr_t i = 1; i < top_; i++) {
    if (!HasValidClassAt(i)) {
      continue;
    }
    cls = At(i);
    if (cls.raw() != nullptr) {
      name = cls.Name();
      OS::PrintErr("%" Pd ": %s\n", i, name.ToCString());
    }
  }
}

void ClassTable::SetAt(intptr_t cid, ClassPtr raw_cls) {
  if (IsTopLevelCid(cid)) {
    tlc_table_.load()[IndexFromTopLevelCid(cid)] = raw_cls;
    return;
  }

  // This is called by the snapshot reader and the class finalizer.
  ASSERT(cid < capacity_);
  const intptr_t size =
      raw_cls == nullptr ? 0 : Class::host_instance_size(raw_cls);
  shared_class_table_->SetSizeAt(cid, size);
  table_.load()[cid] = raw_cls;
}

#ifndef PRODUCT
void ClassTable::PrintToJSONObject(JSONObject* object) {
  Class& cls = Class::Handle();
  object->AddProperty("type", "ClassList");
  {
    JSONArray members(object, "classes");
    for (intptr_t i = 1; i < top_; i++) {
      if (HasValidClassAt(i)) {
        cls = At(i);
        members.AddValue(cls);
      }
    }
  }
}

bool SharedClassTable::ShouldUpdateSizeForClassId(intptr_t cid) {
  return !IsVariableSizeClassId(cid);
}

intptr_t SharedClassTable::ClassOffsetFor(intptr_t cid) {
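  // Byte offset of [cid]'s entry in the allocation tracing table, which holds
  // one uint8_t per class.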
  return cid * sizeof(uint8_t);  // NOLINT
}

void ClassTable::AllocationProfilePrintJSON(JSONStream* stream, bool internal) {
  Isolate* isolate = Isolate::Current();
  ASSERT(isolate != NULL);
  auto isolate_group = isolate->group();
  Heap* heap = isolate_group->heap();
  ASSERT(heap != NULL);
  JSONObject obj(stream);
  obj.AddProperty("type", "AllocationProfile");
  if (isolate_group->last_allocationprofile_accumulator_reset_timestamp() !=
      0) {
    obj.AddPropertyF(
        "dateLastAccumulatorReset", "%" Pd64 "",
        isolate_group->last_allocationprofile_accumulator_reset_timestamp());
  }
  if (isolate_group->last_allocationprofile_gc_timestamp() != 0) {
    obj.AddPropertyF("dateLastServiceGC", "%" Pd64 "",
                     isolate_group->last_allocationprofile_gc_timestamp());
  }

  if (internal) {
    JSONObject heaps(&obj, "_heaps");
    { heap->PrintToJSONObject(Heap::kNew, &heaps); }
    { heap->PrintToJSONObject(Heap::kOld, &heaps); }
  }

  {
    JSONObject memory(&obj, "memoryUsage");
    { heap->PrintMemoryUsageJSON(&memory); }
  }

  Thread* thread = Thread::Current();
  CountObjectsVisitor visitor(thread, NumCids());
  {
    HeapIterationScope iter(thread);
    iter.IterateObjects(&visitor);
    isolate->group()->VisitWeakPersistentHandles(&visitor);
  }

  {
    JSONArray arr(&obj, "members");
    Class& cls = Class::Handle();
    for (intptr_t i = 3; i < top_; i++) {
      if (!HasValidClassAt(i)) continue;

      cls = At(i);
      if (cls.IsNull()) continue;

      JSONObject obj(&arr);
      obj.AddProperty("type", "ClassHeapStats");
      obj.AddProperty("class", cls);
      intptr_t count = visitor.new_count_[i] + visitor.old_count_[i];
      intptr_t size = visitor.new_size_[i] + visitor.old_size_[i];
      obj.AddProperty64("instancesAccumulated", count);
      obj.AddProperty64("accumulatedSize", size);
      obj.AddProperty64("instancesCurrent", count);
      obj.AddProperty64("bytesCurrent", size);

      if (internal) {
        {
          JSONArray new_stats(&obj, "_new");
          new_stats.AddValue(visitor.new_count_[i]);
          new_stats.AddValue(visitor.new_size_[i]);
          new_stats.AddValue(visitor.new_external_size_[i]);
        }
        {
          JSONArray old_stats(&obj, "_old");
          old_stats.AddValue(visitor.old_count_[i]);
          old_stats.AddValue(visitor.old_size_[i]);
          old_stats.AddValue(visitor.old_external_size_[i]);
        }
      }
    }
  }
}
#endif  // !PRODUCT

}  // namespace dart