// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#ifndef RUNTIME_VM_CLASS_TABLE_H_
#define RUNTIME_VM_CLASS_TABLE_H_

#include <memory>

#include "platform/assert.h"
#include "platform/atomic.h"
#include "platform/utils.h"

#include "vm/bitfield.h"
#include "vm/class_id.h"
#include "vm/flags.h"
#include "vm/globals.h"
#include "vm/tagged_pointer.h"

namespace dart {

class Class;
class ClassTable;
class Isolate;
class IsolateGroup;
class IsolateGroupReloadContext;
class IsolateReloadContext;
class JSONArray;
class JSONObject;
class JSONStream;
template <typename T>
class MallocGrowableArray;
class ObjectPointerVisitor;

// Wraps a 64-bit integer to represent the bitmap of unboxed fields
// stored in the shared class table.
class UnboxedFieldBitmap {
 public:
  UnboxedFieldBitmap() : bitmap_(0) {}
  explicit UnboxedFieldBitmap(uint64_t bitmap) : bitmap_(bitmap) {}
  UnboxedFieldBitmap(const UnboxedFieldBitmap&) = default;
  UnboxedFieldBitmap& operator=(const UnboxedFieldBitmap&) = default;

  DART_FORCE_INLINE bool Get(intptr_t position) const {
    if (position >= Length()) return false;
    return Utils::TestBit(bitmap_, position);
  }
  DART_FORCE_INLINE void Set(intptr_t position) {
    ASSERT(position < Length());
    bitmap_ |= Utils::Bit<decltype(bitmap_)>(position);
  }
  DART_FORCE_INLINE uint64_t Value() const { return bitmap_; }
  DART_FORCE_INLINE bool IsEmpty() const { return bitmap_ == 0; }
  DART_FORCE_INLINE void Reset() { bitmap_ = 0; }

  DART_FORCE_INLINE static constexpr intptr_t Length() {
    return sizeof(decltype(bitmap_)) * kBitsPerByte;
  }

 private:
  uint64_t bitmap_;
};
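
// Illustrative sketch of how the bitmap is read and written; the layout below
// (word 1 holds a pointer, word 2 holds an unboxed double) is hypothetical,
// see Class::Calculate...() for how real bitmaps are built:
//
//   UnboxedFieldBitmap bitmap;
//   bitmap.Set(2);              // word 2 is unboxed data, skipped by the GC
//   ASSERT(bitmap.Get(2));
//   ASSERT(!bitmap.Get(1));     // word 1 is a pointer the GC must scan
//   ASSERT(!bitmap.IsEmpty());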

// Registry of all known classes and their sizes.
//
// The GC needs only the information in this shared class table to scan
// object pointers.
class SharedClassTable {
 public:
  SharedClassTable();
  ~SharedClassTable();

  // Thread-safe.
  intptr_t SizeAt(intptr_t index) const {
    ASSERT(IsValidIndex(index));
    return table_.load()[index];
  }

  bool HasValidClassAt(intptr_t index) const {
    ASSERT(IsValidIndex(index));
    ASSERT(table_.load()[index] >= 0);
    return table_.load()[index] != 0;
  }

  void SetSizeAt(intptr_t index, intptr_t size) {
    ASSERT(IsValidIndex(index));

    // Ensure we never change size for a given cid from one non-zero size to
    // another non-zero size.
    intptr_t old_size = 0;
    if (!table_.load()[index].compare_exchange_strong(old_size, size)) {
      RELEASE_ASSERT(old_size == size);
    }
  }
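
  // For illustration only (`shared_table` and `cid` are placeholder names,
  // and the sizes are hypothetical): a cid's size may go from 0 to a non-zero
  // value once; repeating the same value is a no-op, while changing it to a
  // different non-zero value trips the RELEASE_ASSERT above.
  //
  //   shared_table->SetSizeAt(cid, 16);  // 0 -> 16: stored
  //   shared_table->SetSizeAt(cid, 16);  // 16 -> 16: allowed, no change
  //   shared_table->SetSizeAt(cid, 32);  // 16 -> 32: RELEASE_ASSERT fails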

  bool IsValidIndex(intptr_t index) const { return index > 0 && index < top_; }

  intptr_t NumCids() const { return top_; }
  intptr_t Capacity() const { return capacity_; }

  UnboxedFieldBitmap GetUnboxedFieldsMapAt(intptr_t index) const {
    ASSERT(IsValidIndex(index));
    return FLAG_precompiled_mode ? unboxed_fields_map_[index]
                                 : UnboxedFieldBitmap();
  }

  void SetUnboxedFieldsMapAt(intptr_t index,
                             UnboxedFieldBitmap unboxed_fields_map) {
    ASSERT(IsValidIndex(index));
    ASSERT(unboxed_fields_map_[index].IsEmpty());
    unboxed_fields_map_[index] = unboxed_fields_map;
  }

  // Used to drop recently added classes.
  void SetNumCids(intptr_t num_cids) {
    ASSERT(num_cids <= top_);
    top_ = num_cids;
  }

#if !defined(PRODUCT)
  void SetTraceAllocationFor(intptr_t cid, bool trace) {
    ASSERT(cid > 0);
    ASSERT(cid < top_);
    trace_allocation_table_.load()[cid] = trace ? 1 : 0;
  }
  bool TraceAllocationFor(intptr_t cid);
#endif  // !defined(PRODUCT)

  void CopyBeforeHotReload(intptr_t** copy, intptr_t* copy_num_cids) {
    // The [IsolateGroupReloadContext] will need to maintain a copy of the old
    // class table until instances have been morphed.
    const intptr_t num_cids = NumCids();
    const intptr_t bytes = sizeof(intptr_t) * num_cids;
    auto size_table = static_cast<intptr_t*>(malloc(bytes));
    auto table = table_.load();
    for (intptr_t i = 0; i < num_cids; i++) {
      // Don't use memmove, which changes this from a relaxed atomic operation
      // to a non-atomic operation.
      size_table[i] = table[i];
    }
    *copy_num_cids = num_cids;
    *copy = size_table;
  }

  void ResetBeforeHotReload() {
    // The [IsolateReloadContext] is now source-of-truth for GC.
    auto table = table_.load();
    for (intptr_t i = 0; i < top_; i++) {
      // Don't use memset, which changes this from a relaxed atomic operation
      // to a non-atomic operation.
      table[i] = 0;
    }
  }

  void ResetAfterHotReload(intptr_t* old_table,
                           intptr_t num_old_cids,
                           bool is_rollback) {
    // The [IsolateReloadContext] is no longer source-of-truth for GC after we
    // return, so we restore size information for all classes.
    if (is_rollback) {
      SetNumCids(num_old_cids);
      auto table = table_.load();
      for (intptr_t i = 0; i < num_old_cids; i++) {
        // Don't use memmove, which changes this from a relaxed atomic
        // operation to a non-atomic operation.
        table[i] = old_table[i];
      }
    }

    // Can't free this table immediately as another thread (e.g., concurrent
    // marker or sweeper) may be between loading the table pointer and loading
    // the table element. The table will be freed at the next major GC or
    // isolate shutdown.
    AddOldTable(old_table);
  }
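
  // A rough sketch of how a reload context might drive the three calls above
  // (the names `shared_table`, `copy`, `num_cids`, and `failed` are
  // illustrative; the real sequencing lives in the hot-reload machinery):
  //
  //   intptr_t* copy = nullptr;
  //   intptr_t num_cids = 0;
  //   shared_table->CopyBeforeHotReload(&copy, &num_cids);
  //   shared_table->ResetBeforeHotReload();
  //   ... apply the reload, morph instances ...
  //   shared_table->ResetAfterHotReload(copy, num_cids,
  //                                     /*is_rollback=*/failed);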

  // Deallocates table copies. Do not call during concurrent access to table.
  void FreeOldTables();

  // Deallocates bitmap copies. Do not call during concurrent access to table.
  void FreeOldUnboxedFieldsMaps();

#if !defined(DART_PRECOMPILED_RUNTIME)
  bool IsReloading() const { return reload_context_ != nullptr; }

  IsolateGroupReloadContext* reload_context() { return reload_context_; }
#endif  // !defined(DART_PRECOMPILED_RUNTIME)

  // Returns the newly allocated cid.
  //
  // [index] is kIllegalCid or a predefined cid.
  intptr_t Register(intptr_t index, intptr_t size);
  void AllocateIndex(intptr_t index);
  void Unregister(intptr_t index);

  void Remap(intptr_t* old_to_new_cids);

  // Used by the generated code.
#ifndef PRODUCT
  static intptr_t class_heap_stats_table_offset() {
    return OFFSET_OF(SharedClassTable, trace_allocation_table_);
  }
#endif

  // Used by the generated code.
  static intptr_t ClassOffsetFor(intptr_t cid);

  static const int kInitialCapacity = 512;
  static const int kCapacityIncrement = 256;

 private:
  friend class ClassTable;
  friend class GCMarker;
  friend class MarkingWeakVisitor;
  friend class Scavenger;
  friend class ScavengerWeakVisitor;

  static bool ShouldUpdateSizeForClassId(intptr_t cid);

#ifndef PRODUCT
  // Copy-on-write is used for trace_allocation_table_, with old copies stored
  // in old_tables_.
  AcqRelAtomic<uint8_t*> trace_allocation_table_ = {nullptr};
#endif  // !PRODUCT

  void AddOldTable(intptr_t* old_table);

  void Grow(intptr_t new_capacity);

  intptr_t top_;
  intptr_t capacity_;

  // Copy-on-write is used for table_, with old copies stored in old_tables_.
  // Maps the cid to the instance size.
  AcqRelAtomic<RelaxedAtomic<intptr_t>*> table_ = {nullptr};
  MallocGrowableArray<void*>* old_tables_;

  IsolateGroupReloadContext* reload_context_ = nullptr;

  // Stores a 64-bit bitmap for each class. There is one bit for each word in
  // an instance of the class. A 0 bit indicates that the word contains a
  // pointer the GC has to scan; a 1 bit indicates that the word is part of,
  // e.g., an unboxed double and does not need to be scanned. (See
  // Class::Calculate...(), where the bitmap is constructed.)
  UnboxedFieldBitmap* unboxed_fields_map_ = nullptr;

  DISALLOW_COPY_AND_ASSIGN(SharedClassTable);
};

class ClassTable {
 public:
  explicit ClassTable(SharedClassTable* shared_class_table_);
  ~ClassTable();

  SharedClassTable* shared_class_table() const { return shared_class_table_; }

  void CopyBeforeHotReload(ClassPtr** copy,
                           ClassPtr** tlc_copy,
                           intptr_t* copy_num_cids,
                           intptr_t* copy_num_tlc_cids) {
    // The [IsolateReloadContext] will need to maintain a copy of the old class
    // table until instances have been morphed.
    const intptr_t num_cids = NumCids();
    const intptr_t num_tlc_cids = NumTopLevelCids();
    auto class_table =
        static_cast<ClassPtr*>(malloc(sizeof(ClassPtr) * num_cids));
    auto tlc_class_table =
        static_cast<ClassPtr*>(malloc(sizeof(ClassPtr) * num_tlc_cids));

    // Don't use memmove, which changes this from a relaxed atomic operation
    // to a non-atomic operation.
    auto table = table_.load();
    for (intptr_t i = 0; i < num_cids; i++) {
      class_table[i] = table[i];
    }
    auto tlc_table = tlc_table_.load();
    for (intptr_t i = 0; i < num_tlc_cids; i++) {
      tlc_class_table[i] = tlc_table[i];
    }

    *copy = class_table;
    *tlc_copy = tlc_class_table;
    *copy_num_cids = num_cids;
    *copy_num_tlc_cids = num_tlc_cids;
  }

  void ResetBeforeHotReload() {
    // We cannot clear out the class pointers, because a hot-reload contains
    // only a diff: If, for example, a class included in the hot-reload has a
    // super class that is not included in the diff, that super class will be
    // looked up in this class table (e.g. `cls.SuperClass` will cause us to
    // come here).
  }

  void ResetAfterHotReload(ClassPtr* old_table,
                           ClassPtr* old_tlc_table,
                           intptr_t num_old_cids,
                           intptr_t num_old_tlc_cids,
                           bool is_rollback) {
    // The [IsolateReloadContext] is no longer source-of-truth for GC after we
    // return, so we restore size information for all classes.
    if (is_rollback) {
      SetNumCids(num_old_cids, num_old_tlc_cids);

      // Don't use memmove, which changes this from a relaxed atomic operation
      // to a non-atomic operation.
      auto table = table_.load();
      for (intptr_t i = 0; i < num_old_cids; i++) {
        table[i] = old_table[i];
      }
      auto tlc_table = tlc_table_.load();
      for (intptr_t i = 0; i < num_old_tlc_cids; i++) {
        tlc_table[i] = old_tlc_table[i];
      }
    } else {
      CopySizesFromClassObjects();
    }

    // Can't free these tables immediately as another thread (e.g., concurrent
    // marker or sweeper) may be between loading the table pointer and loading
    // the table element. The table will be freed at the next major GC or
    // isolate shutdown.
    AddOldTable(old_table);
    AddOldTable(old_tlc_table);
  }

  // Thread-safe.
  ClassPtr At(intptr_t cid) const {
    ASSERT(IsValidIndex(cid));
    if (IsTopLevelCid(cid)) {
      return tlc_table_.load()[IndexFromTopLevelCid(cid)];
    }
    return table_.load()[cid];
  }

  intptr_t SizeAt(intptr_t index) const {
    if (IsTopLevelCid(index)) {
      return 0;
    }
    return shared_class_table_->SizeAt(index);
  }

  void SetAt(intptr_t index, ClassPtr raw_cls);

  bool IsValidIndex(intptr_t cid) const {
    if (IsTopLevelCid(cid)) {
      return IndexFromTopLevelCid(cid) < tlc_top_;
    }
    return shared_class_table_->IsValidIndex(cid);
  }

  bool HasValidClassAt(intptr_t cid) const {
    ASSERT(IsValidIndex(cid));
    if (IsTopLevelCid(cid)) {
      return tlc_table_.load()[IndexFromTopLevelCid(cid)] != nullptr;
    }
    return table_.load()[cid] != nullptr;
  }

  intptr_t NumCids() const { return shared_class_table_->NumCids(); }
  intptr_t NumTopLevelCids() const { return tlc_top_; }
  intptr_t Capacity() const { return shared_class_table_->Capacity(); }

  void Register(const Class& cls);
  void RegisterTopLevel(const Class& cls);
  void AllocateIndex(intptr_t index);
  void Unregister(intptr_t index);
  void UnregisterTopLevel(intptr_t index);

  void Remap(intptr_t* old_to_new_cids);

  void VisitObjectPointers(ObjectPointerVisitor* visitor);

  // If a snapshot reader has populated the class table, then the sizes in the
  // class table are not correct. Iterates through the table, updating the
  // sizes.
  void CopySizesFromClassObjects();

  void Validate();

  void Print();

#ifndef PRODUCT
  // Describes layout of heap stats for code generation. See
  // offset_extractor.cc.
  struct ArrayTraits {
    static intptr_t elements_start_offset() { return 0; }

    static constexpr intptr_t kElementSize = sizeof(uint8_t);
  };
#endif

#ifndef PRODUCT

  void AllocationProfilePrintJSON(JSONStream* stream, bool internal);

  void PrintToJSONObject(JSONObject* object);
#endif  // !PRODUCT

  // Deallocates table copies. Do not call during concurrent access to table.
  void FreeOldTables();

  static bool IsTopLevelCid(intptr_t cid) { return cid >= kTopLevelCidOffset; }

  static intptr_t IndexFromTopLevelCid(intptr_t cid) {
    ASSERT(IsTopLevelCid(cid));
    return cid - kTopLevelCidOffset;
  }

  static intptr_t CidFromTopLevelIndex(intptr_t index) {
    return kTopLevelCidOffset + index;
  }
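
  // The helpers above give top-level classes their own cid space. A round
  // trip, for illustration (the index 3 is arbitrary):
  //
  //   intptr_t cid = ClassTable::CidFromTopLevelIndex(3);
  //   ASSERT(ClassTable::IsTopLevelCid(cid));
  //   ASSERT(ClassTable::IndexFromTopLevelCid(cid) == 3);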

 private:
  friend class GCMarker;
  friend class MarkingWeakVisitor;
  friend class Scavenger;
  friend class ScavengerWeakVisitor;
  friend class Dart;
  friend Isolate* CreateWithinExistingIsolateGroup(IsolateGroup* group,
                                                   const char* name,
                                                   char** error);
  friend class Isolate;  // for table()
  static const int kInitialCapacity = SharedClassTable::kInitialCapacity;
  static const int kCapacityIncrement = SharedClassTable::kCapacityIncrement;

  static const intptr_t kTopLevelCidOffset = (1 << 16);

  void AddOldTable(ClassPtr* old_table);
  void AllocateTopLevelIndex(intptr_t index);

  void Grow(intptr_t index);
  void GrowTopLevel(intptr_t index);

  ClassPtr* table() { return table_.load(); }
  void set_table(ClassPtr* table);

  // Used to drop recently added classes.
  void SetNumCids(intptr_t num_cids, intptr_t num_tlc_cids) {
    shared_class_table_->SetNumCids(num_cids);

    ASSERT(num_cids <= top_);
    top_ = num_cids;

    ASSERT(num_tlc_cids <= tlc_top_);
    tlc_top_ = num_tlc_cids;
  }

  intptr_t top_;
  intptr_t capacity_;

  intptr_t tlc_top_;
  intptr_t tlc_capacity_;

  // Copy-on-write is used for table_, with old copies stored in
  // old_class_tables_.
  AcqRelAtomic<ClassPtr*> table_;
  AcqRelAtomic<ClassPtr*> tlc_table_;
  MallocGrowableArray<ClassPtr*>* old_class_tables_;
  SharedClassTable* shared_class_table_;

  DISALLOW_COPY_AND_ASSIGN(ClassTable);
};

#if !defined(PRODUCT)
DART_FORCE_INLINE bool SharedClassTable::TraceAllocationFor(intptr_t cid) {
  ASSERT(cid > 0);
  if (ClassTable::IsTopLevelCid(cid)) {
    return false;
  }
  ASSERT(cid < top_);
  return trace_allocation_table_.load()[cid] != 0;
}
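
// Minimal sketch of toggling allocation tracing for a class id; the names
// `shared_table` and `cid` are illustrative only:
//
//   shared_table->SetTraceAllocationFor(cid, true);
//   ASSERT(shared_table->TraceAllocationFor(cid));
//   shared_table->SetTraceAllocationFor(cid, false);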
#endif  // !defined(PRODUCT)

}  // namespace dart

#endif  // RUNTIME_VM_CLASS_TABLE_H_