// Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/heap/freelist.h"

#include "vm/bit_set.h"
#include "vm/hash_map.h"
#include "vm/lockers.h"
#include "vm/object.h"
#include "vm/os_thread.h"
#include "vm/raw_object.h"

namespace dart {
FreeListElement* FreeListElement::AsElement(uword addr, intptr_t size) {
  // Precondition: the (page containing the) header of the element is
  // writable.
  ASSERT(size >= kObjectAlignment);
  ASSERT(Utils::IsAligned(size, kObjectAlignment));

  FreeListElement* result = reinterpret_cast<FreeListElement*>(addr);

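  // The tag updates below synthesize an object header so that the free region
  // reads as a single pseudo-object of class kFreeListElement (old, unmarked,
  // not remembered), which lets heap walkers step over free space.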
  uint32_t tags = 0;
  tags = ObjectLayout::SizeTag::update(size, tags);
  tags = ObjectLayout::ClassIdTag::update(kFreeListElement, tags);
  ASSERT((addr & kNewObjectAlignmentOffset) == kOldObjectAlignmentOffset);
  tags = ObjectLayout::OldBit::update(true, tags);
  tags = ObjectLayout::OldAndNotMarkedBit::update(true, tags);
  tags = ObjectLayout::OldAndNotRememberedBit::update(true, tags);
  tags = ObjectLayout::NewBit::update(false, tags);
  result->tags_ = tags;
#if defined(HASH_IN_OBJECT_HEADER)
  // Clearing this is mostly for neatness. The identityHashCode
  // of free list entries is not used.
  result->hash_ = 0;
#endif
  if (size > ObjectLayout::SizeTag::kMaxSizeTag) {
    *result->SizeAddress() = size;
  }
  result->set_next(NULL);
  return result;
  // Postcondition: the (page containing the) header of the element is
  // writable.
}

void FreeListElement::Init() {
  ASSERT(sizeof(FreeListElement) == kObjectAlignment);
  ASSERT(OFFSET_OF(FreeListElement, tags_) == Object::tags_offset());
}

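// Returns the number of bytes of header that AsElement writes for an element
// of the given size: the tags word and the next pointer, plus an embedded
// size word when the size is too large for the size tag.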
intptr_t FreeListElement::HeaderSizeFor(intptr_t size) {
  if (size == 0) return 0;
  return ((size > ObjectLayout::SizeTag::kMaxSizeTag) ? 3 : 2) * kWordSize;
}

FreeList::FreeList() : mutex_() {
  Reset();
}

FreeList::~FreeList() {
}

uword FreeList::TryAllocate(intptr_t size, bool is_protected) {
  MutexLocker ml(&mutex_);
  return TryAllocateLocked(size, is_protected);
}

uword FreeList::TryAllocateLocked(intptr_t size, bool is_protected) {
  DEBUG_ASSERT(mutex_.IsOwnedByCurrentThread());
  // Precondition: is_protected is false or else all free list elements are
  // in non-writable pages.

  // Postcondition: if allocation succeeds, the allocated block is writable.
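  // IndexForSize maps each small size to its own exact-size bucket; anything
  // larger maps to kNumLists, the single variable-sized list searched below.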
  int index = IndexForSize(size);
  if ((index != kNumLists) && free_map_.Test(index)) {
    FreeListElement* element = DequeueElement(index);
    if (is_protected) {
      VirtualMemory::Protect(reinterpret_cast<void*>(element), size,
                             VirtualMemory::kReadWrite);
    }
    return reinterpret_cast<uword>(element);
  }

  if ((index + 1) < kNumLists) {
    intptr_t next_index = free_map_.Next(index + 1);
    if (next_index != -1) {
      // Dequeue an element from the list, split and enqueue the remainder in
      // the appropriate list.
      FreeListElement* element = DequeueElement(next_index);
      if (is_protected) {
        // Make the allocated block and the header of the remainder element
        // writable. The remainder will be non-writable if necessary after
        // the call to SplitElementAfterAndEnqueue.
        // If the remainder size is zero, only the element itself needs to
        // be made writable.
        intptr_t remainder_size = element->HeapSize() - size;
        intptr_t region_size =
            size + FreeListElement::HeaderSizeFor(remainder_size);
        VirtualMemory::Protect(reinterpret_cast<void*>(element), region_size,
                               VirtualMemory::kReadWrite);
      }
      SplitElementAfterAndEnqueue(element, size, is_protected);
      return reinterpret_cast<uword>(element);
    }
  }

  FreeListElement* previous = NULL;
  FreeListElement* current = free_lists_[kNumLists];
  // We are willing to search the freelist further for a big block.
  // For each successful free-list search we:
  //   * increase the search budget by #allocated-words
  //   * decrease the search budget by #free-list-entries-traversed
  // which guarantees that we waste no more than about one search step per
  // word of allocation.
  //
  // If we run out of search budget we fall back to allocating a new page and
  // reset the search budget.
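  // For example, with a remaining budget of 16, a request for 128 words that
  // skips 10 too-small entries before succeeding leaves 16 + 128 - 10 = 134,
  // which is capped at kInitialFreeListSearchBudget when it is stored back.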
  intptr_t tries_left = freelist_search_budget_ + (size >> kWordSizeLog2);
  while (current != NULL) {
    if (current->HeapSize() >= size) {
      // Found an element large enough to hold the requested size. Dequeue,
      // split and enqueue the remainder.
      intptr_t remainder_size = current->HeapSize() - size;
      intptr_t region_size =
          size + FreeListElement::HeaderSizeFor(remainder_size);
      if (is_protected) {
        // Make the allocated block and the header of the remainder element
        // writable. The remainder will be non-writable if necessary after
        // the call to SplitElementAfterAndEnqueue.
        VirtualMemory::Protect(reinterpret_cast<void*>(current), region_size,
                               VirtualMemory::kReadWrite);
      }

      if (previous == NULL) {
        free_lists_[kNumLists] = current->next();
      } else {
        // If the previous free list element's next field is protected, it
        // needs to be unprotected before storing to it and reprotected
        // after.
        bool target_is_protected = false;
        uword target_address = 0L;
        if (is_protected) {
          uword writable_start = reinterpret_cast<uword>(current);
          uword writable_end = writable_start + region_size - 1;
          target_address = previous->next_address();
          target_is_protected =
              !VirtualMemory::InSamePage(target_address, writable_start) &&
              !VirtualMemory::InSamePage(target_address, writable_end);
        }
        if (target_is_protected) {
          VirtualMemory::Protect(reinterpret_cast<void*>(target_address),
                                 kWordSize, VirtualMemory::kReadWrite);
        }
        previous->set_next(current->next());
        if (target_is_protected) {
          VirtualMemory::Protect(reinterpret_cast<void*>(target_address),
                                 kWordSize, VirtualMemory::kReadExecute);
        }
      }
      SplitElementAfterAndEnqueue(current, size, is_protected);
      freelist_search_budget_ =
          Utils::Minimum(tries_left, kInitialFreeListSearchBudget);
      return reinterpret_cast<uword>(current);
    } else if (tries_left-- < 0) {
      freelist_search_budget_ = kInitialFreeListSearchBudget;
      return 0;  // Trigger allocation of new page.
    }
    previous = current;
    current = current->next();
  }
  return 0;
}

void FreeList::Free(uword addr, intptr_t size) {
  MutexLocker ml(&mutex_);
  FreeLocked(addr, size);
}

void FreeList::FreeLocked(uword addr, intptr_t size) {
  DEBUG_ASSERT(mutex_.IsOwnedByCurrentThread());
  // Precondition required by AsElement and EnqueueElement: the (page
  // containing the) header of the freed block should be writable. This is
  // the case when called for newly allocated pages because they are
  // allocated as writable. It is the case when called during GC sweeping
  // because the entire heap is writable.
  intptr_t index = IndexForSize(size);
  FreeListElement* element = FreeListElement::AsElement(addr, size);
  EnqueueElement(element, index);
  // Postcondition: the (page containing the) header is left writable.
}

void FreeList::Reset() {
  MutexLocker ml(&mutex_);
  free_map_.Reset();
  last_free_small_size_ = -1;
  for (int i = 0; i < (kNumLists + 1); i++) {
    free_lists_[i] = NULL;
  }
}

void FreeList::EnqueueElement(FreeListElement* element, intptr_t index) {
  FreeListElement* next = free_lists_[index];
  if (next == NULL && index != kNumLists) {
    free_map_.Set(index, true);
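    // The list index encodes the element size in allocation units, so
    // shifting by kObjectAlignmentLog2 converts it back to a size in bytes.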
    last_free_small_size_ =
        Utils::Maximum(last_free_small_size_, index << kObjectAlignmentLog2);
  }
  element->set_next(next);
  free_lists_[index] = element;
}

intptr_t FreeList::LengthLocked(int index) const {
  DEBUG_ASSERT(mutex_.IsOwnedByCurrentThread());
  ASSERT(index >= 0);
  ASSERT(index < kNumLists);
  intptr_t result = 0;
  FreeListElement* element = free_lists_[index];
  while (element != NULL) {
    ++result;
    element = element->next();
  }
  return result;
}

void FreeList::PrintSmall() const {
  int small_sizes = 0;
  int small_objects = 0;
  intptr_t small_bytes = 0;
  for (int i = 0; i < kNumLists; ++i) {
    if (free_lists_[i] == NULL) {
      continue;
    }
    small_sizes += 1;
    intptr_t list_length = LengthLocked(i);
    small_objects += list_length;
    intptr_t list_bytes = list_length * i * kObjectAlignment;
    small_bytes += list_bytes;
    OS::PrintErr(
        "small %3d [%8d bytes] : "
        "%8" Pd " objs; %8.1f KB; %8.1f cum KB\n",
        i, static_cast<int>(i * kObjectAlignment), list_length,
        list_bytes / static_cast<double>(KB),
        small_bytes / static_cast<double>(KB));
  }
}

class IntptrPair {
 public:
  IntptrPair() : first_(-1), second_(-1) {}
  IntptrPair(intptr_t first, intptr_t second)
      : first_(first), second_(second) {}

  intptr_t first() const { return first_; }
  intptr_t second() const { return second_; }
  void set_second(intptr_t s) { second_ = s; }

  bool operator==(const IntptrPair& other) {
    return (first_ == other.first_) && (second_ == other.second_);
  }

  bool operator!=(const IntptrPair& other) {
    return (first_ != other.first_) || (second_ != other.second_);
  }

 private:
  intptr_t first_;
  intptr_t second_;
};

void FreeList::PrintLarge() const {
  int large_sizes = 0;
  int large_objects = 0;
  intptr_t large_bytes = 0;
  MallocDirectChainedHashMap<NumbersKeyValueTrait<IntptrPair> > map;
  FreeListElement* node;
  for (node = free_lists_[kNumLists]; node != NULL; node = node->next()) {
    IntptrPair* pair = map.Lookup(node->HeapSize());
    if (pair == NULL) {
      large_sizes += 1;
      map.Insert(IntptrPair(node->HeapSize(), 1));
    } else {
      pair->set_second(pair->second() + 1);
    }
    large_objects += 1;
  }

  MallocDirectChainedHashMap<NumbersKeyValueTrait<IntptrPair> >::Iterator it =
      map.GetIterator();
  IntptrPair* pair;
  while ((pair = it.Next()) != NULL) {
    intptr_t size = pair->first();
    intptr_t list_length = pair->second();
    intptr_t list_bytes = list_length * size;
    large_bytes += list_bytes;
    OS::PrintErr("large %3" Pd " [%8" Pd
                 " bytes] : "
                 "%8" Pd " objs; %8.1f KB; %8.1f cum KB\n",
                 size / kObjectAlignment, size, list_length,
                 list_bytes / static_cast<double>(KB),
                 large_bytes / static_cast<double>(KB));
  }
}

void FreeList::Print() const {
  MutexLocker ml(&mutex_);
  PrintSmall();
  PrintLarge();
}

void FreeList::SplitElementAfterAndEnqueue(FreeListElement* element,
                                           intptr_t size,
                                           bool is_protected) {
  // Precondition required by AsElement and EnqueueElement: either
  // element->Size() == size, or else the (page containing the) header of
  // the remainder element starting at element + size is writable.
  intptr_t remainder_size = element->HeapSize() - size;
  if (remainder_size == 0) return;

  uword remainder_address = reinterpret_cast<uword>(element) + size;
  element = FreeListElement::AsElement(remainder_address, remainder_size);
  intptr_t remainder_index = IndexForSize(remainder_size);
  EnqueueElement(element, remainder_index);

  // Postcondition: when allocating in a protected page, the fraction of the
  // remainder element which does not share a page with the allocated element
  // is no longer writable. This means that if the remainder's header is not
  // fully contained in the last page of the allocation, we need to re-protect
  // the page it ends on.
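  // For example, with 4 KB pages, if the allocation ends 8 bytes before a
  // page boundary and the remainder header is 16 bytes, the header spills
  // onto the next page, and that page must be flipped back to read-execute.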
  if (is_protected) {
    const uword remainder_header_size =
        FreeListElement::HeaderSizeFor(remainder_size);
    if (!VirtualMemory::InSamePage(
            remainder_address - 1,
            remainder_address + remainder_header_size - 1)) {
      VirtualMemory::Protect(
          reinterpret_cast<void*>(
              Utils::RoundUp(remainder_address, VirtualMemory::PageSize())),
          remainder_address + remainder_header_size -
              Utils::RoundUp(remainder_address, VirtualMemory::PageSize()),
          VirtualMemory::kReadExecute);
    }
  }
}

FreeListElement* FreeList::TryAllocateLarge(intptr_t minimum_size) {
  MutexLocker ml(&mutex_);
  return TryAllocateLargeLocked(minimum_size);
}

FreeListElement* FreeList::TryAllocateLargeLocked(intptr_t minimum_size) {
  DEBUG_ASSERT(mutex_.IsOwnedByCurrentThread());
  FreeListElement* previous = NULL;
  FreeListElement* current = free_lists_[kNumLists];
  // TODO(koda): Find largest.
  // We are willing to search the freelist further for a big block.
  intptr_t tries_left =
      freelist_search_budget_ + (minimum_size >> kWordSizeLog2);
  while (current != NULL) {
    FreeListElement* next = current->next();
    if (current->HeapSize() >= minimum_size) {
      if (previous == NULL) {
        free_lists_[kNumLists] = next;
      } else {
        previous->set_next(next);
      }
      freelist_search_budget_ =
          Utils::Minimum(tries_left, kInitialFreeListSearchBudget);
      return current;
    } else if (tries_left-- < 0) {
      freelist_search_budget_ = kInitialFreeListSearchBudget;
      return 0;  // Trigger allocation of new page.
    }
    previous = current;
    current = next;
  }
  return NULL;
}

void FreeList::MergeFrom(FreeList* donor, bool is_protected) {
  // The [donor] free list is from a dying isolate. There are no other threads
  // accessing it, so there is no need to lock the donor; we only take the
  // receiving free list's lock.
  MutexLocker ml(&mutex_);
  for (intptr_t i = 0; i < (kNumLists + 1); ++i) {
    FreeListElement* donor_head = donor->free_lists_[i];
    if (donor_head != nullptr) {
      // If this size class was empty before, set its bit now, since we are
      // about to receive at least one element from [donor].
      FreeListElement* old_head = free_lists_[i];
      if (old_head == nullptr && i != kNumLists) {
        free_map_.Set(i, true);
      }

      // Chain the donor's list in.
      FreeListElement* last = donor_head;
      while (last->next() != nullptr) {
        last = last->next();
      }

      if (is_protected) {
        VirtualMemory::Protect(reinterpret_cast<void*>(last), sizeof(*last),
                               VirtualMemory::kReadWrite);
      }
      last->set_next(old_head);
      if (is_protected) {
        VirtualMemory::Protect(reinterpret_cast<void*>(last), sizeof(*last),
                               VirtualMemory::kReadExecute);
      }
      free_lists_[i] = donor_head;
    }
  }

  last_free_small_size_ =
      Utils::Maximum(last_free_small_size_, donor->last_free_small_size_);
}

}  // namespace dart