/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"

STATIC_ASSERT(sizeof(BitMap::bm_word_t) == BytesPerWord); // "Implementation assumption."

typedef BitMap::bm_word_t bm_word_t;
typedef BitMap::idx_t idx_t;

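// Allocator policy classes used by the BitMap allocation templates below.
// ResourceBitMapAllocator and ArenaBitMapAllocator hand out memory from a
// resource area or an Arena and never free it; CHeapBitMapAllocator uses the
// C heap and tracks the memory under the given MEMFLAGS category.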
class ResourceBitMapAllocator : StackObj {
 public:
  bm_word_t* allocate(idx_t size_in_words) const {
    return NEW_RESOURCE_ARRAY(bm_word_t, size_in_words);
  }
  void free(bm_word_t* map, idx_t size_in_words) const {
    // Don't free resource allocated arrays.
  }
};

class CHeapBitMapAllocator : StackObj {
  MEMFLAGS _flags;

 public:
  CHeapBitMapAllocator(MEMFLAGS flags) : _flags(flags) {}
  bm_word_t* allocate(size_t size_in_words) const {
    return ArrayAllocator<bm_word_t>::allocate(size_in_words, _flags);
  }
  void free(bm_word_t* map, idx_t size_in_words) const {
    ArrayAllocator<bm_word_t>::free(map, size_in_words);
  }
};

class ArenaBitMapAllocator : StackObj {
  Arena* _arena;

 public:
  ArenaBitMapAllocator(Arena* arena) : _arena(arena) {}
  bm_word_t* allocate(idx_t size_in_words) const {
    return (bm_word_t*)_arena->Amalloc(size_in_words * BytesPerWord);
  }
  void free(bm_word_t* map, idx_t size_in_words) const {
    // ArenaBitMaps currently don't free memory.
  }
};

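// reallocate() is the single primitive behind allocate(), free() and
// resize(): it allocates the new map, copies the overlapping words from the
// old map, optionally clears the grown tail, and releases the old map
// through the allocator.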
template <class Allocator>
BitMap::bm_word_t* BitMap::reallocate(const Allocator& allocator, bm_word_t* old_map, idx_t old_size_in_bits, idx_t new_size_in_bits, bool clear) {
  size_t old_size_in_words = calc_size_in_words(old_size_in_bits);
  size_t new_size_in_words = calc_size_in_words(new_size_in_bits);

  bm_word_t* map = NULL;

  if (new_size_in_words > 0) {
    map = allocator.allocate(new_size_in_words);

    if (old_map != NULL) {
      Copy::disjoint_words((HeapWord*)old_map, (HeapWord*)map,
                           MIN2(old_size_in_words, new_size_in_words));
    }

    if (clear && new_size_in_words > old_size_in_words) {
      clear_range_of_words(map, old_size_in_words, new_size_in_words);
    }
  }

  if (old_map != NULL) {
    allocator.free(old_map, old_size_in_words);
  }

  return map;
}

template <class Allocator>
bm_word_t* BitMap::allocate(const Allocator& allocator, idx_t size_in_bits, bool clear) {
  // Reuse reallocate to ensure that the new memory is cleared.
  return reallocate(allocator, NULL, 0, size_in_bits, clear);
}

template <class Allocator>
void BitMap::free(const Allocator& allocator, bm_word_t* map, idx_t size_in_bits) {
  bm_word_t* ret = reallocate(allocator, map, size_in_bits, 0);
  assert(ret == NULL, "Reallocate shouldn't have allocated");
}

template <class Allocator>
void BitMap::resize(const Allocator& allocator, idx_t new_size_in_bits, bool clear) {
  bm_word_t* new_map = reallocate(allocator, map(), size(), new_size_in_bits, clear);

  update(new_map, new_size_in_bits);
}

template <class Allocator>
void BitMap::initialize(const Allocator& allocator, idx_t size_in_bits, bool clear) {
  assert(map() == NULL, "precondition");
  assert(size() == 0, "precondition");

  resize(allocator, size_in_bits, clear);
}

template <class Allocator>
void BitMap::reinitialize(const Allocator& allocator, idx_t new_size_in_bits, bool clear) {
  // Remove previous bits - no need to clear
  resize(allocator, 0, false /* clear */);

  initialize(allocator, new_size_in_bits, clear);
}

ResourceBitMap::ResourceBitMap(idx_t size_in_bits)
  : BitMap(allocate(ResourceBitMapAllocator(), size_in_bits), size_in_bits) {
}

void ResourceBitMap::resize(idx_t new_size_in_bits) {
  BitMap::resize(ResourceBitMapAllocator(), new_size_in_bits, true /* clear */);
}

void ResourceBitMap::initialize(idx_t size_in_bits) {
  BitMap::initialize(ResourceBitMapAllocator(), size_in_bits, true /* clear */);
}

void ResourceBitMap::reinitialize(idx_t size_in_bits) {
  BitMap::reinitialize(ResourceBitMapAllocator(), size_in_bits, true /* clear */);
}

ArenaBitMap::ArenaBitMap(Arena* arena, idx_t size_in_bits)
  : BitMap(allocate(ArenaBitMapAllocator(arena), size_in_bits), size_in_bits) {
}

CHeapBitMap::CHeapBitMap(idx_t size_in_bits, MEMFLAGS flags, bool clear)
  : BitMap(allocate(CHeapBitMapAllocator(flags), size_in_bits, clear), size_in_bits), _flags(flags) {
}

CHeapBitMap::~CHeapBitMap() {
  free(CHeapBitMapAllocator(_flags), map(), size());
}

void CHeapBitMap::resize(idx_t new_size_in_bits, bool clear) {
  BitMap::resize(CHeapBitMapAllocator(_flags), new_size_in_bits, clear);
}

void CHeapBitMap::initialize(idx_t size_in_bits, bool clear) {
  BitMap::initialize(CHeapBitMapAllocator(_flags), size_in_bits, clear);
}

void CHeapBitMap::reinitialize(idx_t size_in_bits, bool clear) {
  BitMap::reinitialize(CHeapBitMapAllocator(_flags), size_in_bits, clear);
}
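
// A minimal usage sketch (illustrative only; relies on the inline accessors
// declared in bitMap.hpp such as set_bit):
//
//   CHeapBitMap live(1024, mtGC, true /* clear */);
//   live.set_bit(17);
//   live.resize(2048, true /* clear */);       // grown tail is cleared
//   live.reinitialize(512, true /* clear */);  // discard old bits, start over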

#ifdef ASSERT
void BitMap::verify_index(idx_t index) const {
  assert(index < _size, "BitMap index out of bounds");
}

void BitMap::verify_range(idx_t beg_index, idx_t end_index) const {
  assert(beg_index <= end_index, "BitMap range error");
  // Note that [0,0) and [size,size) are both valid ranges.
  if (end_index != _size) verify_index(end_index);
}
#endif // #ifdef ASSERT

void BitMap::pretouch() {
  os::pretouch_memory(word_addr(0), word_addr(size()));
}

void BitMap::set_range_within_word(idx_t beg, idx_t end) {
  // With a valid range (beg <= end), this test ensures that end != 0, as
  // required by inverted_bit_mask_for_range. Also avoids an unnecessary write.
  if (beg != end) {
    bm_word_t mask = inverted_bit_mask_for_range(beg, end);
    *word_addr(beg) |= ~mask;
  }
}

void BitMap::clear_range_within_word(idx_t beg, idx_t end) {
  // With a valid range (beg <= end), this test ensures that end != 0, as
  // required by inverted_bit_mask_for_range. Also avoids an unnecessary write.
  if (beg != end) {
    bm_word_t mask = inverted_bit_mask_for_range(beg, end);
    *word_addr(beg) &= mask;
  }
}

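// Atomically set (value == 1) or clear (value == 0) the bits [beg, end) that
// fall within the single word containing beg, retrying the compare-and-swap
// until the update succeeds despite concurrent modifications of that word.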
void BitMap::par_put_range_within_word(idx_t beg, idx_t end, bool value) {
  assert(value == 0 || value == 1, "0 for clear, 1 for set");
  // With a valid range (beg <= end), this test ensures that end != 0, as
  // required by inverted_bit_mask_for_range. Also avoids an unnecessary write.
  if (beg != end) {
    bm_word_t* pw = word_addr(beg);
    bm_word_t w = *pw;
    bm_word_t mr = inverted_bit_mask_for_range(beg, end);
    bm_word_t nw = value ? (w | ~mr) : (w & mr);
    while (true) {
      bm_word_t res = Atomic::cmpxchg(nw, pw, w);
      if (res == w) break;
      w = res;
      nw = value ? (w | ~mr) : (w & mr);
    }
  }
}

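// Example (assuming BitsPerWord == 64): set_range(3, 130) decomposes into
// the partial range [3, 64) in word 0, all of word 1 (bits [64, 128)), and
// the partial range [128, 130) in word 2.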
void BitMap::set_range(idx_t beg, idx_t end) {
  verify_range(beg, end);

  idx_t beg_full_word = word_index_round_up(beg);
  idx_t end_full_word = word_index(end);

  if (beg_full_word < end_full_word) {
    // The range includes at least one full word.
    set_range_within_word(beg, bit_index(beg_full_word));
    set_range_of_words(beg_full_word, end_full_word);
    set_range_within_word(bit_index(end_full_word), end);
  } else {
    // The range spans at most 2 partial words.
    idx_t boundary = MIN2(bit_index(beg_full_word), end);
    set_range_within_word(beg, boundary);
    set_range_within_word(boundary, end);
  }
}

void BitMap::clear_range(idx_t beg, idx_t end) {
  verify_range(beg, end);

  idx_t beg_full_word = word_index_round_up(beg);
  idx_t end_full_word = word_index(end);

  if (beg_full_word < end_full_word) {
    // The range includes at least one full word.
    clear_range_within_word(beg, bit_index(beg_full_word));
    clear_range_of_words(beg_full_word, end_full_word);
    clear_range_within_word(bit_index(end_full_word), end);
  } else {
    // The range spans at most 2 partial words.
    idx_t boundary = MIN2(bit_index(beg_full_word), end);
    clear_range_within_word(beg, boundary);
    clear_range_within_word(boundary, end);
  }
}

bool BitMap::is_small_range_of_words(idx_t beg_full_word, idx_t end_full_word) {
  // There is little point in calling the large version on small ranges.
  // Need to check carefully, keeping potential idx_t underflow in mind.
  // The threshold should be at least one word.
  STATIC_ASSERT(small_range_words >= 1);
  return (beg_full_word + small_range_words >= end_full_word);
}

void BitMap::set_large_range(idx_t beg, idx_t end) {
  verify_range(beg, end);

  idx_t beg_full_word = word_index_round_up(beg);
  idx_t end_full_word = word_index(end);

  if (is_small_range_of_words(beg_full_word, end_full_word)) {
    set_range(beg, end);
    return;
  }

  // The range includes at least one full word.
  set_range_within_word(beg, bit_index(beg_full_word));
  set_large_range_of_words(beg_full_word, end_full_word);
  set_range_within_word(bit_index(end_full_word), end);
}

void BitMap::clear_large_range(idx_t beg, idx_t end) {
  verify_range(beg, end);

  idx_t beg_full_word = word_index_round_up(beg);
  idx_t end_full_word = word_index(end);

  if (is_small_range_of_words(beg_full_word, end_full_word)) {
    clear_range(beg, end);
    return;
  }

  // The range includes at least one full word.
  clear_range_within_word(beg, bit_index(beg_full_word));
  clear_large_range_of_words(beg_full_word, end_full_word);
  clear_range_within_word(bit_index(end_full_word), end);
}

void BitMap::at_put(idx_t offset, bool value) {
  if (value) {
    set_bit(offset);
  } else {
    clear_bit(offset);
  }
}

// Return true to indicate that this thread changed
// the bit, false to indicate that someone else did.
// In either case, the requested bit is in the
// requested state some time during the period that
// this thread is executing this call. More importantly,
// if no other thread is executing an action to
// change the requested bit to a state other than
// the one that this thread is trying to set it to,
// then the bit is in the expected state
// at exit from this method. However, rather than
// make such a strong assertion here, based on
// assuming such constrained use (which though true
// today, could change in the future to service some
// funky parallel algorithm), we encourage callers
// to do such verification, as and when appropriate.
bool BitMap::par_at_put(idx_t bit, bool value) {
  return value ? par_set_bit(bit) : par_clear_bit(bit);
}

void BitMap::at_put_range(idx_t start_offset, idx_t end_offset, bool value) {
  if (value) {
    set_range(start_offset, end_offset);
  } else {
    clear_range(start_offset, end_offset);
  }
}

void BitMap::par_at_put_range(idx_t beg, idx_t end, bool value) {
  verify_range(beg, end);

  idx_t beg_full_word = word_index_round_up(beg);
  idx_t end_full_word = word_index(end);

  if (beg_full_word < end_full_word) {
    // The range includes at least one full word.
    par_put_range_within_word(beg, bit_index(beg_full_word), value);
    if (value) {
      set_range_of_words(beg_full_word, end_full_word);
    } else {
      clear_range_of_words(beg_full_word, end_full_word);
    }
    par_put_range_within_word(bit_index(end_full_word), end, value);
  } else {
    // The range spans at most 2 partial words.
    idx_t boundary = MIN2(bit_index(beg_full_word), end);
    par_put_range_within_word(beg, boundary, value);
    par_put_range_within_word(boundary, end, value);
  }
}

void BitMap::at_put_large_range(idx_t beg, idx_t end, bool value) {
  if (value) {
    set_large_range(beg, end);
  } else {
    clear_large_range(beg, end);
  }
}

void BitMap::par_at_put_large_range(idx_t beg, idx_t end, bool value) {
  verify_range(beg, end);

  idx_t beg_full_word = word_index_round_up(beg);
  idx_t end_full_word = word_index(end);

  if (is_small_range_of_words(beg_full_word, end_full_word)) {
    par_at_put_range(beg, end, value);
    return;
  }

  // The range includes at least one full word.
  par_put_range_within_word(beg, bit_index(beg_full_word), value);
  if (value) {
    set_large_range_of_words(beg_full_word, end_full_word);
  } else {
    clear_large_range_of_words(beg_full_word, end_full_word);
  }
  par_put_range_within_word(bit_index(end_full_word), end, value);
}

inline bm_word_t tail_mask(idx_t tail_bits) {
  assert(tail_bits != 0, "precondition"); // Works, but shouldn't be called.
  assert(tail_bits < (idx_t)BitsPerWord, "precondition");
  return (bm_word_t(1) << tail_bits) - 1;
}

// Get the low tail_bits of value, which is the last partial word of a map.
inline bm_word_t tail_of_map(bm_word_t value, idx_t tail_bits) {
  return value & tail_mask(tail_bits);
}

// Compute the new last word of a map with a non-aligned length.
// new_value has the new trailing bits of the map in the low tail_bits.
// old_value is the last word of the map, including bits beyond the end.
// Returns old_value with the low tail_bits replaced by the corresponding
// bits in new_value.
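// For example, with tail_bits == 4 the mask is 0xf, so
// merge_tail_of_map(0x05, 0xf0, 4) == 0xf5.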
inline bm_word_t merge_tail_of_map(bm_word_t new_value,
                                   bm_word_t old_value,
                                   idx_t tail_bits) {
  bm_word_t mask = tail_mask(tail_bits);
  return (new_value & mask) | (old_value & ~mask);
}

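// The set operations below share one pattern: the word_index(size()) full
// words are processed directly, then tail_of_map() / merge_tail_of_map()
// restrict the final, partially used word to the bit_in_word(size()) live
// bits so that bits beyond the map's logical size are ignored or preserved.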
bool BitMap::contains(const BitMap& other) const {
  assert(size() == other.size(), "must have same size");
  const bm_word_t* dest_map = map();
  const bm_word_t* other_map = other.map();
  idx_t limit = word_index(size());
  for (idx_t index = 0; index < limit; ++index) {
    // false if other bitmap has bits set which are clear in this bitmap.
    if ((~dest_map[index] & other_map[index]) != 0) return false;
  }
  idx_t rest = bit_in_word(size());
  // true unless there is a partial-word tail in which the other
  // bitmap has bits set which are clear in this bitmap.
  return (rest == 0) || tail_of_map(~dest_map[limit] & other_map[limit], rest) == 0;
}

bool BitMap::intersects(const BitMap& other) const {
  assert(size() == other.size(), "must have same size");
  const bm_word_t* dest_map = map();
  const bm_word_t* other_map = other.map();
  idx_t limit = word_index(size());
  for (idx_t index = 0; index < limit; ++index) {
    if ((dest_map[index] & other_map[index]) != 0) return true;
  }
  idx_t rest = bit_in_word(size());
  // false unless there is a partial-word tail with non-empty intersection.
  return (rest > 0) && tail_of_map(dest_map[limit] & other_map[limit], rest) != 0;
}

void BitMap::set_union(const BitMap& other) {
  assert(size() == other.size(), "must have same size");
  bm_word_t* dest_map = map();
  const bm_word_t* other_map = other.map();
  idx_t limit = word_index(size());
  for (idx_t index = 0; index < limit; ++index) {
    dest_map[index] |= other_map[index];
  }
  idx_t rest = bit_in_word(size());
  if (rest > 0) {
    bm_word_t orig = dest_map[limit];
    dest_map[limit] = merge_tail_of_map(orig | other_map[limit], orig, rest);
  }
}

void BitMap::set_difference(const BitMap& other) {
  assert(size() == other.size(), "must have same size");
  bm_word_t* dest_map = map();
  const bm_word_t* other_map = other.map();
  idx_t limit = word_index(size());
  for (idx_t index = 0; index < limit; ++index) {
    dest_map[index] &= ~other_map[index];
  }
  idx_t rest = bit_in_word(size());
  if (rest > 0) {
    bm_word_t orig = dest_map[limit];
    dest_map[limit] = merge_tail_of_map(orig & ~other_map[limit], orig, rest);
  }
}

void BitMap::set_intersection(const BitMap& other) {
  assert(size() == other.size(), "must have same size");
  bm_word_t* dest_map = map();
  const bm_word_t* other_map = other.map();
  idx_t limit = word_index(size());
  for (idx_t index = 0; index < limit; ++index) {
    dest_map[index] &= other_map[index];
  }
  idx_t rest = bit_in_word(size());
  if (rest > 0) {
    bm_word_t orig = dest_map[limit];
    dest_map[limit] = merge_tail_of_map(orig & other_map[limit], orig, rest);
  }
}

bool BitMap::set_union_with_result(const BitMap& other) {
  assert(size() == other.size(), "must have same size");
  bool changed = false;
  bm_word_t* dest_map = map();
  const bm_word_t* other_map = other.map();
  idx_t limit = word_index(size());
  for (idx_t index = 0; index < limit; ++index) {
    bm_word_t orig = dest_map[index];
    bm_word_t temp = orig | other_map[index];
    changed = changed || (temp != orig);
    dest_map[index] = temp;
  }
  idx_t rest = bit_in_word(size());
  if (rest > 0) {
    bm_word_t orig = dest_map[limit];
    bm_word_t temp = merge_tail_of_map(orig | other_map[limit], orig, rest);
    changed = changed || (temp != orig);
    dest_map[limit] = temp;
  }
  return changed;
}

bool BitMap::set_difference_with_result(const BitMap& other) {
  assert(size() == other.size(), "must have same size");
  bool changed = false;
  bm_word_t* dest_map = map();
  const bm_word_t* other_map = other.map();
  idx_t limit = word_index(size());
  for (idx_t index = 0; index < limit; ++index) {
    bm_word_t orig = dest_map[index];
    bm_word_t temp = orig & ~other_map[index];
    changed = changed || (temp != orig);
    dest_map[index] = temp;
  }
  idx_t rest = bit_in_word(size());
  if (rest > 0) {
    bm_word_t orig = dest_map[limit];
    bm_word_t temp = merge_tail_of_map(orig & ~other_map[limit], orig, rest);
    changed = changed || (temp != orig);
    dest_map[limit] = temp;
  }
  return changed;
}

bool BitMap::set_intersection_with_result(const BitMap& other) {
  assert(size() == other.size(), "must have same size");
  bool changed = false;
  bm_word_t* dest_map = map();
  const bm_word_t* other_map = other.map();
  idx_t limit = word_index(size());
  for (idx_t index = 0; index < limit; ++index) {
    bm_word_t orig = dest_map[index];
    bm_word_t temp = orig & other_map[index];
    changed = changed || (temp != orig);
    dest_map[index] = temp;
  }
  idx_t rest = bit_in_word(size());
  if (rest > 0) {
    bm_word_t orig = dest_map[limit];
    bm_word_t temp = merge_tail_of_map(orig & other_map[limit], orig, rest);
    changed = changed || (temp != orig);
    dest_map[limit] = temp;
  }
  return changed;
}

void BitMap::set_from(const BitMap& other) {
  assert(size() == other.size(), "must have same size");
  bm_word_t* dest_map = map();
  const bm_word_t* other_map = other.map();
  idx_t copy_words = word_index(size());
  Copy::disjoint_words((HeapWord*)other_map, (HeapWord*)dest_map, copy_words);
  idx_t rest = bit_in_word(size());
  if (rest > 0) {
    dest_map[copy_words] = merge_tail_of_map(other_map[copy_words],
                                             dest_map[copy_words],
                                             rest);
  }
}

bool BitMap::is_same(const BitMap& other) const {
  assert(size() == other.size(), "must have same size");
  const bm_word_t* dest_map = map();
  const bm_word_t* other_map = other.map();
  idx_t limit = word_index(size());
  for (idx_t index = 0; index < limit; ++index) {
    if (dest_map[index] != other_map[index]) return false;
  }
  idx_t rest = bit_in_word(size());
  return (rest == 0) || (tail_of_map(dest_map[limit] ^ other_map[limit], rest) == 0);
}

bool BitMap::is_full() const {
  const bm_word_t* words = map();
  idx_t limit = word_index(size());
  for (idx_t index = 0; index < limit; ++index) {
    if (~words[index] != 0) return false;
  }
  idx_t rest = bit_in_word(size());
  return (rest == 0) || (tail_of_map(~words[limit], rest) == 0);
}

bool BitMap::is_empty() const {
  const bm_word_t* words = map();
  idx_t limit = word_index(size());
  for (idx_t index = 0; index < limit; ++index) {
    if (words[index] != 0) return false;
  }
  idx_t rest = bit_in_word(size());
  return (rest == 0) || (tail_of_map(words[limit], rest) == 0);
}

void BitMap::clear_large() {
  clear_large_range_of_words(0, size_in_words());
}

// Note that if the closure itself modifies the bitmap
// then modifications in and to the left of the _bit_ being
// currently sampled will not be seen. Note also that the
// interval [leftOffset, rightOffset) is right open.
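//
// A minimal closure sketch (hypothetical, for exposition only; BitMapClosure
// is declared in bitMap.hpp):
//
//   class CountingClosure : public BitMapClosure {
//    public:
//     BitMap::idx_t _count;
//     CountingClosure() : _count(0) {}
//     bool do_bit(BitMap::idx_t offset) {
//       _count++;
//       return true;  // returning false stops the iteration
//     }
//   };
//
//   CountingClosure cl;
//   bm.iterate(&cl, 0, bm.size());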
bool BitMap::iterate(BitMapClosure* blk, idx_t leftOffset, idx_t rightOffset) {
  verify_range(leftOffset, rightOffset);

  idx_t startIndex = word_index(leftOffset);
  idx_t endIndex = MIN2(word_index(rightOffset) + 1, size_in_words());
  for (idx_t index = startIndex, offset = leftOffset;
       offset < rightOffset && index < endIndex;
       offset = (++index) << LogBitsPerWord) {
    idx_t rest = map(index) >> (offset & (BitsPerWord - 1));
    for (; offset < rightOffset && rest != 0; offset++) {
      if (rest & 1) {
        if (!blk->do_bit(offset)) return false;
        // resample at each closure application
        // (see, for instance, CMS bug 4525989)
        rest = map(index) >> (offset & (BitsPerWord - 1));
      }
      rest = rest >> 1;
    }
  }
  return true;
}

const BitMap::idx_t* BitMap::_pop_count_table = NULL;

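// The 256-entry table of per-byte population counts is built lazily; if
// several threads race to build it, Atomic::replace_if_null keeps the first
// table installed and the losers free their copy.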
void BitMap::init_pop_count_table() {
  if (_pop_count_table == NULL) {
    BitMap::idx_t *table = NEW_C_HEAP_ARRAY(idx_t, 256, mtInternal);
    for (uint i = 0; i < 256; i++) {
      table[i] = num_set_bits(i);
    }

    if (!Atomic::replace_if_null(table, &_pop_count_table)) {
      guarantee(_pop_count_table != NULL, "invariant");
      FREE_C_HEAP_ARRAY(idx_t, table);
    }
  }
}

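// Count the set bits in w by skipping runs of zero bits and consuming one
// set bit per outer iteration; for example, num_set_bits(0x22) == 2.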
BitMap::idx_t BitMap::num_set_bits(bm_word_t w) {
  idx_t bits = 0;

  while (w != 0) {
    while ((w & 1) == 0) {
      w >>= 1;
    }
    bits++;
    w >>= 1;
  }
  return bits;
}

BitMap::idx_t BitMap::num_set_bits_from_table(unsigned char c) {
  assert(_pop_count_table != NULL, "precondition");
  return _pop_count_table[c];
}

BitMap::idx_t BitMap::count_one_bits() const {
  init_pop_count_table(); // If necessary.
  idx_t sum = 0;
  typedef unsigned char uchar;
  for (idx_t i = 0; i < size_in_words(); i++) {
    bm_word_t w = map()[i];
    for (size_t j = 0; j < sizeof(bm_word_t); j++) {
      sum += num_set_bits_from_table(uchar(w & 255));
      w >>= 8;
    }
  }
  return sum;
}

void BitMap::print_on_error(outputStream* st, const char* prefix) const {
  st->print_cr("%s[" PTR_FORMAT ", " PTR_FORMAT ")",
               prefix, p2i(map()), p2i((char*)map() + (size() >> LogBitsPerByte)));
}

void BitMap::write_to(bm_word_t* buffer, size_t buffer_size_in_bytes) const {
  assert(buffer_size_in_bytes == size_in_bytes(), "must be");
  memcpy(buffer, _map, size_in_bytes());
}

#ifndef PRODUCT

void BitMap::print_on(outputStream* st) const {
  st->print("Bitmap(" SIZE_FORMAT "):", size());
  for (idx_t index = 0; index < size(); index++) {
    st->print("%c", at(index) ? '1' : '0');
  }
  st->cr();
}

#endif