#define JEMALLOC_EXTENT_DSS_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/spin.h"

/******************************************************************************/
/* Data. */

const char *opt_dss = DSS_DEFAULT;

const char *dss_prec_names[] = {
	"disabled",
	"primary",
	"secondary",
	"N/A"
};

/*
 * Current dss precedence default, used when creating new arenas. NB: This is
 * stored as unsigned rather than dss_prec_t because in principle there's no
 * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
 * atomic operations to synchronize the setting.
 */
static atomic_u_t dss_prec_default = ATOMIC_INIT(
    (unsigned)DSS_PREC_DEFAULT);

/* Base address of the DSS. */
static void *dss_base;
/* Atomic boolean indicating whether a thread is currently extending DSS. */
static atomic_b_t dss_extending;
/* Atomic boolean indicating whether the DSS is exhausted. */
static atomic_b_t dss_exhausted;
/* Atomic current upper limit on DSS addresses. */
static atomic_p_t dss_max;
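/*
 * Synchronization sketch (inferred from the code below): dss_extending acts
 * as a spinlock that serializes writers of dss_max and dss_exhausted, while
 * readers synchronize with the release stores via acquire loads.  dss_base is
 * written once at boot and is read-only thereafter.
 */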

/******************************************************************************/

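/*
 * Thin wrapper around sbrk(2): sbrk(0) returns the current break, and a
 * positive increment grows the data segment, returning the previous break on
 * success or (void *)-1 on failure.  Builds without JEMALLOC_DSS fail loudly
 * if this path is ever reached.
 */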
static void *
extent_dss_sbrk(intptr_t increment) {
#ifdef JEMALLOC_DSS
	return sbrk(increment);
#else
	not_implemented();
	return NULL;
#endif
}

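/*
 * Read the current default precedence with acquire semantics, so that a
 * subsequent arena creation observes the most recent extent_dss_prec_set().
 */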
dss_prec_t
extent_dss_prec_get(void) {
	dss_prec_t ret;

	if (!have_dss) {
		return dss_prec_disabled;
	}
	ret = (dss_prec_t)atomic_load_u(&dss_prec_default, ATOMIC_ACQUIRE);
	return ret;
}

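/*
 * Set the default precedence.  Returns true on error; when DSS support is
 * compiled out, attempting to set any value other than dss_prec_disabled is
 * an error.
 */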
bool
extent_dss_prec_set(dss_prec_t dss_prec) {
	if (!have_dss) {
		return (dss_prec != dss_prec_disabled);
	}
	atomic_store_u(&dss_prec_default, (unsigned)dss_prec, ATOMIC_RELEASE);
	return false;
}

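/*
 * Acquire the DSS-extension spinlock via a weak CAS loop: each thread tries
 * to flip dss_extending from false to true, backing off adaptively on
 * failure.  The acquire/release pairing makes all writes performed while
 * extending (notably to dss_max) visible to the next thread that enters.
 * Callers bracket critical sections as:
 *
 *	extent_dss_extending_start();
 *	... read/advance dss_max via sbrk() ...
 *	extent_dss_extending_finish();
 */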
static void
extent_dss_extending_start(void) {
	spin_t spinner = SPIN_INITIALIZER;
	while (true) {
		bool expected = false;
		if (atomic_compare_exchange_weak_b(&dss_extending, &expected,
		    true, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) {
			break;
		}
		spin_adaptive(&spinner);
	}
}

static void
extent_dss_extending_finish(void) {
	assert(atomic_load_b(&dss_extending, ATOMIC_RELAXED));

	atomic_store_b(&dss_extending, false, ATOMIC_RELEASE);
}

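/*
 * Refresh dss_max from the kernel's notion of the break.  Returns the current
 * break on success, or NULL if sbrk() fails or if a fixed new_addr was
 * requested that is not exactly at the current end of the DSS.
 */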
static void *
extent_dss_max_update(void *new_addr) {
	/*
	 * Get the current end of the DSS as max_cur, and ensure that dss_max
	 * is up to date.
	 */
	void *max_cur = extent_dss_sbrk(0);
	if (max_cur == (void *)-1) {
		return NULL;
	}
	atomic_store_p(&dss_max, max_cur, ATOMIC_RELEASE);
	/* Fixed new_addr can only be supported if it is at the edge of DSS. */
	if (new_addr != NULL && max_cur != new_addr) {
		return NULL;
	}
	return max_cur;
}

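/*
 * Allocate a page-aligned extent by growing the DSS.  Illustrative caller
 * (hypothetical; real callers live in the extent allocation path):
 *
 *	bool zero = false, commit = true;
 *	void *p = extent_alloc_dss(tsdn, arena, NULL, PAGE, PAGE, &zero,
 *	    &commit);
 *
 * On success p is the new extent's base; any alignment gap carved out along
 * the way is recycled via extent_dalloc_gap().
 */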
void *
extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit) {
	extent_t *gap;

	cassert(have_dss);
	assert(size > 0);
	assert(alignment > 0);

	/*
	 * sbrk() uses a signed increment argument, so take care not to
	 * interpret a large allocation request as a negative increment.
	 */
	if ((intptr_t)size < 0) {
		return NULL;
	}

	gap = extent_alloc(tsdn, arena);
	if (gap == NULL) {
		return NULL;
	}

	extent_dss_extending_start();
	if (!atomic_load_b(&dss_exhausted, ATOMIC_ACQUIRE)) {
		/*
		 * The loop is necessary to recover from races with other
		 * threads that are using the DSS for something other than
		 * malloc.
		 */
		while (true) {
			void *max_cur = extent_dss_max_update(new_addr);
			if (max_cur == NULL) {
				goto label_oom;
			}

			/*
			 * Compute how much page-aligned gap space (if any) is
			 * necessary to satisfy alignment.  This space can be
			 * recycled for later use.
			 */
			void *gap_addr_page = (void *)(PAGE_CEILING(
			    (uintptr_t)max_cur));
			void *ret = (void *)ALIGNMENT_CEILING(
			    (uintptr_t)gap_addr_page, alignment);
			size_t gap_size_page = (uintptr_t)ret -
			    (uintptr_t)gap_addr_page;
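			/*
			 * Worked example (illustrative numbers): with 4 KiB
			 * pages, max_cur = 0x10000100 and alignment = 0x2000
			 * give gap_addr_page = 0x10001000, ret = 0x10002000,
			 * and gap_size_page = 0x1000, i.e. one recyclable page
			 * between the page-ceiling of the break and the
			 * aligned result.
			 */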
			if (gap_size_page != 0) {
				extent_init(gap, arena, gap_addr_page,
				    gap_size_page, false, SC_NSIZES,
				    arena_extent_sn_next(arena),
				    extent_state_active, false, true, true);
			}
			/*
			 * Compute the address just past the end of the desired
			 * allocation space.
			 */
			void *dss_next = (void *)((uintptr_t)ret + size);
			if ((uintptr_t)ret < (uintptr_t)max_cur ||
			    (uintptr_t)dss_next < (uintptr_t)max_cur) {
				goto label_oom; /* Wrap-around. */
			}
			/* Compute the increment, including subpage bytes. */
			void *gap_addr_subpage = max_cur;
			size_t gap_size_subpage = (uintptr_t)ret -
			    (uintptr_t)gap_addr_subpage;
			intptr_t incr = gap_size_subpage + size;
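			/*
			 * gap_size_subpage spans [max_cur, ret): the sub-page
			 * bytes below the next page boundary (which cannot be
			 * recycled as an extent) plus the page-aligned gap
			 * recorded above.  The break thus advances from
			 * max_cur to exactly ret + size.
			 */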

			assert((uintptr_t)max_cur + incr == (uintptr_t)ret +
			    size);

			/* Try to allocate. */
			void *dss_prev = extent_dss_sbrk(incr);
			if (dss_prev == max_cur) {
				/* Success. */
				atomic_store_p(&dss_max, dss_next,
				    ATOMIC_RELEASE);
				extent_dss_extending_finish();

				if (gap_size_page != 0) {
					extent_dalloc_gap(tsdn, arena, gap);
				} else {
					extent_dalloc(tsdn, arena, gap);
				}
				if (!*commit) {
					*commit = pages_decommit(ret, size);
				}
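				/*
				 * Newly mapped pages are zero-filled by the
				 * kernel, but the break may have receded and
				 * re-advanced across [ret, ret + size) via raw
				 * sbrk() calls outside the allocator, so force
				 * a purge, falling back to memset(), to
				 * guarantee zeroed memory.
				 */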
				if (*zero && *commit) {
					extent_hooks_t *extent_hooks =
					    EXTENT_HOOKS_INITIALIZER;
					extent_t extent;

					extent_init(&extent, arena, ret, size,
					    false, SC_NSIZES,
					    arena_extent_sn_next(arena),
					    extent_state_active, false, true,
					    true);
					if (extent_purge_forced_wrapper(tsdn,
					    arena, &extent_hooks, &extent, 0,
					    size)) {
						memset(ret, 0, size);
					}
				}
				return ret;
			}
			/*
			 * Failure, whether due to OOM or a race with a raw
			 * sbrk() call from outside the allocator.
			 */
			if (dss_prev == (void *)-1) {
				/* OOM. */
				atomic_store_b(&dss_exhausted, true,
				    ATOMIC_RELEASE);
				goto label_oom;
			}
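			/*
			 * Otherwise a raw sbrk() caller moved the break after
			 * max_cur was sampled; loop to re-read it and retry.
			 */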
		}
	}
label_oom:
	extent_dss_extending_finish();
	extent_dalloc(tsdn, arena, gap);
	return NULL;
}

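/*
 * An address lies in the DSS iff it falls in the half-open interval
 * [dss_base, max), where max is the sampled dss_max passed in by the callers
 * below.
 */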
static bool
extent_in_dss_helper(void *addr, void *max) {
	return ((uintptr_t)addr >= (uintptr_t)dss_base && (uintptr_t)addr <
	    (uintptr_t)max);
}

bool
extent_in_dss(void *addr) {
	cassert(have_dss);

	return extent_in_dss_helper(addr, atomic_load_p(&dss_max,
	    ATOMIC_ACQUIRE));
}

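/*
 * Two extents are considered mergeable only if both lie inside the DSS or
 * both lie outside it; merging across the DSS boundary is never allowed.
 * Addresses below dss_base can never be part of the DSS, so such a pair is
 * trivially mergeable.
 */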
bool
extent_dss_mergeable(void *addr_a, void *addr_b) {
	void *max;

	cassert(have_dss);

	if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b <
	    (uintptr_t)dss_base) {
		return true;
	}

	max = atomic_load_p(&dss_max, ATOMIC_ACQUIRE);
	return (extent_in_dss_helper(addr_a, max) ==
	    extent_in_dss_helper(addr_b, max));
}

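/*
 * Boot-time initialization runs single-threaded, so relaxed stores suffice
 * here; if sbrk() is unavailable, the DSS is marked exhausted from the start.
 */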
void
extent_dss_boot(void) {
	cassert(have_dss);

	dss_base = extent_dss_sbrk(0);
	atomic_store_b(&dss_extending, false, ATOMIC_RELAXED);
	atomic_store_b(&dss_exhausted, dss_base == (void *)-1, ATOMIC_RELAXED);
	atomic_store_p(&dss_max, dss_base, ATOMIC_RELAXED);
}

/******************************************************************************/