/* $Id$ $Revision$ */
/* vim:set shiftwidth=4 ts=8: */

/*************************************************************************
 * Copyright (c) 2011 AT&T Intellectual Property
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors: See CVS logs. Details at http://www.graphviz.org/
 *************************************************************************/

#include "vmhdr.h"

/* for VirtualAlloc and friends */
#if defined(_WIN32)
#include <windows.h>
#endif

/* Best-fit allocation method. This is based on a best-fit strategy
** using a splay tree for storage of lists of free blocks of the same
** size. Recent free blocks may be cached for fast reuse.
**
** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94.
*/
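
/* A minimal usage sketch (illustrative only, kept out of the build with
** "#if 0"): callers normally reach this method through the public vmalloc
** interface, opening a region over Vmbest and the sbrk/VirtualAlloc
** discipline defined at the bottom of this file.
*/
#if 0
#include <vmalloc.h>

static void example(void)
{
    Vmalloc_t *vm = vmopen(Vmdcsbrk, Vmbest, 0); /* best-fit region */
    void *p = vmalloc(vm, 100);            /* dispatches to bestalloc() */
    p = vmresize(vm, p, 200, VM_RSCOPY);   /* dispatches to bestresize() */
    vmfree(vm, p);                         /* dispatches to bestfree() */
    vmclose(vm);
}
#endif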

#ifdef DEBUG
static int N_free;          /* # of free calls */
static int N_alloc;         /* # of alloc calls */
static int N_resize;        /* # of resize calls */
static int N_wild;          /* # allocated from the wild block */
static int N_cache;         /* # allocated from cache */
static int N_last;          /* # allocated from last free block */
static int P_junk;          /* # of semi-free pieces */
static int P_free;          /* # of free pieces */
static int P_busy;          /* # of busy pieces */
static size_t M_junk;       /* max size of a junk piece */
static size_t M_free;       /* max size of a free piece */
static size_t M_busy;       /* max size of a busy piece */
static size_t S_free;       /* total free space */
static size_t S_junk;       /* total junk space */
static int Vmcheck = 0;     /* 1 if checking */

/* Check to see if a block is in the free tree */
static int vmintree(Block_t * node, Block_t * b)
{
    Block_t *t;

    for (t = node; t; t = LINK(t))
        if (t == b)
            return 1;
    if (LEFT(node) && vmintree(LEFT(node), b))
        return 1;
    if (RIGHT(node) && vmintree(RIGHT(node), b))
        return 1;
    return 0;
}

/* Check to see if a block is known to be free */
static int vmisfree(Vmdata_t * vd, Block_t * b)
{
    Block_t *t;
    size_t s;

    if (b == vd->wild)
        return 1;
    else if ((s = SIZE(b)) < MAXTINY) {
        for (t = TINY(vd)[INDEX(s)]; t; t = LINK(t))
            if (b == t)
                return 1;
    } else if (vd->root && vmintree(vd->root, b))
        return 1;

    return 0;
}

/* check to see if the tree is in good shape */
static int vmchktree(Block_t * node)
{
    Block_t *t;

    /**/ ASSERT(!ISBUSY(SIZE(node)) && !ISJUNK(SIZE(node)));

    for (t = LINK(node); t; t = LINK(t)) {
        /**/ ASSERT(SIZE(t) == SIZE(node));
        /**/ ASSERT(!ISBUSY(SIZE(t)) && !ISJUNK(SIZE(t)));
    }
    if ((t = LEFT(node))) {
        /**/ ASSERT(SIZE(t) < SIZE(node));
        vmchktree(t);
    }
    if ((t = RIGHT(node))) {
        /**/ ASSERT(SIZE(t) > SIZE(node));
        vmchktree(t);
    }
    return 1;
}

static int vmonlist(Block_t * list, Block_t * b)
{
    for (; list; list = LINK(list))
        if (list == b)
            return 1;
    return 0;
}

/**
 * @param vd region data to check
 * @param size if > 0, assert that no non-tiny free block is as large as size
 * @param wild if != 0, as above, but exempt the wild block from the size check
 */
static int vmcheck(Vmdata_t * vd, size_t size, int wild)
{
    reg Seg_t *seg;
    reg Block_t *b, *endb, *t, *np;
    reg size_t s;

    if (!Vmcheck)
        return 1;

    /**/ ASSERT(size <= 0 || !vd->free);
    /**/ ASSERT(!vd->root || vmchktree(vd->root));

    P_junk = P_free = P_busy = 0;
    M_junk = M_free = M_busy = S_free = S_junk = 0;
    for (seg = vd->seg; seg; seg = seg->next) {
        b = SEGBLOCK(seg);
        endb = (Block_t *) (seg->baddr - sizeof(Head_t));
        while (b < endb) {
            s = SIZE(b) & ~BITS;
            np = (Block_t *) ((Vmuchar_t *) DATA(b) + s);

            if (!ISBUSY(SIZE(b))) {
                /**/ ASSERT(!ISJUNK(SIZE(b)));
                /**/ ASSERT(!ISPFREE(SIZE(b)));
                /**/ ASSERT(TINIEST(b) || SEG(b) == seg);
                /**/ ASSERT(ISBUSY(SIZE(np)));
                /**/ ASSERT(ISPFREE(SIZE(np)));
                /**/ ASSERT(*SELF(b) == b);
                /**/ ASSERT(size <= 0 || SIZE(b) < size ||
                            SIZE(b) < MAXTINY || (wild && b == vd->wild));
                P_free += 1;
                S_free += s;
                if (s > M_free)
                    M_free = s;

                if (s < MAXTINY) {
                    for (t = TINY(vd)[INDEX(s)]; t; t = LINK(t))
                        if (b == t)
                            goto fine;
                }
                if (b == vd->wild) {
                    /**/ ASSERT(VMWILD(vd, b));
                    goto fine;
                }
                if (vd->root && vmintree(vd->root, b))
                    goto fine;

                /**/ ASSERT(0);
            } else if (ISJUNK(SIZE(b))) {
                /**/ ASSERT(ISBUSY(SIZE(b)));
                /**/ ASSERT(!ISPFREE(SIZE(np)));
                P_junk += 1;
                S_junk += s;
                if (s > M_junk)
                    M_junk = s;

                if (b == vd->free)
                    goto fine;
                if (s < MAXCACHE) {
                    for (t = CACHE(vd)[INDEX(s)]; t; t = LINK(t))
                        if (b == t)
                            goto fine;
                }
                for (t = CACHE(vd)[S_CACHE]; t; t = LINK(t))
                    if (b == t)
                        goto fine;
                /**/ ASSERT(0);
            } else {
                /**/ ASSERT(!ISPFREE(SIZE(b)) || !ISBUSY(SIZE(LAST(b))));
                /**/ ASSERT(SEG(b) == seg);
                /**/ ASSERT(!ISPFREE(SIZE(np)));
                P_busy += 1;
                if (s > M_busy)
                    M_busy = s;
                goto fine;
            }
          fine:
            b = np;
        }
    }

    return 1;
}

#endif /*DEBUG*/
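
/* A sketch of the block bookkeeping that the checks above and the splay
** code below rely on (the exact field layout lives in vmhdr.h): every
** block has a two-word header carrying its segment (SEG) and its size
** plus status bits (SIZE). While a block is free, its body doubles as
** links: LINK chains same-size blocks, LEFT/RIGHT are splay-tree
** children (TLEFT for the tiniest blocks), and a SELF pointer at the
** very end of the block lets the following block reach this one via
** LAST() for backward merging.
*/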
/* Tree rotation functions */
#define RROTATE(x,y) (LEFT(x) = RIGHT(y), RIGHT(y) = (x), (x) = (y))
#define LROTATE(x,y) (RIGHT(x) = LEFT(y), LEFT(y) = (x), (x) = (y))
#define RLINK(s,x)   ((s) = LEFT(s) = (x))
#define LLINK(s,x)   ((s) = RIGHT(s) = (x))
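
/* How RROTATE(x,y) behaves when y == LEFT(x): y becomes the subtree root
** and x its right child, i.e.
**
**         x                y
**        / \              / \
**       y   C    ==>     A   x
**      / \                  / \
**     A   B                B   C
**
** LROTATE is the mirror image. RLINK/LLINK attach nodes to the right/left
** boundary trees built during the top-down splay in bestsearch() below.
*/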
/* Find and delete a suitable element in the free tree. */
static Block_t *bestsearch(Vmdata_t * vd, reg size_t size, Block_t * wanted)
{
    reg size_t s;
    reg Block_t *t, *root, *l, *r;
    Block_t link;

    /* extracting a tiniest block from its list */
    if ((root = wanted) && size == TINYSIZE) {
        reg Seg_t *seg;

        l = TLEFT(root);
        if ((r = LINK(root)))
            TLEFT(r) = l;
        if (l)
            LINK(l) = r;
        else
            TINY(vd)[0] = r;

        seg = vd->seg;
        if (!seg->next)
            SEG(root) = seg;
        else
            for (;; seg = seg->next) {
                if ((Vmuchar_t *) root > (Vmuchar_t *) seg->addr &&
                    (Vmuchar_t *) root < seg->baddr) {
                    SEG(root) = seg;
                    break;
                }
            }

        return root;
    }

    /**/ ASSERT(!vd->root || vmchktree(vd->root));

    /* find the right one to delete */
    l = r = &link;
    if ((root = vd->root))
        do {
            /**/ ASSERT(!ISBITS(size) && !ISBITS(SIZE(root)));
            if (size == (s = SIZE(root)))
                break;
            if (size < s) {
                if ((t = LEFT(root))) {
                    if (size <= (s = SIZE(t))) {
                        RROTATE(root, t);
                        if (size == s)
                            break;
                        t = LEFT(root);
                    } else {
                        LLINK(l, t);
                        t = RIGHT(t);
                    }
                }
                RLINK(r, root);
            } else {
                if ((t = RIGHT(root))) {
                    if (size >= (s = SIZE(t))) {
                        LROTATE(root, t);
                        if (size == s)
                            break;
                        t = RIGHT(root);
                    } else {
                        RLINK(r, t);
                        t = LEFT(t);
                    }
                }
                LLINK(l, root);
            }
            /**/ ASSERT(root != t);
        } while ((root = t));

    if (root) {                 /* found it, now isolate it */
        RIGHT(l) = LEFT(root);
        LEFT(r) = RIGHT(root);
    } else {                    /* nothing exactly fit */
        LEFT(r) = NIL(Block_t *);
        RIGHT(l) = NIL(Block_t *);

        /* grab the least one from the right tree */
        if ((root = LEFT(&link))) {
            while ((t = LEFT(root)))
                RROTATE(root, t);
            LEFT(&link) = RIGHT(root);
        }
    }

    if (root && (r = LINK(root))) {     /* head of a linked list; use the next element as root */
        LEFT(r) = RIGHT(&link);
        RIGHT(r) = LEFT(&link);
    } else if (!(r = LEFT(&link)))
        r = RIGHT(&link);
    else {                      /* graft left tree to right tree */
        while ((t = LEFT(r)))
            RROTATE(r, t);
        LEFT(r) = RIGHT(&link);
    }
    vd->root = r;
    /**/ ASSERT(!r || !ISBITS(SIZE(r)));

    /**/ ASSERT(!wanted || wanted == root);
    return root;
}

/* Reclaim all delayed free blocks into the free tree */
static int bestreclaim(reg Vmdata_t * vd, Block_t * wanted, int c)
{
    reg size_t size, s;
    reg Block_t *fp, *np, *t, *list, **cache;
    reg int n, count;
    reg Seg_t *seg;
    Block_t tree;

    /**/ ASSERT(!vd->root || vmchktree(vd->root));

    if ((fp = vd->free)) {
        LINK(fp) = *(cache = CACHE(vd) + S_CACHE);
        *cache = fp;
        vd->free = NIL(Block_t *);
    }

    LINK(&tree) = NIL(Block_t *);
    count = 0;
    for (n = S_CACHE; n >= c; --n) {
        list = *(cache = CACHE(vd) + n);
        *cache = NIL(Block_t *);
        while ((fp = list)) {
            /* Note that below here we allow ISJUNK blocks to be
            ** forward-merged even though they are not removed from
            ** the list immediately. In this way, the list is
            ** scanned only once. It works because the LINK and SIZE
            ** fields are not destroyed during the merging. This can
            ** be seen by observing that a tiniest block has a 2-word
            ** header and a 2-word body. Merging a tiniest block
            ** (1seg) and the next block (2seg) looks like this:
            **    1seg size link left 2seg size link left ....
            **    1seg size link left rite xxxx xxxx .... self
            ** After the merge, the 2seg word is replaced by the RIGHT
            ** pointer of the new block and somewhere beyond the
            ** two xxxx fields, the SELF pointer will replace some
            ** other word. The important part is that the two xxxx
            ** fields are kept intact.
            */
            count += 1;
            list = LINK(list);
            /**/ ASSERT(!vmonlist(list, fp));

            size = SIZE(fp);
            if (!ISJUNK(size))  /* already done */
                continue;

            /* see if this address is from region */
            for (seg = vd->seg; seg; seg = seg->next)
                if (fp >= SEGBLOCK(seg) && fp < (Block_t *) seg->baddr)
                    break;
            if (!seg) {         /* must be a bug in application code! */
                /**/ ASSERT(seg != NIL(Seg_t *));
                continue;
            }

            if (ISPFREE(size)) {        /* backward merge */
                fp = LAST(fp);
                s = SIZE(fp);
                REMOVE(vd, fp, INDEX(s), t, bestsearch);
                size = (size & ~BITS) + s + sizeof(Head_t);
            } else
                size &= ~BITS;

            for (;;) {          /* forward merge */
                np = (Block_t *) ((Vmuchar_t *) fp + size + sizeof(Head_t));
                s = SIZE(np);
                /**/ ASSERT(s > 0);
                if (!ISBUSY(s)) {
                    if (np == vd->wild)
                        vd->wild = NIL(Block_t *);
                    else
                        REMOVE(vd, np, INDEX(s), t, bestsearch);
                } else if (ISJUNK(s)) {
                    if ((int) C_INDEX(s) < c)
                        c = C_INDEX(s);
                    SIZE(np) = 0;
                    CLRBITS(s);
                } else
                    break;
                size += s + sizeof(Head_t);
            }
            SIZE(fp) = size;

            if (fp == wanted)   /* about to be consumed by bestresize */
                continue;

            /* tell next block that this one is free */
            SETPFREE(SIZE(np));
            /**/ ASSERT(ISBUSY(SIZE(np)));
            *(SELF(fp)) = fp;

            if (np->body.data >= vd->seg->baddr) {
                vd->wild = fp;
                continue;
            }

            /* tiny block goes to tiny list */
            if (size < MAXTINY) {
                s = INDEX(size);
                np = LINK(fp) = TINY(vd)[s];
                if (s == 0) {   /* TINIEST block */
                    if (np)
                        TLEFT(np) = fp;
                    TLEFT(fp) = NIL(Block_t *);
                } else {
                    if (np)
                        LEFT(np) = fp;
                    LEFT(fp) = NIL(Block_t *);
                    SETLINK(fp);
                }
                TINY(vd)[s] = fp;
                continue;
            }

            /* don't put in free tree yet because they may be merged soon */
            np = &tree;
            if ((LINK(fp) = LINK(np)))
                LEFT(LINK(fp)) = fp;
            LINK(np) = fp;
            LEFT(fp) = np;
            SETLINK(fp);
        }
    }

    /* insert all free blocks into the free tree */
    for (list = LINK(&tree); list;) {
        fp = list;
        list = LINK(list);

        /**/ ASSERT(!ISBITS(SIZE(fp)));
        /**/ ASSERT(ISBUSY(SIZE(NEXT(fp))));
        /**/ ASSERT(ISPFREE(SIZE(NEXT(fp))));
        LEFT(fp) = RIGHT(fp) = LINK(fp) = NIL(Block_t *);
        if (!(np = vd->root)) { /* inserting into an empty tree */
            vd->root = fp;
            continue;
        }

        size = SIZE(fp);
        while (1) {             /* leaf insertion */
            /**/ ASSERT(np != fp);
            if ((s = SIZE(np)) > size) {
                if ((t = LEFT(np))) {
                    /**/ ASSERT(np != t);
                    np = t;
                } else {
                    LEFT(np) = fp;
                    break;
                }
            } else if (s < size) {
                if ((t = RIGHT(np))) {
                    /**/ ASSERT(np != t);
                    np = t;
                } else {
                    RIGHT(np) = fp;
                    break;
                }
            } else {            /* s == size */
                if ((t = LINK(np))) {
                    LINK(fp) = t;
                    LEFT(t) = fp;
                }
                LINK(np) = fp;
                LEFT(fp) = np;
                SETLINK(fp);
                break;
            }
        }
    }

    /**/ ASSERT(!vd->root || vmchktree(vd->root));
    return count;
}

/**
 * @param vm region allocating from
 * @param size desired block size
 */
static void *bestalloc(Vmalloc_t * vm, reg size_t size)
{
    reg Vmdata_t *vd = vm->data;
    reg size_t s;
    reg Block_t *tp, *np, **cache;
    reg int local;
    size_t orgsize = 0;

    /**/ COUNT(N_alloc);

    if (!(local = vd->mode & VM_TRUST)) {
        GETLOCAL(vd, local);
        if (ISLOCK(vd, local))
            return NIL(void *);
        SETLOCK(vd, local);
        orgsize = size;
    }

    /**/ ASSERT(HEADSIZE == sizeof(Head_t));
    /**/ ASSERT(BODYSIZE == sizeof(Body_t));
    /**/ ASSERT((ALIGN % (BITS + 1)) == 0);
    /**/ ASSERT((sizeof(Head_t) % ALIGN) == 0);
    /**/ ASSERT((sizeof(Body_t) % ALIGN) == 0);
    /**/ ASSERT((TINYSIZE % ALIGN) == 0);
    /**/ ASSERT(sizeof(Block_t) == (sizeof(Body_t) + sizeof(Head_t)));

    /* for ANSI requirement that malloc(0) returns non-NULL pointer */
    size = size <= TINYSIZE ? TINYSIZE : ROUND(size, ALIGN);
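    /* For illustration (exact values come from vmhdr.h and are platform
    ** dependent): if ALIGN were 16, a 100-byte request would be rounded
    ** up to 112, and anything not above TINYSIZE becomes exactly TINYSIZE.
    */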

    if (size < MAXCACHE && (tp = *(cache = CACHE(vd) + INDEX(size)))) {
        *cache = LINK(tp);
        CLRJUNK(SIZE(tp));
        /**/ COUNT(N_cache);
        goto done;
    }

    if ((tp = vd->free)) {      /* allocate from last free piece */
        /**/ ASSERT(ISBUSY(SIZE(tp)));
        /**/ ASSERT(ISJUNK(SIZE(tp)));
        /**/ COUNT(N_last);

        vd->free = NIL(Block_t *);
        if ((s = SIZE(tp)) < size) {
            LINK(tp) = *(cache = CACHE(vd) + S_CACHE);
            *cache = tp;
        } else {
            if (s >= size + (sizeof(Head_t) + TINYSIZE)) {
                SIZE(tp) = size;
                np = NEXT(tp);
                SEG(np) = SEG(tp);
                SIZE(np) = ((s & ~BITS) - (size + sizeof(Head_t))) | JUNK | BUSY;
                vd->free = np;
                SIZE(tp) |= s & BITS;
            }
            CLRJUNK(SIZE(tp));
            goto done;
        }
    }

    for (;;) {
        for (;;) {              /* best-fit - more or less */
            for (s = INDEX(size); s < S_TINY; ++s) {
                if ((tp = TINY(vd)[s])) {
                    REMOVE(vd, tp, s, np, bestsearch);
                    CLRPFREE(SIZE(NEXT(tp)));
                    goto got_block;
                }
            }

            if (CACHE(vd)[S_CACHE])     /* reclaim big pieces */
                bestreclaim(vd, NIL(Block_t *), S_CACHE);
            if (vd->root && (tp = bestsearch(vd, size, NIL(Block_t *))))
                goto got_block;
            if (bestreclaim(vd, NIL(Block_t *), 0) == 0)
                break;
        }

        /**/ ASSERT(!vd->free);
        if ((tp = vd->wild) && SIZE(tp) >= size) {
            /**/ ASSERT(vmcheck(vd, size, 1));
            /**/ COUNT(N_wild);
            vd->wild = NIL(Block_t *);
            goto got_block;
        }

        /**/ ASSERT(vmcheck(vd, size, 0));
        if ((tp = (*_Vmextend) (vm, size, bestsearch)))
            goto got_block;
        else if (vd->mode & VM_AGAIN)
            vd->mode &= ~VM_AGAIN;
        else {
            CLRLOCK(vd, local);
            return NIL(void *);
        }
    }

  got_block:
    /**/ ASSERT(!ISBITS(SIZE(tp)));
    /**/ ASSERT(SIZE(tp) >= size);
    /**/ ASSERT((SIZE(tp) % ALIGN) == 0);
    /**/ ASSERT(!vd->free);

    /* tell next block that we are no longer a free block */
    CLRPFREE(SIZE(NEXT(tp)));
    /**/ ASSERT(ISBUSY(SIZE(NEXT(tp))));

    if ((s = SIZE(tp) - size) >= (sizeof(Head_t) + TINYSIZE)) {
        SIZE(tp) = size;

        np = NEXT(tp);
        SEG(np) = SEG(tp);
        SIZE(np) = (s - sizeof(Head_t)) | BUSY | JUNK;

        if (!vd->root || !VMWILD(vd, np))
            vd->free = np;
        else {
            SIZE(np) &= ~BITS;
            *SELF(np) = np;
            SETPFREE(SIZE(NEXT(np)));
            vd->wild = np;
        }
    }

    SETBUSY(SIZE(tp));

  done:
    if (!local && (vd->mode & VM_TRACE) && _Vmtrace
        && VMETHOD(vd) == VM_MTBEST)
        (*_Vmtrace) (vm, NIL(Vmuchar_t *), (Vmuchar_t *) DATA(tp), orgsize, 0);

    /**/ ASSERT(!vd->root || vmchktree(vd->root));

    CLRLOCK(vd, local);
    return DATA(tp);
}

/**
 * @param vm region allocating from
 * @param addr address to check
 */
static long bestaddr(Vmalloc_t * vm, void * addr)
{
    reg Seg_t *seg;
    reg Block_t *b = 0, *endb = 0;
    reg long offset;
    reg Vmdata_t *vd = vm->data;
    reg int local;

    if (!(local = vd->mode & VM_TRUST)) {
        GETLOCAL(vd, local);
        if (ISLOCK(vd, local))
            return -1L;
        SETLOCK(vd, local);
    }

    offset = -1L;
    for (seg = vd->seg; seg; seg = seg->next) {
        b = SEGBLOCK(seg);
        endb = (Block_t *) (seg->baddr - sizeof(Head_t));
        if ((Vmuchar_t *) addr > (Vmuchar_t *) b &&
            (Vmuchar_t *) addr < (Vmuchar_t *) endb)
            break;
    }

    if (local && !(vd->mode & VM_TRUST)) {      /* from bestfree or bestresize */
        b = BLOCK(addr);
        if (seg && SEG(b) == seg && ISBUSY(SIZE(b)) && !ISJUNK(SIZE(b)))
            offset = 0;
        if (offset != 0 && vm->disc->exceptf)
            (void) (*vm->disc->exceptf) (vm, VM_BADADDR, addr, vm->disc);
    } else if (seg) {
        while (b < endb) {
            reg Vmuchar_t *data = (Vmuchar_t *) DATA(b);
            reg size_t size = SIZE(b) & ~BITS;

            if ((Vmuchar_t *) addr >= data &&
                (Vmuchar_t *) addr < data + size) {
                if (ISJUNK(SIZE(b)) || !ISBUSY(SIZE(b)))
                    offset = -1L;
                else
                    offset = (Vmuchar_t *) addr - data;
                goto done;
            }

            b = (Block_t *) ((Vmuchar_t *) DATA(b) + size);
        }
    }

  done:
    CLRLOCK(vd, local);
    return offset;
}

static int bestfree(Vmalloc_t * vm, void * data)
{
    reg Vmdata_t *vd = vm->data;
    reg Block_t *bp, **cache;
    reg size_t s;
    reg int local;

    /**/ COUNT(N_free);

    if (!data)                  /* ANSI-ism */
        return 0;

    if (!(local = vd->mode & VM_TRUST)) {
        if (ISLOCK(vd, 0))
            return -1;
        if (KPVADDR(vm, data, bestaddr) != 0)
            return -1;
        SETLOCK(vd, 0);
    }

    bp = BLOCK(data);
    /**/ ASSERT(ISBUSY(SIZE(bp)) && !ISJUNK(SIZE(bp)));
    SETJUNK(SIZE(bp));
    if ((s = SIZE(bp)) < MAXCACHE) {
        /**/ ASSERT(!vmonlist(CACHE(vd)[INDEX(s)], bp));
        LINK(bp) = *(cache = CACHE(vd) + INDEX(s));
        *cache = bp;
    } else if (!vd->free)
        vd->free = bp;
    else {
        /**/ ASSERT(!vmonlist(CACHE(vd)[S_CACHE], bp));
        LINK(bp) = *(cache = CACHE(vd) + S_CACHE);
        *cache = bp;
    }

    /* coalesce large free blocks to avoid fragmentation */
    if (s >= _Vmpagesize && ISPFREE(s))
        bestreclaim(vd, NIL(Block_t *), 0);

    if (!local && _Vmtrace && (vd->mode & VM_TRACE)
        && VMETHOD(vd) == VM_MTBEST)
        (*_Vmtrace) (vm, (Vmuchar_t *) data, NIL(Vmuchar_t *), (s & ~BITS), 0);

    /**/ ASSERT(!vd->root || vmchktree(vd->root));

    CLRLOCK(vd, 0);
    return 0;
}

/**
 * @param vm region allocating from
 * @param data old block of data
 * @param size new size
 * @param type resize flags: VM_RSMOVE allows the block to move,
 *             VM_RSCOPY copies the old data when it moves, and
 *             VM_RSZERO zero-fills the extension
 */
static void *bestresize(Vmalloc_t * vm, void * data, reg size_t size,
                        int type)
{
    reg Vmdata_t *vd = vm->data;
    reg Block_t *rp, *np, *t, **cache;
    reg size_t s, bs;
    reg int local, *d, *ed;
    size_t oldsize = 0, orgsize = 0;
    void *orgdata = 0;

    /**/ COUNT(N_resize);

    if (!data) {
        if ((data = bestalloc(vm, size))) {
            oldsize = 0;
            size = size <= TINYSIZE ? TINYSIZE : ROUND(size, ALIGN);
        }
        goto done;
    }
    if (size == 0) {
        (void) bestfree(vm, data);
        return NIL(void *);
    }

    if (!(local = vd->mode & VM_TRUST)) {
        GETLOCAL(vd, local);
        if (ISLOCK(vd, local))
            return NIL(void *);
        if (!local && KPVADDR(vm, data, bestaddr) != 0)
            return NIL(void *);
        SETLOCK(vd, local);

        orgdata = data;         /* for tracing */
        orgsize = size;
    }

    /**/ ASSERT(!vd->root || vmchktree(vd->root));

    size = size <= TINYSIZE ? TINYSIZE : ROUND(size, ALIGN);
    rp = BLOCK(data);
    /**/ ASSERT(ISBUSY(SIZE(rp)) && !ISJUNK(SIZE(rp)));
    if ((bs = oldsize = SIZE(rp)) < size) {
        CLRBITS(SIZE(rp));
        np = NEXT(rp);
        do {                    /* forward merge as much as possible */
            s = SIZE(np);
            if (np == vd->free) {
                vd->free = NIL(Block_t *);
                CLRBITS(s);
            } else if (ISJUNK(s)) {
                CPYBITS(SIZE(rp), bs);
                bestreclaim(vd, np, C_INDEX(s));
                s = SIZE(np);
                bs = SIZE(rp);
                CLRBITS(SIZE(rp));
            } else if (!ISBUSY(s)) {
                if (np == vd->wild)
                    vd->wild = NIL(Block_t *);
                else
                    REMOVE(vd, np, INDEX(s), t, bestsearch);
            } else
                break;

            SIZE(rp) += (s += sizeof(Head_t));
            np = (Block_t *) ((Vmuchar_t *) np + s);
            CLRPFREE(SIZE(np));
        } while (SIZE(rp) < size);

        if (SIZE(rp) < size && size > vd->incr && SEGWILD(rp)) {
            reg Seg_t *seg;

            s = (size - SIZE(rp)) + sizeof(Head_t);
            s = ROUND(s, vd->incr);
            seg = SEG(rp);
            if ((*vm->disc->memoryf) (vm, seg->addr, seg->extent,
                                      seg->extent + s,
                                      vm->disc) == seg->addr) {
                SIZE(rp) += s;
                seg->extent += s;
                seg->size += s;
                seg->baddr += s;
                SEG(NEXT(rp)) = seg;
                SIZE(NEXT(rp)) = BUSY;
            }
        }

        CPYBITS(SIZE(rp), bs);
    }

    /* If a buffer is resized, it is likely to be resized again,
       so we grow it a bit more now to reduce future work. */
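    /* For example (assuming BODYSIZE is well under 100 bytes): a request
    ** smaller than two body sizes is doubled, a 100-byte request is padded
    ** by half (50 bytes), and anything of 1024 bytes or more gets a flat
    ** 1024-byte pad.
    */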
    bs = size < (BODYSIZE << 1) ? size : size < 1024 ? (size >> 1) : 1024;
    if ((s = SIZE(rp)) >= (size + bs + (TINYSIZE + sizeof(Head_t)))) {
        SIZE(rp) = size;
        np = NEXT(rp);
        SEG(np) = SEG(rp);
        SIZE(np) = (((s & ~BITS) - size) - sizeof(Head_t)) | BUSY | JUNK;
        CPYBITS(SIZE(rp), s);
        rp = np;
        goto do_free;
    } else if (s < size) {
        if (!(type & (VM_RSMOVE | VM_RSCOPY)))  /* see if old data is moveable */
            data = NIL(void *);
        else {
            ed = (int *) data;
            if (size < ((s & ~BITS) + bs))
                size = (s & ~BITS) + bs;
            if ((data = KPVALLOC(vm, size, bestalloc))) {
                if (type & VM_RSCOPY) { /* old data must be copied */
                    d = (int *) data;
                    INTCOPY(d, ed, s);
                }
              do_free:          /* delay reusing these blocks as long as possible */
                SETJUNK(SIZE(rp));
                LINK(rp) = *(cache = CACHE(vd) + S_CACHE);
                *cache = rp;
                if ((rp = vd->free)) {
                    vd->free = NIL(Block_t *);
                    LINK(rp) = *cache;
                    *cache = rp;
                }
            }
        }
    }

    if (!local && _Vmtrace && data && (vd->mode & VM_TRACE)
        && VMETHOD(vd) == VM_MTBEST)
        (*_Vmtrace) (vm, (Vmuchar_t *) orgdata, (Vmuchar_t *) data,
                     orgsize, 0);
    CLRLOCK(vd, local);

  done:
    if (data && (type & VM_RSZERO) && size > CLRBITS(oldsize)) {
        d = (int *) ((char *) data + oldsize);
        size -= oldsize;
        INTZERO(d, size);
    }

    /**/ ASSERT(!vd->root || vmchktree(vd->root));

    return data;
}

/**
 * @param vm region allocating from
 * @param addr address to check
 */
static long bestsize(Vmalloc_t * vm, void * addr)
{
    reg Seg_t *seg;
    reg Block_t *b, *endb;
    reg long size;
    reg Vmdata_t *vd = vm->data;

    if (!(vd->mode & VM_TRUST)) {
        if (ISLOCK(vd, 0))
            return -1L;
        SETLOCK(vd, 0);
    }

    size = -1L;
    for (seg = vd->seg; seg; seg = seg->next) {
        b = SEGBLOCK(seg);
        endb = (Block_t *) (seg->baddr - sizeof(Head_t));
        if ((Vmuchar_t *) addr <= (Vmuchar_t *) b ||
            (Vmuchar_t *) addr >= (Vmuchar_t *) endb)
            continue;
        while (b < endb) {
            if (addr == DATA(b)) {
                if (!ISBUSY(SIZE(b)) || ISJUNK(SIZE(b)))
                    size = -1L;
                else
                    size = (long) SIZE(b) & ~BITS;
                goto done;
            } else if ((Vmuchar_t *) addr <= (Vmuchar_t *) b)
                break;

            b = (Block_t *) ((Vmuchar_t *) DATA(b) + (SIZE(b) & ~BITS));
        }
    }

  done:
    CLRLOCK(vd, 0);
    return size;
}

static int bestcompact(Vmalloc_t * vm)
{
    reg Seg_t *seg, *next;
    reg Block_t *bp, *t;
    reg size_t size, segsize;
    reg Vmdata_t *vd = vm->data;

    if (!(vd->mode & VM_TRUST)) {
        if (ISLOCK(vd, 0))
            return -1;
        SETLOCK(vd, 0);
    }

    bestreclaim(vd, NIL(Block_t *), 0);
    /**/ ASSERT(!vd->root || vmchktree(vd->root));

    for (seg = vd->seg; seg; seg = next) {
        next = seg->next;

        bp = BLOCK(seg->baddr);
        if (!ISPFREE(SIZE(bp)))
            continue;

        bp = LAST(bp);
        /**/ ASSERT(!ISBUSY(SIZE(bp)) && vmisfree(vd, bp));
        size = SIZE(bp);
        if (bp == vd->wild)
            vd->wild = NIL(Block_t *);
        else
            REMOVE(vd, bp, INDEX(size), t, bestsearch);
        CLRPFREE(SIZE(NEXT(bp)));

        if (size < (segsize = seg->size))
            size += sizeof(Head_t);

        if ((*_Vmtruncate) (vm, seg, size, 1) >= 0) {
            if (size >= segsize)        /* entire segment deleted */
                continue;

            if ((size = (seg->baddr - ((Vmuchar_t *) bp) - sizeof(Head_t))) > 0)
                SIZE(bp) = size - sizeof(Head_t);
            else
                bp = NIL(Block_t *);
        }
        /**/ ASSERT(!vd->root || vmchktree(vd->root));

        if (bp) {
            /**/ ASSERT(SIZE(bp) >= TINYSIZE);
            /**/ ASSERT(SEGWILD(bp));
            /**/ ASSERT(!vd->root || !vmintree(vd->root, bp));
            SIZE(bp) |= BUSY | JUNK;
            LINK(bp) = CACHE(vd)[C_INDEX(SIZE(bp))];
            CACHE(vd)[C_INDEX(SIZE(bp))] = bp;
        }
        /**/ ASSERT(!vd->root || vmchktree(vd->root));
    }
    /**/ ASSERT(!vd->root || vmchktree(vd->root));

    if (_Vmtrace && (vd->mode & VM_TRACE) && VMETHOD(vd) == VM_MTBEST)
        (*_Vmtrace) (vm, (Vmuchar_t *) 0, (Vmuchar_t *) 0, 0, 0);

    CLRLOCK(vd, 0);

    return 0;
}

static void *bestalign(Vmalloc_t * vm, size_t size, size_t align)
{
    reg Vmuchar_t *data;
    reg Block_t *tp, *np;
    reg Seg_t *seg;
    reg size_t s, orgsize = 0, orgalign = 0, extra;
    reg int local;
    reg Vmdata_t *vd = vm->data;

    if (size <= 0 || align <= 0)
        return NIL(void *);

    if (!(local = vd->mode & VM_TRUST)) {
        GETLOCAL(vd, local);
        if (ISLOCK(vd, local))
            return NIL(void *);
        SETLOCK(vd, local);
        orgsize = size;
        orgalign = align;
    }

    size = size <= TINYSIZE ? TINYSIZE : ROUND(size, ALIGN);
    align = MULTIPLE(align, ALIGN);

    /* hack so that dbalign() can store header data */
    if (VMETHOD(vd) != VM_MTDEBUG)
        extra = 0;
    else {
        extra = DB_HEAD;
        while (align < extra || (align - extra) < sizeof(Block_t))
            align *= 2;
    }

    /* reclaim all free blocks now to avoid fragmentation */
    bestreclaim(vd, NIL(Block_t *), 0);

    s = size + 2 * (align + sizeof(Head_t) + extra);
    if (!(data = (Vmuchar_t *) KPVALLOC(vm, s, bestalloc)))
        goto done;

    tp = BLOCK(data);
    seg = SEG(tp);

    /* get an aligned address that we can live with */
    if ((s = (size_t) ((VLONG(data) + extra) % align)) != 0)
        data += align - s;
    /**/ ASSERT(((VLONG(data) + extra) % align) == 0);

    if ((np = BLOCK(data)) != tp) {     /* need to free left part */
        if (((Vmuchar_t *) np - (Vmuchar_t *) tp) <
            (ssize_t) (sizeof(Block_t) + extra)) {
            data += align;
            np = BLOCK(data);
        }
        /**/ ASSERT(((VLONG(data) + extra) % align) == 0);

        s = (Vmuchar_t *) np - (Vmuchar_t *) tp;
        SIZE(np) = ((SIZE(tp) & ~BITS) - s) | BUSY;
        SEG(np) = seg;

        SIZE(tp) = (s - sizeof(Head_t)) | (SIZE(tp) & BITS) | JUNK;
        /**/ ASSERT(SIZE(tp) >= sizeof(Body_t));
        LINK(tp) = CACHE(vd)[C_INDEX(SIZE(tp))];
        CACHE(vd)[C_INDEX(SIZE(tp))] = tp;
    }

    /* free left-over if too big */
    if ((s = SIZE(np) - size) >= sizeof(Block_t)) {
        SIZE(np) = size;

        tp = NEXT(np);
        SIZE(tp) = ((s & ~BITS) - sizeof(Head_t)) | BUSY | JUNK;
        SEG(tp) = seg;
        LINK(tp) = CACHE(vd)[C_INDEX(SIZE(tp))];
        CACHE(vd)[C_INDEX(SIZE(tp))] = tp;

        SIZE(np) |= s & BITS;
    }

    bestreclaim(vd, NIL(Block_t *), 0); /* coalesce all free blocks */

    if (!local && !(vd->mode & VM_TRUST) && _Vmtrace
        && (vd->mode & VM_TRACE))
        (*_Vmtrace) (vm, NIL(Vmuchar_t *), data, orgsize, orgalign);

  done:
    CLRLOCK(vd, local);

    /**/ ASSERT(!vd->root || vmchktree(vd->root));

    return (void *) data;
}

/* A discipline to get memory using sbrk() or VirtualAlloc on Win32 */
/**
 * @param vm region doing the allocation
 * @param caddr current address
 * @param csize current size
 * @param nsize new size
 * @param disc discipline structure
 */
static void *sbrkmem(Vmalloc_t * vm, void * caddr,
                     size_t csize, size_t nsize, Vmdisc_t * disc)
{
#if _BLD_INSTRUMENT || cray
    NOTUSED(vm);
    NOTUSED(disc);

    if (csize == 0)
        return (void *) malloc(nsize);
    if (nsize == 0)
        free(caddr);
    return NIL(void *);
#else
#if defined(_WIN32)
    NOTUSED(vm);
    NOTUSED(disc);

    if (csize == 0)
        return (void *) VirtualAlloc(NIL(LPVOID), nsize, MEM_COMMIT,
                                     PAGE_READWRITE);
    else if (nsize == 0)
        return VirtualFree((LPVOID) caddr, 0,
                           MEM_RELEASE) ? caddr : NIL(void *);
    else
        return NIL(void *);
#else
    reg Vmuchar_t *addr;
    reg ssize_t size;
    NOTUSED(vm);
    NOTUSED(disc);

    /* sbrk, see if still own current address */
    if (csize > 0 && sbrk(0) != (Vmuchar_t *) caddr + csize)
        return NIL(void *);

    /* do this because sbrk() uses 'ssize_t' argument */
    size = nsize > csize ? (ssize_t) (nsize - csize)
                         : -(ssize_t) (csize - nsize);

    if ((addr = sbrk(size)) == (Vmuchar_t *) (-1))
        return NIL(void *);
    else
        return csize == 0 ? (void *) addr : caddr;
#endif
#endif
}

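/* A sketch of a custom discipline obeying the same contract (illustrative
** only, kept out of the build with "#if 0"): csize == 0 asks for fresh
** memory, nsize == 0 releases it, and anything else is a resize request
** that may simply fail with NIL. The names mymem/Mydisc are hypothetical;
** returning caddr on a successful release mirrors the VirtualFree branch
** of sbrkmem() above.
*/
#if 0
static void *mymem(Vmalloc_t * vm, void * caddr,
                   size_t csize, size_t nsize, Vmdisc_t * disc)
{
    NOTUSED(vm);
    NOTUSED(disc);

    if (csize == 0)
        return malloc(nsize);   /* fresh segment */
    if (nsize == 0) {
        free(caddr);            /* release; return caddr to signal success */
        return caddr;
    }
    return NIL(void *);         /* no in-place resizing */
}

static Vmdisc_t Mydisc = { mymem, NIL(Vmexcept_f), 0 };
#endif
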
static Vmdisc_t _Vmdcsbrk = { sbrkmem, NIL(Vmexcept_f), 0 };

static Vmethod_t _Vmbest = {
    bestalloc,
    bestresize,
    bestfree,
    bestaddr,
    bestsize,
    bestcompact,
    bestalign,
    VM_MTBEST
};

/* The heap region */
static Vmdata_t _Vmdata = {
    VM_MTBEST | VM_TRUST,       /* mode */
    0,                          /* incr */
    0,                          /* pool */
    NIL(Seg_t *),               /* seg */
    NIL(Block_t *),             /* free */
    NIL(Block_t *),             /* wild */
    NIL(Block_t *),             /* root */
};

static Vmalloc_t _Vmheap = {
    {bestalloc,
     bestresize,
     bestfree,
     bestaddr,
     bestsize,
     bestcompact,
     bestalign,
     VM_MTBEST},
    NIL(char *),                /* file */
    0,                          /* line */
    &_Vmdcsbrk,                 /* disc */
    &_Vmdata,                   /* data */
    NIL(Vmalloc_t *)            /* next */
};

Vmalloc_t *Vmheap = &_Vmheap;
Vmalloc_t *Vmregion = &_Vmheap;
Vmethod_t *Vmbest = &_Vmbest;
Vmdisc_t *Vmdcsbrk = &_Vmdcsbrk;