/* $Id$ $Revision$ */
/* vim:set shiftwidth=4 ts=8: */

/*************************************************************************
 * Copyright (c) 2011 AT&T Intellectual Property
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors: See CVS logs. Details at http://www.graphviz.org/
 *************************************************************************/

#include "vmhdr.h"

/* Allocation with freeing and reallocing of last allocated block only.
**
** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94.
*/

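/* Allocate a block of at least `size' bytes from the free space at the
** tail of a segment.  The new block becomes the "last" block (vd->free);
** if no segment has enough room, the region is extended via _Vmextend.
*/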
static void *lastalloc(Vmalloc_t * vm, size_t size)
{
    reg Block_t *tp, *next;
    reg Seg_t *seg, *last;
    reg size_t s;
    reg Vmdata_t *vd = vm->data;
    reg int local;
    size_t orgsize = 0;

    if (!(local = vd->mode & VM_TRUST)) {
        GETLOCAL(vd, local);
        if (ISLOCK(vd, local))
            return NIL(void *);
        SETLOCK(vd, local);
        orgsize = size;
    }

    size = size < ALIGN ? ALIGN : ROUND(size, ALIGN);
    for (;;) {
        for (last = NIL(Seg_t *), seg = vd->seg; seg;
             last = seg, seg = seg->next) {
            if (!(tp = seg->free) || (SIZE(tp) + sizeof(Head_t)) < size)
                continue;
            if (last) {
                last->next = seg->next;
                seg->next = vd->seg;
                vd->seg = seg;
            }
            goto got_block;
        }

        /* there is no usable free space in region, try extending */
        if ((tp = (*_Vmextend) (vm, size, NIL(Vmsearch_f)))) {
            seg = SEG(tp);
            goto got_block;
        } else if (vd->mode & VM_AGAIN)
            vd->mode &= ~VM_AGAIN;
        else
            goto done;
    }

  got_block:
    if ((s = SIZE(tp)) >= size) {
        next = (Block_t *) ((Vmuchar_t *) tp + size);
        SIZE(next) = s - size;
        SEG(next) = seg;
        seg->free = next;
    } else
        seg->free = NIL(Block_t *);

    vd->free = seg->last = tp;

    if (!local && (vd->mode & VM_TRACE) && _Vmtrace)
        (*_Vmtrace) (vm, NIL(Vmuchar_t *), (Vmuchar_t *) tp, orgsize, 0);

  done:
    CLRLOCK(vd, local);
    return (void *) tp;
}

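/* Free a block.  Only the most recently allocated block (vd->free) may be
** freed; freeing any other address raises a VM_BADADDR exception.
*/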
static int lastfree(Vmalloc_t * vm, reg void * data)
{
    reg Seg_t *seg;
    reg Block_t *fp;
    reg size_t s;
    reg Vmdata_t *vd = vm->data;
    reg int local;

    if (!data)
        return 0;
    if (!(local = vd->mode & VM_TRUST)) {
        if (ISLOCK(vd, 0))
            return -1;
        SETLOCK(vd, 0);
    }
    if (data != (void *) vd->free) {
        if (!local && vm->disc->exceptf)
            (void) (*vm->disc->exceptf) (vm, VM_BADADDR, data, vm->disc);
        CLRLOCK(vd, 0);
        return -1;
    }

    seg = vd->seg;
    if (!local && (vd->mode & VM_TRACE) && _Vmtrace) {
        if (seg->free)
            s = (Vmuchar_t *) (seg->free) - (Vmuchar_t *) data;
        else
            s = (Vmuchar_t *) BLOCK(seg->baddr) - (Vmuchar_t *) data;
        (*_Vmtrace) (vm, (Vmuchar_t *) data, NIL(Vmuchar_t *), s, 0);
    }

    vd->free = NIL(Block_t *);
    fp = (Block_t *) data;
    SEG(fp) = seg;
    SIZE(fp) = ((Vmuchar_t *) BLOCK(seg->baddr) - (Vmuchar_t *) data) -
        sizeof(Head_t);
    seg->free = fp;
    seg->last = NIL(Block_t *);

    CLRLOCK(vd, 0);
    return 0;
}

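/* Resize a block.  Only the last allocated block can be resized in place;
** the segment is extended when more room is needed.  The `type' flags
** VM_RSMOVE, VM_RSCOPY and VM_RSZERO control whether the block may be
** moved, whether old data are copied, and whether new space is zero-filled.
*/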
static void *lastresize(Vmalloc_t * vm, reg void * data, size_t size,
                        int type)
{
    reg Block_t *tp;
    reg Seg_t *seg;
    reg int *d, *ed;
    reg size_t oldsize;
    reg ssize_t s, ds;
    reg Vmdata_t *vd = vm->data;
    reg int local;
    reg void *addr;
    void *orgdata = 0;
    size_t orgsize = 0;

    if (!data) {
        oldsize = 0;
        data = lastalloc(vm, size);
        goto done;
    }
    if (size == 0) {            /* size is unsigned; a zero request frees the block */
        (void) lastfree(vm, data);
        return NIL(void *);
    }

    if (!(local = vd->mode & VM_TRUST)) {
        if (ISLOCK(vd, 0))
            return NIL(void *);
        SETLOCK(vd, 0);
        orgdata = data;
        orgsize = size;
    }

    if (data == (void *) vd->free)
        seg = vd->seg;
    else {                      /* see if it was one of ours */
        for (seg = vd->seg; seg; seg = seg->next)
            if (data >= seg->addr && data < (void *) seg->baddr)
                break;
        if (!seg || (VLONG(data) % ALIGN) != 0 ||
            (seg->last && (Vmuchar_t *) data > (Vmuchar_t *) seg->last)) {
            CLRLOCK(vd, 0);
            return NIL(void *);
        }
    }

    /* set 's' to be the current available space */
    if (data != seg->last) {
        if (seg->last && (Vmuchar_t *) data < (Vmuchar_t *) seg->last)
            oldsize = (Vmuchar_t *) seg->last - (Vmuchar_t *) data;
        else
            oldsize = (Vmuchar_t *) BLOCK(seg->baddr) - (Vmuchar_t *) data;
        s = -1;
    } else {
        s = (Vmuchar_t *) BLOCK(seg->baddr) - (Vmuchar_t *) data;
        if (!(tp = seg->free))
            oldsize = s;
        else {
            oldsize = (Vmuchar_t *) tp - (Vmuchar_t *) data;
            seg->free = NIL(Block_t *);
        }
    }

    size = size < ALIGN ? ALIGN : ROUND(size, ALIGN);
    if (s < 0 || (ssize_t) size > s) {
        if (s >= 0) {           /* amount to extend */
            ds = size - s;
            ds = ROUND(ds, vd->incr);
            addr = (*vm->disc->memoryf) (vm, seg->addr, seg->extent,
                                         seg->extent + ds, vm->disc);
            if (addr == seg->addr) {
                s += ds;
                seg->size += ds;
                seg->extent += ds;
                seg->baddr += ds;
                SIZE(BLOCK(seg->baddr)) = BUSY;
            } else
                goto do_alloc;
        } else {
          do_alloc:
            if (!(type & (VM_RSMOVE | VM_RSCOPY)))
                data = NIL(void *);
            else {
                tp = vd->free;
                if (!(addr = KPVALLOC(vm, size, lastalloc))) {
                    vd->free = tp;
                    data = NIL(void *);
                } else {
                    if (type & VM_RSCOPY) {
                        ed = (int *) data;
                        d = (int *) addr;
                        ds = oldsize < size ? oldsize : size;
                        INTCOPY(d, ed, ds);
                    }

                    if (s >= 0 && seg != vd->seg) {
                        tp = (Block_t *) data;
                        SEG(tp) = seg;
                        SIZE(tp) = s - sizeof(Head_t);
                        seg->free = tp;
                    }

                    /* new block and size */
                    data = addr;
                    seg = vd->seg;
                    s = (Vmuchar_t *) BLOCK(seg->baddr) - (Vmuchar_t *) data;
                    seg->free = NIL(Block_t *);
                }
            }
        }
    }

    if (data) {
        if (s >= (ssize_t) (size + sizeof(Head_t))) {
            tp = (Block_t *) ((Vmuchar_t *) data + size);
            SEG(tp) = seg;
            SIZE(tp) = (s - size) - sizeof(Head_t);
            seg->free = tp;
        }

        vd->free = seg->last = (Block_t *) data;

        if (!local && (vd->mode & VM_TRACE) && _Vmtrace)
            (*_Vmtrace) (vm, (Vmuchar_t *) orgdata, (Vmuchar_t *) data,
                         orgsize, 0);
    }

    CLRLOCK(vd, 0);

  done:
    if (data && (type & VM_RSZERO) && size > oldsize) {
        d = (int *) ((char *) data + oldsize);
        size -= oldsize;
        INTZERO(d, size);
    }

    return data;
}

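/* If `addr' lies inside the last allocated block, return its offset from
** the start of that block; otherwise return -1.
*/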
static long lastaddr(Vmalloc_t * vm, void * addr)
{
    reg Vmdata_t *vd = vm->data;

    if (!(vd->mode & VM_TRUST) && ISLOCK(vd, 0))
        return -1L;
    if (!vd->free || addr < (void *) vd->free
        || addr >= (void *) vd->seg->baddr)
        return -1L;
    else
        return (Vmuchar_t *) addr - (Vmuchar_t *) vd->free;
}

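/* If `addr' is the start of the last allocated block, return that block's
** usable size; otherwise return -1.
*/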
static long lastsize(Vmalloc_t * vm, void * addr)
{
    reg Vmdata_t *vd = vm->data;

    if (!(vd->mode & VM_TRUST) && ISLOCK(vd, 0))
        return -1L;
    if (!vd->free || addr != (void *) vd->free)
        return -1L;
    else if (vd->seg->free)
        return (Vmuchar_t *) vd->seg->free - (Vmuchar_t *) addr;
    else
        return (Vmuchar_t *) vd->seg->baddr - (Vmuchar_t *) addr -
            sizeof(Head_t);
}

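/* Return unused space at the tail of each segment to the discipline
** (via _Vmtruncate) so the region keeps only what it needs.
*/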
static int lastcompact(Vmalloc_t * vm)
{
    reg Block_t *fp;
    reg Seg_t *seg, *next;
    reg size_t s;
    reg Vmdata_t *vd = vm->data;

    if (!(vd->mode & VM_TRUST)) {
        if (ISLOCK(vd, 0))
            return -1;
        SETLOCK(vd, 0);
    }

    for (seg = vd->seg; seg; seg = next) {
        next = seg->next;

        if (!(fp = seg->free))
            continue;

        seg->free = NIL(Block_t *);
        if (seg->size == (s = SIZE(fp) & ~BITS))
            s = seg->extent;
        else
            s += sizeof(Head_t);

        if ((*_Vmtruncate) (vm, seg, s, 1) < 0)
            seg->free = fp;
    }

    if ((vd->mode & VM_TRACE) && _Vmtrace)
        (*_Vmtrace) (vm, (Vmuchar_t *) 0, (Vmuchar_t *) 0, 0, 0);

    CLRLOCK(vd, 0);
    return 0;
}

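/* Allocate a block of at least `size' bytes whose address is a multiple
** of `align'.  Any unused tail beyond the aligned block is returned to
** the segment's free area.
*/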
static void *lastalign(Vmalloc_t * vm, size_t size, size_t align)
{
    reg Vmuchar_t *data;
    reg size_t s, orgsize = 0, orgalign = 0;
    reg Seg_t *seg;
    reg Block_t *next;
    reg int local;
    reg Vmdata_t *vd = vm->data;

    if (size == 0 || align == 0)        /* both are unsigned; reject zero requests */
        return NIL(void *);

    if (!(local = vd->mode & VM_TRUST)) {
        GETLOCAL(vd, local);
        if (ISLOCK(vd, local))
            return NIL(void *);
        SETLOCK(vd, local);
        orgsize = size;
        orgalign = align;
    }

    size = size <= TINYSIZE ? TINYSIZE : ROUND(size, ALIGN);
    align = MULTIPLE(align, ALIGN);

    s = size + align;
    if (!(data = (Vmuchar_t *) KPVALLOC(vm, s, lastalloc)))
        goto done;

    /* find the segment containing this block */
    for (seg = vd->seg; seg; seg = seg->next)
        if (seg->last == (Block_t *) data)
            break;
    /**/ ASSERT(seg);

    /* get a suitably aligned address */
    if ((s = (size_t) (VLONG(data) % align)) != 0)
        data += align - s;
    /**/ ASSERT((VLONG(data) % align) == 0);

    /* free the unused tail */
    next = (Block_t *) (data + size);
    if ((s = (seg->baddr - (Vmuchar_t *) next)) >= sizeof(Block_t)) {
        SEG(next) = seg;
        SIZE(next) = s - sizeof(Head_t);
        seg->free = next;
    }

    vd->free = seg->last = (Block_t *) data;

    if (!local && !(vd->mode & VM_TRUST) && _Vmtrace
        && (vd->mode & VM_TRACE))
        (*_Vmtrace) (vm, NIL(Vmuchar_t *), data, orgsize, orgalign);

  done:
    CLRLOCK(vd, local);

    return (void *) data;
}

/* Public method for free-1 allocation */
static Vmethod_t _Vmlast = {
    lastalloc,
    lastresize,
    lastfree,
    lastaddr,
    lastsize,
    lastcompact,
    lastalign,
    VM_MTLAST
};

Vmethod_t* Vmlast = &_Vmlast;
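
/* Usage sketch (not part of the original source; assumes the public vmalloc
** interface declared in vmalloc.h: vmopen, vmalloc, vmresize, vmfree, vmclose
** and the Vmdcheap discipline).  A region opened with Vmlast only lets the
** most recently allocated block be resized or freed:
**
**	Vmalloc_t *vm = vmopen(Vmdcheap, Vmlast, 0);
**	void *buf = vmalloc(vm, 128);			buf is now the last block
**	buf = vmresize(vm, buf, 256, VM_RSCOPY);	ok: buf is the last block
**	vmfree(vm, buf);				ok: only the last block is freeable
**	vmclose(vm);
*/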