#include "mupdf/fitz.h"
#include "fitz-imp.h"

#include <assert.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>

typedef struct fz_item_s fz_item;

struct fz_item_s
{
	void *key;
	fz_storable *val;
	size_t size;
	fz_item *next;
	fz_item *prev;
	fz_store *store;
	const fz_store_type *type;
};

/* Every entry in fz_store is protected by the alloc lock */
struct fz_store_s
{
	int refs;

	/* Every item in the store is kept in a doubly linked list, ordered
	 * by usage (so LRU entries are at the end). */
	fz_item *head;
	fz_item *tail;

	/* We have a hash table that allows us to quickly find a subset of
	 * the entries (those whose keys are indirect objects). */
	fz_hash_table *hash;

	/* We keep track of the size of the store, and keep it below max. */
	size_t max;
	size_t size;

	int defer_reap_count;
	int needs_reaping;
};
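
/*
	Illustrative note (not part of the original file): callers describe
	each kind of cached object to the store with an fz_store_type, whose
	callbacks this file invokes (make_hash_key, keep_key, drop_key,
	cmp_key, format_key, needs_reap). The rough shape below is inferred
	from how those callbacks are called here; the field order and the
	my_* names are assumptions, and the authoritative declarations live
	in the store header.

	static int my_cmp_key(fz_context *ctx, void *k0, void *k1)
	{
		// Return 0 when the two keys match (see fz_find_item below).
		return ((my_key *)k0)->number != ((my_key *)k1)->number;
	}

	static int my_make_hash_key(fz_context *ctx, fz_store_hash *hash, void *key)
	{
		// Return non-zero if 'key' could be summarised into 'hash' for
		// fast lookup; returning 0 falls back to the linear cmp_key scan.
		return 0;
	}

	static const fz_store_type my_store_type =
	{
		my_make_hash_key,
		my_keep_key,   // takes a reference to the key
		my_drop_key,   // releases the key
		my_cmp_key,
		NULL,          // format_key (debug printing only)
		NULL           // needs_reap
	};
*/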

/*
	Create a new store inside the context

	max: The maximum size (in bytes) that the store is allowed to grow
	to. FZ_STORE_UNLIMITED means no limit.
*/
void
fz_new_store_context(fz_context *ctx, size_t max)
{
	fz_store *store;
	store = fz_malloc_struct(ctx, fz_store);
	fz_try(ctx)
	{
		store->hash = fz_new_hash_table(ctx, 4096, sizeof(fz_store_hash), FZ_LOCK_ALLOC, NULL);
	}
	fz_catch(ctx)
	{
		fz_free(ctx, store);
		fz_rethrow(ctx);
	}
	store->refs = 1;
	store->head = NULL;
	store->tail = NULL;
	store->size = 0;
	store->max = max;
	store->defer_reap_count = 0;
	store->needs_reaping = 0;
	ctx->store = store;
}
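
/*
	Typical usage (sketch, not part of the original file): applications do
	not normally call fz_new_store_context directly; it is invoked for them
	when the context is created, with the store budget passed through:

	fz_context *ctx = fz_new_context(NULL, NULL, FZ_STORE_DEFAULT);
	// or, to cap cached resources at (say) 64 MB:
	fz_context *ctx2 = fz_new_context(NULL, NULL, 64 << 20);

	FZ_STORE_UNLIMITED can be passed instead to disable eviction entirely.
*/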

void *
fz_keep_storable(fz_context *ctx, const fz_storable *sc)
{
	/* Explicitly drop const to allow us to use const
	 * sanely throughout the code. */
	fz_storable *s = (fz_storable *)sc;

	return fz_keep_imp(ctx, s, &s->refs);
}

void
fz_drop_storable(fz_context *ctx, const fz_storable *sc)
{
	/* Explicitly drop const to allow us to use const
	 * sanely throughout the code. */
	fz_storable *s = (fz_storable *)sc;

	/*
	If we are dropping the last reference to an object, then
	it cannot possibly be in the store (as the store always
	keeps a ref to everything in it, and doesn't drop via
	this method). So we can simply drop the storable object
	itself without any operations on the fz_store.
	*/
	if (fz_drop_imp(ctx, s, &s->refs))
		s->drop(ctx, s);
}
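
/*
	Sketch of the storable pattern these helpers manage (illustrative,
	not part of the original file): a cacheable object embeds an
	fz_storable as its first member, initialises the reference count and
	drop function (FZ_INIT_STORABLE in the store header is the usual way
	to do this), and is then shared via fz_keep_storable/fz_drop_storable.
	The my_thing/my_drop_imp names are made up for the example.

	typedef struct
	{
		fz_storable storable;
		int payload;
	} my_thing;

	static void my_drop_imp(fz_context *ctx, fz_storable *s)
	{
		fz_free(ctx, s);
	}

	my_thing *thing = fz_malloc_struct(ctx, my_thing);
	FZ_INIT_STORABLE(thing, 1, my_drop_imp);
	// share:   fz_keep_storable(ctx, &thing->storable);
	// release: fz_drop_storable(ctx, &thing->storable);
*/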

void *fz_keep_key_storable(fz_context *ctx, const fz_key_storable *sc)
{
	return fz_keep_storable(ctx, &sc->storable);
}

/*
	Entered with FZ_LOCK_ALLOC held.
	Drops FZ_LOCK_ALLOC.
*/
static void
do_reap(fz_context *ctx)
{
	fz_store *store = ctx->store;
	fz_item *item, *prev, *remove;

	if (store == NULL)
	{
		fz_unlock(ctx, FZ_LOCK_ALLOC);
		return;
	}

	fz_assert_lock_held(ctx, FZ_LOCK_ALLOC);

	ctx->store->needs_reaping = 0;

	/* Reap the items */
	remove = NULL;
	for (item = store->tail; item; item = prev)
	{
		prev = item->prev;

		if (item->type->needs_reap == NULL || item->type->needs_reap(ctx, item->key) == 0)
			continue;

		/* We have to drop it */
		store->size -= item->size;

		/* Unlink from the linked list */
		if (item->next)
			item->next->prev = item->prev;
		else
			store->tail = item->prev;
		if (item->prev)
			item->prev->next = item->next;
		else
			store->head = item->next;

		/* Remove from the hash table */
		if (item->type->make_hash_key)
		{
			fz_store_hash hash = { NULL };
			hash.drop = item->val->drop;
			if (item->type->make_hash_key(ctx, &hash, item->key))
				fz_hash_remove(ctx, store->hash, &hash);
		}

		/* Store whether to drop this value or not in 'prev' */
		if (item->val->refs > 0)
			(void)Memento_dropRef(item->val);
		item->prev = (item->val->refs > 0 && --item->val->refs == 0) ? item : NULL;

		/* Store it in our removal chain - just singly linked */
		item->next = remove;
		remove = item;
	}
	fz_unlock(ctx, FZ_LOCK_ALLOC);

	/* Now drop the remove chain */
	for (item = remove; item != NULL; item = remove)
	{
		remove = item->next;

		/* Drop a reference to the value (freeing if required) */
		if (item->prev)
			item->val->drop(ctx, item->val);

		/* Always drop the key and the item */
		item->type->drop_key(ctx, item->key);
		fz_free(ctx, item);
	}
}

void fz_drop_key_storable(fz_context *ctx, const fz_key_storable *sc)
{
	/* Explicitly drop const to allow us to use const
	 * sanely throughout the code. */
	fz_key_storable *s = (fz_key_storable *)sc;
	int drop;
	int unlock = 1;

	if (s == NULL)
		return;

	fz_lock(ctx, FZ_LOCK_ALLOC);
	if (s->storable.refs > 0)
	{
		(void)Memento_dropRef(s);
		drop = --s->storable.refs == 0;
		if (!drop && s->storable.refs == s->store_key_refs)
		{
			if (ctx->store->defer_reap_count > 0)
			{
				ctx->store->needs_reaping = 1;
			}
			else
			{
				do_reap(ctx);
				unlock = 0;
			}
		}
	}
	else
		drop = 0;
	if (unlock)
		fz_unlock(ctx, FZ_LOCK_ALLOC);
	/*
	If we are dropping the last reference to an object, then
	it cannot possibly be in the store (as the store always
	keeps a ref to everything in it, and doesn't drop via
	this method). So we can simply drop the storable object
	itself without any operations on the fz_store.
	*/
	if (drop)
		s->storable.drop(ctx, &s->storable);
}

void *fz_keep_key_storable_key(fz_context *ctx, const fz_key_storable *sc)
{
	/* Explicitly drop const to allow us to use const
	 * sanely throughout the code. */
	fz_key_storable *s = (fz_key_storable *)sc;

	if (s == NULL)
		return NULL;

	fz_lock(ctx, FZ_LOCK_ALLOC);
	if (s->storable.refs > 0)
	{
		(void)Memento_takeRef(s);
		++s->storable.refs;
		++s->store_key_refs;
	}
	fz_unlock(ctx, FZ_LOCK_ALLOC);
	return s;
}

void fz_drop_key_storable_key(fz_context *ctx, const fz_key_storable *sc)
{
	/* Explicitly drop const to allow us to use const
	 * sanely throughout the code. */
	fz_key_storable *s = (fz_key_storable *)sc;
	int drop;

	if (s == NULL)
		return;

	fz_lock(ctx, FZ_LOCK_ALLOC);
	assert(s->store_key_refs > 0 && s->storable.refs >= s->store_key_refs);
	(void)Memento_dropRef(s);
	drop = --s->storable.refs == 0;
	--s->store_key_refs;
	fz_unlock(ctx, FZ_LOCK_ALLOC);
	/*
	If we are dropping the last reference to an object, then
	it cannot possibly be in the store (as the store always
	keeps a ref to everything in it, and doesn't drop via
	this method). So we can simply drop the storable object
	itself without any operations on the fz_store.
	*/
	if (drop)
		s->storable.drop(ctx, &s->storable);
}
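
/*
	Illustrative note (not in the original file): fz_key_storable extends
	the plain storable with store_key_refs so the store can tell how many
	of an object's references come from cache keys rather than from users.
	A store type whose key contains such an object pairs the calls like
	this (obj->key_storable is a hypothetical embedded member):

	// in the type's keep_key callback, when the key takes its reference:
	fz_keep_key_storable_key(ctx, &obj->key_storable);
	// in the type's drop_key callback, when the key is destroyed:
	fz_drop_key_storable_key(ctx, &obj->key_storable);

	Ordinary users keep/drop via fz_keep_key_storable/fz_drop_key_storable.
	Once storable.refs falls to equal store_key_refs, only keys still
	reference the object, and fz_drop_key_storable above triggers (or
	defers) a reap so those entries can be evicted.
*/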

static void
evict(fz_context *ctx, fz_item *item)
{
	fz_store *store = ctx->store;
	int drop;

	store->size -= item->size;
	/* Unlink from the linked list */
	if (item->next)
		item->next->prev = item->prev;
	else
		store->tail = item->prev;
	if (item->prev)
		item->prev->next = item->next;
	else
		store->head = item->next;

	/* Drop a reference to the value (freeing if required) */
	if (item->val->refs > 0)
		(void)Memento_dropRef(item->val);
	drop = (item->val->refs > 0 && --item->val->refs == 0);

	/* Remove from the hash table */
	if (item->type->make_hash_key)
	{
		fz_store_hash hash = { NULL };
		hash.drop = item->val->drop;
		if (item->type->make_hash_key(ctx, &hash, item->key))
			fz_hash_remove(ctx, store->hash, &hash);
	}
	fz_unlock(ctx, FZ_LOCK_ALLOC);
	if (drop)
		item->val->drop(ctx, item->val);

	/* Always drop the key and the item */
	item->type->drop_key(ctx, item->key);
	fz_free(ctx, item);
	fz_lock(ctx, FZ_LOCK_ALLOC);
}

static size_t
ensure_space(fz_context *ctx, size_t tofree)
{
	fz_item *item, *prev;
	size_t count;
	fz_store *store = ctx->store;
	fz_item *to_be_freed = NULL;

	fz_assert_lock_held(ctx, FZ_LOCK_ALLOC);

	/* First check that we *can* free tofree; if not, we'd rather not
	 * cache this. */
	count = 0;
	for (item = store->tail; item; item = item->prev)
	{
		if (item->val->refs == 1)
		{
			count += item->size;
			if (count >= tofree)
				break;
		}
	}

	/* If we ran out of items to search, then we can never free enough */
	if (item == NULL)
	{
		return 0;
	}

	/* Now move all the items to be freed onto 'to_be_freed' */
	count = 0;
	for (item = store->tail; item; item = prev)
	{
		prev = item->prev;
		if (item->val->refs != 1)
			continue;

		store->size -= item->size;

		/* Unlink from the linked list */
		if (item->next)
			item->next->prev = item->prev;
		else
			store->tail = item->prev;
		if (item->prev)
			item->prev->next = item->next;
		else
			store->head = item->next;

		/* Remove from the hash table */
		if (item->type->make_hash_key)
		{
			fz_store_hash hash = { NULL };
			hash.drop = item->val->drop;
			if (item->type->make_hash_key(ctx, &hash, item->key))
				fz_hash_remove(ctx, store->hash, &hash);
		}

		/* Link into to_be_freed */
		item->next = to_be_freed;
		to_be_freed = item;

		count += item->size;
		if (count >= tofree)
			break;
	}

	/* Now we can safely drop the lock and free our pending items. These
	 * have all been removed from both the store list, and the hash table,
	 * so they can't be 'found' by anyone else in the meantime. */

	while (to_be_freed)
	{
		fz_item *item = to_be_freed;
		int drop;

		to_be_freed = to_be_freed->next;

		/* Drop a reference to the value (freeing if required) */
		if (item->val->refs > 0)
			(void)Memento_dropRef(item->val);
		drop = (item->val->refs > 0 && --item->val->refs == 0);

		fz_unlock(ctx, FZ_LOCK_ALLOC);
		if (drop)
			item->val->drop(ctx, item->val);

		/* Always drop the key and the item */
		item->type->drop_key(ctx, item->key);
		fz_free(ctx, item);
		fz_lock(ctx, FZ_LOCK_ALLOC);
	}

	return count;
}

static void
touch(fz_store *store, fz_item *item)
{
	if (item->next != item)
	{
		/* Already in the list - unlink it */
		if (item->next)
			item->next->prev = item->prev;
		else
			store->tail = item->prev;
		if (item->prev)
			item->prev->next = item->next;
		else
			store->head = item->next;
	}
	/* Now relink it at the start of the LRU chain */
	item->next = store->head;
	if (item->next)
		item->next->prev = item;
	else
		store->tail = item;
	store->head = item;
	item->prev = NULL;
}
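
/*
	Example of the LRU ordering touch() maintains (illustrative): with the
	list head -> A <-> B <-> C <- tail, calling touch(store, C) relinks it
	as head -> C <-> A <-> B <- tail. The head end is therefore the most
	recently used, and eviction in ensure_space/scavenge walks from the
	tail (least recently used) backwards.
*/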

/*
	Add an item to the store.

	Add an item into the store, returning NULL for success. If an item
	with the same key is found in the store, then our item will not be
	inserted, and the function will return a pointer to that value
	instead. This function takes its own reference to val, as required
	(i.e. the caller maintains ownership of its own reference).

	key: The key used to index the item.

	val: The value to store.

	itemsize: The size in bytes of the value (as counted towards the
	store size).

	type: Functions used to manipulate the key.
*/
void *
fz_store_item(fz_context *ctx, void *key, void *val_, size_t itemsize, const fz_store_type *type)
{
	fz_item *item = NULL;
	size_t size;
	fz_storable *val = (fz_storable *)val_;
	fz_store *store = ctx->store;
	fz_store_hash hash = { NULL };
	int use_hash = 0;

	if (!store)
		return NULL;

	/* If we fail for any reason, we swallow the exception and continue.
	 * All that the calling code will see is that we failed to store
	 * the item. */

	item = fz_malloc_no_throw(ctx, sizeof (fz_item));
	if (!item)
		return NULL;
	memset(item, 0, sizeof (fz_item));

	if (type->make_hash_key)
	{
		hash.drop = val->drop;
		use_hash = type->make_hash_key(ctx, &hash, key);
	}

	type->keep_key(ctx, key);
	fz_lock(ctx, FZ_LOCK_ALLOC);

	/* Fill out the item. To start with, we always set item->next == item
	 * and item->prev == item. This is so that we can spot items that have
	 * been put into the hash table without having made it into the linked
	 * list yet. */
	item->key = key;
	item->val = val;
	item->size = itemsize;
	item->next = item;
	item->prev = item;
	item->type = type;

	/* If we can index it fast, put it into the hash table. This serves
	 * to check whether we have one there already. */
	if (use_hash)
	{
		fz_item *existing = NULL;

		fz_try(ctx)
		{
			/* May drop and retake the lock */
			existing = fz_hash_insert(ctx, store->hash, &hash, item);
		}
		fz_catch(ctx)
		{
			/* Any error here means that item never made it into the
			 * hash - so no one else can have a reference. */
			fz_unlock(ctx, FZ_LOCK_ALLOC);
			fz_free(ctx, item);
			type->drop_key(ctx, key);
			return NULL;
		}
		if (existing)
		{
			/* There was one there already! Take a new reference
			 * to the existing one, and drop our current one. */
			touch(store, existing);
			if (existing->val->refs > 0)
			{
				(void)Memento_takeRef(existing->val);
				existing->val->refs++;
			}
			fz_unlock(ctx, FZ_LOCK_ALLOC);
			fz_free(ctx, item);
			type->drop_key(ctx, key);
			return existing->val;
		}
	}

	/* Now bump the ref */
	if (val->refs > 0)
	{
		(void)Memento_takeRef(val);
		val->refs++;
	}

	/* If we haven't got an infinite store, check for space within it */
	if (store->max != FZ_STORE_UNLIMITED)
	{
		size = store->size + itemsize;
		while (size > store->max)
		{
			size_t saved;

			/* First, do any outstanding reaping, even if defer_reap_count > 0 */
			if (store->needs_reaping)
			{
				do_reap(ctx); /* Drops alloc lock */
				fz_lock(ctx, FZ_LOCK_ALLOC);
			}
			size = store->size + itemsize;
			if (size <= store->max)
				break;

			/* ensure_space may drop, then retake the lock */
			saved = ensure_space(ctx, size - store->max);
			size -= saved;
			if (saved == 0)
			{
				/* Failed to free any space. */
				/* We used to 'unstore' it here, but that's wrong.
				 * If we've already spent the memory to malloc it
				 * then not putting it in the store just means that
				 * a resource used multiple times will just be malloced
				 * again. Better to put it in the store, have the
				 * store account for it, and for it to potentially be
				 * reused. When the caller drops the reference to it,
				 * it can then be dropped from the store on the next
				 * attempt to store anything else. */
				break;
			}
		}
	}
	store->size += itemsize;

	/* Regardless of whether it's indexed, it goes into the linked list */
	touch(store, item);
	fz_unlock(ctx, FZ_LOCK_ALLOC);

	return NULL;
}
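
/*
	Typical calling pattern (sketch with made-up my_* names, not part of
	the original file): the caller keeps its own reference to the value it
	built, offers it to the store, and switches to an already-cached copy
	if one exists; the returned 'existing' pointer carries a reference of
	its own.

	my_thing *val = my_load_thing(ctx, doc, num);
	my_thing *existing = fz_store_item(ctx, key, val, my_thing_size(val), &my_store_type);
	if (existing)
	{
		// Equivalent object already cached: drop ours, use theirs.
		my_drop_thing(ctx, val);
		val = existing;
	}
	// Either way the caller still owns one reference to 'val'.
*/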

/*
	Find an item within the store.

	drop: The function used to free the value (to ensure we get a value
	of the correct type).

	key: The key used to index the item.

	type: Functions used to manipulate the key.

	Returns NULL for not found, otherwise returns a pointer to the value
	indexed by key to which a reference has been taken.
*/
void *
fz_find_item(fz_context *ctx, fz_store_drop_fn *drop, void *key, const fz_store_type *type)
{
	fz_item *item;
	fz_store *store = ctx->store;
	fz_store_hash hash = { NULL };
	int use_hash = 0;

	if (!store)
		return NULL;

	if (!key)
		return NULL;

	if (type->make_hash_key)
	{
		hash.drop = drop;
		use_hash = type->make_hash_key(ctx, &hash, key);
	}

	fz_lock(ctx, FZ_LOCK_ALLOC);
	if (use_hash)
	{
		/* We can find objects keyed on indirect objects quickly */
		item = fz_hash_find(ctx, store->hash, &hash);
	}
	else
	{
		/* Others we have to hunt for slowly */
		for (item = store->head; item; item = item->next)
		{
			if (item->val->drop == drop && !type->cmp_key(ctx, item->key, key))
				break;
		}
	}
	if (item)
	{
		/* LRU the block. This also serves to ensure that any item
		 * picked up from the hash before it has made it into the
		 * linked list does not get whipped out again due to the
		 * store being full. */
		touch(store, item);
		/* And bump the refcount before returning */
		if (item->val->refs > 0)
		{
			(void)Memento_takeRef(item->val);
			item->val->refs++;
		}
		fz_unlock(ctx, FZ_LOCK_ALLOC);
		return (void *)item->val;
	}
	fz_unlock(ctx, FZ_LOCK_ALLOC);

	return NULL;
}
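
/*
	Lookup-or-create sketch (illustrative, made-up my_* names; pairs with
	the fz_store_item pattern shown above). fz_find_item returns a value
	that already carries a reference, so the caller simply drops it when
	finished.

	my_thing *val = fz_find_item(ctx, my_drop_imp, key, &my_store_type);
	if (val == NULL)
	{
		// Cache miss: build the object, then offer it to the store.
		val = my_load_thing(ctx, doc, num);
		my_thing *existing = fz_store_item(ctx, key, val, my_thing_size(val), &my_store_type);
		if (existing)
		{
			my_drop_thing(ctx, val);
			val = existing;
		}
	}
	// use val, then:
	my_drop_thing(ctx, val);
*/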

/*
	Remove an item from the store.

	If an item indexed by the given key exists in the store, remove it.

	drop: The function used to free the value (to ensure we get a value
	of the correct type).

	key: The key used to find the item to remove.

	type: Functions used to manipulate the key.
*/
void
fz_remove_item(fz_context *ctx, fz_store_drop_fn *drop, void *key, const fz_store_type *type)
{
	fz_item *item;
	fz_store *store = ctx->store;
	int dodrop;
	fz_store_hash hash = { NULL };
	int use_hash = 0;

	if (type->make_hash_key)
	{
		hash.drop = drop;
		use_hash = type->make_hash_key(ctx, &hash, key);
	}

	fz_lock(ctx, FZ_LOCK_ALLOC);
	if (use_hash)
	{
		/* We can find objects keyed on indirect objects quickly */
		item = fz_hash_find(ctx, store->hash, &hash);
		if (item)
			fz_hash_remove(ctx, store->hash, &hash);
	}
	else
	{
		/* Others we have to hunt for slowly */
		for (item = store->head; item; item = item->next)
			if (item->val->drop == drop && !type->cmp_key(ctx, item->key, key))
				break;
	}
	if (item)
	{
		/* Momentarily things can be in the hash table without being
		 * in the list. Don't attempt to unlink these. We indicate
		 * such items by setting item->next == item. */
		if (item->next != item)
		{
			if (item->next)
				item->next->prev = item->prev;
			else
				store->tail = item->prev;
			if (item->prev)
				item->prev->next = item->next;
			else
				store->head = item->next;
		}
		if (item->val->refs > 0)
			(void)Memento_dropRef(item->val);
		dodrop = (item->val->refs > 0 && --item->val->refs == 0);
		fz_unlock(ctx, FZ_LOCK_ALLOC);
		if (dodrop)
			item->val->drop(ctx, item->val);
		type->drop_key(ctx, item->key);
		fz_free(ctx, item);
	}
	else
		fz_unlock(ctx, FZ_LOCK_ALLOC);
}

void
fz_empty_store(fz_context *ctx)
{
	fz_store *store = ctx->store;

	if (store == NULL)
		return;

	fz_lock(ctx, FZ_LOCK_ALLOC);
	/* Run through all the items in the store */
	while (store->head)
		evict(ctx, store->head); /* Drops then retakes lock */
	fz_unlock(ctx, FZ_LOCK_ALLOC);
}

fz_store *
fz_keep_store_context(fz_context *ctx)
{
	if (ctx == NULL || ctx->store == NULL)
		return NULL;
	return fz_keep_imp(ctx, ctx->store, &ctx->store->refs);
}

void
fz_drop_store_context(fz_context *ctx)
{
	if (!ctx)
		return;
	if (fz_drop_imp(ctx, ctx->store, &ctx->store->refs))
	{
		fz_empty_store(ctx);
		fz_drop_hash_table(ctx, ctx->store->hash);
		fz_free(ctx, ctx->store);
		ctx->store = NULL;
	}
}

static void
fz_debug_store_item(fz_context *ctx, void *state, void *key_, int keylen, void *item_)
{
	unsigned char *key = key_;
	fz_item *item = item_;
	int i;
	char buf[256];
	fz_unlock(ctx, FZ_LOCK_ALLOC);
	item->type->format_key(ctx, buf, sizeof buf, item->key);
	fz_lock(ctx, FZ_LOCK_ALLOC);
	printf("hash[");
	for (i = 0; i < keylen; ++i)
		printf("%02x", key[i]);
	printf("][refs=%d][size=%d] key=%s val=%p\n", item->val->refs, (int)item->size, buf, (void *)item->val);
}

static void
fz_debug_store_locked(fz_context *ctx)
{
	fz_item *item, *next;
	char buf[256];
	fz_store *store = ctx->store;

	printf("-- resource store contents --\n");

	for (item = store->head; item; item = next)
	{
		next = item->next;
		if (next)
		{
			(void)Memento_takeRef(next->val);
			next->val->refs++;
		}
		fz_unlock(ctx, FZ_LOCK_ALLOC);
		item->type->format_key(ctx, buf, sizeof buf, item->key);
		fz_lock(ctx, FZ_LOCK_ALLOC);
		printf("store[*][refs=%d][size=%d] key=%s val=%p\n",
			item->val->refs, (int)item->size, buf, (void *)item->val);
		if (next)
		{
			(void)Memento_dropRef(next->val);
			next->val->refs--;
		}
	}

	printf("-- resource store hash contents --\n");
	fz_hash_for_each(ctx, store->hash, NULL, fz_debug_store_item);
	printf("-- end --\n");
}

void
fz_debug_store(fz_context *ctx)
{
	fz_lock(ctx, FZ_LOCK_ALLOC);
	fz_debug_store_locked(ctx);
	fz_unlock(ctx, FZ_LOCK_ALLOC);
}

/* This is now an n^2 algorithm - not ideal, but it'll only be bad if we are
 * actually managing to scavenge lots of blocks back. */
static int
scavenge(fz_context *ctx, size_t tofree)
{
	fz_store *store = ctx->store;
	size_t count = 0;
	fz_item *item, *prev;

	/* Free the items */
	for (item = store->tail; item; item = prev)
	{
		prev = item->prev;
		if (item->val->refs == 1)
		{
			/* Free this item */
			count += item->size;
			evict(ctx, item); /* Drops then retakes lock */

			if (count >= tofree)
				break;

			/* Have to restart the search, as prev may no longer
			 * be valid due to the release of the lock in evict. */
			prev = store->tail;
		}
	}
	/* Success is managing to evict any blocks */
	return count != 0;
}

/*
	External function for callers to use
	to scavenge while trying allocations.

	size: The number of bytes we are trying to have free.

	phase: What phase of the scavenge we are in. Updated on exit.

	Returns non-zero if we managed to free any memory.
*/
int fz_store_scavenge_external(fz_context *ctx, size_t size, int *phase)
{
	int ret;

	fz_lock(ctx, FZ_LOCK_ALLOC);
	ret = fz_store_scavenge(ctx, size, phase);
	fz_unlock(ctx, FZ_LOCK_ALLOC);

	return ret;
}
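
/*
	Illustrative use (assumed application code, not from this file): a
	client that allocates big buffers outside of fitz can ask the store to
	give memory back before giving up. 'phase' starts at 0 and is advanced
	by the scavenger, so repeated calls become progressively more
	aggressive. The my_big_alloc name is made up for the example.

	void *my_big_alloc(fz_context *ctx, size_t size)
	{
		int phase = 0;
		void *p;

		while ((p = malloc(size)) == NULL)
		{
			// Ask the store to evict at least 'size' bytes; stop if it can't.
			if (!fz_store_scavenge_external(ctx, size, &phase))
				break;
		}
		return p;
	}
*/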

/*
	Internal function used as part of the scavenging
	allocator; when we fail to allocate memory, before returning a
	failure to the caller, we try to scavenge space within the store by
	evicting at least 'size' bytes. The allocator then retries.

	size: The number of bytes we are trying to have free.

	phase: What phase of the scavenge we are in. Updated on exit.

	Returns non-zero if we managed to free any memory.
*/
int fz_store_scavenge(fz_context *ctx, size_t size, int *phase)
{
	fz_store *store;
	size_t max;

	store = ctx->store;
	if (store == NULL)
		return 0;

#ifdef DEBUG_SCAVENGING
	printf("Scavenging: store=" FZ_FMT_zu " size=" FZ_FMT_zu " phase=%d\n", store->size, size, *phase);
	fz_debug_store_locked(ctx);
	Memento_stats();
#endif
	do
	{
		size_t tofree;

		/* Calculate 'max' as the maximum size of the store for this phase */
		if (*phase >= 16)
			max = 0;
		else if (store->max != FZ_STORE_UNLIMITED)
			max = store->max / 16 * (16 - *phase);
		else
			max = store->size / (16 - *phase) * (15 - *phase);
		(*phase)++;

		/* Slightly baroque calculations to avoid overflow */
		if (size > SIZE_MAX - store->size)
			tofree = SIZE_MAX - max;
		else if (size + store->size > max)
			tofree = size + store->size - max;
		else
			continue;

		if (scavenge(ctx, tofree))
		{
#ifdef DEBUG_SCAVENGING
			printf("scavenged: store=" FZ_FMT_zu "\n", store->size);
			fz_debug_store(ctx);
			Memento_stats();
#endif
			return 1;
		}
	}
	while (max > 0);

#ifdef DEBUG_SCAVENGING
	printf("scavenging failed\n");
	fz_debug_store(ctx);
	Memento_listBlocks();
#endif
	return 0;
}
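
/*
	Worked example of the phase schedule above (illustrative numbers):
	with store->max = 160 MB, phase 0 allows the store its full 160 MB,
	phase 1 trims the target to 150 MB (160/16 of the budget times 15),
	phase 2 to 140 MB, and so on down to 0 at phase 16. Each pass evicts
	only enough to bring size + store->size under the current target, so
	repeated failures push the caller into progressively harsher phases
	until the store is empty.
*/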

/*
	Evict items from the store until the total size of
	the objects in the store is reduced to a given percentage of its
	current size.

	percent: Percentage of the current size to reduce the store to.

	Returns non-zero if we managed to free enough memory, zero otherwise.
*/
int
fz_shrink_store(fz_context *ctx, unsigned int percent)
{
	int success;
	fz_store *store;
	size_t new_size;

	if (percent >= 100)
		return 1;

	store = ctx->store;
	if (store == NULL)
		return 0;

#ifdef DEBUG_SCAVENGING
	printf("fz_shrink_store: " FZ_FMT_zu "\n", store->size/(1024*1024));
#endif
	fz_lock(ctx, FZ_LOCK_ALLOC);

	new_size = (size_t)(((uint64_t)store->size * percent) / 100);
	if (store->size > new_size)
		scavenge(ctx, store->size - new_size);

	success = (store->size <= new_size) ? 1 : 0;
	fz_unlock(ctx, FZ_LOCK_ALLOC);
#ifdef DEBUG_SCAVENGING
	printf("fz_shrink_store after: " FZ_FMT_zu "\n", store->size/(1024*1024));
#endif

	return success;
}
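
/*
	Example (illustrative): under memory pressure an application can ask
	for the cache to be halved, checking the return value to see whether
	enough unreferenced items could actually be evicted.

	if (!fz_shrink_store(ctx, 50))
	{
		// Less than half could be freed; most cached items are still in use.
	}
*/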

void fz_filter_store(fz_context *ctx, fz_store_filter_fn *fn, void *arg, const fz_store_type *type)
{
	fz_store *store;
	fz_item *item, *prev, *remove;

	store = ctx->store;
	if (store == NULL)
		return;

	fz_lock(ctx, FZ_LOCK_ALLOC);

	/* Filter the items */
	remove = NULL;
	for (item = store->tail; item; item = prev)
	{
		prev = item->prev;
		if (item->type != type)
			continue;

		if (fn(ctx, arg, item->key) == 0)
			continue;

		/* We have to drop it */
		store->size -= item->size;

		/* Unlink from the linked list */
		if (item->next)
			item->next->prev = item->prev;
		else
			store->tail = item->prev;
		if (item->prev)
			item->prev->next = item->next;
		else
			store->head = item->next;

		/* Remove from the hash table */
		if (item->type->make_hash_key)
		{
			fz_store_hash hash = { NULL };
			hash.drop = item->val->drop;
			if (item->type->make_hash_key(ctx, &hash, item->key))
				fz_hash_remove(ctx, store->hash, &hash);
		}

		/* Store whether to drop this value or not in 'prev' */
		if (item->val->refs > 0)
			(void)Memento_dropRef(item->val);
		item->prev = (item->val->refs > 0 && --item->val->refs == 0) ? item : NULL;

		/* Store it in our removal chain - just singly linked */
		item->next = remove;
		remove = item;
	}
	fz_unlock(ctx, FZ_LOCK_ALLOC);

	/* Now drop the remove chain */
	for (item = remove; item != NULL; item = remove)
	{
		remove = item->next;

		/* Drop a reference to the value (freeing if required) */
		if (item->prev) /* See above for our abuse of prev here */
			item->val->drop(ctx, item->val);

		/* Always drop the key and the item */
		item->type->drop_key(ctx, item->key);
		fz_free(ctx, item);
	}
}
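
/*
	Sketch of a filter callback (illustrative, made-up my_* names): fn is
	called with each key of the given type and returns non-zero for
	entries that should be removed. For example, to flush every cached
	item belonging to one document:

	static int my_filter(fz_context *ctx, void *doc_, void *key_)
	{
		my_key *key = key_;
		return key->doc == (my_document *)doc_;
	}

	fz_filter_store(ctx, my_filter, doc, &my_store_type);
*/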

/*
	Increment the defer reap count.

	No reap operations will take place (except for those
	triggered by an immediate failed malloc) until the
	defer reap count returns to 0.

	Call this at the start of a process during which you
	might drop many reapable objects.

	It is vital that every fz_defer_reap_start is matched
	by a fz_defer_reap_end call.
*/
void fz_defer_reap_start(fz_context *ctx)
{
	if (ctx->store == NULL)
		return;

	fz_lock(ctx, FZ_LOCK_ALLOC);
	ctx->store->defer_reap_count++;
	fz_unlock(ctx, FZ_LOCK_ALLOC);
}

/*
	Decrement the defer reap count.

	If the defer reap count returns to 0, and the store
	has reapable objects in it, a reap pass will begin.

	Call this at the end of a process during which you
	might drop many reapable objects.

	It is vital that every fz_defer_reap_start is matched
	by a fz_defer_reap_end call.
*/
void fz_defer_reap_end(fz_context *ctx)
{
	int reap;

	if (ctx->store == NULL)
		return;

	fz_lock(ctx, FZ_LOCK_ALLOC);
	--ctx->store->defer_reap_count;
	reap = ctx->store->defer_reap_count == 0 && ctx->store->needs_reaping;
	if (reap)
		do_reap(ctx); /* Drops FZ_LOCK_ALLOC */
	else
		fz_unlock(ctx, FZ_LOCK_ALLOC);
}
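
/*
	Usage sketch (illustrative; the fz_try/fz_always pairing is an
	assumption about the calling code, not something this file mandates):
	bracket bulk teardown so that at most one reap pass runs at the end,
	and keep the start/end calls balanced even if an exception is thrown.
	drop_lots_of_things is a hypothetical helper.

	fz_defer_reap_start(ctx);
	fz_try(ctx)
	{
		// Drop many objects that may leave store entries referenced
		// only by their keys.
		drop_lots_of_things(ctx);
	}
	fz_always(ctx)
	{
		fz_defer_reap_end(ctx);
	}
	fz_catch(ctx)
	{
		fz_rethrow(ctx);
	}
*/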