// © 2016 and later: Unicode, Inc. and others.
// License & terms of use: http://www.unicode.org/copyright.html
/*
******************************************************************************
* Copyright (C) 2015, International Business Machines Corporation and
* others. All Rights Reserved.
******************************************************************************
*
* File unifiedcache.cpp
******************************************************************************
*/

#include "unifiedcache.h"

#include <algorithm>      // For std::max()
#include <mutex>

#include "uassert.h"
#include "uhash.h"
#include "ucln_cmn.h"

static icu::UnifiedCache *gCache = nullptr;
static std::mutex *gCacheMutex = nullptr;
static std::condition_variable *gInProgressValueAddedCond;
static icu::UInitOnce gCacheInitOnce {};

static const int32_t MAX_EVICT_ITERATIONS = 10;
static const int32_t DEFAULT_MAX_UNUSED = 1000;
static const int32_t DEFAULT_PERCENTAGE_OF_IN_USE = 100;


U_CDECL_BEGIN
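// Releases all cache resources during common-library cleanup. This function is
// registered with ucln_common_registerCleanup() in cacheInit(). gCacheMutex and
// gInProgressValueAddedCond are constructed in place with STATIC_NEW, so they
// are destroyed explicitly here rather than deleted.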
static UBool U_CALLCONV unifiedcache_cleanup() {
    gCacheInitOnce.reset();
    delete gCache;
    gCache = nullptr;
    gCacheMutex->~mutex();
    gCacheMutex = nullptr;
    gInProgressValueAddedCond->~condition_variable();
    gInProgressValueAddedCond = nullptr;
    return true;
}
U_CDECL_END


U_NAMESPACE_BEGIN

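// uhash adapters: hash, compare, and delete hash table keys by delegating to
// the polymorphic CacheKeyBase operations.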
int32_t U_EXPORT2
ucache_hashKeys(const UHashTok key) {
    const CacheKeyBase *ckey = (const CacheKeyBase *) key.pointer;
    return ckey->hashCode();
}

UBool U_EXPORT2
ucache_compareKeys(const UHashTok key1, const UHashTok key2) {
    const CacheKeyBase *p1 = (const CacheKeyBase *) key1.pointer;
    const CacheKeyBase *p2 = (const CacheKeyBase *) key2.pointer;
    return *p1 == *p2;
}

void U_EXPORT2
ucache_deleteKey(void *obj) {
    CacheKeyBase *p = (CacheKeyBase *) obj;
    delete p;
}

CacheKeyBase::~CacheKeyBase() {
}

static void U_CALLCONV cacheInit(UErrorCode &status) {
    U_ASSERT(gCache == nullptr);
    ucln_common_registerCleanup(
            UCLN_COMMON_UNIFIED_CACHE, unifiedcache_cleanup);

    gCacheMutex = STATIC_NEW(std::mutex);
    gInProgressValueAddedCond = STATIC_NEW(std::condition_variable);
    gCache = new UnifiedCache(status);
    if (gCache == nullptr) {
        status = U_MEMORY_ALLOCATION_ERROR;
    }
    if (U_FAILURE(status)) {
        delete gCache;
        gCache = nullptr;
        return;
    }
}

UnifiedCache *UnifiedCache::getInstance(UErrorCode &status) {
    umtx_initOnce(gCacheInitOnce, &cacheInit, status);
    if (U_FAILURE(status)) {
        return nullptr;
    }
    U_ASSERT(gCache != nullptr);
    return gCache;
}
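
// A minimal sketch of typical client usage, assuming MyType is a SharedObject
// subclass with a LocaleCacheKey<MyType>::createObject() specialization.
// MyType and locale are placeholders; see unifiedcache.h for the full API.
//
//     UErrorCode status = U_ZERO_ERROR;
//     const UnifiedCache *cache = UnifiedCache::getInstance(status);
//     const MyType *obj = nullptr;
//     if (U_SUCCESS(status)) {
//         cache->get(LocaleCacheKey<MyType>(locale), obj, status);
//     }
//     // ... use obj ...
//     if (obj != nullptr) {
//         obj->removeRef();  // release the hard reference added by get()
//     }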

UnifiedCache::UnifiedCache(UErrorCode &status) :
        fHashtable(nullptr),
        fEvictPos(UHASH_FIRST),
        fNumValuesTotal(0),
        fNumValuesInUse(0),
        fMaxUnused(DEFAULT_MAX_UNUSED),
        fMaxPercentageOfInUse(DEFAULT_PERCENTAGE_OF_IN_USE),
        fAutoEvictedCount(0),
        fNoValue(nullptr) {
    if (U_FAILURE(status)) {
        return;
    }
    fNoValue = new SharedObject();
    if (fNoValue == nullptr) {
        status = U_MEMORY_ALLOCATION_ERROR;
        return;
    }
    fNoValue->softRefCount = 1;  // Add fake references to prevent fNoValue from being deleted
    fNoValue->hardRefCount = 1;  // when other references to it are removed.
    fNoValue->cachePtr = this;

    fHashtable = uhash_open(
            &ucache_hashKeys,
            &ucache_compareKeys,
            nullptr,
            &status);
    if (U_FAILURE(status)) {
        return;
    }
    uhash_setKeyDeleter(fHashtable, &ucache_deleteKey);
}

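// Sets the eviction policy: unused (not hard-referenced) entries are allowed
// up to the larger of count and percentageOfInUseItems percent of the number
// of in-use entries; anything beyond that becomes eligible for automatic
// eviction (see _computeCountOfItemsToEvict()).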
void UnifiedCache::setEvictionPolicy(
        int32_t count, int32_t percentageOfInUseItems, UErrorCode &status) {
    if (U_FAILURE(status)) {
        return;
    }
    if (count < 0 || percentageOfInUseItems < 0) {
        status = U_ILLEGAL_ARGUMENT_ERROR;
        return;
    }
    std::lock_guard<std::mutex> lock(*gCacheMutex);
    fMaxUnused = count;
    fMaxPercentageOfInUse = percentageOfInUseItems;
}

int32_t UnifiedCache::unusedCount() const {
    std::lock_guard<std::mutex> lock(*gCacheMutex);
    return uhash_count(fHashtable) - fNumValuesInUse;
}

int64_t UnifiedCache::autoEvictedCount() const {
    std::lock_guard<std::mutex> lock(*gCacheMutex);
    return fAutoEvictedCount;
}

int32_t UnifiedCache::keyCount() const {
    std::lock_guard<std::mutex> lock(*gCacheMutex);
    return uhash_count(fHashtable);
}

void UnifiedCache::flush() const {
    std::lock_guard<std::mutex> lock(*gCacheMutex);

    // Use a loop in case cache items that are flushed hold hard references to
    // other cache items, making those additional cache items eligible for
    // flushing.
    while (_flush(false));
}

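// Called by SharedObject reference counting when the last hard (external)
// reference to a cached value is removed: updates the in-use count and gives
// eviction a chance to run.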
void UnifiedCache::handleUnreferencedObject() const {
    std::lock_guard<std::mutex> lock(*gCacheMutex);
    --fNumValuesInUse;
    _runEvictionSlice();
}

#ifdef UNIFIED_CACHE_DEBUG
#include <stdio.h>

void UnifiedCache::dump() {
    UErrorCode status = U_ZERO_ERROR;
    const UnifiedCache *cache = getInstance(status);
    if (U_FAILURE(status)) {
        fprintf(stderr, "Unified Cache: Error fetching cache.\n");
        return;
    }
    cache->dumpContents();
}

void UnifiedCache::dumpContents() const {
    std::lock_guard<std::mutex> lock(*gCacheMutex);
    _dumpContents();
}

// Dumps content of cache.
// On entry, gCacheMutex must be held.
// On exit, cache contents dumped to stderr.
void UnifiedCache::_dumpContents() const {
    int32_t pos = UHASH_FIRST;
    const UHashElement *element = uhash_nextElement(fHashtable, &pos);
    char buffer[256];
    int32_t cnt = 0;
    for (; element != nullptr; element = uhash_nextElement(fHashtable, &pos)) {
        const SharedObject *sharedObject =
                (const SharedObject *) element->value.pointer;
        const CacheKeyBase *key =
                (const CacheKeyBase *) element->key.pointer;
        if (sharedObject->hasHardReferences()) {
            ++cnt;
            fprintf(
                    stderr,
                    "Unified Cache: Key '%s', error %d, value %p, total refcount %d, soft refcount %d\n",
                    key->writeDescription(buffer, 256),
                    key->fCreationStatus,
                    sharedObject == fNoValue ? nullptr : sharedObject,
                    sharedObject->getRefCount(),
                    sharedObject->getSoftRefCount());
        }
    }
    fprintf(stderr, "Unified Cache: %d out of a total of %d still have hard references\n", cnt, uhash_count(fHashtable));
}
#endif

UnifiedCache::~UnifiedCache() {
    // Try our best to clean up first.
    flush();
    {
        // Now all that should be left in the cache are entries that refer to
        // each other and entries with hard references from outside the cache.
        // Nothing we can do about these, so proceed to wipe out the cache.
        std::lock_guard<std::mutex> lock(*gCacheMutex);
        _flush(true);
    }
    uhash_close(fHashtable);
    fHashtable = nullptr;
    delete fNoValue;
    fNoValue = nullptr;
}

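// Returns the next element in eviction-scan order, resuming from fEvictPos and
// wrapping around to the beginning of the hash table when the end is reached.
// Returns nullptr only if the cache is empty.
// On entry, gCacheMutex must be held.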
const UHashElement *
UnifiedCache::_nextElement() const {
    const UHashElement *element = uhash_nextElement(fHashtable, &fEvictPos);
    if (element == nullptr) {
        fEvictPos = UHASH_FIRST;
        return uhash_nextElement(fHashtable, &fEvictPos);
    }
    return element;
}

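// Makes one pass over the cache, removing entries that are evictable (or all
// entries, if all is true). Returns true if at least one entry was removed;
// callers that want a complete flush loop until it returns false (see flush()).
// On entry, gCacheMutex must be held.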
UBool UnifiedCache::_flush(UBool all) const {
    UBool result = false;
    int32_t origSize = uhash_count(fHashtable);
    for (int32_t i = 0; i < origSize; ++i) {
        const UHashElement *element = _nextElement();
        if (element == nullptr) {
            break;
        }
        if (all || _isEvictable(element)) {
            const SharedObject *sharedObject =
                    (const SharedObject *) element->value.pointer;
            U_ASSERT(sharedObject->cachePtr == this);
            uhash_removeElement(fHashtable, element);
            removeSoftRef(sharedObject);    // Deletes the sharedObject when softRefCount goes to zero.
            result = true;
        }
    }
    return result;
}

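// Computes how many entries should be evicted right now: unused entries
// (total minus in-use) are allowed up to
// max(fMaxUnused, fNumValuesInUse * fMaxPercentageOfInUse / 100);
// the excess, if any, is the count returned.
// On entry, gCacheMutex must be held.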
int32_t UnifiedCache::_computeCountOfItemsToEvict() const {
    int32_t totalItems = uhash_count(fHashtable);
    int32_t evictableItems = totalItems - fNumValuesInUse;

    int32_t unusedLimitByPercentage = fNumValuesInUse * fMaxPercentageOfInUse / 100;
    int32_t unusedLimit = std::max(unusedLimitByPercentage, fMaxUnused);
    int32_t countOfItemsToEvict = std::max(0, evictableItems - unusedLimit);
    return countOfItemsToEvict;
}

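// Evicts entries that exceed the configured limit, examining at most
// MAX_EVICT_ITERATIONS elements per call so that no single cache operation
// pays the full eviction cost.
// On entry, gCacheMutex must be held.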
void UnifiedCache::_runEvictionSlice() const {
    int32_t maxItemsToEvict = _computeCountOfItemsToEvict();
    if (maxItemsToEvict <= 0) {
        return;
    }
    for (int32_t i = 0; i < MAX_EVICT_ITERATIONS; ++i) {
        const UHashElement *element = _nextElement();
        if (element == nullptr) {
            break;
        }
        if (_isEvictable(element)) {
            const SharedObject *sharedObject =
                    (const SharedObject *) element->value.pointer;
            uhash_removeElement(fHashtable, element);
            removeSoftRef(sharedObject);    // Deletes the sharedObject when softRefCount goes to zero.
            ++fAutoEvictedCount;
            if (--maxItemsToEvict == 0) {
                break;
            }
        }
    }
}

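// Adds a new (key, value) pair to the hash table. The key is cloned; the clone
// is adopted by the table and records creationStatus. If value does not yet
// belong to any cache, it is registered as a primary entry. On success the
// cache takes a soft reference to value.
// On entry, gCacheMutex must be held and the table must not already contain key.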
void UnifiedCache::_putNew(
        const CacheKeyBase &key,
        const SharedObject *value,
        const UErrorCode creationStatus,
        UErrorCode &status) const {
    if (U_FAILURE(status)) {
        return;
    }
    CacheKeyBase *keyToAdopt = key.clone();
    if (keyToAdopt == nullptr) {
        status = U_MEMORY_ALLOCATION_ERROR;
        return;
    }
    keyToAdopt->fCreationStatus = creationStatus;
    if (value->softRefCount == 0) {
        _registerPrimary(keyToAdopt, value);
    }
    void *oldValue = uhash_put(fHashtable, keyToAdopt, (void *) value, &status);
    U_ASSERT(oldValue == nullptr);
    (void)oldValue;
    if (U_SUCCESS(status)) {
        value->softRefCount++;
    }
}

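// Stores value under key, or fetches the existing value if another thread got
// there first: if the table already holds a finished entry for key, value is
// replaced with that entry; if it holds an in-progress placeholder, the
// placeholder is replaced by value; otherwise a new entry is added on a
// best-effort basis. Finishes with an eviction slice.
// On entry, gCacheMutex must not be held.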
void UnifiedCache::_putIfAbsentAndGet(
        const CacheKeyBase &key,
        const SharedObject *&value,
        UErrorCode &status) const {
    std::lock_guard<std::mutex> lock(*gCacheMutex);
    const UHashElement *element = uhash_find(fHashtable, &key);
    if (element != nullptr && !_inProgress(element)) {
        _fetch(element, value, status);
        return;
    }
    if (element == nullptr) {
        UErrorCode putError = U_ZERO_ERROR;
        // Add the new entry on a best-effort basis only; a failure here is ignored.
        _putNew(key, value, status, putError);
    } else {
        _put(element, value, status);
    }
    // Run an eviction slice. This will run even if we added a primary entry,
    // which doesn't increase the unused count, but that is still OK.
    _runEvictionSlice();
}


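// Polls the cache for key. If a finished entry (success or failure) exists,
// stores it in value and returns true; while another thread's in-progress
// placeholder is present, waits on gInProgressValueAddedCond. If there is no
// entry at all, adds an in-progress placeholder and returns false, signalling
// that the caller must create the value itself.
// On entry, value must be nullptr, status must be U_ZERO_ERROR, and
// gCacheMutex must not be held.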
UBool UnifiedCache::_poll(
        const CacheKeyBase &key,
        const SharedObject *&value,
        UErrorCode &status) const {
    U_ASSERT(value == nullptr);
    U_ASSERT(status == U_ZERO_ERROR);
    std::unique_lock<std::mutex> lock(*gCacheMutex);
    const UHashElement *element = uhash_find(fHashtable, &key);

    // If the hash table contains an inProgress placeholder entry for this key,
    // this means that another thread is currently constructing the value object.
    // Loop, waiting for that construction to complete.
    while (element != nullptr && _inProgress(element)) {
        gInProgressValueAddedCond->wait(lock);
        element = uhash_find(fHashtable, &key);
    }

    // If the hash table contains an entry for the key,
    // fetch out the contents and return them.
    if (element != nullptr) {
        _fetch(element, value, status);
        return true;
    }

    // The hash table contained nothing for this key.
    // Insert an inProgress placeholder value.
    // Our caller will create the final value and update the hash table.
    _putNew(key, fNoValue, U_ZERO_ERROR, status);
    return false;
}

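// Fetches the value for key, creating it if necessary. If another thread is
// already creating it, _poll() waits for that result; otherwise this thread
// calls key.createObject() and publishes the result (or fNoValue plus an error
// status) via _putIfAbsentAndGet(). On return, value either holds a hard
// reference to the cached object or is nullptr with status set to the
// creation error.
// On entry, value must be nullptr, status must be U_ZERO_ERROR, and
// gCacheMutex must not be held.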
void UnifiedCache::_get(
        const CacheKeyBase &key,
        const SharedObject *&value,
        const void *creationContext,
        UErrorCode &status) const {
    U_ASSERT(value == nullptr);
    U_ASSERT(status == U_ZERO_ERROR);
    if (_poll(key, value, status)) {
        if (value == fNoValue) {
            SharedObject::clearPtr(value);
        }
        return;
    }
    if (U_FAILURE(status)) {
        return;
    }
    value = key.createObject(creationContext, status);
    U_ASSERT(value == nullptr || value->hasHardReferences());
    U_ASSERT(value != nullptr || status != U_ZERO_ERROR);
    if (value == nullptr) {
        SharedObject::copyPtr(fNoValue, value);
    }
    _putIfAbsentAndGet(key, value, status);
    if (value == fNoValue) {
        SharedObject::clearPtr(value);
    }
}

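// Makes value a primary cache entry: theKey becomes its primary key and value
// is tied to this cache so that SharedObject reference counting notifies the
// cache when the last hard reference goes away.
// On entry, gCacheMutex must be held.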
void UnifiedCache::_registerPrimary(
        const CacheKeyBase *theKey, const SharedObject *value) const {
    theKey->fIsPrimary = true;
    value->cachePtr = this;
    ++fNumValuesTotal;
    ++fNumValuesInUse;
}

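// Replaces the in-progress placeholder stored at element with value, records
// status as the creation result in the key, and wakes any threads waiting in
// _poll().
// On entry, gCacheMutex must be held.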
void UnifiedCache::_put(
        const UHashElement *element,
        const SharedObject *value,
        const UErrorCode status) const {
    U_ASSERT(_inProgress(element));
    const CacheKeyBase *theKey = (const CacheKeyBase *) element->key.pointer;
    const SharedObject *oldValue = (const SharedObject *) element->value.pointer;
    theKey->fCreationStatus = status;
    if (value->softRefCount == 0) {
        _registerPrimary(theKey, value);
    }
    value->softRefCount++;
    UHashElement *ptr = const_cast<UHashElement *>(element);
    ptr->value.pointer = (void *) value;
    U_ASSERT(oldValue == fNoValue);
    removeSoftRef(oldValue);

    // Tell waiting threads that the in-progress status has been replaced with
    // either a valid value or an error.
    gInProgressValueAddedCond->notify_all();
}

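// Copies the cached value and its creation status out of element into
// value/status, releasing the caller's previous hard reference (if any) and
// adding one to the fetched object. Uses addHardRef()/removeHardRef() rather
// than SharedObject::addRef()/removeRef() because gCacheMutex is already held.
// On entry, gCacheMutex must be held.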
void UnifiedCache::_fetch(
        const UHashElement *element,
        const SharedObject *&value,
        UErrorCode &status) const {
    const CacheKeyBase *theKey = (const CacheKeyBase *) element->key.pointer;
    status = theKey->fCreationStatus;

    // Since we have the cache lock, calling regular SharedObject add/removeRef
    // could cause us to deadlock on ourselves since they may need to lock
    // the cache mutex.
    removeHardRef(value);
    value = static_cast<const SharedObject *>(element->value.pointer);
    addHardRef(value);
}


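// The two _inProgress() overloads report whether an entry is still the
// in-progress placeholder (fNoValue with U_ZERO_ERROR), meaning another thread
// has not yet finished constructing the real value.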
UBool UnifiedCache::_inProgress(const UHashElement* element) const {
    UErrorCode status = U_ZERO_ERROR;
    const SharedObject * value = nullptr;
    _fetch(element, value, status);
    UBool result = _inProgress(value, status);
    removeHardRef(value);
    return result;
}

UBool UnifiedCache::_inProgress(
        const SharedObject* theValue, UErrorCode creationStatus) const {
    return (theValue == fNoValue && creationStatus == U_ZERO_ERROR);
}

UBool UnifiedCache::_isEvictable(const UHashElement *element) const
{
    const CacheKeyBase *theKey = (const CacheKeyBase *) element->key.pointer;
    const SharedObject *theValue =
            (const SharedObject *) element->value.pointer;

    // Entries that are under construction are never evictable
    if (_inProgress(theValue, theKey->fCreationStatus)) {
        return false;
    }

    // We can evict entries that are either not a primary or have just
    // one reference (The one reference being from the cache itself).
    return (!theKey->fIsPrimary || (theValue->softRefCount == 1 && theValue->noHardReferences()));
}

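// Drops one cache-internal (soft) reference to value. When the soft reference
// count reaches zero the value is deleted, unless clients still hold hard
// references, in which case responsibility for deletion is handed to
// SharedObject::removeRef() by clearing value->cachePtr.
// On entry, gCacheMutex must be held.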
void UnifiedCache::removeSoftRef(const SharedObject *value) const {
    U_ASSERT(value->cachePtr == this);
    U_ASSERT(value->softRefCount > 0);
    if (--value->softRefCount == 0) {
        --fNumValuesTotal;
        if (value->noHardReferences()) {
            delete value;
        } else {
            // This path only happens from _flush(true), which only happens from
            // the UnifiedCache destructor. Nulling out value->cachePtr changes
            // the behavior of value->removeRef(), causing the deletion to be
            // done there.
            value->cachePtr = nullptr;
        }
    }
}

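// removeHardRef()/addHardRef() adjust a value's client (hard) reference count
// while gCacheMutex is held, keeping fNumValuesInUse in sync when the count
// crosses zero. Both tolerate a nullptr value and return the updated count
// (0 for nullptr).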
int32_t UnifiedCache::removeHardRef(const SharedObject *value) const {
    int refCount = 0;
    if (value) {
        refCount = umtx_atomic_dec(&value->hardRefCount);
        U_ASSERT(refCount >= 0);
        if (refCount == 0) {
            --fNumValuesInUse;
        }
    }
    return refCount;
}

int32_t UnifiedCache::addHardRef(const SharedObject *value) const {
    int refCount = 0;
    if (value) {
        refCount = umtx_atomic_inc(&value->hardRefCount);
        U_ASSERT(refCount >= 1);
        if (refCount == 1) {
            fNumValuesInUse++;
        }
    }
    return refCount;
}

U_NAMESPACE_END
