/*
    Copyright (c) 2005-2019 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_concurrent_lru_cache_H
#define __TBB_concurrent_lru_cache_H

#if ! TBB_PREVIEW_CONCURRENT_LRU_CACHE
    #error Set TBB_PREVIEW_CONCURRENT_LRU_CACHE to include concurrent_lru_cache.h
#endif

#include "tbb_stddef.h"

#include <map>
#include <list>
#include <algorithm> // std::find
#if __TBB_CPP11_RVALUE_REF_PRESENT
#include <utility> // std::move
#endif

#include "atomic.h"
#include "internal/_aggregator_impl.h"

namespace tbb{
namespace interface6 {

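// concurrent_lru_cache computes values on demand via a user-supplied functor
// and keeps up to number_of_lru_history_items most recently used, currently
// unused values alive. A usage sketch (the loader function and the concrete
// key/value types below are illustrative placeholders, not part of the
// interface):
//
//     #define TBB_PREVIEW_CONCURRENT_LRU_CACHE 1
//     #include "tbb/concurrent_lru_cache.h"
//
//     int load(int key) { return key * key; }    // hypothetical value function
//
//     typedef tbb::concurrent_lru_cache<int, int> cache_type;
//     cache_type cache(&load, /*number_of_lru_history_items=*/ 8);
//     cache_type::handle h = cache[42];   // computes or reuses the value
//     int v = h.value();                  // record is pinned while h lives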
template <typename key_type, typename value_type, typename value_functor_type = value_type (*)(key_type) >
class concurrent_lru_cache : internal::no_assign{
private:
    typedef concurrent_lru_cache self_type;
    typedef value_functor_type value_function_type;
    typedef std::size_t ref_counter_type;
    struct map_value_type;
    typedef std::map<key_type, map_value_type> map_storage_type;
    typedef std::list<typename map_storage_type::iterator> lru_list_type;
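    // Storage layout: the map owns all records; the LRU list holds map
    // iterators to *unused* records only, most recently used at the front.
    // A record currently in use is marked by my_lru_list_iterator ==
    // my_lru_list.end() and a non-zero reference count.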
    struct map_value_type {
        value_type my_value;
        ref_counter_type my_ref_counter;
        typename lru_list_type::iterator my_lru_list_iterator;
        bool my_is_ready;

        map_value_type (value_type const& a_value, ref_counter_type a_ref_counter, typename lru_list_type::iterator a_lru_list_iterator, bool a_is_ready)
            : my_value(a_value), my_ref_counter(a_ref_counter), my_lru_list_iterator(a_lru_list_iterator), my_is_ready(a_is_ready)
        {}
    };

    class handle_object;

    struct aggregator_operation;
    typedef aggregator_operation aggregated_operation_type;
    typedef tbb::internal::aggregating_functor<self_type,aggregated_operation_type> aggregator_function_type;
    friend class tbb::internal::aggregating_functor<self_type,aggregated_operation_type>;
    typedef tbb::internal::aggregator<aggregator_function_type, aggregated_operation_type> aggregator_type;
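    // All mutations of my_map_storage and my_lru_list are funneled through
    // the aggregator: threads enqueue an aggregator_operation, and a single
    // thread at a time drains the queue via handle_operations(), so the
    // serial helpers below never need a lock.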

private:
    value_function_type my_value_function;
    std::size_t const my_number_of_lru_history_items;
    map_storage_type my_map_storage;
    lru_list_type my_lru_list;
    aggregator_type my_aggregator;

public:
    typedef handle_object handle;

public:
    concurrent_lru_cache(value_function_type f, std::size_t number_of_lru_history_items)
        : my_value_function(f), my_number_of_lru_history_items(number_of_lru_history_items)
    {
        my_aggregator.initialize_handler(aggregator_function_type(this));
    }

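    // Lookup protocol: the aggregated retrieve operation reserves a map
    // record and reports whether the value still has to be computed. The
    // winning thread then runs the (possibly expensive) value function
    // outside the aggregator and publishes the result with a release store;
    // any other thread that raced on the same key spin-waits on my_is_ready.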
    handle_object operator[](key_type k){
        retrieve_aggregator_operation op(k);
        my_aggregator.execute(&op);
        if (op.is_new_value_needed()){
            op.result().second.my_value = my_value_function(k);
            __TBB_store_with_release(op.result().second.my_is_ready, true);
        }else{
            tbb::internal::spin_wait_while_eq(op.result().second.my_is_ready, false);
        }
        return handle_object(*this, op.result());
    }
private:
    void signal_end_of_usage(typename map_storage_type::reference value_ref){
        signal_end_of_usage_aggregator_operation op(value_ref);
        my_aggregator.execute(&op);
    }

private:
#if !__TBB_CPP11_RVALUE_REF_PRESENT
    struct handle_move_t:no_assign{
        concurrent_lru_cache & my_cache_ref;
        typename map_storage_type::reference my_map_record_ref;
        handle_move_t(concurrent_lru_cache & cache_ref, typename map_storage_type::reference value_ref):my_cache_ref(cache_ref),my_map_record_ref(value_ref) {}
    };
#endif
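    // handle_object is a move-only RAII guard over a cache record: while at
    // least one handle to a record exists the record is pinned and cannot be
    // evicted; destruction of the last handle re-files the record in the LRU
    // history. In C++03 mode the move is emulated via handle_move_t and the
    // free function move(). A transfer sketch (use() is a hypothetical
    // consumer, shown for illustration):
    //
    //     cache_type::handle h1 = cache[key];
    //     cache_type::handle h2 = move(h1);   // h1 becomes empty
    //     if (h2) use(h2.value());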
    class handle_object {
        concurrent_lru_cache * my_cache_pointer;
        typename map_storage_type::pointer my_map_record_ptr;
    public:
        handle_object() : my_cache_pointer(), my_map_record_ptr() {}
        handle_object(concurrent_lru_cache& cache_ref, typename map_storage_type::reference value_ref) : my_cache_pointer(&cache_ref), my_map_record_ptr(&value_ref) {}
        operator bool() const {
            return (my_cache_pointer && my_map_record_ptr);
        }
#if __TBB_CPP11_RVALUE_REF_PRESENT
        // TODO: detect double-moved objects via a special dedicated field
        handle_object(handle_object&& src) : my_cache_pointer(src.my_cache_pointer), my_map_record_ptr(src.my_map_record_ptr) {
            __TBB_ASSERT((src.my_cache_pointer && src.my_map_record_ptr) || (!src.my_cache_pointer && !src.my_map_record_ptr), "invalid state of moved-from object?");
            src.my_cache_pointer = NULL;
            src.my_map_record_ptr = NULL;
        }
        handle_object& operator=(handle_object&& src) {
            __TBB_ASSERT((src.my_cache_pointer && src.my_map_record_ptr) || (!src.my_cache_pointer && !src.my_map_record_ptr), "invalid state of moved-from object?");
            if (my_cache_pointer) {
                my_cache_pointer->signal_end_of_usage(*my_map_record_ptr);
            }
            my_cache_pointer = src.my_cache_pointer;
            my_map_record_ptr = src.my_map_record_ptr;
            src.my_cache_pointer = NULL;
            src.my_map_record_ptr = NULL;
            return *this;
        }
#else
        handle_object(handle_move_t m) : my_cache_pointer(&m.my_cache_ref), my_map_record_ptr(&m.my_map_record_ref) {}
        handle_object& operator=(handle_move_t m) {
            if (my_cache_pointer) {
                my_cache_pointer->signal_end_of_usage(*my_map_record_ptr);
            }
            my_cache_pointer = &m.my_cache_ref;
            my_map_record_ptr = &m.my_map_record_ref;
            return *this;
        }
        operator handle_move_t(){
            return move(*this);
        }
#endif // __TBB_CPP11_RVALUE_REF_PRESENT
        value_type& value(){
            __TBB_ASSERT(my_cache_pointer, "get value from an already moved object?");
            __TBB_ASSERT(my_map_record_ptr, "get value from an invalid or already moved object?");
            return my_map_record_ptr->second.my_value;
        }
        ~handle_object(){
            if (my_cache_pointer){
                my_cache_pointer->signal_end_of_usage(*my_map_record_ptr);
            }
        }
    private:
#if __TBB_CPP11_RVALUE_REF_PRESENT
        // For source compatibility with C++03
        friend handle_object&& move(handle_object& h){
            return std::move(h);
        }
#else
        friend handle_move_t move(handle_object& h){
            return handle_object::move(h);
        }
        // TODO: detect double-moved objects via a special dedicated field
        static handle_move_t move(handle_object& h){
            __TBB_ASSERT((h.my_cache_pointer && h.my_map_record_ptr) || (!h.my_cache_pointer && !h.my_map_record_ptr), "invalid state of moved-from object?");
            concurrent_lru_cache * cache_pointer = h.my_cache_pointer;
            typename map_storage_type::pointer map_record_ptr = h.my_map_record_ptr;
            h.my_cache_pointer = NULL;
            h.my_map_record_ptr = NULL;
            return handle_move_t(*cache_pointer, *map_record_ptr);
        }
#endif // __TBB_CPP11_RVALUE_REF_PRESENT
    private:
        void operator=(handle_object&);
#if __SUNPRO_CC
        // Presumably due to a compiler bug, a private copy constructor
        // breaks expressions like handle h = cache[key];
    public:
#endif
        handle_object(handle_object &);
    };
private:
    //TODO: aggregator_operation looks like a perfect match for a statically typed variant type
    struct aggregator_operation : tbb::internal::aggregated_operation<aggregator_operation>{
        enum e_op_type {op_retrieve, op_signal_end_of_usage};
        //TODO: try to use a pointer-to-function apply_visitor here
        //TODO: try virtual functions and measure the difference
        e_op_type my_operation_type;
        aggregator_operation(e_op_type operation_type): my_operation_type(operation_type) {}
        void cast_and_handle(self_type& container){
            if (my_operation_type==op_retrieve){
                static_cast<retrieve_aggregator_operation*>(this)->handle(container);
            }else{
                static_cast<signal_end_of_usage_aggregator_operation*>(this)->handle(container);
            }
        }
    };
    struct retrieve_aggregator_operation : aggregator_operation, private internal::no_assign {
        key_type my_key;
        typename map_storage_type::pointer my_result_map_record_pointer;
        bool my_is_new_value_needed;
        retrieve_aggregator_operation(key_type key):aggregator_operation(aggregator_operation::op_retrieve),my_key(key),my_is_new_value_needed(false){}
        void handle(self_type& container){
            my_result_map_record_pointer = & container.retrieve_serial(my_key, my_is_new_value_needed);
        }
        typename map_storage_type::reference result(){ return * my_result_map_record_pointer; }
        bool is_new_value_needed(){return my_is_new_value_needed;}
    };
    struct signal_end_of_usage_aggregator_operation : aggregator_operation, private internal::no_assign {
        typename map_storage_type::reference my_map_record_ref;
        signal_end_of_usage_aggregator_operation(typename map_storage_type::reference map_record_ref):aggregator_operation(aggregator_operation::op_signal_end_of_usage),my_map_record_ref(map_record_ref){}
        void handle(self_type& container){
            container.signal_end_of_usage_serial(my_map_record_ref);
        }
    };

private:
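    // Invoked by the aggregator on the draining thread: walk the batched
    // operation list, dispatch each operation, and only then release its
    // issuer by flipping the status word (the issuer may be spinning on it
    // and may destroy the operation as soon as it is signaled, hence the
    // next pointer is read before the release store).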
    void handle_operations(aggregator_operation* op_list){
        while(op_list){
            op_list->cast_and_handle(*this);
            aggregator_operation* tmp = op_list;
            op_list = op_list->next;
            tbb::internal::itt_store_word_with_release(tmp->status, uintptr_t(1));
        }
    }

private:
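    // Serial part of a lookup (runs under the aggregator): either insert a
    // not-yet-ready placeholder record for a brand-new key, or revive an
    // existing record, unlinking it from the LRU history if it was unused.
    // Either way the caller gets the record with its reference count raised.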
    typename map_storage_type::reference retrieve_serial(key_type k, bool& is_new_value_needed){
        typename map_storage_type::iterator it = my_map_storage.find(k);
        if (it == my_map_storage.end()){
            it = my_map_storage.insert(it, std::make_pair(k, map_value_type(value_type(), 0, my_lru_list.end(), false)));
            is_new_value_needed = true;
        }else {
            typename lru_list_type::iterator list_it = it->second.my_lru_list_iterator;
            if (list_it != my_lru_list.end()) {
                __TBB_ASSERT(!it->second.my_ref_counter, "an item in the LRU history should not have live references");
                // The item is going to be used, so it is no longer a candidate
                // for eviction; remove it from the LRU history.
                my_lru_list.erase(list_it);
                it->second.my_lru_list_iterator = my_lru_list.end();
            }
        }
        ++(it->second.my_ref_counter);
        return *it;
    }

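    // Serial part of releasing a handle (runs under the aggregator). When
    // the last reference goes away the record enters the LRU history; if the
    // history is full, enough of the oldest entries are evicted first to keep
    // its size at my_number_of_lru_history_items. For example, with a history
    // capacity of 8 and 8 unused records already listed, one record is
    // evicted (1 + 8 - 8) before the newly released one is filed.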
    void signal_end_of_usage_serial(typename map_storage_type::reference map_record_ref){
        typename map_storage_type::iterator it = my_map_storage.find(map_record_ref.first);
        __TBB_ASSERT(it != my_map_storage.end(), "cache should not return past-the-end iterators to the outside world");
        __TBB_ASSERT(&(*it) == &map_record_ref, "dangling reference returned to the outside world? data race?");
        __TBB_ASSERT(my_lru_list.end() == std::find(my_lru_list.begin(), my_lru_list.end(), it),
            "an object in use should not be in the list of unused objects");
        if (! --(it->second.my_ref_counter)){
            // It was the last reference, so put the item into the LRU history.
            if (my_lru_list.size() >= my_number_of_lru_history_items){
                // Evict items to make room.
                size_t number_of_elements_to_evict = 1 + my_lru_list.size() - my_number_of_lru_history_items;
                for (size_t i=0; i<number_of_elements_to_evict; ++i){
                    typename map_storage_type::iterator it_to_evict = my_lru_list.back();
                    __TBB_ASSERT(!it_to_evict->second.my_ref_counter, "an item to be evicted should not have live references");
                    my_lru_list.pop_back();
                    my_map_storage.erase(it_to_evict);
                }
            }
            my_lru_list.push_front(it);
            it->second.my_lru_list_iterator = my_lru_list.begin();
        }
    }
};
} // namespace interface6

using interface6::concurrent_lru_cache;

} // namespace tbb
#endif //__TBB_concurrent_lru_cache_H