/*
    Copyright (c) 2005-2019 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#if _MSC_VER
    #pragma warning (disable: 4503) // Suppress "decorated name length exceeded, name was truncated" warning
#endif

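// The tests below can be built against either the coarse-grained reference
// implementation from perf/ or the TBB preview concurrent_lru_cache;
// selected_raii_lru_cache_impl names whichever implementation was selected.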
#ifdef TEST_COARSE_GRAINED_LOCK_IMPLEMENTATION
    #include "../perf/coarse_grained_raii_lru_cache.h"
    #define selected_raii_lru_cache_impl coarse_grained_raii_lru_cache
#else
    #define TBB_PREVIEW_CONCURRENT_LRU_CACHE 1
    #include "tbb/concurrent_lru_cache.h"
    #define selected_raii_lru_cache_impl tbb::concurrent_lru_cache
#endif

#include "harness_test_cases_framework.h"
#include "harness.h"
#include "harness_barrier.h"

#include <utility>
#include <map> // for the map_searcher helper below

#include "tbb/task_scheduler_init.h"

namespace helpers{
    // Busy work and calibration helpers
    unsigned int one_us_iters = 345; // default value

    // If the user wants to calibrate to microseconds on a particular machine,
    // call this at the beginning of the program; it sets one_us_iters to the
    // number of iterations busy_wait needs for approximately 1 us.
//    void calibrate_busy_wait() {
//        tbb::tick_count t0, t1; // requires "tbb/tick_count.h"
//
//        t0 = tbb::tick_count::now();
//        for (volatile unsigned int i=0; i<1000000; ++i) continue;
//        t1 = tbb::tick_count::now();
//
//        // iterations per second, scaled down to iterations per microsecond
//        one_us_iters = (unsigned int)((1000000.0/(t1-t0).seconds())*0.000001);
//        printf("one_us_iters: %u\n", one_us_iters);
//    }
    void busy_wait(int us)
    {
        unsigned int iter = us*one_us_iters;
        for (volatile unsigned int i=0; i<iter; ++i) continue;
    }
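
    // A usage sketch (hypothetical; assumes calibrate_busy_wait above is
    // re-enabled and "tbb/tick_count.h" is included):
    //   int main() {
    //       helpers::calibrate_busy_wait(); // tune one_us_iters for this machine
    //       helpers::busy_wait(10);         // then spin for roughly 10 us
    //   }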
}
namespace helpers{
    template<class T> void ignore( const T& ) { }
    //TODO: add test cases for prevent_optimizing_out function
    template<typename type>
    void prevent_optimizing_out(type volatile const& s){
        volatile const type* dummy = &s;
        ignore(dummy);
    }
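
    // Usage sketch: taking the address through a volatile reference forces the
    // compiler to treat the value as observed, so the computation producing it
    // is not optimized away (compute_something below is hypothetical):
    //   int result = compute_something();
    //   helpers::prevent_optimizing_out(result);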

    struct empty_fixture{};

    template<typename argument_type>
    struct native_for_concurrent_op_repeated:NoAssign{
        typedef void (*test_function_pointer_type)(argument_type&);

        argument_type& m_counter_ref;
        test_function_pointer_type m_test_function;
        std::size_t m_repeat_number;
        native_for_concurrent_op_repeated(argument_type& counter_ref, test_function_pointer_type action, std::size_t repeat_number)
            :m_counter_ref(counter_ref), m_test_function(action), m_repeat_number(repeat_number)
        {}
        template <typename ignored_parameter_type>
        void operator()(ignored_parameter_type const&)const{
            for (std::size_t i=0; i<m_repeat_number; ++i){
                m_test_function(m_counter_ref);
            }
        }
    };

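    // Reference-counted instance counter: all copies share one heap-allocated
    // counter, so instances_count() reports how many objects currently share
    // the same logical value. The tests below use it to observe how many
    // copies a cache keeps alive.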
    template <typename counter_type = size_t>
    struct object_instances_counting_type{
        counter_type * m_p_count;
        object_instances_counting_type(): m_p_count(new counter_type){ *m_p_count = 1; } // assign instead of initializing: tbb::atomic has no constructors
        ~object_instances_counting_type(){ if (! --(*m_p_count)){ delete(m_p_count); } }
        object_instances_counting_type(object_instances_counting_type const& other): m_p_count(other.m_p_count){
            ++(*m_p_count);
        }
        object_instances_counting_type& operator=(object_instances_counting_type other){
            std::swap(this->m_p_count, other.m_p_count);
            return *this;
        }
        size_t instances_count()const {return *m_p_count;}
    };
    typedef object_instances_counting_type<> object_instances_counting_serial_type;
    typedef object_instances_counting_type<tbb::atomic<std::size_t> > object_instances_counting_concurrent_type;

    namespace object_instances_counting_type_test_cases{
        namespace serial_tests{
            TEST_CASE_WITH_FIXTURE(test_object_instances_counting_type_creation,empty_fixture){
                ASSERT(object_instances_counting_serial_type().instances_count()==1,"newly created instance by definition has instances_count equal to 1");
            }
            TEST_CASE_WITH_FIXTURE(test_object_instances_counting_type_copy,empty_fixture){
                object_instances_counting_serial_type source;
                ASSERT(object_instances_counting_serial_type(source).instances_count()==2,"copy should increase ref count");
            }
            TEST_CASE_WITH_FIXTURE(test_object_instances_counting_type_assignment,empty_fixture){
                object_instances_counting_serial_type source;
                object_instances_counting_serial_type assigned;
                assigned = source;
                ASSERT(source.instances_count()==2,"assignment should increase ref count");
                ASSERT(assigned.instances_count()==2,"assignment should increase ref count");
            }
        }
        namespace concurrent_tests{
            typedef native_for_concurrent_op_repeated<object_instances_counting_concurrent_type> native_for_concurrent_op;

            struct native_for_single_op_repeated_fixture{
                object_instances_counting_concurrent_type source;
                void run_native_for_and_assert_source_is_unique(native_for_concurrent_op::test_function_pointer_type operation, const char* msg){
                    //TODO: refactor number of threads into separate fixture
                    const size_t number_of_threads = min(4,tbb::task_scheduler_init::default_num_threads());
                    const size_t repeats_per_thread = 1000000;

                    NativeParallelFor(number_of_threads, native_for_concurrent_op(source,operation,repeats_per_thread));
                    ASSERT(source.instances_count()==1,msg);
                }
            };
            TEST_CASE_WITH_FIXTURE(test_object_instances_counting_type_copy,native_for_single_op_repeated_fixture){
                struct _{ static void copy(object_instances_counting_concurrent_type& a_source){
                    object_instances_counting_concurrent_type copy(a_source);
                    helpers::prevent_optimizing_out(copy);
                }};
                run_native_for_and_assert_source_is_unique(&_::copy,"reference counting during copy construction/destruction is not thread-safe?");
            }
            TEST_CASE_WITH_FIXTURE(test_object_instances_counting_type_assignment,native_for_single_op_repeated_fixture){
                struct _{ static void assign(object_instances_counting_concurrent_type& a_source){
                    object_instances_counting_concurrent_type assigned;
                    assigned = a_source;
                    helpers::prevent_optimizing_out(assigned);
                }};
                run_native_for_and_assert_source_is_unique(&_::assign,"reference counting during assignment/destruction is not thread-safe?");
            }
        }
    }
}

struct get_lru_cache_type{

    template< typename parameter1, typename parameter2, typename parameter3=void>
    struct apply{
        typedef selected_raii_lru_cache_impl<parameter1,parameter2,parameter3> type;
    };
    template< typename parameter1, typename parameter2>
    struct apply<parameter1,parameter2,void>{
        typedef selected_raii_lru_cache_impl<parameter1,parameter2> type;
    };

};
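
// A usage sketch for the metafunction above: the two-parameter form relies on
// the cache's default value_function signature, while the third parameter
// supplies a custom functor type (my_value_functor below is hypothetical):
//   typedef get_lru_cache_type::apply<int,int>::type                  plain_cache_t;
//   typedef get_lru_cache_type::apply<int,int,my_value_functor>::type custom_cache_t;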

// these includes are needed for test_task_handle_mv_sem*
#include <vector>
#include <string>
#include <functional>

namespace serial_tests{
    using namespace helpers;
    namespace usability{
        namespace compilation_only{
            TEST_CASE_WITH_FIXTURE(test_creation_and_use_interface,empty_fixture){
                struct dummy_function{static int _(int key){return key;}};
                typedef get_lru_cache_type::apply<int,int>::type cache_type;
                size_t number_of_lru_history_items = 8;
                cache_type cache((&dummy_function::_),(number_of_lru_history_items));
                int dummy_key=0;
                cache_type::handle h = cache[dummy_key];
                int value = h.value();
                (void)value;
            }
        }
        namespace behaviour {
            namespace helpers {
                template <size_t id> struct tag {};
                template< typename tag, typename value_and_key_type>
                struct call_counting_function {
                    static int calls_count;
                    static value_and_key_type _(value_and_key_type key) {
                        ++calls_count;
                        return key;
                    }
                };
                template< typename tag, typename value_and_key_type>
                int call_counting_function<tag, value_and_key_type>::calls_count = 0;
            }

            using std::string;
            struct mv_sem_fixture {
                struct item_init{
                    static string init(string key) {
                        return key;
                    }
                };
                typedef tbb::concurrent_lru_cache<string, string> cache_type;
                typedef cache_type::handle handle_type;
                cache_type cache;
                mv_sem_fixture() : cache((&item_init::init), 1) {}

                handle_type default_ctor_check; // verifies that handles are default-constructible
            };

            TEST_CASE_WITH_FIXTURE(test_task_handle_mv_sem, mv_sem_fixture) {
                //default construction and init from a temporary must compile
                handle_type handle;
                handle_type foobar = handle_type();

                //c++03 : handle_move_t assignment
                handle = cache["handle"];

                //c++03 : init ctor from handle_move_t
                handle_type foo = cache["bar"];

                //c++03 : init ctor from handle_move_t; unqualified move() is found via ADL
                handle_type handle1(move(handle));

                //c++03 : handle_move_t assignment via move()
                handle = move(handle1);

                ASSERT(!handle_type(), "user-defined to-bool conversion does not work");
                ASSERT(handle, "user-defined to-bool conversion does not work");

                handle = handle_type();
            }

            TEST_CASE_WITH_FIXTURE(test_task_handle_mv_sem_certain_case, mv_sem_fixture) {
                // handle_object cannot be used as a std::vector element type in
                // C++03, because container elements must be CopyAssignable and
                // CopyConstructible, while handles are movable only
#if __TBB_CPP11_RVALUE_REF_PRESENT
                // retaining a handle_object keeps an item in the cache, without aging, for as long as the handle is alive
                handle_type sheep = cache["sheep"];
                handle_type horse = cache["horse"];
                handle_type bull = cache["bull"];

                std::vector<handle_type> animals;
                animals.reserve(5);
                animals.emplace_back(std::move(sheep));
                animals.emplace_back(std::move(horse));
                animals[0] = std::move(bull);
                // after resize() the vector is filled with default-constructed handles
                // holding null pointers to both the cached item and the owning cache
                animals.resize(10);
#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */
            }

            TEST_CASE_WITH_FIXTURE(test_cache_returns_only_values_from_value_function,empty_fixture){
                struct dummy_function{static int _(int /*key*/){return 0xDEADBEEF;}};
                typedef get_lru_cache_type::apply<int,int>::type cache_type;
                size_t number_of_lru_history_items = 8;
                int dummy_key=1;
                cache_type cache((&dummy_function::_),(number_of_lru_history_items));
                ASSERT(dummy_function::_(dummy_key)==cache[dummy_key].value(),"cache operator[] must return only values obtained from value_function");
            }

            TEST_CASE_WITH_FIXTURE(test_value_function_called_only_on_cache_miss,empty_fixture){
                typedef helpers::tag<__LINE__> tag;
                typedef helpers::call_counting_function<tag,int> function;
                typedef get_lru_cache_type::apply<int,int>::type cache_type;
                size_t number_of_lru_history_items = 8;
                cache_type cache((&function::_),(number_of_lru_history_items));

                int dummy_key=0;
                cache[dummy_key];
                cache[dummy_key];
                ASSERT(function::calls_count==1,"value function should be called only on a cache miss");
            }
        }
        namespace helpers{
            using ::helpers::object_instances_counting_serial_type;
        }
        namespace helpers{
            template<typename value_type>
            struct cloning_function:NoAssign{
                value_type& m_ref_original;
                cloning_function(value_type& ref_original):m_ref_original(ref_original){}
                template<typename key_type>
                value_type operator()(key_type)const{ return m_ref_original;}
            };
        }
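
        // Fixture wiring: every cache miss makes cloning_function return a new
        // copy of `source`, so source.instances_count() tells how many copies
        // the cache currently keeps alive.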
        struct instance_counting_fixture{
            static const size_t number_of_lru_history_items = 8;

            typedef helpers::cloning_function<helpers::object_instances_counting_serial_type> cloner_type;
            typedef get_lru_cache_type::apply<size_t,helpers::object_instances_counting_serial_type,cloner_type>::type cache_type;
            helpers::object_instances_counting_serial_type source;
            cloner_type cloner;
            cache_type cache;

            instance_counting_fixture():cloner(source),cache(cloner,number_of_lru_history_items){}
        };

        TEST_CASE_WITH_FIXTURE(test_cache_stores_unused_objects,instance_counting_fixture){
            for (size_t i=0;i<number_of_lru_history_items;++i){
                cache[i];
            }
            ASSERT(source.instances_count()>1,"cache should store some unused objects");
        }

        TEST_CASE_WITH_FIXTURE(test_cache_stores_no_more_than_X_number_of_unused_objects,instance_counting_fixture){
            for (size_t i=0;i<number_of_lru_history_items+1;++i){
                cache[i];
            }
            ASSERT(source.instances_count()==number_of_lru_history_items+1,"cache should limit the number of stored unused objects to the number passed to the constructor");
        }

        namespace helpers{
            template< typename key_type, typename value_type>
            struct map_searcher:NoAssign{
                typedef std::map<key_type,value_type> map_type;
                map_type & m_map_ref;
                map_searcher(map_type & map_ref): m_map_ref(map_ref) {}
                value_type& operator()(key_type k){
                    typename map_type::iterator it = m_map_ref.find(k);
                    if (it==m_map_ref.end()){
                        it = m_map_ref.insert(it,std::make_pair(k,value_type()));
                    }
                    return it->second;
                }
            };
        }

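        // is_evicted() exploits the shared counter: an instance count of 1 means
        // only the external map still holds the object, i.e. the cache has
        // dropped its copy.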
        struct filled_instance_counting_fixture_with_external_map{
            static const size_t number_of_lru_history_items = 8;

            typedef helpers::map_searcher<size_t,helpers::object_instances_counting_serial_type> map_searcher_type;
            typedef map_searcher_type::map_type objects_map_type;
            typedef get_lru_cache_type::apply<size_t,helpers::object_instances_counting_serial_type,map_searcher_type>::type cache_type;
            map_searcher_type::map_type objects_map;
            cache_type cache;
            filled_instance_counting_fixture_with_external_map():cache(map_searcher_type(objects_map),number_of_lru_history_items){}
            bool is_evicted(size_t k){
                objects_map_type::iterator it = objects_map.find(k);
                ASSERT(it!=objects_map.end(),"no value for key - error in test logic?");
                return it->second.instances_count()==1;
            }
            void fill_up_cache(size_t lower_bound, size_t upper_bound){
                for (size_t i=lower_bound;i<upper_bound;++i){
                    cache[i];
                }
            }
        };

        TEST_CASE_WITH_FIXTURE(test_cache_should_evict_unused_objects_lru_order,filled_instance_counting_fixture_with_external_map){
            ASSERT(number_of_lru_history_items > 2,"incorrect test setup");
            fill_up_cache(0,number_of_lru_history_items);
            //touch the first element so it becomes the most recently used
            cache[0];
            //cause eviction
            cache[number_of_lru_history_items];
            ASSERT(is_evicted(1) && !is_evicted(0),"cache should evict items in LRU order");
        }

        TEST_CASE_WITH_FIXTURE(test_live_handle_object_prevents_item_from_eviction,filled_instance_counting_fixture_with_external_map){
            cache_type::handle h = cache[0];
            //cause eviction
            fill_up_cache(1,number_of_lru_history_items+2);
            ASSERT(is_evicted(1) && !is_evicted(0),"cache should not evict items in use");
        }
        TEST_CASE_WITH_FIXTURE(test_live_handle_object_is_ref_counted,filled_instance_counting_fixture_with_external_map){
            cache_type::handle h = cache[0];
            {
                cache_type::handle h1 = cache[0];
            }
            //cause eviction
            fill_up_cache(1,number_of_lru_history_items+2);
            ASSERT(is_evicted(1) && !is_evicted(0),"cache should not evict items in use");
        }
    }
}


namespace concurrency_tests{
    namespace helpers{
        using namespace ::helpers;
    }
    namespace helpers{
        //key_type must be convertible to an array index
        template< typename key_type, typename value_type, size_t array_size>
        struct array_searcher:NoAssign{
            typedef value_type array_type[array_size];
            array_type const& m_array_ref;
            array_searcher(array_type const& array_ref): m_array_ref(array_ref) {}
            const value_type& operator()(key_type k)const{
                size_t index = k;
                ASSERT(index < array_size,"incorrect test setup");
                return m_array_ref[index];
            }
        };
    }

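    // Concurrent counterpart of the map-based fixture above: values live in a
    // fixed external array and use an atomic instance counter, so is_evicted()
    // can be queried while multiple threads access the cache.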
    struct filled_instance_counting_fixture_with_external_array{
        static const size_t number_of_lru_history_items = 8;
        static const size_t array_size = 16*number_of_lru_history_items;

        typedef helpers::array_searcher<size_t,helpers::object_instances_counting_concurrent_type,array_size> array_searcher_type;
        typedef array_searcher_type::array_type objects_array_type;
        typedef get_lru_cache_type::apply<size_t,helpers::object_instances_counting_concurrent_type,array_searcher_type>::type cache_type;
        array_searcher_type::array_type objects_array;
        cache_type cache;
        filled_instance_counting_fixture_with_external_array():cache(array_searcher_type(objects_array),number_of_lru_history_items){}
        bool is_evicted(size_t k)const{
            return array_searcher_type(objects_array)(k).instances_count()==1;
        }
        void fill_up_cache(size_t lower_bound, size_t upper_bound){
            for (size_t i=lower_bound;i<upper_bound;++i){
                cache[i];
            }
        }
        size_t number_of_non_evicted_from_cache()const{
            size_t result=0;
            for (size_t i=0; i<array_size; ++i){
                if (!this->is_evicted(i)){
                    ++result;
                }
            }
            return result;
        }
    };


    //TODO: make this more reproducible
    //TODO: split this test case into two parts
    TEST_CASE_WITH_FIXTURE(correctness_of_braces_and_handle_destructor,filled_instance_counting_fixture_with_external_array){
        typedef correctness_of_braces_and_handle_destructor self_type;
        struct _{static void use_cache(self_type& tc){
            for (size_t i=0;i<array_size;++i){
                cache_type::handle h=tc.cache[i];
                helpers::prevent_optimizing_out(h.value());
            }
        }};
        static const size_t repeat_number = 2;
        static const size_t number_of_threads = 4 * tbb::task_scheduler_init::default_num_threads(); //4x oversubscription
        static const size_t repeats_per_thread = 4;

        for (size_t i=0; i < repeat_number; i++){
            NativeParallelFor(number_of_threads,helpers::native_for_concurrent_op_repeated<self_type>(*this,&_::use_cache,repeats_per_thread));
            fill_up_cache(0,array_size);
            ASSERT(number_of_non_evicted_from_cache()==number_of_lru_history_items,"thread safety is broken for cache");
        }
    }
}