/*
    Copyright 2005-2013 Intel Corporation. All Rights Reserved.

    This file is part of Threading Building Blocks.

    Threading Building Blocks is free software; you can redistribute it
    and/or modify it under the terms of the GNU General Public License
    version 2 as published by the Free Software Foundation.

    Threading Building Blocks is distributed in the hope that it will be
    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with Threading Building Blocks; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

    As a special exception, you may use this file as part of a free software
    library without restriction. Specifically, if other files instantiate
    templates or use macros or inline functions from this file, or you compile
    this file and link it with other files to produce an executable, this
    file does not by itself cause the resulting executable to be covered by
    the GNU General Public License. This exception does not however
    invalidate any other reasons why the executable file might be covered by
    the GNU General Public License.
*/

#ifndef __TBB_mutex_padding_H
#define __TBB_mutex_padding_H

// Wrapper that pads a mutex so it sits alone on a cache line, without requiring that it
// be allocated from an aligned pool. Because padded mutexes may be defined anywhere, the
// wrapper must span at least two cache lines.
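//
// Note: this header adds no #includes of its own; it assumes the including TBB header has
// already made size_t, uintptr_t, placement new (<new>), and tbb::internal::no_copy
// available (typically via tbb_stddef.h).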

namespace tbb {
namespace interface7 {
namespace internal {

static const size_t cache_line_size = 64;
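// 64 bytes matches the cache line size of current mainstream x86 processors; it is a
// compile-time assumption here, not something detected at run time.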

// Pad a mutex to occupy a number of full cache lines sufficient to avoid false sharing
// with other data; space overhead is up to 2*cache_line_size-1 bytes.
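// Worked example (illustrative only): with cache_line_size == 64 and an 8-byte mutex,
// my_pad below is ((8+63)/64+1)*64 == 128 bytes, i.e. two full cache lines, which leaves
// room for one 64-byte-aligned copy of the mutex no matter where the padded_mutex object
// itself happens to start.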
template<typename Mutex>
class padded_mutex {
    typedef long pad_type;
    pad_type my_pad[((sizeof(Mutex)+cache_line_size-1)/cache_line_size+1)*cache_line_size/sizeof(pad_type)];

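    // Address of the wrapped mutex: round `this` up to the next cache line boundary.
    // The extra cache line reserved in my_pad guarantees the result still lies within
    // the pad, so the mutex never shares a cache line with neighboring data.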
    Mutex *impl() { return (Mutex *)((uintptr_t(this)|(cache_line_size-1))+1);}

public:
    static const bool is_rw_mutex = Mutex::is_rw_mutex;
    static const bool is_recursive_mutex = Mutex::is_recursive_mutex;
    static const bool is_fair_mutex = Mutex::is_fair_mutex;

    padded_mutex() { new(impl()) Mutex(); }   // placement-construct the real mutex in the aligned slot
    ~padded_mutex() { impl()->~Mutex(); }     // destroy it in place; its storage is part of my_pad

    //! Represents acquisition of a mutex.
    class scoped_lock : tbb::internal::no_copy {
        typename Mutex::scoped_lock my_scoped_lock;
    public:
        scoped_lock() : my_scoped_lock() {}
        scoped_lock( padded_mutex& m ) : my_scoped_lock(*m.impl()) { }
        ~scoped_lock() { }

        void acquire( padded_mutex& m ) { my_scoped_lock.acquire(*m.impl()); }
        bool try_acquire( padded_mutex& m ) { return my_scoped_lock.try_acquire(*m.impl()); }
        void release() { my_scoped_lock.release(); }
    };
};
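
// Minimal usage sketch (illustrative only; padded_mutex is an internal helper, and the
// choice of tbb::spin_mutex below is an assumption, not part of this header):
//
//     #include "tbb/spin_mutex.h"
//     typedef tbb::interface7::internal::padded_mutex<tbb::spin_mutex> my_mutex_t;
//
//     my_mutex_t my_mutex;
//     {
//         my_mutex_t::scoped_lock guard(my_mutex);   // acquires on construction
//         // ... critical section ...
//     }                                              // releases when guard is destroyed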

} // namespace internal
} // namespace interface7
} // namespace tbb

#endif /* __TBB_mutex_padding_H */