1 | /* |
2 | Copyright (c) 2005-2019 Intel Corporation |
3 | |
4 | Licensed under the Apache License, Version 2.0 (the "License"); |
5 | you may not use this file except in compliance with the License. |
6 | You may obtain a copy of the License at |
7 | |
8 | http://www.apache.org/licenses/LICENSE-2.0 |
9 | |
10 | Unless required by applicable law or agreed to in writing, software |
11 | distributed under the License is distributed on an "AS IS" BASIS, |
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | See the License for the specific language governing permissions and |
14 | limitations under the License. |
15 | */ |
16 | |
17 | #ifndef _TBB_mailbox_H |
18 | #define _TBB_mailbox_H |
19 | |
20 | #include "tbb/tbb_stddef.h" |
21 | #include "tbb/cache_aligned_allocator.h" |
22 | |
23 | #include "scheduler_common.h" |
24 | #include "tbb/atomic.h" |
25 | |
26 | namespace tbb { |
27 | namespace internal { |
28 | |
29 | struct task_proxy : public task { |
30 | static const intptr_t pool_bit = 1<<0; |
31 | static const intptr_t mailbox_bit = 1<<1; |
32 | static const intptr_t location_mask = pool_bit | mailbox_bit; |
33 | /* All but two low-order bits represent a (task*). |
34 | Two low-order bits mean: |
35 | 1 = proxy is/was/will be in task pool |
36 | 2 = proxy is/was/will be in mailbox */ |
37 | intptr_t task_and_tag; |
38 | |
39 | //! Pointer to next task_proxy in a mailbox |
40 | task_proxy *__TBB_atomic next_in_mailbox; |
41 | |
42 | //! Mailbox to which this was mailed. |
43 | mail_outbox* outbox; |
44 | |
45 | //! True if the proxy is stored both in its sender's pool and in the destination mailbox. |
46 | static bool is_shared ( intptr_t tat ) { |
47 | return (tat & location_mask) == location_mask; |
48 | } |
49 | |
50 | //! Returns a pointer to the encapsulated task or NULL. |
51 | static task* task_ptr ( intptr_t tat ) { |
52 | return (task*)(tat & ~location_mask); |
53 | } |
54 | |
55 | //! Returns a pointer to the encapsulated task or NULL, and frees proxy if necessary. |
56 | template<intptr_t from_bit> |
57 | inline task* () { |
58 | __TBB_ASSERT( prefix().extra_state == es_task_proxy, "Normal task misinterpreted as a proxy?" ); |
59 | intptr_t tat = __TBB_load_with_acquire(task_and_tag); |
60 | __TBB_ASSERT( tat == from_bit || (is_shared(tat) && task_ptr(tat)), |
61 | "Proxy's tag cannot specify both locations if the proxy " |
62 | "was retrieved from one of its original locations" ); |
63 | if ( tat != from_bit ) { |
64 | const intptr_t cleaner_bit = location_mask & ~from_bit; |
65 | // Attempt to transition the proxy to the "empty" state with |
66 | // cleaner_bit specifying entity responsible for its eventual freeing. |
67 | // Explicit cast to void* is to work around a seeming ICC 11.1 bug. |
68 | if ( as_atomic(task_and_tag).compare_and_swap(cleaner_bit, tat) == tat ) { |
69 | // Successfully grabbed the task, and left new owner with the job of freeing the proxy |
70 | return task_ptr(tat); |
71 | } |
72 | } |
73 | // Proxied task has already been claimed from another proxy location. |
74 | __TBB_ASSERT( task_and_tag == from_bit, "Empty proxy cannot contain non-zero task pointer" ); |
75 | return NULL; |
76 | } |
77 | }; // struct task_proxy |
78 | |
79 | //! Internal representation of mail_outbox, without padding. |
//! Internal representation of mail_outbox, without padding.
/** Holds the raw state of a singly-linked FIFO of task_proxy objects.
    Member layout matters: mail_outbox derives from padded<unpadded_mail_outbox>
    to round the size up to a cache line. */
class unpadded_mail_outbox {
protected:
    // Atomic-qualified pointer to a task_proxy; the queue links are raced on by
    // the owner (pop) and arbitrary senders (push).
    typedef task_proxy*__TBB_atomic proxy_ptr;

    //! Pointer to first task_proxy in mailbox, or NULL if box is empty.
    proxy_ptr my_first;

    //! Pointer to pointer that will point to next item in the queue. Never NULL.
    // Points at my_first when the queue is empty, otherwise at the last
    // item's next_in_mailbox field; senders swap it forward atomically.
    proxy_ptr* __TBB_atomic my_last;

    //! Owner of mailbox is not executing a task, and has drained its own task pool.
    bool my_is_idle;
};
93 | |
94 | //! Class representing where mail is put. |
95 | /** Padded to occupy a cache line. */ |
//! Class representing where mail is put.
/** Padded to occupy a cache line. */
class mail_outbox : padded<unpadded_mail_outbox> {

    //! Pop the first task_proxy (matching the isolation tag, if any), or NULL.
    /** Called only by the owning thread via mail_inbox::pop, so it races only
        with concurrent push() calls, never with another pop. The delicate case
        is removing the sole item, which races with a sender that has already
        swapped my_last but not yet linked its proxy in. */
    task_proxy* internal_pop( __TBB_ISOLATION_EXPR(isolation_tag isolation) ) {
        task_proxy* curr = __TBB_load_relaxed( my_first );
        if ( !curr )
            return NULL;
        // Link that points to curr; needed to unlink curr from the list below.
        task_proxy **prev_ptr = &my_first;
#if __TBB_TASK_ISOLATION
        if ( isolation != no_isolation ) {
            // Walk past proxies whose isolation tag does not match the request.
            while ( curr->prefix().isolation != isolation ) {
                prev_ptr = &curr->next_in_mailbox;
                curr = curr->next_in_mailbox;
                if ( !curr )
                    return NULL;
            }
        }
#endif /* __TBB_TASK_ISOLATION */
        __TBB_control_consistency_helper(); // on my_first
        // There is a first item in the mailbox. See if there is a second.
        if ( task_proxy* second = curr->next_in_mailbox ) {
            // There are at least two items, so first item can be popped easily.
            *prev_ptr = second;
        } else {
            // There is only one item. Some care is required to pop it.
            *prev_ptr = NULL;
            if ( as_atomic( my_last ).compare_and_swap( prev_ptr, &curr->next_in_mailbox ) == &curr->next_in_mailbox ) {
                // Successfully transitioned mailbox from having one item to having none.
                __TBB_ASSERT( !curr->next_in_mailbox, NULL );
            } else {
                // Some other thread updated my_last but has not filled in first->next_in_mailbox
                // Wait until first item points to second item.
                atomic_backoff backoff;
                while ( !(second = curr->next_in_mailbox) ) backoff.pause();
                *prev_ptr = second;
            }
        }
        __TBB_ASSERT( curr, NULL );
        return curr;
    }
public:
    friend class mail_inbox;

    //! Push task_proxy onto the mailbox queue of another thread.
    /** Implementation is wait-free. */
    void push( task_proxy* t ) {
        __TBB_ASSERT(t, NULL);
        t->next_in_mailbox = NULL;
        // Atomically claim the tail slot; 'link' is the previous tail's
        // next-field (or my_first if the box was empty).
        proxy_ptr * const link = (proxy_ptr *)__TBB_FetchAndStoreW(&my_last,(intptr_t)&t->next_in_mailbox);
        // No release fence required for the next store, because there are no memory operations
        // between the previous fully fenced atomic operation and the store.
        __TBB_store_relaxed(*link, t);
    }

    //! Return true if mailbox is empty
    bool empty() {
        return __TBB_load_relaxed(my_first) == NULL;
    }

    //! Construct *this as a mailbox from zeroed memory.
    /** Raise assertion if *this is not previously zeroed, or sizeof(this) is wrong.
        This method is provided instead of a full constructor since we know the object
        will be constructed in zeroed memory. */
    void construct() {
        __TBB_ASSERT( sizeof(*this)==NFS_MaxLineSize, NULL );
        __TBB_ASSERT( !my_first, NULL );
        __TBB_ASSERT( !my_last, NULL );
        __TBB_ASSERT( !my_is_idle, NULL );
        // Empty queue invariant: my_last points at my_first.
        my_last=&my_first;
        suppress_unused_warning(pad);
    }

    //! Drain the mailbox
    /** Frees every remaining proxy and returns how many were freed.
        Safe only after all other threads have quit (no fences used). */
    intptr_t drain() {
        intptr_t k = 0;
        // No fences here because other threads have already quit.
        for( ; task_proxy* t = my_first; ++k ) {
            my_first = t->next_in_mailbox;
            // Proxies are allocated with a task prefix in front; free from the
            // true allocation start.
            NFS_Free((char*)t - task_prefix_reservation_size);
        }
        return k;
    }

    //! True if thread that owns this mailbox is looking for work.
    bool recipient_is_idle() {
        return my_is_idle;
    }
}; // class mail_outbox
183 | |
184 | //! Class representing source of mail. |
185 | class mail_inbox { |
186 | //! Corresponding sink where mail that we receive will be put. |
187 | mail_outbox* my_putter; |
188 | public: |
189 | //! Construct unattached inbox |
190 | mail_inbox() : my_putter(NULL) {} |
191 | |
192 | //! Attach inbox to a corresponding outbox. |
193 | void attach( mail_outbox& putter ) { |
194 | my_putter = &putter; |
195 | } |
196 | //! Detach inbox from its outbox |
197 | void detach() { |
198 | __TBB_ASSERT(my_putter,"not attached" ); |
199 | my_putter = NULL; |
200 | } |
201 | //! Get next piece of mail, or NULL if mailbox is empty. |
202 | task_proxy* pop( __TBB_ISOLATION_EXPR( isolation_tag isolation ) ) { |
203 | return my_putter->internal_pop( __TBB_ISOLATION_EXPR( isolation ) ); |
204 | } |
205 | //! Return true if mailbox is empty |
206 | bool empty() { |
207 | return my_putter->empty(); |
208 | } |
209 | //! Indicate whether thread that reads this mailbox is idle. |
210 | /** Raises assertion failure if mailbox is redundantly marked as not idle. */ |
211 | void set_is_idle( bool value ) { |
212 | if( my_putter ) { |
213 | __TBB_ASSERT( my_putter->my_is_idle || value, "attempt to redundantly mark mailbox as not idle" ); |
214 | my_putter->my_is_idle = value; |
215 | } |
216 | } |
217 | //! Indicate whether thread that reads this mailbox is idle. |
218 | bool is_idle_state ( bool value ) const { |
219 | return !my_putter || my_putter->my_is_idle == value; |
220 | } |
221 | |
222 | #if DO_ITT_NOTIFY |
223 | //! Get pointer to corresponding outbox used for ITT_NOTIFY calls. |
224 | void* outbox() const {return my_putter;} |
225 | #endif /* DO_ITT_NOTIFY */ |
226 | }; // class mail_inbox |
227 | |
228 | } // namespace internal |
229 | } // namespace tbb |
230 | |
231 | #endif /* _TBB_mailbox_H */ |
232 | |