1/*
2 Copyright (c) 2005-2019 Intel Corporation
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7
8 http://www.apache.org/licenses/LICENSE-2.0
9
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
15*/
16
17#ifndef _TBB_observer_proxy_H
18#define _TBB_observer_proxy_H
19
20#if __TBB_SCHEDULER_OBSERVER
21
22#include "scheduler_common.h" // to include task.h
23#include "tbb/task_scheduler_observer.h"
24#include "tbb/spin_rw_mutex.h"
25#include "tbb/aligned_space.h"
26
27namespace tbb {
28namespace internal {
29
//! Intrusive doubly-linked list of observer proxies guarded by a reader-writer lock.
/** Used both per arena and as the single global list (see the_global_observer_list). **/
class observer_list {
    friend class arena;

    // Mutex is wrapped with aligned_space to shut up warnings when its destructor
    // is called while threads are still using it. (aligned_space supplies raw,
    // suitably aligned storage only, so no constructor/destructor ever runs.)
    typedef aligned_space<spin_rw_mutex> my_mutex_type;

    //! Pointer to the head of this list.
    observer_proxy* my_head;

    //! Pointer to the tail of this list.
    observer_proxy* my_tail;

    //! Mutex protecting this list.
    my_mutex_type my_mutex;

    //! Back-pointer to the arena this list belongs to.
    /** NOTE(review): not initialized by the constructor below; presumably set by
        the befriended arena class - confirm against the arena code. **/
    arena* my_arena;

    //! Decrement refcount of the proxy p if there are other outstanding references.
    /** In case of success sets p to NULL. Must be invoked from under the list lock. **/
    inline static void remove_ref_fast( observer_proxy*& p );

    //! Implements notify_entry_observers functionality (out-of-line slow path).
    void do_notify_entry_observers( observer_proxy*& last, bool worker );

    //! Implements notify_exit_observers functionality (out-of-line slow path).
    void do_notify_exit_observers( observer_proxy* last, bool worker );

public:
    //! Constructs an empty list; the mutex storage is deliberately left raw (see my_mutex_type).
    observer_list () : my_head(NULL), my_tail(NULL) {}

    //! Removes and destroys all observer proxies from the list.
    /** Cannot be used concurrently with other methods. **/
    void clear ();

    //! Add observer proxy to the tail of the list.
    void insert ( observer_proxy* p );

    //! Remove observer proxy from the list.
    void remove ( observer_proxy* p );

    //! Decrement refcount of the proxy and destroy it if necessary.
    /** When refcount reaches zero removes the proxy from the list and destructs it. **/
    void remove_ref( observer_proxy* p );

    //! Type of the scoped lock for the reader-writer mutex associated with the list.
    typedef spin_rw_mutex::scoped_lock scoped_lock;

    //! Accessor to the reader-writer mutex associated with the list.
    spin_rw_mutex& mutex () { return my_mutex.begin()[0]; }

    //! True if no proxies are currently linked into the list.
    bool empty () const { return my_head == NULL; }

    //! Call entry notifications on observers added after last was notified.
    /** Updates last to become the last notified observer proxy (in the global list)
        or leaves it to be NULL. The proxy has its refcount incremented. **/
    inline void notify_entry_observers( observer_proxy*& last, bool worker );

    //! Call exit notifications on last and observers added before it.
    inline void notify_exit_observers( observer_proxy*& last, bool worker );
}; // class observer_list
92
//! Wrapper for an observer object
/** To maintain shared lists of observers the scheduler first wraps each observer
    object into a proxy so that a list item remains valid even after the corresponding
    proxy object is destroyed by the user code. **/
97class observer_proxy {
98 friend class task_scheduler_observer_v3;
99 friend class observer_list;
100 //! Reference count used for garbage collection.
101 /** 1 for reference from my task_scheduler_observer.
102 1 for each task dispatcher's last observer pointer.
103 No accounting for neighbors in the shared list. */
104 atomic<int> my_ref_count;
105 //! Reference to the list this observer belongs to.
106 observer_list* my_list;
107 //! Pointer to next observer in the list specified by my_head.
108 /** NULL for the last item in the list. **/
109 observer_proxy* my_next;
110 //! Pointer to the previous observer in the list specified by my_head.
111 /** For the head of the list points to the last item. **/
112 observer_proxy* my_prev;
113 //! Associated observer
114 task_scheduler_observer_v3* my_observer;
115 //! Version
116 char my_version;
117
118#if __TBB_ARENA_OBSERVER
119 interface6::task_scheduler_observer* get_v6_observer();
120#endif
121#if __TBB_ARENA_OBSERVER
122 bool is_global(); //TODO: move them back inline when un-CPF'ing
123#endif
124
125 //! Constructs proxy for the given observer and adds it to the specified list.
126 observer_proxy( task_scheduler_observer_v3& );
127
128#if TBB_USE_ASSERT
129 ~observer_proxy();
130#endif /* TBB_USE_ASSERT */
131
132 //! Shut up the warning
133 observer_proxy& operator = ( const observer_proxy& );
134}; // class observer_proxy
135
136inline void observer_list::remove_ref_fast( observer_proxy*& p ) {
137 if( p->my_observer ) {
138 // Can decrement refcount quickly, as it cannot drop to zero while under the lock.
139 int r = --p->my_ref_count;
140 __TBB_ASSERT_EX( r, NULL );
141 p = NULL;
142 } else {
143 // Use slow form of refcount decrementing, after the lock is released.
144 }
145}
146
147inline void observer_list::notify_entry_observers( observer_proxy*& last, bool worker ) {
148 if ( last == my_tail )
149 return;
150 do_notify_entry_observers( last, worker );
151}
152
153inline void observer_list::notify_exit_observers( observer_proxy*& last, bool worker ) {
154 if ( !last )
155 return;
156 __TBB_ASSERT(is_alive((uintptr_t)last), NULL);
157 do_notify_exit_observers( last, worker );
158 __TBB_ASSERT(last, NULL);
159 poison_value(last);
160}
161
162extern padded<observer_list> the_global_observer_list;
163
164} // namespace internal
165} // namespace tbb
166
167#endif /* __TBB_SCHEDULER_OBSERVER */
168
169#endif /* _TBB_observer_proxy_H */
170