/* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; version 2 of the License.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software Foundation,
  51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */

/**
  @file storage/perfschema/pfs_events_waits.cc
  Events waits data structures (implementation).
*/

#include "my_global.h"
#include "my_sys.h"
#include "pfs_global.h"
#include "pfs_instr_class.h"
#include "pfs_instr.h"
#include "pfs_user.h"
#include "pfs_host.h"
#include "pfs_account.h"
#include "pfs_events_waits.h"
#include "pfs_atomic.h"
#include "m_string.h"

/** Size of the EVENTS_WAITS_HISTORY_LONG circular buffer. */
ulong events_waits_history_long_size= 0;
/** Consumer flag for table EVENTS_WAITS_CURRENT. */
bool flag_events_waits_current= false;
/** Consumer flag for table EVENTS_WAITS_HISTORY. */
bool flag_events_waits_history= false;
/** Consumer flag for table EVENTS_WAITS_HISTORY_LONG. */
bool flag_events_waits_history_long= false;
/** Consumer flag for the global instrumentation. */
bool flag_global_instrumentation= false;
/** Consumer flag for the per thread instrumentation. */
bool flag_thread_instrumentation= false;

/** True if EVENTS_WAITS_HISTORY_LONG circular buffer is full. */
bool events_waits_history_long_full= false;
/** Index in EVENTS_WAITS_HISTORY_LONG circular buffer. */
volatile uint32 events_waits_history_long_index= 0;
/** EVENTS_WAITS_HISTORY_LONG circular buffer. */
PFS_events_waits *events_waits_history_long_array= NULL;

/**
  Initialize table EVENTS_WAITS_HISTORY_LONG.
  @param events_waits_history_long_sizing      table sizing
  @return 0 on success, 1 on failure
*/
int init_events_waits_history_long(uint events_waits_history_long_sizing)
{
  events_waits_history_long_size= events_waits_history_long_sizing;
  events_waits_history_long_full= false;
  PFS_atomic::store_u32(&events_waits_history_long_index, 0);

  if (events_waits_history_long_size == 0)
    return 0;

  events_waits_history_long_array=
    PFS_MALLOC_ARRAY(events_waits_history_long_size, sizeof(PFS_events_waits),
                     PFS_events_waits, MYF(MY_ZEROFILL));

  return (events_waits_history_long_array ? 0 : 1);
}
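
/*
  Context (illustrative, outside this translation unit): the server invokes
  this function during performance schema startup with the configured
  sizing, along the lines of:

    init_events_waits_history_long(param->m_events_waits_history_long_sizing);

  where the sizing is ultimately derived from the
  performance_schema_events_waits_history_long_size system variable.
*/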

/** Cleanup table EVENTS_WAITS_HISTORY_LONG. */
void cleanup_events_waits_history_long(void)
{
  pfs_free(events_waits_history_long_array);
  events_waits_history_long_array= NULL;
}

/** Copy a wait event record into a history buffer slot. */
static inline void copy_events_waits(PFS_events_waits *dest,
                                     const PFS_events_waits *source)
{
  memcpy(dest, source, sizeof(PFS_events_waits));
}

/**
  Insert a wait record in table EVENTS_WAITS_HISTORY.
  @param thread thread that executed the wait
  @param wait record to insert
*/
void insert_events_waits_history(PFS_thread *thread, PFS_events_waits *wait)
{
  if (unlikely(events_waits_history_per_thread == 0))
    return;

  uint index= thread->m_waits_history_index;

  /*
    A concurrent thread executing TRUNCATE TABLE EVENTS_WAITS_CURRENT
    could alter the data that this thread is inserting,
    causing a potential race condition.
    We are not testing for this, and may insert a possibly empty record,
    to keep this thread (the writer) fast.
    This is ok: readers of m_waits_history will filter such records out.
  */
  copy_events_waits(&thread->m_waits_history[index], wait);

  index++;
  if (index >= events_waits_history_per_thread)
  {
    index= 0;
    thread->m_waits_history_full= true;
  }
  thread->m_waits_history_index= index;
}
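
/*
  Illustrative example of the per thread ring buffer above: with
  events_waits_history_per_thread = 10, successive inserts fill slots
  0 .. 9; the insert into slot 9 wraps m_waits_history_index back to 0
  and sets m_waits_history_full, telling readers that every slot holds
  valid data.
*/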

/**
  Insert a wait record in table EVENTS_WAITS_HISTORY_LONG.
  @param wait record to insert
*/
void insert_events_waits_history_long(PFS_events_waits *wait)
{
  if (unlikely(events_waits_history_long_size == 0))
    return;

  uint index= PFS_atomic::add_u32(&events_waits_history_long_index, 1);

  index= index % events_waits_history_long_size;
  if (index == 0)
    events_waits_history_long_full= true;

  /* See related comment in insert_events_waits_history. */
  copy_events_waits(&events_waits_history_long_array[index], wait);
}
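
/*
  Illustrative example of the shared ring buffer above: with
  events_waits_history_long_size = 1000, the atomic counter hands out
  increasing tickets, and each writer stores into slot (ticket % 1000).
  The full flag is set whenever the computed slot is 0, which can happen
  before the buffer has genuinely wrapped; this is harmless, because
  readers filter out zero-filled slots (m_wait_class == NO_WAIT_CLASS).
*/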

/** Reset table EVENTS_WAITS_CURRENT data. */
void reset_events_waits_current(void)
{
  PFS_thread *pfs_thread= thread_array;
  PFS_thread *pfs_thread_last= thread_array + thread_max;

  for ( ; pfs_thread < pfs_thread_last; pfs_thread++)
  {
    PFS_events_waits *pfs_wait= pfs_thread->m_events_waits_stack;
    PFS_events_waits *pfs_wait_last= pfs_wait + WAIT_STACK_SIZE;

    for ( ; pfs_wait < pfs_wait_last; pfs_wait++)
      pfs_wait->m_wait_class= NO_WAIT_CLASS;
  }
}

/** Reset table EVENTS_WAITS_HISTORY data. */
void reset_events_waits_history(void)
{
  PFS_thread *pfs_thread= thread_array;
  PFS_thread *pfs_thread_last= thread_array + thread_max;

  for ( ; pfs_thread < pfs_thread_last; pfs_thread++)
  {
    PFS_events_waits *wait= pfs_thread->m_waits_history;
    PFS_events_waits *wait_last= wait + events_waits_history_per_thread;

    pfs_thread->m_waits_history_index= 0;
    pfs_thread->m_waits_history_full= false;
    for ( ; wait < wait_last; wait++)
      wait->m_wait_class= NO_WAIT_CLASS;
  }
}

/** Reset table EVENTS_WAITS_HISTORY_LONG data. */
void reset_events_waits_history_long(void)
{
  PFS_atomic::store_u32(&events_waits_history_long_index, 0);
  events_waits_history_long_full= false;

  PFS_events_waits *wait= events_waits_history_long_array;
  PFS_events_waits *wait_last= wait + events_waits_history_long_size;
  for ( ; wait < wait_last; wait++)
    wait->m_wait_class= NO_WAIT_CLASS;
}
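
/*
  Context (illustrative): this reset is what
  TRUNCATE TABLE performance_schema.events_waits_history_long
  ultimately performs; the buffer storage itself is kept,
  only the records are invalidated.
*/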

/** Reset table EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME data. */
void reset_events_waits_by_thread()
{
  PFS_thread *thread= thread_array;
  PFS_thread *thread_last= thread_array + thread_max;
  PFS_account *account;
  PFS_user *user;
  PFS_host *host;

  for ( ; thread < thread_last; thread++)
  {
    if (thread->m_lock.is_populated())
    {
      account= sanitize_account(thread->m_account);
      user= sanitize_user(thread->m_user);
      host= sanitize_host(thread->m_host);
      /*
        Resetting the per thread statistics means rolling them up
        into the parent account, user and host aggregates,
        so that no data is lost.
      */
      aggregate_thread_waits(thread, account, user, host);
    }
  }
}

/** Reset table EVENTS_WAITS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME data. */
void reset_events_waits_by_account()
{
  PFS_account *pfs= account_array;
  PFS_account *pfs_last= account_array + account_max;
  PFS_user *user;
  PFS_host *host;

  for ( ; pfs < pfs_last; pfs++)
  {
    if (pfs->m_lock.is_populated())
    {
      user= sanitize_user(pfs->m_user);
      host= sanitize_host(pfs->m_host);
      pfs->aggregate_waits(user, host);
    }
  }
}

/** Reset table EVENTS_WAITS_SUMMARY_BY_USER_BY_EVENT_NAME data. */
void reset_events_waits_by_user()
{
  PFS_user *pfs= user_array;
  PFS_user *pfs_last= user_array + user_max;

  for ( ; pfs < pfs_last; pfs++)
  {
    if (pfs->m_lock.is_populated())
      pfs->aggregate_waits();
  }
}

/** Reset table EVENTS_WAITS_SUMMARY_BY_HOST_BY_EVENT_NAME data. */
void reset_events_waits_by_host()
{
  PFS_host *pfs= host_array;
  PFS_host *pfs_last= host_array + host_max;

  for ( ; pfs < pfs_last; pfs++)
  {
    if (pfs->m_lock.is_populated())
      pfs->aggregate_waits();
  }
}

/** Reset table wait statistics (both I/O and lock) for all table shares. */
void reset_table_waits_by_table()
{
  PFS_table_share *pfs= table_share_array;
  PFS_table_share *pfs_last= pfs + table_share_max;

  for ( ; pfs < pfs_last; pfs++)
  {
    if (pfs->m_lock.is_populated())
      pfs->aggregate();
  }
}

/** Reset table I/O wait statistics for all table shares. */
void reset_table_io_waits_by_table()
{
  PFS_table_share *pfs= table_share_array;
  PFS_table_share *pfs_last= pfs + table_share_max;

  for ( ; pfs < pfs_last; pfs++)
  {
    if (pfs->m_lock.is_populated())
      pfs->aggregate_io();
  }
}

/** Reset table lock wait statistics for all table shares. */
void reset_table_lock_waits_by_table()
{
  PFS_table_share *pfs= table_share_array;
  PFS_table_share *pfs_last= pfs + table_share_max;

  for ( ; pfs < pfs_last; pfs++)
  {
    if (pfs->m_lock.is_populated())
      pfs->aggregate_lock();
  }
}

/** Flush table wait statistics (both I/O and lock) from all open table handles. */
void reset_table_waits_by_table_handle()
{
  PFS_table *pfs= table_array;
  PFS_table *pfs_last= pfs + table_max;

  for ( ; pfs < pfs_last; pfs++)
  {
    if (pfs->m_lock.is_populated())
      pfs->sanitized_aggregate();
  }
}

/** Flush table I/O wait statistics from all open table handles. */
void reset_table_io_waits_by_table_handle()
{
  PFS_table *pfs= table_array;
  PFS_table *pfs_last= pfs + table_max;

  for ( ; pfs < pfs_last; pfs++)
  {
    if (pfs->m_lock.is_populated())
      pfs->sanitized_aggregate_io();
  }
}

/** Flush table lock wait statistics from all open table handles. */
void reset_table_lock_waits_by_table_handle()
{
  PFS_table *pfs= table_array;
  PFS_table *pfs_last= pfs + table_max;

  for ( ; pfs < pfs_last; pfs++)
  {
    if (pfs->m_lock.is_populated())
      pfs->sanitized_aggregate_lock();
  }
}