/* Copyright (c) 2010, 2015, Oracle and/or its affiliates. All rights reserved.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software Foundation,
   51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */

/**
  @file storage/perfschema/pfs_events_stages.cc
  Events stages data structures (implementation).
*/

#include "my_global.h"
#include "my_sys.h"
#include "pfs_global.h"
#include "pfs_instr_class.h"
#include "pfs_instr.h"
#include "pfs_account.h"
#include "pfs_host.h"
#include "pfs_user.h"
#include "pfs_events_stages.h"
#include "pfs_atomic.h"
#include "m_string.h"

/** Size of the EVENTS_STAGES_HISTORY_LONG circular buffer. */
ulong events_stages_history_long_size= 0;
/** Consumer flag for table EVENTS_STAGES_CURRENT. */
bool flag_events_stages_current= false;
/** Consumer flag for table EVENTS_STAGES_HISTORY. */
bool flag_events_stages_history= false;
/** Consumer flag for table EVENTS_STAGES_HISTORY_LONG. */
bool flag_events_stages_history_long= false;

/** True if EVENTS_STAGES_HISTORY_LONG circular buffer is full. */
bool events_stages_history_long_full= false;
/** Index in EVENTS_STAGES_HISTORY_LONG circular buffer. */
volatile uint32 events_stages_history_long_index= 0;
/** EVENTS_STAGES_HISTORY_LONG circular buffer. */
PFS_events_stages *events_stages_history_long_array= NULL;

/**
  Initialize table EVENTS_STAGES_HISTORY_LONG.
  @param events_stages_history_long_sizing table sizing
*/
int init_events_stages_history_long(uint events_stages_history_long_sizing)
{
  events_stages_history_long_size= events_stages_history_long_sizing;
  events_stages_history_long_full= false;
  PFS_atomic::store_u32(&events_stages_history_long_index, 0);

  if (events_stages_history_long_size == 0)
    return 0;

  events_stages_history_long_array=
    PFS_MALLOC_ARRAY(events_stages_history_long_size, sizeof(PFS_events_stages),
                     PFS_events_stages, MYF(MY_ZEROFILL));

  return (events_stages_history_long_array ? 0 : 1);
}

/** Cleanup table EVENTS_STAGES_HISTORY_LONG. */
void cleanup_events_stages_history_long(void)
{
  pfs_free(events_stages_history_long_array);
  events_stages_history_long_array= NULL;
}

static inline void copy_events_stages(PFS_events_stages *dest,
                                      const PFS_events_stages *source)
{
  memcpy(dest, source, sizeof(PFS_events_stages));
}

/**
  Insert a stage record in table EVENTS_STAGES_HISTORY.
  @param thread thread that executed the stage
  @param stage record to insert
*/
void insert_events_stages_history(PFS_thread *thread, PFS_events_stages *stage)
{
  if (unlikely(events_stages_history_per_thread == 0))
    return;

  DBUG_ASSERT(thread->m_stages_history != NULL);

  uint index= thread->m_stages_history_index;

  /*
    A concurrent thread executing TRUNCATE TABLE EVENTS_STAGES_CURRENT
    could alter the data that this thread is inserting,
    causing a potential race condition.
    We are not testing for this and insert a possibly empty record,
    to make this thread (the writer) faster.
    This is ok, the readers of m_stages_history will filter this out
    (see the illustrative reader-side sketch after this function).
  */
  copy_events_stages(&thread->m_stages_history[index], stage);

  index++;
  if (index >= events_stages_history_per_thread)
  {
    index= 0;
    thread->m_stages_history_full= true;
  }
  thread->m_stages_history_index= index;
}
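
/*
  Illustrative sketch only, not part of the original sources: how a reader of
  m_stages_history might filter out the possibly empty records mentioned in
  the comment above. The reset code in this file marks unused records by
  setting m_class to NULL, so a NULL class is the "empty record" marker.
  The member and global names below come from this file; the reader loop
  itself is a simplified assumption, not the actual EVENTS_STAGES_HISTORY
  table implementation.

    uint last= thread->m_stages_history_full
               ? events_stages_history_per_thread
               : thread->m_stages_history_index;
    for (uint i= 0; i < last; i++)
    {
      const PFS_events_stages *rec= &thread->m_stages_history[i];
      if (rec->m_class == NULL)
        continue;  // skip records cleared by TRUNCATE / reset
      // ... materialize rec as a row of EVENTS_STAGES_HISTORY ...
    }
*/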

/**
  Insert a stage record in table EVENTS_STAGES_HISTORY_LONG.
  @param stage record to insert
*/
void insert_events_stages_history_long(PFS_events_stages *stage)
{
  if (unlikely(events_stages_history_long_size == 0))
    return;

  DBUG_ASSERT(events_stages_history_long_array != NULL);

  uint index= PFS_atomic::add_u32(&events_stages_history_long_index, 1);

  index= index % events_stages_history_long_size;
  if (index == 0)
    events_stages_history_long_full= true;

  /* See related comment in insert_events_stages_history. */
  copy_events_stages(&events_stages_history_long_array[index], stage);
}
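
/*
  Illustrative sketch only, not part of the original sources: how a reader of
  the global circular buffer might decide which slots to scan, using the
  events_stages_history_long_full flag and the shared atomic index maintained
  above. The globals below come from this file; the loop itself is a
  simplified assumption, not the actual EVENTS_STAGES_HISTORY_LONG table
  implementation.

    uint limit= events_stages_history_long_full
                ? events_stages_history_long_size
                : events_stages_history_long_index %
                  events_stages_history_long_size;
    for (uint i= 0; i < limit; i++)
    {
      const PFS_events_stages *rec= &events_stages_history_long_array[i];
      if (rec->m_class == NULL)
        continue;  // slot never populated, or cleared by a reset
      // ... materialize rec as a row of EVENTS_STAGES_HISTORY_LONG ...
    }
*/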

/** Reset table EVENTS_STAGES_CURRENT data. */
void reset_events_stages_current(void)
{
  PFS_thread *pfs_thread= thread_array;
  PFS_thread *pfs_thread_last= thread_array + thread_max;

  for ( ; pfs_thread < pfs_thread_last; pfs_thread++)
  {
    pfs_thread->m_stage_current.m_class= NULL;
  }
}

/** Reset table EVENTS_STAGES_HISTORY data. */
void reset_events_stages_history(void)
{
  PFS_thread *pfs_thread= thread_array;
  PFS_thread *pfs_thread_last= thread_array + thread_max;

  for ( ; pfs_thread < pfs_thread_last; pfs_thread++)
  {
    PFS_events_stages *pfs= pfs_thread->m_stages_history;
    PFS_events_stages *pfs_last= pfs + events_stages_history_per_thread;

    pfs_thread->m_stages_history_index= 0;
    pfs_thread->m_stages_history_full= false;
    for ( ; pfs < pfs_last; pfs++)
      pfs->m_class= NULL;
  }
}

/** Reset table EVENTS_STAGES_HISTORY_LONG data. */
void reset_events_stages_history_long(void)
{
  PFS_atomic::store_u32(&events_stages_history_long_index, 0);
  events_stages_history_long_full= false;

  PFS_events_stages *pfs= events_stages_history_long_array;
  PFS_events_stages *pfs_last= pfs + events_stages_history_long_size;
  for ( ; pfs < pfs_last; pfs++)
    pfs->m_class= NULL;
}

/** Reset table EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME data. */
void reset_events_stages_by_thread()
{
  PFS_thread *thread= thread_array;
  PFS_thread *thread_last= thread_array + thread_max;
  PFS_account *account;
  PFS_user *user;
  PFS_host *host;

  for ( ; thread < thread_last; thread++)
  {
    if (thread->m_lock.is_populated())
    {
      account= sanitize_account(thread->m_account);
      user= sanitize_user(thread->m_user);
      host= sanitize_host(thread->m_host);
      aggregate_thread_stages(thread, account, user, host);
    }
  }
}

/** Reset table EVENTS_STAGES_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME data. */
void reset_events_stages_by_account()
{
  PFS_account *pfs= account_array;
  PFS_account *pfs_last= account_array + account_max;
  PFS_user *user;
  PFS_host *host;

  for ( ; pfs < pfs_last; pfs++)
  {
    if (pfs->m_lock.is_populated())
    {
      user= sanitize_user(pfs->m_user);
      host= sanitize_host(pfs->m_host);
      pfs->aggregate_stages(user, host);
    }
  }
}

/** Reset table EVENTS_STAGES_SUMMARY_BY_USER_BY_EVENT_NAME data. */
void reset_events_stages_by_user()
{
  PFS_user *pfs= user_array;
  PFS_user *pfs_last= user_array + user_max;

  for ( ; pfs < pfs_last; pfs++)
  {
    if (pfs->m_lock.is_populated())
      pfs->aggregate_stages();
  }
}

/** Reset table EVENTS_STAGES_SUMMARY_BY_HOST_BY_EVENT_NAME data. */
void reset_events_stages_by_host()
{
  PFS_host *pfs= host_array;
  PFS_host *pfs_last= host_array + host_max;

  for ( ; pfs < pfs_last; pfs++)
  {
    if (pfs->m_lock.is_populated())
      pfs->aggregate_stages();
  }
}

/** Reset table EVENTS_STAGES_GLOBAL_BY_EVENT_NAME data. */
void reset_events_stages_global()
{
  PFS_stage_stat *stat= global_instr_class_stages_array;
  PFS_stage_stat *stat_last= global_instr_class_stages_array + stage_class_max;

  for ( ; stat < stat_last; stat++)
    stat->reset();
}