1 | /* This file is autogenerated by tracetool, do not edit. */ |
2 | |
3 | #ifndef TRACE_UTIL_GENERATED_TRACERS_H |
4 | #define TRACE_UTIL_GENERATED_TRACERS_H |
5 | |
6 | #include "trace/control.h" |
7 | |
/* Per-event TraceEvent descriptors; the matching definitions live in the
 * generated trace.c for this directory. */
extern TraceEvent _TRACE_RUN_POLL_HANDLERS_BEGIN_EVENT;
extern TraceEvent _TRACE_RUN_POLL_HANDLERS_END_EVENT;
extern TraceEvent _TRACE_POLL_SHRINK_EVENT;
extern TraceEvent _TRACE_POLL_GROW_EVENT;
extern TraceEvent _TRACE_AIO_CO_SCHEDULE_EVENT;
extern TraceEvent _TRACE_AIO_CO_SCHEDULE_BH_CB_EVENT;
extern TraceEvent _TRACE_THREAD_POOL_SUBMIT_EVENT;
extern TraceEvent _TRACE_THREAD_POOL_COMPLETE_EVENT;
extern TraceEvent _TRACE_THREAD_POOL_CANCEL_EVENT;
extern TraceEvent _TRACE_BUFFER_RESIZE_EVENT;
extern TraceEvent _TRACE_BUFFER_MOVE_EMPTY_EVENT;
extern TraceEvent _TRACE_BUFFER_MOVE_EVENT;
extern TraceEvent _TRACE_BUFFER_FREE_EVENT;
extern TraceEvent _TRACE_QEMU_FILE_MONITOR_ADD_WATCH_EVENT;
extern TraceEvent _TRACE_QEMU_FILE_MONITOR_REMOVE_WATCH_EVENT;
extern TraceEvent _TRACE_QEMU_FILE_MONITOR_NEW_EVENT;
extern TraceEvent _TRACE_QEMU_FILE_MONITOR_ENABLE_WATCH_EVENT;
extern TraceEvent _TRACE_QEMU_FILE_MONITOR_DISABLE_WATCH_EVENT;
extern TraceEvent _TRACE_QEMU_FILE_MONITOR_EVENT_EVENT;
extern TraceEvent _TRACE_QEMU_FILE_MONITOR_DISPATCH_EVENT;
extern TraceEvent _TRACE_QEMU_AIO_COROUTINE_ENTER_EVENT;
extern TraceEvent _TRACE_QEMU_COROUTINE_YIELD_EVENT;
extern TraceEvent _TRACE_QEMU_COROUTINE_TERMINATE_EVENT;
extern TraceEvent _TRACE_QEMU_CO_MUTEX_LOCK_UNCONTENDED_EVENT;
extern TraceEvent _TRACE_QEMU_CO_MUTEX_LOCK_ENTRY_EVENT;
extern TraceEvent _TRACE_QEMU_CO_MUTEX_LOCK_RETURN_EVENT;
extern TraceEvent _TRACE_QEMU_CO_MUTEX_UNLOCK_ENTRY_EVENT;
extern TraceEvent _TRACE_QEMU_CO_MUTEX_UNLOCK_RETURN_EVENT;
extern TraceEvent _TRACE_QEMU_MEMALIGN_EVENT;
extern TraceEvent _TRACE_QEMU_ANON_RAM_ALLOC_EVENT;
extern TraceEvent _TRACE_QEMU_VFREE_EVENT;
extern TraceEvent _TRACE_QEMU_ANON_RAM_FREE_EVENT;
extern TraceEvent _TRACE_HBITMAP_ITER_SKIP_WORDS_EVENT;
extern TraceEvent _TRACE_HBITMAP_RESET_EVENT;
extern TraceEvent _TRACE_HBITMAP_SET_EVENT;
extern TraceEvent _TRACE_LOCKCNT_FAST_PATH_ATTEMPT_EVENT;
extern TraceEvent _TRACE_LOCKCNT_FAST_PATH_SUCCESS_EVENT;
extern TraceEvent _TRACE_LOCKCNT_UNLOCK_ATTEMPT_EVENT;
extern TraceEvent _TRACE_LOCKCNT_UNLOCK_SUCCESS_EVENT;
extern TraceEvent _TRACE_LOCKCNT_FUTEX_WAIT_PREPARE_EVENT;
extern TraceEvent _TRACE_LOCKCNT_FUTEX_WAIT_EVENT;
extern TraceEvent _TRACE_LOCKCNT_FUTEX_WAIT_RESUME_EVENT;
extern TraceEvent _TRACE_LOCKCNT_FUTEX_WAKE_EVENT;
extern TraceEvent _TRACE_SOCKET_LISTEN_EVENT;
extern TraceEvent _TRACE_QEMU_MUTEX_LOCK_EVENT;
extern TraceEvent _TRACE_QEMU_MUTEX_LOCKED_EVENT;
extern TraceEvent _TRACE_QEMU_MUTEX_UNLOCK_EVENT;
extern TraceEvent _TRACE_QEMU_VFIO_DMA_RESET_TEMPORARY_EVENT;
extern TraceEvent _TRACE_QEMU_VFIO_RAM_BLOCK_ADDED_EVENT;
extern TraceEvent _TRACE_QEMU_VFIO_RAM_BLOCK_REMOVED_EVENT;
extern TraceEvent _TRACE_QEMU_VFIO_FIND_MAPPING_EVENT;
extern TraceEvent _TRACE_QEMU_VFIO_NEW_MAPPING_EVENT;
extern TraceEvent _TRACE_QEMU_VFIO_DO_MAPPING_EVENT;
extern TraceEvent _TRACE_QEMU_VFIO_DMA_MAP_EVENT;
extern TraceEvent _TRACE_QEMU_VFIO_DMA_UNMAP_EVENT;
/* Per-event dynamic ("dstate") trace state, read by the inline tracers
 * below through the _BACKEND_DSTATE() macros; non-zero means the event is
 * currently enabled at runtime. */
extern uint16_t _TRACE_RUN_POLL_HANDLERS_BEGIN_DSTATE;
extern uint16_t _TRACE_RUN_POLL_HANDLERS_END_DSTATE;
extern uint16_t _TRACE_POLL_SHRINK_DSTATE;
extern uint16_t _TRACE_POLL_GROW_DSTATE;
extern uint16_t _TRACE_AIO_CO_SCHEDULE_DSTATE;
extern uint16_t _TRACE_AIO_CO_SCHEDULE_BH_CB_DSTATE;
extern uint16_t _TRACE_THREAD_POOL_SUBMIT_DSTATE;
extern uint16_t _TRACE_THREAD_POOL_COMPLETE_DSTATE;
extern uint16_t _TRACE_THREAD_POOL_CANCEL_DSTATE;
extern uint16_t _TRACE_BUFFER_RESIZE_DSTATE;
extern uint16_t _TRACE_BUFFER_MOVE_EMPTY_DSTATE;
extern uint16_t _TRACE_BUFFER_MOVE_DSTATE;
extern uint16_t _TRACE_BUFFER_FREE_DSTATE;
extern uint16_t _TRACE_QEMU_FILE_MONITOR_ADD_WATCH_DSTATE;
extern uint16_t _TRACE_QEMU_FILE_MONITOR_REMOVE_WATCH_DSTATE;
extern uint16_t _TRACE_QEMU_FILE_MONITOR_NEW_DSTATE;
extern uint16_t _TRACE_QEMU_FILE_MONITOR_ENABLE_WATCH_DSTATE;
extern uint16_t _TRACE_QEMU_FILE_MONITOR_DISABLE_WATCH_DSTATE;
extern uint16_t _TRACE_QEMU_FILE_MONITOR_EVENT_DSTATE;
extern uint16_t _TRACE_QEMU_FILE_MONITOR_DISPATCH_DSTATE;
extern uint16_t _TRACE_QEMU_AIO_COROUTINE_ENTER_DSTATE;
extern uint16_t _TRACE_QEMU_COROUTINE_YIELD_DSTATE;
extern uint16_t _TRACE_QEMU_COROUTINE_TERMINATE_DSTATE;
extern uint16_t _TRACE_QEMU_CO_MUTEX_LOCK_UNCONTENDED_DSTATE;
extern uint16_t _TRACE_QEMU_CO_MUTEX_LOCK_ENTRY_DSTATE;
extern uint16_t _TRACE_QEMU_CO_MUTEX_LOCK_RETURN_DSTATE;
extern uint16_t _TRACE_QEMU_CO_MUTEX_UNLOCK_ENTRY_DSTATE;
extern uint16_t _TRACE_QEMU_CO_MUTEX_UNLOCK_RETURN_DSTATE;
extern uint16_t _TRACE_QEMU_MEMALIGN_DSTATE;
extern uint16_t _TRACE_QEMU_ANON_RAM_ALLOC_DSTATE;
extern uint16_t _TRACE_QEMU_VFREE_DSTATE;
extern uint16_t _TRACE_QEMU_ANON_RAM_FREE_DSTATE;
extern uint16_t _TRACE_HBITMAP_ITER_SKIP_WORDS_DSTATE;
extern uint16_t _TRACE_HBITMAP_RESET_DSTATE;
extern uint16_t _TRACE_HBITMAP_SET_DSTATE;
extern uint16_t _TRACE_LOCKCNT_FAST_PATH_ATTEMPT_DSTATE;
extern uint16_t _TRACE_LOCKCNT_FAST_PATH_SUCCESS_DSTATE;
extern uint16_t _TRACE_LOCKCNT_UNLOCK_ATTEMPT_DSTATE;
extern uint16_t _TRACE_LOCKCNT_UNLOCK_SUCCESS_DSTATE;
extern uint16_t _TRACE_LOCKCNT_FUTEX_WAIT_PREPARE_DSTATE;
extern uint16_t _TRACE_LOCKCNT_FUTEX_WAIT_DSTATE;
extern uint16_t _TRACE_LOCKCNT_FUTEX_WAIT_RESUME_DSTATE;
extern uint16_t _TRACE_LOCKCNT_FUTEX_WAKE_DSTATE;
extern uint16_t _TRACE_SOCKET_LISTEN_DSTATE;
extern uint16_t _TRACE_QEMU_MUTEX_LOCK_DSTATE;
extern uint16_t _TRACE_QEMU_MUTEX_LOCKED_DSTATE;
extern uint16_t _TRACE_QEMU_MUTEX_UNLOCK_DSTATE;
extern uint16_t _TRACE_QEMU_VFIO_DMA_RESET_TEMPORARY_DSTATE;
extern uint16_t _TRACE_QEMU_VFIO_RAM_BLOCK_ADDED_DSTATE;
extern uint16_t _TRACE_QEMU_VFIO_RAM_BLOCK_REMOVED_DSTATE;
extern uint16_t _TRACE_QEMU_VFIO_FIND_MAPPING_DSTATE;
extern uint16_t _TRACE_QEMU_VFIO_NEW_MAPPING_DSTATE;
extern uint16_t _TRACE_QEMU_VFIO_DO_MAPPING_DSTATE;
extern uint16_t _TRACE_QEMU_VFIO_DMA_MAP_DSTATE;
extern uint16_t _TRACE_QEMU_VFIO_DMA_UNMAP_DSTATE;
/* Compile-time availability flags: every event in this file is built in
 * (the trace_*() wrappers below therefore guard with 'if (true)'). */
#define TRACE_RUN_POLL_HANDLERS_BEGIN_ENABLED 1
#define TRACE_RUN_POLL_HANDLERS_END_ENABLED 1
#define TRACE_POLL_SHRINK_ENABLED 1
#define TRACE_POLL_GROW_ENABLED 1
#define TRACE_AIO_CO_SCHEDULE_ENABLED 1
#define TRACE_AIO_CO_SCHEDULE_BH_CB_ENABLED 1
#define TRACE_THREAD_POOL_SUBMIT_ENABLED 1
#define TRACE_THREAD_POOL_COMPLETE_ENABLED 1
#define TRACE_THREAD_POOL_CANCEL_ENABLED 1
#define TRACE_BUFFER_RESIZE_ENABLED 1
#define TRACE_BUFFER_MOVE_EMPTY_ENABLED 1
#define TRACE_BUFFER_MOVE_ENABLED 1
#define TRACE_BUFFER_FREE_ENABLED 1
#define TRACE_QEMU_FILE_MONITOR_ADD_WATCH_ENABLED 1
#define TRACE_QEMU_FILE_MONITOR_REMOVE_WATCH_ENABLED 1
#define TRACE_QEMU_FILE_MONITOR_NEW_ENABLED 1
#define TRACE_QEMU_FILE_MONITOR_ENABLE_WATCH_ENABLED 1
#define TRACE_QEMU_FILE_MONITOR_DISABLE_WATCH_ENABLED 1
#define TRACE_QEMU_FILE_MONITOR_EVENT_ENABLED 1
#define TRACE_QEMU_FILE_MONITOR_DISPATCH_ENABLED 1
#define TRACE_QEMU_AIO_COROUTINE_ENTER_ENABLED 1
#define TRACE_QEMU_COROUTINE_YIELD_ENABLED 1
#define TRACE_QEMU_COROUTINE_TERMINATE_ENABLED 1
#define TRACE_QEMU_CO_MUTEX_LOCK_UNCONTENDED_ENABLED 1
#define TRACE_QEMU_CO_MUTEX_LOCK_ENTRY_ENABLED 1
#define TRACE_QEMU_CO_MUTEX_LOCK_RETURN_ENABLED 1
#define TRACE_QEMU_CO_MUTEX_UNLOCK_ENTRY_ENABLED 1
#define TRACE_QEMU_CO_MUTEX_UNLOCK_RETURN_ENABLED 1
#define TRACE_QEMU_MEMALIGN_ENABLED 1
#define TRACE_QEMU_ANON_RAM_ALLOC_ENABLED 1
#define TRACE_QEMU_VFREE_ENABLED 1
#define TRACE_QEMU_ANON_RAM_FREE_ENABLED 1
#define TRACE_HBITMAP_ITER_SKIP_WORDS_ENABLED 1
#define TRACE_HBITMAP_RESET_ENABLED 1
#define TRACE_HBITMAP_SET_ENABLED 1
#define TRACE_LOCKCNT_FAST_PATH_ATTEMPT_ENABLED 1
#define TRACE_LOCKCNT_FAST_PATH_SUCCESS_ENABLED 1
#define TRACE_LOCKCNT_UNLOCK_ATTEMPT_ENABLED 1
#define TRACE_LOCKCNT_UNLOCK_SUCCESS_ENABLED 1
#define TRACE_LOCKCNT_FUTEX_WAIT_PREPARE_ENABLED 1
#define TRACE_LOCKCNT_FUTEX_WAIT_ENABLED 1
#define TRACE_LOCKCNT_FUTEX_WAIT_RESUME_ENABLED 1
#define TRACE_LOCKCNT_FUTEX_WAKE_ENABLED 1
#define TRACE_SOCKET_LISTEN_ENABLED 1
#define TRACE_QEMU_MUTEX_LOCK_ENABLED 1
#define TRACE_QEMU_MUTEX_LOCKED_ENABLED 1
#define TRACE_QEMU_MUTEX_UNLOCK_ENABLED 1
#define TRACE_QEMU_VFIO_DMA_RESET_TEMPORARY_ENABLED 1
#define TRACE_QEMU_VFIO_RAM_BLOCK_ADDED_ENABLED 1
#define TRACE_QEMU_VFIO_RAM_BLOCK_REMOVED_ENABLED 1
#define TRACE_QEMU_VFIO_FIND_MAPPING_ENABLED 1
#define TRACE_QEMU_VFIO_NEW_MAPPING_ENABLED 1
#define TRACE_QEMU_VFIO_DO_MAPPING_ENABLED 1
#define TRACE_QEMU_VFIO_DMA_MAP_ENABLED 1
#define TRACE_QEMU_VFIO_DMA_UNMAP_ENABLED 1
173 | #include "qemu/log-for-trace.h" |
174 | |
175 | |
/* Dynamic-state check for 'run_poll_handlers_begin' (log backend only). */
#define TRACE_RUN_POLL_HANDLERS_BEGIN_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_RUN_POLL_HANDLERS_BEGIN) || \
    false)

/* Emit the log line when the event is enabled and LOG_TRACE is set;
 * skips the compile-time check (caller trace_run_poll_handlers_begin()
 * already performed it). */
static inline void _nocheck__trace_run_poll_handlers_begin(void * ctx, int64_t max_ns, int64_t timeout)
{
    if (trace_event_get_state(TRACE_RUN_POLL_HANDLERS_BEGIN) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;  /* wall-clock timestamp for the log prefix */
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:run_poll_handlers_begin " "ctx %p max_ns %" PRId64 " timeout %" PRId64 "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , ctx, max_ns, timeout);
    }
}

/* Public tracepoint; 'if (true)' because the event is compiled in
 * unconditionally (TRACE_RUN_POLL_HANDLERS_BEGIN_ENABLED is 1). */
static inline void trace_run_poll_handlers_begin(void * ctx, int64_t max_ns, int64_t timeout)
{
    if (true) {
        _nocheck__trace_run_poll_handlers_begin(ctx, max_ns, timeout);
    }
}
198 | |
/* Dynamic-state check for 'run_poll_handlers_end' (log backend only). */
#define TRACE_RUN_POLL_HANDLERS_END_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_RUN_POLL_HANDLERS_END) || \
    false)

/* Emit the log line when enabled and LOG_TRACE is set; no compile-time check. */
static inline void _nocheck__trace_run_poll_handlers_end(void * ctx, bool progress, int64_t timeout)
{
    if (trace_event_get_state(TRACE_RUN_POLL_HANDLERS_END) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:run_poll_handlers_end " "ctx %p progress %d new timeout %" PRId64 "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , ctx, progress, timeout);
    }
}

/* Public tracepoint; event is compiled in unconditionally. */
static inline void trace_run_poll_handlers_end(void * ctx, bool progress, int64_t timeout)
{
    if (true) {
        _nocheck__trace_run_poll_handlers_end(ctx, progress, timeout);
    }
}
221 | |
/* Dynamic-state check for 'poll_shrink' (log backend only). */
#define TRACE_POLL_SHRINK_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_POLL_SHRINK) || \
    false)

/* Emit the log line when enabled and LOG_TRACE is set.
 * Note: 'new' is a valid identifier in C (this header is not C++-safe). */
static inline void _nocheck__trace_poll_shrink(void * ctx, int64_t old, int64_t new)
{
    if (trace_event_get_state(TRACE_POLL_SHRINK) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:poll_shrink " "ctx %p old %" PRId64" new %" PRId64 "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , ctx, old, new);
    }
}

/* Public tracepoint; event is compiled in unconditionally. */
static inline void trace_poll_shrink(void * ctx, int64_t old, int64_t new)
{
    if (true) {
        _nocheck__trace_poll_shrink(ctx, old, new);
    }
}
244 | |
/* Dynamic-state check for 'poll_grow' (log backend only). */
#define TRACE_POLL_GROW_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_POLL_GROW) || \
    false)

/* Emit the log line when enabled and LOG_TRACE is set. */
static inline void _nocheck__trace_poll_grow(void * ctx, int64_t old, int64_t new)
{
    if (trace_event_get_state(TRACE_POLL_GROW) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:poll_grow " "ctx %p old %" PRId64" new %" PRId64 "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , ctx, old, new);
    }
}

/* Public tracepoint; event is compiled in unconditionally. */
static inline void trace_poll_grow(void * ctx, int64_t old, int64_t new)
{
    if (true) {
        _nocheck__trace_poll_grow(ctx, old, new);
    }
}
267 | |
/* Dynamic-state check for 'aio_co_schedule' (log backend only). */
#define TRACE_AIO_CO_SCHEDULE_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_AIO_CO_SCHEDULE) || \
    false)

/* Emit the log line when enabled and LOG_TRACE is set. */
static inline void _nocheck__trace_aio_co_schedule(void * ctx, void * co)
{
    if (trace_event_get_state(TRACE_AIO_CO_SCHEDULE) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:aio_co_schedule " "ctx %p co %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , ctx, co);
    }
}

/* Public tracepoint; event is compiled in unconditionally. */
static inline void trace_aio_co_schedule(void * ctx, void * co)
{
    if (true) {
        _nocheck__trace_aio_co_schedule(ctx, co);
    }
}
290 | |
/* Dynamic-state check for 'aio_co_schedule_bh_cb' (log backend only). */
#define TRACE_AIO_CO_SCHEDULE_BH_CB_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_AIO_CO_SCHEDULE_BH_CB) || \
    false)

/* Emit the log line when enabled and LOG_TRACE is set. */
static inline void _nocheck__trace_aio_co_schedule_bh_cb(void * ctx, void * co)
{
    if (trace_event_get_state(TRACE_AIO_CO_SCHEDULE_BH_CB) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:aio_co_schedule_bh_cb " "ctx %p co %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , ctx, co);
    }
}

/* Public tracepoint; event is compiled in unconditionally. */
static inline void trace_aio_co_schedule_bh_cb(void * ctx, void * co)
{
    if (true) {
        _nocheck__trace_aio_co_schedule_bh_cb(ctx, co);
    }
}
313 | |
/* Dynamic-state check for 'thread_pool_submit' (log backend only). */
#define TRACE_THREAD_POOL_SUBMIT_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_THREAD_POOL_SUBMIT) || \
    false)

/* Emit the log line when enabled and LOG_TRACE is set. */
static inline void _nocheck__trace_thread_pool_submit(void * pool, void * req, void * opaque)
{
    if (trace_event_get_state(TRACE_THREAD_POOL_SUBMIT) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:thread_pool_submit " "pool %p req %p opaque %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , pool, req, opaque);
    }
}

/* Public tracepoint; event is compiled in unconditionally. */
static inline void trace_thread_pool_submit(void * pool, void * req, void * opaque)
{
    if (true) {
        _nocheck__trace_thread_pool_submit(pool, req, opaque);
    }
}
336 | |
/* Dynamic-state check for 'thread_pool_complete' (log backend only). */
#define TRACE_THREAD_POOL_COMPLETE_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_THREAD_POOL_COMPLETE) || \
    false)

/* Emit the log line when enabled and LOG_TRACE is set. */
static inline void _nocheck__trace_thread_pool_complete(void * pool, void * req, void * opaque, int ret)
{
    if (trace_event_get_state(TRACE_THREAD_POOL_COMPLETE) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:thread_pool_complete " "pool %p req %p opaque %p ret %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , pool, req, opaque, ret);
    }
}

/* Public tracepoint; event is compiled in unconditionally. */
static inline void trace_thread_pool_complete(void * pool, void * req, void * opaque, int ret)
{
    if (true) {
        _nocheck__trace_thread_pool_complete(pool, req, opaque, ret);
    }
}
359 | |
/* Dynamic-state check for 'thread_pool_cancel' (log backend only). */
#define TRACE_THREAD_POOL_CANCEL_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_THREAD_POOL_CANCEL) || \
    false)

/* Emit the log line when enabled and LOG_TRACE is set. */
static inline void _nocheck__trace_thread_pool_cancel(void * req, void * opaque)
{
    if (trace_event_get_state(TRACE_THREAD_POOL_CANCEL) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:thread_pool_cancel " "req %p opaque %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , req, opaque);
    }
}

/* Public tracepoint; event is compiled in unconditionally. */
static inline void trace_thread_pool_cancel(void * req, void * opaque)
{
    if (true) {
        _nocheck__trace_thread_pool_cancel(req, opaque);
    }
}
382 | |
/* Dynamic-state check for 'buffer_resize' (log backend only). */
#define TRACE_BUFFER_RESIZE_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_BUFFER_RESIZE) || \
    false)

/* Emit the log line when enabled and LOG_TRACE is set.
 * NOTE(review): format uses %zd for size_t args (signed/unsigned mismatch);
 * this matches the event definition — confirm against the trace-events source. */
static inline void _nocheck__trace_buffer_resize(const char * buf, size_t olen, size_t len)
{
    if (trace_event_get_state(TRACE_BUFFER_RESIZE) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:buffer_resize " "%s: old %zd, new %zd" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , buf, olen, len);
    }
}

/* Public tracepoint; event is compiled in unconditionally. */
static inline void trace_buffer_resize(const char * buf, size_t olen, size_t len)
{
    if (true) {
        _nocheck__trace_buffer_resize(buf, olen, len);
    }
}
405 | |
/* Dynamic-state check for 'buffer_move_empty' (log backend only). */
#define TRACE_BUFFER_MOVE_EMPTY_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_BUFFER_MOVE_EMPTY) || \
    false)

/* Emit the log line when enabled and LOG_TRACE is set. */
static inline void _nocheck__trace_buffer_move_empty(const char * buf, size_t len, const char * from)
{
    if (trace_event_get_state(TRACE_BUFFER_MOVE_EMPTY) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:buffer_move_empty " "%s: %zd bytes from %s" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , buf, len, from);
    }
}

/* Public tracepoint; event is compiled in unconditionally. */
static inline void trace_buffer_move_empty(const char * buf, size_t len, const char * from)
{
    if (true) {
        _nocheck__trace_buffer_move_empty(buf, len, from);
    }
}
428 | |
/* Dynamic-state check for 'buffer_move' (log backend only). */
#define TRACE_BUFFER_MOVE_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_BUFFER_MOVE) || \
    false)

/* Emit the log line when enabled and LOG_TRACE is set. */
static inline void _nocheck__trace_buffer_move(const char * buf, size_t len, const char * from)
{
    if (trace_event_get_state(TRACE_BUFFER_MOVE) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:buffer_move " "%s: %zd bytes from %s" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , buf, len, from);
    }
}

/* Public tracepoint; event is compiled in unconditionally. */
static inline void trace_buffer_move(const char * buf, size_t len, const char * from)
{
    if (true) {
        _nocheck__trace_buffer_move(buf, len, from);
    }
}
451 | |
/* Dynamic-state check for 'buffer_free' (log backend only). */
#define TRACE_BUFFER_FREE_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_BUFFER_FREE) || \
    false)

/* Emit the log line when enabled and LOG_TRACE is set. */
static inline void _nocheck__trace_buffer_free(const char * buf, size_t len)
{
    if (trace_event_get_state(TRACE_BUFFER_FREE) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:buffer_free " "%s: capacity %zd" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , buf, len);
    }
}

/* Public tracepoint; event is compiled in unconditionally. */
static inline void trace_buffer_free(const char * buf, size_t len)
{
    if (true) {
        _nocheck__trace_buffer_free(buf, len);
    }
}
474 | |
/* Dynamic-state check for 'qemu_file_monitor_add_watch' (log backend only). */
#define TRACE_QEMU_FILE_MONITOR_ADD_WATCH_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_FILE_MONITOR_ADD_WATCH) || \
    false)

/* Emit the log line when enabled and LOG_TRACE is set. */
static inline void _nocheck__trace_qemu_file_monitor_add_watch(void * mon, const char * dirpath, const char * filename, void * cb, void * opaque, int64_t id)
{
    if (trace_event_get_state(TRACE_QEMU_FILE_MONITOR_ADD_WATCH) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_file_monitor_add_watch " "File monitor %p add watch dir='%s' file='%s' cb=%p opaque=%p id=%" PRId64 "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , mon, dirpath, filename, cb, opaque, id);
    }
}

/* Public tracepoint; event is compiled in unconditionally. */
static inline void trace_qemu_file_monitor_add_watch(void * mon, const char * dirpath, const char * filename, void * cb, void * opaque, int64_t id)
{
    if (true) {
        _nocheck__trace_qemu_file_monitor_add_watch(mon, dirpath, filename, cb, opaque, id);
    }
}
497 | |
/* Dynamic-state check for 'qemu_file_monitor_remove_watch' (log backend only). */
#define TRACE_QEMU_FILE_MONITOR_REMOVE_WATCH_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_FILE_MONITOR_REMOVE_WATCH) || \
    false)

/* Emit the log line when enabled and LOG_TRACE is set. */
static inline void _nocheck__trace_qemu_file_monitor_remove_watch(void * mon, const char * dirpath, int64_t id)
{
    if (trace_event_get_state(TRACE_QEMU_FILE_MONITOR_REMOVE_WATCH) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_file_monitor_remove_watch " "File monitor %p remove watch dir='%s' id=%" PRId64 "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , mon, dirpath, id);
    }
}

/* Public tracepoint; event is compiled in unconditionally. */
static inline void trace_qemu_file_monitor_remove_watch(void * mon, const char * dirpath, int64_t id)
{
    if (true) {
        _nocheck__trace_qemu_file_monitor_remove_watch(mon, dirpath, id);
    }
}
520 | |
/* Dynamic-state check for 'qemu_file_monitor_new' (log backend only). */
#define TRACE_QEMU_FILE_MONITOR_NEW_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_FILE_MONITOR_NEW) || \
    false)

/* Emit the log line when enabled and LOG_TRACE is set. */
static inline void _nocheck__trace_qemu_file_monitor_new(void * mon, int fd)
{
    if (trace_event_get_state(TRACE_QEMU_FILE_MONITOR_NEW) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_file_monitor_new " "File monitor %p created fd=%d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , mon, fd);
    }
}

/* Public tracepoint; event is compiled in unconditionally. */
static inline void trace_qemu_file_monitor_new(void * mon, int fd)
{
    if (true) {
        _nocheck__trace_qemu_file_monitor_new(mon, fd);
    }
}
543 | |
/* Dynamic-state check for 'qemu_file_monitor_enable_watch' (log backend only). */
#define TRACE_QEMU_FILE_MONITOR_ENABLE_WATCH_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_FILE_MONITOR_ENABLE_WATCH) || \
    false)

/* Emit the log line when enabled and LOG_TRACE is set.
 * NOTE(review): 'id' is declared int but printed with %u — looks inherited
 * from the trace-events definition; confirm there before changing. */
static inline void _nocheck__trace_qemu_file_monitor_enable_watch(void * mon, const char * dirpath, int id)
{
    if (trace_event_get_state(TRACE_QEMU_FILE_MONITOR_ENABLE_WATCH) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_file_monitor_enable_watch " "File monitor %p enable watch dir='%s' id=%u" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , mon, dirpath, id);
    }
}

/* Public tracepoint; event is compiled in unconditionally. */
static inline void trace_qemu_file_monitor_enable_watch(void * mon, const char * dirpath, int id)
{
    if (true) {
        _nocheck__trace_qemu_file_monitor_enable_watch(mon, dirpath, id);
    }
}
566 | |
567 | #define TRACE_QEMU_FILE_MONITOR_DISABLE_WATCH_BACKEND_DSTATE() ( \ |
568 | trace_event_get_state_dynamic_by_id(TRACE_QEMU_FILE_MONITOR_DISABLE_WATCH) || \ |
569 | false) |
570 | |
571 | static inline void _nocheck__trace_qemu_file_monitor_disable_watch(void * mon, const char * dirpath, int id) |
572 | { |
573 | if (trace_event_get_state(TRACE_QEMU_FILE_MONITOR_DISABLE_WATCH) && qemu_loglevel_mask(LOG_TRACE)) { |
574 | struct timeval _now; |
575 | gettimeofday(&_now, NULL); |
576 | qemu_log("%d@%zu.%06zu:qemu_file_monitor_disable_watch " "Fle monitor %p disable watch dir='%s' id=%u" "\n" , |
577 | qemu_get_thread_id(), |
578 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
579 | , mon, dirpath, id); |
580 | } |
581 | } |
582 | |
583 | static inline void trace_qemu_file_monitor_disable_watch(void * mon, const char * dirpath, int id) |
584 | { |
585 | if (true) { |
586 | _nocheck__trace_qemu_file_monitor_disable_watch(mon, dirpath, id); |
587 | } |
588 | } |
589 | |
/* Dynamic-state check for 'qemu_file_monitor_event' (log backend only). */
#define TRACE_QEMU_FILE_MONITOR_EVENT_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_FILE_MONITOR_EVENT) || \
    false)

/* Emit the log line when enabled and LOG_TRACE is set. */
static inline void _nocheck__trace_qemu_file_monitor_event(void * mon, const char * dirpath, const char * filename, int mask, unsigned int id)
{
    if (trace_event_get_state(TRACE_QEMU_FILE_MONITOR_EVENT) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_file_monitor_event " "File monitor %p event dir='%s' file='%s' mask=0x%x id=%u" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , mon, dirpath, filename, mask, id);
    }
}

/* Public tracepoint; event is compiled in unconditionally. */
static inline void trace_qemu_file_monitor_event(void * mon, const char * dirpath, const char * filename, int mask, unsigned int id)
{
    if (true) {
        _nocheck__trace_qemu_file_monitor_event(mon, dirpath, filename, mask, id);
    }
}
612 | |
/* Dynamic-state check for 'qemu_file_monitor_dispatch' (log backend only). */
#define TRACE_QEMU_FILE_MONITOR_DISPATCH_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_FILE_MONITOR_DISPATCH) || \
    false)

/* Emit the log line when enabled and LOG_TRACE is set. */
static inline void _nocheck__trace_qemu_file_monitor_dispatch(void * mon, const char * dirpath, const char * filename, int ev, void * cb, void * opaque, int64_t id)
{
    if (trace_event_get_state(TRACE_QEMU_FILE_MONITOR_DISPATCH) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_file_monitor_dispatch " "File monitor %p dispatch dir='%s' file='%s' ev=%d cb=%p opaque=%p id=%" PRId64 "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , mon, dirpath, filename, ev, cb, opaque, id);
    }
}

/* Public tracepoint; event is compiled in unconditionally. */
static inline void trace_qemu_file_monitor_dispatch(void * mon, const char * dirpath, const char * filename, int ev, void * cb, void * opaque, int64_t id)
{
    if (true) {
        _nocheck__trace_qemu_file_monitor_dispatch(mon, dirpath, filename, ev, cb, opaque, id);
    }
}
635 | |
/* Dynamic-state check for 'qemu_aio_coroutine_enter' (log backend only). */
#define TRACE_QEMU_AIO_COROUTINE_ENTER_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_AIO_COROUTINE_ENTER) || \
    false)

/* Emit the log line when enabled and LOG_TRACE is set. */
static inline void _nocheck__trace_qemu_aio_coroutine_enter(void * ctx, void * from, void * to, void * opaque)
{
    if (trace_event_get_state(TRACE_QEMU_AIO_COROUTINE_ENTER) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_aio_coroutine_enter " "ctx %p from %p to %p opaque %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , ctx, from, to, opaque);
    }
}

/* Public tracepoint; event is compiled in unconditionally. */
static inline void trace_qemu_aio_coroutine_enter(void * ctx, void * from, void * to, void * opaque)
{
    if (true) {
        _nocheck__trace_qemu_aio_coroutine_enter(ctx, from, to, opaque);
    }
}
658 | |
/* Dynamic-state check for 'qemu_coroutine_yield' (log backend only). */
#define TRACE_QEMU_COROUTINE_YIELD_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_COROUTINE_YIELD) || \
    false)

/* Emit the log line when enabled and LOG_TRACE is set. */
static inline void _nocheck__trace_qemu_coroutine_yield(void * from, void * to)
{
    if (trace_event_get_state(TRACE_QEMU_COROUTINE_YIELD) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_coroutine_yield " "from %p to %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , from, to);
    }
}

/* Public tracepoint; event is compiled in unconditionally. */
static inline void trace_qemu_coroutine_yield(void * from, void * to)
{
    if (true) {
        _nocheck__trace_qemu_coroutine_yield(from, to);
    }
}
681 | |
/* Dynamic-state check for 'qemu_coroutine_terminate' (log backend only). */
#define TRACE_QEMU_COROUTINE_TERMINATE_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_COROUTINE_TERMINATE) || \
    false)

/* Emit the log line when enabled and LOG_TRACE is set. */
static inline void _nocheck__trace_qemu_coroutine_terminate(void * co)
{
    if (trace_event_get_state(TRACE_QEMU_COROUTINE_TERMINATE) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_coroutine_terminate " "self %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , co);
    }
}

/* Public tracepoint; event is compiled in unconditionally. */
static inline void trace_qemu_coroutine_terminate(void * co)
{
    if (true) {
        _nocheck__trace_qemu_coroutine_terminate(co);
    }
}
704 | |
/* Dynamic-state check for 'qemu_co_mutex_lock_uncontended' (log backend only). */
#define TRACE_QEMU_CO_MUTEX_LOCK_UNCONTENDED_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_CO_MUTEX_LOCK_UNCONTENDED) || \
    false)

/* Emit the log line when enabled and LOG_TRACE is set. */
static inline void _nocheck__trace_qemu_co_mutex_lock_uncontended(void * mutex, void * self)
{
    if (trace_event_get_state(TRACE_QEMU_CO_MUTEX_LOCK_UNCONTENDED) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_co_mutex_lock_uncontended " "mutex %p self %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , mutex, self);
    }
}

/* Public tracepoint; event is compiled in unconditionally. */
static inline void trace_qemu_co_mutex_lock_uncontended(void * mutex, void * self)
{
    if (true) {
        _nocheck__trace_qemu_co_mutex_lock_uncontended(mutex, self);
    }
}
727 | |
/* Dynamic-state check for 'qemu_co_mutex_lock_entry' (log backend only). */
#define TRACE_QEMU_CO_MUTEX_LOCK_ENTRY_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_CO_MUTEX_LOCK_ENTRY) || \
    false)

/* Emit the log line when enabled and LOG_TRACE is set. */
static inline void _nocheck__trace_qemu_co_mutex_lock_entry(void * mutex, void * self)
{
    if (trace_event_get_state(TRACE_QEMU_CO_MUTEX_LOCK_ENTRY) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_co_mutex_lock_entry " "mutex %p self %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , mutex, self);
    }
}

/* Public tracepoint; event is compiled in unconditionally. */
static inline void trace_qemu_co_mutex_lock_entry(void * mutex, void * self)
{
    if (true) {
        _nocheck__trace_qemu_co_mutex_lock_entry(mutex, self);
    }
}
750 | |
/* True when the qemu_co_mutex_lock_return event is dynamically enabled. */
#define TRACE_QEMU_CO_MUTEX_LOCK_RETURN_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_CO_MUTEX_LOCK_RETURN) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments when the event and LOG_TRACE are both enabled. */
static inline void _nocheck__trace_qemu_co_mutex_lock_return(void * mutex, void * self)
{
    if (trace_event_get_state(TRACE_QEMU_CO_MUTEX_LOCK_RETURN) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_co_mutex_lock_return " "mutex %p self %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , mutex, self);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_qemu_co_mutex_lock_return(void * mutex, void * self)
{
    if (true) {
        _nocheck__trace_qemu_co_mutex_lock_return(mutex, self);
    }
}
773 | |
/* True when the qemu_co_mutex_unlock_entry event is dynamically enabled. */
#define TRACE_QEMU_CO_MUTEX_UNLOCK_ENTRY_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_CO_MUTEX_UNLOCK_ENTRY) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments when the event and LOG_TRACE are both enabled. */
static inline void _nocheck__trace_qemu_co_mutex_unlock_entry(void * mutex, void * self)
{
    if (trace_event_get_state(TRACE_QEMU_CO_MUTEX_UNLOCK_ENTRY) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_co_mutex_unlock_entry " "mutex %p self %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , mutex, self);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_qemu_co_mutex_unlock_entry(void * mutex, void * self)
{
    if (true) {
        _nocheck__trace_qemu_co_mutex_unlock_entry(mutex, self);
    }
}
796 | |
/* True when the qemu_co_mutex_unlock_return event is dynamically enabled. */
#define TRACE_QEMU_CO_MUTEX_UNLOCK_RETURN_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_CO_MUTEX_UNLOCK_RETURN) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments when the event and LOG_TRACE are both enabled. */
static inline void _nocheck__trace_qemu_co_mutex_unlock_return(void * mutex, void * self)
{
    if (trace_event_get_state(TRACE_QEMU_CO_MUTEX_UNLOCK_RETURN) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_co_mutex_unlock_return " "mutex %p self %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , mutex, self);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_qemu_co_mutex_unlock_return(void * mutex, void * self)
{
    if (true) {
        _nocheck__trace_qemu_co_mutex_unlock_return(mutex, self);
    }
}
819 | |
/* True when the qemu_memalign event is dynamically enabled. */
#define TRACE_QEMU_MEMALIGN_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_MEMALIGN) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments when the event and LOG_TRACE are both enabled. */
static inline void _nocheck__trace_qemu_memalign(size_t alignment, size_t size, void * ptr)
{
    if (trace_event_get_state(TRACE_QEMU_MEMALIGN) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_memalign " "alignment %zu size %zu ptr %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , alignment, size, ptr);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_qemu_memalign(size_t alignment, size_t size, void * ptr)
{
    if (true) {
        _nocheck__trace_qemu_memalign(alignment, size, ptr);
    }
}
842 | |
/* True when the qemu_anon_ram_alloc event is dynamically enabled. */
#define TRACE_QEMU_ANON_RAM_ALLOC_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_ANON_RAM_ALLOC) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments when the event and LOG_TRACE are both enabled. */
static inline void _nocheck__trace_qemu_anon_ram_alloc(size_t size, void * ptr)
{
    if (trace_event_get_state(TRACE_QEMU_ANON_RAM_ALLOC) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_anon_ram_alloc " "size %zu ptr %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , size, ptr);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_qemu_anon_ram_alloc(size_t size, void * ptr)
{
    if (true) {
        _nocheck__trace_qemu_anon_ram_alloc(size, ptr);
    }
}
865 | |
/* True when the qemu_vfree event is dynamically enabled. */
#define TRACE_QEMU_VFREE_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_VFREE) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments when the event and LOG_TRACE are both enabled. */
static inline void _nocheck__trace_qemu_vfree(void * ptr)
{
    if (trace_event_get_state(TRACE_QEMU_VFREE) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_vfree " "ptr %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , ptr);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_qemu_vfree(void * ptr)
{
    if (true) {
        _nocheck__trace_qemu_vfree(ptr);
    }
}
888 | |
/* True when the qemu_anon_ram_free event is dynamically enabled. */
#define TRACE_QEMU_ANON_RAM_FREE_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_ANON_RAM_FREE) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments when the event and LOG_TRACE are both enabled. */
static inline void _nocheck__trace_qemu_anon_ram_free(void * ptr, size_t size)
{
    if (trace_event_get_state(TRACE_QEMU_ANON_RAM_FREE) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_anon_ram_free " "ptr %p size %zu" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , ptr, size);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_qemu_anon_ram_free(void * ptr, size_t size)
{
    if (true) {
        _nocheck__trace_qemu_anon_ram_free(ptr, size);
    }
}
911 | |
/* True when the hbitmap_iter_skip_words event is dynamically enabled. */
#define TRACE_HBITMAP_ITER_SKIP_WORDS_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_HBITMAP_ITER_SKIP_WORDS) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments when the event and LOG_TRACE are both enabled. */
static inline void _nocheck__trace_hbitmap_iter_skip_words(const void * hb, void * hbi, uint64_t pos, unsigned long cur)
{
    if (trace_event_get_state(TRACE_HBITMAP_ITER_SKIP_WORDS) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:hbitmap_iter_skip_words " "hb %p hbi %p pos %" PRId64" cur 0x%lx" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , hb, hbi, pos, cur);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_hbitmap_iter_skip_words(const void * hb, void * hbi, uint64_t pos, unsigned long cur)
{
    if (true) {
        _nocheck__trace_hbitmap_iter_skip_words(hb, hbi, pos, cur);
    }
}
934 | |
/* True when the hbitmap_reset event is dynamically enabled. */
#define TRACE_HBITMAP_RESET_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_HBITMAP_RESET) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments when the event and LOG_TRACE are both enabled. */
static inline void _nocheck__trace_hbitmap_reset(void * hb, uint64_t start, uint64_t count, uint64_t sbit, uint64_t ebit)
{
    if (trace_event_get_state(TRACE_HBITMAP_RESET) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:hbitmap_reset " "hb %p items %" PRIu64",%" PRIu64" bits %" PRIu64"..%" PRIu64 "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , hb, start, count, sbit, ebit);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_hbitmap_reset(void * hb, uint64_t start, uint64_t count, uint64_t sbit, uint64_t ebit)
{
    if (true) {
        _nocheck__trace_hbitmap_reset(hb, start, count, sbit, ebit);
    }
}
957 | |
/* True when the hbitmap_set event is dynamically enabled. */
#define TRACE_HBITMAP_SET_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_HBITMAP_SET) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments when the event and LOG_TRACE are both enabled. */
static inline void _nocheck__trace_hbitmap_set(void * hb, uint64_t start, uint64_t count, uint64_t sbit, uint64_t ebit)
{
    if (trace_event_get_state(TRACE_HBITMAP_SET) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:hbitmap_set " "hb %p items %" PRIu64",%" PRIu64" bits %" PRIu64"..%" PRIu64 "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , hb, start, count, sbit, ebit);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_hbitmap_set(void * hb, uint64_t start, uint64_t count, uint64_t sbit, uint64_t ebit)
{
    if (true) {
        _nocheck__trace_hbitmap_set(hb, start, count, sbit, ebit);
    }
}
980 | |
/* True when the lockcnt_fast_path_attempt event is dynamically enabled. */
#define TRACE_LOCKCNT_FAST_PATH_ATTEMPT_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_LOCKCNT_FAST_PATH_ATTEMPT) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments when the event and LOG_TRACE are both enabled.
 * Note: "new" as a parameter name is valid C (this header is not C++-safe). */
static inline void _nocheck__trace_lockcnt_fast_path_attempt(const void * lockcnt, int expected, int new)
{
    if (trace_event_get_state(TRACE_LOCKCNT_FAST_PATH_ATTEMPT) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:lockcnt_fast_path_attempt " "lockcnt %p fast path %d->%d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , lockcnt, expected, new);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_lockcnt_fast_path_attempt(const void * lockcnt, int expected, int new)
{
    if (true) {
        _nocheck__trace_lockcnt_fast_path_attempt(lockcnt, expected, new);
    }
}
1003 | |
/* True when the lockcnt_fast_path_success event is dynamically enabled. */
#define TRACE_LOCKCNT_FAST_PATH_SUCCESS_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_LOCKCNT_FAST_PATH_SUCCESS) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments when the event and LOG_TRACE are both enabled. */
static inline void _nocheck__trace_lockcnt_fast_path_success(const void * lockcnt, int expected, int new)
{
    if (trace_event_get_state(TRACE_LOCKCNT_FAST_PATH_SUCCESS) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:lockcnt_fast_path_success " "lockcnt %p fast path %d->%d succeeded" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , lockcnt, expected, new);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_lockcnt_fast_path_success(const void * lockcnt, int expected, int new)
{
    if (true) {
        _nocheck__trace_lockcnt_fast_path_success(lockcnt, expected, new);
    }
}
1026 | |
/* True when the lockcnt_unlock_attempt event is dynamically enabled. */
#define TRACE_LOCKCNT_UNLOCK_ATTEMPT_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_LOCKCNT_UNLOCK_ATTEMPT) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments when the event and LOG_TRACE are both enabled. */
static inline void _nocheck__trace_lockcnt_unlock_attempt(const void * lockcnt, int expected, int new)
{
    if (trace_event_get_state(TRACE_LOCKCNT_UNLOCK_ATTEMPT) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:lockcnt_unlock_attempt " "lockcnt %p unlock %d->%d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , lockcnt, expected, new);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_lockcnt_unlock_attempt(const void * lockcnt, int expected, int new)
{
    if (true) {
        _nocheck__trace_lockcnt_unlock_attempt(lockcnt, expected, new);
    }
}
1049 | |
/* True when the lockcnt_unlock_success event is dynamically enabled. */
#define TRACE_LOCKCNT_UNLOCK_SUCCESS_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_LOCKCNT_UNLOCK_SUCCESS) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments when the event and LOG_TRACE are both enabled. */
static inline void _nocheck__trace_lockcnt_unlock_success(const void * lockcnt, int expected, int new)
{
    if (trace_event_get_state(TRACE_LOCKCNT_UNLOCK_SUCCESS) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:lockcnt_unlock_success " "lockcnt %p unlock %d->%d succeeded" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , lockcnt, expected, new);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_lockcnt_unlock_success(const void * lockcnt, int expected, int new)
{
    if (true) {
        _nocheck__trace_lockcnt_unlock_success(lockcnt, expected, new);
    }
}
1072 | |
/* True when the lockcnt_futex_wait_prepare event is dynamically enabled. */
#define TRACE_LOCKCNT_FUTEX_WAIT_PREPARE_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_LOCKCNT_FUTEX_WAIT_PREPARE) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments when the event and LOG_TRACE are both enabled. */
static inline void _nocheck__trace_lockcnt_futex_wait_prepare(const void * lockcnt, int expected, int new)
{
    if (trace_event_get_state(TRACE_LOCKCNT_FUTEX_WAIT_PREPARE) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:lockcnt_futex_wait_prepare " "lockcnt %p preparing slow path %d->%d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , lockcnt, expected, new);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_lockcnt_futex_wait_prepare(const void * lockcnt, int expected, int new)
{
    if (true) {
        _nocheck__trace_lockcnt_futex_wait_prepare(lockcnt, expected, new);
    }
}
1095 | |
/* True when the lockcnt_futex_wait event is dynamically enabled. */
#define TRACE_LOCKCNT_FUTEX_WAIT_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_LOCKCNT_FUTEX_WAIT) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments when the event and LOG_TRACE are both enabled. */
static inline void _nocheck__trace_lockcnt_futex_wait(const void * lockcnt, int val)
{
    if (trace_event_get_state(TRACE_LOCKCNT_FUTEX_WAIT) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:lockcnt_futex_wait " "lockcnt %p waiting on %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , lockcnt, val);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_lockcnt_futex_wait(const void * lockcnt, int val)
{
    if (true) {
        _nocheck__trace_lockcnt_futex_wait(lockcnt, val);
    }
}
1118 | |
/* True when the lockcnt_futex_wait_resume event is dynamically enabled. */
#define TRACE_LOCKCNT_FUTEX_WAIT_RESUME_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_LOCKCNT_FUTEX_WAIT_RESUME) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments when the event and LOG_TRACE are both enabled. */
static inline void _nocheck__trace_lockcnt_futex_wait_resume(const void * lockcnt, int new)
{
    if (trace_event_get_state(TRACE_LOCKCNT_FUTEX_WAIT_RESUME) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:lockcnt_futex_wait_resume " "lockcnt %p after wait: %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , lockcnt, new);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_lockcnt_futex_wait_resume(const void * lockcnt, int new)
{
    if (true) {
        _nocheck__trace_lockcnt_futex_wait_resume(lockcnt, new);
    }
}
1141 | |
/* True when the lockcnt_futex_wake event is dynamically enabled. */
#define TRACE_LOCKCNT_FUTEX_WAKE_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_LOCKCNT_FUTEX_WAKE) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments when the event and LOG_TRACE are both enabled. */
static inline void _nocheck__trace_lockcnt_futex_wake(const void * lockcnt)
{
    if (trace_event_get_state(TRACE_LOCKCNT_FUTEX_WAKE) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:lockcnt_futex_wake " "lockcnt %p waking up one waiter" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , lockcnt);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_lockcnt_futex_wake(const void * lockcnt)
{
    if (true) {
        _nocheck__trace_lockcnt_futex_wake(lockcnt);
    }
}
1164 | |
/* True when the socket_listen event is dynamically enabled. */
#define TRACE_SOCKET_LISTEN_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SOCKET_LISTEN) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments when the event and LOG_TRACE are both enabled. */
static inline void _nocheck__trace_socket_listen(int num)
{
    if (trace_event_get_state(TRACE_SOCKET_LISTEN) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:socket_listen " "backlog: %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , num);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_socket_listen(int num)
{
    if (true) {
        _nocheck__trace_socket_listen(num);
    }
}
1187 | |
/* True when the qemu_mutex_lock event is dynamically enabled. */
#define TRACE_QEMU_MUTEX_LOCK_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_MUTEX_LOCK) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments (caller's file/line included) when the event and
 * LOG_TRACE are both enabled. */
static inline void _nocheck__trace_qemu_mutex_lock(void * mutex, const char * file, const int line)
{
    if (trace_event_get_state(TRACE_QEMU_MUTEX_LOCK) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_mutex_lock " "waiting on mutex %p (%s:%d)" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , mutex, file, line);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_qemu_mutex_lock(void * mutex, const char * file, const int line)
{
    if (true) {
        _nocheck__trace_qemu_mutex_lock(mutex, file, line);
    }
}
1210 | |
/* True when the qemu_mutex_locked event is dynamically enabled. */
#define TRACE_QEMU_MUTEX_LOCKED_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_MUTEX_LOCKED) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments (caller's file/line included) when the event and
 * LOG_TRACE are both enabled. */
static inline void _nocheck__trace_qemu_mutex_locked(void * mutex, const char * file, const int line)
{
    if (trace_event_get_state(TRACE_QEMU_MUTEX_LOCKED) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_mutex_locked " "taken mutex %p (%s:%d)" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , mutex, file, line);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_qemu_mutex_locked(void * mutex, const char * file, const int line)
{
    if (true) {
        _nocheck__trace_qemu_mutex_locked(mutex, file, line);
    }
}
1233 | |
/* True when the qemu_mutex_unlock event is dynamically enabled. */
#define TRACE_QEMU_MUTEX_UNLOCK_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_MUTEX_UNLOCK) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments (caller's file/line included) when the event and
 * LOG_TRACE are both enabled. */
static inline void _nocheck__trace_qemu_mutex_unlock(void * mutex, const char * file, const int line)
{
    if (trace_event_get_state(TRACE_QEMU_MUTEX_UNLOCK) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_mutex_unlock " "released mutex %p (%s:%d)" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , mutex, file, line);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_qemu_mutex_unlock(void * mutex, const char * file, const int line)
{
    if (true) {
        _nocheck__trace_qemu_mutex_unlock(mutex, file, line);
    }
}
1256 | |
/* True when the qemu_vfio_dma_reset_temporary event is dynamically enabled. */
#define TRACE_QEMU_VFIO_DMA_RESET_TEMPORARY_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_VFIO_DMA_RESET_TEMPORARY) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments when the event and LOG_TRACE are both enabled. */
static inline void _nocheck__trace_qemu_vfio_dma_reset_temporary(void * s)
{
    if (trace_event_get_state(TRACE_QEMU_VFIO_DMA_RESET_TEMPORARY) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_vfio_dma_reset_temporary " "s %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_qemu_vfio_dma_reset_temporary(void * s)
{
    if (true) {
        _nocheck__trace_qemu_vfio_dma_reset_temporary(s);
    }
}
1279 | |
/* True when the qemu_vfio_ram_block_added event is dynamically enabled. */
#define TRACE_QEMU_VFIO_RAM_BLOCK_ADDED_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_VFIO_RAM_BLOCK_ADDED) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments when the event and LOG_TRACE are both enabled. */
static inline void _nocheck__trace_qemu_vfio_ram_block_added(void * s, void * p, size_t size)
{
    if (trace_event_get_state(TRACE_QEMU_VFIO_RAM_BLOCK_ADDED) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_vfio_ram_block_added " "s %p host %p size 0x%zx" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s, p, size);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_qemu_vfio_ram_block_added(void * s, void * p, size_t size)
{
    if (true) {
        _nocheck__trace_qemu_vfio_ram_block_added(s, p, size);
    }
}
1302 | |
/* True when the qemu_vfio_ram_block_removed event is dynamically enabled. */
#define TRACE_QEMU_VFIO_RAM_BLOCK_REMOVED_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_VFIO_RAM_BLOCK_REMOVED) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments when the event and LOG_TRACE are both enabled. */
static inline void _nocheck__trace_qemu_vfio_ram_block_removed(void * s, void * p, size_t size)
{
    if (trace_event_get_state(TRACE_QEMU_VFIO_RAM_BLOCK_REMOVED) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_vfio_ram_block_removed " "s %p host %p size 0x%zx" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s, p, size);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_qemu_vfio_ram_block_removed(void * s, void * p, size_t size)
{
    if (true) {
        _nocheck__trace_qemu_vfio_ram_block_removed(s, p, size);
    }
}
1325 | |
/* True when the qemu_vfio_find_mapping event is dynamically enabled. */
#define TRACE_QEMU_VFIO_FIND_MAPPING_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_VFIO_FIND_MAPPING) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments when the event and LOG_TRACE are both enabled. */
static inline void _nocheck__trace_qemu_vfio_find_mapping(void * s, void * p)
{
    if (trace_event_get_state(TRACE_QEMU_VFIO_FIND_MAPPING) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_vfio_find_mapping " "s %p host %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s, p);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_qemu_vfio_find_mapping(void * s, void * p)
{
    if (true) {
        _nocheck__trace_qemu_vfio_find_mapping(s, p);
    }
}
1348 | |
/* True when the qemu_vfio_new_mapping event is dynamically enabled. */
#define TRACE_QEMU_VFIO_NEW_MAPPING_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_VFIO_NEW_MAPPING) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments when the event and LOG_TRACE are both enabled. */
static inline void _nocheck__trace_qemu_vfio_new_mapping(void * s, void * host, size_t size, int index, uint64_t iova)
{
    if (trace_event_get_state(TRACE_QEMU_VFIO_NEW_MAPPING) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_vfio_new_mapping " "s %p host %p size %zu index %d iova 0x%" PRIx64 "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s, host, size, index, iova);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_qemu_vfio_new_mapping(void * s, void * host, size_t size, int index, uint64_t iova)
{
    if (true) {
        _nocheck__trace_qemu_vfio_new_mapping(s, host, size, index, iova);
    }
}
1371 | |
/* True when the qemu_vfio_do_mapping event is dynamically enabled. */
#define TRACE_QEMU_VFIO_DO_MAPPING_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_VFIO_DO_MAPPING) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments when the event and LOG_TRACE are both enabled. */
static inline void _nocheck__trace_qemu_vfio_do_mapping(void * s, void * host, size_t size, uint64_t iova)
{
    if (trace_event_get_state(TRACE_QEMU_VFIO_DO_MAPPING) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_vfio_do_mapping " "s %p host %p size %zu iova 0x%" PRIx64 "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s, host, size, iova);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_qemu_vfio_do_mapping(void * s, void * host, size_t size, uint64_t iova)
{
    if (true) {
        _nocheck__trace_qemu_vfio_do_mapping(s, host, size, iova);
    }
}
1394 | |
/* True when the qemu_vfio_dma_map event is dynamically enabled. */
#define TRACE_QEMU_VFIO_DMA_MAP_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_VFIO_DMA_MAP) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments when the event and LOG_TRACE are both enabled.
 * Note: iova is logged as the pointer itself (%p), not the pointed-to value. */
static inline void _nocheck__trace_qemu_vfio_dma_map(void * s, void * host, size_t size, bool temporary, uint64_t * iova)
{
    if (trace_event_get_state(TRACE_QEMU_VFIO_DMA_MAP) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_vfio_dma_map " "s %p host %p size %zu temporary %d iova %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s, host, size, temporary, iova);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_qemu_vfio_dma_map(void * s, void * host, size_t size, bool temporary, uint64_t * iova)
{
    if (true) {
        _nocheck__trace_qemu_vfio_dma_map(s, host, size, temporary, iova);
    }
}
1417 | |
/* True when the qemu_vfio_dma_unmap event is dynamically enabled. */
#define TRACE_QEMU_VFIO_DMA_UNMAP_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QEMU_VFIO_DMA_UNMAP) || \
    false)

/* Log-backend emitter: writes thread id, gettimeofday() timestamp and the
 * event arguments when the event and LOG_TRACE are both enabled. */
static inline void _nocheck__trace_qemu_vfio_dma_unmap(void * s, void * host)
{
    if (trace_event_get_state(TRACE_QEMU_VFIO_DMA_UNMAP) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qemu_vfio_dma_unmap " "s %p host %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s, host);
    }
}

/* Public trace point; "if (true)" is the generated static-state guard. */
static inline void trace_qemu_vfio_dma_unmap(void * s, void * host)
{
    if (true) {
        _nocheck__trace_qemu_vfio_dma_unmap(s, host);
    }
}
1440 | #endif /* TRACE_UTIL_GENERATED_TRACERS_H */ |
1441 | |