1 | /* This file is autogenerated by tracetool, do not edit. */ |
2 | |
3 | #ifndef TRACE_BLOCK_GENERATED_TRACERS_H |
4 | #define TRACE_BLOCK_GENERATED_TRACERS_H |
5 | |
6 | #include "trace/control.h" |
7 | |
8 | extern TraceEvent _TRACE_BDRV_OPEN_COMMON_EVENT; |
9 | extern TraceEvent _TRACE_BDRV_LOCK_MEDIUM_EVENT; |
10 | extern TraceEvent _TRACE_BLK_CO_PREADV_EVENT; |
11 | extern TraceEvent _TRACE_BLK_CO_PWRITEV_EVENT; |
12 | extern TraceEvent _TRACE_BLK_ROOT_ATTACH_EVENT; |
13 | extern TraceEvent _TRACE_BLK_ROOT_DETACH_EVENT; |
14 | extern TraceEvent _TRACE_BDRV_CO_PREADV_EVENT; |
15 | extern TraceEvent _TRACE_BDRV_CO_PWRITEV_EVENT; |
16 | extern TraceEvent _TRACE_BDRV_CO_PWRITE_ZEROES_EVENT; |
17 | extern TraceEvent _TRACE_BDRV_CO_DO_COPY_ON_READV_EVENT; |
18 | extern TraceEvent _TRACE_BDRV_CO_COPY_RANGE_FROM_EVENT; |
19 | extern TraceEvent _TRACE_BDRV_CO_COPY_RANGE_TO_EVENT; |
20 | extern TraceEvent _TRACE_STREAM_ONE_ITERATION_EVENT; |
21 | extern TraceEvent _TRACE_STREAM_START_EVENT; |
22 | extern TraceEvent _TRACE_COMMIT_ONE_ITERATION_EVENT; |
23 | extern TraceEvent _TRACE_COMMIT_START_EVENT; |
24 | extern TraceEvent _TRACE_MIRROR_START_EVENT; |
25 | extern TraceEvent _TRACE_MIRROR_RESTART_ITER_EVENT; |
26 | extern TraceEvent _TRACE_MIRROR_BEFORE_FLUSH_EVENT; |
27 | extern TraceEvent _TRACE_MIRROR_BEFORE_DRAIN_EVENT; |
28 | extern TraceEvent _TRACE_MIRROR_BEFORE_SLEEP_EVENT; |
29 | extern TraceEvent _TRACE_MIRROR_ONE_ITERATION_EVENT; |
30 | extern TraceEvent _TRACE_MIRROR_ITERATION_DONE_EVENT; |
31 | extern TraceEvent _TRACE_MIRROR_YIELD_EVENT; |
32 | extern TraceEvent _TRACE_MIRROR_YIELD_IN_FLIGHT_EVENT; |
33 | extern TraceEvent _TRACE_BACKUP_DO_COW_ENTER_EVENT; |
34 | extern TraceEvent _TRACE_BACKUP_DO_COW_RETURN_EVENT; |
35 | extern TraceEvent _TRACE_BACKUP_DO_COW_SKIP_EVENT; |
36 | extern TraceEvent _TRACE_BACKUP_DO_COW_SKIP_RANGE_EVENT; |
37 | extern TraceEvent _TRACE_BACKUP_DO_COW_PROCESS_EVENT; |
38 | extern TraceEvent _TRACE_BACKUP_DO_COW_READ_FAIL_EVENT; |
39 | extern TraceEvent _TRACE_BACKUP_DO_COW_WRITE_FAIL_EVENT; |
40 | extern TraceEvent _TRACE_BACKUP_DO_COW_COPY_RANGE_FAIL_EVENT; |
41 | extern TraceEvent _TRACE_QMP_BLOCK_JOB_CANCEL_EVENT; |
42 | extern TraceEvent _TRACE_QMP_BLOCK_JOB_PAUSE_EVENT; |
43 | extern TraceEvent _TRACE_QMP_BLOCK_JOB_RESUME_EVENT; |
44 | extern TraceEvent _TRACE_QMP_BLOCK_JOB_COMPLETE_EVENT; |
45 | extern TraceEvent _TRACE_QMP_BLOCK_JOB_FINALIZE_EVENT; |
46 | extern TraceEvent _TRACE_QMP_BLOCK_JOB_DISMISS_EVENT; |
47 | extern TraceEvent _TRACE_QMP_BLOCK_STREAM_EVENT; |
48 | extern TraceEvent _TRACE_FILE_PAIO_SUBMIT_EVENT; |
49 | extern TraceEvent _TRACE_FILE_COPY_FILE_RANGE_EVENT; |
50 | extern TraceEvent _TRACE_QCOW2_WRITEV_START_REQ_EVENT; |
51 | extern TraceEvent _TRACE_QCOW2_WRITEV_DONE_REQ_EVENT; |
52 | extern TraceEvent _TRACE_QCOW2_WRITEV_START_PART_EVENT; |
53 | extern TraceEvent _TRACE_QCOW2_WRITEV_DONE_PART_EVENT; |
54 | extern TraceEvent _TRACE_QCOW2_WRITEV_DATA_EVENT; |
55 | extern TraceEvent _TRACE_QCOW2_PWRITE_ZEROES_START_REQ_EVENT; |
56 | extern TraceEvent _TRACE_QCOW2_PWRITE_ZEROES_EVENT; |
57 | extern TraceEvent _TRACE_QCOW2_SKIP_COW_EVENT; |
58 | extern TraceEvent _TRACE_QCOW2_ALLOC_CLUSTERS_OFFSET_EVENT; |
59 | extern TraceEvent _TRACE_QCOW2_HANDLE_COPIED_EVENT; |
60 | extern TraceEvent _TRACE_QCOW2_HANDLE_ALLOC_EVENT; |
61 | extern TraceEvent _TRACE_QCOW2_DO_ALLOC_CLUSTERS_OFFSET_EVENT; |
62 | extern TraceEvent _TRACE_QCOW2_CLUSTER_ALLOC_PHYS_EVENT; |
63 | extern TraceEvent _TRACE_QCOW2_CLUSTER_LINK_L2_EVENT; |
64 | extern TraceEvent _TRACE_QCOW2_L2_ALLOCATE_EVENT; |
65 | extern TraceEvent _TRACE_QCOW2_L2_ALLOCATE_GET_EMPTY_EVENT; |
66 | extern TraceEvent _TRACE_QCOW2_L2_ALLOCATE_WRITE_L2_EVENT; |
67 | extern TraceEvent _TRACE_QCOW2_L2_ALLOCATE_WRITE_L1_EVENT; |
68 | extern TraceEvent _TRACE_QCOW2_L2_ALLOCATE_DONE_EVENT; |
69 | extern TraceEvent _TRACE_QCOW2_CACHE_GET_EVENT; |
70 | extern TraceEvent _TRACE_QCOW2_CACHE_GET_REPLACE_ENTRY_EVENT; |
71 | extern TraceEvent _TRACE_QCOW2_CACHE_GET_READ_EVENT; |
72 | extern TraceEvent _TRACE_QCOW2_CACHE_GET_DONE_EVENT; |
73 | extern TraceEvent _TRACE_QCOW2_CACHE_FLUSH_EVENT; |
74 | extern TraceEvent _TRACE_QCOW2_CACHE_ENTRY_FLUSH_EVENT; |
75 | extern TraceEvent _TRACE_QCOW2_PROCESS_DISCARDS_FAILED_REGION_EVENT; |
76 | extern TraceEvent _TRACE_QED_ALLOC_L2_CACHE_ENTRY_EVENT; |
77 | extern TraceEvent _TRACE_QED_UNREF_L2_CACHE_ENTRY_EVENT; |
78 | extern TraceEvent _TRACE_QED_FIND_L2_CACHE_ENTRY_EVENT; |
79 | extern TraceEvent _TRACE_QED_READ_TABLE_EVENT; |
80 | extern TraceEvent _TRACE_QED_READ_TABLE_CB_EVENT; |
81 | extern TraceEvent _TRACE_QED_WRITE_TABLE_EVENT; |
82 | extern TraceEvent _TRACE_QED_WRITE_TABLE_CB_EVENT; |
83 | extern TraceEvent _TRACE_QED_NEED_CHECK_TIMER_CB_EVENT; |
84 | extern TraceEvent _TRACE_QED_START_NEED_CHECK_TIMER_EVENT; |
85 | extern TraceEvent _TRACE_QED_CANCEL_NEED_CHECK_TIMER_EVENT; |
86 | extern TraceEvent _TRACE_QED_AIO_COMPLETE_EVENT; |
87 | extern TraceEvent _TRACE_QED_AIO_SETUP_EVENT; |
88 | extern TraceEvent _TRACE_QED_AIO_NEXT_IO_EVENT; |
89 | extern TraceEvent _TRACE_QED_AIO_READ_DATA_EVENT; |
90 | extern TraceEvent _TRACE_QED_AIO_WRITE_DATA_EVENT; |
91 | extern TraceEvent _TRACE_QED_AIO_WRITE_PREFILL_EVENT; |
92 | extern TraceEvent _TRACE_QED_AIO_WRITE_POSTFILL_EVENT; |
93 | extern TraceEvent _TRACE_QED_AIO_WRITE_MAIN_EVENT; |
94 | extern TraceEvent _TRACE_VXHS_IIO_CALLBACK_EVENT; |
95 | extern TraceEvent _TRACE_VXHS_IIO_CALLBACK_CHNFAIL_EVENT; |
96 | extern TraceEvent _TRACE_VXHS_IIO_CALLBACK_UNKNWN_EVENT; |
97 | extern TraceEvent _TRACE_VXHS_AIO_RW_INVALID_EVENT; |
98 | extern TraceEvent _TRACE_VXHS_AIO_RW_IOERR_EVENT; |
99 | extern TraceEvent _TRACE_VXHS_GET_VDISK_STAT_ERR_EVENT; |
100 | extern TraceEvent _TRACE_VXHS_GET_VDISK_STAT_EVENT; |
101 | extern TraceEvent _TRACE_VXHS_COMPLETE_AIO_EVENT; |
102 | extern TraceEvent _TRACE_VXHS_PARSE_URI_FILENAME_EVENT; |
103 | extern TraceEvent _TRACE_VXHS_OPEN_VDISKID_EVENT; |
104 | extern TraceEvent _TRACE_VXHS_OPEN_HOSTINFO_EVENT; |
105 | extern TraceEvent _TRACE_VXHS_OPEN_IIO_OPEN_EVENT; |
106 | extern TraceEvent _TRACE_VXHS_PARSE_URI_HOSTINFO_EVENT; |
107 | extern TraceEvent _TRACE_VXHS_CLOSE_EVENT; |
108 | extern TraceEvent _TRACE_VXHS_GET_CREDS_EVENT; |
109 | extern TraceEvent _TRACE_NVME_KICK_EVENT; |
110 | extern TraceEvent _TRACE_NVME_DMA_FLUSH_QUEUE_WAIT_EVENT; |
111 | extern TraceEvent _TRACE_NVME_ERROR_EVENT; |
112 | extern TraceEvent _TRACE_NVME_PROCESS_COMPLETION_EVENT; |
113 | extern TraceEvent _TRACE_NVME_PROCESS_COMPLETION_QUEUE_BUSY_EVENT; |
114 | extern TraceEvent _TRACE_NVME_COMPLETE_COMMAND_EVENT; |
115 | extern TraceEvent _TRACE_NVME_SUBMIT_COMMAND_EVENT; |
116 | extern TraceEvent _TRACE_NVME_SUBMIT_COMMAND_RAW_EVENT; |
117 | extern TraceEvent _TRACE_NVME_HANDLE_EVENT_EVENT; |
118 | extern TraceEvent _TRACE_NVME_POLL_CB_EVENT; |
119 | extern TraceEvent _TRACE_NVME_PRW_ALIGNED_EVENT; |
120 | extern TraceEvent _TRACE_NVME_QIOV_UNALIGNED_EVENT; |
121 | extern TraceEvent _TRACE_NVME_PRW_BUFFERED_EVENT; |
122 | extern TraceEvent _TRACE_NVME_RW_DONE_EVENT; |
123 | extern TraceEvent _TRACE_NVME_DMA_MAP_FLUSH_EVENT; |
124 | extern TraceEvent _TRACE_NVME_FREE_REQ_QUEUE_WAIT_EVENT; |
125 | extern TraceEvent _TRACE_NVME_CMD_MAP_QIOV_EVENT; |
126 | extern TraceEvent _TRACE_NVME_CMD_MAP_QIOV_PAGES_EVENT; |
127 | extern TraceEvent _TRACE_NVME_CMD_MAP_QIOV_IOV_EVENT; |
128 | extern TraceEvent _TRACE_ISCSI_XCOPY_EVENT; |
129 | extern TraceEvent _TRACE_NBD_PARSE_BLOCKSTATUS_COMPLIANCE_EVENT; |
130 | extern TraceEvent _TRACE_NBD_STRUCTURED_READ_COMPLIANCE_EVENT; |
131 | extern TraceEvent _TRACE_NBD_READ_REPLY_ENTRY_FAIL_EVENT; |
132 | extern TraceEvent _TRACE_NBD_CO_REQUEST_FAIL_EVENT; |
133 | extern TraceEvent _TRACE_NBD_CLIENT_CONNECT_EVENT; |
134 | extern TraceEvent _TRACE_NBD_CLIENT_CONNECT_SUCCESS_EVENT; |
135 | extern TraceEvent _TRACE_SSH_RESTART_COROUTINE_EVENT; |
136 | extern TraceEvent _TRACE_SSH_FLUSH_EVENT; |
137 | extern TraceEvent _TRACE_SSH_CHECK_HOST_KEY_KNOWNHOSTS_EVENT; |
138 | extern TraceEvent _TRACE_SSH_CONNECT_TO_SSH_EVENT; |
139 | extern TraceEvent _TRACE_SSH_CO_YIELD_EVENT; |
140 | extern TraceEvent _TRACE_SSH_CO_YIELD_BACK_EVENT; |
141 | extern TraceEvent _TRACE_SSH_GETLENGTH_EVENT; |
142 | extern TraceEvent _TRACE_SSH_CO_CREATE_OPTS_EVENT; |
143 | extern TraceEvent _TRACE_SSH_READ_EVENT; |
144 | extern TraceEvent _TRACE_SSH_READ_BUF_EVENT; |
145 | extern TraceEvent _TRACE_SSH_READ_RETURN_EVENT; |
146 | extern TraceEvent _TRACE_SSH_WRITE_EVENT; |
147 | extern TraceEvent _TRACE_SSH_WRITE_BUF_EVENT; |
148 | extern TraceEvent _TRACE_SSH_WRITE_RETURN_EVENT; |
149 | extern TraceEvent _TRACE_SSH_SEEK_EVENT; |
150 | extern TraceEvent _TRACE_SSH_AUTH_METHODS_EVENT; |
151 | extern TraceEvent _TRACE_SSH_SERVER_STATUS_EVENT; |
152 | extern TraceEvent _TRACE_CURL_TIMER_CB_EVENT; |
153 | extern TraceEvent _TRACE_CURL_SOCK_CB_EVENT; |
154 | extern TraceEvent _TRACE_CURL_READ_CB_EVENT; |
155 | extern TraceEvent _TRACE_CURL_OPEN_EVENT; |
156 | extern TraceEvent _TRACE_CURL_OPEN_SIZE_EVENT; |
157 | extern TraceEvent _TRACE_CURL_SETUP_PREADV_EVENT; |
158 | extern TraceEvent _TRACE_CURL_CLOSE_EVENT; |
159 | extern TraceEvent _TRACE_FILE_XFS_WRITE_ZEROES_EVENT; |
160 | extern TraceEvent _TRACE_FILE_XFS_DISCARD_EVENT; |
161 | extern TraceEvent _TRACE_FILE_FINDEJECTABLEOPTICALMEDIA_EVENT; |
162 | extern TraceEvent _TRACE_FILE_SETUP_CDROM_EVENT; |
163 | extern TraceEvent _TRACE_FILE_HDEV_IS_SG_EVENT; |
164 | extern TraceEvent _TRACE_SHEEPDOG_RECONNECT_TO_SDOG_EVENT; |
165 | extern TraceEvent _TRACE_SHEEPDOG_AIO_READ_RESPONSE_EVENT; |
166 | extern TraceEvent _TRACE_SHEEPDOG_OPEN_EVENT; |
167 | extern TraceEvent _TRACE_SHEEPDOG_CLOSE_EVENT; |
168 | extern TraceEvent _TRACE_SHEEPDOG_CREATE_BRANCH_SNAPSHOT_EVENT; |
169 | extern TraceEvent _TRACE_SHEEPDOG_CREATE_BRANCH_CREATED_EVENT; |
170 | extern TraceEvent _TRACE_SHEEPDOG_CREATE_BRANCH_NEW_EVENT; |
171 | extern TraceEvent _TRACE_SHEEPDOG_CO_RW_VECTOR_UPDATE_EVENT; |
172 | extern TraceEvent _TRACE_SHEEPDOG_CO_RW_VECTOR_NEW_EVENT; |
173 | extern TraceEvent _TRACE_SHEEPDOG_SNAPSHOT_CREATE_INFO_EVENT; |
174 | extern TraceEvent _TRACE_SHEEPDOG_SNAPSHOT_CREATE_EVENT; |
175 | extern TraceEvent _TRACE_SHEEPDOG_SNAPSHOT_CREATE_INODE_EVENT; |
176 | extern TraceEvent _TRACE_SFTP_ERROR_EVENT; |
177 | extern uint16_t _TRACE_BDRV_OPEN_COMMON_DSTATE; |
178 | extern uint16_t _TRACE_BDRV_LOCK_MEDIUM_DSTATE; |
179 | extern uint16_t _TRACE_BLK_CO_PREADV_DSTATE; |
180 | extern uint16_t _TRACE_BLK_CO_PWRITEV_DSTATE; |
181 | extern uint16_t _TRACE_BLK_ROOT_ATTACH_DSTATE; |
182 | extern uint16_t _TRACE_BLK_ROOT_DETACH_DSTATE; |
183 | extern uint16_t _TRACE_BDRV_CO_PREADV_DSTATE; |
184 | extern uint16_t _TRACE_BDRV_CO_PWRITEV_DSTATE; |
185 | extern uint16_t _TRACE_BDRV_CO_PWRITE_ZEROES_DSTATE; |
186 | extern uint16_t _TRACE_BDRV_CO_DO_COPY_ON_READV_DSTATE; |
187 | extern uint16_t _TRACE_BDRV_CO_COPY_RANGE_FROM_DSTATE; |
188 | extern uint16_t _TRACE_BDRV_CO_COPY_RANGE_TO_DSTATE; |
189 | extern uint16_t _TRACE_STREAM_ONE_ITERATION_DSTATE; |
190 | extern uint16_t _TRACE_STREAM_START_DSTATE; |
191 | extern uint16_t _TRACE_COMMIT_ONE_ITERATION_DSTATE; |
192 | extern uint16_t _TRACE_COMMIT_START_DSTATE; |
193 | extern uint16_t _TRACE_MIRROR_START_DSTATE; |
194 | extern uint16_t _TRACE_MIRROR_RESTART_ITER_DSTATE; |
195 | extern uint16_t _TRACE_MIRROR_BEFORE_FLUSH_DSTATE; |
196 | extern uint16_t _TRACE_MIRROR_BEFORE_DRAIN_DSTATE; |
197 | extern uint16_t _TRACE_MIRROR_BEFORE_SLEEP_DSTATE; |
198 | extern uint16_t _TRACE_MIRROR_ONE_ITERATION_DSTATE; |
199 | extern uint16_t _TRACE_MIRROR_ITERATION_DONE_DSTATE; |
200 | extern uint16_t _TRACE_MIRROR_YIELD_DSTATE; |
201 | extern uint16_t _TRACE_MIRROR_YIELD_IN_FLIGHT_DSTATE; |
202 | extern uint16_t _TRACE_BACKUP_DO_COW_ENTER_DSTATE; |
203 | extern uint16_t _TRACE_BACKUP_DO_COW_RETURN_DSTATE; |
204 | extern uint16_t _TRACE_BACKUP_DO_COW_SKIP_DSTATE; |
205 | extern uint16_t _TRACE_BACKUP_DO_COW_SKIP_RANGE_DSTATE; |
206 | extern uint16_t _TRACE_BACKUP_DO_COW_PROCESS_DSTATE; |
207 | extern uint16_t _TRACE_BACKUP_DO_COW_READ_FAIL_DSTATE; |
208 | extern uint16_t _TRACE_BACKUP_DO_COW_WRITE_FAIL_DSTATE; |
209 | extern uint16_t _TRACE_BACKUP_DO_COW_COPY_RANGE_FAIL_DSTATE; |
210 | extern uint16_t _TRACE_QMP_BLOCK_JOB_CANCEL_DSTATE; |
211 | extern uint16_t _TRACE_QMP_BLOCK_JOB_PAUSE_DSTATE; |
212 | extern uint16_t _TRACE_QMP_BLOCK_JOB_RESUME_DSTATE; |
213 | extern uint16_t _TRACE_QMP_BLOCK_JOB_COMPLETE_DSTATE; |
214 | extern uint16_t _TRACE_QMP_BLOCK_JOB_FINALIZE_DSTATE; |
215 | extern uint16_t _TRACE_QMP_BLOCK_JOB_DISMISS_DSTATE; |
216 | extern uint16_t _TRACE_QMP_BLOCK_STREAM_DSTATE; |
217 | extern uint16_t _TRACE_FILE_PAIO_SUBMIT_DSTATE; |
218 | extern uint16_t _TRACE_FILE_COPY_FILE_RANGE_DSTATE; |
219 | extern uint16_t _TRACE_QCOW2_WRITEV_START_REQ_DSTATE; |
220 | extern uint16_t _TRACE_QCOW2_WRITEV_DONE_REQ_DSTATE; |
221 | extern uint16_t _TRACE_QCOW2_WRITEV_START_PART_DSTATE; |
222 | extern uint16_t _TRACE_QCOW2_WRITEV_DONE_PART_DSTATE; |
223 | extern uint16_t _TRACE_QCOW2_WRITEV_DATA_DSTATE; |
224 | extern uint16_t _TRACE_QCOW2_PWRITE_ZEROES_START_REQ_DSTATE; |
225 | extern uint16_t _TRACE_QCOW2_PWRITE_ZEROES_DSTATE; |
226 | extern uint16_t _TRACE_QCOW2_SKIP_COW_DSTATE; |
227 | extern uint16_t _TRACE_QCOW2_ALLOC_CLUSTERS_OFFSET_DSTATE; |
228 | extern uint16_t _TRACE_QCOW2_HANDLE_COPIED_DSTATE; |
229 | extern uint16_t _TRACE_QCOW2_HANDLE_ALLOC_DSTATE; |
230 | extern uint16_t _TRACE_QCOW2_DO_ALLOC_CLUSTERS_OFFSET_DSTATE; |
231 | extern uint16_t _TRACE_QCOW2_CLUSTER_ALLOC_PHYS_DSTATE; |
232 | extern uint16_t _TRACE_QCOW2_CLUSTER_LINK_L2_DSTATE; |
233 | extern uint16_t _TRACE_QCOW2_L2_ALLOCATE_DSTATE; |
234 | extern uint16_t _TRACE_QCOW2_L2_ALLOCATE_GET_EMPTY_DSTATE; |
235 | extern uint16_t _TRACE_QCOW2_L2_ALLOCATE_WRITE_L2_DSTATE; |
236 | extern uint16_t _TRACE_QCOW2_L2_ALLOCATE_WRITE_L1_DSTATE; |
237 | extern uint16_t _TRACE_QCOW2_L2_ALLOCATE_DONE_DSTATE; |
238 | extern uint16_t _TRACE_QCOW2_CACHE_GET_DSTATE; |
239 | extern uint16_t _TRACE_QCOW2_CACHE_GET_REPLACE_ENTRY_DSTATE; |
240 | extern uint16_t _TRACE_QCOW2_CACHE_GET_READ_DSTATE; |
241 | extern uint16_t _TRACE_QCOW2_CACHE_GET_DONE_DSTATE; |
242 | extern uint16_t _TRACE_QCOW2_CACHE_FLUSH_DSTATE; |
243 | extern uint16_t _TRACE_QCOW2_CACHE_ENTRY_FLUSH_DSTATE; |
244 | extern uint16_t _TRACE_QCOW2_PROCESS_DISCARDS_FAILED_REGION_DSTATE; |
245 | extern uint16_t _TRACE_QED_ALLOC_L2_CACHE_ENTRY_DSTATE; |
246 | extern uint16_t _TRACE_QED_UNREF_L2_CACHE_ENTRY_DSTATE; |
247 | extern uint16_t _TRACE_QED_FIND_L2_CACHE_ENTRY_DSTATE; |
248 | extern uint16_t _TRACE_QED_READ_TABLE_DSTATE; |
249 | extern uint16_t _TRACE_QED_READ_TABLE_CB_DSTATE; |
250 | extern uint16_t _TRACE_QED_WRITE_TABLE_DSTATE; |
251 | extern uint16_t _TRACE_QED_WRITE_TABLE_CB_DSTATE; |
252 | extern uint16_t _TRACE_QED_NEED_CHECK_TIMER_CB_DSTATE; |
253 | extern uint16_t _TRACE_QED_START_NEED_CHECK_TIMER_DSTATE; |
254 | extern uint16_t _TRACE_QED_CANCEL_NEED_CHECK_TIMER_DSTATE; |
255 | extern uint16_t _TRACE_QED_AIO_COMPLETE_DSTATE; |
256 | extern uint16_t _TRACE_QED_AIO_SETUP_DSTATE; |
257 | extern uint16_t _TRACE_QED_AIO_NEXT_IO_DSTATE; |
258 | extern uint16_t _TRACE_QED_AIO_READ_DATA_DSTATE; |
259 | extern uint16_t _TRACE_QED_AIO_WRITE_DATA_DSTATE; |
260 | extern uint16_t _TRACE_QED_AIO_WRITE_PREFILL_DSTATE; |
261 | extern uint16_t _TRACE_QED_AIO_WRITE_POSTFILL_DSTATE; |
262 | extern uint16_t _TRACE_QED_AIO_WRITE_MAIN_DSTATE; |
263 | extern uint16_t _TRACE_VXHS_IIO_CALLBACK_DSTATE; |
264 | extern uint16_t _TRACE_VXHS_IIO_CALLBACK_CHNFAIL_DSTATE; |
265 | extern uint16_t _TRACE_VXHS_IIO_CALLBACK_UNKNWN_DSTATE; |
266 | extern uint16_t _TRACE_VXHS_AIO_RW_INVALID_DSTATE; |
267 | extern uint16_t _TRACE_VXHS_AIO_RW_IOERR_DSTATE; |
268 | extern uint16_t _TRACE_VXHS_GET_VDISK_STAT_ERR_DSTATE; |
269 | extern uint16_t _TRACE_VXHS_GET_VDISK_STAT_DSTATE; |
270 | extern uint16_t _TRACE_VXHS_COMPLETE_AIO_DSTATE; |
271 | extern uint16_t _TRACE_VXHS_PARSE_URI_FILENAME_DSTATE; |
272 | extern uint16_t _TRACE_VXHS_OPEN_VDISKID_DSTATE; |
273 | extern uint16_t _TRACE_VXHS_OPEN_HOSTINFO_DSTATE; |
274 | extern uint16_t _TRACE_VXHS_OPEN_IIO_OPEN_DSTATE; |
275 | extern uint16_t _TRACE_VXHS_PARSE_URI_HOSTINFO_DSTATE; |
276 | extern uint16_t _TRACE_VXHS_CLOSE_DSTATE; |
277 | extern uint16_t _TRACE_VXHS_GET_CREDS_DSTATE; |
278 | extern uint16_t _TRACE_NVME_KICK_DSTATE; |
279 | extern uint16_t _TRACE_NVME_DMA_FLUSH_QUEUE_WAIT_DSTATE; |
280 | extern uint16_t _TRACE_NVME_ERROR_DSTATE; |
281 | extern uint16_t _TRACE_NVME_PROCESS_COMPLETION_DSTATE; |
282 | extern uint16_t _TRACE_NVME_PROCESS_COMPLETION_QUEUE_BUSY_DSTATE; |
283 | extern uint16_t _TRACE_NVME_COMPLETE_COMMAND_DSTATE; |
284 | extern uint16_t _TRACE_NVME_SUBMIT_COMMAND_DSTATE; |
285 | extern uint16_t _TRACE_NVME_SUBMIT_COMMAND_RAW_DSTATE; |
286 | extern uint16_t _TRACE_NVME_HANDLE_EVENT_DSTATE; |
287 | extern uint16_t _TRACE_NVME_POLL_CB_DSTATE; |
288 | extern uint16_t _TRACE_NVME_PRW_ALIGNED_DSTATE; |
289 | extern uint16_t _TRACE_NVME_QIOV_UNALIGNED_DSTATE; |
290 | extern uint16_t _TRACE_NVME_PRW_BUFFERED_DSTATE; |
291 | extern uint16_t _TRACE_NVME_RW_DONE_DSTATE; |
292 | extern uint16_t _TRACE_NVME_DMA_MAP_FLUSH_DSTATE; |
293 | extern uint16_t _TRACE_NVME_FREE_REQ_QUEUE_WAIT_DSTATE; |
294 | extern uint16_t _TRACE_NVME_CMD_MAP_QIOV_DSTATE; |
295 | extern uint16_t _TRACE_NVME_CMD_MAP_QIOV_PAGES_DSTATE; |
296 | extern uint16_t _TRACE_NVME_CMD_MAP_QIOV_IOV_DSTATE; |
297 | extern uint16_t _TRACE_ISCSI_XCOPY_DSTATE; |
298 | extern uint16_t _TRACE_NBD_PARSE_BLOCKSTATUS_COMPLIANCE_DSTATE; |
299 | extern uint16_t _TRACE_NBD_STRUCTURED_READ_COMPLIANCE_DSTATE; |
300 | extern uint16_t _TRACE_NBD_READ_REPLY_ENTRY_FAIL_DSTATE; |
301 | extern uint16_t _TRACE_NBD_CO_REQUEST_FAIL_DSTATE; |
302 | extern uint16_t _TRACE_NBD_CLIENT_CONNECT_DSTATE; |
303 | extern uint16_t _TRACE_NBD_CLIENT_CONNECT_SUCCESS_DSTATE; |
304 | extern uint16_t _TRACE_SSH_RESTART_COROUTINE_DSTATE; |
305 | extern uint16_t _TRACE_SSH_FLUSH_DSTATE; |
306 | extern uint16_t _TRACE_SSH_CHECK_HOST_KEY_KNOWNHOSTS_DSTATE; |
307 | extern uint16_t _TRACE_SSH_CONNECT_TO_SSH_DSTATE; |
308 | extern uint16_t _TRACE_SSH_CO_YIELD_DSTATE; |
309 | extern uint16_t _TRACE_SSH_CO_YIELD_BACK_DSTATE; |
310 | extern uint16_t _TRACE_SSH_GETLENGTH_DSTATE; |
311 | extern uint16_t _TRACE_SSH_CO_CREATE_OPTS_DSTATE; |
312 | extern uint16_t _TRACE_SSH_READ_DSTATE; |
313 | extern uint16_t _TRACE_SSH_READ_BUF_DSTATE; |
314 | extern uint16_t _TRACE_SSH_READ_RETURN_DSTATE; |
315 | extern uint16_t _TRACE_SSH_WRITE_DSTATE; |
316 | extern uint16_t _TRACE_SSH_WRITE_BUF_DSTATE; |
317 | extern uint16_t _TRACE_SSH_WRITE_RETURN_DSTATE; |
318 | extern uint16_t _TRACE_SSH_SEEK_DSTATE; |
319 | extern uint16_t _TRACE_SSH_AUTH_METHODS_DSTATE; |
320 | extern uint16_t _TRACE_SSH_SERVER_STATUS_DSTATE; |
321 | extern uint16_t _TRACE_CURL_TIMER_CB_DSTATE; |
322 | extern uint16_t _TRACE_CURL_SOCK_CB_DSTATE; |
323 | extern uint16_t _TRACE_CURL_READ_CB_DSTATE; |
324 | extern uint16_t _TRACE_CURL_OPEN_DSTATE; |
325 | extern uint16_t _TRACE_CURL_OPEN_SIZE_DSTATE; |
326 | extern uint16_t _TRACE_CURL_SETUP_PREADV_DSTATE; |
327 | extern uint16_t _TRACE_CURL_CLOSE_DSTATE; |
328 | extern uint16_t _TRACE_FILE_XFS_WRITE_ZEROES_DSTATE; |
329 | extern uint16_t _TRACE_FILE_XFS_DISCARD_DSTATE; |
330 | extern uint16_t _TRACE_FILE_FINDEJECTABLEOPTICALMEDIA_DSTATE; |
331 | extern uint16_t _TRACE_FILE_SETUP_CDROM_DSTATE; |
332 | extern uint16_t _TRACE_FILE_HDEV_IS_SG_DSTATE; |
333 | extern uint16_t _TRACE_SHEEPDOG_RECONNECT_TO_SDOG_DSTATE; |
334 | extern uint16_t _TRACE_SHEEPDOG_AIO_READ_RESPONSE_DSTATE; |
335 | extern uint16_t _TRACE_SHEEPDOG_OPEN_DSTATE; |
336 | extern uint16_t _TRACE_SHEEPDOG_CLOSE_DSTATE; |
337 | extern uint16_t _TRACE_SHEEPDOG_CREATE_BRANCH_SNAPSHOT_DSTATE; |
338 | extern uint16_t _TRACE_SHEEPDOG_CREATE_BRANCH_CREATED_DSTATE; |
339 | extern uint16_t _TRACE_SHEEPDOG_CREATE_BRANCH_NEW_DSTATE; |
340 | extern uint16_t _TRACE_SHEEPDOG_CO_RW_VECTOR_UPDATE_DSTATE; |
341 | extern uint16_t _TRACE_SHEEPDOG_CO_RW_VECTOR_NEW_DSTATE; |
342 | extern uint16_t _TRACE_SHEEPDOG_SNAPSHOT_CREATE_INFO_DSTATE; |
343 | extern uint16_t _TRACE_SHEEPDOG_SNAPSHOT_CREATE_DSTATE; |
344 | extern uint16_t _TRACE_SHEEPDOG_SNAPSHOT_CREATE_INODE_DSTATE; |
345 | extern uint16_t _TRACE_SFTP_ERROR_DSTATE; |
346 | #define TRACE_BDRV_OPEN_COMMON_ENABLED 1 |
347 | #define TRACE_BDRV_LOCK_MEDIUM_ENABLED 1 |
348 | #define TRACE_BLK_CO_PREADV_ENABLED 1 |
349 | #define TRACE_BLK_CO_PWRITEV_ENABLED 1 |
350 | #define TRACE_BLK_ROOT_ATTACH_ENABLED 1 |
351 | #define TRACE_BLK_ROOT_DETACH_ENABLED 1 |
352 | #define TRACE_BDRV_CO_PREADV_ENABLED 1 |
353 | #define TRACE_BDRV_CO_PWRITEV_ENABLED 1 |
354 | #define TRACE_BDRV_CO_PWRITE_ZEROES_ENABLED 1 |
355 | #define TRACE_BDRV_CO_DO_COPY_ON_READV_ENABLED 1 |
356 | #define TRACE_BDRV_CO_COPY_RANGE_FROM_ENABLED 1 |
357 | #define TRACE_BDRV_CO_COPY_RANGE_TO_ENABLED 1 |
358 | #define TRACE_STREAM_ONE_ITERATION_ENABLED 1 |
359 | #define TRACE_STREAM_START_ENABLED 1 |
360 | #define TRACE_COMMIT_ONE_ITERATION_ENABLED 1 |
361 | #define TRACE_COMMIT_START_ENABLED 1 |
362 | #define TRACE_MIRROR_START_ENABLED 1 |
363 | #define TRACE_MIRROR_RESTART_ITER_ENABLED 1 |
364 | #define TRACE_MIRROR_BEFORE_FLUSH_ENABLED 1 |
365 | #define TRACE_MIRROR_BEFORE_DRAIN_ENABLED 1 |
366 | #define TRACE_MIRROR_BEFORE_SLEEP_ENABLED 1 |
367 | #define TRACE_MIRROR_ONE_ITERATION_ENABLED 1 |
368 | #define TRACE_MIRROR_ITERATION_DONE_ENABLED 1 |
369 | #define TRACE_MIRROR_YIELD_ENABLED 1 |
370 | #define TRACE_MIRROR_YIELD_IN_FLIGHT_ENABLED 1 |
371 | #define TRACE_BACKUP_DO_COW_ENTER_ENABLED 1 |
372 | #define TRACE_BACKUP_DO_COW_RETURN_ENABLED 1 |
373 | #define TRACE_BACKUP_DO_COW_SKIP_ENABLED 1 |
374 | #define TRACE_BACKUP_DO_COW_SKIP_RANGE_ENABLED 1 |
375 | #define TRACE_BACKUP_DO_COW_PROCESS_ENABLED 1 |
376 | #define TRACE_BACKUP_DO_COW_READ_FAIL_ENABLED 1 |
377 | #define TRACE_BACKUP_DO_COW_WRITE_FAIL_ENABLED 1 |
378 | #define TRACE_BACKUP_DO_COW_COPY_RANGE_FAIL_ENABLED 1 |
379 | #define TRACE_QMP_BLOCK_JOB_CANCEL_ENABLED 1 |
380 | #define TRACE_QMP_BLOCK_JOB_PAUSE_ENABLED 1 |
381 | #define TRACE_QMP_BLOCK_JOB_RESUME_ENABLED 1 |
382 | #define TRACE_QMP_BLOCK_JOB_COMPLETE_ENABLED 1 |
383 | #define TRACE_QMP_BLOCK_JOB_FINALIZE_ENABLED 1 |
384 | #define TRACE_QMP_BLOCK_JOB_DISMISS_ENABLED 1 |
385 | #define TRACE_QMP_BLOCK_STREAM_ENABLED 1 |
386 | #define TRACE_FILE_PAIO_SUBMIT_ENABLED 1 |
387 | #define TRACE_FILE_COPY_FILE_RANGE_ENABLED 1 |
388 | #define TRACE_QCOW2_WRITEV_START_REQ_ENABLED 1 |
389 | #define TRACE_QCOW2_WRITEV_DONE_REQ_ENABLED 1 |
390 | #define TRACE_QCOW2_WRITEV_START_PART_ENABLED 1 |
391 | #define TRACE_QCOW2_WRITEV_DONE_PART_ENABLED 1 |
392 | #define TRACE_QCOW2_WRITEV_DATA_ENABLED 1 |
393 | #define TRACE_QCOW2_PWRITE_ZEROES_START_REQ_ENABLED 1 |
394 | #define TRACE_QCOW2_PWRITE_ZEROES_ENABLED 1 |
395 | #define TRACE_QCOW2_SKIP_COW_ENABLED 1 |
396 | #define TRACE_QCOW2_ALLOC_CLUSTERS_OFFSET_ENABLED 1 |
397 | #define TRACE_QCOW2_HANDLE_COPIED_ENABLED 1 |
398 | #define TRACE_QCOW2_HANDLE_ALLOC_ENABLED 1 |
399 | #define TRACE_QCOW2_DO_ALLOC_CLUSTERS_OFFSET_ENABLED 1 |
400 | #define TRACE_QCOW2_CLUSTER_ALLOC_PHYS_ENABLED 1 |
401 | #define TRACE_QCOW2_CLUSTER_LINK_L2_ENABLED 1 |
402 | #define TRACE_QCOW2_L2_ALLOCATE_ENABLED 1 |
403 | #define TRACE_QCOW2_L2_ALLOCATE_GET_EMPTY_ENABLED 1 |
404 | #define TRACE_QCOW2_L2_ALLOCATE_WRITE_L2_ENABLED 1 |
405 | #define TRACE_QCOW2_L2_ALLOCATE_WRITE_L1_ENABLED 1 |
406 | #define TRACE_QCOW2_L2_ALLOCATE_DONE_ENABLED 1 |
407 | #define TRACE_QCOW2_CACHE_GET_ENABLED 1 |
408 | #define TRACE_QCOW2_CACHE_GET_REPLACE_ENTRY_ENABLED 1 |
409 | #define TRACE_QCOW2_CACHE_GET_READ_ENABLED 1 |
410 | #define TRACE_QCOW2_CACHE_GET_DONE_ENABLED 1 |
411 | #define TRACE_QCOW2_CACHE_FLUSH_ENABLED 1 |
412 | #define TRACE_QCOW2_CACHE_ENTRY_FLUSH_ENABLED 1 |
413 | #define TRACE_QCOW2_PROCESS_DISCARDS_FAILED_REGION_ENABLED 1 |
414 | #define TRACE_QED_ALLOC_L2_CACHE_ENTRY_ENABLED 1 |
415 | #define TRACE_QED_UNREF_L2_CACHE_ENTRY_ENABLED 1 |
416 | #define TRACE_QED_FIND_L2_CACHE_ENTRY_ENABLED 1 |
417 | #define TRACE_QED_READ_TABLE_ENABLED 1 |
418 | #define TRACE_QED_READ_TABLE_CB_ENABLED 1 |
419 | #define TRACE_QED_WRITE_TABLE_ENABLED 1 |
420 | #define TRACE_QED_WRITE_TABLE_CB_ENABLED 1 |
421 | #define TRACE_QED_NEED_CHECK_TIMER_CB_ENABLED 1 |
422 | #define TRACE_QED_START_NEED_CHECK_TIMER_ENABLED 1 |
423 | #define TRACE_QED_CANCEL_NEED_CHECK_TIMER_ENABLED 1 |
424 | #define TRACE_QED_AIO_COMPLETE_ENABLED 1 |
425 | #define TRACE_QED_AIO_SETUP_ENABLED 1 |
426 | #define TRACE_QED_AIO_NEXT_IO_ENABLED 1 |
427 | #define TRACE_QED_AIO_READ_DATA_ENABLED 1 |
428 | #define TRACE_QED_AIO_WRITE_DATA_ENABLED 1 |
429 | #define TRACE_QED_AIO_WRITE_PREFILL_ENABLED 1 |
430 | #define TRACE_QED_AIO_WRITE_POSTFILL_ENABLED 1 |
431 | #define TRACE_QED_AIO_WRITE_MAIN_ENABLED 1 |
432 | #define TRACE_VXHS_IIO_CALLBACK_ENABLED 1 |
433 | #define TRACE_VXHS_IIO_CALLBACK_CHNFAIL_ENABLED 1 |
434 | #define TRACE_VXHS_IIO_CALLBACK_UNKNWN_ENABLED 1 |
435 | #define TRACE_VXHS_AIO_RW_INVALID_ENABLED 1 |
436 | #define TRACE_VXHS_AIO_RW_IOERR_ENABLED 1 |
437 | #define TRACE_VXHS_GET_VDISK_STAT_ERR_ENABLED 1 |
438 | #define TRACE_VXHS_GET_VDISK_STAT_ENABLED 1 |
439 | #define TRACE_VXHS_COMPLETE_AIO_ENABLED 1 |
440 | #define TRACE_VXHS_PARSE_URI_FILENAME_ENABLED 1 |
441 | #define TRACE_VXHS_OPEN_VDISKID_ENABLED 1 |
442 | #define TRACE_VXHS_OPEN_HOSTINFO_ENABLED 1 |
443 | #define TRACE_VXHS_OPEN_IIO_OPEN_ENABLED 1 |
444 | #define TRACE_VXHS_PARSE_URI_HOSTINFO_ENABLED 1 |
445 | #define TRACE_VXHS_CLOSE_ENABLED 1 |
446 | #define TRACE_VXHS_GET_CREDS_ENABLED 1 |
447 | #define TRACE_NVME_KICK_ENABLED 1 |
448 | #define TRACE_NVME_DMA_FLUSH_QUEUE_WAIT_ENABLED 1 |
449 | #define TRACE_NVME_ERROR_ENABLED 1 |
450 | #define TRACE_NVME_PROCESS_COMPLETION_ENABLED 1 |
451 | #define TRACE_NVME_PROCESS_COMPLETION_QUEUE_BUSY_ENABLED 1 |
452 | #define TRACE_NVME_COMPLETE_COMMAND_ENABLED 1 |
453 | #define TRACE_NVME_SUBMIT_COMMAND_ENABLED 1 |
454 | #define TRACE_NVME_SUBMIT_COMMAND_RAW_ENABLED 1 |
455 | #define TRACE_NVME_HANDLE_EVENT_ENABLED 1 |
456 | #define TRACE_NVME_POLL_CB_ENABLED 1 |
457 | #define TRACE_NVME_PRW_ALIGNED_ENABLED 1 |
458 | #define TRACE_NVME_QIOV_UNALIGNED_ENABLED 1 |
459 | #define TRACE_NVME_PRW_BUFFERED_ENABLED 1 |
460 | #define TRACE_NVME_RW_DONE_ENABLED 1 |
461 | #define TRACE_NVME_DMA_MAP_FLUSH_ENABLED 1 |
462 | #define TRACE_NVME_FREE_REQ_QUEUE_WAIT_ENABLED 1 |
463 | #define TRACE_NVME_CMD_MAP_QIOV_ENABLED 1 |
464 | #define TRACE_NVME_CMD_MAP_QIOV_PAGES_ENABLED 1 |
465 | #define TRACE_NVME_CMD_MAP_QIOV_IOV_ENABLED 1 |
466 | #define TRACE_ISCSI_XCOPY_ENABLED 1 |
467 | #define TRACE_NBD_PARSE_BLOCKSTATUS_COMPLIANCE_ENABLED 1 |
468 | #define TRACE_NBD_STRUCTURED_READ_COMPLIANCE_ENABLED 1 |
469 | #define TRACE_NBD_READ_REPLY_ENTRY_FAIL_ENABLED 1 |
470 | #define TRACE_NBD_CO_REQUEST_FAIL_ENABLED 1 |
471 | #define TRACE_NBD_CLIENT_CONNECT_ENABLED 1 |
472 | #define TRACE_NBD_CLIENT_CONNECT_SUCCESS_ENABLED 1 |
473 | #define TRACE_SSH_RESTART_COROUTINE_ENABLED 1 |
474 | #define TRACE_SSH_FLUSH_ENABLED 1 |
475 | #define TRACE_SSH_CHECK_HOST_KEY_KNOWNHOSTS_ENABLED 1 |
476 | #define TRACE_SSH_CONNECT_TO_SSH_ENABLED 1 |
477 | #define TRACE_SSH_CO_YIELD_ENABLED 1 |
478 | #define TRACE_SSH_CO_YIELD_BACK_ENABLED 1 |
479 | #define TRACE_SSH_GETLENGTH_ENABLED 1 |
480 | #define TRACE_SSH_CO_CREATE_OPTS_ENABLED 1 |
481 | #define TRACE_SSH_READ_ENABLED 1 |
482 | #define TRACE_SSH_READ_BUF_ENABLED 1 |
483 | #define TRACE_SSH_READ_RETURN_ENABLED 1 |
484 | #define TRACE_SSH_WRITE_ENABLED 1 |
485 | #define TRACE_SSH_WRITE_BUF_ENABLED 1 |
486 | #define TRACE_SSH_WRITE_RETURN_ENABLED 1 |
487 | #define TRACE_SSH_SEEK_ENABLED 1 |
488 | #define TRACE_SSH_AUTH_METHODS_ENABLED 1 |
489 | #define TRACE_SSH_SERVER_STATUS_ENABLED 1 |
490 | #define TRACE_CURL_TIMER_CB_ENABLED 1 |
491 | #define TRACE_CURL_SOCK_CB_ENABLED 1 |
492 | #define TRACE_CURL_READ_CB_ENABLED 1 |
493 | #define TRACE_CURL_OPEN_ENABLED 1 |
494 | #define TRACE_CURL_OPEN_SIZE_ENABLED 1 |
495 | #define TRACE_CURL_SETUP_PREADV_ENABLED 1 |
496 | #define TRACE_CURL_CLOSE_ENABLED 1 |
497 | #define TRACE_FILE_XFS_WRITE_ZEROES_ENABLED 1 |
498 | #define TRACE_FILE_XFS_DISCARD_ENABLED 1 |
499 | #define TRACE_FILE_FINDEJECTABLEOPTICALMEDIA_ENABLED 1 |
500 | #define TRACE_FILE_SETUP_CDROM_ENABLED 1 |
501 | #define TRACE_FILE_HDEV_IS_SG_ENABLED 1 |
502 | #define TRACE_SHEEPDOG_RECONNECT_TO_SDOG_ENABLED 1 |
503 | #define TRACE_SHEEPDOG_AIO_READ_RESPONSE_ENABLED 1 |
504 | #define TRACE_SHEEPDOG_OPEN_ENABLED 1 |
505 | #define TRACE_SHEEPDOG_CLOSE_ENABLED 1 |
506 | #define TRACE_SHEEPDOG_CREATE_BRANCH_SNAPSHOT_ENABLED 1 |
507 | #define TRACE_SHEEPDOG_CREATE_BRANCH_CREATED_ENABLED 1 |
508 | #define TRACE_SHEEPDOG_CREATE_BRANCH_NEW_ENABLED 1 |
509 | #define TRACE_SHEEPDOG_CO_RW_VECTOR_UPDATE_ENABLED 1 |
510 | #define TRACE_SHEEPDOG_CO_RW_VECTOR_NEW_ENABLED 1 |
511 | #define TRACE_SHEEPDOG_SNAPSHOT_CREATE_INFO_ENABLED 1 |
512 | #define TRACE_SHEEPDOG_SNAPSHOT_CREATE_ENABLED 1 |
513 | #define TRACE_SHEEPDOG_SNAPSHOT_CREATE_INODE_ENABLED 1 |
514 | #define TRACE_SFTP_ERROR_ENABLED 1 |
515 | #include "qemu/log-for-trace.h" |
516 | |
517 | |
518 | #define TRACE_BDRV_OPEN_COMMON_BACKEND_DSTATE() ( \ |
519 | trace_event_get_state_dynamic_by_id(TRACE_BDRV_OPEN_COMMON) || \ |
520 | false) |
521 | |
522 | static inline void _nocheck__trace_bdrv_open_common(void * bs, const char * filename, int flags, const char * format_name) |
523 | { |
524 | if (trace_event_get_state(TRACE_BDRV_OPEN_COMMON) && qemu_loglevel_mask(LOG_TRACE)) { |
525 | struct timeval _now; |
526 | gettimeofday(&_now, NULL); |
527 | qemu_log("%d@%zu.%06zu:bdrv_open_common " "bs %p filename \"%s\" flags 0x%x format_name \"%s\"" "\n" , |
528 | qemu_get_thread_id(), |
529 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
530 | , bs, filename, flags, format_name); |
531 | } |
532 | } |
533 | |
/* Public entry point; event is compile-time enabled, so forward directly. */
static inline void trace_bdrv_open_common(void * bs, const char * filename, int flags, const char * format_name)
{
    _nocheck__trace_bdrv_open_common(bs, filename, flags, format_name);
}
540 | |
541 | #define TRACE_BDRV_LOCK_MEDIUM_BACKEND_DSTATE() ( \ |
542 | trace_event_get_state_dynamic_by_id(TRACE_BDRV_LOCK_MEDIUM) || \ |
543 | false) |
544 | |
545 | static inline void _nocheck__trace_bdrv_lock_medium(void * bs, bool locked) |
546 | { |
547 | if (trace_event_get_state(TRACE_BDRV_LOCK_MEDIUM) && qemu_loglevel_mask(LOG_TRACE)) { |
548 | struct timeval _now; |
549 | gettimeofday(&_now, NULL); |
550 | qemu_log("%d@%zu.%06zu:bdrv_lock_medium " "bs %p locked %d" "\n" , |
551 | qemu_get_thread_id(), |
552 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
553 | , bs, locked); |
554 | } |
555 | } |
556 | |
/* Public entry point; event is compile-time enabled, so forward directly. */
static inline void trace_bdrv_lock_medium(void * bs, bool locked)
{
    _nocheck__trace_bdrv_lock_medium(bs, locked);
}
563 | |
564 | #define TRACE_BLK_CO_PREADV_BACKEND_DSTATE() ( \ |
565 | trace_event_get_state_dynamic_by_id(TRACE_BLK_CO_PREADV) || \ |
566 | false) |
567 | |
568 | static inline void _nocheck__trace_blk_co_preadv(void * blk, void * bs, int64_t offset, unsigned int bytes, int flags) |
569 | { |
570 | if (trace_event_get_state(TRACE_BLK_CO_PREADV) && qemu_loglevel_mask(LOG_TRACE)) { |
571 | struct timeval _now; |
572 | gettimeofday(&_now, NULL); |
573 | qemu_log("%d@%zu.%06zu:blk_co_preadv " "blk %p bs %p offset %" PRId64" bytes %u flags 0x%x" "\n" , |
574 | qemu_get_thread_id(), |
575 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
576 | , blk, bs, offset, bytes, flags); |
577 | } |
578 | } |
579 | |
/* Public entry point; event is compile-time enabled, so forward directly. */
static inline void trace_blk_co_preadv(void * blk, void * bs, int64_t offset, unsigned int bytes, int flags)
{
    _nocheck__trace_blk_co_preadv(blk, bs, offset, bytes, flags);
}
586 | |
587 | #define TRACE_BLK_CO_PWRITEV_BACKEND_DSTATE() ( \ |
588 | trace_event_get_state_dynamic_by_id(TRACE_BLK_CO_PWRITEV) || \ |
589 | false) |
590 | |
591 | static inline void _nocheck__trace_blk_co_pwritev(void * blk, void * bs, int64_t offset, unsigned int bytes, int flags) |
592 | { |
593 | if (trace_event_get_state(TRACE_BLK_CO_PWRITEV) && qemu_loglevel_mask(LOG_TRACE)) { |
594 | struct timeval _now; |
595 | gettimeofday(&_now, NULL); |
596 | qemu_log("%d@%zu.%06zu:blk_co_pwritev " "blk %p bs %p offset %" PRId64" bytes %u flags 0x%x" "\n" , |
597 | qemu_get_thread_id(), |
598 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
599 | , blk, bs, offset, bytes, flags); |
600 | } |
601 | } |
602 | |
603 | static inline void trace_blk_co_pwritev(void * blk, void * bs, int64_t offset, unsigned int bytes, int flags) |
604 | { |
605 | if (true) { |
606 | _nocheck__trace_blk_co_pwritev(blk, bs, offset, bytes, flags); |
607 | } |
608 | } |
609 | |
610 | #define TRACE_BLK_ROOT_ATTACH_BACKEND_DSTATE() ( \ |
611 | trace_event_get_state_dynamic_by_id(TRACE_BLK_ROOT_ATTACH) || \ |
612 | false) |
613 | |
614 | static inline void _nocheck__trace_blk_root_attach(void * child, void * blk, void * bs) |
615 | { |
616 | if (trace_event_get_state(TRACE_BLK_ROOT_ATTACH) && qemu_loglevel_mask(LOG_TRACE)) { |
617 | struct timeval _now; |
618 | gettimeofday(&_now, NULL); |
619 | qemu_log("%d@%zu.%06zu:blk_root_attach " "child %p blk %p bs %p" "\n" , |
620 | qemu_get_thread_id(), |
621 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
622 | , child, blk, bs); |
623 | } |
624 | } |
625 | |
626 | static inline void trace_blk_root_attach(void * child, void * blk, void * bs) |
627 | { |
628 | if (true) { |
629 | _nocheck__trace_blk_root_attach(child, blk, bs); |
630 | } |
631 | } |
632 | |
633 | #define TRACE_BLK_ROOT_DETACH_BACKEND_DSTATE() ( \ |
634 | trace_event_get_state_dynamic_by_id(TRACE_BLK_ROOT_DETACH) || \ |
635 | false) |
636 | |
637 | static inline void _nocheck__trace_blk_root_detach(void * child, void * blk, void * bs) |
638 | { |
639 | if (trace_event_get_state(TRACE_BLK_ROOT_DETACH) && qemu_loglevel_mask(LOG_TRACE)) { |
640 | struct timeval _now; |
641 | gettimeofday(&_now, NULL); |
642 | qemu_log("%d@%zu.%06zu:blk_root_detach " "child %p blk %p bs %p" "\n" , |
643 | qemu_get_thread_id(), |
644 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
645 | , child, blk, bs); |
646 | } |
647 | } |
648 | |
649 | static inline void trace_blk_root_detach(void * child, void * blk, void * bs) |
650 | { |
651 | if (true) { |
652 | _nocheck__trace_blk_root_detach(child, blk, bs); |
653 | } |
654 | } |
655 | |
656 | #define TRACE_BDRV_CO_PREADV_BACKEND_DSTATE() ( \ |
657 | trace_event_get_state_dynamic_by_id(TRACE_BDRV_CO_PREADV) || \ |
658 | false) |
659 | |
660 | static inline void _nocheck__trace_bdrv_co_preadv(void * bs, int64_t offset, int64_t nbytes, unsigned int flags) |
661 | { |
662 | if (trace_event_get_state(TRACE_BDRV_CO_PREADV) && qemu_loglevel_mask(LOG_TRACE)) { |
663 | struct timeval _now; |
664 | gettimeofday(&_now, NULL); |
665 | qemu_log("%d@%zu.%06zu:bdrv_co_preadv " "bs %p offset %" PRId64" nbytes %" PRId64" flags 0x%x" "\n" , |
666 | qemu_get_thread_id(), |
667 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
668 | , bs, offset, nbytes, flags); |
669 | } |
670 | } |
671 | |
672 | static inline void trace_bdrv_co_preadv(void * bs, int64_t offset, int64_t nbytes, unsigned int flags) |
673 | { |
674 | if (true) { |
675 | _nocheck__trace_bdrv_co_preadv(bs, offset, nbytes, flags); |
676 | } |
677 | } |
678 | |
679 | #define TRACE_BDRV_CO_PWRITEV_BACKEND_DSTATE() ( \ |
680 | trace_event_get_state_dynamic_by_id(TRACE_BDRV_CO_PWRITEV) || \ |
681 | false) |
682 | |
683 | static inline void _nocheck__trace_bdrv_co_pwritev(void * bs, int64_t offset, int64_t nbytes, unsigned int flags) |
684 | { |
685 | if (trace_event_get_state(TRACE_BDRV_CO_PWRITEV) && qemu_loglevel_mask(LOG_TRACE)) { |
686 | struct timeval _now; |
687 | gettimeofday(&_now, NULL); |
688 | qemu_log("%d@%zu.%06zu:bdrv_co_pwritev " "bs %p offset %" PRId64" nbytes %" PRId64" flags 0x%x" "\n" , |
689 | qemu_get_thread_id(), |
690 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
691 | , bs, offset, nbytes, flags); |
692 | } |
693 | } |
694 | |
695 | static inline void trace_bdrv_co_pwritev(void * bs, int64_t offset, int64_t nbytes, unsigned int flags) |
696 | { |
697 | if (true) { |
698 | _nocheck__trace_bdrv_co_pwritev(bs, offset, nbytes, flags); |
699 | } |
700 | } |
701 | |
702 | #define TRACE_BDRV_CO_PWRITE_ZEROES_BACKEND_DSTATE() ( \ |
703 | trace_event_get_state_dynamic_by_id(TRACE_BDRV_CO_PWRITE_ZEROES) || \ |
704 | false) |
705 | |
706 | static inline void _nocheck__trace_bdrv_co_pwrite_zeroes(void * bs, int64_t offset, int count, int flags) |
707 | { |
708 | if (trace_event_get_state(TRACE_BDRV_CO_PWRITE_ZEROES) && qemu_loglevel_mask(LOG_TRACE)) { |
709 | struct timeval _now; |
710 | gettimeofday(&_now, NULL); |
711 | qemu_log("%d@%zu.%06zu:bdrv_co_pwrite_zeroes " "bs %p offset %" PRId64" count %d flags 0x%x" "\n" , |
712 | qemu_get_thread_id(), |
713 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
714 | , bs, offset, count, flags); |
715 | } |
716 | } |
717 | |
718 | static inline void trace_bdrv_co_pwrite_zeroes(void * bs, int64_t offset, int count, int flags) |
719 | { |
720 | if (true) { |
721 | _nocheck__trace_bdrv_co_pwrite_zeroes(bs, offset, count, flags); |
722 | } |
723 | } |
724 | |
725 | #define TRACE_BDRV_CO_DO_COPY_ON_READV_BACKEND_DSTATE() ( \ |
726 | trace_event_get_state_dynamic_by_id(TRACE_BDRV_CO_DO_COPY_ON_READV) || \ |
727 | false) |
728 | |
729 | static inline void _nocheck__trace_bdrv_co_do_copy_on_readv(void * bs, int64_t offset, unsigned int bytes, int64_t cluster_offset, int64_t cluster_bytes) |
730 | { |
731 | if (trace_event_get_state(TRACE_BDRV_CO_DO_COPY_ON_READV) && qemu_loglevel_mask(LOG_TRACE)) { |
732 | struct timeval _now; |
733 | gettimeofday(&_now, NULL); |
734 | qemu_log("%d@%zu.%06zu:bdrv_co_do_copy_on_readv " "bs %p offset %" PRId64" bytes %u cluster_offset %" PRId64" cluster_bytes %" PRId64 "\n" , |
735 | qemu_get_thread_id(), |
736 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
737 | , bs, offset, bytes, cluster_offset, cluster_bytes); |
738 | } |
739 | } |
740 | |
741 | static inline void trace_bdrv_co_do_copy_on_readv(void * bs, int64_t offset, unsigned int bytes, int64_t cluster_offset, int64_t cluster_bytes) |
742 | { |
743 | if (true) { |
744 | _nocheck__trace_bdrv_co_do_copy_on_readv(bs, offset, bytes, cluster_offset, cluster_bytes); |
745 | } |
746 | } |
747 | |
748 | #define TRACE_BDRV_CO_COPY_RANGE_FROM_BACKEND_DSTATE() ( \ |
749 | trace_event_get_state_dynamic_by_id(TRACE_BDRV_CO_COPY_RANGE_FROM) || \ |
750 | false) |
751 | |
752 | static inline void _nocheck__trace_bdrv_co_copy_range_from(void * src, uint64_t src_offset, void * dst, uint64_t dst_offset, uint64_t bytes, int read_flags, int write_flags) |
753 | { |
754 | if (trace_event_get_state(TRACE_BDRV_CO_COPY_RANGE_FROM) && qemu_loglevel_mask(LOG_TRACE)) { |
755 | struct timeval _now; |
756 | gettimeofday(&_now, NULL); |
757 | qemu_log("%d@%zu.%06zu:bdrv_co_copy_range_from " "src %p offset %" PRIu64" dst %p offset %" PRIu64" bytes %" PRIu64" rw flags 0x%x 0x%x" "\n" , |
758 | qemu_get_thread_id(), |
759 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
760 | , src, src_offset, dst, dst_offset, bytes, read_flags, write_flags); |
761 | } |
762 | } |
763 | |
764 | static inline void trace_bdrv_co_copy_range_from(void * src, uint64_t src_offset, void * dst, uint64_t dst_offset, uint64_t bytes, int read_flags, int write_flags) |
765 | { |
766 | if (true) { |
767 | _nocheck__trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes, read_flags, write_flags); |
768 | } |
769 | } |
770 | |
771 | #define TRACE_BDRV_CO_COPY_RANGE_TO_BACKEND_DSTATE() ( \ |
772 | trace_event_get_state_dynamic_by_id(TRACE_BDRV_CO_COPY_RANGE_TO) || \ |
773 | false) |
774 | |
775 | static inline void _nocheck__trace_bdrv_co_copy_range_to(void * src, uint64_t src_offset, void * dst, uint64_t dst_offset, uint64_t bytes, int read_flags, int write_flags) |
776 | { |
777 | if (trace_event_get_state(TRACE_BDRV_CO_COPY_RANGE_TO) && qemu_loglevel_mask(LOG_TRACE)) { |
778 | struct timeval _now; |
779 | gettimeofday(&_now, NULL); |
780 | qemu_log("%d@%zu.%06zu:bdrv_co_copy_range_to " "src %p offset %" PRIu64" dst %p offset %" PRIu64" bytes %" PRIu64" rw flags 0x%x 0x%x" "\n" , |
781 | qemu_get_thread_id(), |
782 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
783 | , src, src_offset, dst, dst_offset, bytes, read_flags, write_flags); |
784 | } |
785 | } |
786 | |
787 | static inline void trace_bdrv_co_copy_range_to(void * src, uint64_t src_offset, void * dst, uint64_t dst_offset, uint64_t bytes, int read_flags, int write_flags) |
788 | { |
789 | if (true) { |
790 | _nocheck__trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes, read_flags, write_flags); |
791 | } |
792 | } |
793 | |
794 | #define TRACE_STREAM_ONE_ITERATION_BACKEND_DSTATE() ( \ |
795 | trace_event_get_state_dynamic_by_id(TRACE_STREAM_ONE_ITERATION) || \ |
796 | false) |
797 | |
798 | static inline void _nocheck__trace_stream_one_iteration(void * s, int64_t offset, uint64_t bytes, int is_allocated) |
799 | { |
800 | if (trace_event_get_state(TRACE_STREAM_ONE_ITERATION) && qemu_loglevel_mask(LOG_TRACE)) { |
801 | struct timeval _now; |
802 | gettimeofday(&_now, NULL); |
803 | qemu_log("%d@%zu.%06zu:stream_one_iteration " "s %p offset %" PRId64 " bytes %" PRIu64 " is_allocated %d" "\n" , |
804 | qemu_get_thread_id(), |
805 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
806 | , s, offset, bytes, is_allocated); |
807 | } |
808 | } |
809 | |
810 | static inline void trace_stream_one_iteration(void * s, int64_t offset, uint64_t bytes, int is_allocated) |
811 | { |
812 | if (true) { |
813 | _nocheck__trace_stream_one_iteration(s, offset, bytes, is_allocated); |
814 | } |
815 | } |
816 | |
817 | #define TRACE_STREAM_START_BACKEND_DSTATE() ( \ |
818 | trace_event_get_state_dynamic_by_id(TRACE_STREAM_START) || \ |
819 | false) |
820 | |
821 | static inline void _nocheck__trace_stream_start(void * bs, void * base, void * s) |
822 | { |
823 | if (trace_event_get_state(TRACE_STREAM_START) && qemu_loglevel_mask(LOG_TRACE)) { |
824 | struct timeval _now; |
825 | gettimeofday(&_now, NULL); |
826 | qemu_log("%d@%zu.%06zu:stream_start " "bs %p base %p s %p" "\n" , |
827 | qemu_get_thread_id(), |
828 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
829 | , bs, base, s); |
830 | } |
831 | } |
832 | |
833 | static inline void trace_stream_start(void * bs, void * base, void * s) |
834 | { |
835 | if (true) { |
836 | _nocheck__trace_stream_start(bs, base, s); |
837 | } |
838 | } |
839 | |
840 | #define TRACE_COMMIT_ONE_ITERATION_BACKEND_DSTATE() ( \ |
841 | trace_event_get_state_dynamic_by_id(TRACE_COMMIT_ONE_ITERATION) || \ |
842 | false) |
843 | |
844 | static inline void _nocheck__trace_commit_one_iteration(void * s, int64_t offset, uint64_t bytes, int is_allocated) |
845 | { |
846 | if (trace_event_get_state(TRACE_COMMIT_ONE_ITERATION) && qemu_loglevel_mask(LOG_TRACE)) { |
847 | struct timeval _now; |
848 | gettimeofday(&_now, NULL); |
849 | qemu_log("%d@%zu.%06zu:commit_one_iteration " "s %p offset %" PRId64 " bytes %" PRIu64 " is_allocated %d" "\n" , |
850 | qemu_get_thread_id(), |
851 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
852 | , s, offset, bytes, is_allocated); |
853 | } |
854 | } |
855 | |
856 | static inline void trace_commit_one_iteration(void * s, int64_t offset, uint64_t bytes, int is_allocated) |
857 | { |
858 | if (true) { |
859 | _nocheck__trace_commit_one_iteration(s, offset, bytes, is_allocated); |
860 | } |
861 | } |
862 | |
863 | #define TRACE_COMMIT_START_BACKEND_DSTATE() ( \ |
864 | trace_event_get_state_dynamic_by_id(TRACE_COMMIT_START) || \ |
865 | false) |
866 | |
867 | static inline void _nocheck__trace_commit_start(void * bs, void * base, void * top, void * s) |
868 | { |
869 | if (trace_event_get_state(TRACE_COMMIT_START) && qemu_loglevel_mask(LOG_TRACE)) { |
870 | struct timeval _now; |
871 | gettimeofday(&_now, NULL); |
872 | qemu_log("%d@%zu.%06zu:commit_start " "bs %p base %p top %p s %p" "\n" , |
873 | qemu_get_thread_id(), |
874 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
875 | , bs, base, top, s); |
876 | } |
877 | } |
878 | |
879 | static inline void trace_commit_start(void * bs, void * base, void * top, void * s) |
880 | { |
881 | if (true) { |
882 | _nocheck__trace_commit_start(bs, base, top, s); |
883 | } |
884 | } |
885 | |
886 | #define TRACE_MIRROR_START_BACKEND_DSTATE() ( \ |
887 | trace_event_get_state_dynamic_by_id(TRACE_MIRROR_START) || \ |
888 | false) |
889 | |
890 | static inline void _nocheck__trace_mirror_start(void * bs, void * s, void * opaque) |
891 | { |
892 | if (trace_event_get_state(TRACE_MIRROR_START) && qemu_loglevel_mask(LOG_TRACE)) { |
893 | struct timeval _now; |
894 | gettimeofday(&_now, NULL); |
895 | qemu_log("%d@%zu.%06zu:mirror_start " "bs %p s %p opaque %p" "\n" , |
896 | qemu_get_thread_id(), |
897 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
898 | , bs, s, opaque); |
899 | } |
900 | } |
901 | |
902 | static inline void trace_mirror_start(void * bs, void * s, void * opaque) |
903 | { |
904 | if (true) { |
905 | _nocheck__trace_mirror_start(bs, s, opaque); |
906 | } |
907 | } |
908 | |
909 | #define TRACE_MIRROR_RESTART_ITER_BACKEND_DSTATE() ( \ |
910 | trace_event_get_state_dynamic_by_id(TRACE_MIRROR_RESTART_ITER) || \ |
911 | false) |
912 | |
913 | static inline void _nocheck__trace_mirror_restart_iter(void * s, int64_t cnt) |
914 | { |
915 | if (trace_event_get_state(TRACE_MIRROR_RESTART_ITER) && qemu_loglevel_mask(LOG_TRACE)) { |
916 | struct timeval _now; |
917 | gettimeofday(&_now, NULL); |
918 | qemu_log("%d@%zu.%06zu:mirror_restart_iter " "s %p dirty count %" PRId64 "\n" , |
919 | qemu_get_thread_id(), |
920 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
921 | , s, cnt); |
922 | } |
923 | } |
924 | |
925 | static inline void trace_mirror_restart_iter(void * s, int64_t cnt) |
926 | { |
927 | if (true) { |
928 | _nocheck__trace_mirror_restart_iter(s, cnt); |
929 | } |
930 | } |
931 | |
932 | #define TRACE_MIRROR_BEFORE_FLUSH_BACKEND_DSTATE() ( \ |
933 | trace_event_get_state_dynamic_by_id(TRACE_MIRROR_BEFORE_FLUSH) || \ |
934 | false) |
935 | |
936 | static inline void _nocheck__trace_mirror_before_flush(void * s) |
937 | { |
938 | if (trace_event_get_state(TRACE_MIRROR_BEFORE_FLUSH) && qemu_loglevel_mask(LOG_TRACE)) { |
939 | struct timeval _now; |
940 | gettimeofday(&_now, NULL); |
941 | qemu_log("%d@%zu.%06zu:mirror_before_flush " "s %p" "\n" , |
942 | qemu_get_thread_id(), |
943 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
944 | , s); |
945 | } |
946 | } |
947 | |
948 | static inline void trace_mirror_before_flush(void * s) |
949 | { |
950 | if (true) { |
951 | _nocheck__trace_mirror_before_flush(s); |
952 | } |
953 | } |
954 | |
955 | #define TRACE_MIRROR_BEFORE_DRAIN_BACKEND_DSTATE() ( \ |
956 | trace_event_get_state_dynamic_by_id(TRACE_MIRROR_BEFORE_DRAIN) || \ |
957 | false) |
958 | |
959 | static inline void _nocheck__trace_mirror_before_drain(void * s, int64_t cnt) |
960 | { |
961 | if (trace_event_get_state(TRACE_MIRROR_BEFORE_DRAIN) && qemu_loglevel_mask(LOG_TRACE)) { |
962 | struct timeval _now; |
963 | gettimeofday(&_now, NULL); |
964 | qemu_log("%d@%zu.%06zu:mirror_before_drain " "s %p dirty count %" PRId64 "\n" , |
965 | qemu_get_thread_id(), |
966 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
967 | , s, cnt); |
968 | } |
969 | } |
970 | |
971 | static inline void trace_mirror_before_drain(void * s, int64_t cnt) |
972 | { |
973 | if (true) { |
974 | _nocheck__trace_mirror_before_drain(s, cnt); |
975 | } |
976 | } |
977 | |
978 | #define TRACE_MIRROR_BEFORE_SLEEP_BACKEND_DSTATE() ( \ |
979 | trace_event_get_state_dynamic_by_id(TRACE_MIRROR_BEFORE_SLEEP) || \ |
980 | false) |
981 | |
982 | static inline void _nocheck__trace_mirror_before_sleep(void * s, int64_t cnt, int synced, uint64_t delay_ns) |
983 | { |
984 | if (trace_event_get_state(TRACE_MIRROR_BEFORE_SLEEP) && qemu_loglevel_mask(LOG_TRACE)) { |
985 | struct timeval _now; |
986 | gettimeofday(&_now, NULL); |
987 | qemu_log("%d@%zu.%06zu:mirror_before_sleep " "s %p dirty count %" PRId64" synced %d delay %" PRIu64"ns" "\n" , |
988 | qemu_get_thread_id(), |
989 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
990 | , s, cnt, synced, delay_ns); |
991 | } |
992 | } |
993 | |
994 | static inline void trace_mirror_before_sleep(void * s, int64_t cnt, int synced, uint64_t delay_ns) |
995 | { |
996 | if (true) { |
997 | _nocheck__trace_mirror_before_sleep(s, cnt, synced, delay_ns); |
998 | } |
999 | } |
1000 | |
1001 | #define TRACE_MIRROR_ONE_ITERATION_BACKEND_DSTATE() ( \ |
1002 | trace_event_get_state_dynamic_by_id(TRACE_MIRROR_ONE_ITERATION) || \ |
1003 | false) |
1004 | |
1005 | static inline void _nocheck__trace_mirror_one_iteration(void * s, int64_t offset, uint64_t bytes) |
1006 | { |
1007 | if (trace_event_get_state(TRACE_MIRROR_ONE_ITERATION) && qemu_loglevel_mask(LOG_TRACE)) { |
1008 | struct timeval _now; |
1009 | gettimeofday(&_now, NULL); |
1010 | qemu_log("%d@%zu.%06zu:mirror_one_iteration " "s %p offset %" PRId64 " bytes %" PRIu64 "\n" , |
1011 | qemu_get_thread_id(), |
1012 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
1013 | , s, offset, bytes); |
1014 | } |
1015 | } |
1016 | |
1017 | static inline void trace_mirror_one_iteration(void * s, int64_t offset, uint64_t bytes) |
1018 | { |
1019 | if (true) { |
1020 | _nocheck__trace_mirror_one_iteration(s, offset, bytes); |
1021 | } |
1022 | } |
1023 | |
1024 | #define TRACE_MIRROR_ITERATION_DONE_BACKEND_DSTATE() ( \ |
1025 | trace_event_get_state_dynamic_by_id(TRACE_MIRROR_ITERATION_DONE) || \ |
1026 | false) |
1027 | |
1028 | static inline void _nocheck__trace_mirror_iteration_done(void * s, int64_t offset, uint64_t bytes, int ret) |
1029 | { |
1030 | if (trace_event_get_state(TRACE_MIRROR_ITERATION_DONE) && qemu_loglevel_mask(LOG_TRACE)) { |
1031 | struct timeval _now; |
1032 | gettimeofday(&_now, NULL); |
1033 | qemu_log("%d@%zu.%06zu:mirror_iteration_done " "s %p offset %" PRId64 " bytes %" PRIu64 " ret %d" "\n" , |
1034 | qemu_get_thread_id(), |
1035 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
1036 | , s, offset, bytes, ret); |
1037 | } |
1038 | } |
1039 | |
1040 | static inline void trace_mirror_iteration_done(void * s, int64_t offset, uint64_t bytes, int ret) |
1041 | { |
1042 | if (true) { |
1043 | _nocheck__trace_mirror_iteration_done(s, offset, bytes, ret); |
1044 | } |
1045 | } |
1046 | |
1047 | #define TRACE_MIRROR_YIELD_BACKEND_DSTATE() ( \ |
1048 | trace_event_get_state_dynamic_by_id(TRACE_MIRROR_YIELD) || \ |
1049 | false) |
1050 | |
1051 | static inline void _nocheck__trace_mirror_yield(void * s, int64_t cnt, int buf_free_count, int in_flight) |
1052 | { |
1053 | if (trace_event_get_state(TRACE_MIRROR_YIELD) && qemu_loglevel_mask(LOG_TRACE)) { |
1054 | struct timeval _now; |
1055 | gettimeofday(&_now, NULL); |
1056 | qemu_log("%d@%zu.%06zu:mirror_yield " "s %p dirty count %" PRId64" free buffers %d in_flight %d" "\n" , |
1057 | qemu_get_thread_id(), |
1058 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
1059 | , s, cnt, buf_free_count, in_flight); |
1060 | } |
1061 | } |
1062 | |
1063 | static inline void trace_mirror_yield(void * s, int64_t cnt, int buf_free_count, int in_flight) |
1064 | { |
1065 | if (true) { |
1066 | _nocheck__trace_mirror_yield(s, cnt, buf_free_count, in_flight); |
1067 | } |
1068 | } |
1069 | |
1070 | #define TRACE_MIRROR_YIELD_IN_FLIGHT_BACKEND_DSTATE() ( \ |
1071 | trace_event_get_state_dynamic_by_id(TRACE_MIRROR_YIELD_IN_FLIGHT) || \ |
1072 | false) |
1073 | |
1074 | static inline void _nocheck__trace_mirror_yield_in_flight(void * s, int64_t offset, int in_flight) |
1075 | { |
1076 | if (trace_event_get_state(TRACE_MIRROR_YIELD_IN_FLIGHT) && qemu_loglevel_mask(LOG_TRACE)) { |
1077 | struct timeval _now; |
1078 | gettimeofday(&_now, NULL); |
1079 | qemu_log("%d@%zu.%06zu:mirror_yield_in_flight " "s %p offset %" PRId64 " in_flight %d" "\n" , |
1080 | qemu_get_thread_id(), |
1081 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
1082 | , s, offset, in_flight); |
1083 | } |
1084 | } |
1085 | |
1086 | static inline void trace_mirror_yield_in_flight(void * s, int64_t offset, int in_flight) |
1087 | { |
1088 | if (true) { |
1089 | _nocheck__trace_mirror_yield_in_flight(s, offset, in_flight); |
1090 | } |
1091 | } |
1092 | |
1093 | #define TRACE_BACKUP_DO_COW_ENTER_BACKEND_DSTATE() ( \ |
1094 | trace_event_get_state_dynamic_by_id(TRACE_BACKUP_DO_COW_ENTER) || \ |
1095 | false) |
1096 | |
1097 | static inline void _nocheck__trace_backup_do_cow_enter(void * job, int64_t start, int64_t offset, uint64_t bytes) |
1098 | { |
1099 | if (trace_event_get_state(TRACE_BACKUP_DO_COW_ENTER) && qemu_loglevel_mask(LOG_TRACE)) { |
1100 | struct timeval _now; |
1101 | gettimeofday(&_now, NULL); |
1102 | qemu_log("%d@%zu.%06zu:backup_do_cow_enter " "job %p start %" PRId64 " offset %" PRId64 " bytes %" PRIu64 "\n" , |
1103 | qemu_get_thread_id(), |
1104 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
1105 | , job, start, offset, bytes); |
1106 | } |
1107 | } |
1108 | |
1109 | static inline void trace_backup_do_cow_enter(void * job, int64_t start, int64_t offset, uint64_t bytes) |
1110 | { |
1111 | if (true) { |
1112 | _nocheck__trace_backup_do_cow_enter(job, start, offset, bytes); |
1113 | } |
1114 | } |
1115 | |
1116 | #define TRACE_BACKUP_DO_COW_RETURN_BACKEND_DSTATE() ( \ |
1117 | trace_event_get_state_dynamic_by_id(TRACE_BACKUP_DO_COW_RETURN) || \ |
1118 | false) |
1119 | |
1120 | static inline void _nocheck__trace_backup_do_cow_return(void * job, int64_t offset, uint64_t bytes, int ret) |
1121 | { |
1122 | if (trace_event_get_state(TRACE_BACKUP_DO_COW_RETURN) && qemu_loglevel_mask(LOG_TRACE)) { |
1123 | struct timeval _now; |
1124 | gettimeofday(&_now, NULL); |
1125 | qemu_log("%d@%zu.%06zu:backup_do_cow_return " "job %p offset %" PRId64 " bytes %" PRIu64 " ret %d" "\n" , |
1126 | qemu_get_thread_id(), |
1127 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
1128 | , job, offset, bytes, ret); |
1129 | } |
1130 | } |
1131 | |
1132 | static inline void trace_backup_do_cow_return(void * job, int64_t offset, uint64_t bytes, int ret) |
1133 | { |
1134 | if (true) { |
1135 | _nocheck__trace_backup_do_cow_return(job, offset, bytes, ret); |
1136 | } |
1137 | } |
1138 | |
1139 | #define TRACE_BACKUP_DO_COW_SKIP_BACKEND_DSTATE() ( \ |
1140 | trace_event_get_state_dynamic_by_id(TRACE_BACKUP_DO_COW_SKIP) || \ |
1141 | false) |
1142 | |
1143 | static inline void _nocheck__trace_backup_do_cow_skip(void * job, int64_t start) |
1144 | { |
1145 | if (trace_event_get_state(TRACE_BACKUP_DO_COW_SKIP) && qemu_loglevel_mask(LOG_TRACE)) { |
1146 | struct timeval _now; |
1147 | gettimeofday(&_now, NULL); |
1148 | qemu_log("%d@%zu.%06zu:backup_do_cow_skip " "job %p start %" PRId64 "\n" , |
1149 | qemu_get_thread_id(), |
1150 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
1151 | , job, start); |
1152 | } |
1153 | } |
1154 | |
1155 | static inline void trace_backup_do_cow_skip(void * job, int64_t start) |
1156 | { |
1157 | if (true) { |
1158 | _nocheck__trace_backup_do_cow_skip(job, start); |
1159 | } |
1160 | } |
1161 | |
1162 | #define TRACE_BACKUP_DO_COW_SKIP_RANGE_BACKEND_DSTATE() ( \ |
1163 | trace_event_get_state_dynamic_by_id(TRACE_BACKUP_DO_COW_SKIP_RANGE) || \ |
1164 | false) |
1165 | |
1166 | static inline void _nocheck__trace_backup_do_cow_skip_range(void * job, int64_t start, uint64_t bytes) |
1167 | { |
1168 | if (trace_event_get_state(TRACE_BACKUP_DO_COW_SKIP_RANGE) && qemu_loglevel_mask(LOG_TRACE)) { |
1169 | struct timeval _now; |
1170 | gettimeofday(&_now, NULL); |
1171 | qemu_log("%d@%zu.%06zu:backup_do_cow_skip_range " "job %p start %" PRId64" bytes %" PRId64 "\n" , |
1172 | qemu_get_thread_id(), |
1173 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
1174 | , job, start, bytes); |
1175 | } |
1176 | } |
1177 | |
1178 | static inline void trace_backup_do_cow_skip_range(void * job, int64_t start, uint64_t bytes) |
1179 | { |
1180 | if (true) { |
1181 | _nocheck__trace_backup_do_cow_skip_range(job, start, bytes); |
1182 | } |
1183 | } |
1184 | |
1185 | #define TRACE_BACKUP_DO_COW_PROCESS_BACKEND_DSTATE() ( \ |
1186 | trace_event_get_state_dynamic_by_id(TRACE_BACKUP_DO_COW_PROCESS) || \ |
1187 | false) |
1188 | |
1189 | static inline void _nocheck__trace_backup_do_cow_process(void * job, int64_t start) |
1190 | { |
1191 | if (trace_event_get_state(TRACE_BACKUP_DO_COW_PROCESS) && qemu_loglevel_mask(LOG_TRACE)) { |
1192 | struct timeval _now; |
1193 | gettimeofday(&_now, NULL); |
1194 | qemu_log("%d@%zu.%06zu:backup_do_cow_process " "job %p start %" PRId64 "\n" , |
1195 | qemu_get_thread_id(), |
1196 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
1197 | , job, start); |
1198 | } |
1199 | } |
1200 | |
1201 | static inline void trace_backup_do_cow_process(void * job, int64_t start) |
1202 | { |
1203 | if (true) { |
1204 | _nocheck__trace_backup_do_cow_process(job, start); |
1205 | } |
1206 | } |
1207 | |
1208 | #define TRACE_BACKUP_DO_COW_READ_FAIL_BACKEND_DSTATE() ( \ |
1209 | trace_event_get_state_dynamic_by_id(TRACE_BACKUP_DO_COW_READ_FAIL) || \ |
1210 | false) |
1211 | |
1212 | static inline void _nocheck__trace_backup_do_cow_read_fail(void * job, int64_t start, int ret) |
1213 | { |
1214 | if (trace_event_get_state(TRACE_BACKUP_DO_COW_READ_FAIL) && qemu_loglevel_mask(LOG_TRACE)) { |
1215 | struct timeval _now; |
1216 | gettimeofday(&_now, NULL); |
1217 | qemu_log("%d@%zu.%06zu:backup_do_cow_read_fail " "job %p start %" PRId64" ret %d" "\n" , |
1218 | qemu_get_thread_id(), |
1219 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
1220 | , job, start, ret); |
1221 | } |
1222 | } |
1223 | |
1224 | static inline void trace_backup_do_cow_read_fail(void * job, int64_t start, int ret) |
1225 | { |
1226 | if (true) { |
1227 | _nocheck__trace_backup_do_cow_read_fail(job, start, ret); |
1228 | } |
1229 | } |
1230 | |
1231 | #define TRACE_BACKUP_DO_COW_WRITE_FAIL_BACKEND_DSTATE() ( \ |
1232 | trace_event_get_state_dynamic_by_id(TRACE_BACKUP_DO_COW_WRITE_FAIL) || \ |
1233 | false) |
1234 | |
1235 | static inline void _nocheck__trace_backup_do_cow_write_fail(void * job, int64_t start, int ret) |
1236 | { |
1237 | if (trace_event_get_state(TRACE_BACKUP_DO_COW_WRITE_FAIL) && qemu_loglevel_mask(LOG_TRACE)) { |
1238 | struct timeval _now; |
1239 | gettimeofday(&_now, NULL); |
1240 | qemu_log("%d@%zu.%06zu:backup_do_cow_write_fail " "job %p start %" PRId64" ret %d" "\n" , |
1241 | qemu_get_thread_id(), |
1242 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
1243 | , job, start, ret); |
1244 | } |
1245 | } |
1246 | |
1247 | static inline void trace_backup_do_cow_write_fail(void * job, int64_t start, int ret) |
1248 | { |
1249 | if (true) { |
1250 | _nocheck__trace_backup_do_cow_write_fail(job, start, ret); |
1251 | } |
1252 | } |
1253 | |
1254 | #define TRACE_BACKUP_DO_COW_COPY_RANGE_FAIL_BACKEND_DSTATE() ( \ |
1255 | trace_event_get_state_dynamic_by_id(TRACE_BACKUP_DO_COW_COPY_RANGE_FAIL) || \ |
1256 | false) |
1257 | |
1258 | static inline void _nocheck__trace_backup_do_cow_copy_range_fail(void * job, int64_t start, int ret) |
1259 | { |
1260 | if (trace_event_get_state(TRACE_BACKUP_DO_COW_COPY_RANGE_FAIL) && qemu_loglevel_mask(LOG_TRACE)) { |
1261 | struct timeval _now; |
1262 | gettimeofday(&_now, NULL); |
1263 | qemu_log("%d@%zu.%06zu:backup_do_cow_copy_range_fail " "job %p start %" PRId64" ret %d" "\n" , |
1264 | qemu_get_thread_id(), |
1265 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
1266 | , job, start, ret); |
1267 | } |
1268 | } |
1269 | |
1270 | static inline void trace_backup_do_cow_copy_range_fail(void * job, int64_t start, int ret) |
1271 | { |
1272 | if (true) { |
1273 | _nocheck__trace_backup_do_cow_copy_range_fail(job, start, ret); |
1274 | } |
1275 | } |
1276 | |
1277 | #define TRACE_QMP_BLOCK_JOB_CANCEL_BACKEND_DSTATE() ( \ |
1278 | trace_event_get_state_dynamic_by_id(TRACE_QMP_BLOCK_JOB_CANCEL) || \ |
1279 | false) |
1280 | |
1281 | static inline void _nocheck__trace_qmp_block_job_cancel(void * job) |
1282 | { |
1283 | if (trace_event_get_state(TRACE_QMP_BLOCK_JOB_CANCEL) && qemu_loglevel_mask(LOG_TRACE)) { |
1284 | struct timeval _now; |
1285 | gettimeofday(&_now, NULL); |
1286 | qemu_log("%d@%zu.%06zu:qmp_block_job_cancel " "job %p" "\n" , |
1287 | qemu_get_thread_id(), |
1288 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
1289 | , job); |
1290 | } |
1291 | } |
1292 | |
1293 | static inline void trace_qmp_block_job_cancel(void * job) |
1294 | { |
1295 | if (true) { |
1296 | _nocheck__trace_qmp_block_job_cancel(job); |
1297 | } |
1298 | } |
1299 | |
1300 | #define TRACE_QMP_BLOCK_JOB_PAUSE_BACKEND_DSTATE() ( \ |
1301 | trace_event_get_state_dynamic_by_id(TRACE_QMP_BLOCK_JOB_PAUSE) || \ |
1302 | false) |
1303 | |
1304 | static inline void _nocheck__trace_qmp_block_job_pause(void * job) |
1305 | { |
1306 | if (trace_event_get_state(TRACE_QMP_BLOCK_JOB_PAUSE) && qemu_loglevel_mask(LOG_TRACE)) { |
1307 | struct timeval _now; |
1308 | gettimeofday(&_now, NULL); |
1309 | qemu_log("%d@%zu.%06zu:qmp_block_job_pause " "job %p" "\n" , |
1310 | qemu_get_thread_id(), |
1311 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
1312 | , job); |
1313 | } |
1314 | } |
1315 | |
1316 | static inline void trace_qmp_block_job_pause(void * job) |
1317 | { |
1318 | if (true) { |
1319 | _nocheck__trace_qmp_block_job_pause(job); |
1320 | } |
1321 | } |
1322 | |
1323 | #define TRACE_QMP_BLOCK_JOB_RESUME_BACKEND_DSTATE() ( \ |
1324 | trace_event_get_state_dynamic_by_id(TRACE_QMP_BLOCK_JOB_RESUME) || \ |
1325 | false) |
1326 | |
1327 | static inline void _nocheck__trace_qmp_block_job_resume(void * job) |
1328 | { |
1329 | if (trace_event_get_state(TRACE_QMP_BLOCK_JOB_RESUME) && qemu_loglevel_mask(LOG_TRACE)) { |
1330 | struct timeval _now; |
1331 | gettimeofday(&_now, NULL); |
1332 | qemu_log("%d@%zu.%06zu:qmp_block_job_resume " "job %p" "\n" , |
1333 | qemu_get_thread_id(), |
1334 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
1335 | , job); |
1336 | } |
1337 | } |
1338 | |
1339 | static inline void trace_qmp_block_job_resume(void * job) |
1340 | { |
1341 | if (true) { |
1342 | _nocheck__trace_qmp_block_job_resume(job); |
1343 | } |
1344 | } |
1345 | |
1346 | #define TRACE_QMP_BLOCK_JOB_COMPLETE_BACKEND_DSTATE() ( \ |
1347 | trace_event_get_state_dynamic_by_id(TRACE_QMP_BLOCK_JOB_COMPLETE) || \ |
1348 | false) |
1349 | |
1350 | static inline void _nocheck__trace_qmp_block_job_complete(void * job) |
1351 | { |
1352 | if (trace_event_get_state(TRACE_QMP_BLOCK_JOB_COMPLETE) && qemu_loglevel_mask(LOG_TRACE)) { |
1353 | struct timeval _now; |
1354 | gettimeofday(&_now, NULL); |
1355 | qemu_log("%d@%zu.%06zu:qmp_block_job_complete " "job %p" "\n" , |
1356 | qemu_get_thread_id(), |
1357 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
1358 | , job); |
1359 | } |
1360 | } |
1361 | |
1362 | static inline void trace_qmp_block_job_complete(void * job) |
1363 | { |
1364 | if (true) { |
1365 | _nocheck__trace_qmp_block_job_complete(job); |
1366 | } |
1367 | } |
1368 | |
1369 | #define TRACE_QMP_BLOCK_JOB_FINALIZE_BACKEND_DSTATE() ( \ |
1370 | trace_event_get_state_dynamic_by_id(TRACE_QMP_BLOCK_JOB_FINALIZE) || \ |
1371 | false) |
1372 | |
1373 | static inline void _nocheck__trace_qmp_block_job_finalize(void * job) |
1374 | { |
1375 | if (trace_event_get_state(TRACE_QMP_BLOCK_JOB_FINALIZE) && qemu_loglevel_mask(LOG_TRACE)) { |
1376 | struct timeval _now; |
1377 | gettimeofday(&_now, NULL); |
1378 | qemu_log("%d@%zu.%06zu:qmp_block_job_finalize " "job %p" "\n" , |
1379 | qemu_get_thread_id(), |
1380 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
1381 | , job); |
1382 | } |
1383 | } |
1384 | |
1385 | static inline void trace_qmp_block_job_finalize(void * job) |
1386 | { |
1387 | if (true) { |
1388 | _nocheck__trace_qmp_block_job_finalize(job); |
1389 | } |
1390 | } |
1391 | |
/* qmp_block_job_dismiss: dstate macro, log emitter, public entry point. */
#define TRACE_QMP_BLOCK_JOB_DISMISS_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QMP_BLOCK_JOB_DISMISS) || \
    false)

static inline void _nocheck__trace_qmp_block_job_dismiss(void * job)
{
    if (trace_event_get_state(TRACE_QMP_BLOCK_JOB_DISMISS) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qmp_block_job_dismiss " "job %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , job);
    }
}

static inline void trace_qmp_block_job_dismiss(void * job)
{
    if (true) {
        _nocheck__trace_qmp_block_job_dismiss(job);
    }
}
1414 | |
/* qmp_block_stream: dstate macro, log emitter, public entry point. */
#define TRACE_QMP_BLOCK_STREAM_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QMP_BLOCK_STREAM) || \
    false)

static inline void _nocheck__trace_qmp_block_stream(void * bs)
{
    if (trace_event_get_state(TRACE_QMP_BLOCK_STREAM) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qmp_block_stream " "bs %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , bs);
    }
}

static inline void trace_qmp_block_stream(void * bs)
{
    if (true) {
        _nocheck__trace_qmp_block_stream(bs);
    }
}
1437 | |
/* file_paio_submit: dstate macro, log emitter, public entry point. */
#define TRACE_FILE_PAIO_SUBMIT_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_FILE_PAIO_SUBMIT) || \
    false)

static inline void _nocheck__trace_file_paio_submit(void * acb, void * opaque, int64_t offset, int count, int type)
{
    if (trace_event_get_state(TRACE_FILE_PAIO_SUBMIT) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:file_paio_submit " "acb %p opaque %p offset %" PRId64" count %d type %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , acb, opaque, offset, count, type);
    }
}

static inline void trace_file_paio_submit(void * acb, void * opaque, int64_t offset, int count, int type)
{
    if (true) {
        _nocheck__trace_file_paio_submit(acb, opaque, offset, count, type);
    }
}
1460 | |
1461 | #define TRACE_FILE_COPY_FILE_RANGE_BACKEND_DSTATE() ( \ |
1462 | trace_event_get_state_dynamic_by_id(TRACE_FILE_COPY_FILE_RANGE) || \ |
1463 | false) |
1464 | |
1465 | static inline void _nocheck__trace_file_copy_file_range(void * bs, int src, int64_t src_off, int dst, int64_t dst_off, int64_t bytes, int flags, int64_t ret) |
1466 | { |
1467 | if (trace_event_get_state(TRACE_FILE_COPY_FILE_RANGE) && qemu_loglevel_mask(LOG_TRACE)) { |
1468 | struct timeval _now; |
1469 | gettimeofday(&_now, NULL); |
1470 | qemu_log("%d@%zu.%06zu:file_copy_file_range " "bs %p src_fd %d offset %" PRIu64" dst_fd %d offset %" PRIu64" bytes %" PRIu64" flags %d ret %" PRId64 "\n" , |
1471 | qemu_get_thread_id(), |
1472 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
1473 | , bs, src, src_off, dst, dst_off, bytes, flags, ret); |
1474 | } |
1475 | } |
1476 | |
1477 | static inline void trace_file_copy_file_range(void * bs, int src, int64_t src_off, int dst, int64_t dst_off, int64_t bytes, int flags, int64_t ret) |
1478 | { |
1479 | if (true) { |
1480 | _nocheck__trace_file_copy_file_range(bs, src, src_off, dst, dst_off, bytes, flags, ret); |
1481 | } |
1482 | } |
1483 | |
/* qcow2_writev_start_req: dstate macro, log emitter, public entry point. */
#define TRACE_QCOW2_WRITEV_START_REQ_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QCOW2_WRITEV_START_REQ) || \
    false)

static inline void _nocheck__trace_qcow2_writev_start_req(void * co, int64_t offset, int bytes)
{
    if (trace_event_get_state(TRACE_QCOW2_WRITEV_START_REQ) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qcow2_writev_start_req " "co %p offset 0x%" PRIx64 " bytes %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , co, offset, bytes);
    }
}

static inline void trace_qcow2_writev_start_req(void * co, int64_t offset, int bytes)
{
    if (true) {
        _nocheck__trace_qcow2_writev_start_req(co, offset, bytes);
    }
}
1506 | |
/* qcow2_writev_done_req: dstate macro, log emitter, public entry point. */
#define TRACE_QCOW2_WRITEV_DONE_REQ_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QCOW2_WRITEV_DONE_REQ) || \
    false)

static inline void _nocheck__trace_qcow2_writev_done_req(void * co, int ret)
{
    if (trace_event_get_state(TRACE_QCOW2_WRITEV_DONE_REQ) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qcow2_writev_done_req " "co %p ret %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , co, ret);
    }
}

static inline void trace_qcow2_writev_done_req(void * co, int ret)
{
    if (true) {
        _nocheck__trace_qcow2_writev_done_req(co, ret);
    }
}
1529 | |
/* qcow2_writev_start_part: dstate macro, log emitter, public entry point. */
#define TRACE_QCOW2_WRITEV_START_PART_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QCOW2_WRITEV_START_PART) || \
    false)

static inline void _nocheck__trace_qcow2_writev_start_part(void * co)
{
    if (trace_event_get_state(TRACE_QCOW2_WRITEV_START_PART) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qcow2_writev_start_part " "co %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , co);
    }
}

static inline void trace_qcow2_writev_start_part(void * co)
{
    if (true) {
        _nocheck__trace_qcow2_writev_start_part(co);
    }
}
1552 | |
/* qcow2_writev_done_part: dstate macro, log emitter, public entry point. */
#define TRACE_QCOW2_WRITEV_DONE_PART_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QCOW2_WRITEV_DONE_PART) || \
    false)

static inline void _nocheck__trace_qcow2_writev_done_part(void * co, int cur_bytes)
{
    if (trace_event_get_state(TRACE_QCOW2_WRITEV_DONE_PART) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qcow2_writev_done_part " "co %p cur_bytes %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , co, cur_bytes);
    }
}

static inline void trace_qcow2_writev_done_part(void * co, int cur_bytes)
{
    if (true) {
        _nocheck__trace_qcow2_writev_done_part(co, cur_bytes);
    }
}
1575 | |
/* qcow2_writev_data: dstate macro, log emitter, public entry point. */
#define TRACE_QCOW2_WRITEV_DATA_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QCOW2_WRITEV_DATA) || \
    false)

static inline void _nocheck__trace_qcow2_writev_data(void * co, uint64_t offset)
{
    if (trace_event_get_state(TRACE_QCOW2_WRITEV_DATA) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qcow2_writev_data " "co %p offset 0x%" PRIx64 "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , co, offset);
    }
}

static inline void trace_qcow2_writev_data(void * co, uint64_t offset)
{
    if (true) {
        _nocheck__trace_qcow2_writev_data(co, offset);
    }
}
1598 | |
/* qcow2_pwrite_zeroes_start_req: dstate macro, log emitter, public entry point. */
#define TRACE_QCOW2_PWRITE_ZEROES_START_REQ_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QCOW2_PWRITE_ZEROES_START_REQ) || \
    false)

static inline void _nocheck__trace_qcow2_pwrite_zeroes_start_req(void * co, int64_t offset, int count)
{
    if (trace_event_get_state(TRACE_QCOW2_PWRITE_ZEROES_START_REQ) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qcow2_pwrite_zeroes_start_req " "co %p offset 0x%" PRIx64 " count %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , co, offset, count);
    }
}

static inline void trace_qcow2_pwrite_zeroes_start_req(void * co, int64_t offset, int count)
{
    if (true) {
        _nocheck__trace_qcow2_pwrite_zeroes_start_req(co, offset, count);
    }
}
1621 | |
/* qcow2_pwrite_zeroes: dstate macro, log emitter, public entry point. */
#define TRACE_QCOW2_PWRITE_ZEROES_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QCOW2_PWRITE_ZEROES) || \
    false)

static inline void _nocheck__trace_qcow2_pwrite_zeroes(void * co, int64_t offset, int count)
{
    if (trace_event_get_state(TRACE_QCOW2_PWRITE_ZEROES) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qcow2_pwrite_zeroes " "co %p offset 0x%" PRIx64 " count %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , co, offset, count);
    }
}

static inline void trace_qcow2_pwrite_zeroes(void * co, int64_t offset, int count)
{
    if (true) {
        _nocheck__trace_qcow2_pwrite_zeroes(co, offset, count);
    }
}
1644 | |
/* qcow2_skip_cow: dstate macro, log emitter, public entry point. */
#define TRACE_QCOW2_SKIP_COW_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QCOW2_SKIP_COW) || \
    false)

static inline void _nocheck__trace_qcow2_skip_cow(void * co, uint64_t offset, int nb_clusters)
{
    if (trace_event_get_state(TRACE_QCOW2_SKIP_COW) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qcow2_skip_cow " "co %p offset 0x%" PRIx64 " nb_clusters %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , co, offset, nb_clusters);
    }
}

static inline void trace_qcow2_skip_cow(void * co, uint64_t offset, int nb_clusters)
{
    if (true) {
        _nocheck__trace_qcow2_skip_cow(co, offset, nb_clusters);
    }
}
1667 | |
/* qcow2_alloc_clusters_offset: dstate macro, log emitter, public entry point. */
#define TRACE_QCOW2_ALLOC_CLUSTERS_OFFSET_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QCOW2_ALLOC_CLUSTERS_OFFSET) || \
    false)

static inline void _nocheck__trace_qcow2_alloc_clusters_offset(void * co, uint64_t offset, int bytes)
{
    if (trace_event_get_state(TRACE_QCOW2_ALLOC_CLUSTERS_OFFSET) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qcow2_alloc_clusters_offset " "co %p offset 0x%" PRIx64 " bytes %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , co, offset, bytes);
    }
}

static inline void trace_qcow2_alloc_clusters_offset(void * co, uint64_t offset, int bytes)
{
    if (true) {
        _nocheck__trace_qcow2_alloc_clusters_offset(co, offset, bytes);
    }
}
1690 | |
/* qcow2_handle_copied: dstate macro, log emitter, public entry point. */
#define TRACE_QCOW2_HANDLE_COPIED_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QCOW2_HANDLE_COPIED) || \
    false)

static inline void _nocheck__trace_qcow2_handle_copied(void * co, uint64_t guest_offset, uint64_t host_offset, uint64_t bytes)
{
    if (trace_event_get_state(TRACE_QCOW2_HANDLE_COPIED) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qcow2_handle_copied " "co %p guest_offset 0x%" PRIx64 " host_offset 0x%" PRIx64 " bytes 0x%" PRIx64 "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , co, guest_offset, host_offset, bytes);
    }
}

static inline void trace_qcow2_handle_copied(void * co, uint64_t guest_offset, uint64_t host_offset, uint64_t bytes)
{
    if (true) {
        _nocheck__trace_qcow2_handle_copied(co, guest_offset, host_offset, bytes);
    }
}
1713 | |
/* qcow2_handle_alloc: dstate macro, log emitter, public entry point. */
#define TRACE_QCOW2_HANDLE_ALLOC_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QCOW2_HANDLE_ALLOC) || \
    false)

static inline void _nocheck__trace_qcow2_handle_alloc(void * co, uint64_t guest_offset, uint64_t host_offset, uint64_t bytes)
{
    if (trace_event_get_state(TRACE_QCOW2_HANDLE_ALLOC) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qcow2_handle_alloc " "co %p guest_offset 0x%" PRIx64 " host_offset 0x%" PRIx64 " bytes 0x%" PRIx64 "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , co, guest_offset, host_offset, bytes);
    }
}

static inline void trace_qcow2_handle_alloc(void * co, uint64_t guest_offset, uint64_t host_offset, uint64_t bytes)
{
    if (true) {
        _nocheck__trace_qcow2_handle_alloc(co, guest_offset, host_offset, bytes);
    }
}
1736 | |
/* qcow2_do_alloc_clusters_offset: dstate macro, log emitter, public entry point. */
#define TRACE_QCOW2_DO_ALLOC_CLUSTERS_OFFSET_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QCOW2_DO_ALLOC_CLUSTERS_OFFSET) || \
    false)

static inline void _nocheck__trace_qcow2_do_alloc_clusters_offset(void * co, uint64_t guest_offset, uint64_t host_offset, int nb_clusters)
{
    if (trace_event_get_state(TRACE_QCOW2_DO_ALLOC_CLUSTERS_OFFSET) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qcow2_do_alloc_clusters_offset " "co %p guest_offset 0x%" PRIx64 " host_offset 0x%" PRIx64 " nb_clusters %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , co, guest_offset, host_offset, nb_clusters);
    }
}

static inline void trace_qcow2_do_alloc_clusters_offset(void * co, uint64_t guest_offset, uint64_t host_offset, int nb_clusters)
{
    if (true) {
        _nocheck__trace_qcow2_do_alloc_clusters_offset(co, guest_offset, host_offset, nb_clusters);
    }
}
1759 | |
/* qcow2_cluster_alloc_phys: dstate macro, log emitter, public entry point. */
#define TRACE_QCOW2_CLUSTER_ALLOC_PHYS_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QCOW2_CLUSTER_ALLOC_PHYS) || \
    false)

static inline void _nocheck__trace_qcow2_cluster_alloc_phys(void * co)
{
    if (trace_event_get_state(TRACE_QCOW2_CLUSTER_ALLOC_PHYS) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qcow2_cluster_alloc_phys " "co %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , co);
    }
}

static inline void trace_qcow2_cluster_alloc_phys(void * co)
{
    if (true) {
        _nocheck__trace_qcow2_cluster_alloc_phys(co);
    }
}
1782 | |
/* qcow2_cluster_link_l2: dstate macro, log emitter, public entry point. */
#define TRACE_QCOW2_CLUSTER_LINK_L2_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QCOW2_CLUSTER_LINK_L2) || \
    false)

static inline void _nocheck__trace_qcow2_cluster_link_l2(void * co, int nb_clusters)
{
    if (trace_event_get_state(TRACE_QCOW2_CLUSTER_LINK_L2) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qcow2_cluster_link_l2 " "co %p nb_clusters %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , co, nb_clusters);
    }
}

static inline void trace_qcow2_cluster_link_l2(void * co, int nb_clusters)
{
    if (true) {
        _nocheck__trace_qcow2_cluster_link_l2(co, nb_clusters);
    }
}
1805 | |
/* qcow2_l2_allocate: dstate macro, log emitter, public entry point. */
#define TRACE_QCOW2_L2_ALLOCATE_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QCOW2_L2_ALLOCATE) || \
    false)

static inline void _nocheck__trace_qcow2_l2_allocate(void * bs, int l1_index)
{
    if (trace_event_get_state(TRACE_QCOW2_L2_ALLOCATE) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qcow2_l2_allocate " "bs %p l1_index %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , bs, l1_index);
    }
}

static inline void trace_qcow2_l2_allocate(void * bs, int l1_index)
{
    if (true) {
        _nocheck__trace_qcow2_l2_allocate(bs, l1_index);
    }
}
1828 | |
/* qcow2_l2_allocate_get_empty: dstate macro, log emitter, public entry point. */
#define TRACE_QCOW2_L2_ALLOCATE_GET_EMPTY_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QCOW2_L2_ALLOCATE_GET_EMPTY) || \
    false)

static inline void _nocheck__trace_qcow2_l2_allocate_get_empty(void * bs, int l1_index)
{
    if (trace_event_get_state(TRACE_QCOW2_L2_ALLOCATE_GET_EMPTY) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qcow2_l2_allocate_get_empty " "bs %p l1_index %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , bs, l1_index);
    }
}

static inline void trace_qcow2_l2_allocate_get_empty(void * bs, int l1_index)
{
    if (true) {
        _nocheck__trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    }
}
1851 | |
/* qcow2_l2_allocate_write_l2: dstate macro, log emitter, public entry point. */
#define TRACE_QCOW2_L2_ALLOCATE_WRITE_L2_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QCOW2_L2_ALLOCATE_WRITE_L2) || \
    false)

static inline void _nocheck__trace_qcow2_l2_allocate_write_l2(void * bs, int l1_index)
{
    if (trace_event_get_state(TRACE_QCOW2_L2_ALLOCATE_WRITE_L2) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qcow2_l2_allocate_write_l2 " "bs %p l1_index %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , bs, l1_index);
    }
}

static inline void trace_qcow2_l2_allocate_write_l2(void * bs, int l1_index)
{
    if (true) {
        _nocheck__trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    }
}
1874 | |
/* qcow2_l2_allocate_write_l1: dstate macro, log emitter, public entry point. */
#define TRACE_QCOW2_L2_ALLOCATE_WRITE_L1_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QCOW2_L2_ALLOCATE_WRITE_L1) || \
    false)

static inline void _nocheck__trace_qcow2_l2_allocate_write_l1(void * bs, int l1_index)
{
    if (trace_event_get_state(TRACE_QCOW2_L2_ALLOCATE_WRITE_L1) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qcow2_l2_allocate_write_l1 " "bs %p l1_index %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , bs, l1_index);
    }
}

static inline void trace_qcow2_l2_allocate_write_l1(void * bs, int l1_index)
{
    if (true) {
        _nocheck__trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    }
}
1897 | |
/* qcow2_l2_allocate_done: dstate macro, log emitter, public entry point. */
#define TRACE_QCOW2_L2_ALLOCATE_DONE_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QCOW2_L2_ALLOCATE_DONE) || \
    false)

static inline void _nocheck__trace_qcow2_l2_allocate_done(void * bs, int l1_index, int ret)
{
    if (trace_event_get_state(TRACE_QCOW2_L2_ALLOCATE_DONE) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qcow2_l2_allocate_done " "bs %p l1_index %d ret %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , bs, l1_index, ret);
    }
}

static inline void trace_qcow2_l2_allocate_done(void * bs, int l1_index, int ret)
{
    if (true) {
        _nocheck__trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    }
}
1920 | |
/* qcow2_cache_get: dstate macro, log emitter, public entry point.
 * ("c" is logged as is_l2_cache %d; bool read_from_disk promotes to int.) */
#define TRACE_QCOW2_CACHE_GET_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QCOW2_CACHE_GET) || \
    false)

static inline void _nocheck__trace_qcow2_cache_get(void * co, int c, uint64_t offset, bool read_from_disk)
{
    if (trace_event_get_state(TRACE_QCOW2_CACHE_GET) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qcow2_cache_get " "co %p is_l2_cache %d offset 0x%" PRIx64 " read_from_disk %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , co, c, offset, read_from_disk);
    }
}

static inline void trace_qcow2_cache_get(void * co, int c, uint64_t offset, bool read_from_disk)
{
    if (true) {
        _nocheck__trace_qcow2_cache_get(co, c, offset, read_from_disk);
    }
}
1943 | |
/* qcow2_cache_get_replace_entry: dstate macro, log emitter, public entry point. */
#define TRACE_QCOW2_CACHE_GET_REPLACE_ENTRY_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QCOW2_CACHE_GET_REPLACE_ENTRY) || \
    false)

static inline void _nocheck__trace_qcow2_cache_get_replace_entry(void * co, int c, int i)
{
    if (trace_event_get_state(TRACE_QCOW2_CACHE_GET_REPLACE_ENTRY) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qcow2_cache_get_replace_entry " "co %p is_l2_cache %d index %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , co, c, i);
    }
}

static inline void trace_qcow2_cache_get_replace_entry(void * co, int c, int i)
{
    if (true) {
        _nocheck__trace_qcow2_cache_get_replace_entry(co, c, i);
    }
}
1966 | |
/* qcow2_cache_get_read: dstate macro, log emitter, public entry point. */
#define TRACE_QCOW2_CACHE_GET_READ_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QCOW2_CACHE_GET_READ) || \
    false)

static inline void _nocheck__trace_qcow2_cache_get_read(void * co, int c, int i)
{
    if (trace_event_get_state(TRACE_QCOW2_CACHE_GET_READ) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qcow2_cache_get_read " "co %p is_l2_cache %d index %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , co, c, i);
    }
}

static inline void trace_qcow2_cache_get_read(void * co, int c, int i)
{
    if (true) {
        _nocheck__trace_qcow2_cache_get_read(co, c, i);
    }
}
1989 | |
/* qcow2_cache_get_done: dstate macro, log emitter, public entry point. */
#define TRACE_QCOW2_CACHE_GET_DONE_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QCOW2_CACHE_GET_DONE) || \
    false)

static inline void _nocheck__trace_qcow2_cache_get_done(void * co, int c, int i)
{
    if (trace_event_get_state(TRACE_QCOW2_CACHE_GET_DONE) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qcow2_cache_get_done " "co %p is_l2_cache %d index %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , co, c, i);
    }
}

static inline void trace_qcow2_cache_get_done(void * co, int c, int i)
{
    if (true) {
        _nocheck__trace_qcow2_cache_get_done(co, c, i);
    }
}
2012 | |
/* qcow2_cache_flush: dstate macro, log emitter, public entry point. */
#define TRACE_QCOW2_CACHE_FLUSH_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QCOW2_CACHE_FLUSH) || \
    false)

static inline void _nocheck__trace_qcow2_cache_flush(void * co, int c)
{
    if (trace_event_get_state(TRACE_QCOW2_CACHE_FLUSH) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qcow2_cache_flush " "co %p is_l2_cache %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , co, c);
    }
}

static inline void trace_qcow2_cache_flush(void * co, int c)
{
    if (true) {
        _nocheck__trace_qcow2_cache_flush(co, c);
    }
}
2035 | |
/* qcow2_cache_entry_flush: dstate macro, log emitter, public entry point. */
#define TRACE_QCOW2_CACHE_ENTRY_FLUSH_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QCOW2_CACHE_ENTRY_FLUSH) || \
    false)

static inline void _nocheck__trace_qcow2_cache_entry_flush(void * co, int c, int i)
{
    if (trace_event_get_state(TRACE_QCOW2_CACHE_ENTRY_FLUSH) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qcow2_cache_entry_flush " "co %p is_l2_cache %d index %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , co, c, i);
    }
}

static inline void trace_qcow2_cache_entry_flush(void * co, int c, int i)
{
    if (true) {
        _nocheck__trace_qcow2_cache_entry_flush(co, c, i);
    }
}
2058 | |
/* qcow2_process_discards_failed_region: dstate macro, log emitter, public entry point. */
#define TRACE_QCOW2_PROCESS_DISCARDS_FAILED_REGION_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QCOW2_PROCESS_DISCARDS_FAILED_REGION) || \
    false)

static inline void _nocheck__trace_qcow2_process_discards_failed_region(uint64_t offset, uint64_t bytes, int ret)
{
    if (trace_event_get_state(TRACE_QCOW2_PROCESS_DISCARDS_FAILED_REGION) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qcow2_process_discards_failed_region " "offset 0x%" PRIx64 " bytes 0x%" PRIx64 " ret %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , offset, bytes, ret);
    }
}

static inline void trace_qcow2_process_discards_failed_region(uint64_t offset, uint64_t bytes, int ret)
{
    if (true) {
        _nocheck__trace_qcow2_process_discards_failed_region(offset, bytes, ret);
    }
}
2081 | |
/* qed_alloc_l2_cache_entry: dstate macro, log emitter, public entry point. */
#define TRACE_QED_ALLOC_L2_CACHE_ENTRY_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QED_ALLOC_L2_CACHE_ENTRY) || \
    false)

static inline void _nocheck__trace_qed_alloc_l2_cache_entry(void * l2_cache, void * entry)
{
    if (trace_event_get_state(TRACE_QED_ALLOC_L2_CACHE_ENTRY) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qed_alloc_l2_cache_entry " "l2_cache %p entry %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , l2_cache, entry);
    }
}

static inline void trace_qed_alloc_l2_cache_entry(void * l2_cache, void * entry)
{
    if (true) {
        _nocheck__trace_qed_alloc_l2_cache_entry(l2_cache, entry);
    }
}
2104 | |
/* qed_unref_l2_cache_entry: dstate macro, log emitter, public entry point. */
#define TRACE_QED_UNREF_L2_CACHE_ENTRY_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QED_UNREF_L2_CACHE_ENTRY) || \
    false)

static inline void _nocheck__trace_qed_unref_l2_cache_entry(void * entry, int ref)
{
    if (trace_event_get_state(TRACE_QED_UNREF_L2_CACHE_ENTRY) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qed_unref_l2_cache_entry " "entry %p ref %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , entry, ref);
    }
}

static inline void trace_qed_unref_l2_cache_entry(void * entry, int ref)
{
    if (true) {
        _nocheck__trace_qed_unref_l2_cache_entry(entry, ref);
    }
}
2127 | |
/* qed_find_l2_cache_entry: dstate macro, log emitter, public entry point. */
#define TRACE_QED_FIND_L2_CACHE_ENTRY_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QED_FIND_L2_CACHE_ENTRY) || \
    false)

static inline void _nocheck__trace_qed_find_l2_cache_entry(void * l2_cache, void * entry, uint64_t offset, int ref)
{
    if (trace_event_get_state(TRACE_QED_FIND_L2_CACHE_ENTRY) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qed_find_l2_cache_entry " "l2_cache %p entry %p offset %" PRIu64" ref %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , l2_cache, entry, offset, ref);
    }
}

static inline void trace_qed_find_l2_cache_entry(void * l2_cache, void * entry, uint64_t offset, int ref)
{
    if (true) {
        _nocheck__trace_qed_find_l2_cache_entry(l2_cache, entry, offset, ref);
    }
}
2150 | |
/* qed_read_table: dstate macro, log emitter, public entry point. */
#define TRACE_QED_READ_TABLE_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QED_READ_TABLE) || \
    false)

static inline void _nocheck__trace_qed_read_table(void * s, uint64_t offset, void * table)
{
    if (trace_event_get_state(TRACE_QED_READ_TABLE) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qed_read_table " "s %p offset %" PRIu64" table %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s, offset, table);
    }
}

static inline void trace_qed_read_table(void * s, uint64_t offset, void * table)
{
    if (true) {
        _nocheck__trace_qed_read_table(s, offset, table);
    }
}
2173 | |
/* qed_read_table_cb: dstate macro, log emitter, public entry point. */
#define TRACE_QED_READ_TABLE_CB_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QED_READ_TABLE_CB) || \
    false)

static inline void _nocheck__trace_qed_read_table_cb(void * s, void * table, int ret)
{
    if (trace_event_get_state(TRACE_QED_READ_TABLE_CB) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qed_read_table_cb " "s %p table %p ret %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s, table, ret);
    }
}

static inline void trace_qed_read_table_cb(void * s, void * table, int ret)
{
    if (true) {
        _nocheck__trace_qed_read_table_cb(s, table, ret);
    }
}
2196 | |
/* Trace event "qed_write_table": logs s/offset/table/index/n when the event
 * and LOG_TRACE are enabled; macro reports the dynamic enable state. */
#define TRACE_QED_WRITE_TABLE_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QED_WRITE_TABLE) || \
    false)

static inline void _nocheck__trace_qed_write_table(void * s, uint64_t offset, void * table, unsigned int index, unsigned int n)
{
    if (trace_event_get_state(TRACE_QED_WRITE_TABLE) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qed_write_table " "s %p offset %" PRIu64" table %p index %u n %u" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s, offset, table, index, n);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_qed_write_table(void * s, uint64_t offset, void * table, unsigned int index, unsigned int n)
{
    if (true) {
        _nocheck__trace_qed_write_table(s, offset, table, index, n);
    }
}
2219 | |
/* Trace event "qed_write_table_cb": logs s/table/flush/ret when the event
 * and LOG_TRACE are enabled; macro reports the dynamic enable state. */
#define TRACE_QED_WRITE_TABLE_CB_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QED_WRITE_TABLE_CB) || \
    false)

static inline void _nocheck__trace_qed_write_table_cb(void * s, void * table, int flush, int ret)
{
    if (trace_event_get_state(TRACE_QED_WRITE_TABLE_CB) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qed_write_table_cb " "s %p table %p flush %d ret %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s, table, flush, ret);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_qed_write_table_cb(void * s, void * table, int flush, int ret)
{
    if (true) {
        _nocheck__trace_qed_write_table_cb(s, table, flush, ret);
    }
}
2242 | |
/* Trace event "qed_need_check_timer_cb": logs the BDRVQEDState pointer when
 * the event and LOG_TRACE are enabled; macro reports the enable state. */
#define TRACE_QED_NEED_CHECK_TIMER_CB_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QED_NEED_CHECK_TIMER_CB) || \
    false)

static inline void _nocheck__trace_qed_need_check_timer_cb(void * s)
{
    if (trace_event_get_state(TRACE_QED_NEED_CHECK_TIMER_CB) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qed_need_check_timer_cb " "s %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_qed_need_check_timer_cb(void * s)
{
    if (true) {
        _nocheck__trace_qed_need_check_timer_cb(s);
    }
}
2265 | |
/* Trace event "qed_start_need_check_timer": logs the state pointer when the
 * event and LOG_TRACE are enabled; macro reports the enable state. */
#define TRACE_QED_START_NEED_CHECK_TIMER_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QED_START_NEED_CHECK_TIMER) || \
    false)

static inline void _nocheck__trace_qed_start_need_check_timer(void * s)
{
    if (trace_event_get_state(TRACE_QED_START_NEED_CHECK_TIMER) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qed_start_need_check_timer " "s %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_qed_start_need_check_timer(void * s)
{
    if (true) {
        _nocheck__trace_qed_start_need_check_timer(s);
    }
}
2288 | |
/* Trace event "qed_cancel_need_check_timer": logs the state pointer when the
 * event and LOG_TRACE are enabled; macro reports the enable state. */
#define TRACE_QED_CANCEL_NEED_CHECK_TIMER_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QED_CANCEL_NEED_CHECK_TIMER) || \
    false)

static inline void _nocheck__trace_qed_cancel_need_check_timer(void * s)
{
    if (trace_event_get_state(TRACE_QED_CANCEL_NEED_CHECK_TIMER) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qed_cancel_need_check_timer " "s %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_qed_cancel_need_check_timer(void * s)
{
    if (true) {
        _nocheck__trace_qed_cancel_need_check_timer(s);
    }
}
2311 | |
/* Trace event "qed_aio_complete": logs s/acb/ret when the event and
 * LOG_TRACE are enabled; macro reports the dynamic enable state. */
#define TRACE_QED_AIO_COMPLETE_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QED_AIO_COMPLETE) || \
    false)

static inline void _nocheck__trace_qed_aio_complete(void * s, void * acb, int ret)
{
    if (trace_event_get_state(TRACE_QED_AIO_COMPLETE) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qed_aio_complete " "s %p acb %p ret %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s, acb, ret);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_qed_aio_complete(void * s, void * acb, int ret)
{
    if (true) {
        _nocheck__trace_qed_aio_complete(s, acb, ret);
    }
}
2334 | |
/* Trace event "qed_aio_setup": logs s/acb/sector_num/nb_sectors/opaque/flags
 * when the event and LOG_TRACE are enabled; macro reports the enable state. */
#define TRACE_QED_AIO_SETUP_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QED_AIO_SETUP) || \
    false)

static inline void _nocheck__trace_qed_aio_setup(void * s, void * acb, int64_t sector_num, int nb_sectors, void * opaque, int flags)
{
    if (trace_event_get_state(TRACE_QED_AIO_SETUP) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qed_aio_setup " "s %p acb %p sector_num %" PRId64" nb_sectors %d opaque %p flags 0x%x" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s, acb, sector_num, nb_sectors, opaque, flags);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_qed_aio_setup(void * s, void * acb, int64_t sector_num, int nb_sectors, void * opaque, int flags)
{
    if (true) {
        _nocheck__trace_qed_aio_setup(s, acb, sector_num, nb_sectors, opaque, flags);
    }
}
2357 | |
/* Trace event "qed_aio_next_io": logs s/acb/ret/cur_pos when the event and
 * LOG_TRACE are enabled; macro reports the dynamic enable state. */
#define TRACE_QED_AIO_NEXT_IO_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QED_AIO_NEXT_IO) || \
    false)

static inline void _nocheck__trace_qed_aio_next_io(void * s, void * acb, int ret, uint64_t cur_pos)
{
    if (trace_event_get_state(TRACE_QED_AIO_NEXT_IO) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qed_aio_next_io " "s %p acb %p ret %d cur_pos %" PRIu64 "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s, acb, ret, cur_pos);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_qed_aio_next_io(void * s, void * acb, int ret, uint64_t cur_pos)
{
    if (true) {
        _nocheck__trace_qed_aio_next_io(s, acb, ret, cur_pos);
    }
}
2380 | |
/* Trace event "qed_aio_read_data": logs s/acb/ret/offset/len when the event
 * and LOG_TRACE are enabled; macro reports the dynamic enable state. */
#define TRACE_QED_AIO_READ_DATA_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QED_AIO_READ_DATA) || \
    false)

static inline void _nocheck__trace_qed_aio_read_data(void * s, void * acb, int ret, uint64_t offset, size_t len)
{
    if (trace_event_get_state(TRACE_QED_AIO_READ_DATA) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qed_aio_read_data " "s %p acb %p ret %d offset %" PRIu64" len %zu" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s, acb, ret, offset, len);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_qed_aio_read_data(void * s, void * acb, int ret, uint64_t offset, size_t len)
{
    if (true) {
        _nocheck__trace_qed_aio_read_data(s, acb, ret, offset, len);
    }
}
2403 | |
/* Trace event "qed_aio_write_data": logs s/acb/ret/offset/len when the event
 * and LOG_TRACE are enabled; macro reports the dynamic enable state. */
#define TRACE_QED_AIO_WRITE_DATA_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QED_AIO_WRITE_DATA) || \
    false)

static inline void _nocheck__trace_qed_aio_write_data(void * s, void * acb, int ret, uint64_t offset, size_t len)
{
    if (trace_event_get_state(TRACE_QED_AIO_WRITE_DATA) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qed_aio_write_data " "s %p acb %p ret %d offset %" PRIu64" len %zu" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s, acb, ret, offset, len);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_qed_aio_write_data(void * s, void * acb, int ret, uint64_t offset, size_t len)
{
    if (true) {
        _nocheck__trace_qed_aio_write_data(s, acb, ret, offset, len);
    }
}
2426 | |
/* Trace event "qed_aio_write_prefill": logs s/acb/start/len/offset when the
 * event and LOG_TRACE are enabled; macro reports the dynamic enable state. */
#define TRACE_QED_AIO_WRITE_PREFILL_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QED_AIO_WRITE_PREFILL) || \
    false)

static inline void _nocheck__trace_qed_aio_write_prefill(void * s, void * acb, uint64_t start, size_t len, uint64_t offset)
{
    if (trace_event_get_state(TRACE_QED_AIO_WRITE_PREFILL) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qed_aio_write_prefill " "s %p acb %p start %" PRIu64" len %zu offset %" PRIu64 "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s, acb, start, len, offset);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_qed_aio_write_prefill(void * s, void * acb, uint64_t start, size_t len, uint64_t offset)
{
    if (true) {
        _nocheck__trace_qed_aio_write_prefill(s, acb, start, len, offset);
    }
}
2449 | |
/* Trace event "qed_aio_write_postfill": logs s/acb/start/len/offset when the
 * event and LOG_TRACE are enabled; macro reports the dynamic enable state. */
#define TRACE_QED_AIO_WRITE_POSTFILL_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QED_AIO_WRITE_POSTFILL) || \
    false)

static inline void _nocheck__trace_qed_aio_write_postfill(void * s, void * acb, uint64_t start, size_t len, uint64_t offset)
{
    if (trace_event_get_state(TRACE_QED_AIO_WRITE_POSTFILL) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qed_aio_write_postfill " "s %p acb %p start %" PRIu64" len %zu offset %" PRIu64 "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s, acb, start, len, offset);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_qed_aio_write_postfill(void * s, void * acb, uint64_t start, size_t len, uint64_t offset)
{
    if (true) {
        _nocheck__trace_qed_aio_write_postfill(s, acb, start, len, offset);
    }
}
2472 | |
/* Trace event "qed_aio_write_main": logs s/acb/ret/offset/len when the event
 * and LOG_TRACE are enabled; macro reports the dynamic enable state. */
#define TRACE_QED_AIO_WRITE_MAIN_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_QED_AIO_WRITE_MAIN) || \
    false)

static inline void _nocheck__trace_qed_aio_write_main(void * s, void * acb, int ret, uint64_t offset, size_t len)
{
    if (trace_event_get_state(TRACE_QED_AIO_WRITE_MAIN) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:qed_aio_write_main " "s %p acb %p ret %d offset %" PRIu64" len %zu" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s, acb, ret, offset, len);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_qed_aio_write_main(void * s, void * acb, int ret, uint64_t offset, size_t len)
{
    if (true) {
        _nocheck__trace_qed_aio_write_main(s, acb, ret, offset, len);
    }
}
2495 | |
/* Trace event "vxhs_iio_callback": logs the error code (NULL-ctx case) when
 * the event and LOG_TRACE are enabled; macro reports the enable state. */
#define TRACE_VXHS_IIO_CALLBACK_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_VXHS_IIO_CALLBACK) || \
    false)

static inline void _nocheck__trace_vxhs_iio_callback(int error)
{
    if (trace_event_get_state(TRACE_VXHS_IIO_CALLBACK) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:vxhs_iio_callback " "ctx is NULL: error %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , error);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_vxhs_iio_callback(int error)
{
    if (true) {
        _nocheck__trace_vxhs_iio_callback(error);
    }
}
2518 | |
/* Trace event "vxhs_iio_callback_chnfail": logs err/error on QNIO channel
 * failure when the event and LOG_TRACE are enabled. */
#define TRACE_VXHS_IIO_CALLBACK_CHNFAIL_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_VXHS_IIO_CALLBACK_CHNFAIL) || \
    false)

static inline void _nocheck__trace_vxhs_iio_callback_chnfail(int err, int error)
{
    if (trace_event_get_state(TRACE_VXHS_IIO_CALLBACK_CHNFAIL) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:vxhs_iio_callback_chnfail " "QNIO channel failed, no i/o %d, %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , err, error);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_vxhs_iio_callback_chnfail(int err, int error)
{
    if (true) {
        _nocheck__trace_vxhs_iio_callback_chnfail(err, error);
    }
}
2541 | |
/* Trace event "vxhs_iio_callback_unknwn": logs an unexpected opcode and errno
 * when the event and LOG_TRACE are enabled. */
#define TRACE_VXHS_IIO_CALLBACK_UNKNWN_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_VXHS_IIO_CALLBACK_UNKNWN) || \
    false)

static inline void _nocheck__trace_vxhs_iio_callback_unknwn(int opcode, int err)
{
    if (trace_event_get_state(TRACE_VXHS_IIO_CALLBACK_UNKNWN) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:vxhs_iio_callback_unknwn " "unexpected opcode %d, errno %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , opcode, err);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_vxhs_iio_callback_unknwn(int opcode, int err)
{
    if (true) {
        _nocheck__trace_vxhs_iio_callback_unknwn(opcode, err);
    }
}
2564 | |
/* Trace event "vxhs_aio_rw_invalid": logs an invalid I/O direction value
 * when the event and LOG_TRACE are enabled. */
#define TRACE_VXHS_AIO_RW_INVALID_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_VXHS_AIO_RW_INVALID) || \
    false)

static inline void _nocheck__trace_vxhs_aio_rw_invalid(int req)
{
    if (trace_event_get_state(TRACE_VXHS_AIO_RW_INVALID) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:vxhs_aio_rw_invalid " "Invalid I/O request iodir %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , req);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_vxhs_aio_rw_invalid(int req)
{
    if (true) {
        _nocheck__trace_vxhs_aio_rw_invalid(req);
    }
}
2587 | |
/* Trace event "vxhs_aio_rw_ioerr": logs vDisk guid, direction, size, offset,
 * ACB pointer and error/errno when the event and LOG_TRACE are enabled. */
#define TRACE_VXHS_AIO_RW_IOERR_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_VXHS_AIO_RW_IOERR) || \
    false)

static inline void _nocheck__trace_vxhs_aio_rw_ioerr(char * guid, int iodir, uint64_t size, uint64_t off, void * acb, int ret, int err)
{
    if (trace_event_get_state(TRACE_VXHS_AIO_RW_IOERR) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:vxhs_aio_rw_ioerr " "IO ERROR (vDisk %s) FOR : Read/Write = %d size = %" PRIu64" offset = %" PRIu64" ACB = %p. Error = %d, errno = %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , guid, iodir, size, off, acb, ret, err);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_vxhs_aio_rw_ioerr(char * guid, int iodir, uint64_t size, uint64_t off, void * acb, int ret, int err)
{
    if (true) {
        _nocheck__trace_vxhs_aio_rw_ioerr(guid, iodir, size, off, acb, ret, err);
    }
}
2610 | |
/* Trace event "vxhs_get_vdisk_stat_err": logs guid/ret/errno on a failed
 * vDisk stat ioctl when the event and LOG_TRACE are enabled. */
#define TRACE_VXHS_GET_VDISK_STAT_ERR_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_VXHS_GET_VDISK_STAT_ERR) || \
    false)

static inline void _nocheck__trace_vxhs_get_vdisk_stat_err(char * guid, int ret, int err)
{
    if (trace_event_get_state(TRACE_VXHS_GET_VDISK_STAT_ERR) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:vxhs_get_vdisk_stat_err " "vDisk (%s) stat ioctl failed, ret = %d, errno = %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , guid, ret, err);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_vxhs_get_vdisk_stat_err(char * guid, int ret, int err)
{
    if (true) {
        _nocheck__trace_vxhs_get_vdisk_stat_err(guid, ret, err);
    }
}
2633 | |
/* Trace event "vxhs_get_vdisk_stat": logs guid and reported vDisk size when
 * the event and LOG_TRACE are enabled. */
#define TRACE_VXHS_GET_VDISK_STAT_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_VXHS_GET_VDISK_STAT) || \
    false)

static inline void _nocheck__trace_vxhs_get_vdisk_stat(char * vdisk_guid, uint64_t vdisk_size)
{
    if (trace_event_get_state(TRACE_VXHS_GET_VDISK_STAT) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:vxhs_get_vdisk_stat " "vDisk %s stat ioctl returned size %" PRIu64 "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , vdisk_guid, vdisk_size);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_vxhs_get_vdisk_stat(char * vdisk_guid, uint64_t vdisk_size)
{
    if (true) {
        _nocheck__trace_vxhs_get_vdisk_stat(vdisk_guid, vdisk_size);
    }
}
2656 | |
/* Trace event "vxhs_complete_aio": logs the failed ACB pointer and return
 * value when the event and LOG_TRACE are enabled. */
#define TRACE_VXHS_COMPLETE_AIO_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_VXHS_COMPLETE_AIO) || \
    false)

static inline void _nocheck__trace_vxhs_complete_aio(void * acb, uint64_t ret)
{
    if (trace_event_get_state(TRACE_VXHS_COMPLETE_AIO) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:vxhs_complete_aio " "aio failed acb %p ret %" PRIu64 "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , acb, ret);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_vxhs_complete_aio(void * acb, uint64_t ret)
{
    if (true) {
        _nocheck__trace_vxhs_complete_aio(acb, ret);
    }
}
2679 | |
/* Trace event "vxhs_parse_uri_filename": logs the URI string passed through
 * bdrv_parse_filename when the event and LOG_TRACE are enabled. */
#define TRACE_VXHS_PARSE_URI_FILENAME_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_VXHS_PARSE_URI_FILENAME) || \
    false)

static inline void _nocheck__trace_vxhs_parse_uri_filename(const char * filename)
{
    if (trace_event_get_state(TRACE_VXHS_PARSE_URI_FILENAME) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:vxhs_parse_uri_filename " "URI passed via bdrv_parse_filename %s" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , filename);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_vxhs_parse_uri_filename(const char * filename)
{
    if (true) {
        _nocheck__trace_vxhs_parse_uri_filename(filename);
    }
}
2702 | |
/* Trace event "vxhs_open_vdiskid": logs the vdisk id being opened when the
 * event and LOG_TRACE are enabled. */
#define TRACE_VXHS_OPEN_VDISKID_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_VXHS_OPEN_VDISKID) || \
    false)

static inline void _nocheck__trace_vxhs_open_vdiskid(const char * vdisk_id)
{
    if (trace_event_get_state(TRACE_VXHS_OPEN_VDISKID) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:vxhs_open_vdiskid " "Opening vdisk-id %s" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , vdisk_id);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_vxhs_open_vdiskid(const char * vdisk_id)
{
    if (true) {
        _nocheck__trace_vxhs_open_vdiskid(vdisk_id);
    }
}
2725 | |
/* Trace event "vxhs_open_hostinfo": logs the host address and port being
 * added when the event and LOG_TRACE are enabled. */
#define TRACE_VXHS_OPEN_HOSTINFO_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_VXHS_OPEN_HOSTINFO) || \
    false)

static inline void _nocheck__trace_vxhs_open_hostinfo(char * of_vsa_addr, int port)
{
    if (trace_event_get_state(TRACE_VXHS_OPEN_HOSTINFO) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:vxhs_open_hostinfo " "Adding host %s:%d to BDRVVXHSState" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , of_vsa_addr, port);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_vxhs_open_hostinfo(char * of_vsa_addr, int port)
{
    if (true) {
        _nocheck__trace_vxhs_open_hostinfo(of_vsa_addr, port);
    }
}
2748 | |
/* Trace event "vxhs_open_iio_open": logs the host whose storage-agent
 * connection failed when the event and LOG_TRACE are enabled. */
#define TRACE_VXHS_OPEN_IIO_OPEN_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_VXHS_OPEN_IIO_OPEN) || \
    false)

static inline void _nocheck__trace_vxhs_open_iio_open(const char * host)
{
    if (trace_event_get_state(TRACE_VXHS_OPEN_IIO_OPEN) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:vxhs_open_iio_open " "Failed to connect to storage agent on host %s" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , host);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_vxhs_open_iio_open(const char * host)
{
    if (true) {
        _nocheck__trace_vxhs_open_iio_open(host);
    }
}
2771 | |
/* Trace event "vxhs_parse_uri_hostinfo": logs the parsed host IP and port
 * when the event and LOG_TRACE are enabled. */
#define TRACE_VXHS_PARSE_URI_HOSTINFO_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_VXHS_PARSE_URI_HOSTINFO) || \
    false)

static inline void _nocheck__trace_vxhs_parse_uri_hostinfo(char * host, int port)
{
    if (trace_event_get_state(TRACE_VXHS_PARSE_URI_HOSTINFO) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:vxhs_parse_uri_hostinfo " "Host: IP %s, Port %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , host, port);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_vxhs_parse_uri_hostinfo(char * host, int port)
{
    if (true) {
        _nocheck__trace_vxhs_parse_uri_hostinfo(host, port);
    }
}
2794 | |
/* Trace event "vxhs_close": logs the guid of the vdisk being closed when the
 * event and LOG_TRACE are enabled. */
#define TRACE_VXHS_CLOSE_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_VXHS_CLOSE) || \
    false)

static inline void _nocheck__trace_vxhs_close(char * vdisk_guid)
{
    if (trace_event_get_state(TRACE_VXHS_CLOSE) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:vxhs_close " "Closing vdisk %s" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , vdisk_guid);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_vxhs_close(char * vdisk_guid)
{
    if (true) {
        _nocheck__trace_vxhs_close(vdisk_guid);
    }
}
2817 | |
/* Trace event "vxhs_get_creds": logs the TLS credential file paths (cacert,
 * client key, client cert) when the event and LOG_TRACE are enabled. */
#define TRACE_VXHS_GET_CREDS_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_VXHS_GET_CREDS) || \
    false)

static inline void _nocheck__trace_vxhs_get_creds(const char * cacert, const char * client_key, const char * client_cert)
{
    if (trace_event_get_state(TRACE_VXHS_GET_CREDS) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:vxhs_get_creds " "cacert %s, client_key %s, client_cert %s" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , cacert, client_key, client_cert);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_vxhs_get_creds(const char * cacert, const char * client_key, const char * client_cert)
{
    if (true) {
        _nocheck__trace_vxhs_get_creds(cacert, client_key, client_cert);
    }
}
2840 | |
/* Trace event "nvme_kick": logs the driver state pointer and queue index
 * when the event and LOG_TRACE are enabled. */
#define TRACE_NVME_KICK_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_NVME_KICK) || \
    false)

static inline void _nocheck__trace_nvme_kick(void * s, int queue)
{
    if (trace_event_get_state(TRACE_NVME_KICK) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:nvme_kick " "s %p queue %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s, queue);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_nvme_kick(void * s, int queue)
{
    if (true) {
        _nocheck__trace_nvme_kick(s, queue);
    }
}
2863 | |
/* Trace event "nvme_dma_flush_queue_wait": logs the driver state pointer
 * when the event and LOG_TRACE are enabled. */
#define TRACE_NVME_DMA_FLUSH_QUEUE_WAIT_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_NVME_DMA_FLUSH_QUEUE_WAIT) || \
    false)

static inline void _nocheck__trace_nvme_dma_flush_queue_wait(void * s)
{
    if (trace_event_get_state(TRACE_NVME_DMA_FLUSH_QUEUE_WAIT) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:nvme_dma_flush_queue_wait " "s %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_nvme_dma_flush_queue_wait(void * s)
{
    if (true) {
        _nocheck__trace_nvme_dma_flush_queue_wait(s);
    }
}
2886 | |
/* Trace event "nvme_error": logs completion-queue error fields (cmd_specific,
 * sq_head, sqid, cid, status) when the event and LOG_TRACE are enabled. */
#define TRACE_NVME_ERROR_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_NVME_ERROR) || \
    false)

static inline void _nocheck__trace_nvme_error(int cmd_specific, int sq_head, int sqid, int cid, int status)
{
    if (trace_event_get_state(TRACE_NVME_ERROR) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:nvme_error " "cmd_specific %d sq_head %d sqid %d cid %d status 0x%x" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , cmd_specific, sq_head, sqid, cid, status);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_nvme_error(int cmd_specific, int sq_head, int sqid, int cid, int status)
{
    if (true) {
        _nocheck__trace_nvme_error(cmd_specific, sq_head, sqid, cid, status);
    }
}
2909 | |
/* Trace event "nvme_process_completion": logs state pointer, queue index and
 * in-flight count when the event and LOG_TRACE are enabled. */
#define TRACE_NVME_PROCESS_COMPLETION_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_NVME_PROCESS_COMPLETION) || \
    false)

static inline void _nocheck__trace_nvme_process_completion(void * s, int index, int inflight)
{
    if (trace_event_get_state(TRACE_NVME_PROCESS_COMPLETION) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:nvme_process_completion " "s %p queue %d inflight %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s, index, inflight);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_nvme_process_completion(void * s, int index, int inflight)
{
    if (true) {
        _nocheck__trace_nvme_process_completion(s, index, inflight);
    }
}
2932 | |
/* Trace event "nvme_process_completion_queue_busy": logs state pointer and
 * queue index when the event and LOG_TRACE are enabled. */
#define TRACE_NVME_PROCESS_COMPLETION_QUEUE_BUSY_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_NVME_PROCESS_COMPLETION_QUEUE_BUSY) || \
    false)

static inline void _nocheck__trace_nvme_process_completion_queue_busy(void * s, int index)
{
    if (trace_event_get_state(TRACE_NVME_PROCESS_COMPLETION_QUEUE_BUSY) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:nvme_process_completion_queue_busy " "s %p queue %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s, index);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_nvme_process_completion_queue_busy(void * s, int index)
{
    if (true) {
        _nocheck__trace_nvme_process_completion_queue_busy(s, index);
    }
}
2955 | |
/* Trace event "nvme_complete_command": logs state pointer, queue index and
 * command id when the event and LOG_TRACE are enabled. */
#define TRACE_NVME_COMPLETE_COMMAND_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_NVME_COMPLETE_COMMAND) || \
    false)

static inline void _nocheck__trace_nvme_complete_command(void * s, int index, int cid)
{
    if (trace_event_get_state(TRACE_NVME_COMPLETE_COMMAND) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:nvme_complete_command " "s %p queue %d cid %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s, index, cid);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_nvme_complete_command(void * s, int index, int cid)
{
    if (true) {
        _nocheck__trace_nvme_complete_command(s, index, cid);
    }
}
2978 | |
/* Trace event "nvme_submit_command": logs state pointer, queue index and
 * command id when the event and LOG_TRACE are enabled. */
#define TRACE_NVME_SUBMIT_COMMAND_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_NVME_SUBMIT_COMMAND) || \
    false)

static inline void _nocheck__trace_nvme_submit_command(void * s, int index, int cid)
{
    if (trace_event_get_state(TRACE_NVME_SUBMIT_COMMAND) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:nvme_submit_command " "s %p queue %d cid %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s, index, cid);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_nvme_submit_command(void * s, int index, int cid)
{
    if (true) {
        _nocheck__trace_nvme_submit_command(s, index, cid);
    }
}
3001 | |
/* Trace event "nvme_submit_command_raw": logs the first eight raw command
 * bytes in hex when the event and LOG_TRACE are enabled. */
#define TRACE_NVME_SUBMIT_COMMAND_RAW_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_NVME_SUBMIT_COMMAND_RAW) || \
    false)

static inline void _nocheck__trace_nvme_submit_command_raw(int c0, int c1, int c2, int c3, int c4, int c5, int c6, int c7)
{
    if (trace_event_get_state(TRACE_NVME_SUBMIT_COMMAND_RAW) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:nvme_submit_command_raw " "%02x %02x %02x %02x %02x %02x %02x %02x" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , c0, c1, c2, c3, c4, c5, c6, c7);
    }
}

/* Public trace point; "if (true)" is the compile-time enable flag. */
static inline void trace_nvme_submit_command_raw(int c0, int c1, int c2, int c3, int c4, int c5, int c6, int c7)
{
    if (true) {
        _nocheck__trace_nvme_submit_command_raw(c0, c1, c2, c3, c4, c5, c6, c7);
    }
}
3024 | |
/* True when the "nvme_handle_event" event is dynamically enabled. */
#define TRACE_NVME_HANDLE_EVENT_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_NVME_HANDLE_EVENT) || \
    false)

/* Emit one "nvme_handle_event" log record (thread id, timestamp, state ptr). */
static inline void _nocheck__trace_nvme_handle_event(void * s)
{
    if (trace_event_get_state(TRACE_NVME_HANDLE_EVENT) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:nvme_handle_event " "s %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_nvme_handle_event(void * s)
{
    if (true) {
        _nocheck__trace_nvme_handle_event(s);
    }
}
3047 | |
/* True when the "nvme_poll_cb" event is dynamically enabled. */
#define TRACE_NVME_POLL_CB_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_NVME_POLL_CB) || \
    false)

/* Emit one "nvme_poll_cb" log record (thread id, timestamp, state ptr). */
static inline void _nocheck__trace_nvme_poll_cb(void * s)
{
    if (trace_event_get_state(TRACE_NVME_POLL_CB) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:nvme_poll_cb " "s %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_nvme_poll_cb(void * s)
{
    if (true) {
        _nocheck__trace_nvme_poll_cb(s);
    }
}
3070 | |
3071 | #define TRACE_NVME_PRW_ALIGNED_BACKEND_DSTATE() ( \ |
3072 | trace_event_get_state_dynamic_by_id(TRACE_NVME_PRW_ALIGNED) || \ |
3073 | false) |
3074 | |
3075 | static inline void _nocheck__trace_nvme_prw_aligned(void * s, int is_write, uint64_t offset, uint64_t bytes, int flags, int niov) |
3076 | { |
3077 | if (trace_event_get_state(TRACE_NVME_PRW_ALIGNED) && qemu_loglevel_mask(LOG_TRACE)) { |
3078 | struct timeval _now; |
3079 | gettimeofday(&_now, NULL); |
3080 | qemu_log("%d@%zu.%06zu:nvme_prw_aligned " "s %p is_write %d offset %" PRId64" bytes %" PRId64" flags %d niov %d" "\n" , |
3081 | qemu_get_thread_id(), |
3082 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
3083 | , s, is_write, offset, bytes, flags, niov); |
3084 | } |
3085 | } |
3086 | |
3087 | static inline void trace_nvme_prw_aligned(void * s, int is_write, uint64_t offset, uint64_t bytes, int flags, int niov) |
3088 | { |
3089 | if (true) { |
3090 | _nocheck__trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, niov); |
3091 | } |
3092 | } |
3093 | |
/* True when the "nvme_qiov_unaligned" event is dynamically enabled. */
#define TRACE_NVME_QIOV_UNALIGNED_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_NVME_QIOV_UNALIGNED) || \
    false)

/* Emit one "nvme_qiov_unaligned" log record (thread id, timestamp, args). */
static inline void _nocheck__trace_nvme_qiov_unaligned(const void * qiov, int n, void * base, size_t size, int align)
{
    if (trace_event_get_state(TRACE_NVME_QIOV_UNALIGNED) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:nvme_qiov_unaligned " "qiov %p n %d base %p size 0x%zx align 0x%x" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , qiov, n, base, size, align);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_nvme_qiov_unaligned(const void * qiov, int n, void * base, size_t size, int align)
{
    if (true) {
        _nocheck__trace_nvme_qiov_unaligned(qiov, n, base, size, align);
    }
}
3116 | |
3117 | #define TRACE_NVME_PRW_BUFFERED_BACKEND_DSTATE() ( \ |
3118 | trace_event_get_state_dynamic_by_id(TRACE_NVME_PRW_BUFFERED) || \ |
3119 | false) |
3120 | |
3121 | static inline void _nocheck__trace_nvme_prw_buffered(void * s, uint64_t offset, uint64_t bytes, int niov, int is_write) |
3122 | { |
3123 | if (trace_event_get_state(TRACE_NVME_PRW_BUFFERED) && qemu_loglevel_mask(LOG_TRACE)) { |
3124 | struct timeval _now; |
3125 | gettimeofday(&_now, NULL); |
3126 | qemu_log("%d@%zu.%06zu:nvme_prw_buffered " "s %p offset %" PRId64" bytes %" PRId64" niov %d is_write %d" "\n" , |
3127 | qemu_get_thread_id(), |
3128 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
3129 | , s, offset, bytes, niov, is_write); |
3130 | } |
3131 | } |
3132 | |
3133 | static inline void trace_nvme_prw_buffered(void * s, uint64_t offset, uint64_t bytes, int niov, int is_write) |
3134 | { |
3135 | if (true) { |
3136 | _nocheck__trace_nvme_prw_buffered(s, offset, bytes, niov, is_write); |
3137 | } |
3138 | } |
3139 | |
3140 | #define TRACE_NVME_RW_DONE_BACKEND_DSTATE() ( \ |
3141 | trace_event_get_state_dynamic_by_id(TRACE_NVME_RW_DONE) || \ |
3142 | false) |
3143 | |
3144 | static inline void _nocheck__trace_nvme_rw_done(void * s, int is_write, uint64_t offset, uint64_t bytes, int ret) |
3145 | { |
3146 | if (trace_event_get_state(TRACE_NVME_RW_DONE) && qemu_loglevel_mask(LOG_TRACE)) { |
3147 | struct timeval _now; |
3148 | gettimeofday(&_now, NULL); |
3149 | qemu_log("%d@%zu.%06zu:nvme_rw_done " "s %p is_write %d offset %" PRId64" bytes %" PRId64" ret %d" "\n" , |
3150 | qemu_get_thread_id(), |
3151 | (size_t)_now.tv_sec, (size_t)_now.tv_usec |
3152 | , s, is_write, offset, bytes, ret); |
3153 | } |
3154 | } |
3155 | |
3156 | static inline void trace_nvme_rw_done(void * s, int is_write, uint64_t offset, uint64_t bytes, int ret) |
3157 | { |
3158 | if (true) { |
3159 | _nocheck__trace_nvme_rw_done(s, is_write, offset, bytes, ret); |
3160 | } |
3161 | } |
3162 | |
/* True when the "nvme_dma_map_flush" event is dynamically enabled. */
#define TRACE_NVME_DMA_MAP_FLUSH_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_NVME_DMA_MAP_FLUSH) || \
    false)

/* Emit one "nvme_dma_map_flush" log record (thread id, timestamp, state ptr). */
static inline void _nocheck__trace_nvme_dma_map_flush(void * s)
{
    if (trace_event_get_state(TRACE_NVME_DMA_MAP_FLUSH) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:nvme_dma_map_flush " "s %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_nvme_dma_map_flush(void * s)
{
    if (true) {
        _nocheck__trace_nvme_dma_map_flush(s);
    }
}
3185 | |
/* True when the "nvme_free_req_queue_wait" event is dynamically enabled. */
#define TRACE_NVME_FREE_REQ_QUEUE_WAIT_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_NVME_FREE_REQ_QUEUE_WAIT) || \
    false)

/* Emit one "nvme_free_req_queue_wait" log record (thread id, timestamp, queue ptr). */
static inline void _nocheck__trace_nvme_free_req_queue_wait(void * q)
{
    if (trace_event_get_state(TRACE_NVME_FREE_REQ_QUEUE_WAIT) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:nvme_free_req_queue_wait " "q %p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , q);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_nvme_free_req_queue_wait(void * q)
{
    if (true) {
        _nocheck__trace_nvme_free_req_queue_wait(q);
    }
}
3208 | |
/* True when the "nvme_cmd_map_qiov" event is dynamically enabled. */
#define TRACE_NVME_CMD_MAP_QIOV_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_NVME_CMD_MAP_QIOV) || \
    false)

/* Emit one "nvme_cmd_map_qiov" log record (thread id, timestamp, args). */
static inline void _nocheck__trace_nvme_cmd_map_qiov(void * s, void * cmd, void * req, void * qiov, int entries)
{
    if (trace_event_get_state(TRACE_NVME_CMD_MAP_QIOV) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:nvme_cmd_map_qiov " "s %p cmd %p req %p qiov %p entries %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s, cmd, req, qiov, entries);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_nvme_cmd_map_qiov(void * s, void * cmd, void * req, void * qiov, int entries)
{
    if (true) {
        _nocheck__trace_nvme_cmd_map_qiov(s, cmd, req, qiov, entries);
    }
}
3231 | |
/* True when the "nvme_cmd_map_qiov_pages" event is dynamically enabled. */
#define TRACE_NVME_CMD_MAP_QIOV_PAGES_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_NVME_CMD_MAP_QIOV_PAGES) || \
    false)

/* Emit one "nvme_cmd_map_qiov_pages" log record; page is printed in hex (PRIx64). */
static inline void _nocheck__trace_nvme_cmd_map_qiov_pages(void * s, int i, uint64_t page)
{
    if (trace_event_get_state(TRACE_NVME_CMD_MAP_QIOV_PAGES) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:nvme_cmd_map_qiov_pages " "s %p page[%d] 0x%" PRIx64 "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s, i, page);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_nvme_cmd_map_qiov_pages(void * s, int i, uint64_t page)
{
    if (true) {
        _nocheck__trace_nvme_cmd_map_qiov_pages(s, i, page);
    }
}
3254 | |
/* True when the "nvme_cmd_map_qiov_iov" event is dynamically enabled. */
#define TRACE_NVME_CMD_MAP_QIOV_IOV_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_NVME_CMD_MAP_QIOV_IOV) || \
    false)

/* Emit one "nvme_cmd_map_qiov_iov" log record (thread id, timestamp, args). */
static inline void _nocheck__trace_nvme_cmd_map_qiov_iov(void * s, int i, void * page, int pages)
{
    if (trace_event_get_state(TRACE_NVME_CMD_MAP_QIOV_IOV) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:nvme_cmd_map_qiov_iov " "s %p iov[%d] %p pages %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , s, i, page, pages);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_nvme_cmd_map_qiov_iov(void * s, int i, void * page, int pages)
{
    if (true) {
        _nocheck__trace_nvme_cmd_map_qiov_iov(s, i, page, pages);
    }
}
3277 | |
/* True when the "iscsi_xcopy" event is dynamically enabled. */
#define TRACE_ISCSI_XCOPY_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_ISCSI_XCOPY) || \
    false)

/* Emit one "iscsi_xcopy" log record; uint64_t fields use PRIu64 (correctly). */
static inline void _nocheck__trace_iscsi_xcopy(void * src_lun, uint64_t src_off, void * dst_lun, uint64_t dst_off, uint64_t bytes, int ret)
{
    if (trace_event_get_state(TRACE_ISCSI_XCOPY) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:iscsi_xcopy " "src_lun %p offset %" PRIu64" dst_lun %p offset %" PRIu64" bytes %" PRIu64" ret %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , src_lun, src_off, dst_lun, dst_off, bytes, ret);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_iscsi_xcopy(void * src_lun, uint64_t src_off, void * dst_lun, uint64_t dst_off, uint64_t bytes, int ret)
{
    if (true) {
        _nocheck__trace_iscsi_xcopy(src_lun, src_off, dst_lun, dst_off, bytes, ret);
    }
}
3300 | |
/* True when the "nbd_parse_blockstatus_compliance" event is dynamically enabled. */
#define TRACE_NBD_PARSE_BLOCKSTATUS_COMPLIANCE_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_NBD_PARSE_BLOCKSTATUS_COMPLIANCE) || \
    false)

/* Emit one "nbd_parse_blockstatus_compliance" log record with the error text. */
static inline void _nocheck__trace_nbd_parse_blockstatus_compliance(const char * err)
{
    if (trace_event_get_state(TRACE_NBD_PARSE_BLOCKSTATUS_COMPLIANCE) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:nbd_parse_blockstatus_compliance " "ignoring extra data from non-compliant server: %s" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , err);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_nbd_parse_blockstatus_compliance(const char * err)
{
    if (true) {
        _nocheck__trace_nbd_parse_blockstatus_compliance(err);
    }
}
3323 | |
/* True when the "nbd_structured_read_compliance" event is dynamically enabled. */
#define TRACE_NBD_STRUCTURED_READ_COMPLIANCE_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_NBD_STRUCTURED_READ_COMPLIANCE) || \
    false)

/* Emit one "nbd_structured_read_compliance" log record with the chunk type. */
static inline void _nocheck__trace_nbd_structured_read_compliance(const char * type)
{
    if (trace_event_get_state(TRACE_NBD_STRUCTURED_READ_COMPLIANCE) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:nbd_structured_read_compliance " "server sent non-compliant unaligned read %s chunk" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , type);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_nbd_structured_read_compliance(const char * type)
{
    if (true) {
        _nocheck__trace_nbd_structured_read_compliance(type);
    }
}
3346 | |
/* True when the "nbd_read_reply_entry_fail" event is dynamically enabled. */
#define TRACE_NBD_READ_REPLY_ENTRY_FAIL_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_NBD_READ_REPLY_ENTRY_FAIL) || \
    false)

/* Emit one "nbd_read_reply_entry_fail" log record (return code + error string). */
static inline void _nocheck__trace_nbd_read_reply_entry_fail(int ret, const char * err)
{
    if (trace_event_get_state(TRACE_NBD_READ_REPLY_ENTRY_FAIL) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:nbd_read_reply_entry_fail " "ret = %d, err: %s" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , ret, err);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_nbd_read_reply_entry_fail(int ret, const char * err)
{
    if (true) {
        _nocheck__trace_nbd_read_reply_entry_fail(ret, err);
    }
}
3369 | |
/* True when the "nbd_co_request_fail" event is dynamically enabled. */
#define TRACE_NBD_CO_REQUEST_FAIL_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_NBD_CO_REQUEST_FAIL) || \
    false)

/* Emit one "nbd_co_request_fail" log record; fixed-width fields use matching
 * <inttypes.h> PRI* macros (PRIu64/PRIu32/PRIx16/PRIu16). */
static inline void _nocheck__trace_nbd_co_request_fail(uint64_t from, uint32_t len, uint64_t handle, uint16_t flags, uint16_t type, const char * name, int ret, const char * err)
{
    if (trace_event_get_state(TRACE_NBD_CO_REQUEST_FAIL) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:nbd_co_request_fail " "Request failed { .from = %" PRIu64", .len = %" PRIu32 ", .handle = %" PRIu64 ", .flags = 0x%" PRIx16 ", .type = %" PRIu16 " (%s) } ret = %d, err: %s" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , from, len, handle, flags, type, name, ret, err);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_nbd_co_request_fail(uint64_t from, uint32_t len, uint64_t handle, uint16_t flags, uint16_t type, const char * name, int ret, const char * err)
{
    if (true) {
        _nocheck__trace_nbd_co_request_fail(from, len, handle, flags, type, name, ret, err);
    }
}
3392 | |
/* True when the "nbd_client_connect" event is dynamically enabled. */
#define TRACE_NBD_CLIENT_CONNECT_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_NBD_CLIENT_CONNECT) || \
    false)

/* Emit one "nbd_client_connect" log record with the export name. */
static inline void _nocheck__trace_nbd_client_connect(const char * export_name)
{
    if (trace_event_get_state(TRACE_NBD_CLIENT_CONNECT) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:nbd_client_connect " "export '%s'" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , export_name);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_nbd_client_connect(const char * export_name)
{
    if (true) {
        _nocheck__trace_nbd_client_connect(export_name);
    }
}
3415 | |
/* True when the "nbd_client_connect_success" event is dynamically enabled. */
#define TRACE_NBD_CLIENT_CONNECT_SUCCESS_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_NBD_CLIENT_CONNECT_SUCCESS) || \
    false)

/* Emit one "nbd_client_connect_success" log record with the export name. */
static inline void _nocheck__trace_nbd_client_connect_success(const char * export_name)
{
    if (trace_event_get_state(TRACE_NBD_CLIENT_CONNECT_SUCCESS) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:nbd_client_connect_success " "export '%s'" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , export_name);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_nbd_client_connect_success(const char * export_name)
{
    if (true) {
        _nocheck__trace_nbd_client_connect_success(export_name);
    }
}
3438 | |
/* True when the "ssh_restart_coroutine" event is dynamically enabled. */
#define TRACE_SSH_RESTART_COROUTINE_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SSH_RESTART_COROUTINE) || \
    false)

/* Emit one "ssh_restart_coroutine" log record with the coroutine pointer. */
static inline void _nocheck__trace_ssh_restart_coroutine(void * co)
{
    if (trace_event_get_state(TRACE_SSH_RESTART_COROUTINE) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:ssh_restart_coroutine " "co=%p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , co);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_ssh_restart_coroutine(void * co)
{
    if (true) {
        _nocheck__trace_ssh_restart_coroutine(co);
    }
}
3461 | |
/* True when the "ssh_flush" event is dynamically enabled. */
#define TRACE_SSH_FLUSH_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SSH_FLUSH) || \
    false)

/* Emit one argument-less "ssh_flush" log record (thread id + timestamp only). */
static inline void _nocheck__trace_ssh_flush(void)
{
    if (trace_event_get_state(TRACE_SSH_FLUSH) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:ssh_flush " "fsync" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 );
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_ssh_flush(void)
{
    if (true) {
        _nocheck__trace_ssh_flush();
    }
}
3484 | |
/* True when the "ssh_check_host_key_knownhosts" event is dynamically enabled. */
#define TRACE_SSH_CHECK_HOST_KEY_KNOWNHOSTS_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SSH_CHECK_HOST_KEY_KNOWNHOSTS) || \
    false)

/* Emit one argument-less "ssh_check_host_key_knownhosts" log record. */
static inline void _nocheck__trace_ssh_check_host_key_knownhosts(void)
{
    if (trace_event_get_state(TRACE_SSH_CHECK_HOST_KEY_KNOWNHOSTS) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:ssh_check_host_key_knownhosts " "host key OK" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 );
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_ssh_check_host_key_knownhosts(void)
{
    if (true) {
        _nocheck__trace_ssh_check_host_key_knownhosts();
    }
}
3507 | |
/* True when the "ssh_connect_to_ssh" event is dynamically enabled. */
#define TRACE_SSH_CONNECT_TO_SSH_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SSH_CONNECT_TO_SSH) || \
    false)

/* Emit one "ssh_connect_to_ssh" log record; mode is printed in octal (0%o). */
static inline void _nocheck__trace_ssh_connect_to_ssh(char * path, int flags, int mode)
{
    if (trace_event_get_state(TRACE_SSH_CONNECT_TO_SSH) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:ssh_connect_to_ssh " "opening file %s flags=0x%x creat_mode=0%o" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , path, flags, mode);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_ssh_connect_to_ssh(char * path, int flags, int mode)
{
    if (true) {
        _nocheck__trace_ssh_connect_to_ssh(path, flags, mode);
    }
}
3530 | |
/* True when the "ssh_co_yield" event is dynamically enabled. */
#define TRACE_SSH_CO_YIELD_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SSH_CO_YIELD) || \
    false)

/* Emit one "ssh_co_yield" log record (socket fd + read/write handler ptrs). */
static inline void _nocheck__trace_ssh_co_yield(int sock, void * rd_handler, void * wr_handler)
{
    if (trace_event_get_state(TRACE_SSH_CO_YIELD) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:ssh_co_yield " "s->sock=%d rd_handler=%p wr_handler=%p" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , sock, rd_handler, wr_handler);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_ssh_co_yield(int sock, void * rd_handler, void * wr_handler)
{
    if (true) {
        _nocheck__trace_ssh_co_yield(sock, rd_handler, wr_handler);
    }
}
3553 | |
/* True when the "ssh_co_yield_back" event is dynamically enabled. */
#define TRACE_SSH_CO_YIELD_BACK_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SSH_CO_YIELD_BACK) || \
    false)

/* Emit one "ssh_co_yield_back" log record with the socket fd. */
static inline void _nocheck__trace_ssh_co_yield_back(int sock)
{
    if (trace_event_get_state(TRACE_SSH_CO_YIELD_BACK) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:ssh_co_yield_back " "s->sock=%d - back" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , sock);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_ssh_co_yield_back(int sock)
{
    if (true) {
        _nocheck__trace_ssh_co_yield_back(sock);
    }
}
3576 | |
/* True when the "ssh_getlength" event is dynamically enabled. */
#define TRACE_SSH_GETLENGTH_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SSH_GETLENGTH) || \
    false)

/* Emit one "ssh_getlength" log record; int64_t length uses PRIi64. */
static inline void _nocheck__trace_ssh_getlength(int64_t length)
{
    if (trace_event_get_state(TRACE_SSH_GETLENGTH) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:ssh_getlength " "length=%" PRIi64 "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , length);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_ssh_getlength(int64_t length)
{
    if (true) {
        _nocheck__trace_ssh_getlength(length);
    }
}
3599 | |
/* True when the "ssh_co_create_opts" event is dynamically enabled. */
#define TRACE_SSH_CO_CREATE_OPTS_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SSH_CO_CREATE_OPTS) || \
    false)

/* Emit one "ssh_co_create_opts" log record; uint64_t size uses PRIu64. */
static inline void _nocheck__trace_ssh_co_create_opts(uint64_t size)
{
    if (trace_event_get_state(TRACE_SSH_CO_CREATE_OPTS) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:ssh_co_create_opts " "total_size=%" PRIu64 "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , size);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_ssh_co_create_opts(uint64_t size)
{
    if (true) {
        _nocheck__trace_ssh_co_create_opts(size);
    }
}
3622 | |
/* True when the "ssh_read" event is dynamically enabled. */
#define TRACE_SSH_READ_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SSH_READ) || \
    false)

/* Emit one "ssh_read" log record (offset via PRIi64, size via %zu). */
static inline void _nocheck__trace_ssh_read(int64_t offset, size_t size)
{
    if (trace_event_get_state(TRACE_SSH_READ) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:ssh_read " "offset=%" PRIi64 " size=%zu" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , offset, size);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_ssh_read(int64_t offset, size_t size)
{
    if (true) {
        _nocheck__trace_ssh_read(offset, size);
    }
}
3645 | |
/* True when the "ssh_read_buf" event is dynamically enabled. */
#define TRACE_SSH_READ_BUF_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SSH_READ_BUF) || \
    false)

/* Emit one "ssh_read_buf" log record (buffer ptr, requested and actual sizes). */
static inline void _nocheck__trace_ssh_read_buf(void * buf, size_t size, size_t actual_size)
{
    if (trace_event_get_state(TRACE_SSH_READ_BUF) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:ssh_read_buf " "sftp_read buf=%p size=%zu (actual size=%zu)" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , buf, size, actual_size);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_ssh_read_buf(void * buf, size_t size, size_t actual_size)
{
    if (true) {
        _nocheck__trace_ssh_read_buf(buf, size, actual_size);
    }
}
3668 | |
/* True when the "ssh_read_return" event is dynamically enabled. */
#define TRACE_SSH_READ_RETURN_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SSH_READ_RETURN) || \
    false)

/* Emit one "ssh_read_return" log record (ssize_t result via %zd + sftp errno). */
static inline void _nocheck__trace_ssh_read_return(ssize_t ret, int sftp_err)
{
    if (trace_event_get_state(TRACE_SSH_READ_RETURN) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:ssh_read_return " "sftp_read returned %zd (sftp error=%d)" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , ret, sftp_err);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_ssh_read_return(ssize_t ret, int sftp_err)
{
    if (true) {
        _nocheck__trace_ssh_read_return(ret, sftp_err);
    }
}
3691 | |
/* True when the "ssh_write" event is dynamically enabled. */
#define TRACE_SSH_WRITE_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SSH_WRITE) || \
    false)

/* Emit one "ssh_write" log record (offset via PRIi64, size via %zu). */
static inline void _nocheck__trace_ssh_write(int64_t offset, size_t size)
{
    if (trace_event_get_state(TRACE_SSH_WRITE) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:ssh_write " "offset=%" PRIi64 " size=%zu" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , offset, size);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_ssh_write(int64_t offset, size_t size)
{
    if (true) {
        _nocheck__trace_ssh_write(offset, size);
    }
}
3714 | |
/* True when the "ssh_write_buf" event is dynamically enabled. */
#define TRACE_SSH_WRITE_BUF_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SSH_WRITE_BUF) || \
    false)

/* Emit one "ssh_write_buf" log record (buffer ptr, requested and actual sizes). */
static inline void _nocheck__trace_ssh_write_buf(void * buf, size_t size, size_t actual_size)
{
    if (trace_event_get_state(TRACE_SSH_WRITE_BUF) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:ssh_write_buf " "sftp_write buf=%p size=%zu (actual size=%zu)" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , buf, size, actual_size);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_ssh_write_buf(void * buf, size_t size, size_t actual_size)
{
    if (true) {
        _nocheck__trace_ssh_write_buf(buf, size, actual_size);
    }
}
3737 | |
/* True when the "ssh_write_return" event is dynamically enabled. */
#define TRACE_SSH_WRITE_RETURN_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SSH_WRITE_RETURN) || \
    false)

/* Emit one "ssh_write_return" log record (ssize_t result via %zd + sftp errno). */
static inline void _nocheck__trace_ssh_write_return(ssize_t ret, int sftp_err)
{
    if (trace_event_get_state(TRACE_SSH_WRITE_RETURN) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:ssh_write_return " "sftp_write returned %zd (sftp error=%d)" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , ret, sftp_err);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_ssh_write_return(ssize_t ret, int sftp_err)
{
    if (true) {
        _nocheck__trace_ssh_write_return(ret, sftp_err);
    }
}
3760 | |
/* True when the "ssh_seek" event is dynamically enabled. */
#define TRACE_SSH_SEEK_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SSH_SEEK) || \
    false)

/* Emit one "ssh_seek" log record; int64_t offset uses PRIi64. */
static inline void _nocheck__trace_ssh_seek(int64_t offset)
{
    if (trace_event_get_state(TRACE_SSH_SEEK) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:ssh_seek " "seeking to offset=%" PRIi64 "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , offset);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_ssh_seek(int64_t offset)
{
    if (true) {
        _nocheck__trace_ssh_seek(offset);
    }
}
3783 | |
/* True when the "ssh_auth_methods" event is dynamically enabled. */
#define TRACE_SSH_AUTH_METHODS_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SSH_AUTH_METHODS) || \
    false)

/* Emit one "ssh_auth_methods" log record; methods bitmask printed in hex. */
static inline void _nocheck__trace_ssh_auth_methods(int methods)
{
    if (trace_event_get_state(TRACE_SSH_AUTH_METHODS) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:ssh_auth_methods " "auth methods=0x%x" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , methods);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_ssh_auth_methods(int methods)
{
    if (true) {
        _nocheck__trace_ssh_auth_methods(methods);
    }
}
3806 | |
/* True when the "ssh_server_status" event is dynamically enabled. */
#define TRACE_SSH_SERVER_STATUS_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SSH_SERVER_STATUS) || \
    false)

/* Emit one "ssh_server_status" log record with the server status code. */
static inline void _nocheck__trace_ssh_server_status(int status)
{
    if (trace_event_get_state(TRACE_SSH_SERVER_STATUS) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:ssh_server_status " "server status=%d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , status);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_ssh_server_status(int status)
{
    if (true) {
        _nocheck__trace_ssh_server_status(status);
    }
}
3829 | |
/* True when the "curl_timer_cb" event is dynamically enabled. */
#define TRACE_CURL_TIMER_CB_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_CURL_TIMER_CB) || \
    false)

/* Emit one "curl_timer_cb" log record; long timeout uses %ld. */
static inline void _nocheck__trace_curl_timer_cb(long timeout_ms)
{
    if (trace_event_get_state(TRACE_CURL_TIMER_CB) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:curl_timer_cb " "timer callback timeout_ms %ld" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , timeout_ms);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_curl_timer_cb(long timeout_ms)
{
    if (true) {
        _nocheck__trace_curl_timer_cb(timeout_ms);
    }
}
3852 | |
/* True when the "curl_sock_cb" event is dynamically enabled. */
#define TRACE_CURL_SOCK_CB_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_CURL_SOCK_CB) || \
    false)

/* Emit one "curl_sock_cb" log record (socket action + file descriptor). */
static inline void _nocheck__trace_curl_sock_cb(int action, int fd)
{
    if (trace_event_get_state(TRACE_CURL_SOCK_CB) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:curl_sock_cb " "sock action %d on fd %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , action, fd);
    }
}

/* Public tracepoint; the constant guard is emitted by tracetool. */
static inline void trace_curl_sock_cb(int action, int fd)
{
    if (true) {
        _nocheck__trace_curl_sock_cb(action, fd);
    }
}
3875 | |
/* True iff the curl_read_cb event is dynamically enabled for this backend. */
#define TRACE_CURL_READ_CB_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_CURL_READ_CB) || \
    false)

/* Emit curl_read_cb to the log backend (thread id + timestamp prefix). */
static inline void _nocheck__trace_curl_read_cb(size_t realsize)
{
    if (trace_event_get_state(TRACE_CURL_READ_CB) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:curl_read_cb " "just reading %zu bytes" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , realsize);
    }
}

/* Public tracepoint for curl_read_cb; filtering happens in the helper. */
static inline void trace_curl_read_cb(size_t realsize)
{
    if (true) {
        _nocheck__trace_curl_read_cb(realsize);
    }
}
3898 | |
/* True iff the curl_open event is dynamically enabled for this backend. */
#define TRACE_CURL_OPEN_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_CURL_OPEN) || \
    false)

/* Emit curl_open to the log backend (thread id + timestamp prefix). */
static inline void _nocheck__trace_curl_open(const char * file)
{
    if (trace_event_get_state(TRACE_CURL_OPEN) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:curl_open " "opening %s" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , file);
    }
}

/* Public tracepoint for curl_open; filtering happens in the helper. */
static inline void trace_curl_open(const char * file)
{
    if (true) {
        _nocheck__trace_curl_open(file);
    }
}
3921 | |
/* True iff the curl_open_size event is dynamically enabled for this backend. */
#define TRACE_CURL_OPEN_SIZE_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_CURL_OPEN_SIZE) || \
    false)

/* Emit curl_open_size to the log backend (thread id + timestamp prefix). */
static inline void _nocheck__trace_curl_open_size(uint64_t size)
{
    if (trace_event_get_state(TRACE_CURL_OPEN_SIZE) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:curl_open_size " "size = %" PRIu64 "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , size);
    }
}

/* Public tracepoint for curl_open_size; filtering happens in the helper. */
static inline void trace_curl_open_size(uint64_t size)
{
    if (true) {
        _nocheck__trace_curl_open_size(size);
    }
}
3944 | |
/* True iff the curl_setup_preadv event is dynamically enabled for this backend. */
#define TRACE_CURL_SETUP_PREADV_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_CURL_SETUP_PREADV) || \
    false)

/* Emit curl_setup_preadv to the log backend (thread id + timestamp prefix). */
static inline void _nocheck__trace_curl_setup_preadv(uint64_t bytes, uint64_t start, const char * range)
{
    if (trace_event_get_state(TRACE_CURL_SETUP_PREADV) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:curl_setup_preadv " "reading %" PRIu64 " at %" PRIu64 " (%s)" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , bytes, start, range);
    }
}

/* Public tracepoint for curl_setup_preadv; filtering happens in the helper. */
static inline void trace_curl_setup_preadv(uint64_t bytes, uint64_t start, const char * range)
{
    if (true) {
        _nocheck__trace_curl_setup_preadv(bytes, start, range);
    }
}
3967 | |
/* True iff the curl_close event is dynamically enabled for this backend. */
#define TRACE_CURL_CLOSE_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_CURL_CLOSE) || \
    false)

/* Emit curl_close to the log backend (thread id + timestamp prefix; no arguments). */
static inline void _nocheck__trace_curl_close(void)
{
    if (trace_event_get_state(TRACE_CURL_CLOSE) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:curl_close " "close" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 );
    }
}

/* Public tracepoint for curl_close; filtering happens in the helper. */
static inline void trace_curl_close(void)
{
    if (true) {
        _nocheck__trace_curl_close();
    }
}
3990 | |
/* True iff the file_xfs_write_zeroes event is dynamically enabled for this backend. */
#define TRACE_FILE_XFS_WRITE_ZEROES_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_FILE_XFS_WRITE_ZEROES) || \
    false)

/* Emit file_xfs_write_zeroes to the log backend (thread id + timestamp prefix). */
static inline void _nocheck__trace_file_xfs_write_zeroes(const char * error)
{
    if (trace_event_get_state(TRACE_FILE_XFS_WRITE_ZEROES) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:file_xfs_write_zeroes " "cannot write zero range (%s)" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , error);
    }
}

/* Public tracepoint for file_xfs_write_zeroes; filtering happens in the helper. */
static inline void trace_file_xfs_write_zeroes(const char * error)
{
    if (true) {
        _nocheck__trace_file_xfs_write_zeroes(error);
    }
}
4013 | |
/* True iff the file_xfs_discard event is dynamically enabled for this backend. */
#define TRACE_FILE_XFS_DISCARD_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_FILE_XFS_DISCARD) || \
    false)

/* Emit file_xfs_discard to the log backend (thread id + timestamp prefix). */
static inline void _nocheck__trace_file_xfs_discard(const char * error)
{
    if (trace_event_get_state(TRACE_FILE_XFS_DISCARD) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:file_xfs_discard " "cannot punch hole (%s)" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , error);
    }
}

/* Public tracepoint for file_xfs_discard; filtering happens in the helper. */
static inline void trace_file_xfs_discard(const char * error)
{
    if (true) {
        _nocheck__trace_file_xfs_discard(error);
    }
}
4036 | |
/* True iff the file_FindEjectableOpticalMedia event is dynamically enabled for this backend. */
#define TRACE_FILE_FINDEJECTABLEOPTICALMEDIA_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_FILE_FINDEJECTABLEOPTICALMEDIA) || \
    false)

/* Emit file_FindEjectableOpticalMedia to the log backend (thread id + timestamp prefix). */
static inline void _nocheck__trace_file_FindEjectableOpticalMedia(const char * media)
{
    if (trace_event_get_state(TRACE_FILE_FINDEJECTABLEOPTICALMEDIA) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:file_FindEjectableOpticalMedia " "Matching using %s" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , media);
    }
}

/* Public tracepoint for file_FindEjectableOpticalMedia; filtering happens in the helper. */
static inline void trace_file_FindEjectableOpticalMedia(const char * media)
{
    if (true) {
        _nocheck__trace_file_FindEjectableOpticalMedia(media);
    }
}
4059 | |
/* True iff the file_setup_cdrom event is dynamically enabled for this backend. */
#define TRACE_FILE_SETUP_CDROM_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_FILE_SETUP_CDROM) || \
    false)

/* Emit file_setup_cdrom to the log backend (thread id + timestamp prefix). */
static inline void _nocheck__trace_file_setup_cdrom(const char * partition)
{
    if (trace_event_get_state(TRACE_FILE_SETUP_CDROM) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:file_setup_cdrom " "Using %s as optical disc" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , partition);
    }
}

/* Public tracepoint for file_setup_cdrom; filtering happens in the helper. */
static inline void trace_file_setup_cdrom(const char * partition)
{
    if (true) {
        _nocheck__trace_file_setup_cdrom(partition);
    }
}
4082 | |
/* True iff the file_hdev_is_sg event is dynamically enabled for this backend. */
#define TRACE_FILE_HDEV_IS_SG_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_FILE_HDEV_IS_SG) || \
    false)

/* Emit file_hdev_is_sg to the log backend (thread id + timestamp prefix). */
static inline void _nocheck__trace_file_hdev_is_sg(int type, int version)
{
    if (trace_event_get_state(TRACE_FILE_HDEV_IS_SG) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:file_hdev_is_sg " "SG device found: type=%d, version=%d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , type, version);
    }
}

/* Public tracepoint for file_hdev_is_sg; filtering happens in the helper. */
static inline void trace_file_hdev_is_sg(int type, int version)
{
    if (true) {
        _nocheck__trace_file_hdev_is_sg(type, version);
    }
}
4105 | |
/* True iff the sheepdog_reconnect_to_sdog event is dynamically enabled for this backend. */
#define TRACE_SHEEPDOG_RECONNECT_TO_SDOG_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SHEEPDOG_RECONNECT_TO_SDOG) || \
    false)

/* Emit sheepdog_reconnect_to_sdog to the log backend (thread id + timestamp prefix; no arguments). */
static inline void _nocheck__trace_sheepdog_reconnect_to_sdog(void)
{
    if (trace_event_get_state(TRACE_SHEEPDOG_RECONNECT_TO_SDOG) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:sheepdog_reconnect_to_sdog " "Wait for connection to be established" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 );
    }
}

/* Public tracepoint for sheepdog_reconnect_to_sdog; filtering happens in the helper. */
static inline void trace_sheepdog_reconnect_to_sdog(void)
{
    if (true) {
        _nocheck__trace_sheepdog_reconnect_to_sdog();
    }
}
4128 | |
/* True iff the sheepdog_aio_read_response event is dynamically enabled for this backend. */
#define TRACE_SHEEPDOG_AIO_READ_RESPONSE_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SHEEPDOG_AIO_READ_RESPONSE) || \
    false)

/* Emit sheepdog_aio_read_response to the log backend (thread id + timestamp prefix; no arguments). */
static inline void _nocheck__trace_sheepdog_aio_read_response(void)
{
    if (trace_event_get_state(TRACE_SHEEPDOG_AIO_READ_RESPONSE) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:sheepdog_aio_read_response " "disable cache since the server doesn't support it" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 );
    }
}

/* Public tracepoint for sheepdog_aio_read_response; filtering happens in the helper. */
static inline void trace_sheepdog_aio_read_response(void)
{
    if (true) {
        _nocheck__trace_sheepdog_aio_read_response();
    }
}
4151 | |
/* True iff the sheepdog_open event is dynamically enabled for this backend. */
#define TRACE_SHEEPDOG_OPEN_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SHEEPDOG_OPEN) || \
    false)

/* Emit sheepdog_open to the log backend (thread id + timestamp prefix). */
static inline void _nocheck__trace_sheepdog_open(uint32_t vid)
{
    if (trace_event_get_state(TRACE_SHEEPDOG_OPEN) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:sheepdog_open " "0x%" PRIx32 " snapshot inode was open" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , vid);
    }
}

/* Public tracepoint for sheepdog_open; filtering happens in the helper. */
static inline void trace_sheepdog_open(uint32_t vid)
{
    if (true) {
        _nocheck__trace_sheepdog_open(vid);
    }
}
4174 | |
/* True iff the sheepdog_close event is dynamically enabled for this backend. */
#define TRACE_SHEEPDOG_CLOSE_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SHEEPDOG_CLOSE) || \
    false)

/* Emit sheepdog_close to the log backend (thread id + timestamp prefix). */
static inline void _nocheck__trace_sheepdog_close(const char * name)
{
    if (trace_event_get_state(TRACE_SHEEPDOG_CLOSE) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:sheepdog_close " "%s" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , name);
    }
}

/* Public tracepoint for sheepdog_close; filtering happens in the helper. */
static inline void trace_sheepdog_close(const char * name)
{
    if (true) {
        _nocheck__trace_sheepdog_close(name);
    }
}
4197 | |
/* True iff the sheepdog_create_branch_snapshot event is dynamically enabled for this backend. */
#define TRACE_SHEEPDOG_CREATE_BRANCH_SNAPSHOT_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SHEEPDOG_CREATE_BRANCH_SNAPSHOT) || \
    false)

/* Emit sheepdog_create_branch_snapshot to the log backend (thread id + timestamp prefix). */
static inline void _nocheck__trace_sheepdog_create_branch_snapshot(uint32_t vdi)
{
    if (trace_event_get_state(TRACE_SHEEPDOG_CREATE_BRANCH_SNAPSHOT) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:sheepdog_create_branch_snapshot " "0x%" PRIx32 " is snapshot" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , vdi);
    }
}

/* Public tracepoint for sheepdog_create_branch_snapshot; filtering happens in the helper. */
static inline void trace_sheepdog_create_branch_snapshot(uint32_t vdi)
{
    if (true) {
        _nocheck__trace_sheepdog_create_branch_snapshot(vdi);
    }
}
4220 | |
/* True iff the sheepdog_create_branch_created event is dynamically enabled for this backend. */
#define TRACE_SHEEPDOG_CREATE_BRANCH_CREATED_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SHEEPDOG_CREATE_BRANCH_CREATED) || \
    false)

/* Emit sheepdog_create_branch_created to the log backend (thread id + timestamp prefix). */
static inline void _nocheck__trace_sheepdog_create_branch_created(uint32_t vdi)
{
    if (trace_event_get_state(TRACE_SHEEPDOG_CREATE_BRANCH_CREATED) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:sheepdog_create_branch_created " "0x%" PRIx32 " is created" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , vdi);
    }
}

/* Public tracepoint for sheepdog_create_branch_created; filtering happens in the helper. */
static inline void trace_sheepdog_create_branch_created(uint32_t vdi)
{
    if (true) {
        _nocheck__trace_sheepdog_create_branch_created(vdi);
    }
}
4243 | |
/* True iff the sheepdog_create_branch_new event is dynamically enabled for this backend. */
#define TRACE_SHEEPDOG_CREATE_BRANCH_NEW_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SHEEPDOG_CREATE_BRANCH_NEW) || \
    false)

/* Emit sheepdog_create_branch_new to the log backend (thread id + timestamp prefix). */
static inline void _nocheck__trace_sheepdog_create_branch_new(uint32_t vdi)
{
    if (trace_event_get_state(TRACE_SHEEPDOG_CREATE_BRANCH_NEW) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:sheepdog_create_branch_new " "0x%" PRIx32 " was newly created" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , vdi);
    }
}

/* Public tracepoint for sheepdog_create_branch_new; filtering happens in the helper. */
static inline void trace_sheepdog_create_branch_new(uint32_t vdi)
{
    if (true) {
        _nocheck__trace_sheepdog_create_branch_new(vdi);
    }
}
4266 | |
/* True iff the sheepdog_co_rw_vector_update event is dynamically enabled for this backend. */
#define TRACE_SHEEPDOG_CO_RW_VECTOR_UPDATE_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SHEEPDOG_CO_RW_VECTOR_UPDATE) || \
    false)

/* Emit sheepdog_co_rw_vector_update to the log backend (thread id + timestamp prefix). */
static inline void _nocheck__trace_sheepdog_co_rw_vector_update(uint32_t vdi, uint64_t oid, uint64_t data, long idx)
{
    if (trace_event_get_state(TRACE_SHEEPDOG_CO_RW_VECTOR_UPDATE) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:sheepdog_co_rw_vector_update " "update ino (%" PRIu32 ") %" PRIu64 " %" PRIu64 " %ld" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , vdi, oid, data, idx);
    }
}

/* Public tracepoint for sheepdog_co_rw_vector_update; filtering happens in the helper. */
static inline void trace_sheepdog_co_rw_vector_update(uint32_t vdi, uint64_t oid, uint64_t data, long idx)
{
    if (true) {
        _nocheck__trace_sheepdog_co_rw_vector_update(vdi, oid, data, idx);
    }
}
4289 | |
/* True iff the sheepdog_co_rw_vector_new event is dynamically enabled for this backend. */
#define TRACE_SHEEPDOG_CO_RW_VECTOR_NEW_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SHEEPDOG_CO_RW_VECTOR_NEW) || \
    false)

/* Emit sheepdog_co_rw_vector_new to the log backend (thread id + timestamp prefix). */
static inline void _nocheck__trace_sheepdog_co_rw_vector_new(uint64_t oid)
{
    if (trace_event_get_state(TRACE_SHEEPDOG_CO_RW_VECTOR_NEW) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:sheepdog_co_rw_vector_new " "new oid 0x%" PRIx64 "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , oid);
    }
}

/* Public tracepoint for sheepdog_co_rw_vector_new; filtering happens in the helper. */
static inline void trace_sheepdog_co_rw_vector_new(uint64_t oid)
{
    if (true) {
        _nocheck__trace_sheepdog_co_rw_vector_new(oid);
    }
}
4312 | |
/* True iff the sheepdog_snapshot_create_info event is dynamically enabled for this backend. */
#define TRACE_SHEEPDOG_SNAPSHOT_CREATE_INFO_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SHEEPDOG_SNAPSHOT_CREATE_INFO) || \
    false)

/* Emit sheepdog_snapshot_create_info to the log backend (thread id + timestamp prefix). */
static inline void _nocheck__trace_sheepdog_snapshot_create_info(const char * sn_name, const char * id, const char * name, int64_t size, int is_snapshot)
{
    if (trace_event_get_state(TRACE_SHEEPDOG_SNAPSHOT_CREATE_INFO) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:sheepdog_snapshot_create_info " "sn_info: name %s id_str %s s: name %s vm_state_size %" PRId64 " " "is_snapshot %d" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , sn_name, id, name, size, is_snapshot);
    }
}

/* Public tracepoint for sheepdog_snapshot_create_info; filtering happens in the helper. */
static inline void trace_sheepdog_snapshot_create_info(const char * sn_name, const char * id, const char * name, int64_t size, int is_snapshot)
{
    if (true) {
        _nocheck__trace_sheepdog_snapshot_create_info(sn_name, id, name, size, is_snapshot);
    }
}
4335 | |
/* True iff the sheepdog_snapshot_create event is dynamically enabled for this backend. */
#define TRACE_SHEEPDOG_SNAPSHOT_CREATE_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SHEEPDOG_SNAPSHOT_CREATE) || \
    false)

/* Emit sheepdog_snapshot_create to the log backend (thread id + timestamp prefix). */
static inline void _nocheck__trace_sheepdog_snapshot_create(const char * sn_name, const char * id)
{
    if (trace_event_get_state(TRACE_SHEEPDOG_SNAPSHOT_CREATE) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:sheepdog_snapshot_create " "%s %s" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , sn_name, id);
    }
}

/* Public tracepoint for sheepdog_snapshot_create; filtering happens in the helper. */
static inline void trace_sheepdog_snapshot_create(const char * sn_name, const char * id)
{
    if (true) {
        _nocheck__trace_sheepdog_snapshot_create(sn_name, id);
    }
}
4358 | |
/* True iff the sheepdog_snapshot_create_inode event is dynamically enabled for this backend. */
#define TRACE_SHEEPDOG_SNAPSHOT_CREATE_INODE_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SHEEPDOG_SNAPSHOT_CREATE_INODE) || \
    false)

/* Emit sheepdog_snapshot_create_inode to the log backend (thread id + timestamp prefix). */
static inline void _nocheck__trace_sheepdog_snapshot_create_inode(const char * name, uint32_t snap, uint32_t vdi)
{
    if (trace_event_get_state(TRACE_SHEEPDOG_SNAPSHOT_CREATE_INODE) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:sheepdog_snapshot_create_inode " "s->inode: name %s snap_id 0x%" PRIx32 " vdi 0x%" PRIx32 "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , name, snap, vdi);
    }
}

/* Public tracepoint for sheepdog_snapshot_create_inode; filtering happens in the helper. */
static inline void trace_sheepdog_snapshot_create_inode(const char * name, uint32_t snap, uint32_t vdi)
{
    if (true) {
        _nocheck__trace_sheepdog_snapshot_create_inode(name, snap, vdi);
    }
}
4381 | |
/* True iff the sftp_error event is dynamically enabled for this backend. */
#define TRACE_SFTP_ERROR_BACKEND_DSTATE() ( \
    trace_event_get_state_dynamic_by_id(TRACE_SFTP_ERROR) || \
    false)

/* Emit sftp_error to the log backend (thread id + timestamp prefix). */
static inline void _nocheck__trace_sftp_error(const char * op, const char * ssh_err, int ssh_err_code, int sftp_err_code)
{
    if (trace_event_get_state(TRACE_SFTP_ERROR) && qemu_loglevel_mask(LOG_TRACE)) {
        struct timeval _now;
        gettimeofday(&_now, NULL);
        qemu_log("%d@%zu.%06zu:sftp_error " "%s failed: %s (libssh error code: %d, sftp error code: %d)" "\n" ,
                 qemu_get_thread_id(),
                 (size_t)_now.tv_sec, (size_t)_now.tv_usec
                 , op, ssh_err, ssh_err_code, sftp_err_code);
    }
}

/* Public tracepoint for sftp_error; filtering happens in the helper. */
static inline void trace_sftp_error(const char * op, const char * ssh_err, int ssh_err_code, int sftp_err_code)
{
    if (true) {
        _nocheck__trace_sftp_error(op, ssh_err, ssh_err_code, sftp_err_code);
    }
}
4404 | #endif /* TRACE_BLOCK_GENERATED_TRACERS_H */ |
4405 | |