/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/iov.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"

#ifdef CONFIG_VIRGL

#include <virglrenderer.h>

static struct virgl_renderer_callbacks virtio_gpu_3d_cbs;

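/*
 * Create a host-side 2D resource for the guest.  The hardcoded values
 * passed to virglrenderer (target 2, bind 1 << 1) match Gallium's
 * PIPE_TEXTURE_2D and PIPE_BIND_RENDER_TARGET; 2D resources are always
 * single-layer, single-level and use Y_0_TOP orientation.
 */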
static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    args.handle = c2d.resource_id;
    args.target = 2;
    args.format = c2d.format;
    args.bind = (1 << 1);
    args.width = c2d.width;
    args.height = c2d.height;
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}

static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;

    VIRTIO_GPU_FILL_CMD(c3d);
    trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format,
                                       c3d.width, c3d.height, c3d.depth);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}

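/*
 * Drop the renderer's reference to a resource.  Any guest backing pages
 * still attached to it are detached and unmapped first, so the iovec
 * allocated by the attach-backing path does not leak.
 */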
static void virgl_cmd_resource_unref(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_unref unref;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    virgl_renderer_resource_detach_iov(unref.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs != NULL && num_iovs != 0) {
        virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
    }
    virgl_renderer_resource_unref(unref.resource_id);
}

static void virgl_cmd_context_create(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VIRTIO_GPU_FILL_CMD(cc);
    trace_virtio_gpu_cmd_ctx_create(cc.hdr.ctx_id,
                                    cc.debug_name);

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen,
                                  cc.debug_name);
}

static void virgl_cmd_context_destroy(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VIRTIO_GPU_FILL_CMD(cd);
    trace_virtio_gpu_cmd_ctx_destroy(cd.hdr.ctx_id);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}

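/*
 * Forward a damage rectangle for scanout 'idx' to the attached display
 * console; scanouts without a console are silently ignored.
 */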
static void virtio_gpu_rect_update(VirtIOGPU *g, int idx, int x, int y,
                                   int width, int height)
{
    if (!g->parent_obj.scanout[idx].con) {
        return;
    }

    dpy_gl_update(g->parent_obj.scanout[idx].con, x, y, width, height);
}

static void virgl_cmd_resource_flush(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        if (g->parent_obj.scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        virtio_gpu_rect_update(g, i, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    }
}

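/*
 * SET_SCANOUT binds a resource to a display scanout.  A zero resource id
 * or an empty rectangle disables the scanout; otherwise the resource's GL
 * texture is looked up in virglrenderer and handed to the console as a
 * scanout texture.
 */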
static void virgl_cmd_set_scanout(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    struct virgl_renderer_resource_info info;
    int ret;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }
    g->parent_obj.enable = 1;

    memset(&info, 0, sizeof(info));

    if (ss.resource_id && ss.r.width && ss.r.height) {
        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
        if (ret == -1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal resource specified %d\n",
                          __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }
        qemu_console_resize(g->parent_obj.scanout[ss.scanout_id].con,
                            ss.r.width, ss.r.height);
        virgl_renderer_force_ctx_0();
        dpy_gl_scanout_texture(
            g->parent_obj.scanout[ss.scanout_id].con, info.tex_id,
            info.flags & 1 /* FIXME: Y_0_TOP */,
            info.width, info.height,
            ss.r.x, ss.r.y, ss.r.width, ss.r.height);
    } else {
        if (ss.scanout_id != 0) {
            dpy_gfx_replace_surface(
                g->parent_obj.scanout[ss.scanout_id].con, NULL);
        }
        dpy_gl_scanout_disable(g->parent_obj.scanout[ss.scanout_id].con);
    }
    g->parent_obj.scanout[ss.scanout_id].resource_id = ss.resource_id;
}

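/*
 * SUBMIT_3D: copy the guest's command stream out of the virtqueue buffers
 * and pass it to virglrenderer.  The header size field is in bytes while
 * virgl_renderer_submit_cmd() takes a dword count, hence cs.size / 4.
 */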
static void virgl_cmd_submit_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VIRTIO_GPU_FILL_CMD(cs);
    trace_virtio_gpu_cmd_ctx_submit(cs.hdr.ctx_id, cs.size);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: size mismatch (%zd/%d)",
                      __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
        g->stats.req_3d++;
        g->stats.bytes_3d += cs.size;
    }

    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}

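/*
 * Transfer commands move data between the guest's backing pages and the
 * host resource.  The 2D variant is routed through the same write path as
 * the 3D one, using context 0 and a box with z = 0 and depth = 1.
 */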
static void virgl_cmd_transfer_to_host_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0,
                                      0,
                                      0,
                                      0,
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}

static void virgl_cmd_transfer_to_host_3d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VIRTIO_GPU_FILL_CMD(t3d);
    trace_virtio_gpu_cmd_res_xfer_toh_3d(t3d.resource_id);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_from_host_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VIRTIO_GPU_FILL_CMD(tf3d);
    trace_virtio_gpu_cmd_res_xfer_fromh_3d(tf3d.resource_id);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}


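/*
 * Attach/detach guest backing pages.  The guest's scatter-gather entries
 * are mapped into an iovec by virtio_gpu_create_mapping_iov() and handed to
 * virglrenderer, which then reads and writes guest memory directly when
 * transfers are issued for this resource.
 */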
static void virgl_resource_attach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    int ret;

    VIRTIO_GPU_FILL_CMD(att_rb);
    trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);

    ret = virtio_gpu_create_mapping_iov(g, &att_rb, cmd, NULL, &res_iovs);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                             res_iovs, att_rb.nr_entries);

    if (ret != 0) {
        virtio_gpu_cleanup_mapping_iov(g, res_iovs, att_rb.nr_entries);
    }
}

static void virgl_resource_detach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(detach_rb);
    trace_virtio_gpu_cmd_res_back_detach(detach_rb.resource_id);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
}


static void virgl_cmd_ctx_attach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VIRTIO_GPU_FILL_CMD(att_res);
    trace_virtio_gpu_cmd_ctx_res_attach(att_res.hdr.ctx_id,
                                        att_res.resource_id);

    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}

static void virgl_cmd_ctx_detach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VIRTIO_GPU_FILL_CMD(det_res);
    trace_virtio_gpu_cmd_ctx_res_detach(det_res.hdr.ctx_id,
                                        det_res.resource_id);

    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}

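/*
 * Capability sets: capset_index 0 reports the VIRGL capset and index 1 the
 * VIRGL2 capset; any other index returns an all-zero entry.  GET_CAPSET
 * then fills a response buffer with the renderer's capability data for the
 * requested capset id and version.
 */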
static void virgl_cmd_get_capset_info(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VIRTIO_GPU_FILL_CMD(info);

    memset(&resp, 0, sizeof(resp));
    if (info.capset_index == 0) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else if (info.capset_index == 1) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL2;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else {
        resp.capset_max_version = 0;
        resp.capset_max_size = 0;
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}

static void virgl_cmd_get_capset(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;
    VIRTIO_GPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    if (!max_size) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    resp = g_malloc0(sizeof(*resp) + max_size);
    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    virtio_gpu_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}

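/*
 * Main control-queue dispatcher for the virgl path.  Each command is decoded
 * and handled synchronously; afterwards the command either has already sent
 * its response (cmd->finished), gets an error response, gets an immediate
 * OK_NODATA response, or, when VIRTIO_GPU_FLAG_FENCE is set, stays pending
 * until virglrenderer signals the fence via the write_fence callback.
 */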
void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;

    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd->finished) {
        return;
    }
    if (cmd->error) {
        fprintf(stderr, "%s: ctrl 0x%x, error 0x%x\n", __func__,
                cmd->cmd_hdr.type, cmd->error);
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }
    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    trace_virtio_gpu_fence_ctrl(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
}

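/*
 * Fence completion callback from virglrenderer: complete every queued
 * command whose fence_id is at or below the signalled fence value, since
 * the guest may emit fences out of order.
 */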
static void virgl_write_fence(void *opaque, uint32_t fence)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * the guest can end up emitting fences out of order
         * so we should check all fenced cmds not just the first one.
         */
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}

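/*
 * OpenGL context callbacks registered with virglrenderer: contexts are
 * created, destroyed and made current on the QEMU display console backing
 * the given scanout.
 */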
static virgl_renderer_gl_context
virgl_create_context(void *opaque, int scanout_idx,
                     struct virgl_renderer_gl_ctx_param *params)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext ctx;
    QEMUGLParams qparams;

    qparams.major_ver = params->major_ver;
    qparams.minor_ver = params->minor_ver;

    ctx = dpy_gl_ctx_create(g->parent_obj.scanout[scanout_idx].con, &qparams);
    return (virgl_renderer_gl_context)ctx;
}

static void virgl_destroy_context(void *opaque, virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    dpy_gl_ctx_destroy(g->parent_obj.scanout[0].con, qctx);
}

static int virgl_make_context_current(void *opaque, int scanout_idx,
                                      virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    return dpy_gl_ctx_make_current(g->parent_obj.scanout[scanout_idx].con,
                                   qctx);
}

static struct virgl_renderer_callbacks virtio_gpu_3d_cbs = {
    .version            = 1,
    .write_fence        = virgl_write_fence,
    .create_gl_context  = virgl_create_context,
    .destroy_gl_context = virgl_destroy_context,
    .make_current       = virgl_make_context_current,
};

static void virtio_gpu_print_stats(void *opaque)
{
    VirtIOGPU *g = opaque;

    if (g->stats.requests) {
        fprintf(stderr, "stats: vq req %4d, %3d -- 3D %4d (%5d)\n",
                g->stats.requests,
                g->stats.max_inflight,
                g->stats.req_3d,
                g->stats.bytes_3d);
        g->stats.requests = 0;
        g->stats.max_inflight = 0;
        g->stats.req_3d = 0;
        g->stats.bytes_3d = 0;
    } else {
        fprintf(stderr, "stats: idle\r");
    }
    timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
}

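/*
 * Poll virglrenderer for completed fences and keep draining the command
 * queue; the timer re-arms itself every 10 ms while commands or fences are
 * still outstanding and stops once both queues are empty.
 */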
static void virtio_gpu_fence_poll(void *opaque)
{
    VirtIOGPU *g = opaque;

    virgl_renderer_poll();
    virtio_gpu_process_cmdq(g);
    if (!QTAILQ_EMPTY(&g->cmdq) || !QTAILQ_EMPTY(&g->fenceq)) {
        timer_mod(g->fence_poll, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
    }
}

void virtio_gpu_virgl_fence_poll(VirtIOGPU *g)
{
    virtio_gpu_fence_poll(g);
}

void virtio_gpu_virgl_reset(VirtIOGPU *g)
{
    int i;

    /* virgl_renderer_reset() ??? */
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        if (i != 0) {
            dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
        }
        dpy_gl_scanout_disable(g->parent_obj.scanout[i].con);
    }
}

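/*
 * Initialise virglrenderer with the callbacks above and set up the fence
 * poll timer, plus the optional stats timer when statistics collection is
 * enabled in the device configuration.
 */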
int virtio_gpu_virgl_init(VirtIOGPU *g)
{
    int ret;

    ret = virgl_renderer_init(g, 0, &virtio_gpu_3d_cbs);
    if (ret != 0) {
        return ret;
    }

    g->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                 virtio_gpu_fence_poll, g);

    if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
        g->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                      virtio_gpu_print_stats, g);
        timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
    }
    return 0;
}

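/*
 * Report how many capability sets the device advertises: two when the
 * renderer knows about the VIRGL2 capset (non-zero max version), otherwise
 * one.
 */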
int virtio_gpu_virgl_get_num_capsets(VirtIOGPU *g)
{
    uint32_t capset2_max_ver, capset2_max_size;
    virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
                               &capset2_max_ver,
                               &capset2_max_size);

    return capset2_max_ver ? 2 : 1;
}

#endif /* CONFIG_VIRGL */