1 | /* |
2 | Copyright (c) 2012, Broadcom Europe Ltd |
3 | All rights reserved. |
4 | |
5 | Redistribution and use in source and binary forms, with or without |
6 | modification, are permitted provided that the following conditions are met: |
7 | * Redistributions of source code must retain the above copyright |
8 | notice, this list of conditions and the following disclaimer. |
9 | * Redistributions in binary form must reproduce the above copyright |
10 | notice, this list of conditions and the following disclaimer in the |
11 | documentation and/or other materials provided with the distribution. |
12 | * Neither the name of the copyright holder nor the |
13 | names of its contributors may be used to endorse or promote products |
14 | derived from this software without specific prior written permission. |
15 | |
16 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND |
17 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
18 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
19 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY |
20 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
21 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
22 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND |
23 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
25 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | */ |
27 | |
28 | #include <stdio.h> |
29 | #include <stdlib.h> |
30 | #include <stdint.h> |
31 | #include <string.h> |
32 | #include <errno.h> |
33 | #include <fcntl.h> |
34 | #include <unistd.h> |
35 | #include <sys/mman.h> |
36 | #include <sys/ioctl.h> |
37 | |
38 | |
39 | #include <vmcs_sm_ioctl.h> |
40 | #include "vc_sm_cma_ioctl.h" |
41 | #include "dma-buf.h" |
42 | #include "user-vcsm.h" |
43 | #include "interface/vcos/vcos.h" |
44 | |
45 | typedef struct |
46 | { |
47 | VCSM_CACHE_TYPE_T cur; /* Current pattern. */ |
48 | VCSM_CACHE_TYPE_T new; /* New pattern. */ |
49 | VCSM_CACHE_TYPE_T res; /* End result. */ |
50 | |
51 | } VCSM_CACHE_MUTEX_LKUP_T; |
52 | |
// /dev/vcsm-cma is the newer device; it allocates from CMA and imports the buffers to the VPU.
54 | #define VCSM_CMA_DEVICE_NAME "/dev/vcsm-cma" |
55 | // /dev/vcsm is the older driver that maps gpu_mem into the ARM. |
56 | #define VCSM_DEVICE_NAME "/dev/vcsm" |
57 | #define VCSM_INVALID_HANDLE (-1) |
58 | |
59 | static VCOS_LOG_CAT_T usrvcsm_log_category; |
60 | #define VCOS_LOG_CATEGORY (&usrvcsm_log_category) |
61 | static int using_vc_sm_cma = 0; |
62 | static int vcsm_handle = VCSM_INVALID_HANDLE; |
63 | static int vcsm_refcount; |
64 | static unsigned int vcsm_page_size = 0; |
65 | |
66 | static VCOS_ONCE_T vcsm_once = VCOS_ONCE_INIT; |
67 | static VCOS_MUTEX_T vcsm_mutex; |
68 | |
69 | #define VCSM_PAYLOAD_ELEM_MAX 512 |
70 | |
71 | typedef struct VCSM_PAYLOAD_ELEM_T |
72 | { |
73 | unsigned int handle; // User handle |
74 | int fd; // vcsm-cma / dmabuf fd (= user handle-1) |
75 | uint32_t vc_handle; // VPU reloc heap handle |
76 | uint8_t *mem; // mmap'ed address |
77 | unsigned int size; // size of mmap |
78 | uint32_t dma_addr; // VPU address for the buffer |
79 | int in_use; |
80 | } VCSM_PAYLOAD_ELEM_T; |
81 | |
82 | typedef struct VCSM_PAYLOAD_LIST_T |
83 | { |
84 | VCSM_PAYLOAD_ELEM_T list[VCSM_PAYLOAD_ELEM_MAX]; |
85 | VCOS_MUTEX_T lock; |
86 | struct VCSM_PAYLOAD_LIST_T *next; |
87 | } VCSM_PAYLOAD_LIST_T; |
88 | |
89 | static VCSM_PAYLOAD_LIST_T vcsm_payload_list; |
90 | |
91 | static void vcsm_payload_list_init(void) |
92 | { |
93 | vcos_mutex_create(&vcsm_payload_list.lock, "vcsm_payload_list" ); |
94 | } |
95 | |
static VCSM_PAYLOAD_ELEM_T *vcsm_payload_list_get(void)
97 | { |
98 | VCSM_PAYLOAD_ELEM_T *elem = 0; |
99 | unsigned int i; |
100 | |
101 | vcos_mutex_lock(&vcsm_payload_list.lock); |
102 | for (i = 0; i < VCSM_PAYLOAD_ELEM_MAX; i++) |
103 | { |
104 | if (vcsm_payload_list.list[i].in_use) |
105 | continue; |
106 | elem = &vcsm_payload_list.list[i]; |
107 | elem->in_use = 1; |
108 | break; |
109 | } |
110 | vcos_mutex_unlock(&vcsm_payload_list.lock); |
111 | |
112 | return elem; |
113 | } |
114 | |
115 | static void vcsm_payload_list_release(VCSM_PAYLOAD_ELEM_T *elem) |
116 | { |
117 | vcos_mutex_lock(&vcsm_payload_list.lock); |
118 | elem->handle = elem->vc_handle = elem->fd = 0; |
119 | elem->mem = NULL; |
120 | elem->in_use = 0; |
121 | vcos_mutex_unlock(&vcsm_payload_list.lock); |
122 | } |
123 | |
124 | static VCSM_PAYLOAD_ELEM_T *vcsm_payload_list_find_mem(void *mem) |
125 | { |
126 | VCSM_PAYLOAD_ELEM_T *elem = 0; |
127 | unsigned int i; |
128 | |
129 | vcos_mutex_lock(&vcsm_payload_list.lock); |
130 | for (i = 0; i < VCSM_PAYLOAD_ELEM_MAX; i++) |
131 | { |
132 | if (!vcsm_payload_list.list[i].in_use) |
133 | continue; |
134 | if (vcsm_payload_list.list[i].mem != mem) |
135 | continue; |
136 | elem = &vcsm_payload_list.list[i]; |
137 | break; |
138 | } |
139 | vcos_mutex_unlock(&vcsm_payload_list.lock); |
140 | |
141 | return elem; |
142 | } |
143 | |
144 | static VCSM_PAYLOAD_ELEM_T *vcsm_payload_list_find_handle(unsigned int handle) |
145 | { |
146 | VCSM_PAYLOAD_ELEM_T *elem = 0; |
147 | unsigned int i; |
148 | |
149 | vcos_mutex_lock(&vcsm_payload_list.lock); |
150 | for (i = 0; i < VCSM_PAYLOAD_ELEM_MAX; i++) |
151 | { |
152 | if (!vcsm_payload_list.list[i].in_use) |
153 | continue; |
154 | if (vcsm_payload_list.list[i].handle != handle) |
155 | continue; |
156 | elem = &vcsm_payload_list.list[i]; |
157 | break; |
158 | } |
159 | vcos_mutex_unlock(&vcsm_payload_list.lock); |
160 | |
161 | return elem; |
162 | } |
163 | |
164 | /*static VCSM_PAYLOAD_ELEM_T *vcsm_payload_list_find_vc_handle(uint32_t vc_handle) |
165 | { |
166 | VCSM_PAYLOAD_ELEM_T *elem = 0; |
167 | unsigned int i; |
168 | |
169 | vcos_mutex_lock(&vcsm_payload_list.lock); |
170 | for (i = 0; i < VCSM_PAYLOAD_ELEM_MAX; i++) |
171 | { |
172 | if (!vcsm_payload_list.list[i].in_use) |
173 | continue; |
174 | if (vcsm_payload_list.list[i].vc_handle != vc_handle) |
175 | continue; |
176 | elem = &vcsm_payload_list.list[i]; |
177 | break; |
178 | } |
179 | vcos_mutex_unlock(&vcsm_payload_list.lock); |
180 | |
181 | return elem; |
182 | }*/ |
183 | |
184 | |
185 | /* Cache [(current, new) -> outcome] mapping table, ignoring identity. |
186 | ** |
** Note: the Videocore cache mode cannot be updated at 'lock' time.
188 | */ |
189 | static VCSM_CACHE_MUTEX_LKUP_T vcsm_cache_mutex_table[] = |
190 | { |
191 | /* ------ CURRENT ------- *//* ---------- NEW --------- *//* --------- RESULT --------- */ |
192 | { VCSM_CACHE_TYPE_NONE, VCSM_CACHE_TYPE_HOST, VCSM_CACHE_TYPE_HOST }, |
193 | { VCSM_CACHE_TYPE_NONE, VCSM_CACHE_TYPE_VC, VCSM_CACHE_TYPE_NONE }, |
194 | { VCSM_CACHE_TYPE_NONE, VCSM_CACHE_TYPE_HOST_AND_VC, VCSM_CACHE_TYPE_HOST }, |
195 | |
196 | { VCSM_CACHE_TYPE_HOST, VCSM_CACHE_TYPE_NONE, VCSM_CACHE_TYPE_NONE }, |
197 | { VCSM_CACHE_TYPE_HOST, VCSM_CACHE_TYPE_VC, VCSM_CACHE_TYPE_HOST }, |
198 | { VCSM_CACHE_TYPE_HOST, VCSM_CACHE_TYPE_HOST_AND_VC, VCSM_CACHE_TYPE_HOST }, |
199 | |
200 | { VCSM_CACHE_TYPE_VC, VCSM_CACHE_TYPE_NONE, VCSM_CACHE_TYPE_NONE }, |
201 | { VCSM_CACHE_TYPE_VC, VCSM_CACHE_TYPE_HOST, VCSM_CACHE_TYPE_HOST_AND_VC }, |
202 | { VCSM_CACHE_TYPE_VC, VCSM_CACHE_TYPE_HOST_AND_VC, VCSM_CACHE_TYPE_HOST_AND_VC }, |
203 | |
204 | { VCSM_CACHE_TYPE_HOST_AND_VC, VCSM_CACHE_TYPE_NONE, VCSM_CACHE_TYPE_VC }, |
205 | { VCSM_CACHE_TYPE_HOST_AND_VC, VCSM_CACHE_TYPE_HOST, VCSM_CACHE_TYPE_HOST_AND_VC }, |
206 | { VCSM_CACHE_TYPE_HOST_AND_VC, VCSM_CACHE_TYPE_VC, VCSM_CACHE_TYPE_VC }, |
207 | |
208 | /* Used for lookup termination. */ |
209 | { VCSM_CACHE_TYPE_NONE, VCSM_CACHE_TYPE_NONE, VCSM_CACHE_TYPE_NONE }, |
210 | }; |
211 | |
212 | static VCSM_CACHE_TYPE_T vcsm_cache_table_lookup( VCSM_CACHE_TYPE_T current, |
213 | VCSM_CACHE_TYPE_T new ) |
214 | { |
215 | VCSM_CACHE_MUTEX_LKUP_T *p_map = vcsm_cache_mutex_table; |
216 | |
217 | while ( !( (p_map->cur == VCSM_CACHE_TYPE_NONE) && |
218 | (p_map->new == VCSM_CACHE_TYPE_NONE) ) ) |
219 | { |
220 | if ( (p_map->cur == current) && (p_map->new == new) ) |
221 | { |
222 | return p_map->res; |
223 | } |
224 | |
225 | p_map++; |
226 | }; |
227 | |
228 | vcos_log_error( "[%s]: [%d]: no mapping found for current %d - new %d" , |
229 | __func__, |
230 | getpid(), |
231 | current, |
232 | new ); |
233 | return current; |
234 | } |
235 | |
/* A one-off vcsm initialization routine.
237 | */ |
238 | static void vcsm_init_once(void) |
239 | { |
240 | vcos_mutex_create(&vcsm_mutex, VCOS_FUNCTION); |
241 | vcos_log_set_level(&usrvcsm_log_category, VCOS_LOG_ERROR); |
242 | usrvcsm_log_category.flags.want_prefix = 0; |
243 | vcos_log_register( "usrvcsm" , &usrvcsm_log_category ); |
244 | vcsm_payload_list_init(); |
245 | } |
246 | |
247 | |
248 | /* Initialize the vcsm processing. |
249 | ** |
250 | ** Must be called once before attempting to do anything else. |
251 | ** |
252 | ** Returns 0 on success, -1 on error. |
253 | */ |
254 | int vcsm_init_ex( int want_export, int fd ) |
255 | { |
256 | int result = VCSM_INVALID_HANDLE; |
257 | vcos_once(&vcsm_once, vcsm_init_once); |
258 | |
259 | /* Only open the VCSM device once per process. |
260 | */ |
261 | vcos_mutex_lock( &vcsm_mutex ); |
262 | if ( vcsm_refcount != 0 ) |
263 | { |
264 | // Note that in this case the external fd is ignored. |
265 | |
266 | if (want_export && !using_vc_sm_cma) |
267 | { |
268 | vcos_log_trace( "[%s]: fail as already open and export not available" , |
269 | __func__); |
270 | vcos_mutex_unlock( &vcsm_mutex ); |
271 | return -1; |
272 | } |
273 | goto out; /* VCSM already opened. Nothing to do. */ |
274 | } |
275 | |
276 | if (fd != -1) |
277 | { |
278 | vcsm_handle = dup(fd); |
279 | |
      // FIXME: Sanity check which device the fd actually relates to.
281 | // For now we have to guess based on whether export is requested. |
282 | // (the main use case is from Chromium which will be requesting export). |
283 | if (want_export) |
284 | using_vc_sm_cma = 1; |
285 | |
286 | goto out; |
287 | } |
288 | |
289 | if (want_export) |
290 | { |
291 | vcsm_handle = open( VCSM_CMA_DEVICE_NAME, O_RDWR, 0 ); |
292 | |
293 | if (vcsm_handle >= 0) |
294 | { |
295 | using_vc_sm_cma = 1; |
296 | vcos_log_trace( "[%s]: Using vc-sm-cma, handle %d" , |
297 | __func__, vcsm_handle); |
298 | } |
299 | } |
300 | |
301 | if (vcsm_handle < 0) |
302 | { |
303 | vcos_log_trace( "[%s]: NOT using vc-sm-cma as handle was %d" , |
304 | __func__, vcsm_handle); |
305 | vcsm_handle = open( VCSM_DEVICE_NAME, O_RDWR, 0 ); |
306 | } |
307 | |
308 | if (vcsm_handle < 0 && !want_export) |
309 | { |
      // vcsm failed and we have not tried vcsm-cma yet.
311 | vcsm_handle = open( VCSM_CMA_DEVICE_NAME, O_RDWR, 0 ); |
312 | |
313 | if (vcsm_handle >= 0) |
314 | { |
315 | using_vc_sm_cma = 1; |
316 | vcos_log_trace( "[%s]: Using vc-sm-cma, handle %d" , |
317 | __func__, vcsm_handle); |
318 | } |
319 | } |
320 | |
321 | out: |
322 | if ( vcsm_handle >= 0 ) |
323 | { |
324 | vcsm_page_size = getpagesize(); |
325 | |
326 | result = 0; |
327 | vcsm_refcount++; |
328 | |
329 | vcos_log_trace( "[%s]: [%d]: %d (align: %u) - ref-cnt %u" , |
330 | __func__, |
331 | getpid(), |
332 | vcsm_handle, |
333 | vcsm_page_size, |
334 | vcsm_refcount ); |
335 | } |
336 | vcos_mutex_unlock( &vcsm_mutex ); |
337 | return result; |
338 | } |
339 | |
340 | /* Initialize the vcsm processing. |
341 | ** |
342 | ** Must be called once before attempting to do anything else. |
343 | ** |
344 | ** Returns 0 on success, -1 on error. |
345 | */ |
346 | int vcsm_init( void ) |
347 | { |
348 | return vcsm_init_ex(0, -1); |
349 | } |
350 | |
351 | /* Terminates the vcsm processing. |
352 | ** |
** Must be called once vcsm services are no longer needed; it will
** take care of removing any allocations under the current process's
** control if deemed necessary.
356 | */ |
357 | void vcsm_exit( void ) |
358 | { |
359 | vcos_mutex_lock( &vcsm_mutex ); |
360 | |
361 | if ( vcsm_refcount == 0 ) |
362 | { |
363 | goto out; /* Shouldn't really happen. */ |
364 | } |
365 | |
366 | if ( --vcsm_refcount != 0 ) |
367 | { |
368 | vcos_log_trace( "[%s]: [%d]: %d - ref-cnt: %u" , |
369 | __func__, |
370 | getpid(), |
371 | vcsm_handle, |
372 | vcsm_refcount ); |
373 | |
374 | goto out; /* We're done. */ |
375 | } |
376 | |
377 | close( vcsm_handle ); |
378 | vcsm_handle = VCSM_INVALID_HANDLE; |
379 | |
380 | out: |
381 | vcos_mutex_unlock( &vcsm_mutex ); |
382 | } |
383 | |
384 | |
/* Allocates a cached block of memory of size 'size' via the vcsm memory
** allocator; the type of caching requested is passed as an argument to
** the function call.
388 | ** |
389 | ** Returns: 0 on error |
390 | ** a non-zero opaque handle on success. |
391 | ** |
392 | ** On success, the user must invoke vcsm_lock with the returned opaque |
393 | ** handle to gain access to the memory associated with the opaque handle. |
394 | ** When finished using the memory, the user calls vcsm_unlock_xx (see those |
** function definitions for more details on which one can be used).
396 | ** |
397 | ** A well behaved application should make every attempt to lock/unlock |
398 | ** only for the duration it needs to access the memory data associated with |
399 | ** the opaque handle. |
400 | */ |
401 | unsigned int vcsm_malloc_cache( unsigned int size, VCSM_CACHE_TYPE_T cache, const char *name ) |
402 | { |
403 | unsigned int size_aligned = size; |
404 | void *usr_ptr = NULL; |
405 | int rc; |
406 | unsigned int handle = VCSM_INVALID_HANDLE; |
407 | |
408 | if ( (size == 0) || (vcsm_handle == VCSM_INVALID_HANDLE) ) |
409 | { |
410 | vcos_log_error( "[%s]: [%d] [%s]: NULL size or invalid device!" , |
411 | __func__, |
412 | getpid(), |
413 | name ); |
414 | return 0; |
415 | } |
416 | |
417 | /* Ask for page aligned. |
418 | */ |
419 | size_aligned = (size + vcsm_page_size - 1) & ~(vcsm_page_size - 1); |
420 | |
421 | if (using_vc_sm_cma) |
422 | { |
423 | struct vc_sm_cma_ioctl_alloc alloc; |
424 | VCSM_PAYLOAD_ELEM_T *payload; |
425 | |
426 | memset( &alloc, 0, sizeof(alloc)); |
427 | |
428 | alloc.size = size_aligned; |
429 | alloc.num = 1; |
430 | alloc.cached = (enum vmcs_sm_cache_e) cache; /* Convenient one to one mapping. */ |
431 | alloc.handle = 0; |
      if ( name != NULL )
      {
         /* Copy at most the field size; the struct was zeroed above,
         ** so the name stays NUL terminated and we never read past
         ** the end of a shorter caller-supplied string.
         */
         strncpy( alloc.name, name, sizeof(alloc.name) - 1 );
      }
436 | rc = ioctl( vcsm_handle, |
437 | VC_SM_CMA_IOCTL_MEM_ALLOC, |
438 | &alloc ); |
439 | |
440 | if ( rc < 0 || alloc.handle < 0 ) |
441 | { |
442 | vcos_log_error( "[%s]: [%d] [%s]: ioctl mem-alloc FAILED [%d] (hdl: %x)" , |
443 | __func__, |
444 | getpid(), |
445 | alloc.name, |
446 | rc, |
447 | alloc.handle ); |
448 | return 0; |
449 | } |
450 | |
451 | vcos_log_trace( "[%s]: [%d] [%s]: ioctl mem-alloc %d (hdl: %x)" , |
452 | __func__, |
453 | getpid(), |
454 | alloc.name, |
455 | rc, |
456 | alloc.handle ); |
457 | |
458 | /* Map the buffer into user space. |
459 | */ |
460 | usr_ptr = mmap( 0, |
461 | alloc.size, |
462 | PROT_READ | PROT_WRITE, |
463 | MAP_SHARED, |
464 | alloc.handle, |
465 | 0 ); |
466 | |
467 | if ( usr_ptr == MAP_FAILED ) |
468 | { |
469 | vcos_log_error( "[%s]: [%d]: mmap FAILED (hdl: %x)" , |
470 | __func__, |
471 | getpid(), |
472 | alloc.handle ); |
473 | vcsm_free( alloc.handle ); |
474 | return 0; |
475 | } |
476 | |
      // vc-sm-cma now hands out file descriptors (signed int), whilst
      // libvcsm deals in unsigned int handles. We have already checked
      // that the handle is >= 0, so add one to make it a usable handle.
480 | handle = alloc.handle + 1; |
481 | |
482 | vcos_log_trace( "[%s]: mmap to %p" , |
483 | __func__, |
484 | usr_ptr |
485 | ); |
486 | |
487 | payload = vcsm_payload_list_get(); |
488 | payload->handle = handle; |
489 | payload->fd = alloc.handle; |
490 | payload->vc_handle = alloc.vc_handle; |
491 | payload->mem = usr_ptr; |
492 | payload->size = size_aligned; |
493 | if (alloc.dma_addr & 0xFFFFFFFF00000000ULL) |
494 | { |
495 | vcos_log_error("[%s]: dma address returned > 32bit 0x%llx" , __func__, alloc.dma_addr); |
496 | payload->dma_addr = 0; |
497 | } |
498 | else |
499 | payload->dma_addr = (uint32_t)alloc.dma_addr; |
500 | } |
501 | else |
502 | { |
503 | struct vmcs_sm_ioctl_alloc alloc; |
504 | |
505 | memset( &alloc, 0, sizeof(alloc)); |
506 | /* Allocate the buffer on videocore via the VCSM (Videocore Shared Memory) |
507 | ** interface. |
508 | */ |
509 | alloc.size = size_aligned; |
510 | alloc.num = 1; |
511 | alloc.cached = (enum vmcs_sm_cache_e) cache; /* Convenient one to one mapping. */ |
512 | alloc.handle = 0; |
      if ( name != NULL )
      {
         /* Copy at most the field size; the struct was zeroed above,
         ** so the name stays NUL terminated and we never read past
         ** the end of a shorter caller-supplied string.
         */
         strncpy( alloc.name, name, sizeof(alloc.name) - 1 );
      }
517 | rc = ioctl( vcsm_handle, |
518 | VMCS_SM_IOCTL_MEM_ALLOC, |
519 | &alloc ); |
520 | |
521 | if ( rc < 0 || alloc.handle == 0 ) |
522 | { |
523 | vcos_log_error( "[%s]: [%d] [%s]: ioctl mem-alloc FAILED [%d] (hdl: %x)" , |
524 | __func__, |
525 | getpid(), |
526 | alloc.name, |
527 | rc, |
528 | alloc.handle ); |
529 | return 0; |
530 | } |
531 | |
532 | vcos_log_trace( "[%s]: [%d] [%s]: ioctl mem-alloc %d (hdl: %x)" , |
533 | __func__, |
534 | getpid(), |
535 | alloc.name, |
536 | rc, |
537 | alloc.handle ); |
538 | |
539 | /* Map the buffer into user space. |
540 | */ |
541 | usr_ptr = mmap( 0, |
542 | alloc.size, |
543 | PROT_READ | PROT_WRITE, |
544 | MAP_SHARED, |
545 | vcsm_handle, |
546 | alloc.handle ); |
547 | |
      if ( usr_ptr == MAP_FAILED )
549 | { |
550 | vcos_log_error( "[%s]: [%d]: mmap FAILED (hdl: %x)" , |
551 | __func__, |
552 | getpid(), |
553 | alloc.handle ); |
554 | vcsm_free( alloc.handle ); |
555 | return 0; |
556 | } |
557 | |
558 | handle = alloc.handle; |
559 | } |
560 | |
561 | return handle; |
562 | } |
563 | |
564 | |
565 | /* Allocates a non-cached block of memory of size 'size' via the vcsm memory |
566 | ** allocator. |
567 | ** |
568 | ** Returns: 0 on error |
569 | ** a non-zero opaque handle on success. |
570 | ** |
571 | ** On success, the user must invoke vcsm_lock with the returned opaque |
572 | ** handle to gain access to the memory associated with the opaque handle. |
573 | ** When finished using the memory, the user calls vcsm_unlock_xx (see those |
** function definitions for more details on which one can be used).
575 | ** |
576 | ** A well behaved application should make every attempt to lock/unlock |
577 | ** only for the duration it needs to access the memory data associated with |
578 | ** the opaque handle. |
579 | */ |
580 | unsigned int vcsm_malloc( unsigned int size, const char *name ) |
581 | { |
582 | return vcsm_malloc_cache( size, VCSM_CACHE_TYPE_NONE, name ); |
583 | } |
584 | |
585 | /* Shares an allocated block of memory. |
586 | ** |
587 | ** Returns: 0 on error |
588 | ** a non-zero opaque handle on success. |
589 | ** |
590 | ** On success, the user must invoke vcsm_lock with the returned opaque |
591 | ** handle to gain access to the memory associated with the opaque handle. |
592 | ** When finished using the memory, the user calls vcsm_unlock_xx (see those |
** function definitions for more details on which one can be used).
594 | ** |
595 | ** A well behaved application should make every attempt to lock/unlock |
596 | ** only for the duration it needs to access the memory data associated with |
597 | ** the opaque handle. |
598 | */ |
599 | unsigned int vcsm_malloc_share( unsigned int handle ) |
600 | { |
601 | struct vmcs_sm_ioctl_alloc_share alloc; |
602 | int rc; |
603 | |
604 | if ( vcsm_handle == VCSM_INVALID_HANDLE ) |
605 | { |
606 | vcos_log_error( "[%s]: [%d]: NULL size or invalid device!" , |
607 | __func__, |
608 | getpid() ); |
609 | return 0; |
610 | } |
611 | |
612 | if (using_vc_sm_cma) |
613 | { |
614 | return 0; |
615 | } |
616 | |
617 | memset( &alloc, 0, sizeof(alloc) ); |
618 | |
619 | /* Share the buffer on videocore via the VCSM (Videocore Shared Memory) |
620 | ** interface. |
621 | */ |
622 | alloc.handle = handle; |
623 | rc = ioctl( vcsm_handle, |
624 | VMCS_SM_IOCTL_MEM_ALLOC_SHARE, |
625 | &alloc ); |
626 | |
627 | if ( rc < 0 || alloc.handle == 0 ) |
628 | { |
629 | vcos_log_error( "[%s]: [%d]: ioctl mem-share FAILED [%d] (hdl: %x->%x)" , |
630 | __func__, |
631 | getpid(), |
632 | rc, |
633 | handle, |
634 | alloc.handle ); |
635 | goto error; |
636 | } |
637 | |
638 | vcos_log_trace( "[%s]: [%d]: ioctl mem-share %d (hdl: %x->%x)" , |
639 | __func__, |
640 | getpid(), |
641 | rc, |
642 | handle, |
643 | alloc.handle ); |
644 | |
645 | return alloc.handle; |
646 | |
647 | error: |
648 | if ( alloc.handle ) |
649 | { |
650 | vcsm_free( alloc.handle ); |
651 | } |
652 | return 0; |
653 | } |
654 | |
/* Frees a block of memory that was successfully allocated by
** a prior call to vcsm_malloc.
**
** The handle should be considered invalid upon return from this
** call.
**
** Whether any memory is actually freed up as a result of this
** call depends on many factors; if all goes well it will be
** freed. If something goes wrong, the memory will likely end up
** being freed as part of the vcsm_exit process. In the end the
** memory is guaranteed to be freed one way or another.
666 | */ |
667 | void vcsm_free( unsigned int handle ) |
668 | { |
669 | int rc; |
670 | void *usr_ptr = NULL; |
671 | |
672 | if ( (vcsm_handle == VCSM_INVALID_HANDLE) || (handle == 0) ) |
673 | { |
674 | vcos_log_error( "[%s]: [%d]: invalid device or handle!" , |
675 | __func__, |
676 | getpid() ); |
677 | |
678 | goto out; |
679 | } |
680 | |
681 | if (using_vc_sm_cma) |
682 | { |
683 | VCSM_PAYLOAD_ELEM_T *elem; |
684 | |
685 | elem = vcsm_payload_list_find_handle(handle); |
686 | |
687 | if (!elem) |
688 | { |
689 | vcos_log_trace( "[%s]: handle %u not tracked, or not mapped. elem %p\n" , |
690 | __func__, handle, elem); |
691 | goto out; |
692 | } |
693 | |
694 | rc = munmap( elem->mem, elem->size ); |
695 | |
696 | vcos_log_trace( "[%s]: ioctl unmap fd: %d, addr %p, size %u. rc %d" , |
697 | __func__, |
698 | elem->fd, |
699 | elem->mem, |
700 | elem->size, |
701 | rc |
702 | ); |
703 | |
704 | close(elem->fd); |
705 | |
706 | vcsm_payload_list_release(elem); |
707 | } |
708 | else |
709 | { |
710 | struct vmcs_sm_ioctl_free alloc_free; |
711 | struct vmcs_sm_ioctl_size sz; |
712 | struct vmcs_sm_ioctl_map map; |
713 | |
714 | memset( &sz, 0, sizeof(sz) ); |
715 | memset( &alloc_free, 0, sizeof(alloc_free) ); |
716 | memset( &map, 0, sizeof(map) ); |
717 | |
718 | /* Verify what we want is valid. |
719 | */ |
720 | sz.handle = handle; |
721 | |
722 | rc = ioctl( vcsm_handle, |
723 | VMCS_SM_IOCTL_SIZE_USR_HDL, |
724 | &sz ); |
725 | |
726 | vcos_log_trace( "[%s]: [%d]: ioctl size-usr-hdl %d (hdl: %x) - size %u" , |
727 | __func__, |
728 | getpid(), |
729 | rc, |
730 | sz.handle, |
731 | sz.size ); |
732 | |
733 | /* We will not be able to free up the resource! |
734 | ** |
735 | ** However, the driver will take care of it eventually once the device is |
736 | ** closed (or dies), so this is not such a dramatic event... |
737 | */ |
738 | if ( (rc < 0) || (sz.size == 0) ) |
739 | { |
740 | goto out; |
741 | } |
742 | |
743 | /* Un-map the buffer from user space, using the last known mapped |
744 | ** address valid. |
745 | */ |
746 | usr_ptr = (void *) vcsm_usr_address( sz.handle ); |
747 | if ( usr_ptr != NULL ) |
748 | { |
749 | munmap( usr_ptr, sz.size ); |
750 | |
751 | vcos_log_trace( "[%s]: [%d]: ioctl unmap hdl: %x" , |
752 | __func__, |
753 | getpid(), |
754 | sz.handle ); |
755 | } |
756 | else |
757 | { |
758 | vcos_log_trace( "[%s]: [%d]: freeing unmapped area (hdl: %x)" , |
759 | __func__, |
760 | getpid(), |
                         sz.handle );
762 | } |
763 | |
764 | /* Free the allocated buffer all the way through videocore. |
765 | */ |
766 | alloc_free.handle = sz.handle; |
767 | |
768 | rc = ioctl( vcsm_handle, |
769 | VMCS_SM_IOCTL_MEM_FREE, |
770 | &alloc_free ); |
771 | |
772 | vcos_log_trace( "[%s]: [%d]: ioctl mem-free %d (hdl: %x)" , |
773 | __func__, |
774 | getpid(), |
775 | rc, |
776 | alloc_free.handle ); |
777 | } |
778 | |
779 | out: |
780 | return; |
781 | } |
782 | |
783 | |
/* Queries the status of the vcsm.
**
** Triggers a dump of various kinds of information; see the
787 | ** different variants specified in VCSM_STATUS_T. |
788 | ** |
789 | ** Pid is optional. |
790 | */ |
791 | void vcsm_status( VCSM_STATUS_T status, int pid ) |
792 | { |
793 | struct vmcs_sm_ioctl_walk walk; |
794 | |
795 | if ( vcsm_handle == VCSM_INVALID_HANDLE ) |
796 | { |
797 | vcos_log_error( "[%s]: [%d]: invalid device!" , |
798 | __func__, |
799 | getpid() ); |
800 | |
801 | return; |
802 | } |
803 | |
804 | if (using_vc_sm_cma) |
805 | { |
806 | return; |
807 | } |
808 | |
809 | memset( &walk, 0, sizeof(walk) ); |
810 | |
811 | /* Allow user to specify the pid of interest if desired, otherwise |
812 | ** assume the current one. |
813 | */ |
814 | walk.pid = (pid == VCSM_INVALID_HANDLE) ? getpid() : pid; |
815 | |
816 | switch ( status ) |
817 | { |
818 | case VCSM_STATUS_VC_WALK_ALLOC: |
819 | { |
820 | ioctl( vcsm_handle, |
821 | VMCS_SM_IOCTL_VC_WALK_ALLOC, |
822 | NULL ); |
823 | } |
824 | break; |
825 | |
826 | case VCSM_STATUS_HOST_WALK_MAP: |
827 | { |
828 | ioctl( vcsm_handle, |
829 | VMCS_SM_IOCTL_HOST_WALK_MAP, |
830 | NULL ); |
831 | } |
832 | break; |
833 | |
834 | case VCSM_STATUS_HOST_WALK_PID_MAP: |
835 | { |
836 | ioctl( vcsm_handle, |
837 | VMCS_SM_IOCTL_HOST_WALK_PID_MAP, |
838 | &walk ); |
839 | } |
840 | break; |
841 | |
842 | case VCSM_STATUS_HOST_WALK_PID_ALLOC: |
843 | { |
844 | ioctl( vcsm_handle, |
845 | VMCS_SM_IOCTL_HOST_WALK_PID_ALLOC, |
846 | &walk ); |
847 | } |
848 | break; |
849 | |
850 | case VCSM_STATUS_NONE: |
851 | default: |
852 | vcos_log_error( "[%s]: [%d]: invalid argument %d" , |
853 | __func__, |
854 | getpid(), |
855 | status ); |
856 | break; |
857 | } |
858 | } |
859 | |
860 | |
861 | /* Retrieves a videocore opaque handle from a mapped user address |
862 | ** pointer. The videocore handle will correspond to the actual |
863 | ** memory mapped in videocore. |
864 | ** |
865 | ** Returns: 0 on error |
866 | ** a non-zero opaque handle on success. |
867 | ** |
868 | ** Note: the videocore opaque handle is distinct from the user |
** opaque handle (allocated via vcsm_malloc) and is only
** significant to applications that know what to do with it;
** for all others it is just a number of little use, since
** nothing can be done with it (in particular, for safety
** reasons it cannot be used to map anything).
874 | */ |
875 | unsigned int vcsm_vc_hdl_from_ptr( void *usr_ptr ) |
876 | { |
877 | int rc; |
878 | |
879 | if ( (vcsm_handle == VCSM_INVALID_HANDLE) || (usr_ptr == NULL) ) |
880 | { |
881 | vcos_log_error( "[%s]: [%d]: invalid device!" , |
882 | __func__, |
883 | getpid() ); |
884 | |
885 | return 0; |
886 | } |
887 | |
888 | if (using_vc_sm_cma) |
889 | { |
890 | VCSM_PAYLOAD_ELEM_T *elem; |
891 | |
892 | elem = vcsm_payload_list_find_mem(usr_ptr); |
893 | |
894 | if (!elem) |
895 | { |
896 | vcos_log_trace( "[%s]: addr %p not tracked, or not mapped. elem %p\n" , |
897 | __func__, usr_ptr, elem); |
898 | return 0; |
899 | } |
900 | return elem->vc_handle; |
901 | } |
902 | else |
903 | { |
904 | struct vmcs_sm_ioctl_map map; |
905 | |
906 | memset( &map, 0, sizeof(map) ); |
907 | |
908 | map.pid = getpid(); |
909 | map.addr = (uintptr_t) usr_ptr; |
910 | |
911 | rc = ioctl( vcsm_handle, |
912 | VMCS_SM_IOCTL_MAP_VC_HDL_FR_ADDR, |
913 | &map ); |
914 | |
915 | if ( rc < 0 ) |
916 | { |
917 | vcos_log_error( "[%s]: [%d]: ioctl mapped-usr-hdl FAILED [%d] (pid: %d, addr: %x)" , |
918 | __func__, |
919 | getpid(), |
920 | rc, |
921 | map.pid, |
922 | map.addr ); |
923 | |
924 | return 0; |
925 | } |
926 | else |
927 | { |
928 | vcos_log_trace( "[%s]: [%d]: ioctl mapped-usr-hdl %d (hdl: %x, addr: %x)" , |
929 | __func__, |
930 | getpid(), |
931 | rc, |
932 | map.handle, |
933 | map.addr ); |
934 | |
935 | return map.handle; |
936 | } |
937 | } |
938 | } |
939 | |
940 | |
/* Retrieves a videocore opaque handle from a user opaque
** handle. The videocore handle will correspond to the actual
** memory mapped in videocore.
944 | ** |
945 | ** Returns: 0 on error |
946 | ** a non-zero opaque handle on success. |
947 | ** |
948 | ** Note: the videocore opaque handle is distinct from the user |
** opaque handle (allocated via vcsm_malloc) and is only
** significant to applications that know what to do with it;
** for all others it is just a number of little use, since
** nothing can be done with it (in particular, for safety
** reasons it cannot be used to map anything).
954 | */ |
955 | unsigned int vcsm_vc_hdl_from_hdl( unsigned int handle ) |
956 | { |
957 | if ( (vcsm_handle == VCSM_INVALID_HANDLE) || (handle == 0) ) |
958 | { |
959 | vcos_log_error( "[%s]: [%d]: invalid device or handle!" , |
960 | __func__, |
961 | getpid() ); |
962 | |
963 | return 0; |
964 | } |
965 | |
966 | if (using_vc_sm_cma) |
967 | { |
968 | VCSM_PAYLOAD_ELEM_T *elem; |
969 | |
970 | elem = vcsm_payload_list_find_handle(handle); |
971 | |
972 | if (!elem) |
973 | { |
974 | vcos_log_trace( "[%s]: handle %u not tracked, or not mapped. \n" , |
975 | __func__, handle); |
976 | return 0; |
977 | } |
978 | return elem->vc_handle; |
979 | } |
980 | else |
981 | { |
982 | int rc; |
983 | struct vmcs_sm_ioctl_map map; |
984 | |
985 | memset( &map, 0, sizeof(map) ); |
986 | |
987 | map.pid = getpid(); |
988 | map.handle = handle; |
989 | |
990 | rc = ioctl( vcsm_handle, |
991 | VMCS_SM_IOCTL_MAP_VC_HDL_FR_HDL, |
992 | &map ); |
993 | |
994 | if ( rc < 0 ) |
995 | { |
996 | vcos_log_error( "[%s]: [%d]: ioctl mapped-usr-hdl FAILED [%d] (pid: %d, hdl: %x)" , |
997 | __func__, |
998 | getpid(), |
999 | rc, |
1000 | map.pid, |
1001 | map.handle ); |
1002 | |
1003 | return 0; |
1004 | } |
1005 | else |
1006 | { |
1007 | vcos_log_trace( "[%s]: [%d]: ioctl mapped-usr-hdl %d (hdl: %x)" , |
1008 | __func__, |
1009 | getpid(), |
1010 | rc, |
1011 | map.handle ); |
1012 | |
1013 | return map.handle; |
1014 | } |
1015 | } |
1016 | } |
1017 | |
1018 | |
/* Retrieves a videocore (bus) address from a user opaque
** handle.
1021 | ** |
1022 | ** Returns: 0 on error |
1023 | ** a non-zero videocore address on success. |
1024 | */ |
1025 | unsigned int vcsm_vc_addr_from_hdl( unsigned int handle ) |
1026 | { |
1027 | int rc; |
1028 | struct vmcs_sm_ioctl_map map; |
1029 | |
1030 | if ( (vcsm_handle == VCSM_INVALID_HANDLE) || (handle == 0) ) |
1031 | { |
1032 | vcos_log_error( "[%s]: [%d]: invalid device or handle!" , |
1033 | __func__, |
1034 | getpid() ); |
1035 | |
1036 | return 0; |
1037 | } |
1038 | |
1039 | if (using_vc_sm_cma) |
1040 | { |
1041 | // The API is broken here if we're looking for 64bit support. |
1042 | // Need to return a dma_addr_t instead, and the value is platform |
1043 | // dependent. |
1044 | // Admittedly VideoCore is only 32-bit, so there could be an |
1045 | // implementation returning a VPU bus address which would fit in an |
1046 | // unsigned int. TODO. |
1047 | VCSM_PAYLOAD_ELEM_T *elem; |
1048 | |
1049 | elem = vcsm_payload_list_find_handle(handle); |
1050 | |
1051 | if (!elem) |
1052 | { |
1053 | vcos_log_trace( "[%s]: handle %u not tracked, or not mapped. \n" , |
1054 | __func__, handle); |
1055 | return 0; |
1056 | } |
1057 | return elem->dma_addr; |
1058 | } |
1059 | else |
1060 | { |
1061 | memset( &map, 0, sizeof(map) ); |
1062 | |
1063 | map.pid = getpid(); |
1064 | map.handle = handle; |
1065 | |
1066 | rc = ioctl( vcsm_handle, |
1067 | VMCS_SM_IOCTL_MAP_VC_ADDR_FR_HDL, |
1068 | &map ); |
1069 | |
1070 | if ( rc < 0 ) |
1071 | { |
1072 | vcos_log_error( "[%s]: [%d]: ioctl mapped-usr-hdl FAILED [%d] (pid: %d, hdl: %x)" , |
1073 | __func__, |
1074 | getpid(), |
1075 | rc, |
1076 | map.pid, |
1077 | map.handle ); |
1078 | |
1079 | return 0; |
1080 | } |
1081 | else |
1082 | { |
1083 | vcos_log_trace( "[%s]: [%d]: ioctl mapped-usr-hdl %d (hdl: %x)" , |
1084 | __func__, |
1085 | getpid(), |
1086 | rc, |
1087 | map.handle ); |
1088 | |
1089 | return map.addr; |
1090 | } |
1091 | } |
1092 | } |
1093 | |
1094 | |
1095 | /* Retrieves a mapped user address from an opaque user |
1096 | ** handle. |
1097 | ** |
1098 | ** Returns: 0 on error |
1099 | ** a non-zero address on success. |
1100 | ** |
1101 | ** On success, the address corresponds to the pointer |
1102 | ** which can access the data allocated via the vcsm_malloc |
1103 | ** call. |
1104 | */ |
1105 | void *vcsm_usr_address( unsigned int handle ) |
1106 | { |
1107 | int rc; |
1108 | |
1109 | if ( (vcsm_handle == VCSM_INVALID_HANDLE) || (handle == 0) ) |
1110 | { |
1111 | vcos_log_error( "[%s]: [%d]: invalid device or handle!" , |
1112 | __func__, |
1113 | getpid() ); |
1114 | |
1115 | return NULL; |
1116 | } |
1117 | |
1118 | if (using_vc_sm_cma) |
1119 | { |
1120 | //No need to lock the buffer, but need to retrieve the user address |
1121 | VCSM_PAYLOAD_ELEM_T *elem; |
1122 | |
1123 | elem = vcsm_payload_list_find_handle(handle); |
1124 | |
1125 | if (!elem || !elem->mem) |
1126 | { |
1127 | vcos_log_trace( "[%s]: handle %u not tracked, or not mapped. elem %p\n" , |
1128 | __func__, handle, elem); |
1129 | return NULL; |
1130 | } |
1131 | |
1132 | return elem->mem; |
1133 | } |
1134 | else |
1135 | { |
1136 | struct vmcs_sm_ioctl_map map; |
1137 | memset( &map, 0, sizeof(map) ); |
1138 | |
1139 | map.pid = getpid(); |
1140 | map.handle = handle; |
1141 | |
1142 | rc = ioctl( vcsm_handle, |
1143 | VMCS_SM_IOCTL_MAP_USR_ADDRESS, |
1144 | &map ); |
1145 | |
1146 | if ( rc < 0 ) |
1147 | { |
1148 | vcos_log_error( "[%s]: [%d]: ioctl mapped-usr-address FAILED [%d] (pid: %d, addr: %x)" , |
1149 | __func__, |
1150 | getpid(), |
1151 | rc, |
1152 | map.pid, |
1153 | map.addr ); |
1154 | |
1155 | return NULL; |
1156 | } |
1157 | else |
1158 | { |
1159 | vcos_log_trace( "[%s]: [%d]: ioctl mapped-usr-address %d (hdl: %x, addr: %x)" , |
1160 | __func__, |
1161 | getpid(), |
1162 | rc, |
1163 | map.handle, |
1164 | map.addr ); |
1165 | |
1166 | return (void*)(uintptr_t)map.addr; |
1167 | } |
1168 | } |
1169 | } |
1170 | |
1171 | |
1172 | /* Retrieves a user opaque handle from a mapped user address |
1173 | ** pointer. |
1174 | ** |
1175 | ** Returns: 0 on error |
1176 | ** a non-zero opaque handle on success. |
1177 | */ |
1178 | unsigned int vcsm_usr_handle( void *usr_ptr ) |
1179 | { |
1180 | int rc; |
1181 | |
1182 | if ( (vcsm_handle == VCSM_INVALID_HANDLE) || (usr_ptr == NULL) ) |
1183 | { |
1184 | vcos_log_error( "[%s]: [%d]: invalid device or null usr-ptr!" , |
1185 | __func__, |
1186 | getpid() ); |
1187 | |
1188 | return 0; |
1189 | } |
1190 | |
1191 | if (using_vc_sm_cma) |
1192 | { |
1193 | //No need to lock the buffer, but need to retrieve the user address |
1194 | VCSM_PAYLOAD_ELEM_T *elem; |
1195 | |
1196 | elem = vcsm_payload_list_find_mem(usr_ptr); |
1197 | |
1198 | if (!elem || !elem->mem) |
1199 | { |
1200 | vcos_log_trace( "[%s]: usr_ptr %p not tracked, or not mapped. elem %p\n" , |
1201 | __func__, usr_ptr, elem); |
1202 | return 0; |
1203 | } |
1204 | |
1205 | return elem->handle; |
1206 | } |
1207 | else |
1208 | { |
1209 | struct vmcs_sm_ioctl_map map; |
1210 | |
1211 | memset( &map, 0, sizeof(map) ); |
1212 | |
1213 | map.pid = getpid(); |
1214 | map.addr = (uintptr_t) usr_ptr; |
1215 | |
1216 | rc = ioctl( vcsm_handle, |
1217 | VMCS_SM_IOCTL_MAP_USR_HDL, |
1218 | &map ); |
1219 | |
1220 | if ( rc < 0 ) |
1221 | { |
1222 | vcos_log_error( "[%s]: [%d]: ioctl mapped-usr-hdl FAILED [%d] (pid: %d, addr: %x)" , |
1223 | __func__, |
1224 | getpid(), |
1225 | rc, |
1226 | map.pid, |
1227 | map.addr ); |
1228 | |
1229 | return 0; |
1230 | } |
1231 | else |
1232 | { |
1233 | vcos_log_trace( "[%s]: [%d]: ioctl mapped-usr-hdl %d (hdl: %x, addr: %x)" , |
1234 | __func__, |
1235 | getpid(), |
1236 | rc, |
1237 | map.handle, |
1238 | map.addr ); |
1239 | |
1240 | return map.handle; |
1241 | } |
1242 | } |
1243 | } |
1244 | |
1245 | |
1246 | /* Locks the memory associated with this opaque handle. |
1247 | ** |
1248 | ** Returns: NULL on error |
1249 | ** a valid pointer on success. |
1250 | ** |
1251 | ** A user MUST lock the handle received from vcsm_malloc |
1252 | ** in order to be able to use the memory associated with it. |
1253 | ** |
1254 | ** On success, the pointer returned is only valid within |
** the lock context (i.e. until a corresponding vcsm_unlock_xx
1256 | ** is invoked). |
1257 | */ |
1258 | void *vcsm_lock( unsigned int handle ) |
1259 | { |
1260 | int rc; |
1261 | void *usr_ptr = NULL; |
1262 | |
1263 | if ( (vcsm_handle == VCSM_INVALID_HANDLE) || (handle == 0) ) |
1264 | { |
1265 | vcos_log_error( "[%s]: [%d]: invalid device or invalid handle!" , |
1266 | __func__, |
1267 | getpid() ); |
1268 | |
1269 | goto out; |
1270 | } |
1271 | |
1272 | if (using_vc_sm_cma) |
1273 | { |
1274 | //No need to lock the buffer, but need to retrieve the user address |
1275 | VCSM_PAYLOAD_ELEM_T *elem; |
1276 | |
1277 | elem = vcsm_payload_list_find_handle(handle); |
1278 | |
1279 | if (!elem || !elem->mem) |
1280 | { |
1281 | vcos_log_trace( "[%s]: handle %u not tracked, or not mapped. elem %p\n" , |
1282 | __func__, handle, elem); |
1283 | goto out; |
1284 | } |
1285 | |
1286 | usr_ptr = elem->mem; |
1287 | |
1288 | { |
1289 | struct dma_buf_sync sync; |
1290 | |
1291 | //Now sync the buffer |
1292 | sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW; |
1293 | rc = ioctl( elem->fd, |
1294 | DMA_BUF_IOCTL_SYNC, |
1295 | &sync ); |
1296 | if ( rc < 0 ) |
1297 | { |
1298 | vcos_log_trace( "[%s]: [%d]: ioctl DMA_BUF_IOCTL_SYNC failed, rc %d" , |
1299 | __func__, |
1300 | getpid(), |
1301 | rc); |
1302 | } |
1303 | } |
1304 | vcos_log_trace( "[%s]: [%d]: ioctl size-usr-hdl %d (hdl: %x) - addr %p" , |
1305 | __func__, |
1306 | getpid(), |
1307 | rc, |
1308 | handle, |
1309 | usr_ptr); |
1310 | } |
1311 | else |
1312 | { |
1313 | struct vmcs_sm_ioctl_lock_unlock lock_unlock; |
1314 | struct vmcs_sm_ioctl_size sz; |
1315 | struct vmcs_sm_ioctl_map map; |
1316 | struct vmcs_sm_ioctl_cache cache; |
1317 | |
1318 | memset( &sz, 0, sizeof(sz) ); |
1319 | memset( &lock_unlock, 0, sizeof(lock_unlock) ); |
1320 | memset( &map, 0, sizeof(map) ); |
1321 | memset( &cache, 0, sizeof(cache) ); |
1322 | |
1323 | /* Verify what we want is valid. */ |
1324 | sz.handle = handle; |
1325 | |
1326 | rc = ioctl( vcsm_handle, |
1327 | VMCS_SM_IOCTL_SIZE_USR_HDL, |
1328 | &sz ); |
1329 | |
1330 | vcos_log_trace( "[%s]: [%d]: ioctl size-usr-hdl %d (hdl: %x) - size %u" , |
1331 | __func__, |
1332 | getpid(), |
1333 | rc, |
1334 | sz.handle, |
1335 | sz.size ); |
1336 | |
1337 | /* We will not be able to lock the resource! */ |
1338 | if ( (rc < 0) || (sz.size == 0) ) |
1339 | { |
1340 | goto out; |
1341 | } |
1342 | |
1343 | /* Lock the allocated buffer all the way through videocore. */ |
1344 | lock_unlock.handle = sz.handle; |
1345 | |
1346 | rc = ioctl( vcsm_handle, |
1347 | VMCS_SM_IOCTL_MEM_LOCK, |
1348 | &lock_unlock ); |
1349 | |
1350 | vcos_log_trace( "[%s]: [%d]: ioctl mem-lock %d (hdl: %x)" , |
1351 | __func__, |
1352 | getpid(), |
1353 | rc, |
1354 | lock_unlock.handle ); |
1355 | |
1356 | /* We will not be able to lock the resource! |
1357 | */ |
1358 | if ( rc < 0 ) |
1359 | { |
1360 | goto out; |
1361 | } |
1362 | |
1363 | usr_ptr = (void *) (uintptr_t)lock_unlock.addr; |
1364 | |
1365 | /* If applicable, invalidate the cache now. |
1366 | */ |
1367 | if ( usr_ptr && sz.size ) |
1368 | { |
1369 | cache.handle = sz.handle; |
1370 | cache.addr = (uintptr_t) usr_ptr; |
1371 | cache.size = sz.size; |
1372 | |
1373 | rc = ioctl( vcsm_handle, |
1374 | VMCS_SM_IOCTL_MEM_INVALID, |
1375 | &cache ); |
1376 | |
1377 | vcos_log_trace( "[%s]: [%d]: ioctl invalidate (cache) %d (hdl: %x, addr: %x, size: %u)" , |
1378 | __func__, |
1379 | getpid(), |
1380 | rc, |
1381 | cache.handle, |
1382 | cache.addr, |
1383 | cache.size ); |
1384 | |
1385 | if ( rc < 0 ) |
1386 | { |
1387 | vcos_log_error( "[%s]: [%d]: invalidate failed (rc: %d) - [%x;%x] - size: %u (hdl: %x) - cache incoherency" , |
1388 | __func__, |
1389 | getpid(), |
1390 | rc, |
1391 | (unsigned int) cache.addr, |
1392 | (unsigned int) (cache.addr + cache.size), |
1393 | (unsigned int) (cache.addr + cache.size) - (unsigned int) cache.addr, |
1394 | cache.handle ); |
1395 | } |
1396 | } |
1397 | } |
1398 | |
1399 | out: |
1400 | return usr_ptr; |
1401 | } |
1402 | |
1403 | |
1404 | /* Locks the memory associated with this opaque handle. The lock |
1405 | ** also gives a chance to update the *host* cache behavior of the |
1406 | ** allocated buffer if so desired. The *videocore* cache behavior |
1407 | ** of the allocated buffer cannot be changed by this call and such |
1408 | ** attempt will be ignored. |
1409 | ** |
** The system will attempt to honour the cache_update mode request;
** the cache_result mode will provide the final answer on which cache
** mode is really in use. Failing to change the cache mode will not
** result in a failure to lock the buffer, as it is an application
** decision to choose what to do if (cache_result != cache_update).
1415 | ** |
1416 | ** The value returned in cache_result can only be considered valid if |
1417 | ** the returned pointer is non NULL. The cache_result pointer may be |
1418 | ** NULL if the application does not care about the actual outcome of |
1419 | ** its action with regards to the cache behavior change. |
1420 | ** |
1421 | ** Returns: NULL on error |
1422 | ** a valid pointer on success. |
1423 | ** |
1424 | ** A user MUST lock the handle received from vcsm_malloc |
1425 | ** in order to be able to use the memory associated with it. |
1426 | ** |
1427 | ** On success, the pointer returned is only valid within |
** the lock context (i.e. until a corresponding vcsm_unlock_xx
1429 | ** is invoked). |
1430 | */ |
1431 | void *vcsm_lock_cache( unsigned int handle, |
1432 | VCSM_CACHE_TYPE_T cache_update, |
1433 | VCSM_CACHE_TYPE_T *cache_result ) |
1434 | { |
1435 | int rc; |
1436 | struct vmcs_sm_ioctl_lock_cache lock_cache; |
1437 | struct vmcs_sm_ioctl_chk chk; |
1438 | struct vmcs_sm_ioctl_map map; |
1439 | struct vmcs_sm_ioctl_cache cache; |
1440 | struct vmcs_sm_ioctl_size sz; |
1441 | void *usr_ptr = NULL; |
1442 | VCSM_CACHE_TYPE_T new_cache; |
1443 | |
1444 | if ( (vcsm_handle == VCSM_INVALID_HANDLE) || (handle == 0) ) |
1445 | { |
1446 | vcos_log_error( "[%s]: [%d]: invalid device or invalid handle!" , |
1447 | __func__, |
1448 | getpid() ); |
1449 | |
1450 | goto out; |
1451 | } |
1452 | |
1453 | if (using_vc_sm_cma) |
1454 | { |
1455 | //FIXME: IMPLEMENT THIS |
1456 | vcos_log_error("[%s]: IMPLEMENT ME" , __func__); |
1457 | return NULL; |
1458 | } |
1459 | else |
1460 | { |
1461 | memset( &chk, 0, sizeof(chk) ); |
1462 | memset( &sz, 0, sizeof(sz) ); |
1463 | memset( &lock_cache, 0, sizeof(lock_cache) ); |
1464 | memset( &map, 0, sizeof(map) ); |
1465 | memset( &cache, 0, sizeof(cache) ); |
1466 | |
1467 | /* Verify what we want is valid. |
1468 | */ |
1469 | chk.handle = handle; |
1470 | |
1471 | rc = ioctl( vcsm_handle, |
1472 | VMCS_SM_IOCTL_CHK_USR_HDL, |
1473 | &chk ); |
1474 | |
1475 | vcos_log_trace( "[%s]: [%d]: ioctl chk-usr-hdl %d (hdl: %x, addr: %x, sz: %u, cache: %d)" , |
1476 | __func__, |
1477 | getpid(), |
1478 | rc, |
1479 | chk.handle, |
1480 | chk.addr, |
1481 | chk.size, |
1482 | chk.cache ); |
1483 | |
1484 | /* We will not be able to lock the resource! |
1485 | */ |
1486 | if ( rc < 0 ) |
1487 | { |
1488 | goto out; |
1489 | } |
1490 | |
1491 | /* Validate cache requirements. |
1492 | */ |
1493 | if ( cache_update != (VCSM_CACHE_TYPE_T)chk.cache ) |
1494 | { |
1495 | new_cache = vcsm_cache_table_lookup( (VCSM_CACHE_TYPE_T) chk.cache, |
1496 | cache_update ); |
1497 | vcos_log_trace( "[%s]: [%d]: cache lookup hdl: %x: [cur %d ; req %d] -> new %d " , |
1498 | __func__, |
1499 | getpid(), |
1500 | chk.handle, |
1501 | (VCSM_CACHE_TYPE_T)chk.cache, |
1502 | cache_update, |
1503 | new_cache ); |
1504 | |
1505 | if ( (enum vmcs_sm_cache_e)new_cache == chk.cache ) |
1506 | { |
1507 | /* Effectively no change. |
1508 | */ |
1509 | if ( cache_result != NULL ) |
1510 | { |
1511 | *cache_result = new_cache; |
1512 | } |
1513 | goto lock_default; |
1514 | } |
1515 | } |
1516 | else |
1517 | { |
1518 | if ( cache_result != NULL ) |
1519 | { |
1520 | *cache_result = (VCSM_CACHE_TYPE_T)chk.cache; |
1521 | } |
1522 | goto lock_default; |
1523 | } |
1524 | |
1525 | /* At this point we know we want to lock the buffer and apply a cache |
      ** behavior change. Start by cleaning out whatever is already set up.
1527 | */ |
1528 | if ( chk.addr && chk.size ) |
1529 | { |
1530 | munmap( (void *)(uintptr_t)chk.addr, chk.size ); |
1531 | |
1532 | vcos_log_trace( "[%s]: [%d]: ioctl unmap hdl: %x" , |
1533 | __func__, |
1534 | getpid(), |
1535 | chk.handle ); |
1536 | } |
1537 | |
1538 | /* Lock and apply cache behavior change to the allocated buffer all the |
1539 | ** way through videocore. |
1540 | */ |
1541 | lock_cache.handle = chk.handle; |
1542 | lock_cache.cached = (enum vmcs_sm_cache_e) new_cache; /* Convenient one to one mapping. */ |
1543 | |
1544 | rc = ioctl( vcsm_handle, |
1545 | VMCS_SM_IOCTL_MEM_LOCK_CACHE, |
1546 | &lock_cache ); |
1547 | |
1548 | vcos_log_trace( "[%s]: [%d]: ioctl mem-lock-cache %d (hdl: %x)" , |
1549 | __func__, |
1550 | getpid(), |
1551 | rc, |
1552 | lock_cache.handle ); |
1553 | |
1554 | /* We will not be able to lock the resource! |
1555 | */ |
1556 | if ( rc < 0 ) |
1557 | { |
1558 | goto out; |
1559 | } |
1560 | |
1561 | /* It is possible that this size was zero if the resource was |
      ** already un-mapped when we queried it; in such a case we need
1563 | ** to figure out the size now to allow mapping to work. |
1564 | */ |
1565 | if ( chk.size == 0 ) |
1566 | { |
1567 | sz.handle = chk.handle; |
1568 | |
1569 | rc = ioctl( vcsm_handle, |
1570 | VMCS_SM_IOCTL_SIZE_USR_HDL, |
1571 | &sz ); |
1572 | |
1573 | vcos_log_trace( "[%s]: [%d]: ioctl size-usr-hdl %d (hdl: %x) - size %u" , |
1574 | __func__, |
1575 | getpid(), |
1576 | rc, |
1577 | sz.handle, |
1578 | sz.size ); |
1579 | |
         /* We will not be able to map the resource again!
1581 | */ |
1582 | if ( (rc < 0) || (sz.size == 0) ) |
1583 | { |
1584 | goto out; |
1585 | } |
1586 | } |
1587 | |
1588 | /* Map the locked buffer into user space. |
1589 | */ |
1590 | usr_ptr = mmap( 0, |
1591 | (chk.size != 0) ? chk.size : sz.size, |
1592 | PROT_READ | PROT_WRITE, |
1593 | MAP_SHARED, |
1594 | vcsm_handle, |
1595 | chk.handle ); |
1596 | |
      if ( usr_ptr == MAP_FAILED )
      {
         vcos_log_error( "[%s]: [%d]: mmap FAILED (hdl: %x)" ,
                         __func__,
                         getpid(),
                         chk.handle );
         /* Report the failure to the caller as NULL, per this
         ** function's contract.
         */
         usr_ptr = NULL;
      }
1604 | |
1605 | /* If applicable, invalidate the cache now. |
1606 | */ |
1607 | cache.size = (chk.size != 0) ? chk.size : sz.size; |
1608 | if ( usr_ptr && cache.size ) |
1609 | { |
1610 | cache.handle = chk.handle; |
1611 | cache.addr = (uintptr_t) usr_ptr; |
1612 | |
1613 | rc = ioctl( vcsm_handle, |
1614 | VMCS_SM_IOCTL_MEM_INVALID, |
1615 | &cache ); |
1616 | |
1617 | vcos_log_trace( "[%s]: [%d]: ioctl invalidate (cache) %d (hdl: %x, addr: %x, size: %u)" , |
1618 | __func__, |
1619 | getpid(), |
1620 | rc, |
1621 | cache.handle, |
1622 | cache.addr, |
1623 | cache.size ); |
1624 | |
1625 | if ( rc < 0 ) |
1626 | { |
1627 | vcos_log_error( "[%s]: [%d]: invalidate failed (rc: %d) - [%x;%x] - size: %u (hdl: %x) - cache incoherency" , |
1628 | __func__, |
1629 | getpid(), |
1630 | rc, |
1631 | (unsigned int) cache.addr, |
1632 | (unsigned int) (cache.addr + cache.size), |
1633 | (unsigned int) (cache.addr + cache.size) - (unsigned int) cache.addr, |
1634 | cache.handle ); |
1635 | } |
1636 | } |
1637 | |
1638 | /* Update the caller with the information it expects to see. |
1639 | */ |
1640 | if ( cache_result != NULL ) |
1641 | { |
1642 | *cache_result = new_cache; |
1643 | } |
1644 | |
1645 | /* Done. |
1646 | */ |
1647 | goto out; |
1648 | } |
1649 | |
1650 | |
1651 | lock_default: |
1652 | usr_ptr = vcsm_lock ( handle ); |
1653 | |
1654 | out: |
1655 | return usr_ptr; |
1656 | } |
1657 | |
1658 | |
/* Unlocks the memory associated with this user mapped address,
** applying special processing that overrides the otherwise
** default behavior.
1662 | ** |
1663 | ** If 'cache_no_flush' is specified: |
1664 | ** Do not flush cache as the result of the unlock (if cache |
1665 | ** flush was otherwise applicable in this case). |
1666 | ** |
1667 | ** Returns: 0 on success |
1668 | ** -errno on error. |
1669 | ** |
1670 | ** After unlocking a mapped address, the user should no longer |
1671 | ** attempt to reference it. |
1672 | */ |
1673 | int vcsm_unlock_ptr_sp( void *usr_ptr, int cache_no_flush ) |
1674 | { |
1675 | int rc; |
1676 | struct vmcs_sm_ioctl_lock_unlock lock_unlock; |
1677 | struct vmcs_sm_ioctl_map map; |
1678 | struct vmcs_sm_ioctl_cache cache; |
1679 | |
1680 | if ( (vcsm_handle == VCSM_INVALID_HANDLE) || (usr_ptr == NULL) ) |
1681 | { |
1682 | vcos_log_error( "[%s]: [%d]: invalid device or invalid user-ptr!" , |
1683 | __func__, |
1684 | getpid() ); |
1685 | |
1686 | rc = -EIO; |
1687 | goto out; |
1688 | } |
1689 | |
1690 | if (using_vc_sm_cma) |
1691 | { |
1692 | struct dma_buf_sync sync; |
1693 | VCSM_PAYLOAD_ELEM_T *elem; |
1694 | |
1695 | elem = vcsm_payload_list_find_mem(usr_ptr); |
1696 | |
1697 | if (!elem) |
1698 | { |
1699 | vcos_log_trace( "[%s]: addr %p not tracked, or not mapped. elem %p\n" , |
1700 | __func__, usr_ptr, elem); |
1701 | rc = -EINVAL; |
1702 | goto out; |
1703 | } |
1704 | |
1705 | if (!cache_no_flush) |
1706 | { |
1707 | sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW; |
1708 | rc = ioctl( elem->fd, |
1709 | DMA_BUF_IOCTL_SYNC, |
1710 | &sync ); |
1711 | if ( rc < 0 ) |
1712 | { |
1713 | vcos_log_trace( "[%s]: [%d]: ioctl DMA_BUF_IOCTL_SYNC failed, rc %d" , |
1714 | __func__, |
1715 | getpid(), |
1716 | rc); |
1717 | } |
1718 | } |
1719 | else |
1720 | rc = 0; |
1721 | } |
1722 | else |
1723 | { |
1724 | memset( &map, 0, sizeof(map) ); |
1725 | memset( &lock_unlock, 0, sizeof(lock_unlock) ); |
1726 | memset( &cache, 0, sizeof(cache) ); |
1727 | |
1728 | /* Retrieve the handle of the memory we want to unlock. |
1729 | */ |
1730 | map.pid = getpid(); |
1731 | map.addr = (uintptr_t) usr_ptr; |
1732 | |
1733 | rc = ioctl( vcsm_handle, |
1734 | VMCS_SM_IOCTL_MAP_USR_HDL, |
1735 | &map ); |
1736 | |
1737 | vcos_log_trace( "[%s]: [%d]: ioctl mapped-usr-hdl %d (hdl: %x, addr: %x, sz: %u)" , |
1738 | __func__, |
1739 | getpid(), |
1740 | rc, |
1741 | map.handle, |
1742 | map.addr, |
1743 | map.size ); |
1744 | |
1745 | /* We will not be able to flush/unlock the resource! |
1746 | */ |
1747 | if ( rc < 0 ) |
1748 | { |
1749 | goto out; |
1750 | } |
1751 | |
1752 | /* If applicable, flush the cache now. |
1753 | */ |
1754 | if ( !cache_no_flush && map.addr && map.size ) |
1755 | { |
1756 | cache.handle = map.handle; |
1757 | cache.addr = map.addr; |
1758 | cache.size = map.size; |
1759 | |
1760 | rc = ioctl( vcsm_handle, |
1761 | VMCS_SM_IOCTL_MEM_FLUSH, |
1762 | &cache ); |
1763 | |
1764 | vcos_log_trace( "[%s]: [%d]: ioctl flush (cache) %d (hdl: %x, addr: %x, size: %u)" , |
1765 | __func__, |
1766 | getpid(), |
1767 | rc, |
1768 | cache.handle, |
1769 | cache.addr, |
1770 | cache.size ); |
1771 | |
1772 | if ( rc < 0 ) |
1773 | { |
1774 | vcos_log_error( "[%s]: [%d]: flush failed (rc: %d) - [%x;%x] - size: %u (hdl: %x) - cache incoherency" , |
1775 | __func__, |
1776 | getpid(), |
1777 | rc, |
1778 | (unsigned int) cache.addr, |
1779 | (unsigned int) (cache.addr + cache.size), |
1780 | (unsigned int) (cache.addr + cache.size) - (unsigned int) cache.addr, |
1781 | cache.handle ); |
1782 | } |
1783 | } |
1784 | |
      /* Unlock the allocated buffer all the way through videocore.
1786 | */ |
1787 | lock_unlock.handle = map.handle; /* From above ioctl. */ |
1788 | |
1789 | rc = ioctl( vcsm_handle, |
1790 | VMCS_SM_IOCTL_MEM_UNLOCK, |
1791 | &lock_unlock ); |
1792 | |
1793 | vcos_log_trace( "[%s]: [%d]: ioctl mem-unlock %d (hdl: %x)" , |
1794 | __func__, |
1795 | getpid(), |
1796 | rc, |
1797 | lock_unlock.handle ); |
1798 | } |
1799 | |
1800 | out: |
1801 | return rc; |
1802 | } |
1803 | |
1804 | |
1805 | /* Unlocks the memory associated with this user mapped address. |
1806 | ** |
1807 | ** Returns: 0 on success |
1808 | ** -errno on error. |
1809 | ** |
1810 | ** After unlocking a mapped address, the user should no longer |
1811 | ** attempt to reference it. |
1812 | */ |
1813 | int vcsm_unlock_ptr( void *usr_ptr ) |
1814 | { |
1815 | return vcsm_unlock_ptr_sp( usr_ptr, 0 ); |
1816 | } |
1817 | |
1818 | |
/* Unlocks the memory associated with this user opaque handle,
** applying special processing that overrides the otherwise
** default behavior.
1822 | ** |
1823 | ** If 'cache_no_flush' is specified: |
1824 | ** Do not flush cache as the result of the unlock (if cache |
1825 | ** flush was otherwise applicable in this case). |
1826 | ** |
1827 | ** Returns: 0 on success |
1828 | ** -errno on error. |
1829 | ** |
1830 | ** After unlocking an opaque handle, the user should no longer |
** attempt to reference the mapped address once associated
1832 | ** with it. |
1833 | */ |
1834 | int vcsm_unlock_hdl_sp( unsigned int handle, int cache_no_flush ) |
1835 | { |
1836 | int rc; |
1837 | struct vmcs_sm_ioctl_lock_unlock lock_unlock; |
1838 | struct vmcs_sm_ioctl_chk chk; |
1839 | struct vmcs_sm_ioctl_cache cache; |
1840 | struct vmcs_sm_ioctl_map map; |
1841 | |
1842 | if ( (vcsm_handle == VCSM_INVALID_HANDLE) || (handle == 0) ) |
1843 | { |
1844 | vcos_log_error( "[%s]: [%d]: invalid device or invalid handle!" , |
1845 | __func__, |
1846 | getpid() ); |
1847 | |
1848 | rc = -EIO; |
1849 | goto out; |
1850 | } |
1851 | |
1852 | if (using_vc_sm_cma) |
1853 | { |
1854 | VCSM_PAYLOAD_ELEM_T *elem; |
1855 | struct dma_buf_sync sync; |
1856 | |
1857 | elem = vcsm_payload_list_find_handle(handle); |
1858 | |
1859 | if (!elem) |
1860 | { |
1861 | vcos_log_trace( "[%s]: handle %u not tracked, or not mapped. elem %p\n" , |
1862 | __func__, handle, elem); |
1863 | rc = -EINVAL; |
1864 | goto out; |
1865 | } |
1866 | |
      if (!cache_no_flush)
      {
         /* As in vcsm_unlock_ptr_sp, only issue the sync when a cache
         ** flush is wanted; the kernel rejects a DMA_BUF_SYNC_END
         ** request that carries no access-type flag.
         */
         sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
         rc = ioctl( elem->fd, DMA_BUF_IOCTL_SYNC, &sync );
         if ( rc < 0 )
         {
            vcos_log_trace( "[%s]: [%d]: ioctl DMA_BUF_IOCTL_SYNC failed, rc %d" ,
                            __func__,
                            getpid(),
                            rc);
         }
      }
      else
         rc = 0;
1881 | } |
1882 | else |
1883 | { |
1884 | memset( &chk, 0, sizeof(chk) ); |
1885 | memset( &lock_unlock, 0, sizeof(lock_unlock) ); |
1886 | memset( &cache, 0, sizeof(cache) ); |
1887 | memset( &map, 0, sizeof(map) ); |
1888 | |
      /* Retrieve the handle of the memory we want to unlock.
1890 | */ |
1891 | chk.handle = handle; |
1892 | |
1893 | rc = ioctl( vcsm_handle, |
1894 | VMCS_SM_IOCTL_CHK_USR_HDL, |
1895 | &chk ); |
1896 | |
1897 | vcos_log_trace( "[%s]: [%d]: ioctl chk-usr-hdl %d (hdl: %x, addr: %x, sz: %u) nf %d" , |
1898 | __func__, |
1899 | getpid(), |
1900 | rc, |
1901 | chk.handle, |
1902 | chk.addr, |
1903 | chk.size, |
1904 | cache_no_flush); |
1905 | |
1906 | /* We will not be able to flush/unlock the resource! |
1907 | */ |
1908 | if ( rc < 0 ) |
1909 | { |
1910 | goto out; |
1911 | } |
1912 | |
1913 | /* If applicable, flush the cache now. |
1914 | */ |
1915 | if ( !cache_no_flush && chk.addr && chk.size ) |
1916 | { |
1917 | cache.handle = chk.handle; |
1918 | cache.addr = chk.addr; |
1919 | cache.size = chk.size; |
1920 | |
1921 | rc = ioctl( vcsm_handle, |
1922 | VMCS_SM_IOCTL_MEM_FLUSH, |
1923 | &cache ); |
1924 | |
1925 | vcos_log_trace( "[%s]: [%d]: ioctl flush (cache) %d (hdl: %x)" , |
1926 | __func__, |
1927 | getpid(), |
1928 | rc, |
1929 | cache.handle ); |
1930 | |
1931 | if ( rc < 0 ) |
1932 | { |
            vcos_log_error( "[%s]: [%d]: flush failed (rc: %d) - [%x;%x] - size: %u (hdl: %x) - cache incoherency" ,
                            __func__,
                            getpid(),
                            rc,
                            (unsigned int) cache.addr,
                            (unsigned int) (cache.addr + cache.size),
                            (unsigned int) cache.size,
                            cache.handle );
1941 | } |
1942 | } |
1943 | |
1944 | /* Unlock the allocated buffer all the way through videocore. |
1945 | */ |
1946 | lock_unlock.handle = chk.handle; |
1947 | |
1948 | rc = ioctl( vcsm_handle, |
1949 | VMCS_SM_IOCTL_MEM_UNLOCK, |
1950 | &lock_unlock ); |
1951 | |
1952 | vcos_log_trace( "[%s]: [%d]: ioctl mem-unlock %d (hdl: %x)" , |
1953 | __func__, |
1954 | getpid(), |
1955 | rc, |
1956 | lock_unlock.handle ); |
1957 | } |
1958 | |
1959 | out: |
1960 | return rc; |
1961 | } |
1962 | |
1963 | |
1964 | /* Unlocks the memory associated with this user opaque handle. |
1965 | ** |
1966 | ** Returns: 0 on success |
1967 | ** -errno on error. |
1968 | ** |
1969 | ** After unlocking an opaque handle, the user should no longer |
** attempt to reference the mapped address once associated
1971 | ** with it. |
1972 | */ |
1973 | int vcsm_unlock_hdl( unsigned int handle ) |
1974 | { |
1975 | return vcsm_unlock_hdl_sp( handle, 0 ); |
1976 | } |
1977 | |
/* Resizes a block of memory allocated previously by vcsm_malloc.
1979 | ** |
1980 | ** Returns: 0 on success |
1981 | ** -errno on error. |
1982 | ** |
1983 | ** The handle must be unlocked by user prior to attempting any |
1984 | ** resize action. |
1985 | ** |
1986 | ** On error, the original size allocated against the handle |
1987 | ** remains available the same way it would be following a |
1988 | ** successful vcsm_malloc. |
1989 | */ |
1990 | int vcsm_resize( unsigned int handle, unsigned int new_size ) |
1991 | { |
1992 | int rc; |
1993 | struct vmcs_sm_ioctl_size sz; |
1994 | struct vmcs_sm_ioctl_resize resize; |
1995 | struct vmcs_sm_ioctl_lock_unlock lock_unlock; |
1996 | struct vmcs_sm_ioctl_map map; |
1997 | unsigned int size_aligned = new_size; |
1998 | void *usr_ptr = NULL; |
1999 | |
2000 | if ( (vcsm_handle == VCSM_INVALID_HANDLE) || (handle == 0) ) |
2001 | { |
2002 | vcos_log_error( "[%s]: [%d]: invalid device or invalid handle!" , |
2003 | __func__, |
2004 | getpid() ); |
2005 | |
2006 | rc = -EIO; |
2007 | goto out; |
2008 | } |
2009 | |
2010 | if (using_vc_sm_cma) |
2011 | { |
      // Resize is not supported by the vcsm-cma driver.
      rc = -EFAULT;
2014 | } |
2015 | else |
2016 | { |
2017 | memset( &sz, 0, sizeof(sz) ); |
2018 | memset( &resize, 0, sizeof(resize) ); |
2019 | memset( &lock_unlock, 0, sizeof(lock_unlock) ); |
2020 | memset( &map, 0, sizeof(map) ); |
2021 | |
2022 | /* Ask for page aligned. |
2023 | */ |
2024 | size_aligned = (new_size + vcsm_page_size - 1) & ~(vcsm_page_size - 1); |
2025 | |
2026 | /* Verify what we want is valid. |
2027 | */ |
2028 | sz.handle = handle; |
2029 | |
2030 | rc = ioctl( vcsm_handle, |
2031 | VMCS_SM_IOCTL_SIZE_USR_HDL, |
2032 | &sz ); |
2033 | |
2034 | vcos_log_trace( "[%s]: [%d]: ioctl size-usr-hdl %d (hdl: %x) - size %u" , |
2035 | __func__, |
2036 | getpid(), |
2037 | rc, |
2038 | sz.handle, |
2039 | sz.size ); |
2040 | |
      /* We will not be able to resize the resource!
      **
      ** However, the current allocation remains intact, and the driver will
      ** take care of it eventually once the device is closed (or dies), so
      ** this is not such a dramatic event...
      */
2046 | if ( (rc < 0) || (sz.size == 0) ) |
2047 | { |
2048 | goto out; |
2049 | } |
2050 | |
2051 | /* We first need to unmap the resource |
2052 | */ |
2053 | usr_ptr = (void *) vcsm_usr_address( sz.handle ); |
2054 | if ( usr_ptr != NULL ) |
2055 | { |
2056 | munmap( usr_ptr, sz.size ); |
2057 | |
2058 | vcos_log_trace( "[%s]: [%d]: ioctl unmap hdl: %x" , |
2059 | __func__, |
2060 | getpid(), |
2061 | sz.handle ); |
2062 | } |
2063 | else |
2064 | { |
         vcos_log_trace( "[%s]: [%d]: resizing unmapped area (hdl: %x)" ,
                         __func__,
                         getpid(),
                         sz.handle );
2069 | } |
2070 | |
2071 | /* Resize the allocated buffer all the way through videocore. |
2072 | */ |
2073 | resize.handle = sz.handle; |
2074 | resize.new_size = size_aligned; |
2075 | |
2076 | rc = ioctl( vcsm_handle, |
2077 | VMCS_SM_IOCTL_MEM_RESIZE, |
2078 | &resize ); |
2079 | |
2080 | vcos_log_trace( "[%s]: [%d]: ioctl resize %d (hdl: %x)" , |
2081 | __func__, |
2082 | getpid(), |
2083 | rc, |
2084 | resize.handle ); |
2085 | |
      /* If the resize failed, the original allocation remains, but it is
      ** now unmapped; the next lock attempt will remap it.
      */
2088 | if ( rc < 0 ) |
2089 | { |
2090 | goto out; |
2091 | } |
2092 | |
2093 | /* Remap the resource |
2094 | */ |
      if ( mmap( 0,
                 resize.new_size,
                 PROT_READ | PROT_WRITE,
                 MAP_SHARED,
                 vcsm_handle,
                 resize.handle ) == MAP_FAILED )
2101 | { |
2102 | vcos_log_error( "[%s]: [%d]: mmap FAILED (hdl: %x)" , |
2103 | __func__, |
2104 | getpid(), |
2105 | resize.handle ); |
2106 | |
2107 | /* At this point, it is not yet a problem that we failed to |
2108 | ** map the buffer because it will not be used right away. |
2109 | ** |
2110 | ** Possibly the mapping may work the next time the user tries |
2111 | ** to lock the buffer for usage, and if it still fails, it will |
2112 | ** be up to the user to deal with it. |
2113 | */ |
2114 | } |
2115 | } |
2116 | |
2117 | out: |
2118 | return rc; |
2119 | } |
2120 | |
2121 | |
2122 | /* Flush or invalidate the memory associated with this user opaque handle |
2123 | ** |
2124 | ** Returns: non-zero on error |
2125 | ** |
2126 | ** structure contains a list of flush/invalidate commands |
2127 | ** See header file |
2128 | */ |
2129 | int vcsm_clean_invalid( struct vcsm_user_clean_invalid_s *s ) |
2130 | { |
2131 | int rc = 0; |
2132 | struct vmcs_sm_ioctl_clean_invalid cache; |
2133 | |
2134 | if ( vcsm_handle == VCSM_INVALID_HANDLE ) |
2135 | { |
2136 | vcos_log_error( "[%s]: [%d]: invalid device or invalid handle!" , |
2137 | __func__, |
2138 | getpid() ); |
2139 | |
2140 | return -1; |
2141 | } |
2142 | |
   if (using_vc_sm_cma)
   {
      // Deprecated. This API is not compatible with 64-bit support (addr is
      // an unsigned int) and is not implemented on vcsm-cma; please update
      // to vcsm_clean_invalid2.
      rc = -1;
   }
   else
   {
      /* The legacy ioctl structure mirrors the user-facing one, so a
      ** straight copy is sufficient.
      */
      memcpy( &cache, s, sizeof cache );

      rc = ioctl( vcsm_handle,
                  VMCS_SM_IOCTL_MEM_CLEAN_INVALID,
                  &cache );
   }
2157 | |
2158 | return rc; |
2159 | } |
2160 | |
2161 | /* Flush or invalidate the memory associated with this user opaque handle |
2162 | ** |
2163 | ** Returns: non-zero on error |
2164 | ** |
2165 | ** structure contains a list of flush/invalidate commands |
2166 | ** See header file |
2167 | */ |
2168 | int vcsm_clean_invalid2( struct vcsm_user_clean_invalid2_s *s ) |
2169 | { |
2170 | int rc = 0; |
2171 | |
2172 | if ( vcsm_handle == VCSM_INVALID_HANDLE ) |
2173 | { |
2174 | vcos_log_error( "[%s]: [%d]: invalid device or invalid handle!" , |
2175 | __func__, |
2176 | getpid() ); |
2177 | |
2178 | return -1; |
2179 | } |
2180 | |
2181 | if (using_vc_sm_cma) |
2182 | { |
2183 | struct vc_sm_cma_ioctl_clean_invalid2 *vcsm_s; |
2184 | int i; |
2185 | |
      vcsm_s = malloc( sizeof(*vcsm_s) +
                       (s->op_count * sizeof(struct vc_sm_cma_ioctl_clean_invalid_block)) );
2187 | if (!vcsm_s) |
2188 | return -1; |
2189 | |
2190 | vcsm_s->op_count = s->op_count; |
2191 | for (i = 0; i < vcsm_s->op_count; i++) |
2192 | { |
2193 | vcsm_s->s[i].invalidate_mode = s->s[i].invalidate_mode; |
2194 | vcsm_s->s[i].block_count = s->s[i].block_count; |
2195 | vcsm_s->s[i].start_address = s->s[i].start_address; |
2196 | vcsm_s->s[i].block_size = s->s[i].block_size; |
2197 | vcsm_s->s[i].inter_block_stride = s->s[i].inter_block_stride; |
2198 | } |
2199 | |
2200 | rc = ioctl( vcsm_handle, |
2201 | VC_SM_CMA_IOCTL_MEM_CLEAN_INVALID2, |
2202 | vcsm_s ); |
2203 | free(vcsm_s); |
2204 | } |
2205 | else |
2206 | { |
2207 | rc = ioctl( vcsm_handle, |
2208 | VMCS_SM_IOCTL_MEM_CLEAN_INVALID2, |
2209 | s ); |
2210 | } |
2211 | |
2212 | return rc; |
2213 | } |
2214 | |
2215 | /* Imports a dmabuf, and binds it to a VCSM handle and MEM_HANDLE_T |
2216 | ** |
2217 | ** Returns: 0 on error |
2218 | ** a non-zero opaque handle on success. |
2219 | ** |
2220 | ** On success, the user must invoke vcsm_lock with the returned opaque |
2221 | ** handle to gain access to the memory associated with the opaque handle. |
** When finished using the memory, the user calls one of the
** vcsm_unlock_xx functions (see their definitions for details on
** which to use). Use vcsm_free to detach from the dmabuf (VideoCore
** may still hold a reference to the buffer until it has finished
** with it).
2226 | ** |
2227 | */ |
2228 | unsigned int vcsm_import_dmabuf( int dmabuf, const char *name ) |
2229 | { |
2230 | int rc; |
2231 | unsigned int handle = 0; |
2232 | |
2233 | if ( vcsm_handle == VCSM_INVALID_HANDLE ) |
2234 | { |
2235 | vcos_log_error( "[%s]: [%d]: invalid device or invalid handle!" , |
2236 | __func__, |
2237 | getpid() ); |
2238 | |
      return 0;
2240 | } |
2241 | |
2242 | if (using_vc_sm_cma) |
2243 | { |
2244 | VCSM_PAYLOAD_ELEM_T *payload; |
2245 | struct vc_sm_cma_ioctl_import_dmabuf import; |
2246 | memset( &import, 0, sizeof(import) ); |
2247 | |
2248 | /* Map the buffer on videocore via the VCSM (Videocore Shared Memory) interface. */ |
2249 | import.dmabuf_fd = dmabuf; |
2250 | import.cached = VMCS_SM_CACHE_NONE; //Support no caching for now - makes it easier for cache management |
      if ( name != NULL )
      {
         /* Copy at most the field size; import was zeroed above, so the
         ** name remains NUL terminated even if truncated.
         */
         strncpy( import.name, name, sizeof(import.name) - 1 );
      }
2255 | rc = ioctl( vcsm_handle, |
2256 | VC_SM_CMA_IOCTL_MEM_IMPORT_DMABUF, |
2257 | &import ); |
2258 | |
2259 | if ( rc < 0 || import.handle < 0 ) |
2260 | { |
2261 | vcos_log_error( "[%s]: [%d] [%s]: ioctl mem-import-dmabuf FAILED [%d] (hdl: %x)" , |
2262 | __func__, |
2263 | getpid(), |
2264 | import.name, |
2265 | rc, |
2266 | import.handle ); |
2267 | } |
2268 | else |
2269 | { |
2270 | /* Map the buffer into user space. |
2271 | */ |
2272 | vcos_log_trace( "[%s]: mapping fd %d, imported from fd %d\n" , __func__, import.handle, dmabuf); |
2273 | void *usr_ptr = mmap( 0, |
2274 | import.size, |
2275 | PROT_READ | PROT_WRITE, |
2276 | MAP_SHARED, |
2277 | import.handle, |
2278 | 0 ); |
2279 | |
2280 | if ( usr_ptr == MAP_FAILED ) |
2281 | { |
2282 | vcos_log_error( "[%s]: [%d]: mmap FAILED (hdl: %x), size %u" , |
2283 | __func__, |
2284 | getpid(), |
2285 | import.handle, import.size ); |
            /* No payload entry has been created yet, so just close the
            ** imported fd to drop our reference.
            */
            close( import.handle );
2287 | return 0; |
2288 | } |
2289 | |
2290 | |
         vcos_log_trace( "[%s]: mmap to %p" ,
                         __func__,
                         usr_ptr );
2295 | |
         //vc-sm-cma hands out file descriptors (signed int), whilst libvcsm
         //deals in unsigned int handles. The fd has already been checked as
         //>= 0, so add one to make a usable (non-zero) handle.
         handle = import.handle + 1;

2302 | vcos_log_trace( "[%s]: [%d] [%s]: ioctl mem-import-dmabuf %d (dmabuf %d imported as hdl: %x)" , |
2303 | __func__, |
2304 | getpid(), |
2305 | import.name, |
2306 | rc, |
2307 | dmabuf, |
2308 | import.handle ); |
2309 | |
2310 | payload = vcsm_payload_list_get(); |
2311 | payload->handle = handle; |
2312 | payload->fd = import.handle; |
2313 | payload->vc_handle = import.vc_handle; |
2314 | payload->mem = usr_ptr; |
2315 | payload->size = import.size; |
         if (import.dma_addr & 0xFFFFFFFF00000000ULL)
         {
            vcos_log_error("[%s]: dma address returned > 32 bits: 0x%llx" ,
                           __func__, (unsigned long long)import.dma_addr);
            payload->dma_addr = 0;
         }
         else
         {
            payload->dma_addr = (uint32_t)import.dma_addr;
         }
2323 | } |
2324 | } |
2325 | else |
2326 | { |
2327 | struct vmcs_sm_ioctl_import_dmabuf import; |
2328 | memset( &import, 0, sizeof(import) ); |
2329 | |
2330 | /* Map the buffer on videocore via the VCSM (Videocore Shared Memory) interface. */ |
2331 | import.dmabuf_fd = dmabuf; |
2332 | import.cached = VMCS_SM_CACHE_NONE; //Support no caching for now - makes it easier for cache management |
      if ( name != NULL )
      {
         /* Copy at most the field size; import was zeroed above, so the
         ** name remains NUL terminated even if truncated.
         */
         strncpy( import.name, name, sizeof(import.name) - 1 );
      }
2337 | rc = ioctl( vcsm_handle, |
2338 | VMCS_SM_IOCTL_MEM_IMPORT_DMABUF, |
2339 | &import ); |
2340 | |
2341 | if ( rc < 0 || import.handle == 0 ) |
2342 | { |
2343 | vcos_log_error( "[%s]: [%d] [%s]: ioctl mem-import-dmabuf FAILED [%d] (hdl: %x)" , |
2344 | __func__, |
2345 | getpid(), |
2346 | import.name, |
2347 | rc, |
2348 | import.handle ); |
2349 | } |
2350 | else |
2351 | { |
2352 | handle = import.handle; |
2353 | } |
2354 | |
      vcos_log_trace( "[%s]: [%d] [%s]: ioctl mem-import-dmabuf dmabuf %d rc %d (vcsm hdl: %x)" ,
2356 | __func__, |
2357 | getpid(), |
2358 | import.name, |
2359 | dmabuf, |
2360 | rc, |
2361 | import.handle ); |
2362 | } |
2363 | |
2364 | return handle; |
2365 | } |
2366 | |
2367 | /* Exports a vcsm handle as a dmabuf. |
2368 | ** |
2369 | ** Returns: <0 on error |
2370 | ** a file descriptor to the dmabuf on success. |
2371 | ** |
2372 | ** The allocation will persist until the file descriptor is closed, |
2373 | ** even if the vcsm handle is released. |
2374 | ** |
2375 | */ |
2376 | int vcsm_export_dmabuf( unsigned int vcsm_handle ) |
2377 | { |
2378 | int handle = -1; |
2379 | |
2380 | if (using_vc_sm_cma) |
2381 | { |
2382 | VCSM_PAYLOAD_ELEM_T *elem; |
2383 | |
2384 | elem = vcsm_payload_list_find_handle(vcsm_handle); |
2385 | |
2386 | if (!elem) |
2387 | { |
         vcos_log_trace( "[%s]: handle %u not tracked, or not mapped. elem %p\n" ,
                         __func__, vcsm_handle, elem);
2390 | return -1; |
2391 | } |
2392 | |
2393 | //Duplicate the existing fd. |
2394 | handle = dup(elem->fd); |
2395 | } |
2396 | else |
2397 | { |
2398 | //Not supported on the old vcsm kernel driver. |
2399 | handle = -1; |
2400 | } |
2401 | return handle; |
2402 | } |
2403 | |