1 | /* |
2 | Copyright (c) 2012, Broadcom Europe Ltd |
3 | All rights reserved. |
4 | |
5 | Redistribution and use in source and binary forms, with or without |
6 | modification, are permitted provided that the following conditions are met: |
7 | * Redistributions of source code must retain the above copyright |
8 | notice, this list of conditions and the following disclaimer. |
9 | * Redistributions in binary form must reproduce the above copyright |
10 | notice, this list of conditions and the following disclaimer in the |
11 | documentation and/or other materials provided with the distribution. |
12 | * Neither the name of the copyright holder nor the |
13 | names of its contributors may be used to endorse or promote products |
14 | derived from this software without specific prior written permission. |
15 | |
16 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND |
17 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
18 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
19 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY |
20 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
21 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
22 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND |
23 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
25 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | */ |
27 | |
28 | #ifndef __USER_VCSM__H__INCLUDED__ |
29 | #define __USER_VCSM__H__INCLUDED__ |
30 | |
31 | /* VideoCore Shared Memory - user interface library. |
32 | ** |
33 | ** This library provides all the necessary abstraction for any application to |
34 | ** make use of the shared memory service which is distributed across a kernel |
35 | ** driver and a videocore service. |
36 | ** |
37 | ** It is an application design decision to choose or not to use this service. |
38 | ** |
39 | ** The logical flow of operations that a user application needs to follow when |
40 | ** using this service is: |
41 | ** |
42 | ** 1) Initialize the service. |
43 | ** 2) Allocate shared memory blocks. |
44 | ** 3) Start using the allocated blocks. |
45 | ** - In order to gain ownership on a block, lock the allocated block, |
46 | ** locking a block returns a valid address that the user application |
47 | ** can access. |
48 | ** - When finished with using the block for the current execution cycle |
49 | ** or function, and so when giving up the ownership, unlock the block. |
50 | ** 4) A block can be locked/unlocked as many times required - within or outside |
51 | ** of - a specific execution context. |
52 | ** 5) To completely release an allocated block, free it. |
53 | ** 6) If the service is no longer required, terminate it. |
54 | ** |
55 | ** |
56 | ** Some generic considerations: |
57 | |
58 | ** Allocating memory blocks. |
59 | ** |
60 | ** Memory blocks can be allocated in different manners depending on the cache |
61 | ** behavior desired. A given block can either be: |
62 | |
63 | ** - Allocated in a non cached fashion all the way through host and videocore. |
64 | ** - Allocated in a cached fashion on host OR videocore. |
65 | ** - Allocated in a cached fashion on host AND videocore. |
66 | ** |
67 | ** It is an application decision to determine how to allocate a block. Evidently |
68 | ** if the application will be doing substantial read/write accesses to a given block, |
69 | ** it is recommended to allocate the block at least in a 'host cached' fashion for |
70 | ** better results. |
71 | ** |
72 | ** |
73 | ** Locking memory blocks. |
74 | ** |
75 | ** When the memory block has been allocated in a host cached fashion, locking the |
76 | ** memory block (and so taking ownership of it) will trigger a cache invalidation. |
77 | ** |
78 | ** For the above reason and when using host cached allocation, it is important that |
79 | ** an application properly implements the lock/unlock mechanism to ensure cache will |
80 | ** stay coherent, otherwise there is no guarantee it will at all be. |
81 | ** |
82 | ** It is possible to dynamically change the host cache behavior (ie cached or non |
83 | ** cached) of a given allocation without needing to free and re-allocate the block. |
** This feature can be useful for an application which requires access to the block
** only at certain times and not otherwise.  By changing the cache behavior dynamically,
** the application can optimize performance for a given duration of use.
87 | ** Such dynamic cache behavior remapping only applies to host cache and not videocore |
88 | ** cache. If one requires to change the videocore cache behavior, then a new block |
89 | ** must be created to replace the old one. |
90 | ** |
91 | ** On successful locking, a valid pointer is returned that the application can use |
92 | ** to access to data inside the block. There is no guarantee that the pointer will |
93 | ** stay valid following the unlock action corresponding to this lock. |
94 | ** |
95 | ** |
** Unlocking memory blocks.
97 | ** |
** When the memory block has been allocated in a host cached fashion, unlocking the
** memory block (and so giving up its ownership) will trigger a cache flush unless
** explicitly asked not to flush the cache for performance reasons.
101 | ** |
102 | ** For the above reason and when using host cached allocation, it is important that |
103 | ** an application properly implements the lock/unlock mechanism to ensure cache will |
104 | ** stay coherent, otherwise there is no guarantee it will at all be. |
105 | ** |
106 | ** |
107 | ** A complete API is defined below. |
108 | */ |
109 | |
110 | #ifdef __cplusplus |
111 | extern "C" |
112 | { |
113 | #endif |
114 | |
/* Different status that can be dumped.
*/
typedef enum
{
   VCSM_STATUS_VC_WALK_ALLOC = 0,    // Walks *all* the allocations on videocore.
                                     // Result of the walk is seen in the videocore
                                     // log.
   VCSM_STATUS_HOST_WALK_MAP,        // Walks the *full* mapping allocation on host
                                     // driver (ie for all processes).  Result of
                                     // the walk is seen in the kernel log.
   VCSM_STATUS_HOST_WALK_PID_MAP,    // Walks the per process mapping allocation on host
                                     // driver (for current process).  Result of
                                     // the walk is seen in the kernel log.
   VCSM_STATUS_HOST_WALK_PID_ALLOC,  // Walks the per process host allocation on host
                                     // driver (for current process).  Result of
                                     // the walk is seen in the kernel log.
   VCSM_STATUS_VC_MAP_ALL,           // Equivalent to both VCSM_STATUS_VC_WALK_ALLOC and
                                     // VCSM_STATUS_HOST_WALK_MAP.
                                     //
   VCSM_STATUS_NONE,                 // Must be last - invalid.

} VCSM_STATUS_T;
137 | |
/* Different kinds of cache behavior.
*/
typedef enum
{
   VCSM_CACHE_TYPE_NONE = 0,     // No caching applies.
   VCSM_CACHE_TYPE_HOST,         // Allocation is cached on host (user space).
   VCSM_CACHE_TYPE_VC,           // Allocation is cached on videocore.
   VCSM_CACHE_TYPE_HOST_AND_VC,  // Allocation is cached on both host and videocore.

} VCSM_CACHE_TYPE_T;
148 | |
/* Initialize the vcsm processing with option to use vc-sm-cma which supports
** dmabuf export, and passing in an external fd to /dev/vcsm-cma or /dev/vcsm.
**
** 'want_export' selects the vc-sm-cma (dmabuf export capable) path.
** 'fd' is an already-open device fd supplied by the caller
** (NOTE(review): presumably a negative value lets the library open the
** device itself - confirm against the implementation).
**
** Must be called once before attempting to do anything else.
**
** Returns 0 on success, -1 on error.
*/
int vcsm_init_ex( int want_export, int fd );
157 | |
/* Initialize the vcsm processing.
**
** Must be called once before attempting to do anything else.
**
** Returns 0 on success, -1 on error.
*/
int vcsm_init( void );
165 | |
/* Terminates the vcsm processing.
**
** Must be called when vcsm services are no longer needed; it will
** take care of removing any allocation under the current process
** control if deemed necessary.
*/
void vcsm_exit( void );
173 | |
174 | |
/* Queries the status of the vcsm.
**
** Triggers a dump of various kinds of information, see the
** different variants specified in VCSM_STATUS_T.
**
** Pid is optional.
*/
void vcsm_status( VCSM_STATUS_T status, int pid );
183 | |
184 | |
/* Allocates a non-cached block of memory of size 'size' via the vcsm memory
** allocator.  'name' is an identification label for the allocation.
**
** Returns: 0 on error
**          a non-zero opaque handle on success.
**
** On success, the user must invoke vcsm_lock with the returned opaque
** handle to gain access to the memory associated with the opaque handle.
** When finished using the memory, the user calls vcsm_unlock_xx (see those
** function definitions for more details on the one that can be used).
**
** A well behaved application should make every attempt to lock/unlock
** only for the duration it needs to access the memory data associated with
** the opaque handle.
*/
unsigned int vcsm_malloc( unsigned int size, const char *name );
201 | |
202 | |
/* Allocates a cached block of memory of size 'size' via the vcsm memory
** allocator; the type of caching requested is passed as argument of the
** function call (see VCSM_CACHE_TYPE_T).
**
** Returns: 0 on error
**          a non-zero opaque handle on success.
**
** On success, the user must invoke vcsm_lock with the returned opaque
** handle to gain access to the memory associated with the opaque handle.
** When finished using the memory, the user calls vcsm_unlock_xx (see those
** function definitions for more details on the one that can be used).
**
** A well behaved application should make every attempt to lock/unlock
** only for the duration it needs to access the memory data associated with
** the opaque handle.
*/
unsigned int vcsm_malloc_cache( unsigned int size, VCSM_CACHE_TYPE_T cache, const char *name );
220 | |
221 | |
/* Shares an already allocated block of memory via the vcsm memory allocator,
** returning a new opaque handle referring to the same allocation.
**
** Returns: 0 on error
**          a non-zero opaque handle on success.
**
** On success, the user must invoke vcsm_lock with the returned opaque
** handle to gain access to the memory associated with the opaque handle.
** When finished using the memory, the user calls vcsm_unlock_xx (see those
** function definitions for more details on the one that can be used).
**
** A well behaved application should make every attempt to lock/unlock
** only for the duration it needs to access the memory data associated with
** the opaque handle.
*/
unsigned int vcsm_malloc_share( unsigned int handle );
237 | |
238 | |
/* Resizes a block of memory allocated previously by vcsm_malloc.
**
** Returns: 0 on success
**          -errno on error.
**
** The handle must be unlocked by the user prior to attempting any
** resize action.
**
** On error, the original size allocated against the handle
** remains available the same way it would be following a
** successful vcsm_malloc.
*/
int vcsm_resize( unsigned int handle, unsigned int new_size );
252 | |
253 | |
/* Frees a block of memory that was successfully allocated by
** a prior call to vcsm_malloc.
**
** The handle should be considered invalid upon return from this
** call.
**
** Whether any memory is actually freed up or not as the result of
** this call will depend on many factors; if all goes well it will
** be freed.  If something goes wrong, the memory will likely end up
** being freed up as part of the vcsm_exit process.  In the end the
** memory is guaranteed to be freed one way or another.
*/
void vcsm_free( unsigned int handle );
267 | |
268 | |
/* Retrieves a videocore opaque handle from a mapped user address
** pointer.  The videocore handle will correspond to the actual
** memory mapped in videocore.
**
** Returns: 0 on error
**          a non-zero opaque handle on success.
**
** Note: the videocore opaque handle is distinct from the user
**       opaque handle (allocated via vcsm_malloc) and it is only
**       significant for an application which knows what to do
**       with it; for the others it is just a number with little
**       use since nothing can be done with it (in particular,
**       for safety reasons it cannot be used to map anything).
*/
unsigned int vcsm_vc_hdl_from_ptr( void *usr_ptr );
284 | |
285 | |
/* Retrieves a videocore opaque handle from a user opaque
** handle.  The videocore handle will correspond to the actual
** memory mapped in videocore.
**
** Returns: 0 on error
**          a non-zero opaque handle on success.
**
** Note: the videocore opaque handle is distinct from the user
**       opaque handle (allocated via vcsm_malloc) and it is only
**       significant for an application which knows what to do
**       with it; for the others it is just a number with little
**       use since nothing can be done with it (in particular,
**       for safety reasons it cannot be used to map anything).
*/
unsigned int vcsm_vc_hdl_from_hdl( unsigned int handle );
301 | |
302 | |
/* Retrieves a videocore (bus) address from a user opaque
** handle.
**
** Returns: 0 on error
**          a non-zero videocore address on success.
*/
unsigned int vcsm_vc_addr_from_hdl( unsigned int handle );
310 | |
311 | |
/* Retrieves a user opaque handle from a mapped user address
** pointer.
**
** Returns: 0 on error
**          a non-zero opaque handle on success.
*/
unsigned int vcsm_usr_handle( void *usr_ptr );
319 | |
320 | |
/* Retrieves a mapped user address from an opaque user
** handle.
**
** Returns: 0 on error
**          a non-zero address on success.
**
** On success, the address corresponds to the pointer
** which can access the data allocated via the vcsm_malloc
** call.
*/
void *vcsm_usr_address( unsigned int handle );
332 | |
333 | |
/* Locks the memory associated with this opaque handle.
**
** Returns: NULL on error
**          a valid pointer on success.
**
** A user MUST lock the handle received from vcsm_malloc
** in order to be able to use the memory associated with it.
**
** On success, the pointer returned is only valid within
** the lock context (ie until a corresponding vcsm_unlock_xx
** is invoked).
*/
void *vcsm_lock( unsigned int handle );
347 | |
348 | |
/* Locks the memory associated with this opaque handle.  The lock
** also gives a chance to update the *host* cache behavior of the
** allocated buffer if so desired.  The *videocore* cache behavior
** of the allocated buffer cannot be changed by this call and such
** an attempt will be ignored.
**
** The system will attempt to honour the cache_update mode request;
** the cache_result mode will provide the final answer on which cache
** mode is really in use.  Failing to change the cache mode will not
** result in a failure to lock the buffer, as it is an application
** decision to choose what to do if (cache_result != cache_update).
**
** The value returned in cache_result can only be considered valid if
** the returned pointer is non NULL.  The cache_result pointer may be
** NULL if the application does not care about the actual outcome of
** its action with regards to the cache behavior change.
**
** Returns: NULL on error
**          a valid pointer on success.
**
** A user MUST lock the handle received from vcsm_malloc
** in order to be able to use the memory associated with it.
**
** On success, the pointer returned is only valid within
** the lock context (ie until a corresponding vcsm_unlock_xx
** is invoked).
*/
void *vcsm_lock_cache( unsigned int handle,
                       VCSM_CACHE_TYPE_T cache_update,
                       VCSM_CACHE_TYPE_T *cache_result );
379 | |
380 | |
/* Unlocks the memory associated with this user mapped address.
**
** Returns: 0 on success
**          -errno on error.
**
** After unlocking a mapped address, the user should no longer
** attempt to reference it.
*/
int vcsm_unlock_ptr( void *usr_ptr );
390 | |
391 | |
/* Unlocks the memory associated with this user mapped address.
** Applies special processing that overrides the otherwise
** default behavior.
**
** If 'cache_no_flush' is specified (non-zero):
**    Do not flush the cache as the result of the unlock (if a cache
**    flush was otherwise applicable in this case).
**
** Returns: 0 on success
**          -errno on error.
**
** After unlocking a mapped address, the user should no longer
** attempt to reference it.
*/
int vcsm_unlock_ptr_sp( void *usr_ptr, int cache_no_flush );
407 | |
408 | |
/* Unlocks the memory associated with this user opaque handle.
**
** Returns: 0 on success
**          -errno on error.
**
** After unlocking an opaque handle, the user should no longer
** attempt to reference the mapped address once associated
** with it.
*/
int vcsm_unlock_hdl( unsigned int handle );
419 | |
420 | |
/* Unlocks the memory associated with this user opaque handle.
** Applies special processing that overrides the otherwise
** default behavior.
**
** If 'cache_no_flush' is specified (non-zero):
**    Do not flush the cache as the result of the unlock (if a cache
**    flush was otherwise applicable in this case).
**
** Returns: 0 on success
**          -errno on error.
**
** After unlocking an opaque handle, the user should no longer
** attempt to reference the mapped address once associated
** with it.
*/
int vcsm_unlock_hdl_sp( unsigned int handle, int cache_no_flush );
437 | |
/* Cleans and/or invalidates the memory associated with a user opaque handle.
**
** Returns: non-zero on error
**
** The structure contains a list of flush/invalidate commands.  Commands are:
**    0: nop
**    1: invalidate the given virtual range in L1/L2
**    2: clean the given virtual range in L1/L2
**    3: clean+invalidate the given virtual range in L1/L2
*/
#define VCSM_MAX_CLEAN_INVALIDATE_ENTRIES 8
struct vcsm_user_clean_invalid_s {
   struct {
      unsigned int cmd;     // One of the commands listed above (0..3).
      unsigned int handle;  // User opaque handle the range belongs to.
      unsigned int addr;    // Start address of the virtual range
                            // (NOTE(review): presumably within the locked
                            // mapping of 'handle' - confirm against driver).
      unsigned int size;    // Size in bytes of the virtual range.
   } s[VCSM_MAX_CLEAN_INVALIDATE_ENTRIES];
};

int vcsm_clean_invalid( struct vcsm_user_clean_invalid_s *s );
459 | |
/* Batched cache maintenance over strided virtual ranges.
**
** Each of the 'op_count' entries in 's' describes 'block_count' blocks of
** 'block_size' bytes, starting at 'start_address' and separated by
** 'inter_block_stride' bytes.
**
** NOTE(review): 'invalidate_mode' presumably follows the same command
** encoding as vcsm_clean_invalid (1 = invalidate, 2 = clean,
** 3 = clean+invalidate) - confirm against the kernel driver.
**
** The caller allocates sizeof(struct vcsm_user_clean_invalid2_s) +
** op_count * sizeof(struct vcsm_user_clean_invalid2_block_s) bytes.
*/
struct vcsm_user_clean_invalid2_s {
   unsigned char op_count;             // Number of valid entries in s[].
   unsigned char zero[3];              // Padding; name suggests it must be
                                       // zeroed - confirm against driver.
   struct vcsm_user_clean_invalid2_block_s {
      unsigned short invalidate_mode;  // Cache maintenance operation (see above).
      unsigned short block_count;      // Number of blocks to process.
      void *start_address;             // Virtual address of the first block.
      unsigned int block_size;         // Size in bytes of each block.
      unsigned int inter_block_stride; // Byte stride between block starts.
   } s[];                              // C99 flexible array member; replaces the
                                       // non-standard zero-length array 's[0]'
                                       // (GNU extension) with identical layout.
};

/* Performs the cache maintenance operations described by 's'.
**
** Returns: non-zero on error
*/
int vcsm_clean_invalid2( struct vcsm_user_clean_invalid2_s *s );
473 | |
/* Imports a dmabuf file descriptor into vcsm, returning an opaque user
** handle for it; 'name' is an identification label for the allocation.
**
** Returns: 0 on error
**          a non-zero opaque handle on success.
**
** NOTE(review): presumably requires the vc-sm-cma path selected via
** vcsm_init_ex(want_export, ...) - confirm against the implementation.
*/
unsigned int vcsm_import_dmabuf( int dmabuf, const char *name );
475 | |
/* Exports the allocation behind a vcsm opaque handle as a dmabuf.
**
** NOTE(review): presumed to return a dmabuf file descriptor on success and
** a negative value on error - confirm the return convention against the
** implementation.
*/
int vcsm_export_dmabuf( unsigned int vcsm_handle );
477 | |
478 | #ifdef __cplusplus |
479 | } |
480 | #endif |
481 | |
482 | #endif /* __USER_VCSM__H__INCLUDED__ */ |
483 | |
484 | |