1 | /* |
2 | Copyright (c) 2012, Broadcom Europe Ltd |
3 | All rights reserved. |
4 | |
5 | Redistribution and use in source and binary forms, with or without |
6 | modification, are permitted provided that the following conditions are met: |
7 | * Redistributions of source code must retain the above copyright |
8 | notice, this list of conditions and the following disclaimer. |
9 | * Redistributions in binary form must reproduce the above copyright |
10 | notice, this list of conditions and the following disclaimer in the |
11 | documentation and/or other materials provided with the distribution. |
12 | * Neither the name of the copyright holder nor the |
13 | names of its contributors may be used to endorse or promote products |
14 | derived from this software without specific prior written permission. |
15 | |
16 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND |
17 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
18 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
19 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY |
20 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
21 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
22 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND |
23 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
25 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | */ |
27 | |
28 | |
29 | #include "mmal.h" |
30 | #include "mmal_vc_msgs.h" |
31 | #include "mmal_vc_api.h" |
32 | #include "mmal_vc_client_priv.h" |
33 | #include "interface/vcos/vcos.h" |
34 | #include "vchiq_util.h" |
35 | #include "interface/mmal/core/mmal_buffer_private.h" |
36 | #include "interface/mmal/core/mmal_component_private.h" |
37 | #include "interface/mmal/core/mmal_port_private.h" |
38 | #include "interface/mmal/util/mmal_list.h" |
39 | #include "interface/mmal/util/mmal_util.h" |
40 | |
41 | #define VCOS_LOG_CATEGORY (&mmal_ipc_log_category) |
42 | #include "interface/mmal/mmal_logging.h" |
43 | |
44 | #include <stdio.h> |
45 | |
46 | #define MAX_WAITERS 16 |
47 | static VCOS_ONCE_T once = VCOS_ONCE_INIT; |
48 | static VCHIQ_INSTANCE_T mmal_vchiq_instance; |
49 | static VCOS_LOG_CAT_T mmal_ipc_log_category; |
50 | |
51 | /** Client threads use one of these to wait for |
52 | * a reply from VideoCore. |
53 | */ |
54 | typedef struct MMAL_WAITER_T |
55 | { |
56 | int index; |
57 | VCOS_SEMAPHORE_T sem; |
58 | unsigned inuse; |
59 | void *dest; /**< Where to write reply */ |
60 | size_t destlen; /**< Max length for reply */ |
61 | } MMAL_WAITER_T; |
62 | |
63 | /** We have an array of waiters and allocate them to waiting |
64 | * threads. They can be released back to the pool in any order. |
65 | * If there are none free, the calling thread will block until |
66 | * one becomes available. |
67 | */ |
68 | typedef struct |
69 | { |
70 | MMAL_WAITER_T waiters[MAX_WAITERS]; |
71 | VCOS_SEMAPHORE_T sem; |
72 | } MMAL_WAITPOOL_T; |
73 | |
struct MMAL_CLIENT_T
{
   int refcount;                   /**< Number of mmal_vc_init calls outstanding */
   int usecount;                   /**< Nested use count; the service is used/released on 0<->1 transitions */
   VCOS_MUTEX_T lock;              /**< Protects the counts above and waiter allocation */
   VCHIQ_SERVICE_HANDLE_T service; /**< VCHIQ service handle for the MMAL control service */
   MMAL_WAITPOOL_T waitpool;       /**< Waiters for replies to synchronous messages */
   VCOS_MUTEX_T bulk_lock;         /**< Keeps a message and its bulk transfer together */

   MMAL_BOOL_T inited;             /**< Set once initialisation has completed */
};
85 | |
86 | /***************************************************************************** |
87 | * Lookup table functions for client_component handles. |
 * Required because the IPC is strictly 32-bit, so a 64-bit userland cannot
 * pass the required pointers across it directly.
90 | *****************************************************************************/ |
91 | #define MAX_COMPONENT_HANDLES 128 |
92 | |
93 | typedef struct |
94 | { |
95 | unsigned int inuse:1; |
96 | unsigned int index:31; |
97 | MMAL_COMPONENT_T *component; |
98 | } MMAL_CLIENT_COMPONENT_T; |
99 | |
100 | typedef struct |
101 | { |
102 | MMAL_CLIENT_COMPONENT_T components[MAX_COMPONENT_HANDLES]; |
103 | VCOS_MUTEX_T lock; |
104 | } MMAL_CLIENT_COMPONENT_POOL_T; |
105 | |
106 | static MMAL_CLIENT_COMPONENT_POOL_T client_component_pool; |
107 | |
108 | uint32_t mmal_vc_allocate_client_component(MMAL_COMPONENT_T *component) |
109 | { |
110 | int i; |
111 | |
112 | vcos_mutex_lock(&client_component_pool.lock); |
113 | for (i=0; i<MAX_COMPONENT_HANDLES; i++) |
114 | { |
115 | if (client_component_pool.components[i].inuse == 0) |
116 | break; |
117 | } |
118 | |
119 | if (vcos_verify(i != MAX_COMPONENT_HANDLES)) |
120 | { |
121 | client_component_pool.components[i].index = i; |
122 | client_component_pool.components[i].component = component; |
123 | client_component_pool.components[i].inuse = 1; |
124 | } |
125 | vcos_mutex_unlock(&client_component_pool.lock); |
126 | |
127 | return i; |
128 | } |
129 | |
130 | static MMAL_COMPONENT_T *lookup_client_component(int index) |
131 | { |
132 | if (vcos_verify(index < MAX_COMPONENT_HANDLES)) |
133 | { |
134 | vcos_assert(client_component_pool.components[index].inuse); |
135 | return client_component_pool.components[index].component; |
136 | } |
137 | |
138 | return NULL; |
139 | } |
140 | |
141 | void mmal_vc_release_client_component(MMAL_COMPONENT_T *component) |
142 | { |
143 | int i; |
144 | |
145 | vcos_mutex_lock(&client_component_pool.lock); |
146 | for (i=0; i<MAX_COMPONENT_HANDLES; i++) |
147 | { |
148 | if (client_component_pool.components[i].component == component) |
149 | { |
150 | client_component_pool.components[i].component = NULL; |
151 | client_component_pool.components[i].inuse = 0; |
152 | } |
153 | } |
154 | vcos_mutex_unlock(&client_component_pool.lock); |
155 | } |
156 | |
157 | #define MAX_CLIENT_CONTEXTS 512 |
158 | |
159 | typedef struct |
160 | { |
161 | unsigned int inuse:1; |
162 | unsigned int index:31; |
163 | MMAL_VC_CLIENT_BUFFER_CONTEXT_T *ctx; |
164 | } MMAL_CLIENT_CONTEXT_T; |
165 | |
166 | typedef struct |
167 | { |
168 | MMAL_CLIENT_CONTEXT_T contexts[MAX_CLIENT_CONTEXTS]; |
169 | VCOS_MUTEX_T lock; |
170 | } MMAL_CLIENT_CONTEXT_POOL_T; |
171 | |
172 | static MMAL_CLIENT_CONTEXT_POOL_T client_context_pool; |
173 | #define CLIENT_CONTEXT_MAGIC 0xFEDC0000 |
#define CLIENT_CONTEXT_MAGIC_MASK(a) ((a) & 0xFFFF)
#define CLIENT_CONTEXT_MAGIC_CHECK(a) ((a) & 0xFFFF0000)
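/* A context handle is the pool index OR'd with the magic, e.g. index 5 maps
 * to 0xFEDC0005. CLIENT_CONTEXT_MAGIC_CHECK() recovers the top 16 bits so the
 * magic can be validated, and CLIENT_CONTEXT_MAGIC_MASK() recovers the index. */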
176 | |
177 | uint32_t mmal_vc_allocate_client_context(MMAL_VC_CLIENT_BUFFER_CONTEXT_T *context) |
178 | { |
179 | int i; |
180 | |
181 | vcos_mutex_lock(&client_context_pool.lock); |
182 | for (i=0; i<MAX_CLIENT_CONTEXTS; i++) |
183 | { |
184 | if (client_context_pool.contexts[i].inuse == 0) |
185 | break; |
186 | } |
187 | |
188 | if (vcos_verify(i != MAX_CLIENT_CONTEXTS)) |
189 | { |
190 | client_context_pool.contexts[i].index = i; |
191 | client_context_pool.contexts[i].ctx = context; |
192 | client_context_pool.contexts[i].inuse = 1; |
193 | } |
194 | vcos_mutex_unlock(&client_context_pool.lock); |
195 | |
196 | return i | CLIENT_CONTEXT_MAGIC; |
197 | } |
198 | |
199 | MMAL_VC_CLIENT_BUFFER_CONTEXT_T *mmal_vc_lookup_client_context(int index) |
200 | { |
201 | if (vcos_verify((CLIENT_CONTEXT_MAGIC_CHECK(index) == CLIENT_CONTEXT_MAGIC) && |
202 | (CLIENT_CONTEXT_MAGIC_MASK(index) < MAX_CLIENT_CONTEXTS))) |
203 | { |
204 | vcos_assert(client_context_pool.contexts[CLIENT_CONTEXT_MAGIC_MASK(index)].inuse); |
205 | return client_context_pool.contexts[CLIENT_CONTEXT_MAGIC_MASK(index)].ctx; |
206 | } |
207 | |
208 | return NULL; |
209 | } |
210 | |
211 | void mmal_vc_release_client_context(MMAL_VC_CLIENT_BUFFER_CONTEXT_T *context) |
212 | { |
213 | int i; |
214 | |
215 | vcos_mutex_lock(&client_context_pool.lock); |
216 | for (i=0; i<MAX_CLIENT_CONTEXTS; i++) |
217 | { |
218 | if (client_context_pool.contexts[i].ctx == context) |
219 | { |
220 | client_context_pool.contexts[i].ctx = NULL; |
221 | client_context_pool.contexts[i].inuse = 0; |
222 | break; |
223 | } |
224 | } |
225 | if (i >= MAX_CLIENT_CONTEXTS) |
226 | LOG_ERROR("Failed to release context %p - not found" , context); |
227 | |
228 | vcos_mutex_unlock(&client_context_pool.lock); |
229 | } |
230 | |
231 | /* One client per process/VC connection. Multiple threads may |
232 | * be using a single client. |
233 | */ |
234 | static MMAL_CLIENT_T client; |
235 | |
236 | static void init_once(void) |
237 | { |
238 | vcos_mutex_create(&client.lock, VCOS_FUNCTION); |
239 | vcos_mutex_create(&client_component_pool.lock, VCOS_FUNCTION); |
240 | vcos_mutex_create(&client_context_pool.lock, VCOS_FUNCTION); |
241 | } |
242 | |
243 | /** Create a pool of wait-structures. |
244 | */ |
245 | static MMAL_STATUS_T create_waitpool(MMAL_WAITPOOL_T *waitpool) |
246 | { |
247 | VCOS_STATUS_T status; |
248 | int i; |
249 | |
250 | status = vcos_semaphore_create(&waitpool->sem, VCOS_FUNCTION, |
251 | MAX_WAITERS); |
252 | if (status != VCOS_SUCCESS) |
      return MMAL_ENOSPC;
254 | |
255 | for (i=0; i<MAX_WAITERS; i++) |
256 | { |
257 | waitpool->waiters[i].inuse = 0; |
258 | waitpool->waiters[i].index = i; |
259 | status = vcos_semaphore_create(&waitpool->waiters[i].sem, |
260 | "mmal waiter" , 0); |
261 | if (status != VCOS_SUCCESS) |
262 | break; |
263 | } |
264 | |
265 | if (status != VCOS_SUCCESS) |
266 | { |
267 | /* clean up */ |
268 | i--; |
269 | while (i>=0) |
270 | { |
271 | vcos_semaphore_delete(&waitpool->waiters[i].sem); |
272 | i--; |
273 | } |
274 | vcos_semaphore_delete(&waitpool->sem); |
275 | } |
276 | return status==VCOS_SUCCESS ? MMAL_SUCCESS : MMAL_ENOSPC; |
277 | } |
278 | |
279 | static void destroy_waitpool(MMAL_WAITPOOL_T *waitpool) |
280 | { |
281 | int i; |
282 | for (i=0; i<MAX_WAITERS; i++) |
283 | vcos_semaphore_delete(&waitpool->waiters[i].sem); |
284 | |
285 | vcos_semaphore_delete(&waitpool->sem); |
286 | } |
287 | |
288 | /** Grab a waiter from the pool. Return immediately if one already |
289 | * available, or wait for one to become available. |
290 | */ |
291 | static MMAL_WAITER_T *get_waiter(MMAL_CLIENT_T *client) |
292 | { |
293 | int i; |
294 | MMAL_WAITER_T *waiter = NULL; |
295 | vcos_semaphore_wait(&client->waitpool.sem); |
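   /* The pool semaphore was created with a count of MAX_WAITERS, so by the
    * time the wait above returns at least one slot is guaranteed to be free. */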
296 | vcos_mutex_lock(&client->lock); |
297 | for (i=0; i<MAX_WAITERS; i++) |
298 | { |
299 | if (client->waitpool.waiters[i].inuse == 0) |
300 | break; |
301 | } |
302 | /* If this fails, the semaphore is not working */ |
303 | if (vcos_verify(i != MAX_WAITERS)) |
304 | { |
305 | waiter = client->waitpool.waiters+i; |
306 | waiter->inuse = 1; |
307 | } |
308 | vcos_mutex_unlock(&client->lock); |
309 | |
310 | return waiter; |
311 | } |
312 | |
313 | /** Look up a waiter reference based on the static client |
314 | */ |
315 | static MMAL_WAITER_T *lookup_waiter(uint32_t index) |
316 | { |
317 | //NB this uses the static client variable, whilst most others use the client |
318 | //variable passed in. I don't believe there is a way to have multiple clients |
319 | //in one process, so this should be safe. |
320 | if (vcos_verify(index < MAX_WAITERS)) |
321 | return &client.waitpool.waiters[index]; |
322 | |
323 | return NULL; |
324 | } |
325 | |
326 | /** Return a waiter to the pool. |
327 | */ |
328 | static void release_waiter(MMAL_CLIENT_T *client, MMAL_WAITER_T *waiter) |
329 | { |
330 | LOG_TRACE("at %p" , waiter); |
331 | vcos_assert(waiter); |
332 | vcos_assert(waiter->inuse); |
333 | waiter->inuse = 0; |
334 | vcos_semaphore_post(&client->waitpool.sem); |
335 | } |
336 | |
337 | static MMAL_PORT_T *mmal_vc_port_by_number(MMAL_COMPONENT_T *component, uint32_t type, uint32_t number) |
338 | { |
339 | switch (type) |
340 | { |
341 | case MMAL_PORT_TYPE_CONTROL: |
342 | vcos_assert(number == 0); |
343 | return component->control; |
344 | case MMAL_PORT_TYPE_INPUT: |
345 | vcos_assert(number < component->input_num); |
346 | return component->input[number]; |
347 | case MMAL_PORT_TYPE_OUTPUT: |
348 | vcos_assert(number < component->output_num); |
349 | return component->output[number]; |
350 | case MMAL_PORT_TYPE_CLOCK: |
351 | vcos_assert(number < component->clock_num); |
352 | return component->clock[number]; |
353 | } |
354 | |
355 | return NULL; |
356 | } |
357 | |
static void mmal_vc_handle_event_msg(VCHIQ_HEADER_T *vchiq_header,
359 | VCHIQ_SERVICE_HANDLE_T service, |
360 | void *context) |
361 | { |
362 | mmal_worker_event_to_host *msg = (mmal_worker_event_to_host *)vchiq_header->data; |
363 | MMAL_COMPONENT_T *component = lookup_client_component(msg->client_component); |
364 | MMAL_VC_CLIENT_BUFFER_CONTEXT_T *client_context; |
365 | MMAL_BUFFER_HEADER_T *buffer; |
366 | MMAL_STATUS_T status; |
367 | MMAL_PORT_T *port; |
368 | |
369 | LOG_DEBUG("event to host, cmd 0x%08x len %d to component %u/%p port (%d,%d)" , |
370 | msg->cmd, msg->length, msg->client_component, component, msg->port_type, |
371 | msg->port_num); |
372 | (void)context; |
373 | |
374 | port = mmal_vc_port_by_number(component, msg->port_type, msg->port_num); |
375 | if (!vcos_verify(port)) |
376 | { |
377 | LOG_ERROR("port (%i,%i) doesn't exist" , (int)msg->port_type, (int)msg->port_num); |
378 | goto error; |
379 | } |
380 | |
381 | status = mmal_port_event_get(port, &buffer, msg->cmd); |
382 | if (status != MMAL_SUCCESS) |
383 | { |
384 | LOG_ERROR("no event buffer available to receive event (%i)" , (int)status); |
385 | goto error; |
386 | } |
387 | |
388 | if (!vcos_verify(msg->length <= buffer->alloc_size)) |
389 | { |
390 | LOG_ERROR("event buffer to small to receive event (%i/%i)" , |
391 | (int)buffer->alloc_size, (int)msg->length); |
392 | goto error; |
393 | } |
394 | buffer->length = msg->length; |
395 | |
396 | client_context = mmal_vc_lookup_client_context(mmal_buffer_header_driver_data(buffer)->client_context); |
397 | /* Sanity check that the event buffers have the proper vc client context */ |
398 | if (!vcos_verify(mmal_buffer_header_driver_data(buffer)->magic == MMAL_MAGIC && |
399 | client_context && |
400 | client_context->magic == MMAL_MAGIC && |
401 | client_context->callback_event)) |
402 | { |
403 | LOG_ERROR("event buffers not configured properly by component" ); |
404 | goto error; |
405 | } |
406 | |
407 | if (buffer->length > MMAL_WORKER_EVENT_SPACE) |
408 | { |
409 | /* a buffer full of data for us to process */ |
410 | int len = buffer->length; |
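      /* round the length up to a 4-byte boundary for the bulk receive */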
411 | len = (len+3) & (~3); |
412 | LOG_DEBUG("queue event bulk rx: %p, %d" , buffer->data, buffer->length); |
413 | msg->delayed_buffer = buffer; |
414 | |
415 | VCHIQ_STATUS_T vst = vchiq_queue_bulk_receive(service, buffer->data, len, vchiq_header); |
416 | if (vst != VCHIQ_SUCCESS) |
417 | { |
418 | LOG_TRACE("queue event bulk rx len %d failed to start" , buffer->length); |
419 | mmal_buffer_header_release(buffer); |
420 | goto error; |
421 | } |
422 | } |
423 | else |
424 | { |
425 | if (msg->length) |
426 | { |
427 | if (buffer->cmd == MMAL_EVENT_FORMAT_CHANGED && buffer->length >= msg->length) |
428 | { |
            //64-bit userspace.
            //No need to fix the pointers in the msg as mmal_event_format_changed_get
            //will do that for us, but the start position of each section does need
            //to be adjusted.
433 | mmal_worker_event_format_changed *fmt_changed_vc = |
434 | (mmal_worker_event_format_changed*)msg->data; |
435 | MMAL_EVENT_FORMAT_CHANGED_T *fmt_changed_host = |
436 | (MMAL_EVENT_FORMAT_CHANGED_T*)buffer->data; |
437 | MMAL_ES_FORMAT_T *fmt_host; |
438 | MMAL_VC_ES_FORMAT_T *fmt_vc; |
439 | MMAL_ES_SPECIFIC_FORMAT_T *es_host, *es_vc; |
440 | const uint32_t size_host = sizeof(MMAL_EVENT_FORMAT_CHANGED_T) + |
441 | sizeof(MMAL_ES_FORMAT_T) + |
442 | sizeof(MMAL_ES_SPECIFIC_FORMAT_T); |
443 | const uint32_t size_vc = sizeof(mmal_worker_event_format_changed) + |
444 | sizeof(MMAL_VC_ES_FORMAT_T) + |
445 | sizeof(MMAL_ES_SPECIFIC_FORMAT_T); |
446 | |
447 | //Copy the base event (ignore the format pointer from the end) |
448 | memcpy(fmt_changed_host, fmt_changed_vc, sizeof(mmal_worker_event_format_changed)); |
449 | fmt_changed_host->format = NULL; |
450 | |
451 | //Copy the es format |
452 | fmt_vc = (MMAL_VC_ES_FORMAT_T *)&fmt_changed_vc[1]; |
453 | fmt_host = (MMAL_ES_FORMAT_T *)&fmt_changed_host[1]; |
454 | mmal_vc_copy_es_format_from_vc(fmt_vc, fmt_host); |
455 | |
456 | //Copy the ES_SPECIFIC_FORMAT_T (structures are identical) |
457 | es_host = (MMAL_ES_SPECIFIC_FORMAT_T *)&fmt_host[1]; |
458 | es_vc = (MMAL_ES_SPECIFIC_FORMAT_T *)&fmt_vc[1]; |
459 | memcpy(es_host, es_vc, sizeof(MMAL_ES_SPECIFIC_FORMAT_T)); |
460 | |
461 | //Copy the extradata (if present) |
462 | fmt_host->extradata_size = msg->length - size_vc; |
463 | memcpy((uint8_t *)&es_host[1], (uint8_t*)&es_vc[1], fmt_host->extradata_size); |
464 | buffer->length = size_host + fmt_host->extradata_size; |
465 | } |
466 | else |
467 | { |
468 | memcpy(buffer->data, msg->data, msg->length); |
469 | } |
470 | } |
471 | |
472 | client_context->callback_event(port, buffer); |
473 | LOG_DEBUG("done callback back to client" ); |
474 | vchiq_release_message(service, vchiq_header); |
475 | } |
476 | |
477 | return; |
478 | |
479 | error: |
480 | /* FIXME: How to abort bulk receive if necessary? */ |
481 | msg->length = 0; /* FIXME: set a buffer flag to signal error */ |
482 | vchiq_release_message(service, vchiq_header); |
483 | } |
484 | |
485 | static MMAL_STATUS_T mmal_vc_use_internal(MMAL_CLIENT_T *client) |
486 | { |
487 | MMAL_STATUS_T status = MMAL_SUCCESS; |
488 | vcos_mutex_lock(&client->lock); |
489 | if(client->usecount++ == 0) |
490 | { |
491 | if(vchiq_use_service(client->service) != VCHIQ_SUCCESS) |
492 | { |
493 | client->usecount--; |
494 | status = MMAL_EIO; |
495 | } |
496 | } |
497 | vcos_mutex_unlock(&client->lock); |
498 | return status; |
499 | } |
500 | |
501 | static MMAL_STATUS_T mmal_vc_release_internal(MMAL_CLIENT_T *client) |
502 | { |
503 | MMAL_STATUS_T status = MMAL_SUCCESS; |
504 | vcos_mutex_lock(&client->lock); |
505 | if(--client->usecount == 0) |
506 | { |
507 | if(vchiq_release_service(client->service) != VCHIQ_SUCCESS) |
508 | { |
509 | client->usecount++; |
510 | status = MMAL_EIO; |
511 | } |
512 | } |
513 | vcos_mutex_unlock(&client->lock); |
514 | return status; |
515 | } |
516 | |
517 | |
518 | /** Callback invoked by VCHIQ |
519 | */ |
520 | static VCHIQ_STATUS_T mmal_vc_vchiq_callback(VCHIQ_REASON_T reason, |
                                             VCHIQ_HEADER_T *vchiq_header,
522 | VCHIQ_SERVICE_HANDLE_T service, |
523 | void *context) |
524 | { |
525 | LOG_TRACE("reason %d" , reason); |
526 | |
527 | switch (reason) |
528 | { |
529 | case VCHIQ_MESSAGE_AVAILABLE: |
530 | { |
531 | mmal_worker_msg_header *msg = (mmal_worker_msg_header*)vchiq_header->data; |
532 | vcos_assert(msg->magic == MMAL_MAGIC); |
533 | |
534 | if (msg->msgid == MMAL_WORKER_BUFFER_TO_HOST) |
535 | { |
536 | MMAL_VC_CLIENT_BUFFER_CONTEXT_T *client_context; |
537 | LOG_TRACE("buffer to host" ); |
538 | mmal_worker_buffer_from_host *msg = (mmal_worker_buffer_from_host *)vchiq_header->data; |
539 | |
540 | client_context = mmal_vc_lookup_client_context(msg->drvbuf.client_context); |
541 | LOG_TRACE("len %d context %p" , msg->buffer_header.length, client_context); |
542 | vcos_assert(client_context); |
543 | vcos_assert(client_context->magic == MMAL_MAGIC); |
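            /* The context handle in the message came from
             * mmal_vc_allocate_client_context(); it gives us the host-side
             * buffer header and the callback used to complete it. */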
544 | |
545 | /* If the buffer is referencing another, need to replicate it here |
546 | * in order to use the reference buffer's payload and ensure the |
547 | * reference is not released prematurely */ |
548 | if (msg->has_reference) |
549 | { |
550 | MMAL_VC_CLIENT_BUFFER_CONTEXT_T *ref_context = |
551 | mmal_vc_lookup_client_context(msg->drvbuf_ref.client_context); |
552 | vcos_assert(ref_context); |
553 | mmal_buffer_header_replicate(client_context->buffer, ref_context->buffer); |
554 | } |
555 | |
556 | /* Sanity check the size of the transfer so we don't overrun our buffer */ |
557 | if (!vcos_verify(msg->buffer_header.offset + msg->buffer_header.length <= |
558 | client_context->buffer->alloc_size)) |
559 | { |
560 | LOG_TRACE("buffer too small (%i, %i)" , |
561 | msg->buffer_header.offset + msg->buffer_header.length, |
562 | client_context->buffer->alloc_size); |
563 | msg->buffer_header.length = 0; |
564 | msg->buffer_header.flags |= MMAL_BUFFER_HEADER_FLAG_TRANSMISSION_FAILED; |
565 | client_context->callback(msg); |
566 | vchiq_release_message(service, vchiq_header); |
567 | break; |
568 | } |
            /* Bulk-receive the payload even for a zero-length EOS buffer (it is
             * given a small dummy length below) so that the EOS callback reaches
             * the host in order with the data buffers */
570 | if (!msg->is_zero_copy && |
571 | (msg->buffer_header.length != 0 || |
572 | (msg->buffer_header.flags & MMAL_BUFFER_HEADER_FLAG_EOS))) |
573 | { |
574 | /* a buffer full of data for us to process */ |
575 | VCHIQ_STATUS_T vst = VCHIQ_SUCCESS; |
576 | LOG_TRACE("queue bulk rx: %p, %d" , client_context->buffer->data + |
577 | msg->buffer_header.offset, msg->buffer_header.length); |
578 | int len = msg->buffer_header.length; |
579 | len = (len+3) & (~3); |
580 | |
581 | if (!len && (msg->buffer_header.flags & MMAL_BUFFER_HEADER_FLAG_EOS)) |
582 | { |
583 | len = 8; |
584 | } |
585 | if (!msg->payload_in_message) |
586 | { |
587 | /* buffer transferred using vchiq bulk xfer */ |
588 | vst = vchiq_queue_bulk_receive(service, |
589 | client_context->buffer->data + msg->buffer_header.offset, |
590 | len, vchiq_header); |
591 | |
592 | if (vst != VCHIQ_SUCCESS) |
593 | { |
594 | LOG_TRACE("queue bulk rx len %d failed to start" , msg->buffer_header.length); |
595 | msg->buffer_header.length = 0; |
596 | msg->buffer_header.flags |= MMAL_BUFFER_HEADER_FLAG_TRANSMISSION_FAILED; |
597 | client_context->callback(msg); |
598 | vchiq_release_message(service, vchiq_header); |
599 | } |
600 | } |
601 | else if (msg->payload_in_message <= MMAL_VC_SHORT_DATA) |
602 | { |
603 | /* we have already received the buffer data in the message! */ |
604 | MMAL_BUFFER_HEADER_T *dst = client_context->buffer; |
605 | LOG_TRACE("short data: dst = %p, dst->data = %p, len %d short len %d" , dst, dst? dst->data : 0, msg->buffer_header.length, msg->payload_in_message); |
606 | memcpy(dst->data, msg->short_data, msg->payload_in_message); |
607 | dst->offset = 0; |
608 | dst->length = msg->payload_in_message; |
609 | vchiq_release_message(service, vchiq_header); |
610 | client_context->callback(msg); |
611 | } |
612 | else |
613 | { |
614 | /* impossible short data length */ |
615 | LOG_ERROR("Message with invalid short payload length %d" , |
616 | msg->payload_in_message); |
617 | vcos_assert(0); |
618 | } |
619 | } |
620 | else |
621 | { |
622 | |
623 | /* Message received from videocore; the client_context should have |
624 | * been passed all the way through by videocore back to us, and will |
625 | * be picked up in the callback to complete the sequence. |
626 | */ |
627 | LOG_TRACE("doing cb (%p) context %p" , |
628 | client_context, client_context ? |
629 | client_context->callback : 0); |
630 | client_context->callback(msg); |
631 | LOG_TRACE("done callback back to client" ); |
632 | vchiq_release_message(service, vchiq_header); |
633 | } |
634 | } |
635 | else if (msg->msgid == MMAL_WORKER_EVENT_TO_HOST) |
636 | { |
637 | mmal_vc_handle_event_msg(vchiq_header, service, context); |
638 | } |
639 | else |
640 | { |
641 | MMAL_WAITER_T *waiter = lookup_waiter(msg->u.waiter); |
642 | LOG_TRACE("waking up waiter at %p" , waiter); |
643 | vcos_assert(waiter->inuse); |
644 | int len = vcos_min(waiter->destlen, vchiq_header->size); |
645 | waiter->destlen = len; |
646 | LOG_TRACE("copying payload @%p to %p len %d" , waiter->dest, msg, len); |
647 | memcpy(waiter->dest, msg, len); |
648 | vchiq_release_message(service, vchiq_header); |
649 | vcos_semaphore_post(&waiter->sem); |
650 | } |
651 | } |
652 | break; |
653 | case VCHIQ_BULK_TRANSMIT_DONE: |
654 | { |
655 | /* nothing to do here, need to wait for the copro to tell us it |
656 | * has emptied the buffer before we can recycle it, otherwise we |
657 | * end up feeding the copro with buffers it cannot handle. |
658 | */ |
659 | #ifdef VCOS_LOGGING_ENABLED |
660 | mmal_worker_buffer_from_host *msg = (mmal_worker_buffer_from_host *)context; |
661 | #endif |
662 | LOG_TRACE("bulk tx done: %08x, %d" , msg->buffer_header.data, msg->buffer_header.length); |
663 | } |
664 | break; |
665 | case VCHIQ_BULK_RECEIVE_DONE: |
666 | { |
         VCHIQ_HEADER_T *header = (VCHIQ_HEADER_T *)context;
668 | mmal_worker_msg_header *msg_hdr = (mmal_worker_msg_header*)header->data; |
669 | if (msg_hdr->msgid == MMAL_WORKER_BUFFER_TO_HOST) |
670 | { |
671 | mmal_worker_buffer_from_host *msg = (mmal_worker_buffer_from_host *)msg_hdr; |
672 | MMAL_VC_CLIENT_BUFFER_CONTEXT_T *client_context = mmal_vc_lookup_client_context(msg->drvbuf.client_context); |
673 | vcos_assert(client_context && client_context->magic == MMAL_MAGIC); |
674 | client_context->callback(msg); |
675 | LOG_TRACE("bulk rx done: %08x, %d" , msg->buffer_header.data, msg->buffer_header.length); |
676 | } |
677 | else |
678 | { |
679 | mmal_worker_event_to_host *msg = (mmal_worker_event_to_host *)msg_hdr; |
680 | MMAL_COMPONENT_T *component = lookup_client_component(msg->client_component); |
681 | MMAL_VC_CLIENT_BUFFER_CONTEXT_T *client_context = |
682 | mmal_vc_lookup_client_context(mmal_buffer_header_driver_data(msg->delayed_buffer)->client_context); |
683 | MMAL_PORT_T *port = mmal_vc_port_by_number(component, msg->port_type, msg->port_num); |
684 | |
685 | vcos_assert(client_context && port); |
686 | client_context->callback_event(port, msg->delayed_buffer); |
687 | LOG_DEBUG("event bulk rx done, length %d" , msg->length); |
688 | } |
689 | vchiq_release_message(service, header); |
690 | } |
691 | break; |
692 | case VCHIQ_BULK_RECEIVE_ABORTED: |
693 | { |
         VCHIQ_HEADER_T *header = (VCHIQ_HEADER_T *)context;
695 | mmal_worker_msg_header *msg_hdr = (mmal_worker_msg_header*)header->data; |
696 | if (msg_hdr->msgid == MMAL_WORKER_BUFFER_TO_HOST) |
697 | { |
698 | mmal_worker_buffer_from_host *msg = (mmal_worker_buffer_from_host *)msg_hdr; |
699 | MMAL_VC_CLIENT_BUFFER_CONTEXT_T *client_context = mmal_vc_lookup_client_context(msg->drvbuf.client_context); |
700 | LOG_TRACE("bulk rx aborted: %08x, %d" , msg->buffer_header.data, msg->buffer_header.length); |
701 | vcos_assert(client_context && client_context->magic == MMAL_MAGIC); |
702 | msg->buffer_header.flags |= MMAL_BUFFER_HEADER_FLAG_TRANSMISSION_FAILED; |
703 | client_context->callback(msg); |
704 | } |
705 | else |
706 | { |
707 | mmal_worker_event_to_host *msg = (mmal_worker_event_to_host *)msg_hdr; |
708 | MMAL_COMPONENT_T *component = lookup_client_component(msg->client_component); |
709 | MMAL_VC_CLIENT_BUFFER_CONTEXT_T *client_context = |
710 | mmal_vc_lookup_client_context(mmal_buffer_header_driver_data(msg->delayed_buffer)->client_context); |
711 | MMAL_PORT_T *port = mmal_vc_port_by_number(component, msg->port_type, msg->port_num); |
712 | |
713 | vcos_assert(port); |
714 | LOG_DEBUG("event bulk rx aborted" ); |
715 | msg->delayed_buffer->flags |= MMAL_BUFFER_HEADER_FLAG_TRANSMISSION_FAILED; |
716 | |
717 | client_context->callback_event(port, msg->delayed_buffer); |
718 | } |
719 | vchiq_release_message(service, header); |
720 | } |
721 | break; |
722 | case VCHIQ_BULK_TRANSMIT_ABORTED: |
723 | { |
724 | mmal_worker_buffer_from_host *msg = (mmal_worker_buffer_from_host *)context; |
725 | MMAL_VC_CLIENT_BUFFER_CONTEXT_T *client_context = |
726 | mmal_vc_lookup_client_context(msg->drvbuf.client_context); |
727 | LOG_INFO("bulk tx aborted: %08x, %d" , msg->buffer_header.data, msg->buffer_header.length); |
728 | vcos_assert(client_context->magic == MMAL_MAGIC); |
729 | /* Nothing to do as the VC side will release the buffer and notify us of the error */ |
730 | client_context = NULL; // Avoid warnings in release builds |
731 | } |
732 | break; |
733 | default: |
734 | break; |
735 | } |
736 | |
737 | return VCHIQ_SUCCESS; |
738 | } |
739 | |
740 | /** Send a message and wait for a reply. |
741 | * |
742 | * @param client client to send message for |
 * @param msg_header message header to send
744 | * @param size length of message, including header |
745 | * @param msgid message id |
746 | * @param dest destination for reply |
747 | * @param destlen size of destination, updated with actual length |
748 | * @param send_dummy_bulk whether to send a dummy bulk transfer |
749 | */ |
750 | MMAL_STATUS_T mmal_vc_sendwait_message(struct MMAL_CLIENT_T *client, |
                                       mmal_worker_msg_header *msg_header,
752 | size_t size, |
753 | uint32_t msgid, |
754 | void *dest, |
755 | size_t *destlen, |
756 | MMAL_BOOL_T send_dummy_bulk) |
757 | { |
758 | MMAL_STATUS_T ret; |
759 | MMAL_WAITER_T *waiter; |
760 | VCHIQ_STATUS_T vst; |
761 | VCHIQ_ELEMENT_T elems[] = {{msg_header, size}}; |
762 | |
763 | vcos_assert(size >= sizeof(mmal_worker_msg_header)); |
764 | vcos_assert(dest); |
765 | |
766 | if (!client->inited) |
767 | { |
768 | vcos_assert(0); |
769 | return MMAL_EINVAL; |
770 | } |
771 | |
772 | if (send_dummy_bulk) |
773 | vcos_mutex_lock(&client->bulk_lock); |
774 | |
775 | waiter = get_waiter(client); |
776 | msg_header->msgid = msgid; |
777 | msg_header->u.waiter = waiter->index; |
778 | msg_header->magic = MMAL_MAGIC; |
779 | |
780 | waiter->dest = dest; |
781 | waiter->destlen = *destlen; |
782 | LOG_TRACE("wait %p, reply to %p" , waiter, dest); |
783 | mmal_vc_use_internal(client); |
784 | |
785 | vst = vchiq_queue_message(client->service, elems, 1); |
786 | |
787 | if (vst != VCHIQ_SUCCESS) |
788 | { |
789 | ret = MMAL_EIO; |
790 | if (send_dummy_bulk) |
791 | vcos_mutex_unlock(&client->bulk_lock); |
792 | goto fail_msg; |
793 | } |
794 | |
795 | if (send_dummy_bulk) |
796 | { |
797 | uint32_t data_size = 8; |
798 | /* The data is just some dummy bytes so it's fine for it to be static */ |
799 | static uint8_t data[8]; |
800 | vst = vchiq_queue_bulk_transmit(client->service, data, data_size, msg_header); |
801 | |
802 | vcos_mutex_unlock(&client->bulk_lock); |
803 | |
804 | if (!vcos_verify(vst == VCHIQ_SUCCESS)) |
805 | { |
806 | LOG_ERROR("failed bulk transmit" ); |
807 | /* This really should not happen and if it does, things will go wrong as |
808 | * we've already queued the vchiq message above. */ |
809 | vcos_assert(0); |
810 | ret = MMAL_EIO; |
811 | goto fail_msg; |
812 | } |
813 | } |
814 | |
815 | /* now wait for the reply... |
816 | * |
817 | * FIXME: we could do with a timeout here. Need to be careful to cancel |
818 | * the semaphore on a timeout. |
819 | */ |
820 | /* coverity[lock] This semaphore isn't being used as a mutex */ |
821 | vcos_semaphore_wait(&waiter->sem); |
822 | |
823 | mmal_vc_release_internal(client); |
824 | LOG_TRACE("got reply (len %i/%i)" , (int)*destlen, (int)waiter->destlen); |
825 | *destlen = waiter->destlen; |
826 | |
827 | release_waiter(client, waiter); |
828 | return MMAL_SUCCESS; |
829 | |
830 | fail_msg: |
831 | mmal_vc_release_internal(client); |
832 | |
833 | release_waiter(client, waiter); |
834 | return ret; |
835 | } |
836 | |
837 | /** Send a message and do not wait for a reply. |
838 | * |
839 | * @note |
840 | * This function should only be called from within a mmal component, so |
841 | * vchiq_use/release_service calls aren't required (dealt with at higher level). |
842 | * |
843 | * @param client client to send message for |
844 | * @param msg_header message header to send |
 * @param size length of message, including header
 * @param data optional payload to send via a bulk transfer (ignored if data_size is 0)
 * @param data_size length of the bulk payload, or 0 if there is none
 * @param msgid message id
 */
848 | MMAL_STATUS_T mmal_vc_send_message(MMAL_CLIENT_T *client, |
                                   mmal_worker_msg_header *msg_header, size_t size,
850 | uint8_t *data, size_t data_size, |
851 | uint32_t msgid) |
852 | { |
853 | VCHIQ_STATUS_T vst; |
854 | VCHIQ_ELEMENT_T elems[] = {{msg_header, size}}; |
855 | MMAL_BOOL_T using_bulk_transfer = (data_size != 0); |
856 | |
857 | LOG_TRACE("len %zu" , data_size); |
858 | vcos_assert(size >= sizeof(mmal_worker_msg_header)); |
859 | |
860 | if (!client->inited) |
861 | { |
862 | vcos_assert(0); |
863 | return MMAL_EINVAL; |
864 | } |
865 | |
866 | if (using_bulk_transfer) |
867 | vcos_mutex_lock(&client->bulk_lock); |
868 | |
869 | msg_header->msgid = msgid; |
870 | msg_header->magic = MMAL_MAGIC; |
871 | |
872 | vst = vchiq_queue_message(client->service, elems, 1); |
873 | |
874 | if (vst != VCHIQ_SUCCESS) |
875 | { |
876 | if (using_bulk_transfer) |
877 | vcos_mutex_unlock(&client->bulk_lock); |
878 | |
879 | LOG_ERROR("failed" ); |
880 | goto error; |
881 | } |
882 | |
883 | if (using_bulk_transfer) |
884 | { |
885 | LOG_TRACE("bulk transmit: %p, %zu" , data, data_size); |
886 | |
887 | data_size = (data_size + 3) & ~3; |
888 | vst = vchiq_queue_bulk_transmit(client->service, data, data_size, msg_header); |
889 | |
890 | vcos_mutex_unlock(&client->bulk_lock); |
891 | |
892 | if (!vcos_verify(vst == VCHIQ_SUCCESS)) |
893 | { |
894 | LOG_ERROR("failed bulk transmit" ); |
895 | /* This really should not happen and if it does, things will go wrong as |
896 | * we've already queued the vchiq message above. */ |
897 | vcos_assert(0); |
898 | goto error; |
899 | } |
900 | } |
901 | |
902 | return MMAL_SUCCESS; |
903 | |
904 | error: |
905 | return MMAL_EIO; |
906 | } |
907 | |
908 | MMAL_STATUS_T mmal_vc_use(void) |
909 | { |
910 | MMAL_STATUS_T status = MMAL_ENOTCONN; |
911 | if(client.inited) |
912 | status = mmal_vc_use_internal(&client); |
913 | return status; |
914 | } |
915 | |
916 | MMAL_STATUS_T mmal_vc_release(void) |
917 | { |
918 | MMAL_STATUS_T status = MMAL_ENOTCONN; |
919 | if(client.inited) |
920 | status = mmal_vc_release_internal(&client); |
921 | return status; |
922 | } |
923 | |
924 | MMAL_STATUS_T mmal_vc_init_fd(int dev_vchiq_fd) |
925 | { |
926 | VCHIQ_SERVICE_PARAMS_T vchiq_params; |
927 | MMAL_BOOL_T vchiq_initialised = 0, waitpool_initialised = 0; |
928 | MMAL_BOOL_T service_initialised = 0; |
929 | MMAL_STATUS_T status = MMAL_EIO; |
930 | VCHIQ_STATUS_T vchiq_status; |
931 | int count; |
932 | |
933 | vcos_once(&once, init_once); |
934 | |
935 | vcos_mutex_lock(&client.lock); |
936 | |
937 | count = client.refcount++; |
938 | if (count > 0) |
939 | { |
940 | /* Already initialised so nothing to do */ |
941 | vcos_mutex_unlock(&client.lock); |
942 | return MMAL_SUCCESS; |
943 | } |
944 | |
945 | vcos_log_register("mmalipc" , VCOS_LOG_CATEGORY); |
946 | |
947 | /* Initialise a VCHIQ instance */ |
948 | vchiq_status = vchiq_initialise_fd(&mmal_vchiq_instance, dev_vchiq_fd); |
949 | if (vchiq_status != VCHIQ_SUCCESS) |
950 | { |
951 | LOG_ERROR("failed to initialise vchiq" ); |
952 | status = MMAL_EIO; |
953 | goto error; |
954 | } |
955 | vchiq_initialised = 1; |
956 | |
957 | vchiq_status = vchiq_connect(mmal_vchiq_instance); |
958 | if (vchiq_status != VCHIQ_SUCCESS) |
959 | { |
960 | LOG_ERROR("failed to connect to vchiq" ); |
961 | status = MMAL_EIO; |
962 | goto error; |
963 | } |
964 | |
965 | memset(&vchiq_params,0,sizeof(vchiq_params)); |
966 | vchiq_params.fourcc = MMAL_CONTROL_FOURCC(); |
967 | vchiq_params.callback = mmal_vc_vchiq_callback; |
968 | vchiq_params.userdata = &client; |
969 | vchiq_params.version = WORKER_VER_MAJOR; |
970 | vchiq_params.version_min = WORKER_VER_MINIMUM; |
971 | |
972 | vchiq_status = vchiq_open_service(mmal_vchiq_instance, &vchiq_params, &client.service); |
973 | if (vchiq_status != VCHIQ_SUCCESS) |
974 | { |
975 | LOG_ERROR("could not open vchiq service" ); |
976 | status = MMAL_EIO; |
977 | goto error; |
978 | } |
979 | client.usecount = 1; /* usecount set to 1 by the open call. */ |
980 | service_initialised = 1; |
981 | |
982 | status = create_waitpool(&client.waitpool); |
983 | if (status != MMAL_SUCCESS) |
984 | { |
985 | LOG_ERROR("could not create wait pool" ); |
986 | goto error; |
987 | } |
988 | waitpool_initialised = 1; |
989 | |
990 | if (vcos_mutex_create(&client.bulk_lock, "mmal client bulk lock" ) != VCOS_SUCCESS) |
991 | { |
992 | LOG_ERROR("could not create bulk lock" ); |
993 | status = MMAL_ENOSPC; |
994 | goto error; |
995 | } |
996 | |
997 | client.inited = 1; |
998 | |
999 | vcos_mutex_unlock(&client.lock); |
1000 | /* assume we're not using VC immediately. Do this outside the lock */ |
1001 | mmal_vc_release(); |
1002 | |
1003 | |
1004 | return MMAL_SUCCESS; |
1005 | |
1006 | error: |
1007 | if (waitpool_initialised) |
1008 | destroy_waitpool(&client.waitpool); |
1009 | if (service_initialised) |
1010 | { |
1011 | client.usecount = 0; |
1012 | vchiq_close_service(client.service); |
1013 | } |
1014 | if (vchiq_initialised) |
1015 | vchiq_shutdown(mmal_vchiq_instance); |
1016 | vcos_log_unregister(VCOS_LOG_CATEGORY); |
1017 | client.refcount--; |
1018 | |
1019 | vcos_mutex_unlock(&client.lock); |
1020 | return status; |
1021 | } |
1022 | |
1023 | MMAL_STATUS_T mmal_vc_init(void) |
1024 | { |
1025 | return mmal_vc_init_fd(-1); |
1026 | } |
1027 | |
1028 | void mmal_vc_deinit(void) |
1029 | { |
1030 | int count; |
1031 | |
1032 | vcos_mutex_lock(&client.lock); |
1033 | count = --client.refcount; |
1034 | if (count != 0) |
1035 | { |
1036 | /* Still in use so don't do anything */ |
1037 | vcos_mutex_unlock(&client.lock); |
1038 | return; |
1039 | } |
1040 | |
1041 | vcos_mutex_delete(&client.bulk_lock); |
1042 | destroy_waitpool(&client.waitpool); |
1043 | vchiq_close_service(client.service); |
1044 | vchiq_shutdown(mmal_vchiq_instance); |
1045 | vcos_log_unregister(VCOS_LOG_CATEGORY); |
1046 | |
1047 | client.service = VCHIQ_SERVICE_HANDLE_INVALID; |
1048 | client.inited = 0; |
1049 | vcos_mutex_unlock(&client.lock); |
1050 | } |
1051 | |
1052 | MMAL_CLIENT_T *mmal_vc_get_client(void) |
1053 | { |
1054 | return &client; |
1055 | } |
1056 | |