/*
Copyright (c) 2012, Broadcom Europe Ltd
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    * Neither the name of the copyright holder nor the
      names of its contributors may be used to endorse or promote products
      derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include "mmal.h"
#include "mmal_queue.h"
/** Definition of the QUEUE */
struct MMAL_QUEUE_T
{
   VCOS_MUTEX_T lock;           /**< Protects 'length', 'first' and 'last' */
   unsigned int length;         /**< Number of buffer headers currently in the queue */
   MMAL_BUFFER_HEADER_T *first; /**< Head of the singly-linked list, NULL when empty */
   MMAL_BUFFER_HEADER_T **last; /**< Points to the 'next' field of the tail buffer,
                                     or back to 'first' when the queue is empty */
   VCOS_SEMAPHORE_T semaphore;  /**< Counts the buffer headers available to get/wait */
};
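
/* Illustrative note (not part of the API): keeping 'last' as a pointer to the
   tail's 'next' field makes both append and pop O(1) without walking the list.
   The states after a couple of puts look like this:

      // empty queue:         first == NULL,                 last == &queue->first
      // after put(b1):       first == b1, b1->next == NULL, last == &b1->next
      // then after put(b2):  first == b1, b1->next == b2,   last == &b2->next
*/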

// Only sanity check if asserts are enabled
#if VCOS_ASSERT_ENABLED
static void mmal_queue_sanity_check(MMAL_QUEUE_T *queue, MMAL_BUFFER_HEADER_T *buffer)
{
   MMAL_BUFFER_HEADER_T *q;
   unsigned int len = 0;

   /* Walk the list checking that 'buffer' is not already queued and that the
      number of linked buffer headers matches the stored length. */
   for (q = queue->first; q && len < queue->length; q = q->next)
   {
      vcos_assert(buffer != q);
      len++;
   }
   vcos_assert(len == queue->length && !q);
}
#else
#define mmal_queue_sanity_check(q,b)
#endif

/** Create a QUEUE of MMAL_BUFFER_HEADER_T */
MMAL_QUEUE_T *mmal_queue_create(void)
{
   MMAL_QUEUE_T *queue;

   queue = vcos_malloc(sizeof(*queue), "MMAL queue");
   if(!queue) return NULL;

   if(vcos_mutex_create(&queue->lock, "MMAL queue lock") != VCOS_SUCCESS)
   {
      vcos_free(queue);
      return NULL;
   }

   if(vcos_semaphore_create(&queue->semaphore, "MMAL queue sema", 0) != VCOS_SUCCESS)
   {
      vcos_mutex_delete(&queue->lock);
      vcos_free(queue);
      return NULL;
   }

   /* The queue is not visible to any other thread yet, so the lock is not
      strictly needed here; it is taken purely to keep Coverity happy. */
   vcos_mutex_lock(&queue->lock);
   queue->length = 0;
   queue->first = NULL;
   queue->last = &queue->first;
   mmal_queue_sanity_check(queue, NULL);
   vcos_mutex_unlock(&queue->lock);

   return queue;
}
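
/* A minimal lifecycle sketch (illustrative only, not code from this file):
   every queue returned by mmal_queue_create() should eventually be released
   with mmal_queue_destroy(), otherwise the mutex, semaphore and heap block
   allocated above are leaked.  'buffer' is assumed to come from elsewhere,
   e.g. a buffer header pool.

      MMAL_QUEUE_T *q = mmal_queue_create();
      if (q)
      {
         mmal_queue_put(q, buffer);                    // queue a buffer header
         MMAL_BUFFER_HEADER_T *b = mmal_queue_get(q);  // returns 'buffer' again
         mmal_queue_destroy(q);                        // once no other thread uses 'q'
      }
*/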

/** Put a MMAL_BUFFER_HEADER_T into a QUEUE */
void mmal_queue_put(MMAL_QUEUE_T *queue, MMAL_BUFFER_HEADER_T *buffer)
{
   vcos_assert(queue && buffer);
   if(!queue || !buffer) return;

   vcos_mutex_lock(&queue->lock);
   mmal_queue_sanity_check(queue, buffer);
   queue->length++;
   *queue->last = buffer;
   buffer->next = NULL;
   queue->last = &buffer->next;
   // There would be a possible advantage in posting the semaphore outside the
   // lock, as that would avoid the case where the post causes a task switch to
   // a waiting "get" thread which then immediately blocks because we still hold
   // the lock.  However, doing so would open a race with (legitimate) code of
   // the (simplified) form:
   //    if (mmal_queue_length(q) > 0) b = mmal_queue_get(q);
   // where the _get should always succeed.
   // This would have an easy fix if there were a function returning the current
   // count of a semaphore, but not all OSs support that (e.g. Win32).
   vcos_semaphore_post(&queue->semaphore);
   vcos_mutex_unlock(&queue->lock);
}
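
/* The calling pattern referred to above, expanded (a sketch on some queue 'q',
   not code from this file): a caller that sees a non-zero length expects the
   following get to succeed, which is what posting the semaphore while still
   holding the lock is intended to preserve.

      if (mmal_queue_length(q) > 0)
      {
         MMAL_BUFFER_HEADER_T *b = mmal_queue_get(q);
         // 'b' is expected to be non-NULL here
      }
*/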

/** Put a MMAL_BUFFER_HEADER_T back at the start of a QUEUE. */
void mmal_queue_put_back(MMAL_QUEUE_T *queue, MMAL_BUFFER_HEADER_T *buffer)
{
   if(!queue || !buffer) return;

   vcos_mutex_lock(&queue->lock);
   mmal_queue_sanity_check(queue, buffer);
   queue->length++;
   buffer->next = queue->first;
   queue->first = buffer;
   /* If the queue was empty, the new buffer is also the tail */
   if(queue->last == &queue->first) queue->last = &buffer->next;
   vcos_semaphore_post(&queue->semaphore);
   vcos_mutex_unlock(&queue->lock);
}
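
/* A hedged usage sketch ('q' is a previously created queue and can_handle() is
   a hypothetical helper): a consumer that takes a buffer but cannot deal with
   it yet can return it to the head of the queue so that it is the next one
   retrieved, preserving ordering.

      MMAL_BUFFER_HEADER_T *b = mmal_queue_get(q);
      if (b && !can_handle(b))
         mmal_queue_put_back(q, b);   // 'b' will be returned by the next get/wait
*/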


/** Get a MMAL_BUFFER_HEADER_T from a QUEUE. The caller must already have
 * claimed the semaphore. */
static MMAL_BUFFER_HEADER_T *mmal_queue_get_core(MMAL_QUEUE_T *queue)
{
   MMAL_BUFFER_HEADER_T *buffer;

   vcos_mutex_lock(&queue->lock);
   mmal_queue_sanity_check(queue, NULL);
   buffer = queue->first;
   vcos_assert(buffer != NULL);

   queue->first = buffer->next;
   /* If that was the last buffer, reset the tail pointer to the empty state */
   if(!queue->first) queue->last = &queue->first;

   queue->length--;
   vcos_mutex_unlock(&queue->lock);

   return buffer;
}

/** Get a MMAL_BUFFER_HEADER_T from a QUEUE. Does not block; returns NULL
 * immediately if the queue is empty. */
MMAL_BUFFER_HEADER_T *mmal_queue_get(MMAL_QUEUE_T *queue)
{
   vcos_assert(queue);
   if(!queue) return NULL;

   if(vcos_semaphore_trywait(&queue->semaphore) != VCOS_SUCCESS)
      return NULL;

   return mmal_queue_get_core(queue);
}

/** Wait for a MMAL_BUFFER_HEADER_T from a QUEUE. Blocks until a buffer becomes
 * available. */
MMAL_BUFFER_HEADER_T *mmal_queue_wait(MMAL_QUEUE_T *queue)
{
   if(!queue) return NULL;

   if (vcos_semaphore_wait(&queue->semaphore) != VCOS_SUCCESS)
      return NULL;

   return mmal_queue_get_core(queue);
}

/** Wait for a MMAL_BUFFER_HEADER_T from a QUEUE, giving up after the supplied
 * timeout. Returns NULL if no buffer became available in time. */
MMAL_BUFFER_HEADER_T *mmal_queue_timedwait(MMAL_QUEUE_T *queue, VCOS_UNSIGNED timeout)
{
   if (!queue)
      return NULL;

   if (vcos_semaphore_wait_timeout(&queue->semaphore, timeout) != VCOS_SUCCESS)
      return NULL;

   return mmal_queue_get_core(queue);
}
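
/* Illustrative consumer loop (a sketch; 'q', process_buffer() and
   'exit_requested' are hypothetical): use the timed wait so the loop can also
   notice a shutdown request instead of blocking indefinitely.  The timeout
   units are whatever vcos_semaphore_wait_timeout() expects for this VCOS port.

      while (!exit_requested)
      {
         MMAL_BUFFER_HEADER_T *b = mmal_queue_timedwait(q, 100);
         if (!b)
            continue;          // timed out, no buffer queued yet
         process_buffer(b);
      }
*/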

/** Get the number of MMAL_BUFFER_HEADER_T currently in a QUEUE. The value is a
 * snapshot and can change as soon as it is returned. */
unsigned int mmal_queue_length(MMAL_QUEUE_T *queue)
{
   if(!queue) return 0;

   return queue->length;
}

/** Destroy a queue of MMAL_BUFFER_HEADER_T */
void mmal_queue_destroy(MMAL_QUEUE_T *queue)
{
   if(!queue) return;
   vcos_mutex_delete(&queue->lock);
   vcos_semaphore_delete(&queue->semaphore);
   vcos_free(queue);
}