1 | /* |
2 | * QEMU ETRAX DMA Controller. |
3 | * |
4 | * Copyright (c) 2008 Edgar E. Iglesias, Axis Communications AB. |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a copy |
7 | * of this software and associated documentation files (the "Software"), to deal |
8 | * in the Software without restriction, including without limitation the rights |
9 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
10 | * copies of the Software, and to permit persons to whom the Software is |
11 | * furnished to do so, subject to the following conditions: |
12 | * |
13 | * The above copyright notice and this permission notice shall be included in |
14 | * all copies or substantial portions of the Software. |
15 | * |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
21 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN |
22 | * THE SOFTWARE. |
23 | */ |
24 | |
25 | #include "qemu/osdep.h" |
26 | #include "hw/hw.h" |
27 | #include "hw/irq.h" |
28 | #include "qemu/main-loop.h" |
29 | #include "sysemu/runstate.h" |
30 | #include "exec/address-spaces.h" |
31 | |
32 | #include "hw/cris/etraxfs_dma.h" |
33 | |
/* Debug print helper: expands to nothing; enable by redefining as D(x) x. */
#define D(x)

/* Per-channel register offsets, expressed as word indices into
   fs_dma_channel.regs[] (byte offset / 4). */
#define RW_DATA (0x0 / 4)
#define RW_SAVED_DATA (0x58 / 4)
#define RW_SAVED_DATA_BUF (0x5c / 4)
#define RW_GROUP (0x60 / 4)
#define RW_GROUP_DOWN (0x7c / 4)
#define RW_CMD (0x80 / 4)
#define RW_CFG (0x84 / 4)
#define RW_STAT (0x88 / 4)
#define RW_INTR_MASK (0x8c / 4)
#define RW_ACK_INTR (0x90 / 4)
#define R_INTR (0x94 / 4)
#define R_MASKED_INTR (0x98 / 4)
#define RW_STREAM_CMD (0x9c / 4)

/* Number of 32-bit registers per channel (0x100 bytes of register space). */
#define DMA_REG_MAX (0x100 / 4)
51 | |
52 | /* descriptors */ |
53 | |
54 | // ------------------------------------------------------------ dma_descr_group |
/* Group descriptor, read from guest memory in channel_load_g().
   NOTE(review): the bitfield layout is compiler- and endian-dependent;
   the load/store paths carry "FIXME: handle endianness" for this reason. */
typedef struct dma_descr_group {
        uint32_t next;                  /* guest address of next group descriptor (presumably) */
        unsigned eol : 1;               /* end of list */
        unsigned tol : 1;
        unsigned bol : 1;
        unsigned : 1;
        unsigned intr : 1;
        unsigned : 2;
        unsigned en : 1;
        unsigned : 7;
        unsigned dis : 1;
        unsigned md : 16;
        struct dma_descr_group *up;
        union {
                struct dma_descr_context *context;
                struct dma_descr_group *group;
        } down;
} dma_descr_group;
73 | |
74 | // ---------------------------------------------------------- dma_descr_context |
/* Context descriptor, loaded from/stored to the guest address in
   RW_GROUP_DOWN (see channel_load_c() / channel_store_c()).
   NOTE(review): bitfield layout is compiler/endian dependent. */
typedef struct dma_descr_context {
        uint32_t next;
        unsigned eol : 1;               /* end of list */
        unsigned : 3;
        unsigned intr : 1;
        unsigned : 1;
        unsigned store_mode : 1;
        unsigned en : 1;
        unsigned : 7;
        unsigned dis : 1;               /* set by the model when the channel hits EOL */
        unsigned md0 : 16;
        unsigned md1;
        unsigned md2;
        unsigned md3;
        unsigned md4;
        uint32_t saved_data;            /* mirrored into regs[RW_SAVED_DATA] on load */
        uint32_t saved_data_buf;        /* mirrored into regs[RW_SAVED_DATA_BUF] on load */
} dma_descr_context;
93 | |
94 | // ------------------------------------------------------------- dma_descr_data |
/* Data descriptor, loaded from the guest address in RW_SAVED_DATA
   (see channel_load_d() / channel_store_d()). The transfer window is
   [buf, after); md carries out-of-band metadata pushed to clients.
   NOTE(review): bitfield layout is compiler/endian dependent. */
typedef struct dma_descr_data {
        uint32_t next;                  /* guest address of next data descriptor */
        uint32_t buf;                   /* guest address of the data buffer start */
        unsigned eol : 1;               /* end of list: channel stops here */
        unsigned : 2;
        unsigned out_eop : 1;           /* signal end-of-packet to the client */
        unsigned intr : 1;              /* raise a data interrupt when done */
        unsigned wait : 1;
        unsigned : 2;
        unsigned : 3;
        unsigned in_eop : 1;            /* set by the model on input EOP */
        unsigned : 4;
        unsigned md : 16;               /* metadata, forwarded via metadata_push */
        uint32_t after;                 /* guest address one past the buffer end */
} dma_descr_data;
110 | |
111 | /* Constants */ |
/* Hardware-defined register field values (regk_*) for the ETRAX FS DMA,
   used mainly to decode RW_STREAM_CMD and for register reset defaults. */
enum {
        regk_dma_ack_pkt = 0x00000100,
        regk_dma_anytime = 0x00000001,
        regk_dma_array = 0x00000008,
        regk_dma_burst = 0x00000020,
        regk_dma_client = 0x00000002,
        regk_dma_copy_next = 0x00000010,
        regk_dma_copy_up = 0x00000020,
        regk_dma_data_at_eol = 0x00000001,
        regk_dma_dis_c = 0x00000010,
        regk_dma_dis_g = 0x00000020,
        regk_dma_idle = 0x00000001,
        regk_dma_intern = 0x00000004,
        regk_dma_load_c = 0x00000200,
        regk_dma_load_c_n = 0x00000280,
        regk_dma_load_c_next = 0x00000240,
        regk_dma_load_d = 0x00000140,
        regk_dma_load_g = 0x00000300,
        regk_dma_load_g_down = 0x000003c0,
        regk_dma_load_g_next = 0x00000340,
        regk_dma_load_g_up = 0x00000380,
        regk_dma_next_en = 0x00000010,
        regk_dma_next_pkt = 0x00000010,
        regk_dma_no = 0x00000000,
        regk_dma_only_at_wait = 0x00000000,
        regk_dma_restore = 0x00000020,
        regk_dma_rst = 0x00000001,
        regk_dma_running = 0x00000004,
        regk_dma_rw_cfg_default = 0x00000000,
        regk_dma_rw_cmd_default = 0x00000000,
        regk_dma_rw_intr_mask_default = 0x00000000,
        regk_dma_rw_stat_default = 0x00000101,
        regk_dma_rw_stream_cmd_default = 0x00000000,
        regk_dma_save_down = 0x00000020,
        regk_dma_save_up = 0x00000020,
        regk_dma_set_reg = 0x00000050,
        regk_dma_set_w_size1 = 0x00000190,
        regk_dma_set_w_size2 = 0x000001a0,
        regk_dma_set_w_size4 = 0x000001c0,
        regk_dma_stopped = 0x00000002,
        regk_dma_store_c = 0x00000002,
        regk_dma_store_descr = 0x00000000,
        regk_dma_store_g = 0x00000004,
        regk_dma_store_md = 0x00000001,
        regk_dma_sw = 0x00000008,
        regk_dma_update_down = 0x00000020,
        regk_dma_yes = 0x00000001
};
160 | |
/* Channel state as exposed in the low bits of RW_STAT; values match the
   hardware's regk_dma_rst/stopped/running encoding. */
enum dma_ch_state
{
        RST = 1,
        STOPPED = 2,
        RUNNING = 4
};
167 | |
/* Per-channel state: IRQ line, attached client, cached descriptors and the
   raw register file. */
struct fs_dma_channel
{
        qemu_irq irq;                           /* raised when R_MASKED_INTR != 0 */
        struct etraxfs_dma_client *client;      /* data sink/source; may be NULL */

        /* Internal status. */
        int stream_cmd_src;
        enum dma_ch_state state;

        unsigned int input : 1;         /* 1 = input (device->memory) channel */
        unsigned int eol : 1;           /* reached a descriptor with eol set */

        /* Shadow copies of the descriptors last loaded from guest memory. */
        struct dma_descr_group current_g;
        struct dma_descr_context current_c;
        struct dma_descr_data current_d;

        /* Control registers. */
        uint32_t regs[DMA_REG_MAX];
};
187 | |
/* Controller: an MMIO region covering nr_channels windows of 0x2000 bytes,
   plus a bottom half that drives the channels outside of MMIO context. */
struct fs_dma_ctrl
{
        MemoryRegion mmio;
        int nr_channels;
        struct fs_dma_channel *channels;        /* array of nr_channels entries */

        QEMUBH *bh;                             /* runs DMA_run() */
};
196 | |
197 | static void DMA_run(void *opaque); |
198 | static int channel_out_run(struct fs_dma_ctrl *ctrl, int c); |
199 | |
200 | static inline uint32_t channel_reg(struct fs_dma_ctrl *ctrl, int c, int reg) |
201 | { |
202 | return ctrl->channels[c].regs[reg]; |
203 | } |
204 | |
205 | static inline int channel_stopped(struct fs_dma_ctrl *ctrl, int c) |
206 | { |
207 | return channel_reg(ctrl, c, RW_CFG) & 2; |
208 | } |
209 | |
210 | static inline int channel_en(struct fs_dma_ctrl *ctrl, int c) |
211 | { |
212 | return (channel_reg(ctrl, c, RW_CFG) & 1) |
213 | && ctrl->channels[c].client; |
214 | } |
215 | |
216 | static inline int fs_channel(hwaddr addr) |
217 | { |
218 | /* Every channel has a 0x2000 ctrl register map. */ |
219 | return addr >> 13; |
220 | } |
221 | |
/* Everything below is compiled out (USE_THIS_DEAD_CODE is never defined);
   kept as reference for group-descriptor loading and debug dumps. */
#ifdef USE_THIS_DEAD_CODE
/* Fetch the group descriptor at RW_GROUP into current_g. */
static void channel_load_g(struct fs_dma_ctrl *ctrl, int c)
{
	hwaddr addr = channel_reg(ctrl, c, RW_GROUP);

	/* Load and decode. FIXME: handle endianness. */
	cpu_physical_memory_read (addr,
				  (void *) &ctrl->channels[c].current_g,
				  sizeof ctrl->channels[c].current_g);
}

/* Debug dump of a context descriptor. */
static void dump_c(int ch, struct dma_descr_context *c)
{
	printf("%s ch=%d\n" , __func__, ch);
	printf("next=%x\n" , c->next);
	printf("saved_data=%x\n" , c->saved_data);
	printf("saved_data_buf=%x\n" , c->saved_data_buf);
	printf("eol=%x\n" , (uint32_t) c->eol);
}

/* Debug dump of a data descriptor. */
static void dump_d(int ch, struct dma_descr_data *d)
{
	printf("%s ch=%d\n" , __func__, ch);
	printf("next=%x\n" , d->next);
	printf("buf=%x\n" , d->buf);
	printf("after=%x\n" , d->after);
	printf("intr=%x\n" , (uint32_t) d->intr);
	printf("out_eop=%x\n" , (uint32_t) d->out_eop);
	printf("in_eop=%x\n" , (uint32_t) d->in_eop);
	printf("eol=%x\n" , (uint32_t) d->eol);
}
#endif
254 | |
255 | static void channel_load_c(struct fs_dma_ctrl *ctrl, int c) |
256 | { |
257 | hwaddr addr = channel_reg(ctrl, c, RW_GROUP_DOWN); |
258 | |
259 | /* Load and decode. FIXME: handle endianness. */ |
260 | cpu_physical_memory_read (addr, |
261 | (void *) &ctrl->channels[c].current_c, |
262 | sizeof ctrl->channels[c].current_c); |
263 | |
264 | D(dump_c(c, &ctrl->channels[c].current_c)); |
265 | /* I guess this should update the current pos. */ |
266 | ctrl->channels[c].regs[RW_SAVED_DATA] = |
267 | (uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data; |
268 | ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = |
269 | (uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data_buf; |
270 | } |
271 | |
272 | static void channel_load_d(struct fs_dma_ctrl *ctrl, int c) |
273 | { |
274 | hwaddr addr = channel_reg(ctrl, c, RW_SAVED_DATA); |
275 | |
276 | /* Load and decode. FIXME: handle endianness. */ |
277 | D(printf("%s ch=%d addr=" TARGET_FMT_plx "\n" , __func__, c, addr)); |
278 | cpu_physical_memory_read (addr, |
279 | (void *) &ctrl->channels[c].current_d, |
280 | sizeof ctrl->channels[c].current_d); |
281 | |
282 | D(dump_d(c, &ctrl->channels[c].current_d)); |
283 | ctrl->channels[c].regs[RW_DATA] = addr; |
284 | } |
285 | |
286 | static void channel_store_c(struct fs_dma_ctrl *ctrl, int c) |
287 | { |
288 | hwaddr addr = channel_reg(ctrl, c, RW_GROUP_DOWN); |
289 | |
290 | /* Encode and store. FIXME: handle endianness. */ |
291 | D(printf("%s ch=%d addr=" TARGET_FMT_plx "\n" , __func__, c, addr)); |
292 | D(dump_d(c, &ctrl->channels[c].current_d)); |
293 | cpu_physical_memory_write (addr, |
294 | (void *) &ctrl->channels[c].current_c, |
295 | sizeof ctrl->channels[c].current_c); |
296 | } |
297 | |
298 | static void channel_store_d(struct fs_dma_ctrl *ctrl, int c) |
299 | { |
300 | hwaddr addr = channel_reg(ctrl, c, RW_SAVED_DATA); |
301 | |
302 | /* Encode and store. FIXME: handle endianness. */ |
303 | D(printf("%s ch=%d addr=" TARGET_FMT_plx "\n" , __func__, c, addr)); |
304 | cpu_physical_memory_write (addr, |
305 | (void *) &ctrl->channels[c].current_d, |
306 | sizeof ctrl->channels[c].current_d); |
307 | } |
308 | |
/* Stop a channel. Intentionally empty so far (the state machine relies on
   the eol flag and RW_CFG bits instead). */
static inline void channel_stop(struct fs_dma_ctrl *ctrl, int c)
{
	/* FIXME: */
}
313 | |
314 | static inline void channel_start(struct fs_dma_ctrl *ctrl, int c) |
315 | { |
316 | if (ctrl->channels[c].client) |
317 | { |
318 | ctrl->channels[c].eol = 0; |
319 | ctrl->channels[c].state = RUNNING; |
320 | if (!ctrl->channels[c].input) |
321 | channel_out_run(ctrl, c); |
322 | } else |
323 | printf("WARNING: starting DMA ch %d with no client\n" , c); |
324 | |
325 | qemu_bh_schedule_idle(ctrl->bh); |
326 | } |
327 | |
328 | static void channel_continue(struct fs_dma_ctrl *ctrl, int c) |
329 | { |
330 | if (!channel_en(ctrl, c) |
331 | || channel_stopped(ctrl, c) |
332 | || ctrl->channels[c].state != RUNNING |
333 | /* Only reload the current data descriptor if it has eol set. */ |
334 | || !ctrl->channels[c].current_d.eol) { |
335 | D(printf("continue failed ch=%d state=%d stopped=%d en=%d eol=%d\n" , |
336 | c, ctrl->channels[c].state, |
337 | channel_stopped(ctrl, c), |
338 | channel_en(ctrl,c), |
339 | ctrl->channels[c].eol)); |
340 | D(dump_d(c, &ctrl->channels[c].current_d)); |
341 | return; |
342 | } |
343 | |
344 | /* Reload the current descriptor. */ |
345 | channel_load_d(ctrl, c); |
346 | |
347 | /* If the current descriptor cleared the eol flag and we had already |
348 | reached eol state, do the continue. */ |
349 | if (!ctrl->channels[c].current_d.eol && ctrl->channels[c].eol) { |
350 | D(printf("continue %d ok %x\n" , c, |
351 | ctrl->channels[c].current_d.next)); |
352 | ctrl->channels[c].regs[RW_SAVED_DATA] = |
353 | (uint32_t)(unsigned long)ctrl->channels[c].current_d.next; |
354 | channel_load_d(ctrl, c); |
355 | ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = |
356 | (uint32_t)(unsigned long)ctrl->channels[c].current_d.buf; |
357 | |
358 | channel_start(ctrl, c); |
359 | } |
360 | ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = |
361 | (uint32_t)(unsigned long)ctrl->channels[c].current_d.buf; |
362 | } |
363 | |
364 | static void channel_stream_cmd(struct fs_dma_ctrl *ctrl, int c, uint32_t v) |
365 | { |
366 | unsigned int cmd = v & ((1 << 10) - 1); |
367 | |
368 | D(printf("%s ch=%d cmd=%x\n" , |
369 | __func__, c, cmd)); |
370 | if (cmd & regk_dma_load_d) { |
371 | channel_load_d(ctrl, c); |
372 | if (cmd & regk_dma_burst) |
373 | channel_start(ctrl, c); |
374 | } |
375 | |
376 | if (cmd & regk_dma_load_c) { |
377 | channel_load_c(ctrl, c); |
378 | } |
379 | } |
380 | |
381 | static void channel_update_irq(struct fs_dma_ctrl *ctrl, int c) |
382 | { |
383 | D(printf("%s %d\n" , __func__, c)); |
384 | ctrl->channels[c].regs[R_INTR] &= |
385 | ~(ctrl->channels[c].regs[RW_ACK_INTR]); |
386 | |
387 | ctrl->channels[c].regs[R_MASKED_INTR] = |
388 | ctrl->channels[c].regs[R_INTR] |
389 | & ctrl->channels[c].regs[RW_INTR_MASK]; |
390 | |
391 | D(printf("%s: chan=%d masked_intr=%x\n" , __func__, |
392 | c, |
393 | ctrl->channels[c].regs[R_MASKED_INTR])); |
394 | |
395 | qemu_set_irq(ctrl->channels[c].irq, |
396 | !!ctrl->channels[c].regs[R_MASKED_INTR]); |
397 | } |
398 | |
/*
 * Drive an output (memory->device) channel until it reaches a descriptor
 * with eol set. For each descriptor: optionally push metadata to the
 * client, copy up to sizeof(buf) bytes from guest memory at
 * RW_SAVED_DATA_BUF towards current_d.after, hand them to the client's
 * push callback, and on descriptor completion raise the data interrupt
 * (bit 2) if requested, store the descriptor back, and either stop at eol
 * or chain to current_d.next. Returns 0 if already at eol, 1 otherwise.
 */
static int channel_out_run(struct fs_dma_ctrl *ctrl, int c)
{
	uint32_t len;
	uint32_t saved_data_buf;
	unsigned char buf[2 * 1024];

	struct dma_context_metadata meta;
	bool send_context = true;

	if (ctrl->channels[c].eol)
		return 0;

	do {
		bool out_eop;
		D(printf("ch=%d buf=%x after=%x\n" ,
			 c,
			 (uint32_t)ctrl->channels[c].current_d.buf,
			 (uint32_t)ctrl->channels[c].current_d.after));

		/* Metadata is pushed once per descriptor "context": at the
		   start and again after a descriptor with out_eop set. */
		if (send_context) {
			if (ctrl->channels[c].client->client.metadata_push) {
				meta.metadata = ctrl->channels[c].current_d.md;
				ctrl->channels[c].client->client.metadata_push(
					ctrl->channels[c].client->client.opaque,
					&meta);
			}
			send_context = false;
		}

		channel_load_d(ctrl, c);
		saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);
		/* Bytes remaining in this descriptor's buffer, clamped to
		   the local bounce buffer size. */
		len = (uint32_t)(unsigned long)
			ctrl->channels[c].current_d.after;
		len -= saved_data_buf;

		if (len > sizeof buf)
			len = sizeof buf;
		cpu_physical_memory_read (saved_data_buf, buf, len);

		/* EOP is signalled only on the chunk that drains the buffer
		   of a descriptor with out_eop set. */
		out_eop = ((saved_data_buf + len) ==
		           ctrl->channels[c].current_d.after) &&
			ctrl->channels[c].current_d.out_eop;

		D(printf("channel %d pushes %x %u bytes eop=%u\n" , c,
		         saved_data_buf, len, out_eop));

		if (ctrl->channels[c].client->client.push) {
			if (len > 0) {
				ctrl->channels[c].client->client.push(
					ctrl->channels[c].client->client.opaque,
					buf, len, out_eop);
			}
		} else {
			printf("WARNING: DMA ch%d dataloss,"
			       " no attached client.\n" , c);
		}

		saved_data_buf += len;

		if (saved_data_buf == (uint32_t)(unsigned long)
				ctrl->channels[c].current_d.after) {
			/* Done. Step to next. */
			if (ctrl->channels[c].current_d.out_eop) {
				send_context = true;
			}
			if (ctrl->channels[c].current_d.intr) {
				/* data intr. */
				D(printf("signal intr %d eol=%d\n" ,
					 len, ctrl->channels[c].current_d.eol));
				ctrl->channels[c].regs[R_INTR] |= (1 << 2);
				channel_update_irq(ctrl, c);
			}
			channel_store_d(ctrl, c);
			if (ctrl->channels[c].current_d.eol) {
				D(printf("channel %d EOL\n" , c));
				ctrl->channels[c].eol = 1;

				/* Mark the context as disabled. */
				ctrl->channels[c].current_c.dis = 1;
				channel_store_c(ctrl, c);

				channel_stop(ctrl, c);
			} else {
				ctrl->channels[c].regs[RW_SAVED_DATA] =
					(uint32_t)(unsigned long)ctrl->
						channels[c].current_d.next;
				/* Load new descriptor. */
				channel_load_d(ctrl, c);
				saved_data_buf = (uint32_t)(unsigned long)
					ctrl->channels[c].current_d.buf;
			}

			ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
							saved_data_buf;
			D(dump_d(c, &ctrl->channels[c].current_d));
		}
		ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
	} while (!ctrl->channels[c].eol);
	return 1;
}
499 | |
/*
 * Feed 'buflen' bytes of device input into an input channel. Writes the
 * data into guest memory at RW_SAVED_DATA_BUF (clamped to the current
 * descriptor's remaining space). When the descriptor fills up or 'eop' is
 * set: truncate current_d.after to the bytes actually received, raise the
 * requested interrupts (bits 0/1 for data, bit 3 for in_eop), store the
 * descriptor back, and either stop at eol or chain to current_d.next.
 * Returns the number of bytes consumed (0 if the channel is at eol).
 */
static int channel_in_process(struct fs_dma_ctrl *ctrl, int c,
			      unsigned char *buf, int buflen, int eop)
{
	uint32_t len;
	uint32_t saved_data_buf;

	if (ctrl->channels[c].eol == 1)
		return 0;

	channel_load_d(ctrl, c);
	saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);
	/* Remaining room in this descriptor's buffer. */
	len = (uint32_t)(unsigned long)ctrl->channels[c].current_d.after;
	len -= saved_data_buf;

	if (len > buflen)
		len = buflen;

	cpu_physical_memory_write (saved_data_buf, buf, len);
	saved_data_buf += len;

	if (saved_data_buf ==
	    (uint32_t)(unsigned long)ctrl->channels[c].current_d.after
	    || eop) {
		/* Snapshot R_INTR so the IRQ line is only re-evaluated if
		   this call actually raised something new. */
		uint32_t r_intr = ctrl->channels[c].regs[R_INTR];

		D(printf("in dscr end len=%d\n" ,
			 ctrl->channels[c].current_d.after
			 - ctrl->channels[c].current_d.buf));
		ctrl->channels[c].current_d.after = saved_data_buf;

		/* Done. Step to next. */
		if (ctrl->channels[c].current_d.intr) {
			/* TODO: signal eop to the client. */
			/* data intr. */
			ctrl->channels[c].regs[R_INTR] |= 3;
		}
		if (eop) {
			ctrl->channels[c].current_d.in_eop = 1;
			ctrl->channels[c].regs[R_INTR] |= 8;
		}
		if (r_intr != ctrl->channels[c].regs[R_INTR])
			channel_update_irq(ctrl, c);

		channel_store_d(ctrl, c);
		D(dump_d(c, &ctrl->channels[c].current_d));

		if (ctrl->channels[c].current_d.eol) {
			D(printf("channel %d EOL\n" , c));
			ctrl->channels[c].eol = 1;

			/* Mark the context as disabled. */
			ctrl->channels[c].current_c.dis = 1;
			channel_store_c(ctrl, c);

			channel_stop(ctrl, c);
		} else {
			ctrl->channels[c].regs[RW_SAVED_DATA] =
				(uint32_t)(unsigned long)ctrl->
					channels[c].current_d.next;
			/* Load new descriptor. */
			channel_load_d(ctrl, c);
			saved_data_buf = (uint32_t)(unsigned long)
				ctrl->channels[c].current_d.buf;
		}
	}

	ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
	return len;
}
569 | |
570 | static inline int channel_in_run(struct fs_dma_ctrl *ctrl, int c) |
571 | { |
572 | if (ctrl->channels[c].client->client.pull) { |
573 | ctrl->channels[c].client->client.pull( |
574 | ctrl->channels[c].client->client.opaque); |
575 | return 1; |
576 | } else |
577 | return 0; |
578 | } |
579 | |
/* Reject sub-word reads: hw_error() does not return, the trailing return
   only placates the compiler. */
static uint32_t dma_rinvalid (void *opaque, hwaddr addr)
{
	hw_error("Unsupported short raccess. reg=" TARGET_FMT_plx "\n" , addr);
	return 0;
}
585 | |
586 | static uint64_t |
587 | dma_read(void *opaque, hwaddr addr, unsigned int size) |
588 | { |
589 | struct fs_dma_ctrl *ctrl = opaque; |
590 | int c; |
591 | uint32_t r = 0; |
592 | |
593 | if (size != 4) { |
594 | dma_rinvalid(opaque, addr); |
595 | } |
596 | |
597 | /* Make addr relative to this channel and bounded to nr regs. */ |
598 | c = fs_channel(addr); |
599 | addr &= 0xff; |
600 | addr >>= 2; |
601 | switch (addr) |
602 | { |
603 | case RW_STAT: |
604 | r = ctrl->channels[c].state & 7; |
605 | r |= ctrl->channels[c].eol << 5; |
606 | r |= ctrl->channels[c].stream_cmd_src << 8; |
607 | break; |
608 | |
609 | default: |
610 | r = ctrl->channels[c].regs[addr]; |
611 | D(printf ("%s c=%d addr=" TARGET_FMT_plx "\n" , |
612 | __func__, c, addr)); |
613 | break; |
614 | } |
615 | return r; |
616 | } |
617 | |
/* Reject sub-word writes; hw_error() does not return. */
static void
dma_winvalid (void *opaque, hwaddr addr, uint32_t value)
{
	hw_error("Unsupported short waccess. reg=" TARGET_FMT_plx "\n" , addr);
}
623 | |
624 | static void |
625 | dma_update_state(struct fs_dma_ctrl *ctrl, int c) |
626 | { |
627 | if (ctrl->channels[c].regs[RW_CFG] & 2) |
628 | ctrl->channels[c].state = STOPPED; |
629 | if (!(ctrl->channels[c].regs[RW_CFG] & 1)) |
630 | ctrl->channels[c].state = RST; |
631 | } |
632 | |
633 | static void |
634 | dma_write(void *opaque, hwaddr addr, |
635 | uint64_t val64, unsigned int size) |
636 | { |
637 | struct fs_dma_ctrl *ctrl = opaque; |
638 | uint32_t value = val64; |
639 | int c; |
640 | |
641 | if (size != 4) { |
642 | dma_winvalid(opaque, addr, value); |
643 | } |
644 | |
645 | /* Make addr relative to this channel and bounded to nr regs. */ |
646 | c = fs_channel(addr); |
647 | addr &= 0xff; |
648 | addr >>= 2; |
649 | switch (addr) |
650 | { |
651 | case RW_DATA: |
652 | ctrl->channels[c].regs[addr] = value; |
653 | break; |
654 | |
655 | case RW_CFG: |
656 | ctrl->channels[c].regs[addr] = value; |
657 | dma_update_state(ctrl, c); |
658 | break; |
659 | case RW_CMD: |
660 | /* continue. */ |
661 | if (value & ~1) |
662 | printf("Invalid store to ch=%d RW_CMD %x\n" , |
663 | c, value); |
664 | ctrl->channels[c].regs[addr] = value; |
665 | channel_continue(ctrl, c); |
666 | break; |
667 | |
668 | case RW_SAVED_DATA: |
669 | case RW_SAVED_DATA_BUF: |
670 | case RW_GROUP: |
671 | case RW_GROUP_DOWN: |
672 | ctrl->channels[c].regs[addr] = value; |
673 | break; |
674 | |
675 | case RW_ACK_INTR: |
676 | case RW_INTR_MASK: |
677 | ctrl->channels[c].regs[addr] = value; |
678 | channel_update_irq(ctrl, c); |
679 | if (addr == RW_ACK_INTR) |
680 | ctrl->channels[c].regs[RW_ACK_INTR] = 0; |
681 | break; |
682 | |
683 | case RW_STREAM_CMD: |
684 | if (value & ~1023) |
685 | printf("Invalid store to ch=%d " |
686 | "RW_STREAMCMD %x\n" , |
687 | c, value); |
688 | ctrl->channels[c].regs[addr] = value; |
689 | D(printf("stream_cmd ch=%d\n" , c)); |
690 | channel_stream_cmd(ctrl, c, value); |
691 | break; |
692 | |
693 | default: |
694 | D(printf ("%s c=%d " TARGET_FMT_plx "\n" , |
695 | __func__, c, addr)); |
696 | break; |
697 | } |
698 | } |
699 | |
/* MMIO dispatch table. Accesses of 1-4 bytes reach the handlers, which
   themselves reject anything but 4-byte accesses via hw_error(). */
static const MemoryRegionOps dma_ops = {
	.read = dma_read,
	.write = dma_write,
	.endianness = DEVICE_NATIVE_ENDIAN,
	.valid = {
		.min_access_size = 1,
		.max_access_size = 4
	}
};
709 | |
710 | static int etraxfs_dmac_run(void *opaque) |
711 | { |
712 | struct fs_dma_ctrl *ctrl = opaque; |
713 | int i; |
714 | int p = 0; |
715 | |
716 | for (i = 0; |
717 | i < ctrl->nr_channels; |
718 | i++) |
719 | { |
720 | if (ctrl->channels[i].state == RUNNING) |
721 | { |
722 | if (ctrl->channels[i].input) { |
723 | p += channel_in_run(ctrl, i); |
724 | } else { |
725 | p += channel_out_run(ctrl, i); |
726 | } |
727 | } |
728 | } |
729 | return p; |
730 | } |
731 | |
732 | int etraxfs_dmac_input(struct etraxfs_dma_client *client, |
733 | void *buf, int len, int eop) |
734 | { |
735 | return channel_in_process(client->ctrl, client->channel, |
736 | buf, len, eop); |
737 | } |
738 | |
739 | /* Connect an IRQ line with a channel. */ |
740 | void etraxfs_dmac_connect(void *opaque, int c, qemu_irq *line, int input) |
741 | { |
742 | struct fs_dma_ctrl *ctrl = opaque; |
743 | ctrl->channels[c].irq = *line; |
744 | ctrl->channels[c].input = input; |
745 | } |
746 | |
747 | void etraxfs_dmac_connect_client(void *opaque, int c, |
748 | struct etraxfs_dma_client *cl) |
749 | { |
750 | struct fs_dma_ctrl *ctrl = opaque; |
751 | cl->ctrl = ctrl; |
752 | cl->channel = c; |
753 | ctrl->channels[c].client = cl; |
754 | } |
755 | |
756 | |
757 | static void DMA_run(void *opaque) |
758 | { |
759 | struct fs_dma_ctrl *etraxfs_dmac = opaque; |
760 | int p = 1; |
761 | |
762 | if (runstate_is_running()) |
763 | p = etraxfs_dmac_run(etraxfs_dmac); |
764 | |
765 | if (p) |
766 | qemu_bh_schedule_idle(etraxfs_dmac->bh); |
767 | } |
768 | |
769 | void *etraxfs_dmac_init(hwaddr base, int nr_channels) |
770 | { |
771 | struct fs_dma_ctrl *ctrl = NULL; |
772 | |
773 | ctrl = g_malloc0(sizeof *ctrl); |
774 | |
775 | ctrl->bh = qemu_bh_new(DMA_run, ctrl); |
776 | |
777 | ctrl->nr_channels = nr_channels; |
778 | ctrl->channels = g_malloc0(sizeof ctrl->channels[0] * nr_channels); |
779 | |
780 | memory_region_init_io(&ctrl->mmio, NULL, &dma_ops, ctrl, "etraxfs-dma" , |
781 | nr_channels * 0x2000); |
782 | memory_region_add_subregion(get_system_memory(), base, &ctrl->mmio); |
783 | |
784 | return ctrl; |
785 | } |
786 | |