/*
 * i.MX Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2013 Jean-Christophe Dubois. <jcd@tribudubois.net>
 *
 * Based on Coldfire Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2007 CodeSourcery.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/irq.h"
#include "hw/net/imx_fec.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "sysemu/dma.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "net/checksum.h"
#include "net/eth.h"

/* For crc32 */
#include <zlib.h>

#ifndef DEBUG_IMX_FEC
#define DEBUG_IMX_FEC 0
#endif

#define FEC_PRINTF(fmt, args...) \
    do { \
        if (DEBUG_IMX_FEC) { \
            fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX_FEC, \
                    __func__, ##args); \
        } \
    } while (0)

#ifndef DEBUG_IMX_PHY
#define DEBUG_IMX_PHY 0
#endif

#define PHY_PRINTF(fmt, args...) \
    do { \
        if (DEBUG_IMX_PHY) { \
            fprintf(stderr, "[%s.phy]%s: " fmt , TYPE_IMX_FEC, \
                    __func__, ##args); \
        } \
    } while (0)

#define IMX_MAX_DESC 1024

static const char *imx_default_reg_name(IMXFECState *s, uint32_t index)
{
    static char tmp[20];
    sprintf(tmp, "index %d", index);
    return tmp;
}

static const char *imx_fec_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_FRBR:
        return "FRBR";
    case ENET_FRSR:
        return "FRSR";
    case ENET_MIIGSK_CFGR:
        return "MIIGSK_CFGR";
    case ENET_MIIGSK_ENR:
        return "MIIGSK_ENR";
    default:
        return imx_default_reg_name(s, index);
    }
}

static const char *imx_enet_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_RSFL:
        return "RSFL";
    case ENET_RSEM:
        return "RSEM";
    case ENET_RAEM:
        return "RAEM";
    case ENET_RAFL:
        return "RAFL";
    case ENET_TSEM:
        return "TSEM";
    case ENET_TAEM:
        return "TAEM";
    case ENET_TAFL:
        return "TAFL";
    case ENET_TIPG:
        return "TIPG";
    case ENET_FTRL:
        return "FTRL";
    case ENET_TACC:
        return "TACC";
    case ENET_RACC:
        return "RACC";
    case ENET_ATCR:
        return "ATCR";
    case ENET_ATVR:
        return "ATVR";
    case ENET_ATOFF:
        return "ATOFF";
    case ENET_ATPER:
        return "ATPER";
    case ENET_ATCOR:
        return "ATCOR";
    case ENET_ATINC:
        return "ATINC";
    case ENET_ATSTMP:
        return "ATSTMP";
    case ENET_TGSR:
        return "TGSR";
    case ENET_TCSR0:
        return "TCSR0";
    case ENET_TCCR0:
        return "TCCR0";
    case ENET_TCSR1:
        return "TCSR1";
    case ENET_TCCR1:
        return "TCCR1";
    case ENET_TCSR2:
        return "TCSR2";
    case ENET_TCCR2:
        return "TCCR2";
    case ENET_TCSR3:
        return "TCSR3";
    case ENET_TCCR3:
        return "TCCR3";
    default:
        return imx_default_reg_name(s, index);
    }
}

static const char *imx_eth_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_EIR:
        return "EIR";
    case ENET_EIMR:
        return "EIMR";
    case ENET_RDAR:
        return "RDAR";
    case ENET_TDAR:
        return "TDAR";
    case ENET_ECR:
        return "ECR";
    case ENET_MMFR:
        return "MMFR";
    case ENET_MSCR:
        return "MSCR";
    case ENET_MIBC:
        return "MIBC";
    case ENET_RCR:
        return "RCR";
    case ENET_TCR:
        return "TCR";
    case ENET_PALR:
        return "PALR";
    case ENET_PAUR:
        return "PAUR";
    case ENET_OPD:
        return "OPD";
    case ENET_IAUR:
        return "IAUR";
    case ENET_IALR:
        return "IALR";
    case ENET_GAUR:
        return "GAUR";
    case ENET_GALR:
        return "GALR";
    case ENET_TFWR:
        return "TFWR";
    case ENET_RDSR:
        return "RDSR";
    case ENET_TDSR:
        return "TDSR";
    case ENET_MRBR:
        return "MRBR";
    default:
        if (s->is_fec) {
            return imx_fec_reg_name(s, index);
        } else {
            return imx_enet_reg_name(s, index);
        }
    }
}

/*
 * Versions of this device with more than one TX descriptor save the
 * 2nd and 3rd descriptors in a subsection, to maintain migration
 * compatibility with previous versions of the device that only
 * supported a single descriptor.
 */
static bool imx_eth_is_multi_tx_ring(void *opaque)
{
    IMXFECState *s = IMX_FEC(opaque);

    return s->tx_ring_num > 1;
}

static const VMStateDescription vmstate_imx_eth_txdescs = {
    .name = "imx.fec/txdescs",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = imx_eth_is_multi_tx_ring,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(tx_descriptor[1], IMXFECState),
        VMSTATE_UINT32(tx_descriptor[2], IMXFECState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_imx_eth = {
    .name = TYPE_IMX_FEC,
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX),
        VMSTATE_UINT32(rx_descriptor, IMXFECState),
        VMSTATE_UINT32(tx_descriptor[0], IMXFECState),
        VMSTATE_UINT32(phy_status, IMXFECState),
        VMSTATE_UINT32(phy_control, IMXFECState),
        VMSTATE_UINT32(phy_advertise, IMXFECState),
        VMSTATE_UINT32(phy_int, IMXFECState),
        VMSTATE_UINT32(phy_int_mask, IMXFECState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_imx_eth_txdescs,
        NULL
    },
};

#define PHY_INT_ENERGYON (1 << 7)
#define PHY_INT_AUTONEG_COMPLETE (1 << 6)
#define PHY_INT_FAULT (1 << 5)
#define PHY_INT_DOWN (1 << 4)
#define PHY_INT_AUTONEG_LP (1 << 3)
#define PHY_INT_PARFAULT (1 << 2)
#define PHY_INT_AUTONEG_PAGE (1 << 1)

static void imx_eth_update(IMXFECState *s);

/*
 * The MII PHY could raise a GPIO line to the processor, which in turn
 * could be handled as an interrupt by the OS.
 * For now we don't handle any GPIO/interrupt line, so the OS will
 * have to poll for the PHY status.
 */
static void phy_update_irq(IMXFECState *s)
{
    imx_eth_update(s);
}

static void phy_update_link(IMXFECState *s)
{
    /* Autonegotiation status mirrors link status. */
    if (qemu_get_queue(s->nic)->link_down) {
        PHY_PRINTF("link is down\n");
        s->phy_status &= ~0x0024;
        s->phy_int |= PHY_INT_DOWN;
    } else {
        PHY_PRINTF("link is up\n");
        s->phy_status |= 0x0024;
        s->phy_int |= PHY_INT_ENERGYON;
        s->phy_int |= PHY_INT_AUTONEG_COMPLETE;
    }
    phy_update_irq(s);
}

static void imx_eth_set_link(NetClientState *nc)
{
    phy_update_link(IMX_FEC(qemu_get_nic_opaque(nc)));
}

static void phy_reset(IMXFECState *s)
{
    s->phy_status = 0x7809;
    s->phy_control = 0x3000;
    s->phy_advertise = 0x01e1;
    s->phy_int_mask = 0;
    s->phy_int = 0;
    phy_update_link(s);
}

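/* Read a register of the single emulated MDIO PHY; unimplemented registers read as zero. */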
static uint32_t do_phy_read(IMXFECState *s, int reg)
{
    uint32_t val;

    if (reg > 31) {
        /* we only advertise one phy */
        return 0;
    }

    switch (reg) {
    case 0: /* Basic Control */
        val = s->phy_control;
        break;
    case 1: /* Basic Status */
        val = s->phy_status;
        break;
    case 2: /* ID1 */
        val = 0x0007;
        break;
    case 3: /* ID2 */
        val = 0xc0d1;
        break;
    case 4: /* Auto-neg advertisement */
        val = s->phy_advertise;
        break;
    case 5: /* Auto-neg Link Partner Ability */
        val = 0x0f71;
        break;
    case 6: /* Auto-neg Expansion */
        val = 1;
        break;
    case 29: /* Interrupt source. */
        val = s->phy_int;
        s->phy_int = 0;
        phy_update_irq(s);
        break;
    case 30: /* Interrupt mask */
        val = s->phy_int_mask;
        break;
    case 17:
    case 18:
    case 27:
    case 31:
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    }

    PHY_PRINTF("read 0x%04x @ %d\n", val, reg);

    return val;
}

static void do_phy_write(IMXFECState *s, int reg, uint32_t val)
{
    PHY_PRINTF("write 0x%04x @ %d\n", val, reg);

    if (reg > 31) {
        /* we only advertise one phy */
        return;
    }

    switch (reg) {
    case 0: /* Basic Control */
        if (val & 0x8000) {
            phy_reset(s);
        } else {
            s->phy_control = val & 0x7980;
            /* Complete autonegotiation immediately. */
            if (val & 0x1000) {
                s->phy_status |= 0x0020;
            }
        }
        break;
    case 4: /* Auto-neg advertisement */
        s->phy_advertise = (val & 0x2d7f) | 0x80;
        break;
    case 30: /* Interrupt mask */
        s->phy_int_mask = val & 0xff;
        phy_update_irq(s);
        break;
    case 17:
    case 18:
    case 27:
    case 31:
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);
        break;
    }
}

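/* DMA helpers to move FEC/ENET buffer descriptors between guest memory and host-side structures. */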
static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));
}

static void imx_fec_write_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
}

static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));
}

static void imx_enet_write_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
}

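/* Recompute both CPU interrupt lines from the pending (EIR) and enabled (EIMR) event bits; see the comment below on why MAC events drive both lines. */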
static void imx_eth_update(IMXFECState *s)
{
    /*
     * Previous versions of QEMU had the ENET_INT_MAC and ENET_INT_TS_TIMER
     * interrupts swapped. This worked with older versions of Linux (4.14
     * and older) since Linux associated both interrupt lines with Ethernet
     * MAC interrupts. Specifically,
     * - Linux 4.15 and later have separate interrupt handlers for the MAC and
     *   timer interrupts. Those versions of Linux fail with versions of QEMU
     *   with swapped interrupt assignments.
     * - In Linux 4.14, both interrupt lines were registered with the Ethernet
     *   MAC interrupt handler. As a result, all versions of QEMU happen to
     *   work, though that is accidental.
     * - In Linux 4.9 and older, the timer interrupt was registered directly
     *   with the Ethernet MAC interrupt handler. The MAC interrupt was
     *   redirected to a GPIO interrupt to work around erratum ERR006687.
     *   This was implemented using the SOC's IOMUX block. In QEMU, this GPIO
     *   interrupt never fired since IOMUX is currently not supported in QEMU.
     *   Linux instead received MAC interrupts on the timer interrupt.
     *   As a result, QEMU versions with the swapped interrupt assignment work,
     *   albeit accidentally, but QEMU versions with the correct interrupt
     *   assignment fail.
     *
     * To ensure that all versions of Linux work, generate ENET_INT_MAC
     * interrupts on both interrupt lines. This should be changed if and when
     * QEMU supports IOMUX.
     */
    if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] &
        (ENET_INT_MAC | ENET_INT_TS_TIMER)) {
        qemu_set_irq(s->irq[1], 1);
    } else {
        qemu_set_irq(s->irq[1], 0);
    }

    if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] & ENET_INT_MAC) {
        qemu_set_irq(s->irq[0], 1);
    } else {
        qemu_set_irq(s->irq[0], 0);
    }
}

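/* Transmit from the single FEC descriptor ring: gather buffers until the 'last' flag, hand the frame to the backend, and write back each descriptor. */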
static void imx_fec_do_tx(IMXFECState *s)
{
    int frame_size = 0, descnt = 0;
    uint8_t *ptr = s->frame;
    uint32_t addr = s->tx_descriptor[0];

    while (descnt++ < IMX_MAX_DESC) {
        IMXFECBufDesc bd;
        int len;

        imx_fec_read_bd(&bd, addr);
        FEC_PRINTF("tx_bd %x flags %04x len %d data %08x\n",
                   addr, bd.flags, bd.length, bd.data);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Run out of descriptors to transmit. */
            FEC_PRINTF("tx_bd ran out of descriptors to transmit\n");
            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            /* Last buffer in frame. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;
            frame_size = 0;
            s->regs[ENET_EIR] |= ENET_INT_TXF;
        }
        s->regs[ENET_EIR] |= ENET_INT_TXB;
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_TDSR];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[0] = addr;

    imx_eth_update(s);
}

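/* Transmit from the ENET descriptor ring selected by the TDAR/TDAR1/TDAR2 register index, optionally inserting IP/protocol checksums before sending. */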
static void imx_enet_do_tx(IMXFECState *s, uint32_t index)
{
    int frame_size = 0, descnt = 0;

    uint8_t *ptr = s->frame;
    uint32_t addr, int_txb, int_txf, tdsr;
    size_t ring;

    switch (index) {
    case ENET_TDAR:
        ring = 0;
        int_txb = ENET_INT_TXB;
        int_txf = ENET_INT_TXF;
        tdsr = ENET_TDSR;
        break;
    case ENET_TDAR1:
        ring = 1;
        int_txb = ENET_INT_TXB1;
        int_txf = ENET_INT_TXF1;
        tdsr = ENET_TDSR1;
        break;
    case ENET_TDAR2:
        ring = 2;
        int_txb = ENET_INT_TXB2;
        int_txf = ENET_INT_TXF2;
        tdsr = ENET_TDSR2;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bogus value for index %x\n",
                      __func__, index);
        abort();
        break;
    }

    addr = s->tx_descriptor[ring];

    while (descnt++ < IMX_MAX_DESC) {
        IMXENETBufDesc bd;
        int len;

        imx_enet_read_bd(&bd, addr);
        FEC_PRINTF("tx_bd %x flags %04x len %d data %08x option %04x "
                   "status %04x\n", addr, bd.flags, bd.length, bd.data,
                   bd.option, bd.status);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Run out of descriptors to transmit. */
            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            if (bd.option & ENET_BD_PINS) {
                struct ip_header *ip_hd = PKT_GET_IP_HDR(s->frame);
                if (IP_HEADER_VERSION(ip_hd) == 4) {
                    net_checksum_calculate(s->frame, frame_size);
                }
            }
            if (bd.option & ENET_BD_IINS) {
                struct ip_header *ip_hd = PKT_GET_IP_HDR(s->frame);
                /* We compute checksum only for IPv4 frames */
                if (IP_HEADER_VERSION(ip_hd) == 4) {
                    uint16_t csum;
                    ip_hd->ip_sum = 0;
                    csum = net_raw_checksum((uint8_t *)ip_hd, sizeof(*ip_hd));
                    ip_hd->ip_sum = cpu_to_be16(csum);
                }
            }
            /* Last buffer in frame. */

            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;

            frame_size = 0;
            if (bd.option & ENET_BD_TX_INT) {
                s->regs[ENET_EIR] |= int_txf;
            }
            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
        }
        if (bd.option & ENET_BD_TX_INT) {
            s->regs[ENET_EIR] |= int_txb;
        }
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[tdsr];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[ring] = addr;

    imx_eth_update(s);
}

static void imx_eth_do_tx(IMXFECState *s, uint32_t index)
{
    if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
        imx_enet_do_tx(s, index);
    } else {
        imx_fec_do_tx(s);
    }
}

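/* Update RDAR based on whether the current RX descriptor is empty; optionally flush packets queued by the backend. */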
static void imx_eth_enable_rx(IMXFECState *s, bool flush)
{
    IMXFECBufDesc bd;

    imx_fec_read_bd(&bd, s->rx_descriptor);

    s->regs[ENET_RDAR] = (bd.flags & ENET_BD_E) ? ENET_RDAR_RDAR : 0;

    if (!s->regs[ENET_RDAR]) {
        FEC_PRINTF("RX buffer full\n");
    } else if (flush) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}

static void imx_eth_reset(DeviceState *d)
{
    IMXFECState *s = IMX_FEC(d);

    /* Reset the Device */
    memset(s->regs, 0, sizeof(s->regs));
    s->regs[ENET_ECR] = 0xf0000000;
    s->regs[ENET_MIBC] = 0xc0000000;
    s->regs[ENET_RCR] = 0x05ee0001;
    s->regs[ENET_OPD] = 0x00010000;

    s->regs[ENET_PALR] = (s->conf.macaddr.a[0] << 24)
                         | (s->conf.macaddr.a[1] << 16)
                         | (s->conf.macaddr.a[2] << 8)
                         | s->conf.macaddr.a[3];
    s->regs[ENET_PAUR] = (s->conf.macaddr.a[4] << 24)
                         | (s->conf.macaddr.a[5] << 16)
                         | 0x8808;

    if (s->is_fec) {
        s->regs[ENET_FRBR] = 0x00000600;
        s->regs[ENET_FRSR] = 0x00000500;
        s->regs[ENET_MIIGSK_ENR] = 0x00000006;
    } else {
        s->regs[ENET_RAEM] = 0x00000004;
        s->regs[ENET_RAFL] = 0x00000004;
        s->regs[ENET_TAEM] = 0x00000004;
        s->regs[ENET_TAFL] = 0x00000008;
        s->regs[ENET_TIPG] = 0x0000000c;
        s->regs[ENET_FTRL] = 0x000007ff;
        s->regs[ENET_ATPER] = 0x3b9aca00;
    }

    s->rx_descriptor = 0;
    memset(s->tx_descriptor, 0, sizeof(s->tx_descriptor));

    /* We also reset the PHY */
    phy_reset(s);
}

static uint32_t imx_default_read(IMXFECState *s, uint32_t index)
{
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
    return 0;
}

static uint32_t imx_fec_read(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_FRBR:
    case ENET_FRSR:
    case ENET_MIIGSK_CFGR:
    case ENET_MIIGSK_ENR:
        return s->regs[index];
    default:
        return imx_default_read(s, index);
    }
}

static uint32_t imx_enet_read(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
    case ENET_TIPG:
    case ENET_FTRL:
    case ENET_TACC:
    case ENET_RACC:
    case ENET_ATCR:
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
    case ENET_ATCOR:
    case ENET_ATINC:
    case ENET_ATSTMP:
    case ENET_TGSR:
    case ENET_TCSR0:
    case ENET_TCCR0:
    case ENET_TCSR1:
    case ENET_TCCR1:
    case ENET_TCSR2:
    case ENET_TCCR2:
    case ENET_TCSR3:
    case ENET_TCCR3:
        return s->regs[index];
    default:
        return imx_default_read(s, index);
    }
}

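/* MMIO read dispatcher: common registers are handled here, the rest goes to the FEC- or ENET-specific reader. */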
static uint64_t imx_eth_read(void *opaque, hwaddr offset, unsigned size)
{
    uint32_t value = 0;
    IMXFECState *s = IMX_FEC(opaque);
    uint32_t index = offset >> 2;

    switch (index) {
    case ENET_EIR:
    case ENET_EIMR:
    case ENET_RDAR:
    case ENET_TDAR:
    case ENET_ECR:
    case ENET_MMFR:
    case ENET_MSCR:
    case ENET_MIBC:
    case ENET_RCR:
    case ENET_TCR:
    case ENET_PALR:
    case ENET_PAUR:
    case ENET_OPD:
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
    case ENET_TFWR:
    case ENET_RDSR:
    case ENET_TDSR:
    case ENET_MRBR:
        value = s->regs[index];
        break;
    default:
        if (s->is_fec) {
            value = imx_fec_read(s, index);
        } else {
            value = imx_enet_read(s, index);
        }
        break;
    }

    FEC_PRINTF("reg[%s] => 0x%" PRIx32 "\n", imx_eth_reg_name(s, index),
               value);

    return value;
}

static void imx_default_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad address at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
    return;
}

static void imx_fec_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_FRBR:
        /* FRBR is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register FRBR is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_FRSR:
        s->regs[index] = (value & 0x000003fc) | 0x00000400;
        break;
    case ENET_MIIGSK_CFGR:
        s->regs[index] = value & 0x00000053;
        break;
    case ENET_MIIGSK_ENR:
        s->regs[index] = (value & 0x00000002) ? 0x00000006 : 0;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}

static void imx_enet_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
        s->regs[index] = value & 0x000001ff;
        break;
    case ENET_TIPG:
        s->regs[index] = value & 0x0000001f;
        break;
    case ENET_FTRL:
        s->regs[index] = value & 0x00003fff;
        break;
    case ENET_TACC:
        s->regs[index] = value & 0x00000019;
        break;
    case ENET_RACC:
        s->regs[index] = value & 0x000000C7;
        break;
    case ENET_ATCR:
        s->regs[index] = value & 0x00002a9d;
        break;
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
        s->regs[index] = value;
        break;
    case ENET_ATSTMP:
        /* ATSTMP is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register ATSTMP is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_ATCOR:
        s->regs[index] = value & 0x7fffffff;
        break;
    case ENET_ATINC:
        s->regs[index] = value & 0x00007f7f;
        break;
    case ENET_TGSR:
        /* implement clear timer flag */
        value = value & 0x0000000f;
        break;
    case ENET_TCSR0:
    case ENET_TCSR1:
    case ENET_TCSR2:
    case ENET_TCSR3:
        value = value & 0x000000fd;
        break;
    case ENET_TCCR0:
    case ENET_TCCR1:
    case ENET_TCCR2:
    case ENET_TCCR3:
        s->regs[index] = value;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}

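/* MMIO write dispatcher: updates the register file and kicks the TX/RX engines and the MII management interface as needed, then refreshes the IRQ state. */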
static void imx_eth_write(void *opaque, hwaddr offset, uint64_t value,
                          unsigned size)
{
    IMXFECState *s = IMX_FEC(opaque);
    const bool single_tx_ring = !imx_eth_is_multi_tx_ring(s);
    uint32_t index = offset >> 2;

    FEC_PRINTF("reg[%s] <= 0x%" PRIx32 "\n", imx_eth_reg_name(s, index),
               (uint32_t)value);

    switch (index) {
    case ENET_EIR:
        s->regs[index] &= ~value;
        break;
    case ENET_EIMR:
        s->regs[index] = value;
        break;
    case ENET_RDAR:
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            if (!s->regs[index]) {
                imx_eth_enable_rx(s, true);
            }
        } else {
            s->regs[index] = 0;
        }
        break;
    case ENET_TDAR1: /* FALLTHROUGH */
    case ENET_TDAR2: /* FALLTHROUGH */
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDAR2 or TDAR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
    case ENET_TDAR: /* FALLTHROUGH */
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            s->regs[index] = ENET_TDAR_TDAR;
            imx_eth_do_tx(s, index);
        }
        s->regs[index] = 0;
        break;
    case ENET_ECR:
        if (value & ENET_ECR_RESET) {
            return imx_eth_reset(DEVICE(s));
        }
        s->regs[index] = value;
        if ((s->regs[index] & ENET_ECR_ETHEREN) == 0) {
            s->regs[ENET_RDAR] = 0;
            s->rx_descriptor = s->regs[ENET_RDSR];
            s->regs[ENET_TDAR] = 0;
            s->regs[ENET_TDAR1] = 0;
            s->regs[ENET_TDAR2] = 0;
            s->tx_descriptor[0] = s->regs[ENET_TDSR];
            s->tx_descriptor[1] = s->regs[ENET_TDSR1];
            s->tx_descriptor[2] = s->regs[ENET_TDSR2];
        }
        break;
    case ENET_MMFR:
        s->regs[index] = value;
        if (extract32(value, 29, 1)) {
            /* This is a read operation */
            s->regs[ENET_MMFR] = deposit32(s->regs[ENET_MMFR], 0, 16,
                                           do_phy_read(s,
                                                       extract32(value,
                                                                 18, 10)));
        } else {
            /* This is a write operation */
            do_phy_write(s, extract32(value, 18, 10), extract32(value, 0, 16));
        }
        /* raise the interrupt as the PHY operation is done */
        s->regs[ENET_EIR] |= ENET_INT_MII;
        break;
    case ENET_MSCR:
        s->regs[index] = value & 0xfe;
        break;
    case ENET_MIBC:
        /* TODO: Implement MIB. */
        s->regs[index] = (value & 0x80000000) ? 0xc0000000 : 0;
        break;
    case ENET_RCR:
        s->regs[index] = value & 0x07ff003f;
        /* TODO: Implement LOOP mode. */
        break;
    case ENET_TCR:
        /* We transmit immediately, so raise GRA immediately. */
        s->regs[index] = value;
        if (value & 1) {
            s->regs[ENET_EIR] |= ENET_INT_GRA;
        }
        break;
    case ENET_PALR:
        s->regs[index] = value;
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
        break;
    case ENET_PAUR:
        s->regs[index] = (value | 0x0000ffff) & 0xffff8808;
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        break;
    case ENET_OPD:
        s->regs[index] = (value & 0x0000ffff) | 0x00010000;
        break;
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
        /* TODO: implement MAC hash filtering. */
        break;
    case ENET_TFWR:
        if (s->is_fec) {
            s->regs[index] = value & 0x3;
        } else {
            s->regs[index] = value & 0x13f;
        }
        break;
    case ENET_RDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->rx_descriptor = s->regs[index];
        break;
    case ENET_TDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->tx_descriptor[0] = s->regs[index];
        break;
    case ENET_TDSR1:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }

        s->regs[index] = value & ~7;
        s->tx_descriptor[1] = s->regs[index];
        break;
    case ENET_TDSR2:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR2\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }

        s->regs[index] = value & ~7;
        s->tx_descriptor[2] = s->regs[index];
        break;
    case ENET_MRBR:
        s->regs[index] = value & 0x00003ff0;
        break;
    default:
        if (s->is_fec) {
            imx_fec_write(s, index, value);
        } else {
            imx_enet_write(s, index, value);
        }
        return;
    }

    imx_eth_update(s);
}

static int imx_eth_can_receive(NetClientState *nc)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    FEC_PRINTF("\n");

    return !!s->regs[ENET_RDAR];
}

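/* FEC receive path: copy the incoming frame plus a trailing CRC into the guest RX descriptor chain and raise RX interrupts. */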
static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf,
                               size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXFECBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;

    FEC_PRINTF("len %d\n", (int)size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    /* 4 bytes for the CRC. */
    size += 4;
    crc = cpu_to_be32(crc32(~0, buf, size));
    crc_ptr = (uint8_t *) &crc;

    /* Huge frames are truncated. */
    if (size > ENET_MAX_FRAME_SIZE) {
        size = ENET_MAX_FRAME_SIZE;
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available. Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = (size <= s->regs[ENET_MRBR]) ? size : s->regs[ENET_MRBR];
        bd.length = buf_len;
        size -= buf_len;

        FEC_PRINTF("rx_bd 0x%x length %d\n", addr, bd.length);

        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;
        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
        buf += buf_len;
        if (size < 4) {
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;
            FEC_PRINTF("rx frame flags %04x\n", bd.flags);
            s->regs[ENET_EIR] |= ENET_INT_RXF;
        } else {
            s->regs[ENET_EIR] |= ENET_INT_RXB;
        }
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}

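/* ENET receive path: like the FEC path above, but honours the FTRL truncation limit and the RACC SHIFT16 payload alignment option. */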
static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
                                size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXENETBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;
    bool shift16 = s->regs[ENET_RACC] & ENET_RACC_SHIFT16;

    FEC_PRINTF("len %d\n", (int)size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    /* 4 bytes for the CRC. */
    size += 4;
    crc = cpu_to_be32(crc32(~0, buf, size));
    crc_ptr = (uint8_t *) &crc;

    if (shift16) {
        size += 2;
    }

    /* Huge frames are truncated. */
    if (size > s->regs[ENET_FTRL]) {
        size = s->regs[ENET_FTRL];
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available. Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = MIN(size, s->regs[ENET_MRBR]);
        bd.length = buf_len;
        size -= buf_len;

        FEC_PRINTF("rx_bd 0x%x length %d\n", addr, bd.length);

        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;

        if (shift16) {
            /*
             * If SHIFT16 bit of ENETx_RACC register is set we need to
             * align the payload to 4-byte boundary.
             */
            const uint8_t zeros[2] = { 0 };

            dma_memory_write(&address_space_memory, buf_addr,
                             zeros, sizeof(zeros));

            buf_addr += sizeof(zeros);
            buf_len -= sizeof(zeros);

            /* We only do this once per Ethernet frame */
            shift16 = false;
        }

        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
        buf += buf_len;
        if (size < 4) {
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;
            FEC_PRINTF("rx frame flags %04x\n", bd.flags);
            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXF;
            }
        } else {
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXB;
            }
        }
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}

static ssize_t imx_eth_receive(NetClientState *nc, const uint8_t *buf,
                               size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
        return imx_enet_receive(nc, buf, len);
    } else {
        return imx_fec_receive(nc, buf, len);
    }
}

static const MemoryRegionOps imx_eth_ops = {
    .read = imx_eth_read,
    .write = imx_eth_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void imx_eth_cleanup(NetClientState *nc)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    s->nic = NULL;
}

static NetClientInfo imx_eth_net_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = imx_eth_can_receive,
    .receive = imx_eth_receive,
    .cleanup = imx_eth_cleanup,
    .link_status_changed = imx_eth_set_link,
};

static void imx_eth_realize(DeviceState *dev, Error **errp)
{
    IMXFECState *s = IMX_FEC(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    memory_region_init_io(&s->iomem, OBJECT(dev), &imx_eth_ops, s,
                          TYPE_IMX_FEC, FSL_IMX25_FEC_SIZE);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq[0]);
    sysbus_init_irq(sbd, &s->irq[1]);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);

    s->nic = qemu_new_nic(&imx_eth_net_info, &s->conf,
                          object_get_typename(OBJECT(dev)),
                          DEVICE(dev)->id, s);

    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}

static Property imx_eth_properties[] = {
    DEFINE_NIC_PROPERTIES(IMXFECState, conf),
    DEFINE_PROP_UINT32("tx-ring-num", IMXFECState, tx_ring_num, 1),
    DEFINE_PROP_END_OF_LIST(),
};

static void imx_eth_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->vmsd = &vmstate_imx_eth;
    dc->reset = imx_eth_reset;
    dc->props = imx_eth_properties;
    dc->realize = imx_eth_realize;
    dc->desc = "i.MX FEC/ENET Ethernet Controller";
}

static void imx_fec_init(Object *obj)
{
    IMXFECState *s = IMX_FEC(obj);

    s->is_fec = true;
}

static void imx_enet_init(Object *obj)
{
    IMXFECState *s = IMX_FEC(obj);

    s->is_fec = false;
}

static const TypeInfo imx_fec_info = {
    .name = TYPE_IMX_FEC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IMXFECState),
    .instance_init = imx_fec_init,
    .class_init = imx_eth_class_init,
};

static const TypeInfo imx_enet_info = {
    .name = TYPE_IMX_ENET,
    .parent = TYPE_IMX_FEC,
    .instance_init = imx_enet_init,
};

static void imx_eth_register_types(void)
{
    type_register_static(&imx_fec_info);
    type_register_static(&imx_enet_info);
}

type_init(imx_eth_register_types)