/*
 * QEMU model of Xilinx AXI-Ethernet.
 *
 * Copyright (c) 2011 Edgar E. Iglesias.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/sysbus.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "net/net.h"
#include "net/checksum.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/stream.h"

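/* Compile-time debug hooks: DPHY() wraps PHY/MDIO trace logging and expands
 * to nothing by default; DENET(), defined further down, does the same for
 * the Ethernet core. */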
#define DPHY(x)

#define TYPE_XILINX_AXI_ENET "xlnx.axi-ethernet"
#define TYPE_XILINX_AXI_ENET_DATA_STREAM "xilinx-axienet-data-stream"
#define TYPE_XILINX_AXI_ENET_CONTROL_STREAM "xilinx-axienet-control-stream"

#define XILINX_AXI_ENET(obj) \
     OBJECT_CHECK(XilinxAXIEnet, (obj), TYPE_XILINX_AXI_ENET)

#define XILINX_AXI_ENET_DATA_STREAM(obj) \
     OBJECT_CHECK(XilinxAXIEnetStreamSlave, (obj),\
     TYPE_XILINX_AXI_ENET_DATA_STREAM)

#define XILINX_AXI_ENET_CONTROL_STREAM(obj) \
     OBJECT_CHECK(XilinxAXIEnetStreamSlave, (obj),\
     TYPE_XILINX_AXI_ENET_CONTROL_STREAM)

/* Advertisement control register. */
#define ADVERTISE_10HALF        0x0020  /* Try for 10mbps half-duplex */
#define ADVERTISE_10FULL        0x0040  /* Try for 10mbps full-duplex */
#define ADVERTISE_100HALF       0x0080  /* Try for 100mbps half-duplex */
#define ADVERTISE_100FULL       0x0100  /* Try for 100mbps full-duplex */

#define CONTROL_PAYLOAD_WORDS 5
#define CONTROL_PAYLOAD_SIZE (CONTROL_PAYLOAD_WORDS * (sizeof(uint32_t)))

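/* Model of a single MDIO-attached PHY: 32 registers (16-bit values kept in
 * 32-bit slots), a link-up flag, and read/write callbacks implementing the
 * device-specific behaviour. */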
struct PHY {
    uint32_t regs[32];

    int link;

    unsigned int (*read)(struct PHY *phy, unsigned int req);
    void (*write)(struct PHY *phy, unsigned int req,
                  unsigned int data);
};

static unsigned int tdk_read(struct PHY *phy, unsigned int req)
{
    int regnum;
    unsigned r = 0;

    regnum = req & 0x1f;

    switch (regnum) {
    case 1:
        if (!phy->link) {
            break;
        }
        /* MR1. */
        /* Speeds and modes. */
        r |= (1 << 13) | (1 << 14);
        r |= (1 << 11) | (1 << 12);
        r |= (1 << 5); /* Autoneg complete. */
        r |= (1 << 3); /* Autoneg able. */
        r |= (1 << 2); /* link. */
        r |= (1 << 1); /* link. */
        break;
    case 5:
        /* Link partner ability.
           We are kind; always agree with whatever best mode
           the guest advertises. */
        r = 1 << 14; /* Success. */
        /* Copy advertised modes. */
        r |= phy->regs[4] & (15 << 5);
        /* Autoneg support. */
        r |= 1;
        break;
    case 17:
        /* Marvell PHY on many xilinx boards. */
        r = 0x8000; /* 1000Mb */
        break;
    case 18:
        {
            /* Diagnostics reg. */
            int duplex = 0;
            int speed_100 = 0;

            if (!phy->link) {
                break;
            }

            /* Are we advertising 100 half or 100 full duplex? */
            speed_100 = !!(phy->regs[4] & ADVERTISE_100HALF);
            speed_100 |= !!(phy->regs[4] & ADVERTISE_100FULL);

            /* Are we advertising 10 full or 100 full duplex? */
            duplex = !!(phy->regs[4] & ADVERTISE_100FULL);
            duplex |= !!(phy->regs[4] & ADVERTISE_10FULL);
            r = (speed_100 << 10) | (duplex << 11);
        }
        break;

    default:
        r = phy->regs[regnum];
        break;
    }
    DPHY(qemu_log("\n%s %x = reg[%d]\n", __func__, r, regnum));
    return r;
}

static void
tdk_write(struct PHY *phy, unsigned int req, unsigned int data)
{
    int regnum;

    regnum = req & 0x1f;
    DPHY(qemu_log("%s reg[%d] = %x\n", __func__, regnum, data));
    switch (regnum) {
    default:
        phy->regs[regnum] = data;
        break;
    }

    /* Unconditionally clear regs[BMCR][BMCR_RESET] */
    phy->regs[0] &= ~0x8000;
}

static void
tdk_init(struct PHY *phy)
{
    phy->regs[0] = 0x3100;
    /* PHY Id. */
    phy->regs[2] = 0x0300;
    phy->regs[3] = 0xe400;
    /* Autonegotiation advertisement reg. */
    phy->regs[4] = 0x01E1;
    phy->link = 1;

    phy->read = tdk_read;
    phy->write = tdk_write;
}

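/* Minimal MDIO bus model: up to 32 PHYs indexed by their 5-bit address.
 * The bit-level decoder fields below (state, drive, cnt, ...) appear unused
 * by this device model, which issues whole-register transactions through
 * mdio_read_req()/mdio_write_req() instead. */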
struct MDIOBus {
    /* bus. */
    int mdc;
    int mdio;

    /* decoder. */
    enum {
        PREAMBLE,
        SOF,
        OPC,
        ADDR,
        REQ,
        TURNAROUND,
        DATA
    } state;
    unsigned int drive;

    unsigned int cnt;
    unsigned int addr;
    unsigned int opc;
    unsigned int req;
    unsigned int data;

    struct PHY *devs[32];
};

static void
mdio_attach(struct MDIOBus *bus, struct PHY *phy, unsigned int addr)
{
    bus->devs[addr & 0x1f] = phy;
}

#ifdef USE_THIS_DEAD_CODE
static void
mdio_detach(struct MDIOBus *bus, struct PHY *phy, unsigned int addr)
{
    bus->devs[addr & 0x1f] = NULL;
}
#endif

static uint16_t mdio_read_req(struct MDIOBus *bus, unsigned int addr,
                              unsigned int reg)
{
    struct PHY *phy;
    uint16_t data;

    phy = bus->devs[addr];
    if (phy && phy->read) {
        data = phy->read(phy, reg);
    } else {
        data = 0xffff;
    }
    DPHY(qemu_log("%s addr=%d reg=%d data=%x\n", __func__, addr, reg, data));
    return data;
}

static void mdio_write_req(struct MDIOBus *bus, unsigned int addr,
                           unsigned int reg, uint16_t data)
{
    struct PHY *phy;

    DPHY(qemu_log("%s addr=%d reg=%d data=%x\n", __func__, addr, reg, data));
    phy = bus->devs[addr];
    if (phy && phy->write) {
        phy->write(phy, reg, data);
    }
}

#define DENET(x)

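/* Register indices below are in 32-bit words, i.e. the byte offset
 * divided by four. */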
#define R_RAF      (0x000 / 4)
enum {
    RAF_MCAST_REJ = (1 << 1),
    RAF_BCAST_REJ = (1 << 2),
    RAF_EMCF_EN = (1 << 12),
    RAF_NEWFUNC_EN = (1 << 11)
};

#define R_IS       (0x00C / 4)
enum {
    IS_HARD_ACCESS_COMPLETE = 1,
    IS_AUTONEG = (1 << 1),
    IS_RX_COMPLETE = (1 << 2),
    IS_RX_REJECT = (1 << 3),
    IS_TX_COMPLETE = (1 << 5),
    IS_RX_DCM_LOCK = (1 << 6),
    IS_MGM_RDY = (1 << 7),
    IS_PHY_RST_DONE = (1 << 8),
};

#define R_IP       (0x010 / 4)
#define R_IE       (0x014 / 4)
#define R_UAWL     (0x020 / 4)
#define R_UAWU     (0x024 / 4)
#define R_PPST     (0x030 / 4)
enum {
    PPST_LINKSTATUS = (1 << 0),
    PPST_PHY_LINKSTATUS = (1 << 7),
};

#define R_STATS_RX_BYTESL   (0x200 / 4)
#define R_STATS_RX_BYTESH   (0x204 / 4)
#define R_STATS_TX_BYTESL   (0x208 / 4)
#define R_STATS_TX_BYTESH   (0x20C / 4)
#define R_STATS_RXL         (0x290 / 4)
#define R_STATS_RXH         (0x294 / 4)
#define R_STATS_RX_BCASTL   (0x2a0 / 4)
#define R_STATS_RX_BCASTH   (0x2a4 / 4)
#define R_STATS_RX_MCASTL   (0x2a8 / 4)
#define R_STATS_RX_MCASTH   (0x2ac / 4)

#define R_RCW0     (0x400 / 4)
#define R_RCW1     (0x404 / 4)
enum {
    RCW1_VLAN = (1 << 27),
    RCW1_RX = (1 << 28),
    RCW1_FCS = (1 << 29),
    RCW1_JUM = (1 << 30),
    RCW1_RST = (1 << 31),
};

#define R_TC       (0x408 / 4)
enum {
    TC_VLAN = (1 << 27),
    TC_TX = (1 << 28),
    TC_FCS = (1 << 29),
    TC_JUM = (1 << 30),
    TC_RST = (1 << 31),
};

#define R_EMMC     (0x410 / 4)
enum {
    EMMC_LINKSPEED_10MB = (0 << 30),
    EMMC_LINKSPEED_100MB = (1 << 30),
    EMMC_LINKSPEED_1000MB = (2 << 30),
};

#define R_PHYC     (0x414 / 4)

#define R_MC       (0x500 / 4)
#define MC_EN      (1 << 6)

#define R_MCR      (0x504 / 4)
#define R_MWD      (0x508 / 4)
#define R_MRD      (0x50c / 4)
#define R_MIS      (0x600 / 4)
#define R_MIP      (0x620 / 4)
#define R_MIE      (0x640 / 4)
#define R_MIC      (0x640 / 4)

#define R_UAW0     (0x700 / 4)
#define R_UAW1     (0x704 / 4)
#define R_FMI      (0x708 / 4)
#define R_AF0      (0x710 / 4)
#define R_AF1      (0x714 / 4)
#define R_MAX      (0x34 / 4)

/* Indirect registers. */
struct TEMAC {
    struct MDIOBus mdio_bus;
    struct PHY phy;

    void *parent;
};

typedef struct XilinxAXIEnetStreamSlave XilinxAXIEnetStreamSlave;
typedef struct XilinxAXIEnet XilinxAXIEnet;

struct XilinxAXIEnetStreamSlave {
    Object parent;

    struct XilinxAXIEnet *enet;
};

struct XilinxAXIEnet {
    SysBusDevice busdev;
    MemoryRegion iomem;
    qemu_irq irq;
    StreamSlave *tx_data_dev;
    StreamSlave *tx_control_dev;
    XilinxAXIEnetStreamSlave rx_data_dev;
    XilinxAXIEnetStreamSlave rx_control_dev;
    NICState *nic;
    NICConf conf;

    uint32_t c_rxmem;
    uint32_t c_txmem;
    uint32_t c_phyaddr;

    struct TEMAC TEMAC;

    /* MII regs. */
    union {
        uint32_t regs[4];
        struct {
            uint32_t mc;
            uint32_t mcr;
            uint32_t mwd;
            uint32_t mrd;
        };
    } mii;

    struct {
        uint64_t rx_bytes;
        uint64_t tx_bytes;

        uint64_t rx;
        uint64_t rx_bcast;
        uint64_t rx_mcast;
    } stats;

    /* Receive configuration words. */
    uint32_t rcw[2];
    /* Transmit config. */
    uint32_t tc;
    uint32_t emmc;
    uint32_t phyc;

    /* Unicast Address Word. */
    uint32_t uaw[2];
    /* Unicast address filter used with extended mcast. */
    uint32_t ext_uaw[2];
    uint32_t fmi;

    uint32_t regs[R_MAX];

    /* Multicast filter addrs. */
    uint32_t maddr[4][2];
    /* 32K x 1 lookup filter. */
    uint32_t ext_mtable[1024];

    uint32_t hdr[CONTROL_PAYLOAD_WORDS];

    uint8_t *rxmem;
    uint32_t rxsize;
    uint32_t rxpos;

    uint8_t rxapp[CONTROL_PAYLOAD_SIZE];
    uint32_t rxappsize;

    /* Whether axienet_eth_rx_notify should flush incoming queue. */
    bool need_flush;
};

static void axienet_rx_reset(XilinxAXIEnet *s)
{
    s->rcw[1] = RCW1_JUM | RCW1_FCS | RCW1_RX | RCW1_VLAN;
}

static void axienet_tx_reset(XilinxAXIEnet *s)
{
    s->tc = TC_JUM | TC_TX | TC_VLAN;
}

static inline int axienet_rx_resetting(XilinxAXIEnet *s)
{
    return s->rcw[1] & RCW1_RST;
}

static inline int axienet_rx_enabled(XilinxAXIEnet *s)
{
    return s->rcw[1] & RCW1_RX;
}

static inline int axienet_extmcf_enabled(XilinxAXIEnet *s)
{
    return !!(s->regs[R_RAF] & RAF_EMCF_EN);
}

static inline int axienet_newfunc_enabled(XilinxAXIEnet *s)
{
    return !!(s->regs[R_RAF] & RAF_NEWFUNC_EN);
}

static void xilinx_axienet_reset(DeviceState *d)
{
    XilinxAXIEnet *s = XILINX_AXI_ENET(d);

    axienet_rx_reset(s);
    axienet_tx_reset(s);

    s->regs[R_PPST] = PPST_LINKSTATUS | PPST_PHY_LINKSTATUS;
    s->regs[R_IS] = IS_AUTONEG | IS_RX_DCM_LOCK | IS_MGM_RDY | IS_PHY_RST_DONE;

    s->emmc = EMMC_LINKSPEED_100MB;
}

static void enet_update_irq(XilinxAXIEnet *s)
{
    s->regs[R_IP] = s->regs[R_IS] & s->regs[R_IE];
    qemu_set_irq(s->irq, !!s->regs[R_IP]);
}

static uint64_t enet_read(void *opaque, hwaddr addr, unsigned size)
{
    XilinxAXIEnet *s = opaque;
    uint32_t r = 0;
    addr >>= 2;

    switch (addr) {
    case R_RCW0:
    case R_RCW1:
        r = s->rcw[addr & 1];
        break;

    case R_TC:
        r = s->tc;
        break;

    case R_EMMC:
        r = s->emmc;
        break;

    case R_PHYC:
        r = s->phyc;
        break;

    case R_MCR:
        r = s->mii.regs[addr & 3] | (1 << 7); /* Always ready. */
        break;

    case R_STATS_RX_BYTESL:
    case R_STATS_RX_BYTESH:
        r = s->stats.rx_bytes >> (32 * (addr & 1));
        break;

    case R_STATS_TX_BYTESL:
    case R_STATS_TX_BYTESH:
        r = s->stats.tx_bytes >> (32 * (addr & 1));
        break;

    case R_STATS_RXL:
    case R_STATS_RXH:
        r = s->stats.rx >> (32 * (addr & 1));
        break;
    case R_STATS_RX_BCASTL:
    case R_STATS_RX_BCASTH:
        r = s->stats.rx_bcast >> (32 * (addr & 1));
        break;
    case R_STATS_RX_MCASTL:
    case R_STATS_RX_MCASTH:
        r = s->stats.rx_mcast >> (32 * (addr & 1));
        break;

    case R_MC:
    case R_MWD:
    case R_MRD:
        r = s->mii.regs[addr & 3];
        break;

    case R_UAW0:
    case R_UAW1:
        r = s->uaw[addr & 1];
        break;

    case R_UAWU:
    case R_UAWL:
        r = s->ext_uaw[addr & 1];
        break;

    case R_FMI:
        r = s->fmi;
        break;

    case R_AF0:
    case R_AF1:
        r = s->maddr[s->fmi & 3][addr & 1];
        break;

    case 0x8000 ... 0x83ff:
        r = s->ext_mtable[addr - 0x8000];
        break;

    default:
        if (addr < ARRAY_SIZE(s->regs)) {
            r = s->regs[addr];
        }
        DENET(qemu_log("%s addr=" TARGET_FMT_plx " v=%x\n",
                       __func__, addr * 4, r));
        break;
    }
    return r;
}

static void enet_write(void *opaque, hwaddr addr,
                       uint64_t value, unsigned size)
{
    XilinxAXIEnet *s = opaque;
    struct TEMAC *t = &s->TEMAC;

    addr >>= 2;
    switch (addr) {
    case R_RCW0:
    case R_RCW1:
        s->rcw[addr & 1] = value;
        if ((addr & 1) && value & RCW1_RST) {
            axienet_rx_reset(s);
        } else {
            qemu_flush_queued_packets(qemu_get_queue(s->nic));
        }
        break;

    case R_TC:
        s->tc = value;
        if (value & TC_RST) {
            axienet_tx_reset(s);
        }
        break;

    case R_EMMC:
        s->emmc = value;
        break;

    case R_PHYC:
        s->phyc = value;
        break;

    case R_MC:
        value &= ((1 << 7) - 1);

        /* Enable the MII. */
        if (value & MC_EN) {
            unsigned int miiclkdiv = value & ((1 << 6) - 1);
            if (!miiclkdiv) {
                qemu_log("AXIENET: MDIO enabled but MDIOCLK is zero!\n");
            }
        }
        s->mii.mc = value;
        break;

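    /*
     * MDIO control: the PHY address is in bits [28:24], the PHY register
     * in [20:16], the OP code in [15:14] (1 = write, 2 = read) and bit 11
     * initiates the transaction, as decoded below.
     */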
    case R_MCR: {
        unsigned int phyaddr = (value >> 24) & 0x1f;
        unsigned int regaddr = (value >> 16) & 0x1f;
        unsigned int op = (value >> 14) & 3;
        unsigned int initiate = (value >> 11) & 1;

        if (initiate) {
            if (op == 1) {
                mdio_write_req(&t->mdio_bus, phyaddr, regaddr, s->mii.mwd);
            } else if (op == 2) {
                s->mii.mrd = mdio_read_req(&t->mdio_bus, phyaddr, regaddr);
            } else {
                qemu_log("AXIENET: invalid MDIOBus OP=%d\n", op);
            }
        }
        s->mii.mcr = value;
        break;
    }

    case R_MWD:
    case R_MRD:
        s->mii.regs[addr & 3] = value;
        break;

    case R_UAW0:
    case R_UAW1:
        s->uaw[addr & 1] = value;
        break;

    case R_UAWL:
    case R_UAWU:
        s->ext_uaw[addr & 1] = value;
        break;

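    /*
     * FMI: bits [1:0] select which of the four multicast filter address
     * pairs is accessed through R_AF0/R_AF1; this model also treats bit 31
     * as the promiscuous-mode enable (see eth_rx()).
     */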
    case R_FMI:
        s->fmi = value;
        break;

    case R_AF0:
    case R_AF1:
        s->maddr[s->fmi & 3][addr & 1] = value;
        break;

    case R_IS:
        s->regs[addr] &= ~value;
        break;

    case 0x8000 ... 0x83ff:
        s->ext_mtable[addr - 0x8000] = value;
        break;

    default:
        DENET(qemu_log("%s addr=" TARGET_FMT_plx " v=%x\n",
                       __func__, addr * 4, (unsigned)value));
        if (addr < ARRAY_SIZE(s->regs)) {
            s->regs[addr] = value;
        }
        break;
    }
    enet_update_irq(s);
}

static const MemoryRegionOps enet_ops = {
    .read = enet_read,
    .write = enet_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static int eth_can_rx(XilinxAXIEnet *s)
{
    /* RX enabled? */
    return !s->rxsize && !axienet_rx_resetting(s) && axienet_rx_enabled(s);
}

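/* Compare a frame's destination MAC against a filter address packed into two
 * words: f0 holds bytes 0-3 and the low 16 bits of f1 hold bytes 4-5. */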
static int enet_match_addr(const uint8_t *buf, uint32_t f0, uint32_t f1)
{
    int match = 1;

    if (memcmp(buf, &f0, 4)) {
        match = 0;
    }

    if (buf[4] != (f1 & 0xff) || buf[5] != ((f1 >> 8) & 0xff)) {
        match = 0;
    }

    return match;
}

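/* Push any pending receive status (app) words and then the frame data to the
 * attached streams. stream_can_push() re-arms this function as a callback, so
 * delivery resumes automatically once the consumer has room again. */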
static void axienet_eth_rx_notify(void *opaque)
{
    XilinxAXIEnet *s = XILINX_AXI_ENET(opaque);

    while (s->rxappsize && stream_can_push(s->tx_control_dev,
                                           axienet_eth_rx_notify, s)) {
        size_t ret = stream_push(s->tx_control_dev,
                                 (void *)s->rxapp + CONTROL_PAYLOAD_SIZE
                                 - s->rxappsize, s->rxappsize);
        s->rxappsize -= ret;
    }

    while (s->rxsize && stream_can_push(s->tx_data_dev,
                                        axienet_eth_rx_notify, s)) {
        size_t ret = stream_push(s->tx_data_dev, (void *)s->rxmem + s->rxpos,
                                 s->rxsize);
        s->rxsize -= ret;
        s->rxpos += ret;
        if (!s->rxsize) {
            s->regs[R_IS] |= IS_RX_COMPLETE;
            if (s->need_flush) {
                s->need_flush = false;
                qemu_flush_queued_packets(qemu_get_queue(s->nic));
            }
        }
    }
    enet_update_irq(s);
}

static ssize_t eth_rx(NetClientState *nc, const uint8_t *buf, size_t size)
{
    XilinxAXIEnet *s = qemu_get_nic_opaque(nc);
    static const unsigned char sa_bcast[6] = {0xff, 0xff, 0xff,
                                              0xff, 0xff, 0xff};
    static const unsigned char sa_ipmcast[3] = {0x01, 0x00, 0x52};
    uint32_t app[CONTROL_PAYLOAD_WORDS] = {0};
    int promisc = s->fmi & (1 << 31);
    int unicast, broadcast, multicast, ip_multicast = 0;
    uint32_t csum32;
    uint16_t csum16;
    int i;

    DENET(qemu_log("%s: %zd bytes\n", __func__, size));

    if (!eth_can_rx(s)) {
        s->need_flush = true;
        return 0;
    }

    unicast = ~buf[0] & 0x1;
    broadcast = memcmp(buf, sa_bcast, 6) == 0;
    multicast = !unicast && !broadcast;
    if (multicast && (memcmp(sa_ipmcast, buf, sizeof sa_ipmcast) == 0)) {
        ip_multicast = 1;
    }

    /* Jumbo or vlan sizes ? */
    if (!(s->rcw[1] & RCW1_JUM)) {
        if (size > 1518 && size <= 1522 && !(s->rcw[1] & RCW1_VLAN)) {
            return size;
        }
    }

    /* Basic address filters. If you want to use the extended filters you
       generally have to place the Ethernet MAC into promiscuous mode to keep
       the basic filtering from dropping most frames. */
    if (!promisc) {
        if (unicast) {
            if (!enet_match_addr(buf, s->uaw[0], s->uaw[1])) {
                return size;
            }
        } else {
            if (broadcast) {
                /* Broadcast. */
                if (s->regs[R_RAF] & RAF_BCAST_REJ) {
                    return size;
                }
            } else {
                int drop = 1;

                /* Multicast. */
                if (s->regs[R_RAF] & RAF_MCAST_REJ) {
                    return size;
                }

                for (i = 0; i < 4; i++) {
                    if (enet_match_addr(buf, s->maddr[i][0], s->maddr[i][1])) {
                        drop = 0;
                        break;
                    }
                }

                if (drop) {
                    return size;
                }
            }
        }
    }

    /* Extended mcast filtering enabled? */
    if (axienet_newfunc_enabled(s) && axienet_extmcf_enabled(s)) {
        if (unicast) {
            if (!enet_match_addr(buf, s->ext_uaw[0], s->ext_uaw[1])) {
                return size;
            }
        } else {
            if (broadcast) {
                /* Broadcast. ??? */
                if (s->regs[R_RAF] & RAF_BCAST_REJ) {
                    return size;
                }
            } else {
                int idx, bit;

                /* Multicast. */
                if (!memcmp(buf, sa_ipmcast, 3)) {
                    return size;
                }

                idx = (buf[4] & 0x7f) << 8;
                idx |= buf[5];

                bit = 1 << (idx & 0x1f);
                idx >>= 5;

                if (!(s->ext_mtable[idx] & bit)) {
                    return size;
                }
            }
        }
    }

    if (size < 12) {
        s->regs[R_IS] |= IS_RX_REJECT;
        enet_update_irq(s);
        return -1;
    }

    if (size > (s->c_rxmem - 4)) {
        size = s->c_rxmem - 4;
    }

    memcpy(s->rxmem, buf, size);
    memset(s->rxmem + size, 0, 4); /* Clear the FCS. */

    if (s->rcw[1] & RCW1_FCS) {
        size += 4; /* fcs is inband. */
    }

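    /*
     * Build the five receive status (app) words that accompany the frame on
     * the control stream: app[2] collects frame-type and "good frame" flags,
     * app[3] carries the raw 16-bit checksum over the payload and app[4]
     * the received byte count.
     */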
    app[0] = 5 << 28;
    csum32 = net_checksum_add(size - 14, (uint8_t *)s->rxmem + 14);
    /* Fold it once. */
    csum32 = (csum32 & 0xffff) + (csum32 >> 16);
    /* And twice to get rid of possible carries. */
    csum16 = (csum32 & 0xffff) + (csum32 >> 16);
    app[3] = csum16;
    app[4] = size & 0xffff;

    s->stats.rx_bytes += size;
    s->stats.rx++;
    if (multicast) {
        s->stats.rx_mcast++;
        app[2] |= 1 | (ip_multicast << 1);
    } else if (broadcast) {
        s->stats.rx_bcast++;
        app[2] |= 1 << 3;
    }

    /* Good frame. */
    app[2] |= 1 << 6;

    s->rxsize = size;
    s->rxpos = 0;
    for (i = 0; i < ARRAY_SIZE(app); ++i) {
        app[i] = cpu_to_le32(app[i]);
    }
    s->rxappsize = CONTROL_PAYLOAD_SIZE;
    memcpy(s->rxapp, app, s->rxappsize);
    axienet_eth_rx_notify(s);

    enet_update_irq(s);
    return size;
}

static size_t
xilinx_axienet_control_stream_push(StreamSlave *obj, uint8_t *buf, size_t len)
{
    int i;
    XilinxAXIEnetStreamSlave *cs = XILINX_AXI_ENET_CONTROL_STREAM(obj);
    XilinxAXIEnet *s = cs->enet;

    if (len != CONTROL_PAYLOAD_SIZE) {
        hw_error("AXI Enet requires %d byte control stream payload\n",
                 (int)CONTROL_PAYLOAD_SIZE);
    }

    memcpy(s->hdr, buf, len);

    for (i = 0; i < ARRAY_SIZE(s->hdr); ++i) {
        s->hdr[i] = le32_to_cpu(s->hdr[i]);
    }
    return len;
}

static size_t
xilinx_axienet_data_stream_push(StreamSlave *obj, uint8_t *buf, size_t size)
{
    XilinxAXIEnetStreamSlave *ds = XILINX_AXI_ENET_DATA_STREAM(obj);
    XilinxAXIEnet *s = ds->enet;

    /* TX enable ? */
    if (!(s->tc & TC_TX)) {
        return size;
    }

    /* Jumbo or vlan sizes ? */
    if (!(s->tc & TC_JUM)) {
        if (size > 1518 && size <= 1522 && !(s->tc & TC_VLAN)) {
            return size;
        }
    }

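    /*
     * Partial TX checksum offload, driven by the app words received on the
     * control stream: bit 0 of hdr[0] enables it, the high/low halves of
     * hdr[1] give the start and insert offsets, and the low 16 bits of
     * hdr[2] seed the sum.
     */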
    if (s->hdr[0] & 1) {
        unsigned int start_off = s->hdr[1] >> 16;
        unsigned int write_off = s->hdr[1] & 0xffff;
        uint32_t tmp_csum;
        uint16_t csum;

        tmp_csum = net_checksum_add(size - start_off,
                                    (uint8_t *)buf + start_off);
        /* Accumulate the seed. */
        tmp_csum += s->hdr[2] & 0xffff;

        /* Fold the 32bit partial checksum. */
        csum = net_checksum_finish(tmp_csum);

        /* Writeback. */
        buf[write_off] = csum >> 8;
        buf[write_off + 1] = csum & 0xff;
    }

    qemu_send_packet(qemu_get_queue(s->nic), buf, size);

    s->stats.tx_bytes += size;
    s->regs[R_IS] |= IS_TX_COMPLETE;
    enet_update_irq(s);

    return size;
}

static NetClientInfo net_xilinx_enet_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .receive = eth_rx,
};

static void xilinx_enet_realize(DeviceState *dev, Error **errp)
{
    XilinxAXIEnet *s = XILINX_AXI_ENET(dev);
    XilinxAXIEnetStreamSlave *ds = XILINX_AXI_ENET_DATA_STREAM(&s->rx_data_dev);
    XilinxAXIEnetStreamSlave *cs = XILINX_AXI_ENET_CONTROL_STREAM(
                                                            &s->rx_control_dev);
    Error *local_err = NULL;

    object_property_add_link(OBJECT(ds), "enet", "xlnx.axi-ethernet",
                             (Object **) &ds->enet,
                             object_property_allow_set_link,
                             OBJ_PROP_LINK_STRONG,
                             &local_err);
    object_property_add_link(OBJECT(cs), "enet", "xlnx.axi-ethernet",
                             (Object **) &cs->enet,
                             object_property_allow_set_link,
                             OBJ_PROP_LINK_STRONG,
                             &local_err);
    if (local_err) {
        goto xilinx_enet_realize_fail;
    }
    object_property_set_link(OBJECT(ds), OBJECT(s), "enet", &local_err);
    object_property_set_link(OBJECT(cs), OBJECT(s), "enet", &local_err);
    if (local_err) {
        goto xilinx_enet_realize_fail;
    }

    qemu_macaddr_default_if_unset(&s->conf.macaddr);
    s->nic = qemu_new_nic(&net_xilinx_enet_info, &s->conf,
                          object_get_typename(OBJECT(dev)), dev->id, s);
    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);

    tdk_init(&s->TEMAC.phy);
    mdio_attach(&s->TEMAC.mdio_bus, &s->TEMAC.phy, s->c_phyaddr);

    s->TEMAC.parent = s;

    s->rxmem = g_malloc(s->c_rxmem);
    return;

xilinx_enet_realize_fail:
    error_propagate(errp, local_err);
}

static void xilinx_enet_init(Object *obj)
{
    XilinxAXIEnet *s = XILINX_AXI_ENET(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    object_initialize_child(OBJECT(s), "axistream-connected-target",
                            &s->rx_data_dev, sizeof(s->rx_data_dev),
                            TYPE_XILINX_AXI_ENET_DATA_STREAM, &error_abort,
                            NULL);
    object_initialize_child(OBJECT(s), "axistream-control-connected-target",
                            &s->rx_control_dev, sizeof(s->rx_control_dev),
                            TYPE_XILINX_AXI_ENET_CONTROL_STREAM, &error_abort,
                            NULL);
    sysbus_init_irq(sbd, &s->irq);

    memory_region_init_io(&s->iomem, OBJECT(s), &enet_ops, s, "enet", 0x40000);
    sysbus_init_mmio(sbd, &s->iomem);
}

static Property xilinx_enet_properties[] = {
    DEFINE_PROP_UINT32("phyaddr", XilinxAXIEnet, c_phyaddr, 7),
    DEFINE_PROP_UINT32("rxmem", XilinxAXIEnet, c_rxmem, 0x1000),
    DEFINE_PROP_UINT32("txmem", XilinxAXIEnet, c_txmem, 0x1000),
    DEFINE_NIC_PROPERTIES(XilinxAXIEnet, conf),
    DEFINE_PROP_LINK("axistream-connected", XilinxAXIEnet,
                     tx_data_dev, TYPE_STREAM_SLAVE, StreamSlave *),
    DEFINE_PROP_LINK("axistream-control-connected", XilinxAXIEnet,
                     tx_control_dev, TYPE_STREAM_SLAVE, StreamSlave *),
    DEFINE_PROP_END_OF_LIST(),
};

static void xilinx_enet_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = xilinx_enet_realize;
    dc->props = xilinx_enet_properties;
    dc->reset = xilinx_axienet_reset;
}

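/* Both stream slave types share this class_init; the per-type push handler
 * is supplied through the TypeInfo's .class_data pointer. */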
static void xilinx_enet_stream_class_init(ObjectClass *klass, void *data)
{
    StreamSlaveClass *ssc = STREAM_SLAVE_CLASS(klass);

    ssc->push = data;
}

static const TypeInfo xilinx_enet_info = {
    .name          = TYPE_XILINX_AXI_ENET,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XilinxAXIEnet),
    .class_init    = xilinx_enet_class_init,
    .instance_init = xilinx_enet_init,
};

static const TypeInfo xilinx_enet_data_stream_info = {
    .name          = TYPE_XILINX_AXI_ENET_DATA_STREAM,
    .parent        = TYPE_OBJECT,
    .instance_size = sizeof(struct XilinxAXIEnetStreamSlave),
    .class_init    = xilinx_enet_stream_class_init,
    .class_data    = xilinx_axienet_data_stream_push,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_STREAM_SLAVE },
        { }
    }
};

static const TypeInfo xilinx_enet_control_stream_info = {
    .name          = TYPE_XILINX_AXI_ENET_CONTROL_STREAM,
    .parent        = TYPE_OBJECT,
    .instance_size = sizeof(struct XilinxAXIEnetStreamSlave),
    .class_init    = xilinx_enet_stream_class_init,
    .class_data    = xilinx_axienet_control_stream_push,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_STREAM_SLAVE },
        { }
    }
};

static void xilinx_enet_register_types(void)
{
    type_register_static(&xilinx_enet_info);
    type_register_static(&xilinx_enet_data_stream_info);
    type_register_static(&xilinx_enet_control_stream_info);
}

type_init(xilinx_enet_register_types)