/*
 * ColdFire Fast Ethernet Controller emulation.
 * Copyright (c) 2007 CodeSourcery.
 * This code is licensed under the GPL
 */
#include "qemu/osdep.h"
#include "qemu/module.h"
#include "hw/hw.h"
#include "hw/irq.h"
#include "hw/m68k/mcf.h"
#include "hw/m68k/mcf_fec.h"
#include "hw/net/mii.h"
#include "hw/sysbus.h"
#include "net/net.h"
/* For crc32 */
#include <zlib.h>
#ifdef DEBUG_FEC
#define DPRINTF(fmt, ...) \
do { printf("mcf_fec: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while(0)
#endif
#define FEC_MAX_DESC 1024
#define FEC_MAX_FRAME_SIZE 2032
#define FEC_MIB_SIZE 64
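/*
 * FEC_MAX_DESC bounds the number of buffer descriptors walked in a single
 * transmit pass, FEC_MAX_FRAME_SIZE is the largest frame this model will
 * assemble or accept, and FEC_MIB_SIZE is the number of 32-bit MIB
 * statistics counters (the 0x200-0x2ff register block).
 */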
    SysBusDevice parent_obj;

    qemu_irq irq[FEC_NUM_IRQ];
    uint32_t rx_descriptor;
    uint32_t tx_descriptor;
    uint32_t mib[FEC_MIB_SIZE];
#define FEC_INT_HB   0x80000000
#define FEC_INT_BABR 0x40000000
#define FEC_INT_BABT 0x20000000
#define FEC_INT_GRA  0x10000000
#define FEC_INT_TXF  0x08000000
#define FEC_INT_TXB  0x04000000
#define FEC_INT_RXF  0x02000000
#define FEC_INT_RXB  0x01000000
#define FEC_INT_MII  0x00800000
#define FEC_INT_EB   0x00400000
#define FEC_INT_LC   0x00200000
#define FEC_INT_RL   0x00100000
#define FEC_INT_UN   0x00080000
/* Map interrupt flags onto IRQ lines.  */
static const uint32_t mcf_fec_irq_map[FEC_NUM_IRQ] = {
/* Buffer Descriptor.  */
typedef struct {
    uint16_t flags;
    uint16_t length;
    uint32_t data;
} mcf_fec_bd;

#define FEC_BD_R    0x8000
#define FEC_BD_E    0x8000
#define FEC_BD_O1   0x4000
#define FEC_BD_W    0x2000
#define FEC_BD_O2   0x1000
#define FEC_BD_L    0x0800
#define FEC_BD_TC   0x0400
#define FEC_BD_ABC  0x0200
#define FEC_BD_M    0x0100
#define FEC_BD_BC   0x0080
#define FEC_BD_MC   0x0040
#define FEC_BD_LG   0x0020
#define FEC_BD_NO   0x0010
#define FEC_BD_CR   0x0004
#define FEC_BD_OV   0x0002
#define FEC_BD_TR   0x0001
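/*
 * Indices into the MIB counter array below; they follow the layout of the
 * RMON/IEEE statistics registers exposed at offsets 0x200-0x2ff.
 */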
#define MIB_RMON_T_DROP         0
#define MIB_RMON_T_PACKETS      1
#define MIB_RMON_T_BC_PKT       2
#define MIB_RMON_T_MC_PKT       3
#define MIB_RMON_T_CRC_ALIGN    4
#define MIB_RMON_T_UNDERSIZE    5
#define MIB_RMON_T_OVERSIZE     6
#define MIB_RMON_T_FRAG         7
#define MIB_RMON_T_JAB          8
#define MIB_RMON_T_COL          9
#define MIB_RMON_T_P64          10
#define MIB_RMON_T_P65TO127     11
#define MIB_RMON_T_P128TO255    12
#define MIB_RMON_T_P256TO511    13
#define MIB_RMON_T_P512TO1023   14
#define MIB_RMON_T_P1024TO2047  15
#define MIB_RMON_T_P_GTE2048    16
#define MIB_RMON_T_OCTETS       17
#define MIB_IEEE_T_DROP         18
#define MIB_IEEE_T_FRAME_OK     19
#define MIB_IEEE_T_1COL         20
#define MIB_IEEE_T_MCOL         21
#define MIB_IEEE_T_DEF          22
#define MIB_IEEE_T_LCOL         23
#define MIB_IEEE_T_EXCOL        24
#define MIB_IEEE_T_MACERR       25
#define MIB_IEEE_T_CSERR        26
#define MIB_IEEE_T_SQE          27
#define MIB_IEEE_T_FDXFC        28
#define MIB_IEEE_T_OCTETS_OK    29

#define MIB_RMON_R_DROP         32
#define MIB_RMON_R_PACKETS      33
#define MIB_RMON_R_BC_PKT       34
#define MIB_RMON_R_MC_PKT       35
#define MIB_RMON_R_CRC_ALIGN    36
#define MIB_RMON_R_UNDERSIZE    37
#define MIB_RMON_R_OVERSIZE     38
#define MIB_RMON_R_FRAG         39
#define MIB_RMON_R_JAB          40
#define MIB_RMON_R_RESVD_0      41
#define MIB_RMON_R_P64          42
#define MIB_RMON_R_P65TO127     43
#define MIB_RMON_R_P128TO255    44
#define MIB_RMON_R_P256TO511    45
#define MIB_RMON_R_P512TO1023   46
#define MIB_RMON_R_P1024TO2047  47
#define MIB_RMON_R_P_GTE2048    48
#define MIB_RMON_R_OCTETS       49
#define MIB_IEEE_R_DROP         50
#define MIB_IEEE_R_FRAME_OK     51
#define MIB_IEEE_R_CRC          52
#define MIB_IEEE_R_ALIGN        53
#define MIB_IEEE_R_MACERR       54
#define MIB_IEEE_R_FDXFC        55
#define MIB_IEEE_R_OCTETS_OK    56
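/*
 * Buffer descriptors live in guest memory and are stored big-endian; the
 * helpers below byte-swap them as they are read and written with
 * cpu_physical_memory_read()/write().
 */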
static void mcf_fec_read_bd(mcf_fec_bd *bd, uint32_t addr)
{
    cpu_physical_memory_read(addr, bd, sizeof(*bd));
    be16_to_cpus(&bd->flags);
    be16_to_cpus(&bd->length);
    be32_to_cpus(&bd->data);
}
static void mcf_fec_write_bd(mcf_fec_bd *bd, uint32_t addr)
{
    mcf_fec_bd tmp;
    tmp.flags = cpu_to_be16(bd->flags);
    tmp.length = cpu_to_be16(bd->length);
    tmp.data = cpu_to_be32(bd->data);
    cpu_physical_memory_write(addr, &tmp, sizeof(tmp));
}
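/*
 * Recompute the interrupt outputs: an IRQ line is asserted while any of
 * the event bits mapped to it is both set in EIR and unmasked in EIMR.
 */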
static void mcf_fec_update(mcf_fec_state *s)
{
    uint32_t active, changed, mask;
    int i;

    active = s->eir & s->eimr;
    changed = active ^ s->irq_state;
    for (i = 0; i < FEC_NUM_IRQ; i++) {
        mask = mcf_fec_irq_map[i];
        if (changed & mask) {
            DPRINTF("IRQ %d = %d\n", i, (active & mask) != 0);
            qemu_set_irq(s->irq[i], (active & mask) != 0);
        }
    }
    s->irq_state = active;
}
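/* Account a transmitted frame in the RMON and IEEE MIB counters. */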
static void mcf_fec_tx_stats(mcf_fec_state *s, int size)
{
    s->mib[MIB_RMON_T_PACKETS]++;
    s->mib[MIB_RMON_T_OCTETS] += size;
    if (size < 64) {
        s->mib[MIB_RMON_T_FRAG]++;
    } else if (size == 64) {
        s->mib[MIB_RMON_T_P64]++;
    } else if (size < 128) {
        s->mib[MIB_RMON_T_P65TO127]++;
    } else if (size < 256) {
        s->mib[MIB_RMON_T_P128TO255]++;
    } else if (size < 512) {
        s->mib[MIB_RMON_T_P256TO511]++;
    } else if (size < 1024) {
        s->mib[MIB_RMON_T_P512TO1023]++;
    } else if (size < 2048) {
        s->mib[MIB_RMON_T_P1024TO2047]++;
    } else {
        s->mib[MIB_RMON_T_P_GTE2048]++;
    }
    s->mib[MIB_IEEE_T_FRAME_OK]++;
    s->mib[MIB_IEEE_T_OCTETS_OK] += size;
}
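/*
 * Walk the transmit descriptor ring, gathering buffers into a frame until
 * a descriptor with the L (last) bit is reached, then hand the assembled
 * frame to the network backend.
 */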
static void mcf_fec_do_tx(mcf_fec_state *s)
{
    uint32_t addr;
    mcf_fec_bd bd;
    int frame_size;
    int len, descnt = 0;
    uint8_t frame[FEC_MAX_FRAME_SIZE];
    uint8_t *ptr;

    ptr = frame;
    frame_size = 0;
    addr = s->tx_descriptor;
    while (descnt++ < FEC_MAX_DESC) {
        mcf_fec_read_bd(&bd, addr);
        DPRINTF("tx_bd %x flags %04x len %d data %08x\n",
                addr, bd.flags, bd.length, bd.data);
        if ((bd.flags & FEC_BD_R) == 0) {
            /* Run out of descriptors to transmit.  */
            break;
        }
        len = bd.length;
        if (frame_size + len > FEC_MAX_FRAME_SIZE) {
            len = FEC_MAX_FRAME_SIZE - frame_size;
            s->eir |= FEC_INT_BABT;
        }
        cpu_physical_memory_read(bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & FEC_BD_L) {
            /* Last buffer in frame.  */
            DPRINTF("Sending packet\n");
            qemu_send_packet(qemu_get_queue(s->nic), frame, frame_size);
            mcf_fec_tx_stats(s, frame_size);
            ptr = frame;
            frame_size = 0;
            s->eir |= FEC_INT_TXF;
        }
        s->eir |= FEC_INT_TXB;
        bd.flags &= ~FEC_BD_R;
        /* Write back the modified descriptor.  */
        mcf_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor.  */
        if ((bd.flags & FEC_BD_W) != 0) {
            addr = s->etdsr;
        } else {
            addr += 8;
        }
    }
    s->tx_descriptor = addr;
}
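/*
 * Reception is only possible while the descriptor at rx_descriptor is
 * empty (owned by the controller); if it is, tell the net layer to flush
 * any queued packets.
 */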
static void mcf_fec_enable_rx(mcf_fec_state *s)
{
    NetClientState *nc = qemu_get_queue(s->nic);
    mcf_fec_bd bd;
    mcf_fec_read_bd(&bd, s->rx_descriptor);
    s->rx_enabled = ((bd.flags & FEC_BD_E) != 0);
    if (s->rx_enabled) {
        qemu_flush_queued_packets(nc);
    }
}
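/* Reset the controller to its power-on register state. */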
static void mcf_fec_reset(DeviceState *dev)
{
    mcf_fec_state *s = MCF_FEC_NET(dev);
#define MMFR_WRITE_OP  (1 << 28)
#define MMFR_READ_OP   (2 << 28)
#define MMFR_PHYADDR(v) (((v) >> 23) & 0x1f)
#define MMFR_REGNUM(v)  (((v) >> 18) & 0x1f)
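/*
 * MDIO reads are answered by a single built-in PHY model at address 1:
 * it reports 100/10 Mbit full/half duplex capability, autonegotiation
 * complete and link up. Reads of any other PHY address return all ones.
 */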
static uint64_t mcf_fec_read_mdio(mcf_fec_state *s)
{
    uint64_t v;
    if (s->mmfr & MMFR_WRITE_OP)
        return s->mmfr;
    if (MMFR_PHYADDR(s->mmfr) != 1)
        return s->mmfr |= 0xffff;
    switch (MMFR_REGNUM(s->mmfr)) {
    case MII_BMCR:
        v = MII_BMCR_SPEED | MII_BMCR_AUTOEN | MII_BMCR_FD;
        break;
    case MII_BMSR:
        v = MII_BMSR_100TX_FD | MII_BMSR_100TX_HD | MII_BMSR_10T_FD |
            MII_BMSR_10T_HD | MII_BMSR_MFPS | MII_BMSR_AN_COMP |
            MII_BMSR_AUTONEG | MII_BMSR_LINK_ST;
        break;
    case MII_ANAR:
        v = MII_ANAR_TXFD | MII_ANAR_TX | MII_ANAR_10FD |
            MII_ANAR_10 | MII_ANAR_CSMACD;
        break;
    case MII_ANLPAR:
        v = MII_ANLPAR_ACK | MII_ANLPAR_TXFD | MII_ANLPAR_TX |
            MII_ANLPAR_10FD | MII_ANLPAR_10 | MII_ANLPAR_CSMACD;
        break;
    }
    s->mmfr = (s->mmfr & ~0xffff) | v;
    return s->mmfr;
}
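/*
 * Register read dispatcher; offsets are relative to the FEC register
 * window. Accesses outside the implemented set trigger hw_error().
 */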
static uint64_t mcf_fec_read(void *opaque, hwaddr addr,
                             unsigned size)
{
    mcf_fec_state *s = (mcf_fec_state *)opaque;
    switch (addr & 0x3ff) {
    case 0x004: return s->eir;
    case 0x008: return s->eimr;
    case 0x010: return s->rx_enabled ? (1 << 24) : 0; /* RDAR */
    case 0x014: return 0; /* TDAR */
    case 0x024: return s->ecr;
    case 0x040: return mcf_fec_read_mdio(s);
    case 0x044: return s->mscr;
    case 0x064: return 0; /* MIBC */
    case 0x084: return s->rcr;
    case 0x0c4: return s->tcr;
    case 0x0e4: /* PALR */
        return (s->conf.macaddr.a[0] << 24) | (s->conf.macaddr.a[1] << 16)
              | (s->conf.macaddr.a[2] << 8) | s->conf.macaddr.a[3];
    case 0x0e8: /* PAUR */
        return (s->conf.macaddr.a[4] << 24) | (s->conf.macaddr.a[5] << 16) | 0x8808;
    case 0x0ec: return 0x10000; /* OPD */
    case 0x118: return 0;
    case 0x11c: return 0;
    case 0x120: return 0;
    case 0x124: return 0;
    case 0x144: return s->tfwr;
    case 0x14c: return 0x600;
    case 0x150: return s->rfsr;
    case 0x180: return s->erdsr;
    case 0x184: return s->etdsr;
    case 0x188: return s->emrbr;
    case 0x200 ... 0x2e0: return s->mib[(addr & 0x1ff) / 4];
    default:
        hw_error("mcf_fec_read: Bad address 0x%x\n", (int)addr);
        return 0;
    }
}
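/*
 * Register write dispatcher. Writes update the shadowed register state;
 * some have side effects, e.g. RDAR re-enables reception and the RESET
 * bit of ECR resets the whole controller.
 */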
static void mcf_fec_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned size)
{
    mcf_fec_state *s = (mcf_fec_state *)opaque;
    switch (addr & 0x3ff) {
    case 0x010: /* RDAR */
        if ((s->ecr & FEC_EN) && !s->rx_enabled) {
            DPRINTF("RX enable\n");
            mcf_fec_enable_rx(s);
    case 0x014: /* TDAR */
        if (s->ecr & FEC_EN) {
        if (value & FEC_RESET) {
            mcf_fec_reset(opaque);
        if ((s->ecr & FEC_EN) == 0) {
        s->eir |= FEC_INT_MII;
        s->mscr = value & 0xfe;
        /* TODO: Implement MIB.  */
        s->rcr = value & 0x07ff003f;
        /* TODO: Implement LOOP mode.  */
    case 0x0c4: /* TCR */
        /* We transmit immediately, so raise GRA immediately.  */
        s->eir |= FEC_INT_GRA;
    case 0x0e4: /* PALR */
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
    case 0x0e8: /* PAUR */
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        /* TODO: implement MAC hash filtering.  */
        /* FRBR writes ignored.  */
        s->rfsr = (value & 0x3fc) | 0x400;
        s->erdsr = value & ~3;
        s->rx_descriptor = s->erdsr;
        s->etdsr = value & ~3;
        s->tx_descriptor = s->etdsr;
        s->emrbr = value > 0 ? value & 0x7F0 : 0x7F0;
    case 0x200 ... 0x2e0:
        s->mib[(addr & 0x1ff) / 4] = value;
    default:
        hw_error("mcf_fec_write Bad address 0x%x\n", (int)addr);
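/* Account a received frame in the RMON and IEEE MIB counters. */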
static void mcf_fec_rx_stats(mcf_fec_state *s, int size)
{
    s->mib[MIB_RMON_R_PACKETS]++;
    s->mib[MIB_RMON_R_OCTETS] += size;
    if (size < 64) {
        s->mib[MIB_RMON_R_FRAG]++;
    } else if (size == 64) {
        s->mib[MIB_RMON_R_P64]++;
    } else if (size < 128) {
        s->mib[MIB_RMON_R_P65TO127]++;
    } else if (size < 256) {
        s->mib[MIB_RMON_R_P128TO255]++;
    } else if (size < 512) {
        s->mib[MIB_RMON_R_P256TO511]++;
    } else if (size < 1024) {
        s->mib[MIB_RMON_R_P512TO1023]++;
    } else if (size < 2048) {
        s->mib[MIB_RMON_R_P1024TO2047]++;
    } else {
        s->mib[MIB_RMON_R_P_GTE2048]++;
    }
    s->mib[MIB_IEEE_R_FRAME_OK]++;
    s->mib[MIB_IEEE_R_OCTETS_OK] += size;
}
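/*
 * Check whether the chain of empty receive descriptors starting at
 * rx_descriptor offers at least 'want' bytes of buffer space.
 */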
static int mcf_fec_have_receive_space(mcf_fec_state *s, size_t want)
{
    /* Walk descriptor list to determine if we have enough buffer */
    addr = s->rx_descriptor;
    mcf_fec_read_bd(&bd, addr);
    if ((bd.flags & FEC_BD_E) == 0) {
    if (want < s->emrbr) {
    /* Advance to the next descriptor. */
    if ((bd.flags & FEC_BD_W) != 0) {
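/*
 * Receive handler invoked by the net core: copy the frame, followed by
 * its 4-byte CRC, into the guest buffers described by the RX ring,
 * setting error flags for oversized frames, and raise RXB/RXF events.
 */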
static ssize_t mcf_fec_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    mcf_fec_state *s = qemu_get_nic_opaque(nc);
    unsigned int buf_len;

    DPRINTF("do_rx len %zu\n", size);
    if (!s->rx_enabled) {
        return -1;
    }
    /* 4 bytes for the CRC.  */
    crc = cpu_to_be32(crc32(~0, buf, size));
    crc_ptr = (uint8_t *)&crc;
    /* Huge frames are truncated.  */
    if (size > FEC_MAX_FRAME_SIZE) {
        size = FEC_MAX_FRAME_SIZE;
        flags |= FEC_BD_TR | FEC_BD_LG;
    }
    /* Frames larger than the user limit just set error flags.  */
    if (size > (s->rcr >> 16)) {
        flags |= FEC_BD_LG;
    }
    /* Check if we have enough space in current descriptors */
    if (!mcf_fec_have_receive_space(s, size)) {
        return 0;
    }
    addr = s->rx_descriptor;
    mcf_fec_read_bd(&bd, addr);
    buf_len = (size <= s->emrbr) ? size : s->emrbr;
    DPRINTF("rx_bd %x length %d\n", addr, bd.length);
    /* The last 4 bytes are the CRC.  */
    cpu_physical_memory_write(buf_addr, buf, buf_len);
    cpu_physical_memory_write(buf_addr + buf_len, crc_ptr, 4 - size);
    bd.flags &= ~FEC_BD_E;
    if (size == 0) {
        /* Last buffer in frame.  */
        bd.flags |= flags | FEC_BD_L;
        DPRINTF("rx frame flags %04x\n", bd.flags);
        s->eir |= FEC_INT_RXF;
    } else {
        s->eir |= FEC_INT_RXB;
    }
    mcf_fec_write_bd(&bd, addr);
    /* Advance to the next descriptor.  */
    if ((bd.flags & FEC_BD_W) != 0) {
        addr = s->erdsr;
    } else {
        addr += 8;
    }
    s->rx_descriptor = addr;
    mcf_fec_rx_stats(s, retsize);
    mcf_fec_enable_rx(s);
    return retsize;
}
static const MemoryRegionOps mcf_fec_ops = {
    .read = mcf_fec_read,
    .write = mcf_fec_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static NetClientInfo net_mcf_fec_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .receive = mcf_fec_receive,
};
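/* Create the NIC backend and attach it to this device instance. */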
static void mcf_fec_realize(DeviceState *dev, Error **errp)
{
    mcf_fec_state *s = MCF_FEC_NET(dev);

    s->nic = qemu_new_nic(&net_mcf_fec_info, &s->conf,
                          object_get_typename(OBJECT(dev)), dev->id, s);
    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}
static void mcf_fec_instance_init(Object *obj)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
    mcf_fec_state *s = MCF_FEC_NET(obj);
    int i;

    memory_region_init_io(&s->iomem, obj, &mcf_fec_ops, s, "fec", 0x400);
    sysbus_init_mmio(sbd, &s->iomem);
    for (i = 0; i < FEC_NUM_IRQ; i++) {
        sysbus_init_irq(sbd, &s->irq[i]);
    }
}
static Property mcf_fec_properties[] = {
    DEFINE_NIC_PROPERTIES(mcf_fec_state, conf),
    DEFINE_PROP_END_OF_LIST(),
};
static void mcf_fec_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    dc->realize = mcf_fec_realize;
    dc->desc = "MCF Fast Ethernet Controller network device";
    dc->reset = mcf_fec_reset;
    dc->props = mcf_fec_properties;
}
static const TypeInfo mcf_fec_info = {
    .name = TYPE_MCF_FEC_NET,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(mcf_fec_state),
    .instance_init = mcf_fec_instance_init,
    .class_init = mcf_fec_class_init,
};
static void mcf_fec_register_types(void)
{
    type_register_static(&mcf_fec_info);
}

type_init(mcf_fec_register_types)