/*
 * ColdFire Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2007 CodeSourcery.
 *
 * This code is licensed under the GPL
 */
#include "hw/hw.h"
#include "net/net.h"
#include "hw/m68k/mcf.h"
#include "hw/net/mii.h"
/* For crc32 */
#include <zlib.h>
#include "exec/address-spaces.h"

//#define DEBUG_FEC 1

#ifdef DEBUG_FEC
#define DPRINTF(fmt, ...) \
do { printf("mcf_fec: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while(0)
#endif

#define FEC_MAX_FRAME_SIZE 2032

typedef struct {
    MemoryRegion *sysmem;
    MemoryRegion iomem;
    qemu_irq *irq;
    NICState *nic;
    NICConf conf;
    uint32_t irq_state;
    uint32_t eir;
    uint32_t eimr;
    int rx_enabled;
    uint32_t rx_descriptor;
    uint32_t tx_descriptor;
    uint32_t ecr;
    uint32_t mmfr;
    uint32_t mscr;
    uint32_t rcr;
    uint32_t tcr;
    uint32_t tfwr;
    uint32_t rfsr;
    uint32_t erdsr;
    uint32_t etdsr;
    uint32_t emrbr;
} mcf_fec_state;

#define FEC_INT_HB   0x80000000
#define FEC_INT_BABR 0x40000000
#define FEC_INT_BABT 0x20000000
#define FEC_INT_GRA  0x10000000
#define FEC_INT_TXF  0x08000000
#define FEC_INT_TXB  0x04000000
#define FEC_INT_RXF  0x02000000
#define FEC_INT_RXB  0x01000000
#define FEC_INT_MII  0x00800000
#define FEC_INT_EB   0x00400000
#define FEC_INT_LC   0x00200000
#define FEC_INT_RL   0x00100000
#define FEC_INT_UN   0x00080000

#define FEC_EN 2
#define FEC_RESET 1

/* Map interrupt flags onto IRQ lines. */
#define FEC_NUM_IRQ 13
static const uint32_t mcf_fec_irq_map[FEC_NUM_IRQ] = {
    FEC_INT_TXF,
    FEC_INT_TXB,
    FEC_INT_UN,
    FEC_INT_RL,
    FEC_INT_RXF,
    FEC_INT_RXB,
    FEC_INT_MII,
    FEC_INT_LC,
    FEC_INT_HB,
    FEC_INT_GRA,
    FEC_INT_EB,
    FEC_INT_BABT,
    FEC_INT_BABR
};

/* Buffer Descriptor. */
typedef struct {
    uint16_t flags;
    uint16_t length;
    uint32_t data;
} mcf_fec_bd;

#define FEC_BD_R    0x8000
#define FEC_BD_E    0x8000
#define FEC_BD_O1   0x4000
#define FEC_BD_W    0x2000
#define FEC_BD_O2   0x1000
#define FEC_BD_L    0x0800
#define FEC_BD_TC   0x0400
#define FEC_BD_ABC  0x0200
#define FEC_BD_M    0x0100
#define FEC_BD_BC   0x0080
#define FEC_BD_MC   0x0040
#define FEC_BD_LG   0x0020
#define FEC_BD_NO   0x0010
#define FEC_BD_CR   0x0004
#define FEC_BD_OV   0x0002
#define FEC_BD_TR   0x0001

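/*
 * Descriptors are stored in guest memory in big-endian byte order; these
 * helpers convert between that layout and the host-order mcf_fec_bd struct.
 */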
static void mcf_fec_read_bd(mcf_fec_bd *bd, uint32_t addr)
{
    cpu_physical_memory_read(addr, bd, sizeof(*bd));
    be16_to_cpus(&bd->flags);
    be16_to_cpus(&bd->length);
    be32_to_cpus(&bd->data);
}

static void mcf_fec_write_bd(mcf_fec_bd *bd, uint32_t addr)
{
    mcf_fec_bd tmp;
    tmp.flags = cpu_to_be16(bd->flags);
    tmp.length = cpu_to_be16(bd->length);
    tmp.data = cpu_to_be32(bd->data);
    cpu_physical_memory_write(addr, &tmp, sizeof(tmp));
}

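/*
 * Recompute the interrupt outputs: a line is asserted when its event bit is
 * both pending (EIR) and unmasked (EIMR), and only lines whose state has
 * actually changed are toggled.
 */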
static void mcf_fec_update(mcf_fec_state *s)
{
    uint32_t active;
    uint32_t changed;
    uint32_t mask;
    int i;

    active = s->eir & s->eimr;
    changed = active ^ s->irq_state;
    for (i = 0; i < FEC_NUM_IRQ; i++) {
        mask = mcf_fec_irq_map[i];
        if (changed & mask) {
            DPRINTF("IRQ %d = %d\n", i, (active & mask) != 0);
            qemu_set_irq(s->irq[i], (active & mask) != 0);
        }
    }
    s->irq_state = active;
}

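/*
 * Walk the transmit descriptor ring, gathering buffers into a single frame
 * and handing it to the network backend when a descriptor marked "last in
 * frame" is reached.  Each descriptor is returned to the guest by clearing
 * its ready bit.
 */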
static void mcf_fec_do_tx(mcf_fec_state *s)
{
    uint32_t addr;
    mcf_fec_bd bd;
    int frame_size;
    int len;
    uint8_t frame[FEC_MAX_FRAME_SIZE];
    uint8_t *ptr;

    DPRINTF("do_tx\n");
    ptr = frame;
    frame_size = 0;
    addr = s->tx_descriptor;
    while (1) {
        mcf_fec_read_bd(&bd, addr);
        DPRINTF("tx_bd %x flags %04x len %d data %08x\n",
                addr, bd.flags, bd.length, bd.data);
        if ((bd.flags & FEC_BD_R) == 0) {
            /* Run out of descriptors to transmit. */
            break;
        }
        len = bd.length;
        if (frame_size + len > FEC_MAX_FRAME_SIZE) {
            len = FEC_MAX_FRAME_SIZE - frame_size;
            s->eir |= FEC_INT_BABT;
        }
        cpu_physical_memory_read(bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & FEC_BD_L) {
            /* Last buffer in frame.  Send the whole accumulated frame. */
            DPRINTF("Sending packet\n");
            qemu_send_packet(qemu_get_queue(s->nic), frame, frame_size);
            ptr = frame;
            frame_size = 0;
            s->eir |= FEC_INT_TXF;
        }
        s->eir |= FEC_INT_TXB;
        bd.flags &= ~FEC_BD_R;
        /* Write back the modified descriptor. */
        mcf_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & FEC_BD_W) != 0) {
            addr = s->etdsr;
        } else {
            addr += 8;
        }
    }
    s->tx_descriptor = addr;
}

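/*
 * Peek at the current receive descriptor: reception is only possible while
 * that descriptor is empty (still owned by the FEC).
 */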
static void mcf_fec_enable_rx(mcf_fec_state *s)
{
    mcf_fec_bd bd;

    mcf_fec_read_bd(&bd, s->rx_descriptor);
    s->rx_enabled = ((bd.flags & FEC_BD_E) != 0);
    if (!s->rx_enabled)
        DPRINTF("RX buffer full\n");
}

static void mcf_fec_reset(mcf_fec_state *s)
{
    s->eir = 0;
    s->eimr = 0;
    s->rx_enabled = 0;
    s->ecr = 0;
    s->mscr = 0;
    s->rcr = 0x05ee0001;
    s->tcr = 0;
    s->tfwr = 0;
    s->rfsr = 0x500;
}

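/*
 * MII management (MDIO) emulation: reads of the MMFR register return canned
 * values for a single PHY at address 1, modelled on a DP83848 with the link
 * up and 100 Mbit full duplex autonegotiated.  Any other PHY address or an
 * unknown register reads back as all-ones in the data field.
 */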
#define MMFR_WRITE_OP (1 << 28)
#define MMFR_READ_OP  (2 << 28)
#define MMFR_PHYADDR(v) (((v) >> 23) & 0x1f)
#define MMFR_REGNUM(v) (((v) >> 18) & 0x1f)

static uint64_t mcf_fec_read_mdio(mcf_fec_state *s)
{
    uint64_t v;

    if (s->mmfr & MMFR_WRITE_OP)
        return s->mmfr;
    if (MMFR_PHYADDR(s->mmfr) != 1)
        return s->mmfr |= 0xffff;

    switch (MMFR_REGNUM(s->mmfr)) {
    case MII_BMCR:
        v = MII_BMCR_SPEED | MII_BMCR_AUTOEN | MII_BMCR_FD;
        break;
    case MII_BMSR:
        v = MII_BMSR_100TX_FD | MII_BMSR_100TX_HD | MII_BMSR_10T_FD |
            MII_BMSR_10T_HD | MII_BMSR_MFPS | MII_BMSR_AN_COMP |
            MII_BMSR_AUTONEG | MII_BMSR_LINK_ST;
        break;
    case MII_PHYID1:
        v = DP83848_PHYID1;
        break;
    case MII_PHYID2:
        v = DP83848_PHYID2;
        break;
    case MII_ANAR:
        v = MII_ANAR_TXFD | MII_ANAR_TX | MII_ANAR_10FD |
            MII_ANAR_10 | MII_ANAR_CSMACD;
        break;
    case MII_ANLPAR:
        v = MII_ANLPAR_ACK | MII_ANLPAR_TXFD | MII_ANLPAR_TX |
            MII_ANLPAR_10FD | MII_ANLPAR_10 | MII_ANLPAR_CSMACD;
        break;
    default:
        v = 0xffff;
        break;
    }
    s->mmfr = (s->mmfr & ~0xffff) | v;
    return s->mmfr;
}

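/*
 * Register reads.  Offsets are relative to the 0x400-byte register window;
 * unimplemented counters and FIFO registers read back as constants or zero.
 */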
static uint64_t mcf_fec_read(void *opaque, hwaddr addr,
                             unsigned size)
{
    mcf_fec_state *s = (mcf_fec_state *)opaque;
    switch (addr & 0x3ff) {
    case 0x004: return s->eir;
    case 0x008: return s->eimr;
    case 0x010: return s->rx_enabled ? (1 << 24) : 0; /* RDAR */
    case 0x014: return 0; /* TDAR */
    case 0x024: return s->ecr;
    case 0x040: return mcf_fec_read_mdio(s);
    case 0x044: return s->mscr;
    case 0x064: return 0; /* MIBC */
    case 0x084: return s->rcr;
    case 0x0c4: return s->tcr;
    case 0x0e4: /* PALR */
        return (s->conf.macaddr.a[0] << 24) | (s->conf.macaddr.a[1] << 16)
               | (s->conf.macaddr.a[2] << 8) | s->conf.macaddr.a[3];
    case 0x0e8: /* PAUR */
        return (s->conf.macaddr.a[4] << 24) | (s->conf.macaddr.a[5] << 16) | 0x8808;
    case 0x0ec: return 0x10000; /* OPD */
    case 0x118: return 0;
    case 0x11c: return 0;
    case 0x120: return 0;
    case 0x124: return 0;
    case 0x144: return s->tfwr;
    case 0x14c: return 0x600;
    case 0x150: return s->rfsr;
    case 0x180: return s->erdsr;
    case 0x184: return s->etdsr;
    case 0x188: return s->emrbr;
    default:
        hw_error("mcf_fec_read: Bad address 0x%x\n", (int)addr);
        return 0;
    }
}

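/*
 * Register writes.  Most writes simply latch the value; RDAR and TDAR act as
 * doorbells that kick the receive and transmit engines, and ECR handles soft
 * reset and enable.  Interrupt lines are re-evaluated after every write.
 */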
static void mcf_fec_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned size)
{
    mcf_fec_state *s = (mcf_fec_state *)opaque;
    switch (addr & 0x3ff) {
    case 0x004:
        s->eir &= ~value;
        break;
    case 0x008:
        s->eimr = value;
        break;
    case 0x010: /* RDAR */
        if ((s->ecr & FEC_EN) && !s->rx_enabled) {
            DPRINTF("RX enable\n");
            mcf_fec_enable_rx(s);
        }
        break;
    case 0x014: /* TDAR */
        if (s->ecr & FEC_EN) {
            mcf_fec_do_tx(s);
        }
        break;
    case 0x024:
        s->ecr = value;
        if (value & FEC_RESET) {
            DPRINTF("Reset\n");
            mcf_fec_reset(s);
        }
        if ((s->ecr & FEC_EN) == 0) {
            s->rx_enabled = 0;
        }
        break;
    case 0x040:
        s->mmfr = value;
        s->eir |= FEC_INT_MII;
        break;
    case 0x044:
        s->mscr = value & 0xfe;
        break;
    case 0x064:
        /* TODO: Implement MIB. */
        break;
    case 0x084:
        s->rcr = value & 0x07ff003f;
        /* TODO: Implement LOOP mode. */
        break;
    case 0x0c4: /* TCR */
        /* We transmit immediately, so raise GRA immediately. */
        s->tcr = value;
        if (value & 1)
            s->eir |= FEC_INT_GRA;
        break;
    case 0x0e4: /* PALR */
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
        break;
    case 0x0e8: /* PAUR */
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        break;
    case 0x0ec:
        /* OPD */
        break;
    case 0x118:
    case 0x11c:
    case 0x120:
    case 0x124:
        /* TODO: implement MAC hash filtering. */
        break;
    case 0x144:
        s->tfwr = value & 3;
        break;
    case 0x14c:
        /* FRBR writes ignored. */
        break;
    case 0x150:
        s->rfsr = (value & 0x3fc) | 0x400;
        break;
    case 0x180:
        s->erdsr = value & ~3;
        s->rx_descriptor = s->erdsr;
        break;
    case 0x184:
        s->etdsr = value & ~3;
        s->tx_descriptor = s->etdsr;
        break;
    case 0x188:
        s->emrbr = value & 0x7f0;
        break;
    default:
        hw_error("mcf_fec_write Bad address 0x%x\n", (int)addr);
    }
    mcf_fec_update(s);
}

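/*
 * The net layer polls this before delivering a packet; we accept packets
 * only while the current receive descriptor is empty.
 */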
static int mcf_fec_can_receive(NetClientState *nc)
{
    mcf_fec_state *s = qemu_get_nic_opaque(nc);
    return s->rx_enabled;
}

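/*
 * Deliver a packet from the network backend to the guest: append a 4-byte
 * FCS, truncate oversized frames, then scatter the data across receive
 * descriptors of at most EMRBR bytes each, flagging the last descriptor of
 * the frame and raising the RXB/RXF interrupts.
 */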
static ssize_t mcf_fec_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    mcf_fec_state *s = qemu_get_nic_opaque(nc);
    mcf_fec_bd bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t retsize;

    DPRINTF("do_rx len %zu\n", size);
    if (!s->rx_enabled) {
        fprintf(stderr, "mcf_fec_receive: Unexpected packet\n");
    }
    crc = cpu_to_be32(crc32(~0, buf, size));
    crc_ptr = (uint8_t *)&crc;
    /* 4 bytes for the CRC. */
    size += 4;
    /* Huge frames are truncated. */
    if (size > FEC_MAX_FRAME_SIZE) {
        size = FEC_MAX_FRAME_SIZE;
        flags |= FEC_BD_TR | FEC_BD_LG;
    }
    /* Frames larger than the user limit just set error flags. */
    if (size > (s->rcr >> 16)) {
        flags |= FEC_BD_LG;
    }
    addr = s->rx_descriptor;
    retsize = size;
    while (size > 0) {
        mcf_fec_read_bd(&bd, addr);
        if ((bd.flags & FEC_BD_E) == 0) {
            /* No descriptors available. Bail out. */
            /* FIXME: This is wrong. We should probably either save the
               remainder for when more RX buffers are available, or
               flag an error. */
            fprintf(stderr, "mcf_fec: Lost end of frame\n");
            break;
        }
        buf_len = (size <= s->emrbr) ? size : s->emrbr;
        bd.length = buf_len;
        size -= buf_len;
        DPRINTF("rx_bd %x length %d\n", addr, bd.length);
        /* The last 4 bytes are the CRC. */
        if (size < 4)
            buf_len += size - 4;
        buf_addr = bd.data;
        cpu_physical_memory_write(buf_addr, buf, buf_len);
        buf += buf_len;
        if (size < 4) {
            cpu_physical_memory_write(buf_addr + buf_len, crc_ptr, 4 - size);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~FEC_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | FEC_BD_L;
            DPRINTF("rx frame flags %04x\n", bd.flags);
            s->eir |= FEC_INT_RXF;
        } else {
            s->eir |= FEC_INT_RXB;
        }
        mcf_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & FEC_BD_W) != 0) {
            addr = s->erdsr;
        } else {
            addr += 8;
        }
    }
    s->rx_descriptor = addr;
    mcf_fec_enable_rx(s);
    mcf_fec_update(s);
    return retsize;
}

static const MemoryRegionOps mcf_fec_ops = {
    .read = mcf_fec_read,
    .write = mcf_fec_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static NetClientInfo net_mcf_fec_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = mcf_fec_can_receive,
    .receive = mcf_fec_receive,
};

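/*
 * Create the FEC, map its 0x400-byte register window at 'base' in 'sysmem',
 * and wire its FEC_NUM_IRQ interrupt outputs to the 'irq' array.  A board
 * model would typically call this from its init code, along the lines of
 * (base address and IRQ wiring are illustrative, not taken from a specific
 * board):
 *
 *     if (nd_table[0].used) {
 *         mcf_fec_init(address_space_mem, &nd_table[0],
 *                      0xfc030000, pic + 36);
 *     }
 */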
void mcf_fec_init(MemoryRegion *sysmem, NICInfo *nd,
                  hwaddr base, qemu_irq *irq)
{
    mcf_fec_state *s;

    qemu_check_nic_model(nd, "mcf_fec");

    s = (mcf_fec_state *)g_malloc0(sizeof(mcf_fec_state));
    s->sysmem = sysmem;
    s->irq = irq;

    memory_region_init_io(&s->iomem, NULL, &mcf_fec_ops, s, "fec", 0x400);
    memory_region_add_subregion(sysmem, base, &s->iomem);

    s->conf.macaddr = nd->macaddr;
    s->conf.peers.ncs[0] = nd->netdev;

    s->nic = qemu_new_nic(&net_mcf_fec_info, &s->conf, nd->model, nd->name, s);

    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}