]> Git Repo - qemu.git/blame - hw/net/sunhme.c
Include hw/hw.h exactly where needed
[qemu.git] / hw / net / sunhme.c
CommitLineData
c110425d
MCA
1/*
2 * QEMU Sun Happy Meal Ethernet emulation
3 *
4 * Copyright (c) 2017 Mark Cave-Ayland
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25#include "qemu/osdep.h"
c110425d 26#include "hw/pci/pci.h"
d6454270 27#include "migration/vmstate.h"
c110425d
MCA
28#include "hw/net/mii.h"
29#include "net/net.h"
0b8fa32f 30#include "qemu/module.h"
c110425d
MCA
31#include "net/checksum.h"
32#include "net/eth.h"
33#include "sysemu/sysemu.h"
34#include "trace.h"
35
36#define HME_REG_SIZE 0x8000
37
38#define HME_SEB_REG_SIZE 0x2000
39
40#define HME_SEBI_RESET 0x0
41#define HME_SEB_RESET_ETX 0x1
42#define HME_SEB_RESET_ERX 0x2
43
44#define HME_SEBI_STAT 0x100
45#define HME_SEBI_STAT_LINUXBUG 0x108
46#define HME_SEB_STAT_RXTOHOST 0x10000
09340f49 47#define HME_SEB_STAT_NORXD 0x20000
c110425d
MCA
48#define HME_SEB_STAT_MIFIRQ 0x800000
49#define HME_SEB_STAT_HOSTTOTX 0x1000000
50#define HME_SEB_STAT_TXALL 0x2000000
51
52#define HME_SEBI_IMASK 0x104
53#define HME_SEBI_IMASK_LINUXBUG 0x10c
54
55#define HME_ETX_REG_SIZE 0x2000
56
57#define HME_ETXI_PENDING 0x0
58
59#define HME_ETXI_RING 0x8
60#define HME_ETXI_RING_ADDR 0xffffff00
61#define HME_ETXI_RING_OFFSET 0xff
62
63#define HME_ETXI_RSIZE 0x2c
64
65#define HME_ERX_REG_SIZE 0x2000
66
67#define HME_ERXI_CFG 0x0
68#define HME_ERX_CFG_RINGSIZE 0x600
69#define HME_ERX_CFG_RINGSIZE_SHIFT 9
70#define HME_ERX_CFG_BYTEOFFSET 0x38
71#define HME_ERX_CFG_BYTEOFFSET_SHIFT 3
72#define HME_ERX_CFG_CSUMSTART 0x7f0000
73#define HME_ERX_CFG_CSUMSHIFT 16
74
75#define HME_ERXI_RING 0x4
76#define HME_ERXI_RING_ADDR 0xffffff00
77#define HME_ERXI_RING_OFFSET 0xff
78
79#define HME_MAC_REG_SIZE 0x1000
80
81#define HME_MACI_TXCFG 0x20c
82#define HME_MAC_TXCFG_ENABLE 0x1
83
84#define HME_MACI_RXCFG 0x30c
85#define HME_MAC_RXCFG_ENABLE 0x1
86#define HME_MAC_RXCFG_PMISC 0x40
87#define HME_MAC_RXCFG_HENABLE 0x800
88
89#define HME_MACI_MACADDR2 0x318
90#define HME_MACI_MACADDR1 0x31c
91#define HME_MACI_MACADDR0 0x320
92
93#define HME_MACI_HASHTAB3 0x340
94#define HME_MACI_HASHTAB2 0x344
95#define HME_MACI_HASHTAB1 0x348
96#define HME_MACI_HASHTAB0 0x34c
97
98#define HME_MIF_REG_SIZE 0x20
99
100#define HME_MIFI_FO 0xc
101#define HME_MIF_FO_ST 0xc0000000
102#define HME_MIF_FO_ST_SHIFT 30
103#define HME_MIF_FO_OPC 0x30000000
104#define HME_MIF_FO_OPC_SHIFT 28
105#define HME_MIF_FO_PHYAD 0x0f800000
106#define HME_MIF_FO_PHYAD_SHIFT 23
107#define HME_MIF_FO_REGAD 0x007c0000
108#define HME_MIF_FO_REGAD_SHIFT 18
109#define HME_MIF_FO_TAMSB 0x20000
110#define HME_MIF_FO_TALSB 0x10000
111#define HME_MIF_FO_DATA 0xffff
112
113#define HME_MIFI_CFG 0x10
114#define HME_MIF_CFG_MDI0 0x100
115#define HME_MIF_CFG_MDI1 0x200
116
117#define HME_MIFI_IMASK 0x14
118
119#define HME_MIFI_STAT 0x18
120
121
122/* Wired HME PHY addresses */
123#define HME_PHYAD_INTERNAL 1
124#define HME_PHYAD_EXTERNAL 0
125
126#define MII_COMMAND_START 0x1
127#define MII_COMMAND_READ 0x2
128#define MII_COMMAND_WRITE 0x1
129
130#define TYPE_SUNHME "sunhme"
131#define SUNHME(obj) OBJECT_CHECK(SunHMEState, (obj), TYPE_SUNHME)
132
133/* Maximum size of buffer */
134#define HME_FIFO_SIZE 0x800
135
136/* Size of TX/RX descriptor */
137#define HME_DESC_SIZE 0x8
138
139#define HME_XD_OWN 0x80000000
140#define HME_XD_OFL 0x40000000
141#define HME_XD_SOP 0x40000000
142#define HME_XD_EOP 0x20000000
143#define HME_XD_RXLENMSK 0x3fff0000
144#define HME_XD_RXLENSHIFT 16
145#define HME_XD_RXCKSUM 0xffff
146#define HME_XD_TXLENMSK 0x00001fff
147#define HME_XD_TXCKSUM 0x10000000
148#define HME_XD_TXCSSTUFF 0xff00000
149#define HME_XD_TXCSSTUFFSHIFT 20
150#define HME_XD_TXCSSTART 0xfc000
151#define HME_XD_TXCSSTARTSHIFT 14
152
153#define HME_MII_REGS_SIZE 0x20
154
/*
 * Device state for the Sun Happy Meal Ethernet (HME) PCI NIC.
 * All five register banks are kept as raw 32-bit word arrays and are
 * migrated wholesale by vmstate_hme, so the layout must stay stable.
 */
typedef struct SunHMEState {
    /*< private >*/
    PCIDevice parent_obj;

    NICState *nic;       /* network backend attachment */
    NICConf conf;        /* MAC address / netdev configuration */

    /* BAR0 container plus one subregion per register bank */
    MemoryRegion hme;
    MemoryRegion sebreg;
    MemoryRegion etxreg;
    MemoryRegion erxreg;
    MemoryRegion macreg;
    MemoryRegion mifreg;

    /* Register banks, indexed by (byte offset >> 2) */
    uint32_t sebregs[HME_SEB_REG_SIZE >> 2];
    uint32_t etxregs[HME_ETX_REG_SIZE >> 2];
    uint32_t erxregs[HME_ERX_REG_SIZE >> 2];
    uint32_t macregs[HME_MAC_REG_SIZE >> 2];
    uint32_t mifregs[HME_MIF_REG_SIZE >> 2];

    /* Internal PHY registers, indexed directly by MII register number */
    uint16_t miiregs[HME_MII_REGS_SIZE];
} SunHMEState;
177
/* User-settable properties: the standard NIC set for the embedded NICConf */
static Property sunhme_properties[] = {
    DEFINE_NIC_PROPERTIES(SunHMEState, conf),
    DEFINE_PROP_END_OF_LIST(),
};
182
183static void sunhme_reset_tx(SunHMEState *s)
184{
185 /* Indicate TX reset complete */
186 s->sebregs[HME_SEBI_RESET] &= ~HME_SEB_RESET_ETX;
187}
188
189static void sunhme_reset_rx(SunHMEState *s)
190{
191 /* Indicate RX reset complete */
192 s->sebregs[HME_SEBI_RESET] &= ~HME_SEB_RESET_ERX;
193}
194
/*
 * Recompute the PCI interrupt level from the SEB and MIF status registers
 * and their interrupt masks.  Any unmasked MIF status bit is folded into
 * the SEB status as HME_SEB_STAT_MIFIRQ before the final level decision.
 */
static void sunhme_update_irq(SunHMEState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    int level;

    /* MIF interrupt mask (16-bit): a set IMASK bit disables that source */
    uint32_t mifmask = ~(s->mifregs[HME_MIFI_IMASK >> 2]) & 0xffff;
    uint32_t mif = s->mifregs[HME_MIFI_STAT >> 2] & mifmask;

    /* Main SEB interrupt mask (MIFIRQ always masked here; folded in below) */
    uint32_t sebmask = ~(s->sebregs[HME_SEBI_IMASK >> 2]) &
                       ~HME_SEB_STAT_MIFIRQ;
    uint32_t seb = s->sebregs[HME_SEBI_STAT >> 2] & sebmask;
    if (mif) {
        seb |= HME_SEB_STAT_MIFIRQ;
    }

    level = (seb ? 1 : 0);

    trace_sunhme_update_irq(mifmask, mif, sebmask, seb, level);

    pci_set_irq(d, level);
}
217
/*
 * MMIO write handler for the SEB (global control/status) register bank.
 * Redirects the offsets used by buggy old Linux drivers, then processes
 * self-clearing reset requests before storing the register value.
 */
static void sunhme_seb_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);

    trace_sunhme_seb_write(addr, val);

    /* Handle buggy Linux drivers before 4.13 which have
       the wrong offsets for HME_SEBI_STAT and HME_SEBI_IMASK */
    switch (addr) {
    case HME_SEBI_STAT_LINUXBUG:
        addr = HME_SEBI_STAT;
        break;
    case HME_SEBI_IMASK_LINUXBUG:
        addr = HME_SEBI_IMASK;
        break;
    default:
        break;
    }

    switch (addr) {
    case HME_SEBI_RESET:
        if (val & HME_SEB_RESET_ETX) {
            sunhme_reset_tx(s);
        }
        if (val & HME_SEB_RESET_ERX) {
            sunhme_reset_rx(s);
        }
        /* Store the post-reset register contents rather than the written
           value, so the reset request bits read back as already cleared */
        val = s->sebregs[HME_SEBI_RESET >> 2];
        break;
    }

    s->sebregs[addr >> 2] = val;
}
252
/*
 * MMIO read handler for the SEB register bank.  Reading the status
 * register acknowledges (clears) all pending sources except MIFIRQ,
 * which is owned by the MIF status register instead.
 */
static uint64_t sunhme_seb_read(void *opaque, hwaddr addr,
                                unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    /* Handle buggy Linux drivers before 4.13 which have
       the wrong offsets for HME_SEBI_STAT and HME_SEBI_IMASK */
    switch (addr) {
    case HME_SEBI_STAT_LINUXBUG:
        addr = HME_SEBI_STAT;
        break;
    case HME_SEBI_IMASK_LINUXBUG:
        addr = HME_SEBI_IMASK;
        break;
    default:
        break;
    }

    val = s->sebregs[addr >> 2];

    switch (addr) {
    case HME_SEBI_STAT:
        /* Autoclear status (except MIF) and re-evaluate the IRQ line */
        s->sebregs[HME_SEBI_STAT >> 2] &= HME_SEB_STAT_MIFIRQ;
        sunhme_update_irq(s);
        break;
    }

    trace_sunhme_seb_read(addr, val);

    return val;
}
286
/* SEB register bank: 32-bit little-endian accesses only */
static const MemoryRegionOps sunhme_seb_ops = {
    .read = sunhme_seb_read,
    .write = sunhme_seb_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
296
297static void sunhme_transmit(SunHMEState *s);
298
299static void sunhme_etx_write(void *opaque, hwaddr addr,
300 uint64_t val, unsigned size)
301{
302 SunHMEState *s = SUNHME(opaque);
303
304 trace_sunhme_etx_write(addr, val);
305
306 switch (addr) {
307 case HME_ETXI_PENDING:
308 if (val) {
309 sunhme_transmit(s);
310 }
311 break;
312 }
313
314 s->etxregs[addr >> 2] = val;
315}
316
317static uint64_t sunhme_etx_read(void *opaque, hwaddr addr,
318 unsigned size)
319{
320 SunHMEState *s = SUNHME(opaque);
321 uint64_t val;
322
323 val = s->etxregs[addr >> 2];
324
325 trace_sunhme_etx_read(addr, val);
326
327 return val;
328}
329
/* ETX register bank: 32-bit little-endian accesses only */
static const MemoryRegionOps sunhme_etx_ops = {
    .read = sunhme_etx_read,
    .write = sunhme_etx_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
339
340static void sunhme_erx_write(void *opaque, hwaddr addr,
341 uint64_t val, unsigned size)
342{
343 SunHMEState *s = SUNHME(opaque);
344
345 trace_sunhme_erx_write(addr, val);
346
347 s->erxregs[addr >> 2] = val;
348}
349
350static uint64_t sunhme_erx_read(void *opaque, hwaddr addr,
351 unsigned size)
352{
353 SunHMEState *s = SUNHME(opaque);
354 uint64_t val;
355
356 val = s->erxregs[addr >> 2];
357
358 trace_sunhme_erx_read(addr, val);
359
360 return val;
361}
362
/* ERX register bank: 32-bit little-endian accesses only */
static const MemoryRegionOps sunhme_erx_ops = {
    .read = sunhme_erx_read,
    .write = sunhme_erx_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
372
/*
 * MMIO write handler for the MAC register bank.  When the RX enable bit
 * transitions from clear to set, packets queued by the backend while the
 * receiver was disabled are flushed through.
 */
static void sunhme_mac_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    /* Previous value, needed to detect the 0 -> 1 RX enable edge below */
    uint64_t oldval = s->macregs[addr >> 2];

    trace_sunhme_mac_write(addr, val);

    s->macregs[addr >> 2] = val;

    switch (addr) {
    case HME_MACI_RXCFG:
        if (!(oldval & HME_MAC_RXCFG_ENABLE) &&
            (val & HME_MAC_RXCFG_ENABLE)) {
            qemu_flush_queued_packets(qemu_get_queue(s->nic));
        }
        break;
    }
}
392
393static uint64_t sunhme_mac_read(void *opaque, hwaddr addr,
394 unsigned size)
395{
396 SunHMEState *s = SUNHME(opaque);
397 uint64_t val;
398
399 val = s->macregs[addr >> 2];
400
401 trace_sunhme_mac_read(addr, val);
402
403 return val;
404}
405
/* MAC register bank: 32-bit little-endian accesses only */
static const MemoryRegionOps sunhme_mac_ops = {
    .read = sunhme_mac_read,
    .write = sunhme_mac_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
415
/*
 * Write an internal PHY (MII) register.  Models control-register side
 * effects: a reset request self-clears and re-enables autonegotiation,
 * and an autoneg restart completes immediately, reporting link up when
 * the backend link is up.
 */
static void sunhme_mii_write(SunHMEState *s, uint8_t reg, uint16_t data)
{
    trace_sunhme_mii_write(reg, data);

    switch (reg) {
    case MII_BMCR:
        if (data & MII_BMCR_RESET) {
            /* Autoclear reset bit, enable auto negotiation */
            data &= ~MII_BMCR_RESET;
            data |= MII_BMCR_AUTOEN;
        }
        if (data & MII_BMCR_ANRESTART) {
            /* Autoclear auto negotiation restart */
            data &= ~MII_BMCR_ANRESTART;

            /* Indicate negotiation complete */
            s->miiregs[MII_BMSR] |= MII_BMSR_AN_COMP;

            if (!qemu_get_queue(s->nic)->link_down) {
                /* Backend link is up: advertise 100Mbps FD link partner */
                s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
                s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
            }
        }
        break;
    }

    s->miiregs[reg] = data;
}
444
445static uint16_t sunhme_mii_read(SunHMEState *s, uint8_t reg)
446{
447 uint16_t data = s->miiregs[reg];
448
449 trace_sunhme_mii_read(reg, data);
450
451 return data;
452}
453
/*
 * MMIO write handler for the MIF (MII interface) register bank.  A write
 * to the frame output register executes an MII management frame against
 * the internal PHY; completion is signalled by setting the TALSB bit,
 * which the driver polls for.
 */
static void sunhme_mif_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint8_t cmd, reg;
    uint16_t data;

    trace_sunhme_mif_write(addr, val);

    switch (addr) {
    case HME_MIFI_CFG:
        /* Mask the read-only bits (MDI0/MDI1 reflect transceiver state) */
        val &= ~(HME_MIF_CFG_MDI0 | HME_MIF_CFG_MDI1);
        val |= s->mifregs[HME_MIFI_CFG >> 2] &
               (HME_MIF_CFG_MDI0 | HME_MIF_CFG_MDI1);
        break;
    case HME_MIFI_FO:
        /* Detect start of MII command; anything else completes as a no-op */
        if ((val & HME_MIF_FO_ST) >> HME_MIF_FO_ST_SHIFT
            != MII_COMMAND_START) {
            val |= HME_MIF_FO_TALSB;
            break;
        }

        /* Internal phy only: other PHY addresses complete without effect */
        if ((val & HME_MIF_FO_PHYAD) >> HME_MIF_FO_PHYAD_SHIFT
            != HME_PHYAD_INTERNAL) {
            val |= HME_MIF_FO_TALSB;
            break;
        }

        /* Decode opcode, register number and data from the frame */
        cmd = (val & HME_MIF_FO_OPC) >> HME_MIF_FO_OPC_SHIFT;
        reg = (val & HME_MIF_FO_REGAD) >> HME_MIF_FO_REGAD_SHIFT;
        data = (val & HME_MIF_FO_DATA);

        switch (cmd) {
        case MII_COMMAND_WRITE:
            sunhme_mii_write(s, reg, data);
            break;

        case MII_COMMAND_READ:
            /* Read result is returned in the data field of the register */
            val &= ~HME_MIF_FO_DATA;
            val |= sunhme_mii_read(s, reg);
            break;
        }

        /* Indicate command completion */
        val |= HME_MIF_FO_TALSB;
        break;
    }

    s->mifregs[addr >> 2] = val;
}
506
507static uint64_t sunhme_mif_read(void *opaque, hwaddr addr,
508 unsigned size)
509{
510 SunHMEState *s = SUNHME(opaque);
511 uint64_t val;
512
513 val = s->mifregs[addr >> 2];
514
515 switch (addr) {
516 case HME_MIFI_STAT:
517 /* Autoclear MIF interrupt status */
518 s->mifregs[HME_MIFI_STAT >> 2] = 0;
519 sunhme_update_irq(s);
520 break;
521 }
522
523 trace_sunhme_mif_read(addr, val);
524
525 return val;
526}
527
/* MIF register bank: 32-bit little-endian accesses only */
static const MemoryRegionOps sunhme_mif_ops = {
    .read = sunhme_mif_read,
    .write = sunhme_mif_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
537
538static void sunhme_transmit_frame(SunHMEState *s, uint8_t *buf, int size)
539{
540 qemu_send_packet(qemu_get_queue(s->nic), buf, size);
541}
542
543static inline int sunhme_get_tx_ring_count(SunHMEState *s)
544{
545 return (s->etxregs[HME_ETXI_RSIZE >> 2] + 1) << 4;
546}
547
548static inline int sunhme_get_tx_ring_nr(SunHMEState *s)
549{
550 return s->etxregs[HME_ETXI_RING >> 2] & HME_ETXI_RING_OFFSET;
551}
552
553static inline void sunhme_set_tx_ring_nr(SunHMEState *s, int i)
554{
555 uint32_t ring = s->etxregs[HME_ETXI_RING >> 2] & ~HME_ETXI_RING_OFFSET;
556 ring |= i & HME_ETXI_RING_OFFSET;
557
558 s->etxregs[HME_ETXI_RING >> 2] = ring;
559}
560
/*
 * Process the TX descriptor ring.  Each guest-owned descriptor's buffer
 * is DMAed into a local FIFO buffer; a frame may span several descriptors
 * (SOP..EOP).  If requested, a 16-bit ones-complement checksum is
 * computed from csum_offset and stuffed at csum_stuff_offset before the
 * frame is handed to the backend.  Ownership is returned to the guest
 * per descriptor and a HOSTTOTX interrupt is raised per frame.
 */
static void sunhme_transmit(SunHMEState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t tb, addr;
    uint32_t intstatus, status, buffer, sum = 0;
    int cr, nr, len, xmit_pos, csum_offset = 0, csum_stuff_offset = 0;
    uint16_t csum = 0;
    uint8_t xmit_buffer[HME_FIFO_SIZE];

    /* Ring base address, descriptor count and current index */
    tb = s->etxregs[HME_ETXI_RING >> 2] & HME_ETXI_RING_ADDR;
    nr = sunhme_get_tx_ring_count(s);
    cr = sunhme_get_tx_ring_nr(s);

    pci_dma_read(d, tb + cr * HME_DESC_SIZE, &status, 4);
    pci_dma_read(d, tb + cr * HME_DESC_SIZE + 4, &buffer, 4);

    xmit_pos = 0;
    while (status & HME_XD_OWN) {
        trace_sunhme_tx_desc(buffer, status, cr, nr);

        /* Copy data into transmit buffer */
        addr = buffer;
        len = status & HME_XD_TXLENMSK;

        /* Clamp so a multi-descriptor frame cannot overrun the FIFO */
        if (xmit_pos + len > HME_FIFO_SIZE) {
            len = HME_FIFO_SIZE - xmit_pos;
        }

        pci_dma_read(d, addr, &xmit_buffer[xmit_pos], len);
        xmit_pos += len;

        /* Detect start of packet for TX checksum */
        if (status & HME_XD_SOP) {
            sum = 0;
            csum_offset = (status & HME_XD_TXCSSTART) >> HME_XD_TXCSSTARTSHIFT;
            csum_stuff_offset = (status & HME_XD_TXCSSTUFF) >>
                                HME_XD_TXCSSTUFFSHIFT;
        }

        if (status & HME_XD_TXCKSUM) {
            /* Only start calculation from csum_offset */
            if (xmit_pos - len <= csum_offset && xmit_pos > csum_offset) {
                /* csum_offset falls inside this chunk: sum from there on */
                sum += net_checksum_add(xmit_pos - csum_offset,
                                        xmit_buffer + csum_offset);
                trace_sunhme_tx_xsum_add(csum_offset, xmit_pos - csum_offset);
            } else {
                /* Chunk is entirely past csum_offset: sum the whole chunk */
                sum += net_checksum_add(len, xmit_buffer + xmit_pos - len);
                trace_sunhme_tx_xsum_add(xmit_pos - len, len);
            }
        }

        /* Detect end of packet for TX checksum */
        if (status & HME_XD_EOP) {
            /* Stuff the checksum if required */
            if (status & HME_XD_TXCKSUM) {
                csum = net_checksum_finish(sum);
                stw_be_p(xmit_buffer + csum_stuff_offset, csum);
                trace_sunhme_tx_xsum_stuff(csum, csum_stuff_offset);
            }

            /* Only send the frame if the MAC transmitter is enabled */
            if (s->macregs[HME_MACI_TXCFG >> 2] & HME_MAC_TXCFG_ENABLE) {
                sunhme_transmit_frame(s, xmit_buffer, xmit_pos);
                trace_sunhme_tx_done(xmit_pos);
            }
        }

        /* Update status: return descriptor ownership to the guest */
        status &= ~HME_XD_OWN;
        pci_dma_write(d, tb + cr * HME_DESC_SIZE, &status, 4);

        /* Move onto next descriptor (ring wraps at nr) */
        cr++;
        if (cr >= nr) {
            cr = 0;
        }
        sunhme_set_tx_ring_nr(s, cr);

        pci_dma_read(d, tb + cr * HME_DESC_SIZE, &status, 4);
        pci_dma_read(d, tb + cr * HME_DESC_SIZE + 4, &buffer, 4);

        /* Indicate TX complete */
        intstatus = s->sebregs[HME_SEBI_STAT >> 2];
        intstatus |= HME_SEB_STAT_HOSTTOTX;
        s->sebregs[HME_SEBI_STAT >> 2] = intstatus;

        /* Autoclear TX pending */
        s->etxregs[HME_ETXI_PENDING >> 2] = 0;

        sunhme_update_irq(s);
    }

    /* TX FIFO now clear */
    intstatus = s->sebregs[HME_SEBI_STAT >> 2];
    intstatus |= HME_SEB_STAT_TXALL;
    s->sebregs[HME_SEBI_STAT >> 2] = intstatus;
    sunhme_update_irq(s);
}
658
659static int sunhme_can_receive(NetClientState *nc)
660{
661 SunHMEState *s = qemu_get_nic_opaque(nc);
662
076489c0 663 return s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE;
c110425d
MCA
664}
665
666static void sunhme_link_status_changed(NetClientState *nc)
667{
668 SunHMEState *s = qemu_get_nic_opaque(nc);
669
670 if (nc->link_down) {
671 s->miiregs[MII_ANLPAR] &= ~MII_ANLPAR_TXFD;
672 s->miiregs[MII_BMSR] &= ~MII_BMSR_LINK_ST;
673 } else {
674 s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
675 s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
676 }
677
678 /* Exact bits unknown */
679 s->mifregs[HME_MIFI_STAT >> 2] = 0xffff;
680 sunhme_update_irq(s);
681}
682
683static inline int sunhme_get_rx_ring_count(SunHMEState *s)
684{
685 uint32_t rings = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_RINGSIZE)
686 >> HME_ERX_CFG_RINGSIZE_SHIFT;
687
688 switch (rings) {
689 case 0:
690 return 32;
691 case 1:
692 return 64;
693 case 2:
694 return 128;
695 case 3:
696 return 256;
697 }
698
699 return 0;
700}
701
702static inline int sunhme_get_rx_ring_nr(SunHMEState *s)
703{
704 return s->erxregs[HME_ERXI_RING >> 2] & HME_ERXI_RING_OFFSET;
705}
706
707static inline void sunhme_set_rx_ring_nr(SunHMEState *s, int i)
708{
709 uint32_t ring = s->erxregs[HME_ERXI_RING >> 2] & ~HME_ERXI_RING_OFFSET;
710 ring |= i & HME_ERXI_RING_OFFSET;
711
712 s->erxregs[HME_ERXI_RING >> 2] = ring;
713}
714
c110425d
MCA
715#define MIN_BUF_SIZE 60
716
/*
 * Receive a frame from the network backend.  The frame is filtered
 * against the station MAC address, broadcast address, multicast hash
 * table or promiscuous mode, padded to the minimum Ethernet size,
 * DMAed into the guest buffer of the current RX descriptor, and a
 * receive checksum is stored in the descriptor status word.
 *
 * Returns the number of bytes consumed, 0 when RX is disabled (packet
 * queued by the backend), or -1 when the frame is filtered out or no
 * descriptor is available.
 */
static ssize_t sunhme_receive(NetClientState *nc, const uint8_t *buf,
                              size_t size)
{
    SunHMEState *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t rb, addr;
    uint32_t intstatus, status, buffer, buffersize, sum;
    uint16_t csum;
    uint8_t buf1[60];
    int nr, cr, len, rxoffset, csum_offset;

    trace_sunhme_rx_incoming(size);

    /* Do nothing if MAC RX disabled */
    if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE)) {
        return 0;
    }

    trace_sunhme_rx_filter_destmac(buf[0], buf[1], buf[2],
                                   buf[3], buf[4], buf[5]);

    /* Check destination MAC address */
    if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_PMISC)) {
        /* Try and match local MAC address (stored big-endian, 16 bits
           per MACADDRn register) */
        if (((s->macregs[HME_MACI_MACADDR0 >> 2] & 0xff00) >> 8) == buf[0] &&
             (s->macregs[HME_MACI_MACADDR0 >> 2] & 0xff) == buf[1] &&
            ((s->macregs[HME_MACI_MACADDR1 >> 2] & 0xff00) >> 8) == buf[2] &&
             (s->macregs[HME_MACI_MACADDR1 >> 2] & 0xff) == buf[3] &&
            ((s->macregs[HME_MACI_MACADDR2 >> 2] & 0xff00) >> 8) == buf[4] &&
             (s->macregs[HME_MACI_MACADDR2 >> 2] & 0xff) == buf[5]) {
            /* Matched local MAC address */
            trace_sunhme_rx_filter_local_match();
        } else if (buf[0] == 0xff && buf[1] == 0xff && buf[2] == 0xff &&
                   buf[3] == 0xff && buf[4] == 0xff && buf[5] == 0xff) {
            /* Matched broadcast address */
            trace_sunhme_rx_filter_bcast_match();
        } else if (s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_HENABLE) {
            /* Didn't match local address, check hash filter: top 6 CRC
               bits select one bit in the 64-bit HASHTAB0..3 table */
            int mcast_idx = net_crc32_le(buf, ETH_ALEN) >> 26;
            if (!(s->macregs[(HME_MACI_HASHTAB0 >> 2) - (mcast_idx >> 4)] &
                    (1 << (mcast_idx & 0xf)))) {
                /* Didn't match hash filter */
                trace_sunhme_rx_filter_hash_nomatch();
                trace_sunhme_rx_filter_reject();
                return -1;
            } else {
                trace_sunhme_rx_filter_hash_match();
            }
        } else {
            /* Not for us */
            trace_sunhme_rx_filter_reject();
            return -1;
        }
    } else {
        trace_sunhme_rx_filter_promisc_match();
    }

    trace_sunhme_rx_filter_accept();

    /* If too small buffer, then expand it (zero-pad to 60 bytes) */
    if (size < MIN_BUF_SIZE) {
        memcpy(buf1, buf, size);
        memset(buf1 + size, 0, MIN_BUF_SIZE - size);
        buf = buf1;
        size = MIN_BUF_SIZE;
    }

    /* Ring base address, descriptor count and current index */
    rb = s->erxregs[HME_ERXI_RING >> 2] & HME_ERXI_RING_ADDR;
    nr = sunhme_get_rx_ring_count(s);
    cr = sunhme_get_rx_ring_nr(s);

    pci_dma_read(d, rb + cr * HME_DESC_SIZE, &status, 4);
    pci_dma_read(d, rb + cr * HME_DESC_SIZE + 4, &buffer, 4);

    /* If we don't own the current descriptor then indicate overflow error */
    if (!(status & HME_XD_OWN)) {
        s->sebregs[HME_SEBI_STAT >> 2] |= HME_SEB_STAT_NORXD;
        sunhme_update_irq(s);
        trace_sunhme_rx_norxd();
        return -1;
    }

    /* Guest-configured byte offset into the receive buffer */
    rxoffset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_BYTEOFFSET) >>
               HME_ERX_CFG_BYTEOFFSET_SHIFT;

    addr = buffer + rxoffset;
    buffersize = (status & HME_XD_RXLENMSK) >> HME_XD_RXLENSHIFT;

    /* Detect receive overflow: truncate and flag OFL */
    len = size;
    if (size > buffersize) {
        status |= HME_XD_OFL;
        len = buffersize;
    }

    pci_dma_write(d, addr, buf, len);

    trace_sunhme_rx_desc(buffer, rxoffset, status, len, cr, nr);

    /* Calculate the receive checksum (CSUMSTART counts 16-bit
       half-words, hence the << 1 to convert to a byte offset) */
    csum_offset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_CSUMSTART) >>
                  HME_ERX_CFG_CSUMSHIFT << 1;
    sum = 0;
    sum += net_checksum_add(len - csum_offset, (uint8_t *)buf + csum_offset);
    csum = net_checksum_finish(sum);

    trace_sunhme_rx_xsum_calc(csum);

    /* Update status: clear ownership, record length and checksum */
    status &= ~HME_XD_OWN;
    status &= ~HME_XD_RXLENMSK;
    status |= len << HME_XD_RXLENSHIFT;
    status &= ~HME_XD_RXCKSUM;
    status |= csum;

    pci_dma_write(d, rb + cr * HME_DESC_SIZE, &status, 4);

    /* Advance to the next descriptor, wrapping at the ring size */
    cr++;
    if (cr >= nr) {
        cr = 0;
    }

    sunhme_set_rx_ring_nr(s, cr);

    /* Indicate RX complete */
    intstatus = s->sebregs[HME_SEBI_STAT >> 2];
    intstatus |= HME_SEB_STAT_RXTOHOST;
    s->sebregs[HME_SEBI_STAT >> 2] = intstatus;

    sunhme_update_irq(s);

    return len;
}
850
/* Callbacks registered with the network core for this NIC */
static NetClientInfo net_sunhme_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = sunhme_can_receive,
    .receive = sunhme_receive,
    .link_status_changed = sunhme_link_status_changed,
};
858
/*
 * PCI realize: set up the interrupt pin, map the five register banks as
 * subregions of the BAR0 container, and create the NIC backend.
 */
static void sunhme_realize(PCIDevice *pci_dev, Error **errp)
{
    SunHMEState *s = SUNHME(pci_dev);
    DeviceState *d = DEVICE(pci_dev);
    uint8_t *pci_conf;

    pci_conf = pci_dev->config;
    pci_conf[PCI_INTERRUPT_PIN] = 1;    /* interrupt pin A */

    /* BAR0: container for all register banks */
    memory_region_init(&s->hme, OBJECT(pci_dev), "sunhme", HME_REG_SIZE);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->hme);

    /* Register banks at their fixed offsets within BAR0 */
    memory_region_init_io(&s->sebreg, OBJECT(pci_dev), &sunhme_seb_ops, s,
                          "sunhme.seb", HME_SEB_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0, &s->sebreg);

    memory_region_init_io(&s->etxreg, OBJECT(pci_dev), &sunhme_etx_ops, s,
                          "sunhme.etx", HME_ETX_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x2000, &s->etxreg);

    memory_region_init_io(&s->erxreg, OBJECT(pci_dev), &sunhme_erx_ops, s,
                          "sunhme.erx", HME_ERX_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x4000, &s->erxreg);

    memory_region_init_io(&s->macreg, OBJECT(pci_dev), &sunhme_mac_ops, s,
                          "sunhme.mac", HME_MAC_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x6000, &s->macreg);

    memory_region_init_io(&s->mifreg, OBJECT(pci_dev), &sunhme_mif_ops, s,
                          "sunhme.mif", HME_MIF_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x7000, &s->mifreg);

    /* Create the NIC backend attachment */
    qemu_macaddr_default_if_unset(&s->conf.macaddr);
    s->nic = qemu_new_nic(&net_sunhme_info, &s->conf,
                          object_get_typename(OBJECT(d)), d->id, s);
    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}
896
/* Instance init: expose a "bootindex" property for boot ordering */
static void sunhme_instance_init(Object *obj)
{
    SunHMEState *s = SUNHME(obj);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(obj), NULL);
}
905
/*
 * Device reset: bring the MIF, PHY and interrupt mask registers to
 * their power-on defaults, reflecting the current backend link state.
 */
static void sunhme_reset(DeviceState *ds)
{
    SunHMEState *s = SUNHME(ds);

    /* Configure internal transceiver */
    s->mifregs[HME_MIFI_CFG >> 2] |= HME_MIF_CFG_MDI0;

    /* Advertise auto, 100Mbps FD */
    s->miiregs[MII_ANAR] = MII_ANAR_TXFD;
    s->miiregs[MII_BMSR] = MII_BMSR_AUTONEG | MII_BMSR_100TX_FD |
                           MII_BMSR_AN_COMP;

    if (!qemu_get_queue(s->nic)->link_down) {
        /* Backend link is up: report link status and 100FD partner */
        s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
        s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
    }

    /* Set manufacturer (DP83840 PHY identifiers) */
    s->miiregs[MII_PHYID1] = DP83840_PHYID1;
    s->miiregs[MII_PHYID2] = DP83840_PHYID2;

    /* Configure default interrupt mask (all sources masked) */
    s->mifregs[HME_MIFI_IMASK >> 2] = 0xffff;
    s->sebregs[HME_SEBI_IMASK >> 2] = 0xff7fffff;
}
931
/* Migration state: PCI core, MAC address and all raw register banks */
static const VMStateDescription vmstate_hme = {
    .name = "sunhme",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, SunHMEState),
        VMSTATE_MACADDR(conf.macaddr, SunHMEState),
        VMSTATE_UINT32_ARRAY(sebregs, SunHMEState, (HME_SEB_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(etxregs, SunHMEState, (HME_ETX_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(erxregs, SunHMEState, (HME_ERX_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(macregs, SunHMEState, (HME_MAC_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(mifregs, SunHMEState, (HME_MIF_REG_SIZE >> 2)),
        VMSTATE_UINT16_ARRAY(miiregs, SunHMEState, HME_MII_REGS_SIZE),
        VMSTATE_END_OF_LIST()
    }
};
948
/* Class init: PCI IDs, realize/reset hooks, migration and properties */
static void sunhme_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = sunhme_realize;
    k->vendor_id = PCI_VENDOR_ID_SUN;
    k->device_id = PCI_DEVICE_ID_SUN_HME;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    dc->vmsd = &vmstate_hme;
    dc->reset = sunhme_reset;
    dc->props = sunhme_properties;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
}
963
/* QOM type registration record for the sunhme PCI device */
static const TypeInfo sunhme_info = {
    .name          = TYPE_SUNHME,
    .parent        = TYPE_PCI_DEVICE,
    .class_init    = sunhme_class_init,
    .instance_size = sizeof(SunHMEState),
    .instance_init = sunhme_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { }
    }
};
975
976static void sunhme_register_types(void)
977{
978 type_register_static(&sunhme_info);
979}
980
981type_init(sunhme_register_types)
This page took 0.252049 seconds and 4 git commands to generate.