hw/net/e1000.c (qemu.git)
1/*
2 * QEMU e1000 emulation
3 *
4 * Software developer's manual:
5 * http://download.intel.com/design/network/manuals/8254x_GBe_SDM.pdf
6 *
7 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
8 * Copyright (c) 2008 Qumranet
9 * Based on work done by:
10 * Copyright (c) 2007 Dan Aloni
11 * Copyright (c) 2004 Antony T Curtis
12 *
13 * This library is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU Lesser General Public
15 * License as published by the Free Software Foundation; either
16 * version 2 of the License, or (at your option) any later version.
17 *
18 * This library is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 * Lesser General Public License for more details.
22 *
23 * You should have received a copy of the GNU Lesser General Public
24 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
25 */
26
27
28#include "hw/hw.h"
29#include "hw/pci/pci.h"
30#include "net/net.h"
31#include "net/checksum.h"
32#include "hw/loader.h"
33#include "sysemu/sysemu.h"
34#include "sysemu/dma.h"
35#include "qemu/iov.h"
36#include "qemu/range.h"
37
38#include "e1000_regs.h"
39
40#define E1000_DEBUG
41
42#ifdef E1000_DEBUG
43enum {
44 DEBUG_GENERAL, DEBUG_IO, DEBUG_MMIO, DEBUG_INTERRUPT,
45 DEBUG_RX, DEBUG_TX, DEBUG_MDIC, DEBUG_EEPROM,
46 DEBUG_UNKNOWN, DEBUG_TXSUM, DEBUG_TXERR, DEBUG_RXERR,
47 DEBUG_RXFILTER, DEBUG_PHY, DEBUG_NOTYET,
48};
49#define DBGBIT(x) (1<<DEBUG_##x)
50static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);
51
52#define DBGOUT(what, fmt, ...) do { \
53 if (debugflags & DBGBIT(what)) \
54 fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \
55 } while (0)
56#else
57#define DBGOUT(what, fmt, ...) do {} while (0)
58#endif
59
60#define IOPORT_SIZE 0x40
61#define PNPMMIO_SIZE 0x20000
62#define MIN_BUF_SIZE 60 /* Min. octets in an ethernet frame sans FCS */
63
64/* this is the size past which hardware will drop packets when setting LPE=0 */
65#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
66/* this is the size past which hardware will drop packets when setting LPE=1 */
67#define MAXIMUM_ETHERNET_LPE_SIZE 16384
68
69#define MAXIMUM_ETHERNET_HDR_LEN (14+4)
70
71/*
72 * HW models:
73 * E1000_DEV_ID_82540EM works with Windows, Linux, and OS X <= 10.8
74 * E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
75 * E1000_DEV_ID_82545EM_COPPER works with Linux and OS X >= 10.6
76 * Others never tested
77 */
78
79typedef struct E1000State_st {
80 /*< private >*/
81 PCIDevice parent_obj;
82 /*< public >*/
83
84 NICState *nic;
85 NICConf conf;
86 MemoryRegion mmio;
87 MemoryRegion io;
88
89 uint32_t mac_reg[0x8000];
90 uint16_t phy_reg[0x20];
91 uint16_t eeprom_data[64];
92
93 uint32_t rxbuf_size;
94 uint32_t rxbuf_min_shift;
95 struct e1000_tx {
96 unsigned char header[256];
97 unsigned char vlan_header[4];
98 /* Fields vlan and data must not be reordered or separated. */
99 unsigned char vlan[4];
100 unsigned char data[0x10000];
101 uint16_t size;
102 unsigned char sum_needed;
103 unsigned char vlan_needed;
104 uint8_t ipcss;
105 uint8_t ipcso;
106 uint16_t ipcse;
107 uint8_t tucss;
108 uint8_t tucso;
109 uint16_t tucse;
110 uint8_t hdr_len;
111 uint16_t mss;
112 uint32_t paylen;
113 uint16_t tso_frames;
114 char tse;
115 int8_t ip;
116 int8_t tcp;
117 char cptse; // current packet tse bit
118 } tx;
119
120 struct {
121 uint32_t val_in; // shifted in from guest driver
122 uint16_t bitnum_in;
123 uint16_t bitnum_out;
124 uint16_t reading;
125 uint32_t old_eecd;
126 } eecd_state;
127
128 QEMUTimer *autoneg_timer;
129
130 QEMUTimer *mit_timer; /* Mitigation timer. */
131 bool mit_timer_on; /* Mitigation timer is running. */
132 bool mit_irq_level; /* Tracks interrupt pin level. */
133 uint32_t mit_ide; /* Tracks E1000_TXD_CMD_IDE bit. */
134
135/* Compatibility flags for migration to/from qemu 1.3.0 and older */
136#define E1000_FLAG_AUTONEG_BIT 0
137#define E1000_FLAG_MIT_BIT 1
138#define E1000_FLAG_AUTONEG (1 << E1000_FLAG_AUTONEG_BIT)
139#define E1000_FLAG_MIT (1 << E1000_FLAG_MIT_BIT)
140 uint32_t compat_flags;
141} E1000State;
142
143typedef struct E1000BaseClass {
144 PCIDeviceClass parent_class;
145 uint16_t phy_id2;
146} E1000BaseClass;
147
148#define TYPE_E1000_BASE "e1000-base"
149
150#define E1000(obj) \
151 OBJECT_CHECK(E1000State, (obj), TYPE_E1000_BASE)
152
153#define E1000_DEVICE_CLASS(klass) \
154 OBJECT_CLASS_CHECK(E1000BaseClass, (klass), TYPE_E1000_BASE)
155#define E1000_DEVICE_GET_CLASS(obj) \
156 OBJECT_GET_CLASS(E1000BaseClass, (obj), TYPE_E1000_BASE)
157
158#define defreg(x) x = (E1000_##x>>2)
159enum {
160 defreg(CTRL), defreg(EECD), defreg(EERD), defreg(GPRC),
161 defreg(GPTC), defreg(ICR), defreg(ICS), defreg(IMC),
162 defreg(IMS), defreg(LEDCTL), defreg(MANC), defreg(MDIC),
163 defreg(MPC), defreg(PBA), defreg(RCTL), defreg(RDBAH),
164 defreg(RDBAL), defreg(RDH), defreg(RDLEN), defreg(RDT),
165 defreg(STATUS), defreg(SWSM), defreg(TCTL), defreg(TDBAH),
166 defreg(TDBAL), defreg(TDH), defreg(TDLEN), defreg(TDT),
167 defreg(TORH), defreg(TORL), defreg(TOTH), defreg(TOTL),
168 defreg(TPR), defreg(TPT), defreg(TXDCTL), defreg(WUFC),
169 defreg(RA), defreg(MTA), defreg(CRCERRS),defreg(VFTA),
170 defreg(VET), defreg(RDTR), defreg(RADV), defreg(TADV),
171 defreg(ITR),
172};
173
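The defreg() macro above maps each register's MMIO byte offset onto an index into the 32-bit mac_reg[] array. A standalone sketch of the idea follows; the register offsets used here are assumptions in the style of e1000_regs.h, not values taken from this file.

#include <stdio.h>

#define E1000_CTRL 0x00000   /* assumed byte offset of the CTRL register */
#define E1000_RDT  0x02818   /* assumed byte offset of the RDT register */
#define defreg(x) x = (E1000_##x >> 2)
enum { defreg(CTRL), defreg(RDT) };

int main(void)
{
    /* Each 32-bit register at byte offset "off" is stored at mac_reg[off >> 2]. */
    printf("CTRL -> mac_reg[0x%x], RDT -> mac_reg[0x%x]\n", CTRL, RDT);
    return 0;
}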
174static void
175e1000_link_down(E1000State *s)
176{
177 s->mac_reg[STATUS] &= ~E1000_STATUS_LU;
178 s->phy_reg[PHY_STATUS] &= ~MII_SR_LINK_STATUS;
179 s->phy_reg[PHY_STATUS] &= ~MII_SR_AUTONEG_COMPLETE;
180 s->phy_reg[PHY_LP_ABILITY] &= ~MII_LPAR_LPACK;
181}
182
183static void
184e1000_link_up(E1000State *s)
185{
186 s->mac_reg[STATUS] |= E1000_STATUS_LU;
187 s->phy_reg[PHY_STATUS] |= MII_SR_LINK_STATUS;
188
189 /* E1000_STATUS_LU is tested by e1000_can_receive() */
190 qemu_flush_queued_packets(qemu_get_queue(s->nic));
191}
192
193static bool
194have_autoneg(E1000State *s)
195{
196 return (s->compat_flags & E1000_FLAG_AUTONEG) &&
197 (s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN);
198}
199
200static void
201set_phy_ctrl(E1000State *s, int index, uint16_t val)
202{
203 /* bits 0-5 reserved; MII_CR_[RESTART_AUTO_NEG,RESET] are self clearing */
204 s->phy_reg[PHY_CTRL] = val & ~(0x3f |
205 MII_CR_RESET |
206 MII_CR_RESTART_AUTO_NEG);
207
208 /*
209 * QEMU 1.3 does not support link auto-negotiation emulation, so if we
210 * migrate during auto negotiation, after migration the link will be
211 * down.
212 */
213 if (have_autoneg(s) && (val & MII_CR_RESTART_AUTO_NEG)) {
214 e1000_link_down(s);
215 DBGOUT(PHY, "Start link auto negotiation\n");
216 timer_mod(s->autoneg_timer,
217 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
218 }
219}
220
221static void (*phyreg_writeops[])(E1000State *, int, uint16_t) = {
222 [PHY_CTRL] = set_phy_ctrl,
223};
224
225enum { NPHYWRITEOPS = ARRAY_SIZE(phyreg_writeops) };
226
227enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
228static const char phy_regcap[0x20] = {
229 [PHY_STATUS] = PHY_R, [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
230 [PHY_ID1] = PHY_R, [M88E1000_PHY_SPEC_CTRL] = PHY_RW,
231 [PHY_CTRL] = PHY_RW, [PHY_1000T_CTRL] = PHY_RW,
232 [PHY_LP_ABILITY] = PHY_R, [PHY_1000T_STATUS] = PHY_R,
233 [PHY_AUTONEG_ADV] = PHY_RW, [M88E1000_RX_ERR_CNTR] = PHY_R,
234 [PHY_ID2] = PHY_R, [M88E1000_PHY_SPEC_STATUS] = PHY_R,
235 [PHY_AUTONEG_EXP] = PHY_R,
236};
237
238/* PHY_ID2 documented in 8254x_GBe_SDM.pdf, pp. 250 */
239static const uint16_t phy_reg_init[] = {
240 [PHY_CTRL] = MII_CR_SPEED_SELECT_MSB |
241 MII_CR_FULL_DUPLEX |
242 MII_CR_AUTO_NEG_EN,
243
244 [PHY_STATUS] = MII_SR_EXTENDED_CAPS |
245 MII_SR_LINK_STATUS | /* link initially up */
246 MII_SR_AUTONEG_CAPS |
247 /* MII_SR_AUTONEG_COMPLETE: initially NOT completed */
248 MII_SR_PREAMBLE_SUPPRESS |
249 MII_SR_EXTENDED_STATUS |
250 MII_SR_10T_HD_CAPS |
251 MII_SR_10T_FD_CAPS |
252 MII_SR_100X_HD_CAPS |
253 MII_SR_100X_FD_CAPS,
254
255 [PHY_ID1] = 0x141,
256 /* [PHY_ID2] configured per DevId, from e1000_reset() */
257 [PHY_AUTONEG_ADV] = 0xde1,
258 [PHY_LP_ABILITY] = 0x1e0,
259 [PHY_1000T_CTRL] = 0x0e00,
260 [PHY_1000T_STATUS] = 0x3c00,
261 [M88E1000_PHY_SPEC_CTRL] = 0x360,
262 [M88E1000_PHY_SPEC_STATUS] = 0xac00,
263 [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60,
264};
265
266static const uint32_t mac_reg_init[] = {
267 [PBA] = 0x00100030,
268 [LEDCTL] = 0x602,
269 [CTRL] = E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
270 E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
271 [STATUS] = 0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
272 E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
273 E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
274 E1000_STATUS_LU,
275 [MANC] = E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
276 E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
277 E1000_MANC_RMCP_EN,
278};
279
280/* Helper function, *curr == 0 means the value is not set */
281static inline void
282mit_update_delay(uint32_t *curr, uint32_t value)
283{
284 if (value && (*curr == 0 || value < *curr)) {
285 *curr = value;
286 }
287}
288
289static void
290set_interrupt_cause(E1000State *s, int index, uint32_t val)
291{
292 PCIDevice *d = PCI_DEVICE(s);
293 uint32_t pending_ints;
294 uint32_t mit_delay;
295
296 s->mac_reg[ICR] = val;
297
298 /*
299 * Make sure ICR and ICS registers have the same value.
300 * The spec says that the ICS register is write-only. However in practice,
301 * on real hardware ICS is readable, and for reads it has the same value as
302 * ICR (except that ICS does not have the clear on read behaviour of ICR).
303 *
304 * The VxWorks PRO/1000 driver uses this behaviour.
305 */
306 s->mac_reg[ICS] = val;
307
308 pending_ints = (s->mac_reg[IMS] & s->mac_reg[ICR]);
309 if (!s->mit_irq_level && pending_ints) {
310 /*
311 * Here we detect a potential raising edge. We postpone raising the
312 * interrupt line if we are inside the mitigation delay window
313 * (s->mit_timer_on == 1).
314 * We provide a partial implementation of interrupt mitigation,
315 * emulating only RADV, TADV and ITR (lower 16 bits, 1024ns units for
316 * RADV and TADV, 256ns units for ITR). RDTR is only used to enable
317 * RADV; relative timers based on TIDV and RDTR are not implemented.
318 */
319 if (s->mit_timer_on) {
320 return;
321 }
322 if (s->compat_flags & E1000_FLAG_MIT) {
323 /* Compute the next mitigation delay according to pending
324 * interrupts and the current values of RADV (provided
325 * RDTR!=0), TADV and ITR.
326 * Then rearm the timer.
327 */
328 mit_delay = 0;
329 if (s->mit_ide &&
330 (pending_ints & (E1000_ICR_TXQE | E1000_ICR_TXDW))) {
331 mit_update_delay(&mit_delay, s->mac_reg[TADV] * 4);
332 }
333 if (s->mac_reg[RDTR] && (pending_ints & E1000_ICS_RXT0)) {
334 mit_update_delay(&mit_delay, s->mac_reg[RADV] * 4);
335 }
336 mit_update_delay(&mit_delay, s->mac_reg[ITR]);
337
338 if (mit_delay) {
339 s->mit_timer_on = 1;
340 timer_mod(s->mit_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
341 mit_delay * 256);
342 }
343 s->mit_ide = 0;
344 }
345 }
346
347 s->mit_irq_level = (pending_ints != 0);
348 pci_set_irq(d, s->mit_irq_level);
349}
350
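To make the units in set_interrupt_cause() concrete, here is a small worked example of the mitigation-delay arithmetic it performs; the register values are invented for illustration and the helper simply mirrors mit_update_delay() above.

#include <stdio.h>
#include <stdint.h>

/* Same rule as mit_update_delay(): keep the smallest non-zero candidate. */
static void mit_update_delay(uint32_t *curr, uint32_t value)
{
    if (value && (*curr == 0 || value < *curr)) {
        *curr = value;
    }
}

int main(void)
{
    uint32_t radv = 8, itr = 500, mit_delay = 0;

    mit_update_delay(&mit_delay, radv * 4); /* RADV is in 1024 ns units -> x4 converts to 256 ns units */
    mit_update_delay(&mit_delay, itr);      /* ITR is already in 256 ns units */
    /* The timer is armed for mit_delay * 256 ns: 32 * 256 ns = 8192 ns here. */
    printf("mit_delay = %u (%u ns)\n", mit_delay, mit_delay * 256);
    return 0;
}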
351static void
352e1000_mit_timer(void *opaque)
353{
354 E1000State *s = opaque;
355
356 s->mit_timer_on = 0;
357 /* Call set_interrupt_cause to update the irq level (if necessary). */
358 set_interrupt_cause(s, 0, s->mac_reg[ICR]);
359}
360
361static void
362set_ics(E1000State *s, int index, uint32_t val)
363{
364 DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR],
365 s->mac_reg[IMS]);
366 set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
367}
368
369static void
370e1000_autoneg_timer(void *opaque)
371{
372 E1000State *s = opaque;
373 if (!qemu_get_queue(s->nic)->link_down) {
374 e1000_link_up(s);
375 s->phy_reg[PHY_LP_ABILITY] |= MII_LPAR_LPACK;
376 s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
377 DBGOUT(PHY, "Auto negotiation is completed\n");
378 set_ics(s, 0, E1000_ICS_LSC); /* signal link status change to guest */
379 }
380}
381
382static int
383rxbufsize(uint32_t v)
384{
385 v &= E1000_RCTL_BSEX | E1000_RCTL_SZ_16384 | E1000_RCTL_SZ_8192 |
386 E1000_RCTL_SZ_4096 | E1000_RCTL_SZ_2048 | E1000_RCTL_SZ_1024 |
387 E1000_RCTL_SZ_512 | E1000_RCTL_SZ_256;
388 switch (v) {
389 case E1000_RCTL_BSEX | E1000_RCTL_SZ_16384:
390 return 16384;
391 case E1000_RCTL_BSEX | E1000_RCTL_SZ_8192:
392 return 8192;
393 case E1000_RCTL_BSEX | E1000_RCTL_SZ_4096:
394 return 4096;
395 case E1000_RCTL_SZ_1024:
396 return 1024;
397 case E1000_RCTL_SZ_512:
398 return 512;
399 case E1000_RCTL_SZ_256:
400 return 256;
401 }
402 return 2048;
403}
404
405static void e1000_reset(void *opaque)
406{
407 E1000State *d = opaque;
408 E1000BaseClass *edc = E1000_DEVICE_GET_CLASS(d);
409 uint8_t *macaddr = d->conf.macaddr.a;
410 int i;
411
412 timer_del(d->autoneg_timer);
413 timer_del(d->mit_timer);
414 d->mit_timer_on = 0;
415 d->mit_irq_level = 0;
416 d->mit_ide = 0;
417 memset(d->phy_reg, 0, sizeof d->phy_reg);
418 memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
419 d->phy_reg[PHY_ID2] = edc->phy_id2;
420 memset(d->mac_reg, 0, sizeof d->mac_reg);
421 memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
422 d->rxbuf_min_shift = 1;
423 memset(&d->tx, 0, sizeof d->tx);
424
425 if (qemu_get_queue(d->nic)->link_down) {
426 e1000_link_down(d);
427 }
428
429 /* Some guests expect pre-initialized RAH/RAL (AddrValid flag + MACaddr) */
430 d->mac_reg[RA] = 0;
431 d->mac_reg[RA + 1] = E1000_RAH_AV;
432 for (i = 0; i < 4; i++) {
433 d->mac_reg[RA] |= macaddr[i] << (8 * i);
434 d->mac_reg[RA + 1] |= (i < 2) ? macaddr[i + 4] << (8 * i) : 0;
435 }
436 qemu_format_nic_info_str(qemu_get_queue(d->nic), macaddr);
437}
438
439static void
440set_ctrl(E1000State *s, int index, uint32_t val)
441{
442 /* RST is self clearing */
443 s->mac_reg[CTRL] = val & ~E1000_CTRL_RST;
444}
445
446static void
447set_rx_control(E1000State *s, int index, uint32_t val)
448{
449 s->mac_reg[RCTL] = val;
450 s->rxbuf_size = rxbufsize(val);
451 s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
452 DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
453 s->mac_reg[RCTL]);
454 qemu_flush_queued_packets(qemu_get_queue(s->nic));
455}
456
457static void
458set_mdic(E1000State *s, int index, uint32_t val)
459{
460 uint32_t data = val & E1000_MDIC_DATA_MASK;
461 uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
462
463 if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy #
464 val = s->mac_reg[MDIC] | E1000_MDIC_ERROR;
465 else if (val & E1000_MDIC_OP_READ) {
466 DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr);
467 if (!(phy_regcap[addr] & PHY_R)) {
468 DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr);
469 val |= E1000_MDIC_ERROR;
470 } else
471 val = (val ^ data) | s->phy_reg[addr];
472 } else if (val & E1000_MDIC_OP_WRITE) {
473 DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data);
474 if (!(phy_regcap[addr] & PHY_W)) {
475 DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
476 val |= E1000_MDIC_ERROR;
477 } else {
478 if (addr < NPHYWRITEOPS && phyreg_writeops[addr]) {
479 phyreg_writeops[addr](s, index, data);
480 } else {
481 s->phy_reg[addr] = data;
482 }
483 }
484 }
485 s->mac_reg[MDIC] = val | E1000_MDIC_READY;
486
487 if (val & E1000_MDIC_INT_EN) {
488 set_ics(s, 0, E1000_ICR_MDAC);
489 }
490}
491
492static uint32_t
493get_eecd(E1000State *s, int index)
494{
495 uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd;
496
497 DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n",
498 s->eecd_state.bitnum_out, s->eecd_state.reading);
499 if (!s->eecd_state.reading ||
500 ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >>
501 ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1)
502 ret |= E1000_EECD_DO;
503 return ret;
504}
505
506static void
507set_eecd(E1000State *s, int index, uint32_t val)
508{
509 uint32_t oldval = s->eecd_state.old_eecd;
510
511 s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
512 E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
513 if (!(E1000_EECD_CS & val)) // CS inactive; nothing to do
514 return;
515 if (E1000_EECD_CS & (val ^ oldval)) { // CS rise edge; reset state
516 s->eecd_state.val_in = 0;
517 s->eecd_state.bitnum_in = 0;
518 s->eecd_state.bitnum_out = 0;
519 s->eecd_state.reading = 0;
520 }
521 if (!(E1000_EECD_SK & (val ^ oldval))) // no clock edge
522 return;
523 if (!(E1000_EECD_SK & val)) { // falling edge
524 s->eecd_state.bitnum_out++;
525 return;
526 }
527 s->eecd_state.val_in <<= 1;
528 if (val & E1000_EECD_DI)
529 s->eecd_state.val_in |= 1;
530 if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) {
531 s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1;
532 s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) ==
533 EEPROM_READ_OPCODE_MICROWIRE);
534 }
535 DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n",
536 s->eecd_state.bitnum_in, s->eecd_state.bitnum_out,
537 s->eecd_state.reading);
538}
539
540static uint32_t
541flash_eerd_read(E1000State *s, int x)
542{
543 unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START;
544
545 if ((s->mac_reg[EERD] & E1000_EEPROM_RW_REG_START) == 0)
546 return (s->mac_reg[EERD]);
547
548 if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG)
549 return (E1000_EEPROM_RW_REG_DONE | r);
550
551 return ((s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) |
552 E1000_EEPROM_RW_REG_DONE | r);
553}
554
555static void
556putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
557{
558 uint32_t sum;
559
560 if (cse && cse < n)
561 n = cse + 1;
562 if (sloc < n-1) {
563 sum = net_checksum_add(n-css, data+css);
564 stw_be_p(data + sloc, net_checksum_finish(sum));
565 }
566}
567
568static inline int
569vlan_enabled(E1000State *s)
570{
571 return ((s->mac_reg[CTRL] & E1000_CTRL_VME) != 0);
572}
573
574static inline int
575vlan_rx_filter_enabled(E1000State *s)
576{
577 return ((s->mac_reg[RCTL] & E1000_RCTL_VFE) != 0);
578}
579
580static inline int
581is_vlan_packet(E1000State *s, const uint8_t *buf)
582{
583 return (be16_to_cpup((uint16_t *)(buf + 12)) ==
584 le16_to_cpu(s->mac_reg[VET]));
585}
586
587static inline int
588is_vlan_txd(uint32_t txd_lower)
589{
590 return ((txd_lower & E1000_TXD_CMD_VLE) != 0);
591}
592
593/* FCS aka Ethernet CRC-32. We don't get it from backends and can't
594 * fill it in, just pad descriptor length by 4 bytes unless guest
595 * told us to strip it off the packet. */
596static inline int
597fcs_len(E1000State *s)
598{
599 return (s->mac_reg[RCTL] & E1000_RCTL_SECRC) ? 0 : 4;
600}
601
602static void
603e1000_send_packet(E1000State *s, const uint8_t *buf, int size)
604{
605 NetClientState *nc = qemu_get_queue(s->nic);
606 if (s->phy_reg[PHY_CTRL] & MII_CR_LOOPBACK) {
607 nc->info->receive(nc, buf, size);
608 } else {
609 qemu_send_packet(nc, buf, size);
610 }
611}
612
613static void
614xmit_seg(E1000State *s)
615{
616 uint16_t len, *sp;
617 unsigned int frames = s->tx.tso_frames, css, sofar, n;
618 struct e1000_tx *tp = &s->tx;
619
620 if (tp->tse && tp->cptse) {
621 css = tp->ipcss;
622 DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
623 frames, tp->size, css);
624 if (tp->ip) { // IPv4
625 stw_be_p(tp->data+css+2, tp->size - css);
626 stw_be_p(tp->data+css+4,
627 be16_to_cpup((uint16_t *)(tp->data+css+4))+frames);
628 } else // IPv6
629 stw_be_p(tp->data+css+4, tp->size - css);
630 css = tp->tucss;
631 len = tp->size - css;
632 DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", tp->tcp, css, len);
633 if (tp->tcp) {
634 sofar = frames * tp->mss;
635 stl_be_p(tp->data+css+4, ldl_be_p(tp->data+css+4)+sofar); /* seq */
636 if (tp->paylen - sofar > tp->mss)
637 tp->data[css + 13] &= ~9; // PSH, FIN
638 } else // UDP
639 stw_be_p(tp->data+css+4, len);
640 if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
641 unsigned int phsum;
642 // add pseudo-header length before checksum calculation
643 sp = (uint16_t *)(tp->data + tp->tucso);
644 phsum = be16_to_cpup(sp) + len;
645 phsum = (phsum >> 16) + (phsum & 0xffff);
646 stw_be_p(sp, phsum);
647 }
648 tp->tso_frames++;
649 }
650
651 if (tp->sum_needed & E1000_TXD_POPTS_TXSM)
652 putsum(tp->data, tp->size, tp->tucso, tp->tucss, tp->tucse);
653 if (tp->sum_needed & E1000_TXD_POPTS_IXSM)
654 putsum(tp->data, tp->size, tp->ipcso, tp->ipcss, tp->ipcse);
655 if (tp->vlan_needed) {
656 memmove(tp->vlan, tp->data, 4);
657 memmove(tp->data, tp->data + 4, 8);
658 memcpy(tp->data + 8, tp->vlan_header, 4);
659 e1000_send_packet(s, tp->vlan, tp->size + 4);
660 } else
661 e1000_send_packet(s, tp->data, tp->size);
662 s->mac_reg[TPT]++;
663 s->mac_reg[GPTC]++;
664 n = s->mac_reg[TOTL];
665 if ((s->mac_reg[TOTL] += s->tx.size) < n)
666 s->mac_reg[TOTH]++;
667}
668
669static void
670process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
671{
672 PCIDevice *d = PCI_DEVICE(s);
673 uint32_t txd_lower = le32_to_cpu(dp->lower.data);
674 uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
675 unsigned int split_size = txd_lower & 0xffff, bytes, sz, op;
676 unsigned int msh = 0xfffff;
677 uint64_t addr;
678 struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
679 struct e1000_tx *tp = &s->tx;
680
681 s->mit_ide |= (txd_lower & E1000_TXD_CMD_IDE);
682 if (dtype == E1000_TXD_CMD_DEXT) { // context descriptor
683 op = le32_to_cpu(xp->cmd_and_length);
684 tp->ipcss = xp->lower_setup.ip_fields.ipcss;
685 tp->ipcso = xp->lower_setup.ip_fields.ipcso;
686 tp->ipcse = le16_to_cpu(xp->lower_setup.ip_fields.ipcse);
687 tp->tucss = xp->upper_setup.tcp_fields.tucss;
688 tp->tucso = xp->upper_setup.tcp_fields.tucso;
689 tp->tucse = le16_to_cpu(xp->upper_setup.tcp_fields.tucse);
690 tp->paylen = op & 0xfffff;
691 tp->hdr_len = xp->tcp_seg_setup.fields.hdr_len;
692 tp->mss = le16_to_cpu(xp->tcp_seg_setup.fields.mss);
693 tp->ip = (op & E1000_TXD_CMD_IP) ? 1 : 0;
694 tp->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0;
695 tp->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0;
696 tp->tso_frames = 0;
697 if (tp->tucso == 0) { // this is probably wrong
698 DBGOUT(TXSUM, "TCP/UDP: cso 0!\n");
699 tp->tucso = tp->tucss + (tp->tcp ? 16 : 6);
700 }
701 return;
702 } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
703 // data descriptor
704 if (tp->size == 0) {
705 tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
706 }
707 tp->cptse = ( txd_lower & E1000_TXD_CMD_TSE ) ? 1 : 0;
708 } else {
709 // legacy descriptor
710 tp->cptse = 0;
711 }
712
713 if (vlan_enabled(s) && is_vlan_txd(txd_lower) &&
714 (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) {
715 tp->vlan_needed = 1;
716 stw_be_p(tp->vlan_header,
717 le16_to_cpu(s->mac_reg[VET]));
718 stw_be_p(tp->vlan_header + 2,
719 le16_to_cpu(dp->upper.fields.special));
720 }
721
722 addr = le64_to_cpu(dp->buffer_addr);
723 if (tp->tse && tp->cptse) {
724 msh = tp->hdr_len + tp->mss;
725 do {
726 bytes = split_size;
727 if (tp->size + bytes > msh)
728 bytes = msh - tp->size;
729
730 bytes = MIN(sizeof(tp->data) - tp->size, bytes);
731 pci_dma_read(d, addr, tp->data + tp->size, bytes);
732 sz = tp->size + bytes;
733 if (sz >= tp->hdr_len && tp->size < tp->hdr_len) {
734 memmove(tp->header, tp->data, tp->hdr_len);
735 }
736 tp->size = sz;
737 addr += bytes;
738 if (sz == msh) {
739 xmit_seg(s);
740 memmove(tp->data, tp->header, tp->hdr_len);
741 tp->size = tp->hdr_len;
742 }
743 } while (split_size -= bytes);
744 } else if (!tp->tse && tp->cptse) {
745 // context descriptor TSE is not set, while data descriptor TSE is set
746 DBGOUT(TXERR, "TCP segmentation error\n");
747 } else {
748 split_size = MIN(sizeof(tp->data) - tp->size, split_size);
749 pci_dma_read(d, addr, tp->data + tp->size, split_size);
750 tp->size += split_size;
751 }
752
753 if (!(txd_lower & E1000_TXD_CMD_EOP))
754 return;
755 if (!(tp->tse && tp->cptse && tp->size < tp->hdr_len)) {
756 xmit_seg(s);
757 }
758 tp->tso_frames = 0;
759 tp->sum_needed = 0;
760 tp->vlan_needed = 0;
761 tp->size = 0;
762 tp->cptse = 0;
763}
764
765static uint32_t
766txdesc_writeback(E1000State *s, dma_addr_t base, struct e1000_tx_desc *dp)
767{
768 PCIDevice *d = PCI_DEVICE(s);
769 uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);
770
771 if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS)))
772 return 0;
773 txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
774 ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
775 dp->upper.data = cpu_to_le32(txd_upper);
776 pci_dma_write(d, base + ((char *)&dp->upper - (char *)dp),
777 &dp->upper, sizeof(dp->upper));
778 return E1000_ICR_TXDW;
779}
780
781static uint64_t tx_desc_base(E1000State *s)
782{
783 uint64_t bah = s->mac_reg[TDBAH];
784 uint64_t bal = s->mac_reg[TDBAL] & ~0xf;
785
786 return (bah << 32) + bal;
787}
788
789static void
790start_xmit(E1000State *s)
791{
792 PCIDevice *d = PCI_DEVICE(s);
793 dma_addr_t base;
794 struct e1000_tx_desc desc;
795 uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE;
796
797 if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) {
798 DBGOUT(TX, "tx disabled\n");
799 return;
800 }
801
802 while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
803 base = tx_desc_base(s) +
804 sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
805 pci_dma_read(d, base, &desc, sizeof(desc));
806
807 DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
808 (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
809 desc.upper.data);
810
811 process_tx_desc(s, &desc);
812 cause |= txdesc_writeback(s, base, &desc);
813
814 if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
815 s->mac_reg[TDH] = 0;
816 /*
817 * the following could happen only if guest sw assigns
818 * bogus values to TDT/TDLEN.
819 * there's nothing too intelligent we could do about this.
820 */
821 if (s->mac_reg[TDH] == tdh_start) {
822 DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
823 tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]);
824 break;
825 }
826 }
827 set_ics(s, 0, cause);
828}
829
830static int
831receive_filter(E1000State *s, const uint8_t *buf, int size)
832{
833 static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
834 static const int mta_shift[] = {4, 3, 2, 0};
835 uint32_t f, rctl = s->mac_reg[RCTL], ra[2], *rp;
836
837 if (is_vlan_packet(s, buf) && vlan_rx_filter_enabled(s)) {
838 uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
839 uint32_t vfta = le32_to_cpup((uint32_t *)(s->mac_reg + VFTA) +
840 ((vid >> 5) & 0x7f));
841 if ((vfta & (1 << (vid & 0x1f))) == 0)
842 return 0;
843 }
844
845 if (rctl & E1000_RCTL_UPE) // promiscuous
846 return 1;
847
848 if ((buf[0] & 1) && (rctl & E1000_RCTL_MPE)) // promiscuous mcast
849 return 1;
850
851 if ((rctl & E1000_RCTL_BAM) && !memcmp(buf, bcast, sizeof bcast))
852 return 1;
853
854 for (rp = s->mac_reg + RA; rp < s->mac_reg + RA + 32; rp += 2) {
855 if (!(rp[1] & E1000_RAH_AV))
856 continue;
857 ra[0] = cpu_to_le32(rp[0]);
858 ra[1] = cpu_to_le32(rp[1]);
859 if (!memcmp(buf, (uint8_t *)ra, 6)) {
860 DBGOUT(RXFILTER,
861 "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x\n",
862 (int)(rp - s->mac_reg - RA)/2,
863 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
864 return 1;
865 }
866 }
867 DBGOUT(RXFILTER, "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x\n",
868 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
869
870 f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
871 f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff;
872 if (s->mac_reg[MTA + (f >> 5)] & (1 << (f & 0x1f)))
873 return 1;
874 DBGOUT(RXFILTER,
875 "dropping, inexact filter mismatch: %02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] %x\n",
876 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
877 (rctl >> E1000_RCTL_MO_SHIFT) & 3, f >> 5,
878 s->mac_reg[MTA + (f >> 5)]);
879
880 return 0;
881}
882
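As an illustration of the inexact (multicast hash) match performed at the end of receive_filter(), the following standalone snippet computes which MTA word and bit a sample destination MAC selects; the address and the RCTL.MO value are made up for the example.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const uint8_t buf[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb }; /* example dest MAC */
    static const int mta_shift[] = { 4, 3, 2, 0 };
    unsigned mo = 0;                       /* RCTL.MO field, example value */
    unsigned f = mta_shift[mo & 3];

    /* Same arithmetic as receive_filter(): 12 bits taken from the top of the address. */
    f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff;
    printf("MTA[%u], bit %u\n", f >> 5, f & 0x1f);
    return 0;
}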
883static void
884e1000_set_link_status(NetClientState *nc)
885{
886 E1000State *s = qemu_get_nic_opaque(nc);
887 uint32_t old_status = s->mac_reg[STATUS];
888
889 if (nc->link_down) {
890 e1000_link_down(s);
891 } else {
892 if (have_autoneg(s) &&
893 !(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
894 /* emulate auto-negotiation if supported */
895 timer_mod(s->autoneg_timer,
896 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
897 } else {
898 e1000_link_up(s);
899 }
900 }
901
902 if (s->mac_reg[STATUS] != old_status)
903 set_ics(s, 0, E1000_ICR_LSC);
904}
905
906static bool e1000_has_rxbufs(E1000State *s, size_t total_size)
907{
908 int bufs;
909 /* Fast-path short packets */
910 if (total_size <= s->rxbuf_size) {
911 return s->mac_reg[RDH] != s->mac_reg[RDT];
912 }
913 if (s->mac_reg[RDH] < s->mac_reg[RDT]) {
914 bufs = s->mac_reg[RDT] - s->mac_reg[RDH];
915 } else if (s->mac_reg[RDH] > s->mac_reg[RDT]) {
916 bufs = s->mac_reg[RDLEN] / sizeof(struct e1000_rx_desc) +
917 s->mac_reg[RDT] - s->mac_reg[RDH];
918 } else {
919 return false;
920 }
921 return total_size <= bufs * s->rxbuf_size;
922}
923
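e1000_has_rxbufs() above counts free receive descriptors between RDH (head) and RDT (tail), wrapping around the ring. A small worked example of that count, with ring size and indices invented for illustration and a 16-byte descriptor assumed:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Illustrative ring: RDLEN holds 16 descriptors of 16 bytes each. */
    uint32_t rdlen = 16 * 16, rdh = 10, rdt = 4, rxbuf_size = 2048;
    uint32_t total_size = 4096;            /* hypothetical packet size + FCS */
    int bufs;

    if (rdh < rdt) {
        bufs = rdt - rdh;
    } else if (rdh > rdt) {
        bufs = rdlen / 16 + rdt - rdh;     /* wrap around: 16 + 4 - 10 = 10 descriptors */
    } else {
        bufs = 0;                          /* head == tail: no free buffers */
    }
    printf("free descriptors: %d, packet fits: %s\n",
           bufs, total_size <= (uint32_t)bufs * rxbuf_size ? "yes" : "no");
    return 0;
}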
924static int
925e1000_can_receive(NetClientState *nc)
926{
927 E1000State *s = qemu_get_nic_opaque(nc);
928
929 return (s->mac_reg[STATUS] & E1000_STATUS_LU) &&
930 (s->mac_reg[RCTL] & E1000_RCTL_EN) &&
931 (s->parent_obj.config[PCI_COMMAND] & PCI_COMMAND_MASTER) &&
932 e1000_has_rxbufs(s, 1);
933}
934
935static uint64_t rx_desc_base(E1000State *s)
936{
937 uint64_t bah = s->mac_reg[RDBAH];
938 uint64_t bal = s->mac_reg[RDBAL] & ~0xf;
939
940 return (bah << 32) + bal;
941}
942
943static ssize_t
944e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt)
945{
946 E1000State *s = qemu_get_nic_opaque(nc);
947 PCIDevice *d = PCI_DEVICE(s);
948 struct e1000_rx_desc desc;
949 dma_addr_t base;
950 unsigned int n, rdt;
951 uint32_t rdh_start;
952 uint16_t vlan_special = 0;
953 uint8_t vlan_status = 0;
954 uint8_t min_buf[MIN_BUF_SIZE];
955 struct iovec min_iov;
956 uint8_t *filter_buf = iov->iov_base;
957 size_t size = iov_size(iov, iovcnt);
958 size_t iov_ofs = 0;
959 size_t desc_offset;
960 size_t desc_size;
961 size_t total_size;
962
963 if (!(s->mac_reg[STATUS] & E1000_STATUS_LU)) {
964 return -1;
965 }
966
967 if (!(s->mac_reg[RCTL] & E1000_RCTL_EN)) {
968 return -1;
969 }
970
971 /* Pad to minimum Ethernet frame length */
972 if (size < sizeof(min_buf)) {
973 iov_to_buf(iov, iovcnt, 0, min_buf, size);
974 memset(&min_buf[size], 0, sizeof(min_buf) - size);
975 min_iov.iov_base = filter_buf = min_buf;
976 min_iov.iov_len = size = sizeof(min_buf);
977 iovcnt = 1;
978 iov = &min_iov;
979 } else if (iov->iov_len < MAXIMUM_ETHERNET_HDR_LEN) {
980 /* This is very unlikely, but may happen. */
981 iov_to_buf(iov, iovcnt, 0, min_buf, MAXIMUM_ETHERNET_HDR_LEN);
982 filter_buf = min_buf;
983 }
984
985 /* Discard oversized packets if !LPE and !SBP. */
986 if ((size > MAXIMUM_ETHERNET_LPE_SIZE ||
987 (size > MAXIMUM_ETHERNET_VLAN_SIZE
988 && !(s->mac_reg[RCTL] & E1000_RCTL_LPE)))
989 && !(s->mac_reg[RCTL] & E1000_RCTL_SBP)) {
990 return size;
991 }
992
993 if (!receive_filter(s, filter_buf, size)) {
994 return size;
995 }
996
997 if (vlan_enabled(s) && is_vlan_packet(s, filter_buf)) {
998 vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(filter_buf
999 + 14)));
1000 iov_ofs = 4;
1001 if (filter_buf == iov->iov_base) {
1002 memmove(filter_buf + 4, filter_buf, 12);
1003 } else {
1004 iov_from_buf(iov, iovcnt, 4, filter_buf, 12);
1005 while (iov->iov_len <= iov_ofs) {
1006 iov_ofs -= iov->iov_len;
1007 iov++;
1008 }
1009 }
1010 vlan_status = E1000_RXD_STAT_VP;
1011 size -= 4;
1012 }
1013
1014 rdh_start = s->mac_reg[RDH];
1015 desc_offset = 0;
1016 total_size = size + fcs_len(s);
1017 if (!e1000_has_rxbufs(s, total_size)) {
1018 set_ics(s, 0, E1000_ICS_RXO);
1019 return -1;
1020 }
1021 do {
1022 desc_size = total_size - desc_offset;
1023 if (desc_size > s->rxbuf_size) {
1024 desc_size = s->rxbuf_size;
1025 }
1026 base = rx_desc_base(s) + sizeof(desc) * s->mac_reg[RDH];
1027 pci_dma_read(d, base, &desc, sizeof(desc));
1028 desc.special = vlan_special;
1029 desc.status |= (vlan_status | E1000_RXD_STAT_DD);
1030 if (desc.buffer_addr) {
1031 if (desc_offset < size) {
1032 size_t iov_copy;
1033 hwaddr ba = le64_to_cpu(desc.buffer_addr);
1034 size_t copy_size = size - desc_offset;
1035 if (copy_size > s->rxbuf_size) {
1036 copy_size = s->rxbuf_size;
1037 }
1038 do {
1039 iov_copy = MIN(copy_size, iov->iov_len - iov_ofs);
1040 pci_dma_write(d, ba, iov->iov_base + iov_ofs, iov_copy);
1041 copy_size -= iov_copy;
1042 ba += iov_copy;
1043 iov_ofs += iov_copy;
1044 if (iov_ofs == iov->iov_len) {
1045 iov++;
1046 iov_ofs = 0;
1047 }
1048 } while (copy_size);
1049 }
1050 desc_offset += desc_size;
1051 desc.length = cpu_to_le16(desc_size);
1052 if (desc_offset >= total_size) {
1053 desc.status |= E1000_RXD_STAT_EOP | E1000_RXD_STAT_IXSM;
1054 } else {
1055 /* Guest zeroing out status is not a hardware requirement.
1056 Clear EOP in case guest didn't do it. */
1057 desc.status &= ~E1000_RXD_STAT_EOP;
1058 }
1059 } else { // as per intel docs; skip descriptors with null buf addr
1060 DBGOUT(RX, "Null RX descriptor!!\n");
1061 }
1062 pci_dma_write(d, base, &desc, sizeof(desc));
1063
1064 if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
1065 s->mac_reg[RDH] = 0;
1066 /* see comment in start_xmit; same here */
1067 if (s->mac_reg[RDH] == rdh_start) {
1068 DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
1069 rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
1070 set_ics(s, 0, E1000_ICS_RXO);
1071 return -1;
1072 }
1073 } while (desc_offset < total_size);
1074
1075 s->mac_reg[GPRC]++;
1076 s->mac_reg[TPR]++;
1077 /* TOR - Total Octets Received:
1078 * This register includes bytes received in a packet from the <Destination
1079 * Address> field through the <CRC> field, inclusively.
1080 */
1081 n = s->mac_reg[TORL] + size + /* Always include FCS length. */ 4;
1082 if (n < s->mac_reg[TORL])
1083 s->mac_reg[TORH]++;
1084 s->mac_reg[TORL] = n;
1085
1086 n = E1000_ICS_RXT0;
1087 if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
1088 rdt += s->mac_reg[RDLEN] / sizeof(desc);
1089 if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
1090 s->rxbuf_min_shift)
1091 n |= E1000_ICS_RXDMT0;
1092
1093 set_ics(s, 0, n);
1094
1095 return size;
1096}
1097
1098static ssize_t
1099e1000_receive(NetClientState *nc, const uint8_t *buf, size_t size)
1100{
1101 const struct iovec iov = {
1102 .iov_base = (uint8_t *)buf,
1103 .iov_len = size
1104 };
1105
1106 return e1000_receive_iov(nc, &iov, 1);
1107}
1108
1109static uint32_t
1110mac_readreg(E1000State *s, int index)
1111{
1112 return s->mac_reg[index];
1113}
1114
1115static uint32_t
1116mac_icr_read(E1000State *s, int index)
1117{
1118 uint32_t ret = s->mac_reg[ICR];
1119
1120 DBGOUT(INTERRUPT, "ICR read: %x\n", ret);
1121 set_interrupt_cause(s, 0, 0);
1122 return ret;
1123}
1124
1125static uint32_t
1126mac_read_clr4(E1000State *s, int index)
1127{
1128 uint32_t ret = s->mac_reg[index];
1129
1130 s->mac_reg[index] = 0;
1131 return ret;
1132}
1133
1134static uint32_t
1135mac_read_clr8(E1000State *s, int index)
1136{
1137 uint32_t ret = s->mac_reg[index];
1138
1139 s->mac_reg[index] = 0;
1140 s->mac_reg[index-1] = 0;
1141 return ret;
1142}
1143
1144static void
1145mac_writereg(E1000State *s, int index, uint32_t val)
1146{
1147 uint32_t macaddr[2];
1148
1149 s->mac_reg[index] = val;
1150
1151 if (index == RA + 1) {
1152 macaddr[0] = cpu_to_le32(s->mac_reg[RA]);
1153 macaddr[1] = cpu_to_le32(s->mac_reg[RA + 1]);
1154 qemu_format_nic_info_str(qemu_get_queue(s->nic), (uint8_t *)macaddr);
1155 }
1156}
1157
1158static void
1159set_rdt(E1000State *s, int index, uint32_t val)
1160{
1161 s->mac_reg[index] = val & 0xffff;
1162 if (e1000_has_rxbufs(s, 1)) {
1163 qemu_flush_queued_packets(qemu_get_queue(s->nic));
1164 }
1165}
1166
1167static void
1168set_16bit(E1000State *s, int index, uint32_t val)
1169{
1170 s->mac_reg[index] = val & 0xffff;
1171}
1172
1173static void
1174set_dlen(E1000State *s, int index, uint32_t val)
1175{
1176 s->mac_reg[index] = val & 0xfff80;
1177}
1178
1179static void
1180set_tctl(E1000State *s, int index, uint32_t val)
1181{
1182 s->mac_reg[index] = val;
1183 s->mac_reg[TDT] &= 0xffff;
1184 start_xmit(s);
1185}
1186
1187static void
1188set_icr(E1000State *s, int index, uint32_t val)
1189{
1190 DBGOUT(INTERRUPT, "set_icr %x\n", val);
1191 set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);
1192}
1193
1194static void
1195set_imc(E1000State *s, int index, uint32_t val)
1196{
1197 s->mac_reg[IMS] &= ~val;
1198 set_ics(s, 0, 0);
1199}
1200
1201static void
1202set_ims(E1000State *s, int index, uint32_t val)
1203{
1204 s->mac_reg[IMS] |= val;
1205 set_ics(s, 0, 0);
1206}
1207
1208#define getreg(x) [x] = mac_readreg
1209static uint32_t (*macreg_readops[])(E1000State *, int) = {
1210 getreg(PBA), getreg(RCTL), getreg(TDH), getreg(TXDCTL),
1211 getreg(WUFC), getreg(TDT), getreg(CTRL), getreg(LEDCTL),
1212 getreg(MANC), getreg(MDIC), getreg(SWSM), getreg(STATUS),
1213 getreg(TORL), getreg(TOTL), getreg(IMS), getreg(TCTL),
1214 getreg(RDH), getreg(RDT), getreg(VET), getreg(ICS),
1215 getreg(TDBAL), getreg(TDBAH), getreg(RDBAH), getreg(RDBAL),
1216 getreg(TDLEN), getreg(RDLEN), getreg(RDTR), getreg(RADV),
1217 getreg(TADV), getreg(ITR),
1218
1219 [TOTH] = mac_read_clr8, [TORH] = mac_read_clr8, [GPRC] = mac_read_clr4,
1220 [GPTC] = mac_read_clr4, [TPR] = mac_read_clr4, [TPT] = mac_read_clr4,
1221 [ICR] = mac_icr_read, [EECD] = get_eecd, [EERD] = flash_eerd_read,
1222 [CRCERRS ... MPC] = &mac_readreg,
1223 [RA ... RA+31] = &mac_readreg,
1224 [MTA ... MTA+127] = &mac_readreg,
1225 [VFTA ... VFTA+127] = &mac_readreg,
1226};
1227enum { NREADOPS = ARRAY_SIZE(macreg_readops) };
1228
1229#define putreg(x) [x] = mac_writereg
1230static void (*macreg_writeops[])(E1000State *, int, uint32_t) = {
1231 putreg(PBA), putreg(EERD), putreg(SWSM), putreg(WUFC),
1232 putreg(TDBAL), putreg(TDBAH), putreg(TXDCTL), putreg(RDBAH),
1233 putreg(RDBAL), putreg(LEDCTL), putreg(VET),
1234 [TDLEN] = set_dlen, [RDLEN] = set_dlen, [TCTL] = set_tctl,
1235 [TDT] = set_tctl, [MDIC] = set_mdic, [ICS] = set_ics,
1236 [TDH] = set_16bit, [RDH] = set_16bit, [RDT] = set_rdt,
1237 [IMC] = set_imc, [IMS] = set_ims, [ICR] = set_icr,
1238 [EECD] = set_eecd, [RCTL] = set_rx_control, [CTRL] = set_ctrl,
1239 [RDTR] = set_16bit, [RADV] = set_16bit, [TADV] = set_16bit,
1240 [ITR] = set_16bit,
1241 [RA ... RA+31] = &mac_writereg,
1242 [MTA ... MTA+127] = &mac_writereg,
1243 [VFTA ... VFTA+127] = &mac_writereg,
1244};
1245
1246enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };
1247
1248static void
1249e1000_mmio_write(void *opaque, hwaddr addr, uint64_t val,
1250 unsigned size)
1251{
1252 E1000State *s = opaque;
1253 unsigned int index = (addr & 0x1ffff) >> 2;
1254
1255 if (index < NWRITEOPS && macreg_writeops[index]) {
1256 macreg_writeops[index](s, index, val);
1257 } else if (index < NREADOPS && macreg_readops[index]) {
1258 DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04"PRIx64"\n", index<<2, val);
1259 } else {
1260 DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08"PRIx64"\n",
1261 index<<2, val);
1262 }
1263}
1264
1265static uint64_t
1266e1000_mmio_read(void *opaque, hwaddr addr, unsigned size)
1267{
1268 E1000State *s = opaque;
1269 unsigned int index = (addr & 0x1ffff) >> 2;
1270
1271 if (index < NREADOPS && macreg_readops[index])
1272 {
1273 return macreg_readops[index](s, index);
1274 }
1275 DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
1276 return 0;
1277}
1278
1279static const MemoryRegionOps e1000_mmio_ops = {
1280 .read = e1000_mmio_read,
1281 .write = e1000_mmio_write,
1282 .endianness = DEVICE_LITTLE_ENDIAN,
1283 .impl = {
1284 .min_access_size = 4,
1285 .max_access_size = 4,
1286 },
1287};
1288
1289static uint64_t e1000_io_read(void *opaque, hwaddr addr,
1290 unsigned size)
1291{
1292 E1000State *s = opaque;
1293
1294 (void)s;
1295 return 0;
1296}
1297
1298static void e1000_io_write(void *opaque, hwaddr addr,
1299 uint64_t val, unsigned size)
1300{
1301 E1000State *s = opaque;
1302
1303 (void)s;
1304}
1305
1306static const MemoryRegionOps e1000_io_ops = {
1307 .read = e1000_io_read,
1308 .write = e1000_io_write,
1309 .endianness = DEVICE_LITTLE_ENDIAN,
1310};
1311
1312static bool is_version_1(void *opaque, int version_id)
1313{
1314 return version_id == 1;
1315}
1316
1317static void e1000_pre_save(void *opaque)
1318{
1319 E1000State *s = opaque;
1320 NetClientState *nc = qemu_get_queue(s->nic);
1321
1322 /* If the mitigation timer is active, emulate a timeout now. */
1323 if (s->mit_timer_on) {
1324 e1000_mit_timer(s);
1325 }
1326
1327 /*
1328 * If link is down and auto-negotiation is supported and ongoing,
1329 * complete auto-negotiation immediately. This allows us to look
1330 * at MII_SR_AUTONEG_COMPLETE to infer link status on load.
1331 */
1332 if (nc->link_down && have_autoneg(s)) {
1333 s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
1334 }
1335}
1336
1337static int e1000_post_load(void *opaque, int version_id)
1338{
1339 E1000State *s = opaque;
1340 NetClientState *nc = qemu_get_queue(s->nic);
1341
1342 if (!(s->compat_flags & E1000_FLAG_MIT)) {
1343 s->mac_reg[ITR] = s->mac_reg[RDTR] = s->mac_reg[RADV] =
1344 s->mac_reg[TADV] = 0;
1345 s->mit_irq_level = false;
1346 }
1347 s->mit_ide = 0;
1348 s->mit_timer_on = false;
1349
1350 /* nc.link_down can't be migrated, so infer link_down according
1351 * to link status bit in mac_reg[STATUS].
1352 * Alternatively, restart link negotiation if it was in progress. */
1353 nc->link_down = (s->mac_reg[STATUS] & E1000_STATUS_LU) == 0;
1354
1355 if (have_autoneg(s) &&
1356 !(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
1357 nc->link_down = false;
1358 timer_mod(s->autoneg_timer,
1359 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
1360 }
1361
1362 return 0;
1363}
1364
1365static bool e1000_mit_state_needed(void *opaque)
1366{
1367 E1000State *s = opaque;
1368
1369 return s->compat_flags & E1000_FLAG_MIT;
1370}
1371
1372static const VMStateDescription vmstate_e1000_mit_state = {
1373 .name = "e1000/mit_state",
1374 .version_id = 1,
1375 .minimum_version_id = 1,
1376 .needed = e1000_mit_state_needed,
1377 .fields = (VMStateField[]) {
1378 VMSTATE_UINT32(mac_reg[RDTR], E1000State),
1379 VMSTATE_UINT32(mac_reg[RADV], E1000State),
1380 VMSTATE_UINT32(mac_reg[TADV], E1000State),
1381 VMSTATE_UINT32(mac_reg[ITR], E1000State),
1382 VMSTATE_BOOL(mit_irq_level, E1000State),
1383 VMSTATE_END_OF_LIST()
1384 }
1385};
1386
1387static const VMStateDescription vmstate_e1000 = {
1388 .name = "e1000",
1389 .version_id = 2,
1390 .minimum_version_id = 1,
1391 .pre_save = e1000_pre_save,
1392 .post_load = e1000_post_load,
1393 .fields = (VMStateField[]) {
1394 VMSTATE_PCI_DEVICE(parent_obj, E1000State),
1395 VMSTATE_UNUSED_TEST(is_version_1, 4), /* was instance id */
1396 VMSTATE_UNUSED(4), /* Was mmio_base. */
1397 VMSTATE_UINT32(rxbuf_size, E1000State),
1398 VMSTATE_UINT32(rxbuf_min_shift, E1000State),
1399 VMSTATE_UINT32(eecd_state.val_in, E1000State),
1400 VMSTATE_UINT16(eecd_state.bitnum_in, E1000State),
1401 VMSTATE_UINT16(eecd_state.bitnum_out, E1000State),
1402 VMSTATE_UINT16(eecd_state.reading, E1000State),
1403 VMSTATE_UINT32(eecd_state.old_eecd, E1000State),
1404 VMSTATE_UINT8(tx.ipcss, E1000State),
1405 VMSTATE_UINT8(tx.ipcso, E1000State),
1406 VMSTATE_UINT16(tx.ipcse, E1000State),
1407 VMSTATE_UINT8(tx.tucss, E1000State),
1408 VMSTATE_UINT8(tx.tucso, E1000State),
1409 VMSTATE_UINT16(tx.tucse, E1000State),
1410 VMSTATE_UINT32(tx.paylen, E1000State),
1411 VMSTATE_UINT8(tx.hdr_len, E1000State),
1412 VMSTATE_UINT16(tx.mss, E1000State),
1413 VMSTATE_UINT16(tx.size, E1000State),
1414 VMSTATE_UINT16(tx.tso_frames, E1000State),
1415 VMSTATE_UINT8(tx.sum_needed, E1000State),
1416 VMSTATE_INT8(tx.ip, E1000State),
1417 VMSTATE_INT8(tx.tcp, E1000State),
1418 VMSTATE_BUFFER(tx.header, E1000State),
1419 VMSTATE_BUFFER(tx.data, E1000State),
1420 VMSTATE_UINT16_ARRAY(eeprom_data, E1000State, 64),
1421 VMSTATE_UINT16_ARRAY(phy_reg, E1000State, 0x20),
1422 VMSTATE_UINT32(mac_reg[CTRL], E1000State),
1423 VMSTATE_UINT32(mac_reg[EECD], E1000State),
1424 VMSTATE_UINT32(mac_reg[EERD], E1000State),
1425 VMSTATE_UINT32(mac_reg[GPRC], E1000State),
1426 VMSTATE_UINT32(mac_reg[GPTC], E1000State),
1427 VMSTATE_UINT32(mac_reg[ICR], E1000State),
1428 VMSTATE_UINT32(mac_reg[ICS], E1000State),
1429 VMSTATE_UINT32(mac_reg[IMC], E1000State),
1430 VMSTATE_UINT32(mac_reg[IMS], E1000State),
1431 VMSTATE_UINT32(mac_reg[LEDCTL], E1000State),
1432 VMSTATE_UINT32(mac_reg[MANC], E1000State),
1433 VMSTATE_UINT32(mac_reg[MDIC], E1000State),
1434 VMSTATE_UINT32(mac_reg[MPC], E1000State),
1435 VMSTATE_UINT32(mac_reg[PBA], E1000State),
1436 VMSTATE_UINT32(mac_reg[RCTL], E1000State),
1437 VMSTATE_UINT32(mac_reg[RDBAH], E1000State),
1438 VMSTATE_UINT32(mac_reg[RDBAL], E1000State),
1439 VMSTATE_UINT32(mac_reg[RDH], E1000State),
1440 VMSTATE_UINT32(mac_reg[RDLEN], E1000State),
1441 VMSTATE_UINT32(mac_reg[RDT], E1000State),
1442 VMSTATE_UINT32(mac_reg[STATUS], E1000State),
1443 VMSTATE_UINT32(mac_reg[SWSM], E1000State),
1444 VMSTATE_UINT32(mac_reg[TCTL], E1000State),
1445 VMSTATE_UINT32(mac_reg[TDBAH], E1000State),
1446 VMSTATE_UINT32(mac_reg[TDBAL], E1000State),
1447 VMSTATE_UINT32(mac_reg[TDH], E1000State),
1448 VMSTATE_UINT32(mac_reg[TDLEN], E1000State),
1449 VMSTATE_UINT32(mac_reg[TDT], E1000State),
1450 VMSTATE_UINT32(mac_reg[TORH], E1000State),
1451 VMSTATE_UINT32(mac_reg[TORL], E1000State),
1452 VMSTATE_UINT32(mac_reg[TOTH], E1000State),
1453 VMSTATE_UINT32(mac_reg[TOTL], E1000State),
1454 VMSTATE_UINT32(mac_reg[TPR], E1000State),
1455 VMSTATE_UINT32(mac_reg[TPT], E1000State),
1456 VMSTATE_UINT32(mac_reg[TXDCTL], E1000State),
1457 VMSTATE_UINT32(mac_reg[WUFC], E1000State),
1458 VMSTATE_UINT32(mac_reg[VET], E1000State),
1459 VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, RA, 32),
1460 VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, MTA, 128),
1461 VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA, 128),
1462 VMSTATE_END_OF_LIST()
1463 },
1464 .subsections = (const VMStateDescription*[]) {
1465 &vmstate_e1000_mit_state,
1466 NULL
1467 }
1468};
1469
1470/*
1471 * EEPROM contents documented in Tables 5-2 and 5-3, pp. 98-102.
1472 * Note: A valid DevId will be inserted during pci_e1000_init().
1473 */
1474static const uint16_t e1000_eeprom_template[64] = {
1475 0x0000, 0x0000, 0x0000, 0x0000, 0xffff, 0x0000, 0x0000, 0x0000,
1476 0x3000, 0x1000, 0x6403, 0 /*DevId*/, 0x8086, 0 /*DevId*/, 0x8086, 0x3040,
1477 0x0008, 0x2000, 0x7e14, 0x0048, 0x1000, 0x00d8, 0x0000, 0x2700,
1478 0x6cc9, 0x3150, 0x0722, 0x040b, 0x0984, 0x0000, 0xc000, 0x0706,
1479 0x1008, 0x0000, 0x0f04, 0x7fff, 0x4d01, 0xffff, 0xffff, 0xffff,
1480 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
1481 0x0100, 0x4000, 0x121c, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
1482 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0000,
1483};
1484
1485/* PCI interface */
1486
1487static void
1488e1000_mmio_setup(E1000State *d)
1489{
1490 int i;
1491 const uint32_t excluded_regs[] = {
1492 E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS,
1493 E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE
1494 };
1495
1496 memory_region_init_io(&d->mmio, OBJECT(d), &e1000_mmio_ops, d,
1497 "e1000-mmio", PNPMMIO_SIZE);
1498 memory_region_add_coalescing(&d->mmio, 0, excluded_regs[0]);
1499 for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++)
1500 memory_region_add_coalescing(&d->mmio, excluded_regs[i] + 4,
1501 excluded_regs[i+1] - excluded_regs[i] - 4);
1502 memory_region_init_io(&d->io, OBJECT(d), &e1000_io_ops, d, "e1000-io", IOPORT_SIZE);
1503}
1504
1505static void
1506pci_e1000_uninit(PCIDevice *dev)
1507{
1508 E1000State *d = E1000(dev);
1509
1510 timer_del(d->autoneg_timer);
1511 timer_free(d->autoneg_timer);
1512 timer_del(d->mit_timer);
1513 timer_free(d->mit_timer);
1514 qemu_del_nic(d->nic);
1515}
1516
1517static NetClientInfo net_e1000_info = {
1518 .type = NET_CLIENT_OPTIONS_KIND_NIC,
1519 .size = sizeof(NICState),
1520 .can_receive = e1000_can_receive,
1521 .receive = e1000_receive,
1522 .receive_iov = e1000_receive_iov,
1523 .link_status_changed = e1000_set_link_status,
1524};
1525
1526static void e1000_write_config(PCIDevice *pci_dev, uint32_t address,
1527 uint32_t val, int len)
1528{
1529 E1000State *s = E1000(pci_dev);
1530
1531 pci_default_write_config(pci_dev, address, val, len);
1532
1533 if (range_covers_byte(address, len, PCI_COMMAND) &&
1534 (pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
1535 qemu_flush_queued_packets(qemu_get_queue(s->nic));
1536 }
1537}
1538
1539
1540static void pci_e1000_realize(PCIDevice *pci_dev, Error **errp)
1541{
1542 DeviceState *dev = DEVICE(pci_dev);
1543 E1000State *d = E1000(pci_dev);
1544 PCIDeviceClass *pdc = PCI_DEVICE_GET_CLASS(pci_dev);
1545 uint8_t *pci_conf;
1546 uint16_t checksum = 0;
1547 int i;
1548 uint8_t *macaddr;
1549
1550 pci_dev->config_write = e1000_write_config;
1551
1552 pci_conf = pci_dev->config;
1553
1554 /* TODO: RST# value should be 0, PCI spec 6.2.4 */
1555 pci_conf[PCI_CACHE_LINE_SIZE] = 0x10;
1556
1557 pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */
1558
1559 e1000_mmio_setup(d);
1560
1561 pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio);
1562
1563 pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->io);
1564
1565 memmove(d->eeprom_data, e1000_eeprom_template,
1566 sizeof e1000_eeprom_template);
1567 qemu_macaddr_default_if_unset(&d->conf.macaddr);
1568 macaddr = d->conf.macaddr.a;
1569 for (i = 0; i < 3; i++)
1570 d->eeprom_data[i] = (macaddr[2*i+1]<<8) | macaddr[2*i];
1571 d->eeprom_data[11] = d->eeprom_data[13] = pdc->device_id;
1572 for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
1573 checksum += d->eeprom_data[i];
1574 checksum = (uint16_t) EEPROM_SUM - checksum;
1575 d->eeprom_data[EEPROM_CHECKSUM_REG] = checksum;
1576
1577 d->nic = qemu_new_nic(&net_e1000_info, &d->conf,
1578 object_get_typename(OBJECT(d)), dev->id, d);
1579
1580 qemu_format_nic_info_str(qemu_get_queue(d->nic), macaddr);
1581
1582 d->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, e1000_autoneg_timer, d);
1583 d->mit_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000_mit_timer, d);
1584}
1585
1586static void qdev_e1000_reset(DeviceState *dev)
1587{
1588 E1000State *d = E1000(dev);
1589 e1000_reset(d);
1590}
1591
1592static Property e1000_properties[] = {
1593 DEFINE_NIC_PROPERTIES(E1000State, conf),
1594 DEFINE_PROP_BIT("autonegotiation", E1000State,
1595 compat_flags, E1000_FLAG_AUTONEG_BIT, true),
1596 DEFINE_PROP_BIT("mitigation", E1000State,
1597 compat_flags, E1000_FLAG_MIT_BIT, true),
1598 DEFINE_PROP_END_OF_LIST(),
1599};
1600
1601typedef struct E1000Info {
1602 const char *name;
1603 uint16_t device_id;
1604 uint8_t revision;
1605 uint16_t phy_id2;
1606} E1000Info;
1607
1608static void e1000_class_init(ObjectClass *klass, void *data)
1609{
1610 DeviceClass *dc = DEVICE_CLASS(klass);
1611 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
1612 E1000BaseClass *e = E1000_DEVICE_CLASS(klass);
1613 const E1000Info *info = data;
1614
1615 k->realize = pci_e1000_realize;
1616 k->exit = pci_e1000_uninit;
1617 k->romfile = "efi-e1000.rom";
1618 k->vendor_id = PCI_VENDOR_ID_INTEL;
1619 k->device_id = info->device_id;
1620 k->revision = info->revision;
1621 e->phy_id2 = info->phy_id2;
1622 k->class_id = PCI_CLASS_NETWORK_ETHERNET;
1623 set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
1624 dc->desc = "Intel Gigabit Ethernet";
1625 dc->reset = qdev_e1000_reset;
1626 dc->vmsd = &vmstate_e1000;
1627 dc->props = e1000_properties;
1628}
1629
1630static void e1000_instance_init(Object *obj)
1631{
1632 E1000State *n = E1000(obj);
1633 device_add_bootindex_property(obj, &n->conf.bootindex,
1634 "bootindex", "/ethernet-phy@0",
1635 DEVICE(n), NULL);
1636}
1637
1638static const TypeInfo e1000_base_info = {
1639 .name = TYPE_E1000_BASE,
1640 .parent = TYPE_PCI_DEVICE,
1641 .instance_size = sizeof(E1000State),
1642 .instance_init = e1000_instance_init,
1643 .class_size = sizeof(E1000BaseClass),
1644 .abstract = true,
1645};
1646
1647static const E1000Info e1000_devices[] = {
1648 {
1649 .name = "e1000-82540em",
1650 .device_id = E1000_DEV_ID_82540EM,
1651 .revision = 0x03,
1652 .phy_id2 = E1000_PHY_ID2_8254xx_DEFAULT,
1653 },
1654 {
1655 .name = "e1000-82544gc",
1656 .device_id = E1000_DEV_ID_82544GC_COPPER,
1657 .revision = 0x03,
1658 .phy_id2 = E1000_PHY_ID2_82544x,
1659 },
1660 {
1661 .name = "e1000-82545em",
1662 .device_id = E1000_DEV_ID_82545EM_COPPER,
1663 .revision = 0x03,
1664 .phy_id2 = E1000_PHY_ID2_8254xx_DEFAULT,
1665 },
1666};
1667
1668static const TypeInfo e1000_default_info = {
1669 .name = "e1000",
1670 .parent = "e1000-82540em",
1671};
1672
1673static void e1000_register_types(void)
1674{
1675 int i;
1676
1677 type_register_static(&e1000_base_info);
1678 for (i = 0; i < ARRAY_SIZE(e1000_devices); i++) {
1679 const E1000Info *info = &e1000_devices[i];
1680 TypeInfo type_info = {};
1681
1682 type_info.name = info->name;
1683 type_info.parent = TYPE_E1000_BASE;
1684 type_info.class_data = (void *)info;
1685 type_info.class_init = e1000_class_init;
1686 type_info.instance_init = e1000_instance_init;
1687
1688 type_register(&type_info);
1689 }
1690 type_register_static(&e1000_default_info);
1691}
1692
1693type_init(e1000_register_types)