/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2010 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos_params.h>
#include <linux/pm_runtime.h>
#include <linux/aer.h>

#include "e1000.h"

#define DRV_EXTRAVERSION "-k2"

#define DRV_VERSION "1.2.7" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571]		= &e1000_82571_info,
	[board_82572]		= &e1000_82572_info,
	[board_82573]		= &e1000_82573_info,
	[board_82574]		= &e1000_82574_info,
	[board_82583]		= &e1000_82583_info,
	[board_80003es2lan]	= &e1000_es2_info,
	[board_ich8lan]		= &e1000_ich8_info,
	[board_ich9lan]		= &e1000_ich9_info,
	[board_ich10lan]	= &e1000_ich10_info,
	[board_pchlan]		= &e1000_pch_info,
	[board_pch2lan]		= &e1000_pch2_info,
};
struct e1000_reg_info {
	u32 ofs;
	char *name;
};

#define E1000_RDFH	0x02410 /* Rx Data FIFO Head - RW */
#define E1000_RDFT	0x02418 /* Rx Data FIFO Tail - RW */
#define E1000_RDFHS	0x02420 /* Rx Data FIFO Head Saved - RW */
#define E1000_RDFTS	0x02428 /* Rx Data FIFO Tail Saved - RW */
#define E1000_RDFPC	0x02430 /* Rx Data FIFO Packet Count - RW */

#define E1000_TDFH	0x03410 /* Tx Data FIFO Head - RW */
#define E1000_TDFT	0x03418 /* Tx Data FIFO Tail - RW */
#define E1000_TDFHS	0x03420 /* Tx Data FIFO Head Saved - RW */
#define E1000_TDFTS	0x03428 /* Tx Data FIFO Tail Saved - RW */
#define E1000_TDFPC	0x03430 /* Tx Data FIFO Packet Count - RW */
static const struct e1000_reg_info e1000_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* Rx Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN, "RDLEN"},
	{E1000_RDTR, "RDTR"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL, "RDBAL"},
	{E1000_RDBAH, "RDBAH"},
	{E1000_RDFH, "RDFH"},
	{E1000_RDFT, "RDFT"},
	{E1000_RDFHS, "RDFHS"},
	{E1000_RDFTS, "RDFTS"},
	{E1000_RDFPC, "RDFPC"},

	/* Tx Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL, "TDBAL"},
	{E1000_TDBAH, "TDBAH"},
	{E1000_TDLEN, "TDLEN"},
	{E1000_TIDV, "TIDV"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TADV, "TADV"},
	{E1000_TARC(0), "TARC"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFTS, "TDFTS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{0, NULL}
};
/*
 * e1000_regdump - register printout routine
 */
static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_RXDCTL(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TXDCTL(n));
		break;
	case E1000_TARC(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TARC(n));
		break;
	default:
		printk(KERN_INFO "%-15s %08x\n",
		       reginfo->name, __er32(hw, reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
	printk(KERN_INFO "%-15s ", rname);
	for (n = 0; n < 2; n++)
		printk(KERN_CONT "%08x ", regs[n]);
	printk(KERN_CONT "\n");
}
/*
 * e1000e_dump - Print registers, tx-ring and rx-ring
 */
static void e1000e_dump(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_reg_info *reginfo;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct e1000_buffer *buffer_info;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	union e1000_rx_desc_packet_split *rx_desc_ps;
	struct e1000_rx_desc *rx_desc;
	struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		printk(KERN_INFO "Device Name     state            "
		       "trans_start      last_rx\n");
		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
		       netdev->name, netdev->state, netdev->trans_start,
		       netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	printk(KERN_INFO " Register Name   Value\n");
	for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
	     reginfo->name; reginfo++) {
		e1000_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
	       " leng ntw timestamp\n");
	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
	printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
	       0, tx_ring->next_to_use, tx_ring->next_to_clean,
	       (unsigned long long)buffer_info->dma,
	       buffer_info->length,
	       buffer_info->next_to_watch,
	       (unsigned long long)buffer_info->time_stamp);

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;
	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
	 *
	 * Legacy Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
	 *   +--------------------------------------------------------------+
	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
	 *   +--------------------------------------------------------------+
	 *   63       48 47        36 35    32 31     24 23    16 15        0
	 *
	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
	 *   63      48 47    40 39       32 31             16 15    8 7      0
	 *   +----------------------------------------------------------------+
	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
	 *   +----------------------------------------------------------------+
	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
	 *   +----------------------------------------------------------------+
	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
	 *
	 * Extended Data Descriptor (DTYP=0x1)
	 *   +----------------------------------------------------------------+
	 * 0 |                     Buffer Address [63:0]                      |
	 *   +----------------------------------------------------------------+
	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
	 *   +----------------------------------------------------------------+
	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
	 */
	printk(KERN_INFO "Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen]"
	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
	       "<-- Legacy format\n");
	printk(KERN_INFO "Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
	       "<-- Ext Context format\n");
	printk(KERN_INFO "Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen]"
	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
	       "<-- Ext Data format\n");
	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];
		u0 = (struct my_u0 *)tx_desc;
		printk(KERN_INFO "T%c[0x%03X]    %016llX %016llX %016llX "
		       "%04X  %3X %016llX %p",
		       (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
			((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i,
		       (unsigned long long)le64_to_cpu(u0->a),
		       (unsigned long long)le64_to_cpu(u0->b),
		       (unsigned long long)buffer_info->dma,
		       buffer_info->length, buffer_info->next_to_watch,
		       (unsigned long long)buffer_info->time_stamp,
		       buffer_info->skb);
		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
			printk(KERN_CONT " NTC/U\n");
		else if (i == tx_ring->next_to_use)
			printk(KERN_CONT " NTU\n");
		else if (i == tx_ring->next_to_clean)
			printk(KERN_CONT " NTC\n");
		else
			printk(KERN_CONT "\n");

		if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
				       16, 1, phys_to_virt(buffer_info->dma),
				       buffer_info->length, true);
	}
	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC]\n");
	printk(KERN_INFO " %5d %5X %5X\n", 0,
	       rx_ring->next_to_use, rx_ring->next_to_clean);

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
	switch (adapter->rx_ps_pages) {
	case 1:
	case 2:
	case 3:
		/* [Extended] Packet Split Receive Descriptor Format
		 *
		 *    +-----------------------------------------------------+
		 *  0 |                Buffer Address 0 [63:0]              |
		 *    +-----------------------------------------------------+
		 *  8 |                Buffer Address 1 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 16 |                Buffer Address 2 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 24 |                Buffer Address 3 [63:0]              |
		 *    +-----------------------------------------------------+
		 */
		printk(KERN_INFO "R  [desc]      [buffer 0 63:0 ] "
		       "[buffer 1 63:0 ] "
		       "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] "
		       "[bi->skb] <-- Ext Pkt Split format\n");
		/* [Extended] Receive Descriptor (Write-Back) Format
		 *
		 *   63       48 47    32 31     13 12    8 7    4 3        0
		 *   +------------------------------------------------------+
		 * 0 | Packet   | IP     |  Rsvd   | MRQ   | Rsvd | MRQ RSS |
		 *   | Checksum | Ident  |         | Queue |      |  Type   |
		 *   +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 *   +------------------------------------------------------+
		 *   63       48 47    32 31            20 19               0
		 */
		printk(KERN_INFO "RWB[desc]      [ck ipid mrqhsh] "
		       "[vl   l0 ee  es] "
		       "[ l3  l2  l1 hs] [reserved      ] ---------------- "
		       "[bi->skb] <-- Ext Rx Write-Back format\n");
		for (i = 0; i < rx_ring->count; i++) {
			buffer_info = &rx_ring->buffer_info[i];
			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc_ps;
			staterr =
			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);
			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				printk(KERN_INFO "RWB[0x%03X]     %016llX "
				       "%016llX %016llX %016llX "
				       "---------------- %p", i,
				       (unsigned long long)le64_to_cpu(u1->a),
				       (unsigned long long)le64_to_cpu(u1->b),
				       (unsigned long long)le64_to_cpu(u1->c),
				       (unsigned long long)le64_to_cpu(u1->d),
				       buffer_info->skb);
			} else {
				printk(KERN_INFO "R  [0x%03X]     %016llX "
				       "%016llX %016llX %016llX %016llX %p", i,
				       (unsigned long long)le64_to_cpu(u1->a),
				       (unsigned long long)le64_to_cpu(u1->b),
				       (unsigned long long)le64_to_cpu(u1->c),
				       (unsigned long long)le64_to_cpu(u1->d),
				       (unsigned long long)buffer_info->dma,
				       buffer_info->skb);

				if (netif_msg_pktdata(adapter))
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS, 16, 1,
						phys_to_virt(buffer_info->dma),
						adapter->rx_ps_bsize0, true);
			}

			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");
		}
		break;
	default:
		/* Legacy Receive Descriptor Format
		 *
		 * +-----------------------------------------------------+
		 * |                Buffer Address [63:0]                |
		 * +-----------------------------------------------------+
		 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
		 * +-----------------------------------------------------+
		 * 63       48 47    40 39      32 31         16 15      0
		 */
		printk(KERN_INFO "Rl[desc]     [address 63:0  ] "
		       "[vl er S cks ln] [bi->dma       ] [bi->skb] "
		       "<-- Legacy format\n");
		for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
			rx_desc = E1000_RX_DESC(*rx_ring, i);
			buffer_info = &rx_ring->buffer_info[i];
			u0 = (struct my_u0 *)rx_desc;
			printk(KERN_INFO "Rl[0x%03X]    %016llX %016llX "
			       "%016llX %p", i,
			       (unsigned long long)le64_to_cpu(u0->a),
			       (unsigned long long)le64_to_cpu(u0->b),
			       (unsigned long long)buffer_info->dma,
			       buffer_info->skb);
			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");

			if (netif_msg_pktdata(adapter))
				print_hex_dump(KERN_INFO, "",
					       DUMP_PREFIX_ADDRESS,
					       16, 1,
					       phys_to_virt(buffer_info->dma),
					       adapter->rx_buffer_len, true);
		}
	}

exit:
	return;
}
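/*
 * Illustrative note (not in the original source): the dump above only runs
 * when the "hw" message level is set in adapter->msg_enable, so one
 * hypothetical way to exercise it is to raise the interface's message level
 * first, e.g. "ethtool -s <iface> msglvl <mask>", before provoking a dump
 * (for instance via the Tx hang detection path).
 */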
/**
 * e1000_desc_unused - calculate if we have unused descriptors
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
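/*
 * Worked example (illustrative only): for a hypothetical ring with
 * count = 256, next_to_use = 250 and next_to_clean = 10, the producer has
 * wrapped, so 256 + 10 - 250 - 1 = 15 descriptors are unused.  The "- 1"
 * keeps one slot permanently empty so that next_to_use == next_to_clean
 * can only mean "ring empty", never "ring full".
 */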
/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
			      struct net_device *netdev, struct sk_buff *skb,
			      u8 status, __le16 vlan)
{
	skb->protocol = eth_type_trans(skb, netdev);

	if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
		vlan_gro_receive(&adapter->napi, adapter->vlgrp,
				 le16_to_cpu(vlan), skb);
	else
		napi_gro_receive(&adapter->napi, skb);
}
/**
 * e1000_rx_checksum - Receive Checksum Offload for 82543
 * @adapter:     board private structure
 * @status_err:  receive descriptor status and error fields
 * @csum:        receive descriptor csum field
 * @sk_buff:     socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      u32 csum, struct sk_buff *skb)
{
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);

	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;
	/* TCP/UDP checksum error bit is set */
	if (errors & E1000_RXD_ERR_TCPE) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status & E1000_RXD_STAT_TCPCS) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		/*
		 * IP fragment with UDP payload
		 * Hardware complements the payload checksum, so we undo it
		 * and then put the value in host order for further stack use.
		 */
		__sum16 sum = (__force __sum16)htons(csum);
		skb->csum = csum_unfold(~sum);
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
	adapter->hw_csum_good++;
}
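/*
 * Illustrative sketch (not part of the driver): if the true 16-bit ones'
 * complement sum of the payload were 0x1234, the hardware would report its
 * complement, csum = ~0x1234 = 0xedcb.  Undoing that with ~htons(csum) and
 * csum_unfold() hands the stack the raw sum in the __wsum form that
 * CHECKSUM_COMPLETE promises, letting the UDP layer finish validation.
 */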
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (!skb) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_buffer_len,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "RX DMA map failed\n");
			adapter->rx_dma_failed++;
			break;
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/*
			 * Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			writel(i, adapter->hw.hw_addr + rx_ring->tail);
		}
		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	rx_ring->next_to_use = i;
}
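/*
 * Illustrative note (not in the original source): with E1000_RX_BUFFER_WRITE
 * at its usual value of 16, the tail register is only bumped once per 16
 * refilled descriptors, trading a little refill latency for far fewer MMIO
 * writes; the wmb() guarantees the descriptor contents are globally visible
 * before the hardware is told to fetch them.
 */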
/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
				      int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (j >= adapter->rx_ps_pages) {
				/* all unused desc entries get hw null ptr */
				rx_desc->read.buffer_addr[j+1] =
				    ~cpu_to_le64(0);
				continue;
			}
			if (!ps_page->page) {
				ps_page->page = alloc_page(GFP_ATOMIC);
				if (!ps_page->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				ps_page->dma = dma_map_page(&pdev->dev,
							    ps_page->page,
							    0, PAGE_SIZE,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(&pdev->dev,
						      ps_page->dma)) {
					dev_err(&adapter->pdev->dev,
						"RX DMA page map failed\n");
					adapter->rx_dma_failed++;
					goto no_buffers;
				}
			}
			/*
			 * Refresh the desc even if buffer_addrs
			 * didn't change because each write-back
			 * erases this info.
			 */
			rx_desc->read.buffer_addr[j+1] =
			    cpu_to_le64(ps_page->dma);
		}

		skb = netdev_alloc_skb_ip_align(netdev,
						adapter->rx_ps_bsize0);

		if (!skb) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_ps_bsize0,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "RX DMA map failed\n");
			adapter->rx_dma_failed++;
			/* cleanup skb */
			dev_kfree_skb_any(skb);
			buffer_info->skb = NULL;
			break;
		}

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/*
			 * Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			writel(i << 1, adapter->hw.hw_addr + rx_ring->tail);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	rx_ring->next_to_use = i;
}
/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 - 16 /* for skb_reserve */;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(GFP_ATOMIC);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma)
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->page, 0,
							PAGE_SIZE,
							DMA_FROM_DEVICE);

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev,
				 buffer_info->dma,
				 adapter->rx_buffer_len,
				 DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/*
		 * !EOP means multiple descriptors were used to store a single
		 * packet, if that's the case we need to toss it.  In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			if (status & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		/* adjust length to remove Ethernet CRC */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
			length -= 4;

		total_rx_bytes += length;
		total_rx_packets++;

		/*
		 * code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack
		 */
		if (length < copybreak) {
			struct sk_buff *new_skb =
			    netdev_alloc_skb_ip_align(netdev, length);
			if (new_skb) {
				skb_copy_to_linear_data_offset(new_skb,
							       -NET_IP_ALIGN,
							       (skb->data -
								NET_IP_ALIGN),
							       (length +
								NET_IP_ALIGN));
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
			/* else just continue with the old one */
		}
		/* end copybreak code */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, netdev, skb, status,
				  rx_desc->special);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}
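/*
 * Illustrative note (not in the original source): copybreak is a module
 * parameter, typically a few hundred bytes.  A hypothetical 128-byte ping
 * reply is memcpy'd into a fresh right-sized skb above so the large mapped
 * receive buffer can be recycled immediately, while a 1500-byte frame skips
 * the copy and surrenders its buffer to the stack instead.
 */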
static void e1000_put_txbuf(struct e1000_adapter *adapter,
			    struct e1000_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}
static void e1000_print_hw_hang(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     print_hang_task);
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status, phy_1000t_status, phy_ext_status;
	u16 pci_status;

	e1e_rphy(hw, PHY_STATUS, &phy_status);
	e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
	e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);

	pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);

	/* detected Hardware unit hang */
	e_err("Detected Hardware Unit Hang:\n"
	      "  TDH                  <%x>\n"
	      "  TDT                  <%x>\n"
	      "  next_to_use          <%x>\n"
	      "  next_to_clean        <%x>\n"
	      "buffer_info[next_to_clean]:\n"
	      "  time_stamp           <%lx>\n"
	      "  next_to_watch        <%x>\n"
	      "  jiffies              <%lx>\n"
	      "  next_to_watch.status <%x>\n"
	      "MAC Status             <%x>\n"
	      "PHY Status             <%x>\n"
	      "PHY 1000BASE-T Status  <%x>\n"
	      "PHY Extended Status    <%x>\n"
	      "PCI Status             <%x>\n",
	      readl(adapter->hw.hw_addr + tx_ring->head),
	      readl(adapter->hw.hw_addr + tx_ring->tail),
	      tx_ring->next_to_use,
	      tx_ring->next_to_clean,
	      tx_ring->buffer_info[eop].time_stamp,
	      eop,
	      jiffies,
	      eop_desc->upper.fields.status,
	      er32(STATUS),
	      phy_status,
	      phy_1000t_status,
	      phy_ext_status,
	      pci_status);
}
/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		for (; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				total_tx_packets += buffer_info->segs;
				total_tx_bytes += buffer_info->bytecount;
			}

			e1000_put_txbuf(adapter, buffer_info);
			tx_desc->upper.data = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		if (i == tx_ring->next_to_use)
			break;
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD 32
	if (count && netif_carrier_ok(netdev) &&
	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/*
		 * Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = 0;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
			       + (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
			schedule_work(&adapter->print_hang_task);
			netif_stop_queue(netdev);
		}
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	netdev->stats.tx_bytes += total_tx_bytes;
	netdev->stats.tx_packets += total_tx_packets;
	return (count < tx_ring->count);
}
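/*
 * Illustrative note (not in the original source): the smp_mb() above follows
 * the usual stop/wake queue pattern.  Without a barrier, the cleaner could
 * check the stopped flag before its next_to_clean update became visible,
 * while the transmit path stopped the queue just after missing that update,
 * leaving both sides waiting forever; the barrier ensures at least one side
 * observes the other's write.
 */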
/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
				  int *work_done, int work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info, *next_buffer;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;
	u32 length, staterr;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		skb = buffer_info->skb;

		/* in the packet split case this is header only */
		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 adapter->rx_ps_bsize0,
				 DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		/* see !EOP comment in other Rx routine */
		if (!(staterr & E1000_RXD_STAT_EOP))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			e_dbg("Packet Split buffers didn't pick up the full "
			      "packet\n");
			dev_kfree_skb_irq(skb);
			if (staterr & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		if (!length) {
			e_dbg("Last part of the packet spanning multiple "
			      "descriptors\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

		{
		/*
		 * this looks ugly, but it seems compiler issues make it
		 * more efficient than reusing j
		 */
		int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

		/*
		 * page alloc/put takes too long and affects small packet
		 * throughput, so unsplit small packets and save the alloc/put
		 * only valid in softirq (napi) context to call kmap_*
		 */
		if (l1 && (l1 <= copybreak) &&
		    ((length + l1) <= adapter->rx_ps_bsize0)) {
			u8 *vaddr;

			ps_page = &buffer_info->ps_pages[0];

			/*
			 * there is no documentation about how to call
			 * kmap_atomic, so we can't hold the mapping
			 * very long
			 */
			dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
						PAGE_SIZE, DMA_FROM_DEVICE);
			vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
			memcpy(skb_tail_pointer(skb), vaddr, l1);
			kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
			dma_sync_single_for_device(&pdev->dev, ps_page->dma,
						   PAGE_SIZE, DMA_FROM_DEVICE);

			/* remove the CRC */
			if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
				l1 -= 4;

			skb_put(skb, l1);
			goto copydone;
		} /* if */
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
			if (!length)
				break;

			ps_page = &buffer_info->ps_pages[j];
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
			ps_page->page = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

		/* strip the ethernet crc, problem is we're using pages now so
		 * this whole operation can get a little cpu intensive
		 */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
			pskb_trim(skb, skb->len - 4);

copydone:
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr, le16_to_cpu(
			rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);

		if (rx_desc->wb.upper.header_status &
		    cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
			adapter->rx_hdr_split++;

		e1000_receive_skb(adapter, netdev, skb,
				  staterr, rx_desc->wb.middle.vlan);

next_desc:
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}
/**
 * e1000_consume_page - helper function
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
}
/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		++i;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
			       DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((status & E1000_RXD_STAT_EOP) &&
			     (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
			/* recycle both page and skb */
			buffer_info->skb = skb;
			/* an error means any chain goes out the window
			 * too */
			if (rx_ring->rx_skb_top)
				dev_kfree_skb(rx_ring->rx_skb_top);
			rx_ring->rx_skb_top = NULL;
			goto next_desc;
		}

#define rxtop rx_ring->rx_skb_top
		if (!(status & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the current skb, we only consumed the
				 * page */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page,
							    KM_SKB_DATA_SOFTIRQ);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr,
						      KM_SKB_DATA_SOFTIRQ);
					/* re-use the page, so don't erase
					 * buffer_info->page */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
							   buffer_info->page, 0,
							   length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err("pskb_may_pull failed.\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, netdev, skb, status,
				  rx_desc->special);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}
/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 **/
static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->clean_rx == e1000_clean_rx_irq)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
				dma_unmap_page(&pdev->dev, buffer_info->dma,
					       PAGE_SIZE,
					       DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_ps_bsize0,
						 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (!ps_page->page)
				break;
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			put_page(ps_page->page);
			ps_page->page = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	adapter->flags2 &= ~FLAG2_IS_DISCARDING;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
static void e1000e_downshift_workaround(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
					struct e1000_adapter, downshift_task);

	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
}
/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	/*
	 * read ICR disables interrupts using IAM
	 */

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/*
		 * 80003ES2LAN workaround-- For packet buffer work-around on
		 * link down event; disable receives here in the ISR and reset
		 * adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
			/* disable receives */
			u32 rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}
/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, icr = er32(ICR);

	if (!icr || test_bit(__E1000_DOWN, &adapter->state))
		return IRQ_NONE;  /* Not our interrupt */

	/*
	 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	/*
	 * Interrupt Auto-Mask...upon reading ICR,
	 * interrupts are masked.  No need for the
	 * IMC write
	 */

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/*
		 * 80003ES2LAN workaround--
		 * For packet buffer work-around on link down event;
		 * disable receives here in the ISR and
		 * reset adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
			/* disable receives */
			rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}
static irqreturn_t e1000_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	if (!(icr & E1000_ICR_INT_ASSERTED)) {
		if (!test_bit(__E1000_DOWN, &adapter->state))
			ew32(IMS, E1000_IMS_OTHER);
		return IRQ_NONE;
	}

	if (icr & adapter->eiac_mask)
		ew32(ICS, (icr & adapter->eiac_mask));

	if (icr & E1000_ICR_OTHER) {
		if (!(icr & E1000_ICR_LSC))
			goto no_link_interrupt;
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

no_link_interrupt:
	if (!test_bit(__E1000_DOWN, &adapter->state))
		ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);

	return IRQ_HANDLED;
}
static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;

	adapter->total_tx_bytes = 0;
	adapter->total_tx_packets = 0;

	if (!e1000_clean_tx_irq(adapter))
		/* Ring was not completely cleaned, so fire another interrupt */
		ew32(ICS, tx_ring->ims_val);

	return IRQ_HANDLED;
}
static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */
	if (adapter->rx_ring->set_itr) {
		writel(1000000000 / (adapter->rx_ring->itr_val * 256),
		       adapter->hw.hw_addr + adapter->rx_ring->itr_register);
		adapter->rx_ring->set_itr = 0;
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}
/**
 * e1000_configure_msix - Configure MSI-X hardware
 *
 * e1000_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void e1000_configure_msix(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	int vector = 0;
	u32 ctrl_ext, ivar = 0;

	adapter->eiac_mask = 0;

	/* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
	if (hw->mac.type == e1000_82574) {
		u32 rfctl = er32(RFCTL);
		rfctl |= E1000_RFCTL_ACK_DIS;
		ew32(RFCTL, rfctl);
	}

#define E1000_IVAR_INT_ALLOC_VALID	0x8
	/* Configure Rx vector */
	rx_ring->ims_val = E1000_IMS_RXQ0;
	adapter->eiac_mask |= rx_ring->ims_val;
	if (rx_ring->itr_val)
		writel(1000000000 / (rx_ring->itr_val * 256),
		       hw->hw_addr + rx_ring->itr_register);
	else
		writel(1, hw->hw_addr + rx_ring->itr_register);
	ivar = E1000_IVAR_INT_ALLOC_VALID | vector;

	/* Configure Tx vector */
	tx_ring->ims_val = E1000_IMS_TXQ0;
	vector++;
	if (tx_ring->itr_val)
		writel(1000000000 / (tx_ring->itr_val * 256),
		       hw->hw_addr + tx_ring->itr_register);
	else
		writel(1, hw->hw_addr + tx_ring->itr_register);
	adapter->eiac_mask |= tx_ring->ims_val;
	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);

	/* set vector for Other Causes, e.g. link changes */
	vector++;
	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
	if (rx_ring->itr_val)
		writel(1000000000 / (rx_ring->itr_val * 256),
		       hw->hw_addr + E1000_EITR_82574(vector));
	else
		writel(1, hw->hw_addr + E1000_EITR_82574(vector));

	/* Cause Tx interrupts on every write back */
	ivar |= (1 << 31);

	ew32(IVAR, ivar);

	/* enable MSI-X PBA support */
	ctrl_ext = er32(CTRL_EXT);
	ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;

	/* Auto-Mask Other interrupts upon ICR read */
#define E1000_EIAC_MASK_82574   0x01F00000
	ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
	ctrl_ext |= E1000_CTRL_EXT_EIAME;
	ew32(CTRL_EXT, ctrl_ext);
	e1e_flush();
}
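/*
 * Worked example (illustrative only): the EITR registers count in units of
 * 256 ns, so a requested rate of 20000 interrupts/s yields a register value
 * of 1000000000 / (20000 * 256) = 195, i.e. roughly one interrupt every
 * 195 * 256 ns = 50 us, which is 20000 interrupts per second.
 */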
void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & FLAG_MSI_ENABLED) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~FLAG_MSI_ENABLED;
	}
}
/**
 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
{
	int err;
	int i;

	switch (adapter->int_mode) {
	case E1000E_INT_MODE_MSIX:
		if (adapter->flags & FLAG_HAS_MSIX) {
			adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
			adapter->msix_entries = kcalloc(adapter->num_vectors,
							sizeof(struct msix_entry),
							GFP_KERNEL);
			if (adapter->msix_entries) {
				for (i = 0; i < adapter->num_vectors; i++)
					adapter->msix_entries[i].entry = i;

				err = pci_enable_msix(adapter->pdev,
						      adapter->msix_entries,
						      adapter->num_vectors);
				if (err == 0)
					return;
			}
			/* MSI-X failed, so fall through and try MSI */
			e_err("Failed to initialize MSI-X interrupts.  "
			      "Falling back to MSI interrupts.\n");
			e1000e_reset_interrupt_capability(adapter);
		}
		adapter->int_mode = E1000E_INT_MODE_MSI;
		/* Fall through */
	case E1000E_INT_MODE_MSI:
		if (!pci_enable_msi(adapter->pdev)) {
			adapter->flags |= FLAG_MSI_ENABLED;
		} else {
			adapter->int_mode = E1000E_INT_MODE_LEGACY;
			e_err("Failed to initialize MSI interrupts.  Falling "
			      "back to legacy interrupts.\n");
		}
		/* Fall through */
	case E1000E_INT_MODE_LEGACY:
		/* Don't do anything; this is the system default */
		break;
	}

	/* store the number of vectors being used */
	adapter->num_vectors = 1;
}
/**
 * e1000_request_msix - Initialize MSI-X interrupts
 *
 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int e1000_request_msix(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0, vector = 0;

	if (strlen(netdev->name) < (IFNAMSIZ - 5))
		sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
	else
		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
	err = request_irq(adapter->msix_entries[vector].vector,
			  e1000_intr_msix_rx, 0, adapter->rx_ring->name,
			  netdev);
	if (err)
		goto out;
	adapter->rx_ring->itr_register = E1000_EITR_82574(vector);
	adapter->rx_ring->itr_val = adapter->itr;
	vector++;

	if (strlen(netdev->name) < (IFNAMSIZ - 5))
		sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
	else
		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
	err = request_irq(adapter->msix_entries[vector].vector,
			  e1000_intr_msix_tx, 0, adapter->tx_ring->name,
			  netdev);
	if (err)
		goto out;
	adapter->tx_ring->itr_register = E1000_EITR_82574(vector);
	adapter->tx_ring->itr_val = adapter->itr;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
			  e1000_msix_other, 0, netdev->name, netdev);
	if (err)
		goto out;

	e1000_configure_msix(adapter);
	return 0;
out:
	return err;
}
/**
 * e1000_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (adapter->msix_entries) {
		err = e1000_request_msix(adapter);
		if (!err)
			return err;
		/* fall back to MSI */
		e1000e_reset_interrupt_capability(adapter);
		adapter->int_mode = E1000E_INT_MODE_MSI;
		e1000e_set_interrupt_capability(adapter);
	}
	if (adapter->flags & FLAG_MSI_ENABLED) {
		err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
				  netdev->name, netdev);
		if (!err)
			return err;

		/* fall back to legacy interrupt */
		e1000e_reset_interrupt_capability(adapter);
		adapter->int_mode = E1000E_INT_MODE_LEGACY;
	}

	err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
			  netdev->name, netdev);
	if (err)
		e_err("Unable to allocate interrupt, Error: %d\n", err);

	return err;
}
static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->msix_entries) {
		int vector = 0;

		free_irq(adapter->msix_entries[vector].vector, netdev);
		vector++;

		free_irq(adapter->msix_entries[vector].vector, netdev);
		vector++;

		/* Other Causes interrupt vector */
		free_irq(adapter->msix_entries[vector].vector, netdev);
		return;
	}

	free_irq(adapter->pdev->irq, netdev);
}
/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	if (adapter->msix_entries)
		ew32(EIAC_82574, 0);
	e1e_flush();

	if (adapter->msix_entries) {
		int i;
		for (i = 0; i < adapter->num_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}
/**
 * e1000_irq_enable - Enable default interrupt generation settings
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
		ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
	} else {
		ew32(IMS, IMS_ENABLE_MASK);
	}
	e1e_flush();
}
/**
 * e1000_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is open.
 **/
static void e1000_get_hw_control(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;
	u32 swsm;

	/* Let firmware know the driver has taken over */
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
	}
}
/**
 * e1000_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is closed.
 **/
static void e1000_release_hw_control(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;
	u32 swsm;

	/* Let firmware take over control of h/w */
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
	}
}
/**
 * e1000_alloc_ring_dma - allocate memory for a ring structure
 **/
static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
				struct e1000_ring *ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
					GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}
/**
 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	int err = -ENOMEM, size;

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	tx_ring->buffer_info = vmalloc(size);
	if (!tx_ring->buffer_info)
		goto err;
	memset(tx_ring->buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	err = e1000_alloc_ring_dma(adapter, tx_ring);
	if (err)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;
err:
	vfree(tx_ring->buffer_info);
	e_err("Unable to allocate memory for the transmit descriptor ring\n");
	return err;
}
/**
 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	int i, size, desc_len, err = -ENOMEM;

	size = sizeof(struct e1000_buffer) * rx_ring->count;
	rx_ring->buffer_info = vmalloc(size);
	if (!rx_ring->buffer_info)
		goto err;
	memset(rx_ring->buffer_info, 0, size);

	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
						sizeof(struct e1000_ps_page),
						GFP_KERNEL);
		if (!buffer_info->ps_pages)
			goto err_pages;
	}

	desc_len = sizeof(union e1000_rx_desc_packet_split);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	err = e1000_alloc_ring_dma(adapter, rx_ring);
	if (err)
		goto err_pages;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->rx_skb_top = NULL;

	return 0;

err_pages:
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		kfree(buffer_info->ps_pages);
	}
err:
	vfree(rx_ring->buffer_info);
	e_err("Unable to allocate memory for the receive descriptor ring\n");
	return err;
}
/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 **/
static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_put_txbuf(adapter, buffer_info);
	}

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}
/**
 * e1000e_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
void e1000e_free_tx_resources(struct e1000_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *tx_ring = adapter->tx_ring;

	e1000_clean_tx_ring(adapter);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);
	tx_ring->desc = NULL;
}
2184 * e1000e_free_rx_resources - Free Rx Resources
2185 * @adapter: board private structure
2187 * Free all receive software resources
2190 void e1000e_free_rx_resources(struct e1000_adapter *adapter)
2192 struct pci_dev *pdev = adapter->pdev;
2193 struct e1000_ring *rx_ring = adapter->rx_ring;
2196 e1000_clean_rx_ring(adapter);
2198 for (i = 0; i < rx_ring->count; i++) {
2199 kfree(rx_ring->buffer_info[i].ps_pages);
2202 vfree(rx_ring->buffer_info);
2203 rx_ring->buffer_info = NULL;
2205 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2207 rx_ring->desc = NULL;
2211 * e1000_update_itr - update the dynamic ITR value based on statistics
2212 * @adapter: pointer to adapter
2213 * @itr_setting: current adapter->itr
2214 * @packets: the number of packets during this measurement interval
2215 * @bytes: the number of bytes during this measurement interval
2217 * Stores a new ITR value based on packets and byte
2218 * counts during the last interrupt. The advantage of per interrupt
2219 * computation is faster updates and more accurate ITR for the current
2220 * traffic pattern. Constants in this function were computed
2221 * based on theoretical maximum wire speed and thresholds were set based
2222 * on testing data as well as attempting to minimize response time
2223 * while increasing bulk throughput. This functionality is controlled
2224 * by the InterruptThrottleRate module parameter.
2226 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2227 u16 itr_setting, int packets,
2230 unsigned int retval = itr_setting;
2233 goto update_itr_done;
2235 switch (itr_setting) {
2236 case lowest_latency:
2237 /* handle TSO and jumbo frames */
2238 if (bytes/packets > 8000)
2239 retval = bulk_latency;
2240 else if ((packets < 5) && (bytes > 512)) {
2241 retval = low_latency;
2244 case low_latency: /* 50 usec aka 20000 ints/s */
2245 if (bytes > 10000) {
2246 /* this if handles the TSO accounting */
2247 if (bytes/packets > 8000) {
2248 retval = bulk_latency;
2249 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
2250 retval = bulk_latency;
2251 } else if (packets > 35) {
2252 retval = lowest_latency;
2254 } else if (bytes/packets > 2000) {
2255 retval = bulk_latency;
2256 } else if (packets <= 2 && bytes < 512) {
2257 retval = lowest_latency;
2260 case bulk_latency: /* 250 usec aka 4000 ints/s */
2261 if (bytes > 25000) {
2263 retval = low_latency;
2265 } else if (bytes < 6000) {
2266 retval = low_latency;
2275 static void e1000_set_itr(struct e1000_adapter *adapter)
2277 struct e1000_hw *hw = &adapter->hw;
2279 u32 new_itr = adapter->itr;
2281 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2282 if (adapter->link_speed != SPEED_1000) {
2288 adapter->tx_itr = e1000_update_itr(adapter,
2290 adapter->total_tx_packets,
2291 adapter->total_tx_bytes);
2292 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2293 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2294 adapter->tx_itr = low_latency;
2296 adapter->rx_itr = e1000_update_itr(adapter,
2298 adapter->total_rx_packets,
2299 adapter->total_rx_bytes);
2300 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2301 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2302 adapter->rx_itr = low_latency;
2304 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2306 switch (current_itr) {
2307 /* counts and packets in update_itr are dependent on these numbers */
2308 case lowest_latency:
2309 new_itr = 70000;
2310 break;
2311 case low_latency:
2312 new_itr = 20000; /* aka hwitr = ~200 */
2313 break;
2314 case bulk_latency:
2315 new_itr = 4000;
2316 break;
2322 if (new_itr != adapter->itr) {
2324 * this attempts to bias the interrupt rate towards Bulk
2325 * by adding intermediate steps when the interrupt rate is
2326 * increasing
2328 new_itr = new_itr > adapter->itr ?
2329 min(adapter->itr + (new_itr >> 2), new_itr) :
2330 new_itr;
2331 adapter->itr = new_itr;
2332 adapter->rx_ring->itr_val = new_itr;
2333 if (adapter->msix_entries)
2334 adapter->rx_ring->set_itr = 1;
2336 ew32(ITR, 1000000000 / (new_itr * 256));
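/*
 * The ITR register counts in 256 ns increments, hence the
 * 10^9 / (rate * 256) conversion: e.g. new_itr = 20000 ints/s
 * programs a value of ~195, i.e. one interrupt every ~50 us.
 */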
2341 * e1000_alloc_queues - Allocate memory for all rings
2342 * @adapter: board private structure to initialize
2344 static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
2346 adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
2347 if (!adapter->tx_ring)
2350 adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
2351 if (!adapter->rx_ring)
2356 e_err("Unable to allocate memory for queues\n");
2357 kfree(adapter->rx_ring);
2358 kfree(adapter->tx_ring);
2363 * e1000_clean - NAPI Rx polling callback
2364 * @napi: struct associated with this polling callback
2365 * @budget: amount of packets driver is allowed to process this poll
2367 static int e1000_clean(struct napi_struct *napi, int budget)
2369 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
2370 struct e1000_hw *hw = &adapter->hw;
2371 struct net_device *poll_dev = adapter->netdev;
2372 int tx_cleaned = 1, work_done = 0;
2374 adapter = netdev_priv(poll_dev);
2376 if (adapter->msix_entries &&
2377 !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
2380 tx_cleaned = e1000_clean_tx_irq(adapter);
2383 adapter->clean_rx(adapter, &work_done, budget);
2388 /* If budget not fully consumed, exit the polling mode */
2389 if (work_done < budget) {
2390 if (adapter->itr_setting & 3)
2391 e1000_set_itr(adapter);
2392 napi_complete(napi);
2393 if (!test_bit(__E1000_DOWN, &adapter->state)) {
2394 if (adapter->msix_entries)
2395 ew32(IMS, adapter->rx_ring->ims_val);
2397 e1000_irq_enable(adapter);
2404 static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2406 struct e1000_adapter *adapter = netdev_priv(netdev);
2407 struct e1000_hw *hw = &adapter->hw;
2410 /* don't update vlan cookie if already programmed */
2411 if ((adapter->hw.mng_cookie.status &
2412 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2413 (vid == adapter->mng_vlan_id))
2416 /* add VID to filter table */
2417 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2418 index = (vid >> 5) & 0x7F;
2419 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2420 vfta |= (1 << (vid & 0x1F));
2421 hw->mac.ops.write_vfta(hw, index, vfta);
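/*
 * The VFTA is 128 32-bit registers covering all 4096 VLAN IDs:
 * bits [11:5] of the VID select the register (index) and bits
 * [4:0] select the bit within it.
 */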
2425 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2427 struct e1000_adapter *adapter = netdev_priv(netdev);
2428 struct e1000_hw *hw = &adapter->hw;
2431 if (!test_bit(__E1000_DOWN, &adapter->state))
2432 e1000_irq_disable(adapter);
2433 vlan_group_set_device(adapter->vlgrp, vid, NULL);
2435 if (!test_bit(__E1000_DOWN, &adapter->state))
2436 e1000_irq_enable(adapter);
2438 if ((adapter->hw.mng_cookie.status &
2439 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2440 (vid == adapter->mng_vlan_id)) {
2441 /* release control to f/w */
2442 e1000_release_hw_control(adapter);
2446 /* remove VID from filter table */
2447 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2448 index = (vid >> 5) & 0x7F;
2449 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2450 vfta &= ~(1 << (vid & 0x1F));
2451 hw->mac.ops.write_vfta(hw, index, vfta);
2455 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
2457 struct net_device *netdev = adapter->netdev;
2458 u16 vid = adapter->hw.mng_cookie.vlan_id;
2459 u16 old_vid = adapter->mng_vlan_id;
2461 if (!adapter->vlgrp)
2464 if (!vlan_group_get_device(adapter->vlgrp, vid)) {
2465 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2466 if (adapter->hw.mng_cookie.status &
2467 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
2468 e1000_vlan_rx_add_vid(netdev, vid);
2469 adapter->mng_vlan_id = vid;
2472 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
2474 !vlan_group_get_device(adapter->vlgrp, old_vid))
2475 e1000_vlan_rx_kill_vid(netdev, old_vid);
2477 adapter->mng_vlan_id = vid;
2482 static void e1000_vlan_rx_register(struct net_device *netdev,
2483 struct vlan_group *grp)
2485 struct e1000_adapter *adapter = netdev_priv(netdev);
2486 struct e1000_hw *hw = &adapter->hw;
2489 if (!test_bit(__E1000_DOWN, &adapter->state))
2490 e1000_irq_disable(adapter);
2491 adapter->vlgrp = grp;
2494 /* enable VLAN tag insert/strip */
2496 ctrl |= E1000_CTRL_VME;
2499 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2500 /* enable VLAN receive filtering */
2502 rctl &= ~E1000_RCTL_CFIEN;
2504 e1000_update_mng_vlan(adapter);
2507 /* disable VLAN tag insert/strip */
2509 ctrl &= ~E1000_CTRL_VME;
2512 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2513 if (adapter->mng_vlan_id !=
2514 (u16)E1000_MNG_VLAN_NONE) {
2515 e1000_vlan_rx_kill_vid(netdev,
2516 adapter->mng_vlan_id);
2517 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2522 if (!test_bit(__E1000_DOWN, &adapter->state))
2523 e1000_irq_enable(adapter);
2526 static void e1000_restore_vlan(struct e1000_adapter *adapter)
2530 e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
2532 if (!adapter->vlgrp)
2535 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
2536 if (!vlan_group_get_device(adapter->vlgrp, vid))
2538 e1000_vlan_rx_add_vid(adapter->netdev, vid);
2542 static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
2544 struct e1000_hw *hw = &adapter->hw;
2545 u32 manc, manc2h, mdef, i, j;
2547 if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
2553 * enable receiving management packets to the host. this will probably
2554 * generate destination unreachable messages from the host OS, but
2555 * the packets will be handled on SMBUS
2557 manc |= E1000_MANC_EN_MNG2HOST;
2558 manc2h = er32(MANC2H);
2560 switch (hw->mac.type) {
2562 manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
2567 * Check if IPMI pass-through decision filter already exists;
2568 * if so, enable it.
2570 for (i = 0, j = 0; i < 8; i++) {
2571 mdef = er32(MDEF(i));
2573 /* Ignore filters with anything other than IPMI ports */
2574 if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2577 /* Enable this decision filter in MANC2H */
2584 if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2587 /* Create new decision filter in an empty filter */
2588 for (i = 0, j = 0; i < 8; i++)
2589 if (er32(MDEF(i)) == 0) {
2590 ew32(MDEF(i), (E1000_MDEF_PORT_623 |
2591 E1000_MDEF_PORT_664));
2598 e_warn("Unable to create IPMI pass-through filter\n");
2602 ew32(MANC2H, manc2h);
2607 * e1000_configure_tx - Configure Transmit Unit after Reset
2608 * @adapter: board private structure
2610 * Configure the Tx unit of the MAC after a reset.
2612 static void e1000_configure_tx(struct e1000_adapter *adapter)
2614 struct e1000_hw *hw = &adapter->hw;
2615 struct e1000_ring *tx_ring = adapter->tx_ring;
2617 u32 tdlen, tctl, tipg, tarc;
2620 /* Setup the HW Tx Head and Tail descriptor pointers */
2621 tdba = tx_ring->dma;
2622 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
2623 ew32(TDBAL, (tdba & DMA_BIT_MASK(32)));
2624 ew32(TDBAH, (tdba >> 32));
2628 tx_ring->head = E1000_TDH;
2629 tx_ring->tail = E1000_TDT;
2631 /* Set the default values for the Tx Inter Packet Gap timer */
2632 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; /* 8 */
2633 ipgr1 = DEFAULT_82543_TIPG_IPGR1; /* 8 */
2634 ipgr2 = DEFAULT_82543_TIPG_IPGR2; /* 6 */
2636 if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN)
2637 ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /* 7 */
2639 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
2640 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
2641 ew32(TIPG, tipg);
2643 /* Set the Tx Interrupt Delay register */
2644 ew32(TIDV, adapter->tx_int_delay);
2645 /* Tx irq moderation */
2646 ew32(TADV, adapter->tx_abs_int_delay);
2648 /* Program the Transmit Control Register */
2650 tctl &= ~E1000_TCTL_CT;
2651 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2652 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2654 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
2655 tarc = er32(TARC(0));
2657 * set the speed mode bit, we'll clear it if we're not at
2658 * gigabit link later
2660 #define SPEED_MODE_BIT (1 << 21)
2661 tarc |= SPEED_MODE_BIT;
2662 ew32(TARC(0), tarc);
2665 /* errata: program both queues to unweighted RR */
2666 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
2667 tarc = er32(TARC(0));
2669 ew32(TARC(0), tarc);
2670 tarc = er32(TARC(1));
2672 ew32(TARC(1), tarc);
2675 /* Setup Transmit Descriptor Settings for eop descriptor */
2676 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
2678 /* only set IDE if we are delaying interrupts using the timers */
2679 if (adapter->tx_int_delay)
2680 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2682 /* enable Report Status bit */
2683 adapter->txd_cmd |= E1000_TXD_CMD_RS;
2687 e1000e_config_collision_dist(hw);
2691 * e1000_setup_rctl - configure the receive control registers
2692 * @adapter: Board private structure
2694 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
2695 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
2696 static void e1000_setup_rctl(struct e1000_adapter *adapter)
2698 struct e1000_hw *hw = &adapter->hw;
2703 /* Program MC offset vector base */
2705 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2706 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
2707 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
2708 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2710 /* Do not Store bad packets */
2711 rctl &= ~E1000_RCTL_SBP;
2713 /* Enable Long Packet receive */
2714 if (adapter->netdev->mtu <= ETH_DATA_LEN)
2715 rctl &= ~E1000_RCTL_LPE;
2717 rctl |= E1000_RCTL_LPE;
2719 /* Some systems expect that the CRC is included in SMBUS traffic. The
2720 * hardware strips the CRC before sending to both SMBUS (BMC) and to
2721 * host memory when this is enabled
2723 if (adapter->flags2 & FLAG2_CRC_STRIPPING)
2724 rctl |= E1000_RCTL_SECRC;
2726 /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
2727 if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
2730 e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
2732 phy_data |= (1 << 2);
2733 e1e_wphy(hw, PHY_REG(770, 26), phy_data);
2735 e1e_rphy(hw, 22, &phy_data);
2737 phy_data |= (1 << 14);
2738 e1e_wphy(hw, 0x10, 0x2823);
2739 e1e_wphy(hw, 0x11, 0x0003);
2740 e1e_wphy(hw, 22, phy_data);
2743 /* Workaround Si errata on 82579 - configure jumbo frame flow */
2744 if (hw->mac.type == e1000_pch2lan) {
2747 if (rctl & E1000_RCTL_LPE)
2748 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
2750 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
2753 /* Setup buffer sizes */
2754 rctl &= ~E1000_RCTL_SZ_4096;
2755 rctl |= E1000_RCTL_BSEX;
2756 switch (adapter->rx_buffer_len) {
2759 rctl |= E1000_RCTL_SZ_2048;
2760 rctl &= ~E1000_RCTL_BSEX;
2763 rctl |= E1000_RCTL_SZ_4096;
2766 rctl |= E1000_RCTL_SZ_8192;
2769 rctl |= E1000_RCTL_SZ_16384;
2774 * 82571 and greater support packet-split where the protocol
2775 * header is placed in skb->data and the packet data is
2776 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
2777 * In the case of a non-split, skb->data is linearly filled,
2778 * followed by the page buffers. Therefore, skb->data is
2779 * sized to hold the largest protocol header.
2781 * allocations using alloc_page take too long for regular MTU
2782 * so only enable packet split for jumbo frames
2784 * Using pages when the page size is greater than 16k wastes
2785 * a lot of memory, since we allocate 3 pages at all times
2788 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
2789 if (!(adapter->flags & FLAG_HAS_ERT) && (pages <= 3) &&
2790 (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
2791 adapter->rx_ps_pages = pages;
2793 adapter->rx_ps_pages = 0;
2795 if (adapter->rx_ps_pages) {
2796 /* Configure extra packet-split registers */
2797 rfctl = er32(RFCTL);
2798 rfctl |= E1000_RFCTL_EXTEN;
2800 * disable packet split support for IPv6 extension headers,
2801 * because some malformed IPv6 headers can hang the Rx
2803 rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
2804 E1000_RFCTL_NEW_IPV6_EXT_DIS);
2808 /* Enable Packet split descriptors */
2809 rctl |= E1000_RCTL_DTYP_PS;
2811 psrctl |= adapter->rx_ps_bsize0 >>
2812 E1000_PSRCTL_BSIZE0_SHIFT;
2814 switch (adapter->rx_ps_pages) {
2816 psrctl |= PAGE_SIZE <<
2817 E1000_PSRCTL_BSIZE3_SHIFT;
2819 psrctl |= PAGE_SIZE <<
2820 E1000_PSRCTL_BSIZE2_SHIFT;
2822 psrctl |= PAGE_SIZE >>
2823 E1000_PSRCTL_BSIZE1_SHIFT;
2827 ew32(PSRCTL, psrctl);
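/*
 * PSRCTL carries four buffer sizes: BSIZE0 is the header buffer
 * backing skb->data (rx_ps_bsize0, 128 bytes here) and BSIZE1-3
 * describe the page buffers; the case labels above fall through
 * so that exactly rx_ps_pages of them are set to PAGE_SIZE.
 */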
2831 /* just started the receive unit, no need to restart */
2832 adapter->flags &= ~FLAG_RX_RESTART_NOW;
2836 * e1000_configure_rx - Configure Receive Unit after Reset
2837 * @adapter: board private structure
2839 * Configure the Rx unit of the MAC after a reset.
2841 static void e1000_configure_rx(struct e1000_adapter *adapter)
2843 struct e1000_hw *hw = &adapter->hw;
2844 struct e1000_ring *rx_ring = adapter->rx_ring;
2846 u32 rdlen, rctl, rxcsum, ctrl_ext;
2848 if (adapter->rx_ps_pages) {
2849 /* this is a 32 byte descriptor */
2850 rdlen = rx_ring->count *
2851 sizeof(union e1000_rx_desc_packet_split);
2852 adapter->clean_rx = e1000_clean_rx_irq_ps;
2853 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
2854 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
2855 rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
2856 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
2857 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
2859 rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
2860 adapter->clean_rx = e1000_clean_rx_irq;
2861 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
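/*
 * The handler pair chosen above selects one of three Rx paths:
 * packet-split (32-byte descriptors, header in skb->data, payload
 * in ps_pages), jumbo (frames assembled from page buffers), or
 * the default single-buffer path.
 */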
2864 /* disable receives while setting up the descriptors */
2866 ew32(RCTL, rctl & ~E1000_RCTL_EN);
2870 /* set the Receive Delay Timer Register */
2871 ew32(RDTR, adapter->rx_int_delay);
2873 /* irq moderation */
2874 ew32(RADV, adapter->rx_abs_int_delay);
2875 if (adapter->itr_setting != 0)
2876 ew32(ITR, 1000000000 / (adapter->itr * 256));
2878 ctrl_ext = er32(CTRL_EXT);
2879 /* Auto-Mask interrupts upon ICR access */
2880 ctrl_ext |= E1000_CTRL_EXT_IAME;
2881 ew32(IAM, 0xffffffff);
2882 ew32(CTRL_EXT, ctrl_ext);
2886 * Setup the HW Rx Head and Tail Descriptor Pointers and
2887 * the Base and Length of the Rx Descriptor Ring
2889 rdba = rx_ring->dma;
2890 ew32(RDBAL, (rdba & DMA_BIT_MASK(32)));
2891 ew32(RDBAH, (rdba >> 32));
2895 rx_ring->head = E1000_RDH;
2896 rx_ring->tail = E1000_RDT;
2898 /* Enable Receive Checksum Offload for TCP and UDP */
2899 rxcsum = er32(RXCSUM);
2900 if (adapter->flags & FLAG_RX_CSUM_ENABLED) {
2901 rxcsum |= E1000_RXCSUM_TUOFL;
2904 * IPv4 payload checksum for UDP fragments must be
2905 * used in conjunction with packet-split.
2907 if (adapter->rx_ps_pages)
2908 rxcsum |= E1000_RXCSUM_IPPCSE;
2910 rxcsum &= ~E1000_RXCSUM_TUOFL;
2911 /* no need to clear IPPCSE as it defaults to 0 */
2913 ew32(RXCSUM, rxcsum);
2916 * Enable early receives on supported devices, only takes effect when
2917 * packet size is equal or larger than the specified value (in 8 byte
2918 * units), e.g. using jumbo frames when setting to E1000_ERT_2048
2920 if (adapter->flags & FLAG_HAS_ERT) {
2921 if (adapter->netdev->mtu > ETH_DATA_LEN) {
2922 u32 rxdctl = er32(RXDCTL(0));
2923 ew32(RXDCTL(0), rxdctl | 0x3);
2924 ew32(ERT, E1000_ERT_2048 | (1 << 13));
2926 * With jumbo frames and early-receive enabled,
2927 * excessive C-state transition latencies result in
2928 * dropped transactions.
2930 pm_qos_update_request(
2931 &adapter->netdev->pm_qos_req, 55);
2933 pm_qos_update_request(
2934 &adapter->netdev->pm_qos_req,
2935 PM_QOS_DEFAULT_VALUE);
2939 /* Enable Receives */
2944 * e1000_update_mc_addr_list - Update Multicast addresses
2945 * @hw: pointer to the HW structure
2946 * @mc_addr_list: array of multicast addresses to program
2947 * @mc_addr_count: number of multicast addresses to program
2949 * Updates the Multicast Table Array.
2950 * The caller must have a packed mc_addr_list of multicast addresses.
2952 static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
2955 hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
2959 * e1000_set_multi - Multicast and Promiscuous mode set
2960 * @netdev: network interface device structure
2962 * The set_multi entry point is called whenever the multicast address
2963 * list or the network interface flags are updated. This routine is
2964 * responsible for configuring the hardware for proper multicast,
2965 * promiscuous mode, and all-multi behavior.
2967 static void e1000_set_multi(struct net_device *netdev)
2969 struct e1000_adapter *adapter = netdev_priv(netdev);
2970 struct e1000_hw *hw = &adapter->hw;
2971 struct netdev_hw_addr *ha;
2976 /* Check for Promiscuous and All Multicast modes */
2980 if (netdev->flags & IFF_PROMISC) {
2981 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2982 rctl &= ~E1000_RCTL_VFE;
2984 if (netdev->flags & IFF_ALLMULTI) {
2985 rctl |= E1000_RCTL_MPE;
2986 rctl &= ~E1000_RCTL_UPE;
2988 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
2990 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
2991 rctl |= E1000_RCTL_VFE;
2996 if (!netdev_mc_empty(netdev)) {
2997 mta_list = kmalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
3001 /* prepare a packed array of only addresses. */
3003 netdev_for_each_mc_addr(ha, netdev)
3004 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
3006 e1000_update_mc_addr_list(hw, mta_list, i);
3010 * if we're called from probe, we might not have
3011 * anything to do here, so clear out the list
3013 e1000_update_mc_addr_list(hw, NULL, 0);
3018 * e1000_configure - configure the hardware for Rx and Tx
3019 * @adapter: private board structure
3021 static void e1000_configure(struct e1000_adapter *adapter)
3023 e1000_set_multi(adapter->netdev);
3025 e1000_restore_vlan(adapter);
3026 e1000_init_manageability_pt(adapter);
3028 e1000_configure_tx(adapter);
3029 e1000_setup_rctl(adapter);
3030 e1000_configure_rx(adapter);
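/* prime the Rx ring: one buffer for every currently unused descriptor */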
3031 adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring));
3035 * e1000e_power_up_phy - restore link in case the phy was powered down
3036 * @adapter: address of board private structure
3038 * The phy may be powered down to save power and turn off link when the
3039 * driver is unloaded and wake on lan is not enabled (among others)
3040 * *** this routine MUST be followed by a call to e1000e_reset ***
3042 void e1000e_power_up_phy(struct e1000_adapter *adapter)
3044 if (adapter->hw.phy.ops.power_up)
3045 adapter->hw.phy.ops.power_up(&adapter->hw);
3047 adapter->hw.mac.ops.setup_link(&adapter->hw);
3051 * e1000_power_down_phy - Power down the PHY
3053 * Power down the PHY so no link is implied when interface is down.
3054 * The PHY cannot be powered down if management or WoL is active.
3056 static void e1000_power_down_phy(struct e1000_adapter *adapter)
3058 /* WoL is enabled */
3059 if (adapter->wol)
3060 return;
3062 if (adapter->hw.phy.ops.power_down)
3063 adapter->hw.phy.ops.power_down(&adapter->hw);
3067 * e1000e_reset - bring the hardware into a known good state
3069 * This function boots the hardware and enables some settings that
3070 * require a configuration cycle of the hardware - those cannot be
3071 * set/changed during runtime. After reset the device needs to be
3072 * properly configured for Rx, Tx etc.
3074 void e1000e_reset(struct e1000_adapter *adapter)
3076 struct e1000_mac_info *mac = &adapter->hw.mac;
3077 struct e1000_fc_info *fc = &adapter->hw.fc;
3078 struct e1000_hw *hw = &adapter->hw;
3079 u32 tx_space, min_tx_space, min_rx_space;
3080 u32 pba = adapter->pba;
3083 /* reset Packet Buffer Allocation to default */
3084 ew32(PBA, pba);
3086 if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
3088 * To maintain wire speed transmits, the Tx FIFO should be
3089 * large enough to accommodate two full transmit packets,
3090 * rounded up to the next 1KB and expressed in KB. Likewise,
3091 * the Rx FIFO should be large enough to accommodate at least
3092 * one full receive packet and is similarly rounded up and
3096 /* upper 16 bits has Tx packet buffer allocation size in KB */
3097 tx_space = pba >> 16;
3098 /* lower 16 bits has Rx packet buffer allocation size in KB */
3101 * the Tx fifo also stores 16 bytes of information about the Tx
3102 * packet, but don't include the ethernet FCS because hardware appends it
3104 min_tx_space = (adapter->max_frame_size +
3105 sizeof(struct e1000_tx_desc) -
3106 ETH_FCS_LEN) * 2;
3107 min_tx_space = ALIGN(min_tx_space, 1024);
3108 min_tx_space >>= 10;
3109 /* software strips receive CRC, so leave room for it */
3110 min_rx_space = adapter->max_frame_size;
3111 min_rx_space = ALIGN(min_rx_space, 1024);
3112 min_rx_space >>= 10;
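/*
 * e.g. a standard 1518-byte frame rounds up to 2048 bytes, giving
 * min_rx_space = 2 (KB).
 */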
3115 * If current Tx allocation is less than the min Tx FIFO size,
3116 * and the min Tx FIFO size is less than the current Rx FIFO
3117 * allocation, take space away from current Rx allocation
3119 if ((tx_space < min_tx_space) &&
3120 ((min_tx_space - tx_space) < pba)) {
3121 pba -= min_tx_space - tx_space;
3124 * if short on Rx space, Rx wins and must trump tx
3125 * adjustment or use Early Receive if available
3127 if ((pba < min_rx_space) &&
3128 (!(adapter->flags & FLAG_HAS_ERT)))
3129 /* ERT enabled in e1000_configure_rx */
3130 pba = min_rx_space;
3138 * flow control settings
3140 * The high water mark must be low enough to fit one full frame
3141 * (or the size used for early receive) above it in the Rx FIFO.
3142 * Set it to the lower of:
3143 * - 90% of the Rx FIFO size, and
3144 * - the full Rx FIFO size minus the early receive size (for parts
3145 * with ERT support assuming ERT set to E1000_ERT_2048), or
3146 * - the full Rx FIFO size minus one full frame
3148 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
3149 fc->pause_time = 0xFFFF;
3151 fc->pause_time = E1000_FC_PAUSE_TIME;
3153 fc->current_mode = fc->requested_mode;
3155 switch (hw->mac.type) {
3157 if ((adapter->flags & FLAG_HAS_ERT) &&
3158 (adapter->netdev->mtu > ETH_DATA_LEN))
3159 hwm = min(((pba << 10) * 9 / 10),
3160 ((pba << 10) - (E1000_ERT_2048 << 3)));
3162 hwm = min(((pba << 10) * 9 / 10),
3163 ((pba << 10) - adapter->max_frame_size));
3165 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
3166 fc->low_water = fc->high_water - 8;
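/*
 * Example: with a 20 KB Rx PBA and a 1518-byte max frame,
 * hwm = min(20480 * 9 / 10, 20480 - 1518) = 18432 bytes.
 */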
3170 * Workaround PCH LOM adapter hangs with certain network
3171 * loads. If hangs persist, try disabling Tx flow control.
3173 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3174 fc->high_water = 0x3500;
3175 fc->low_water = 0x1500;
3177 fc->high_water = 0x5000;
3178 fc->low_water = 0x3000;
3180 fc->refresh_time = 0x1000;
3183 fc->high_water = 0x05C20;
3184 fc->low_water = 0x05048;
3185 fc->pause_time = 0x0650;
3186 fc->refresh_time = 0x0400;
3190 /* Allow time for pending master requests to run */
3191 mac->ops.reset_hw(hw);
3194 * For parts with AMT enabled, let the firmware know
3195 * that the network interface is in control
3197 if (adapter->flags & FLAG_HAS_AMT)
3198 e1000_get_hw_control(adapter);
3202 if (mac->ops.init_hw(hw))
3203 e_err("Hardware Error\n");
3205 e1000_update_mng_vlan(adapter);
3207 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
3208 ew32(VET, ETH_P_8021Q);
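/* VET holds the VLAN Ether Type; 802.1Q tagged frames match 0x8100 */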
3210 e1000e_reset_adaptive(hw);
3211 e1000_get_phy_info(hw);
3213 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
3214 !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
3217 * speed up time to link by disabling smart power down, ignore
3218 * the return value of this function because there is nothing
3219 * different we would do if it failed
3221 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
3222 phy_data &= ~IGP02E1000_PM_SPD;
3223 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
3227 int e1000e_up(struct e1000_adapter *adapter)
3229 struct e1000_hw *hw = &adapter->hw;
3231 /* hardware has been reset, we need to reload some things */
3232 e1000_configure(adapter);
3234 clear_bit(__E1000_DOWN, &adapter->state);
3236 napi_enable(&adapter->napi);
3237 if (adapter->msix_entries)
3238 e1000_configure_msix(adapter);
3239 e1000_irq_enable(adapter);
3241 netif_wake_queue(adapter->netdev);
3243 /* fire a link change interrupt to start the watchdog */
3244 if (adapter->msix_entries)
3245 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
3247 ew32(ICS, E1000_ICS_LSC);
3252 void e1000e_down(struct e1000_adapter *adapter)
3254 struct net_device *netdev = adapter->netdev;
3255 struct e1000_hw *hw = &adapter->hw;
3259 * signal that we're down so the interrupt handler does not
3260 * reschedule our watchdog timer
3262 set_bit(__E1000_DOWN, &adapter->state);
3264 /* disable receives in the hardware */
3266 ew32(RCTL, rctl & ~E1000_RCTL_EN);
3267 /* flush and sleep below */
3269 netif_stop_queue(netdev);
3271 /* disable transmits in the hardware */
3273 tctl &= ~E1000_TCTL_EN;
3275 /* flush both disables and wait for them to finish */
3279 napi_disable(&adapter->napi);
3280 e1000_irq_disable(adapter);
3282 del_timer_sync(&adapter->watchdog_timer);
3283 del_timer_sync(&adapter->phy_info_timer);
3285 netif_carrier_off(netdev);
3286 adapter->link_speed = 0;
3287 adapter->link_duplex = 0;
3289 if (!pci_channel_offline(adapter->pdev))
3290 e1000e_reset(adapter);
3291 e1000_clean_tx_ring(adapter);
3292 e1000_clean_rx_ring(adapter);
3295 * TODO: for power management, we could drop the link and
3296 * pci_disable_device here.
3300 void e1000e_reinit_locked(struct e1000_adapter *adapter)
3303 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
3304 msleep(1);
3305 e1000e_down(adapter);
3306 e1000e_up(adapter);
3307 clear_bit(__E1000_RESETTING, &adapter->state);
3311 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
3312 * @adapter: board private structure to initialize
3314 * e1000_sw_init initializes the Adapter private data structure.
3315 * Fields are initialized based on PCI device information and
3316 * OS network device settings (MTU size).
3318 static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
3320 struct net_device *netdev = adapter->netdev;
3322 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
3323 adapter->rx_ps_bsize0 = 128;
3324 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3325 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3327 e1000e_set_interrupt_capability(adapter);
3329 if (e1000_alloc_queues(adapter))
3332 /* Explicitly disable IRQ since the NIC can be in any state. */
3333 e1000_irq_disable(adapter);
3335 set_bit(__E1000_DOWN, &adapter->state);
3340 * e1000_intr_msi_test - Interrupt Handler
3341 * @irq: interrupt number
3342 * @data: pointer to a network interface device structure
3344 static irqreturn_t e1000_intr_msi_test(int irq, void *data)
3346 struct net_device *netdev = data;
3347 struct e1000_adapter *adapter = netdev_priv(netdev);
3348 struct e1000_hw *hw = &adapter->hw;
3349 u32 icr = er32(ICR);
3351 e_dbg("icr is %08X\n", icr);
3352 if (icr & E1000_ICR_RXSEQ) {
3353 adapter->flags &= ~FLAG_MSI_TEST_FAILED;
3361 * e1000_test_msi_interrupt - Returns 0 for successful test
3362 * @adapter: board private struct
3364 * code flow taken from tg3.c
3366 static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
3368 struct net_device *netdev = adapter->netdev;
3369 struct e1000_hw *hw = &adapter->hw;
3372 /* poll_enable hasn't been called yet, so don't need disable */
3373 /* clear any pending events */
3376 /* free the real vector and request a test handler */
3377 e1000_free_irq(adapter);
3378 e1000e_reset_interrupt_capability(adapter);
3380 /* Assume that the test fails; if it succeeds, the test
3381 * MSI irq handler will clear this flag */
3382 adapter->flags |= FLAG_MSI_TEST_FAILED;
3384 err = pci_enable_msi(adapter->pdev);
3386 goto msi_test_failed;
3388 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
3389 netdev->name, netdev);
3391 pci_disable_msi(adapter->pdev);
3392 goto msi_test_failed;
3397 e1000_irq_enable(adapter);
3399 /* fire an unusual interrupt on the test handler */
3400 ew32(ICS, E1000_ICS_RXSEQ);
3404 e1000_irq_disable(adapter);
3408 if (adapter->flags & FLAG_MSI_TEST_FAILED) {
3409 adapter->int_mode = E1000E_INT_MODE_LEGACY;
3411 e_info("MSI interrupt test failed!\n");
3414 free_irq(adapter->pdev->irq, netdev);
3415 pci_disable_msi(adapter->pdev);
3418 goto msi_test_failed;
3420 /* okay so the test worked, restore settings */
3421 e_dbg("MSI interrupt test succeeded!\n");
3423 e1000e_set_interrupt_capability(adapter);
3424 e1000_request_irq(adapter);
3429 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
3430 * @adapter: board private struct
3432 * code flow taken from tg3.c, called with e1000 interrupts disabled.
3434 static int e1000_test_msi(struct e1000_adapter *adapter)
3439 if (!(adapter->flags & FLAG_MSI_ENABLED))
3442 /* disable SERR in case the MSI write causes a master abort */
3443 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
3444 if (pci_cmd & PCI_COMMAND_SERR)
3445 pci_write_config_word(adapter->pdev, PCI_COMMAND,
3446 pci_cmd & ~PCI_COMMAND_SERR);
3448 err = e1000_test_msi_interrupt(adapter);
3450 /* re-enable SERR */
3451 if (pci_cmd & PCI_COMMAND_SERR) {
3452 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
3453 pci_cmd |= PCI_COMMAND_SERR;
3454 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
3461 /* EIO means MSI test failed */
3465 /* back to INTx mode */
3466 e_warn("MSI interrupt test failed, using legacy interrupt.\n");
3468 e1000_free_irq(adapter);
3470 err = e1000_request_irq(adapter);
3476 * e1000_open - Called when a network interface is made active
3477 * @netdev: network interface device structure
3479 * Returns 0 on success, negative value on failure
3481 * The open entry point is called when a network interface is made
3482 * active by the system (IFF_UP). At this point all resources needed
3483 * for transmit and receive operations are allocated, the interrupt
3484 * handler is registered with the OS, the watchdog timer is started,
3485 * and the stack is notified that the interface is ready.
3487 static int e1000_open(struct net_device *netdev)
3489 struct e1000_adapter *adapter = netdev_priv(netdev);
3490 struct e1000_hw *hw = &adapter->hw;
3491 struct pci_dev *pdev = adapter->pdev;
3494 /* disallow open during test */
3495 if (test_bit(__E1000_TESTING, &adapter->state))
3498 pm_runtime_get_sync(&pdev->dev);
3500 netif_carrier_off(netdev);
3502 /* allocate transmit descriptors */
3503 err = e1000e_setup_tx_resources(adapter);
3507 /* allocate receive descriptors */
3508 err = e1000e_setup_rx_resources(adapter);
3513 * If AMT is enabled, let the firmware know that the network
3514 * interface is now open and reset the part to a known state.
3516 if (adapter->flags & FLAG_HAS_AMT) {
3517 e1000_get_hw_control(adapter);
3518 e1000e_reset(adapter);
3521 e1000e_power_up_phy(adapter);
3523 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
3524 if ((adapter->hw.mng_cookie.status &
3525 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
3526 e1000_update_mng_vlan(adapter);
3528 /* DMA latency requirement to workaround early-receive/jumbo issue */
3529 if (adapter->flags & FLAG_HAS_ERT)
3530 pm_qos_add_request(&adapter->netdev->pm_qos_req,
3531 PM_QOS_CPU_DMA_LATENCY,
3532 PM_QOS_DEFAULT_VALUE);
3535 * before we allocate an interrupt, we must be ready to handle it.
3536 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
3537 * as soon as we call pci_request_irq, so we have to setup our
3538 * clean_rx handler before we do so.
3540 e1000_configure(adapter);
3542 err = e1000_request_irq(adapter);
3547 * Work around PCIe errata with MSI interrupts causing some chipsets to
3548 * ignore e1000e MSI messages, which means we need to test our MSI
3549 * interrupt now.
3551 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
3552 err = e1000_test_msi(adapter);
3554 e_err("Interrupt allocation failed\n");
3559 /* From here on the code is the same as e1000e_up() */
3560 clear_bit(__E1000_DOWN, &adapter->state);
3562 napi_enable(&adapter->napi);
3564 e1000_irq_enable(adapter);
3566 netif_start_queue(netdev);
3568 adapter->idle_check = true;
3569 pm_runtime_put(&pdev->dev);
3571 /* fire a link status change interrupt to start the watchdog */
3572 if (adapter->msix_entries)
3573 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
3575 ew32(ICS, E1000_ICS_LSC);
3580 e1000_release_hw_control(adapter);
3581 e1000_power_down_phy(adapter);
3582 e1000e_free_rx_resources(adapter);
3584 e1000e_free_tx_resources(adapter);
3586 e1000e_reset(adapter);
3587 pm_runtime_put_sync(&pdev->dev);
3593 * e1000_close - Disables a network interface
3594 * @netdev: network interface device structure
3596 * Returns 0, this is not allowed to fail
3598 * The close entry point is called when an interface is de-activated
3599 * by the OS. The hardware is still under the driver's control, but
3600 * needs to be disabled. A global MAC reset is issued to stop the
3601 * hardware, and all transmit and receive resources are freed.
3603 static int e1000_close(struct net_device *netdev)
3605 struct e1000_adapter *adapter = netdev_priv(netdev);
3606 struct pci_dev *pdev = adapter->pdev;
3608 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
3610 pm_runtime_get_sync(&pdev->dev);
3612 if (!test_bit(__E1000_DOWN, &adapter->state)) {
3613 e1000e_down(adapter);
3614 e1000_free_irq(adapter);
3616 e1000_power_down_phy(adapter);
3618 e1000e_free_tx_resources(adapter);
3619 e1000e_free_rx_resources(adapter);
3622 * kill manageability vlan ID if supported, but not if a vlan with
3623 * the same ID is registered on the host OS (let 8021q kill it)
3625 if ((adapter->hw.mng_cookie.status &
3626 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
3628 vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
3629 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
3632 * If AMT is enabled, let the firmware know that the network
3633 * interface is now closed
3635 if (adapter->flags & FLAG_HAS_AMT)
3636 e1000_release_hw_control(adapter);
3638 if (adapter->flags & FLAG_HAS_ERT)
3639 pm_qos_remove_request(&adapter->netdev->pm_qos_req);
3641 pm_runtime_put_sync(&pdev->dev);
3646 * e1000_set_mac - Change the Ethernet Address of the NIC
3647 * @netdev: network interface device structure
3648 * @p: pointer to an address structure
3650 * Returns 0 on success, negative on failure
3652 static int e1000_set_mac(struct net_device *netdev, void *p)
3654 struct e1000_adapter *adapter = netdev_priv(netdev);
3655 struct sockaddr *addr = p;
3657 if (!is_valid_ether_addr(addr->sa_data))
3658 return -EADDRNOTAVAIL;
3660 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3661 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
3663 e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
3665 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
3666 /* activate the work around */
3667 e1000e_set_laa_state_82571(&adapter->hw, 1);
3670 * Hold a copy of the LAA in RAR[14]. This is done so that
3671 * between the time RAR[0] gets clobbered and the time it
3672 * gets fixed (in e1000_watchdog), the actual LAA is in one
3673 * of the RARs and no incoming packets directed to this port
3674 * are dropped. Eventually the LAA will be in RAR[0] and
3675 * RAR[14].
3677 e1000e_rar_set(&adapter->hw,
3678 adapter->hw.mac.addr,
3679 adapter->hw.mac.rar_entry_count - 1);
3686 * e1000e_update_phy_task - work thread to update phy
3687 * @work: pointer to our work struct
3689 * this worker thread exists because reading the phy requires
3690 * acquiring a semaphore that we may have to sleep on, and we
3691 * cannot msleep in timer context.
3693 static void e1000e_update_phy_task(struct work_struct *work)
3695 struct e1000_adapter *adapter = container_of(work,
3696 struct e1000_adapter, update_phy_task);
3697 e1000_get_phy_info(&adapter->hw);
3701 * Need to wait a few seconds after link up to get diagnostic information from
3702 * the phy
3704 static void e1000_update_phy_info(unsigned long data)
3706 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
3707 schedule_work(&adapter->update_phy_task);
3711 * e1000e_update_phy_stats - Update the PHY statistics counters
3712 * @adapter: board private structure
3714 static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
3716 struct e1000_hw *hw = &adapter->hw;
3720 ret_val = hw->phy.ops.acquire(hw);
3726 #define HV_PHY_STATS_PAGE 778
3728 * A page set is expensive so check if already on desired page.
3729 * If not, set to the page with the PHY status registers.
3731 ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
3735 if (phy_data != (HV_PHY_STATS_PAGE << IGP_PAGE_SHIFT)) {
3736 ret_val = e1000e_write_phy_reg_mdic(hw,
3737 IGP01E1000_PHY_PAGE_SELECT,
3738 (HV_PHY_STATS_PAGE <<
3744 /* Read/clear the upper 16-bit registers and read/accumulate lower */
3746 /* Single Collision Count */
3747 e1000e_read_phy_reg_mdic(hw, HV_SCC_UPPER & MAX_PHY_REG_ADDRESS,
3749 ret_val = e1000e_read_phy_reg_mdic(hw,
3750 HV_SCC_LOWER & MAX_PHY_REG_ADDRESS,
3753 adapter->stats.scc += phy_data;
3755 /* Excessive Collision Count */
3756 e1000e_read_phy_reg_mdic(hw, HV_ECOL_UPPER & MAX_PHY_REG_ADDRESS,
3758 ret_val = e1000e_read_phy_reg_mdic(hw,
3759 HV_ECOL_LOWER & MAX_PHY_REG_ADDRESS,
3762 adapter->stats.ecol += phy_data;
3764 /* Multiple Collision Count */
3765 e1000e_read_phy_reg_mdic(hw, HV_MCC_UPPER & MAX_PHY_REG_ADDRESS,
3767 ret_val = e1000e_read_phy_reg_mdic(hw,
3768 HV_MCC_LOWER & MAX_PHY_REG_ADDRESS,
3771 adapter->stats.mcc += phy_data;
3773 /* Late Collision Count */
3774 e1000e_read_phy_reg_mdic(hw, HV_LATECOL_UPPER & MAX_PHY_REG_ADDRESS,
3776 ret_val = e1000e_read_phy_reg_mdic(hw,
3778 MAX_PHY_REG_ADDRESS,
3781 adapter->stats.latecol += phy_data;
3783 /* Collision Count - also used for adaptive IFS */
3784 e1000e_read_phy_reg_mdic(hw, HV_COLC_UPPER & MAX_PHY_REG_ADDRESS,
3786 ret_val = e1000e_read_phy_reg_mdic(hw,
3787 HV_COLC_LOWER & MAX_PHY_REG_ADDRESS,
3790 hw->mac.collision_delta = phy_data;
3793 e1000e_read_phy_reg_mdic(hw, HV_DC_UPPER & MAX_PHY_REG_ADDRESS,
3795 ret_val = e1000e_read_phy_reg_mdic(hw,
3796 HV_DC_LOWER & MAX_PHY_REG_ADDRESS,
3799 adapter->stats.dc += phy_data;
3801 /* Transmit with no CRS */
3802 e1000e_read_phy_reg_mdic(hw, HV_TNCRS_UPPER & MAX_PHY_REG_ADDRESS,
3804 ret_val = e1000e_read_phy_reg_mdic(hw,
3805 HV_TNCRS_LOWER & MAX_PHY_REG_ADDRESS,
3808 adapter->stats.tncrs += phy_data;
3811 hw->phy.ops.release(hw);
3815 * e1000e_update_stats - Update the board statistics counters
3816 * @adapter: board private structure
3818 void e1000e_update_stats(struct e1000_adapter *adapter)
3820 struct net_device *netdev = adapter->netdev;
3821 struct e1000_hw *hw = &adapter->hw;
3822 struct pci_dev *pdev = adapter->pdev;
3825 * Prevent stats update while adapter is being reset, or if the pci
3826 * connection is down.
3828 if (adapter->link_speed == 0)
3830 if (pci_channel_offline(pdev))
3833 adapter->stats.crcerrs += er32(CRCERRS);
3834 adapter->stats.gprc += er32(GPRC);
3835 adapter->stats.gorc += er32(GORCL);
3836 er32(GORCH); /* Clear gorc */
3837 adapter->stats.bprc += er32(BPRC);
3838 adapter->stats.mprc += er32(MPRC);
3839 adapter->stats.roc += er32(ROC);
3841 adapter->stats.mpc += er32(MPC);
3843 /* Half-duplex statistics */
3844 if (adapter->link_duplex == HALF_DUPLEX) {
3845 if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
3846 e1000e_update_phy_stats(adapter);
3848 adapter->stats.scc += er32(SCC);
3849 adapter->stats.ecol += er32(ECOL);
3850 adapter->stats.mcc += er32(MCC);
3851 adapter->stats.latecol += er32(LATECOL);
3852 adapter->stats.dc += er32(DC);
3854 hw->mac.collision_delta = er32(COLC);
3856 if ((hw->mac.type != e1000_82574) &&
3857 (hw->mac.type != e1000_82583))
3858 adapter->stats.tncrs += er32(TNCRS);
3860 adapter->stats.colc += hw->mac.collision_delta;
3863 adapter->stats.xonrxc += er32(XONRXC);
3864 adapter->stats.xontxc += er32(XONTXC);
3865 adapter->stats.xoffrxc += er32(XOFFRXC);
3866 adapter->stats.xofftxc += er32(XOFFTXC);
3867 adapter->stats.gptc += er32(GPTC);
3868 adapter->stats.gotc += er32(GOTCL);
3869 er32(GOTCH); /* Clear gotc */
3870 adapter->stats.rnbc += er32(RNBC);
3871 adapter->stats.ruc += er32(RUC);
3873 adapter->stats.mptc += er32(MPTC);
3874 adapter->stats.bptc += er32(BPTC);
3876 /* used for adaptive IFS */
3878 hw->mac.tx_packet_delta = er32(TPT);
3879 adapter->stats.tpt += hw->mac.tx_packet_delta;
3881 adapter->stats.algnerrc += er32(ALGNERRC);
3882 adapter->stats.rxerrc += er32(RXERRC);
3883 adapter->stats.cexterr += er32(CEXTERR);
3884 adapter->stats.tsctc += er32(TSCTC);
3885 adapter->stats.tsctfc += er32(TSCTFC);
3887 /* Fill out the OS statistics structure */
3888 netdev->stats.multicast = adapter->stats.mprc;
3889 netdev->stats.collisions = adapter->stats.colc;
3894 * RLEC on some newer hardware can be incorrect so build
3895 * our own version based on RUC and ROC
3897 netdev->stats.rx_errors = adapter->stats.rxerrc +
3898 adapter->stats.crcerrs + adapter->stats.algnerrc +
3899 adapter->stats.ruc + adapter->stats.roc +
3900 adapter->stats.cexterr;
3901 netdev->stats.rx_length_errors = adapter->stats.ruc +
3902 adapter->stats.roc;
3903 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3904 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3905 netdev->stats.rx_missed_errors = adapter->stats.mpc;
3908 netdev->stats.tx_errors = adapter->stats.ecol +
3909 adapter->stats.latecol;
3910 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3911 netdev->stats.tx_window_errors = adapter->stats.latecol;
3912 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3914 /* Tx Dropped needs to be maintained elsewhere */
3916 /* Management Stats */
3917 adapter->stats.mgptc += er32(MGTPTC);
3918 adapter->stats.mgprc += er32(MGTPRC);
3919 adapter->stats.mgpdc += er32(MGTPDC);
3923 * e1000_phy_read_status - Update the PHY register status snapshot
3924 * @adapter: board private structure
3926 static void e1000_phy_read_status(struct e1000_adapter *adapter)
3928 struct e1000_hw *hw = &adapter->hw;
3929 struct e1000_phy_regs *phy = &adapter->phy_regs;
3932 if ((er32(STATUS) & E1000_STATUS_LU) &&
3933 (adapter->hw.phy.media_type == e1000_media_type_copper)) {
3934 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
3935 ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
3936 ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
3937 ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa);
3938 ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion);
3939 ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000);
3940 ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
3941 ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
3943 e_warn("Error reading PHY register\n");
3946 * Do not read PHY registers if link is not up
3947 * Set values to typical power-on defaults
3949 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
3950 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
3951 BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
3953 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
3954 ADVERTISE_ALL | ADVERTISE_CSMA);
3956 phy->expansion = EXPANSION_ENABLENPAGE;
3957 phy->ctrl1000 = ADVERTISE_1000FULL;
3959 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
3963 static void e1000_print_link_info(struct e1000_adapter *adapter)
3965 struct e1000_hw *hw = &adapter->hw;
3966 u32 ctrl = er32(CTRL);
3968 /* Link status message must follow this format for user tools */
3969 printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, "
3970 "Flow Control: %s\n",
3971 adapter->netdev->name,
3972 adapter->link_speed,
3973 (adapter->link_duplex == FULL_DUPLEX) ?
3974 "Full Duplex" : "Half Duplex",
3975 ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
3977 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
3978 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
3981 static bool e1000e_has_link(struct e1000_adapter *adapter)
3983 struct e1000_hw *hw = &adapter->hw;
3984 bool link_active = false;
3988 * get_link_status is set on LSC (link status) interrupt or
3989 * Rx sequence error interrupt and stays set until
3990 * check_for_link establishes link, so link_active remains
3991 * false until then; this applies to copper adapters ONLY
3993 switch (hw->phy.media_type) {
3994 case e1000_media_type_copper:
3995 if (hw->mac.get_link_status) {
3996 ret_val = hw->mac.ops.check_for_link(hw);
3997 link_active = !hw->mac.get_link_status;
4002 case e1000_media_type_fiber:
4003 ret_val = hw->mac.ops.check_for_link(hw);
4004 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
4006 case e1000_media_type_internal_serdes:
4007 ret_val = hw->mac.ops.check_for_link(hw);
4008 link_active = adapter->hw.mac.serdes_has_link;
4011 case e1000_media_type_unknown:
4015 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
4016 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
4017 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
4018 e_info("Gigabit has been disabled, downgrading speed\n");
4024 static void e1000e_enable_receives(struct e1000_adapter *adapter)
4026 /* make sure the receive unit is started */
4027 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
4028 (adapter->flags & FLAG_RX_RESTART_NOW)) {
4029 struct e1000_hw *hw = &adapter->hw;
4030 u32 rctl = er32(RCTL);
4031 ew32(RCTL, rctl | E1000_RCTL_EN);
4032 adapter->flags &= ~FLAG_RX_RESTART_NOW;
4037 * e1000_watchdog - Timer Call-back
4038 * @data: pointer to adapter cast into an unsigned long
4040 static void e1000_watchdog(unsigned long data)
4042 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
4044 /* Do the rest outside of interrupt context */
4045 schedule_work(&adapter->watchdog_task);
4047 /* TODO: make this use queue_delayed_work() */
4050 static void e1000_watchdog_task(struct work_struct *work)
4052 struct e1000_adapter *adapter = container_of(work,
4053 struct e1000_adapter, watchdog_task);
4054 struct net_device *netdev = adapter->netdev;
4055 struct e1000_mac_info *mac = &adapter->hw.mac;
4056 struct e1000_phy_info *phy = &adapter->hw.phy;
4057 struct e1000_ring *tx_ring = adapter->tx_ring;
4058 struct e1000_hw *hw = &adapter->hw;
4062 link = e1000e_has_link(adapter);
4063 if ((netif_carrier_ok(netdev)) && link) {
4064 /* Cancel scheduled suspend requests. */
4065 pm_runtime_resume(netdev->dev.parent);
4067 e1000e_enable_receives(adapter);
4071 if ((e1000e_enable_tx_pkt_filtering(hw)) &&
4072 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
4073 e1000_update_mng_vlan(adapter);
4076 if (!netif_carrier_ok(netdev)) {
4079 /* Cancel scheduled suspend requests. */
4080 pm_runtime_resume(netdev->dev.parent);
4082 /* update snapshot of PHY registers on LSC */
4083 e1000_phy_read_status(adapter);
4084 mac->ops.get_link_up_info(&adapter->hw,
4085 &adapter->link_speed,
4086 &adapter->link_duplex);
4087 e1000_print_link_info(adapter);
4089 * On supported PHYs, check for duplex mismatch only
4090 * if link has autonegotiated at 10/100 half
4092 if ((hw->phy.type == e1000_phy_igp_3 ||
4093 hw->phy.type == e1000_phy_bm) &&
4094 hw->mac.autoneg &&
4095 (adapter->link_speed == SPEED_10 ||
4096 adapter->link_speed == SPEED_100) &&
4097 (adapter->link_duplex == HALF_DUPLEX)) {
4100 e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
4102 if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
4103 e_info("Autonegotiated half duplex but link partner cannot"
4104 " autoneg. Try forcing full duplex if link gets many"
4105 " collisions.\n");
4109 /* adjust timeout factor according to speed/duplex */
4110 adapter->tx_timeout_factor = 1;
4111 switch (adapter->link_speed) {
4112 case SPEED_10:
4114 adapter->tx_timeout_factor = 16;
4115 break;
4116 case SPEED_100:
4118 adapter->tx_timeout_factor = 10;
4119 break;
4123 * workaround: re-program speed mode bit after
4124 * link-up event
4126 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
4129 tarc0 = er32(TARC(0));
4130 tarc0 &= ~SPEED_MODE_BIT;
4131 ew32(TARC(0), tarc0);
4135 * disable TSO for pcie and 10/100 speeds, to avoid
4136 * some hardware issues
4138 if (!(adapter->flags & FLAG_TSO_FORCE)) {
4139 switch (adapter->link_speed) {
4140 case SPEED_10:
4141 case SPEED_100:
4142 e_info("10/100 speed: disabling TSO\n");
4143 netdev->features &= ~NETIF_F_TSO;
4144 netdev->features &= ~NETIF_F_TSO6;
4145 break;
4146 default:
4147 netdev->features |= NETIF_F_TSO;
4148 netdev->features |= NETIF_F_TSO6;
4157 * enable transmits in the hardware, need to do this
4158 * after setting TARC(0)
4161 tctl |= E1000_TCTL_EN;
4165 * Perform any post-link-up configuration before
4166 * reporting link up.
4168 if (phy->ops.cfg_on_link_up)
4169 phy->ops.cfg_on_link_up(hw);
4171 netif_carrier_on(netdev);
4173 if (!test_bit(__E1000_DOWN, &adapter->state))
4174 mod_timer(&adapter->phy_info_timer,
4175 round_jiffies(jiffies + 2 * HZ));
4178 if (netif_carrier_ok(netdev)) {
4179 adapter->link_speed = 0;
4180 adapter->link_duplex = 0;
4181 /* Link status message must follow this format */
4182 printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
4183 adapter->netdev->name);
4184 netif_carrier_off(netdev);
4185 if (!test_bit(__E1000_DOWN, &adapter->state))
4186 mod_timer(&adapter->phy_info_timer,
4187 round_jiffies(jiffies + 2 * HZ));
4189 if (adapter->flags & FLAG_RX_NEEDS_RESTART)
4190 schedule_work(&adapter->reset_task);
4192 pm_schedule_suspend(netdev->dev.parent,
4198 e1000e_update_stats(adapter);
4200 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
4201 adapter->tpt_old = adapter->stats.tpt;
4202 mac->collision_delta = adapter->stats.colc - adapter->colc_old;
4203 adapter->colc_old = adapter->stats.colc;
4205 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
4206 adapter->gorc_old = adapter->stats.gorc;
4207 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
4208 adapter->gotc_old = adapter->stats.gotc;
4210 e1000e_update_adaptive(&adapter->hw);
4212 if (!netif_carrier_ok(netdev)) {
4213 tx_pending = (e1000_desc_unused(tx_ring) + 1 <
4217 * We've lost link, so the controller stops DMA,
4218 * but we've got queued Tx work that's never going
4219 * to get done, so reset controller to flush Tx.
4220 * (Do the reset outside of interrupt context).
4222 adapter->tx_timeout_count++;
4223 schedule_work(&adapter->reset_task);
4224 /* return immediately since reset is imminent */
4229 /* Simple mode for Interrupt Throttle Rate (ITR) */
4230 if (adapter->itr_setting == 4) {
4232 * Symmetric Tx/Rx gets a reduced ITR=2000;
4233 * Total asymmetrical Tx or Rx gets ITR=8000;
4234 * everyone else is between 2000-8000.
4236 u32 goc = (adapter->gotc + adapter->gorc) / 10000;
4237 u32 dif = (adapter->gotc > adapter->gorc ?
4238 adapter->gotc - adapter->gorc :
4239 adapter->gorc - adapter->gotc) / 10000;
4240 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
4242 ew32(ITR, 1000000000 / (itr * 256));
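/*
 * Worked through: perfectly symmetric traffic gives dif = 0 and
 * thus itr = 2000; traffic in only one direction gives dif == goc
 * and thus itr = 6000 + 2000 = 8000, matching the comment above.
 */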
4245 /* Cause software interrupt to ensure Rx ring is cleaned */
4246 if (adapter->msix_entries)
4247 ew32(ICS, adapter->rx_ring->ims_val);
4249 ew32(ICS, E1000_ICS_RXDMT0);
4251 /* Force detection of hung controller every watchdog period */
4252 adapter->detect_tx_hung = 1;
4255 * With 82571 controllers, LAA may be overwritten due to controller
4256 * reset from the other port. Set the appropriate LAA in RAR[0]
4258 if (e1000e_get_laa_state_82571(hw))
4259 e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
4261 /* Reset the timer */
4262 if (!test_bit(__E1000_DOWN, &adapter->state))
4263 mod_timer(&adapter->watchdog_timer,
4264 round_jiffies(jiffies + 2 * HZ));
4267 #define E1000_TX_FLAGS_CSUM 0x00000001
4268 #define E1000_TX_FLAGS_VLAN 0x00000002
4269 #define E1000_TX_FLAGS_TSO 0x00000004
4270 #define E1000_TX_FLAGS_IPV4 0x00000008
4271 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
4272 #define E1000_TX_FLAGS_VLAN_SHIFT 16
4274 static int e1000_tso(struct e1000_adapter *adapter,
4275 struct sk_buff *skb)
4277 struct e1000_ring *tx_ring = adapter->tx_ring;
4278 struct e1000_context_desc *context_desc;
4279 struct e1000_buffer *buffer_info;
4282 u16 ipcse = 0, tucse, mss;
4283 u8 ipcss, ipcso, tucss, tucso, hdr_len;
4286 if (!skb_is_gso(skb))
4289 if (skb_header_cloned(skb)) {
4290 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
4295 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4296 mss = skb_shinfo(skb)->gso_size;
4297 if (skb->protocol == htons(ETH_P_IP)) {
4298 struct iphdr *iph = ip_hdr(skb);
4301 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
4303 cmd_length = E1000_TXD_CMD_IP;
4304 ipcse = skb_transport_offset(skb) - 1;
4305 } else if (skb_is_gso_v6(skb)) {
4306 ipv6_hdr(skb)->payload_len = 0;
4307 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4308 &ipv6_hdr(skb)->daddr,
4312 ipcss = skb_network_offset(skb);
4313 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
4314 tucss = skb_transport_offset(skb);
4315 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
4316 tucse = 0;
4318 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
4319 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
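/*
 * The context descriptor filled in below gives the hardware
 * everything TSO needs: checksum start/end offsets (ipcss/ipcse,
 * tucss), where to insert the checksums (ipcso/tucso), the MSS,
 * and the header length replicated in front of every segment.
 */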
4321 i = tx_ring->next_to_use;
4322 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
4323 buffer_info = &tx_ring->buffer_info[i];
4325 context_desc->lower_setup.ip_fields.ipcss = ipcss;
4326 context_desc->lower_setup.ip_fields.ipcso = ipcso;
4327 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
4328 context_desc->upper_setup.tcp_fields.tucss = tucss;
4329 context_desc->upper_setup.tcp_fields.tucso = tucso;
4330 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
4331 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
4332 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
4333 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
4335 buffer_info->time_stamp = jiffies;
4336 buffer_info->next_to_watch = i;
4339 if (i == tx_ring->count)
4341 tx_ring->next_to_use = i;
4346 static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
4348 struct e1000_ring *tx_ring = adapter->tx_ring;
4349 struct e1000_context_desc *context_desc;
4350 struct e1000_buffer *buffer_info;
4353 u32 cmd_len = E1000_TXD_CMD_DEXT;
4356 if (skb->ip_summed != CHECKSUM_PARTIAL)
4359 if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
4360 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
4361 else
4362 protocol = skb->protocol;
4365 case cpu_to_be16(ETH_P_IP):
4366 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
4367 cmd_len |= E1000_TXD_CMD_TCP;
4369 case cpu_to_be16(ETH_P_IPV6):
4370 /* XXX not handling all IPV6 headers */
4371 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
4372 cmd_len |= E1000_TXD_CMD_TCP;
4375 if (unlikely(net_ratelimit()))
4376 e_warn("checksum_partial proto=%x!\n",
4377 be16_to_cpu(protocol));
4381 css = skb_transport_offset(skb);
4383 i = tx_ring->next_to_use;
4384 buffer_info = &tx_ring->buffer_info[i];
4385 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
4387 context_desc->lower_setup.ip_config = 0;
4388 context_desc->upper_setup.tcp_fields.tucss = css;
4389 context_desc->upper_setup.tcp_fields.tucso =
4390 css + skb->csum_offset;
4391 context_desc->upper_setup.tcp_fields.tucse = 0;
4392 context_desc->tcp_seg_setup.data = 0;
4393 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
4395 buffer_info->time_stamp = jiffies;
4396 buffer_info->next_to_watch = i;
4399 if (i == tx_ring->count)
4401 tx_ring->next_to_use = i;
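/*
 * Editorial worked example (hypothetical frame): for plain TCP/IPv4,
 * css = skb_transport_offset(skb) = 34 and skb->csum_offset = 16, so
 * tucso = 50 and the hardware writes the computed checksum straight
 * into tcphdr->check.  A tucse of 0 tells the hardware to checksum
 * through to the end of the packet.
 */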
4406 #define E1000_MAX_PER_TXD 8192
4407 #define E1000_MAX_TXD_PWR 12
4409 static int e1000_tx_map(struct e1000_adapter *adapter,
4410 struct sk_buff *skb, unsigned int first,
4411 unsigned int max_per_txd, unsigned int nr_frags,
4412 unsigned int mss)
4414 struct e1000_ring *tx_ring = adapter->tx_ring;
4415 struct pci_dev *pdev = adapter->pdev;
4416 struct e1000_buffer *buffer_info;
4417 unsigned int len = skb_headlen(skb);
4418 unsigned int offset = 0, size, count = 0, i;
4419 unsigned int f, bytecount, segs;
4421 i = tx_ring->next_to_use;
4424 buffer_info = &tx_ring->buffer_info[i];
4425 size = min(len, max_per_txd);
4427 buffer_info->length = size;
4428 buffer_info->time_stamp = jiffies;
4429 buffer_info->next_to_watch = i;
4430 buffer_info->dma = dma_map_single(&pdev->dev,
4431 skb->data + offset,
4432 size, DMA_TO_DEVICE);
4433 buffer_info->mapped_as_page = false;
4434 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
4443 if (i == tx_ring->count)
4448 for (f = 0; f < nr_frags; f++) {
4449 struct skb_frag_struct *frag;
4451 frag = &skb_shinfo(skb)->frags[f];
4453 offset = frag->page_offset;
4457 if (i == tx_ring->count)
4460 buffer_info = &tx_ring->buffer_info[i];
4461 size = min(len, max_per_txd);
4463 buffer_info->length = size;
4464 buffer_info->time_stamp = jiffies;
4465 buffer_info->next_to_watch = i;
4466 buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
4467 offset, size,
4468 DMA_TO_DEVICE);
4469 buffer_info->mapped_as_page = true;
4470 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
4479 segs = skb_shinfo(skb)->gso_segs ?: 1;
4480 /* multiply data chunks by size of headers */
4481 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
4483 tx_ring->buffer_info[i].skb = skb;
4484 tx_ring->buffer_info[i].segs = segs;
4485 tx_ring->buffer_info[i].bytecount = bytecount;
4486 tx_ring->buffer_info[first].next_to_watch = i;
4491 dev_err(&pdev->dev, "TX DMA map failed\n");
4492 buffer_info->dma = 0;
4498 i += tx_ring->count;
4500 buffer_info = &tx_ring->buffer_info[i];
4501 e1000_put_txbuf(adapter, buffer_info);
4507 static void e1000_tx_queue(struct e1000_adapter *adapter,
4508 int tx_flags, int count)
4510 struct e1000_ring *tx_ring = adapter->tx_ring;
4511 struct e1000_tx_desc *tx_desc = NULL;
4512 struct e1000_buffer *buffer_info;
4513 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
4516 if (tx_flags & E1000_TX_FLAGS_TSO) {
4517 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
4519 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
4521 if (tx_flags & E1000_TX_FLAGS_IPV4)
4522 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
4525 if (tx_flags & E1000_TX_FLAGS_CSUM) {
4526 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
4527 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
4530 if (tx_flags & E1000_TX_FLAGS_VLAN) {
4531 txd_lower |= E1000_TXD_CMD_VLE;
4532 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
4535 i = tx_ring->next_to_use;
4538 buffer_info = &tx_ring->buffer_info[i];
4539 tx_desc = E1000_TX_DESC(*tx_ring, i);
4540 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4541 tx_desc->lower.data =
4542 cpu_to_le32(txd_lower | buffer_info->length);
4543 tx_desc->upper.data = cpu_to_le32(txd_upper);
4546 if (i == tx_ring->count)
4550 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
4553 * Force memory writes to complete before letting h/w
4554 * know there are new descriptors to fetch. (Only
4555 * applicable for weak-ordered memory model archs,
4560 tx_ring->next_to_use = i;
4561 writel(i, adapter->hw.hw_addr + tx_ring->tail);
4563 * we need this if more than one processor can write to our tail
4564 * at a time; it synchronizes IO on IA64/Altix systems
4569 #define MINIMUM_DHCP_PACKET_SIZE 282
4570 static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
4571 struct sk_buff *skb)
4573 struct e1000_hw *hw = &adapter->hw;
4576 if (vlan_tx_tag_present(skb)) {
4577 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
4578 (adapter->hw.mng_cookie.status &
4579 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
4583 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
4586 if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP))
4590 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
4593 if (ip->protocol != IPPROTO_UDP)
4596 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
4597 if (ntohs(udp->dest) != 67)
4600 offset = (u8 *)udp + 8 - skb->data;
4601 length = skb->len - offset;
4602 return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
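/*
 * Editorial note on the offsets above (untagged frame assumed): the
 * IP header starts at skb->data + 14, the UDP header at
 * ip + ihl * 4 (20 bytes when there are no IP options), and the
 * DHCP payload 8 bytes after that; destination port 67 is the
 * BOOTP/DHCP server port.  Only the DHCP payload is handed to
 * e1000e_mng_write_dhcp_info().
 */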
4608 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
4610 struct e1000_adapter *adapter = netdev_priv(netdev);
4612 netif_stop_queue(netdev);
4614 * Herbert's original patch had:
4615 * smp_mb__after_netif_stop_queue();
4616 * but since that doesn't exist yet, just open code it.
4621 * We need to check again in case another CPU has just
4622 * made room available.
4624 if (e1000_desc_unused(adapter->tx_ring) < size)
4628 netif_start_queue(netdev);
4629 ++adapter->restart_queue;
4633 static int e1000_maybe_stop_tx(struct net_device *netdev, int size)
4635 struct e1000_adapter *adapter = netdev_priv(netdev);
4637 if (e1000_desc_unused(adapter->tx_ring) >= size)
4639 return __e1000_maybe_stop_tx(netdev, size);
4642 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
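/*
 * Editorial worked example (hypothetical sizes): TXD_USE_COUNT()
 * conservatively rounds up the number of descriptors needed for a
 * buffer of S bytes in 2^X byte chunks.  With the default
 * X = E1000_MAX_TXD_PWR = 12, a 3000-byte buffer needs
 * (3000 >> 12) + 1 = 1 descriptor and a 5000-byte buffer
 * (5000 >> 12) + 1 = 2.
 */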
4643 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
4644 struct net_device *netdev)
4646 struct e1000_adapter *adapter = netdev_priv(netdev);
4647 struct e1000_ring *tx_ring = adapter->tx_ring;
4649 unsigned int max_per_txd = E1000_MAX_PER_TXD;
4650 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
4651 unsigned int tx_flags = 0;
4652 unsigned int len = skb_headlen(skb);
4653 unsigned int nr_frags;
4659 if (test_bit(__E1000_DOWN, &adapter->state)) {
4660 dev_kfree_skb_any(skb);
4661 return NETDEV_TX_OK;
4664 if (skb->len <= 0) {
4665 dev_kfree_skb_any(skb);
4666 return NETDEV_TX_OK;
4669 mss = skb_shinfo(skb)->gso_size;
4671 * The controller does a simple calculation to
4672 * make sure there is enough room in the FIFO before
4673 * initiating the DMA for each buffer. The calc is:
4674 * 4 = ceil(buffer len/mss). To make sure we don't
4675 * overrun the FIFO, adjust the max buffer len if mss drops.
4680 max_per_txd = min(mss << 2, max_per_txd);
4681 max_txd_pwr = fls(max_per_txd) - 1;
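/*
 * Editorial worked example (hypothetical mss): with mss = 536,
 * max_per_txd = min(536 << 2, 8192) = 2144 bytes and
 * max_txd_pwr = fls(2144) - 1 = 11, so no DMA buffer exceeds
 * 4 * mss and ceil(buffer len / mss) stays <= 4, as the FIFO
 * calculation described above requires.
 */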
4684 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
4685 * points to just header, pull a few bytes of payload from
4686 * frags into skb->data
4688 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4690 * we do this workaround for ES2LAN, but it is unnecessary;
4691 * avoiding it could save a lot of cycles
4693 if (skb->data_len && (hdr_len == len)) {
4694 unsigned int pull_size;
4696 pull_size = min((unsigned int)4, skb->data_len);
4697 if (!__pskb_pull_tail(skb, pull_size)) {
4698 e_err("__pskb_pull_tail failed.\n");
4699 dev_kfree_skb_any(skb);
4700 return NETDEV_TX_OK;
4702 len = skb_headlen(skb);
4706 /* reserve a descriptor for the offload context */
4707 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
4711 count += TXD_USE_COUNT(len, max_txd_pwr);
4713 nr_frags = skb_shinfo(skb)->nr_frags;
4714 for (f = 0; f < nr_frags; f++)
4715 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
4718 if (adapter->hw.mac.tx_pkt_filtering)
4719 e1000_transfer_dhcp_info(adapter, skb);
4722 * need: count + 2 desc gap to keep tail from touching
4723 * head, otherwise try next time
4725 if (e1000_maybe_stop_tx(netdev, count + 2))
4726 return NETDEV_TX_BUSY;
4728 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
4729 tx_flags |= E1000_TX_FLAGS_VLAN;
4730 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
4733 first = tx_ring->next_to_use;
4735 tso = e1000_tso(adapter, skb);
4737 dev_kfree_skb_any(skb);
4738 return NETDEV_TX_OK;
4742 tx_flags |= E1000_TX_FLAGS_TSO;
4743 else if (e1000_tx_csum(adapter, skb))
4744 tx_flags |= E1000_TX_FLAGS_CSUM;
4747 * The old method assumed an IPv4 packet by default if TSO was
4748 * enabled. 82571 hardware supports TSO for IPv6 as well, so we
4749 * can no longer assume; we must check the protocol.
4751 if (skb->protocol == htons(ETH_P_IP))
4752 tx_flags |= E1000_TX_FLAGS_IPV4;
4754 /* if count is 0 then a DMA mapping error has occurred */
4755 count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
4757 e1000_tx_queue(adapter, tx_flags, count);
4758 /* Make sure there is space in the ring for the next send. */
4759 e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
4762 dev_kfree_skb_any(skb);
4763 tx_ring->buffer_info[first].time_stamp = 0;
4764 tx_ring->next_to_use = first;
4767 return NETDEV_TX_OK;
4771 * e1000_tx_timeout - Respond to a Tx Hang
4772 * @netdev: network interface device structure
4774 static void e1000_tx_timeout(struct net_device *netdev)
4776 struct e1000_adapter *adapter = netdev_priv(netdev);
4778 /* Do the reset outside of interrupt context */
4779 adapter->tx_timeout_count++;
4780 schedule_work(&adapter->reset_task);
4783 static void e1000_reset_task(struct work_struct *work)
4785 struct e1000_adapter *adapter;
4786 adapter = container_of(work, struct e1000_adapter, reset_task);
4788 e1000e_dump(adapter);
4789 e_err("Reset adapter\n");
4790 e1000e_reinit_locked(adapter);
4794 * e1000_get_stats - Get System Network Statistics
4795 * @netdev: network interface device structure
4797 * Returns the address of the device statistics structure.
4798 * The statistics are actually updated from the timer callback.
4800 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
4802 /* only return the current stats */
4803 return &netdev->stats;
4807 * e1000_change_mtu - Change the Maximum Transfer Unit
4808 * @netdev: network interface device structure
4809 * @new_mtu: new value for maximum frame size
4811 * Returns 0 on success, negative on failure
4813 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
4815 struct e1000_adapter *adapter = netdev_priv(netdev);
4816 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
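/*
 * Editorial note: for the standard MTU of 1500 this gives
 * max_frame = 1500 + 14 + 4 = 1518 bytes, exactly
 * ETH_FRAME_LEN + ETH_FCS_LEN, so the jumbo checks below only
 * trigger for MTU values above 1500.
 */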
4818 /* Jumbo frame support */
4819 if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
4820 !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
4821 e_err("Jumbo Frames not supported.\n");
4825 /* Supported frame sizes */
4826 if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
4827 (max_frame > adapter->max_hw_frame_size)) {
4828 e_err("Unsupported MTU setting\n");
4832 /* 82573 Errata 17 */
4833 if (((adapter->hw.mac.type == e1000_82573) ||
4834 (adapter->hw.mac.type == e1000_82574)) &&
4835 (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
4836 adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
4837 e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
4840 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
4842 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
4843 adapter->max_frame_size = max_frame;
4844 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
4845 netdev->mtu = new_mtu;
4846 if (netif_running(netdev))
4847 e1000e_down(adapter);
4850 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
4851 * means we reserve 2 more; this pushes us to allocate from the next larger slab size,
4853 * i.e. RXBUFFER_2048 --> size-4096 slab
4854 * However with the new *_jumbo_rx* routines, jumbo receives will use
4858 if (max_frame <= 2048)
4859 adapter->rx_buffer_len = 2048;
4860 else
4861 adapter->rx_buffer_len = 4096;
4863 /* adjust allocation if LPE protects us, and we aren't using SBP */
4864 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
4865 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
4866 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
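/*
 * Editorial worked example: an MTU of 1500 means max_frame = 1518,
 * so rx_buffer_len is first set to 2048 and then trimmed by the
 * clause above to 1514 + 4 + 4 = 1522 bytes; a hypothetical
 * 9000-byte MTU (max_frame = 9018) stays at 4096 and is handled by
 * the *_jumbo_rx* routines mentioned earlier.
 */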
4869 if (netif_running(netdev))
4872 e1000e_reset(adapter);
4874 clear_bit(__E1000_RESETTING, &adapter->state);
4879 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4880 int cmd)
4882 struct e1000_adapter *adapter = netdev_priv(netdev);
4883 struct mii_ioctl_data *data = if_mii(ifr);
4885 if (adapter->hw.phy.media_type != e1000_media_type_copper)
4890 data->phy_id = adapter->hw.phy.addr;
4893 e1000_phy_read_status(adapter);
4895 switch (data->reg_num & 0x1F) {
4897 data->val_out = adapter->phy_regs.bmcr;
4900 data->val_out = adapter->phy_regs.bmsr;
4903 data->val_out = (adapter->hw.phy.id >> 16);
4906 data->val_out = (adapter->hw.phy.id & 0xFFFF);
4909 data->val_out = adapter->phy_regs.advertise;
4912 data->val_out = adapter->phy_regs.lpa;
4915 data->val_out = adapter->phy_regs.expansion;
4918 data->val_out = adapter->phy_regs.ctrl1000;
4921 data->val_out = adapter->phy_regs.stat1000;
4924 data->val_out = adapter->phy_regs.estatus;
4937 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4943 return e1000_mii_ioctl(netdev, ifr, cmd);
4949 static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
4951 struct e1000_hw *hw = &adapter->hw;
4956 /* copy MAC RARs to PHY RARs */
4957 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
4959 /* copy MAC MTA to PHY MTA */
4960 for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
4961 mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
4962 e1e_wphy(hw, BM_MTA(i), (u16)(mac_reg & 0xFFFF));
4963 e1e_wphy(hw, BM_MTA(i) + 1, (u16)((mac_reg >> 16) & 0xFFFF));
4966 /* configure PHY Rx Control register */
4967 e1e_rphy(&adapter->hw, BM_RCTL, &phy_reg);
4968 mac_reg = er32(RCTL);
4969 if (mac_reg & E1000_RCTL_UPE)
4970 phy_reg |= BM_RCTL_UPE;
4971 if (mac_reg & E1000_RCTL_MPE)
4972 phy_reg |= BM_RCTL_MPE;
4973 phy_reg &= ~(BM_RCTL_MO_MASK);
4974 if (mac_reg & E1000_RCTL_MO_3)
4975 phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
4976 << BM_RCTL_MO_SHIFT);
4977 if (mac_reg & E1000_RCTL_BAM)
4978 phy_reg |= BM_RCTL_BAM;
4979 if (mac_reg & E1000_RCTL_PMCF)
4980 phy_reg |= BM_RCTL_PMCF;
4981 mac_reg = er32(CTRL);
4982 if (mac_reg & E1000_CTRL_RFCE)
4983 phy_reg |= BM_RCTL_RFCE;
4984 e1e_wphy(&adapter->hw, BM_RCTL, phy_reg);
4986 /* enable PHY wakeup in MAC register */
4988 ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
4990 /* configure and enable PHY wakeup in PHY registers */
4991 e1e_wphy(&adapter->hw, BM_WUFC, wufc);
4992 e1e_wphy(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
4994 /* activate PHY wakeup */
4995 retval = hw->phy.ops.acquire(hw);
4997 e_err("Could not acquire PHY\n");
5000 e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
5001 (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
5002 retval = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg);
5004 e_err("Could not read PHY page 769\n");
5007 phy_reg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
5008 retval = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
5010 e_err("Could not set PHY Host Wakeup bit\n");
5012 hw->phy.ops.release(hw);
5017 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
5018 bool runtime)
5020 struct net_device *netdev = pci_get_drvdata(pdev);
5021 struct e1000_adapter *adapter = netdev_priv(netdev);
5022 struct e1000_hw *hw = &adapter->hw;
5023 u32 ctrl, ctrl_ext, rctl, status;
5024 /* Runtime suspend should only enable wakeup for link changes */
5025 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
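/*
 * Editorial note: wufc is the Wake Up Filter Control bitmap.  For
 * runtime suspend only E1000_WUFC_LNKC (wake on link change) is
 * armed; for a full suspend the user-configured adapter->wol mask
 * is used, e.g. E1000_WUFC_MAG for Wake-on-LAN magic packets.
 */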
5028 netif_device_detach(netdev);
5030 if (netif_running(netdev)) {
5031 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
5032 e1000e_down(adapter);
5033 e1000_free_irq(adapter);
5035 e1000e_reset_interrupt_capability(adapter);
5037 retval = pci_save_state(pdev);
5041 status = er32(STATUS);
5042 if (status & E1000_STATUS_LU)
5043 wufc &= ~E1000_WUFC_LNKC;
5046 e1000_setup_rctl(adapter);
5047 e1000_set_multi(netdev);
5049 /* turn on all-multi mode if wake on multicast is enabled */
5050 if (wufc & E1000_WUFC_MC) {
5052 rctl |= E1000_RCTL_MPE;
5057 /* advertise wake from D3Cold */
5058 #define E1000_CTRL_ADVD3WUC 0x00100000
5059 /* phy power management enable */
5060 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5061 ctrl |= E1000_CTRL_ADVD3WUC;
5062 if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
5063 ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
5066 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
5067 adapter->hw.phy.media_type ==
5068 e1000_media_type_internal_serdes) {
5069 /* keep the laser running in D3 */
5070 ctrl_ext = er32(CTRL_EXT);
5071 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
5072 ew32(CTRL_EXT, ctrl_ext);
5075 if (adapter->flags & FLAG_IS_ICH)
5076 e1000e_disable_gig_wol_ich8lan(&adapter->hw);
5078 /* Allow time for pending master requests to run */
5079 e1000e_disable_pcie_master(&adapter->hw);
5081 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
5082 /* enable wakeup by the PHY */
5083 retval = e1000_init_phy_wakeup(adapter, wufc);
5087 /* enable wakeup by the MAC */
5089 ew32(WUC, E1000_WUC_PME_EN);
5096 *enable_wake = !!wufc;
5098 /* make sure adapter isn't asleep if manageability is enabled */
5099 if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
5100 (hw->mac.ops.check_mng_mode(hw)))
5101 *enable_wake = true;
5103 if (adapter->hw.phy.type == e1000_phy_igp_3)
5104 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
5107 * Release control of h/w to f/w. If f/w is AMT enabled, this
5108 * would have already happened in close and is redundant.
5110 e1000_release_hw_control(adapter);
5112 pci_disable_device(pdev);
5117 static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
5119 if (sleep && wake) {
5120 pci_prepare_to_sleep(pdev);
5124 pci_wake_from_d3(pdev, wake);
5125 pci_set_power_state(pdev, PCI_D3hot);
5128 static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
5129 bool wake)
5131 struct net_device *netdev = pci_get_drvdata(pdev);
5132 struct e1000_adapter *adapter = netdev_priv(netdev);
5135 * The pci-e switch on some quad port adapters will report a
5136 * correctable error when the MAC transitions from D0 to D3. To
5137 * prevent this we need to mask off the correctable errors on the
5138 * downstream port of the pci-e switch.
5140 if (adapter->flags & FLAG_IS_QUAD_PORT) {
5141 struct pci_dev *us_dev = pdev->bus->self;
5142 int pos = pci_find_capability(us_dev, PCI_CAP_ID_EXP);
5145 pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl);
5146 pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL,
5147 (devctl & ~PCI_EXP_DEVCTL_CERE));
5149 e1000_power_off(pdev, sleep, wake);
5151 pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl);
5153 e1000_power_off(pdev, sleep, wake);
5157 #ifdef CONFIG_PCIEASPM
5158 static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5160 pci_disable_link_state(pdev, state);
5163 static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5169 * Both device and parent should have the same ASPM setting.
5170 * Disable ASPM in downstream component first and then upstream.
5172 pos = pci_pcie_cap(pdev);
5173 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
5174 reg16 &= ~state;
5175 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
5177 if (!pdev->bus->self)
5180 pos = pci_pcie_cap(pdev->bus->self);
5181 pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
5182 reg16 &= ~state;
5183 pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
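/*
 * Editorial note: this open-coded fallback works because the
 * PCIE_LINK_STATE_L0S and PCIE_LINK_STATE_L1 values line up with
 * the ASPM Control field (bits 1:0) of the PCIe Link Control
 * register, so clearing those bits in LNKCTL disables the
 * corresponding link power states directly.
 */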
5186 void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5188 dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
5189 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
5190 (state & PCIE_LINK_STATE_L1) ? "L1" : "");
5192 __e1000e_disable_aspm(pdev, state);
5195 #ifdef CONFIG_PM_OPS
5196 static bool e1000e_pm_ready(struct e1000_adapter *adapter)
5198 return !!adapter->tx_ring->buffer_info;
5201 static int __e1000_resume(struct pci_dev *pdev)
5203 struct net_device *netdev = pci_get_drvdata(pdev);
5204 struct e1000_adapter *adapter = netdev_priv(netdev);
5205 struct e1000_hw *hw = &adapter->hw;
5208 pci_set_power_state(pdev, PCI_D0);
5209 pci_restore_state(pdev);
5210 pci_save_state(pdev);
5211 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
5212 e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
5214 e1000e_set_interrupt_capability(adapter);
5215 if (netif_running(netdev)) {
5216 err = e1000_request_irq(adapter);
5221 e1000e_power_up_phy(adapter);
5223 /* report the system wakeup cause from S3/S4 */
5224 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
5227 e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
5229 e_info("PHY Wakeup cause - %s\n",
5230 phy_data & E1000_WUS_EX ? "Unicast Packet" :
5231 phy_data & E1000_WUS_MC ? "Multicast Packet" :
5232 phy_data & E1000_WUS_BC ? "Broadcast Packet" :
5233 phy_data & E1000_WUS_MAG ? "Magic Packet" :
5234 phy_data & E1000_WUS_LNKC ? "Link Status Change" :
5235 "other");
5237 e1e_wphy(&adapter->hw, BM_WUS, ~0);
5239 u32 wus = er32(WUS);
5241 e_info("MAC Wakeup cause - %s\n",
5242 wus & E1000_WUS_EX ? "Unicast Packet" :
5243 wus & E1000_WUS_MC ? "Multicast Packet" :
5244 wus & E1000_WUS_BC ? "Broadcast Packet" :
5245 wus & E1000_WUS_MAG ? "Magic Packet" :
5246 wus & E1000_WUS_LNKC ? "Link Status Change" :
5252 e1000e_reset(adapter);
5254 e1000_init_manageability_pt(adapter);
5256 if (netif_running(netdev))
5259 netif_device_attach(netdev);
5262 * If the controller has AMT, do not set DRV_LOAD until the interface
5263 * is up. For all other cases, let the f/w know that the h/w is now
5264 * under the control of the driver.
5266 if (!(adapter->flags & FLAG_HAS_AMT))
5267 e1000_get_hw_control(adapter);
5272 #ifdef CONFIG_PM_SLEEP
5273 static int e1000_suspend(struct device *dev)
5275 struct pci_dev *pdev = to_pci_dev(dev);
5279 retval = __e1000_shutdown(pdev, &wake, false);
5281 e1000_complete_shutdown(pdev, true, wake);
5286 static int e1000_resume(struct device *dev)
5288 struct pci_dev *pdev = to_pci_dev(dev);
5289 struct net_device *netdev = pci_get_drvdata(pdev);
5290 struct e1000_adapter *adapter = netdev_priv(netdev);
5292 if (e1000e_pm_ready(adapter))
5293 adapter->idle_check = true;
5295 return __e1000_resume(pdev);
5297 #endif /* CONFIG_PM_SLEEP */
5299 #ifdef CONFIG_PM_RUNTIME
5300 static int e1000_runtime_suspend(struct device *dev)
5302 struct pci_dev *pdev = to_pci_dev(dev);
5303 struct net_device *netdev = pci_get_drvdata(pdev);
5304 struct e1000_adapter *adapter = netdev_priv(netdev);
5306 if (e1000e_pm_ready(adapter)) {
5309 __e1000_shutdown(pdev, &wake, true);
5315 static int e1000_idle(struct device *dev)
5317 struct pci_dev *pdev = to_pci_dev(dev);
5318 struct net_device *netdev = pci_get_drvdata(pdev);
5319 struct e1000_adapter *adapter = netdev_priv(netdev);
5321 if (!e1000e_pm_ready(adapter))
5324 if (adapter->idle_check) {
5325 adapter->idle_check = false;
5326 if (!e1000e_has_link(adapter))
5327 pm_schedule_suspend(dev, MSEC_PER_SEC);
5333 static int e1000_runtime_resume(struct device *dev)
5335 struct pci_dev *pdev = to_pci_dev(dev);
5336 struct net_device *netdev = pci_get_drvdata(pdev);
5337 struct e1000_adapter *adapter = netdev_priv(netdev);
5339 if (!e1000e_pm_ready(adapter))
5342 adapter->idle_check = !dev->power.runtime_auto;
5343 return __e1000_resume(pdev);
5345 #endif /* CONFIG_PM_RUNTIME */
5346 #endif /* CONFIG_PM_OPS */
5348 static void e1000_shutdown(struct pci_dev *pdev)
5352 __e1000_shutdown(pdev, &wake, false);
5354 if (system_state == SYSTEM_POWER_OFF)
5355 e1000_complete_shutdown(pdev, false, wake);
5358 #ifdef CONFIG_NET_POLL_CONTROLLER
5360 * Polling 'interrupt' - used by things like netconsole to send skbs
5361 * without having to re-enable interrupts. It's not called while
5362 * the interrupt routine is executing.
5364 static void e1000_netpoll(struct net_device *netdev)
5366 struct e1000_adapter *adapter = netdev_priv(netdev);
5368 disable_irq(adapter->pdev->irq);
5369 e1000_intr(adapter->pdev->irq, netdev);
5371 enable_irq(adapter->pdev->irq);
5376 * e1000_io_error_detected - called when PCI error is detected
5377 * @pdev: Pointer to PCI device
5378 * @state: The current pci connection state
5380 * This function is called after a PCI bus error affecting
5381 * this device has been detected.
5383 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5384 pci_channel_state_t state)
5386 struct net_device *netdev = pci_get_drvdata(pdev);
5387 struct e1000_adapter *adapter = netdev_priv(netdev);
5389 netif_device_detach(netdev);
5391 if (state == pci_channel_io_perm_failure)
5392 return PCI_ERS_RESULT_DISCONNECT;
5394 if (netif_running(netdev))
5395 e1000e_down(adapter);
5396 pci_disable_device(pdev);
5398 /* Request a slot reset. */
5399 return PCI_ERS_RESULT_NEED_RESET;
5403 * e1000_io_slot_reset - called after the pci bus has been reset.
5404 * @pdev: Pointer to PCI device
5406 * Restart the card from scratch, as if from a cold-boot. Implementation
5407 * resembles the first half of the e1000_resume routine.
5409 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5411 struct net_device *netdev = pci_get_drvdata(pdev);
5412 struct e1000_adapter *adapter = netdev_priv(netdev);
5413 struct e1000_hw *hw = &adapter->hw;
5415 pci_ers_result_t result;
5417 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
5418 e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
5419 err = pci_enable_device_mem(pdev);
5422 "Cannot re-enable PCI device after reset.\n");
5423 result = PCI_ERS_RESULT_DISCONNECT;
5425 pci_set_master(pdev);
5426 pdev->state_saved = true;
5427 pci_restore_state(pdev);
5429 pci_enable_wake(pdev, PCI_D3hot, 0);
5430 pci_enable_wake(pdev, PCI_D3cold, 0);
5432 e1000e_reset(adapter);
5434 result = PCI_ERS_RESULT_RECOVERED;
5437 pci_cleanup_aer_uncorrect_error_status(pdev);
5443 * e1000_io_resume - called when traffic can start flowing again.
5444 * @pdev: Pointer to PCI device
5446 * This callback is called when the error recovery driver tells us that
5447 * it's OK to resume normal operation. Implementation resembles the
5448 * second half of the e1000_resume routine.
5450 static void e1000_io_resume(struct pci_dev *pdev)
5452 struct net_device *netdev = pci_get_drvdata(pdev);
5453 struct e1000_adapter *adapter = netdev_priv(netdev);
5455 e1000_init_manageability_pt(adapter);
5457 if (netif_running(netdev)) {
5458 if (e1000e_up(adapter)) {
5460 "can't bring device back up after reset\n");
5465 netif_device_attach(netdev);
5468 * If the controller has AMT, do not set DRV_LOAD until the interface
5469 * is up. For all other cases, let the f/w know that the h/w is now
5470 * under the control of the driver.
5472 if (!(adapter->flags & FLAG_HAS_AMT))
5473 e1000_get_hw_control(adapter);
5477 static void e1000_print_device_info(struct e1000_adapter *adapter)
5479 struct e1000_hw *hw = &adapter->hw;
5480 struct net_device *netdev = adapter->netdev;
5483 /* print bus type/speed/width info */
5484 e_info("(PCI Express:2.5Gb/s:%s) %pM\n",
5486 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
5490 e_info("Intel(R) PRO/%s Network Connection\n",
5491 (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
5492 e1000e_read_pba_num(hw, &pba_num);
5493 e_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
5494 hw->mac.type, hw->phy.type, (pba_num >> 8), (pba_num & 0xff));
5497 static void e1000_eeprom_checks(struct e1000_adapter *adapter)
5499 struct e1000_hw *hw = &adapter->hw;
5503 if (hw->mac.type != e1000_82573)
5506 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
5507 if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) {
5508 /* Deep Smart Power Down (DSPD) */
5509 dev_warn(&adapter->pdev->dev,
5510 "Warning: detected DSPD enabled in EEPROM\n");
5514 static const struct net_device_ops e1000e_netdev_ops = {
5515 .ndo_open = e1000_open,
5516 .ndo_stop = e1000_close,
5517 .ndo_start_xmit = e1000_xmit_frame,
5518 .ndo_get_stats = e1000_get_stats,
5519 .ndo_set_multicast_list = e1000_set_multi,
5520 .ndo_set_mac_address = e1000_set_mac,
5521 .ndo_change_mtu = e1000_change_mtu,
5522 .ndo_do_ioctl = e1000_ioctl,
5523 .ndo_tx_timeout = e1000_tx_timeout,
5524 .ndo_validate_addr = eth_validate_addr,
5526 .ndo_vlan_rx_register = e1000_vlan_rx_register,
5527 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
5528 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
5529 #ifdef CONFIG_NET_POLL_CONTROLLER
5530 .ndo_poll_controller = e1000_netpoll,
5535 * e1000_probe - Device Initialization Routine
5536 * @pdev: PCI device information struct
5537 * @ent: entry in e1000_pci_tbl
5539 * Returns 0 on success, negative on failure
5541 * e1000_probe initializes an adapter identified by a pci_dev structure.
5542 * The OS initialization, configuring of the adapter private structure,
5543 * and a hardware reset occur.
5545 static int __devinit e1000_probe(struct pci_dev *pdev,
5546 const struct pci_device_id *ent)
5548 struct net_device *netdev;
5549 struct e1000_adapter *adapter;
5550 struct e1000_hw *hw;
5551 const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
5552 resource_size_t mmio_start, mmio_len;
5553 resource_size_t flash_start, flash_len;
5555 static int cards_found;
5556 int i, err, pci_using_dac;
5557 u16 eeprom_data = 0;
5558 u16 eeprom_apme_mask = E1000_EEPROM_APME;
5560 if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
5561 e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
5563 err = pci_enable_device_mem(pdev);
5568 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
5570 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
5574 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
5576 err = dma_set_coherent_mask(&pdev->dev,
5579 dev_err(&pdev->dev, "No usable DMA "
5580 "configuration, aborting\n");
5586 err = pci_request_selected_regions_exclusive(pdev,
5587 pci_select_bars(pdev, IORESOURCE_MEM),
5588 e1000e_driver_name);
5592 /* AER (Advanced Error Reporting) hooks */
5593 pci_enable_pcie_error_reporting(pdev);
5595 pci_set_master(pdev);
5596 /* PCI config space info */
5597 err = pci_save_state(pdev);
5599 goto err_alloc_etherdev;
5602 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
5604 goto err_alloc_etherdev;
5606 SET_NETDEV_DEV(netdev, &pdev->dev);
5608 netdev->irq = pdev->irq;
5610 pci_set_drvdata(pdev, netdev);
5611 adapter = netdev_priv(netdev);
5613 adapter->netdev = netdev;
5614 adapter->pdev = pdev;
5616 adapter->pba = ei->pba;
5617 adapter->flags = ei->flags;
5618 adapter->flags2 = ei->flags2;
5619 adapter->hw.adapter = adapter;
5620 adapter->hw.mac.type = ei->mac;
5621 adapter->max_hw_frame_size = ei->max_hw_frame_size;
5622 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
5624 mmio_start = pci_resource_start(pdev, 0);
5625 mmio_len = pci_resource_len(pdev, 0);
5628 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
5629 if (!adapter->hw.hw_addr)
5632 if ((adapter->flags & FLAG_HAS_FLASH) &&
5633 (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
5634 flash_start = pci_resource_start(pdev, 1);
5635 flash_len = pci_resource_len(pdev, 1);
5636 adapter->hw.flash_address = ioremap(flash_start, flash_len);
5637 if (!adapter->hw.flash_address)
5641 /* construct the net_device struct */
5642 netdev->netdev_ops = &e1000e_netdev_ops;
5643 e1000e_set_ethtool_ops(netdev);
5644 netdev->watchdog_timeo = 5 * HZ;
5645 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
5646 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
5648 netdev->mem_start = mmio_start;
5649 netdev->mem_end = mmio_start + mmio_len;
5651 adapter->bd_number = cards_found++;
5653 e1000e_check_options(adapter);
5655 /* setup adapter struct */
5656 err = e1000_sw_init(adapter);
5660 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
5661 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
5662 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
5664 err = ei->get_variants(adapter);
5668 if ((adapter->flags & FLAG_IS_ICH) &&
5669 (adapter->flags & FLAG_READ_ONLY_NVM))
5670 e1000e_write_protect_nvm_ich8lan(&adapter->hw);
5672 hw->mac.ops.get_bus_info(&adapter->hw);
5674 adapter->hw.phy.autoneg_wait_to_complete = 0;
5676 /* Copper options */
5677 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
5678 adapter->hw.phy.mdix = AUTO_ALL_MODES;
5679 adapter->hw.phy.disable_polarity_correction = 0;
5680 adapter->hw.phy.ms_type = e1000_ms_hw_default;
5683 if (e1000_check_reset_block(&adapter->hw))
5684 e_info("PHY reset is blocked due to SOL/IDER session.\n");
5686 netdev->features = NETIF_F_SG |
5688 NETIF_F_HW_VLAN_TX |
5691 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
5692 netdev->features |= NETIF_F_HW_VLAN_FILTER;
5694 netdev->features |= NETIF_F_TSO;
5695 netdev->features |= NETIF_F_TSO6;
5697 netdev->vlan_features |= NETIF_F_TSO;
5698 netdev->vlan_features |= NETIF_F_TSO6;
5699 netdev->vlan_features |= NETIF_F_HW_CSUM;
5700 netdev->vlan_features |= NETIF_F_SG;
5703 netdev->features |= NETIF_F_HIGHDMA;
5705 if (e1000e_enable_mng_pass_thru(&adapter->hw))
5706 adapter->flags |= FLAG_MNG_PT_ENABLED;
5709 * before reading the NVM, reset the controller to
5710 * put the device in a known good starting state
5712 adapter->hw.mac.ops.reset_hw(&adapter->hw);
5715 * systems with ASPM and others may see the checksum fail on the first
5716 * attempt. Let's give it a few tries
5719 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
5722 e_err("The NVM Checksum Is Not Valid\n");
5728 e1000_eeprom_checks(adapter);
5730 /* copy the MAC address */
5731 if (e1000e_read_mac_addr(&adapter->hw))
5732 e_err("NVM Read Error while reading MAC address\n");
5734 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
5735 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
5737 if (!is_valid_ether_addr(netdev->perm_addr)) {
5738 e_err("Invalid MAC Address: %pM\n", netdev->perm_addr);
5743 init_timer(&adapter->watchdog_timer);
5744 adapter->watchdog_timer.function = &e1000_watchdog;
5745 adapter->watchdog_timer.data = (unsigned long) adapter;
5747 init_timer(&adapter->phy_info_timer);
5748 adapter->phy_info_timer.function = &e1000_update_phy_info;
5749 adapter->phy_info_timer.data = (unsigned long) adapter;
5751 INIT_WORK(&adapter->reset_task, e1000_reset_task);
5752 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
5753 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
5754 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
5755 INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
5757 /* Initialize link parameters. User can change them with ethtool */
5758 adapter->hw.mac.autoneg = 1;
5759 adapter->fc_autoneg = 1;
5760 adapter->hw.fc.requested_mode = e1000_fc_default;
5761 adapter->hw.fc.current_mode = e1000_fc_default;
5762 adapter->hw.phy.autoneg_advertised = 0x2f;
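/*
 * Editorial note: 0x2f advertises 10BASE-T half/full (0x01/0x02),
 * 100BASE-TX half/full (0x04/0x08) and 1000BASE-T full (0x20),
 * i.e. every copper speed/duplex combination except half-duplex
 * gigabit.
 */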
5764 /* ring size defaults */
5765 adapter->rx_ring->count = 256;
5766 adapter->tx_ring->count = 256;
5769 * Initial Wake on LAN setting - If APM wake is enabled in
5770 * the EEPROM, enable the ACPI Magic Packet filter
5772 if (adapter->flags & FLAG_APME_IN_WUC) {
5773 /* APME bit in EEPROM is mapped to WUC.APME */
5774 eeprom_data = er32(WUC);
5775 eeprom_apme_mask = E1000_WUC_APME;
5776 if (eeprom_data & E1000_WUC_PHY_WAKE)
5777 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
5778 } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
5779 if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
5780 (adapter->hw.bus.func == 1))
5781 e1000_read_nvm(&adapter->hw,
5782 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
5783 else
5784 e1000_read_nvm(&adapter->hw,
5785 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
5788 /* fetch WoL from EEPROM */
5789 if (eeprom_data & eeprom_apme_mask)
5790 adapter->eeprom_wol |= E1000_WUFC_MAG;
5793 * now that we have the eeprom settings, apply the special cases
5794 * where the eeprom may be wrong or the board simply won't support
5795 * wake on lan on a particular port
5797 if (!(adapter->flags & FLAG_HAS_WOL))
5798 adapter->eeprom_wol = 0;
5800 /* initialize the wol settings based on the eeprom settings */
5801 adapter->wol = adapter->eeprom_wol;
5802 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
5804 /* save off EEPROM version number */
5805 e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
5807 /* reset the hardware with the new settings */
5808 e1000e_reset(adapter);
5811 * If the controller has AMT, do not set DRV_LOAD until the interface
5812 * is up. For all other cases, let the f/w know that the h/w is now
5813 * under the control of the driver.
5815 if (!(adapter->flags & FLAG_HAS_AMT))
5816 e1000_get_hw_control(adapter);
5818 strcpy(netdev->name, "eth%d");
5819 err = register_netdev(netdev);
5823 /* carrier off reporting is important to ethtool even BEFORE open */
5824 netif_carrier_off(netdev);
5826 e1000_print_device_info(adapter);
5828 if (pci_dev_run_wake(pdev)) {
5829 pm_runtime_set_active(&pdev->dev);
5830 pm_runtime_enable(&pdev->dev);
5832 pm_schedule_suspend(&pdev->dev, MSEC_PER_SEC);
5837 if (!(adapter->flags & FLAG_HAS_AMT))
5838 e1000_release_hw_control(adapter);
5840 if (!e1000_check_reset_block(&adapter->hw))
5841 e1000_phy_hw_reset(&adapter->hw);
5844 kfree(adapter->tx_ring);
5845 kfree(adapter->rx_ring);
5847 if (adapter->hw.flash_address)
5848 iounmap(adapter->hw.flash_address);
5849 e1000e_reset_interrupt_capability(adapter);
5851 iounmap(adapter->hw.hw_addr);
5853 free_netdev(netdev);
5855 pci_release_selected_regions(pdev,
5856 pci_select_bars(pdev, IORESOURCE_MEM));
5859 pci_disable_device(pdev);
5864 * e1000_remove - Device Removal Routine
5865 * @pdev: PCI device information struct
5867 * e1000_remove is called by the PCI subsystem to alert the driver
5868 that it should release a PCI device. This could be caused by a
5869 Hot-Plug event, or because the driver is going to be removed from
5870 memory.
5872 static void __devexit e1000_remove(struct pci_dev *pdev)
5874 struct net_device *netdev = pci_get_drvdata(pdev);
5875 struct e1000_adapter *adapter = netdev_priv(netdev);
5876 bool down = test_bit(__E1000_DOWN, &adapter->state);
5878 pm_runtime_get_sync(&pdev->dev);
5881 * flush_scheduled_work() may reschedule our watchdog task, so
5882 * explicitly disable watchdog tasks from being rescheduled
5885 set_bit(__E1000_DOWN, &adapter->state);
5886 del_timer_sync(&adapter->watchdog_timer);
5887 del_timer_sync(&adapter->phy_info_timer);
5889 cancel_work_sync(&adapter->reset_task);
5890 cancel_work_sync(&adapter->watchdog_task);
5891 cancel_work_sync(&adapter->downshift_task);
5892 cancel_work_sync(&adapter->update_phy_task);
5893 cancel_work_sync(&adapter->print_hang_task);
5894 flush_scheduled_work();
5896 if (!(netdev->flags & IFF_UP))
5897 e1000_power_down_phy(adapter);
5899 /* Don't lie to e1000_close() down the road. */
5901 clear_bit(__E1000_DOWN, &adapter->state);
5902 unregister_netdev(netdev);
5904 if (pci_dev_run_wake(pdev)) {
5905 pm_runtime_disable(&pdev->dev);
5906 pm_runtime_set_suspended(&pdev->dev);
5908 pm_runtime_put_noidle(&pdev->dev);
5911 * Release control of h/w to f/w. If f/w is AMT enabled, this
5912 * would have already happened in close and is redundant.
5914 e1000_release_hw_control(adapter);
5916 e1000e_reset_interrupt_capability(adapter);
5917 kfree(adapter->tx_ring);
5918 kfree(adapter->rx_ring);
5920 iounmap(adapter->hw.hw_addr);
5921 if (adapter->hw.flash_address)
5922 iounmap(adapter->hw.flash_address);
5923 pci_release_selected_regions(pdev,
5924 pci_select_bars(pdev, IORESOURCE_MEM));
5926 free_netdev(netdev);
5929 pci_disable_pcie_error_reporting(pdev);
5931 pci_disable_device(pdev);
5934 /* PCI Error Recovery (ERS) */
5935 static struct pci_error_handlers e1000_err_handler = {
5936 .error_detected = e1000_io_error_detected,
5937 .slot_reset = e1000_io_slot_reset,
5938 .resume = e1000_io_resume,
5941 static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
5942 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
5943 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
5944 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
5945 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
5946 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
5947 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
5948 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
5949 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
5950 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },
5952 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
5953 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
5954 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
5955 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
5957 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
5958 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
5959 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
5961 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
5962 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
5963 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },
5965 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
5966 board_80003es2lan },
5967 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
5968 board_80003es2lan },
5969 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
5970 board_80003es2lan },
5971 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
5972 board_80003es2lan },
5974 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
5975 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
5976 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
5977 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
5978 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
5979 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
5980 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
5981 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
5983 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
5984 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
5985 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
5986 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
5987 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
5988 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
5989 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
5990 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
5991 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
5993 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
5994 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
5995 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
5997 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
5998 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
5999 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
6001 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
6002 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
6003 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
6004 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
6006 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
6007 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
6009 { } /* terminate list */
6011 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
6013 #ifdef CONFIG_PM_OPS
6014 static const struct dev_pm_ops e1000_pm_ops = {
6015 SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
6016 SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
6017 e1000_runtime_resume, e1000_idle)
6021 /* PCI Device API Driver */
6022 static struct pci_driver e1000_driver = {
6023 .name = e1000e_driver_name,
6024 .id_table = e1000_pci_tbl,
6025 .probe = e1000_probe,
6026 .remove = __devexit_p(e1000_remove),
6027 #ifdef CONFIG_PM_OPS
6028 .driver.pm = &e1000_pm_ops,
6030 .shutdown = e1000_shutdown,
6031 .err_handler = &e1000_err_handler
6035 * e1000_init_module - Driver Registration Routine
6037 * e1000_init_module is the first routine called when the driver is
6038 * loaded. All it does is register with the PCI subsystem.
6040 static int __init e1000_init_module(void)
6043 pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
6044 e1000e_driver_version);
6045 pr_info("Copyright (c) 1999 - 2010 Intel Corporation.\n");
6046 ret = pci_register_driver(&e1000_driver);
6050 module_init(e1000_init_module);
6053 * e1000_exit_module - Driver Exit Cleanup Routine
6055 * e1000_exit_module is called just before the driver is removed
6058 static void __exit e1000_exit_module(void)
6060 pci_unregister_driver(&e1000_driver);
6062 module_exit(e1000_exit_module);
6066 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
6067 MODULE_LICENSE("GPL");
6068 MODULE_VERSION(DRV_VERSION);