/* Copyright (C) 2015 Microchip Technology
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <net/ip6_checksum.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/microchipphy.h>
#include "lan78xx.h"

#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"
#define DRIVER_VERSION	"1.0.6"

#define TX_TIMEOUT_JIFFIES		(5 * HZ)
#define THROTTLE_JIFFIES		(HZ / 8)
#define UNLINK_TIMEOUT_MS		3

#define RX_MAX_QUEUE_MEMORY		(60 * 1518)

#define SS_USB_PKT_SIZE			(1024)
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)

#define MAX_RX_FIFO_SIZE		(12 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)
#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define TX_OVERHEAD			(8)
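/* The 8-byte TX overhead is the command header (two little-endian 32-bit
 * words, TX_CMD_A and TX_CMD_B) that the driver prepends to every frame
 * handed to the bulk-out endpoint.
 */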
#define LAN78XX_USB_VENDOR_ID		(0x0424)
#define LAN7800_USB_PRODUCT_ID		(0x7800)
#define LAN7850_USB_PRODUCT_ID		(0x7850)
#define LAN7801_USB_PRODUCT_ID		(0x7801)
#define LAN78XX_EEPROM_MAGIC		(0x78A5)
#define LAN78XX_OTP_MAGIC		(0x78F3)

#define	MII_READ			1
#define	MII_WRITE			0

#define EEPROM_INDICATOR		(0xA5)
#define EEPROM_MAC_OFFSET		(0x01)
#define MAX_EEPROM_SIZE			512
#define OTP_INDICATOR_1			(0xF3)
#define OTP_INDICATOR_2			(0xF7)
#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
					 WAKE_MCAST | WAKE_BCAST | \
					 WAKE_ARP | WAKE_MAGIC)

/* USB related defines */
#define BULK_IN_PIPE			1
#define BULK_OUT_PIPE			2

/* default autosuspend delay (msec) */
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

/* statistic update interval (msec) */
#define STAT_UPDATE_TIMER		(1 * 1000)
/* defines interrupts from interrupt EP */
#define MAX_INT_EP			(32)
#define INT_EP_INTEP			(31)
#define INT_EP_OTP_WR_DONE		(28)
#define INT_EP_EEE_TX_LPI_START		(26)
#define INT_EP_EEE_TX_LPI_STOP		(25)
#define INT_EP_EEE_RX_LPI		(24)
#define INT_EP_MAC_RESET_TIMEOUT	(23)
#define INT_EP_RDFO			(22)
#define INT_EP_TXE			(21)
#define INT_EP_USB_STATUS		(20)
#define INT_EP_TX_DIS			(19)
#define INT_EP_RX_DIS			(18)
#define INT_EP_PHY			(17)
#define INT_EP_DP			(16)
#define INT_EP_MAC_ERR			(15)
#define INT_EP_TDFU			(14)
#define INT_EP_TDFO			(13)
#define INT_EP_UTX			(12)
#define INT_EP_GPIO_11			(11)
#define INT_EP_GPIO_10			(10)
#define INT_EP_GPIO_9			(9)
#define INT_EP_GPIO_8			(8)
#define INT_EP_GPIO_7			(7)
#define INT_EP_GPIO_6			(6)
#define INT_EP_GPIO_5			(5)
#define INT_EP_GPIO_4			(4)
#define INT_EP_GPIO_3			(3)
#define INT_EP_GPIO_2			(2)
#define INT_EP_GPIO_1			(1)
#define INT_EP_GPIO_0			(0)
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"RX Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Byte Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Byte Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;
};
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};

struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;
};

struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};
#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_LINK_RESET		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
#define EVENT_STAT_UPDATE		9
struct statstage {
	struct mutex access_lock;	/* for stats access */
	struct lan78xx_statstage saved;
	struct lan78xx_statstage rollover_count;
	struct lan78xx_statstage rollover_max;
	struct lan78xx_statstage64 curr_stat;
};
struct irq_domain_data {
	struct irq_domain *irqdomain;
	unsigned int phyirq;
	struct irq_chip *irqchip;
	irq_flow_handler_t irq_handler;
	u32 irqenable;
	struct mutex irq_lock;		/* for irq bus access */
};
struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;

	int			rx_qlen;
	int			tx_qlen;
	struct sk_buff_head	rxq;
	struct sk_buff_head	txq;
	struct sk_buff_head	done;
	struct sk_buff_head	rxq_pause;
	struct sk_buff_head	txq_pend;

	struct tasklet_struct	bh;
	struct delayed_work	wq;

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int			msg_enable;

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		phy_mutex; /* for phy access */
	unsigned		pipe_in, pipe_out, pipe_intr;

	u32			hard_mtu;	/* count any extra framing */
	size_t			rx_urb_size;	/* size for rx urbs */

	unsigned long		flags;

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned		maxpacket;
	struct timer_list	delay;
	struct timer_list	stat_monitor;

	unsigned long		data[5];

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			fc_autoneg;
	u8			fc_request_control;

	int			delta;
	struct statstage	stats;

	struct irq_domain_data	domain_data;
};
/* define external phy id */
#define	PHY_LAN8835			(0x0007C130)
#define	PHY_KSZ9031RNX			(0x00221620)

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		le32_to_cpus(buf);
		*data = *buf;
	} else {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}
static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0)) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}
static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = 0x%x", ret);
	}

	kfree(stats);

	return ret;
}
#define check_counter_rollover(struct1, dev_stats, member) {	\
	if (struct1->member < dev_stats.saved.member)		\
		dev_stats.rollover_count.member++;		\
	}
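/* Worked example: if a 20-bit counter last read 0xFFFF0 and now reads
 * 0x00010, the new value is below the saved one, so the rollover count
 * for that member is incremented; lan78xx_update_stats() later folds the
 * recorded wraps back into a 64-bit total.
 */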
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;
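	/* Rebuild each 64-bit counter as the latest hardware reading plus
	 * one full counter span (rollover_max + 1) per recorded wrap.
	 * E.g. a 20-bit counter that wrapped twice and now reads 5 yields
	 * 2 * 0x100000 + 5.
	 */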
	mutex_lock(&dev->stats.access_lock);

	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}
/* Loop with timeout until the MII read completes; called with phy_mutex held */
static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, MII_ACC, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & MII_ACC_MII_BUSY_))
			return 0;
	} while (!time_after(jiffies, start_time + HZ));

	return -EIO;
}
static inline u32 mii_access(int id, int index, int read)
{
	u32 ret;

	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
	if (read)
		ret |= MII_ACC_MII_READ_;
	else
		ret |= MII_ACC_MII_WRITE_;
	ret |= MII_ACC_MII_BUSY_;

	return ret;
}
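/* Example: mii_access(1, MII_BMSR, MII_READ) composes a MII_ACC value that
 * starts a read of the BMSR register on PHY address 1; the BUSY bit
 * self-clears once the MII cycle completes.
 */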
static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -EIO;
	}

	return 0;
}
static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_))
			return 0;

		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
	return -EIO;
}
static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function; disable and restore the LED function around EEPROM access.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		return retval;

	for (i = 0; i < length; i++) {
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		data[i] = val & 0xFF;
		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}
static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
			       u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
	if ((ret == 0) && (sig == EEPROM_INDICATOR))
		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
	else
		ret = -EINVAL;

	return ret;
}
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function; disable and restore the LED function around EEPROM access.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			    u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);

	if (ret == 0) {
		if (sig == OTP_INDICATOR_2)
			offset += 0x100;
		else if (sig != OTP_INDICATOR_1)
			return -EINVAL;
		ret = lan78xx_read_raw_otp(dev, offset, length, data);
	}

	return ret;
}
static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
{
	int i, ret;

	for (i = 0; i < 100; i++) {
		u32 dp_sel;

		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
		if (unlikely(ret < 0))
			return -EIO;

		if (dp_sel & DP_SEL_DPRDY_)
			return 0;

		usleep_range(40, 100);
	}

	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");

	return -EIO;
}
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
{
	u32 temp;

	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
		temp = addr[3];
		temp = addr[2] | (temp << 8);
		temp = addr[1] | (temp << 8);
		temp = addr[0] | (temp << 8);
		pdata->pfilter_table[index][1] = temp;
		temp = addr[5];
		temp = addr[4] | (temp << 8);
		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
		pdata->pfilter_table[index][0] = temp;
	}
}
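/* Example: for 00:11:22:33:44:55 the filter words become
 * MAF_LO = 0x33221100 and MAF_HI = 0x5544 | MAF_HI_VALID_ |
 * MAF_HI_TYPE_DST_ (address bytes packed least-significant first).
 */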
/* returns hash bit number for given MAC address */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}
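/* The top 9 bits of the Ethernet CRC select one of 512 hash-table bits;
 * lan78xx_set_multicast() maps bit N to mchash_table[N / 32],
 * bit (N % 32).
 */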
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;
	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] =
		pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
						(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		del_timer(&dev->stat_monitor);
	} else if (phydev->link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}
	}

	return ret;
}
/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't.  hope the failure is rare.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}
static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	memcpy(&intdata, urb->transfer_buffer, 4);
	le32_to_cpus(&intdata);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);

		if (dev->domain_data.phyirq > 0)
			generic_handle_irq(dev->domain_data.phyirq);
	} else {
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
	}
}
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}

static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	ee->magic = LAN78XX_EEPROM_MAGIC;

	return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
}

static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	/* Allow entire eeprom update only */
	if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
	    (ee->offset == 0) &&
	    (ee->len == 512) &&
	    (data[0] == EEPROM_INDICATOR))
		return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (ee->len == 512) &&
		 (data[0] == OTP_INDICATOR_1))
		return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);

	return -EINVAL;
}
static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}

static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(lan78xx_gstrings);
	else
		return -EOPNOTSUPP;
}

static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}
static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	pdata->wol = 0;
	if (wol->wolopts & WAKE_UCAST)
		pdata->wol |= WAKE_UCAST;
	if (wol->wolopts & WAKE_MCAST)
		pdata->wol |= WAKE_MCAST;
	if (wol->wolopts & WAKE_BCAST)
		pdata->wol |= WAKE_BCAST;
	if (wol->wolopts & WAKE_MAGIC)
		pdata->wol |= WAKE_MAGIC;
	if (wol->wolopts & WAKE_PHY)
		pdata->wol |= WAKE_PHY;
	if (wol->wolopts & WAKE_ARP)
		pdata->wol |= WAKE_ARP;

	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);

	phy_ethtool_set_wol(netdev->phydev, wol);

	usb_autopm_put_interface(dev->intf);

	return ret;
}
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}
static u32 lan78xx_get_link(struct net_device *net)
{
	phy_read_status(net->phydev);

	return net->phydev->link;
}

static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}

static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}

static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
static int lan78xx_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_ksettings_get(phydev, cmd);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
static void lan78xx_get_pause(struct net_device *net,
			      struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	pause->autoneg = dev->fc_autoneg;

	if (dev->fc_request_control & FLOW_CTRL_TX)
		pause->tx_pause = 1;

	if (dev->fc_request_control & FLOW_CTRL_RX)
		pause->rx_pause = 1;
}
static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		u32 mii_adv;
		u32 advertising;

		ethtool_convert_link_mode_to_legacy_u32(
			&advertising, ecmd.link_modes.advertising);

		advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		advertising |= mii_adv_to_ethtool_adv_t(mii_adv);

		ethtool_convert_legacy_u32_to_link_mode(
			ecmd.link_modes.advertising, advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
};
static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;

	return phy_mii_ioctl(netdev->phydev, rq, cmd);
}
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	int ret;
	u8 addr[6];

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		/* read MAC address from EEPROM or OTP */
		if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
					 addr) == 0) ||
		    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
				      addr) == 0)) {
			if (is_valid_ether_addr(addr)) {
				/* eeprom values are valid so use them */
				netif_dbg(dev, ifup, dev->net,
					  "MAC address read from EEPROM");
			} else {
				/* generate random MAC */
				random_ether_addr(addr);
				netif_dbg(dev, ifup, dev->net,
					  "MAC address set to random addr");
			}

			addr_lo = addr[0] | (addr[1] << 8) |
				  (addr[2] << 16) | (addr[3] << 24);
			addr_hi = addr[4] | (addr[5] << 8);

			ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
			ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
		} else {
			/* generate random MAC */
			random_ether_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}
	}

	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	ether_addr_copy(dev->net->dev_addr, addr);
}
/* MDIO read and write wrappers for phylib */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
				 u16 regval)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	val = (u32)regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return 0;
}
static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";

	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan thru PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	ret = mdiobus_register(dev->mdiobus);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int ret, temp;

	/* In forced 100 F/H mode the chip may fail to set the mode correctly
	 * when the cable is switched between a long (~50 m+) and a short one.
	 * As a workaround, set the speed to 10 before setting it to 100
	 * in forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear pending interrupt generated while workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}
static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);

	return 0;
}

static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}

static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
static void lan78xx_irq_mask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
}

static void lan78xx_irq_unmask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable |= BIT(irqd_to_hwirq(irqd));
}

static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}

static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;
	int ret;

	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
	 * are the only two callbacks executed in non-atomic context.
	 */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (buf != data->irqenable)
		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

	mutex_unlock(&data->irq_lock);
}
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
{
	struct device_node *of_node;
	struct irq_domain *irqdomain;
	unsigned int irqmap = 0;
	u32 buf;
	int ret = 0;

	of_node = dev->udev->dev.parent->of_node;

	mutex_init(&dev->domain_data.irq_lock);

	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	dev->domain_data.irqenable = buf;

	dev->domain_data.irqchip = &lan78xx_irqchip;
	dev->domain_data.irq_handler = handle_simple_irq;

	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
					  &chip_domain_ops, &dev->domain_data);
	if (irqdomain) {
		/* create mapping for PHY interrupt */
		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
		if (!irqmap) {
			irq_domain_remove(irqdomain);

			irqdomain = NULL;
			ret = -EINVAL;
		}
	} else {
		ret = -EINVAL;
	}

	dev->domain_data.irqdomain = irqdomain;
	dev->domain_data.phyirq = irqmap;

	return ret;
}
static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
{
	if (dev->domain_data.phyirq > 0) {
		irq_dispose_mapping(dev->domain_data.phyirq);

		if (dev->domain_data.irqdomain)
			irq_domain_remove(dev->domain_data.irqdomain);
	}
	dev->domain_data.phyirq = 0;
	dev->domain_data.irqdomain = NULL;
}
static int lan8835_fixup(struct phy_device *phydev)
{
	int buf;
	int ret;
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
	buf &= ~0x1800;
	buf |= 0x0800;
	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);

	/* RGMII MAC TXC Delay Enable */
	ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
				MAC_RGMII_ID_TXC_DELAY_EN_);

	/* RGMII TX DLL Tune Adjust */
	ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);

	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;

	return 1;
}
static int ksz9031rnx_fixup(struct phy_device *phydev)
{
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* Micrel9301RNX PHY configuration */
	/* RGMII Control Signal Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
	/* RGMII RX Data Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
	/* RGMII RX Clock Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);

	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;

	return 1;
}
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	int ret;
	u32 mii_adv;
	struct phy_device *phydev = dev->net->phydev;

	phydev = phy_find_first(dev->mdiobus);
	if (!phydev) {
		netdev_err(dev->net, "no PHY found\n");
		return -EIO;
	}

	if ((dev->chipid == ID_REV_CHIP_ID_7800_) ||
	    (dev->chipid == ID_REV_CHIP_ID_7850_)) {
		phydev->is_internal = true;
		dev->interface = PHY_INTERFACE_MODE_GMII;

	} else if (dev->chipid == ID_REV_CHIP_ID_7801_) {
		if (!phydev->drv) {
			netdev_err(dev->net, "no PHY driver found\n");
			return -EIO;
		}

		dev->interface = PHY_INTERFACE_MODE_RGMII;

		/* external PHY fixup for KSZ9031RNX */
		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
						 ksz9031rnx_fixup);
		if (ret < 0) {
			netdev_err(dev->net, "fail to register fixup\n");
			return ret;
		}
		/* external PHY fixup for LAN8835 */
		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
						 lan8835_fixup);
		if (ret < 0) {
			netdev_err(dev->net, "fail to register fixup\n");
			return ret;
		}
		/* add more external PHY fixup here if needed */

		phydev->is_internal = false;
	} else {
		netdev_err(dev->net, "unknown ID found\n");
		ret = -EIO;
		goto error;
	}

	/* if phyirq is not set, use polling mode in phylib */
	if (dev->domain_data.phyirq > 0)
		phydev->irq = dev->domain_data.phyirq;
	else
		phydev->irq = 0;
	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

	/* set to AUTOMDIX */
	phydev->mdix = ETH_TP_MDI_AUTO;

	ret = phy_connect_direct(dev->net, phydev,
				 lan78xx_link_status_change,
				 dev->interface);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s\n",
			   dev->mdiobus->id);
		return -EIO;
	}

	/* MAC doesn't support 1000T Half */
	phydev->supported &= ~SUPPORTED_1000baseT_Half;

	/* support both flow controls */
	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
	phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);

	genphy_config_aneg(phydev);

	dev->fc_autoneg = phydev->autoneg;

	phy_start(phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	return 0;

error:
	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);

	return ret;
}
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
	int ret = 0;
	u32 buf;
	bool rxenabled;

	ret = lan78xx_read_reg(dev, MAC_RX, &buf);

	rxenabled = ((buf & MAC_RX_RXEN_) != 0);

	if (rxenabled) {
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	/* add 4 to size for FCS */
	buf &= ~MAC_RX_MAX_SIZE_MASK_;
	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);

	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	if (rxenabled) {
		buf |= MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	return 0;
}
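/* Example: for the default MTU of 1500, callers pass size = 1500 + 14
 * (Ethernet header), so the programmed maximum frame becomes
 * 1514 + 4 (FCS) = 1518 bytes.
 */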
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data	*entry;
		struct urb *urb;
		int ret;

		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ll_mtu = new_mtu + netdev->hard_header_len;
	int old_hard_mtu = dev->hard_mtu;
	int old_rx_urb_size = dev->rx_urb_size;
	int ret;

	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;

	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);

	netdev->mtu = new_mtu;

	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			if (netif_running(dev->net)) {
				unlink_urbs(dev, &dev->rxq);
				tasklet_schedule(&dev->bh);
			}
		}
	}

	return 0;
}
static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct sockaddr *addr = p;
	u32 addr_lo, addr_hi;
	int ret;

	if (netif_running(netdev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	addr_lo = netdev->dev_addr[0] |
		  netdev->dev_addr[1] << 8 |
		  netdev->dev_addr[2] << 16 |
		  netdev->dev_addr[3] << 24;
	addr_hi = netdev->dev_addr[4] |
		  netdev->dev_addr[5] << 8;

	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);

	return 0;
}
/* Enable or disable Rx checksum offload engine */
static int lan78xx_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	if (features & NETIF_F_RXCSUM) {
		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
	} else {
		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
	}

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	return 0;
}
static void lan78xx_deferred_vlan_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_vlan);
	struct lan78xx_net *dev = pdata->dev;

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
}
static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u16 vid_bit_index;
	u16 vid_dword_index;

	vid_dword_index = (vid >> 5) & 0x7F;
	vid_bit_index = vid & 0x1F;
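	/* vlan_table is a 4096-bit map: e.g. vid 1000 lands in
	 * vlan_table[31] bit 8 (1000 >> 5 = 31, 1000 & 0x1F = 8).
	 */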
	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_vlan);

	return 0;
}
static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
				    __be16 proto, u16 vid)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u16 vid_bit_index;
	u16 vid_dword_index;

	vid_dword_index = (vid >> 5) & 0x7F;
	vid_bit_index = vid & 0x1F;

	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_vlan);

	return 0;
}
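/* Program the USB LTM (Latency Tolerance Messaging) registers. The values
 * are taken from EEPROM or OTP when a valid 24-byte block is present there;
 * otherwise all six registers are written as zero.
 */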
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	dev->net->hard_header_len += TX_OVERHEAD;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2407 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2408 ret = lan78xx_write_reg(dev, FLOW, 0);
2409 ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2411 /* Don't need rfe_ctl_lock during initialisation */
2412 ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2413 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2414 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2416 /* Enable or disable checksum offload engines */
2417 lan78xx_set_features(dev->net, dev->net->features);
2419 lan78xx_set_multicast(dev->net);
2422 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2423 buf |= PMT_CTL_PHY_RST_;
2424 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2426 timeout = jiffies + HZ;
2429 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2430 if (time_after(jiffies, timeout)) {
2431 netdev_warn(dev->net, "timeout waiting for PHY Reset");
2434 } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2436 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2437 /* LAN7801 only has RGMII mode */
2438 if (dev->chipid == ID_REV_CHIP_ID_7801_)
2439 buf &= ~MAC_CR_GMII_EN_;
2440 buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2441 ret = lan78xx_write_reg(dev, MAC_CR, buf);
2443 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2444 buf |= MAC_TX_TXEN_;
2445 ret = lan78xx_write_reg(dev, MAC_TX, buf);
2447 ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2448 buf |= FCT_TX_CTL_EN_;
2449 ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2451 ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
2453 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2454 buf |= MAC_RX_RXEN_;
2455 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2457 ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2458 buf |= FCT_RX_CTL_EN_;
2459 ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
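/* Hardware statistics counters are a mix of 20-bit and 32-bit registers.
 * rollover_max records, per counter, the value at which it wraps so the
 * periodic update can accumulate deltas correctly: the loop first seeds
 * each counter with the 20-bit wrap value, then the byte-count and EEE
 * LPI counters are overridden to the full 32-bit maximum.
 */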
2464 static void lan78xx_init_stats(struct lan78xx_net *dev)
2469 /* initialize for stats update
2470 * some counters are 20 bits wide and some are 32 bits
2472 p = (u32 *)&dev->stats.rollover_max;
2473 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2476 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2477 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2478 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2479 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2480 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2481 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2482 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2483 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2484 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2485 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2487 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
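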
2490 static int lan78xx_open(struct net_device *net)
2492 struct lan78xx_net *dev = netdev_priv(net);
2495 ret = usb_autopm_get_interface(dev->intf);
2499 ret = lan78xx_reset(dev);
2503 ret = lan78xx_phy_init(dev);
2507 /* for Link Check */
2508 if (dev->urb_intr) {
2509 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2511 netif_err(dev, ifup, dev->net,
2512 "intr submit %d\n", ret);
2517 lan78xx_init_stats(dev);
2519 set_bit(EVENT_DEV_OPEN, &dev->flags);
2521 netif_start_queue(net);
2523 dev->link_on = false;
2525 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2527 usb_autopm_put_interface(dev->intf);
2533 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2535 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2536 DECLARE_WAITQUEUE(wait, current);
2539 /* ensure there are no more active urbs */
2540 add_wait_queue(&unlink_wakeup, &wait);
2541 set_current_state(TASK_UNINTERRUPTIBLE);
2542 dev->wait = &unlink_wakeup;
2543 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2545 /* maybe wait for deletions to finish. */
2546 while (!skb_queue_empty(&dev->rxq) &&
2547 !skb_queue_empty(&dev->txq) &&
2548 !skb_queue_empty(&dev->done)) {
2549 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2550 set_current_state(TASK_UNINTERRUPTIBLE);
2551 netif_dbg(dev, ifdown, dev->net,
2552 "waited for %d urb completions\n", temp);
2554 set_current_state(TASK_RUNNING);
2556 remove_wait_queue(&unlink_wakeup, &wait);
2559 static int lan78xx_stop(struct net_device *net)
2561 struct lan78xx_net *dev = netdev_priv(net);
2563 if (timer_pending(&dev->stat_monitor))
2564 del_timer_sync(&dev->stat_monitor);
2566 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
2567 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
2569 phy_stop(net->phydev);
2570 phy_disconnect(net->phydev);
2574 clear_bit(EVENT_DEV_OPEN, &dev->flags);
2575 netif_stop_queue(net);
2577 netif_info(dev, ifdown, dev->net,
2578 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2579 net->stats.rx_packets, net->stats.tx_packets,
2580 net->stats.rx_errors, net->stats.tx_errors);
2582 lan78xx_terminate_urbs(dev);
2584 usb_kill_urb(dev->urb_intr);
2586 skb_queue_purge(&dev->rxq_pause);
2588 /* deferred work (task, timer, softirq) must also stop.
2589 * can't flush_scheduled_work() until we drop rtnl (later),
2590 * else workers could deadlock; so make workers a NOP.
2593 cancel_delayed_work_sync(&dev->wq);
2594 tasklet_kill(&dev->bh);
2596 usb_autopm_put_interface(dev->intf);
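/* TX framing: lan78xx_tx_prep() prepends an 8-byte command header
 * (TX_OVERHEAD) to each linearized frame: tx_cmd_a carries the length,
 * FCS-insertion, checksum-offload (IPE/TPE) and LSO flags; tx_cmd_b
 * carries the TSO MSS and the VLAN tag to insert. Both words are stored
 * little-endian, in that order, immediately before the Ethernet frame.
 */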
2601 static int lan78xx_linearize(struct sk_buff *skb)
2603 return skb_linearize(skb);
2606 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2607 struct sk_buff *skb, gfp_t flags)
2609 u32 tx_cmd_a, tx_cmd_b;
2611 if (skb_cow_head(skb, TX_OVERHEAD)) {
2612 dev_kfree_skb_any(skb);
2616 if (lan78xx_linearize(skb) < 0)
2619 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2621 if (skb->ip_summed == CHECKSUM_PARTIAL)
2622 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2625 if (skb_is_gso(skb)) {
2626 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2628 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2630 tx_cmd_a |= TX_CMD_A_LSO_;
2633 if (skb_vlan_tag_present(skb)) {
2634 tx_cmd_a |= TX_CMD_A_IVTG_;
2635 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2639 cpu_to_le32s(&tx_cmd_b);
2640 memcpy(skb->data, &tx_cmd_b, 4);
2643 cpu_to_le32s(&tx_cmd_a);
2644 memcpy(skb->data, &tx_cmd_a, 4);
2649 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2650 struct sk_buff_head *list, enum skb_state state)
2652 unsigned long flags;
2653 enum skb_state old_state;
2654 struct skb_data *entry = (struct skb_data *)skb->cb;
2656 spin_lock_irqsave(&list->lock, flags);
2657 old_state = entry->state;
2658 entry->state = state;
2660 __skb_unlink(skb, list);
2661 spin_unlock(&list->lock);
2662 spin_lock(&dev->done.lock);
2664 __skb_queue_tail(&dev->done, skb);
2665 if (skb_queue_len(&dev->done) == 1)
2666 tasklet_schedule(&dev->bh);
2667 spin_unlock_irqrestore(&dev->done.lock, flags);
2672 static void tx_complete(struct urb *urb)
2674 struct sk_buff *skb = (struct sk_buff *)urb->context;
2675 struct skb_data *entry = (struct skb_data *)skb->cb;
2676 struct lan78xx_net *dev = entry->dev;
2678 if (urb->status == 0) {
2679 dev->net->stats.tx_packets += entry->num_of_packet;
2680 dev->net->stats.tx_bytes += entry->length;
2682 dev->net->stats.tx_errors++;
2684 switch (urb->status) {
2686 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2689 /* software-driven interface shutdown */
2697 netif_stop_queue(dev->net);
2700 netif_dbg(dev, tx_err, dev->net,
2701 "tx err %d\n", entry->urb->status);
2706 usb_autopm_put_interface_async(dev->intf);
2708 defer_bh(dev, skb, &dev->txq, tx_done);
2711 static void lan78xx_queue_skb(struct sk_buff_head *list,
2712 struct sk_buff *newsk, enum skb_state state)
2714 struct skb_data *entry = (struct skb_data *)newsk->cb;
2716 __skb_queue_tail(list, newsk);
2717 entry->state = state;
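/* Transmit model: ndo_start_xmit below only timestamps and preps the
 * skb, queues it on txq_pend and kicks the bh tasklet; frame aggregation
 * and URB submission happen later in lan78xx_tx_bh(). Below SuperSpeed
 * the netif queue is stopped once more than 10 frames are pending (the
 * throttle visible in the code).
 */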
2721 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2723 struct lan78xx_net *dev = netdev_priv(net);
2724 struct sk_buff *skb2 = NULL;
2727 skb_tx_timestamp(skb);
2728 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2732 skb_queue_tail(&dev->txq_pend, skb2);
2734 /* throttle the TX path at speeds slower than SuperSpeed USB */
2735 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2736 (skb_queue_len(&dev->txq_pend) > 10))
2737 netif_stop_queue(net);
2739 netif_dbg(dev, tx_err, dev->net,
2740 "lan78xx_tx_prep return NULL\n");
2741 dev->net->stats.tx_errors++;
2742 dev->net->stats.tx_dropped++;
2745 tasklet_schedule(&dev->bh);
2747 return NETDEV_TX_OK;
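/* For reference, a hedged sketch of the same endpoint lookup using the
 * usb_find_common_endpoints() helper (assuming a kernel that provides
 * it, v4.12+); the open-coded altsetting scan in lan78xx_get_endpoints()
 * below predates that API:
 */
static inline int example_find_endpoints(struct usb_interface *intf)
{
	struct usb_endpoint_descriptor *in, *out, *intr;

	/* expect one bulk-in, one bulk-out and one interrupt-in endpoint */
	return usb_find_common_endpoints(intf->cur_altsetting,
					 &in, &out, &intr, NULL);
}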
2751 lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
2754 struct usb_host_interface *alt = NULL;
2755 struct usb_host_endpoint *in = NULL, *out = NULL;
2756 struct usb_host_endpoint *status = NULL;
2758 for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
2764 alt = intf->altsetting + tmp;
2766 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
2767 struct usb_host_endpoint *e;
2770 e = alt->endpoint + ep;
2771 switch (e->desc.bmAttributes) {
2772 case USB_ENDPOINT_XFER_INT:
2773 if (!usb_endpoint_dir_in(&e->desc))
2777 case USB_ENDPOINT_XFER_BULK:
2782 if (usb_endpoint_dir_in(&e->desc)) {
2785 else if (intr && !status)
2795 if (!alt || !in || !out)
2798 dev->pipe_in = usb_rcvbulkpipe(dev->udev,
2799 in->desc.bEndpointAddress &
2800 USB_ENDPOINT_NUMBER_MASK);
2801 dev->pipe_out = usb_sndbulkpipe(dev->udev,
2802 out->desc.bEndpointAddress &
2803 USB_ENDPOINT_NUMBER_MASK);
2804 dev->ep_intr = status;
2809 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2811 struct lan78xx_priv *pdata = NULL;
2815 ret = lan78xx_get_endpoints(dev, intf);
2817 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2819 pdata = (struct lan78xx_priv *)(dev->data[0]);
2821 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2827 spin_lock_init(&pdata->rfe_ctl_lock);
2828 mutex_init(&pdata->dataport_mutex);
2830 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2832 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2833 pdata->vlan_table[i] = 0;
2835 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2837 dev->net->features = 0;
2839 if (DEFAULT_TX_CSUM_ENABLE)
2840 dev->net->features |= NETIF_F_HW_CSUM;
2842 if (DEFAULT_RX_CSUM_ENABLE)
2843 dev->net->features |= NETIF_F_RXCSUM;
2845 if (DEFAULT_TSO_CSUM_ENABLE)
2846 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2848 dev->net->hw_features = dev->net->features;
2850 ret = lan78xx_setup_irq_domain(dev);
2852 netdev_warn(dev->net,
2853 "lan78xx_setup_irq_domain() failed : %d", ret);
2858 /* Init all registers */
2859 ret = lan78xx_reset(dev);
2861 lan78xx_mdio_init(dev);
2863 dev->net->flags |= IFF_MULTICAST;
2865 pdata->wol = WAKE_MAGIC;
2870 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2872 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2874 lan78xx_remove_irq_domain(dev);
2876 lan78xx_remove_mdio(dev);
2879 netif_dbg(dev, ifdown, dev->net, "free pdata");
2886 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2887 struct sk_buff *skb,
2888 u32 rx_cmd_a, u32 rx_cmd_b)
2890 if (!(dev->net->features & NETIF_F_RXCSUM) ||
2891 unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2892 skb->ip_summed = CHECKSUM_NONE;
2894 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2895 skb->ip_summed = CHECKSUM_COMPLETE;
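/* Note on the offload above: CHECKSUM_COMPLETE hands the stack the raw
 * 16-bit checksum the MAC computed over the packet (taken from the upper
 * bits of rx_cmd_b), leaving L4 verification to the stack; when RXCSUM is
 * off or the hardware sets RX_CMD_A_ICSM_, the skb falls back to
 * CHECKSUM_NONE.
 */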
2899 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2903 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2904 skb_queue_tail(&dev->rxq_pause, skb);
2908 dev->net->stats.rx_packets++;
2909 dev->net->stats.rx_bytes += skb->len;
2911 skb->protocol = eth_type_trans(skb, dev->net);
2913 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2914 skb->len + sizeof(struct ethhdr), skb->protocol);
2915 memset(skb->cb, 0, sizeof(struct skb_data));
2917 if (skb_defer_rx_timestamp(skb))
2920 status = netif_rx(skb);
2921 if (status != NET_RX_SUCCESS)
2922 netif_dbg(dev, rx_err, dev->net,
2923 "netif_rx status %d\n", status);
2926 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2928 if (skb->len < dev->net->hard_header_len)
2931 while (skb->len > 0) {
2932 u32 rx_cmd_a, rx_cmd_b, align_count, size;
2934 struct sk_buff *skb2;
2935 unsigned char *packet;
2937 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
2938 le32_to_cpus(&rx_cmd_a);
2939 skb_pull(skb, sizeof(rx_cmd_a));
2941 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
2942 le32_to_cpus(&rx_cmd_b);
2943 skb_pull(skb, sizeof(rx_cmd_b));
2945 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
2946 le16_to_cpus(&rx_cmd_c);
2947 skb_pull(skb, sizeof(rx_cmd_c));
2951 /* get the packet length */
2952 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
2953 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
2955 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
2956 netif_dbg(dev, rx_err, dev->net,
2957 "Error rx_cmd_a=0x%08x", rx_cmd_a);
2959 /* last frame in this batch */
2960 if (skb->len == size) {
2961 lan78xx_rx_csum_offload(dev, skb,
2962 rx_cmd_a, rx_cmd_b);
2964 skb_trim(skb, skb->len - 4); /* remove fcs */
2965 skb->truesize = size + sizeof(struct sk_buff);
2970 skb2 = skb_clone(skb, GFP_ATOMIC);
2971 if (unlikely(!skb2)) {
2972 netdev_warn(dev->net, "Error allocating skb");
2977 skb2->data = packet;
2978 skb_set_tail_pointer(skb2, size);
2980 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
2982 skb_trim(skb2, skb2->len - 4); /* remove fcs */
2983 skb2->truesize = size + sizeof(struct sk_buff);
2985 lan78xx_skb_return(dev, skb2);
2988 skb_pull(skb, size);
2990 /* padding bytes before the next frame starts */
2992 skb_pull(skb, align_count);
2998 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3000 if (!lan78xx_rx(dev, skb)) {
3001 dev->net->stats.rx_errors++;
3006 lan78xx_skb_return(dev, skb);
3010 netif_dbg(dev, rx_err, dev->net, "drop\n");
3011 dev->net->stats.rx_errors++;
3013 skb_queue_tail(&dev->done, skb);
3016 static void rx_complete(struct urb *urb);
3018 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
3020 struct sk_buff *skb;
3021 struct skb_data *entry;
3022 unsigned long lockflags;
3023 size_t size = dev->rx_urb_size;
3026 skb = netdev_alloc_skb_ip_align(dev->net, size);
3032 entry = (struct skb_data *)skb->cb;
3037 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3038 skb->data, size, rx_complete, skb);
3040 spin_lock_irqsave(&dev->rxq.lock, lockflags);
3042 if (netif_device_present(dev->net) &&
3043 netif_running(dev->net) &&
3044 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3045 !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3046 ret = usb_submit_urb(urb, GFP_ATOMIC);
3049 lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3052 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3055 netif_dbg(dev, ifdown, dev->net, "device gone\n");
3056 netif_device_detach(dev->net);
3062 netif_dbg(dev, rx_err, dev->net,
3063 "rx submit, %d\n", ret);
3064 tasklet_schedule(&dev->bh);
3067 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3070 spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3072 dev_kfree_skb_any(skb);
3078 static void rx_complete(struct urb *urb)
3080 struct sk_buff *skb = (struct sk_buff *)urb->context;
3081 struct skb_data *entry = (struct skb_data *)skb->cb;
3082 struct lan78xx_net *dev = entry->dev;
3083 int urb_status = urb->status;
3084 enum skb_state state;
3086 skb_put(skb, urb->actual_length);
3090 switch (urb_status) {
3092 if (skb->len < dev->net->hard_header_len) {
3094 dev->net->stats.rx_errors++;
3095 dev->net->stats.rx_length_errors++;
3096 netif_dbg(dev, rx_err, dev->net,
3097 "rx length %d\n", skb->len);
3099 usb_mark_last_busy(dev->udev);
3102 dev->net->stats.rx_errors++;
3103 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3105 case -ECONNRESET: /* async unlink */
3106 case -ESHUTDOWN: /* hardware gone */
3107 netif_dbg(dev, ifdown, dev->net,
3108 "rx shutdown, code %d\n", urb_status);
3116 dev->net->stats.rx_errors++;
3122 /* data overrun ... flush fifo? */
3124 dev->net->stats.rx_over_errors++;
3129 dev->net->stats.rx_errors++;
3130 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3134 state = defer_bh(dev, skb, &dev->rxq, state);
3137 if (netif_running(dev->net) &&
3138 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3139 state != unlink_start) {
3140 rx_submit(dev, urb, GFP_ATOMIC);
3145 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
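/* TX aggregation: lan78xx_tx_bh() packs as many pending frames as fit
 * into one bulk-out URB. Each frame (already carrying its 8-byte command
 * header) is copied at a 32-bit-aligned offset via roundup(len,
 * sizeof(u32)), the batch is capped at MAX_SINGLE_PACKET_SIZE bytes, and
 * a GSO skb flushes the previous batch and is sent on its own.
 * entry->num_of_packet lets tx_complete() credit tx_packets per
 * aggregate URB.
 */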
3148 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3151 struct urb *urb = NULL;
3152 struct skb_data *entry;
3153 unsigned long flags;
3154 struct sk_buff_head *tqp = &dev->txq_pend;
3155 struct sk_buff *skb, *skb2;
3158 int skb_totallen, pkt_cnt;
3164 for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
3165 if (skb_is_gso(skb)) {
3167 /* handle previous packets first */
3171 length = skb->len - TX_OVERHEAD;
3172 skb2 = skb_dequeue(tqp);
3176 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3178 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3182 /* copy to a single skb */
3183 skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3187 skb_put(skb, skb_totallen);
3189 for (count = pos = 0; count < pkt_cnt; count++) {
3190 skb2 = skb_dequeue(tqp);
3192 length += (skb2->len - TX_OVERHEAD);
3193 memcpy(skb->data + pos, skb2->data, skb2->len);
3194 pos += roundup(skb2->len, sizeof(u32));
3195 dev_kfree_skb(skb2);
3200 urb = usb_alloc_urb(0, GFP_ATOMIC);
3204 entry = (struct skb_data *)skb->cb;
3207 entry->length = length;
3208 entry->num_of_packet = count;
3210 spin_lock_irqsave(&dev->txq.lock, flags);
3211 ret = usb_autopm_get_interface_async(dev->intf);
3213 spin_unlock_irqrestore(&dev->txq.lock, flags);
3217 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3218 skb->data, skb->len, tx_complete, skb);
3220 if (length % dev->maxpacket == 0) {
3221 /* send USB_ZERO_PACKET */
3222 urb->transfer_flags |= URB_ZERO_PACKET;
3226 /* if this triggers, the device is still asleep */
3227 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3228 /* transmission will be done in resume */
3229 usb_anchor_urb(urb, &dev->deferred);
3230 /* no point in processing more packets */
3231 netif_stop_queue(dev->net);
3233 spin_unlock_irqrestore(&dev->txq.lock, flags);
3234 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3239 ret = usb_submit_urb(urb, GFP_ATOMIC);
3242 netif_trans_update(dev->net);
3243 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3244 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3245 netif_stop_queue(dev->net);
3248 netif_stop_queue(dev->net);
3249 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3250 usb_autopm_put_interface_async(dev->intf);
3253 usb_autopm_put_interface_async(dev->intf);
3254 netif_dbg(dev, tx_err, dev->net,
3255 "tx: submit urb err %d\n", ret);
3259 spin_unlock_irqrestore(&dev->txq.lock, flags);
3262 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3264 dev->net->stats.tx_dropped++;
3266 dev_kfree_skb_any(skb);
3269 netif_dbg(dev, tx_queued, dev->net,
3270 "> tx, len %d, type 0x%x\n", length, skb->protocol);
3273 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3278 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3279 for (i = 0; i < 10; i++) {
3280 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3282 urb = usb_alloc_urb(0, GFP_ATOMIC);
3284 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3288 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3289 tasklet_schedule(&dev->bh);
3291 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3292 netif_wake_queue(dev->net);
3295 static void lan78xx_bh(unsigned long param)
3297 struct lan78xx_net *dev = (struct lan78xx_net *)param;
3298 struct sk_buff *skb;
3299 struct skb_data *entry;
3301 while ((skb = skb_dequeue(&dev->done))) {
3302 entry = (struct skb_data *)(skb->cb);
3303 switch (entry->state) {
3305 entry->state = rx_cleanup;
3306 rx_process(dev, skb);
3309 usb_free_urb(entry->urb);
3313 usb_free_urb(entry->urb);
3317 netdev_dbg(dev->net, "skb state %d\n", entry->state);
3322 if (netif_device_present(dev->net) && netif_running(dev->net)) {
3323 /* reset update timer delta */
3324 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3326 mod_timer(&dev->stat_monitor,
3327 jiffies + STAT_UPDATE_TIMER);
3330 if (!skb_queue_empty(&dev->txq_pend))
3333 if (!timer_pending(&dev->delay) &&
3334 !test_bit(EVENT_RX_HALT, &dev->flags))
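/* Deferred kevent handling: the work below clears stalled bulk endpoints
 * (EVENT_TX_HALT/EVENT_RX_HALT) with usb_clear_halt(), re-runs link
 * negotiation on EVENT_LINK_RESET, and services EVENT_STAT_UPDATE. The
 * stats interval backs off by doubling dev->delta (1, 2, 4, ... capped
 * at 50), i.e. from 1 s up to a 50 s worst-case refresh with the 1000 ms
 * STAT_UPDATE_TIMER base.
 */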
3339 static void lan78xx_delayedwork(struct work_struct *work)
3342 struct lan78xx_net *dev;
3344 dev = container_of(work, struct lan78xx_net, wq.work);
3346 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3347 unlink_urbs(dev, &dev->txq);
3348 status = usb_autopm_get_interface(dev->intf);
3351 status = usb_clear_halt(dev->udev, dev->pipe_out);
3352 usb_autopm_put_interface(dev->intf);
3355 status != -ESHUTDOWN) {
3356 if (netif_msg_tx_err(dev))
3358 netdev_err(dev->net,
3359 "can't clear tx halt, status %d\n",
3362 clear_bit(EVENT_TX_HALT, &dev->flags);
3363 if (status != -ESHUTDOWN)
3364 netif_wake_queue(dev->net);
3367 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3368 unlink_urbs(dev, &dev->rxq);
3369 status = usb_autopm_get_interface(dev->intf);
3372 status = usb_clear_halt(dev->udev, dev->pipe_in);
3373 usb_autopm_put_interface(dev->intf);
3376 status != -ESHUTDOWN) {
3377 if (netif_msg_rx_err(dev))
3379 netdev_err(dev->net,
3380 "can't clear rx halt, status %d\n",
3383 clear_bit(EVENT_RX_HALT, &dev->flags);
3384 tasklet_schedule(&dev->bh);
3388 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3391 clear_bit(EVENT_LINK_RESET, &dev->flags);
3392 status = usb_autopm_get_interface(dev->intf);
3395 if (lan78xx_link_reset(dev) < 0) {
3396 usb_autopm_put_interface(dev->intf);
3398 netdev_info(dev->net, "link reset failed (%d)\n",
3401 usb_autopm_put_interface(dev->intf);
3405 if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3406 lan78xx_update_stats(dev);
3408 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3410 mod_timer(&dev->stat_monitor,
3411 jiffies + (STAT_UPDATE_TIMER * dev->delta));
3413 dev->delta = min((dev->delta * 2), 50);
3417 static void intr_complete(struct urb *urb)
3419 struct lan78xx_net *dev = urb->context;
3420 int status = urb->status;
3425 lan78xx_status(dev, urb);
3428 /* software-driven interface shutdown */
3429 case -ENOENT: /* urb killed */
3430 case -ESHUTDOWN: /* hardware gone */
3431 netif_dbg(dev, ifdown, dev->net,
3432 "intr shutdown, code %d\n", status);
3435 /* NOTE: not throttling like RX/TX, since this endpoint
3436 * already polls infrequently
3439 netdev_dbg(dev->net, "intr status %d\n", status);
3443 if (!netif_running(dev->net))
3446 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3447 status = usb_submit_urb(urb, GFP_ATOMIC);
3449 netif_err(dev, timer, dev->net,
3450 "intr resubmit --> %d\n", status);
3453 static void lan78xx_disconnect(struct usb_interface *intf)
3455 struct lan78xx_net *dev;
3456 struct usb_device *udev;
3457 struct net_device *net;
3459 dev = usb_get_intfdata(intf);
3460 usb_set_intfdata(intf, NULL);
3464 udev = interface_to_usbdev(intf);
3467 unregister_netdev(net);
3469 cancel_delayed_work_sync(&dev->wq);
3471 usb_scuttle_anchored_urbs(&dev->deferred);
3473 lan78xx_unbind(dev, intf);
3475 usb_kill_urb(dev->urb_intr);
3476 usb_free_urb(dev->urb_intr);
3482 static void lan78xx_tx_timeout(struct net_device *net)
3484 struct lan78xx_net *dev = netdev_priv(net);
3486 unlink_urbs(dev, &dev->txq);
3487 tasklet_schedule(&dev->bh);
3490 static const struct net_device_ops lan78xx_netdev_ops = {
3491 .ndo_open = lan78xx_open,
3492 .ndo_stop = lan78xx_stop,
3493 .ndo_start_xmit = lan78xx_start_xmit,
3494 .ndo_tx_timeout = lan78xx_tx_timeout,
3495 .ndo_change_mtu = lan78xx_change_mtu,
3496 .ndo_set_mac_address = lan78xx_set_mac_addr,
3497 .ndo_validate_addr = eth_validate_addr,
3498 .ndo_do_ioctl = lan78xx_ioctl,
3499 .ndo_set_rx_mode = lan78xx_set_multicast,
3500 .ndo_set_features = lan78xx_set_features,
3501 .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
3502 .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
3505 static void lan78xx_stat_monitor(unsigned long param)
3507 struct lan78xx_net *dev;
3509 dev = (struct lan78xx_net *)param;
3511 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3514 static int lan78xx_probe(struct usb_interface *intf,
3515 const struct usb_device_id *id)
3517 struct lan78xx_net *dev;
3518 struct net_device *netdev;
3519 struct usb_device *udev;
3525 udev = interface_to_usbdev(intf);
3526 udev = usb_get_dev(udev);
3529 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3531 dev_err(&intf->dev, "Error: OOM\n");
3535 /* netdev_printk() needs this */
3536 SET_NETDEV_DEV(netdev, &intf->dev);
3538 dev = netdev_priv(netdev);
3542 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3543 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3545 skb_queue_head_init(&dev->rxq);
3546 skb_queue_head_init(&dev->txq);
3547 skb_queue_head_init(&dev->done);
3548 skb_queue_head_init(&dev->rxq_pause);
3549 skb_queue_head_init(&dev->txq_pend);
3550 mutex_init(&dev->phy_mutex);
3552 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3553 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3554 init_usb_anchor(&dev->deferred);
3556 netdev->netdev_ops = &lan78xx_netdev_ops;
3557 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3558 netdev->ethtool_ops = &lan78xx_ethtool_ops;
3560 dev->stat_monitor.function = lan78xx_stat_monitor;
3561 dev->stat_monitor.data = (unsigned long)dev;
3563 init_timer(&dev->stat_monitor);
3565 mutex_init(&dev->stats.access_lock);
3567 ret = lan78xx_bind(dev, intf);
3570 strcpy(netdev->name, "eth%d");
3572 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3573 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3575 /* MTU range: 68 - 9000 */
3576 netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3578 dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3579 dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3580 dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3582 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3583 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3585 dev->pipe_intr = usb_rcvintpipe(dev->udev,
3586 dev->ep_intr->desc.bEndpointAddress &
3587 USB_ENDPOINT_NUMBER_MASK);
3588 period = dev->ep_intr->desc.bInterval;
3590 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3591 buf = kmalloc(maxp, GFP_KERNEL);
3593 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3594 if (!dev->urb_intr) {
3599 usb_fill_int_urb(dev->urb_intr, dev->udev,
3600 dev->pipe_intr, buf, maxp,
3601 intr_complete, dev, period);
3605 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3607 /* driver requires remote-wakeup capability during autosuspend. */
3608 intf->needs_remote_wakeup = 1;
3610 ret = register_netdev(netdev);
3612 netif_err(dev, probe, netdev, "couldn't register the device\n");
3616 usb_set_intfdata(intf, dev);
3618 ret = device_set_wakeup_enable(&udev->dev, true);
3620 /* The default 2 sec autosuspend delay has more overhead than benefit.
3621 * Use 10 sec as the default instead.
3623 pm_runtime_set_autosuspend_delay(&udev->dev,
3624 DEFAULT_AUTOSUSPEND_DELAY);
3629 lan78xx_unbind(dev, intf);
3631 free_netdev(netdev);
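/* Wake-frame filters match on a CRC-16 of selected header bytes. The
 * helper below implements the bitwise form with polynomial 0x8005,
 * shifting the CRC MSB-first while consuming each data byte LSB-first;
 * the result is programmed into WUF_CFGX_CRC16_MASK_ alongside the byte
 * mask registers in lan78xx_set_suspend().
 */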
3638 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3640 const u16 crc16poly = 0x8005;
3646 for (i = 0; i < len; i++) {
3648 for (bit = 0; bit < 8; bit++) {
3652 if (msb ^ (u16)(data & 1)) {
3654 crc |= (u16)0x0001U;
3663 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3671 const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3672 const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3673 const u8 arp_type[2] = { 0x08, 0x06 };
3675 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3676 buf &= ~MAC_TX_TXEN_;
3677 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3678 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3679 buf &= ~MAC_RX_RXEN_;
3680 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3682 ret = lan78xx_write_reg(dev, WUCSR, 0);
3683 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3684 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3689 ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3690 temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3691 temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
3693 for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3694 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3697 if (wol & WAKE_PHY) {
3698 temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3700 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3701 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3702 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3704 if (wol & WAKE_MAGIC) {
3705 temp_wucsr |= WUCSR_MPEN_;
3707 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3708 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3709 temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3711 if (wol & WAKE_BCAST) {
3712 temp_wucsr |= WUCSR_BCST_EN_;
3714 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3715 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3716 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3718 if (wol & WAKE_MCAST) {
3719 temp_wucsr |= WUCSR_WAKE_EN_;
3721 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3722 crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3723 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3725 WUF_CFGX_TYPE_MCAST_ |
3726 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3727 (crc & WUF_CFGX_CRC16_MASK_));
3729 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3730 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3731 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3732 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3735 /* for IPv6 Multicast */
3736 crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3737 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3739 WUF_CFGX_TYPE_MCAST_ |
3740 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3741 (crc & WUF_CFGX_CRC16_MASK_));
3743 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3744 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3745 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3746 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3749 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3750 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3751 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3753 if (wol & WAKE_UCAST) {
3754 temp_wucsr |= WUCSR_PFDA_EN_;
3756 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3757 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3758 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3760 if (wol & WAKE_ARP) {
3761 temp_wucsr |= WUCSR_WAKE_EN_;
3763 /* set WUF_CFG & WUF_MASK
3764 * for packet type (offset 12,13) = ARP (0x0806)
3766 crc = lan78xx_wakeframe_crc16(arp_type, 2);
3767 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3769 WUF_CFGX_TYPE_ALL_ |
3770 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3771 (crc & WUF_CFGX_CRC16_MASK_));
3773 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3774 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3775 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3776 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3779 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3780 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3781 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3784 ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3786 /* when multiple WOL bits are set */
3787 if (hweight_long((unsigned long)wol) > 1) {
3788 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3789 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3790 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3792 ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
3795 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3796 buf |= PMT_CTL_WUPS_MASK_;
3797 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3799 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3800 buf |= MAC_RX_RXEN_;
3801 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3806 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3808 struct lan78xx_net *dev = usb_get_intfdata(intf);
3809 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3814 event = message.event;
3816 if (!dev->suspend_count++) {
3817 spin_lock_irq(&dev->txq.lock);
3818 /* don't autosuspend while transmitting */
3819 if ((skb_queue_len(&dev->txq) ||
3820 skb_queue_len(&dev->txq_pend)) &&
3821 PMSG_IS_AUTO(message)) {
3822 spin_unlock_irq(&dev->txq.lock);
3826 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3827 spin_unlock_irq(&dev->txq.lock);
3831 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3832 buf &= ~MAC_TX_TXEN_;
3833 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3834 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3835 buf &= ~MAC_RX_RXEN_;
3836 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3838 /* empty out the Rx and Tx queues */
3839 netif_device_detach(dev->net);
3840 lan78xx_terminate_urbs(dev);
3841 usb_kill_urb(dev->urb_intr);
3844 netif_device_attach(dev->net);
3847 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3848 del_timer(&dev->stat_monitor);
3850 if (PMSG_IS_AUTO(message)) {
3851 /* auto suspend (selective suspend) */
3852 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3853 buf &= ~MAC_TX_TXEN_;
3854 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3855 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3856 buf &= ~MAC_RX_RXEN_;
3857 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3859 ret = lan78xx_write_reg(dev, WUCSR, 0);
3860 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3861 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3863 /* set good-frame wakeup */
3864 ret = lan78xx_read_reg(dev, WUCSR, &buf);
3866 buf |= WUCSR_RFE_WAKE_EN_;
3867 buf |= WUCSR_STORE_WAKE_;
3869 ret = lan78xx_write_reg(dev, WUCSR, buf);
3871 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3873 buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
3874 buf |= PMT_CTL_RES_CLR_WKP_STS_;
3876 buf |= PMT_CTL_PHY_WAKE_EN_;
3877 buf |= PMT_CTL_WOL_EN_;
3878 buf &= ~PMT_CTL_SUS_MODE_MASK_;
3879 buf |= PMT_CTL_SUS_MODE_3_;
3881 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3883 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3885 buf |= PMT_CTL_WUPS_MASK_;
3887 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3889 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3890 buf |= MAC_RX_RXEN_;
3891 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3893 lan78xx_set_suspend(dev, pdata->wol);
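/* On resume, URBs that lan78xx_tx_bh() anchored on dev->deferred while
 * the device was asleep are drained from the anchor and resubmitted;
 * each successful submission re-enters txq accounting via
 * lan78xx_queue_skb(). The wakeup sources (WUCSR/WUCSR2/WK_SRC) are then
 * cleared and rearmed for the next suspend.
 */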
3902 static int lan78xx_resume(struct usb_interface *intf)
3904 struct lan78xx_net *dev = usb_get_intfdata(intf);
3905 struct sk_buff *skb;
3910 if (!timer_pending(&dev->stat_monitor)) {
3912 mod_timer(&dev->stat_monitor,
3913 jiffies + STAT_UPDATE_TIMER);
3916 if (!--dev->suspend_count) {
3917 /* resume interrupt URBs */
3918 if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
3919 usb_submit_urb(dev->urb_intr, GFP_NOIO);
3921 spin_lock_irq(&dev->txq.lock);
3922 while ((res = usb_get_from_anchor(&dev->deferred))) {
3923 skb = (struct sk_buff *)res->context;
3924 ret = usb_submit_urb(res, GFP_ATOMIC);
3926 dev_kfree_skb_any(skb);
3928 usb_autopm_put_interface_async(dev->intf);
3930 netif_trans_update(dev->net);
3931 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3935 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
3936 spin_unlock_irq(&dev->txq.lock);
3938 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
3939 if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
3940 netif_start_queue(dev->net);
3941 tasklet_schedule(&dev->bh);
3945 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3946 ret = lan78xx_write_reg(dev, WUCSR, 0);
3947 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3949 ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
3951 WUCSR2_IPV6_TCPSYN_RCD_ |
3952 WUCSR2_IPV4_TCPSYN_RCD_);
3954 ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
3955 WUCSR_EEE_RX_WAKE_ |
3957 WUCSR_RFE_WAKE_FR_ |
3962 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3963 buf |= MAC_TX_TXEN_;
3964 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3969 static int lan78xx_reset_resume(struct usb_interface *intf)
3971 struct lan78xx_net *dev = usb_get_intfdata(intf);
3975 lan78xx_phy_init(dev);
3977 return lan78xx_resume(intf);
3980 static const struct usb_device_id products[] = {
3982 /* LAN7800 USB Gigabit Ethernet Device */
3983 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
3986 /* LAN7850 USB Gigabit Ethernet Device */
3987 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
3990 /* LAN7801 USB Gigabit Ethernet Device */
3991 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
3995 MODULE_DEVICE_TABLE(usb, products);
3997 static struct usb_driver lan78xx_driver = {
3998 .name = DRIVER_NAME,
3999 .id_table = products,
4000 .probe = lan78xx_probe,
4001 .disconnect = lan78xx_disconnect,
4002 .suspend = lan78xx_suspend,
4003 .resume = lan78xx_resume,
4004 .reset_resume = lan78xx_reset_resume,
4005 .supports_autosuspend = 1,
4006 .disable_hub_initiated_lpm = 1,
4009 module_usb_driver(lan78xx_driver);
4011 MODULE_AUTHOR(DRIVER_AUTHOR);
4012 MODULE_DESCRIPTION(DRIVER_DESC);
4013 MODULE_LICENSE("GPL");