/* Silan SC92031 PCI Fast Ethernet Adapter driver
 *
 * Based on vendor drivers:
 * Silan Fast Ethernet Netcard Driver:
 *	MODULE_AUTHOR ("gaoyonghong");
 *	MODULE_DESCRIPTION ("SILAN Fast Ethernet driver");
 *	MODULE_LICENSE("GPL");
 * 8139D Fast Ethernet driver:
 *	(C) 2002 by gaoyonghong
 *	MODULE_AUTHOR ("gaoyonghong");
 *	MODULE_DESCRIPTION ("Rsltek 8139D PCI Fast Ethernet Adapter driver");
 *	MODULE_LICENSE("GPL");
 * Both are almost identical and seem to be based on pci-skeleton.c
 *
 * Rewritten for 2.6 by Cesar Eduardo Barros
 *
 * A datasheet for this chip can be found at
 * http://www.silan.com.cn/english/product/pdf/SC92031AY.pdf
 */

/* Note about set_mac_address: I don't know how to change the hardware
 * matching, so you need to enable IFF_PROMISC when using it.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>

#include <asm/irq.h>

#define SC92031_NAME "sc92031"

/* BAR 0 is MMIO, BAR 1 is PIO */
#ifndef SC92031_USE_BAR
#define SC92031_USE_BAR 0
#endif

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
static int multicast_filter_limit = 64;
module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC(multicast_filter_limit,
	"Maximum number of filtered multicast addresses");

static int media;
module_param(media, int, 0);
MODULE_PARM_DESC(media, "Media type (0x00 = autodetect,"
	" 0x01 = 10M half, 0x02 = 10M full,"
	" 0x04 = 100M half, 0x08 = 100M full)");

/* Size of the in-memory receive ring. */
#define RX_BUF_LEN_IDX	3	/* 0==8K, 1==16K, 2==32K, 3==64K, 4==128K */
#define RX_BUF_LEN	(8192 << RX_BUF_LEN_IDX)

/* Number of Tx descriptor registers. */
#define NUM_TX_DESC	4

/* max supported ethernet frame size -- must be at least (dev->mtu+14+4). */
#define MAX_ETH_FRAME_SIZE	1536

/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
#define TX_BUF_SIZE	MAX_ETH_FRAME_SIZE
#define TX_BUF_TOT_LEN	(TX_BUF_SIZE * NUM_TX_DESC)
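
/* Tx uses NUM_TX_DESC fixed bounce buffers carved out of one coherent
 * allocation of TX_BUF_TOT_LEN bytes: sc92031_start_xmit() copies each
 * outgoing skb into its slot with skb_copy_and_csum_dev(), so no per-skb
 * DMA mapping or unmapping is needed on completion.
 */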

/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
#define RX_FIFO_THRESH	7	/* Rx buffer level before first PCI xfer. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(4*HZ)

#define SILAN_STATS_NUM	2	/* number of ETHTOOL_GSTATS */

/* media options */
#define AUTOSELECT	0x00
#define M10_HALF	0x01
#define M10_FULL	0x02
#define M100_HALF	0x04
#define M100_FULL	0x08

/* Symbolic offsets to registers. */
enum silan_registers {
	Config0		= 0x00,		// Config0
	Config1		= 0x04,		// Config1
	RxBufWPtr	= 0x08,		// Rx buffer write pointer
	IntrStatus	= 0x0C,		// Interrupt status
	IntrMask	= 0x10,		// Interrupt mask
	RxbufAddr	= 0x14,		// Rx buffer start address
	RxBufRPtr	= 0x18,		// Rx buffer read pointer
	Txstatusall	= 0x1C,		// Transmit status of all descriptors
	TxStatus0	= 0x20,		// Transmit status (four 32-bit registers).
	TxAddr0		= 0x30,		// Tx descriptors (also four 32-bit).
	RxConfig	= 0x40,		// Rx configuration
	MAC0		= 0x44,		// Ethernet hardware address.
	MAR0		= 0x4C,		// Multicast filter.
	RxStatus0	= 0x54,		// Rx status
	TxConfig	= 0x5C,		// Tx configuration
	PhyCtrl		= 0x60,		// physical control
	FlowCtrlConfig	= 0x64,		// flow control
	Miicmd0		= 0x68,		// Mii command0 register
	Miicmd1		= 0x6C,		// Mii command1 register
	Miistatus	= 0x70,		// Mii status register
	Timercnt	= 0x74,		// Timer counter register
	TimerIntr	= 0x78,		// Timer interrupt register
	PMConfig	= 0x7C,		// Power Manager configuration
	CRC0		= 0x80,		// Power Manager CRC (two 32-bit registers)
	Wakeup0		= 0x88,		// Power Manager wakeup (eight 64-bit registers)
	LSBCRC0		= 0xC8,		// Power Manager LSBCRC (two 32-bit registers)
	TestD0		= 0xD0,
	TestD4		= 0xD4,
	TestD8		= 0xD8,
};

#define MII_BMCR		0	// Basic mode control register
#define MII_BMSR		1	// Basic mode status register
#define MII_JAB			16
#define MII_OutputStatus	24

#define BMCR_FULLDPLX		0x0100	// Full duplex
#define BMCR_ANRESTART		0x0200	// Auto negotiation restart
#define BMCR_ANENABLE		0x1000	// Enable auto negotiation
#define BMCR_SPEED100		0x2000	// Select 100Mbps
#define BMSR_LSTATUS		0x0004	// Link status
#define PHY_16_JAB_ENB		0x1000
#define PHY_16_PORT_ENB		0x1

enum IntrStatusBits {
	LinkFail	= 0x80000000,
	LinkOK		= 0x40000000,
	TimeOut		= 0x20000000,
	RxOverflow	= 0x0040,
	RxOK		= 0x0020,
	TxOK		= 0x0001,
	IntrBits = LinkFail|LinkOK|TimeOut|RxOverflow|RxOK|TxOK,
};

enum TxStatusBits {
	TxCarrierLost	= 0x20000000,
	TxAborted	= 0x10000000,
	TxOutOfWindow	= 0x08000000,
	TxNccShift	= 22,
	EarlyTxThresShift = 16,
	TxStatOK	= 0x8000,
	TxUnderrun	= 0x4000,
	TxOwn		= 0x2000,
};

enum RxStatusBits {
	RxStatesOK	= 0x80000,
	RxBadAlign	= 0x40000,
	RxHugeFrame	= 0x20000,
	RxSmallFrame	= 0x10000,
	RxCRCOK		= 0x8000,
	RxCrlFrame	= 0x4000,
	Rx_Broadcast	= 0x2000,
	Rx_Multicast	= 0x1000,
	RxAddrMatch	= 0x0800,
	MiiErr		= 0x0400,
};

enum RxConfigBits {
	RxFullDx	= 0x80000000,
	RxEnb		= 0x40000000,
	RxSmall		= 0x20000000,
	RxHuge		= 0x10000000,
	RxErr		= 0x08000000,
	RxAllphys	= 0x04000000,
	RxMulticast	= 0x02000000,
	RxBroadcast	= 0x01000000,
	RxLoopBack	= (1 << 23) | (1 << 22),
	LowThresholdShift  = 12,
	HighThresholdShift = 2,
};

enum TxConfigBits {
	TxFullDx	= 0x80000000,
	TxEnb		= 0x40000000,
	TxEnbPad	= 0x20000000,
	TxEnbHuge	= 0x10000000,
	TxEnbFCS	= 0x08000000,
	TxNoBackOff	= 0x04000000,
	TxEnbPrem	= 0x02000000,
	TxCareLostCrs	= 0x1000000,
	TxExdCollNum	= 0xf00000,
	TxDataRate	= 0x80000,
};

enum PhyCtrlconfigbits {
	PhyCtrlAne		= 0x80000000,
	PhyCtrlSpd100		= 0x40000000,
	PhyCtrlSpd10		= 0x20000000,
	PhyCtrlPhyBaseAddr	= 0x1f000000,
	PhyCtrlDux		= 0x800000,
	PhyCtrlReset		= 0x400000,
};

enum FlowCtrlConfigBits {
	FlowCtrlFullDX	= 0x80000000,
	FlowCtrlEnb	= 0x40000000,
};

enum Config0Bits {
	Cfg0_Reset	= 0x80000000,
	Cfg0_Anaoff	= 0x40000000,
	Cfg0_LDPS	= 0x20000000,
};

enum Config1Bits {
	Cfg1_EarlyRx	= 1 << 31,
	Cfg1_EarlyTx	= 1 << 30,

	//rx buffer size
	Cfg1_Rcv8K	= 0x0,
	Cfg1_Rcv16K	= 0x1,
	Cfg1_Rcv32K	= 0x3,
	Cfg1_Rcv64K	= 0x7,
	Cfg1_Rcv128K	= 0xf,
};

enum MiiCmd0Bits {
	Mii_Divider	= 0x20000000,
	Mii_WRITE	= 0x400000,
	Mii_READ	= 0x200000,
	Mii_SCAN	= 0x100000,
	Mii_Tamod	= 0x80000,
	Mii_Drvmod	= 0x40000,
	Mii_mdc		= 0x20000,
	Mii_mdoen	= 0x10000,
	Mii_mdo		= 0x8000,
	Mii_mdi		= 0x4000,
};

enum MiiStatusBits {
	Mii_StatusBusy	= 0x80000000,
};

enum PMConfigBits {
	PM_Enable	= 1 << 31,
	PM_LongWF	= 1 << 30,
	PM_Magic	= 1 << 29,
	PM_LANWake	= 1 << 28,
	PM_LWPTN	= (1 << 27 | 1 << 26),
	PM_LinkUp	= 1 << 25,
	PM_WakeUp	= 1 << 24,
};

/* Locking rules:
 * priv->lock protects most of the fields of priv and most of the
 * hardware registers. It does not have to protect against softirqs
 * between sc92031_disable_interrupts and sc92031_enable_interrupts;
 * it also does not need to be used in ->open and ->stop while the
 * device interrupts are off.
 * Not having to protect against softirqs is very useful due to heavy
 * use of mdelay() at _sc92031_reset.
 * Functions prefixed with _sc92031_ must be called with the lock held;
 * functions prefixed with sc92031_ must be called without the lock held.
 * Use mmiowb() before unlocking if the hardware was written to.
 */

/* Locking rules for the interrupt:
 * - the interrupt and the tasklet never run at the same time
 * - neither runs between sc92031_disable_interrupts and
 *   sc92031_enable_interrupts
 */

struct sc92031_priv {
	spinlock_t		lock;
	/* iomap.h cookie */
	void __iomem		*port_base;
	/* pci device structure */
	struct pci_dev		*pdev;
	/* tasklet */
	struct tasklet_struct	tasklet;

	/* CPU address of rx ring */
	void			*rx_ring;
	/* PCI address of rx ring */
	dma_addr_t		rx_ring_dma_addr;
	/* PCI address of rx ring read pointer */
	dma_addr_t		rx_ring_tail;

	/* tx ring write index */
	unsigned		tx_head;
	/* tx ring read index */
	unsigned		tx_tail;
	/* CPU address of tx bounce buffer */
	void			*tx_bufs;
	/* PCI address of tx bounce buffer */
	dma_addr_t		tx_bufs_dma_addr;

	/* copies of some hardware registers */
	u32			intr_status;
	atomic_t		intr_mask;
	u32			rx_config;
	u32			tx_config;
	u32			pm_config;

	/* copy of some flags from dev->flags */
	unsigned int		mc_flags;

	/* for ETHTOOL_GSTATS */
	u64			tx_timeouts;
	u64			rx_loss;

	/* for dev->get_stats */
	long			rx_value;
};

/* I don't know which registers can be safely read; however, I can guess
 * MAC0 is one of them. */
static inline void _sc92031_dummy_read(void __iomem *port_base)
{
	ioread32(port_base + MAC0);
}

static u32 _sc92031_mii_wait(void __iomem *port_base)
{
	u32 mii_status;

	do {
		udelay(10);
		mii_status = ioread32(port_base + Miistatus);
	} while (mii_status & Mii_StatusBusy);

	return mii_status;
}

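/* MII access goes through Miicmd0/Miicmd1.  Judging only from the shifts
 * used below (not from a datasheet): the PHY register number sits in bits
 * 10:6 of Miicmd1, write data starts at bit 11, and read data comes back
 * in the upper bits of Miistatus, hence the >> 13 in _sc92031_mii_read().
 */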
static u32 _sc92031_mii_cmd(void __iomem *port_base, u32 cmd0, u32 cmd1)
{
	iowrite32(Mii_Divider, port_base + Miicmd0);

	_sc92031_mii_wait(port_base);

	iowrite32(cmd1, port_base + Miicmd1);
	iowrite32(Mii_Divider | cmd0, port_base + Miicmd0);

	return _sc92031_mii_wait(port_base);
}

static void _sc92031_mii_scan(void __iomem *port_base)
{
	_sc92031_mii_cmd(port_base, Mii_SCAN, 0x1 << 6);
}

static u16 _sc92031_mii_read(void __iomem *port_base, unsigned reg)
{
	return _sc92031_mii_cmd(port_base, Mii_READ, reg << 6) >> 13;
}

static void _sc92031_mii_write(void __iomem *port_base, unsigned reg, u16 val)
{
	_sc92031_mii_cmd(port_base, Mii_WRITE, (reg << 6) | ((u32)val << 11));
}

static void sc92031_disable_interrupts(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	/* tell the tasklet/interrupt not to enable interrupts */
	atomic_set(&priv->intr_mask, 0);
	wmb();

	/* stop interrupts */
	iowrite32(0, port_base + IntrMask);
	_sc92031_dummy_read(port_base);
	mmiowb();

	/* wait for any concurrent interrupt/tasklet to finish */
	synchronize_irq(dev->irq);
	tasklet_disable(&priv->tasklet);
}

static void sc92031_enable_interrupts(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	tasklet_enable(&priv->tasklet);

	atomic_set(&priv->intr_mask, IntrBits);
	wmb();

	iowrite32(IntrBits, port_base + IntrMask);
	mmiowb();
}

static void _sc92031_disable_tx_rx(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	priv->rx_config &= ~RxEnb;
	priv->tx_config &= ~TxEnb;
	iowrite32(priv->rx_config, port_base + RxConfig);
	iowrite32(priv->tx_config, port_base + TxConfig);
}

static void _sc92031_enable_tx_rx(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	priv->rx_config |= RxEnb;
	priv->tx_config |= TxEnb;
	iowrite32(priv->rx_config, port_base + RxConfig);
	iowrite32(priv->tx_config, port_base + TxConfig);
}

static void _sc92031_tx_clear(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);

	while (priv->tx_head - priv->tx_tail > 0) {
		priv->tx_tail++;
		dev->stats.tx_dropped++;
	}
	priv->tx_head = priv->tx_tail = 0;
}

static void _sc92031_set_mar(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 mar0 = 0, mar1 = 0;

	if ((dev->flags & IFF_PROMISC) ||
	    netdev_mc_count(dev) > multicast_filter_limit ||
	    (dev->flags & IFF_ALLMULTI))
		mar0 = mar1 = 0xffffffff;
	else if (dev->flags & IFF_MULTICAST) {
		struct netdev_hw_addr *ha;

		netdev_for_each_mc_addr(ha, dev) {
			u32 crc;
			unsigned bit = 0;

			crc = ~ether_crc(ETH_ALEN, ha->addr);
			crc >>= 24;

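			/* Scatter bits 24-31 of the inverted CRC into a
			 * 6-bit hash index (bits 26 and 27 are unused) that
			 * selects one bit of the 64-bit MAR0/MAR1 filter.
			 * This describes what the code below does; the
			 * hardware hashing rule itself is undocumented here.
			 */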
			if (crc & 0x01)	bit |= 0x02;
			if (crc & 0x02)	bit |= 0x01;
			if (crc & 0x10)	bit |= 0x20;
			if (crc & 0x20)	bit |= 0x10;
			if (crc & 0x40)	bit |= 0x08;
			if (crc & 0x80)	bit |= 0x04;

			if (bit > 31)
				mar0 |= 0x1 << (bit - 32);
			else
				mar1 |= 0x1 << bit;
		}
	}

	iowrite32(mar0, port_base + MAR0);
	iowrite32(mar1, port_base + MAR0 + 4);
}

static void _sc92031_set_rx_config(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	unsigned int old_mc_flags;
	u32 rx_config_bits = 0;

	old_mc_flags = priv->mc_flags;

	if (dev->flags & IFF_PROMISC)
		rx_config_bits |= RxSmall | RxHuge | RxErr | RxBroadcast
				| RxMulticast | RxAllphys;

	if (dev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
		rx_config_bits |= RxMulticast;

	if (dev->flags & IFF_BROADCAST)
		rx_config_bits |= RxBroadcast;

	priv->rx_config &= ~(RxSmall | RxHuge | RxErr | RxBroadcast
			| RxMulticast | RxAllphys);
	priv->rx_config |= rx_config_bits;

	priv->mc_flags = dev->flags & (IFF_PROMISC | IFF_ALLMULTI
			| IFF_MULTICAST | IFF_BROADCAST);

	if (netif_carrier_ok(dev) && priv->mc_flags != old_mc_flags)
		iowrite32(priv->rx_config, port_base + RxConfig);
}

static bool _sc92031_check_media(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u16 bmsr;

	bmsr = _sc92031_mii_read(port_base, MII_BMSR);
	rmb();
	if (bmsr & BMSR_LSTATUS) {
		bool speed_100, duplex_full;
		u32 flow_ctrl_config = 0;
		u16 output_status = _sc92031_mii_read(port_base,
				MII_OutputStatus);
		_sc92031_mii_scan(port_base);

		speed_100 = output_status & 0x2;
		duplex_full = output_status & 0x4;

		/* Initial Tx/Rx configuration */
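		/* Values taken from the vendor driver.  Matched against the
		 * enums above (a guess, not a documented decode): 0x40/0x1c0
		 * are the Rx FIFO low/high thresholds, 0x48800000 looks like
		 * TxEnb | TxEnbFCS plus a value in the TxExdCollNum field,
		 * and the 0x80000 added for 10Mbps links matches TxDataRate.
		 */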
		priv->rx_config = (0x40 << LowThresholdShift) | (0x1c0 << HighThresholdShift);
		priv->tx_config = 0x48800000;

		/* NOTE: vendor driver had dead code here to enable tx padding */

		if (!speed_100)
			priv->tx_config |= 0x80000;

		// configure rx mode
		_sc92031_set_rx_config(dev);

		if (duplex_full) {
			priv->rx_config |= RxFullDx;
			priv->tx_config |= TxFullDx;
			flow_ctrl_config = FlowCtrlFullDX | FlowCtrlEnb;
		} else {
			priv->rx_config &= ~RxFullDx;
			priv->tx_config &= ~TxFullDx;
		}

		_sc92031_set_mar(dev);
		_sc92031_set_rx_config(dev);
		_sc92031_enable_tx_rx(dev);
		iowrite32(flow_ctrl_config, port_base + FlowCtrlConfig);

		netif_carrier_on(dev);

		if (printk_ratelimit())
			printk(KERN_INFO "%s: link up, %sMbps, %s-duplex\n",
				dev->name,
				speed_100 ? "100" : "10",
				duplex_full ? "full" : "half");
		return true;
	} else {
		_sc92031_mii_scan(port_base);

		netif_carrier_off(dev);

		_sc92031_disable_tx_rx(dev);

		if (printk_ratelimit())
			printk(KERN_INFO "%s: link down\n", dev->name);
		return false;
	}
}

static void _sc92031_phy_reset(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 phy_ctrl;

	phy_ctrl = ioread32(port_base + PhyCtrl);
	phy_ctrl &= ~(PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10);
	phy_ctrl |= PhyCtrlAne | PhyCtrlReset;

	switch (media) {
	default:
	case AUTOSELECT:
		phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10;
		break;
	case M10_HALF:
		phy_ctrl |= PhyCtrlSpd10;
		break;
	case M10_FULL:
		phy_ctrl |= PhyCtrlDux | PhyCtrlSpd10;
		break;
	case M100_HALF:
		phy_ctrl |= PhyCtrlSpd100;
		break;
	case M100_FULL:
		phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100;
		break;
	}

	iowrite32(phy_ctrl, port_base + PhyCtrl);
	mdelay(10);

	phy_ctrl &= ~PhyCtrlReset;
	iowrite32(phy_ctrl, port_base + PhyCtrl);
	mdelay(1);

	_sc92031_mii_write(port_base, MII_JAB,
			PHY_16_JAB_ENB | PHY_16_PORT_ENB);
	_sc92031_mii_scan(port_base);

	netif_carrier_off(dev);
	netif_stop_queue(dev);
}

static void _sc92031_reset(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	/* disable PM */
	iowrite32(0, port_base + PMConfig);

	/* soft reset the chip */
	iowrite32(Cfg0_Reset, port_base + Config0);
	mdelay(200);

	iowrite32(0, port_base + Config0);
	mdelay(10);

	/* disable interrupts */
	iowrite32(0, port_base + IntrMask);

	/* clear multicast address */
	iowrite32(0, port_base + MAR0);
	iowrite32(0, port_base + MAR0 + 4);

	/* init rx ring */
	iowrite32(priv->rx_ring_dma_addr, port_base + RxbufAddr);
	priv->rx_ring_tail = priv->rx_ring_dma_addr;

	/* init tx ring */
	_sc92031_tx_clear(dev);

	/* clear old register values */
	priv->intr_status = 0;
	atomic_set(&priv->intr_mask, 0);
	priv->rx_config = 0;
	priv->tx_config = 0;
	priv->mc_flags = 0;

	/* configure rx buffer size */
	/* NOTE: vendor driver had dead code here to enable early tx/rx */
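	/* Cfg1_Rcv64K matches RX_BUF_LEN_IDX == 3 (64K) above; if one is
	 * changed the other presumably has to follow, since the rx tasklet
	 * wraps RxBufWPtr modulo RX_BUF_LEN. */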
	iowrite32(Cfg1_Rcv64K, port_base + Config1);

	_sc92031_phy_reset(dev);
	_sc92031_check_media(dev);

	/* calculate rx fifo overflow */
	priv->rx_value = 0;

	/* enable PM */
	iowrite32(priv->pm_config, port_base + PMConfig);

	/* clear intr register */
	ioread32(port_base + IntrStatus);
}

static void _sc92031_tx_tasklet(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	unsigned old_tx_tail;
	unsigned entry;
	u32 tx_status;

	old_tx_tail = priv->tx_tail;
	while (priv->tx_head - priv->tx_tail > 0) {
		entry = priv->tx_tail % NUM_TX_DESC;
		tx_status = ioread32(port_base + TxStatus0 + entry * 4);

		if (!(tx_status & (TxStatOK | TxUnderrun | TxAborted)))
			break;

		priv->tx_tail++;

		if (tx_status & TxStatOK) {
			dev->stats.tx_bytes += tx_status & 0x1fff;
			dev->stats.tx_packets++;
			/* Note: TxCarrierLost is always asserted at 100Mbps. */
			dev->stats.collisions += (tx_status >> 22) & 0xf;
		}

		if (tx_status & (TxOutOfWindow | TxAborted)) {
			dev->stats.tx_errors++;

			if (tx_status & TxAborted)
				dev->stats.tx_aborted_errors++;

			if (tx_status & TxCarrierLost)
				dev->stats.tx_carrier_errors++;

			if (tx_status & TxOutOfWindow)
				dev->stats.tx_window_errors++;
		}

		if (tx_status & TxUnderrun)
			dev->stats.tx_fifo_errors++;
	}

	if (priv->tx_tail != old_tx_tail)
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
}

static void _sc92031_rx_tasklet_error(struct net_device *dev,
				      u32 rx_status, unsigned rx_size)
{
	if (rx_size > (MAX_ETH_FRAME_SIZE + 4) || rx_size < 16) {
		dev->stats.rx_errors++;
		dev->stats.rx_length_errors++;
	}

	if (!(rx_status & RxStatesOK)) {
		dev->stats.rx_errors++;

		if (rx_status & (RxHugeFrame | RxSmallFrame))
			dev->stats.rx_length_errors++;

		if (rx_status & RxBadAlign)
			dev->stats.rx_frame_errors++;

		if (!(rx_status & RxCRCOK))
			dev->stats.rx_crc_errors++;
	} else {
		struct sc92031_priv *priv = netdev_priv(dev);
		priv->rx_loss++;
	}
}

static void _sc92031_rx_tasklet(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	dma_addr_t rx_ring_head;
	unsigned rx_len;
	unsigned rx_ring_offset;
	void *rx_ring = priv->rx_ring;

	rx_ring_head = ioread32(port_base + RxBufWPtr);
	rmb();

	/* rx_ring_head is only 17 bits in the RxBufWPtr register.
	 * we need to change it to 32 bits physical address
	 */
	rx_ring_head &= (dma_addr_t)(RX_BUF_LEN - 1);
	rx_ring_head |= priv->rx_ring_dma_addr & ~(dma_addr_t)(RX_BUF_LEN - 1);
	if (rx_ring_head < priv->rx_ring_dma_addr)
		rx_ring_head += RX_BUF_LEN;

	if (rx_ring_head >= priv->rx_ring_tail)
		rx_len = rx_ring_head - priv->rx_ring_tail;
	else
		rx_len = RX_BUF_LEN - (priv->rx_ring_tail - rx_ring_head);

	if (!rx_len)
		return;

	if (unlikely(rx_len > RX_BUF_LEN)) {
		if (printk_ratelimit())
			printk(KERN_ERR "%s: rx packets length > rx buffer\n",
					dev->name);
		return;
	}

	rx_ring_offset = (priv->rx_ring_tail - priv->rx_ring_dma_addr) % RX_BUF_LEN;

	while (rx_len) {
		u32 rx_status;
		unsigned rx_size, rx_size_align, pkt_size;
		struct sk_buff *skb;

		rx_status = le32_to_cpup((__le32 *)(rx_ring + rx_ring_offset));
		rmb();

		rx_size = rx_status >> 20;
		rx_size_align = (rx_size + 3) & ~3;	// for 4 bytes aligned
		pkt_size = rx_size - 4;	// Omit the four octet CRC from the length.

		rx_ring_offset = (rx_ring_offset + 4) % RX_BUF_LEN;

		if (unlikely(rx_status == 0 ||
				rx_size > (MAX_ETH_FRAME_SIZE + 4) ||
				rx_size < 16 ||
				!(rx_status & RxStatesOK))) {
			_sc92031_rx_tasklet_error(dev, rx_status, rx_size);
			break;
		}

		if (unlikely(rx_size_align + 4 > rx_len)) {
			if (printk_ratelimit())
				printk(KERN_ERR "%s: rx_len is too small\n", dev->name);
			break;
		}

		rx_len -= rx_size_align + 4;

		skb = netdev_alloc_skb_ip_align(dev, pkt_size);
		if (unlikely(!skb)) {
			if (printk_ratelimit())
				printk(KERN_ERR "%s: Couldn't allocate a skb_buff for a packet of size %u\n",
						dev->name, pkt_size);
			goto next;
		}

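		/* The frame may wrap past the end of the receive ring;
		 * copy it out in two pieces if so. */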
		if ((rx_ring_offset + pkt_size) > RX_BUF_LEN) {
			memcpy(skb_put(skb, RX_BUF_LEN - rx_ring_offset),
				rx_ring + rx_ring_offset, RX_BUF_LEN - rx_ring_offset);
			memcpy(skb_put(skb, pkt_size - (RX_BUF_LEN - rx_ring_offset)),
				rx_ring, pkt_size - (RX_BUF_LEN - rx_ring_offset));
		} else {
			memcpy(skb_put(skb, pkt_size), rx_ring + rx_ring_offset, pkt_size);
		}

		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);

		dev->stats.rx_bytes += pkt_size;
		dev->stats.rx_packets++;

		if (rx_status & Rx_Multicast)
			dev->stats.multicast++;

	next:
		rx_ring_offset = (rx_ring_offset + rx_size_align) % RX_BUF_LEN;
	}
	mb();

	priv->rx_ring_tail = rx_ring_head;
	iowrite32(priv->rx_ring_tail, port_base + RxBufRPtr);
}

static void _sc92031_link_tasklet(struct net_device *dev)
{
	if (_sc92031_check_media(dev))
		netif_wake_queue(dev);
	else {
		netif_stop_queue(dev);
		dev->stats.tx_carrier_errors++;
	}
}

static void sc92031_tasklet(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 intr_status, intr_mask;

	intr_status = priv->intr_status;

	spin_lock(&priv->lock);

	if (unlikely(!netif_running(dev)))
		goto out;

	if (intr_status & TxOK)
		_sc92031_tx_tasklet(dev);

	if (intr_status & RxOK)
		_sc92031_rx_tasklet(dev);

	if (intr_status & RxOverflow)
		dev->stats.rx_errors++;

	if (intr_status & TimeOut) {
		dev->stats.rx_errors++;
		dev->stats.rx_length_errors++;
	}

	if (intr_status & (LinkFail | LinkOK))
		_sc92031_link_tasklet(dev);

out:
	intr_mask = atomic_read(&priv->intr_mask);
	rmb();

	iowrite32(intr_mask, port_base + IntrMask);
	mmiowb();

	spin_unlock(&priv->lock);
}

static irqreturn_t sc92031_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 intr_status, intr_mask;

	/* mask interrupts before clearing IntrStatus */
	iowrite32(0, port_base + IntrMask);
	_sc92031_dummy_read(port_base);

	intr_status = ioread32(port_base + IntrStatus);
	if (unlikely(intr_status == 0xffffffff))
		return IRQ_NONE;	// hardware has gone missing

	intr_status &= IntrBits;
	if (!intr_status)
		goto out_none;

	priv->intr_status = intr_status;
	tasklet_schedule(&priv->tasklet);

	return IRQ_HANDLED;

out_none:
	intr_mask = atomic_read(&priv->intr_mask);
	rmb();

	iowrite32(intr_mask, port_base + IntrMask);
	mmiowb();

	return IRQ_NONE;
}

static struct net_device_stats *sc92031_get_stats(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	// FIXME I do not understand what this is trying to do.
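	// A guess: the high half of RxStatus0 looks like a 16-bit Rx FIFO
	// error counter that saturates at 0xffff; rx_value accumulates the
	// saturated readings so rx_fifo_errors keeps growing across wraps.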
	if (netif_running(dev)) {
		int temp;

		spin_lock_bh(&priv->lock);

		/* Update the error count. */
		temp = (ioread32(port_base + RxStatus0) >> 16) & 0xffff;

		if (temp == 0xffff) {
			priv->rx_value += temp;
			dev->stats.rx_fifo_errors = priv->rx_value;
		} else
			dev->stats.rx_fifo_errors = temp + priv->rx_value;

		spin_unlock_bh(&priv->lock);
	}

	return &dev->stats;
}

static netdev_tx_t sc92031_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	unsigned len;
	unsigned entry;
	u32 tx_status;

	if (unlikely(skb->len > TX_BUF_SIZE)) {
		dev->stats.tx_dropped++;
		goto out;
	}

	spin_lock(&priv->lock);

	if (unlikely(!netif_carrier_ok(dev))) {
		dev->stats.tx_dropped++;
		goto out_unlock;
	}

	BUG_ON(priv->tx_head - priv->tx_tail >= NUM_TX_DESC);

	entry = priv->tx_head++ % NUM_TX_DESC;

	skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE);

	len = skb->len;
	if (len < ETH_ZLEN) {
		memset(priv->tx_bufs + entry * TX_BUF_SIZE + len,
				0, ETH_ZLEN - len);
		len = ETH_ZLEN;
	}

	wmb();

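	/* Writing TxStatus0 kicks off the transmit.  The low bits carry the
	 * frame length; the 0x3/0x5 in bits 18:16 appear to set the early-Tx
	 * threshold (see EarlyTxThresShift), chosen by frame size -- this
	 * decode is inferred from the bit definitions, not from a datasheet.
	 */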
	if (len < 100)
		tx_status = len;
	else if (len < 300)
		tx_status = 0x30000 | len;
	else
		tx_status = 0x50000 | len;

	iowrite32(priv->tx_bufs_dma_addr + entry * TX_BUF_SIZE,
			port_base + TxAddr0 + entry * 4);
	iowrite32(tx_status, port_base + TxStatus0 + entry * 4);
	mmiowb();

	if (priv->tx_head - priv->tx_tail >= NUM_TX_DESC)
		netif_stop_queue(dev);

out_unlock:
	spin_unlock(&priv->lock);

out:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static int sc92031_open(struct net_device *dev)
{
	int err;
	struct sc92031_priv *priv = netdev_priv(dev);
	struct pci_dev *pdev = priv->pdev;

	priv->rx_ring = pci_alloc_consistent(pdev, RX_BUF_LEN,
			&priv->rx_ring_dma_addr);
	if (unlikely(!priv->rx_ring)) {
		err = -ENOMEM;
		goto out_alloc_rx_ring;
	}

	priv->tx_bufs = pci_alloc_consistent(pdev, TX_BUF_TOT_LEN,
			&priv->tx_bufs_dma_addr);
	if (unlikely(!priv->tx_bufs)) {
		err = -ENOMEM;
		goto out_alloc_tx_bufs;
	}
	priv->tx_head = priv->tx_tail = 0;

	err = request_irq(pdev->irq, sc92031_interrupt,
			IRQF_SHARED, dev->name, dev);
	if (unlikely(err < 0))
		goto out_request_irq;

	priv->pm_config = 0;

	/* Interrupts already disabled by sc92031_stop or sc92031_probe */
	spin_lock_bh(&priv->lock);

	_sc92031_reset(dev);
	mmiowb();

	spin_unlock_bh(&priv->lock);
	sc92031_enable_interrupts(dev);

	if (netif_carrier_ok(dev))
		netif_start_queue(dev);
	else
		netif_tx_disable(dev);

	return 0;

out_request_irq:
	pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs,
			priv->tx_bufs_dma_addr);
out_alloc_tx_bufs:
	pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring,
			priv->rx_ring_dma_addr);
out_alloc_rx_ring:
	return err;
}

static int sc92031_stop(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	struct pci_dev *pdev = priv->pdev;

	netif_tx_disable(dev);

	/* Disable interrupts, stop Tx and Rx. */
	sc92031_disable_interrupts(dev);

	spin_lock_bh(&priv->lock);

	_sc92031_disable_tx_rx(dev);
	_sc92031_tx_clear(dev);
	mmiowb();

	spin_unlock_bh(&priv->lock);

	free_irq(pdev->irq, dev);
	pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs,
			priv->tx_bufs_dma_addr);
	pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring,
			priv->rx_ring_dma_addr);

	return 0;
}

static void sc92031_set_multicast_list(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->lock);

	_sc92031_set_mar(dev);
	_sc92031_set_rx_config(dev);
	mmiowb();

	spin_unlock_bh(&priv->lock);
}

static void sc92031_tx_timeout(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);

	/* Disable interrupts by clearing the interrupt mask. */
	sc92031_disable_interrupts(dev);

	spin_lock(&priv->lock);

	priv->tx_timeouts++;

	_sc92031_reset(dev);
	mmiowb();

	spin_unlock(&priv->lock);

	/* enable interrupts */
	sc92031_enable_interrupts(dev);

	if (netif_carrier_ok(dev))
		netif_wake_queue(dev);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void sc92031_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	if (sc92031_interrupt(dev->irq, dev) != IRQ_NONE)
		sc92031_tasklet((unsigned long)dev);
	enable_irq(dev->irq);
}
#endif

static int sc92031_ethtool_get_settings(struct net_device *dev,
		struct ethtool_cmd *cmd)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u8 phy_address;
	u32 phy_ctrl;
	u16 output_status;

	spin_lock_bh(&priv->lock);

	phy_address = ioread32(port_base + Miicmd1) >> 27;
	phy_ctrl = ioread32(port_base + PhyCtrl);

	output_status = _sc92031_mii_read(port_base, MII_OutputStatus);
	_sc92031_mii_scan(port_base);
	mmiowb();

	spin_unlock_bh(&priv->lock);

	cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full
			| SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full
			| SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII;

	cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;

	if ((phy_ctrl & (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
			== (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
		cmd->advertising |= ADVERTISED_Autoneg;

	if ((phy_ctrl & PhyCtrlSpd10) == PhyCtrlSpd10)
		cmd->advertising |= ADVERTISED_10baseT_Half;

	if ((phy_ctrl & (PhyCtrlSpd10 | PhyCtrlDux))
			== (PhyCtrlSpd10 | PhyCtrlDux))
		cmd->advertising |= ADVERTISED_10baseT_Full;

	if ((phy_ctrl & PhyCtrlSpd100) == PhyCtrlSpd100)
		cmd->advertising |= ADVERTISED_100baseT_Half;

	if ((phy_ctrl & (PhyCtrlSpd100 | PhyCtrlDux))
			== (PhyCtrlSpd100 | PhyCtrlDux))
		cmd->advertising |= ADVERTISED_100baseT_Full;

	if (phy_ctrl & PhyCtrlAne)
		cmd->advertising |= ADVERTISED_Autoneg;

	ethtool_cmd_speed_set(cmd,
			(output_status & 0x2) ? SPEED_100 : SPEED_10);
	cmd->duplex = (output_status & 0x4) ? DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = PORT_MII;
	cmd->phy_address = phy_address;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = (phy_ctrl & PhyCtrlAne) ? AUTONEG_ENABLE : AUTONEG_DISABLE;

	return 0;
}

static int sc92031_ethtool_set_settings(struct net_device *dev,
		struct ethtool_cmd *cmd)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 speed = ethtool_cmd_speed(cmd);
	u32 phy_ctrl;
	u32 old_phy_ctrl;

	if (!(speed == SPEED_10 || speed == SPEED_100))
		return -EINVAL;
	if (!(cmd->duplex == DUPLEX_HALF || cmd->duplex == DUPLEX_FULL))
		return -EINVAL;
	if (!(cmd->port == PORT_MII))
		return -EINVAL;
	if (!(cmd->phy_address == 0x1f))
		return -EINVAL;
	if (!(cmd->transceiver == XCVR_INTERNAL))
		return -EINVAL;
	if (!(cmd->autoneg == AUTONEG_DISABLE || cmd->autoneg == AUTONEG_ENABLE))
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(cmd->advertising & (ADVERTISED_Autoneg
				| ADVERTISED_100baseT_Full
				| ADVERTISED_100baseT_Half
				| ADVERTISED_10baseT_Full
				| ADVERTISED_10baseT_Half)))
			return -EINVAL;

		phy_ctrl = PhyCtrlAne;

		// FIXME: I'm not sure what the original code was trying to do
		if (cmd->advertising & ADVERTISED_Autoneg)
			phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10;
		if (cmd->advertising & ADVERTISED_100baseT_Full)
			phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100;
		if (cmd->advertising & ADVERTISED_100baseT_Half)
			phy_ctrl |= PhyCtrlSpd100;
		if (cmd->advertising & ADVERTISED_10baseT_Full)
			phy_ctrl |= PhyCtrlSpd10 | PhyCtrlDux;
		if (cmd->advertising & ADVERTISED_10baseT_Half)
			phy_ctrl |= PhyCtrlSpd10;
	} else {
		// FIXME: Whole branch guessed
		phy_ctrl = 0;

		if (speed == SPEED_10)
			phy_ctrl |= PhyCtrlSpd10;
		else /* cmd->speed == SPEED_100 */
			phy_ctrl |= PhyCtrlSpd100;

		if (cmd->duplex == DUPLEX_FULL)
			phy_ctrl |= PhyCtrlDux;
	}

	spin_lock_bh(&priv->lock);

	old_phy_ctrl = ioread32(port_base + PhyCtrl);
	phy_ctrl |= old_phy_ctrl & ~(PhyCtrlAne | PhyCtrlDux
			| PhyCtrlSpd100 | PhyCtrlSpd10);
	if (phy_ctrl != old_phy_ctrl)
		iowrite32(phy_ctrl, port_base + PhyCtrl);

	spin_unlock_bh(&priv->lock);

	return 0;
}

static void sc92031_ethtool_get_wol(struct net_device *dev,
		struct ethtool_wolinfo *wolinfo)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 pm_config;

	spin_lock_bh(&priv->lock);
	pm_config = ioread32(port_base + PMConfig);
	spin_unlock_bh(&priv->lock);

	// FIXME: Guessed
	wolinfo->supported = WAKE_PHY | WAKE_MAGIC
			| WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
	wolinfo->wolopts = 0;

	if (pm_config & PM_LinkUp)
		wolinfo->wolopts |= WAKE_PHY;

	if (pm_config & PM_Magic)
		wolinfo->wolopts |= WAKE_MAGIC;

	if (pm_config & PM_WakeUp)
		// FIXME: Guessed
		wolinfo->wolopts |= WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
}

static int sc92031_ethtool_set_wol(struct net_device *dev,
		struct ethtool_wolinfo *wolinfo)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 pm_config;

	spin_lock_bh(&priv->lock);

	pm_config = ioread32(port_base + PMConfig)
			& ~(PM_LinkUp | PM_Magic | PM_WakeUp);

	if (wolinfo->wolopts & WAKE_PHY)
		pm_config |= PM_LinkUp;

	if (wolinfo->wolopts & WAKE_MAGIC)
		pm_config |= PM_Magic;

	// FIXME: Guessed
	if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST))
		pm_config |= PM_WakeUp;

	priv->pm_config = pm_config;
	iowrite32(pm_config, port_base + PMConfig);
	mmiowb();

	spin_unlock_bh(&priv->lock);

	return 0;
}

static int sc92031_ethtool_nway_reset(struct net_device *dev)
{
	int err = 0;
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u16 bmcr;

	spin_lock_bh(&priv->lock);

	bmcr = _sc92031_mii_read(port_base, MII_BMCR);
	if (!(bmcr & BMCR_ANENABLE)) {
		err = -EINVAL;
		goto out;
	}

	_sc92031_mii_write(port_base, MII_BMCR, bmcr | BMCR_ANRESTART);

out:
	_sc92031_mii_scan(port_base);
	mmiowb();

	spin_unlock_bh(&priv->lock);

	return err;
}

static const char sc92031_ethtool_stats_strings[SILAN_STATS_NUM][ETH_GSTRING_LEN] = {
	"tx_timeout",
	"rx_loss",
};

static void sc92031_ethtool_get_strings(struct net_device *dev,
		u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, sc92031_ethtool_stats_strings,
				SILAN_STATS_NUM * ETH_GSTRING_LEN);
}

static int sc92031_ethtool_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return SILAN_STATS_NUM;
	default:
		return -EOPNOTSUPP;
	}
}

static void sc92031_ethtool_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *data)
{
	struct sc92031_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->lock);
	data[0] = priv->tx_timeouts;
	data[1] = priv->rx_loss;
	spin_unlock_bh(&priv->lock);
}

static const struct ethtool_ops sc92031_ethtool_ops = {
	.get_settings		= sc92031_ethtool_get_settings,
	.set_settings		= sc92031_ethtool_set_settings,
	.get_wol		= sc92031_ethtool_get_wol,
	.set_wol		= sc92031_ethtool_set_wol,
	.nway_reset		= sc92031_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_strings		= sc92031_ethtool_get_strings,
	.get_sset_count		= sc92031_ethtool_get_sset_count,
	.get_ethtool_stats	= sc92031_ethtool_get_ethtool_stats,
};


static const struct net_device_ops sc92031_netdev_ops = {
	.ndo_get_stats		= sc92031_get_stats,
	.ndo_start_xmit		= sc92031_start_xmit,
	.ndo_open		= sc92031_open,
	.ndo_stop		= sc92031_stop,
	.ndo_set_multicast_list	= sc92031_set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_tx_timeout		= sc92031_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= sc92031_poll_controller,
#endif
};

static int __devinit sc92031_probe(struct pci_dev *pdev,
		const struct pci_device_id *id)
{
	int err;
	void __iomem *port_base;
	struct net_device *dev;
	struct sc92031_priv *priv;
	u32 mac0, mac1;
	unsigned long base_addr;

	err = pci_enable_device(pdev);
	if (unlikely(err < 0))
		goto out_enable_device;

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (unlikely(err < 0))
		goto out_set_dma_mask;

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (unlikely(err < 0))
		goto out_set_dma_mask;

	err = pci_request_regions(pdev, SC92031_NAME);
	if (unlikely(err < 0))
		goto out_request_regions;

	port_base = pci_iomap(pdev, SC92031_USE_BAR, 0);
	if (unlikely(!port_base)) {
		err = -EIO;
		goto out_iomap;
	}

	dev = alloc_etherdev(sizeof(struct sc92031_priv));
	if (unlikely(!dev)) {
		err = -ENOMEM;
		goto out_alloc_etherdev;
	}

	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

#if SC92031_USE_BAR == 0
	dev->mem_start = pci_resource_start(pdev, SC92031_USE_BAR);
	dev->mem_end = pci_resource_end(pdev, SC92031_USE_BAR);
#elif SC92031_USE_BAR == 1
	dev->base_addr = pci_resource_start(pdev, SC92031_USE_BAR);
#endif
	dev->irq = pdev->irq;

	/* faked with skb_copy_and_csum_dev */
	dev->features = NETIF_F_SG | NETIF_F_HIGHDMA |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	dev->netdev_ops		= &sc92031_netdev_ops;
	dev->watchdog_timeo	= TX_TIMEOUT;
	dev->ethtool_ops	= &sc92031_ethtool_ops;

	priv = netdev_priv(dev);
	spin_lock_init(&priv->lock);
	priv->port_base = port_base;
	priv->pdev = pdev;
	tasklet_init(&priv->tasklet, sc92031_tasklet, (unsigned long)dev);
	/* Fudge tasklet count so the call to sc92031_enable_interrupts at
	 * sc92031_open will work correctly */
	tasklet_disable_nosync(&priv->tasklet);

	/* PCI PM Wakeup */
	iowrite32((~PM_LongWF & ~PM_LWPTN) | PM_Enable, port_base + PMConfig);

	mac0 = ioread32(port_base + MAC0);
	mac1 = ioread32(port_base + MAC0 + 4);
	dev->dev_addr[0] = dev->perm_addr[0] = mac0 >> 24;
	dev->dev_addr[1] = dev->perm_addr[1] = mac0 >> 16;
	dev->dev_addr[2] = dev->perm_addr[2] = mac0 >> 8;
	dev->dev_addr[3] = dev->perm_addr[3] = mac0;
	dev->dev_addr[4] = dev->perm_addr[4] = mac1 >> 8;
	dev->dev_addr[5] = dev->perm_addr[5] = mac1;

	err = register_netdev(dev);
	if (err < 0)
		goto out_register_netdev;

#if SC92031_USE_BAR == 0
	base_addr = dev->mem_start;
#elif SC92031_USE_BAR == 1
	base_addr = dev->base_addr;
#endif
	printk(KERN_INFO "%s: SC92031 at 0x%lx, %pM, IRQ %d\n", dev->name,
			base_addr, dev->dev_addr, dev->irq);

	return 0;

out_register_netdev:
	free_netdev(dev);
out_alloc_etherdev:
	pci_iounmap(pdev, port_base);
out_iomap:
	pci_release_regions(pdev);
out_request_regions:
out_set_dma_mask:
	pci_disable_device(pdev);
out_enable_device:
	return err;
}

static void __devexit sc92031_remove(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	unregister_netdev(dev);
	free_netdev(dev);
	pci_iounmap(pdev, port_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int sc92031_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sc92031_priv *priv = netdev_priv(dev);

	pci_save_state(pdev);

	if (!netif_running(dev))
		goto out;

	netif_device_detach(dev);

	/* Disable interrupts, stop Tx and Rx. */
	sc92031_disable_interrupts(dev);

	spin_lock_bh(&priv->lock);

	_sc92031_disable_tx_rx(dev);
	_sc92031_tx_clear(dev);
	mmiowb();

	spin_unlock_bh(&priv->lock);

out:
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int sc92031_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sc92031_priv *priv = netdev_priv(dev);

	pci_restore_state(pdev);
	pci_set_power_state(pdev, PCI_D0);

	if (!netif_running(dev))
		goto out;

	/* Interrupts already disabled by sc92031_suspend */
	spin_lock_bh(&priv->lock);

	_sc92031_reset(dev);
	mmiowb();

	spin_unlock_bh(&priv->lock);
	sc92031_enable_interrupts(dev);

	netif_device_attach(dev);

	if (netif_carrier_ok(dev))
		netif_wake_queue(dev);
	else
		netif_tx_disable(dev);

out:
	return 0;
}

static DEFINE_PCI_DEVICE_TABLE(sc92031_pci_device_id_table) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) },
	{ PCI_DEVICE(0x1088, 0x2031) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table);

static struct pci_driver sc92031_pci_driver = {
	.name		= SC92031_NAME,
	.id_table	= sc92031_pci_device_id_table,
	.probe		= sc92031_probe,
	.remove		= __devexit_p(sc92031_remove),
	.suspend	= sc92031_suspend,
	.resume		= sc92031_resume,
};

static int __init sc92031_init(void)
{
	return pci_register_driver(&sc92031_pci_driver);
}

static void __exit sc92031_exit(void)
{
	pci_unregister_driver(&sc92031_pci_driver);
}

module_init(sc92031_init);
module_exit(sc92031_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cesar Eduardo Barros <[email protected]>");
MODULE_DESCRIPTION("Silan SC92031 PCI Fast Ethernet Adapter driver");