/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
 *
 * Copyright (C) 2002 David S. Miller ([email protected])
 * Copyright (C) 2004 Pekka Pietikainen ([email protected])
 * Copyright (C) 2004 Florian Schirmer ([email protected])
 * Copyright (C) 2006 Felix Fietkau ([email protected])
 * Copyright (C) 2006 Broadcom Corporation.
 * Copyright (C) 2007 Michael Buesch <[email protected]>
 * Copyright (C) 2013 Hauke Mehrtens <[email protected]>
 *
 * Distribute under GPL.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>
#include <linux/slab.h>
#include <linux/phy.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>


#include "b44.h"

#define DRV_MODULE_NAME         "b44"
#define DRV_DESCRIPTION         "Broadcom 44xx/47xx 10/100 PCI ethernet driver"

#define B44_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU                     ETH_ZLEN
#define B44_MAX_MTU                     ETH_DATA_LEN

#define B44_RX_RING_SIZE                512
#define B44_DEF_RX_RING_PENDING         200
#define B44_RX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE                512
#define B44_DEF_TX_RING_PENDING         (B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_TX_RING_SIZE)

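/* The TX ring is only ever filled up to tx_pending entries; the
 * remaining TX_RING_GAP() descriptors are left unused so the producer
 * and consumer indices never become ambiguous on wrap-around.
 * TX_BUFFS_AVAIL() counts the free descriptors in both the wrapped
 * and non-wrapped cases.
 */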
#define TX_RING_GAP(BP) \
        (B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)                                              \
        (((BP)->tx_cons <= (BP)->tx_prod) ?                             \
          (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :            \
          (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)              (((N) + 1) & (B44_TX_RING_SIZE - 1))

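/* Each RX buffer holds up to 1536 bytes of frame data plus the
 * RX_PKT_OFFSET bytes the chip prepends: the rx_header it writes in
 * front of every packet and 2 bytes of padding.
 */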
#define RX_PKT_OFFSET           (RX_HEADER_LEN + 2)
#define RX_PKT_BUF_SZ           (1536 + RX_PKT_OFFSET)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH            (B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE        0x400
#define B44_PATTERN_SIZE        0x80
#define B44_PMASK_BASE          0x600
#define B44_PMASK_SIZE          0x10
#define B44_MAX_PATTERNS        16
#define B44_ETHIPV6UDP_HLEN     62
#define B44_ETHIPV4UDP_HLEN     42
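/* The two *_HLEN values above are Ethernet + IP + UDP header sizes:
 * 42 = 14 + 20 + 8 for IPv4 and 62 = 14 + 40 + 8 for IPv6, i.e. the
 * offset at which a magic-packet payload would start in a UDP frame.
 */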

MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");

static int b44_debug = -1;      /* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");


#ifdef CONFIG_B44_PCI
static const struct pci_device_id b44_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
        { 0 } /* terminate list with empty entry */
};
MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static struct pci_driver b44_pci_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = b44_pci_tbl,
};
#endif /* CONFIG_B44_PCI */

static const struct ssb_device_id b44_ssb_tbl[] = {
        SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
        {},
};
MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);

#define B44_FULL_RESET          1
#define B44_FULL_RESET_SKIP_PHY 2
#define B44_PARTIAL_RESET       3
#define B44_CHIP_RESET_FULL     4
#define B44_CHIP_RESET_PARTIAL  5

static void b44_init_hw(struct b44 *, int);

static int dma_desc_sync_size;
static int instance;

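/* Names for the ethtool statistics, generated by stringifying each
 * register name in B44_STAT_REG_DECLARE; the temporary _B44() macro
 * turns every declared entry into a quoted string literal.
 */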
static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)      # x,
B44_STAT_REG_DECLARE
#undef _B44
};

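/* When a descriptor ring had to be allocated with kmalloc and a
 * streaming DMA mapping (the RX/TX "ring hack" paths further down),
 * single descriptors must be synced explicitly around hardware
 * accesses; these helpers sync a region of dma_desc_sync_size bytes
 * at the given offset into the ring.
 */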
static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
                                                dma_addr_t dma_base,
                                                unsigned long offset,
                                                enum dma_data_direction dir)
{
        dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
                                   dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
                                             dma_addr_t dma_base,
                                             unsigned long offset,
                                             enum dma_data_direction dir)
{
        dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
                                dma_desc_sync_size, dir);
}

static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
        return ssb_read32(bp->sdev, reg);
}

static inline void bw32(const struct b44 *bp,
                        unsigned long reg, unsigned long val)
{
        ssb_write32(bp->sdev, reg, val);
}

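/* Poll @reg in 10 usec steps until @bit clears (@clear nonzero) or
 * sets (@clear zero); @timeout is therefore measured in 10 usec
 * units, not jiffies.
 */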
static int b44_wait_bit(struct b44 *bp, unsigned long reg,
                        u32 bit, unsigned long timeout, const int clear)
{
        unsigned long i;

        for (i = 0; i < timeout; i++) {
                u32 val = br32(bp, reg);

                if (clear && !(val & bit))
                        break;
                if (!clear && (val & bit))
                        break;
                udelay(10);
        }
        if (i == timeout) {
                if (net_ratelimit())
                        netdev_err(bp->dev, "BUG!  Timeout waiting for bit %08x of register %lx to %s\n",
                                   bit, reg, clear ? "clear" : "set");

                return -ENODEV;
        }
        return 0;
}

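/* Write one MAC address into the receive CAM at @index: the last
 * four octets go into CAM_DATA_LO, the first two plus the valid bit
 * into CAM_DATA_HI, and the write is then triggered via CAM_CTRL.
 */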
static inline void __b44_cam_write(struct b44 *bp,
                                   const unsigned char *data, int index)
{
        u32 val;

        val  = ((u32) data[2]) << 24;
        val |= ((u32) data[3]) << 16;
        val |= ((u32) data[4]) <<  8;
        val |= ((u32) data[5]) <<  0;
        bw32(bp, B44_CAM_DATA_LO, val);
        val = (CAM_DATA_HI_VALID |
               (((u32) data[0]) << 8) |
               (((u32) data[1]) << 0));
        bw32(bp, B44_CAM_DATA_HI, val);
        bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
                            (index << CAM_CTRL_INDEX_SHIFT)));
        b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}

static inline void __b44_disable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
        __b44_disable_ints(bp);

        /* Flush posted writes. */
        br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, bp->imask);
}

static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
{
        int err;

        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
                             (phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
        err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
        *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

        return err;
}

static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
{
        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
                             (phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
                             (val & MDIO_DATA_DATA)));
        return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}

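/* With an external PHY the MDIO bus is owned by phylib (see the
 * *_phylib accessors below), so these internal-PHY wrappers
 * deliberately become no-ops in that case.
 */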
static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
        if (bp->flags & B44_FLAG_EXTERNAL_PHY)
                return 0;

        return __b44_readphy(bp, bp->phy_addr, reg, val);
}

static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
{
        if (bp->flags & B44_FLAG_EXTERNAL_PHY)
                return 0;

        return __b44_writephy(bp, bp->phy_addr, reg, val);
}

/* miilib interface */
static int b44_mdio_read_mii(struct net_device *dev, int phy_id, int location)
{
        u32 val;
        struct b44 *bp = netdev_priv(dev);
        int rc = __b44_readphy(bp, phy_id, location, &val);
        if (rc)
                return 0xffffffff;
        return val;
}

static void b44_mdio_write_mii(struct net_device *dev, int phy_id, int location,
                               int val)
{
        struct b44 *bp = netdev_priv(dev);
        __b44_writephy(bp, phy_id, location, val);
}

static int b44_mdio_read_phylib(struct mii_bus *bus, int phy_id, int location)
{
        u32 val;
        struct b44 *bp = bus->priv;
        int rc = __b44_readphy(bp, phy_id, location, &val);
        if (rc)
                return 0xffffffff;
        return val;
}

static int b44_mdio_write_phylib(struct mii_bus *bus, int phy_id, int location,
                                 u16 val)
{
        struct b44 *bp = bus->priv;
        return __b44_writephy(bp, phy_id, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
        u32 val;
        int err;

        if (bp->flags & B44_FLAG_EXTERNAL_PHY)
                return 0;
        err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
        if (err)
                return err;
        udelay(100);
        err = b44_readphy(bp, MII_BMCR, &val);
        if (!err) {
                if (val & BMCR_RESET) {
                        netdev_err(bp->dev, "PHY Reset would not complete\n");
                        err = -ENODEV;
                }
        }

        return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
        u32 val;

        bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
        bp->flags |= pause_flags;

        val = br32(bp, B44_RXCONFIG);
        if (pause_flags & B44_FLAG_RX_PAUSE)
                val |= RXCONFIG_FLOW;
        else
                val &= ~RXCONFIG_FLOW;
        bw32(bp, B44_RXCONFIG, val);

        val = br32(bp, B44_MAC_FLOW);
        if (pause_flags & B44_FLAG_TX_PAUSE)
                val |= (MAC_FLOW_PAUSE_ENAB |
                        (0xc0 & MAC_FLOW_RX_HI_WATER));
        else
                val &= ~MAC_FLOW_PAUSE_ENAB;
        bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
        u32 pause_enab = 0;

        /* The driver supports only rx pause by default because
           the b44 mac tx pause mechanism generates excessive
           pause frames.
           Use ethtool to turn on b44 tx pause if necessary.
         */
        if ((local & ADVERTISE_PAUSE_CAP) &&
            (local & ADVERTISE_PAUSE_ASYM)) {
                if ((remote & LPA_PAUSE_ASYM) &&
                    !(remote & LPA_PAUSE_CAP))
                        pause_enab |= B44_FLAG_RX_PAUSE;
        }

        __b44_set_flow_ctrl(bp, pause_enab);
}

#ifdef CONFIG_BCM47XX
#include <linux/bcm47xx_nvram.h>
static void b44_wap54g10_workaround(struct b44 *bp)
{
        char buf[20];
        u32 val;
        int err;

        /*
         * workaround for bad hardware design in Linksys WAP54G v1.0
         * see https://dev.openwrt.org/ticket/146
         * check and reset bit "isolate"
         */
        if (bcm47xx_nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
                return;
        if (simple_strtoul(buf, NULL, 0) == 2) {
                err = __b44_readphy(bp, 0, MII_BMCR, &val);
                if (err)
                        goto error;
                if (!(val & BMCR_ISOLATE))
                        return;
                val &= ~BMCR_ISOLATE;
                err = __b44_writephy(bp, 0, MII_BMCR, val);
                if (err)
                        goto error;
        }
        return;
error:
        pr_warn("PHY: cannot reset MII transceiver isolate bit\n");
}
#else
static inline void b44_wap54g10_workaround(struct b44 *bp)
{
}
#endif

static int b44_setup_phy(struct b44 *bp)
{
        u32 val;
        int err;

        b44_wap54g10_workaround(bp);

        if (bp->flags & B44_FLAG_EXTERNAL_PHY)
                return 0;
        if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
                                val & MII_ALEDCTRL_ALLMSK)) != 0)
                goto out;
        if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
                                val | MII_TLEDCTRL_ENABLE)) != 0)
                goto out;

        if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
                u32 adv = ADVERTISE_CSMA;

                if (bp->flags & B44_FLAG_ADV_10HALF)
                        adv |= ADVERTISE_10HALF;
                if (bp->flags & B44_FLAG_ADV_10FULL)
                        adv |= ADVERTISE_10FULL;
                if (bp->flags & B44_FLAG_ADV_100HALF)
                        adv |= ADVERTISE_100HALF;
                if (bp->flags & B44_FLAG_ADV_100FULL)
                        adv |= ADVERTISE_100FULL;

                if (bp->flags & B44_FLAG_PAUSE_AUTO)
                        adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

                if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
                        goto out;
                if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
                                                       BMCR_ANRESTART))) != 0)
                        goto out;
        } else {
                u32 bmcr;

                if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
                        goto out;
                bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
                if (bp->flags & B44_FLAG_100_BASE_T)
                        bmcr |= BMCR_SPEED100;
                if (bp->flags & B44_FLAG_FULL_DUPLEX)
                        bmcr |= BMCR_FULLDPLX;
                if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
                        goto out;

                /* Since we will not be negotiating there is no safe way
                 * to determine if the link partner supports flow control
                 * or not.  So just disable it completely in this case.
                 */
                b44_set_flow_ctrl(bp, 0, 0);
        }

out:
        return err;
}

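/* Accumulate the hardware MIB counters.  This walks the u64 fields
 * of bp->hw_stats starting at tx_good_octets in declaration order,
 * so the field layout must match the TX and RX statistics registers
 * (B44_TX_GOOD_O..B44_TX_PAUSE and B44_RX_GOOD_O..B44_RX_NPAUSE,
 * one 32-bit counter every 4 bytes).
 */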
static void b44_stats_update(struct b44 *bp)
{
        unsigned long reg;
        u64 *val;

        val = &bp->hw_stats.tx_good_octets;
        u64_stats_update_begin(&bp->hw_stats.syncp);

        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }

        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }

        u64_stats_update_end(&bp->hw_stats.syncp);
}

static void b44_link_report(struct b44 *bp)
{
        if (!netif_carrier_ok(bp->dev)) {
                netdev_info(bp->dev, "Link is down\n");
        } else {
                netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
                            (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
                            (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

                netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
                            (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
                            (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
        }
}

static void b44_check_phy(struct b44 *bp)
{
        u32 bmsr, aux;

        if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
                bp->flags |= B44_FLAG_100_BASE_T;
                if (!netif_carrier_ok(bp->dev)) {
                        u32 val = br32(bp, B44_TX_CTRL);
                        if (bp->flags & B44_FLAG_FULL_DUPLEX)
                                val |= TX_CTRL_DUPLEX;
                        else
                                val &= ~TX_CTRL_DUPLEX;
                        bw32(bp, B44_TX_CTRL, val);
                        netif_carrier_on(bp->dev);
                        b44_link_report(bp);
                }
                return;
        }

        if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
            !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
            (bmsr != 0xffff)) {
                if (aux & MII_AUXCTRL_SPEED)
                        bp->flags |= B44_FLAG_100_BASE_T;
                else
                        bp->flags &= ~B44_FLAG_100_BASE_T;
                if (aux & MII_AUXCTRL_DUPLEX)
                        bp->flags |= B44_FLAG_FULL_DUPLEX;
                else
                        bp->flags &= ~B44_FLAG_FULL_DUPLEX;

                if (!netif_carrier_ok(bp->dev) &&
                    (bmsr & BMSR_LSTATUS)) {
                        u32 val = br32(bp, B44_TX_CTRL);
                        u32 local_adv, remote_adv;

                        if (bp->flags & B44_FLAG_FULL_DUPLEX)
                                val |= TX_CTRL_DUPLEX;
                        else
                                val &= ~TX_CTRL_DUPLEX;
                        bw32(bp, B44_TX_CTRL, val);

                        if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
                            !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
                            !b44_readphy(bp, MII_LPA, &remote_adv))
                                b44_set_flow_ctrl(bp, local_adv, remote_adv);

                        /* Link now up */
                        netif_carrier_on(bp->dev);
                        b44_link_report(bp);
                } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
                        /* Link now down */
                        netif_carrier_off(bp->dev);
                        b44_link_report(bp);
                }

                if (bmsr & BMSR_RFAULT)
                        netdev_warn(bp->dev, "Remote fault detected in PHY\n");
                if (bmsr & BMSR_JCD)
                        netdev_warn(bp->dev, "Jabber detected in PHY\n");
        }
}

static void b44_timer(struct timer_list *t)
{
        struct b44 *bp = from_timer(bp, t, timer);

        spin_lock_irq(&bp->lock);

        b44_check_phy(bp);

        b44_stats_update(bp);

        spin_unlock_irq(&bp->lock);

        mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
}

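/* Reap completed TX descriptors: DMATX_STAT tells us how far the
 * hardware has progressed, everything between tx_cons and that
 * point is unmapped and freed, and the queue is woken again once
 * enough descriptors are available.
 */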
static void b44_tx(struct b44 *bp)
{
        u32 cur, cons;
        unsigned bytes_compl = 0, pkts_compl = 0;

        cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
        cur /= sizeof(struct dma_desc);

        /* XXX needs updating when NETIF_F_SG is supported */
        for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
                struct ring_info *rp = &bp->tx_buffers[cons];
                struct sk_buff *skb = rp->skb;

                BUG_ON(skb == NULL);

                dma_unmap_single(bp->sdev->dma_dev,
                                 rp->mapping,
                                 skb->len,
                                 DMA_TO_DEVICE);
                rp->skb = NULL;

                bytes_compl += skb->len;
                pkts_compl++;

                dev_consume_skb_irq(skb);
        }

        netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
        bp->tx_cons = cons;
        if (netif_queue_stopped(bp->dev) &&
            TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
                netif_wake_queue(bp->dev);

        bw32(bp, B44_GPTIMER, 0);
}

/* Works like this.  This chip writes a 'struct rx_header' 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *dp;
        struct ring_info *src_map, *map;
        struct rx_header *rh;
        struct sk_buff *skb;
        dma_addr_t mapping;
        int dest_idx;
        u32 ctrl;

        src_map = NULL;
        if (src_idx >= 0)
                src_map = &bp->rx_buffers[src_idx];
        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        map = &bp->rx_buffers[dest_idx];
        skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
        if (skb == NULL)
                return -ENOMEM;

        mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
                                 RX_PKT_BUF_SZ,
                                 DMA_FROM_DEVICE);

        /* Hardware bug work-around, the chip is unable to do PCI DMA
           to/from anything above 1GB :-( */
        if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
                mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
                /* Sigh... */
                if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
                        dma_unmap_single(bp->sdev->dma_dev, mapping,
                                             RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
                if (skb == NULL)
                        return -ENOMEM;
                mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
                                         RX_PKT_BUF_SZ,
                                         DMA_FROM_DEVICE);
                if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
                    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
                        if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
                                dma_unmap_single(bp->sdev->dma_dev, mapping,
                                                 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
                        dev_kfree_skb_any(skb);
                        return -ENOMEM;
                }
                bp->force_copybreak = 1;
        }

        rh = (struct rx_header *) skb->data;

        rh->len = 0;
        rh->flags = 0;

        map->skb = skb;
        map->mapping = mapping;

        if (src_map != NULL)
                src_map->skb = NULL;

        ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        dp = &bp->rx_ring[dest_idx];
        dp->ctrl = cpu_to_le32(ctrl);
        dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
                                            dest_idx * sizeof(*dp),
                                            DMA_BIDIRECTIONAL);

        return RX_PKT_BUF_SZ;
}

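/* Re-post an already-mapped RX buffer at a new ring position rather
 * than allocating a fresh skb: reset its rx_header, copy the
 * descriptor and fix up only the EOT bit for the destination slot.
 * Used on the error and copybreak paths of b44_rx().
 */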
static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *src_desc, *dest_desc;
        struct ring_info *src_map, *dest_map;
        struct rx_header *rh;
        int dest_idx;
        __le32 ctrl;

        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        dest_desc = &bp->rx_ring[dest_idx];
        dest_map = &bp->rx_buffers[dest_idx];
        src_desc = &bp->rx_ring[src_idx];
        src_map = &bp->rx_buffers[src_idx];

        dest_map->skb = src_map->skb;
        rh = (struct rx_header *) src_map->skb->data;
        rh->len = 0;
        rh->flags = 0;
        dest_map->mapping = src_map->mapping;

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
                                         src_idx * sizeof(*src_desc),
                                         DMA_BIDIRECTIONAL);

        ctrl = src_desc->ctrl;
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= cpu_to_le32(DESC_CTRL_EOT);
        else
                ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

        dest_desc->ctrl = ctrl;
        dest_desc->addr = src_desc->addr;

        src_map->skb = NULL;

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
                                             dest_idx * sizeof(*dest_desc),
                                             DMA_BIDIRECTIONAL);

        dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
                                   RX_PKT_BUF_SZ,
                                   DMA_FROM_DEVICE);
}

static int b44_rx(struct b44 *bp, int budget)
{
        int received;
        u32 cons, prod;

        received = 0;
        prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
        prod /= sizeof(struct dma_desc);
        cons = bp->rx_cons;

        while (cons != prod && budget > 0) {
                struct ring_info *rp = &bp->rx_buffers[cons];
                struct sk_buff *skb = rp->skb;
                dma_addr_t map = rp->mapping;
                struct rx_header *rh;
                u16 len;

                dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
                                        RX_PKT_BUF_SZ,
                                        DMA_FROM_DEVICE);
                rh = (struct rx_header *) skb->data;
                len = le16_to_cpu(rh->len);
                if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
                    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
                drop_it:
                        b44_recycle_rx(bp, cons, bp->rx_prod);
                drop_it_no_recycle:
                        bp->dev->stats.rx_dropped++;
                        goto next_pkt;
                }

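                /* The chip updates rh->len once the packet has been
                 * DMA'd; a length of zero here most likely means that
                 * write has not landed yet, so spin briefly before
                 * giving up on the descriptor.
                 */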
                if (len == 0) {
                        int i = 0;

                        do {
                                udelay(2);
                                barrier();
                                len = le16_to_cpu(rh->len);
                        } while (len == 0 && i++ < 5);
                        if (len == 0)
                                goto drop_it;
                }

                /* Omit CRC. */
                len -= 4;

                if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
                        int skb_size;
                        skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
                        if (skb_size < 0)
                                goto drop_it;
                        dma_unmap_single(bp->sdev->dma_dev, map,
                                         skb_size, DMA_FROM_DEVICE);
                        /* Leave out rx_header */
                        skb_put(skb, len + RX_PKT_OFFSET);
                        skb_pull(skb, RX_PKT_OFFSET);
                } else {
                        struct sk_buff *copy_skb;

                        b44_recycle_rx(bp, cons, bp->rx_prod);
                        copy_skb = napi_alloc_skb(&bp->napi, len);
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;

                        skb_put(copy_skb, len);
                        /* DMA sync done above, copy just the actual packet */
                        skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
                                                         copy_skb->data, len);
                        skb = copy_skb;
                }
                skb_checksum_none_assert(skb);
                skb->protocol = eth_type_trans(skb, bp->dev);
                netif_receive_skb(skb);
                received++;
                budget--;
        next_pkt:
                bp->rx_prod = (bp->rx_prod + 1) &
                        (B44_RX_RING_SIZE - 1);
                cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
        }

        bp->rx_cons = cons;
        bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

        return received;
}

static int b44_poll(struct napi_struct *napi, int budget)
{
        struct b44 *bp = container_of(napi, struct b44, napi);
        int work_done;
        unsigned long flags;

        spin_lock_irqsave(&bp->lock, flags);

        if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
                /* spin_lock(&bp->tx_lock); */
                b44_tx(bp);
                /* spin_unlock(&bp->tx_lock); */
        }
        if (bp->istat & ISTAT_RFO) {    /* fast recovery, in ~20msec */
                bp->istat &= ~ISTAT_RFO;
                b44_disable_ints(bp);
                ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
                b44_init_rings(bp);
                b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
                netif_wake_queue(bp->dev);
        }

        spin_unlock_irqrestore(&bp->lock, flags);

        work_done = 0;
        if (bp->istat & ISTAT_RX)
                work_done += b44_rx(bp, budget);

        if (bp->istat & ISTAT_ERRORS) {
                spin_lock_irqsave(&bp->lock, flags);
                b44_halt(bp);
                b44_init_rings(bp);
                b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
                netif_wake_queue(bp->dev);
                spin_unlock_irqrestore(&bp->lock, flags);
                work_done = 0;
        }

        if (work_done < budget) {
                napi_complete_done(napi, work_done);
                b44_enable_ints(bp);
        }

        return work_done;
}

static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct b44 *bp = netdev_priv(dev);
        u32 istat, imask;
        int handled = 0;

        spin_lock(&bp->lock);

        istat = br32(bp, B44_ISTAT);
        imask = br32(bp, B44_IMASK);

        /* The interrupt mask register controls which interrupt bits
         * will actually raise an interrupt to the CPU when set by
         * hw/firmware, but it does not clear the bits in ISTAT itself,
         * so mask them off here.
         */
        istat &= imask;
        if (istat) {
                handled = 1;

                if (unlikely(!netif_running(dev))) {
                        netdev_info(dev, "late interrupt\n");
                        goto irq_ack;
                }

                if (napi_schedule_prep(&bp->napi)) {
                        /* NOTE: These writes are posted by the readback of
                         *       the ISTAT register below.
                         */
                        bp->istat = istat;
                        __b44_disable_ints(bp);
                        __napi_schedule(&bp->napi);
                }

irq_ack:
                bw32(bp, B44_ISTAT, istat);
                br32(bp, B44_ISTAT);
        }
        spin_unlock(&bp->lock);
        return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
        struct b44 *bp = netdev_priv(dev);

        netdev_err(dev, "transmit timed out, resetting\n");

        spin_lock_irq(&bp->lock);

        b44_halt(bp);
        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);

        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        netif_wake_queue(dev);
}

static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        int rc = NETDEV_TX_OK;
        dma_addr_t mapping;
        u32 len, entry, ctrl;
        unsigned long flags;

        len = skb->len;
        spin_lock_irqsave(&bp->lock, flags);

        /* This is a hard error, log it. */
        if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
                netif_stop_queue(dev);
                netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
                goto err_out;
        }

        mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
                struct sk_buff *bounce_skb;

                /* Chip can't handle DMA to/from >1GB, use bounce buffer */
                if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
                        dma_unmap_single(bp->sdev->dma_dev, mapping, len,
                                             DMA_TO_DEVICE);

                bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
                if (!bounce_skb)
                        goto err_out;

                mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
                                         len, DMA_TO_DEVICE);
                if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
                        if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
                                dma_unmap_single(bp->sdev->dma_dev, mapping,
                                                     len, DMA_TO_DEVICE);
                        dev_kfree_skb_any(bounce_skb);
                        goto err_out;
                }

                skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
                dev_consume_skb_any(skb);
                skb = bounce_skb;
        }

        entry = bp->tx_prod;
        bp->tx_buffers[entry].skb = skb;
        bp->tx_buffers[entry].mapping = mapping;

        ctrl  = (len & DESC_CTRL_LEN);
        ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
        if (entry == (B44_TX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
        bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
                                            entry * sizeof(bp->tx_ring[0]),
                                            DMA_TO_DEVICE);

        entry = NEXT_TX(entry);

        bp->tx_prod = entry;

        wmb();

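        /* Kick the DMA engine.  On chips with the buggy TX-pointer bug
         * the pointer write appears to need issuing twice, and on chips
         * that reorder posted writes a read-back forces the update out
         * before we return.
         */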
        bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_BUGGY_TXPTR)
                bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_REORDER_BUG)
                br32(bp, B44_DMATX_PTR);

        netdev_sent_queue(dev, skb->len);

        if (TX_BUFFS_AVAIL(bp) < 1)
                netif_stop_queue(dev);

out_unlock:
        spin_unlock_irqrestore(&bp->lock, flags);

        return rc;

err_out:
        rc = NETDEV_TX_BUSY;
        goto out_unlock;
}

static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
        struct b44 *bp = netdev_priv(dev);

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is brought up.
                 */
                dev->mtu = new_mtu;
                return 0;
        }

        spin_lock_irq(&bp->lock);
        b44_halt(bp);
        dev->mtu = new_mtu;
        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);
        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
        struct ring_info *rp;
        int i;

        for (i = 0; i < B44_RX_RING_SIZE; i++) {
                rp = &bp->rx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }

        /* XXX needs changes once NETIF_F_SG is set... */
        for (i = 0; i < B44_TX_RING_SIZE; i++) {
                rp = &bp->tx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
                                 DMA_TO_DEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
        int i;

        b44_free_rings(bp);

        memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
        memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
                                           DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
                                           DMA_TABLE_BYTES, DMA_TO_DEVICE);

        for (i = 0; i < bp->rx_pending; i++) {
                if (b44_alloc_rx_skb(bp, -1, i) < 0)
                        break;
        }
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
        kfree(bp->rx_buffers);
        bp->rx_buffers = NULL;
        kfree(bp->tx_buffers);
        bp->tx_buffers = NULL;
        if (bp->rx_ring) {
                if (bp->flags & B44_FLAG_RX_RING_HACK) {
                        dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
                                         DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
                        kfree(bp->rx_ring);
                } else
                        dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
                                          bp->rx_ring, bp->rx_ring_dma);
                bp->rx_ring = NULL;
                bp->flags &= ~B44_FLAG_RX_RING_HACK;
        }
        if (bp->tx_ring) {
                if (bp->flags & B44_FLAG_TX_RING_HACK) {
                        dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
                                         DMA_TABLE_BYTES, DMA_TO_DEVICE);
                        kfree(bp->tx_ring);
                } else
                        dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
                                          bp->tx_ring, bp->tx_ring_dma);
                bp->tx_ring = NULL;
                bp->flags &= ~B44_FLAG_TX_RING_HACK;
        }
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
{
        int size;

        size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
        bp->rx_buffers = kzalloc(size, gfp);
        if (!bp->rx_buffers)
                goto out_err;

        size = B44_TX_RING_SIZE * sizeof(struct ring_info);
        bp->tx_buffers = kzalloc(size, gfp);
        if (!bp->tx_buffers)
                goto out_err;

        size = DMA_TABLE_BYTES;
        bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
                                         &bp->rx_ring_dma, gfp);
        if (!bp->rx_ring) {
                /* Allocation may have failed due to dma_alloc_coherent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *rx_ring;
                dma_addr_t rx_ring_dma;

                rx_ring = kzalloc(size, gfp);
                if (!rx_ring)
                        goto out_err;

                rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
                                             DMA_TABLE_BYTES,
                                             DMA_BIDIRECTIONAL);

                if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
                        rx_ring_dma + size > DMA_BIT_MASK(30)) {
                        kfree(rx_ring);
                        goto out_err;
                }

                bp->rx_ring = rx_ring;
                bp->rx_ring_dma = rx_ring_dma;
                bp->flags |= B44_FLAG_RX_RING_HACK;
        }

        bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
                                         &bp->tx_ring_dma, gfp);
        if (!bp->tx_ring) {
                /* Allocation may have failed due to dma_alloc_coherent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *tx_ring;
                dma_addr_t tx_ring_dma;

                tx_ring = kzalloc(size, gfp);
                if (!tx_ring)
                        goto out_err;

                tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
                                             DMA_TABLE_BYTES,
                                             DMA_TO_DEVICE);

                if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
                        tx_ring_dma + size > DMA_BIT_MASK(30)) {
                        kfree(tx_ring);
                        goto out_err;
                }

                bp->tx_ring = tx_ring;
                bp->tx_ring_dma = tx_ring_dma;
                bp->flags |= B44_FLAG_TX_RING_HACK;
        }

        return 0;

out_err:
        b44_free_consistent(bp);
        return -ENOMEM;
}

/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
        unsigned long reg;

        bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
                br32(bp, reg);
        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
                br32(bp, reg);
}

/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp, int reset_kind)
{
        struct ssb_device *sdev = bp->sdev;
        bool was_enabled;

        was_enabled = ssb_device_is_enabled(bp->sdev);

        ssb_device_enable(bp->sdev, 0);
        ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);

        if (was_enabled) {
                bw32(bp, B44_RCV_LAZY, 0);
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
                b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
                bw32(bp, B44_DMATX_CTRL, 0);
                bp->tx_prod = bp->tx_cons = 0;
                if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
                        b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
                                     100, 0);
                }
                bw32(bp, B44_DMARX_CTRL, 0);
                bp->rx_prod = bp->rx_cons = 0;
        }

        b44_clear_stats(bp);

        /*
         * Don't enable the PHY if we are only doing a partial reset;
         * we are probably going to power down.
         */
        if (reset_kind == B44_CHIP_RESET_PARTIAL)
                return;

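        /* Program the MDIO clock divider: on native SSB the divisor is
         * derived from the measured bus clock so MDC lands near
         * B44_MDC_RATIO, while PCI parts use a fixed divisor of 0x0d.
         */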
        switch (sdev->bus->bustype) {
        case SSB_BUSTYPE_SSB:
                bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
                     (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
                                        B44_MDC_RATIO)
                     & MDIO_CTRL_MAXF_MASK)));
                break;
        case SSB_BUSTYPE_PCI:
                bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
                     (0x0d & MDIO_CTRL_MAXF_MASK)));
                break;
        case SSB_BUSTYPE_PCMCIA:
        case SSB_BUSTYPE_SDIO:
                WARN_ON(1); /* A device with this bus does not exist. */
                break;
        }

        br32(bp, B44_MDIO_CTRL);

        if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
                br32(bp, B44_ENET_CTRL);
                bp->flags |= B44_FLAG_EXTERNAL_PHY;
        } else {
                u32 val = br32(bp, B44_DEVCTRL);

                if (val & DEVCTRL_EPR) {
                        bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
                        br32(bp, B44_DEVCTRL);
                        udelay(100);
                }
                bp->flags &= ~B44_FLAG_EXTERNAL_PHY;
        }
}

/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
        b44_disable_ints(bp);
        /* reset PHY */
        b44_phy_reset(bp);
        /* power down PHY */
        netdev_info(bp->dev, "powering down PHY\n");
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
        /* now reset the chip, but without enabling the MAC & PHY
         * part of it. This has to be done _after_ we shut down the PHY */
        if (bp->flags & B44_FLAG_EXTERNAL_PHY)
                b44_chip_reset(bp, B44_CHIP_RESET_FULL);
        else
                b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}

/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
        bw32(bp, B44_CAM_CTRL, 0);
        if (!(bp->dev->flags & IFF_PROMISC)) {
                u32 val;

                __b44_cam_write(bp, bp->dev->dev_addr, 0);
                val = br32(bp, B44_CAM_CTRL);
                bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
        }
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
        struct b44 *bp = netdev_priv(dev);
        struct sockaddr *addr = p;
        u32 val;

        if (netif_running(dev))
                return -EBUSY;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EINVAL;

        eth_hw_addr_set(dev, addr->sa_data);

        spin_lock_irq(&bp->lock);

        val = br32(bp, B44_RXCONFIG);
        if (!(val & RXCONFIG_CAM_ABSENT))
                __b44_set_mac_addr(bp);

        spin_unlock_irq(&bp->lock);

        return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
        u32 val;

        b44_chip_reset(bp, B44_CHIP_RESET_FULL);
        if (reset_kind == B44_FULL_RESET) {
                b44_phy_reset(bp);
                b44_setup_phy(bp);
        }

        /* Enable CRC32, set proper LED modes and power on PHY */
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
        bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

        /* This sets the MAC address too.  */
        __b44_set_rx_mode(bp->dev);

        /* MTU + eth header + possible VLAN tag + struct rx_header */
        bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
        bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

        bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
        if (reset_kind == B44_PARTIAL_RESET) {
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
        } else {
                bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
                bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
                bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

                bw32(bp, B44_DMARX_PTR, bp->rx_pending);
                bp->rx_prod = bp->rx_pending;

                bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        }

        val = br32(bp, B44_ENET_CTRL);
        bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));

        netdev_reset_queue(bp->dev);
}

static int b44_open(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        int err;

        err = b44_alloc_consistent(bp, GFP_KERNEL);
        if (err)
                goto out;

        napi_enable(&bp->napi);

        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);

        b44_check_phy(bp);

        err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
        if (unlikely(err < 0)) {
                napi_disable(&bp->napi);
                b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
                b44_free_rings(bp);
                b44_free_consistent(bp);
                goto out;
        }

        timer_setup(&bp->timer, b44_timer, 0);
        bp->timer.expires = jiffies + HZ;
        add_timer(&bp->timer);

        b44_enable_ints(bp);

        if (bp->flags & B44_FLAG_EXTERNAL_PHY)
                phy_start(dev->phydev);

        netif_start_queue(dev);
out:
        return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
        disable_irq(dev->irq);
        b44_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
        u32 i;
        u32 *pattern = (u32 *) pp;

        for (i = 0; i < bytes; i += sizeof(u32)) {
                bw32(bp, B44_FILT_ADDR, table_offset + i);
                bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
        }
}

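/* Build a Wake-on-LAN magic-packet match starting at @offset: six
 * 0xff sync bytes followed by as many repetitions of the MAC address
 * as still fit in the B44_PATTERN_SIZE-byte pattern, with the
 * corresponding bits set in the byte mask.  Returns the pattern
 * length minus one, the encoding the B44_WKUP_LEN register expects.
 */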
1489 static int b44_magic_pattern(const u8 *macaddr, u8 *ppattern, u8 *pmask,
1490                              int offset)
1491 {
1492         int magicsync = 6;
1493         int k, j, len = offset;
1494         int ethaddr_bytes = ETH_ALEN;
1495
1496         memset(ppattern + offset, 0xff, magicsync);
1497         for (j = 0; j < magicsync; j++) {
1498                 pmask[len >> 3] |= BIT(len & 7);
1499                 len++;
1500         }
1501
1502         for (j = 0; j < B44_MAX_PATTERNS; j++) {
1503                 if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1504                         ethaddr_bytes = ETH_ALEN;
1505                 else
1506                         ethaddr_bytes = B44_PATTERN_SIZE - len;
1507                 if (ethaddr_bytes <= 0)
1508                         break;
1509                 for (k = 0; k < ethaddr_bytes; k++) {
1510                         ppattern[offset + magicsync +
1511                                 (j * ETH_ALEN) + k] = macaddr[k];
1512                         pmask[len >> 3] |= BIT(len & 7);
1513                         len++;
1514                 }
1515         }
1516         return len - 1;
1517 }
1518
1519 /* Setup magic packet patterns in the b44 WOL
1520  * pattern matching filter.
1521  */
1522 static void b44_setup_pseudo_magicp(struct b44 *bp)
1523 {
1524
1525         u32 val;
1526         int plen0, plen1, plen2;
1527         u8 *pwol_pattern;
1528         u8 pwol_mask[B44_PMASK_SIZE];
1529
1530         pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1531         if (!pwol_pattern)
1532                 return;
1533
1534         /* IPv4 magic packet pattern - pattern 0. */
1535         memset(pwol_mask, 0, B44_PMASK_SIZE);
1536         plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1537                                   B44_ETHIPV4UDP_HLEN);
1538
1539         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1540         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1541
1542         /* Raw Ethernet II magic packet pattern - pattern 1 */
1543         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1544         memset(pwol_mask, 0, B44_PMASK_SIZE);
1545         plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1546                                   ETH_HLEN);
1547
1548         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1549                        B44_PATTERN_BASE + B44_PATTERN_SIZE);
1550         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1551                        B44_PMASK_BASE + B44_PMASK_SIZE);
1552
1553         /* IPv6 magic packet pattern - pattern 2 */
1554         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1555         memset(pwol_mask, 0, B44_PMASK_SIZE);
1556         plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1557                                   B44_ETHIPV6UDP_HLEN);
1558
1559         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1560                        B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1561         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1562                        B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1563
1564         kfree(pwol_pattern);
1565
1566         /* set these pattern's lengths: one less than each real length */
1567         val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1568         bw32(bp, B44_WKUP_LEN, val);
1569
1570         /* enable wakeup pattern matching */
1571         val = br32(bp, B44_DEVCTRL);
1572         bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
1573
1574 }
1575
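/* On PCI-hosted cores, additionally set the power/PME enable bits so
 * the host bridge can deliver the wake event.
 */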
1576 #ifdef CONFIG_B44_PCI
1577 static void b44_setup_wol_pci(struct b44 *bp)
1578 {
1579         u16 val;
1580
1581         if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
1582                 bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
1583                 pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
1584                 pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
1585         }
1586 }
1587 #else
1588 static inline void b44_setup_wol_pci(struct b44 *bp) { }
1589 #endif /* CONFIG_B44_PCI */
1590
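/* Arm Wake-on-LAN: B0 and later cores can match magic packets in
 * hardware (DEVCTRL_MPM); older cores fall back to the pseudo-magic
 * pattern filter programmed by b44_setup_pseudo_magicp() above.
 */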
1591 static void b44_setup_wol(struct b44 *bp)
1592 {
1593         u32 val;
1594
1595         bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1596
1597         if (bp->flags & B44_FLAG_B0_ANDLATER) {
1598
1599                 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1600
1601                 val = bp->dev->dev_addr[2] << 24 |
1602                         bp->dev->dev_addr[3] << 16 |
1603                         bp->dev->dev_addr[4] << 8 |
1604                         bp->dev->dev_addr[5];
1605                 bw32(bp, B44_ADDR_LO, val);
1606
1607                 val = bp->dev->dev_addr[0] << 8 |
1608                         bp->dev->dev_addr[1];
1609                 bw32(bp, B44_ADDR_HI, val);
1610
1611                 val = br32(bp, B44_DEVCTRL);
1612                 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1613
1614         } else {
1615                 b44_setup_pseudo_magicp(bp);
1616         }
1617         b44_setup_wol_pci(bp);
1618 }
1619
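/* ndo_stop: quiesce NAPI and the timer, halt the chip and free the
 * rings under the lock, then re-initialize just enough hardware to
 * honour Wake-on-LAN if it was requested.
 */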
1620 static int b44_close(struct net_device *dev)
1621 {
1622         struct b44 *bp = netdev_priv(dev);
1623
1624         netif_stop_queue(dev);
1625
1626         if (bp->flags & B44_FLAG_EXTERNAL_PHY)
1627                 phy_stop(dev->phydev);
1628
1629         napi_disable(&bp->napi);
1630
1631         del_timer_sync(&bp->timer);
1632
1633         spin_lock_irq(&bp->lock);
1634
1635         b44_halt(bp);
1636         b44_free_rings(bp);
1637         netif_carrier_off(dev);
1638
1639         spin_unlock_irq(&bp->lock);
1640
1641         free_irq(dev->irq, dev);
1642
1643         if (bp->flags & B44_FLAG_WOL_ENABLE) {
1644                 b44_init_hw(bp, B44_PARTIAL_RESET);
1645                 b44_setup_wol(bp);
1646         }
1647
1648         b44_free_consistent(bp);
1649
1650         return 0;
1651 }
1652
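/* Snapshot the MIB-derived counters. The u64_stats begin/retry pair
 * guarantees a consistent view against a concurrent stats update.
 */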
1653 static void b44_get_stats64(struct net_device *dev,
1654                             struct rtnl_link_stats64 *nstat)
1655 {
1656         struct b44 *bp = netdev_priv(dev);
1657         struct b44_hw_stats *hwstat = &bp->hw_stats;
1658         unsigned int start;
1659
1660         do {
1661                 start = u64_stats_fetch_begin(&hwstat->syncp);
1662
1663                 /* Convert HW stats into rtnl_link_stats64 stats. */
1664                 nstat->rx_packets = hwstat->rx_pkts;
1665                 nstat->tx_packets = hwstat->tx_pkts;
1666                 nstat->rx_bytes   = hwstat->rx_octets;
1667                 nstat->tx_bytes   = hwstat->tx_octets;
1668                 nstat->tx_errors  = (hwstat->tx_jabber_pkts +
1669                                      hwstat->tx_oversize_pkts +
1670                                      hwstat->tx_underruns +
1671                                      hwstat->tx_excessive_cols +
1672                                      hwstat->tx_late_cols);
1673                 nstat->multicast  = hwstat->rx_multicast_pkts;
1674                 nstat->collisions = hwstat->tx_total_cols;
1675
1676                 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1677                                            hwstat->rx_undersize);
1678                 nstat->rx_over_errors   = hwstat->rx_missed_pkts;
1679                 nstat->rx_frame_errors  = hwstat->rx_align_errs;
1680                 nstat->rx_crc_errors    = hwstat->rx_crc_errs;
1681                 nstat->rx_errors        = (hwstat->rx_jabber_pkts +
1682                                            hwstat->rx_oversize_pkts +
1683                                            hwstat->rx_missed_pkts +
1684                                            hwstat->rx_crc_align_errs +
1685                                            hwstat->rx_undersize +
1686                                            hwstat->rx_crc_errs +
1687                                            hwstat->rx_align_errs +
1688                                            hwstat->rx_symbol_errs);
1689
1690                 nstat->tx_aborted_errors = hwstat->tx_underruns;
1691 #if 0
1692                 /* Carrier lost counter seems to be broken for some devices */
1693                 nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1694 #endif
1695         } while (u64_stats_fetch_retry(&hwstat->syncp, start));
1696
1697 }
1698
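/* Load up to B44_MCAST_TABLE_SIZE multicast addresses into the CAM,
 * starting at entry 1 (entry 0 holds the unicast address). Returns
 * the first CAM index left unused.
 */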
1699 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1700 {
1701         struct netdev_hw_addr *ha;
1702         int i, num_ents;
1703
1704         num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
1705         i = 0;
1706         netdev_for_each_mc_addr(ha, dev) {
1707                 if (i == num_ents)
1708                         break;
1709                 __b44_cam_write(bp, ha->addr, i++ + 1);
1710         }
1711         return i + 1;
1712 }
1713
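/* Program the receive filter; the caller must hold bp->lock. Falls
 * back to promiscuous mode when the CAM is absent, and clears any
 * stale CAM entries beyond the ones just written.
 */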
1714 static void __b44_set_rx_mode(struct net_device *dev)
1715 {
1716         struct b44 *bp = netdev_priv(dev);
1717         u32 val;
1718
1719         val = br32(bp, B44_RXCONFIG);
1720         val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1721         if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
1722                 val |= RXCONFIG_PROMISC;
1723                 bw32(bp, B44_RXCONFIG, val);
1724         } else {
1725                 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1726                 int i = 1;
1727
1728                 __b44_set_mac_addr(bp);
1729
1730                 if ((dev->flags & IFF_ALLMULTI) ||
1731                     (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
1732                         val |= RXCONFIG_ALLMULTI;
1733                 else
1734                         i = __b44_load_mcast(bp, dev);
1735
1736                 for (; i < 64; i++)
1737                         __b44_cam_write(bp, zero, i);
1738
1739                 bw32(bp, B44_RXCONFIG, val);
1740                 val = br32(bp, B44_CAM_CTRL);
1741                 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1742         }
1743 }
1744
1745 static void b44_set_rx_mode(struct net_device *dev)
1746 {
1747         struct b44 *bp = netdev_priv(dev);
1748
1749         spin_lock_irq(&bp->lock);
1750         __b44_set_rx_mode(dev);
1751         spin_unlock_irq(&bp->lock);
1752 }
1753
1754 static u32 b44_get_msglevel(struct net_device *dev)
1755 {
1756         struct b44 *bp = netdev_priv(dev);
1757         return bp->msg_enable;
1758 }
1759
1760 static void b44_set_msglevel(struct net_device *dev, u32 value)
1761 {
1762         struct b44 *bp = netdev_priv(dev);
1763         bp->msg_enable = value;
1764 }
1765
1766 static void b44_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1767 {
1768         struct b44 *bp = netdev_priv(dev);
1769         struct ssb_bus *bus = bp->sdev->bus;
1770
1771         strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1772         switch (bus->bustype) {
1773         case SSB_BUSTYPE_PCI:
1774                 strscpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
1775                 break;
1776         case SSB_BUSTYPE_SSB:
1777                 strscpy(info->bus_info, "SSB", sizeof(info->bus_info));
1778                 break;
1779         case SSB_BUSTYPE_PCMCIA:
1780         case SSB_BUSTYPE_SDIO:
1781                 WARN_ON(1); /* A device with this bus does not exist. */
1782                 break;
1783         }
1784 }
1785
1786 static int b44_nway_reset(struct net_device *dev)
1787 {
1788         struct b44 *bp = netdev_priv(dev);
1789         u32 bmcr;
1790         int r;
1791
1792         spin_lock_irq(&bp->lock);
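        /* Read BMCR twice: the first read may return stale data and
         * only the second value is used (the double read appears
         * intentional, presumably working around a PHY quirk).
         */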
1793         b44_readphy(bp, MII_BMCR, &bmcr);
1794         b44_readphy(bp, MII_BMCR, &bmcr);
1795         r = -EINVAL;
1796         if (bmcr & BMCR_ANENABLE)
1797                 r = b44_writephy(bp, MII_BMCR,
1798                                  bmcr | BMCR_ANRESTART);
1799         spin_unlock_irq(&bp->lock);
1800
1801         return r;
1802 }
1803
1804 static int b44_get_link_ksettings(struct net_device *dev,
1805                                   struct ethtool_link_ksettings *cmd)
1806 {
1807         struct b44 *bp = netdev_priv(dev);
1808         u32 supported, advertising;
1809
1810         if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
1811                 BUG_ON(!dev->phydev);
1812                 phy_ethtool_ksettings_get(dev->phydev, cmd);
1813
1814                 return 0;
1815         }
1816
1817         supported = (SUPPORTED_Autoneg);
1818         supported |= (SUPPORTED_100baseT_Half |
1819                       SUPPORTED_100baseT_Full |
1820                       SUPPORTED_10baseT_Half |
1821                       SUPPORTED_10baseT_Full |
1822                       SUPPORTED_MII);
1823
1824         advertising = 0;
1825         if (bp->flags & B44_FLAG_ADV_10HALF)
1826                 advertising |= ADVERTISED_10baseT_Half;
1827         if (bp->flags & B44_FLAG_ADV_10FULL)
1828                 advertising |= ADVERTISED_10baseT_Full;
1829         if (bp->flags & B44_FLAG_ADV_100HALF)
1830                 advertising |= ADVERTISED_100baseT_Half;
1831         if (bp->flags & B44_FLAG_ADV_100FULL)
1832                 advertising |= ADVERTISED_100baseT_Full;
1833         advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1834         cmd->base.speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1835                 SPEED_100 : SPEED_10;
1836         cmd->base.duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1837                 DUPLEX_FULL : DUPLEX_HALF;
1838         cmd->base.port = 0;
1839         cmd->base.phy_address = bp->phy_addr;
1840         cmd->base.autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1841                 AUTONEG_DISABLE : AUTONEG_ENABLE;
1842         if (cmd->base.autoneg == AUTONEG_ENABLE)
1843                 advertising |= ADVERTISED_Autoneg;
1844
1845         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1846                                                 supported);
1847         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1848                                                 advertising);
1849
1850         if (!netif_running(dev)) {
1851                 cmd->base.speed = 0;
1852                 cmd->base.duplex = 0xff;
1853         }
1854
1855         return 0;
1856 }
1857
1858 static int b44_set_link_ksettings(struct net_device *dev,
1859                                   const struct ethtool_link_ksettings *cmd)
1860 {
1861         struct b44 *bp = netdev_priv(dev);
1862         u32 speed;
1863         int ret;
1864         u32 advertising;
1865
1866         if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
1867                 BUG_ON(!dev->phydev);
1868                 spin_lock_irq(&bp->lock);
1869                 if (netif_running(dev))
1870                         b44_setup_phy(bp);
1871
1872                 ret = phy_ethtool_ksettings_set(dev->phydev, cmd);
1873
1874                 spin_unlock_irq(&bp->lock);
1875
1876                 return ret;
1877         }
1878
1879         speed = cmd->base.speed;
1880
1881         ethtool_convert_link_mode_to_legacy_u32(&advertising,
1882                                                 cmd->link_modes.advertising);
1883
1884         /* We do not support gigabit. */
1885         if (cmd->base.autoneg == AUTONEG_ENABLE) {
1886                 if (advertising &
1887                     (ADVERTISED_1000baseT_Half |
1888                      ADVERTISED_1000baseT_Full))
1889                         return -EINVAL;
1890         } else if ((speed != SPEED_100 &&
1891                     speed != SPEED_10) ||
1892                    (cmd->base.duplex != DUPLEX_HALF &&
1893                     cmd->base.duplex != DUPLEX_FULL)) {
1894                 return -EINVAL;
1895         }
1896
1897         spin_lock_irq(&bp->lock);
1898
1899         if (cmd->base.autoneg == AUTONEG_ENABLE) {
1900                 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1901                                B44_FLAG_100_BASE_T |
1902                                B44_FLAG_FULL_DUPLEX |
1903                                B44_FLAG_ADV_10HALF |
1904                                B44_FLAG_ADV_10FULL |
1905                                B44_FLAG_ADV_100HALF |
1906                                B44_FLAG_ADV_100FULL);
1907                 if (advertising == 0) {
1908                         bp->flags |= (B44_FLAG_ADV_10HALF |
1909                                       B44_FLAG_ADV_10FULL |
1910                                       B44_FLAG_ADV_100HALF |
1911                                       B44_FLAG_ADV_100FULL);
1912                 } else {
1913                         if (advertising & ADVERTISED_10baseT_Half)
1914                                 bp->flags |= B44_FLAG_ADV_10HALF;
1915                         if (advertising & ADVERTISED_10baseT_Full)
1916                                 bp->flags |= B44_FLAG_ADV_10FULL;
1917                         if (advertising & ADVERTISED_100baseT_Half)
1918                                 bp->flags |= B44_FLAG_ADV_100HALF;
1919                         if (advertising & ADVERTISED_100baseT_Full)
1920                                 bp->flags |= B44_FLAG_ADV_100FULL;
1921                 }
1922         } else {
1923                 bp->flags |= B44_FLAG_FORCE_LINK;
1924                 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1925                 if (speed == SPEED_100)
1926                         bp->flags |= B44_FLAG_100_BASE_T;
1927                 if (cmd->base.duplex == DUPLEX_FULL)
1928                         bp->flags |= B44_FLAG_FULL_DUPLEX;
1929         }
1930
1931         if (netif_running(dev))
1932                 b44_setup_phy(bp);
1933
1934         spin_unlock_irq(&bp->lock);
1935
1936         return 0;
1937 }
1938
1939 static void b44_get_ringparam(struct net_device *dev,
1940                               struct ethtool_ringparam *ering,
1941                               struct kernel_ethtool_ringparam *kernel_ering,
1942                               struct netlink_ext_ack *extack)
1943 {
1944         struct b44 *bp = netdev_priv(dev);
1945
1946         ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1947         ering->rx_pending = bp->rx_pending;
1948
1949         ering->tx_max_pending = B44_TX_RING_SIZE - 1;
1950         ering->tx_pending = bp->tx_pending;
1950 }
1951
1952 static int b44_set_ringparam(struct net_device *dev,
1953                              struct ethtool_ringparam *ering,
1954                              struct kernel_ethtool_ringparam *kernel_ering,
1955                              struct netlink_ext_ack *extack)
1956 {
1957         struct b44 *bp = netdev_priv(dev);
1958
1959         if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1960             (ering->rx_mini_pending != 0) ||
1961             (ering->rx_jumbo_pending != 0) ||
1962             (ering->tx_pending > B44_TX_RING_SIZE - 1))
1963                 return -EINVAL;
1964
1965         spin_lock_irq(&bp->lock);
1966
1967         bp->rx_pending = ering->rx_pending;
1968         bp->tx_pending = ering->tx_pending;
1969
1970         b44_halt(bp);
1971         b44_init_rings(bp);
1972         b44_init_hw(bp, B44_FULL_RESET);
1973         netif_wake_queue(bp->dev);
1974         spin_unlock_irq(&bp->lock);
1975
1976         b44_enable_ints(bp);
1977
1978         return 0;
1979 }
1980
1981 static void b44_get_pauseparam(struct net_device *dev,
1982                                 struct ethtool_pauseparam *epause)
1983 {
1984         struct b44 *bp = netdev_priv(dev);
1985
1986         epause->autoneg =
1987                 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1988         epause->rx_pause =
1989                 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1990         epause->tx_pause =
1991                 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1992 }
1993
1994 static int b44_set_pauseparam(struct net_device *dev,
1995                                 struct ethtool_pauseparam *epause)
1996 {
1997         struct b44 *bp = netdev_priv(dev);
1998
1999         spin_lock_irq(&bp->lock);
2000         if (epause->autoneg)
2001                 bp->flags |= B44_FLAG_PAUSE_AUTO;
2002         else
2003                 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
2004         if (epause->rx_pause)
2005                 bp->flags |= B44_FLAG_RX_PAUSE;
2006         else
2007                 bp->flags &= ~B44_FLAG_RX_PAUSE;
2008         if (epause->tx_pause)
2009                 bp->flags |= B44_FLAG_TX_PAUSE;
2010         else
2011                 bp->flags &= ~B44_FLAG_TX_PAUSE;
2012         if (bp->flags & B44_FLAG_PAUSE_AUTO) {
2013                 b44_halt(bp);
2014                 b44_init_rings(bp);
2015                 b44_init_hw(bp, B44_FULL_RESET);
2016         } else {
2017                 __b44_set_flow_ctrl(bp, bp->flags);
2018         }
2019         spin_unlock_irq(&bp->lock);
2020
2021         b44_enable_ints(bp);
2022
2023         return 0;
2024 }
2025
2026 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2027 {
2028         switch (stringset) {
2029         case ETH_SS_STATS:
2030                 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
2031                 break;
2032         }
2033 }
2034
2035 static int b44_get_sset_count(struct net_device *dev, int sset)
2036 {
2037         switch (sset) {
2038         case ETH_SS_STATS:
2039                 return ARRAY_SIZE(b44_gstrings);
2040         default:
2041                 return -EOPNOTSUPP;
2042         }
2043 }
2044
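/* The copy below assumes the u64 counters in struct b44_hw_stats are
 * laid out contiguously, in the same order as b44_gstrings[].
 */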
2045 static void b44_get_ethtool_stats(struct net_device *dev,
2046                                   struct ethtool_stats *stats, u64 *data)
2047 {
2048         struct b44 *bp = netdev_priv(dev);
2049         struct b44_hw_stats *hwstat = &bp->hw_stats;
2050         u64 *data_src, *data_dst;
2051         unsigned int start;
2052         u32 i;
2053
2054         spin_lock_irq(&bp->lock);
2055         b44_stats_update(bp);
2056         spin_unlock_irq(&bp->lock);
2057
2058         do {
2059                 data_src = &hwstat->tx_good_octets;
2060                 data_dst = data;
2061                 start = u64_stats_fetch_begin(&hwstat->syncp);
2062
2063                 for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
2064                         *data_dst++ = *data_src++;
2065
2066         } while (u64_stats_fetch_retry(&hwstat->syncp, start));
2067 }
2068
2069 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2070 {
2071         struct b44 *bp = netdev_priv(dev);
2072
2073         wol->supported = WAKE_MAGIC;
2074         if (bp->flags & B44_FLAG_WOL_ENABLE)
2075                 wol->wolopts = WAKE_MAGIC;
2076         else
2077                 wol->wolopts = 0;
2078         memset(&wol->sopass, 0, sizeof(wol->sopass));
2079 }
2080
2081 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2082 {
2083         struct b44 *bp = netdev_priv(dev);
2084
2085         spin_lock_irq(&bp->lock);
2086         if (wol->wolopts & WAKE_MAGIC)
2087                 bp->flags |= B44_FLAG_WOL_ENABLE;
2088         else
2089                 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2090         spin_unlock_irq(&bp->lock);
2091
2092         device_set_wakeup_enable(bp->sdev->dev, wol->wolopts & WAKE_MAGIC);
2093         return 0;
2094 }
2095
2096 static const struct ethtool_ops b44_ethtool_ops = {
2097         .get_drvinfo            = b44_get_drvinfo,
2098         .nway_reset             = b44_nway_reset,
2099         .get_link               = ethtool_op_get_link,
2100         .get_wol                = b44_get_wol,
2101         .set_wol                = b44_set_wol,
2102         .get_ringparam          = b44_get_ringparam,
2103         .set_ringparam          = b44_set_ringparam,
2104         .get_pauseparam         = b44_get_pauseparam,
2105         .set_pauseparam         = b44_set_pauseparam,
2106         .get_msglevel           = b44_get_msglevel,
2107         .set_msglevel           = b44_set_msglevel,
2108         .get_strings            = b44_get_strings,
2109         .get_sset_count         = b44_get_sset_count,
2110         .get_ethtool_stats      = b44_get_ethtool_stats,
2111         .get_link_ksettings     = b44_get_link_ksettings,
2112         .set_link_ksettings     = b44_set_link_ksettings,
2113 };
2114
2115 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2116 {
2117         struct b44 *bp = netdev_priv(dev);
2118         int err = -EINVAL;
2119
2120         if (!netif_running(dev))
2121                 goto out;
2122
2123         spin_lock_irq(&bp->lock);
2124         if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
2125                 BUG_ON(!dev->phydev);
2126                 err = phy_mii_ioctl(dev->phydev, ifr, cmd);
2127         } else {
2128                 err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL);
2129         }
2130         spin_unlock_irq(&bp->lock);
2131 out:
2132         return err;
2133 }
2134
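/* Pull board-specific parameters out of the SPROM: the MAC address
 * and PHY address come from the et1 slots for the second core on an
 * SSB bus, and from the et0 slots otherwise.
 */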
2135 static int b44_get_invariants(struct b44 *bp)
2136 {
2137         struct ssb_device *sdev = bp->sdev;
2138         int err = 0;
2139         u8 *addr;
2140
2141         bp->dma_offset = ssb_dma_translation(sdev);
2142
2143         if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
2144             instance > 1) {
2145                 addr = sdev->bus->sprom.et1mac;
2146                 bp->phy_addr = sdev->bus->sprom.et1phyaddr;
2147         } else {
2148                 addr = sdev->bus->sprom.et0mac;
2149                 bp->phy_addr = sdev->bus->sprom.et0phyaddr;
2150         }
2151         /* Some ROMs have buggy PHY addresses with the high
2152          * bits set (sign extension?). Truncate them to a
2153          * valid PHY address. */
2154         bp->phy_addr &= 0x1F;
2155
2156         eth_hw_addr_set(bp->dev, addr);
2157
2158         if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
2159                 pr_err("Invalid MAC address found in EEPROM\n");
2160                 return -EINVAL;
2161         }
2162
2163         bp->imask = IMASK_DEF;
2164
2165         /* XXX - really required?
2166            bp->flags |= B44_FLAG_BUGGY_TXPTR;
2167         */
2168
2169         if (bp->sdev->id.revision >= 7)
2170                 bp->flags |= B44_FLAG_B0_ANDLATER;
2171
2172         return err;
2173 }
2174
2175 static const struct net_device_ops b44_netdev_ops = {
2176         .ndo_open               = b44_open,
2177         .ndo_stop               = b44_close,
2178         .ndo_start_xmit         = b44_start_xmit,
2179         .ndo_get_stats64        = b44_get_stats64,
2180         .ndo_set_rx_mode        = b44_set_rx_mode,
2181         .ndo_set_mac_address    = b44_set_mac_addr,
2182         .ndo_validate_addr      = eth_validate_addr,
2183         .ndo_eth_ioctl          = b44_ioctl,
2184         .ndo_tx_timeout         = b44_tx_timeout,
2185         .ndo_change_mtu         = b44_change_mtu,
2186 #ifdef CONFIG_NET_POLL_CONTROLLER
2187         .ndo_poll_controller    = b44_poll_controller,
2188 #endif
2189 };
2190
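/* phylib link-change callback for external PHYs: mirror the PHY's
 * duplex setting into the MAC's TX_CTRL register when it changes.
 */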
2191 static void b44_adjust_link(struct net_device *dev)
2192 {
2193         struct b44 *bp = netdev_priv(dev);
2194         struct phy_device *phydev = dev->phydev;
2195         bool status_changed = false;
2196
2197         BUG_ON(!phydev);
2198
2199         if (bp->old_link != phydev->link) {
2200                 status_changed = true;
2201                 bp->old_link = phydev->link;
2202         }
2203
2204         /* reflect duplex change */
2205         if (phydev->link) {
2206                 if ((phydev->duplex == DUPLEX_HALF) &&
2207                     (bp->flags & B44_FLAG_FULL_DUPLEX)) {
2208                         status_changed = true;
2209                         bp->flags &= ~B44_FLAG_FULL_DUPLEX;
2210                 } else if ((phydev->duplex == DUPLEX_FULL) &&
2211                            !(bp->flags & B44_FLAG_FULL_DUPLEX)) {
2212                         status_changed = true;
2213                         bp->flags |= B44_FLAG_FULL_DUPLEX;
2214                 }
2215         }
2216
2217         if (status_changed) {
2218                 u32 val = br32(bp, B44_TX_CTRL);
2219                 if (bp->flags & B44_FLAG_FULL_DUPLEX)
2220                         val |= TX_CTRL_DUPLEX;
2221                 else
2222                         val &= ~TX_CTRL_DUPLEX;
2223                 bw32(bp, B44_TX_CTRL, val);
2224                 phy_print_status(phydev);
2225         }
2226 }
2227
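/* Register a private MDIO bus and attach the external PHY through
 * phylib. Boards with a ROBO/ADM switch and no PHY at the expected
 * address are wired up to the fixed PHY instead.
 */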
2228 static int b44_register_phy_one(struct b44 *bp)
2229 {
2230         __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
2231         struct mii_bus *mii_bus;
2232         struct ssb_device *sdev = bp->sdev;
2233         struct phy_device *phydev;
2234         char bus_id[MII_BUS_ID_SIZE + 3];
2235         struct ssb_sprom *sprom = &sdev->bus->sprom;
2236         int err;
2237
2238         mii_bus = mdiobus_alloc();
2239         if (!mii_bus) {
2240                 dev_err(sdev->dev, "mdiobus_alloc() failed\n");
2241                 err = -ENOMEM;
2242                 goto err_out;
2243         }
2244
2245         mii_bus->priv = bp;
2246         mii_bus->read = b44_mdio_read_phylib;
2247         mii_bus->write = b44_mdio_write_phylib;
2248         mii_bus->name = "b44_eth_mii";
2249         mii_bus->parent = sdev->dev;
2250         mii_bus->phy_mask = ~(1 << bp->phy_addr);
2251         snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%x", instance);
2252
2253         bp->mii_bus = mii_bus;
2254
2255         err = mdiobus_register(mii_bus);
2256         if (err) {
2257                 dev_err(sdev->dev, "failed to register MII bus\n");
2258                 goto err_out_mdiobus;
2259         }
2260
2261         if (!mdiobus_is_registered_device(bp->mii_bus, bp->phy_addr) &&
2262             (sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM))) {
2263
2264                 dev_info(sdev->dev,
2265                          "could not find PHY at %i, use fixed one\n",
2266                          bp->phy_addr);
2267
2268                 bp->phy_addr = 0;
2269                 snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, "fixed-0",
2270                          bp->phy_addr);
2271         } else {
2272                 snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
2273                          bp->phy_addr);
2274         }
2275
2276         phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link,
2277                              PHY_INTERFACE_MODE_MII);
2278         if (IS_ERR(phydev)) {
2279                 dev_err(sdev->dev, "could not attach PHY at %i\n",
2280                         bp->phy_addr);
2281                 err = PTR_ERR(phydev);
2282                 goto err_out_mdiobus_unregister;
2283         }
2284
2285         /* mask with MAC supported features */
2286         linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
2287         linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
2288         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
2289         linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
2290         linkmode_and(phydev->supported, phydev->supported, mask);
2291         linkmode_copy(phydev->advertising, phydev->supported);
2292
2293         bp->old_link = 0;
2294         bp->phy_addr = phydev->mdio.addr;
2295
2296         phy_attached_info(phydev);
2297
2298         return 0;
2299
2300 err_out_mdiobus_unregister:
2301         mdiobus_unregister(mii_bus);
2302
2303 err_out_mdiobus:
2304         mdiobus_free(mii_bus);
2305
2306 err_out:
2307         return err;
2308 }
2309
2310 static void b44_unregister_phy_one(struct b44 *bp)
2311 {
2312         struct net_device *dev = bp->dev;
2313         struct mii_bus *mii_bus = bp->mii_bus;
2314
2315         phy_disconnect(dev->phydev);
2316         mdiobus_unregister(mii_bus);
2317         mdiobus_free(mii_bus);
2318 }
2319
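/* ssb probe: allocate the netdev, power up the bus, set the 30-bit
 * DMA mask the core requires, and register with the network stack
 * before resetting the chip and probing the PHY.
 */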
2320 static int b44_init_one(struct ssb_device *sdev,
2321                         const struct ssb_device_id *ent)
2322 {
2323         struct net_device *dev;
2324         struct b44 *bp;
2325         int err;
2326
2327         instance++;
2328
2329         dev = alloc_etherdev(sizeof(*bp));
2330         if (!dev) {
2331                 err = -ENOMEM;
2332                 goto out;
2333         }
2334
2335         SET_NETDEV_DEV(dev, sdev->dev);
2336
2337         /* No interesting netdevice features in this card... */
2338         dev->features |= 0;
2339
2340         bp = netdev_priv(dev);
2341         bp->sdev = sdev;
2342         bp->dev = dev;
2343         bp->force_copybreak = 0;
2344
2345         bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2346
2347         spin_lock_init(&bp->lock);
2348         u64_stats_init(&bp->hw_stats.syncp);
2349
2350         bp->rx_pending = B44_DEF_RX_RING_PENDING;
2351         bp->tx_pending = B44_DEF_TX_RING_PENDING;
2352
2353         dev->netdev_ops = &b44_netdev_ops;
2354         netif_napi_add(dev, &bp->napi, b44_poll);
2355         dev->watchdog_timeo = B44_TX_TIMEOUT;
2356         dev->min_mtu = B44_MIN_MTU;
2357         dev->max_mtu = B44_MAX_MTU;
2358         dev->irq = sdev->irq;
2359         dev->ethtool_ops = &b44_ethtool_ops;
2360
2361         err = ssb_bus_powerup(sdev->bus, 0);
2362         if (err) {
2363                 dev_err(sdev->dev,
2364                         "Failed to powerup the bus\n");
2365                 goto err_out_free_dev;
2366         }
2367
2368         err = dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30));
2369         if (err) {
2370                 dev_err(sdev->dev,
2371                         "Required 30BIT DMA mask unsupported by the system\n");
2372                 goto err_out_powerdown;
2373         }
2374
2375         err = b44_get_invariants(bp);
2376         if (err) {
2377                 dev_err(sdev->dev,
2378                         "Problem fetching invariants of chip, aborting\n");
2379                 goto err_out_powerdown;
2380         }
2381
2382         if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
2383                 dev_err(sdev->dev, "No PHY present on this MAC, aborting\n");
2384                 err = -ENODEV;
2385                 goto err_out_powerdown;
2386         }
2387
2388         bp->mii_if.dev = dev;
2389         bp->mii_if.mdio_read = b44_mdio_read_mii;
2390         bp->mii_if.mdio_write = b44_mdio_write_mii;
2391         bp->mii_if.phy_id = bp->phy_addr;
2392         bp->mii_if.phy_id_mask = 0x1f;
2393         bp->mii_if.reg_num_mask = 0x1f;
2394
2395         /* By default, advertise all speed/duplex settings. */
2396         bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2397                       B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2398
2399         /* By default, auto-negotiate PAUSE. */
2400         bp->flags |= B44_FLAG_PAUSE_AUTO;
2401
2402         err = register_netdev(dev);
2403         if (err) {
2404                 dev_err(sdev->dev, "Cannot register net device, aborting\n");
2405                 goto err_out_powerdown;
2406         }
2407
2408         netif_carrier_off(dev);
2409
2410         ssb_set_drvdata(sdev, dev);
2411
2412         /* Chip reset provides power to the b44 MAC & PCI cores, which
2413          * is necessary for MAC register access.
2414          */
2415         b44_chip_reset(bp, B44_CHIP_RESET_FULL);
2416
2417         /* do a phy reset to test if there is an active phy */
2418         err = b44_phy_reset(bp);
2419         if (err < 0) {
2420                 dev_err(sdev->dev, "phy reset failed\n");
2421                 goto err_out_unregister_netdev;
2422         }
2423
2424         if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
2425                 err = b44_register_phy_one(bp);
2426                 if (err) {
2427                         dev_err(sdev->dev, "Cannot register PHY, aborting\n");
2428                         goto err_out_unregister_netdev;
2429                 }
2430         }
2431
2432         device_set_wakeup_capable(sdev->dev, true);
2433         netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);
2434
2435         return 0;
2436
2437 err_out_unregister_netdev:
2438         unregister_netdev(dev);
2439 err_out_powerdown:
2440         ssb_bus_may_powerdown(sdev->bus);
2441
2442 err_out_free_dev:
2443         netif_napi_del(&bp->napi);
2444         free_netdev(dev);
2445
2446 out:
2447         return err;
2448 }
2449
2450 static void b44_remove_one(struct ssb_device *sdev)
2451 {
2452         struct net_device *dev = ssb_get_drvdata(sdev);
2453         struct b44 *bp = netdev_priv(dev);
2454
2455         unregister_netdev(dev);
2456         if (bp->flags & B44_FLAG_EXTERNAL_PHY)
2457                 b44_unregister_phy_one(bp);
2458         ssb_device_disable(sdev, 0);
2459         ssb_bus_may_powerdown(sdev->bus);
2460         netif_napi_del(&bp->napi);
2461         free_netdev(dev);
2462         ssb_pcihost_set_power_state(sdev, PCI_D3hot);
2463         ssb_set_drvdata(sdev, NULL);
2464 }
2465
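/* Suspend/resume: halt the chip (arming Wake-on-LAN if requested) on
 * the way down; on the way up, the IRQ is re-requested only after
 * b44_init_hw() so a shared-interrupt handler finds live registers.
 */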
2466 static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
2467 {
2468         struct net_device *dev = ssb_get_drvdata(sdev);
2469         struct b44 *bp = netdev_priv(dev);
2470
2471         if (!netif_running(dev))
2472                 return 0;
2473
2474         del_timer_sync(&bp->timer);
2475
2476         spin_lock_irq(&bp->lock);
2477
2478         b44_halt(bp);
2479         netif_carrier_off(bp->dev);
2480         netif_device_detach(bp->dev);
2481         b44_free_rings(bp);
2482
2483         spin_unlock_irq(&bp->lock);
2484
2485         free_irq(dev->irq, dev);
2486         if (bp->flags & B44_FLAG_WOL_ENABLE) {
2487                 b44_init_hw(bp, B44_PARTIAL_RESET);
2488                 b44_setup_wol(bp);
2489         }
2490
2491         ssb_pcihost_set_power_state(sdev, PCI_D3hot);
2492         return 0;
2493 }
2494
2495 static int b44_resume(struct ssb_device *sdev)
2496 {
2497         struct net_device *dev = ssb_get_drvdata(sdev);
2498         struct b44 *bp = netdev_priv(dev);
2499         int rc = 0;
2500
2501         rc = ssb_bus_powerup(sdev->bus, 0);
2502         if (rc) {
2503                 dev_err(sdev->dev,
2504                         "Failed to powerup the bus\n");
2505                 return rc;
2506         }
2507
2508         if (!netif_running(dev))
2509                 return 0;
2510
2511         spin_lock_irq(&bp->lock);
2512         b44_init_rings(bp);
2513         b44_init_hw(bp, B44_FULL_RESET);
2514         spin_unlock_irq(&bp->lock);
2515
2516         /*
2517          * As a shared interrupt, the handler can be called immediately. To be
2518          * able to check the interrupt status the hardware must already be
2519          * powered back on (b44_init_hw).
2520          */
2521         rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2522         if (rc) {
2523                 netdev_err(dev, "request_irq failed\n");
2524                 spin_lock_irq(&bp->lock);
2525                 b44_halt(bp);
2526                 b44_free_rings(bp);
2527                 spin_unlock_irq(&bp->lock);
2528                 return rc;
2529         }
2530
2531         netif_device_attach(bp->dev);
2532
2533         b44_enable_ints(bp);
2534         netif_wake_queue(dev);
2535
2536         mod_timer(&bp->timer, jiffies + 1);
2537
2538         return 0;
2539 }
2540
2541 static struct ssb_driver b44_ssb_driver = {
2542         .name           = DRV_MODULE_NAME,
2543         .id_table       = b44_ssb_tbl,
2544         .probe          = b44_init_one,
2545         .remove         = b44_remove_one,
2546         .suspend        = b44_suspend,
2547         .resume         = b44_resume,
2548 };
2549
2550 static inline int __init b44_pci_init(void)
2551 {
2552         int err = 0;
2553 #ifdef CONFIG_B44_PCI
2554         err = ssb_pcihost_register(&b44_pci_driver);
2555 #endif
2556         return err;
2557 }
2558
2559 static inline void b44_pci_exit(void)
2560 {
2561 #ifdef CONFIG_B44_PCI
2562         ssb_pcihost_unregister(&b44_pci_driver);
2563 #endif
2564 }
2565
2566 static int __init b44_init(void)
2567 {
2568         unsigned int dma_desc_align_size = dma_get_cache_alignment();
2569         int err;
2570
2571         /* Setup parameters for syncing RX/TX DMA descriptors */
2572         dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2573
2574         err = b44_pci_init();
2575         if (err)
2576                 return err;
2577         err = ssb_driver_register(&b44_ssb_driver);
2578         if (err)
2579                 b44_pci_exit();
2580         return err;
2581 }
2582
2583 static void __exit b44_cleanup(void)
2584 {
2585         ssb_driver_unregister(&b44_ssb_driver);
2586         b44_pci_exit();
2587 }
2588
2589 module_init(b44_init);
2590 module_exit(b44_cleanup);
2591