// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for BCM963xx builtin Ethernet mac
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>

#include <bcm63xx_dev_enet.h>
#include "bcm63xx_enet.h"

static char bcm_enet_driver_name[] = "bcm63xx_enet";

static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");
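
/* Received frames shorter than copybreak bytes are memcpy'd into a freshly
 * allocated skb on the rx path, so the original dma buffer can be rearmed in
 * place instead of being unmapped and reallocated; see
 * bcm_enet_receive_queue(). */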

/* io registers memory shared between all devices */
static void __iomem *bcm_enet_shared_base[3];
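/* index 0 is the dma engine's global config space, index 1 the per-channel
 * config space, and index 2 the per-channel state ram, matching the
 * enet_dma_*, enet_dmac_* and enet_dmas_* helpers below */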

/*
 * io helpers to access mac registers
 */
static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(priv->base + off);
}

static inline void enet_writel(struct bcm_enet_priv *priv,
			       u32 val, u32 off)
{
	bcm_writel(val, priv->base + off);
}

/*
 * io helpers to access switch registers
 */
static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(priv->base + off);
}

static inline void enetsw_writel(struct bcm_enet_priv *priv,
				 u32 val, u32 off)
{
	bcm_writel(val, priv->base + off);
}

static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readw(priv->base + off);
}

static inline void enetsw_writew(struct bcm_enet_priv *priv,
				 u16 val, u32 off)
{
	bcm_writew(val, priv->base + off);
}

static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readb(priv->base + off);
}

static inline void enetsw_writeb(struct bcm_enet_priv *priv,
				 u8 val, u32 off)
{
	bcm_writeb(val, priv->base + off);
}

/* io helpers to access shared registers */
static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(bcm_enet_shared_base[0] + off);
}

static inline void enet_dma_writel(struct bcm_enet_priv *priv,
				   u32 val, u32 off)
{
	bcm_writel(val, bcm_enet_shared_base[0] + off);
}

static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan)
{
	return bcm_readl(bcm_enet_shared_base[1] +
			 bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
}

static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
				    u32 val, u32 off, int chan)
{
	bcm_writel(val, bcm_enet_shared_base[1] +
		   bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
}
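
/* note: per-channel config accesses go through bcm63xx_enetdmacreg(), which
 * translates the generic register enum into the chip-specific layout */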

static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan)
{
	return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
}

static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
				    u32 val, u32 off, int chan)
{
	bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
}

/*
 * write given data into mii register and wait for transfer to end
 * with timeout (average measured transfer time is 25us)
 */
static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
{
	int limit;

	/* make sure mii interrupt status is cleared */
	enet_writel(priv, ENET_IR_MII, ENET_IR_REG);

	enet_writel(priv, data, ENET_MIIDATA_REG);
	wmb();

	/* busy wait on mii interrupt bit, with timeout */
	limit = 1000;
	do {
		if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
			break;
		udelay(1);
	} while (limit-- > 0);

	return (limit < 0) ? 1 : 0;
}

/*
 * MII internal read callback
 */
static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
			      int regnum)
{
	u32 tmp, val;

	tmp = regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_READ_MASK;

	if (do_mdio_op(priv, tmp))
		return -1;

	val = enet_readl(priv, ENET_MIIDATA_REG);
	val &= 0xffff;
	return val;
}

/*
 * MII internal write callback
 */
static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
			       int regnum, u16 value)
{
	u32 tmp;

	tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_WRITE_MASK;

	(void)do_mdio_op(priv, tmp);
	return 0;
}

/*
 * MII read callback from phylib
 */
static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
				     int regnum)
{
	return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
}

/*
 * MII write callback from phylib
 */
static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
				      int regnum, u16 value)
{
	return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
}

/*
 * MII read callback from mii core
 */
static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
				  int regnum)
{
	return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
}

/*
 * MII write callback from mii core
 */
static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
				    int regnum, int value)
{
	bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
}

/*
 * refill rx queue
 */
static int bcm_enet_refill_rx(struct net_device *dev, bool napi_mode)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	while (priv->rx_desc_count < priv->rx_ring_size) {
		struct bcm_enet_desc *desc;
		int desc_idx;
		u32 len_stat;

		desc_idx = priv->rx_dirty_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		if (!priv->rx_buf[desc_idx]) {
			void *buf;

			if (likely(napi_mode))
				buf = napi_alloc_frag(priv->rx_frag_size);
			else
				buf = netdev_alloc_frag(priv->rx_frag_size);
			if (unlikely(!buf))
				break;
			priv->rx_buf[desc_idx] = buf;
			desc->address = dma_map_single(&priv->pdev->dev,
						       buf + priv->rx_buf_offset,
						       priv->rx_buf_size,
						       DMA_FROM_DEVICE);
		}
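
		/* hand the buffer back to hardware: program the buffer
		 * length, set the ownership bit, and flag the last
		 * descriptor with the wrap bit so the dma engine returns
		 * to the head of the ring */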
		len_stat = priv->rx_buf_size << DMADESC_LENGTH_SHIFT;
		len_stat |= DMADESC_OWNER_MASK;
		if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
			len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
			priv->rx_dirty_desc = 0;
		} else {
			priv->rx_dirty_desc++;
		}
		wmb();
		desc->len_stat = len_stat;

		priv->rx_desc_count++;

		/* tell dma engine we allocated one buffer */
		if (priv->dma_has_sram)
			enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
		else
			enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan);
	}

	/* If rx ring is still empty, set a timer to try allocating
	 * again at a later time. */
	if (priv->rx_desc_count == 0 && netif_running(dev)) {
		dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
		priv->rx_timeout.expires = jiffies + HZ;
		add_timer(&priv->rx_timeout);
	}

	return 0;
}

/*
 * timer callback to defer refill rx queue in case we're OOM
 */
static void bcm_enet_refill_rx_timer(struct timer_list *t)
{
	struct bcm_enet_priv *priv = from_timer(priv, t, rx_timeout);
	struct net_device *dev = priv->net_dev;

	spin_lock(&priv->rx_lock);
	bcm_enet_refill_rx(dev, false);
	spin_unlock(&priv->rx_lock);
}

/*
 * extract packet from rx queue
 */
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
	struct bcm_enet_priv *priv;
	struct list_head rx_list;
	struct device *kdev;
	int processed;

	priv = netdev_priv(dev);
	INIT_LIST_HEAD(&rx_list);
	kdev = &priv->pdev->dev;
	processed = 0;

	/* don't scan ring further than number of refilled
	 * rx descriptors */
	if (budget > priv->rx_desc_count)
		budget = priv->rx_desc_count;

	do {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;
		int desc_idx;
		u32 len_stat;
		unsigned int len;
		void *buf;

		desc_idx = priv->rx_curr_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		/* make sure we actually read the descriptor status at
		 * each loop */
		rmb();

		len_stat = desc->len_stat;

		/* break if dma ownership belongs to hw */
		if (len_stat & DMADESC_OWNER_MASK)
			break;

		processed++;
		priv->rx_curr_desc++;
		if (priv->rx_curr_desc == priv->rx_ring_size)
			priv->rx_curr_desc = 0;

		/* if the packet does not have start of packet _and_
		 * end of packet flag set, then just recycle it */
		if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) !=
		    (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) {
			dev->stats.rx_dropped++;
			continue;
		}

		/* recycle packet if it's marked as bad */
		if (!priv->enet_is_sw &&
		    unlikely(len_stat & DMADESC_ERR_MASK)) {
			dev->stats.rx_errors++;

			if (len_stat & DMADESC_OVSIZE_MASK)
				dev->stats.rx_length_errors++;
			if (len_stat & DMADESC_CRC_MASK)
				dev->stats.rx_crc_errors++;
			if (len_stat & DMADESC_UNDER_MASK)
				dev->stats.rx_frame_errors++;
			if (len_stat & DMADESC_OV_MASK)
				dev->stats.rx_fifo_errors++;
			continue;
		}

		/* valid packet */
		buf = priv->rx_buf[desc_idx];
		len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
		/* don't include FCS */
		len -= 4;

		if (len < copybreak) {
			skb = napi_alloc_skb(&priv->napi, len);
			if (unlikely(!skb)) {
				/* forget packet, just rearm desc */
				dev->stats.rx_dropped++;
				continue;
			}

			dma_sync_single_for_cpu(kdev, desc->address,
						len, DMA_FROM_DEVICE);
			memcpy(skb->data, buf + priv->rx_buf_offset, len);
			dma_sync_single_for_device(kdev, desc->address,
						   len, DMA_FROM_DEVICE);
		} else {
			dma_unmap_single(kdev, desc->address,
					 priv->rx_buf_size, DMA_FROM_DEVICE);
			priv->rx_buf[desc_idx] = NULL;

			skb = napi_build_skb(buf, priv->rx_frag_size);
			if (unlikely(!skb)) {
				skb_free_frag(buf);
				dev->stats.rx_dropped++;
				continue;
			}
			skb_reserve(skb, priv->rx_buf_offset);
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		list_add_tail(&skb->list, &rx_list);

	} while (processed < budget);

	netif_receive_skb_list(&rx_list);
	priv->rx_desc_count -= processed;

	if (processed || !priv->rx_desc_count) {
		bcm_enet_refill_rx(dev, true);

		/* kick rx dma */
		enet_dmac_writel(priv, priv->dma_chan_en_mask,
				 ENETDMAC_CHANCFG, priv->rx_chan);
	}

	return processed;
}

/*
 * try to or force reclaim of transmitted buffers
 */
static int bcm_enet_tx_reclaim(struct net_device *dev, int force, int budget)
{
	struct bcm_enet_priv *priv;
	unsigned int bytes;
	int released;

	priv = netdev_priv(dev);
	bytes = 0;
	released = 0;

	while (priv->tx_desc_count < priv->tx_ring_size) {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;

		/* We run in a bh and fight against start_xmit, which
		 * is called with bh disabled */
		spin_lock(&priv->tx_lock);

		desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];

		if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
			spin_unlock(&priv->tx_lock);
			break;
		}

		/* ensure other fields of the descriptor were not read
		 * before we checked ownership */
		rmb();

		skb = priv->tx_skb[priv->tx_dirty_desc];
		priv->tx_skb[priv->tx_dirty_desc] = NULL;
		dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
				 DMA_TO_DEVICE);

		priv->tx_dirty_desc++;
		if (priv->tx_dirty_desc == priv->tx_ring_size)
			priv->tx_dirty_desc = 0;
		priv->tx_desc_count++;

		spin_unlock(&priv->tx_lock);

		if (desc->len_stat & DMADESC_UNDER_MASK)
			dev->stats.tx_errors++;

		bytes += skb->len;
		released++;
		napi_consume_skb(skb, budget);
	}
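
	/* BQL: report completed bytes/packets, pairing with the
	 * netdev_sent_queue() call in bcm_enet_start_xmit() */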
	netdev_completed_queue(dev, released, bytes);

	if (netif_queue_stopped(dev) && released)
		netif_wake_queue(dev);

	return released;
}

/*
 * poll func, called by network core
 */
static int bcm_enet_poll(struct napi_struct *napi, int budget)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	int rx_work_done;

	priv = container_of(napi, struct bcm_enet_priv, napi);
	dev = priv->net_dev;

	/* ack interrupts */
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->tx_chan);

	/* reclaim sent skb */
	bcm_enet_tx_reclaim(dev, 0, budget);

	spin_lock(&priv->rx_lock);
	rx_work_done = bcm_enet_receive_queue(dev, budget);
	spin_unlock(&priv->rx_lock);

	if (rx_work_done >= budget) {
		/* rx queue is not yet empty/clean */
		return rx_work_done;
	}

	/* no more packet in rx/tx queue, remove device from poll
	 * queue */
	napi_complete_done(napi, rx_work_done);

	/* restore rx/tx interrupt */
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->tx_chan);

	return rx_work_done;
}

/*
 * mac interrupt handler
 */
static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;
	u32 stat;

	dev = dev_id;
	priv = netdev_priv(dev);

	stat = enet_readl(priv, ENET_IR_REG);
	if (!(stat & ENET_IR_MIB))
		return IRQ_NONE;

	/* clear & mask interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, 0, ENET_IRMASK_REG);

	/* read mib registers in workqueue */
	schedule_work(&priv->mib_update_task);

	return IRQ_HANDLED;
}

/*
 * rx/tx dma interrupt handler
 */
static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;

	dev = dev_id;
	priv = netdev_priv(dev);

	/* mask rx/tx interrupts */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}

/*
 * tx request callback
 */
static netdev_tx_t
bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct bcm_enet_desc *desc;
	u32 len_stat;
	netdev_tx_t ret;

	priv = netdev_priv(dev);

	/* lock against tx reclaim */
	spin_lock(&priv->tx_lock);

	/* make sure the tx hw queue is not full, should not happen
	 * since we stop queue before it's the case */
	if (unlikely(!priv->tx_desc_count)) {
		netif_stop_queue(dev);
		dev_err(&priv->pdev->dev, "xmit called with no tx desc "
			"available?\n");
		ret = NETDEV_TX_BUSY;
		goto out_unlock;
	}

	/* pad small packets sent on a switch device */
	if (priv->enet_is_sw && skb->len < 64) {
		int needed = 64 - skb->len;
		char *data;

		if (unlikely(skb_tailroom(skb) < needed)) {
			struct sk_buff *nskb;

			nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
			if (!nskb) {
				ret = NETDEV_TX_BUSY;
				goto out_unlock;
			}
			dev_kfree_skb(skb);
			skb = nskb;
		}
		data = skb_put_zero(skb, needed);
	}
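	/* (64 bytes is the minimum Ethernet frame length including FCS;
	 * the switch presumably drops shorter frames as runts, hence the
	 * padding above) */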

	/* point to the next available desc */
	desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
	priv->tx_skb[priv->tx_curr_desc] = skb;

	/* fill descriptor */
	desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
				       DMA_TO_DEVICE);

	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
	len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) |
		DMADESC_APPEND_CRC |
		DMADESC_OWNER_MASK;

	priv->tx_curr_desc++;
	if (priv->tx_curr_desc == priv->tx_ring_size) {
		priv->tx_curr_desc = 0;
		len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
	}
	priv->tx_desc_count--;

	/* dma might be already polling, make sure we update desc
	 * fields in correct order */
	wmb();
	desc->len_stat = len_stat;
	wmb();

	netdev_sent_queue(dev, skb->len);
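
	/* kick tx dma; with xmit_more the doorbell write is batched and
	 * deferred until the last skb of a train, unless the ring just
	 * became full */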
	if (!netdev_xmit_more() || !priv->tx_desc_count)
		enet_dmac_writel(priv, priv->dma_chan_en_mask,
				 ENETDMAC_CHANCFG, priv->tx_chan);

	/* stop queue if no more desc available */
	if (!priv->tx_desc_count)
		netif_stop_queue(dev);

	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ret = NETDEV_TX_OK;

out_unlock:
	spin_unlock(&priv->tx_lock);
	return ret;
}

/*
 * Change the interface's mac address.
 */
static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
{
	struct bcm_enet_priv *priv;
	struct sockaddr *addr = p;
	u32 val;

	priv = netdev_priv(dev);
	eth_hw_addr_set(dev, addr->sa_data);

	/* use perfect match register 0 to store my mac address */
	val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
		(dev->dev_addr[4] << 8) | dev->dev_addr[5];
	enet_writel(priv, val, ENET_PML_REG(0));

	val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
	val |= ENET_PMH_DATAVALID_MASK;
	enet_writel(priv, val, ENET_PMH_REG(0));

	return 0;
}

/*
 * Change rx mode (promiscuous/allmulti) and update multicast list
 */
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct netdev_hw_addr *ha;
	u32 val;
	int i;

	priv = netdev_priv(dev);

	val = enet_readl(priv, ENET_RXCFG_REG);

	if (dev->flags & IFF_PROMISC)
		val |= ENET_RXCFG_PROMISC_MASK;
	else
		val &= ~ENET_RXCFG_PROMISC_MASK;

	/* only 3 perfect match registers left, first one is used for
	 * own mac address */
	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
		val |= ENET_RXCFG_ALLMCAST_MASK;
	else
		val &= ~ENET_RXCFG_ALLMCAST_MASK;

	/* no need to set perfect match registers if we catch all
	 * multicast */
	if (val & ENET_RXCFG_ALLMCAST_MASK) {
		enet_writel(priv, val, ENET_RXCFG_REG);
		return;
	}

	i = 0;
	netdev_for_each_mc_addr(ha, dev) {
		u8 *dmi_addr;
		u32 tmp;

		if (i == 3)
			break;

		/* update perfect match registers */
		dmi_addr = ha->addr;
		tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
			(dmi_addr[4] << 8) | dmi_addr[5];
		enet_writel(priv, tmp, ENET_PML_REG(i + 1));

		tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
		tmp |= ENET_PMH_DATAVALID_MASK;
		enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
	}

	for (; i < 3; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i + 1));
		enet_writel(priv, 0, ENET_PMH_REG(i + 1));
	}

	enet_writel(priv, val, ENET_RXCFG_REG);
}

/*
 * set mac duplex parameters
 */
static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
{
	u32 val;

	val = enet_readl(priv, ENET_TXCTL_REG);
	if (fullduplex)
		val |= ENET_TXCTL_FD_MASK;
	else
		val &= ~ENET_TXCTL_FD_MASK;
	enet_writel(priv, val, ENET_TXCTL_REG);
}

/*
 * set mac flow control parameters
 */
static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
{
	u32 val;

	/* rx flow control (pause frame handling) */
	val = enet_readl(priv, ENET_RXCFG_REG);
	if (rx_en)
		val |= ENET_RXCFG_ENFLOW_MASK;
	else
		val &= ~ENET_RXCFG_ENFLOW_MASK;
	enet_writel(priv, val, ENET_RXCFG_REG);

	if (!priv->dma_has_sram)
		return;

	/* tx flow control (pause frame generation) */
	val = enet_dma_readl(priv, ENETDMA_CFG_REG);
	if (tx_en)
		val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	else
		val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	enet_dma_writel(priv, val, ENETDMA_CFG_REG);
}

/*
 * link changed callback (from phylib)
 */
static void bcm_enet_adjust_phy_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct phy_device *phydev;
	int status_changed;

	priv = netdev_priv(dev);
	phydev = dev->phydev;
	status_changed = 0;

	if (priv->old_link != phydev->link) {
		status_changed = 1;
		priv->old_link = phydev->link;
	}

	/* reflect duplex change in mac configuration */
	if (phydev->link && phydev->duplex != priv->old_duplex) {
		bcm_enet_set_duplex(priv,
				    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
		status_changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	/* enable flow control if remote advertises it (trust phylib to
	 * check that duplex is full) */
	if (phydev->link && phydev->pause != priv->old_pause) {
		int rx_pause_en, tx_pause_en;

		if (phydev->pause) {
			/* pause was advertised by lpa and us */
			rx_pause_en = 1;
			tx_pause_en = 1;
		} else if (!priv->pause_auto) {
			/* pause setting overridden by user */
			rx_pause_en = priv->pause_rx;
			tx_pause_en = priv->pause_tx;
		} else {
			rx_pause_en = 0;
			tx_pause_en = 0;
		}

		status_changed = 1;
		bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
		priv->old_pause = phydev->pause;
	}

	if (status_changed) {
		pr_info("%s: link %s", dev->name, phydev->link ?
			"UP" : "DOWN");
		if (phydev->link)
			pr_cont(" - %d/%s - flow control %s", phydev->speed,
				DUPLEX_FULL == phydev->duplex ? "full" : "half",
				phydev->pause == 1 ? "rx&tx" : "off");
		pr_cont("\n");
	}
}

/*
 * link changed callback (if phylib is not used)
 */
static void bcm_enet_adjust_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	bcm_enet_set_duplex(priv, priv->force_duplex_full);
	bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
	netif_carrier_on(dev);

	pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
		dev->name,
		priv->force_speed_100 ? 100 : 10,
		priv->force_duplex_full ? "full" : "half",
		priv->pause_rx ? "rx" : "off",
		priv->pause_tx ? "tx" : "off");
}

static void bcm_enet_free_rx_buf_ring(struct device *kdev, struct bcm_enet_priv *priv)
{
	int i;

	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_buf[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_buf_size,
				 DMA_FROM_DEVICE);
		skb_free_frag(priv->rx_buf[i]);
	}
	kfree(priv->rx_buf);
}

/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enet_open(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct sockaddr addr;
	struct device *kdev;
	struct phy_device *phydev;
	int i, ret;
	unsigned int size;
	char phy_id[MII_BUS_ID_SIZE + 3];
	void *p;
	u32 val;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	if (priv->has_phy) {
		/* connect to PHY */
		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
			 priv->mii_bus->id, priv->phy_id);

		phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link,
				     PHY_INTERFACE_MODE_MII);

		if (IS_ERR(phydev)) {
			dev_err(kdev, "could not attach to PHY\n");
			return PTR_ERR(phydev);
		}

		/* mask with MAC supported features */
		phy_support_sym_pause(phydev);
		phy_set_max_speed(phydev, SPEED_100);
		phy_set_sym_pause(phydev, priv->pause_rx, priv->pause_rx,
				  priv->pause_auto);

		phy_attached_info(phydev);

		priv->old_link = 0;
		priv->old_duplex = -1;
		priv->old_pause = -1;
	} else {
		phydev = NULL;
	}

	/* mask all interrupts and request them */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
	if (ret)
		goto out_phy_disconnect;

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0,
			  dev->name, dev);
	if (ret)
		goto out_freeirq;

	ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
			  0, dev->name, dev);
	if (ret)
		goto out_freeirq_rx;

	/* initialize perfect match registers */
	for (i = 0; i < 4; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i));
		enet_writel(priv, 0, ENET_PMH_REG(i));
	}

	/* write device mac address */
	memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
	bcm_enet_set_mac_address(dev, &addr);

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with buffers */
	priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
			       GFP_KERNEL);
	if (!priv->rx_buf) {
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* initialize flow control buffer allocation */
	if (priv->dma_has_sram)
		enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
				ENETDMA_BUFALLOC_REG(priv->rx_chan));
	else
		enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
				 ENETDMAC_BUFALLOC, priv->rx_chan);

	if (bcm_enet_refill_rx(dev, false)) {
		dev_err(kdev, "cannot allocate rx buffer queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	if (priv->dma_has_sram) {
		enet_dmas_writel(priv, priv->rx_desc_dma,
				 ENETDMAS_RSTART_REG, priv->rx_chan);
		enet_dmas_writel(priv, priv->tx_desc_dma,
				 ENETDMAS_RSTART_REG, priv->tx_chan);
	} else {
		enet_dmac_writel(priv, priv->rx_desc_dma,
				 ENETDMAC_RSTART, priv->rx_chan);
		enet_dmac_writel(priv, priv->tx_desc_dma,
				 ENETDMAC_RSTART, priv->tx_chan);
	}

	/* clear remaining state ram for rx & tx channel */
	if (priv->dma_has_sram) {
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
	} else {
		enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan);
		enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan);
	}

	/* set max rx/tx length */
	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);

	/* set dma maximum burst len */
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->tx_chan);

	/* set correct transmit fifo watermark */
	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	if (priv->dma_has_sram) {
		val = priv->rx_ring_size / 3;
		enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
		val = (priv->rx_ring_size * 2) / 3;
		enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
	} else {
		enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan);
		enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan);
		enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan);
	}

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel */
	wmb();
	val = enet_readl(priv, ENET_CTL_REG);
	val |= ENET_CTL_ENABLE_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	if (priv->dma_has_sram)
		enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dmac_writel(priv, priv->dma_chan_en_mask,
			 ENETDMAC_CHANCFG, priv->rx_chan);

	/* watch "mib counters about to overflow" interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->tx_chan);

	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);

	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->tx_chan);

	if (phydev)
		phy_start(phydev);
	else
		bcm_enet_adjust_link(dev);

	netif_start_queue(dev);
	return 0;

out:
	bcm_enet_free_rx_buf_ring(kdev, priv);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	free_irq(dev->irq, dev);

out_phy_disconnect:
	if (phydev)
		phy_disconnect(phydev);

	return ret;
}

/*
 * disable mac
 */
static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
{
	int limit;
	u32 val;

	val = enet_readl(priv, ENET_CTL_REG);
	val |= ENET_CTL_DISABLE_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	limit = 1000;
	do {
		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_DISABLE_MASK))
			break;
		udelay(1);
	} while (limit--);
}

/*
 * disable dma in given channel
 */
static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
{
	int limit;

	enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan);

	limit = 1000;
	do {
		u32 val;

		val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan);
		if (!(val & ENETDMAC_CHANCFG_EN_MASK))
			break;
		udelay(1);
	} while (limit--);
}

/*
 * stop callback
 */
static int bcm_enet_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	if (priv->has_phy)
		phy_stop(dev->phydev);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	/* make sure no mib update is scheduled */
	cancel_work_sync(&priv->mib_update_task);

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);
	bcm_enet_disable_mac(priv);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1, 0);

	/* free the rx buffer ring */
	bcm_enet_free_rx_buf_ring(kdev, priv);

	/* free remaining allocated memory */
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);
	free_irq(dev->irq, dev);

	/* release phy */
	if (priv->has_phy)
		phy_disconnect(dev->phydev);

	/* reset BQL after forced tx reclaim to prevent kernel panic */
	netdev_reset_queue(dev);

	return 0;
}

/*
 * ethtool callbacks
 */
struct bcm_enet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
	int mib_reg;
};

#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m), \
		     offsetof(struct bcm_enet_priv, m)
#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m), \
		     offsetof(struct net_device_stats, m)

static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
	{ "rx_packets", DEV_STAT(rx_packets), -1 },
	{ "tx_packets", DEV_STAT(tx_packets), -1 },
	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
	{ "rx_errors", DEV_STAT(rx_errors), -1 },
	{ "tx_errors", DEV_STAT(tx_errors), -1 },
	{ "rx_dropped", DEV_STAT(rx_dropped), -1 },
	{ "tx_dropped", DEV_STAT(tx_dropped), -1 },

	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS },
	{ "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
	{ "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
	{ "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
	{ "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
	{ "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
	{ "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
	{ "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
	{ "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
	{ "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
	{ "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
	{ "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
	{ "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
	{ "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
	{ "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
	{ "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
	{ "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
	{ "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
	{ "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },

	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
	{ "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
	{ "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023 },
	{ "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
	{ "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
	{ "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
	{ "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
	{ "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
	{ "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
	{ "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
	{ "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
	{ "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
	{ "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
	{ "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
	{ "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
};

#define BCM_ENET_STATS_LEN	ARRAY_SIZE(bcm_enet_gstrings_stats)

static const u32 unused_mib_regs[] = {
	ETH_MIB_TX_ALL_OCTETS,
	ETH_MIB_TX_ALL_PKTS,
	ETH_MIB_RX_ALL_OCTETS,
	ETH_MIB_RX_ALL_PKTS,
};

static void bcm_enet_get_drvinfo(struct net_device *netdev,
				 struct ethtool_drvinfo *drvinfo)
{
	strscpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
	strscpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}

static int bcm_enet_get_sset_count(struct net_device *netdev,
				   int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_ENET_STATS_LEN;
	default:
		return -EINVAL;
	}
}

static void bcm_enet_get_strings(struct net_device *netdev,
				 u32 stringset, u8 *data)
{
	const char *str;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
			str = bcm_enet_gstrings_stats[i].stat_string;
			ethtool_puts(&data, str);
		}
		break;
	}
}

static void update_mib_counters(struct bcm_enet_priv *priv)
{
	int i;

	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		u32 val;
		char *p;

		s = &bcm_enet_gstrings_stats[i];
		if (s->mib_reg == -1)
			continue;

		val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
		p = (char *)priv + s->stat_offset;

		if (s->sizeof_stat == sizeof(u64))
			*(u64 *)p += val;
		else
			*(u32 *)p += val;
	}

	/* also empty unused mib counters to make sure mib counter
	 * overflow interrupt is cleared */
	for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
		(void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
}

static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
{
	struct bcm_enet_priv *priv;

	priv = container_of(t, struct bcm_enet_priv, mib_update_task);
	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);
	mutex_unlock(&priv->mib_update_lock);

	/* reenable mib interrupt */
	if (netif_running(priv->net_dev))
		enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
}

static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcm_enet_priv *priv;
	int i;

	priv = netdev_priv(netdev);

	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);

	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		char *p;

		s = &bcm_enet_gstrings_stats[i];
		if (s->mib_reg == -1)
			p = (char *)&netdev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = (s->sizeof_stat == sizeof(u64)) ?
			*(u64 *)p : *(u32 *)p;
	}
	mutex_unlock(&priv->mib_update_lock);
}

static int bcm_enet_nway_reset(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy)
		return phy_ethtool_nway_reset(dev);

	return -EOPNOTSUPP;
}

static int bcm_enet_get_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	struct bcm_enet_priv *priv;
	u32 supported, advertising;

	priv = netdev_priv(dev);

	if (priv->has_phy) {
		if (!dev->phydev)
			return -ENODEV;

		phy_ethtool_ksettings_get(dev->phydev, cmd);

		return 0;
	} else {
		cmd->base.autoneg = 0;
		cmd->base.speed = (priv->force_speed_100) ?
			SPEED_100 : SPEED_10;
		cmd->base.duplex = (priv->force_duplex_full) ?
			DUPLEX_FULL : DUPLEX_HALF;
		supported = ADVERTISED_10baseT_Half |
			ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full;
		advertising = 0;
		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.supported, supported);
		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.advertising, advertising);
		cmd->base.port = PORT_MII;
	}
	return 0;
}

static int bcm_enet_set_link_ksettings(struct net_device *dev,
				       const struct ethtool_link_ksettings *cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!dev->phydev)
			return -ENODEV;
		return phy_ethtool_ksettings_set(dev->phydev, cmd);
	} else {
		if (cmd->base.autoneg ||
		    (cmd->base.speed != SPEED_100 &&
		     cmd->base.speed != SPEED_10) ||
		    cmd->base.port != PORT_MII)
			return -EINVAL;

		priv->force_speed_100 =
			(cmd->base.speed == SPEED_100) ? 1 : 0;
		priv->force_duplex_full =
			(cmd->base.duplex == DUPLEX_FULL) ? 1 : 0;

		if (netif_running(dev))
			bcm_enet_adjust_link(dev);
		return 0;
	}
}

static void
bcm_enet_get_ringparam(struct net_device *dev,
		       struct ethtool_ringparam *ering,
		       struct kernel_ethtool_ringparam *kernel_ering,
		       struct netlink_ext_ack *extack)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	/* rx/tx ring is actually only limited by memory */
	ering->rx_max_pending = 8192;
	ering->tx_max_pending = 8192;
	ering->rx_pending = priv->rx_ring_size;
	ering->tx_pending = priv->tx_ring_size;
}

static int bcm_enet_set_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ering,
				  struct kernel_ethtool_ringparam *kernel_ering,
				  struct netlink_ext_ack *extack)
{
	struct bcm_enet_priv *priv;
	int was_running;

	priv = netdev_priv(dev);

	was_running = 0;
	if (netif_running(dev)) {
		bcm_enet_stop(dev);
		was_running = 1;
	}

	priv->rx_ring_size = ering->rx_pending;
	priv->tx_ring_size = ering->tx_pending;

	if (was_running) {
		int err;

		err = bcm_enet_open(dev);
		if (err)
			dev_close(dev);
		else
			bcm_enet_set_multicast_list(dev);
	}
	return 0;
}

static void bcm_enet_get_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *ecmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	ecmd->autoneg = priv->pause_auto;
	ecmd->rx_pause = priv->pause_rx;
	ecmd->tx_pause = priv->pause_tx;
}

static int bcm_enet_set_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *ecmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	if (priv->has_phy) {
		if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
			/* asymmetric pause mode not supported,
			 * actually possible but integrated PHY has RO
			 * asym_pause bit */
			return -EINVAL;
		}
	} else {
		/* no pause autoneg on direct mii connection */
		if (ecmd->autoneg)
			return -EINVAL;
	}

	priv->pause_auto = ecmd->autoneg;
	priv->pause_rx = ecmd->rx_pause;
	priv->pause_tx = ecmd->tx_pause;

	return 0;
}

static const struct ethtool_ops bcm_enet_ethtool_ops = {
	.get_strings		= bcm_enet_get_strings,
	.get_sset_count		= bcm_enet_get_sset_count,
	.get_ethtool_stats	= bcm_enet_get_ethtool_stats,
	.nway_reset		= bcm_enet_nway_reset,
	.get_drvinfo		= bcm_enet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= bcm_enet_get_ringparam,
	.set_ringparam		= bcm_enet_set_ringparam,
	.get_pauseparam		= bcm_enet_get_pauseparam,
	.set_pauseparam		= bcm_enet_set_pauseparam,
	.get_link_ksettings	= bcm_enet_get_link_ksettings,
	.set_link_ksettings	= bcm_enet_set_link_ksettings,
};

static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!dev->phydev)
			return -ENODEV;

		return phy_mii_ioctl(dev->phydev, rq, cmd);
	} else {
		struct mii_if_info mii;

		mii.dev = dev;
		mii.mdio_read = bcm_enet_mdio_read_mii;
		mii.mdio_write = bcm_enet_mdio_write_mii;
		mii.phy_id = 0;
		mii.phy_id_mask = 0x3f;
		mii.reg_num_mask = 0x1f;
		return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
	}
}

/*
 * adjust mtu, can't be called while device is running
 */
static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bcm_enet_priv *priv = netdev_priv(dev);
	int actual_mtu = new_mtu;

	if (netif_running(dev))
		return -EBUSY;

	/* add ethernet header + vlan tag size */
	actual_mtu += VLAN_ETH_HLEN;

	/*
	 * setup maximum size before we get overflow mark in
	 * descriptor, note that this will not prevent reception of
	 * big frames, they will be split into multiple buffers
	 * anyway
	 */
	priv->hw_mtu = actual_mtu;

	/*
	 * align rx buffer size to dma burst len, account FCS since
	 * it's appended
	 */
	priv->rx_buf_size = ALIGN(actual_mtu + ETH_FCS_LEN,
				  priv->dma_maxburst * 4);

	priv->rx_frag_size = SKB_DATA_ALIGN(priv->rx_buf_offset + priv->rx_buf_size) +
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WRITE_ONCE(dev->mtu, new_mtu);
	return 0;
}

/*
 * preinit hardware to allow mii operation while device is down
 */
static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
{
	u32 val;
	int limit;

	/* make sure mac is disabled */
	bcm_enet_disable_mac(priv);

	/* soft reset mac */
	val = ENET_CTL_SRESET_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	wmb();

	limit = 1000;
	do {
		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_SRESET_MASK))
			break;
		udelay(1);
	} while (limit--);

	/* select correct mii interface */
	val = enet_readl(priv, ENET_CTL_REG);
	if (priv->use_external_mii)
		val |= ENET_CTL_EPHYSEL_MASK;
	else
		val &= ~ENET_CTL_EPHYSEL_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	/* turn on mdc clock */
	enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
		    ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);

	/* set mib counters to self-clear when read */
	val = enet_readl(priv, ENET_MIBCTL_REG);
	val |= ENET_MIBCTL_RDCLEAR_MASK;
	enet_writel(priv, val, ENET_MIBCTL_REG);
}

static const struct net_device_ops bcm_enet_ops = {
	.ndo_open		= bcm_enet_open,
	.ndo_stop		= bcm_enet_stop,
	.ndo_start_xmit		= bcm_enet_start_xmit,
	.ndo_set_mac_address	= bcm_enet_set_mac_address,
	.ndo_set_rx_mode	= bcm_enet_set_multicast_list,
	.ndo_eth_ioctl		= bcm_enet_ioctl,
	.ndo_change_mtu		= bcm_enet_change_mtu,
};

/*
 * allocate netdevice, request register memory and register device.
 */
static int bcm_enet_probe(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct bcm63xx_enet_platform_data *pd;
	int irq, irq_rx, irq_tx;
	struct mii_bus *bus;
	int i, ret;

	if (!bcm_enet_shared_base[0])
		return -EPROBE_DEFER;

	irq = platform_get_irq(pdev, 0);
	irq_rx = platform_get_irq(pdev, 1);
	irq_tx = platform_get_irq(pdev, 2);
	if (irq < 0 || irq_rx < 0 || irq_tx < 0)
		return -ENODEV;

	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;
	priv = netdev_priv(dev);

	priv->enet_is_sw = false;
	priv->dma_maxburst = BCMENET_DMA_MAXBURST;
	priv->rx_buf_offset = NET_SKB_PAD;

	ret = bcm_enet_change_mtu(dev, dev->mtu);
	if (ret)
		goto out;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto out;
	}

	dev->irq = priv->irq = irq;
	priv->irq_rx = irq_rx;
	priv->irq_tx = irq_tx;

	priv->mac_clk = devm_clk_get(&pdev->dev, "enet");
	if (IS_ERR(priv->mac_clk)) {
		ret = PTR_ERR(priv->mac_clk);
		goto out;
	}
	ret = clk_prepare_enable(priv->mac_clk);
	if (ret)
		goto out;

	/* initialize default and fetch platform data */
	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
	priv->tx_ring_size = BCMENET_DEF_TX_DESC;

	pd = dev_get_platdata(&pdev->dev);
	if (pd) {
		eth_hw_addr_set(dev, pd->mac_addr);
		priv->has_phy = pd->has_phy;
		priv->phy_id = pd->phy_id;
		priv->has_phy_interrupt = pd->has_phy_interrupt;
		priv->phy_interrupt = pd->phy_interrupt;
		priv->use_external_mii = !pd->use_internal_phy;
		priv->pause_auto = pd->pause_auto;
		priv->pause_rx = pd->pause_rx;
		priv->pause_tx = pd->pause_tx;
		priv->force_duplex_full = pd->force_duplex_full;
		priv->force_speed_100 = pd->force_speed_100;
		priv->dma_chan_en_mask = pd->dma_chan_en_mask;
		priv->dma_chan_int_mask = pd->dma_chan_int_mask;
		priv->dma_chan_width = pd->dma_chan_width;
		priv->dma_has_sram = pd->dma_has_sram;
		priv->dma_desc_shift = pd->dma_desc_shift;
		priv->rx_chan = pd->rx_chan;
		priv->tx_chan = pd->tx_chan;
	}

	if (priv->has_phy && !priv->use_external_mii) {
		/* using internal PHY, enable clock */
		priv->phy_clk = devm_clk_get(&pdev->dev, "ephy");
		if (IS_ERR(priv->phy_clk)) {
			ret = PTR_ERR(priv->phy_clk);
			priv->phy_clk = NULL;
			goto out_disable_clk_mac;
		}
		ret = clk_prepare_enable(priv->phy_clk);
		if (ret)
			goto out_disable_clk_mac;
	}

	/* do minimal hardware init to be able to probe mii bus */
	bcm_enet_hw_preinit(priv);

	/* MII bus registration */
	if (priv->has_phy) {
		priv->mii_bus = mdiobus_alloc();
		if (!priv->mii_bus) {
			ret = -ENOMEM;
			goto out_uninit_hw;
		}

		bus = priv->mii_bus;
		bus->name = "bcm63xx_enet MII bus";
		bus->parent = &pdev->dev;
		bus->priv = priv;
		bus->read = bcm_enet_mdio_read_phylib;
		bus->write = bcm_enet_mdio_write_phylib;
		sprintf(bus->id, "%s-%d", pdev->name, pdev->id);

		/* only probe bus where we think the PHY is, because
		 * the mdio read operation return 0 instead of 0xffff
		 * if a slave is not present on hw */
		bus->phy_mask = ~(1 << priv->phy_id);

		if (priv->has_phy_interrupt)
			bus->irq[priv->phy_id] = priv->phy_interrupt;

		ret = mdiobus_register(bus);
		if (ret) {
			dev_err(&pdev->dev, "unable to register mdio bus\n");
			goto out_free_mdio;
		}
	} else {
		/* run platform code to initialize PHY device */
		if (pd && pd->mii_config &&
		    pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
				   bcm_enet_mdio_write_mii)) {
			dev_err(&pdev->dev, "unable to configure mdio bus\n");
			ret = -ENODEV;
			goto out_uninit_hw;
		}
	}

	spin_lock_init(&priv->rx_lock);

	/* init rx timeout (used for oom) */
	timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);

	/* init the mib update lock&work */
	mutex_init(&priv->mib_update_lock);
	INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);

	/* zero mib counters */
	for (i = 0; i < ENET_MIB_REG_COUNT; i++)
		enet_writel(priv, 0, ENET_MIB_REG(i));

	/* register netdevice */
	dev->netdev_ops = &bcm_enet_ops;
	netif_napi_add_weight(dev, &priv->napi, bcm_enet_poll, 16);

	dev->ethtool_ops = &bcm_enet_ethtool_ops;
	/* MTU range: 46 - 2028 */
	dev->min_mtu = ETH_ZLEN - ETH_HLEN;
	dev->max_mtu = BCMENET_MAX_MTU - VLAN_ETH_HLEN;
	SET_NETDEV_DEV(dev, &pdev->dev);

	ret = register_netdev(dev);
	if (ret)
		goto out_unregister_mdio;

	netif_carrier_off(dev);
	platform_set_drvdata(pdev, dev);
	priv->pdev = pdev;
	priv->net_dev = dev;

	return 0;

out_unregister_mdio:
	if (priv->mii_bus)
		mdiobus_unregister(priv->mii_bus);

out_free_mdio:
	if (priv->mii_bus)
		mdiobus_free(priv->mii_bus);

out_uninit_hw:
	/* turn off mdc clock */
	enet_writel(priv, 0, ENET_MIISC_REG);
	clk_disable_unprepare(priv->phy_clk);

out_disable_clk_mac:
	clk_disable_unprepare(priv->mac_clk);
out:
	free_netdev(dev);
	return ret;
}

/*
 * exit func, stops hardware and unregisters netdevice
 */
static void bcm_enet_remove(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;

	/* stop netdevice */
	dev = platform_get_drvdata(pdev);
	priv = netdev_priv(dev);
	unregister_netdev(dev);

	/* turn off mdc clock */
	enet_writel(priv, 0, ENET_MIISC_REG);

	if (priv->has_phy) {
		mdiobus_unregister(priv->mii_bus);
		mdiobus_free(priv->mii_bus);
	} else {
		struct bcm63xx_enet_platform_data *pd;

		pd = dev_get_platdata(&pdev->dev);
		if (pd && pd->mii_config)
			pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
				       bcm_enet_mdio_write_mii);
	}

	/* disable hw block clocks */
	clk_disable_unprepare(priv->phy_clk);
	clk_disable_unprepare(priv->mac_clk);

	free_netdev(dev);
}

static struct platform_driver bcm63xx_enet_driver = {
	.probe	= bcm_enet_probe,
	.remove	= bcm_enet_remove,
	.driver	= {
		.name	= "bcm63xx_enet",
	},
};

/*
 * switch mii access callbacks
 */
static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv,
				int ext, int phy_id, int location)
{
	u32 reg;
	int ret;

	spin_lock_bh(&priv->enetsw_mdio_lock);
	enetsw_writel(priv, 0, ENETSW_MDIOC_REG);

	reg = ENETSW_MDIOC_RD_MASK |
		(phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
		(location << ENETSW_MDIOC_REG_SHIFT);

	if (ext)
		reg |= ENETSW_MDIOC_EXT_MASK;

	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
	udelay(50);
	ret = enetsw_readw(priv, ENETSW_MDIOD_REG);
	spin_unlock_bh(&priv->enetsw_mdio_lock);
	return ret;
}

static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv,
				  int ext, int phy_id, int location,
				  uint16_t data)
{
	u32 reg;

	spin_lock_bh(&priv->enetsw_mdio_lock);
	enetsw_writel(priv, 0, ENETSW_MDIOC_REG);

	reg = ENETSW_MDIOC_WR_MASK |
		(phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
		(location << ENETSW_MDIOC_REG_SHIFT);

	if (ext)
		reg |= ENETSW_MDIOC_EXT_MASK;

	reg |= data;

	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
	udelay(50);
	spin_unlock_bh(&priv->enetsw_mdio_lock);
}

static inline int bcm_enet_port_is_rgmii(int portid)
{
	return portid >= ENETSW_RGMII_PORT0;
}

/*
 * enet sw PHY polling
 */
static void swphy_poll_timer(struct timer_list *t)
{
	struct bcm_enet_priv *priv = from_timer(priv, t, swphy_poll);
	unsigned int i;

	for (i = 0; i < priv->num_ports; i++) {
		struct bcm63xx_enetsw_port *port;
		int val, j, up, advertise, lpa, speed, duplex, media;
		int external_phy = bcm_enet_port_is_rgmii(i);
		u8 override;

		port = &priv->used_ports[i];
		if (!port->used)
			continue;

		if (port->bypass_link)
			continue;

		/* dummy read to clear */
		for (j = 0; j < 2; j++)
			val = bcmenet_sw_mdio_read(priv, external_phy,
						   port->phy_id, MII_BMSR);
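
		/* (the BMSR link-status bit is latched low, so the first
		 * read may report a stale link failure; the second read
		 * returns the current state) */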
		if (val == 0xffff)
			continue;

		up = (val & BMSR_LSTATUS) ? 1 : 0;
		if (!(up ^ priv->sw_port_link[i]))
			continue;

		priv->sw_port_link[i] = up;

		/* link changed */
		if (!up) {
			dev_info(&priv->pdev->dev, "link DOWN on %s\n",
				 port->name);
			enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
				      ENETSW_PORTOV_REG(i));
			enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
				      ENETSW_PTCTRL_TXDIS_MASK,
				      ENETSW_PTCTRL_REG(i));
			continue;
		}

		advertise = bcmenet_sw_mdio_read(priv, external_phy,
						 port->phy_id, MII_ADVERTISE);

		lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
					   MII_LPA);

		/* figure out media and duplex from advertise and LPA values */
		media = mii_nway_result(lpa & advertise);
		duplex = (media & ADVERTISE_FULL) ? 1 : 0;

		if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
			speed = 100;
		else
			speed = 10;

		if (val & BMSR_ESTATEN) {
			advertise = bcmenet_sw_mdio_read(priv, external_phy,
							 port->phy_id, MII_CTRL1000);

			lpa = bcmenet_sw_mdio_read(priv, external_phy,
						   port->phy_id, MII_STAT1000);

			if (advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)
			    && lpa & (LPA_1000FULL | LPA_1000HALF)) {
				speed = 1000;
				duplex = (lpa & LPA_1000FULL);
			}
		}

		dev_info(&priv->pdev->dev,
			 "link UP on %s, %dMbps, %s-duplex\n",
			 port->name, speed, duplex ? "full" : "half");

		override = ENETSW_PORTOV_ENABLE_MASK |
			ENETSW_PORTOV_LINKUP_MASK;

		if (speed == 1000)
			override |= ENETSW_IMPOV_1000_MASK;
		else if (speed == 100)
			override |= ENETSW_IMPOV_100_MASK;
		if (duplex)
			override |= ENETSW_IMPOV_FDX_MASK;

		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
	}

	priv->swphy_poll.expires = jiffies + HZ;
	add_timer(&priv->swphy_poll);
}

/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enetsw_open(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i, ret;
	unsigned int size;
	void *p;
	u32 val;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	/* mask all interrupts and request them */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
			  0, dev->name, dev);
	if (ret)
		goto out_freeirq;

	if (priv->irq_tx != -1) {
		ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
				  0, dev->name, dev);
		if (ret)
			goto out_freeirq_rx;
	}

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate rx ring %u\n", size);
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate tx ring\n");
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		dev_err(kdev, "cannot allocate tx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with buffers */
	priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
			       GFP_KERNEL);
	if (!priv->rx_buf) {
		dev_err(kdev, "cannot allocate rx buffer queue\n");
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* disable all ports */
	for (i = 0; i < priv->num_ports; i++) {
		enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
			      ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
			      ENETSW_PTCTRL_TXDIS_MASK,
			      ENETSW_PTCTRL_REG(i));

		priv->sw_port_link[i] = 0;
	}

	/* reset mib */
	val = enetsw_readb(priv, ENETSW_GMCR_REG);
	val |= ENETSW_GMCR_RST_MIB_MASK;
	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
	mdelay(1);
	val &= ~ENETSW_GMCR_RST_MIB_MASK;
	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
	mdelay(1);

	/* force CPU port state */
	val = enetsw_readb(priv, ENETSW_IMPOV_REG);
	val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK;
	enetsw_writeb(priv, val, ENETSW_IMPOV_REG);

	/* enable switch forward engine */
	val = enetsw_readb(priv, ENETSW_SWMODE_REG);
	val |= ENETSW_SWMODE_FWD_EN_MASK;
	enetsw_writeb(priv, val, ENETSW_SWMODE_REG);

	/* enable jumbo on all ports */
	enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG);
	enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG);
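	/* (0x1ff presumably sets the jumbo-enable bit for all eight switch
	 * ports plus the MII/IMP port, and 9728 the maximum jumbo frame
	 * size in bytes; inferred from the register names) */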

	/* initialize flow control buffer allocation */
	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
			ENETDMA_BUFALLOC_REG(priv->rx_chan));

	if (bcm_enet_refill_rx(dev, false)) {
		dev_err(kdev, "cannot allocate rx buffer queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	enet_dmas_writel(priv, priv->rx_desc_dma,
			 ENETDMAS_RSTART_REG, priv->rx_chan);
	enet_dmas_writel(priv, priv->tx_desc_dma,
			 ENETDMAS_RSTART_REG, priv->tx_chan);

	/* clear remaining state ram for rx & tx channel */
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);

	/* set dma maximum burst len */
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->tx_chan);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	val = priv->rx_ring_size / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
	val = (priv->rx_ring_size * 2) / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel
	 */
	wmb();
	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
			 ENETDMAC_CHANCFG, priv->rx_chan);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IR, priv->rx_chan);
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IR, priv->tx_chan);

	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);

	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IRMASK, priv->tx_chan);

	netif_carrier_on(dev);
	netif_start_queue(dev);

	/* apply override config for bypass_link ports here. */
	for (i = 0; i < priv->num_ports; i++) {
		struct bcm63xx_enetsw_port *port;
		u8 override;

		port = &priv->used_ports[i];
		if (!port->used)
			continue;

		if (!port->bypass_link)
			continue;

		override = ENETSW_PORTOV_ENABLE_MASK |
			ENETSW_PORTOV_LINKUP_MASK;

		switch (port->force_speed) {
		case 1000:
			override |= ENETSW_IMPOV_1000_MASK;
			break;
		case 100:
			override |= ENETSW_IMPOV_100_MASK;
			break;
		case 10:
			break;
		default:
			pr_warn("invalid forced speed on port %s: assume 10\n",
				port->name);
			break;
		}

		if (port->force_duplex_full)
			override |= ENETSW_IMPOV_FDX_MASK;

		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
	}

	/* start phy polling timer */
	timer_setup(&priv->swphy_poll, swphy_poll_timer, 0);
	mod_timer(&priv->swphy_poll, jiffies);
	return 0;

out:
	bcm_enet_free_rx_buf_ring(kdev, priv);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	if (priv->irq_tx != -1)
		free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	return ret;
}

/*
 * stop callback
 */
static int bcm_enetsw_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	del_timer_sync(&priv->swphy_poll);
	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1, 0);

	/* free the rx buffer ring */
	bcm_enet_free_rx_buf_ring(kdev, priv);

	/* free remaining allocated memory */
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	if (priv->irq_tx != -1)
		free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);

	/* reset BQL after forced tx reclaim to prevent kernel panic */
	netdev_reset_queue(dev);

	return 0;
}

/* try to sort out phy external status by walking the used_port field
 * in the bcm_enet_priv structure. in case the phy address is not
 * assigned to any physical port on the switch, assume it is external
 * (and yell at the user).
 */
static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id)
{
	int i;

	for (i = 0; i < priv->num_ports; ++i) {
		if (!priv->used_ports[i].used)
			continue;
		if (priv->used_ports[i].phy_id == phy_id)
			return bcm_enet_port_is_rgmii(i);
	}

	printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n",
		    phy_id);
	return 1;
}

/* can't use bcmenet_sw_mdio_read directly as we need to sort out
 * external/internal status of the given phy_id first.
 */
static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id,
				    int location)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	return bcmenet_sw_mdio_read(priv,
				    bcm_enetsw_phy_is_external(priv, phy_id),
				    phy_id, location);
}

/* can't use bcmenet_sw_mdio_write directly as we need to sort out
 * external/internal status of the given phy_id first.
 */
static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id,
				      int location, int val)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id),
			      phy_id, location, val);
}

static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_if_info mii;

	mii.dev = dev;
	mii.mdio_read = bcm_enetsw_mii_mdio_read;
	mii.mdio_write = bcm_enetsw_mii_mdio_write;
	mii.phy_id = 0;
	mii.phy_id_mask = 0x3f;
	mii.reg_num_mask = 0x1f;
	return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
}
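
/* standard MII ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG), as issued by
 * e.g. mii-tool, land here; generic_mii_ioctl() parses them and calls back
 * into the mdio wrappers above.
 */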

static const struct net_device_ops bcm_enetsw_ops = {
	.ndo_open		= bcm_enetsw_open,
	.ndo_stop		= bcm_enetsw_stop,
	.ndo_start_xmit		= bcm_enet_start_xmit,
	.ndo_change_mtu		= bcm_enet_change_mtu,
	.ndo_eth_ioctl		= bcm_enetsw_ioctl,
};

static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = {
	{ "rx_packets", DEV_STAT(rx_packets), -1 },
	{ "tx_packets", DEV_STAT(tx_packets), -1 },
	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
	{ "rx_errors", DEV_STAT(rx_errors), -1 },
	{ "tx_errors", DEV_STAT(tx_errors), -1 },
	{ "rx_dropped", DEV_STAT(rx_dropped), -1 },
	{ "tx_dropped", DEV_STAT(tx_dropped), -1 },
2466 { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT },
2467 { "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST },
2468 { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST },
2469 { "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT },
2470 { "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 },
2471 { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 },
2472 { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 },
2473 { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 },
2474 { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023},
2475 { "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max),
2476 ETHSW_MIB_RX_1024_1522 },
2477 { "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047),
2478 ETHSW_MIB_RX_1523_2047 },
2479 { "tx_2048_4095_oct", GEN_STAT(mib.tx_2048_4095),
2480 ETHSW_MIB_RX_2048_4095 },
2481 { "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191),
2482 ETHSW_MIB_RX_4096_8191 },
2483 { "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728),
2484 ETHSW_MIB_RX_8192_9728 },
2485 { "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR },
2486 { "tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC },
2487 { "tx_dropped", GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP },
2488 { "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND },
2489 { "tx_pause", GEN_STAT(mib.tx_pause), ETHSW_MIB_RX_PAUSE },
2491 { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT },
2492 { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST },
2493 { "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT },
2494 { "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT },
2495 { "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE },
2496 { "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS },
2500 #define BCM_ENETSW_STATS_LEN \
2501 (sizeof(bcm_enetsw_gstrings_stats) / sizeof(struct bcm_enet_stats))

static void bcm_enetsw_get_strings(struct net_device *netdev,
				   u32 stringset, u8 *data)
{
	const char *str;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
			str = bcm_enetsw_gstrings_stats[i].stat_string;
			ethtool_puts(&data, str);
		}
		break;
	}
}

static int bcm_enetsw_get_sset_count(struct net_device *netdev,
				     int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_ENETSW_STATS_LEN;
	default:
		return -EINVAL;
	}
}

static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
				   struct ethtool_drvinfo *drvinfo)
{
	strscpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
	strscpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}

static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
					 struct ethtool_stats *stats,
					 u64 *data)
{
	struct bcm_enet_priv *priv;
	int i;

	priv = netdev_priv(netdev);

	/* first pass: latch the hardware MIB counters into the copy in priv */
	for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		u32 lo, hi;
		char *p;
		int reg;

		s = &bcm_enetsw_gstrings_stats[i];

		reg = s->mib_reg;
		if (reg == -1)
			continue;

		lo = enetsw_readl(priv, ENETSW_MIB_REG(reg));
		p = (char *)priv + s->stat_offset;

		if (s->sizeof_stat == sizeof(u64)) {
			hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1));
			*(u64 *)p = ((u64)hi << 32 | lo);
		} else {
			*(u32 *)p = lo;
		}
	}

	/* second pass: copy netdev stats and the latched MIB values out */
	for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		char *p;

		s = &bcm_enetsw_gstrings_stats[i];

		if (s->mib_reg == -1)
			p = (char *)&netdev->stats + s->stat_offset;
		else
			p = (char *)priv + s->stat_offset;

		data[i] = (s->sizeof_stat == sizeof(u64)) ?
			*(u64 *)p : *(u32 *)p;
	}
}

static void
bcm_enetsw_get_ringparam(struct net_device *dev,
			 struct ethtool_ringparam *ering,
			 struct kernel_ethtool_ringparam *kernel_ering,
			 struct netlink_ext_ack *extack)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	/* rx/tx ring is actually only limited by memory */
	ering->rx_max_pending = 8192;
	ering->tx_max_pending = 8192;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->rx_pending = priv->rx_ring_size;
	ering->tx_pending = priv->tx_ring_size;
}

static int
bcm_enetsw_set_ringparam(struct net_device *dev,
			 struct ethtool_ringparam *ering,
			 struct kernel_ethtool_ringparam *kernel_ering,
			 struct netlink_ext_ack *extack)
{
	struct bcm_enet_priv *priv;
	int was_running;

	priv = netdev_priv(dev);

	was_running = 0;
	if (netif_running(dev)) {
		bcm_enetsw_stop(dev);
		was_running = 1;
	}

	priv->rx_ring_size = ering->rx_pending;
	priv->tx_ring_size = ering->tx_pending;

	if (was_running) {
		int err;

		err = bcm_enetsw_open(dev);
		if (err)
			dev_close(dev);
	}

	return 0;
}

static const struct ethtool_ops bcm_enetsw_ethtool_ops = {
	.get_strings		= bcm_enetsw_get_strings,
	.get_sset_count		= bcm_enetsw_get_sset_count,
	.get_ethtool_stats	= bcm_enetsw_get_ethtool_stats,
	.get_drvinfo		= bcm_enetsw_get_drvinfo,
	.get_ringparam		= bcm_enetsw_get_ringparam,
	.set_ringparam		= bcm_enetsw_set_ringparam,
};
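
/* the ops above are exercised through the regular ethtool interface; for
 * example (interface name illustrative):
 *
 *   ethtool -S eth0          dump the bcm_enetsw_gstrings_stats counters
 *   ethtool -g eth0          query rx/tx ring sizes
 *   ethtool -G eth0 rx 512   resize a ring, restarting the mac if it is up
 */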

/* allocate netdevice, request register memory and register device. */
static int bcm_enetsw_probe(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct bcm63xx_enetsw_platform_data *pd;
	struct resource *res_mem;
	int ret, irq_rx, irq_tx;
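
	/* the shared DMA register space is mapped by bcm_enet_shared_probe();
	 * until that has run, defer probing so the core retries us later.
	 */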
	if (!bcm_enet_shared_base[0])
		return -EPROBE_DEFER;

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq_rx = platform_get_irq(pdev, 0);
	irq_tx = platform_get_irq(pdev, 1);
	if (!res_mem || irq_rx < 0)
		return -ENODEV;

	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;
	priv = netdev_priv(dev);

	/* initialize default and fetch platform data */
	priv->enet_is_sw = true;
	priv->irq_rx = irq_rx;
	priv->irq_tx = irq_tx;
	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
	priv->tx_ring_size = BCMENET_DEF_TX_DESC;
	priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
	priv->rx_buf_offset = NET_SKB_PAD + NET_IP_ALIGN;

	pd = dev_get_platdata(&pdev->dev);
	if (pd) {
		eth_hw_addr_set(dev, pd->mac_addr);
		memcpy(priv->used_ports, pd->used_ports,
		       sizeof(pd->used_ports));
		priv->num_ports = pd->num_ports;
		priv->dma_has_sram = pd->dma_has_sram;
		priv->dma_chan_en_mask = pd->dma_chan_en_mask;
		priv->dma_chan_int_mask = pd->dma_chan_int_mask;
		priv->dma_chan_width = pd->dma_chan_width;
	}

	ret = bcm_enet_change_mtu(dev, dev->mtu);
	if (ret)
		goto out;
2691 priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
2692 if (IS_ERR(priv->base)) {
2693 ret = PTR_ERR(priv->base);
2697 priv->mac_clk = devm_clk_get(&pdev->dev, "enetsw");
2698 if (IS_ERR(priv->mac_clk)) {
2699 ret = PTR_ERR(priv->mac_clk);
2702 ret = clk_prepare_enable(priv->mac_clk);
	spin_lock_init(&priv->rx_lock);

	/* init rx timeout (used for oom) */
	timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);

	/* register netdevice */
	dev->netdev_ops = &bcm_enetsw_ops;
	netif_napi_add_weight(dev, &priv->napi, bcm_enet_poll, 16);
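	/* a weight of 16 caps how many packets bcm_enet_poll() may consume
	 * per napi poll round before yielding.
	 */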
2716 dev->ethtool_ops = &bcm_enetsw_ethtool_ops;
2717 SET_NETDEV_DEV(dev, &pdev->dev);
2719 spin_lock_init(&priv->enetsw_mdio_lock);
2721 ret = register_netdev(dev);
2723 goto out_disable_clk;
2725 netif_carrier_off(dev);
2726 platform_set_drvdata(pdev, dev);
2728 priv->net_dev = dev;
2733 clk_disable_unprepare(priv->mac_clk);

/* exit func, stops hardware and unregisters netdevice */
static void bcm_enetsw_remove(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;

	/* stop netdevice */
	dev = platform_get_drvdata(pdev);
	priv = netdev_priv(dev);
	unregister_netdev(dev);

	clk_disable_unprepare(priv->mac_clk);

	free_netdev(dev);
}

static struct platform_driver bcm63xx_enetsw_driver = {
	.probe	= bcm_enetsw_probe,
	.remove	= bcm_enetsw_remove,
	.driver	= {
		.name	= "bcm63xx_enetsw",
	},
};

/* reserve & remap memory space shared between all macs */
static int bcm_enet_shared_probe(struct platform_device *pdev)
{
	void __iomem *p[3];
	unsigned int i;

	memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base));

	for (i = 0; i < 3; i++) {
		p[i] = devm_platform_ioremap_resource(pdev, i);
		if (IS_ERR(p[i]))
			return PTR_ERR(p[i]);
	}
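
	/* publish the mappings only once all three resources are remapped,
	 * so the mac/switch probes (which test bcm_enet_shared_base[0] to
	 * decide on -EPROBE_DEFER) never observe a half-initialized table.
	 */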
	memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base));

	return 0;
}

/* this "shared" driver is needed because both macs share a single
 * address space
 */
struct platform_driver bcm63xx_enet_shared_driver = {
	.probe	= bcm_enet_shared_probe,
	.driver	= {
		.name	= "bcm63xx_enet_shared",
	},
};

static struct platform_driver * const drivers[] = {
	&bcm63xx_enet_shared_driver,
	&bcm63xx_enet_driver,
	&bcm63xx_enetsw_driver,
};

static int __init bcm_enet_init(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}

static void __exit bcm_enet_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}

module_init(bcm_enet_init);
module_exit(bcm_enet_exit);

MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
MODULE_LICENSE("GPL");