// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Xilinx TEMAC Ethernet device
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 *
 * This is a driver for the Xilinx ll_temac ipcore which is often used
 * in the Virtex and Spartan series of chips.
 *
 * Notes:
 * - The ll_temac hardware uses indirect access for many of the TEMAC
 *   registers, including the MDIO bus. However, indirect access to MDIO
 *   registers takes considerably more clock cycles than access to TEMAC
 *   registers. MDIO accesses are long, so threads doing them should
 *   probably sleep rather than busywait. However, since only one
 *   indirect access can be in progress at any given time, that means
 *   that *all* indirect accesses could end up sleeping (to wait for an
 *   MDIO access to complete). Fortunately none of the indirect accesses
 *   are on the 'hot' path for tx or rx, so this should be okay.
 *
 * TODO:
 * - Factor out locallink DMA code into separate driver
 * - Fix support for hardware checksumming.
 * - Testing. Lots and lots of testing.
 */
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>	/* needed for sizeof(tcphdr) */
#include <linux/udp.h>	/* needed for sizeof(udphdr) */
#include <linux/phy.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#include <linux/processor.h>
#include <linux/platform_data/xilinx-ll-temac.h>

#include "ll_temac.h"
/* Descriptors defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT		64
#define RX_BD_NUM_DEFAULT		1024
#define TX_BD_NUM_MAX			4096
#define RX_BD_NUM_MAX			4096
/* ---------------------------------------------------------------------
 * Low level register access functions
 */

static u32 _temac_ior_be(struct temac_local *lp, int offset)
{
	return ioread32be(lp->regs + offset);
}

static void _temac_iow_be(struct temac_local *lp, int offset, u32 value)
{
	iowrite32be(value, lp->regs + offset);
}

static u32 _temac_ior_le(struct temac_local *lp, int offset)
{
	return ioread32(lp->regs + offset);
}

static void _temac_iow_le(struct temac_local *lp, int offset, u32 value)
{
	iowrite32(value, lp->regs + offset);
}
static bool hard_acs_rdy(struct temac_local *lp)
{
	return temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK;
}

static bool hard_acs_rdy_or_timeout(struct temac_local *lp, ktime_t timeout)
{
	ktime_t cur = ktime_get();

	return hard_acs_rdy(lp) || ktime_after(cur, timeout);
}

/* Poll for maximum 20 ms. This is similar to the 2 jiffies @ 100 Hz
 * that was used before, and should cover MDIO bus speed down to 3200
 * Hz.
 */
#define HARD_ACS_RDY_POLL_NS (20 * NSEC_PER_MSEC)
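
/* Sanity check of the 20 ms bound above (assuming a standard MDIO frame
 * of 64 MDC cycles, including the 32-bit preamble): at a 3200 Hz MDC,
 * one frame takes 64 / 3200 Hz = 20 ms, so the poll window covers a
 * full transaction even on such a slow bus.
 */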
/*
 * temac_indirect_busywait - Wait for current indirect register access
 * to complete.
 */
int temac_indirect_busywait(struct temac_local *lp)
{
	ktime_t timeout = ktime_add_ns(ktime_get(), HARD_ACS_RDY_POLL_NS);

	spin_until_cond(hard_acs_rdy_or_timeout(lp, timeout));
	if (WARN_ON(!hard_acs_rdy(lp)))
		return -ETIMEDOUT;

	return 0;
}

/*
 * temac_indirect_in32 - Indirect register read access. This function
 * must be called without lp->indirect_lock being held.
 */
u32 temac_indirect_in32(struct temac_local *lp, int reg)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(lp->indirect_lock, flags);
	val = temac_indirect_in32_locked(lp, reg);
	spin_unlock_irqrestore(lp->indirect_lock, flags);
	return val;
}
/*
 * temac_indirect_in32_locked - Indirect register read access. This
 * function must be called with lp->indirect_lock being held. Use
 * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
 * repeated lock/unlock and to ensure uninterrupted access to indirect
 * registers.
 */
u32 temac_indirect_in32_locked(struct temac_local *lp, int reg)
{
	/* This initial wait should normally not spin, as we always
	 * try to wait for indirect access to complete before
	 * releasing the indirect_lock.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return -ETIMEDOUT;
	/* Initiate read from indirect register */
	temac_iow(lp, XTE_CTL0_OFFSET, reg);
	/* Wait for indirect register access to complete. We really
	 * should not see timeouts, and could even end up causing
	 * problems for following indirect accesses, so let's make a bit
	 * of noise if that happens.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return -ETIMEDOUT;
	/* Value is ready now */
	return temac_ior(lp, XTE_LSW0_OFFSET);
}
/*
 * temac_indirect_out32 - Indirect register write access. This function
 * must be called without lp->indirect_lock being held.
 */
void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
{
	unsigned long flags;

	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, reg, value);
	spin_unlock_irqrestore(lp->indirect_lock, flags);
}
/*
 * temac_indirect_out32_locked - Indirect register write access. This
 * function must be called with lp->indirect_lock being held. Use
 * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
 * repeated lock/unlock and to ensure uninterrupted access to indirect
 * registers.
 */
void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value)
{
	/* As in temac_indirect_in32_locked(), we should normally not
	 * spin here. And if it happens, we actually end up silently
	 * ignoring the write request. Ouch.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return;
	/* Initiate write to indirect register */
	temac_iow(lp, XTE_LSW0_OFFSET, value);
	temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
	/* As in temac_indirect_in32_locked(), we should not see timeouts
	 * here. And if it happens, we continue before the write has
	 * completed. Not good.
	 */
	WARN_ON(temac_indirect_busywait(lp));
}
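
/* The intended usage pattern for the *_locked accessors is to batch
 * several indirect accesses under a single lock hold, e.g. (sketch,
 * clearing the receiver enable bit, as temac_device_reset() does):
 *
 *	unsigned long flags;
 *	u32 val;
 *
 *	spin_lock_irqsave(lp->indirect_lock, flags);
 *	val = temac_indirect_in32_locked(lp, XTE_RXC1_OFFSET);
 *	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET,
 *				    val & ~XTE_RXC1_RXEN_MASK);
 *	spin_unlock_irqrestore(lp->indirect_lock, flags);
 */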
/*
 * temac_dma_in32_* - Memory mapped DMA read, these functions expect a
 * register input that is based on DCR word addresses which are then
 * converted to memory mapped byte addresses. To be assigned to
 * lp->dma_in.
 */
static u32 temac_dma_in32_be(struct temac_local *lp, int reg)
{
	return ioread32be(lp->sdma_regs + (reg << 2));
}

static u32 temac_dma_in32_le(struct temac_local *lp, int reg)
{
	return ioread32(lp->sdma_regs + (reg << 2));
}

/*
 * temac_dma_out32_* - Memory mapped DMA write, these functions expect
 * a register input that is based on DCR word addresses which are then
 * converted to memory mapped byte addresses. To be assigned to
 * lp->dma_out.
 */
static void temac_dma_out32_be(struct temac_local *lp, int reg, u32 value)
{
	iowrite32be(value, lp->sdma_regs + (reg << 2));
}

static void temac_dma_out32_le(struct temac_local *lp, int reg, u32 value)
{
	iowrite32(value, lp->sdma_regs + (reg << 2));
}
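
/* Address conversion example: DCR word register 0x4 lands at byte
 * offset 0x4 << 2 == 0x10 from lp->sdma_regs in the memory mapped case.
 */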
/* DMA register access functions can be DCR based or memory mapped.
 * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both
 * memory mapped.
 */
#ifdef CONFIG_PPC_DCR

/*
 * temac_dma_dcr_in32 - DCR based DMA read
 */
static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
{
	return dcr_read(lp->sdma_dcrs, reg);
}

/*
 * temac_dma_dcr_out32 - DCR based DMA write
 */
static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
{
	dcr_write(lp->sdma_dcrs, reg, value);
}

/*
 * temac_dcr_setup - If the DMA is DCR based, then set up the address and
 * I/O functions
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
			   struct device_node *np)
{
	unsigned int dcrs;

	/* setup the dcr address mapping if it's in the device tree */
	dcrs = dcr_resource_start(np, 0);
	if (dcrs != 0) {
		lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
		lp->dma_in = temac_dma_dcr_in;
		lp->dma_out = temac_dma_dcr_out;
		dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
		return 0;
	}
	/* no DCR in the device tree, indicate a failure */
	return -1;
}

#else

/*
 * temac_dcr_setup - This is a stub for when DCR is not supported,
 * such as with MicroBlaze and x86
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
			   struct device_node *np)
{
	return -1;
}

#endif
/*
 * temac_dma_bd_release - Release buffer descriptor rings
 */
static void temac_dma_bd_release(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	int i;

	/* Reset Local Link (DMA) */
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);

	for (i = 0; i < lp->rx_bd_num; i++) {
		if (!lp->rx_skb[i])
			break;
		dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
				 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
		dev_kfree_skb(lp->rx_skb[i]);
	}
	if (lp->rx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
				  lp->rx_bd_v, lp->rx_bd_p);
	if (lp->tx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
				  lp->tx_bd_v, lp->tx_bd_p);
}
/*
 * temac_dma_bd_init - Set up buffer descriptor rings
 */
static int temac_dma_bd_init(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct sk_buff *skb;
	dma_addr_t skb_dma_addr;
	int i;

	lp->rx_skb = devm_kcalloc(&ndev->dev, lp->rx_bd_num,
				  sizeof(*lp->rx_skb), GFP_KERNEL);
	if (!lp->rx_skb)
		goto out;

	/* allocate the tx and rx ring buffer descriptors. */
	/* returns a virtual address and a physical address. */
	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		goto out;

	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < lp->tx_bd_num; i++) {
		lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p
			+ sizeof(*lp->tx_bd_v) * ((i + 1) % lp->tx_bd_num));
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
			+ sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num));

		skb = netdev_alloc_skb_ip_align(ndev,
						XTE_MAX_JUMBO_FRAME_SIZE);
		if (!skb)
			goto out;

		lp->rx_skb[i] = skb;
		/* returns physical address of skb->data */
		skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
					      XTE_MAX_JUMBO_FRAME_SIZE,
					      DMA_FROM_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, skb_dma_addr))
			goto out;
		lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr);
		lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
		lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
	}

	/* Configure DMA channel (irq setup) */
	lp->dma_out(lp, TX_CHNL_CTRL,
		    lp->coalesce_delay_tx << 24 | lp->coalesce_count_tx << 16 |
		    0x00000400 | // Use 1 Bit Wide Counters. Currently Not Used!
		    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
		    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
	lp->dma_out(lp, RX_CHNL_CTRL,
		    lp->coalesce_delay_rx << 24 | lp->coalesce_count_rx << 16 |
		    CHNL_CTRL_IRQ_IOE |
		    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
		    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);

	/* Init descriptor indexes */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;
	lp->rx_bd_tail = lp->rx_bd_num - 1;

	/* Enable RX DMA transfers */
	wmb();
	lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
	lp->dma_out(lp, RX_TAILDESC_PTR,
		    lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * lp->rx_bd_tail));

	/* Prepare for TX DMA transfer */
	lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);

	return 0;

out:
	temac_dma_bd_release(ndev);
	return -ENOMEM;
}
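
/* Ring layout note: both rings are circular, with each descriptor's
 * 'next' holding the physical address of the following descriptor
 * (wrapping at the end). The RX tail is initialized to the last
 * descriptor, i.e. one behind the current index, so the whole ring is
 * available to the hardware from the start.
 */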
/* ---------------------------------------------------------------------
 * net_device_ops
 */

static void temac_do_set_mac_address(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	unsigned long flags;

	/* set up the unicast MAC address filter with the MAC address */
	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, XTE_UAW0_OFFSET,
				    (ndev->dev_addr[0]) |
				    (ndev->dev_addr[1] << 8) |
				    (ndev->dev_addr[2] << 16) |
				    (ndev->dev_addr[3] << 24));
	/* There are reserved bits in EUAW1 so don't affect them.
	 * Set MAC bits [47:32] in EUAW1.
	 */
	temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET,
				    (ndev->dev_addr[4] & 0x000000ff) |
				    (ndev->dev_addr[5] << 8));
	spin_unlock_irqrestore(lp->indirect_lock, flags);
}
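
/* Register layout example (illustrative MAC 00:0a:35:01:02:03):
 * UAW0 gets bytes 0-3 in little-endian byte order, 0x01350a00, and
 * UAW1 gets bytes 4-5 in its low 16 bits, 0x00000302.
 */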
static int temac_init_mac_address(struct net_device *ndev, const void *address)
{
	eth_hw_addr_set(ndev, address);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);
	temac_do_set_mac_address(ndev);
	return 0;
}

static int temac_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	eth_hw_addr_set(ndev, addr->sa_data);
	temac_do_set_mac_address(ndev);
	return 0;
}
static void temac_set_multicast_list(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	u32 multi_addr_msw, multi_addr_lsw;
	int i = 0;
	unsigned long flags;
	bool promisc_mode_disabled = false;

	if (ndev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
	    (netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM)) {
		temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
		return;
	}

	spin_lock_irqsave(lp->indirect_lock, flags);

	if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_mc_addr(ha, ndev) {
			if (WARN_ON(i >= MULTICAST_CAM_TABLE_NUM))
				break;
			multi_addr_msw = ((ha->addr[3] << 24) |
					  (ha->addr[2] << 16) |
					  (ha->addr[1] << 8) |
					  (ha->addr[0]));
			temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET,
						    multi_addr_msw);
			multi_addr_lsw = ((ha->addr[5] << 8) |
					  (ha->addr[4]) | (i << 16));
			temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET,
						    multi_addr_lsw);
			i++;
		}
	}

	/* Clear all or remaining/unused address table entries */
	while (i < MULTICAST_CAM_TABLE_NUM) {
		temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET, 0);
		temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET, i << 16);
		i++;
	}

	/* Enable address filter block if currently disabled */
	if (temac_indirect_in32_locked(lp, XTE_AFM_OFFSET)
	    & XTE_AFM_EPPRM_MASK) {
		temac_indirect_out32_locked(lp, XTE_AFM_OFFSET, 0);
		promisc_mode_disabled = true;
	}

	spin_unlock_irqrestore(lp->indirect_lock, flags);

	if (promisc_mode_disabled)
		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
}
static struct temac_option {
	int flg;
	u32 opt;
	u32 reg;
	u32 m_or;
	u32 m_and;
} temac_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XTE_OPTION_JUMBO,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXJMBO_MASK,
	},
	{
		.opt = XTE_OPTION_JUMBO,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXJMBO_MASK,
	},
	/* Turn on VLAN packet support for both Rx and Tx */
	{
		.opt = XTE_OPTION_VLAN,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXVLAN_MASK,
	},
	{
		.opt = XTE_OPTION_VLAN,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXVLAN_MASK,
	},
	/* Turn on FCS stripping on receive packets */
	{
		.opt = XTE_OPTION_FCS_STRIP,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXFCS_MASK,
	},
	/* Turn on FCS insertion on transmit packets */
	{
		.opt = XTE_OPTION_FCS_INSERT,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXFCS_MASK,
	},
	/* Turn on length/type field checking on receive packets */
	{
		.opt = XTE_OPTION_LENTYPE_ERR,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXLT_MASK,
	},
	/* Turn on flow control */
	{
		.opt = XTE_OPTION_FLOW_CONTROL,
		.reg = XTE_FCC_OFFSET,
		.m_or = XTE_FCC_RXFLO_MASK,
	},
	/* Turn on flow control */
	{
		.opt = XTE_OPTION_FLOW_CONTROL,
		.reg = XTE_FCC_OFFSET,
		.m_or = XTE_FCC_TXFLO_MASK,
	},
	/* Turn on promiscuous frame filtering (all frames are received) */
	{
		.opt = XTE_OPTION_PROMISC,
		.reg = XTE_AFM_OFFSET,
		.m_or = XTE_AFM_EPPRM_MASK,
	},
	/* Enable transmitter if not already enabled */
	{
		.opt = XTE_OPTION_TXEN,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXEN_MASK,
	},
	/* Enable receiver if not already enabled */
	{
		.opt = XTE_OPTION_RXEN,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXEN_MASK,
	},
	{}
};
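
/* The empty initializer above acts as a sentinel: temac_setoptions()
 * below walks the table until it hits an entry with .opt == 0.
 */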
/*
 * temac_setoptions - Set device options
 */
static u32 temac_setoptions(struct net_device *ndev, u32 options)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct temac_option *tp = &temac_options[0];
	int reg;
	unsigned long flags;

	spin_lock_irqsave(lp->indirect_lock, flags);
	while (tp->opt) {
		reg = temac_indirect_in32_locked(lp, tp->reg) & ~tp->m_or;
		if (options & tp->opt) {
			reg |= tp->m_or;
			temac_indirect_out32_locked(lp, tp->reg, reg);
		}
		tp++;
	}
	spin_unlock_irqrestore(lp->indirect_lock, flags);
	lp->options |= options;

	return 0;
}
/* Initialize temac */
static void temac_device_reset(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	u32 timeout;
	u32 val;
	unsigned long flags;

	/* Perform a software reset */

	/* 0x300 host enable bit ? */
	/* reset PHY through control register ?:1 */

	dev_dbg(&ndev->dev, "%s()\n", __func__);

	/* Reset the receiver and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
	timeout = 1000;
	while (temac_indirect_in32(lp, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset RX reset timeout!!\n");
			break;
		}
	}

	/* Reset the transmitter and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK);
	timeout = 1000;
	while (temac_indirect_in32(lp, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset TX reset timeout!!\n");
			break;
		}
	}

	/* Disable the receiver */
	spin_lock_irqsave(lp->indirect_lock, flags);
	val = temac_indirect_in32_locked(lp, XTE_RXC1_OFFSET);
	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET,
				    val & ~XTE_RXC1_RXEN_MASK);
	spin_unlock_irqrestore(lp->indirect_lock, flags);

	/* Reset Local Link (DMA) */
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
	timeout = 1000;
	while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset DMA reset timeout!!\n");
			break;
		}
	}
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);

	if (temac_dma_bd_init(ndev)) {
		dev_err(&ndev->dev,
			"temac_device_reset descriptor allocation failed\n");
	}

	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, XTE_RXC0_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_TXC_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
	spin_unlock_irqrestore(lp->indirect_lock, flags);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	temac_setoptions(ndev,
			 lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));

	temac_do_set_mac_address(ndev);

	/* Set address filter table */
	temac_set_multicast_list(ndev);
	if (temac_setoptions(ndev, lp->options))
		dev_err(&ndev->dev, "Error setting TEMAC options\n");

	/* Init Driver variable */
	netif_trans_update(ndev); /* prevent tx timeout */
}
static void temac_adjust_link(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phy = ndev->phydev;
	u32 mii_speed;
	int link_state;
	unsigned long flags;

	/* hash together the state values to decide if something has changed */
	link_state = phy->speed | (phy->duplex << 1) | phy->link;

	if (lp->last_link != link_state) {
		spin_lock_irqsave(lp->indirect_lock, flags);
		mii_speed = temac_indirect_in32_locked(lp, XTE_EMCFG_OFFSET);
		mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;

		switch (phy->speed) {
		case SPEED_1000:
			mii_speed |= XTE_EMCFG_LINKSPD_1000;
			break;
		case SPEED_100:
			mii_speed |= XTE_EMCFG_LINKSPD_100;
			break;
		case SPEED_10:
			mii_speed |= XTE_EMCFG_LINKSPD_10;
			break;
		}

		/* Write new speed setting out to TEMAC */
		temac_indirect_out32_locked(lp, XTE_EMCFG_OFFSET, mii_speed);
		spin_unlock_irqrestore(lp->indirect_lock, flags);

		lp->last_link = link_state;
		phy_print_status(phy);
	}
}
#ifdef CONFIG_64BIT

static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
{
	bd->app3 = (u32)(((u64)p) >> 32);
	bd->app4 = (u32)((u64)p & 0xFFFFFFFF);
}

static void *ptr_from_txbd(struct cdmac_bd *bd)
{
	return (void *)(((u64)(bd->app3) << 32) | bd->app4);
}

#else

static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
{
	bd->app4 = (u32)p;
}

static void *ptr_from_txbd(struct cdmac_bd *bd)
{
	return (void *)(bd->app4);
}

#endif
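
/* These helpers stash the owning skb pointer in the app3/app4 words of
 * the last buffer descriptor of a frame: a 64-bit pointer needs both
 * 32-bit words, while a 32-bit pointer fits in app4 alone. The pointer
 * is recovered in temac_start_xmit_done() to free the skb.
 */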
static void temac_start_xmit_done(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
	unsigned int stat = 0;
	struct sk_buff *skb;

	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
	stat = be32_to_cpu(cur_p->app0);

	while (stat & STS_CTRL_APP0_CMPLT) {
		/* Make sure that the other fields are read after bd is
		 * released by dma
		 */
		rmb();
		dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
				 be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
		skb = (struct sk_buff *)ptr_from_txbd(cur_p);
		if (skb)
			dev_consume_skb_irq(skb);
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);

		/* app0 must be visible last, as it is used to flag
		 * availability of the bd
		 */
		smp_mb();
		cur_p->app0 = 0;

		lp->tx_bd_ci++;
		if (lp->tx_bd_ci >= lp->tx_bd_num)
			lp->tx_bd_ci = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
		stat = be32_to_cpu(cur_p->app0);
	}

	/* Matches barrier in temac_start_xmit */
	smp_mb();

	netif_wake_queue(ndev);
}
static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
{
	struct cdmac_bd *cur_p;
	int tail;

	tail = lp->tx_bd_tail;
	cur_p = &lp->tx_bd_v[tail];

	do {
		if (cur_p->app0)
			return NETDEV_TX_BUSY;

		/* Make sure to read next bd app0 after this one */
		rmb();

		tail++;
		if (tail >= lp->tx_bd_num)
			tail = 0;

		cur_p = &lp->tx_bd_v[tail];
		num_frag--;
	} while (num_frag >= 0);

	return 0;
}
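
/* The ring-space probe above relies on app0: descriptors that the
 * driver has submitted carry SOP/EOP markers or get STS_CTRL_APP0_CMPLT
 * set by the hardware, and temac_start_xmit_done() clears app0 again
 * once a completed buffer has been unmapped. So num_frag + 1 zero app0
 * words from the tail onward are taken to mean the frame will fit.
 */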
static netdev_tx_t
temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
	dma_addr_t tail_p, skb_dma_addr;
	int ii;
	unsigned long num_frag;
	skb_frag_t *frag;

	num_frag = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (temac_check_tx_bd_space(lp, num_frag + 1)) {
		if (netif_queue_stopped(ndev))
			return NETDEV_TX_BUSY;

		netif_stop_queue(ndev);

		/* Matches barrier in temac_start_xmit_done */
		smp_mb();

		/* Space might have just been freed - check again */
		if (temac_check_tx_bd_space(lp, num_frag + 1))
			return NETDEV_TX_BUSY;

		netif_wake_queue(ndev);
	}

	cur_p->app0 = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned int csum_start_off = skb_checksum_start_offset(skb);
		unsigned int csum_index_off = csum_start_off + skb->csum_offset;

		cur_p->app0 |= cpu_to_be32(0x000001); /* TX Checksum Enabled */
		cur_p->app1 = cpu_to_be32((csum_start_off << 16)
					  | csum_index_off);
		cur_p->app2 = 0;  /* initial checksum seed */
	}

	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_SOP);
	skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
				      skb_headlen(skb), DMA_TO_DEVICE);
	cur_p->len = cpu_to_be32(skb_headlen(skb));
	if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr))) {
		dev_kfree_skb_any(skb);
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	cur_p->phys = cpu_to_be32(skb_dma_addr);

	for (ii = 0; ii < num_frag; ii++) {
		if (++lp->tx_bd_tail >= lp->tx_bd_num)
			lp->tx_bd_tail = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		skb_dma_addr = dma_map_single(ndev->dev.parent,
					      skb_frag_address(frag),
					      skb_frag_size(frag),
					      DMA_TO_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
			if (--lp->tx_bd_tail < 0)
				lp->tx_bd_tail = lp->tx_bd_num - 1;
			cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
			while (--ii >= 0) {
				--frag;
				dma_unmap_single(ndev->dev.parent,
						 be32_to_cpu(cur_p->phys),
						 skb_frag_size(frag),
						 DMA_TO_DEVICE);
				if (--lp->tx_bd_tail < 0)
					lp->tx_bd_tail = lp->tx_bd_num - 1;
				cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
			}
			dma_unmap_single(ndev->dev.parent,
					 be32_to_cpu(cur_p->phys),
					 skb_headlen(skb), DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			ndev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
		cur_p->phys = cpu_to_be32(skb_dma_addr);
		cur_p->len = cpu_to_be32(skb_frag_size(frag));
		cur_p->app0 = 0;
		frag++;
	}
	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);

	/* Mark last fragment with skb address, so it can be consumed
	 * in temac_start_xmit_done()
	 */
	ptr_to_txbd((void *)skb, cur_p);

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	lp->tx_bd_tail++;
	if (lp->tx_bd_tail >= lp->tx_bd_num)
		lp->tx_bd_tail = 0;

	skb_tx_timestamp(skb);

	/* Kick off the transfer */
	wmb();
	lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */

	if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;
}
static int ll_temac_recv_buffers_available(struct temac_local *lp)
{
	int available;

	if (!lp->rx_skb[lp->rx_bd_ci])
		return 0;
	available = 1 + lp->rx_bd_tail - lp->rx_bd_ci;
	if (available <= 0)
		available += lp->rx_bd_num;
	return available;
}
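
/* Worked example: with rx_bd_num == 4, rx_bd_ci == 3 and
 * rx_bd_tail == 1, the descriptors still available to the hardware are
 * 3, 0 and 1: available = 1 + 1 - 3 = -1, then -1 + 4 = 3.
 */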
static void ll_temac_recv(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	unsigned long flags;
	int rx_bd;
	bool update_tail = false;

	spin_lock_irqsave(&lp->rx_lock, flags);

	/* Process all received buffers, passing them on to the network
	 * stack. After this, the buffer descriptors will be in an
	 * un-allocated state, where no skb is allocated for them, and
	 * they are therefore not available for TEMAC/DMA.
	 */
	do {
		struct cdmac_bd *bd = &lp->rx_bd_v[lp->rx_bd_ci];
		struct sk_buff *skb = lp->rx_skb[lp->rx_bd_ci];
		unsigned int bdstat = be32_to_cpu(bd->app0);
		int length;

		/* While this should not normally happen, we can end
		 * here when GFP_ATOMIC allocations fail, and we
		 * therefore have un-allocated buffers.
		 */
		if (!skb)
			break;

		/* Loop over all completed buffer descriptors */
		if (!(bdstat & STS_CTRL_APP0_CMPLT))
			break;

		dma_unmap_single(ndev->dev.parent, be32_to_cpu(bd->phys),
				 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
		/* The buffer is not valid for DMA anymore */
		bd->phys = 0;
		bd->len = 0;

		length = be32_to_cpu(bd->app4) & 0x3FFF;
		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, ndev);
		skb_checksum_none_assert(skb);

		/* if we're doing Rx csum offload, set it up */
		if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
		    (skb->protocol == htons(ETH_P_IP)) &&
		    (skb->len > 64)) {
			/* Convert from device endianness (be32) to cpu
			 * endianness, and if necessary swap the bytes
			 * (back) for proper IP checksum byte order
			 * (be16).
			 */
			skb->csum = htons(be32_to_cpu(bd->app3) & 0xFFFF);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		if (!skb_defer_rx_timestamp(skb))
			netif_rx(skb);
		/* The skb buffer is now owned by network stack above */
		lp->rx_skb[lp->rx_bd_ci] = NULL;

		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += length;

		rx_bd = lp->rx_bd_ci;
		if (++lp->rx_bd_ci >= lp->rx_bd_num)
			lp->rx_bd_ci = 0;
	} while (rx_bd != lp->rx_bd_tail);

	/* DMA operations will halt when the last buffer descriptor is
	 * processed (ie. the one pointed to by RX_TAILDESC_PTR).
	 * When that happens, no more interrupt events will be
	 * generated. No IRQ_COAL or IRQ_DLY, and not even an
	 * IRQ_ERR. To avoid stalling, we schedule a delayed work
	 * when there is a potential risk of that happening. The work
	 * will call this function, and thus re-schedule itself until
	 * enough buffers are available again.
	 */
	if (ll_temac_recv_buffers_available(lp) < lp->coalesce_count_rx)
		schedule_delayed_work(&lp->restart_work, HZ / 1000);

	/* Allocate new buffers for those buffer descriptors that were
	 * passed to the network stack. Note that GFP_ATOMIC allocations
	 * can fail (e.g. when a larger burst of GFP_ATOMIC
	 * allocations occurs), so while we try to allocate all
	 * buffers in the same interrupt where they were processed, we
	 * continue with what we could get in case of allocation
	 * failure. Allocation of remaining buffers will be retried
	 * in following calls.
	 */
	while (1) {
		struct sk_buff *skb;
		struct cdmac_bd *bd;
		dma_addr_t skb_dma_addr;

		rx_bd = lp->rx_bd_tail + 1;
		if (rx_bd >= lp->rx_bd_num)
			rx_bd = 0;
		bd = &lp->rx_bd_v[rx_bd];

		if (bd->phys)
			break;	/* All skb's allocated */

		skb = netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE);
		if (!skb) {
			dev_warn(&ndev->dev, "skb alloc failed\n");
			break;
		}

		skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
					      XTE_MAX_JUMBO_FRAME_SIZE,
					      DMA_FROM_DEVICE);
		if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent,
						   skb_dma_addr))) {
			dev_kfree_skb_any(skb);
			break;
		}

		bd->phys = cpu_to_be32(skb_dma_addr);
		bd->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
		bd->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
		lp->rx_skb[rx_bd] = skb;

		lp->rx_bd_tail = rx_bd;
		update_tail = true;
	}

	/* Move tail pointer when buffers have been allocated */
	if (update_tail) {
		lp->dma_out(lp, RX_TAILDESC_PTR,
			    lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_tail);
	}

	spin_unlock_irqrestore(&lp->rx_lock, flags);
}
/* Function scheduled to ensure a restart in case of DMA halt
 * condition caused by running out of buffer descriptors.
 */
static void ll_temac_restart_work_func(struct work_struct *work)
{
	struct temac_local *lp = container_of(work, struct temac_local,
					      restart_work.work);
	struct net_device *ndev = lp->ndev;

	ll_temac_recv(ndev);
}
static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct temac_local *lp = netdev_priv(ndev);
	unsigned int status;

	status = lp->dma_in(lp, TX_IRQ_REG);
	lp->dma_out(lp, TX_IRQ_REG, status);

	if (status & (IRQ_COAL | IRQ_DLY))
		temac_start_xmit_done(lp->ndev);
	if (status & (IRQ_ERR | IRQ_DMAERR))
		dev_err_ratelimited(&ndev->dev,
				    "TX error 0x%x TX_CHNL_STS=0x%08x\n",
				    status, lp->dma_in(lp, TX_CHNL_STS));

	return IRQ_HANDLED;
}

static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct temac_local *lp = netdev_priv(ndev);
	unsigned int status;

	/* Read and clear the status registers */
	status = lp->dma_in(lp, RX_IRQ_REG);
	lp->dma_out(lp, RX_IRQ_REG, status);

	if (status & (IRQ_COAL | IRQ_DLY))
		ll_temac_recv(lp->ndev);
	if (status & (IRQ_ERR | IRQ_DMAERR))
		dev_err_ratelimited(&ndev->dev,
				    "RX error 0x%x RX_CHNL_STS=0x%08x\n",
				    status, lp->dma_in(lp, RX_CHNL_STS));

	return IRQ_HANDLED;
}
static int temac_open(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = NULL;
	int rc;

	dev_dbg(&ndev->dev, "temac_open()\n");

	if (lp->phy_node) {
		phydev = of_phy_connect(lp->ndev, lp->phy_node,
					temac_adjust_link, 0, 0);
		if (!phydev) {
			dev_err(lp->dev, "of_phy_connect() failed\n");
			return -ENODEV;
		}
		phy_start(phydev);
	} else if (strlen(lp->phy_name) > 0) {
		phydev = phy_connect(lp->ndev, lp->phy_name, temac_adjust_link,
				     lp->phy_interface);
		if (IS_ERR(phydev)) {
			dev_err(lp->dev, "phy_connect() failed\n");
			return PTR_ERR(phydev);
		}
		phy_start(phydev);
	}

	temac_device_reset(ndev);

	rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
	if (rc)
		goto err_tx_irq;
	rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev);
	if (rc)
		goto err_rx_irq;

	return 0;

err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	if (phydev)
		phy_disconnect(phydev);
	dev_err(lp->dev, "request_irq() failed\n");
	return rc;
}
static int temac_stop(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	dev_dbg(&ndev->dev, "temac_close()\n");

	cancel_delayed_work_sync(&lp->restart_work);

	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	if (phydev)
		phy_disconnect(phydev);

	temac_dma_bd_release(ndev);

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void
temac_poll_controller(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);

	ll_temac_rx_irq(lp->rx_irq, ndev);
	ll_temac_tx_irq(lp->tx_irq, ndev);

	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif
static const struct net_device_ops temac_netdev_ops = {
	.ndo_open = temac_open,
	.ndo_stop = temac_stop,
	.ndo_start_xmit = temac_start_xmit,
	.ndo_set_rx_mode = temac_set_multicast_list,
	.ndo_set_mac_address = temac_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = phy_do_ioctl_running,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = temac_poll_controller,
#endif
};
/* ---------------------------------------------------------------------
 * SYSFS device attributes
 */
static ssize_t temac_show_llink_regs(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct temac_local *lp = netdev_priv(ndev);
	int i, len = 0;

	for (i = 0; i < 0x11; i++)
		len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i),
			       (i % 8) == 7 ? "\n" : " ");
	len += sprintf(buf + len, "\n");

	return len;
}

static DEVICE_ATTR(llink_regs, 0440, temac_show_llink_regs, NULL);

static struct attribute *temac_device_attrs[] = {
	&dev_attr_llink_regs.attr,
	NULL,
};

static const struct attribute_group temac_attr_group = {
	.attrs = temac_device_attrs,
};
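
/* Usage note: the attribute appears as a read-only "llink_regs" file
 * under the platform device's sysfs directory; reading it dumps the
 * first 0x11 LocalLink DMA registers, eight words per line.
 */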
/* ---------------------------------------------------------------------
 * ethtool support
 */

static void ll_temac_ethtools_get_ringparam(struct net_device *ndev,
					    struct ethtool_ringparam *ering)
{
	struct temac_local *lp = netdev_priv(ndev);

	ering->rx_max_pending = RX_BD_NUM_MAX;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->tx_max_pending = TX_BD_NUM_MAX;
	ering->rx_pending = lp->rx_bd_num;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->tx_pending = lp->tx_bd_num;
}

static int ll_temac_ethtools_set_ringparam(struct net_device *ndev,
					   struct ethtool_ringparam *ering)
{
	struct temac_local *lp = netdev_priv(ndev);

	if (ering->rx_pending > RX_BD_NUM_MAX ||
	    ering->rx_mini_pending ||
	    ering->rx_jumbo_pending ||
	    ering->tx_pending > TX_BD_NUM_MAX)
		return -EINVAL;

	if (netif_running(ndev))
		return -EBUSY;

	lp->rx_bd_num = ering->rx_pending;
	lp->tx_bd_num = ering->tx_pending;
	return 0;
}
static int
ll_temac_ethtools_get_coalesce(struct net_device *ndev,
			       struct ethtool_coalesce *ec,
			       struct kernel_ethtool_coalesce *kernel_coal,
			       struct netlink_ext_ack *extack)
{
	struct temac_local *lp = netdev_priv(ndev);

	ec->rx_max_coalesced_frames = lp->coalesce_count_rx;
	ec->tx_max_coalesced_frames = lp->coalesce_count_tx;
	ec->rx_coalesce_usecs = (lp->coalesce_delay_rx * 512) / 100;
	ec->tx_coalesce_usecs = (lp->coalesce_delay_tx * 512) / 100;
	return 0;
}
static int
ll_temac_ethtools_set_coalesce(struct net_device *ndev,
			       struct ethtool_coalesce *ec,
			       struct kernel_ethtool_coalesce *kernel_coal,
			       struct netlink_ext_ack *extack)
{
	struct temac_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EFAULT;
	}

	if (ec->rx_max_coalesced_frames)
		lp->coalesce_count_rx = ec->rx_max_coalesced_frames;
	if (ec->tx_max_coalesced_frames)
		lp->coalesce_count_tx = ec->tx_max_coalesced_frames;
	/* With typical LocalLink clock speed of 200 MHz and
	 * C_PRESCALAR=1023, each delay count corresponds to 5.12 us.
	 */
	if (ec->rx_coalesce_usecs)
		lp->coalesce_delay_rx =
			min(255U, (ec->rx_coalesce_usecs * 100) / 512);
	if (ec->tx_coalesce_usecs)
		lp->coalesce_delay_tx =
			min(255U, (ec->tx_coalesce_usecs * 100) / 512);

	return 0;
}
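
/* Conversion example: requesting rx_coalesce_usecs = 100 stores
 * min(255, (100 * 100) / 512) == 19 delay counts, which reads back as
 * (19 * 512) / 100 == 97 us due to integer rounding.
 */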
static const struct ethtool_ops temac_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.nway_reset = phy_ethtool_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
	.get_ringparam = ll_temac_ethtools_get_ringparam,
	.set_ringparam = ll_temac_ethtools_set_ringparam,
	.get_coalesce = ll_temac_ethtools_get_coalesce,
	.set_coalesce = ll_temac_ethtools_set_coalesce,
};
static int temac_probe(struct platform_device *pdev)
{
	struct ll_temac_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np;
	struct temac_local *lp;
	struct net_device *ndev;
	u8 addr[ETH_ALEN];
	__be32 *p;
	bool little_endian;
	int rc = 0;

	/* Init network device structure */
	ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &temac_netdev_ops;
	ndev->ethtool_ops = &temac_ethtool_ops;
#if 0
	ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */
	ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
	ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */
	ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; /* Transmit VLAN hw accel */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; /* Receive VLAN hw acceleration */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; /* Receive VLAN filtering */
	ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */
	ndev->features |= NETIF_F_GSO; /* Enable software GSO. */
	ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */
	ndev->features |= NETIF_F_LRO; /* large receive offload */
#endif
	/* setup temac private info structure */
	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &pdev->dev;
	lp->options = XTE_OPTION_DEFAULTS;
	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
	lp->tx_bd_num = TX_BD_NUM_DEFAULT;
	spin_lock_init(&lp->rx_lock);
	INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func);

	/* Setup spinlock for synchronization of indirect register access */
	if (pdata) {
		if (!pdata->indirect_lock) {
			dev_err(&pdev->dev,
				"indirect_lock missing in platform_data\n");
			return -EINVAL;
		}
		lp->indirect_lock = pdata->indirect_lock;
	} else {
		lp->indirect_lock = devm_kmalloc(&pdev->dev,
						 sizeof(*lp->indirect_lock),
						 GFP_KERNEL);
		if (!lp->indirect_lock)
			return -ENOMEM;
		spin_lock_init(lp->indirect_lock);
	}

	/* map device registers */
	lp->regs = devm_platform_ioremap_resource_byname(pdev, 0);
	if (IS_ERR(lp->regs)) {
		dev_err(&pdev->dev, "could not map TEMAC registers\n");
		return PTR_ERR(lp->regs);
	}
	/* Select register access functions with the specified
	 * endianness mode. Default for OF devices is big-endian.
	 */
	little_endian = false;
	if (temac_np) {
		if (of_get_property(temac_np, "little-endian", NULL))
			little_endian = true;
	} else if (pdata) {
		little_endian = pdata->reg_little_endian;
	}
	if (little_endian) {
		lp->temac_ior = _temac_ior_le;
		lp->temac_iow = _temac_iow_le;
	} else {
		lp->temac_ior = _temac_ior_be;
		lp->temac_iow = _temac_iow_be;
	}

	/* Setup checksum offload, but default to off if not specified */
	lp->temac_features = 0;
	if (temac_np) {
		p = (__be32 *)of_get_property(temac_np, "xlnx,txcsum", NULL);
		if (p && be32_to_cpu(*p))
			lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
		p = (__be32 *)of_get_property(temac_np, "xlnx,rxcsum", NULL);
		if (p && be32_to_cpu(*p))
			lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
	} else if (pdata) {
		if (pdata->txcsum)
			lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
		if (pdata->rxcsum)
			lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
	}
	if (lp->temac_features & TEMAC_FEATURE_TX_CSUM)
		/* Can checksum TCP/UDP over IPv4. */
		ndev->features |= NETIF_F_IP_CSUM;

	/* Defaults for IRQ delay/coalescing setup. These are
	 * configuration values, so they do not belong in the device-tree.
	 */
	lp->coalesce_delay_tx = 0x10;
	lp->coalesce_count_tx = 0x22;
	lp->coalesce_delay_rx = 0xff;
	lp->coalesce_count_rx = 0x07;
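
	/* With the 5.12 us per delay count noted in
	 * ll_temac_ethtools_set_coalesce(), these defaults correspond to
	 * roughly 82 us TX delay (0x10 * 5.12) and 1305 us RX delay
	 * (0xff * 5.12), at 0x22 (34) and 7 coalesced frames respectively.
	 */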
	/* Setup LocalLink DMA */
	if (temac_np) {
		/* Find the DMA node, map the DMA registers, and
		 * decode the DMA IRQs.
		 */
		dma_np = of_parse_phandle(temac_np, "llink-connected", 0);
		if (!dma_np) {
			dev_err(&pdev->dev, "could not find DMA node\n");
			return -ENODEV;
		}

		/* Setup the DMA register accesses, could be DCR or
		 * memory mapped.
		 */
		if (temac_dcr_setup(lp, pdev, dma_np)) {
			/* no DCR in the device tree, try non-DCR */
			lp->sdma_regs = devm_of_iomap(&pdev->dev, dma_np, 0,
						      NULL);
			if (IS_ERR(lp->sdma_regs)) {
				dev_err(&pdev->dev,
					"unable to map DMA registers\n");
				of_node_put(dma_np);
				return PTR_ERR(lp->sdma_regs);
			}
			if (of_get_property(dma_np, "little-endian", NULL)) {
				lp->dma_in = temac_dma_in32_le;
				lp->dma_out = temac_dma_out32_le;
			} else {
				lp->dma_in = temac_dma_in32_be;
				lp->dma_out = temac_dma_out32_be;
			}
			dev_dbg(&pdev->dev, "MEM base: %p\n", lp->sdma_regs);
		}

		/* Get DMA RX and TX interrupts */
		lp->rx_irq = irq_of_parse_and_map(dma_np, 0);
		lp->tx_irq = irq_of_parse_and_map(dma_np, 1);

		/* Finished with the DMA node; drop the reference */
		of_node_put(dma_np);
	} else if (pdata) {
		/* 2nd memory resource specifies DMA registers */
		lp->sdma_regs = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(lp->sdma_regs)) {
			dev_err(&pdev->dev,
				"could not map DMA registers\n");
			return PTR_ERR(lp->sdma_regs);
		}
		if (pdata->dma_little_endian) {
			lp->dma_in = temac_dma_in32_le;
			lp->dma_out = temac_dma_out32_le;
		} else {
			lp->dma_in = temac_dma_in32_be;
			lp->dma_out = temac_dma_out32_be;
		}

		/* Get DMA RX and TX interrupts */
		lp->rx_irq = platform_get_irq(pdev, 0);
		lp->tx_irq = platform_get_irq(pdev, 1);
		/* IRQ delay/coalescing setup */
		if (pdata->tx_irq_timeout || pdata->tx_irq_count) {
			lp->coalesce_delay_tx = pdata->tx_irq_timeout;
			lp->coalesce_count_tx = pdata->tx_irq_count;
		}
		if (pdata->rx_irq_timeout || pdata->rx_irq_count) {
			lp->coalesce_delay_rx = pdata->rx_irq_timeout;
			lp->coalesce_count_rx = pdata->rx_irq_count;
		}
	}

	/* Error handle returned DMA RX and TX interrupts */
	if (lp->rx_irq < 0) {
		if (lp->rx_irq != -EPROBE_DEFER)
			dev_err(&pdev->dev, "could not get DMA RX irq\n");
		return lp->rx_irq;
	}
	if (lp->tx_irq < 0) {
		if (lp->tx_irq != -EPROBE_DEFER)
			dev_err(&pdev->dev, "could not get DMA TX irq\n");
		return lp->tx_irq;
	}
	if (temac_np) {
		/* Retrieve the MAC address */
		rc = of_get_mac_address(temac_np, addr);
		if (rc) {
			dev_err(&pdev->dev, "could not find MAC address\n");
			return -ENODEV;
		}
		temac_init_mac_address(ndev, addr);
	} else if (pdata) {
		temac_init_mac_address(ndev, pdata->mac_addr);
	}

	rc = temac_mdio_setup(lp, pdev);
	if (rc)
		dev_warn(&pdev->dev, "error registering MDIO bus\n");

	if (temac_np) {
		lp->phy_node = of_parse_phandle(temac_np, "phy-handle", 0);
		if (lp->phy_node)
			dev_dbg(lp->dev, "using PHY node %pOF\n", temac_np);
	} else if (pdata) {
		snprintf(lp->phy_name, sizeof(lp->phy_name),
			 PHY_ID_FMT, lp->mii_bus->id, pdata->phy_addr);
		lp->phy_interface = pdata->phy_interface;
	}

	/* Add the device attributes */
	rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
	if (rc) {
		dev_err(lp->dev, "Error creating sysfs files\n");
		goto err_sysfs_create;
	}

	rc = register_netdev(lp->ndev);
	if (rc) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", rc);
		goto err_register_ndev;
	}

	return 0;

err_register_ndev:
	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
err_sysfs_create:
	if (lp->phy_node)
		of_node_put(lp->phy_node);
	temac_mdio_teardown(lp);
	return rc;
}
static int temac_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct temac_local *lp = netdev_priv(ndev);

	unregister_netdev(ndev);
	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
	if (lp->phy_node)
		of_node_put(lp->phy_node);
	temac_mdio_teardown(lp);
	return 0;
}
static const struct of_device_id temac_of_match[] = {
	{ .compatible = "xlnx,xps-ll-temac-1.01.b", },
	{ .compatible = "xlnx,xps-ll-temac-2.00.a", },
	{ .compatible = "xlnx,xps-ll-temac-2.02.a", },
	{ .compatible = "xlnx,xps-ll-temac-2.03.a", },
	{},
};
MODULE_DEVICE_TABLE(of, temac_of_match);

static struct platform_driver temac_driver = {
	.probe = temac_probe,
	.remove = temac_remove,
	.driver = {
		.name = "xilinx_temac",
		.of_match_table = temac_of_match,
	},
};

module_platform_driver(temac_driver);

MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
MODULE_AUTHOR("Yoshio Kashiwagi");
MODULE_LICENSE("GPL");